This commit is contained in:
vet
2026-04-13 01:27:34 +07:00
commit c7c3c02bc3
18 changed files with 2730 additions and 0 deletions

106
01-init-env.sh Executable file
View File

@@ -0,0 +1,106 @@
#!/usr/bin/env bash
# =============================================================================
# 01-init-env.sh — generate the .env.deploy-test config template (test-server env)
#
# Purpose: create .env.deploy-test in the project root, to be filled with the
#          connection details of every service.
# Next step: edit .env.deploy-test, then run 02-patch-config.sh
# =============================================================================
set -euo pipefail
source "$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/common.sh"
init_dirs
init_script_log  # per-run script execution log

header "步骤 1 / 5 — 初始化 .env.deploy-test 配置"

# Idempotency guard: never overwrite an existing env file — show it instead.
if [[ -f "$ENV_FILE" ]]; then
  warn ".env.deploy-test 已存在,跳过创建"
  echo ""
  echo -e "${BOLD}当前配置:${NC}"
  # Print current settings, skipping comment and blank lines.
  grep -v '^\s*#' "$ENV_FILE" | grep -v '^\s*$' | sed 's/^/ /'
  echo ""
  echo -e "如需重置:${YELLOW}rm $ENV_FILE && $0${NC}"
  exit 0
fi

# NOTE(review): this template embeds live-looking credentials (MongoDB password,
# AWS access keys, LiveKit/Tencent secrets). If these are real, committing them
# to the repository is a security risk — rotate them and load from a secret
# store or an untracked file instead.
# Quoted 'EOF' delimiter: the template below is written out verbatim,
# with no shell expansion.
cat > "$ENV_FILE" <<'EOF'
# =============================================================================
# 测试服务器环境配置 — .env.deploy-test
# 部署场景:测试服务器(有公网 IP所有服务本地运行
# 编辑完成后执行:./deploy-test/02-patch-config.sh
# =============================================================================
# ── 测试服务器公网 IP必填─────────────────────────────────────────────────
# 本机(测试服务器)的公网 IPLiveKit WebRTC 媒体流需要通过此 IP 对外暴露
# 本地 Mac 环境deploy-local也会引用此地址连接 LiveKit
DEPLOY_TEST_IP=54.116.29.247
# ── MongoDB远程服务必填─────────────────────────────────────────────────
# open-im-server / chat / build-server 共用同一个连接,只是 DB 名不同
MONGO_HOST=47.237.103.4
MONGO_PORT=27017
MONGO_USERNAME=minio_pC5wMB
MONGO_PASSWORD=rI57PJsJhnz_qlRkfnTa0RPT
MONGO_AUTHSOURCE=openim_v3
MONGO_DATABASE=openim_v3 # open-im-server / chat 使用
BUILD_MONGO_DATABASE=build # build-server 使用
# ── Amazon S3 — open-im-serverIM 聊天文件存储,必填)──────────────────────
# 对应 open-im-server/config/openim-rpc-third.yml → object.aws
OPENIM_AWS_REGION=ap-southeast-1
OPENIM_AWS_BUCKET=im1688
OPENIM_AWS_ACCESS_KEY_ID=AKIA5TMMSZWVFYCLKJ2G
OPENIM_AWS_SECRET_ACCESS_KEY=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
# 自定义 EndpointCloudFlare R2 / 其他 S3 兼容服务),留空则使用 AWS 官方
OPENIM_AWS_ENDPOINT=
OPENIM_AWS_PUBLIC_READ=true
# ── Amazon S3 — build-serverApp APK/IPA 构建产物存储,必填)───────────────
# 对应 build-server/config/config.yaml → aws
BUILD_AWS_REGION=ap-east-1
BUILD_AWS_BUCKET=im-hk-apk
BUILD_AWS_ACCESS_KEY=AKIASJ7PFAWCXUDC7KQV
BUILD_AWS_SECRET_KEY=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
# ── RedisDocker 本地运行)─────────────────────────────────────────────────
REDIS_PORT=6379
REDIS_PASSWORD=openIM123
# ── KafkaDocker 本地运行KRaft 模式)────────────────────────────────────
KAFKA_PORT=9092
# ── EtcdDocker 本地运行,服务发现注册中心)───────────────────────────────
ETCD_PORT=2379
# ── LiveKit ServerDocker 本地运行,使用本机公网 IP──────────────────────
# LiveKit 通过 Docker 启动(容器名: dev-livekit复用 dev-redis。
# WebRTC 媒体流需要公网 IP使用上方 DEPLOY_TEST_IP。
#
# LIVEKIT_NODE_IP: = DEPLOY_TEST_IPWebRTC 客户端通过此 IP 直连媒体流
# LIVEKIT_URL: 后端服务连接 LiveKit 的地址(服务器内部用回环即可)
# LIVEKIT_API_KEY / LIVEKIT_API_SECRET: 来自 livekit/livekit.yaml → keys 段
LIVEKIT_NODE_IP=54.116.29.247 # 与 DEPLOY_TEST_IP 保持一致
LIVEKIT_URL=ws://127.0.0.1:7880
LIVEKIT_API_KEY=API8462dba2
LIVEKIT_API_SECRET=U0l7/3IQjWzusK2eOrWlGmLD5jSzALvV2G5tIxGQaQc=
# ── Cloudflare Streamlivestream 服务使用)──────────────────────────────────
# 来源: livestream/config.yaml → cloudflare 段(若已有值请从该文件复制)
CF_ACCOUNT_ID=
CF_API_TOKEN=
CF_EMAIL=
CF_API_KEY=
CF_CUSTOMER_CODE=
# ── 腾讯云 RTClivecloud 服务使用)─────────────────────────────────────────
# 来源: livecloud/config/config.yml → cloud.tencent 段
TENCENT_SDK_APP_ID=20033091
TENCENT_SDK_SECRET_KEY=cceba44084aaa04f8c48a1858ffd5385875c3a5ec006d34278d9d3714b40e3b0
EOF
success ".env.deploy-test 已创建: $ENV_FILE"
echo ""
echo -e "${BOLD}下一步:${NC}"
echo -e " 1. 确认 DEPLOY_TEST_IP 等关键配置正确:"
echo -e " ${CYAN}vim $ENV_FILE${NC}"
echo -e " 2. 将配置写入各服务 YAML"
echo -e " ${CYAN}./deploy-test/02-patch-config.sh${NC}"

483
02-patch-config.sh Executable file
View File

@@ -0,0 +1,483 @@
#!/usr/bin/env bash
# =============================================================================
# 02-patch-config.sh — write the variables from .env.deploy-test into each
# service's YAML config files.
#
# Files touched:
#   open-im-server/config/redis.yml kafka.yml discovery.yml
#   open-im-server/config/mongodb.yml minio.yml openim-rpc-third.yml
#   chat/config/redis.yml discovery.yml mongodb.yml
#   meetingmsg/manifest/config/config.yaml
#
# Next step: 03-start-infra.sh (start the Docker infrastructure)
# =============================================================================
set -euo pipefail
source "$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/common.sh"
load_env
init_script_log  # per-run script execution log

header "步骤 2 / 5 — 修改服务配置文件"

OPENIM_CONF="$ROOT_DIR/open-im-server/config"
CHAT_CONF="$ROOT_DIR/chat/config"
# Fail fast when the expected config directories are missing.
[[ ! -d "$OPENIM_CONF" ]] && { error "目录不存在: $OPENIM_CONF"; exit 1; }
[[ ! -d "$CHAT_CONF" ]] && { error "目录不存在: $CHAT_CONF"; exit 1; }

# ──────────────────────────────────────────────────────────────────────────────
# open-im-server
# ──────────────────────────────────────────────────────────────────────────────
step "open-im-server 配置"

# Redis (local Docker container, started by 03-start-infra.sh)
cat > "$OPENIM_CONF/redis.yml" <<EOF
address: [ "127.0.0.1:${REDIS_PORT}" ]
username: ''
password: '${REDIS_PASSWORD}'
enablePipeline: false
clusterMode: false
db: 0
maxRetry: 10
poolSize: 100
onlineKeyPrefix: "openim:local-dev"
onlineKeyPrefixHashTag: false
sentinelMode:
  masterName: "redis-master"
  sentinelsAddrs: []
  routeByLatency: false
  routeRandomly: false
EOF
success " redis.yml → 127.0.0.1:${REDIS_PORT}"

# Kafka (local Docker container, KRaft mode)
cat > "$OPENIM_CONF/kafka.yml" <<EOF
username:
password:
producerAck:
compressType: none
address: ["127.0.0.1:${KAFKA_PORT}"]
toRedisTopic: toRedis
toMongoTopic: toMongo
toPushTopic: toPush
toOfflinePushTopic: toOfflinePush
toRedisGroupID: redis
toMongoGroupID: mongo
toPushGroupID: push
toOfflinePushGroupID: offlinePush
tls:
  enableTLS: false
  caCrt:
  clientCrt:
  clientKey:
  clientKeyPwd:
  insecureSkipVerify: false
EOF
success " kafka.yml → 127.0.0.1:${KAFKA_PORT}"

# Etcd (service discovery / registry)
cat > "$OPENIM_CONF/discovery.yml" <<EOF
enable: etcd
etcd:
  rootDirectory: openim
  address: ["127.0.0.1:${ETCD_PORT}"]
  username:
  password:
kubernetes:
  namespace: default
rpcService:
  user: user-rpc-service
  friend: friend-rpc-service
  msg: msg-rpc-service
  push: push-rpc-service
  messageGateway: messagegateway-rpc-service
  group: group-rpc-service
  auth: auth-rpc-service
  conversation: conversation-rpc-service
  third: third-rpc-service
EOF
success " discovery.yml → etcd 127.0.0.1:${ETCD_PORT}"

# MongoDB (remote instance; credentials come from .env.deploy-test)
cat > "$OPENIM_CONF/mongodb.yml" <<EOF
uri: "mongodb://${MONGO_USERNAME}:${MONGO_PASSWORD}@${MONGO_HOST}:${MONGO_PORT}/${MONGO_DATABASE}?authSource=${MONGO_AUTHSOURCE}&directConnection=true"
address: []
database: ${MONGO_DATABASE}
username:
password:
authSource:
mongoMode: "standalone"
maxPoolSize: 100
maxRetry: 10
EOF
success " mongodb.yml → ${MONGO_HOST}:${MONGO_PORT}/${MONGO_DATABASE}"

# minio.yml — kept only as a marker that storage moved to S3; the effective
# storage driver is selected by object.enable in openim-rpc-third.yml.
cat > "$OPENIM_CONF/minio.yml" <<EOF
# 已切换为 Amazon S3存储驱动由 openim-rpc-third.yml 中 object.enable: aws 控制
bucket: ${OPENIM_AWS_BUCKET}
accessKeyID: ${OPENIM_AWS_ACCESS_KEY_ID}
secretAccessKey: ${OPENIM_AWS_SECRET_ACCESS_KEY}
sessionToken:
internalAddress: s3.${OPENIM_AWS_REGION}.amazonaws.com
externalAddress: https://s3.${OPENIM_AWS_REGION}.amazonaws.com
publicRead: ${OPENIM_AWS_PUBLIC_READ}
EOF
success " minio.yml → S3 (${OPENIM_AWS_REGION})"
# openim-rpc-third.yml — switch the object-storage driver to `enable: aws` and
# refresh the AWS credential block in place (a .bak copy is kept first).
THIRD_CONF="$OPENIM_CONF/openim-rpc-third.yml"
if [[ -f "$THIRD_CONF" ]]; then
  cp "$THIRD_CONF" "${THIRD_CONF}.bak"
  # Heredoc delimiter is intentionally unquoted: the shell interpolates the
  # ${OPENIM_AWS_*} values into the Python source before python3 runs it.
  python3 - "$THIRD_CONF" <<PYEOF
import re, sys

path = sys.argv[1]
with open(path) as f:
    content = f.read()

# Flip every "enable: <driver>" to "enable: aws".
content = re.sub(r'(\benable:\s*)\S+', r'\1aws', content)

endpoint_line = ""
endpoint_val = "${OPENIM_AWS_ENDPOINT:-}"
if endpoint_val:
    endpoint_line = f"    endpoint: {endpoint_val}\n"

new_aws = f"""  aws:
{endpoint_line}    region: ${OPENIM_AWS_REGION}
    bucket: ${OPENIM_AWS_BUCKET}
    accessKeyID: ${OPENIM_AWS_ACCESS_KEY_ID}
    secretAccessKey: ${OPENIM_AWS_SECRET_ACCESS_KEY}
    sessionToken:
    publicRead: ${OPENIM_AWS_PUBLIC_READ}"""

# BUG FIX: the previous pattern used '{{2}}' (an f-string brace escape pasted
# into a plain raw string), which the regex engine reads as literal braces, so
# it never matched the real '  aws:' block and the credentials were silently
# left untouched. Use a plain {2} quantifier. A lambda replacement is used so
# backslashes in the interpolated secret key cannot be misread as regex group
# references, and re.subn lets us warn when nothing matched.
content, n_subs = re.subn(
    r'\s{2}aws:\n(?:[ \t]+\S[^\n]*\n?)*',
    lambda _m: '\n' + new_aws + '\n',
    content,
    count=1
)
if n_subs == 0:
    sys.stderr.write("warning: no 'aws:' block found in %s\n" % path)

with open(path, 'w') as f:
    f.write(content)
PYEOF
  success " openim-rpc-third.yml → enable: aws, bucket=${OPENIM_AWS_BUCKET} (备份: .bak)"
else
  warn " openim-rpc-third.yml 不存在,跳过"
fi
# ──────────────────────────────────────────────────────────────────────────────
# chat
# ──────────────────────────────────────────────────────────────────────────────
step "chat 配置"

# Redis (same local Docker container as open-im-server)
cat > "$CHAT_CONF/redis.yml" <<EOF
address: [ "127.0.0.1:${REDIS_PORT}" ]
username: ''
password: '${REDIS_PASSWORD}'
enablePipeline: false
clusterMode: false
db: 0
maxRetry: 10
EOF
success " redis.yml → 127.0.0.1:${REDIS_PORT}"

# Etcd (chat registers its own rpc services)
cat > "$CHAT_CONF/discovery.yml" <<EOF
enable: "etcd"
etcd:
  rootDirectory: openim
  address: [ "127.0.0.1:${ETCD_PORT}" ]
  username: ''
  password: ''
kubernetes:
  namespace: default
rpcService:
  chat: chat-rpc-service
  admin: admin-rpc-service
  bot: bot-rpc-service
EOF
success " discovery.yml → etcd 127.0.0.1:${ETCD_PORT}"

# MongoDB (remote; same connection as open-im-server)
cat > "$CHAT_CONF/mongodb.yml" <<EOF
uri: "mongodb://${MONGO_USERNAME}:${MONGO_PASSWORD}@${MONGO_HOST}:${MONGO_PORT}/${MONGO_DATABASE}?authSource=${MONGO_AUTHSOURCE}&directConnection=true"
address: []
database: ${MONGO_DATABASE}
username:
password:
authSource:
mongoMode: "standalone"
maxPoolSize: 100
maxRetry: 10
EOF
success " mongodb.yml → ${MONGO_HOST}:${MONGO_PORT}/${MONGO_DATABASE}"

# ──────────────────────────────────────────────────────────────────────────────
# meetingmsg
# ──────────────────────────────────────────────────────────────────────────────
step "meetingmsg 配置"
MM_CONF="$ROOT_DIR/meetingmsg/manifest/config/config.yaml"
if [[ -f "$MM_CONF" ]]; then
  cat > "$MM_CONF" <<EOF
server:
  address: :8000
redis:
  default:
    address: 127.0.0.1:${REDIS_PORT}
    pass: ${REDIS_PASSWORD}
logger:
  level: all
  stdout: true
EOF
  success " config.yaml → redis 127.0.0.1:${REDIS_PORT}"
else
  warn " meetingmsg config.yaml 不存在,跳过"
fi

# openim-server webhooks.yml — enable the afterSendGroupMsg callback so group
# messages are forwarded to meetingmsg.
# meetingmsg receives on /event/:command (command=callbackAfterSendGroupMsgCommand)
WH_CONF="$OPENIM_CONF/webhooks.yml"
if [[ -f "$WH_CONF" ]]; then
  cp "$WH_CONF" "${WH_CONF}.bak"
  # sed only rewrites the top-of-line `url:` key; everything else keeps its defaults.
  sed -i.tmp \
    -e 's|^url:.*|url: http://127.0.0.1:8000/event/callbackAfterSendGroupMsgCommand|' \
    "$WH_CONF"
  # Flip only afterSendGroupMsg's enable flag to true (first match, count=1).
  python3 - "$WH_CONF" <<'PYEOF'
import sys, re
path = sys.argv[1]
content = open(path).read()
# 只把 afterSendGroupMsg 下的 enable: false 改为 true
content = re.sub(
    r'(afterSendGroupMsg:\n(?:.*\n)*? enable:) false',
    r'\1 true',
    content, count=1
)
open(path, 'w').write(content)
print(" patched afterSendGroupMsg.enable = true")
PYEOF
  rm -f "${WH_CONF}.tmp"
  success " webhooks.yml → url=http://127.0.0.1:8000/event/callbackAfterSendGroupMsgCommand, afterSendGroupMsg.enable=true (备份: .bak)"
else
  warn " webhooks.yml 不存在跳过meetingmsg 将收不到消息推送)"
fi

# ──────────────────────────────────────────────────────────────────────────────
# livekit server (local Docker, reuses the dev-redis container)
# ──────────────────────────────────────────────────────────────────────────────
step "livekit 配置"
LK_CONF="$ROOT_DIR/livekit/livekit.yaml"
# Keep a backup of any previous config before overwriting.
if [[ -f "$LK_CONF" ]]; then
  cp "$LK_CONF" "${LK_CONF}.bak"
fi
cat > "$LK_CONF" <<EOF
port: 7880
bind_addresses:
  - 0.0.0.0
rtc:
  # WebRTC 媒体流直连端口(客户端直接连 node_ip
  port_range_start: 50000
  port_range_end: 51000
  tcp_port: 7882
  use_external_ip: true
  # 公网 IP来自 .env.local → LIVEKIT_NODE_IP
  node_ip: ${LIVEKIT_NODE_IP}
  enable_loopback_candidate: false
  congestion_control:
    enabled: true
    probing_interval: 2000
# 复用 dev-redis 容器host.docker.internal 可从 Docker 容器访问宿主机端口)
redis:
  address: host.docker.internal:${REDIS_PORT}
  password: ${REDIS_PASSWORD}
  db: 0
# API 密钥(来自 .env.local → LIVEKIT_API_KEY: LIVEKIT_API_SECRET
keys:
  ${LIVEKIT_API_KEY}: ${LIVEKIT_API_SECRET}
logging:
  level: info
  json: false
  sample: false
room:
  auto_create: true
  empty_timeout: 300
  departure_timeout: 20
  enable_remote_unmute: true
  max_participants: 100
development: true
EOF
success " livekit/livekit.yaml → node_ip=${LIVEKIT_NODE_IP}, redis=host.docker.internal:${REDIS_PORT} (备份: .bak)"

# ──────────────────────────────────────────────────────────────────────────────
# livecloud
# ──────────────────────────────────────────────────────────────────────────────
step "livecloud 配置"
LC_CONF="$ROOT_DIR/livecloud/config/config.yml"
if [[ -f "$LC_CONF" ]]; then
  cp "$LC_CONF" "${LC_CONF}.bak"
  # Tencent is the active provider; the aliyun section keeps placeholder values.
  cat > "$LC_CONF" <<EOF
server:
  port: "8080"
  env: "dev"
jwt:
  secret_key: "your-secret-key-change-in-production"
redis:
  host: 127.0.0.1
  port: ${REDIS_PORT}
  password: ${REDIS_PASSWORD}
cloud:
  default_provider: tencent
  load_balance_strategy: round_robin
  aliyun:
    enabled: false
    app_id: your_aliyun_app_id
    app_key: your_aliyun_app_key
    region: cn-hangzhou
    push_domain: push.example.com
    play_domain: play.example.com
  tencent:
    enabled: true
    sdk_app_id: ${TENCENT_SDK_APP_ID}
    sdk_secret_key: ${TENCENT_SDK_SECRET_KEY}
    region: ap-guangzhou
    push_domain: push-tx.example.com
    play_domain: play-tx.example.com
EOF
  success " livecloud/config/config.yml → redis=127.0.0.1:${REDIS_PORT}, tencent sdk_app_id=${TENCENT_SDK_APP_ID} (备份: .bak)"
else
  warn " livecloud/config/config.yml 不存在,跳过"
fi
# ──────────────────────────────────────────────────────────────────────────────
# livestream
# ──────────────────────────────────────────────────────────────────────────────
step "livestream 配置"
LS_CONF="$ROOT_DIR/livestream/config.yaml"
# Back up the previous config if one exists (first run has nothing to copy).
cp "${LS_CONF}" "${LS_CONF}.bak" 2>/dev/null || true
cat > "$LS_CONF" <<EOF
# LiveKit Server来源: livekit/livekit.yaml → keys 段)
livekit:
  url: "${LIVEKIT_URL}"
  apiKey: "${LIVEKIT_API_KEY}"
  apiSecret: "${LIVEKIT_API_SECRET}"
# Cloudflare Stream用于直播推流 CDN
cloudflare:
  accountId: "${CF_ACCOUNT_ID}"
  apiToken: "${CF_API_TOKEN}"
  email: "${CF_EMAIL}"
  apiKey: "${CF_API_KEY}"
  customerCode: "${CF_CUSTOMER_CODE}"
EOF
success " livestream/config.yaml → livekit=${LIVEKIT_URL}, key=${LIVEKIT_API_KEY} (备份: .bak)"
# BUG FIX: the hint used to point users at .env.local, but this deployment
# reads its variables from .env.deploy-test (created by 01-init-env.sh).
# Also switched from `[[ ]] && warn` to `if` so a populated CF_ACCOUNT_ID
# does not leave a dangling non-zero list status under `set -e`.
if [[ -z "${CF_ACCOUNT_ID}" ]]; then
  warn " Cloudflare 配置为空,推流 CDN 功能不可用,请填写 .env.deploy-test 中 CF_* 变量"
fi
# ──────────────────────────────────────────────────────────────────────────────
# build-server
# ──────────────────────────────────────────────────────────────────────────────
step "build-server 配置"
BS_CONF="$ROOT_DIR/build-server/config/config.yaml"
if [[ -f "$BS_CONF" ]]; then
  cp "$BS_CONF" "${BS_CONF}.bak"
  # build-server shares the same MongoDB connection; only the db name differs.
  cat > "$BS_CONF" <<EOF
mode: debug
host: 0.0.0.0
port: 8281
server:
  read_timeout: 30
  write_timeout: 30
database:
  host: ${MONGO_HOST}
  port: ${MONGO_PORT}
  user: ${MONGO_USERNAME}
  password: ${MONGO_PASSWORD}
  dbname: ${BUILD_MONGO_DATABASE}
  auth_source: ${MONGO_AUTHSOURCE}
jwt:
  secret: "your-secret-key-change-in-production"
  expire_time: 24
upload:
  path: "./uploads"
  max_size: 5242880
  allow_exts: "jpg,jpeg,png,gif,webp"
  base_url: "http://localhost:8281"
aws:
  access_key: "${BUILD_AWS_ACCESS_KEY}"
  secret_key: "${BUILD_AWS_SECRET_KEY}"
  region: "${BUILD_AWS_REGION}"
  bucket: "${BUILD_AWS_BUCKET}"
EOF
  success " build-server/config/config.yaml → ${MONGO_HOST}/${BUILD_MONGO_DATABASE}, S3 bucket=${BUILD_AWS_BUCKET} (备份: .bak)"
else
  warn " build-server/config/config.yaml 不存在,跳过"
fi

# ──────────────────────────────────────────────────────────────────────────────
# meetingh5 — generate the Vite env file (test-server environment)
# ──────────────────────────────────────────────────────────────────────────────
step "meetingh5 环境变量"
MH5_ENV="$ROOT_DIR/meetingh5/.env.local"
cat > "$MH5_ENV" <<EOF
# meetingh5 本地环境变量 — 由 deploy-test/02-patch-config.sh 生成,勿手动编辑
# 对应测试服务器DEPLOY_TEST_IP=${DEPLOY_TEST_IP}
#
# 优先级URL参数 ?ws= ?liveApi= > 以下变量 > 代码中的生产默认值
# 弹幕 WebSocketmeetingmsg 服务(:8000
VITE_WS_BASE_URL=ws://${DEPLOY_TEST_IP}:8000
# 直播间 APIlivestream 服务(:8081
VITE_LIVE_API_BASE_URL=http://${DEPLOY_TEST_IP}:8081
EOF
success " meetingh5/.env.local → ws=${DEPLOY_TEST_IP}:8000, liveApi=${DEPLOY_TEST_IP}:8081"

# Final summary of everything that was patched above.
echo ""
success "所有配置文件已更新!"
echo ""
echo -e "${BOLD}已修改配置摘要:${NC}"
echo " Redis → 127.0.0.1:${REDIS_PORT} password=${REDIS_PASSWORD} (Docker)"
echo " Kafka → 127.0.0.1:${KAFKA_PORT} (Docker)"
echo " Etcd → 127.0.0.1:${ETCD_PORT} (Docker)"
echo " MongoDB → ${MONGO_HOST}:${MONGO_PORT} DB(openim)=${MONGO_DATABASE} DB(build)=${BUILD_MONGO_DATABASE}"
echo " LiveKit → ${LIVEKIT_URL} node_ip=${LIVEKIT_NODE_IP} key=${LIVEKIT_API_KEY}"
echo " Tencent RTC → sdk_app_id=${TENCENT_SDK_APP_ID}"
echo " S3 (openim) → s3://${OPENIM_AWS_BUCKET} region=${OPENIM_AWS_REGION}"
echo " S3 (build) → s3://${BUILD_AWS_BUCKET} region=${BUILD_AWS_REGION}"
echo " MeetingMsg → webhook afterSendGroupMsg=enabled → 127.0.0.1:8000"
echo " MeetingH5 → ws=${DEPLOY_TEST_IP}:8000, liveApi=${DEPLOY_TEST_IP}:8081"
echo ""
echo -e "${BOLD}下一步:${NC}"
echo -e " 启动 Docker 基础设施Redis/Kafka/Etcd"
echo -e " ${CYAN}./deploy-test/03-start-infra.sh${NC}"

227
03-start-infra.sh Executable file
View File

@@ -0,0 +1,227 @@
#!/usr/bin/env bash
# =============================================================================
# 03-start-infra.sh — start the Docker infrastructure (Redis / Kafka / Etcd / LiveKit)
#
# Data dir:       .local-dev/docker-data/<svc>/
# Container logs: .local-dev/docker-logs/<svc>/<svc>-YYYYMMDD.log (one file per day)
# Script log:     .local-dev/script-logs/03-start-infra-<ts>.log
#
# Next step: 04-build.sh (compile the backend services)
# =============================================================================
set -euo pipefail
source "$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/common.sh"
load_env
init_dirs
init_script_log  # per-run script execution log
require_docker_running

# Shared Docker logging flags: json-file driver, rotated at 5 x 50MB.
LOG_OPTS=(
  --log-driver json-file
  --log-opt max-size=50m
  --log-opt max-file=5
)

header "步骤 3 / 5 — 启动 Docker 基础设施"

# ──────────────────────────────────────────────────────────────────────────────
# Redis
# ──────────────────────────────────────────────────────────────────────────────
step "Redis"
# Three cases: already running / exists but stopped / never created.
if docker ps --format '{{.Names}}' | grep -q '^dev-redis$'; then
  success "Redis 已在运行 (container=dev-redis) :${REDIS_PORT}"
elif docker ps -a --format '{{.Names}}' | grep -q '^dev-redis$'; then
  info "重新启动已有容器 dev-redis..."
  docker start dev-redis > /dev/null
  success "Redis 已启动 :${REDIS_PORT}"
else
  info "创建并启动 Redis 容器..."
  docker run -d \
    --name dev-redis \
    --restart unless-stopped \
    -p "${REDIS_PORT}:6379" \
    -v "${DATA_DIR}/redis:/data" \
    "${LOG_OPTS[@]}" \
    redis:7-alpine \
    redis-server --requirepass "${REDIS_PASSWORD:-openIM123}" --appendonly yes \
    > /dev/null
  success "Redis 容器已创建并启动 :${REDIS_PORT} (密码: ${REDIS_PASSWORD:-openIM123})"
fi
sleep 1
# Liveness probe: expect PONG from an authenticated PING.
if docker exec dev-redis redis-cli -a "${REDIS_PASSWORD:-openIM123}" ping 2>/dev/null | grep -q PONG; then
  success "Redis 连通性: PONG ✓"
else
  warn "Redis 未响应 PING请查看日志: ./deploy-test/logs.sh redis"
fi
start_docker_logger "dev-redis"

# ──────────────────────────────────────────────────────────────────────────────
# Kafka (KRaft mode — no ZooKeeper)
# ──────────────────────────────────────────────────────────────────────────────
step "Kafka (KRaft)"
if docker ps --format '{{.Names}}' | grep -q '^dev-kafka$'; then
  success "Kafka 已在运行 (container=dev-kafka) :${KAFKA_PORT}"
elif docker ps -a --format '{{.Names}}' | grep -q '^dev-kafka$'; then
  info "重新启动已有容器 dev-kafka..."
  docker start dev-kafka > /dev/null
  info "等待 Kafka 就绪 (8s)..."
  sleep 8
  success "Kafka 已启动 :${KAFKA_PORT}"
else
  info "创建并启动 Kafka 容器(首次拉取镜像可能较慢)..."
  # Fixed KRaft cluster id so the persisted data dir stays valid across recreations.
  KAFKA_CLUSTER_ID="MkU3OEVBNTcwNTJENDM2Qk"
  docker run -d \
    --name dev-kafka \
    --restart unless-stopped \
    -p "${KAFKA_PORT}:9092" \
    -v "${DATA_DIR}/kafka:/bitnami/kafka" \
    -e KAFKA_CFG_NODE_ID=0 \
    -e KAFKA_CFG_PROCESS_ROLES=controller,broker \
    -e KAFKA_CFG_LISTENERS="PLAINTEXT://:9092,CONTROLLER://:9093" \
    -e KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP="CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT" \
    -e KAFKA_CFG_CONTROLLER_QUORUM_VOTERS="0@localhost:9093" \
    -e KAFKA_CFG_CONTROLLER_LISTENER_NAMES=CONTROLLER \
    -e KAFKA_CFG_ADVERTISED_LISTENERS="PLAINTEXT://127.0.0.1:${KAFKA_PORT}" \
    -e KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE=true \
    -e KAFKA_KRAFT_CLUSTER_ID="$KAFKA_CLUSTER_ID" \
    "${LOG_OPTS[@]}" \
    bitnami/kafka:3.7 \
    > /dev/null
  info "等待 Kafka 就绪 (15s)..."
  sleep 15
  success "Kafka 容器已创建并启动 :${KAFKA_PORT}"
fi
start_docker_logger "dev-kafka"

# Create the topics open-im-server produces/consumes (idempotent).
step "Kafka Topics 初始化"
TOPICS=(toRedis toMongo toPush toOfflinePush)
for topic in "${TOPICS[@]}"; do
  if docker exec dev-kafka kafka-topics.sh \
    --bootstrap-server localhost:9092 \
    --list 2>/dev/null | grep -q "^${topic}$"; then
    info " topic 已存在: $topic"
  else
    docker exec dev-kafka kafka-topics.sh \
      --create \
      --topic "$topic" \
      --bootstrap-server localhost:9092 \
      --partitions 8 \
      --replication-factor 1 \
      2>/dev/null \
      && success " ✓ 创建 topic: $topic" \
      || warn " ✗ 创建失败: $topicKafka 未就绪,重试: ./deploy-test/03-start-infra.sh"
  fi
done

# ──────────────────────────────────────────────────────────────────────────────
# Etcd
# ──────────────────────────────────────────────────────────────────────────────
step "Etcd"
if docker ps --format '{{.Names}}' | grep -q '^dev-etcd$'; then
  success "Etcd 已在运行 (container=dev-etcd) :${ETCD_PORT}"
elif docker ps -a --format '{{.Names}}' | grep -q '^dev-etcd$'; then
  info "重新启动已有容器 dev-etcd..."
  docker start dev-etcd > /dev/null
  sleep 2
  success "Etcd 已启动 :${ETCD_PORT}"
else
  info "创建并启动 Etcd 容器..."
  docker run -d \
    --name dev-etcd \
    --restart unless-stopped \
    -p "${ETCD_PORT}:2379" \
    -v "${DATA_DIR}/etcd:/etcd-data" \
    -e ALLOW_NONE_AUTHENTICATION=yes \
    -e ETCD_DATA_DIR=/etcd-data \
    "${LOG_OPTS[@]}" \
    bitnami/etcd:3.5 \
    > /dev/null
  sleep 2
  success "Etcd 容器已创建并启动 :${ETCD_PORT}"
fi
if docker exec dev-etcd etcdctl endpoint health 2>/dev/null | grep -q 'is healthy'; then
  success "Etcd 连通性: healthy ✓"
else
  warn "Etcd 未返回健康状态,可能仍在启动中"
fi
start_docker_logger "dev-etcd"

# ──────────────────────────────────────────────────────────────────────────────
# LiveKit (local Docker container, reuses the dev-redis container)
# ──────────────────────────────────────────────────────────────────────────────
step "LiveKit"
LK_CONF="$ROOT_DIR/livekit/livekit.yaml"
if [[ ! -f "$LK_CONF" ]]; then
  warn "livekit/livekit.yaml 不存在,请先执行: ./deploy-test/02-patch-config.sh"
else
  if docker ps --format '{{.Names}}' | grep -q '^dev-livekit$'; then
    success "LiveKit 已在运行 (container=dev-livekit) :7880"
  elif docker ps -a --format '{{.Names}}' | grep -q '^dev-livekit$'; then
    info "重新启动已有容器 dev-livekit..."
    docker start dev-livekit > /dev/null
    sleep 2
    success "LiveKit 已启动 :7880"
  else
    info "创建并启动 LiveKit 容器(首次拉取镜像需要一点时间)..."
    # Linux does not define host.docker.internal by default; map it to the
    # host gateway so the container can reach the host-published Redis port.
    EXTRA_HOSTS=""
    [[ "$(uname -s)" == "Linux" ]] && EXTRA_HOSTS="--add-host host.docker.internal:host-gateway"
    # $EXTRA_HOSTS must stay unquoted so an empty value expands to no args.
    # shellcheck disable=SC2086
    docker run -d \
      --name dev-livekit \
      --restart unless-stopped \
      -p 7880:7880 \
      -p 7882:7882/tcp \
      -p 7882:7882/udp \
      -p 50000-51000:50000-51000/udp \
      -v "${LK_CONF}:/etc/livekit.yaml:ro" \
      "${LOG_OPTS[@]}" \
      $EXTRA_HOSTS \
      livekit/livekit-server:latest \
      --config /etc/livekit.yaml \
      > /dev/null
    sleep 3
    if docker ps --format '{{.Names}}' | grep -q '^dev-livekit$'; then
      success "LiveKit 容器已创建并启动 :7880"
      info " 公网 IP: ${LIVEKIT_NODE_IP} (WebRTC 媒体流直连)"
      info " API Key: ${LIVEKIT_API_KEY}"
    else
      # NOTE(review): execution continues past this `error` to
      # start_docker_logger on a dead container — confirm that error() in
      # common.sh is non-fatal on purpose here.
      error "LiveKit 启动失败,查看日志: ./deploy-test/logs.sh livekit"
    fi
  fi
  start_docker_logger "dev-livekit"
fi

# ──────────────────────────────────────────────────────────────────────────────
# Summary
# ──────────────────────────────────────────────────────────────────────────────
echo ""
echo -e "${BOLD}基础设施状态:${NC}"
print_container_status "Redis" "dev-redis" "${REDIS_PORT}"
print_container_status "Kafka" "dev-kafka" "${KAFKA_PORT}"
print_container_status "Etcd" "dev-etcd" "${ETCD_PORT}"
print_container_status "LiveKit" "dev-livekit" "7880"
echo ""
echo -e "${BOLD}日志目录:${NC}"
echo " Docker 容器日志: $DOCKER_LOG_DIR/"
echo " 本脚本执行日志: $_CURRENT_SCRIPT_LOG"
echo ""
echo -e " LiveKit 公网: ${LIVEKIT_NODE_IP}:50000-51000/udp (WebRTC 媒体流)"
echo ""
success "Docker 基础设施已就绪!"
echo ""
echo -e "${BOLD}下一步:${NC}"
echo -e " 编译所有后端 Go 服务:"
echo -e " ${CYAN}./deploy-test/04-build.sh${NC}"

104
04-build.sh Executable file
View File

@@ -0,0 +1,104 @@
#!/usr/bin/env bash
# =============================================================================
# 04-build.sh — compile all backend Go services
#
# Artifacts are written to .local-dev/bin/
# A single service can be built with: ./04-build.sh [service-name]
#
# Service names: openim-server, chat-rpc, admin-rpc, chat-api, admin-api,
#                meetingmsg, livecloud, livestream
#
# Next step: 05-start.sh (start the backend services)
# =============================================================================
set -euo pipefail
source "$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/common.sh"
init_dirs
init_script_log  # per-run script execution log
require_tools go

header "步骤 4 / 5 — 编译后端 Go 服务"

# Compile one service.
# Usage: _build <output-name> <source-dir> <package>
_build() {
  # Compile one Go service into $BUILD_DIR; returns non-zero on failure.
  # Args: $1 output name, $2 source dir (relative to ROOT_DIR), $3 package path.
  local svc_name="$1"
  local src_dir="$ROOT_DIR/$2"
  local go_pkg="$3"
  local artifact="$BUILD_DIR/$svc_name"
  info "编译 ${BOLD}$svc_name${NC} ..."
  local began=$SECONDS
  # Build inside a subshell so the cd does not leak into the caller.
  if ! (cd "$src_dir" && go build -o "$artifact" "$go_pkg"); then
    error "$svc_name 编译失败"
    return 1
  fi
  success "$svc_name$artifact ($(( SECONDS - began ))s)"
}
# Service tables: name → source dir (relative to ROOT_DIR) and Go package path.
declare -A SVC_DIR=(
  [openim-server]="open-im-server"
  [chat-rpc]="chat"
  [admin-rpc]="chat"
  [chat-api]="chat"
  [admin-api]="chat"
  [meetingmsg]="meetingmsg"
  [livecloud]="livecloud"
  [livestream]="livestream"
  [build-server]="build-server"
)
declare -A SVC_PKG=(
  [openim-server]="./cmd/main.go"
  [chat-rpc]="./cmd/rpc/chat-rpc/"
  [admin-rpc]="./cmd/rpc/admin-rpc/"
  [chat-api]="./cmd/api/chat-api/"
  [admin-api]="./cmd/api/admin-api/"
  [meetingmsg]="."
  [livecloud]="."
  [livestream]="."
  [build-server]="."
)

# ── Build a single service or all of them ────────────────────────────────────
TARGET="${1:-all}"
if [[ "$TARGET" == "all" ]]; then
  step "编译全部服务(共 ${#SVC_DIR[@]} 个)"
  FAILED=()
  # Explicit ordering (not ${!SVC_DIR[@]}) keeps the output deterministic.
  for svc in openim-server chat-rpc admin-rpc chat-api admin-api meetingmsg livecloud livestream build-server; do
    _build "$svc" "${SVC_DIR[$svc]}" "${SVC_PKG[$svc]}" || FAILED+=("$svc")
  done
  echo ""
  if [[ ${#FAILED[@]} -eq 0 ]]; then
    success "所有服务编译完成!"
    # List artifact names and sizes (columns $NF and $5 of ls -lh).
    ls -lh "$BUILD_DIR/" | awk 'NR>1 {printf " %-20s %s\n", $NF, $5}'
  else
    error "以下服务编译失败: ${FAILED[*]}"
    echo ""
    echo "排查建议:"
    echo " 1. cd $ROOT_DIR/<service-dir> && go mod tidy"
    echo " 2. 检查 Go 版本: go version推荐 1.21+"
    echo " 3. 检查模块依赖: go mod download"
    exit 1
  fi
else
  # Build one named service; reject unknown names with a usage hint.
  if [[ -z "${SVC_DIR[$TARGET]:-}" ]]; then
    error "未知服务: $TARGET"
    echo "可用: ${!SVC_DIR[*]}"
    exit 1
  fi
  step "编译单个服务: $TARGET"
  _build "$TARGET" "${SVC_DIR[$TARGET]}" "${SVC_PKG[$TARGET]}"
  success "$TARGET 编译完成"
fi
echo ""
echo -e "${BOLD}下一步:${NC}"
echo -e " 启动所有后端服务:"
echo -e " ${CYAN}./deploy-test/05-start.sh${NC}"
echo -e " 或只启动单个服务:"
echo -e " ${CYAN}./deploy-test/05-start.sh openim-server${NC}"

138
05-start.sh Executable file
View File

@@ -0,0 +1,138 @@
#!/usr/bin/env bash
# =============================================================================
# 05-start.sh — start the backend Go services
#
# Usage:
#   ./05-start.sh         # start everything in dependency order
#   ./05-start.sh <svc>   # start a single named service
#
# Start order (dependency chain):
#   openim-server → chat-rpc / admin-rpc → chat-api / admin-api
#   → meetingmsg / livecloud / livestream
#
# Log files: .local-dev/logs/<service>.log
# PID files: .local-dev/pids/<service>.pid
# =============================================================================
set -euo pipefail
source "$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/common.sh"
load_env
init_dirs
init_script_log  # per-run script execution log

header "步骤 5 / 5 — 启动后端服务"
TARGET="${1:-all}"

# ── Per-service launch configuration ─────────────────────────────────────────
# svc_workdir[name] — working directory; svc_args[name] — CLI arguments.
declare -A svc_workdir=(
  [openim-server]="$ROOT_DIR/open-im-server"
  [chat-rpc]="$ROOT_DIR/chat"
  [admin-rpc]="$ROOT_DIR/chat"
  [chat-api]="$ROOT_DIR/chat"
  [admin-api]="$ROOT_DIR/chat"
  [meetingmsg]="$ROOT_DIR/meetingmsg"
  [livecloud]="$ROOT_DIR/livecloud"
  [livestream]="$ROOT_DIR/livestream"
  [build-server]="$ROOT_DIR/build-server"
)
declare -A svc_args=(
  [openim-server]="-c $ROOT_DIR/open-im-server/config"
  [chat-rpc]="-c $ROOT_DIR/chat/config"
  [admin-rpc]="-c $ROOT_DIR/chat/config"
  [chat-api]="-c $ROOT_DIR/chat/config"
  [admin-api]="-c $ROOT_DIR/chat/config"
  [meetingmsg]=""
  [livecloud]=""
  [livestream]=""
  [build-server]=""
)
# Human-readable port/role descriptions for the status table.
declare -A svc_desc=(
  [openim-server]=":10002 (API) :10001 (MsgGateway WS)"
  [chat-rpc]="内部 RPC"
  [admin-rpc]="内部 RPC"
  [chat-api]=":10008"
  [admin-api]=":10009"
  [meetingmsg]=":8000 (WS)"
  [livecloud]=":8080"
  [livestream]=":8081"
  [build-server]=":8281"
)
_start_one() {
  # Launch one service via common.sh's start_svc:
  #   start_svc <name> <binary> <args> <workdir>
  local name="$1"
  local binary="$BUILD_DIR/$name"
  local args="${svc_args[$name]}"
  local workdir="${svc_workdir[$name]}"
  start_svc "$name" "$binary" "$args" "$workdir"
}
# ── Start everything, in dependency order ────────────────────────────────────
_start_all() {
  step "第 1 组: openim-server核心 IM 服务)"
  _start_one openim-server
  # openim-server must register its RPC endpoints in Etcd before chat starts.
  info "等待 openim-server 将 RPC 注册到 Etcd... (8s)"
  sleep 8
  step "第 2 组: chat RPC 服务"
  _start_one chat-rpc
  _start_one admin-rpc
  info "等待 chat RPC 注册到 Etcd... (4s)"
  sleep 4
  step "第 3 组: chat API 服务"
  _start_one chat-api
  _start_one admin-api
  step "第 4 组: 业务服务"
  _start_one meetingmsg
  _start_one livecloud
  _start_one livestream
  _start_one build-server
  echo ""
  echo -e "${BOLD}服务汇总:${NC}"
  for svc in openim-server chat-rpc admin-rpc chat-api admin-api meetingmsg livecloud livestream build-server; do
    print_svc_status "$svc" "${svc_desc[$svc]}"
  done
  echo ""
  echo -e "${BOLD}常用地址:${NC}"
  echo " IM API: http://localhost:10002"
  echo " IM WebSocket: ws://localhost:10001"
  echo " Chat API: http://localhost:10008"
  echo " Admin API: http://localhost:10009"
  echo " MeetingMsg WS: ws://localhost:8000"
  echo " Livecloud: http://localhost:8080"
  echo " Livestream: http://localhost:8081"
  echo " Build Server: http://localhost:8281"
  echo ""
  echo -e "${BOLD}查看日志:${NC}"
  echo -e " ${CYAN}./deploy-test/logs.sh openim-server${NC}"
  echo -e " ${CYAN}./deploy-test/logs.sh chat-api${NC}"
}
# ── 启动单个 ──────────────────────────────────────────────────────────────────
_start_single() {
  # Validate the requested service name, start it, then print its status line.
  local name="$1"
  # Unknown name → usage hint and hard exit.
  [[ -n "${svc_workdir[$name]:-}" ]] || {
    error "未知服务: $name"
    echo "可用: ${!svc_workdir[*]}"
    exit 1
  }
  step "启动: $name"
  _start_one "$name"
  echo ""
  print_svc_status "$name" "${svc_desc[$name]}"
}
# ── Entry point: dispatch on the (optional) service-name argument ────────────
if [[ "$TARGET" == "all" ]]; then
  _start_all
else
  _start_single "$TARGET"
fi

95
06-install-frontend.sh Executable file
View File

@@ -0,0 +1,95 @@
#!/usr/bin/env bash
# =============================================================================
# 06-install-frontend.sh — install frontend project dependencies
#
# Run on first use or after dependency changes (frontend counterpart of the
# backend's 04-build.sh).
#
# Usage:
#   ./06-install-frontend.sh             # install all frontend projects
#   ./06-install-frontend.sh <project>   # install a single project
#
# Projects: pc, meetingh5, h5, cms, build-cms, build-down
# =============================================================================
set -euo pipefail
source "$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/common.sh"
init_dirs
init_script_log  # per-run script execution log

header "前端依赖安装"

# ── Tooling check ─────────────────────────────────────────────────────────────
_check_pm() {
  # Verify that package manager $1 is on PATH; print an install hint and
  # return 1 when it is missing.
  local manager="$1"
  command -v "$manager" &>/dev/null && return 0
  error "$manager 未安装"
  case "$manager" in
    yarn) echo " → npm install -g yarn" ;;
    pnpm) echo " → npm install -g pnpm" ;;
    npm) echo " → https://nodejs.org/" ;;
  esac
  return 1
}
# ── Install a single project ──────────────────────────────────────────────────
# Usage: _install <display-name> <dir-relative-to-ROOT_DIR> <package-manager>
_install() {
  local name="$1" dir="$ROOT_DIR/$2" pm="$3"
  # Missing project directory is not fatal — skip with a warning.
  if [[ ! -d "$dir" ]]; then
    warn " 目录不存在,跳过: $dir"
    return 0
  fi
  _check_pm "$pm" || return 1
  local logfile="$LOG_DIR/install-${name}.log"
  info "安装 ${BOLD}$name${NC} 依赖 ($pm install) ..."
  if [[ -d "$dir/node_modules" ]]; then
    warn " node_modules 已存在,执行增量安装(如需全量重装请先 rm -rf $dir/node_modules"
  fi
  local start_ts=$SECONDS
  # Full output goes to the log file; only the last 5 lines reach the terminal.
  # NOTE(review): with `set -euo pipefail` a failing `$pm install` aborts the
  # whole script at this pipeline without a friendly message — confirm that
  # is intended.
  (cd "$dir" && "$pm" install 2>&1) | tee -a "$logfile" | tail -5
  local elapsed=$(( SECONDS - start_ts ))
  success "$name 安装完成 (${elapsed}s) → 日志: $logfile"
}
# ── Project list ──────────────────────────────────────────────────────────────
# Dispatch on the (optional) project-name argument.
TARGET="${1:-all}"
case "$TARGET" in
  all)
    step "安装全部前端项目依赖(共 6 个)"
    _install "pc" "pc" "yarn"
    _install "meetingh5" "meetingh5" "npm"
    _install "h5" "h5" "npm"
    _install "cms" "cms" "pnpm"
    _install "build-cms" "build-cms" "pnpm"
    _install "build-down" "build-down" "npm"
    echo ""
    success "所有前端依赖安装完成!"
    echo ""
    echo -e "${BOLD}下一步:${NC}"
    echo -e " 启动前端开发服务器:"
    echo -e " ${CYAN}./deploy-test/07-start-frontend.sh${NC}"
    ;;
  pc|meetingh5|h5|cms|build-cms|build-down)
    # BUG FIX: this arm runs at top level, not inside a function, so the old
    # `local PM` was illegal ("local: can only be used in a function") — its
    # non-zero status aborted the script under `set -e`. Use a plain variable.
    PM=""
    # Pick the package manager matching the project's lockfile tooling.
    case "$TARGET" in
      pc) PM="yarn" ;;
      cms|build-cms) PM="pnpm" ;;
      *) PM="npm" ;;
    esac
    step "安装 $TARGET 依赖"
    _install "$TARGET" "$TARGET" "$PM"
    ;;
  *)
    error "未知项目: $TARGET"
    echo "可用: pc, meetingh5, h5, cms, build-cms, build-down"
    exit 1
    ;;
esac

196
07-start-frontend.sh Executable file
View File

@@ -0,0 +1,196 @@
#!/usr/bin/env bash
# =============================================================================
# 07-start-frontend.sh — start the frontend dev servers
#
# Usage:
#   ./07-start-frontend.sh             # start every frontend project
#   ./07-start-frontend.sh <project>   # start one project only
#
# Projects and ports:
#   pc         → Electron + Vite  :7777
#   meetingh5  → React + Vite     :5188
#   h5         → Vue + Vite       :3003
#   cms        → UMI Max          :8001
#   build-cms  → UMI Max          :8002
#   build-down → UMI v3           :8003
#
# Log files: .deploy-test/logs/fe-<project>.log   (LOG_DIR in common.sh)
# PID files: .deploy-test/pids/fe-<project>.pid   (PID_DIR in common.sh)
# =============================================================================
set -euo pipefail
source "$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/common.sh"
# BUGFIX: the summary below interpolates ${DEPLOY_TEST_IP}, which is defined
# in .env.deploy-test; without load_env it is unset and, under `set -u`, the
# script died with "unbound variable" right after starting the services.
load_env
init_dirs
init_script_log # ← mirror all script output into .deploy-test/script-logs/
header "启动前端开发服务器"
# ── Frontend service tables ──────────────────────────────────────────────────
# One associative array per attribute, all keyed by project name:
#   FE_DIR  — directory under ROOT_DIR
#   FE_PM   — package-manager binary required on PATH
#   FE_CMD  — command run (inside the project dir) to start the dev server
#   FE_ENV  — env assignments prefixed to the command (may be empty)
#   FE_PORT — human-readable port note for the status summary
declare -A FE_DIR=(
  [pc]="pc"
  [meetingh5]="meetingh5"
  [h5]="h5"
  [cms]="cms"
  [build-cms]="build-cms"
  [build-down]="build-down"
)
declare -A FE_PM=(
  [pc]="yarn"
  [meetingh5]="npm"
  [h5]="npm"
  [cms]="pnpm"
  [build-cms]="pnpm"
  [build-down]="npm"
)
declare -A FE_CMD=(
  [pc]="yarn dev"
  [meetingh5]="npm run dev"
  [h5]="npm run dev"
  [cms]="pnpm run dev"
  [build-cms]="pnpm run dev"
  [build-down]="npm run dev"
)
# cms/build-cms/build-down all default to port 8000 when PORT is unset, so
# each gets an explicit PORT to avoid collisions.
declare -A FE_ENV=(
  [pc]=""
  [meetingh5]=""
  [h5]=""
  [cms]="PORT=8001"
  [build-cms]="PORT=8002"
  [build-down]="PORT=8003"
)
declare -A FE_PORT=(
  [pc]=":7777 (Electron Vite 调试服务器)"
  [meetingh5]=":5188"
  [h5]=":3003"
  [cms]=":8001"
  [build-cms]=":8002"
  [build-down]=":8003"
)
# ── 启动单个前端服务 ──────────────────────────────────────────────────────────
_start_fe() {
local name="$1"
local dir="$ROOT_DIR/${FE_DIR[$name]}"
local pm="${FE_PM[$name]}"
local cmd="${FE_CMD[$name]}"
local env_prefix="${FE_ENV[$name]}"
local pidfile="$PID_DIR/fe-${name}.pid"
local logfile="$LOG_DIR/fe-${name}.log"
# 已在运行
if [[ -f "$pidfile" ]] && kill -0 "$(cat "$pidfile")" 2>/dev/null; then
warn "$name 已在运行 (PID=$(cat "$pidfile")),跳过"
return 0
fi
# 目录检查
if [[ ! -d "$dir" ]]; then
warn "$name 目录不存在 ($dir),跳过"
return 0
fi
# 依赖检查
if [[ ! -d "$dir/node_modules" ]]; then
warn "$name node_modules 不存在,请先执行: ./deploy-test/06-install-frontend.sh $name"
return 1
fi
# 包管理器检查
if ! command -v "$pm" &>/dev/null; then
error "$name 需要 $pm,未安装"
return 1
fi
info "启动 ${BOLD}$name${NC} ..."
# 写日志分隔符
{
echo ""
echo "──── 启动 $(date '+%Y-%m-%d %H:%M:%S') ────"
} >> "$logfile"
# 后台启动(带环境变量前缀)
(
cd "$dir"
if [[ -n "$env_prefix" ]]; then
# shellcheck disable=SC2086
nohup env $env_prefix $cmd >> "$logfile" 2>&1 &
else
# shellcheck disable=SC2086
nohup $cmd >> "$logfile" 2>&1 &
fi
echo $! > "$pidfile"
)
sleep 2
if [[ -f "$pidfile" ]] && kill -0 "$(cat "$pidfile")" 2>/dev/null; then
success "$name (PID=$(cat "$pidfile")) ${FE_PORT[$name]}$logfile"
else
error "$name 启动失败,查看日志:"
tail -20 "$logfile" 2>/dev/null || true
return 1
fi
}
# ── 入口 ─────────────────────────────────────────────────────────────────────
TARGET="${1:-all}"
FE_PROJECTS=(pc meetingh5 h5 cms build-cms build-down)
_all_valid() {
for p in "${FE_PROJECTS[@]}"; do
[[ "$p" == "$1" ]] && return 0
done
return 1
}
if [[ "$TARGET" == "all" ]]; then
step "启动全部前端开发服务器"
FAILED=()
for proj in "${FE_PROJECTS[@]}"; do
_start_fe "$proj" || FAILED+=("$proj")
done
echo ""
echo -e "${BOLD}前端服务汇总:${NC}"
for proj in "${FE_PROJECTS[@]}"; do
local_pidfile="$PID_DIR/fe-${proj}.pid"
if [[ -f "$local_pidfile" ]] && kill -0 "$(cat "$local_pidfile")" 2>/dev/null; then
printf " ${GREEN}${NC} %-14s PID=%-7s %s\n" "$proj" "$(cat "$local_pidfile")" "${FE_PORT[$proj]}"
else
printf " ${RED}${NC} %-14s 未运行 %s\n" "$proj" "${FE_PORT[$proj]}"
fi
done
echo ""
echo -e "${BOLD}访问地址:${NC}"
echo " PC (Electron): yarn dev 启动后自动打开窗口"
echo " H5: http://${DEPLOY_TEST_IP}:3003"
echo " CMS: http://${DEPLOY_TEST_IP}:8001"
echo " Build CMS: http://${DEPLOY_TEST_IP}:8002"
echo " Build Download: http://${DEPLOY_TEST_IP}:8003"
echo ""
echo -e "${BOLD}MeetingH5 访问地址(后端 URL 由 .env.local 默认设置,也可通过 URL 参数覆盖):${NC}"
echo " 默认: http://${DEPLOY_TEST_IP}:5188"
echo " 显式指定后端: http://${DEPLOY_TEST_IP}:5188?ws=ws://${DEPLOY_TEST_IP}:8000&liveApi=http://${DEPLOY_TEST_IP}:8081"
echo " 说明: ws → meetingmsg 弹幕 WebSocket (:8000)"
echo " liveApi → livestream 直播间 API (:8081)"
if [[ ${#FAILED[@]} -gt 0 ]]; then
echo ""
warn "以下项目启动失败: ${FAILED[*]}"
echo " 可能原因: node_modules 未安装,执行: ./deploy-test/06-install-frontend.sh"
fi
else
if ! _all_valid "$TARGET"; then
error "未知项目: $TARGET"
echo "可用: ${FE_PROJECTS[*]}"
exit 1
fi
step "启动前端项目: $TARGET"
_start_fe "$TARGET"
fi

280
README.md Normal file
View File

@@ -0,0 +1,280 @@
# deploy-test — 测试服务器部署脚本集
> **适用场景**:部署在**有公网 IP 的测试服务器**上。所有服务后端、前端、Docker 基础设施、LiveKit均在本机运行。
>
> 如果你在本机 Mac 开发,请使用 `deploy-local/` 目录。
---
## 两套环境对比
| 项目 | deploy-test本目录| deploy-local/ |
|------|---------------------|----------------|
| 适用机器 | 测试服务器(有公网 IP | 本机 Mac无公网 IP |
| 配置文件 | `.env.deploy-test` | `.env.deploy-local` |
| 运行时目录 | `.deploy-test/` | `.deploy-local/` |
| LiveKit | 本机 Docker 启动,使用公网 IP | 指向本测试服务器的 LiveKit |
| Redis/Kafka/Etcd | 本机 Docker | 本机 Docker |
| 后端服务 | 本机进程 | 本机进程 |
| 前端服务 | 本机进程(可选) | 本机进程 |
---
## 目录结构
```
deploy-test/
├── common.sh # 公共函数库(路径、日志函数)
├── 01-init-env.sh # 步骤1生成 .env.deploy-test 配置模板
├── 02-patch-config.sh # 步骤2将 .env.deploy-test 写入各服务 YAML
├── 03-start-infra.sh # 步骤3启动 Docker 容器Redis/Kafka/Etcd/LiveKit
├── 04-build.sh # 步骤4编译所有后端 Go 服务
├── 05-start.sh # 步骤5启动所有后端服务
├── 06-install-frontend.sh # 步骤6安装前端依赖可选
├── 07-start-frontend.sh # 步骤7启动前端开发服务器可选
├── stop.sh # 停止后端服务
├── stop-infra.sh # 停止 Docker 容器(含 LiveKit
├── stop-frontend.sh # 停止前端服务
├── remove-infra.sh # 删除 Docker 容器及数据(危险!)
├── restart.sh # 重启指定服务(支持 --build
├── status.sh # 查看所有服务状态
├── logs.sh # 查看日志(统一入口)
├── check-conn.sh # 验证 MongoDB / S3 连接
└── setup.sh # 一键完整部署(首次使用)
```
运行时目录(`.deploy-test/`,已加入 `.gitignore`
```
.deploy-test/
├── bin/ # Go 编译产物
├── pids/ # PID 文件
├── logs/ # 后端/前端服务日志
├── docker-data/ # Docker 数据卷
├── docker-logs/ # Docker 容器日志(按日期滚动)
└── script-logs/ # 脚本执行日志(带时间戳)
```
---
## 快速开始
### 首次使用
```bash
# 一键执行(推荐)
./deploy-test/setup.sh
```
### 分步执行
```bash
# 1. 生成配置模板
./deploy-test/01-init-env.sh
# 2. 修改配置(重要:确认 DEPLOY_TEST_IP 等信息正确)
vim .env.deploy-test
# 3. 将配置写入各服务 YAML包括 livekit/livekit.yaml
./deploy-test/02-patch-config.sh
# 4. 启动 Docker 基础设施Redis / Kafka / Etcd / LiveKit
./deploy-test/03-start-infra.sh
# 5. 编译后端服务
./deploy-test/04-build.sh
# 6. 启动后端服务
./deploy-test/05-start.sh
# 7. 安装前端依赖(可选)
./deploy-test/06-install-frontend.sh
# 8. 启动前端开发服务器(可选)
./deploy-test/07-start-frontend.sh
```
---
## 配置文件(`.env.deploy-test`
```bash
# ══ 测试服务器公网 IP ═══════════════════════════════════════════
DEPLOY_TEST_IP=54.116.29.247 # 本机公网 IPLiveKit WebRTC 必需)
# ══ MongoDB ════════════════════════════════════════════════════
MONGO_HOST=47.237.103.4
MONGO_PORT=27017
MONGO_USERNAME=minio_pC5wMB
MONGO_PASSWORD=xxx
MONGO_AUTHSOURCE=openim_v3
MONGO_DATABASE=openim_v3
BUILD_MONGO_DATABASE=build
# ══ Amazon S3 ══════════════════════════════════════════════════
OPENIM_AWS_REGION=ap-southeast-1
OPENIM_AWS_BUCKET=im1688
OPENIM_AWS_ACCESS_KEY_ID=xxx
OPENIM_AWS_SECRET_ACCESS_KEY=xxx
BUILD_AWS_REGION=ap-east-1
BUILD_AWS_BUCKET=im-hk-apk
BUILD_AWS_ACCESS_KEY=xxx
BUILD_AWS_SECRET_KEY=xxx
# ══ Docker Redis / Kafka / Etcd ════════════════════════════════
REDIS_PORT=6379
REDIS_PASSWORD=openIM123
KAFKA_PORT=9092
ETCD_PORT=2379
# ══ LiveKit本机 Docker使用公网 IP════════════════════════
LIVEKIT_NODE_IP=54.116.29.247 # 与 DEPLOY_TEST_IP 保持一致
LIVEKIT_URL=ws://127.0.0.1:7880
LIVEKIT_API_KEY=API8462dba2
LIVEKIT_API_SECRET=xxx
# ══ Cloudflare Stream / 腾讯云 RTC ════════════════════════════
CF_ACCOUNT_ID=
CF_API_TOKEN=
TENCENT_SDK_APP_ID=xxx
TENCENT_SDK_SECRET_KEY=xxx
```
---
## 服务地址
### 后端服务
| 服务 | 端口 |
|------|------|
| openim-server | :10002 (HTTP) / :10001 (WS) |
| chat-api | :10008 |
| admin-api | :10009 |
| meetingmsg | :8000 (WS) |
| livecloud | :8080 |
| livestream | :8081 |
| build-server | :8281 |
### 前端开发服务器(可选)
| 项目 | 端口 | 说明 |
|------|------|------|
| pc (Electron) | :7777 | Electron 桌面客户端 |
| meetingh5 | :5188 | 直播观看 H5弹幕+视频) |
| h5 | :3003 | 移动端 H5 |
| cms | :8001 | 后台管理 |
| build-cms | :8002 | 构建管理后台 |
| build-down | :8003 | 下载页 |
> **meetingh5 访问方式**
>
> `02-patch-config.sh` 会自动生成 `meetingh5/.env.local`,设置默认后端地址:
>
> ```
> # 直接访问(使用 .env.local 中的默认后端)
> http://<DEPLOY_TEST_IP>:5188
>
> # 或显式传入 URL 参数(优先级最高)
> http://<DEPLOY_TEST_IP>:5188?ws=ws://<DEPLOY_TEST_IP>:8000&liveApi=http://<DEPLOY_TEST_IP>:8081
> ```
>
> - `ws` → meetingmsg 弹幕 WebSocket `:8000`
> - `liveApi` → livestream 直播间 API `:8081`
### Docker 基础设施
| 服务 | 端口 |
|------|------|
| Redis | :6379 |
| Kafka | :9092 |
| Etcd | :2379 |
| LiveKit | :7880 (API) / :7882 (TCP) / :50000-51000/udp (WebRTC) |
---
## LiveKit 说明
本测试服务器运行本地 LiveKit 容器WebRTC 媒体流通过公网 IP 对外暴露。
```
测试服务器DEPLOY_TEST_IP: 54.116.29.247
├── dev-redis :6379 ←── dev-livekit 通过 host.docker.internal 访问
└── dev-livekit
:7880 → HTTP API后端连接
:7882/tcp+udp → WebRTC fallback
:50000-51000/udp → WebRTC 媒体流(客户端直连公网 IP
```
**防火墙必须开放**7880/tcp、7882/tcp+udp、50000-51000/udp
本机 Macdeploy-local的 LiveKit 连接地址为 `ws://54.116.29.247:7880`,与此保持一致。
---
## 日志体系
```
.deploy-test/
├── script-logs/ ← 每次脚本执行的完整输出(带时间戳,自动去除颜色码)
├── logs/ ← 后端/前端服务进程 stdout+stderr
└── docker-logs/ ← Docker 容器日志(每日一文件)
├── redis/
├── kafka/
├── etcd/
└── livekit/
```
```bash
# 查看所有日志概览
./deploy-test/logs.sh
# 实时跟踪某个服务
./deploy-test/logs.sh openim-server
./deploy-test/logs.sh livekit
./deploy-test/logs.sh cms
# 查看脚本执行历史
./deploy-test/logs.sh scripts
./deploy-test/logs.sh scripts --last # 最新一次完整输出
```
---
## 日常操作
```bash
# 早上开机
./deploy-test/03-start-infra.sh # Docker 容器(含 LiveKit
./deploy-test/05-start.sh # 后端服务
# 查看状态
./deploy-test/status.sh
# 重启单个后端服务
./deploy-test/restart.sh chat-api
./deploy-test/restart.sh chat-api --build # 重编译 + 重启
# 下班关机
./deploy-test/stop.sh # 后端进程
./deploy-test/stop-infra.sh # Docker 容器(含 LiveKit数据保留
```
---
## 故障排查
```bash
# 验证 MongoDB / S3 连接
./deploy-test/check-conn.sh
# 查看 LiveKit 日志WebRTC 不通时)
./deploy-test/logs.sh livekit --last
# 重置 Docker 环境(删除所有数据)
./deploy-test/remove-infra.sh
./deploy-test/02-patch-config.sh
./deploy-test/03-start-infra.sh
```

180
check-conn.sh Executable file
View File

@@ -0,0 +1,180 @@
#!/usr/bin/env bash
# =============================================================================
# check-conn.sh — verify remote service connectivity (MongoDB and Amazon S3)
#
# Usage:
#   ./check-conn.sh         # check both MongoDB and S3
#   ./check-conn.sh mongo   # MongoDB only
#   ./check-conn.sh s3      # S3 only
# =============================================================================
set -euo pipefail
source "$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/common.sh"
load_env
init_script_log # ← mirror all script output into .deploy-test/script-logs/
TARGET="${1:-all}"
header "远程服务连接检查"
# ──────────────────────────────────────────────────────────────────────────────
# MongoDB
# ──────────────────────────────────────────────────────────────────────────────
# check_mongo — verify the remote MongoDB is reachable with the credentials
# loaded from .env.deploy-test. Tries, in order: mongosh (authenticated ping),
# nc (TCP reachability only), python3+pymongo (authenticated ping); otherwise
# prints a manual-verification hint.
check_mongo() {
  step "MongoDB: ${MONGO_HOST}:${MONGO_PORT}/${MONGO_DATABASE}"
  echo -e " Host: ${MONGO_HOST}"
  echo -e " Port: ${MONGO_PORT}"
  echo -e " Database: ${MONGO_DATABASE}"
  echo -e " AuthSource: ${MONGO_AUTHSOURCE}"
  echo -e " Username: ${MONGO_USERNAME}"
  echo ""
  MONGO_URI="mongodb://${MONGO_USERNAME}:${MONGO_PASSWORD}@${MONGO_HOST}:${MONGO_PORT}/${MONGO_DATABASE}?authSource=${MONGO_AUTHSOURCE}&directConnection=true"
  # Method 1: mongosh — authenticated ping + sample of collection names.
  if command -v mongosh &>/dev/null; then
    info "使用 mongosh 验证..."
    if mongosh "$MONGO_URI" --quiet --eval \
      'db.runCommand({ping:1}); db.getSiblingDB("'"${MONGO_DATABASE}"'").getCollectionNames().slice(0,5)' \
      2>/dev/null; then
      success "MongoDB 连接正常 ✓"
    else
      # BUGFIX: the message used to point at .env.local; this environment's
      # config file is .env.deploy-test ($ENV_FILE in common.sh).
      error "MongoDB 连接失败!请检查 .env.deploy-test 中的配置"
      echo ""
      echo " 排查步骤:"
      echo " 1. 确认 MongoDB 服务器 ${MONGO_HOST} 可从本机访问"
      echo " 2. 确认端口 ${MONGO_PORT} 已开放防火墙"
      echo " 3. 确认用户名/密码/authSource 正确"
      echo " 4. 手动测试: mongosh \"$MONGO_URI\""
    fi
  # Method 2: nc — port reachability only (no authentication check).
  elif command -v nc &>/dev/null; then
    info "mongosh 未安装,使用 nc 检查端口..."
    if nc -z -w5 "${MONGO_HOST}" "${MONGO_PORT}" 2>/dev/null; then
      success "MongoDB 端口 ${MONGO_HOST}:${MONGO_PORT} 可达 ✓"
      warn "(未验证认证,安装 mongosh 可做完整测试)"
    else
      error "MongoDB 端口不可达: ${MONGO_HOST}:${MONGO_PORT}"
    fi
  # Method 3: Python pymongo.
  elif command -v python3 &>/dev/null && python3 -c "import pymongo" 2>/dev/null; then
    info "使用 Python pymongo 验证..."
    # BUGFIX: this was an unguarded statement followed by
    # `[[ $? -eq 0 ]] && success || error`; under `set -e` a pymongo failure
    # exited the script before the status check ever ran, so the error branch
    # was unreachable. Running the heredoc inside `if` keeps -e from firing.
    if python3 - <<PYEOF
from pymongo import MongoClient
import sys
try:
    c = MongoClient("${MONGO_URI}", serverSelectionTimeoutMS=5000)
    c.server_info()
    print(" MongoDB 连接正常 ✓")
    c.close()
except Exception as e:
    print(f" 连接失败: {e}", file=sys.stderr)
    sys.exit(1)
PYEOF
    then
      success "MongoDB 连接正常"
    else
      error "MongoDB 连接失败"
    fi
  else
    warn "跳过 MongoDB 连接验证(请安装 mongosh: brew install mongosh)"
    echo -e " 手动验证: mongosh \"${MONGO_URI}\""
  fi
}
# ──────────────────────────────────────────────────────────────────────────────
# Amazon S3
# ──────────────────────────────────────────────────────────────────────────────
# Generic S3 bucket check.
# Usage: _check_s3_bucket <label> <key_id> <secret_key> <region> <bucket> [endpoint]
#   <label> is currently unused (kept for a stable call signature).
# Verifies list access via awscli, then probes write access by uploading and
# deleting a small marker object. Returns 1 when credentials are not filled in.
_check_s3_bucket() {
  local label="$1" key_id="$2" secret_key="$3" region="$4" bucket="$5" endpoint="${6:-}"
  echo -e " Bucket: ${bucket}"
  echo -e " Region: ${region}"
  echo -e " AccessKey: ${key_id}"
  [[ -n "$endpoint" ]] && echo -e " Endpoint: ${endpoint}"
  echo ""
  # Placeholder ("YOUR_…") or empty key → configuration not filled in yet.
  if [[ "${key_id}" == "YOUR_"* || -z "${key_id}" ]]; then
    # BUGFIX: the message used to point at .env.local; this environment's
    # config file is .env.deploy-test ($ENV_FILE in common.sh).
    error "S3 AccessKeyID 未配置,请编辑 .env.deploy-test"
    return 1
  fi
  if command -v aws &>/dev/null; then
    info "使用 awscli 验证..."
    local endpoint_arg=""
    [[ -n "$endpoint" ]] && endpoint_arg="--endpoint-url $endpoint"
    local result rc=0
    # $endpoint_arg is intentionally unquoted: empty → no extra argument.
    result=$(
      AWS_ACCESS_KEY_ID="$key_id" \
      AWS_SECRET_ACCESS_KEY="$secret_key" \
      AWS_DEFAULT_REGION="$region" \
      aws s3 ls "s3://${bucket}" $endpoint_arg 2>&1 | head -5
    ) || rc=$?
    if [[ $rc -eq 0 ]]; then
      success "S3 Bucket 可访问 ✓"
      [[ -n "$result" ]] && echo "$result" | sed 's/^/ /' || echo " (Bucket 为空)"
    else
      error "S3 访问失败!错误: $result"
      echo " 排查: 确认 AccessKey/SecretKey、Bucket 名称、IAM s3:ListBucket 权限"
    fi
    # Write probe: upload a timestamped marker object, then delete it.
    info "测试写入权限..."
    local test_key="local-dev-test-$(date +%s)"
    local write_ok=false
    AWS_ACCESS_KEY_ID="$key_id" \
    AWS_SECRET_ACCESS_KEY="$secret_key" \
    AWS_DEFAULT_REGION="$region" \
    aws s3 cp /dev/stdin "s3://${bucket}/${test_key}" \
      $endpoint_arg --content-type text/plain \
      <<< "local-dev-test" 2>/dev/null && write_ok=true || true
    if $write_ok; then
      AWS_ACCESS_KEY_ID="$key_id" AWS_SECRET_ACCESS_KEY="$secret_key" \
      AWS_DEFAULT_REGION="$region" \
      aws s3 rm "s3://${bucket}/${test_key}" $endpoint_arg 2>/dev/null || true
      success "S3 写入权限正常 ✓"
    else
      warn "S3 写入测试失败(Bucket 可读但可能无写权限)"
    fi
  else
    warn "awscli 未安装,跳过验证(brew install awscli)"
    echo " 手动验证: AWS_ACCESS_KEY_ID=${key_id} aws s3 ls s3://${bucket}"
  fi
}
# check_s3 — run the generic bucket check against both configured buckets:
# the open-im-server bucket (IM file storage) and the build-server bucket
# (APK/IPA build artifacts). Values come from .env.deploy-test via load_env.
# NOTE(review): under `set -e` a non-zero return from the first check aborts
# before the build-server check runs — confirm that is intended.
check_s3() {
  step "S3 (open-im-server) — IM 文件存储"
  _check_s3_bucket \
    "openim" \
    "${OPENIM_AWS_ACCESS_KEY_ID}" \
    "${OPENIM_AWS_SECRET_ACCESS_KEY}" \
    "${OPENIM_AWS_REGION}" \
    "${OPENIM_AWS_BUCKET}" \
    "${OPENIM_AWS_ENDPOINT:-}"
  echo ""
  step "S3 (build-server) — App APK/IPA 构建产物"
  _check_s3_bucket \
    "build" \
    "${BUILD_AWS_ACCESS_KEY}" \
    "${BUILD_AWS_SECRET_KEY}" \
    "${BUILD_AWS_REGION}" \
    "${BUILD_AWS_BUCKET}"
}
# ──────────────────────────────────────────────────────────────────────────────
# Entry point — dispatch on $TARGET (all | mongo | s3)
# ──────────────────────────────────────────────────────────────────────────────
if [[ "$TARGET" == "all" ]]; then
  check_mongo
  echo ""
  check_s3
elif [[ "$TARGET" == "mongo" ]]; then
  check_mongo
elif [[ "$TARGET" == "s3" ]]; then
  check_s3
else
  error "未知参数: $TARGET"
  echo "用法: $0 [all|mongo|s3]"
  exit 1
fi
echo ""
225
common.sh Executable file
View File

@@ -0,0 +1,225 @@
#!/usr/bin/env bash
# =============================================================================
# common.sh — shared helper library, sourced by every deploy-test script
#   (not meant to be executed directly)
# =============================================================================
# Guard against being sourced twice.
[[ -n "${_COMMON_LOADED:-}" ]] && return 0
_COMMON_LOADED=1
# ── Repository root (workspace46/) — one level above this script ─────────────
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
# ── Runtime directories (test-server environment) ────────────────────────────
LOG_DIR="$ROOT_DIR/.deploy-test/logs" # backend/frontend service logs
PID_DIR="$ROOT_DIR/.deploy-test/pids" # PID files (incl. log collector processes)
BUILD_DIR="$ROOT_DIR/.deploy-test/bin" # Go build artifacts
DATA_DIR="$ROOT_DIR/.deploy-test/docker-data" # Docker data volumes
DOCKER_LOG_DIR="$ROOT_DIR/.deploy-test/docker-logs" # Docker container logs
SCRIPT_LOG_DIR="$ROOT_DIR/.deploy-test/script-logs" # per-run script logs
ENV_FILE="$ROOT_DIR/.env.deploy-test"
# ── ANSI colors ──────────────────────────────────────────────────────────────
RED='\033[0;31m'; GREEN='\033[0;32m'; YELLOW='\033[1;33m'
BLUE='\033[0;34m'; CYAN='\033[0;36m'; BOLD='\033[1m'; NC='\033[0m'
# Leveled log helpers; error() writes to stderr, the rest to stdout.
info() { echo -e "${CYAN}[INFO]${NC} $*"; }
success() { echo -e "${GREEN}[OK]${NC} $*"; }
warn() { echo -e "${YELLOW}[WARN]${NC} $*"; }
error() { echo -e "${RED}[ERROR]${NC} $*" >&2; }
# step — bold blue section headline preceded by a blank line.
step() { echo -e "\n${BOLD}${BLUE}$*${NC}"; }
# header — three-line boxed banner used as a script's main title.
header() {
  echo ""
  echo -e "${BOLD}${BLUE}══════════════════════════════════════════${NC}"
  echo -e "${BOLD}${BLUE} $*${NC}"
  echo -e "${BOLD}${BLUE}══════════════════════════════════════════${NC}"
}
# ── Runtime directory initialization ─────────────────────────────────────────
# Create every runtime directory used by the deploy scripts (idempotent).
init_dirs() {
  local d
  for d in "$LOG_DIR" "$PID_DIR" "$BUILD_DIR" "$DATA_DIR" \
           "$DOCKER_LOG_DIR" "$SCRIPT_LOG_DIR"; do
    mkdir -p "$d"
  done
}
# ──────────────────────────────────────────────────────────────────────────────
# Per-run script logging
# Call site: each script, right after init_dirs.
# Effect: all output (stdout+stderr) is mirrored into
#         .deploy-test/script-logs/<name>-<ts>.log with ANSI colors stripped.
# ──────────────────────────────────────────────────────────────────────────────
init_script_log() {
  local script_name
  # BASH_SOURCE[1] is the calling script; fall back to $0 just in case.
  script_name="$(basename "${BASH_SOURCE[1]:-$0}" .sh)"
  local ts; ts="$(date +%Y%m%d-%H%M%S)"
  export _CURRENT_SCRIPT_LOG="$SCRIPT_LOG_DIR/${script_name}-${ts}.log"
  mkdir -p "$SCRIPT_LOG_DIR"
  # Plain-text file header (no color codes).
  {
    echo "========================================"
    echo "Script : $script_name"
    echo "Started: $(date '+%Y-%m-%d %H:%M:%S')"
    echo "========================================"
  } > "$_CURRENT_SCRIPT_LOG"
  # exec: from here on, all output flows to the terminal AND the log file;
  # the sed stage strips ANSI escape sequences so the file stays readable.
  exec > >(tee >(sed $'s/\033\\[[0-9;]*m//g' >> "$_CURRENT_SCRIPT_LOG")) 2>&1
  info "脚本日志 → $_CURRENT_SCRIPT_LOG"
}
# ──────────────────────────────────────────────────────────────────────────────
# Docker container log collection
# ──────────────────────────────────────────────────────────────────────────────
# start_docker_logger <container> — spawn a background `docker logs -f`
# follower that appends the container's output to
# $DOCKER_LOG_DIR/<svc>/<svc>-YYYYMMDD.log (one file per day), after first
# dumping the current log snapshot. The follower's PID is recorded so
# stop_docker_logger can terminate it later.
start_docker_logger() {
  local cname="$1"
  local svc="${cname#dev-}" # dev-redis → redis
  local log_dir="$DOCKER_LOG_DIR/$svc"
  local logfile="$log_dir/${svc}-$(date +%Y%m%d).log"
  local pid_file="$PID_DIR/docker-log-${cname}.pid"
  mkdir -p "$log_dir"
  # Replace any follower already running for this container.
  if [[ -f "$pid_file" ]] && kill -0 "$(cat "$pid_file")" 2>/dev/null; then
    kill "$(cat "$pid_file")" 2>/dev/null || true
    sleep 0.3
  fi
  # Session separator between container (re)starts.
  {
    echo ""
    echo "──── 容器启动 $(date '+%Y-%m-%d %H:%M:%S') ────"
  } >> "$logfile"
  # First dump the current snapshot, then follow only new lines (--tail 0)
  # so the snapshot is not duplicated by the follower.
  docker logs "$cname" >> "$logfile" 2>&1 || true
  docker logs -f --tail 0 "$cname" >> "$logfile" 2>&1 &
  echo $! > "$pid_file"
  info " 容器日志 → $logfile"
}
# Stop the background `docker logs -f` collector for container $1, if any,
# and remove its PID file.
stop_docker_logger() {
  local pidfile="$PID_DIR/docker-log-${1}.pid"
  [[ -f "$pidfile" ]] || return 0
  kill "$(cat "$pidfile")" 2>/dev/null || true
  rm -f "$pidfile"
}
# ── Load .env.deploy-test ────────────────────────────────────────────────────
# Sources $ENV_FILE with `set -a` so every variable in it is exported to
# child processes. Exits 1 with a hint when the file does not exist.
load_env() {
  if [[ ! -f "$ENV_FILE" ]]; then
    # BUGFIX: the message used to hard-code ".env.local", which is not this
    # environment's config file; report the actual path ($ENV_FILE).
    error "$ENV_FILE 不存在,请先执行: ./deploy-test/01-init-env.sh"
    exit 1
  fi
  set -a
  # shellcheck source=/dev/null
  source "$ENV_FILE"
  set +a
}
# ── Required-tool check ──────────────────────────────────────────────────────
# require_tools <tool>... — succeed when every tool is on PATH; otherwise
# list what is missing (with install hints for go/docker) and exit 1.
require_tools() {
  local tool
  local missing=()
  for tool in "$@"; do
    if ! command -v "$tool" &>/dev/null; then
      missing+=("$tool")
    fi
  done
  if [[ ${#missing[@]} -eq 0 ]]; then
    return 0
  fi
  error "缺少必要工具: ${missing[*]}"
  local t
  for t in "${missing[@]}"; do
    case "$t" in
      go) echo " → 安装 Go: https://go.dev/dl/" ;;
      docker) echo " → 安装 Docker Desktop: https://www.docker.com/products/docker-desktop/" ;;
    esac
  done
  exit 1
}
# ── Docker daemon check ──────────────────────────────────────────────────────
# Exits 1 unless the docker CLI is installed and the daemon answers
# `docker info`.
require_docker_running() {
  require_tools docker
  if ! docker info &>/dev/null; then
    error "Docker daemon 未运行,请启动 Docker Desktop"
    exit 1
  fi
}
# ── Start one backend service in the background (nohup) ──────────────────────
# start_svc <name> <binary> [args] [workdir]
# Skips when the recorded PID is still alive; requires the binary to exist
# (built by 04-build.sh). Considers startup successful when the process is
# still alive ~1s after launch; otherwise shows the log tail and returns 1.
start_svc() {
  local name="$1" bin="$2" args="${3:-}" workdir="${4:-$ROOT_DIR}"
  local pidfile="$PID_DIR/$name.pid" logfile="$LOG_DIR/$name.log"
  if [[ -f "$pidfile" ]] && kill -0 "$(cat "$pidfile")" 2>/dev/null; then
    warn "$name 已在运行 (PID=$(cat "$pidfile")),跳过"
    return 0
  fi
  [[ ! -f "$bin" ]] && { error "$name 二进制不存在 ($bin),请先执行 04-build.sh"; return 1; }
  info "启动 $name ..."
  # $args is intentionally unquoted: it holds space-separated CLI arguments.
  (
    cd "$workdir"
    # shellcheck disable=SC2086
    nohup "$bin" $args > "$logfile" 2>&1 &
    echo $! > "$pidfile"
  )
  sleep 1
  # Count the start as successful only if the process survived the first 1s.
  if kill -0 "$(cat "$pidfile")" 2>/dev/null; then
    success " $name 已启动 (PID=$(cat "$pidfile")) → $logfile"
  else
    error " $name 启动失败,查看日志:"
    tail -20 "$logfile" 2>/dev/null || true
    return 1
  fi
}
# ── Stop one backend service via its recorded PID ────────────────────────────
# stop_svc <name> — send TERM to the PID recorded in $PID_DIR/<name>.pid and
# remove the PID file; warns when there is no record or the process is gone.
stop_svc() {
  local svc="$1"
  local pidfile="$PID_DIR/$svc.pid"
  if [[ ! -f "$pidfile" ]]; then
    warn "$svc 没有 PID 记录(未运行)"
    return
  fi
  local pid
  pid=$(cat "$pidfile")
  if kill -0 "$pid" 2>/dev/null; then
    kill "$pid" && success "$svc 已停止 (PID=$pid)"
  else
    warn "$svc 进程 $pid 不存在(可能已退出)"
  fi
  rm -f "$pidfile"
}
# ── Docker 容器状态打印 ─────────────────────────────────────────────────────────
print_container_status() {
local label="$1" cname="$2" port="$3"
if docker ps --format '{{.Names}}' 2>/dev/null | grep -q "^${cname}$"; then
printf " ${GREEN}${NC} %-12s container=%-14s :%-5s\n" "$label" "$cname" "$port"
elif docker ps -a --format '{{.Names}}' 2>/dev/null | grep -q "^${cname}$"; then
printf " ${YELLOW}${NC} %-12s stopped=%-14s :%-5s\n" "$label" "$cname" "$port"
else
printf " ${RED}${NC} %-12s (未创建) :%-5s\n" "$label" "$port"
fi
}
# ── 后端服务状态打印 ────────────────────────────────────────────────────────────
print_svc_status() {
local name="$1" desc="$2" pidfile="$PID_DIR/$name.pid"
if [[ -f "$pidfile" ]] && kill -0 "$(cat "$pidfile")" 2>/dev/null; then
printf " ${GREEN}${NC} %-18s PID=%-7s %s\n" "$name" "$(cat "$pidfile")" "$desc"
else
printf " ${RED}${NC} %-18s %-11s %s\n" "$name" "未运行" "$desc"
fi
}
# ── 所有后端服务名列表 ──────────────────────────────────────────────────────────
ALL_SVCS=(openim-server chat-rpc admin-rpc chat-api admin-api meetingmsg livecloud livestream build-server)

205
logs.sh Executable file
View File

@@ -0,0 +1,205 @@
#!/usr/bin/env bash
# =============================================================================
# logs.sh — view service logs
#
# Usage:
#   ./logs.sh <service>          # tail -f the log (follow)
#   ./logs.sh <service> --last   # show only the last 100 lines (no follow)
#   ./logs.sh <service> -n 50    # show the last 50 lines and follow
#   ./logs.sh scripts            # list all script-execution logs
#   ./logs.sh scripts --last     # dump the most recent script log
#
# Log directories (under .deploy-test/, see common.sh):
#   .deploy-test/logs/        — backend service runtime logs
#   .deploy-test/docker-logs/ — Docker container logs (one file per day)
#   .deploy-test/script-logs/ — per-run script logs (timestamped)
#
# Backend services: openim-server, chat-rpc, admin-rpc, chat-api, admin-api,
#                   meetingmsg, livecloud, livestream, build-server
# Frontend:         pc, meetingh5, h5, cms, build-cms, build-down
# Docker:           redis, kafka, etcd, livekit
# =============================================================================
set -euo pipefail
source "$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/common.sh"
# Note: logs.sh deliberately does NOT call init_script_log, so `tail -f`
# output is not redirected through the script-log tee.
SVC="${1:-}"
OPT="${2:-}"
NLINES="${3:-100}"
# ── No argument: print an overview of every known log ────────────────────────
if [[ -z "$SVC" ]]; then
  echo ""
  echo -e "${BOLD}用法:${NC} $0 <service|scripts> [--last|-n <lines>]"
  echo ""
  echo -e "${BOLD}后端服务日志${NC} ($LOG_DIR/):"
  for svc in "${ALL_SVCS[@]}"; do
    local_log="$LOG_DIR/$svc.log"
    if [[ -f "$local_log" ]]; then
      size=$(du -sh "$local_log" 2>/dev/null | awk '{print $1}')
      printf " %-18s %s (%s)\n" "$svc" "$local_log" "$size"
    else
      printf " %-18s (无日志)\n" "$svc"
    fi
  done
  echo ""
  echo -e "${BOLD}Docker 容器日志${NC} ($DOCKER_LOG_DIR/):"
  for svc in redis kafka etcd livekit; do
    log_dir="$DOCKER_LOG_DIR/$svc"
    if [[ -d "$log_dir" ]]; then
      latest=$(ls -t "$log_dir"/*.log 2>/dev/null | head -1 || echo "")
      if [[ -n "$latest" ]]; then
        size=$(du -sh "$latest" 2>/dev/null | awk '{print $1}')
        printf " %-10s %s (%s)\n" "$svc" "$latest" "$size"
      else
        printf " %-10s (目录存在,暂无日志文件)\n" "$svc"
      fi
    else
      printf " %-10s (未启动)\n" "$svc"
    fi
  done
  echo ""
  echo -e "${BOLD}前端服务日志${NC} ($LOG_DIR/fe-*.log):"
  FE_LIST=(pc meetingh5 h5 cms build-cms build-down)
  for fe in "${FE_LIST[@]}"; do
    fe_log="$LOG_DIR/fe-${fe}.log"
    if [[ -f "$fe_log" ]]; then
      size=$(du -sh "$fe_log" 2>/dev/null | awk '{print $1}')
      printf " %-14s %s (%s)\n" "$fe" "$fe_log" "$size"
    else
      printf " %-14s (无日志)\n" "$fe"
    fi
  done
  echo ""
  echo -e "${BOLD}脚本执行日志${NC} ($SCRIPT_LOG_DIR/):"
  if [[ -d "$SCRIPT_LOG_DIR" ]]; then
    # BUGFIX: when the directory exists but contains no *.log yet, the
    # unmatched glob makes `ls` fail; under `set -e -o pipefail` the two
    # unguarded pipelines below used to abort the overview mid-print.
    ls -t "$SCRIPT_LOG_DIR"/*.log 2>/dev/null | head -5 | while read -r f; do
      size=$(du -sh "$f" 2>/dev/null | awk '{print $1}')
      printf " %s (%s)\n" "$(basename "$f")" "$size"
    done || true
    count=$(ls "$SCRIPT_LOG_DIR"/*.log 2>/dev/null | wc -l | tr -d ' ') || count=0
    [[ "$count" -gt 5 ]] && echo " ... 共 $count 个文件"
  else
    echo " (尚无脚本日志)"
  fi
  echo ""
  exit 0
fi
# ── Script-execution logs ────────────────────────────────────────────────────
if [[ "$SVC" == "scripts" ]]; then
  if [[ ! -d "$SCRIPT_LOG_DIR" ]] || [[ -z "$(ls "$SCRIPT_LOG_DIR"/*.log 2>/dev/null)" ]]; then
    warn "暂无脚本执行日志 ($SCRIPT_LOG_DIR/)"
    exit 0
  fi
  if [[ "$OPT" == "--last" ]]; then
    # Dump the most recent log in full.
    latest=$(ls -t "$SCRIPT_LOG_DIR"/*.log | head -1)
    info "最新脚本日志: $latest"
    echo "──────────────────────────────────────"
    cat "$latest"
  else
    # List all logs, newest first: size, date, path.
    info "所有脚本执行日志 ($SCRIPT_LOG_DIR/):"
    ls -lht "$SCRIPT_LOG_DIR"/*.log 2>/dev/null | awk '{printf " %-8s %s %s\n", $5, $6" "$7" "$8, $9}'
    echo ""
    echo "查看最新: $0 scripts --last"
    echo "查看指定: cat $SCRIPT_LOG_DIR/<filename>"
  fi
  exit 0
fi
# ── Docker container logs ────────────────────────────────────────────────────
# _docker_log <container> — show `docker logs` for <container>: --last prints
# $NLINES lines and returns, otherwise follows new output. Also points at the
# locally collected daily log file when one exists.
_docker_log() {
  local cname="$1"
  local log_dir="$DOCKER_LOG_DIR/${cname#dev-}"
  local latest_file
  info "$cname 容器日志"
  # Show where the locally collected copy lives, if any.
  if [[ -d "$log_dir" ]]; then
    latest_file=$(ls -t "$log_dir"/*.log 2>/dev/null | head -1 || echo "")
    if [[ -n "$latest_file" ]]; then
      info "本地日志文件: $latest_file"
    fi
  fi
  echo "──────────────────────────────────────"
  if [[ "$OPT" == "--last" ]]; then
    docker logs --tail "$NLINES" "$cname" 2>&1
  else
    docker logs -f --tail "${NLINES}" "$cname" 2>&1
  fi
}
# Dispatch the four known infrastructure containers.
case "$SVC" in
  redis) _docker_log "dev-redis"; exit 0 ;;
  kafka) _docker_log "dev-kafka"; exit 0 ;;
  etcd) _docker_log "dev-etcd"; exit 0 ;;
  livekit) _docker_log "dev-livekit"; exit 0 ;;
esac
# ── Frontend service logs (fe-<name>.log, written by 07-start-frontend.sh) ───
FE_LIST=(pc meetingh5 h5 cms build-cms build-down)
for _fe in "${FE_LIST[@]}"; do
  if [[ "$SVC" == "$_fe" ]]; then
    LOGFILE="$LOG_DIR/fe-${SVC}.log"
    pidfile="$PID_DIR/fe-${SVC}.pid"
    if [[ ! -f "$LOGFILE" ]]; then
      error "日志文件不存在: $LOGFILE"
      echo " $SVC 可能尚未启动。启动命令: ./deploy-test/07-start-frontend.sh $SVC"
      exit 1
    fi
    # Report whether the service is alive (history is shown either way).
    if [[ -f "$pidfile" ]] && kill -0 "$(cat "$pidfile")" 2>/dev/null; then
      info "$SVC 正在运行 (PID=$(cat "$pidfile"))"
    else
      warn "$SVC 当前未运行(显示历史日志)"
    fi
    info "日志文件: $LOGFILE"
    size=$(du -sh "$LOGFILE" 2>/dev/null | awk '{print $1}')
    info "文件大小: $size"
    echo "──────────────────────────────────────"
    # --last → plain tail; -n → follow with custom count; default → follow 100.
    if [[ "$OPT" == "--last" ]]; then
      tail -n "${NLINES}" "$LOGFILE"
    elif [[ "$OPT" == "-n" ]]; then
      tail -f -n "${NLINES}" "$LOGFILE"
    else
      tail -f -n 100 "$LOGFILE"
    fi
    exit 0
  fi
done
# ── Backend service logs (fallback for any other service name) ───────────────
LOGFILE="$LOG_DIR/$SVC.log"
if [[ ! -f "$LOGFILE" ]]; then
  error "日志文件不存在: $LOGFILE"
  echo " 服务 $SVC 可能尚未启动。启动命令: ./deploy-test/05-start.sh $SVC"
  exit 1
fi
PIDFILE="$PID_DIR/$SVC.pid"
# Report whether the service is alive (history is shown either way).
if [[ -f "$PIDFILE" ]] && kill -0 "$(cat "$PIDFILE")" 2>/dev/null; then
  info "$SVC 正在运行 (PID=$(cat "$PIDFILE"))"
else
  warn "$SVC 当前未运行(显示历史日志)"
fi
info "日志文件: $LOGFILE"
size=$(du -sh "$LOGFILE" 2>/dev/null | awk '{print $1}')
info "文件大小: $size"
echo "──────────────────────────────────────"
# --last → plain tail; -n → follow with custom count; default → follow 100.
if [[ "$OPT" == "--last" ]]; then
  tail -n "${NLINES}" "$LOGFILE"
elif [[ "$OPT" == "-n" ]]; then
  tail -f -n "${NLINES}" "$LOGFILE"
else
  tail -f -n 100 "$LOGFILE"
fi
42
remove-infra.sh Executable file
View File

@@ -0,0 +1,42 @@
#!/usr/bin/env bash
# =============================================================================
# remove-infra.sh — delete the Docker containers AND their persisted data
#
# ⚠️ Destructive: local Redis / Kafka / Etcd data is wiped.
# Use when the environment is broken or you want a clean fresh start.
# =============================================================================
set -euo pipefail
source "$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/common.sh"
require_docker_running
init_script_log # ← mirror all script output into .deploy-test/script-logs/
step "删除 Docker 基础设施容器及数据"
echo ""
warn "⚠️ 此操作将删除以下内容:"
echo " 容器: dev-livekit, dev-redis, dev-kafka, dev-etcd"
echo " 数据: $DATA_DIR/"
echo ""
# Interactive confirmation: anything other than the literal "yes" aborts.
read -p "确认删除?(输入 yes 继续): " -r CONFIRM
if [[ "$CONFIRM" != "yes" ]]; then
  info "已取消"
  exit 0
fi
echo ""
# Force-remove each container if it exists (running or stopped).
for cname in dev-livekit dev-redis dev-kafka dev-etcd; do
  if docker ps -a --format '{{.Names}}' | grep -q "^${cname}$"; then
    docker rm -f "$cname" > /dev/null && success "已删除容器: $cname"
  else
    info "容器不存在,跳过: $cname"
  fi
done
# Wipe the persisted data volumes.
if [[ -d "$DATA_DIR" ]]; then
  rm -rf "$DATA_DIR"
  success "已删除数据目录: $DATA_DIR"
fi
echo ""
success "清理完成"
echo -e " 重新初始化: ${CYAN}./deploy-test/03-start-infra.sh${NC}"

125
restart.sh Executable file
View File

@@ -0,0 +1,125 @@
#!/usr/bin/env bash
# =============================================================================
# restart.sh — restart a single service
#
# Usage:
#   ./restart.sh <service>           # restart a backend service (existing binary)
#   ./restart.sh <service> --build   # rebuild first, then restart
#   ./restart.sh <docker-service>    # restart a Docker container (redis/kafka/etcd)
#
# Examples:
#   ./restart.sh chat-api
#   ./restart.sh chat-api --build
#   ./restart.sh redis
# =============================================================================
set -euo pipefail
source "$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/common.sh"
load_env
init_script_log # ← mirror all script output into .deploy-test/script-logs/
SVC="${1:-}"
OPT="${2:-}"
[[ -z "$SVC" ]] && { error "用法: $0 <service> [--build]"; exit 1; }
# ── Service tables (keep in sync with 05-start.sh) ───────────────────────────
# NOTE(review): build-server appears in common.sh's ALL_SVCS but is missing
# from every table below, so `./restart.sh build-server` reports "未知服务" —
# confirm whether that is intentional.
# Working directory each binary is started from.
declare -A svc_workdir=(
  [openim-server]="$ROOT_DIR/open-im-server"
  [chat-rpc]="$ROOT_DIR/chat"
  [admin-rpc]="$ROOT_DIR/chat"
  [chat-api]="$ROOT_DIR/chat"
  [admin-api]="$ROOT_DIR/chat"
  [meetingmsg]="$ROOT_DIR/meetingmsg"
  [livecloud]="$ROOT_DIR/livecloud"
  [livestream]="$ROOT_DIR/livestream"
)
# Extra CLI arguments passed to each binary at start.
declare -A svc_args=(
  [openim-server]="-c $ROOT_DIR/open-im-server/config"
  [chat-rpc]="-c $ROOT_DIR/chat/config"
  [admin-rpc]="-c $ROOT_DIR/chat/config"
  [chat-api]="-c $ROOT_DIR/chat/config"
  [admin-api]="-c $ROOT_DIR/chat/config"
  [meetingmsg]=""
  [livecloud]=""
  [livestream]=""
)
# Source directory (under ROOT_DIR) used when rebuilding with --build.
declare -A svc_src_dir=(
  [openim-server]="open-im-server"
  [chat-rpc]="chat"
  [admin-rpc]="chat"
  [chat-api]="chat"
  [admin-api]="chat"
  [meetingmsg]="meetingmsg"
  [livecloud]="livecloud"
  [livestream]="livestream"
)
# Go package (relative to the source dir) compiled with --build.
declare -A svc_src_pkg=(
  [openim-server]="./cmd/main.go"
  [chat-rpc]="./cmd/rpc/chat-rpc/"
  [admin-rpc]="./cmd/rpc/admin-rpc/"
  [chat-api]="./cmd/api/chat-api/"
  [admin-api]="./cmd/api/admin-api/"
  [meetingmsg]="."
  [livecloud]="."
  [livestream]="."
)
# ── Docker container restart ─────────────────────────────────────────────────
# _restart_docker <label> <container> — restart an existing container and
# report whether it is running again ~2s later; warns when it was never
# created.
_restart_docker() {
  local label="$1" cname="$2"
  require_docker_running
  if ! docker ps -a --format '{{.Names}}' | grep -q "^${cname}$"; then
    warn "容器 $cname 不存在,请先执行 03-start-infra.sh"
    return 0
  fi
  info "重启容器 $cname ..."
  docker restart "$cname" > /dev/null
  sleep 2
  if docker ps --format '{{.Names}}' | grep -q "^${cname}$"; then
    success "$label 容器已重启 (container=$cname)"
  else
    error "$label 重启失败"
  fi
}
# ── Docker containers are handled first ──────────────────────────────────────
# NOTE(review): livekit (dev-livekit) cannot be restarted here even though
# 03-start-infra.sh manages it — confirm whether it should be added.
case "$SVC" in
  redis) _restart_docker "Redis" "dev-redis"; exit 0 ;;
  kafka) _restart_docker "Kafka" "dev-kafka"; exit 0 ;;
  etcd) _restart_docker "Etcd" "dev-etcd"; exit 0 ;;
esac
# ── Backend services ─────────────────────────────────────────────────────────
if [[ -z "${svc_workdir[$SVC]:-}" ]]; then
  error "未知服务: $SVC"
  echo "后端服务: ${!svc_workdir[*]}"
  echo "Docker: redis, kafka, etcd"
  exit 1
fi
step "重启服务: $SVC"
# Optionally rebuild the binary before restarting.
if [[ "$OPT" == "--build" ]]; then
  info "重新编译 $SVC ..."
  dir="$ROOT_DIR/${svc_src_dir[$SVC]}"
  pkg="${svc_src_pkg[$SVC]}"
  (cd "$dir" && go build -o "$BUILD_DIR/$SVC" "$pkg") && \
    success "编译完成" || { error "编译失败"; exit 1; }
fi
# Stop the old process, then start a fresh one (helpers from common.sh).
stop_svc "$SVC"
sleep 1
start_svc "$SVC" \
  "$BUILD_DIR/$SVC" \
  "${svc_args[$SVC]}" \
  "${svc_workdir[$SVC]}"
echo ""
echo -e "查看日志: ${CYAN}./deploy-test/logs.sh $SVC${NC}"
87
setup.sh Executable file
View File

@@ -0,0 +1,87 @@
#!/usr/bin/env bash
# =============================================================================
# setup.sh — one-shot full deployment (first-time setup)
#
# Runs, in order:
#   01-init-env.sh     → generate .env.deploy-test
#   02-patch-config.sh → write the service YAML configs
#   03-start-infra.sh  → start the Docker containers
#   04-build.sh        → build every Go service
#   05-start.sh        → start every backend service
#
# Every step can also be re-run on its own; no need to start over.
# =============================================================================
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "$SCRIPT_DIR/common.sh"
init_dirs
init_script_log   # per-run script execution log

header "一键完整部署(首次使用)"
echo -e "${BOLD}基础设施策略:${NC}"
echo " Redis / Kafka / Etcd → Docker 容器(本地)"
echo " MongoDB → 远程服务"
echo " 文件存储 → Amazon S3"
echo ""

# ── Step 1: initialize .env.deploy-test ──────────────────────────────────────
# NOTE: the messages below previously said ".env.local" — stale copy from the
# deploy-local variant; this directory's env file is $ENV_FILE (.env.deploy-test).
step "[1/5] 初始化 .env.deploy-test"
bash "$SCRIPT_DIR/01-init-env.sh"
echo ""
warn "请确认 .env.deploy-test 中的 MongoDB 和 AWS S3 配置已正确填写!"
echo -e " ${CYAN}vim $ENV_FILE${NC}"
echo ""
read -p "配置已填写好,继续执行?(y/N): " -n 1 -r REPLY; echo
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
  info "已暂停,编辑完成后重新执行: ./deploy-test/setup.sh"
  info "或跳过此步直接从步骤 2 开始: ./deploy-test/02-patch-config.sh"
  exit 0
fi

# ── Step 2: patch the service config files ───────────────────────────────────
step "[2/5] 修改服务配置文件"
bash "$SCRIPT_DIR/02-patch-config.sh"

# ── Step 3: start the Docker infrastructure ──────────────────────────────────
step "[3/5] 启动 Docker 基础设施"
bash "$SCRIPT_DIR/03-start-infra.sh"
info "等待基础设施就绪 (5s)..."
sleep 5

# ── Step 4: build ────────────────────────────────────────────────────────────
step "[4/5] 编译所有后端服务"
bash "$SCRIPT_DIR/04-build.sh"

# ── Step 5: start ────────────────────────────────────────────────────────────
step "[5/5] 启动所有后端服务"
bash "$SCRIPT_DIR/05-start.sh"

# ── Summary ──────────────────────────────────────────────────────────────────
echo ""
echo -e "${BOLD}${GREEN}╔══════════════════════════════════════════╗${NC}"
echo -e "${BOLD}${GREEN}║ 测试环境部署完成! ║${NC}"
echo -e "${BOLD}${GREEN}╚══════════════════════════════════════════╝${NC}"
echo ""
echo -e "${BOLD}服务地址:${NC}"
echo " IM API: http://localhost:10002"
echo " IM WebSocket: ws://localhost:10001"
echo " Chat API: http://localhost:10008"
echo " Admin API: http://localhost:10009"
echo " MeetingMsg WS: ws://localhost:8000"
echo " Livecloud: http://localhost:8080"
echo " Livestream: http://localhost:8081"
echo ""
echo -e "${BOLD}日常命令:${NC}"
echo " ./deploy-test/status.sh # 查看全部状态"
echo " ./deploy-test/logs.sh <service> # 实时日志"
echo " ./deploy-test/restart.sh <service> # 重启服务"
echo " ./deploy-test/restart.sh <svc> --build # 重编译并重启"
echo " ./deploy-test/check-conn.sh # 验证 MongoDB/S3"
echo ""
echo -e "${BOLD}停止服务:${NC}"
echo " ./deploy-test/stop.sh # 停止后端进程"
echo " ./deploy-test/stop-infra.sh # 停止 Docker 容器"
echo ""

90
status.sh Executable file
View File

@@ -0,0 +1,90 @@
#!/usr/bin/env bash
# =============================================================================
# status.sh — show the runtime status of every service
#
# Displays:
#   - Docker container status (Redis / Kafka / Etcd / LiveKit)
#   - remote-service connection summary (MongoDB / S3)
#   - backend service processes (PID + listening port)
#   - frontend dev servers and a port-occupancy sweep
# =============================================================================
set -euo pipefail
source "$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/common.sh"

# Status must still render before the env file exists; ignore load failures.
load_env 2>/dev/null || true

header "服务运行状态"

# ── Docker infrastructure ─────────────────────────────────────────────────────
echo ""
echo -e "${BOLD}[ Docker 基础设施 ]${NC}"
print_container_status "Redis" "dev-redis" "${REDIS_PORT:-6379}"
print_container_status "Kafka" "dev-kafka" "${KAFKA_PORT:-9092}"
print_container_status "Etcd" "dev-etcd" "${ETCD_PORT:-2379}"
print_container_status "LiveKit" "dev-livekit" "7880"
printf " ${CYAN}${NC} %-10s 公网 %s:50000-51000/udp (WebRTC)\n" "" "${LIVEKIT_NODE_IP:-?}"

# ── Remote services (connection config) ──────────────────────────────────────
echo ""
echo -e "${BOLD}[ 远程服务(连接配置)]${NC}"
printf " ${CYAN}${NC} %-10s %s\n" "MongoDB" \
"${MONGO_HOST:-?}:${MONGO_PORT:-27017}/${MONGO_DATABASE:-?} (authSource=${MONGO_AUTHSOURCE:-?})"
# FIX: the env template defines OPENIM_AWS_* (see 01-init-env.sh); the old
# AWS_BUCKET/AWS_REGION names are never set there, so this line always
# printed "?". Plain AWS_* is kept as a fallback for older env files.
printf " ${CYAN}${NC} %-10s %s\n" "S3" \
"s3://${OPENIM_AWS_BUCKET:-${AWS_BUCKET:-?}} region=${OPENIM_AWS_REGION:-${AWS_REGION:-?}}"

# ── Backend services ─────────────────────────────────────────────────────────
echo ""
echo -e "${BOLD}[ 后端服务 ]${NC}"
print_svc_status "openim-server" ":10002 (API) :10001 (MsgGateway WS)"
print_svc_status "chat-rpc" "内部 RPC → Etcd"
print_svc_status "admin-rpc" "内部 RPC → Etcd"
print_svc_status "chat-api" ":10008"
print_svc_status "admin-api" ":10009"
print_svc_status "meetingmsg" ":8000 (WS)"
print_svc_status "livecloud" ":8080"
print_svc_status "livestream" ":8081"
print_svc_status "build-server" ":8281"

# ── Frontend dev servers ─────────────────────────────────────────────────────
echo ""
echo -e "${BOLD}[ 前端开发服务器 ]${NC}"
declare -A FE_PORT_MAP=(
[pc]="7777 (Electron Vite)"
[meetingh5]="5188"
[h5]="3003"
[cms]="8001"
[build-cms]="8002"
[build-down]="8003"
)
for fe in pc meetingh5 h5 cms build-cms build-down; do
  pidfile="$PID_DIR/fe-${fe}.pid"
  logfile="$LOG_DIR/fe-${fe}.log"
  # A project is "running" when its PID file exists and the PID is alive.
  if [[ -f "$pidfile" ]] && kill -0 "$(cat "$pidfile")" 2>/dev/null; then
    printf " ${GREEN}${NC} %-14s PID=%-7s :%s\n" "$fe" "$(cat "$pidfile")" "${FE_PORT_MAP[$fe]}"
  else
    printf " ${RED}${NC} %-14s 未运行 :%s\n" "$fe" "${FE_PORT_MAP[$fe]}"
  fi
done

# ── Port occupancy ───────────────────────────────────────────────────────────
echo ""
echo -e "${BOLD}[ 端口占用 ]${NC}"
PORTS=(10002 10001 10008 10009 8000 8080 8081 8281 7777 5188 3003 8001 8002 8003)
for port in "${PORTS[@]}"; do
  # lsof exits non-zero when nothing listens; `|| true` keeps set -e quiet.
  pid=$(lsof -ti :"$port" 2>/dev/null | head -1 || true)
  if [[ -n "$pid" ]]; then
    cmd=$(ps -p "$pid" -o comm= 2>/dev/null || echo "?")
    printf " ${GREEN}${NC} :%d PID=%-6s (%s)\n" "$port" "$pid" "$cmd"
  else
    printf " ${YELLOW}${NC} :%d (未监听)\n" "$port"
  fi
done

# ── Quick actions ────────────────────────────────────────────────────────────
echo ""
echo -e "${BOLD}[ 快捷命令 ]${NC}"
echo " ./deploy-test/logs.sh <service> 查看日志(支持前端: pc/h5/cms 等)"
echo " ./deploy-test/restart.sh <service> 重启后端服务"
echo " ./deploy-test/restart.sh <svc> --build 重编译并重启"
echo " ./deploy-test/07-start-frontend.sh 启动前端服务"
echo " ./deploy-test/stop-frontend.sh 停止前端服务"
echo " ./deploy-test/check-conn.sh 验证 MongoDB/S3 连接"
echo ""

54
stop-frontend.sh Executable file
View File

@@ -0,0 +1,54 @@
#!/usr/bin/env bash
# =============================================================================
# stop-frontend.sh — stop the frontend dev servers
#
# Usage:
#   ./stop-frontend.sh             # stop every frontend project
#   ./stop-frontend.sh <project>   # stop a single project
#
# Projects: pc, meetingh5, h5, cms, build-cms, build-down
# =============================================================================
set -euo pipefail
source "$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/common.sh"
init_dirs
init_script_log   # per-run script execution log

FE_PROJECTS=(pc meetingh5 h5 cms build-cms build-down)
TARGET="${1:-all}"

# Stop one frontend project: kill its whole process group (npm/pnpm/yarn spawn
# children), fall back to killing the recorded PID, then remove the PID file.
_stop_fe() {
  local name="$1"
  local pidfile="$PID_DIR/fe-${name}.pid"
  if [[ ! -f "$pidfile" ]]; then
    warn "$name 没有 PID 记录(未运行)"
    return 0
  fi
  local pid pgid
  pid=$(cat "$pidfile")
  if kill -0 "$pid" 2>/dev/null; then
    pgid=$(ps -o pgid= -p "$pid" 2>/dev/null | tr -d ' ') || true
    kill -- "-${pgid}" 2>/dev/null || kill "$pid" 2>/dev/null || true
    success "$name 已停止 (PID=$pid)"
  else
    warn "$name 进程不存在(可能已退出)"
  fi
  rm -f "$pidfile"
}

# True when $1 is one of the known frontend projects.
_known_fe() {
  local p
  for p in "${FE_PROJECTS[@]}"; do
    [[ "$p" == "$1" ]] && return 0
  done
  return 1
}

if [[ "$TARGET" == "all" ]]; then
  step "停止全部前端开发服务器"
  for proj in "${FE_PROJECTS[@]}"; do
    _stop_fe "$proj"
  done
  success "所有前端服务已停止"
else
  if ! _known_fe "$TARGET"; then
    error "未知项目: $TARGET"
    echo "可用: ${FE_PROJECTS[*]}"
    exit 1
  fi
  step "停止: $TARGET"
  _stop_fe "$TARGET"
fi

51
stop-infra.sh Executable file
View File

@@ -0,0 +1,51 @@
#!/usr/bin/env bash
# =============================================================================
# stop-infra.sh — stop the Docker infrastructure containers
#
# Usage:
#   ./stop-infra.sh          # stop Redis / Kafka / Etcd (and LiveKit)
#   ./stop-infra.sh redis    # stop Redis only
#   ./stop-infra.sh kafka    # stop Kafka only
#   ./stop-infra.sh etcd     # stop Etcd only
#
# Note: containers are only stopped, data is kept. Restart: 03-start-infra.sh
# =============================================================================
set -euo pipefail
source "$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/common.sh"
require_docker_running
init_script_log   # per-run script execution log

TARGET="${1:-all}"

# _stop_container <label> <container-name>
# Stops the named container if it is running; <label> is for messages only.
_stop_container() {
  local label="$1" cname="$2"
  # Shut down the log-collector process first.
  stop_docker_logger "$cname"
  # -F/-x: literal whole-line match. The previous `grep -q "^${cname}$"`
  # treated the name as a regex, so metacharacters could match other names.
  if docker ps --format '{{.Names}}' | grep -Fxq -- "$cname"; then
    docker stop "$cname" > /dev/null && success "$label 容器已停止 (container=$cname)"
  else
    warn "$label 容器未在运行 (container=$cname)"
  fi
}

case "$TARGET" in
  all)
    step "停止所有 Docker 基础设施"
    _stop_container "LiveKit" "dev-livekit"
    _stop_container "Kafka" "dev-kafka"
    _stop_container "Redis" "dev-redis"
    _stop_container "Etcd" "dev-etcd"
    echo ""
    success "所有 Docker 容器已停止(数据已保留)"
    echo -e " 重新启动: ${CYAN}./deploy-test/03-start-infra.sh${NC}"
    ;;
  redis) _stop_container "Redis" "dev-redis" ;;
  kafka) _stop_container "Kafka" "dev-kafka" ;;
  etcd) _stop_container "Etcd" "dev-etcd" ;;
  livekit) _stop_container "LiveKit" "dev-livekit" ;;
  *)
    error "未知组件: $TARGET"
    echo "可用: redis, kafka, etcd, livekit, all"
    exit 1
    ;;
esac

42
stop.sh Executable file
View File

@@ -0,0 +1,42 @@
#!/usr/bin/env bash
# =============================================================================
# stop.sh — stop the backend Go services
#
# Usage:
#   ./stop.sh          # stop every backend service
#   ./stop.sh <svc>    # stop a single service
#
# Note: only backend processes are stopped; the Docker containers
#       (Redis/Kafka/Etcd) are untouched — use ./stop-infra.sh for those.
# =============================================================================
set -euo pipefail
source "$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/common.sh"
init_dirs
init_script_log   # per-run script execution log

TARGET="${1:-all}"

case "$TARGET" in
  all)
    step "停止所有后端服务"
    for svc in "${ALL_SVCS[@]}"; do
      stop_svc "$svc"
    done
    echo ""
    success "所有后端服务已停止"
    echo ""
    echo -e "如需停止 Docker 基础设施:"
    echo -e " ${CYAN}./deploy-test/stop-infra.sh${NC}"
    ;;
  *)
    step "停止服务: $TARGET"
    # Validate the requested name against the known service list.
    found=0
    for svc in "${ALL_SVCS[@]}"; do
      if [[ "$svc" == "$TARGET" ]]; then
        found=1
        break
      fi
    done
    if (( found == 0 )); then
      error "未知服务: $TARGET"
      echo "可用: ${ALL_SVCS[*]}"
      exit 1
    fi
    stop_svc "$TARGET"
    ;;
esac