Files
deploy-test/02-patch-config.sh
2026-04-13 01:27:34 +07:00

484 lines
16 KiB
Bash
Executable File
Raw Permalink Blame History

This file contains ambiguous Unicode characters

This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

#!/usr/bin/env bash
# =============================================================================
# 02-patch-config.sh — write the variables from .env.local into each
# service's YAML configuration files.
#
# Files touched:
#   open-im-server/config/redis.yml kafka.yml discovery.yml
#   open-im-server/config/mongodb.yml minio.yml openim-rpc-third.yml
#   chat/config/redis.yml discovery.yml mongodb.yml
#   meetingmsg/manifest/config/config.yaml
#
# Next step: 03-start-infra.sh (brings up the Docker infrastructure)
# =============================================================================
set -euo pipefail

# Shared helpers: header/step/success/warn/error, load_env, ROOT_DIR, colors.
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "$script_dir/common.sh"
load_env
init_script_log # per-run execution log

header "步骤 2 / 5 — 修改服务配置文件"

OPENIM_CONF="$ROOT_DIR/open-im-server/config"
CHAT_CONF="$ROOT_DIR/chat/config"

# Both config directories must already exist; abort early otherwise.
for conf_dir in "$OPENIM_CONF" "$CHAT_CONF"; do
  if [[ ! -d "$conf_dir" ]]; then
    error "目录不存在: $conf_dir"
    exit 1
  fi
done
# ──────────────────────────────────────────────────────────────────────────────
# open-im-server
# ──────────────────────────────────────────────────────────────────────────────
step "open-im-server 配置"

# Redis — point open-im-server at the local Docker Redis.
# Fix: the sentinelMode children must be nested under sentinelMode; written
# flat they become bogus top-level keys and sentinelMode parses as null.
# NOTE(review): heredoc is unquoted so ${REDIS_PORT}/${REDIS_PASSWORD} expand;
# a password containing a single quote would break the YAML quoting — assumed
# safe for these dev credentials.
cat > "$OPENIM_CONF/redis.yml" <<EOF
address: [ "127.0.0.1:${REDIS_PORT}" ]
username: ''
password: '${REDIS_PASSWORD}'
enablePipeline: false
clusterMode: false
db: 0
maxRetry: 10
poolSize: 100
onlineKeyPrefix: "openim:local-dev"
onlineKeyPrefixHashTag: false
sentinelMode:
  masterName: "redis-master"
  sentinelsAddrs: []
  routeByLatency: false
  routeRandomly: false
EOF
success " redis.yml → 127.0.0.1:${REDIS_PORT}"
# Kafka — message queue for the async message pipelines (redis/mongo/push).
# Fix: the tls options must be nested under the tls key; written flat they
# would be read as unrelated top-level keys.
cat > "$OPENIM_CONF/kafka.yml" <<EOF
username:
password:
producerAck:
compressType: none
address: ["127.0.0.1:${KAFKA_PORT}"]
toRedisTopic: toRedis
toMongoTopic: toMongo
toPushTopic: toPush
toOfflinePushTopic: toOfflinePush
toRedisGroupID: redis
toMongoGroupID: mongo
toPushGroupID: push
toOfflinePushGroupID: offlinePush
tls:
  enableTLS: false
  caCrt:
  clientCrt:
  clientKey:
  clientKeyPwd:
  insecureSkipVerify: false
EOF
success " kafka.yml → 127.0.0.1:${KAFKA_PORT}"
# Etcd — service discovery for the open-im-server RPC services.
# Fix: etcd/kubernetes/rpcService children must be nested under their parent
# keys; the flat form is an invalid discovery config.
cat > "$OPENIM_CONF/discovery.yml" <<EOF
enable: etcd
etcd:
  rootDirectory: openim
  address: ["127.0.0.1:${ETCD_PORT}"]
  username:
  password:
kubernetes:
  namespace: default
rpcService:
  user: user-rpc-service
  friend: friend-rpc-service
  msg: msg-rpc-service
  push: push-rpc-service
  messageGateway: messagegateway-rpc-service
  group: group-rpc-service
  auth: auth-rpc-service
  conversation: conversation-rpc-service
  third: third-rpc-service
EOF
success " discovery.yml → etcd 127.0.0.1:${ETCD_PORT}"
# MongoDB (remote) — auth is carried entirely in the connection URI, so the
# standalone username/password/authSource keys below stay empty on purpose.
# Fix: percent-encode the userinfo part of the URI so credentials containing
# '@', ':', '/' or '%' cannot corrupt it (no-op for plain alphanumeric creds).
MONGO_USER_ENC=$(python3 -c 'import urllib.parse, sys; print(urllib.parse.quote(sys.argv[1], safe=""))' "$MONGO_USERNAME")
MONGO_PASS_ENC=$(python3 -c 'import urllib.parse, sys; print(urllib.parse.quote(sys.argv[1], safe=""))' "$MONGO_PASSWORD")
cat > "$OPENIM_CONF/mongodb.yml" <<EOF
uri: "mongodb://${MONGO_USER_ENC}:${MONGO_PASS_ENC}@${MONGO_HOST}:${MONGO_PORT}/${MONGO_DATABASE}?authSource=${MONGO_AUTHSOURCE}&directConnection=true"
address: []
database: ${MONGO_DATABASE}
username:
password:
authSource:
mongoMode: "standalone"
maxPoolSize: 100
maxRetry: 10
EOF
success " mongodb.yml → ${MONGO_HOST}:${MONGO_PORT}/${MONGO_DATABASE}"
# minio.yml — kept only as a marker that object storage moved to Amazon S3;
# the active storage driver is selected by object.enable in
# openim-rpc-third.yml (patched below).
# NOTE(review): internal/externalAddress point at the regional S3 endpoint —
# presumably ignored once the driver is aws; confirm against open-im-server.
cat > "$OPENIM_CONF/minio.yml" <<EOF
# 已切换为 Amazon S3存储驱动由 openim-rpc-third.yml 中 object.enable: aws 控制
bucket: ${OPENIM_AWS_BUCKET}
accessKeyID: ${OPENIM_AWS_ACCESS_KEY_ID}
secretAccessKey: ${OPENIM_AWS_SECRET_ACCESS_KEY}
sessionToken:
internalAddress: s3.${OPENIM_AWS_REGION}.amazonaws.com
externalAddress: https://s3.${OPENIM_AWS_REGION}.amazonaws.com
publicRead: ${OPENIM_AWS_PUBLIC_READ}
EOF
success " minio.yml → S3 (${OPENIM_AWS_REGION})"
# openim-rpc-third.yml — switch the object-storage driver to AWS and rewrite
# the aws credentials section in place (a .bak copy is kept).
# NOTE(review): the unquoted heredoc splices ${OPENIM_AWS_*} straight into the
# generated Python source; values containing quotes or backslashes would break
# it — assumed safe for these deploy credentials.
THIRD_CONF="$OPENIM_CONF/openim-rpc-third.yml"
if [[ -f "$THIRD_CONF" ]]; then
  cp "$THIRD_CONF" "${THIRD_CONF}.bak"
  python3 - "$THIRD_CONF" <<PYEOF
import re, sys

path = sys.argv[1]
with open(path) as f:
    content = f.read()

# Flip the storage driver, e.g. "enable: minio" -> "enable: aws".
# NOTE(review): rewrites every "enable:" key in this file; assumed the only
# one present is object.enable — confirm against the stock config.
content = re.sub(r'(\benable:\s*)\S+', r'\1aws', content)

endpoint_line = ""
endpoint_val = "${OPENIM_AWS_ENDPOINT:-}"
if endpoint_val:
    endpoint_line = f"    endpoint: {endpoint_val}\n"

new_aws = f"""  aws:
{endpoint_line}    region: ${OPENIM_AWS_REGION}
    bucket: ${OPENIM_AWS_BUCKET}
    accessKeyID: ${OPENIM_AWS_ACCESS_KEY_ID}
    secretAccessKey: ${OPENIM_AWS_SECRET_ACCESS_KEY}
    sessionToken:
    publicRead: ${OPENIM_AWS_PUBLIC_READ}"""

# Replace the existing "  aws:" section header plus all of its indented
# children. Fixes: the old pattern used "{{2}}", which Python's re treats as
# literal braces, so it never matched and the credentials were never written.
# A function replacement keeps backslashes in secrets literal (a plain string
# repl would interpret them as escape sequences).
content = re.sub(
    r'([ \t]{2}aws:\n(?:[ \t]+\S[^\n]*\n?)*)',
    lambda m: '\n' + new_aws + '\n',
    content,
    count=1,
)

with open(path, 'w') as f:
    f.write(content)
PYEOF
  success " openim-rpc-third.yml → enable: aws, bucket=${OPENIM_AWS_BUCKET} (备份: .bak)"
else
  warn " openim-rpc-third.yml 不存在,跳过"
fi
# ──────────────────────────────────────────────────────────────────────────────
# chat
# ──────────────────────────────────────────────────────────────────────────────
step "chat 配置"
# Redis — same local Docker Redis as open-im-server; the chat service's redis
# config is flat (no pool/sentinel keys in its schema).
cat > "$CHAT_CONF/redis.yml" <<EOF
address: [ "127.0.0.1:${REDIS_PORT}" ]
username: ''
password: '${REDIS_PASSWORD}'
enablePipeline: false
clusterMode: false
db: 0
maxRetry: 10
EOF
success " redis.yml → 127.0.0.1:${REDIS_PORT}"
# Etcd — service discovery for the chat RPC services (same etcd instance and
# rootDirectory as open-im-server).
# Fix: etcd/kubernetes/rpcService children must be nested under their parent
# keys; the flat form is an invalid discovery config.
cat > "$CHAT_CONF/discovery.yml" <<EOF
enable: "etcd"
etcd:
  rootDirectory: openim
  address: [ "127.0.0.1:${ETCD_PORT}" ]
  username: ''
  password: ''
kubernetes:
  namespace: default
rpcService:
  chat: chat-rpc-service
  admin: admin-rpc-service
  bot: bot-rpc-service
EOF
success " discovery.yml → etcd 127.0.0.1:${ETCD_PORT}"
# MongoDB (remote) — chat shares the same MongoDB instance and database as
# open-im-server; auth is carried entirely in the URI.
# Fix: percent-encode the userinfo part of the URI so credentials containing
# '@', ':', '/' or '%' cannot corrupt it (no-op for plain alphanumeric creds).
MONGO_USER_ENC=$(python3 -c 'import urllib.parse, sys; print(urllib.parse.quote(sys.argv[1], safe=""))' "$MONGO_USERNAME")
MONGO_PASS_ENC=$(python3 -c 'import urllib.parse, sys; print(urllib.parse.quote(sys.argv[1], safe=""))' "$MONGO_PASSWORD")
cat > "$CHAT_CONF/mongodb.yml" <<EOF
uri: "mongodb://${MONGO_USER_ENC}:${MONGO_PASS_ENC}@${MONGO_HOST}:${MONGO_PORT}/${MONGO_DATABASE}?authSource=${MONGO_AUTHSOURCE}&directConnection=true"
address: []
database: ${MONGO_DATABASE}
username:
password:
authSource:
mongoMode: "standalone"
maxPoolSize: 100
maxRetry: 10
EOF
success " mongodb.yml → ${MONGO_HOST}:${MONGO_PORT}/${MONGO_DATABASE}"
# ──────────────────────────────────────────────────────────────────────────────
# meetingmsg
# ──────────────────────────────────────────────────────────────────────────────
step "meetingmsg 配置"
MM_CONF="$ROOT_DIR/meetingmsg/manifest/config/config.yaml"
if [[ -f "$MM_CONF" ]]; then
  # Fix: server/redis.default/logger options must be nested sections; written
  # flat they parse as unrelated top-level keys and the service would fall
  # back to defaults (wrong port / no redis auth).
  cat > "$MM_CONF" <<EOF
server:
  address: :8000
redis:
  default:
    address: 127.0.0.1:${REDIS_PORT}
    pass: ${REDIS_PASSWORD}
logger:
  level: all
  stdout: true
EOF
  success " config.yaml → redis 127.0.0.1:${REDIS_PORT}"
else
  warn " meetingmsg config.yaml 不存在,跳过"
fi
# open-im-server webhooks.yml — enable the afterSendGroupMsg callback so that
# group messages are forwarded to meetingmsg.
# meetingmsg receive path: /event/:command (command=callbackAfterSendGroupMsgCommand)
WH_CONF="$OPENIM_CONF/webhooks.yml"
if [[ -f "$WH_CONF" ]]; then
  cp "$WH_CONF" "${WH_CONF}.bak"
  # sed rewrites only the top-level callback url; everything else keeps its
  # default. (-i.tmp keeps sed portable across GNU/BSD; the copy is removed
  # below.)
  sed -i.tmp \
    -e 's|^url:.*|url: http://127.0.0.1:8000/event/callbackAfterSendGroupMsgCommand|' \
    "$WH_CONF"
  # Flip only afterSendGroupMsg.enable to true.
  python3 - "$WH_CONF" <<'PYEOF'
import sys, re

path = sys.argv[1]
content = open(path).read()
# Fix: accept any indentation before "enable:" (the key is conventionally
# indented two spaces, which the old single-space pattern could never match),
# and only skip over indented lines so the match cannot leak past the end of
# the afterSendGroupMsg section into another webhook's enable key.
content = re.sub(
    r'(afterSendGroupMsg:\n(?:[ \t]+\S[^\n]*\n)*?[ \t]+enable:) false',
    r'\1 true',
    content, count=1
)
open(path, 'w').write(content)
print(" patched afterSendGroupMsg.enable = true")
PYEOF
  rm -f "${WH_CONF}.tmp"
  success " webhooks.yml → url=http://127.0.0.1:8000/event/callbackAfterSendGroupMsgCommand, afterSendGroupMsg.enable=true (备份: .bak)"
else
  warn " webhooks.yml 不存在跳过meetingmsg 将收不到消息推送)"
fi
# ──────────────────────────────────────────────────────────────────────────────
# livekit server (local Docker, reuses the dev-redis container)
# ──────────────────────────────────────────────────────────────────────────────
step "livekit 配置"
LK_CONF="$ROOT_DIR/livekit/livekit.yaml"
# Back up only when a previous config exists (first run has nothing to save).
if [[ -f "$LK_CONF" ]]; then
  cp "$LK_CONF" "${LK_CONF}.bak"
fi
# Fix: restore the YAML nesting — rtc/congestion_control/redis/keys/logging/
# room options were written flat, which livekit-server cannot parse correctly.
cat > "$LK_CONF" <<EOF
port: 7880
bind_addresses:
  - 0.0.0.0
rtc:
  # WebRTC 媒体流直连端口(客户端直接连 node_ip
  port_range_start: 50000
  port_range_end: 51000
  tcp_port: 7882
  use_external_ip: true
  # 公网 IP来自 .env.local → LIVEKIT_NODE_IP
  node_ip: ${LIVEKIT_NODE_IP}
  enable_loopback_candidate: false
  congestion_control:
    enabled: true
    probing_interval: 2000
# 复用 dev-redis 容器host.docker.internal 可从 Docker 容器访问宿主机端口)
redis:
  address: host.docker.internal:${REDIS_PORT}
  password: ${REDIS_PASSWORD}
  db: 0
# API 密钥(来自 .env.local → LIVEKIT_API_KEY: LIVEKIT_API_SECRET
keys:
  ${LIVEKIT_API_KEY}: ${LIVEKIT_API_SECRET}
logging:
  level: info
  json: false
  sample: false
room:
  auto_create: true
  empty_timeout: 300
  departure_timeout: 20
  enable_remote_unmute: true
  max_participants: 100
development: true
EOF
success " livekit/livekit.yaml → node_ip=${LIVEKIT_NODE_IP}, redis=host.docker.internal:${REDIS_PORT} (备份: .bak)"
# ──────────────────────────────────────────────────────────────────────────────
# livecloud
# ──────────────────────────────────────────────────────────────────────────────
step "livecloud 配置"
LC_CONF="$ROOT_DIR/livecloud/config/config.yml"
if [[ -f "$LC_CONF" ]]; then
  cp "$LC_CONF" "${LC_CONF}.bak"
  # Fix: restore the YAML nesting — server/jwt/redis/cloud and the per-provider
  # blocks were written flat, producing an invalid config.
  # NOTE(review): jwt.secret_key is a hard-coded placeholder — fine for the
  # test environment, must be replaced for production.
  cat > "$LC_CONF" <<EOF
server:
  port: "8080"
  env: "dev"
jwt:
  secret_key: "your-secret-key-change-in-production"
redis:
  host: 127.0.0.1
  port: ${REDIS_PORT}
  password: ${REDIS_PASSWORD}
cloud:
  default_provider: tencent
  load_balance_strategy: round_robin
  aliyun:
    enabled: false
    app_id: your_aliyun_app_id
    app_key: your_aliyun_app_key
    region: cn-hangzhou
    push_domain: push.example.com
    play_domain: play.example.com
  tencent:
    enabled: true
    sdk_app_id: ${TENCENT_SDK_APP_ID}
    sdk_secret_key: ${TENCENT_SDK_SECRET_KEY}
    region: ap-guangzhou
    push_domain: push-tx.example.com
    play_domain: play-tx.example.com
EOF
  success " livecloud/config/config.yml → redis=127.0.0.1:${REDIS_PORT}, tencent sdk_app_id=${TENCENT_SDK_APP_ID} (备份: .bak)"
else
  warn " livecloud/config/config.yml 不存在,跳过"
fi
# ──────────────────────────────────────────────────────────────────────────────
# livestream
# ──────────────────────────────────────────────────────────────────────────────
step "livestream 配置"
LS_CONF="$ROOT_DIR/livestream/config.yaml"
# Best-effort backup: on the first run there is nothing to back up.
cp "${LS_CONF}" "${LS_CONF}.bak" 2>/dev/null || true
# Fix: restore the YAML nesting — the livekit/cloudflare children were written
# flat, so the service would see null sections and stray top-level keys.
cat > "$LS_CONF" <<EOF
# LiveKit Server来源: deploy/livekit-config/livekit.yaml → keys 段)
livekit:
  url: "${LIVEKIT_URL}"
  apiKey: "${LIVEKIT_API_KEY}"
  apiSecret: "${LIVEKIT_API_SECRET}"
# Cloudflare Stream用于直播推流 CDN
cloudflare:
  accountId: "${CF_ACCOUNT_ID}"
  apiToken: "${CF_API_TOKEN}"
  email: "${CF_EMAIL}"
  apiKey: "${CF_API_KEY}"
  customerCode: "${CF_CUSTOMER_CODE}"
EOF
success " livestream/config.yaml → livekit=${LIVEKIT_URL}, key=${LIVEKIT_API_KEY} (备份: .bak)"
if [[ -z "${CF_ACCOUNT_ID}" ]]; then
  warn " Cloudflare 配置为空,推流 CDN 功能不可用,请填写 .env.local 中 CF_* 变量"
fi
# ──────────────────────────────────────────────────────────────────────────────
# build-server
# ──────────────────────────────────────────────────────────────────────────────
step "build-server 配置"
BS_CONF="$ROOT_DIR/build-server/config/config.yaml"
if [[ -f "$BS_CONF" ]]; then
  cp "$BS_CONF" "${BS_CONF}.bak"
  # build-server shares the same MongoDB connection; only the dbname differs.
  # Fix: restore the YAML nesting for server/database/jwt/upload/aws sections,
  # which were written flat.
  cat > "$BS_CONF" <<EOF
mode: debug
host: 0.0.0.0
port: 8281
server:
  read_timeout: 30
  write_timeout: 30
database:
  host: ${MONGO_HOST}
  port: ${MONGO_PORT}
  user: ${MONGO_USERNAME}
  password: ${MONGO_PASSWORD}
  dbname: ${BUILD_MONGO_DATABASE}
  auth_source: ${MONGO_AUTHSOURCE}
jwt:
  secret: "your-secret-key-change-in-production"
  expire_time: 24
upload:
  path: "./uploads"
  max_size: 5242880
  allow_exts: "jpg,jpeg,png,gif,webp"
  base_url: "http://localhost:8281"
aws:
  access_key: "${BUILD_AWS_ACCESS_KEY}"
  secret_key: "${BUILD_AWS_SECRET_KEY}"
  region: "${BUILD_AWS_REGION}"
  bucket: "${BUILD_AWS_BUCKET}"
EOF
  success " build-server/config/config.yaml → ${MONGO_HOST}/${BUILD_MONGO_DATABASE}, S3 bucket=${BUILD_AWS_BUCKET} (备份: .bak)"
else
  warn " build-server/config/config.yaml 不存在,跳过"
fi
# ──────────────────────────────────────────────────────────────────────────────
# meetingh5 — generate the Vite env-var file (test-server environment).
# The file is fully regenerated on every run, so no .bak backup is taken.
# NOTE(review): assumes $ROOT_DIR/meetingh5/ exists — under set -e the cat
# redirection would abort the script otherwise; confirm for fresh checkouts.
# ──────────────────────────────────────────────────────────────────────────────
step "meetingh5 环境变量"
MH5_ENV="$ROOT_DIR/meetingh5/.env.local"
cat > "$MH5_ENV" <<EOF
# meetingh5 本地环境变量 — 由 deploy-test/02-patch-config.sh 生成,勿手动编辑
# 对应测试服务器DEPLOY_TEST_IP=${DEPLOY_TEST_IP}
#
# 优先级URL参数 ?ws= ?liveApi= > 以下变量 > 代码中的生产默认值
# 弹幕 WebSocketmeetingmsg 服务(:8000
VITE_WS_BASE_URL=ws://${DEPLOY_TEST_IP}:8000
# 直播间 APIlivestream 服务(:8081
VITE_LIVE_API_BASE_URL=http://${DEPLOY_TEST_IP}:8081
EOF
success " meetingh5/.env.local → ws=${DEPLOY_TEST_IP}:8000, liveApi=${DEPLOY_TEST_IP}:8081"
echo ""
success "所有配置文件已更新!"
echo ""
# Human-readable recap of every value patched above.
# BOLD / CYAN / NC are color variables — presumably ANSI escape sequences
# defined in common.sh (echo -e relies on that); confirm there.
echo -e "${BOLD}已修改配置摘要:${NC}"
echo " Redis → 127.0.0.1:${REDIS_PORT} password=${REDIS_PASSWORD} (Docker)"
echo " Kafka → 127.0.0.1:${KAFKA_PORT} (Docker)"
echo " Etcd → 127.0.0.1:${ETCD_PORT} (Docker)"
echo " MongoDB → ${MONGO_HOST}:${MONGO_PORT} DB(openim)=${MONGO_DATABASE} DB(build)=${BUILD_MONGO_DATABASE}"
echo " LiveKit → ${LIVEKIT_URL} node_ip=${LIVEKIT_NODE_IP} key=${LIVEKIT_API_KEY}"
echo " Tencent RTC → sdk_app_id=${TENCENT_SDK_APP_ID}"
echo " S3 (openim) → s3://${OPENIM_AWS_BUCKET} region=${OPENIM_AWS_REGION}"
echo " S3 (build) → s3://${BUILD_AWS_BUCKET} region=${BUILD_AWS_REGION}"
echo " MeetingMsg → webhook afterSendGroupMsg=enabled → 127.0.0.1:8000"
echo " MeetingH5 → ws=${DEPLOY_TEST_IP}:8000, liveApi=${DEPLOY_TEST_IP}:8081"
echo ""
# Point the operator at the next script in the pipeline.
echo -e "${BOLD}下一步:${NC}"
echo -e " 启动 Docker 基础设施Redis/Kafka/Etcd"
echo -e " ${CYAN}./deploy-test/03-start-infra.sh${NC}"