Copy project

Author: kim.dev.6789
Date: 2026-01-14 22:16:44 +08:00
parent e2577b8cee
commit e50142a3b9
691 changed files with 97009 additions and 1 deletions

internal/push/push.go (new file, 163 lines added)

@@ -0,0 +1,163 @@
package push

import (
	"context"
	"math/rand"
	"strconv"
	"time"

	"git.imall.cloud/openim/open-im-server-deploy/internal/push/offlinepush"
	"git.imall.cloud/openim/open-im-server-deploy/pkg/authverify"
	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/config"
	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache"
	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache/mcache"
	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache/redis"
	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/controller"
	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/database/mgo"
	"git.imall.cloud/openim/open-im-server-deploy/pkg/dbbuild"
	"git.imall.cloud/openim/open-im-server-deploy/pkg/mqbuild"
	pbpush "git.imall.cloud/openim/protocol/push"
	"github.com/openimsdk/tools/discovery"
	"github.com/openimsdk/tools/log"
	"github.com/openimsdk/tools/mcontext"
	"github.com/openimsdk/tools/mq"
	"google.golang.org/grpc"
)
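
// pushServer implements pbpush.PushMsgServiceServer, holding the push
// database, the service discovery connection, and the configured offline pusher.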
type pushServer struct {
	pbpush.UnimplementedPushMsgServiceServer
	database      controller.PushDatabase
	disCov        discovery.Conn
	offlinePusher offlinepush.OfflinePusher
}
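
// Config aggregates the configuration blocks required to start the push RPC service.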
type Config struct {
	RpcConfig          config.Push
	RedisConfig        config.Redis
	MongoConfig        config.Mongo
	KafkaConfig        config.Kafka
	NotificationConfig config.Notification
	Share              config.Share
	WebhooksConfig     config.Webhooks
	LocalCacheConfig   config.LocalCache
	Discovery          config.Discovery
	FcmConfigPath      config.Path
}
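
// DelUserPushToken deletes the stored FCM token for the given user and platform.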
func (p pushServer) DelUserPushToken(ctx context.Context,
	req *pbpush.DelUserPushTokenReq) (resp *pbpush.DelUserPushTokenResp, err error) {
	if err = p.database.DelFcmToken(ctx, req.UserID, int(req.PlatformID)); err != nil {
		return nil, err
	}
	return &pbpush.DelUserPushTokenResp{}, nil
}
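
// Start builds the cache and offline pusher, wires up the Kafka producer and
// consumers, and registers the push RPC service on the provided gRPC server.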
func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryRegistry, server grpc.ServiceRegistrar) error {
	dbb := dbbuild.NewBuilder(&config.MongoConfig, &config.RedisConfig)
	rdb, err := dbb.Redis(ctx)
	if err != nil {
		return err
	}
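	// Choose the third-party cache backend: use Redis when a client is
	// available, otherwise fall back to a MongoDB-backed cache.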
	var cacheModel cache.ThirdCache
	if rdb == nil {
		mdb, err := dbb.Mongo(ctx)
		if err != nil {
			return err
		}
		mc, err := mgo.NewCacheMgo(mdb.GetDB())
		if err != nil {
			return err
		}
		cacheModel = mcache.NewThirdCache(mc)
	} else {
		cacheModel = redis.NewThirdCache(rdb)
	}
	offlinePusher, err := offlinepush.NewOfflinePusher(&config.RpcConfig, cacheModel, string(config.FcmConfigPath))
	if err != nil {
		return err
	}
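	// Build the Kafka producer for the offline-push topic, consumers for the
	// push and offline-push topics, and the handlers that process them.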
	builder := mqbuild.NewBuilder(&config.KafkaConfig)
	offlinePushProducer, err := builder.GetTopicProducer(ctx, config.KafkaConfig.ToOfflinePushTopic)
	if err != nil {
		return err
	}
	database := controller.NewPushDatabase(cacheModel, offlinePushProducer)
	pushConsumer, err := builder.GetTopicConsumer(ctx, config.KafkaConfig.ToPushTopic)
	if err != nil {
		return err
	}
	offlinePushConsumer, err := builder.GetTopicConsumer(ctx, config.KafkaConfig.ToOfflinePushTopic)
	if err != nil {
		return err
	}
	pushHandler, err := NewConsumerHandler(ctx, config, database, offlinePusher, rdb, client)
	if err != nil {
		return err
	}
	offlineHandler := NewOfflinePushConsumerHandler(offlinePusher)
	pbpush.RegisterPushMsgServiceServer(server, &pushServer{
		database:      database,
		disCov:        client,
		offlinePusher: offlinePusher,
	})
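	// Consume the push topic: wait up to 30 seconds for the local caches to
	// warm up, then subscribe, retrying after 5 seconds on error.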
	go func() {
		consumerCtx := mcontext.SetOperationID(context.Background(), "push_"+strconv.Itoa(int(rand.Uint32())))
		waitDone := make(chan struct{})
		go func() {
			defer func() {
				if r := recover(); r != nil {
					log.ZError(consumerCtx, "WaitCache panic", nil, "panic", r)
				}
				close(waitDone)
			}()
			pushHandler.WaitCache()
		}()
		select {
		case <-waitDone:
			log.ZInfo(consumerCtx, "WaitCache completed successfully")
		case <-time.After(30 * time.Second):
			log.ZWarn(consumerCtx, "WaitCache timeout after 30s, will start subscribe anyway", nil)
		}
		fn := func(msg mq.Message) error {
			pushHandler.HandleMs2PsChat(authverify.WithTempAdmin(msg.Context()), msg.Value())
			return nil
		}
		log.ZInfo(consumerCtx, "begin consume messages")
		for {
			if err := pushConsumer.Subscribe(consumerCtx, fn); err != nil {
				log.ZError(consumerCtx, "subscribe err, will retry in 5 seconds", err)
				time.Sleep(5 * time.Second)
				continue
			}
			// Subscribe returned normally (possibly due to context cancellation), retry immediately
			log.ZWarn(consumerCtx, "Subscribe returned normally, will retry immediately", nil)
		}
	}()
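	// Consume the offline-push topic and hand each message to the offline pusher.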
	go func() {
		fn := func(msg mq.Message) error {
			offlineHandler.HandleMsg2OfflinePush(msg.Context(), msg.Value())
			return nil
		}
		consumerCtx := mcontext.SetOperationID(context.Background(), "push_"+strconv.Itoa(int(rand.Uint32())))
		log.ZInfo(consumerCtx, "begin consume messages")
		for {
			if err := offlinePushConsumer.Subscribe(consumerCtx, fn); err != nil {
				log.ZError(consumerCtx, "subscribe err, will retry in 5 seconds", err)
				time.Sleep(5 * time.Second)
				continue
			}
			// Subscribe returned normally (possibly due to context cancellation), retry immediately
			log.ZWarn(consumerCtx, "Subscribe returned normally, will retry immediately", nil)
		}
	}()
	return nil
}