Copy project
97
pkg/common/cmd/api.go
Normal file
@@ -0,0 +1,97 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cmd

import (
    "context"

    "git.imall.cloud/openim/open-im-server-deploy/internal/api"
    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/config"
    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/prommetrics"
    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/startrpc"
    "git.imall.cloud/openim/open-im-server-deploy/version"
    "github.com/openimsdk/tools/system/program"
    "github.com/spf13/cobra"
)

type ApiCmd struct {
    *RootCmd
    ctx       context.Context
    configMap map[string]any
    apiConfig *api.Config
}

func NewApiCmd() *ApiCmd {
    var apiConfig api.Config
    ret := &ApiCmd{apiConfig: &apiConfig}
    ret.configMap = map[string]any{
        config.DiscoveryConfigFilename: &apiConfig.Discovery,
        config.KafkaConfigFileName: &apiConfig.Kafka,
        config.LocalCacheConfigFileName: &apiConfig.LocalCache,
        config.LogConfigFileName: &apiConfig.Log,
        config.MinioConfigFileName: &apiConfig.Minio,
        config.MongodbConfigFileName: &apiConfig.Mongo,
        config.NotificationFileName: &apiConfig.Notification,
        config.OpenIMAPICfgFileName: &apiConfig.API,
        config.OpenIMCronTaskCfgFileName: &apiConfig.CronTask,
        config.OpenIMMsgGatewayCfgFileName: &apiConfig.MsgGateway,
        config.OpenIMMsgTransferCfgFileName: &apiConfig.MsgTransfer,
        config.OpenIMPushCfgFileName: &apiConfig.Push,
        config.OpenIMRPCAuthCfgFileName: &apiConfig.Auth,
        config.OpenIMRPCConversationCfgFileName: &apiConfig.Conversation,
        config.OpenIMRPCFriendCfgFileName: &apiConfig.Friend,
        config.OpenIMRPCGroupCfgFileName: &apiConfig.Group,
        config.OpenIMRPCMsgCfgFileName: &apiConfig.Msg,
        config.OpenIMRPCThirdCfgFileName: &apiConfig.Third,
        config.OpenIMRPCUserCfgFileName: &apiConfig.User,
        config.RedisConfigFileName: &apiConfig.Redis,
        config.ShareFileName: &apiConfig.Share,
        config.WebhooksConfigFileName: &apiConfig.Webhooks,
    }
    ret.RootCmd = NewRootCmd(program.GetProcessName(), WithConfigMap(ret.configMap))
    ret.ctx = context.WithValue(context.Background(), "version", version.Version)
    ret.Command.RunE = func(cmd *cobra.Command, args []string) error {
        apiConfig.ConfigPath = config.Path(ret.configPath)
        return ret.runE()
    }
    return ret
}

func (a *ApiCmd) Exec() error {
    return a.Execute()
}

func (a *ApiCmd) runE() error {
    a.apiConfig.Index = config.Index(a.Index())
    prometheus := config.Prometheus{
        Enable: a.apiConfig.API.Prometheus.Enable,
        Ports: a.apiConfig.API.Prometheus.Ports,
    }
    return startrpc.Start(
        a.ctx, &a.apiConfig.Discovery,
        nil, // circuitBreakerConfig - API doesn't use circuit breaker
        nil, // rateLimiterConfig - API uses its own rate limiter middleware
        &prometheus,
        a.apiConfig.API.Api.ListenIP, "",
        a.apiConfig.API.Prometheus.AutoSetPorts,
        nil, int(a.apiConfig.Index),
        prommetrics.APIKeyName,
        &a.apiConfig.Notification,
        a.apiConfig,
        []string{},
        []string{},
        api.Start,
    )
}
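Note: the command types in this package are meant to be wired into thin main packages. The following sketch of such an entrypoint is illustrative and not part of this commit; the package main location and the use of program.ExitWithError are assumptions based on the tools module imported above.

package main

import (
    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/cmd"
    "github.com/openimsdk/tools/system/program"
)

func main() {
    // Exec parses the -c (config directory) and -i (index) flags, loads the
    // config files registered in configMap, then blocks serving the API.
    if err := cmd.NewApiCmd().Exec(); err != nil {
        program.ExitWithError(err)
    }
}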
73
pkg/common/cmd/auth.go
Normal file
@@ -0,0 +1,73 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cmd

import (
    "context"

    "git.imall.cloud/openim/open-im-server-deploy/internal/rpc/auth"
    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/config"
    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/startrpc"
    "git.imall.cloud/openim/open-im-server-deploy/version"
    "github.com/openimsdk/tools/system/program"
    "github.com/spf13/cobra"
)

type AuthRpcCmd struct {
    *RootCmd
    ctx        context.Context
    configMap  map[string]any
    authConfig *auth.Config
}

func NewAuthRpcCmd() *AuthRpcCmd {
    var authConfig auth.Config
    ret := &AuthRpcCmd{authConfig: &authConfig}
    ret.configMap = map[string]any{
        config.OpenIMRPCAuthCfgFileName: &authConfig.RpcConfig,
        config.RedisConfigFileName: &authConfig.RedisConfig,
        config.MongodbConfigFileName: &authConfig.MongoConfig,
        config.ShareFileName: &authConfig.Share,
        config.LocalCacheConfigFileName: &authConfig.LocalCacheConfig,
        config.DiscoveryConfigFilename: &authConfig.Discovery,
    }
    ret.RootCmd = NewRootCmd(program.GetProcessName(), WithConfigMap(ret.configMap))
    ret.ctx = context.WithValue(context.Background(), "version", version.Version)
    ret.Command.RunE = func(cmd *cobra.Command, args []string) error {
        return ret.runE()
    }

    return ret
}

func (a *AuthRpcCmd) Exec() error {
    return a.Execute()
}

func (a *AuthRpcCmd) runE() error {
    return startrpc.Start(a.ctx, &a.authConfig.Discovery, &a.authConfig.RpcConfig.CircuitBreaker, &a.authConfig.RpcConfig.RateLimiter, &a.authConfig.RpcConfig.Prometheus, a.authConfig.RpcConfig.RPC.ListenIP,
        a.authConfig.RpcConfig.RPC.RegisterIP, a.authConfig.RpcConfig.RPC.AutoSetPorts, a.authConfig.RpcConfig.RPC.Ports,
        a.Index(), a.authConfig.Discovery.RpcService.Auth, nil, a.authConfig,
        []string{
            a.authConfig.RpcConfig.GetConfigFileName(),
            a.authConfig.Share.GetConfigFileName(),
            a.authConfig.RedisConfig.GetConfigFileName(),
            a.authConfig.Discovery.GetConfigFileName(),
        },
        []string{
            a.authConfig.Discovery.RpcService.MessageGateway,
        },
        auth.Start)
}
75
pkg/common/cmd/conversation.go
Normal file
@@ -0,0 +1,75 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cmd

import (
    "context"

    "git.imall.cloud/openim/open-im-server-deploy/internal/rpc/conversation"
    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/config"
    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/startrpc"
    "git.imall.cloud/openim/open-im-server-deploy/version"
    "github.com/openimsdk/tools/system/program"
    "github.com/spf13/cobra"
)

type ConversationRpcCmd struct {
    *RootCmd
    ctx                context.Context
    configMap          map[string]any
    conversationConfig *conversation.Config
}

func NewConversationRpcCmd() *ConversationRpcCmd {
    var conversationConfig conversation.Config
    ret := &ConversationRpcCmd{conversationConfig: &conversationConfig}
    ret.configMap = map[string]any{
        config.OpenIMRPCConversationCfgFileName: &conversationConfig.RpcConfig,
        config.RedisConfigFileName: &conversationConfig.RedisConfig,
        config.MongodbConfigFileName: &conversationConfig.MongodbConfig,
        config.ShareFileName: &conversationConfig.Share,
        config.NotificationFileName: &conversationConfig.NotificationConfig,
        config.WebhooksConfigFileName: &conversationConfig.WebhooksConfig,
        config.LocalCacheConfigFileName: &conversationConfig.LocalCacheConfig,
        config.DiscoveryConfigFilename: &conversationConfig.Discovery,
    }
    ret.RootCmd = NewRootCmd(program.GetProcessName(), WithConfigMap(ret.configMap))
    ret.ctx = context.WithValue(context.Background(), "version", version.Version)
    ret.Command.RunE = func(cmd *cobra.Command, args []string) error {
        return ret.runE()
    }
    return ret
}

func (a *ConversationRpcCmd) Exec() error {
    return a.Execute()
}

func (a *ConversationRpcCmd) runE() error {
    return startrpc.Start(a.ctx, &a.conversationConfig.Discovery, &a.conversationConfig.RpcConfig.CircuitBreaker, &a.conversationConfig.RpcConfig.RateLimiter, &a.conversationConfig.RpcConfig.Prometheus, a.conversationConfig.RpcConfig.RPC.ListenIP,
        a.conversationConfig.RpcConfig.RPC.RegisterIP, a.conversationConfig.RpcConfig.RPC.AutoSetPorts, a.conversationConfig.RpcConfig.RPC.Ports,
        a.Index(), a.conversationConfig.Discovery.RpcService.Conversation, &a.conversationConfig.NotificationConfig, a.conversationConfig,
        []string{
            a.conversationConfig.RpcConfig.GetConfigFileName(),
            a.conversationConfig.RedisConfig.GetConfigFileName(),
            a.conversationConfig.MongodbConfig.GetConfigFileName(),
            a.conversationConfig.NotificationConfig.GetConfigFileName(),
            a.conversationConfig.Share.GetConfigFileName(),
            a.conversationConfig.LocalCacheConfig.GetConfigFileName(),
            a.conversationConfig.WebhooksConfig.GetConfigFileName(),
            a.conversationConfig.Discovery.GetConfigFileName(),
        }, nil,
        conversation.Start)
}
75
pkg/common/cmd/cron_task.go
Normal file
@@ -0,0 +1,75 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cmd

import (
    "context"

    "git.imall.cloud/openim/open-im-server-deploy/internal/tools/cron"
    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/config"
    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/startrpc"
    "git.imall.cloud/openim/open-im-server-deploy/version"
    "github.com/openimsdk/tools/system/program"
    "github.com/spf13/cobra"
)

type CronTaskCmd struct {
    *RootCmd
    ctx            context.Context
    configMap      map[string]any
    cronTaskConfig *cron.Config
}

func NewCronTaskCmd() *CronTaskCmd {
    var cronTaskConfig cron.Config
    ret := &CronTaskCmd{cronTaskConfig: &cronTaskConfig}
    ret.configMap = map[string]any{
        config.OpenIMCronTaskCfgFileName: &cronTaskConfig.CronTask,
        config.ShareFileName: &cronTaskConfig.Share,
        config.DiscoveryConfigFilename: &cronTaskConfig.Discovery,
        config.MongodbConfigFileName: &cronTaskConfig.Mongo,
        config.RedisConfigFileName: &cronTaskConfig.Redis,
        config.NotificationFileName: &cronTaskConfig.Notification,
    }
    ret.RootCmd = NewRootCmd(program.GetProcessName(), WithConfigMap(ret.configMap))
    ret.ctx = context.WithValue(context.Background(), "version", version.Version)
    ret.Command.RunE = func(cmd *cobra.Command, args []string) error {
        return ret.runE()
    }
    return ret
}

func (a *CronTaskCmd) Exec() error {
    return a.Execute()
}

func (a *CronTaskCmd) runE() error {
    var prometheus config.Prometheus
    return startrpc.Start(
        a.ctx, &a.cronTaskConfig.Discovery,
        nil,
        nil,
        &prometheus,
        "", "",
        true,
        nil, 0,
        "",
        nil,
        a.cronTaskConfig,
        []string{},
        []string{},
        cron.Start,
    )
}
15
pkg/common/cmd/doc.go
Normal file
@@ -0,0 +1,15 @@
// Copyright © 2024 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cmd // import "git.imall.cloud/openim/open-im-server-deploy/pkg/common/cmd"
75
pkg/common/cmd/friend.go
Normal file
@@ -0,0 +1,75 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cmd

import (
    "context"

    "git.imall.cloud/openim/open-im-server-deploy/internal/rpc/relation"
    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/config"
    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/startrpc"
    "git.imall.cloud/openim/open-im-server-deploy/version"
    "github.com/openimsdk/tools/system/program"
    "github.com/spf13/cobra"
)

type FriendRpcCmd struct {
    *RootCmd
    ctx            context.Context
    configMap      map[string]any
    relationConfig *relation.Config
}

func NewFriendRpcCmd() *FriendRpcCmd {
    var relationConfig relation.Config
    ret := &FriendRpcCmd{relationConfig: &relationConfig}
    ret.configMap = map[string]any{
        config.OpenIMRPCFriendCfgFileName: &relationConfig.RpcConfig,
        config.RedisConfigFileName: &relationConfig.RedisConfig,
        config.MongodbConfigFileName: &relationConfig.MongodbConfig,
        config.ShareFileName: &relationConfig.Share,
        config.NotificationFileName: &relationConfig.NotificationConfig,
        config.WebhooksConfigFileName: &relationConfig.WebhooksConfig,
        config.LocalCacheConfigFileName: &relationConfig.LocalCacheConfig,
        config.DiscoveryConfigFilename: &relationConfig.Discovery,
    }
    ret.RootCmd = NewRootCmd(program.GetProcessName(), WithConfigMap(ret.configMap))
    ret.ctx = context.WithValue(context.Background(), "version", version.Version)
    ret.Command.RunE = func(cmd *cobra.Command, args []string) error {
        return ret.runE()
    }
    return ret
}

func (a *FriendRpcCmd) Exec() error {
    return a.Execute()
}

func (a *FriendRpcCmd) runE() error {
    return startrpc.Start(a.ctx, &a.relationConfig.Discovery, &a.relationConfig.RpcConfig.CircuitBreaker, &a.relationConfig.RpcConfig.RateLimiter, &a.relationConfig.RpcConfig.Prometheus, a.relationConfig.RpcConfig.RPC.ListenIP,
        a.relationConfig.RpcConfig.RPC.RegisterIP, a.relationConfig.RpcConfig.RPC.AutoSetPorts, a.relationConfig.RpcConfig.RPC.Ports,
        a.Index(), a.relationConfig.Discovery.RpcService.Friend, &a.relationConfig.NotificationConfig, a.relationConfig,
        []string{
            a.relationConfig.RpcConfig.GetConfigFileName(),
            a.relationConfig.RedisConfig.GetConfigFileName(),
            a.relationConfig.MongodbConfig.GetConfigFileName(),
            a.relationConfig.NotificationConfig.GetConfigFileName(),
            a.relationConfig.Share.GetConfigFileName(),
            a.relationConfig.WebhooksConfig.GetConfigFileName(),
            a.relationConfig.LocalCacheConfig.GetConfigFileName(),
            a.relationConfig.Discovery.GetConfigFileName(),
        }, nil,
        relation.Start)
}
76
pkg/common/cmd/group.go
Normal file
@@ -0,0 +1,76 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cmd

import (
    "context"

    "git.imall.cloud/openim/open-im-server-deploy/internal/rpc/group"
    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/config"
    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/startrpc"
    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/versionctx"
    "git.imall.cloud/openim/open-im-server-deploy/version"
    "github.com/openimsdk/tools/system/program"
    "github.com/spf13/cobra"
)

type GroupRpcCmd struct {
    *RootCmd
    ctx         context.Context
    configMap   map[string]any
    groupConfig *group.Config
}

func NewGroupRpcCmd() *GroupRpcCmd {
    var groupConfig group.Config
    ret := &GroupRpcCmd{groupConfig: &groupConfig}
    ret.configMap = map[string]any{
        config.OpenIMRPCGroupCfgFileName: &groupConfig.RpcConfig,
        config.RedisConfigFileName: &groupConfig.RedisConfig,
        config.MongodbConfigFileName: &groupConfig.MongodbConfig,
        config.ShareFileName: &groupConfig.Share,
        config.NotificationFileName: &groupConfig.NotificationConfig,
        config.WebhooksConfigFileName: &groupConfig.WebhooksConfig,
        config.LocalCacheConfigFileName: &groupConfig.LocalCacheConfig,
        config.DiscoveryConfigFilename: &groupConfig.Discovery,
    }
    ret.RootCmd = NewRootCmd(program.GetProcessName(), WithConfigMap(ret.configMap))
    ret.ctx = context.WithValue(context.Background(), "version", version.Version)
    ret.Command.RunE = func(cmd *cobra.Command, args []string) error {
        return ret.runE()
    }
    return ret
}

func (a *GroupRpcCmd) Exec() error {
    return a.Execute()
}

func (a *GroupRpcCmd) runE() error {
    return startrpc.Start(a.ctx, &a.groupConfig.Discovery, &a.groupConfig.RpcConfig.CircuitBreaker, &a.groupConfig.RpcConfig.RateLimiter, &a.groupConfig.RpcConfig.Prometheus, a.groupConfig.RpcConfig.RPC.ListenIP,
        a.groupConfig.RpcConfig.RPC.RegisterIP, a.groupConfig.RpcConfig.RPC.AutoSetPorts, a.groupConfig.RpcConfig.RPC.Ports,
        a.Index(), a.groupConfig.Discovery.RpcService.Group, &a.groupConfig.NotificationConfig, a.groupConfig,
        []string{
            a.groupConfig.RpcConfig.GetConfigFileName(),
            a.groupConfig.RedisConfig.GetConfigFileName(),
            a.groupConfig.MongodbConfig.GetConfigFileName(),
            a.groupConfig.NotificationConfig.GetConfigFileName(),
            a.groupConfig.Share.GetConfigFileName(),
            a.groupConfig.WebhooksConfig.GetConfigFileName(),
            a.groupConfig.LocalCacheConfig.GetConfigFileName(),
            a.groupConfig.Discovery.GetConfigFileName(),
        }, nil,
        group.Start, versionctx.EnableVersionCtx())
}
77
pkg/common/cmd/msg.go
Normal file
@@ -0,0 +1,77 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cmd

import (
    "context"

    "git.imall.cloud/openim/open-im-server-deploy/internal/rpc/msg"
    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/config"
    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/startrpc"
    "git.imall.cloud/openim/open-im-server-deploy/version"
    "github.com/openimsdk/tools/system/program"
    "github.com/spf13/cobra"
)

type MsgRpcCmd struct {
    *RootCmd
    ctx       context.Context
    configMap map[string]any
    msgConfig *msg.Config
}

func NewMsgRpcCmd() *MsgRpcCmd {
    var msgConfig msg.Config
    ret := &MsgRpcCmd{msgConfig: &msgConfig}
    ret.configMap = map[string]any{
        config.OpenIMRPCMsgCfgFileName: &msgConfig.RpcConfig,
        config.RedisConfigFileName: &msgConfig.RedisConfig,
        config.MongodbConfigFileName: &msgConfig.MongodbConfig,
        config.KafkaConfigFileName: &msgConfig.KafkaConfig,
        config.ShareFileName: &msgConfig.Share,
        config.NotificationFileName: &msgConfig.NotificationConfig,
        config.WebhooksConfigFileName: &msgConfig.WebhooksConfig,
        config.LocalCacheConfigFileName: &msgConfig.LocalCacheConfig,
        config.DiscoveryConfigFilename: &msgConfig.Discovery,
    }
    ret.RootCmd = NewRootCmd(program.GetProcessName(), WithConfigMap(ret.configMap))
    ret.ctx = context.WithValue(context.Background(), "version", version.Version)
    ret.Command.RunE = func(cmd *cobra.Command, args []string) error {
        return ret.runE()
    }
    return ret
}

func (a *MsgRpcCmd) Exec() error {
    return a.Execute()
}

func (a *MsgRpcCmd) runE() error {
    return startrpc.Start(a.ctx, &a.msgConfig.Discovery, &a.msgConfig.RpcConfig.CircuitBreaker, &a.msgConfig.RpcConfig.RateLimiter, &a.msgConfig.RpcConfig.Prometheus, a.msgConfig.RpcConfig.RPC.ListenIP,
        a.msgConfig.RpcConfig.RPC.RegisterIP, a.msgConfig.RpcConfig.RPC.AutoSetPorts, a.msgConfig.RpcConfig.RPC.Ports,
        a.Index(), a.msgConfig.Discovery.RpcService.Msg, &a.msgConfig.NotificationConfig, a.msgConfig,
        []string{
            a.msgConfig.RpcConfig.GetConfigFileName(),
            a.msgConfig.RedisConfig.GetConfigFileName(),
            a.msgConfig.MongodbConfig.GetConfigFileName(),
            a.msgConfig.KafkaConfig.GetConfigFileName(),
            a.msgConfig.NotificationConfig.GetConfigFileName(),
            a.msgConfig.Share.GetConfigFileName(),
            a.msgConfig.WebhooksConfig.GetConfigFileName(),
            a.msgConfig.LocalCacheConfig.GetConfigFileName(),
            a.msgConfig.Discovery.GetConfigFileName(),
        }, nil,
        msg.Start)
}
105
pkg/common/cmd/msg_gateway.go
Normal file
@@ -0,0 +1,105 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cmd

import (
    "context"
    "encoding/json"
    "fmt"

    "git.imall.cloud/openim/open-im-server-deploy/internal/msggateway"
    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/config"
    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/startrpc"
    "git.imall.cloud/openim/open-im-server-deploy/version"

    "github.com/openimsdk/tools/log"
    "github.com/openimsdk/tools/system/program"
    "github.com/spf13/cobra"
)

type MsgGatewayCmd struct {
    *RootCmd
    ctx              context.Context
    configMap        map[string]any
    msgGatewayConfig *msggateway.Config
}

func NewMsgGatewayCmd() *MsgGatewayCmd {
    var msgGatewayConfig msggateway.Config
    ret := &MsgGatewayCmd{msgGatewayConfig: &msgGatewayConfig}
    ret.configMap = map[string]any{
        config.OpenIMMsgGatewayCfgFileName: &msgGatewayConfig.MsgGateway,
        config.ShareFileName: &msgGatewayConfig.Share,
        config.RedisConfigFileName: &msgGatewayConfig.RedisConfig,
        config.WebhooksConfigFileName: &msgGatewayConfig.WebhooksConfig,
        config.DiscoveryConfigFilename: &msgGatewayConfig.Discovery,
    }
    ret.RootCmd = NewRootCmd(program.GetProcessName(), WithConfigMap(ret.configMap))
    ret.ctx = context.WithValue(context.Background(), "version", version.Version)
    ret.Command.RunE = func(cmd *cobra.Command, args []string) error {
        return ret.runE()
    }
    return ret
}

func (m *MsgGatewayCmd) Exec() error {
    return m.Execute()
}

func (m *MsgGatewayCmd) runE() error {
    m.msgGatewayConfig.Index = config.Index(m.Index())
    rpc := m.msgGatewayConfig.MsgGateway.RPC
    // Read the Prometheus settings from config to avoid an index-out-of-range panic on an empty config
    prometheus := m.msgGatewayConfig.MsgGateway.Prometheus

    b, err := json.Marshal(prometheus)
    if err != nil {
        return err
    }
    fmt.Println(string(b))
    log.CInfo(m.ctx, "prometheus", "prometheus", string(b))
    // Debug log: print the key startup parameters
    log.CInfo(m.ctx, "msg-gateway starting",
        "autoSetPorts", rpc.AutoSetPorts,
        "rpcPorts", rpc.Ports,
        "prometheusEnable", prometheus.Enable,
        "prometheusPorts", prometheus.Ports,
        "index", int(m.msgGatewayConfig.Index),
        "listenIP", rpc.ListenIP,
        "registerIP", rpc.RegisterIP,
    )

    if !rpc.AutoSetPorts && (len(rpc.Ports) == 0) {
        log.ZWarn(m.ctx, "rpc ports is empty while autoSetPorts=false", nil)
    }
    if prometheus.Enable && len(prometheus.Ports) == 0 {
        log.ZWarn(m.ctx, "prometheus enabled but ports is empty", nil)
    }
    return startrpc.Start(
        m.ctx, &m.msgGatewayConfig.Discovery,
        &m.msgGatewayConfig.MsgGateway.CircuitBreaker,
        &m.msgGatewayConfig.MsgGateway.RateLimiter,
        &prometheus,
        rpc.ListenIP, rpc.RegisterIP,
        rpc.AutoSetPorts,
        rpc.Ports, int(m.msgGatewayConfig.Index),
        m.msgGatewayConfig.Discovery.RpcService.MessageGateway,
        nil,
        m.msgGatewayConfig,
        []string{},
        []string{},
        msggateway.Start,
    )
}
69
pkg/common/cmd/msg_gateway_test.go
Normal file
@@ -0,0 +1,69 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cmd

import (
    "math"
    "testing"

    "git.imall.cloud/openim/protocol/auth"
    "github.com/openimsdk/tools/apiresp"
    "github.com/openimsdk/tools/utils/jsonutil"
    "github.com/stretchr/testify/mock"
    "go.mongodb.org/mongo-driver/bson/primitive"
)

// MockRootCmd is a mock type for the RootCmd type
type MockRootCmd struct {
    mock.Mock
}

func (m *MockRootCmd) Execute() error {
    args := m.Called()
    return args.Error(0)
}

func TestName(t *testing.T) {
    resp := &apiresp.ApiResponse{
        ErrCode: 1234,
        ErrMsg: "test",
        ErrDlt: "4567",
        Data: &auth.GetUserTokenResp{
            Token: "1234567",
            ExpireTimeSeconds: math.MaxInt64,
        },
    }
    data, err := resp.MarshalJSON()
    if err != nil {
        panic(err)
    }
    t.Log(string(data))

    var rReso apiresp.ApiResponse
    rReso.Data = &auth.GetUserTokenResp{}

    if err := jsonutil.JsonUnmarshal(data, &rReso); err != nil {
        panic(err)
    }

    t.Logf("%+v\n", rReso)

}

func TestName1(t *testing.T) {
    t.Log(primitive.NewObjectID().String())
    t.Log(primitive.NewObjectID().Hex())

}
78
pkg/common/cmd/msg_transfer.go
Normal file
@@ -0,0 +1,78 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cmd

import (
    "context"

    "git.imall.cloud/openim/open-im-server-deploy/internal/msgtransfer"
    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/config"
    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/prommetrics"
    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/startrpc"
    "git.imall.cloud/openim/open-im-server-deploy/version"
    "github.com/openimsdk/tools/system/program"
    "github.com/spf13/cobra"
)

type MsgTransferCmd struct {
    *RootCmd
    ctx               context.Context
    configMap         map[string]any
    msgTransferConfig *msgtransfer.Config
}

func NewMsgTransferCmd() *MsgTransferCmd {
    var msgTransferConfig msgtransfer.Config
    ret := &MsgTransferCmd{msgTransferConfig: &msgTransferConfig}
    ret.configMap = map[string]any{
        config.OpenIMMsgTransferCfgFileName: &msgTransferConfig.MsgTransfer,
        config.RedisConfigFileName: &msgTransferConfig.RedisConfig,
        config.MongodbConfigFileName: &msgTransferConfig.MongodbConfig,
        config.KafkaConfigFileName: &msgTransferConfig.KafkaConfig,
        config.ShareFileName: &msgTransferConfig.Share,
        config.WebhooksConfigFileName: &msgTransferConfig.WebhooksConfig,
        config.DiscoveryConfigFilename: &msgTransferConfig.Discovery,
    }
    ret.RootCmd = NewRootCmd(program.GetProcessName(), WithConfigMap(ret.configMap))
    ret.ctx = context.WithValue(context.Background(), "version", version.Version)
    ret.Command.RunE = func(cmd *cobra.Command, args []string) error {
        return ret.runE()
    }
    return ret
}

func (m *MsgTransferCmd) Exec() error {
    return m.Execute()
}

func (m *MsgTransferCmd) runE() error {
    m.msgTransferConfig.Index = config.Index(m.Index())
    var prometheus config.Prometheus
    return startrpc.Start(
        m.ctx, &m.msgTransferConfig.Discovery,
        &m.msgTransferConfig.MsgTransfer.CircuitBreaker,
        &m.msgTransferConfig.MsgTransfer.RateLimiter,
        &prometheus,
        "", "",
        true,
        nil, int(m.msgTransferConfig.Index),
        prommetrics.MessageTransferKeyName,
        nil,
        m.msgTransferConfig,
        []string{},
        []string{},
        msgtransfer.Start,
    )
}
171
pkg/common/cmd/msg_utils.go
Normal file
@@ -0,0 +1,171 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cmd

import (
    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/config"
    "github.com/spf13/cobra"
)

type MsgUtilsCmd struct {
    cobra.Command
}

func (m *MsgUtilsCmd) AddUserIDFlag() {
    m.Command.PersistentFlags().StringP("userID", "u", "", "openIM userID")
}
func (m *MsgUtilsCmd) AddIndexFlag() {
    m.Command.PersistentFlags().IntP(config.FlagTransferIndex, "i", 0, "process startup sequence number")
}

func (m *MsgUtilsCmd) AddConfigDirFlag() {
    m.Command.PersistentFlags().StringP(config.FlagConf, "c", "", "path of config directory")

}

func (m *MsgUtilsCmd) getUserIDFlag(cmdLines *cobra.Command) string {
    userID, _ := cmdLines.Flags().GetString("userID")
    return userID
}

func (m *MsgUtilsCmd) AddFixAllFlag() {
    m.Command.PersistentFlags().BoolP("fixAll", "f", false, "openIM fix all seqs")
}

/* func (m *MsgUtilsCmd) getFixAllFlag(cmdLines *cobra.Command) bool {
    fixAll, _ := cmdLines.Flags().GetBool("fixAll")
    return fixAll
} */

func (m *MsgUtilsCmd) AddClearAllFlag() {
    m.Command.PersistentFlags().BoolP("clearAll", "", false, "openIM clear all seqs")
}

/* func (m *MsgUtilsCmd) getClearAllFlag(cmdLines *cobra.Command) bool {
    clearAll, _ := cmdLines.Flags().GetBool("clearAll")
    return clearAll
} */

func (m *MsgUtilsCmd) AddSuperGroupIDFlag() {
    m.Command.PersistentFlags().StringP("superGroupID", "g", "", "openIM superGroupID")
}

func (m *MsgUtilsCmd) getSuperGroupIDFlag(cmdLines *cobra.Command) string {
    superGroupID, _ := cmdLines.Flags().GetString("superGroupID")
    return superGroupID
}

func (m *MsgUtilsCmd) AddBeginSeqFlag() {
    m.Command.PersistentFlags().Int64P("beginSeq", "b", 0, "openIM beginSeq")
}

/* func (m *MsgUtilsCmd) getBeginSeqFlag(cmdLines *cobra.Command) int64 {
    beginSeq, _ := cmdLines.Flags().GetInt64("beginSeq")
    return beginSeq
} */

func (m *MsgUtilsCmd) AddLimitFlag() {
    m.Command.PersistentFlags().Int64P("limit", "l", 0, "openIM limit")
}

/* func (m *MsgUtilsCmd) getLimitFlag(cmdLines *cobra.Command) int64 {
    limit, _ := cmdLines.Flags().GetInt64("limit")
    return limit
} */

func (m *MsgUtilsCmd) Execute() error {
    return m.Command.Execute()
}

func NewMsgUtilsCmd(use, short string, args cobra.PositionalArgs) *MsgUtilsCmd {
    return &MsgUtilsCmd{
        Command: cobra.Command{
            Use: use,
            Short: short,
            Args: args,
        },
    }
}

type GetCmd struct {
    *MsgUtilsCmd
}

func NewGetCmd() *GetCmd {
    return &GetCmd{
        NewMsgUtilsCmd("get [resource]", "get action", cobra.MatchAll(cobra.ExactArgs(1), cobra.OnlyValidArgs)),
    }
}

type FixCmd struct {
    *MsgUtilsCmd
}

func NewFixCmd() *FixCmd {
    return &FixCmd{
        NewMsgUtilsCmd("fix [resource]", "fix action", cobra.MatchAll(cobra.ExactArgs(1), cobra.OnlyValidArgs)),
    }
}

type ClearCmd struct {
    *MsgUtilsCmd
}

func NewClearCmd() *ClearCmd {
    return &ClearCmd{
        NewMsgUtilsCmd("clear [resource]", "clear action", cobra.MatchAll(cobra.ExactArgs(1), cobra.OnlyValidArgs)),
    }
}

type SeqCmd struct {
    *MsgUtilsCmd
}

func NewSeqCmd() *SeqCmd {
    seqCmd := &SeqCmd{
        NewMsgUtilsCmd("seq", "seq", nil),
    }
    return seqCmd
}

func (s *SeqCmd) GetSeqCmd() *cobra.Command {
    s.Command.Run = func(cmdLines *cobra.Command, args []string) {

    }
    return &s.Command
}

func (s *SeqCmd) FixSeqCmd() *cobra.Command {
    return &s.Command
}

type MsgCmd struct {
    *MsgUtilsCmd
}

func NewMsgCmd() *MsgCmd {
    msgCmd := &MsgCmd{
        NewMsgUtilsCmd("msg", "msg", nil),
    }
    return msgCmd
}

func (m *MsgCmd) GetMsgCmd() *cobra.Command {
    return &m.Command
}

func (m *MsgCmd) ClearMsgCmd() *cobra.Command {
    return &m.Command
}
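Note: the builders above only define the pieces of a "get/fix/clear" CLI tree; the actual wiring lives in the tools binaries and is not part of this diff. The sketch below is illustrative only (the helper name buildGetSeqCmd is an assumption) and uses just the constructors and flag helpers defined in this file plus cobra's AddCommand.

// Illustrative only: wiring the seq subcommand under the "get" command tree.
func buildGetSeqCmd() *cobra.Command {
    getCmd := NewGetCmd()
    seqCmd := NewSeqCmd()
    seqCmd.AddUserIDFlag()
    seqCmd.AddSuperGroupIDFlag()
    seqCmd.AddConfigDirFlag()
    // Enables invocations such as: get seq --userID u1 --superGroupID g1 -c ./config
    getCmd.Command.AddCommand(seqCmd.GetSeqCmd())
    return &getCmd.Command
}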
80
pkg/common/cmd/push.go
Normal file
@@ -0,0 +1,80 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cmd

import (
    "context"

    "git.imall.cloud/openim/open-im-server-deploy/internal/push"
    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/config"
    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/startrpc"
    "git.imall.cloud/openim/open-im-server-deploy/version"
    "github.com/openimsdk/tools/system/program"
    "github.com/spf13/cobra"
)

type PushRpcCmd struct {
    *RootCmd
    ctx        context.Context
    configMap  map[string]any
    pushConfig *push.Config
}

func NewPushRpcCmd() *PushRpcCmd {
    var pushConfig push.Config
    ret := &PushRpcCmd{pushConfig: &pushConfig}
    ret.configMap = map[string]any{
        config.OpenIMPushCfgFileName: &pushConfig.RpcConfig,
        config.RedisConfigFileName: &pushConfig.RedisConfig,
        config.MongodbConfigFileName: &pushConfig.MongoConfig,
        config.KafkaConfigFileName: &pushConfig.KafkaConfig,
        config.ShareFileName: &pushConfig.Share,
        config.NotificationFileName: &pushConfig.NotificationConfig,
        config.WebhooksConfigFileName: &pushConfig.WebhooksConfig,
        config.LocalCacheConfigFileName: &pushConfig.LocalCacheConfig,
        config.DiscoveryConfigFilename: &pushConfig.Discovery,
    }
    ret.RootCmd = NewRootCmd(program.GetProcessName(), WithConfigMap(ret.configMap))
    ret.ctx = context.WithValue(context.Background(), "version", version.Version)
    ret.Command.RunE = func(cmd *cobra.Command, args []string) error {
        ret.pushConfig.FcmConfigPath = config.Path(ret.ConfigPath())
        return ret.runE()
    }
    return ret
}

func (a *PushRpcCmd) Exec() error {
    return a.Execute()
}

func (a *PushRpcCmd) runE() error {
    return startrpc.Start(a.ctx, &a.pushConfig.Discovery, &a.pushConfig.RpcConfig.CircuitBreaker, &a.pushConfig.RpcConfig.RateLimiter, &a.pushConfig.RpcConfig.Prometheus, a.pushConfig.RpcConfig.RPC.ListenIP,
        a.pushConfig.RpcConfig.RPC.RegisterIP, a.pushConfig.RpcConfig.RPC.AutoSetPorts, a.pushConfig.RpcConfig.RPC.Ports,
        a.Index(), a.pushConfig.Discovery.RpcService.Push, &a.pushConfig.NotificationConfig, a.pushConfig,
        []string{
            a.pushConfig.RpcConfig.GetConfigFileName(),
            a.pushConfig.RedisConfig.GetConfigFileName(),
            a.pushConfig.KafkaConfig.GetConfigFileName(),
            a.pushConfig.NotificationConfig.GetConfigFileName(),
            a.pushConfig.Share.GetConfigFileName(),
            a.pushConfig.WebhooksConfig.GetConfigFileName(),
            a.pushConfig.LocalCacheConfig.GetConfigFileName(),
            a.pushConfig.Discovery.GetConfigFileName(),
        },
        []string{
            a.pushConfig.Discovery.RpcService.MessageGateway,
        },
        push.Start)
}
251
pkg/common/cmd/root.go
Normal file
@@ -0,0 +1,251 @@
package cmd

import (
    "context"
    "encoding/json"
    "fmt"

    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/config"
    kdisc "git.imall.cloud/openim/open-im-server-deploy/pkg/common/discovery"
    disetcd "git.imall.cloud/openim/open-im-server-deploy/pkg/common/discovery/etcd"
    "git.imall.cloud/openim/open-im-server-deploy/version"
    "github.com/openimsdk/tools/discovery/etcd"
    "github.com/openimsdk/tools/errs"
    "github.com/openimsdk/tools/log"
    "github.com/spf13/cobra"
    clientv3 "go.etcd.io/etcd/client/v3"
)

type RootCmd struct {
    Command        cobra.Command
    processName    string
    port           int
    prometheusPort int
    log            config.Log
    index          int
    configPath     string
    etcdClient     *clientv3.Client
}

func (r *RootCmd) ConfigPath() string {
    return r.configPath
}

func (r *RootCmd) Index() int {
    return r.index
}

func (r *RootCmd) Port() int {
    return r.port
}

type CmdOpts struct {
    loggerPrefixName string
    configMap        map[string]any
}

func WithCronTaskLogName() func(*CmdOpts) {
    return func(opts *CmdOpts) {
        opts.loggerPrefixName = "openim-crontask"
    }
}

func WithLogName(logName string) func(*CmdOpts) {
    return func(opts *CmdOpts) {
        opts.loggerPrefixName = logName
    }
}
func WithConfigMap(configMap map[string]any) func(*CmdOpts) {
    return func(opts *CmdOpts) {
        opts.configMap = configMap
    }
}

func NewRootCmd(processName string, opts ...func(*CmdOpts)) *RootCmd {
    rootCmd := &RootCmd{processName: processName}
    cmd := cobra.Command{
        Use: "Start openIM application",
        Long: fmt.Sprintf(`Start %s `, processName),
        PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
            return rootCmd.persistentPreRun(cmd, opts...)
        },
        SilenceUsage: true,
        SilenceErrors: false,
    }
    cmd.Flags().StringP(config.FlagConf, "c", "", "path of config directory")
    cmd.Flags().IntP(config.FlagTransferIndex, "i", 0, "process startup sequence number")

    rootCmd.Command = cmd
    return rootCmd
}

func (r *RootCmd) initEtcd() error {
    configDirectory, _, err := r.getFlag(&r.Command)
    if err != nil {
        return err
    }
    disConfig := config.Discovery{}
    err = config.Load(configDirectory, config.DiscoveryConfigFilename, config.EnvPrefixMap[config.DiscoveryConfigFilename], &disConfig)
    if err != nil {
        return err
    }
    if disConfig.Enable == config.ETCD {
        discov, _ := kdisc.NewDiscoveryRegister(&disConfig, nil)
        if etcdDiscov, ok := discov.(*etcd.SvcDiscoveryRegistryImpl); ok {
            r.etcdClient = etcdDiscov.GetClient()
        }
    }
    return nil
}

func (r *RootCmd) persistentPreRun(cmd *cobra.Command, opts ...func(*CmdOpts)) error {
    if err := r.initEtcd(); err != nil {
        return err
    }
    cmdOpts := r.applyOptions(opts...)
    if err := r.initializeConfiguration(cmd, cmdOpts); err != nil {
        return err
    }
    if err := r.updateConfigFromEtcd(cmdOpts); err != nil {
        return err
    }
    if err := r.initializeLogger(cmdOpts); err != nil {
        return errs.WrapMsg(err, "failed to initialize logger")
    }
    if r.etcdClient != nil {
        if err := r.etcdClient.Close(); err != nil {
            return errs.WrapMsg(err, "failed to close etcd client")
        }
    }
    return nil
}

func (r *RootCmd) initializeConfiguration(cmd *cobra.Command, opts *CmdOpts) error {
    configDirectory, _, err := r.getFlag(cmd)
    if err != nil {
        return err
    }

    // Load common configuration file
    //opts.configMap[ShareFileName] = StructEnvPrefix{EnvPrefix: shareEnvPrefix, ConfigStruct: &r.share}
    for configFileName, configStruct := range opts.configMap {
        err := config.Load(configDirectory, configFileName, config.EnvPrefixMap[configFileName], configStruct)
        if err != nil {
            return err
        }
    }
    // Load common log configuration file
    return config.Load(configDirectory, config.LogConfigFileName, config.EnvPrefixMap[config.LogConfigFileName], &r.log)
}

func (r *RootCmd) updateConfigFromEtcd(opts *CmdOpts) error {
    if r.etcdClient == nil {
        return nil
    }
    ctx := context.TODO()

    res, err := r.etcdClient.Get(ctx, disetcd.BuildKey(disetcd.EnableConfigCenterKey))
    if err != nil {
        log.ZWarn(ctx, "root cmd updateConfigFromEtcd, etcd Get EnableConfigCenterKey err: %v", errs.Wrap(err))
        return nil
    }
    if res.Count == 0 {
        return nil
    } else {
        if string(res.Kvs[0].Value) == disetcd.Disable {
            return nil
        } else if string(res.Kvs[0].Value) != disetcd.Enable {
            return errs.New("unknown EnableConfigCenter value").Wrap()
        }
    }

    update := func(configFileName string, configStruct any) error {
        key := disetcd.BuildKey(configFileName)
        etcdRes, err := r.etcdClient.Get(ctx, key)
        if err != nil {
            log.ZWarn(ctx, "root cmd updateConfigFromEtcd, etcd Get err: %v", errs.Wrap(err))
            return nil
        }
        if etcdRes.Count == 0 {
            data, err := json.Marshal(configStruct)
            if err != nil {
                return errs.ErrArgs.WithDetail(err.Error()).Wrap()
            }
            _, err = r.etcdClient.Put(ctx, disetcd.BuildKey(configFileName), string(data))
            if err != nil {
                log.ZWarn(ctx, "root cmd updateConfigFromEtcd, etcd Put err: %v", errs.Wrap(err))
            }
            return nil
        }
        err = json.Unmarshal(etcdRes.Kvs[0].Value, configStruct)
        if err != nil {
            return errs.WrapMsg(err, "failed to unmarshal config from etcd")
        }
        return nil
    }
    for configFileName, configStruct := range opts.configMap {
        if err := update(configFileName, configStruct); err != nil {
            return err
        }
    }
    if err := update(config.LogConfigFileName, &r.log); err != nil {
        return err
    }
    // Load common log configuration file
    return nil

}

func (r *RootCmd) applyOptions(opts ...func(*CmdOpts)) *CmdOpts {
    cmdOpts := defaultCmdOpts()
    for _, opt := range opts {
        opt(cmdOpts)
    }

    return cmdOpts
}

func (r *RootCmd) initializeLogger(cmdOpts *CmdOpts) error {
    err := log.InitLoggerFromConfig(
        cmdOpts.loggerPrefixName,
        r.processName,
        "", "",
        r.log.RemainLogLevel,
        r.log.IsStdout,
        r.log.IsJson,
        r.log.StorageLocation,
        r.log.RemainRotationCount,
        r.log.RotationTime,
        version.Version,
        r.log.IsSimplify,
    )
    if err != nil {
        return errs.Wrap(err)
    }
    return errs.Wrap(log.InitConsoleLogger(r.processName, r.log.RemainLogLevel, r.log.IsJson, version.Version))

}

func defaultCmdOpts() *CmdOpts {
    return &CmdOpts{
        loggerPrefixName: "openim-service-log",
    }
}

func (r *RootCmd) getFlag(cmd *cobra.Command) (string, int, error) {
    configDirectory, err := cmd.Flags().GetString(config.FlagConf)
    if err != nil {
        return "", 0, errs.Wrap(err)
    }
    r.configPath = configDirectory
    index, err := cmd.Flags().GetInt(config.FlagTransferIndex)
    if err != nil {
        return "", 0, errs.Wrap(err)
    }
    r.index = index
    return configDirectory, index, nil
}

func (r *RootCmd) Execute() error {
    return r.Command.Execute()
}
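Note: NewRootCmd takes functional options that mutate a CmdOpts before the config files are loaded; the service commands above use exactly this pattern. The sketch below is illustrative only (newExampleCmd and the "openim-example" log prefix are assumptions, not part of this diff) and shows how an in-package command could override the defaults.

// Illustrative only: a service command in this package could configure its RootCmd like this.
func newExampleCmd() *RootCmd {
    var discoveryConf config.Discovery
    configMap := map[string]any{
        // Each entry is loaded from the -c directory (and optionally refreshed from etcd).
        config.DiscoveryConfigFilename: &discoveryConf,
    }
    return NewRootCmd(
        program.GetProcessName(),
        WithConfigMap(configMap),
        WithLogName("openim-example"), // overrides the default "openim-service-log" logger prefix
    )
}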
75
pkg/common/cmd/third.go
Normal file
@@ -0,0 +1,75 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cmd

import (
    "context"

    "git.imall.cloud/openim/open-im-server-deploy/internal/rpc/third"
    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/config"
    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/startrpc"
    "git.imall.cloud/openim/open-im-server-deploy/version"
    "github.com/openimsdk/tools/system/program"
    "github.com/spf13/cobra"
)

type ThirdRpcCmd struct {
    *RootCmd
    ctx         context.Context
    configMap   map[string]any
    thirdConfig *third.Config
}

func NewThirdRpcCmd() *ThirdRpcCmd {
    var thirdConfig third.Config
    ret := &ThirdRpcCmd{thirdConfig: &thirdConfig}
    ret.configMap = map[string]any{
        config.OpenIMRPCThirdCfgFileName: &thirdConfig.RpcConfig,
        config.RedisConfigFileName: &thirdConfig.RedisConfig,
        config.MongodbConfigFileName: &thirdConfig.MongodbConfig,
        config.ShareFileName: &thirdConfig.Share,
        config.NotificationFileName: &thirdConfig.NotificationConfig,
        config.MinioConfigFileName: &thirdConfig.MinioConfig,
        config.LocalCacheConfigFileName: &thirdConfig.LocalCacheConfig,
        config.DiscoveryConfigFilename: &thirdConfig.Discovery,
    }
    ret.RootCmd = NewRootCmd(program.GetProcessName(), WithConfigMap(ret.configMap))
    ret.ctx = context.WithValue(context.Background(), "version", version.Version)
    ret.Command.RunE = func(cmd *cobra.Command, args []string) error {
        return ret.runE()
    }
    return ret
}

func (a *ThirdRpcCmd) Exec() error {
    return a.Execute()
}

func (a *ThirdRpcCmd) runE() error {
    return startrpc.Start(a.ctx, &a.thirdConfig.Discovery, &a.thirdConfig.RpcConfig.CircuitBreaker, &a.thirdConfig.RpcConfig.RateLimiter, &a.thirdConfig.RpcConfig.Prometheus, a.thirdConfig.RpcConfig.RPC.ListenIP,
        a.thirdConfig.RpcConfig.RPC.RegisterIP, a.thirdConfig.RpcConfig.RPC.AutoSetPorts, a.thirdConfig.RpcConfig.RPC.Ports,
        a.Index(), a.thirdConfig.Discovery.RpcService.Third, &a.thirdConfig.NotificationConfig, a.thirdConfig,
        []string{
            a.thirdConfig.RpcConfig.GetConfigFileName(),
            a.thirdConfig.RedisConfig.GetConfigFileName(),
            a.thirdConfig.MongodbConfig.GetConfigFileName(),
            a.thirdConfig.NotificationConfig.GetConfigFileName(),
            a.thirdConfig.Share.GetConfigFileName(),
            a.thirdConfig.MinioConfig.GetConfigFileName(),
            a.thirdConfig.LocalCacheConfig.GetConfigFileName(),
            a.thirdConfig.Discovery.GetConfigFileName(),
        }, nil,
        third.Start)
}
77
pkg/common/cmd/user.go
Normal file
@@ -0,0 +1,77 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cmd

import (
    "context"

    "git.imall.cloud/openim/open-im-server-deploy/internal/rpc/user"
    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/config"
    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/startrpc"
    "git.imall.cloud/openim/open-im-server-deploy/version"
    "github.com/openimsdk/tools/system/program"
    "github.com/spf13/cobra"
)

type UserRpcCmd struct {
    *RootCmd
    ctx        context.Context
    configMap  map[string]any
    userConfig *user.Config
}

func NewUserRpcCmd() *UserRpcCmd {
    var userConfig user.Config
    ret := &UserRpcCmd{userConfig: &userConfig}
    ret.configMap = map[string]any{
        config.OpenIMRPCUserCfgFileName: &userConfig.RpcConfig,
        config.RedisConfigFileName: &userConfig.RedisConfig,
        config.MongodbConfigFileName: &userConfig.MongodbConfig,
        config.KafkaConfigFileName: &userConfig.KafkaConfig,
        config.ShareFileName: &userConfig.Share,
        config.NotificationFileName: &userConfig.NotificationConfig,
        config.WebhooksConfigFileName: &userConfig.WebhooksConfig,
        config.LocalCacheConfigFileName: &userConfig.LocalCacheConfig,
        config.DiscoveryConfigFilename: &userConfig.Discovery,
    }
    ret.RootCmd = NewRootCmd(program.GetProcessName(), WithConfigMap(ret.configMap))
    ret.ctx = context.WithValue(context.Background(), "version", version.Version)
    ret.Command.RunE = func(cmd *cobra.Command, args []string) error {
        return ret.runE()
    }
    return ret
}

func (a *UserRpcCmd) Exec() error {
    return a.Execute()
}

func (a *UserRpcCmd) runE() error {
    return startrpc.Start(a.ctx, &a.userConfig.Discovery, &a.userConfig.RpcConfig.CircuitBreaker, &a.userConfig.RpcConfig.RateLimiter, &a.userConfig.RpcConfig.Prometheus, a.userConfig.RpcConfig.RPC.ListenIP,
        a.userConfig.RpcConfig.RPC.RegisterIP, a.userConfig.RpcConfig.RPC.AutoSetPorts, a.userConfig.RpcConfig.RPC.Ports,
        a.Index(), a.userConfig.Discovery.RpcService.User, &a.userConfig.NotificationConfig, a.userConfig,
        []string{
            a.userConfig.RpcConfig.GetConfigFileName(),
            a.userConfig.RedisConfig.GetConfigFileName(),
            a.userConfig.MongodbConfig.GetConfigFileName(),
            a.userConfig.KafkaConfig.GetConfigFileName(),
            a.userConfig.NotificationConfig.GetConfigFileName(),
            a.userConfig.Share.GetConfigFileName(),
            a.userConfig.WebhooksConfig.GetConfigFileName(),
            a.userConfig.LocalCacheConfig.GetConfigFileName(),
            a.userConfig.Discovery.GetConfigFileName(),
        }, nil,
        user.Start)
}
979
pkg/common/config/config.go
Normal file
979
pkg/common/config/config.go
Normal file
@@ -0,0 +1,979 @@
|
||||
// Copyright © 2023 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package config
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/openimsdk/tools/db/mongoutil"
|
||||
"github.com/openimsdk/tools/db/redisutil"
|
||||
"github.com/openimsdk/tools/mq/kafka"
|
||||
"github.com/openimsdk/tools/s3/aws"
|
||||
"github.com/openimsdk/tools/s3/cos"
|
||||
"github.com/openimsdk/tools/s3/kodo"
|
||||
"github.com/openimsdk/tools/s3/minio"
|
||||
"github.com/openimsdk/tools/s3/oss"
|
||||
)
|
||||
|
||||
const StructTagName = "yaml"
|
||||
|
||||
type Path string
|
||||
|
||||
type Index int
|
||||
|
||||
type CacheConfig struct {
|
||||
Topic string `yaml:"topic"`
|
||||
SlotNum int `yaml:"slotNum"`
|
||||
SlotSize int `yaml:"slotSize"`
|
||||
SuccessExpire int `yaml:"successExpire"`
|
||||
FailedExpire int `yaml:"failedExpire"`
|
||||
}
|
||||
|
||||
type LocalCache struct {
|
||||
Auth CacheConfig `yaml:"auth"`
|
||||
User CacheConfig `yaml:"user"`
|
||||
Group CacheConfig `yaml:"group"`
|
||||
Friend CacheConfig `yaml:"friend"`
|
||||
Conversation CacheConfig `yaml:"conversation"`
|
||||
}
|
||||
|
||||
type Log struct {
|
||||
StorageLocation string `yaml:"storageLocation"`
|
||||
RotationTime uint `yaml:"rotationTime"`
|
||||
RemainRotationCount uint `yaml:"remainRotationCount"`
|
||||
RemainLogLevel int `yaml:"remainLogLevel"`
|
||||
IsStdout bool `yaml:"isStdout"`
|
||||
IsJson bool `yaml:"isJson"`
|
||||
IsSimplify bool `yaml:"isSimplify"`
|
||||
WithStack bool `yaml:"withStack"`
|
||||
}
|
||||
|
||||
type Minio struct {
|
||||
Bucket string `yaml:"bucket"`
|
||||
AccessKeyID string `yaml:"accessKeyID"`
|
||||
SecretAccessKey string `yaml:"secretAccessKey"`
|
||||
SessionToken string `yaml:"sessionToken"`
|
||||
InternalAddress string `yaml:"internalAddress"`
|
||||
ExternalAddress string `yaml:"externalAddress"`
|
||||
PublicRead bool `yaml:"publicRead"`
|
||||
}
|
||||
|
||||
type Mongo struct {
|
||||
URI string `yaml:"uri"`
|
||||
Address []string `yaml:"address"`
|
||||
Database string `yaml:"database"`
|
||||
Username string `yaml:"username"`
|
||||
Password string `yaml:"password"`
|
||||
AuthSource string `yaml:"authSource"`
|
||||
MaxPoolSize int `yaml:"maxPoolSize"`
|
||||
MaxRetry int `yaml:"maxRetry"`
|
||||
MongoMode string `yaml:"mongoMode"`
|
||||
ReplicaSet ReplicaSetConfig
|
||||
ReadPreference ReadPrefConfig
|
||||
WriteConcern WriteConcernConfig
|
||||
}
|
||||
|
||||
type ReplicaSetConfig struct {
|
||||
Name string `yaml:"name"`
|
||||
Hosts []string `yaml:"hosts"`
|
||||
ReadConcern string `yaml:"readConcern"`
|
||||
MaxStaleness time.Duration `yaml:"maxStaleness"`
|
||||
}
|
||||
|
||||
type ReadPrefConfig struct {
|
||||
Mode string `yaml:"mode"`
|
||||
TagSets []map[string]string `yaml:"tagSets"`
|
||||
MaxStaleness time.Duration `yaml:"maxStaleness"`
|
||||
}
|
||||
|
||||
type WriteConcernConfig struct {
|
||||
W any `yaml:"w"`
|
||||
J bool `yaml:"j"`
|
||||
WTimeout time.Duration `yaml:"wtimeout"`
|
||||
}
|
||||
|
||||
type Kafka struct {
|
||||
Username string `yaml:"username"`
|
||||
Password string `yaml:"password"`
|
||||
ProducerAck string `yaml:"producerAck"`
|
||||
CompressType string `yaml:"compressType"`
|
||||
Address []string `yaml:"address"`
|
||||
ToRedisTopic string `yaml:"toRedisTopic"`
|
||||
ToMongoTopic string `yaml:"toMongoTopic"`
|
||||
ToPushTopic string `yaml:"toPushTopic"`
|
||||
ToOfflinePushTopic string `yaml:"toOfflinePushTopic"`
|
||||
ToRedisGroupID string `yaml:"toRedisGroupID"`
|
||||
ToMongoGroupID string `yaml:"toMongoGroupID"`
|
||||
ToPushGroupID string `yaml:"toPushGroupID"`
|
||||
ToOfflineGroupID string `yaml:"toOfflinePushGroupID"`
|
||||
|
||||
Tls TLSConfig `yaml:"tls"`
|
||||
}
|
||||
type TLSConfig struct {
|
||||
EnableTLS bool `yaml:"enableTLS"`
|
||||
CACrt string `yaml:"caCrt"`
|
||||
ClientCrt string `yaml:"clientCrt"`
|
||||
ClientKey string `yaml:"clientKey"`
|
||||
ClientKeyPwd string `yaml:"clientKeyPwd"`
|
||||
InsecureSkipVerify bool `yaml:"insecureSkipVerify"`
|
||||
}
|
||||
|
||||
type API struct {
|
||||
Api struct {
|
||||
ListenIP string `yaml:"listenIP"`
|
||||
Ports []int `yaml:"ports"`
|
||||
CompressionLevel int `yaml:"compressionLevel"`
|
||||
} `yaml:"api"`
|
||||
Prometheus struct {
|
||||
Enable bool `yaml:"enable"`
|
||||
AutoSetPorts bool `yaml:"autoSetPorts"`
|
||||
Ports []int `yaml:"ports"`
|
||||
GrafanaURL string `yaml:"grafanaURL"`
|
||||
} `yaml:"prometheus"`
|
||||
RateLimiter RateLimiter `yaml:"ratelimiter"`
|
||||
OnlineCountRefresh struct {
|
||||
Enable bool `yaml:"enable"`
|
||||
Interval time.Duration `yaml:"interval"`
|
||||
} `yaml:"onlineCountRefresh"`
|
||||
}
|
||||
|
||||
type CronTask struct {
|
||||
CronExecuteTime string `yaml:"cronExecuteTime"`
|
||||
RetainChatRecords int `yaml:"retainChatRecords"`
|
||||
FileExpireTime int `yaml:"fileExpireTime"`
|
||||
DeleteObjectType []string `yaml:"deleteObjectType"`
|
||||
}
|
||||
|
||||
type OfflinePushConfig struct {
|
||||
Enable bool `yaml:"enable"`
|
||||
Title string `yaml:"title"`
|
||||
Desc string `yaml:"desc"`
|
||||
Ext string `yaml:"ext"`
|
||||
}
|
||||
|
||||
type NotificationConfig struct {
|
||||
IsSendMsg bool `yaml:"isSendMsg"`
|
||||
ReliabilityLevel int `yaml:"reliabilityLevel"`
|
||||
UnreadCount bool `yaml:"unreadCount"`
|
||||
OfflinePush OfflinePushConfig `yaml:"offlinePush"`
|
||||
}
|
||||
|
||||
type Notification struct {
|
||||
GroupCreated NotificationConfig `yaml:"groupCreated"`
|
||||
GroupInfoSet NotificationConfig `yaml:"groupInfoSet"`
|
||||
JoinGroupApplication NotificationConfig `yaml:"joinGroupApplication"`
|
||||
MemberQuit NotificationConfig `yaml:"memberQuit"`
|
||||
GroupApplicationAccepted NotificationConfig `yaml:"groupApplicationAccepted"`
|
||||
GroupApplicationRejected NotificationConfig `yaml:"groupApplicationRejected"`
|
||||
GroupOwnerTransferred NotificationConfig `yaml:"groupOwnerTransferred"`
|
||||
MemberKicked NotificationConfig `yaml:"memberKicked"`
|
||||
MemberInvited NotificationConfig `yaml:"memberInvited"`
|
||||
MemberEnter NotificationConfig `yaml:"memberEnter"`
|
||||
GroupDismissed NotificationConfig `yaml:"groupDismissed"`
|
||||
GroupMuted NotificationConfig `yaml:"groupMuted"`
|
||||
GroupCancelMuted NotificationConfig `yaml:"groupCancelMuted"`
|
||||
GroupMemberMuted NotificationConfig `yaml:"groupMemberMuted"`
|
||||
GroupMemberCancelMuted NotificationConfig `yaml:"groupMemberCancelMuted"`
|
||||
GroupMemberInfoSet NotificationConfig `yaml:"groupMemberInfoSet"`
|
||||
GroupMemberSetToAdmin NotificationConfig `yaml:"groupMemberSetToAdmin"`
|
||||
GroupMemberSetToOrdinary NotificationConfig `yaml:"groupMemberSetToOrdinaryUser"`
|
||||
GroupInfoSetAnnouncement NotificationConfig `yaml:"groupInfoSetAnnouncement"`
|
||||
GroupInfoSetName NotificationConfig `yaml:"groupInfoSetName"`
|
||||
FriendApplicationAdded NotificationConfig `yaml:"friendApplicationAdded"`
|
||||
FriendApplicationApproved NotificationConfig `yaml:"friendApplicationApproved"`
|
||||
FriendApplicationRejected NotificationConfig `yaml:"friendApplicationRejected"`
|
||||
FriendAdded NotificationConfig `yaml:"friendAdded"`
|
||||
FriendDeleted NotificationConfig `yaml:"friendDeleted"`
|
||||
FriendRemarkSet NotificationConfig `yaml:"friendRemarkSet"`
|
||||
BlackAdded NotificationConfig `yaml:"blackAdded"`
|
||||
BlackDeleted NotificationConfig `yaml:"blackDeleted"`
|
||||
FriendInfoUpdated NotificationConfig `yaml:"friendInfoUpdated"`
|
||||
UserInfoUpdated NotificationConfig `yaml:"userInfoUpdated"`
|
||||
UserStatusChanged NotificationConfig `yaml:"userStatusChanged"`
|
||||
ConversationChanged NotificationConfig `yaml:"conversationChanged"`
|
||||
ConversationSetPrivate NotificationConfig `yaml:"conversationSetPrivate"`
|
||||
}
|
||||
|
||||
type Prometheus struct {
|
||||
Enable bool `yaml:"enable"`
|
||||
Ports []int `yaml:"ports"`
|
||||
}
|
||||
|
||||
type MsgGateway struct {
|
||||
RPC RPC `yaml:"rpc"`
|
||||
Prometheus Prometheus `yaml:"prometheus"`
|
||||
ListenIP string `yaml:"listenIP"`
|
||||
LongConnSvr struct {
|
||||
Ports []int `yaml:"ports"`
|
||||
WebsocketMaxConnNum int `yaml:"websocketMaxConnNum"`
|
||||
WebsocketMaxMsgLen int `yaml:"websocketMaxMsgLen"`
|
||||
WebsocketTimeout int `yaml:"websocketTimeout"`
|
||||
} `yaml:"longConnSvr"`
|
||||
RateLimiter RateLimiter `yaml:"ratelimiter"`
|
||||
CircuitBreaker CircuitBreaker `yaml:"circuitBreaker"`
|
||||
}
|
||||
|
||||
type MsgTransfer struct {
|
||||
Prometheus struct {
|
||||
Enable bool `yaml:"enable"`
|
||||
AutoSetPorts bool `yaml:"autoSetPorts"`
|
||||
Ports []int `yaml:"ports"`
|
||||
} `yaml:"prometheus"`
|
||||
RateLimiter RateLimiter `yaml:"ratelimiter"`
|
||||
CircuitBreaker CircuitBreaker `yaml:"circuitBreaker"`
|
||||
}
|
||||
|
||||
type Push struct {
|
||||
RPC RPC `yaml:"rpc"`
|
||||
Prometheus Prometheus `yaml:"prometheus"`
|
||||
MaxConcurrentWorkers int `yaml:"maxConcurrentWorkers"`
|
||||
Enable string `yaml:"enable"`
|
||||
GeTui struct {
|
||||
PushUrl string `yaml:"pushUrl"`
|
||||
MasterSecret string `yaml:"masterSecret"`
|
||||
AppKey string `yaml:"appKey"`
|
||||
Intent string `yaml:"intent"`
|
||||
ChannelID string `yaml:"channelID"`
|
||||
ChannelName string `yaml:"channelName"`
|
||||
} `yaml:"geTui"`
|
||||
FCM struct {
|
||||
FilePath string `yaml:"filePath"`
|
||||
AuthURL string `yaml:"authURL"`
|
||||
} `yaml:"fcm"`
|
||||
JPush struct {
|
||||
AppKey string `yaml:"appKey"`
|
||||
MasterSecret string `yaml:"masterSecret"`
|
||||
PushURL string `yaml:"pushURL"`
|
||||
PushIntent string `yaml:"pushIntent"`
|
||||
} `yaml:"jpush"`
|
||||
IOSPush struct {
|
||||
PushSound string `yaml:"pushSound"`
|
||||
BadgeCount bool `yaml:"badgeCount"`
|
||||
Production bool `yaml:"production"`
|
||||
} `yaml:"iosPush"`
|
||||
FullUserCache bool `yaml:"fullUserCache"`
|
||||
RateLimiter RateLimiter `yaml:"ratelimiter"`
|
||||
CircuitBreaker CircuitBreaker `yaml:"circuitBreaker"`
|
||||
}
|
||||
|
||||
type Auth struct {
|
||||
RPC RPC `yaml:"rpc"`
|
||||
Prometheus Prometheus `yaml:"prometheus"`
|
||||
TokenPolicy struct {
|
||||
Expire int64 `yaml:"expire"`
|
||||
} `yaml:"tokenPolicy"`
|
||||
RateLimiter RateLimiter `yaml:"ratelimiter"`
|
||||
CircuitBreaker CircuitBreaker `yaml:"circuitBreaker"`
|
||||
}
|
||||
|
||||
type Conversation struct {
|
||||
RPC RPC `yaml:"rpc"`
|
||||
Prometheus Prometheus `yaml:"prometheus"`
|
||||
RateLimiter RateLimiter `yaml:"ratelimiter"`
|
||||
CircuitBreaker CircuitBreaker `yaml:"circuitBreaker"`
|
||||
}
|
||||
|
||||
type Friend struct {
|
||||
RPC RPC `yaml:"rpc"`
|
||||
Prometheus Prometheus `yaml:"prometheus"`
|
||||
RateLimiter RateLimiter `yaml:"ratelimiter"`
|
||||
CircuitBreaker CircuitBreaker `yaml:"circuitBreaker"`
|
||||
}
|
||||
|
||||
type Group struct {
|
||||
RPC RPC `yaml:"rpc"`
|
||||
Prometheus Prometheus `yaml:"prometheus"`
|
||||
EnableHistoryForNewMembers bool `yaml:"enableHistoryForNewMembers"`
|
||||
RateLimiter RateLimiter `yaml:"ratelimiter"`
|
||||
CircuitBreaker CircuitBreaker `yaml:"circuitBreaker"`
|
||||
}
|
||||
|
||||
type Msg struct {
|
||||
RPC RPC `yaml:"rpc"`
|
||||
Prometheus Prometheus `yaml:"prometheus"`
|
||||
FriendVerify bool `yaml:"friendVerify"`
|
||||
RateLimiter RateLimiter `yaml:"ratelimiter"`
|
||||
CircuitBreaker CircuitBreaker `yaml:"circuitBreaker"`
|
||||
}
|
||||
|
||||
type Third struct {
|
||||
RPC RPC `yaml:"rpc"`
|
||||
Prometheus Prometheus `yaml:"prometheus"`
|
||||
Object struct {
|
||||
Enable string `yaml:"enable"`
|
||||
Cos Cos `yaml:"cos"`
|
||||
Oss Oss `yaml:"oss"`
|
||||
Kodo Kodo `yaml:"kodo"`
|
||||
Aws Aws `yaml:"aws"`
|
||||
} `yaml:"object"`
|
||||
RateLimiter RateLimiter `yaml:"ratelimiter"`
|
||||
CircuitBreaker CircuitBreaker `yaml:"circuitBreaker"`
|
||||
}
|
||||
type Cos struct {
|
||||
BucketURL string `yaml:"bucketURL"`
|
||||
SecretID string `yaml:"secretID"`
|
||||
SecretKey string `yaml:"secretKey"`
|
||||
SessionToken string `yaml:"sessionToken"`
|
||||
PublicRead bool `yaml:"publicRead"`
|
||||
}
|
||||
type Oss struct {
|
||||
Endpoint string `yaml:"endpoint"`
|
||||
Bucket string `yaml:"bucket"`
|
||||
BucketURL string `yaml:"bucketURL"`
|
||||
AccessKeyID string `yaml:"accessKeyID"`
|
||||
AccessKeySecret string `yaml:"accessKeySecret"`
|
||||
SessionToken string `yaml:"sessionToken"`
|
||||
PublicRead bool `yaml:"publicRead"`
|
||||
}
|
||||
|
||||
type Kodo struct {
|
||||
Endpoint string `yaml:"endpoint"`
|
||||
Bucket string `yaml:"bucket"`
|
||||
BucketURL string `yaml:"bucketURL"`
|
||||
AccessKeyID string `yaml:"accessKeyID"`
|
||||
AccessKeySecret string `yaml:"accessKeySecret"`
|
||||
SessionToken string `yaml:"sessionToken"`
|
||||
PublicRead bool `yaml:"publicRead"`
|
||||
}
|
||||
|
||||
type Aws struct {
|
||||
Region string `yaml:"region"`
|
||||
Bucket string `yaml:"bucket"`
|
||||
Endpoint string `yaml:"endpoint"`
|
||||
AccessKeyID string `yaml:"accessKeyID"`
|
||||
SecretAccessKey string `yaml:"secretAccessKey"`
|
||||
SessionToken string `yaml:"sessionToken"`
|
||||
PublicRead bool `yaml:"publicRead"`
|
||||
}
|
||||
|
||||
type User struct {
|
||||
RPC RPC `yaml:"rpc"`
|
||||
Prometheus Prometheus `yaml:"prometheus"`
|
||||
RateLimiter RateLimiter `yaml:"ratelimiter"`
|
||||
CircuitBreaker CircuitBreaker `yaml:"circuitBreaker"`
|
||||
}
|
||||
|
||||
type RPC struct {
|
||||
RegisterIP string `yaml:"registerIP"`
|
||||
ListenIP string `yaml:"listenIP"`
|
||||
AutoSetPorts bool `yaml:"autoSetPorts"`
|
||||
Ports []int `yaml:"ports"`
|
||||
}
|
||||
|
||||
type Redis struct {
|
||||
Disable bool `yaml:"-"`
|
||||
Address []string `yaml:"address"`
|
||||
Username string `yaml:"username"`
|
||||
Password string `yaml:"password"`
|
||||
RedisMode string `yaml:"redisMode"`
|
||||
DB int `yaml:"db"`
|
||||
MaxRetry int `yaml:"maxRetry"`
|
||||
PoolSize int `yaml:"poolSize"`
|
||||
OnlineKeyPrefix string `yaml:"onlineKeyPrefix"`
|
||||
OnlineKeyPrefixHashTag bool `yaml:"onlineKeyPrefixHashTag"`
|
||||
SentinelMode Sentinel `yaml:"sentinelMode"`
|
||||
}
|
||||
|
||||
type Sentinel struct {
|
||||
MasterName string `yaml:"masterName"`
|
||||
SentinelAddrs []string `yaml:"sentinelsAddrs"`
|
||||
RouteByLatency bool `yaml:"routeByLatency"`
|
||||
RouteRandomly bool `yaml:"routeRandomly"`
|
||||
}
|
||||
|
||||
type BeforeConfig struct {
|
||||
Enable bool `yaml:"enable"`
|
||||
Timeout int `yaml:"timeout"`
|
||||
FailedContinue bool `yaml:"failedContinue"`
|
||||
DeniedTypes []int32 `yaml:"deniedTypes"`
|
||||
}
|
||||
|
||||
type AfterConfig struct {
|
||||
Enable bool `yaml:"enable"`
|
||||
Timeout int `yaml:"timeout"`
|
||||
AttentionIds []string `yaml:"attentionIds"`
|
||||
DeniedTypes []int32 `yaml:"deniedTypes"`
|
||||
}
|
||||
|
||||
type RateLimiter struct {
|
||||
Enable bool `yaml:"enable"`
|
||||
Window time.Duration `yaml:"window"`
|
||||
Bucket int `yaml:"bucket"`
|
||||
CPUThreshold int64 `yaml:"cpuThreshold"`
|
||||
}
|
||||
|
||||
type CircuitBreaker struct {
|
||||
Enable bool `yaml:"enable"`
|
||||
Window time.Duration `yaml:"window"`
|
||||
Bucket int `yaml:"bucket"`
|
||||
Success float64 `yaml:"success"`
|
||||
Request int64 `yaml:"request"`
|
||||
}
|
||||
|
||||
type Share struct {
|
||||
Secret string `yaml:"secret"`
|
||||
IMAdminUser struct {
|
||||
UserIDs []string `yaml:"userIDs"`
|
||||
Nicknames []string `yaml:"nicknames"`
|
||||
} `yaml:"imAdminUser"`
|
||||
MultiLogin MultiLogin `yaml:"multiLogin"`
|
||||
RPCMaxBodySize MaxRequestBody `yaml:"rpcMaxBodySize"`
|
||||
}
|
||||
|
||||
type MaxRequestBody struct {
|
||||
RequestMaxBodySize int `yaml:"requestMaxBodySize"`
|
||||
ResponseMaxBodySize int `yaml:"responseMaxBodySize"`
|
||||
}
|
||||
|
||||
type MultiLogin struct {
|
||||
Policy int `yaml:"policy"`
|
||||
MaxNumOneEnd int `yaml:"maxNumOneEnd"`
|
||||
}
|
||||
|
||||
type RpcService struct {
|
||||
User string `yaml:"user"`
|
||||
Friend string `yaml:"friend"`
|
||||
Msg string `yaml:"msg"`
|
||||
Push string `yaml:"push"`
|
||||
MessageGateway string `yaml:"messageGateway"`
|
||||
Group string `yaml:"group"`
|
||||
Auth string `yaml:"auth"`
|
||||
Conversation string `yaml:"conversation"`
|
||||
Third string `yaml:"third"`
|
||||
}
|
||||
|
||||
func (r *RpcService) GetServiceNames() []string {
|
||||
return []string{
|
||||
r.User,
|
||||
r.Friend,
|
||||
r.Msg,
|
||||
r.Push,
|
||||
r.MessageGateway,
|
||||
r.Group,
|
||||
r.Auth,
|
||||
r.Conversation,
|
||||
r.Third,
|
||||
}
|
||||
}
|
||||
|
||||
// FullConfig stores all configurations for before and after events
|
||||
type Webhooks struct {
|
||||
URL string `yaml:"url"`
|
||||
BeforeSendSingleMsg BeforeConfig `yaml:"beforeSendSingleMsg"`
|
||||
BeforeUpdateUserInfoEx BeforeConfig `yaml:"beforeUpdateUserInfoEx"`
|
||||
AfterUpdateUserInfoEx AfterConfig `yaml:"afterUpdateUserInfoEx"`
|
||||
AfterSendSingleMsg AfterConfig `yaml:"afterSendSingleMsg"`
|
||||
BeforeSendGroupMsg BeforeConfig `yaml:"beforeSendGroupMsg"`
|
||||
BeforeMsgModify BeforeConfig `yaml:"beforeMsgModify"`
|
||||
AfterSendGroupMsg AfterConfig `yaml:"afterSendGroupMsg"`
|
||||
AfterMsgSaveDB AfterConfig `yaml:"afterMsgSaveDB"`
|
||||
AfterUserOnline AfterConfig `yaml:"afterUserOnline"`
|
||||
AfterUserOffline AfterConfig `yaml:"afterUserOffline"`
|
||||
AfterUserKickOff AfterConfig `yaml:"afterUserKickOff"`
|
||||
BeforeOfflinePush BeforeConfig `yaml:"beforeOfflinePush"`
|
||||
BeforeOnlinePush BeforeConfig `yaml:"beforeOnlinePush"`
|
||||
BeforeGroupOnlinePush BeforeConfig `yaml:"beforeGroupOnlinePush"`
|
||||
BeforeAddFriend BeforeConfig `yaml:"beforeAddFriend"`
|
||||
BeforeUpdateUserInfo BeforeConfig `yaml:"beforeUpdateUserInfo"`
|
||||
AfterUpdateUserInfo AfterConfig `yaml:"afterUpdateUserInfo"`
|
||||
BeforeCreateGroup BeforeConfig `yaml:"beforeCreateGroup"`
|
||||
AfterCreateGroup AfterConfig `yaml:"afterCreateGroup"`
|
||||
BeforeMemberJoinGroup BeforeConfig `yaml:"beforeMemberJoinGroup"`
|
||||
BeforeSetGroupMemberInfo BeforeConfig `yaml:"beforeSetGroupMemberInfo"`
|
||||
AfterSetGroupMemberInfo AfterConfig `yaml:"afterSetGroupMemberInfo"`
|
||||
AfterQuitGroup AfterConfig `yaml:"afterQuitGroup"`
|
||||
AfterKickGroupMember AfterConfig `yaml:"afterKickGroupMember"`
|
||||
AfterDismissGroup AfterConfig `yaml:"afterDismissGroup"`
|
||||
BeforeApplyJoinGroup BeforeConfig `yaml:"beforeApplyJoinGroup"`
|
||||
AfterGroupMsgRead AfterConfig `yaml:"afterGroupMsgRead"`
|
||||
AfterSingleMsgRead AfterConfig `yaml:"afterSingleMsgRead"`
|
||||
BeforeUserRegister BeforeConfig `yaml:"beforeUserRegister"`
|
||||
AfterUserRegister AfterConfig `yaml:"afterUserRegister"`
|
||||
AfterTransferGroupOwner AfterConfig `yaml:"afterTransferGroupOwner"`
|
||||
BeforeSetFriendRemark BeforeConfig `yaml:"beforeSetFriendRemark"`
|
||||
AfterSetFriendRemark AfterConfig `yaml:"afterSetFriendRemark"`
|
||||
AfterGroupMsgRevoke AfterConfig `yaml:"afterGroupMsgRevoke"`
|
||||
AfterJoinGroup AfterConfig `yaml:"afterJoinGroup"`
|
||||
BeforeInviteUserToGroup BeforeConfig `yaml:"beforeInviteUserToGroup"`
|
||||
AfterSetGroupInfo AfterConfig `yaml:"afterSetGroupInfo"`
|
||||
BeforeSetGroupInfo BeforeConfig `yaml:"beforeSetGroupInfo"`
|
||||
AfterSetGroupInfoEx AfterConfig `yaml:"afterSetGroupInfoEx"`
|
||||
BeforeSetGroupInfoEx BeforeConfig `yaml:"beforeSetGroupInfoEx"`
|
||||
AfterRevokeMsg AfterConfig `yaml:"afterRevokeMsg"`
|
||||
BeforeAddBlack BeforeConfig `yaml:"beforeAddBlack"`
|
||||
AfterAddFriend AfterConfig `yaml:"afterAddFriend"`
|
||||
BeforeAddFriendAgree BeforeConfig `yaml:"beforeAddFriendAgree"`
|
||||
AfterAddFriendAgree AfterConfig `yaml:"afterAddFriendAgree"`
|
||||
AfterDeleteFriend AfterConfig `yaml:"afterDeleteFriend"`
|
||||
BeforeImportFriends BeforeConfig `yaml:"beforeImportFriends"`
|
||||
AfterImportFriends AfterConfig `yaml:"afterImportFriends"`
|
||||
AfterRemoveBlack AfterConfig `yaml:"afterRemoveBlack"`
|
||||
BeforeCreateSingleChatConversations BeforeConfig `yaml:"beforeCreateSingleChatConversations"`
|
||||
AfterCreateSingleChatConversations AfterConfig `yaml:"afterCreateSingleChatConversations"`
|
||||
BeforeCreateGroupChatConversations BeforeConfig `yaml:"beforeCreateGroupChatConversations"`
|
||||
AfterCreateGroupChatConversations AfterConfig `yaml:"afterCreateGroupChatConversations"`
|
||||
}
|
||||
|
||||
type ZooKeeper struct {
|
||||
Schema string `yaml:"schema"`
|
||||
Address []string `yaml:"address"`
|
||||
Username string `yaml:"username"`
|
||||
Password string `yaml:"password"`
|
||||
}
|
||||
|
||||
type Discovery struct {
|
||||
Enable string `yaml:"enable"`
|
||||
Etcd Etcd `yaml:"etcd"`
|
||||
Kubernetes Kubernetes `yaml:"kubernetes"`
|
||||
RpcService RpcService `yaml:"rpcService"`
|
||||
}
|
||||
|
||||
type Kubernetes struct {
|
||||
Namespace string `yaml:"namespace"`
|
||||
}
|
||||
|
||||
type Etcd struct {
|
||||
RootDirectory string `yaml:"rootDirectory"`
|
||||
Address []string `yaml:"address"`
|
||||
Username string `yaml:"username"`
|
||||
Password string `yaml:"password"`
|
||||
}
|
||||
|
||||
func (m *Mongo) Build() *mongoutil.Config {
|
||||
return &mongoutil.Config{
|
||||
Uri: m.URI,
|
||||
Address: m.Address,
|
||||
Database: m.Database,
|
||||
Username: m.Username,
|
||||
Password: m.Password,
|
||||
AuthSource: m.AuthSource,
|
||||
MaxPoolSize: m.MaxPoolSize,
|
||||
MaxRetry: m.MaxRetry,
|
||||
MongoMode: m.MongoMode,
|
||||
ReplicaSet: &mongoutil.ReplicaSetConfig{
|
||||
Name: m.ReplicaSet.Name,
|
||||
Hosts: m.ReplicaSet.Hosts,
|
||||
ReadConcern: m.ReplicaSet.ReadConcern,
|
||||
MaxStaleness: m.ReplicaSet.MaxStaleness,
|
||||
},
|
||||
ReadPreference: &mongoutil.ReadPrefConfig{
|
||||
Mode: m.ReadPreference.Mode,
|
||||
TagSets: m.ReadPreference.TagSets,
|
||||
MaxStaleness: m.ReadPreference.MaxStaleness,
|
||||
},
|
||||
WriteConcern: &mongoutil.WriteConcernConfig{
|
||||
W: m.WriteConcern.W,
|
||||
J: m.WriteConcern.J,
|
||||
WTimeout: m.WriteConcern.WTimeout,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Redis) Build() *redisutil.Config {
|
||||
return &redisutil.Config{
|
||||
RedisMode: r.RedisMode,
|
||||
Address: r.Address,
|
||||
Username: r.Username,
|
||||
Password: r.Password,
|
||||
DB: r.DB,
|
||||
MaxRetry: r.MaxRetry,
|
||||
PoolSize: r.PoolSize,
|
||||
Sentinel: &redisutil.Sentinel{
|
||||
MasterName: r.SentinelMode.MasterName,
|
||||
SentinelAddrs: r.SentinelMode.SentinelAddrs,
|
||||
RouteByLatency: r.SentinelMode.RouteByLatency,
|
||||
RouteRandomly: r.SentinelMode.RouteRandomly,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (k *Kafka) Build() *kafka.Config {
|
||||
return &kafka.Config{
|
||||
Username: k.Username,
|
||||
Password: k.Password,
|
||||
ProducerAck: k.ProducerAck,
|
||||
CompressType: k.CompressType,
|
||||
Addr: k.Address,
|
||||
TLS: kafka.TLSConfig{
|
||||
EnableTLS: k.Tls.EnableTLS,
|
||||
CACrt: k.Tls.CACrt,
|
||||
ClientCrt: k.Tls.ClientCrt,
|
||||
ClientKey: k.Tls.ClientKey,
|
||||
ClientKeyPwd: k.Tls.ClientKeyPwd,
|
||||
InsecureSkipVerify: k.Tls.InsecureSkipVerify,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Minio) Build() *minio.Config {
|
||||
formatEndpoint := func(address string) string {
|
||||
if strings.HasPrefix(address, "http://") || strings.HasPrefix(address, "https://") {
|
||||
return address
|
||||
}
|
||||
return "http://" + address
|
||||
}
|
||||
return &minio.Config{
|
||||
Bucket: m.Bucket,
|
||||
AccessKeyID: m.AccessKeyID,
|
||||
SecretAccessKey: m.SecretAccessKey,
|
||||
SessionToken: m.SessionToken,
|
||||
PublicRead: m.PublicRead,
|
||||
Endpoint: formatEndpoint(m.InternalAddress),
|
||||
SignEndpoint: formatEndpoint(m.ExternalAddress),
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Cos) Build() *cos.Config {
|
||||
return &cos.Config{
|
||||
BucketURL: c.BucketURL,
|
||||
SecretID: c.SecretID,
|
||||
SecretKey: c.SecretKey,
|
||||
SessionToken: c.SessionToken,
|
||||
PublicRead: c.PublicRead,
|
||||
}
|
||||
}
|
||||
|
||||
func (o *Oss) Build() *oss.Config {
|
||||
return &oss.Config{
|
||||
Endpoint: o.Endpoint,
|
||||
Bucket: o.Bucket,
|
||||
BucketURL: o.BucketURL,
|
||||
AccessKeyID: o.AccessKeyID,
|
||||
AccessKeySecret: o.AccessKeySecret,
|
||||
SessionToken: o.SessionToken,
|
||||
PublicRead: o.PublicRead,
|
||||
}
|
||||
}
|
||||
|
||||
func (o *Kodo) Build() *kodo.Config {
|
||||
return &kodo.Config{
|
||||
Endpoint: o.Endpoint,
|
||||
Bucket: o.Bucket,
|
||||
BucketURL: o.BucketURL,
|
||||
AccessKeyID: o.AccessKeyID,
|
||||
AccessKeySecret: o.AccessKeySecret,
|
||||
SessionToken: o.SessionToken,
|
||||
PublicRead: o.PublicRead,
|
||||
}
|
||||
}
|
||||
|
||||
func (o *Aws) Build() *aws.Config {
|
||||
return &aws.Config{
|
||||
Region: o.Region,
|
||||
Bucket: o.Bucket,
|
||||
AccessKeyID: o.AccessKeyID,
|
||||
SecretAccessKey: o.SecretAccessKey,
|
||||
SessionToken: o.SessionToken,
|
||||
}
|
||||
}
|
||||
|
||||
func (l *CacheConfig) Failed() time.Duration {
|
||||
return time.Second * time.Duration(l.FailedExpire)
|
||||
}
|
||||
|
||||
func (l *CacheConfig) Success() time.Duration {
|
||||
return time.Second * time.Duration(l.SuccessExpire)
|
||||
}
|
||||
|
||||
func (l *CacheConfig) Enable() bool {
|
||||
return l.Topic != "" && l.SlotNum > 0 && l.SlotSize > 0
|
||||
}
|
||||
|
||||
func InitNotification(notification *Notification) {
|
||||
notification.GroupCreated.UnreadCount = false
|
||||
notification.GroupCreated.ReliabilityLevel = 1
|
||||
notification.GroupInfoSet.UnreadCount = false
|
||||
notification.GroupInfoSet.ReliabilityLevel = 1
|
||||
notification.JoinGroupApplication.UnreadCount = false
|
||||
notification.JoinGroupApplication.ReliabilityLevel = 1
|
||||
notification.MemberQuit.UnreadCount = false
|
||||
notification.MemberQuit.ReliabilityLevel = 1
|
||||
notification.GroupApplicationAccepted.UnreadCount = false
|
||||
notification.GroupApplicationAccepted.ReliabilityLevel = 1
|
||||
notification.GroupApplicationRejected.UnreadCount = false
|
||||
notification.GroupApplicationRejected.ReliabilityLevel = 1
|
||||
notification.GroupOwnerTransferred.UnreadCount = false
|
||||
notification.GroupOwnerTransferred.ReliabilityLevel = 1
|
||||
notification.MemberKicked.UnreadCount = false
|
||||
notification.MemberKicked.ReliabilityLevel = 1
|
||||
notification.MemberInvited.UnreadCount = false
|
||||
notification.MemberInvited.ReliabilityLevel = 1
|
||||
notification.MemberEnter.UnreadCount = false
|
||||
notification.MemberEnter.ReliabilityLevel = 1
|
||||
notification.GroupDismissed.UnreadCount = false
|
||||
notification.GroupDismissed.ReliabilityLevel = 1
|
||||
notification.GroupMuted.UnreadCount = false
|
||||
notification.GroupMuted.ReliabilityLevel = 1
|
||||
notification.GroupCancelMuted.UnreadCount = false
|
||||
notification.GroupCancelMuted.ReliabilityLevel = 1
|
||||
notification.GroupMemberMuted.UnreadCount = false
|
||||
notification.GroupMemberMuted.ReliabilityLevel = 1
|
||||
notification.GroupMemberCancelMuted.UnreadCount = false
|
||||
notification.GroupMemberCancelMuted.ReliabilityLevel = 1
|
||||
notification.GroupMemberInfoSet.UnreadCount = false
|
||||
notification.GroupMemberInfoSet.ReliabilityLevel = 1
|
||||
notification.GroupMemberSetToAdmin.UnreadCount = false
|
||||
notification.GroupMemberSetToAdmin.ReliabilityLevel = 1
|
||||
notification.GroupMemberSetToOrdinary.UnreadCount = false
|
||||
notification.GroupMemberSetToOrdinary.ReliabilityLevel = 1
|
||||
notification.GroupInfoSetAnnouncement.UnreadCount = false
|
||||
notification.GroupInfoSetAnnouncement.ReliabilityLevel = 1
|
||||
notification.GroupInfoSetName.UnreadCount = false
|
||||
notification.GroupInfoSetName.ReliabilityLevel = 1
|
||||
notification.FriendApplicationAdded.UnreadCount = false
|
||||
notification.FriendApplicationAdded.ReliabilityLevel = 1
|
||||
notification.FriendApplicationApproved.UnreadCount = false
|
||||
notification.FriendApplicationApproved.ReliabilityLevel = 1
|
||||
notification.FriendApplicationRejected.UnreadCount = false
|
||||
notification.FriendApplicationRejected.ReliabilityLevel = 1
|
||||
notification.FriendAdded.UnreadCount = false
|
||||
notification.FriendAdded.ReliabilityLevel = 1
|
||||
notification.FriendDeleted.UnreadCount = false
|
||||
notification.FriendDeleted.ReliabilityLevel = 1
|
||||
notification.FriendRemarkSet.UnreadCount = false
|
||||
notification.FriendRemarkSet.ReliabilityLevel = 1
|
||||
notification.BlackAdded.UnreadCount = false
|
||||
notification.BlackAdded.ReliabilityLevel = 1
|
||||
notification.BlackDeleted.UnreadCount = false
|
||||
notification.BlackDeleted.ReliabilityLevel = 1
|
||||
notification.FriendInfoUpdated.UnreadCount = false
|
||||
notification.FriendInfoUpdated.ReliabilityLevel = 1
|
||||
notification.UserInfoUpdated.UnreadCount = false
|
||||
notification.UserInfoUpdated.ReliabilityLevel = 1
|
||||
notification.UserStatusChanged.UnreadCount = false
|
||||
notification.UserStatusChanged.ReliabilityLevel = 1
|
||||
notification.ConversationChanged.UnreadCount = false
|
||||
notification.ConversationChanged.ReliabilityLevel = 1
|
||||
notification.ConversationSetPrivate.UnreadCount = false
|
||||
notification.ConversationSetPrivate.ReliabilityLevel = 1
|
||||
}
|
||||
|
||||
type AllConfig struct {
|
||||
Discovery Discovery
|
||||
Kafka Kafka
|
||||
LocalCache LocalCache
|
||||
Log Log
|
||||
Minio Minio
|
||||
Mongo Mongo
|
||||
Notification Notification
|
||||
API API
|
||||
CronTask CronTask
|
||||
MsgGateway MsgGateway
|
||||
MsgTransfer MsgTransfer
|
||||
Push Push
|
||||
Auth Auth
|
||||
Conversation Conversation
|
||||
Friend Friend
|
||||
Group Group
|
||||
Msg Msg
|
||||
Third Third
|
||||
User User
|
||||
Redis Redis
|
||||
Share Share
|
||||
Webhooks Webhooks
|
||||
}
|
||||
|
||||
func (a *AllConfig) Name2Config(name string) any {
|
||||
switch name {
|
||||
case a.Discovery.GetConfigFileName():
|
||||
return a.Discovery
|
||||
case a.Kafka.GetConfigFileName():
|
||||
return a.Kafka
|
||||
case a.LocalCache.GetConfigFileName():
|
||||
return a.LocalCache
|
||||
case a.Log.GetConfigFileName():
|
||||
return a.Log
|
||||
case a.Minio.GetConfigFileName():
|
||||
return a.Minio
|
||||
case a.Mongo.GetConfigFileName():
|
||||
return a.Mongo
|
||||
case a.Notification.GetConfigFileName():
|
||||
return a.Notification
|
||||
case a.API.GetConfigFileName():
|
||||
return a.API
|
||||
case a.CronTask.GetConfigFileName():
|
||||
return a.CronTask
|
||||
case a.MsgGateway.GetConfigFileName():
|
||||
return a.MsgGateway
|
||||
case a.MsgTransfer.GetConfigFileName():
|
||||
return a.MsgTransfer
|
||||
case a.Push.GetConfigFileName():
|
||||
return a.Push
|
||||
case a.Auth.GetConfigFileName():
|
||||
return a.Auth
|
||||
case a.Conversation.GetConfigFileName():
|
||||
return a.Conversation
|
||||
case a.Friend.GetConfigFileName():
|
||||
return a.Friend
|
||||
case a.Group.GetConfigFileName():
|
||||
return a.Group
|
||||
case a.Msg.GetConfigFileName():
|
||||
return a.Msg
|
||||
case a.Third.GetConfigFileName():
|
||||
return a.Third
|
||||
case a.User.GetConfigFileName():
|
||||
return a.User
|
||||
case a.Redis.GetConfigFileName():
|
||||
return a.Redis
|
||||
case a.Share.GetConfigFileName():
|
||||
return a.Share
|
||||
case a.Webhooks.GetConfigFileName():
|
||||
return a.Webhooks
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func (a *AllConfig) GetConfigNames() []string {
|
||||
return []string{
|
||||
a.Discovery.GetConfigFileName(),
|
||||
a.Kafka.GetConfigFileName(),
|
||||
a.LocalCache.GetConfigFileName(),
|
||||
a.Log.GetConfigFileName(),
|
||||
a.Minio.GetConfigFileName(),
|
||||
a.Mongo.GetConfigFileName(),
|
||||
a.Notification.GetConfigFileName(),
|
||||
a.API.GetConfigFileName(),
|
||||
a.CronTask.GetConfigFileName(),
|
||||
a.MsgGateway.GetConfigFileName(),
|
||||
a.MsgTransfer.GetConfigFileName(),
|
||||
a.Push.GetConfigFileName(),
|
||||
a.Auth.GetConfigFileName(),
|
||||
a.Conversation.GetConfigFileName(),
|
||||
a.Friend.GetConfigFileName(),
|
||||
a.Group.GetConfigFileName(),
|
||||
a.Msg.GetConfigFileName(),
|
||||
a.Third.GetConfigFileName(),
|
||||
a.User.GetConfigFileName(),
|
||||
a.Redis.GetConfigFileName(),
|
||||
a.Share.GetConfigFileName(),
|
||||
a.Webhooks.GetConfigFileName(),
|
||||
}
|
||||
}
|
||||
|
||||
const (
|
||||
FileName = "config.yaml"
|
||||
DiscoveryConfigFilename = "discovery.yml"
|
||||
KafkaConfigFileName = "kafka.yml"
|
||||
LocalCacheConfigFileName = "local-cache.yml"
|
||||
LogConfigFileName = "log.yml"
|
||||
MinioConfigFileName = "minio.yml"
|
||||
MongodbConfigFileName = "mongodb.yml"
|
||||
NotificationFileName = "notification.yml"
|
||||
OpenIMAPICfgFileName = "openim-api.yml"
|
||||
OpenIMCronTaskCfgFileName = "openim-crontask.yml"
|
||||
OpenIMMsgGatewayCfgFileName = "openim-msggateway.yml"
|
||||
OpenIMMsgTransferCfgFileName = "openim-msgtransfer.yml"
|
||||
OpenIMPushCfgFileName = "openim-push.yml"
|
||||
OpenIMRPCAuthCfgFileName = "openim-rpc-auth.yml"
|
||||
OpenIMRPCConversationCfgFileName = "openim-rpc-conversation.yml"
|
||||
OpenIMRPCFriendCfgFileName = "openim-rpc-friend.yml"
|
||||
OpenIMRPCGroupCfgFileName = "openim-rpc-group.yml"
|
||||
OpenIMRPCMsgCfgFileName = "openim-rpc-msg.yml"
|
||||
OpenIMRPCThirdCfgFileName = "openim-rpc-third.yml"
|
||||
OpenIMRPCUserCfgFileName = "openim-rpc-user.yml"
|
||||
RedisConfigFileName = "redis.yml"
|
||||
ShareFileName = "share.yml"
|
||||
WebhooksConfigFileName = "webhooks.yml"
|
||||
)
|
||||
|
||||
func (d *Discovery) GetConfigFileName() string {
|
||||
return DiscoveryConfigFilename
|
||||
}
|
||||
|
||||
func (k *Kafka) GetConfigFileName() string {
|
||||
return KafkaConfigFileName
|
||||
}
|
||||
|
||||
func (lc *LocalCache) GetConfigFileName() string {
|
||||
return LocalCacheConfigFileName
|
||||
}
|
||||
|
||||
func (l *Log) GetConfigFileName() string {
|
||||
return LogConfigFileName
|
||||
}
|
||||
|
||||
func (m *Minio) GetConfigFileName() string {
|
||||
return MinioConfigFileName
|
||||
}
|
||||
|
||||
func (m *Mongo) GetConfigFileName() string {
|
||||
return MongodbConfigFileName
|
||||
}
|
||||
|
||||
func (n *Notification) GetConfigFileName() string {
|
||||
return NotificationFileName
|
||||
}
|
||||
|
||||
func (a *API) GetConfigFileName() string {
|
||||
return OpenIMAPICfgFileName
|
||||
}
|
||||
|
||||
func (ct *CronTask) GetConfigFileName() string {
|
||||
return OpenIMCronTaskCfgFileName
|
||||
}
|
||||
|
||||
func (mg *MsgGateway) GetConfigFileName() string {
|
||||
return OpenIMMsgGatewayCfgFileName
|
||||
}
|
||||
|
||||
func (mt *MsgTransfer) GetConfigFileName() string {
|
||||
return OpenIMMsgTransferCfgFileName
|
||||
}
|
||||
|
||||
func (p *Push) GetConfigFileName() string {
|
||||
return OpenIMPushCfgFileName
|
||||
}
|
||||
|
||||
func (a *Auth) GetConfigFileName() string {
|
||||
return OpenIMRPCAuthCfgFileName
|
||||
}
|
||||
|
||||
func (c *Conversation) GetConfigFileName() string {
|
||||
return OpenIMRPCConversationCfgFileName
|
||||
}
|
||||
|
||||
func (f *Friend) GetConfigFileName() string {
|
||||
return OpenIMRPCFriendCfgFileName
|
||||
}
|
||||
|
||||
func (g *Group) GetConfigFileName() string {
|
||||
return OpenIMRPCGroupCfgFileName
|
||||
}
|
||||
|
||||
func (m *Msg) GetConfigFileName() string {
|
||||
return OpenIMRPCMsgCfgFileName
|
||||
}
|
||||
|
||||
func (t *Third) GetConfigFileName() string {
|
||||
return OpenIMRPCThirdCfgFileName
|
||||
}
|
||||
|
||||
func (u *User) GetConfigFileName() string {
|
||||
return OpenIMRPCUserCfgFileName
|
||||
}
|
||||
|
||||
func (r *Redis) GetConfigFileName() string {
|
||||
return RedisConfigFileName
|
||||
}
|
||||
|
||||
func (s *Share) GetConfigFileName() string {
|
||||
return ShareFileName
|
||||
}
|
||||
|
||||
func (w *Webhooks) GetConfigFileName() string {
|
||||
return WebhooksConfigFileName
|
||||
}
|
||||
47
pkg/common/config/constant.go
Normal file
47
pkg/common/config/constant.go
Normal file
@@ -0,0 +1,47 @@
|
||||
// Copyright © 2024 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package config
|
||||
|
||||
import "github.com/openimsdk/tools/utils/runtimeenv"
|
||||
|
||||
const ConfKey = "conf"
|
||||
|
||||
const (
|
||||
MountConfigFilePath = "CONFIG_PATH"
|
||||
DeploymentType = "DEPLOYMENT_TYPE"
|
||||
KUBERNETES = runtimeenv.Kubernetes
|
||||
ETCD = "etcd"
|
||||
//Standalone = "standalone"
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultDirPerm is used for creating general directories, allowing the owner to read, write, and execute,
|
||||
// while the group and others can only read and execute.
|
||||
DefaultDirPerm = 0755
|
||||
|
||||
// PrivateFilePerm is used for sensitive files, allowing only the owner to read and write.
|
||||
PrivateFilePerm = 0600
|
||||
|
||||
// ExecFilePerm is used for executable files, allowing the owner to read, write, and execute,
|
||||
// while the group and others can only read.
|
||||
ExecFilePerm = 0754
|
||||
|
||||
// SharedDirPerm is used for shared directories, allowing the owner and group to read, write, and execute,
|
||||
// with no permissions for others.
|
||||
SharedDirPerm = 0770
|
||||
|
||||
// ReadOnlyDirPerm is used for read-only directories, allowing the owner, group, and others to only read.
|
||||
ReadOnlyDirPerm = 0555
|
||||
)
|
||||
15
pkg/common/config/doc.go
Normal file
15
pkg/common/config/doc.go
Normal file
@@ -0,0 +1,15 @@
|
||||
// Copyright © 2024 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package config // import "git.imall.cloud/openim/open-im-server-deploy/pkg/common/config"
|
||||
30
pkg/common/config/env.go
Normal file
30
pkg/common/config/env.go
Normal file
@@ -0,0 +1,30 @@
|
||||
package config
|
||||
|
||||
import "strings"
|
||||
|
||||
var EnvPrefixMap map[string]string
|
||||
|
||||
func init() {
|
||||
EnvPrefixMap = make(map[string]string)
|
||||
fileNames := []string{
|
||||
FileName, NotificationFileName, ShareFileName, WebhooksConfigFileName,
|
||||
KafkaConfigFileName, RedisConfigFileName,
|
||||
MongodbConfigFileName, MinioConfigFileName, LogConfigFileName,
|
||||
OpenIMAPICfgFileName, OpenIMCronTaskCfgFileName, OpenIMMsgGatewayCfgFileName,
|
||||
OpenIMMsgTransferCfgFileName, OpenIMPushCfgFileName, OpenIMRPCAuthCfgFileName,
|
||||
OpenIMRPCConversationCfgFileName, OpenIMRPCFriendCfgFileName, OpenIMRPCGroupCfgFileName,
|
||||
OpenIMRPCMsgCfgFileName, OpenIMRPCThirdCfgFileName, OpenIMRPCUserCfgFileName, DiscoveryConfigFilename,
|
||||
}
|
||||
|
||||
for _, fileName := range fileNames {
|
||||
envKey := strings.TrimSuffix(strings.TrimSuffix(fileName, ".yml"), ".yaml")
|
||||
envKey = "IMENV_" + envKey
|
||||
envKey = strings.ToUpper(strings.ReplaceAll(envKey, "-", "_"))
|
||||
EnvPrefixMap[fileName] = envKey
|
||||
}
|
||||
}
|
||||
|
||||
const (
|
||||
FlagConf = "config_folder_path"
|
||||
FlagTransferIndex = "index"
|
||||
)
|
||||
11
pkg/common/config/global.go
Normal file
11
pkg/common/config/global.go
Normal file
@@ -0,0 +1,11 @@
|
||||
package config
|
||||
|
||||
var standalone bool
|
||||
|
||||
func SetStandalone() {
|
||||
standalone = true
|
||||
}
|
||||
|
||||
func Standalone() bool {
|
||||
return standalone
|
||||
}
|
||||
44
pkg/common/config/load_config.go
Normal file
44
pkg/common/config/load_config.go
Normal file
@@ -0,0 +1,44 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/mitchellh/mapstructure"
|
||||
"github.com/openimsdk/tools/errs"
|
||||
"github.com/openimsdk/tools/utils/runtimeenv"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
func Load(configDirectory string, configFileName string, envPrefix string, config any) error {
|
||||
if runtimeenv.RuntimeEnvironment() == KUBERNETES {
|
||||
mountPath := os.Getenv(MountConfigFilePath)
|
||||
if mountPath == "" {
|
||||
return errs.ErrArgs.WrapMsg(MountConfigFilePath + " env is empty")
|
||||
}
|
||||
|
||||
return loadConfig(filepath.Join(mountPath, configFileName), envPrefix, config)
|
||||
}
|
||||
|
||||
return loadConfig(filepath.Join(configDirectory, configFileName), envPrefix, config)
|
||||
}
|
||||
|
||||
func loadConfig(path string, envPrefix string, config any) error {
|
||||
v := viper.New()
|
||||
v.SetConfigFile(path)
|
||||
v.SetEnvPrefix(envPrefix)
|
||||
v.AutomaticEnv()
|
||||
v.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
|
||||
|
||||
if err := v.ReadInConfig(); err != nil {
|
||||
return errs.WrapMsg(err, "failed to read config file", "path", path, "envPrefix", envPrefix)
|
||||
}
|
||||
|
||||
if err := v.Unmarshal(config, func(config *mapstructure.DecoderConfig) {
|
||||
config.TagName = StructTagName
|
||||
}); err != nil {
|
||||
return errs.WrapMsg(err, "failed to unmarshal config", "path", path, "envPrefix", envPrefix)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
93
pkg/common/config/load_config_test.go
Normal file
93
pkg/common/config/load_config_test.go
Normal file
@@ -0,0 +1,93 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestLoadLogConfig(t *testing.T) {
|
||||
var log Log
|
||||
os.Setenv("IMENV_LOG_REMAINLOGLEVEL", "5")
|
||||
err := Load("../../../config/", "log.yml", "IMENV_LOG", &log)
|
||||
assert.Nil(t, err)
|
||||
t.Log(log.RemainLogLevel)
|
||||
// assert.Equal(t, "../../../../logs/", log.StorageLocation)
|
||||
}
|
||||
|
||||
func TestLoadMongoConfig(t *testing.T) {
|
||||
var mongo Mongo
|
||||
// os.Setenv("DEPLOYMENT_TYPE", "kubernetes")
|
||||
os.Setenv("IMENV_MONGODB_PASSWORD", "openIM1231231")
|
||||
// os.Setenv("IMENV_MONGODB_URI", "openIM123")
|
||||
// os.Setenv("IMENV_MONGODB_USERNAME", "openIM123")
|
||||
err := Load("../../../config/", "mongodb.yml", "IMENV_MONGODB", &mongo)
|
||||
// err := LoadApiConfig("../../../config/mongodb.yml", "IMENV_MONGODB", &mongo)
|
||||
|
||||
assert.Nil(t, err)
|
||||
t.Log(mongo.Password)
|
||||
// assert.Equal(t, "openIM123", mongo.Password)
|
||||
t.Log(os.Getenv("IMENV_MONGODB_PASSWORD"))
|
||||
t.Log(mongo)
|
||||
// //export IMENV_OPENIM_RPC_USER_RPC_LISTENIP="0.0.0.0"
|
||||
// assert.Equal(t, "0.0.0.0", user.RPC.ListenIP)
|
||||
// //export IMENV_OPENIM_RPC_USER_RPC_PORTS="10110,10111,10112"
|
||||
// assert.Equal(t, []int{10110, 10111, 10112}, user.RPC.Ports)
|
||||
}
|
||||
|
||||
func TestLoadMinioConfig(t *testing.T) {
|
||||
var storageConfig Minio
|
||||
err := Load("../../../config/minio.yml", "IMENV_MINIO", "", &storageConfig)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, "openim", storageConfig.Bucket)
|
||||
}
|
||||
|
||||
func TestLoadWebhooksConfig(t *testing.T) {
|
||||
var webhooks Webhooks
|
||||
err := Load("../../../config/webhooks.yml", "IMENV_WEBHOOKS", "", &webhooks)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, 5, webhooks.BeforeAddBlack.Timeout)
|
||||
|
||||
}
|
||||
|
||||
func TestLoadOpenIMRpcUserConfig(t *testing.T) {
|
||||
var user User
|
||||
err := Load("../../../config/openim-rpc-user.yml", "IMENV_OPENIM_RPC_USER", "", &user)
|
||||
assert.Nil(t, err)
|
||||
//export IMENV_OPENIM_RPC_USER_RPC_LISTENIP="0.0.0.0"
|
||||
assert.Equal(t, "0.0.0.0", user.RPC.ListenIP)
|
||||
//export IMENV_OPENIM_RPC_USER_RPC_PORTS="10110,10111,10112"
|
||||
assert.Equal(t, []int{10110, 10111, 10112}, user.RPC.Ports)
|
||||
}
|
||||
|
||||
func TestLoadNotificationConfig(t *testing.T) {
|
||||
var noti Notification
|
||||
err := Load("../../../config/notification.yml", "IMENV_NOTIFICATION", "", ¬i)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, "Your friend's profile has been changed", noti.FriendRemarkSet.OfflinePush.Title)
|
||||
}
|
||||
|
||||
func TestLoadOpenIMThirdConfig(t *testing.T) {
|
||||
var third Third
|
||||
err := Load("../../../config/openim-rpc-third.yml", "IMENV_OPENIM_RPC_THIRD", "", &third)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, "enabled", third.Object.Enable)
|
||||
assert.Equal(t, "https://oss-cn-chengdu.aliyuncs.com", third.Object.Oss.Endpoint)
|
||||
assert.Equal(t, "my_bucket_name", third.Object.Oss.Bucket)
|
||||
assert.Equal(t, "https://my_bucket_name.oss-cn-chengdu.aliyuncs.com", third.Object.Oss.BucketURL)
|
||||
assert.Equal(t, "AKID1234567890", third.Object.Oss.AccessKeyID)
|
||||
assert.Equal(t, "abc123xyz789", third.Object.Oss.AccessKeySecret)
|
||||
assert.Equal(t, "session_token_value", third.Object.Oss.SessionToken) // Uncomment if session token is needed
|
||||
assert.Equal(t, true, third.Object.Oss.PublicRead)
|
||||
|
||||
// Environment: IMENV_OPENIM_RPC_THIRD_OBJECT_ENABLE=enabled;IMENV_OPENIM_RPC_THIRD_OBJECT_OSS_ENDPOINT=https://oss-cn-chengdu.aliyuncs.com;IMENV_OPENIM_RPC_THIRD_OBJECT_OSS_BUCKET=my_bucket_name;IMENV_OPENIM_RPC_THIRD_OBJECT_OSS_BUCKETURL=https://my_bucket_name.oss-cn-chengdu.aliyuncs.com;IMENV_OPENIM_RPC_THIRD_OBJECT_OSS_ACCESSKEYID=AKID1234567890;IMENV_OPENIM_RPC_THIRD_OBJECT_OSS_ACCESSKEYSECRET=abc123xyz789;IMENV_OPENIM_RPC_THIRD_OBJECT_OSS_SESSIONTOKEN=session_token_value;IMENV_OPENIM_RPC_THIRD_OBJECT_OSS_PUBLICREAD=true
|
||||
}
|
||||
|
||||
func TestTransferConfig(t *testing.T) {
|
||||
var tran MsgTransfer
|
||||
err := Load("../../../config/openim-msgtransfer.yml", "IMENV_OPENIM-MSGTRANSFER", "", &tran)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, true, tran.Prometheus.Enable)
|
||||
assert.Equal(t, true, tran.Prometheus.AutoSetPorts)
|
||||
}
|
||||
107
pkg/common/config/parse.go
Normal file
107
pkg/common/config/parse.go
Normal file
@@ -0,0 +1,107 @@
|
||||
// Copyright © 2023 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package config
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"gopkg.in/yaml.v3"
|
||||
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/msgprocessor"
|
||||
"git.imall.cloud/openim/protocol/constant"
|
||||
"github.com/openimsdk/tools/errs"
|
||||
"github.com/openimsdk/tools/field"
|
||||
)
|
||||
|
||||
const (
|
||||
DefaultFolderPath = "../config/"
|
||||
)
|
||||
|
||||
// return absolude path join ../config/, this is k8s container config path.
|
||||
func GetDefaultConfigPath() (string, error) {
|
||||
executablePath, err := os.Executable()
|
||||
if err != nil {
|
||||
return "", errs.WrapMsg(err, "failed to get executable path")
|
||||
}
|
||||
|
||||
configPath, err := field.OutDir(filepath.Join(filepath.Dir(executablePath), "../config/"))
|
||||
if err != nil {
|
||||
return "", errs.WrapMsg(err, "failed to get output directory", "outDir", filepath.Join(filepath.Dir(executablePath), "../config/"))
|
||||
}
|
||||
return configPath, nil
|
||||
}
|
||||
|
||||
// getProjectRoot returns the absolute path of the project root directory.
|
||||
func GetProjectRoot() (string, error) {
|
||||
executablePath, err := os.Executable()
|
||||
if err != nil {
|
||||
return "", errs.Wrap(err)
|
||||
}
|
||||
projectRoot, err := field.OutDir(filepath.Join(filepath.Dir(executablePath), "../../../../.."))
|
||||
if err != nil {
|
||||
return "", errs.Wrap(err)
|
||||
}
|
||||
return projectRoot, nil
|
||||
}
|
||||
|
||||
func GetOptionsByNotification(cfg NotificationConfig, sendMessage *bool) msgprocessor.Options {
|
||||
opts := msgprocessor.NewOptions()
|
||||
|
||||
if sendMessage != nil {
|
||||
cfg.IsSendMsg = *sendMessage
|
||||
}
|
||||
if cfg.IsSendMsg {
|
||||
opts = msgprocessor.WithOptions(opts, msgprocessor.WithUnreadCount(true))
|
||||
}
|
||||
if cfg.OfflinePush.Enable {
|
||||
opts = msgprocessor.WithOptions(opts, msgprocessor.WithOfflinePush(true))
|
||||
}
|
||||
switch cfg.ReliabilityLevel {
|
||||
case constant.UnreliableNotification:
|
||||
case constant.ReliableNotificationNoMsg:
|
||||
opts = msgprocessor.WithOptions(opts, msgprocessor.WithHistory(true), msgprocessor.WithPersistent())
|
||||
}
|
||||
opts = msgprocessor.WithOptions(opts, msgprocessor.WithSendMsg(cfg.IsSendMsg))
|
||||
|
||||
return opts
|
||||
}
|
||||
|
||||
// initConfig loads configuration from a specified path into the provided config structure.
|
||||
// If the specified config file does not exist, it attempts to load from the project's default "config" directory.
|
||||
// It logs informative messages regarding the configuration path being used.
|
||||
func initConfig(config any, configName, configFolderPath string) error {
|
||||
configFolderPath = filepath.Join(configFolderPath, configName)
|
||||
_, err := os.Stat(configFolderPath)
|
||||
if err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
return errs.WrapMsg(err, "stat config path error", "config Folder Path", configFolderPath)
|
||||
}
|
||||
path, err := GetProjectRoot()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
configFolderPath = filepath.Join(path, "config", configName)
|
||||
}
|
||||
data, err := os.ReadFile(configFolderPath)
|
||||
if err != nil {
|
||||
return errs.WrapMsg(err, "read file error", "config Folder Path", configFolderPath)
|
||||
}
|
||||
if err = yaml.Unmarshal(data, config); err != nil {
|
||||
return errs.WrapMsg(err, "unmarshal yaml error", "config Folder Path", configFolderPath)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
25
pkg/common/convert/auth.go
Normal file
25
pkg/common/convert/auth.go
Normal file
@@ -0,0 +1,25 @@
|
||||
package convert
|
||||
|
||||
func TokenMapDB2Pb(tokenMapDB map[string]int) map[string]int32 {
|
||||
if tokenMapDB == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
tokenMapPB := make(map[string]int32, len(tokenMapDB))
|
||||
for k, v := range tokenMapDB {
|
||||
tokenMapPB[k] = int32(v)
|
||||
}
|
||||
return tokenMapPB
|
||||
}
|
||||
|
||||
func TokenMapPb2DB(tokenMapPB map[string]int32) map[string]int {
|
||||
if tokenMapPB == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
tokenMapDB := make(map[string]int, len(tokenMapPB))
|
||||
for k, v := range tokenMapPB {
|
||||
tokenMapDB[k] = int(v)
|
||||
}
|
||||
return tokenMapDB
|
||||
}
|
||||
56
pkg/common/convert/black.go
Normal file
56
pkg/common/convert/black.go
Normal file
@@ -0,0 +1,56 @@
|
||||
// Copyright © 2023 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package convert
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/model"
|
||||
|
||||
"git.imall.cloud/openim/protocol/sdkws"
|
||||
sdk "git.imall.cloud/openim/protocol/sdkws"
|
||||
)
|
||||
|
||||
func BlackDB2Pb(ctx context.Context, blackDBs []*model.Black, f func(ctx context.Context, userIDs []string) (map[string]*sdkws.UserInfo, error)) (blackPbs []*sdk.BlackInfo, err error) {
|
||||
if len(blackDBs) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
var userIDs []string
|
||||
for _, blackDB := range blackDBs {
|
||||
userIDs = append(userIDs, blackDB.BlockUserID)
|
||||
}
|
||||
userInfos, err := f(ctx, userIDs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, blackDB := range blackDBs {
|
||||
blackPb := &sdk.BlackInfo{
|
||||
OwnerUserID: blackDB.OwnerUserID,
|
||||
CreateTime: blackDB.CreateTime.Unix(),
|
||||
AddSource: blackDB.AddSource,
|
||||
Ex: blackDB.Ex,
|
||||
OperatorUserID: blackDB.OperatorUserID,
|
||||
BlackUserInfo: &sdkws.PublicUserInfo{
|
||||
UserID: userInfos[blackDB.BlockUserID].UserID,
|
||||
Nickname: userInfos[blackDB.BlockUserID].Nickname,
|
||||
FaceURL: userInfos[blackDB.BlockUserID].FaceURL,
|
||||
Ex: userInfos[blackDB.BlockUserID].Ex,
|
||||
UserType: userInfos[blackDB.BlockUserID].UserType,
|
||||
},
|
||||
}
|
||||
blackPbs = append(blackPbs, blackPb)
|
||||
}
|
||||
return blackPbs, nil
|
||||
}
|
||||
61
pkg/common/convert/conversation.go
Normal file
61
pkg/common/convert/conversation.go
Normal file
@@ -0,0 +1,61 @@
|
||||
// Copyright © 2023 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package convert
|
||||
|
||||
import (
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/model"
|
||||
"git.imall.cloud/openim/protocol/conversation"
|
||||
"github.com/openimsdk/tools/utils/datautil"
|
||||
)
|
||||
|
||||
func ConversationDB2Pb(conversationDB *model.Conversation) *conversation.Conversation {
|
||||
conversationPB := &conversation.Conversation{}
|
||||
conversationPB.LatestMsgDestructTime = conversationDB.LatestMsgDestructTime.UnixMilli()
|
||||
if err := datautil.CopyStructFields(conversationPB, conversationDB); err != nil {
|
||||
return nil
|
||||
}
|
||||
return conversationPB
|
||||
}
|
||||
|
||||
func ConversationsDB2Pb(conversationsDB []*model.Conversation) (conversationsPB []*conversation.Conversation) {
|
||||
for _, conversationDB := range conversationsDB {
|
||||
conversationPB := &conversation.Conversation{}
|
||||
if err := datautil.CopyStructFields(conversationPB, conversationDB); err != nil {
|
||||
continue
|
||||
}
|
||||
conversationPB.LatestMsgDestructTime = conversationDB.LatestMsgDestructTime.UnixMilli()
|
||||
conversationsPB = append(conversationsPB, conversationPB)
|
||||
}
|
||||
return conversationsPB
|
||||
}
|
||||
|
||||
func ConversationPb2DB(conversationPB *conversation.Conversation) *model.Conversation {
|
||||
conversationDB := &model.Conversation{}
|
||||
if err := datautil.CopyStructFields(conversationDB, conversationPB); err != nil {
|
||||
return nil
|
||||
}
|
||||
return conversationDB
|
||||
}
|
||||
|
||||
func ConversationsPb2DB(conversationsPB []*conversation.Conversation) (conversationsDB []*model.Conversation) {
|
||||
for _, conversationPB := range conversationsPB {
|
||||
conversationDB := &model.Conversation{}
|
||||
if err := datautil.CopyStructFields(conversationDB, conversationPB); err != nil {
|
||||
continue
|
||||
}
|
||||
conversationsDB = append(conversationsDB, conversationDB)
|
||||
}
|
||||
return conversationsDB
|
||||
}
|
||||
15
pkg/common/convert/doc.go
Normal file
15
pkg/common/convert/doc.go
Normal file
@@ -0,0 +1,15 @@
|
||||
// Copyright © 2024 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package convert // import "git.imall.cloud/openim/open-im-server-deploy/pkg/common/convert"
|
||||
171
pkg/common/convert/friend.go
Normal file
171
pkg/common/convert/friend.go
Normal file
@@ -0,0 +1,171 @@
|
||||
// Copyright © 2023 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package convert
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/model"
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/notification/common_user"
|
||||
"git.imall.cloud/openim/protocol/relation"
|
||||
|
||||
"git.imall.cloud/openim/protocol/sdkws"
|
||||
"github.com/openimsdk/tools/utils/datautil"
|
||||
"github.com/openimsdk/tools/utils/timeutil"
|
||||
)
|
||||
|
||||
func FriendPb2DB(friend *sdkws.FriendInfo) *model.Friend {
|
||||
dbFriend := &model.Friend{}
|
||||
err := datautil.CopyStructFields(dbFriend, friend)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
dbFriend.FriendUserID = friend.FriendUser.UserID
|
||||
dbFriend.CreateTime = timeutil.UnixSecondToTime(friend.CreateTime)
|
||||
return dbFriend
|
||||
}
|
||||
|
||||
func FriendDB2Pb(ctx context.Context, friendDB *model.Friend, getUsers func(ctx context.Context, userIDs []string) (map[string]*sdkws.UserInfo, error)) (*sdkws.FriendInfo, error) {
|
||||
users, err := getUsers(ctx, []string{friendDB.FriendUserID})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
user, ok := users[friendDB.FriendUserID]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("user not found: %s", friendDB.FriendUserID)
|
||||
}
|
||||
|
||||
return &sdkws.FriendInfo{
|
||||
FriendUser: user,
|
||||
CreateTime: friendDB.CreateTime.Unix(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func FriendsDB2Pb(ctx context.Context, friendsDB []*model.Friend, getUsers func(ctx context.Context, userIDs []string) (map[string]*sdkws.UserInfo, error)) (friendsPb []*sdkws.FriendInfo, err error) {
|
||||
if len(friendsDB) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
var userID []string
|
||||
for _, friendDB := range friendsDB {
|
||||
userID = append(userID, friendDB.FriendUserID)
|
||||
}
|
||||
|
||||
users, err := getUsers(ctx, userID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, friend := range friendsDB {
|
||||
friendPb := &sdkws.FriendInfo{FriendUser: &sdkws.UserInfo{}}
|
||||
err := datautil.CopyStructFields(friendPb, friend)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
friendPb.FriendUser.UserID = users[friend.FriendUserID].UserID
|
||||
friendPb.FriendUser.Nickname = users[friend.FriendUserID].Nickname
|
||||
friendPb.FriendUser.FaceURL = users[friend.FriendUserID].FaceURL
|
||||
friendPb.FriendUser.Ex = users[friend.FriendUserID].Ex
|
||||
friendPb.CreateTime = friend.CreateTime.Unix()
|
||||
friendPb.IsPinned = friend.IsPinned
|
||||
friendsPb = append(friendsPb, friendPb)
|
||||
}
|
||||
return friendsPb, nil
|
||||
}
|
||||
|
||||
func FriendOnlyDB2PbOnly(friendsDB []*model.Friend) []*relation.FriendInfoOnly {
|
||||
return datautil.Slice(friendsDB, func(f *model.Friend) *relation.FriendInfoOnly {
|
||||
return &relation.FriendInfoOnly{
|
||||
OwnerUserID: f.OwnerUserID,
|
||||
FriendUserID: f.FriendUserID,
|
||||
Remark: f.Remark,
|
||||
CreateTime: f.CreateTime.UnixMilli(),
|
||||
AddSource: f.AddSource,
|
||||
OperatorUserID: f.OperatorUserID,
|
||||
Ex: f.Ex,
|
||||
IsPinned: f.IsPinned,
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func FriendRequestDB2Pb(ctx context.Context, friendRequests []*model.FriendRequest, getUsers func(ctx context.Context, userIDs []string) (map[string]common_user.CommonUser, error)) ([]*sdkws.FriendRequest, error) {
|
||||
if len(friendRequests) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
userIDMap := make(map[string]struct{})
|
||||
for _, friendRequest := range friendRequests {
|
||||
userIDMap[friendRequest.ToUserID] = struct{}{}
|
||||
userIDMap[friendRequest.FromUserID] = struct{}{}
|
||||
}
|
||||
users, err := getUsers(ctx, datautil.Keys(userIDMap))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
res := make([]*sdkws.FriendRequest, 0, len(friendRequests))
|
||||
for _, friendRequest := range friendRequests {
|
||||
toUser := users[friendRequest.ToUserID]
|
||||
fromUser := users[friendRequest.FromUserID]
|
||||
res = append(res, &sdkws.FriendRequest{
|
||||
FromUserID: friendRequest.FromUserID,
|
||||
FromNickname: fromUser.GetNickname(),
|
||||
FromFaceURL: fromUser.GetFaceURL(),
|
||||
ToUserID: friendRequest.ToUserID,
|
||||
ToNickname: toUser.GetNickname(),
|
||||
ToFaceURL: toUser.GetFaceURL(),
|
||||
HandleResult: friendRequest.HandleResult,
|
||||
ReqMsg: friendRequest.ReqMsg,
|
||||
CreateTime: friendRequest.CreateTime.UnixMilli(),
|
||||
HandlerUserID: friendRequest.HandlerUserID,
|
||||
HandleMsg: friendRequest.HandleMsg,
|
||||
HandleTime: friendRequest.HandleTime.UnixMilli(),
|
||||
Ex: friendRequest.Ex,
|
||||
})
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// FriendPb2DBMap converts a FriendInfo protobuf object to a map suitable for database operations.
|
||||
// It only includes non-zero or non-empty fields in the map.
|
||||
func FriendPb2DBMap(friend *sdkws.FriendInfo) map[string]any {
|
||||
if friend == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
val := make(map[string]any)
|
||||
|
||||
// Assuming FriendInfo has similar fields to those in Friend.
|
||||
// Add or remove fields based on your actual FriendInfo and Friend structures.
|
||||
if friend.FriendUser != nil {
|
||||
if friend.FriendUser.UserID != "" {
|
||||
val["friend_user_id"] = friend.FriendUser.UserID
|
||||
}
|
||||
if friend.FriendUser.Nickname != "" {
|
||||
val["nickname"] = friend.FriendUser.Nickname
|
||||
}
|
||||
if friend.FriendUser.FaceURL != "" {
|
||||
val["face_url"] = friend.FriendUser.FaceURL
|
||||
}
|
||||
if friend.FriendUser.Ex != "" {
|
||||
val["ex"] = friend.FriendUser.Ex
|
||||
}
|
||||
}
|
||||
if friend.CreateTime != 0 {
|
||||
val["create_time"] = friend.CreateTime // You might need to convert this to a proper time format.
|
||||
}
|
||||
|
||||
// Include other fields from FriendInfo as needed, similar to the above pattern.
|
||||
|
||||
return val
|
||||
}
|
||||
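// Usage sketch (illustrative values): only non-empty fields end up in the map,
// which makes it suitable for partial update statements.
//
//	m := FriendPb2DBMap(&sdkws.FriendInfo{
//		FriendUser: &sdkws.UserInfo{UserID: "u1", Nickname: "Alice"},
//	})
//	// m == map[string]any{"friend_user_id": "u1", "nickname": "Alice"}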
174
pkg/common/convert/group.go
Normal file
174
pkg/common/convert/group.go
Normal file
@@ -0,0 +1,174 @@
|
||||
// Copyright © 2023 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package convert
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/model"
|
||||
|
||||
pbgroup "git.imall.cloud/openim/protocol/group"
|
||||
sdkws "git.imall.cloud/openim/protocol/sdkws"
|
||||
"github.com/openimsdk/tools/log"
|
||||
)
|
||||
|
||||
func Db2PbGroupInfo(m *model.Group, ownerUserID string, memberCount uint32) *sdkws.GroupInfo {
|
||||
return &sdkws.GroupInfo{
|
||||
GroupID: m.GroupID,
|
||||
GroupName: m.GroupName,
|
||||
Notification: m.Notification,
|
||||
Introduction: m.Introduction,
|
||||
FaceURL: m.FaceURL,
|
||||
OwnerUserID: ownerUserID,
|
||||
CreateTime: m.CreateTime.UnixMilli(),
|
||||
MemberCount: memberCount,
|
||||
Ex: m.Ex,
|
||||
Status: m.Status,
|
||||
CreatorUserID: m.CreatorUserID,
|
||||
GroupType: m.GroupType,
|
||||
NeedVerification: m.NeedVerification,
|
||||
LookMemberInfo: m.LookMemberInfo,
|
||||
ApplyMemberFriend: m.ApplyMemberFriend,
|
||||
NotificationUpdateTime: m.NotificationUpdateTime.UnixMilli(),
|
||||
NotificationUserID: m.NotificationUserID,
|
||||
}
|
||||
}
|
||||
|
||||
func Pb2DbGroupRequest(req *pbgroup.GroupApplicationResponseReq, handleUserID string) *model.GroupRequest {
|
||||
return &model.GroupRequest{
|
||||
UserID: req.FromUserID,
|
||||
GroupID: req.GroupID,
|
||||
HandleResult: req.HandleResult,
|
||||
HandledMsg: req.HandledMsg,
|
||||
HandleUserID: handleUserID,
|
||||
HandledTime: time.Now(),
|
||||
}
|
||||
}
|
||||
|
||||
func Db2PbCMSGroup(m *model.Group, ownerUserID string, ownerUserName string, memberCount uint32) *pbgroup.CMSGroup {
|
||||
return &pbgroup.CMSGroup{
|
||||
GroupInfo: Db2PbGroupInfo(m, ownerUserID, memberCount),
|
||||
GroupOwnerUserID: ownerUserID,
|
||||
GroupOwnerUserName: ownerUserName,
|
||||
}
|
||||
}
|
||||
|
||||
// Db2PbGroupMember converts a database group member model into the protobuf group member info.
|
||||
// The returned GroupMemberFullInfo carries the following mute-related field:
|
||||
// - MuteEndTime: the time the mute ends, as a millisecond timestamp
|
||||
// Whether a member is muted: MuteEndTime >= current timestamp (ms) means the member is currently muted.
|
||||
// Remaining mute time: max(0, MuteEndTime - current timestamp) / 1000 gives the remaining seconds.
|
||||
func Db2PbGroupMember(m *model.GroupMember) *sdkws.GroupMemberFullInfo {
|
||||
muteEndTime := int64(0)
|
||||
if !m.MuteEndTime.IsZero() && m.MuteEndTime.After(time.Unix(0, 0)) {
|
||||
muteEndTime = m.MuteEndTime.UnixMilli()
|
||||
// Log the mute end time read from the database, to help diagnose unexpected auto-mute issues
|
||||
now := time.Now()
|
||||
if muteEndTime >= now.UnixMilli() {
|
||||
// Only log when the member is actually muted, to avoid excessive logging
|
||||
log.ZInfo(context.Background(), "Db2PbGroupMember: found muted user in database",
|
||||
"groupID", m.GroupID,
|
||||
"userID", m.UserID,
|
||||
"muteEndTimeFromDB", m.MuteEndTime.Format(time.RFC3339),
|
||||
"muteEndTimeTimestamp", muteEndTime,
|
||||
"now", now.Format(time.RFC3339),
|
||||
"mutedDurationSeconds", (muteEndTime-now.UnixMilli())/1000,
|
||||
"isZero", m.MuteEndTime.IsZero(),
|
||||
"afterUnixZero", m.MuteEndTime.After(time.Unix(0, 0)))
|
||||
}
|
||||
}
|
||||
return &sdkws.GroupMemberFullInfo{
|
||||
GroupID: m.GroupID,
|
||||
UserID: m.UserID,
|
||||
RoleLevel: m.RoleLevel,
|
||||
JoinTime: m.JoinTime.UnixMilli(),
|
||||
Nickname: m.Nickname,
|
||||
FaceURL: m.FaceURL,
|
||||
// AppMangerLevel: m.AppMangerLevel,
|
||||
JoinSource: m.JoinSource,
|
||||
OperatorUserID: m.OperatorUserID,
|
||||
Ex: m.Ex,
|
||||
MuteEndTime: muteEndTime,
|
||||
InviterUserID: m.InviterUserID,
|
||||
}
|
||||
}
|
||||
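// A small sketch of the mute check described above; muteRemainingSeconds is a
// hypothetical helper, not part of the original API. A member is muted while
// MuteEndTime lies in the future, and the remaining duration follows from the
// millisecond timestamp.
func muteRemainingSeconds(member *sdkws.GroupMemberFullInfo) int64 {
	nowMs := time.Now().UnixMilli()
	if member.MuteEndTime < nowMs {
		return 0 // not muted, or the mute has already expired
	}
	return (member.MuteEndTime - nowMs) / 1000
}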
|
||||
func Db2PbGroupRequest(m *model.GroupRequest, user *sdkws.UserInfo, group *sdkws.GroupInfo) *sdkws.GroupRequest {
|
||||
var pu *sdkws.PublicUserInfo
|
||||
if user != nil {
|
||||
pu = &sdkws.PublicUserInfo{
|
||||
UserID: user.UserID,
|
||||
Nickname: user.Nickname,
|
||||
FaceURL: user.FaceURL,
|
||||
Ex: user.Ex,
|
||||
UserType: user.UserType,
|
||||
}
|
||||
}
|
||||
return &sdkws.GroupRequest{
|
||||
UserInfo: pu,
|
||||
GroupInfo: group,
|
||||
HandleResult: m.HandleResult,
|
||||
ReqMsg: m.ReqMsg,
|
||||
HandleMsg: m.HandledMsg,
|
||||
ReqTime: m.ReqTime.UnixMilli(),
|
||||
HandleUserID: m.HandleUserID,
|
||||
HandleTime: m.HandledTime.UnixMilli(),
|
||||
Ex: m.Ex,
|
||||
JoinSource: m.JoinSource,
|
||||
InviterUserID: m.InviterUserID,
|
||||
}
|
||||
}
|
||||
|
||||
func Db2PbGroupAbstractInfo(
|
||||
groupID string,
|
||||
groupMemberNumber uint32,
|
||||
groupMemberListHash uint64,
|
||||
) *pbgroup.GroupAbstractInfo {
|
||||
return &pbgroup.GroupAbstractInfo{
|
||||
GroupID: groupID,
|
||||
GroupMemberNumber: groupMemberNumber,
|
||||
GroupMemberListHash: groupMemberListHash,
|
||||
}
|
||||
}
|
||||
|
||||
func Pb2DBGroupInfo(m *sdkws.GroupInfo) *model.Group {
|
||||
return &model.Group{
|
||||
GroupID: m.GroupID,
|
||||
GroupName: m.GroupName,
|
||||
Notification: m.Notification,
|
||||
Introduction: m.Introduction,
|
||||
FaceURL: m.FaceURL,
|
||||
CreateTime: time.Now(),
|
||||
Ex: m.Ex,
|
||||
Status: m.Status,
|
||||
CreatorUserID: m.CreatorUserID,
|
||||
GroupType: m.GroupType,
|
||||
NeedVerification: m.NeedVerification,
|
||||
LookMemberInfo: m.LookMemberInfo,
|
||||
ApplyMemberFriend: m.ApplyMemberFriend,
|
||||
NotificationUpdateTime: time.UnixMilli(m.NotificationUpdateTime),
|
||||
NotificationUserID: m.NotificationUserID,
|
||||
}
|
||||
}
|
||||
|
||||
// func Pb2DbGroupMember(m *sdkws.UserInfo) *relation.GroupMember {
|
||||
// return &relation.GroupMember{
|
||||
// UserID: m.UserID,
|
||||
// Nickname: m.Nickname,
|
||||
// FaceURL: m.FaceURL,
|
||||
// Ex: m.Ex,
|
||||
// }
|
||||
//}
|
||||
98
pkg/common/convert/msg.go
Normal file
98
pkg/common/convert/msg.go
Normal file
@@ -0,0 +1,98 @@
|
||||
// Copyright © 2023 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package convert
|
||||
|
||||
import (
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/model"
|
||||
"git.imall.cloud/openim/protocol/constant"
|
||||
"git.imall.cloud/openim/protocol/sdkws"
|
||||
)
|
||||
|
||||
func MsgPb2DB(msg *sdkws.MsgData) *model.MsgDataModel {
|
||||
if msg == nil {
|
||||
return nil
|
||||
}
|
||||
var msgDataModel model.MsgDataModel
|
||||
msgDataModel.SendID = msg.SendID
|
||||
msgDataModel.RecvID = msg.RecvID
|
||||
msgDataModel.GroupID = msg.GroupID
|
||||
msgDataModel.ClientMsgID = msg.ClientMsgID
|
||||
msgDataModel.ServerMsgID = msg.ServerMsgID
|
||||
msgDataModel.SenderPlatformID = msg.SenderPlatformID
|
||||
msgDataModel.SenderNickname = msg.SenderNickname
|
||||
msgDataModel.SenderFaceURL = msg.SenderFaceURL
|
||||
msgDataModel.SessionType = msg.SessionType
|
||||
msgDataModel.MsgFrom = msg.MsgFrom
|
||||
msgDataModel.ContentType = msg.ContentType
|
||||
msgDataModel.Content = string(msg.Content)
|
||||
msgDataModel.Seq = msg.Seq
|
||||
msgDataModel.SendTime = msg.SendTime
|
||||
msgDataModel.CreateTime = msg.CreateTime
|
||||
msgDataModel.Status = msg.Status
|
||||
msgDataModel.Options = msg.Options
|
||||
if msg.OfflinePushInfo != nil {
|
||||
msgDataModel.OfflinePush = &model.OfflinePushModel{
|
||||
Title: msg.OfflinePushInfo.Title,
|
||||
Desc: msg.OfflinePushInfo.Desc,
|
||||
Ex: msg.OfflinePushInfo.Ex,
|
||||
IOSPushSound: msg.OfflinePushInfo.IOSPushSound,
|
||||
IOSBadgeCount: msg.OfflinePushInfo.IOSBadgeCount,
|
||||
}
|
||||
}
|
||||
msgDataModel.AtUserIDList = msg.AtUserIDList
|
||||
msgDataModel.AttachedInfo = msg.AttachedInfo
|
||||
msgDataModel.Ex = msg.Ex
|
||||
return &msgDataModel
|
||||
}
|
||||
|
||||
func MsgDB2Pb(msgModel *model.MsgDataModel) *sdkws.MsgData {
|
||||
if msgModel == nil {
|
||||
return nil
|
||||
}
|
||||
var msg sdkws.MsgData
|
||||
msg.SendID = msgModel.SendID
|
||||
msg.RecvID = msgModel.RecvID
|
||||
msg.GroupID = msgModel.GroupID
|
||||
msg.ClientMsgID = msgModel.ClientMsgID
|
||||
msg.ServerMsgID = msgModel.ServerMsgID
|
||||
msg.SenderPlatformID = msgModel.SenderPlatformID
|
||||
msg.SenderNickname = msgModel.SenderNickname
|
||||
msg.SenderFaceURL = msgModel.SenderFaceURL
|
||||
msg.SessionType = msgModel.SessionType
|
||||
msg.MsgFrom = msgModel.MsgFrom
|
||||
msg.ContentType = msgModel.ContentType
|
||||
msg.Content = []byte(msgModel.Content)
|
||||
msg.Seq = msgModel.Seq
|
||||
msg.SendTime = msgModel.SendTime
|
||||
msg.CreateTime = msgModel.CreateTime
|
||||
msg.Status = msgModel.Status
|
||||
if msgModel.SessionType == constant.SingleChatType {
|
||||
msg.IsRead = msgModel.IsRead
|
||||
}
|
||||
msg.Options = msgModel.Options
|
||||
if msgModel.OfflinePush != nil {
|
||||
msg.OfflinePushInfo = &sdkws.OfflinePushInfo{
|
||||
Title: msgModel.OfflinePush.Title,
|
||||
Desc: msgModel.OfflinePush.Desc,
|
||||
Ex: msgModel.OfflinePush.Ex,
|
||||
IOSPushSound: msgModel.OfflinePush.IOSPushSound,
|
||||
IOSBadgeCount: msgModel.OfflinePush.IOSBadgeCount,
|
||||
}
|
||||
}
|
||||
msg.AtUserIDList = msgModel.AtUserIDList
|
||||
msg.AttachedInfo = msgModel.AttachedInfo
|
||||
msg.Ex = msgModel.Ex
|
||||
return &msg
|
||||
}
|
||||
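// Round-trip sketch (illustrative): MsgPb2DB stores Content as a string and
// MsgDB2Pb restores it as bytes; note that IsRead is only copied back for
// single-chat sessions.
//
//	stored := MsgPb2DB(&sdkws.MsgData{
//		SendID:      "u1",
//		RecvID:      "u2",
//		SessionType: constant.SingleChatType,
//		Content:     []byte(`{"text":"hi"}`),
//	})
//	restored := MsgDB2Pb(stored)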
109
pkg/common/convert/user.go
Normal file
109
pkg/common/convert/user.go
Normal file
@@ -0,0 +1,109 @@
|
||||
// Copyright © 2023 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package convert
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
relationtb "git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/model"
|
||||
"github.com/openimsdk/tools/utils/datautil"
|
||||
|
||||
"git.imall.cloud/openim/protocol/sdkws"
|
||||
)
|
||||
|
||||
func UserDB2Pb(user *relationtb.User) *sdkws.UserInfo {
|
||||
return &sdkws.UserInfo{
|
||||
UserID: user.UserID,
|
||||
Nickname: user.Nickname,
|
||||
FaceURL: user.FaceURL,
|
||||
Ex: user.Ex,
|
||||
CreateTime: user.CreateTime.UnixMilli(),
|
||||
AppMangerLevel: user.AppMangerLevel,
|
||||
GlobalRecvMsgOpt: user.GlobalRecvMsgOpt,
|
||||
UserType: user.UserType,
|
||||
UserFlag: user.UserFlag,
|
||||
}
|
||||
}
|
||||
|
||||
func UsersDB2Pb(users []*relationtb.User) []*sdkws.UserInfo {
|
||||
return datautil.Slice(users, UserDB2Pb)
|
||||
}
|
||||
|
||||
func UserPb2DB(user *sdkws.UserInfo) *relationtb.User {
|
||||
return &relationtb.User{
|
||||
UserID: user.UserID,
|
||||
Nickname: user.Nickname,
|
||||
FaceURL: user.FaceURL,
|
||||
Ex: user.Ex,
|
||||
CreateTime: time.UnixMilli(user.CreateTime),
|
||||
AppMangerLevel: user.AppMangerLevel,
|
||||
GlobalRecvMsgOpt: user.GlobalRecvMsgOpt,
|
||||
UserType: user.UserType,
|
||||
UserFlag: user.UserFlag,
|
||||
}
|
||||
}
|
||||
|
||||
func UserPb2DBMap(user *sdkws.UserInfo) map[string]any {
|
||||
if user == nil {
|
||||
return nil
|
||||
}
|
||||
val := make(map[string]any)
|
||||
fields := map[string]any{
|
||||
"nickname": user.Nickname,
|
||||
"face_url": user.FaceURL,
|
||||
"ex": user.Ex,
|
||||
"app_manager_level": user.AppMangerLevel,
|
||||
"global_recv_msg_opt": user.GlobalRecvMsgOpt,
|
||||
"user_type": user.UserType,
|
||||
"user_flag": user.UserFlag,
|
||||
}
|
||||
for key, value := range fields {
|
||||
if v, ok := value.(string); ok && v != "" {
|
||||
val[key] = v
|
||||
} else if v, ok := value.(int32); ok {
|
||||
// For int32 fields, write the value even when it is 0
|
||||
val[key] = v
|
||||
}
|
||||
}
|
||||
return val
|
||||
}
|
||||
func UserPb2DBMapEx(user *sdkws.UserInfoWithEx) map[string]any {
|
||||
if user == nil {
|
||||
return nil
|
||||
}
|
||||
val := make(map[string]any)
|
||||
|
||||
// Map fields from UserInfoWithEx to val
|
||||
if user.Nickname != nil {
|
||||
val["nickname"] = user.Nickname.Value
|
||||
}
|
||||
if user.FaceURL != nil {
|
||||
val["face_url"] = user.FaceURL.Value
|
||||
}
|
||||
if user.Ex != nil {
|
||||
val["ex"] = user.Ex.Value
|
||||
}
|
||||
if user.GlobalRecvMsgOpt != nil {
|
||||
val["global_recv_msg_opt"] = user.GlobalRecvMsgOpt.Value
|
||||
}
|
||||
if user.UserType != nil {
|
||||
val["user_type"] = user.UserType.Value
|
||||
}
|
||||
if user.UserFlag != nil {
|
||||
val["user_flag"] = user.UserFlag.Value
|
||||
}
|
||||
|
||||
return val
|
||||
}
|
||||
87
pkg/common/convert/user_test.go
Normal file
87
pkg/common/convert/user_test.go
Normal file
@@ -0,0 +1,87 @@
|
||||
// Copyright © 2023 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package convert
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
relationtb "git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/model"
|
||||
|
||||
"git.imall.cloud/openim/protocol/sdkws"
|
||||
)
|
||||
|
||||
func TestUsersDB2Pb(t *testing.T) {
|
||||
type args struct {
|
||||
users []*relationtb.User
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
wantResult []*sdkws.UserInfo
|
||||
}{
|
||||
// TODO: Add test cases.
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if gotResult := UsersDB2Pb(tt.args.users); !reflect.DeepEqual(gotResult, tt.wantResult) {
|
||||
t.Errorf("UsersDB2Pb() = %v, want %v", gotResult, tt.wantResult)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestUserPb2DB(t *testing.T) {
|
||||
type args struct {
|
||||
user *sdkws.UserInfo
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want *relationtb.User
|
||||
}{
|
||||
// TODO: Add test cases.
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := UserPb2DB(tt.args.user); !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("UserPb2DB() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestUserPb2DBMap(t *testing.T) {
|
||||
user := &sdkws.UserInfo{
|
||||
Nickname: "TestUser",
|
||||
FaceURL: "http://openim.io/logo.jpg",
|
||||
Ex: "Extra Data",
|
||||
AppMangerLevel: 1,
|
||||
GlobalRecvMsgOpt: 2,
|
||||
}
|
||||
|
||||
expected := map[string]any{
|
||||
"nickname": "TestUser",
|
||||
"face_url": "http://openim.io/logo.jpg",
|
||||
"ex": "Extra Data",
|
||||
"app_manager_level": int32(1),
|
||||
"global_recv_msg_opt": int32(2),
|
||||
}
|
||||
|
||||
result := UserPb2DBMap(user)
|
||||
if !reflect.DeepEqual(result, expected) {
|
||||
t.Errorf("UserPb2DBMap returned unexpected map. Got %v, want %v", result, expected)
|
||||
}
|
||||
}
|
||||
96
pkg/common/discovery/direct/direct_resolver.go
Normal file
96
pkg/common/discovery/direct/direct_resolver.go
Normal file
@@ -0,0 +1,96 @@
|
||||
// Copyright © 2024 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package direct
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math/rand"
|
||||
"strings"
|
||||
|
||||
"github.com/openimsdk/tools/log"
|
||||
"google.golang.org/grpc/resolver"
|
||||
)
|
||||
|
||||
const (
|
||||
slashSeparator = "/"
|
||||
// EndpointSepChar is the separator char in endpoints.
|
||||
EndpointSepChar = ','
|
||||
|
||||
subsetSize = 32
|
||||
scheme = "direct"
|
||||
)
|
||||
|
||||
type ResolverDirect struct {
|
||||
}
|
||||
|
||||
func NewResolverDirect() *ResolverDirect {
|
||||
return &ResolverDirect{}
|
||||
}
|
||||
|
||||
func (rd *ResolverDirect) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (
|
||||
resolver.Resolver, error) {
|
||||
log.ZDebug(context.Background(), "Build", "target", target)
|
||||
endpoints := strings.FieldsFunc(GetEndpoints(target), func(r rune) bool {
|
||||
return r == EndpointSepChar
|
||||
})
|
||||
endpoints = subset(endpoints, subsetSize)
|
||||
addrs := make([]resolver.Address, 0, len(endpoints))
|
||||
|
||||
for _, val := range endpoints {
|
||||
addrs = append(addrs, resolver.Address{
|
||||
Addr: val,
|
||||
})
|
||||
}
|
||||
if err := cc.UpdateState(resolver.State{
|
||||
Addresses: addrs,
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &nopResolver{cc: cc}, nil
|
||||
}
|
||||
func init() {
|
||||
resolver.Register(&ResolverDirect{})
|
||||
}
|
||||
func (rd *ResolverDirect) Scheme() string {
|
||||
return scheme // return your custom scheme name
|
||||
}
|
||||
|
||||
// GetEndpoints returns the endpoints from the given target.
|
||||
func GetEndpoints(target resolver.Target) string {
|
||||
return strings.Trim(target.URL.Path, slashSeparator)
|
||||
}
|
||||
func subset(set []string, sub int) []string {
|
||||
rand.Shuffle(len(set), func(i, j int) {
|
||||
set[i], set[j] = set[j], set[i]
|
||||
})
|
||||
if len(set) <= sub {
|
||||
return set
|
||||
}
|
||||
|
||||
return set[:sub]
|
||||
}
|
||||
|
||||
type nopResolver struct {
|
||||
cc resolver.ClientConn
|
||||
}
|
||||
|
||||
func (n nopResolver) ResolveNow(options resolver.ResolveNowOptions) {
|
||||
|
||||
}
|
||||
|
||||
func (n nopResolver) Close() {
|
||||
|
||||
}
|
||||
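// A hedged usage sketch: with this resolver registered, a client dials a
// comma-separated endpoint list through the "direct" scheme. The addresses are
// placeholders, and the grpc/insecure imports are assumed on the caller side.
//
//	conn, err := grpc.Dial(
//		"direct:///10.0.0.1:10110,10.0.0.2:10110",
//		grpc.WithTransportCredentials(insecure.NewCredentials()),
//	)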
174
pkg/common/discovery/direct/directconn.go
Normal file
174
pkg/common/discovery/direct/directconn.go
Normal file
@@ -0,0 +1,174 @@
|
||||
// Copyright © 2024 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package direct
|
||||
|
||||
//import (
|
||||
// "context"
|
||||
// "fmt"
|
||||
//
|
||||
// config2 "git.imall.cloud/openim/open-im-server-deploy/pkg/common/config"
|
||||
// "github.com/openimsdk/tools/errs"
|
||||
// "google.golang.org/grpc"
|
||||
// "google.golang.org/grpc/credentials/insecure"
|
||||
//)
|
||||
//
|
||||
//type ServiceAddresses map[string][]int
|
||||
//
|
||||
//func getServiceAddresses(rpcRegisterName *config2.RpcRegisterName,
|
||||
// rpcPort *config2.RpcPort, longConnSvrPort []int) ServiceAddresses {
|
||||
// return ServiceAddresses{
|
||||
// rpcRegisterName.OpenImUserName: rpcPort.OpenImUserPort,
|
||||
// rpcRegisterName.OpenImFriendName: rpcPort.OpenImFriendPort,
|
||||
// rpcRegisterName.OpenImMsgName: rpcPort.OpenImMessagePort,
|
||||
// rpcRegisterName.OpenImMessageGatewayName: longConnSvrPort,
|
||||
// rpcRegisterName.OpenImGroupName: rpcPort.OpenImGroupPort,
|
||||
// rpcRegisterName.OpenImAuthName: rpcPort.OpenImAuthPort,
|
||||
// rpcRegisterName.OpenImPushName: rpcPort.OpenImPushPort,
|
||||
// rpcRegisterName.OpenImConversationName: rpcPort.OpenImConversationPort,
|
||||
// rpcRegisterName.OpenImThirdName: rpcPort.OpenImThirdPort,
|
||||
// }
|
||||
//}
|
||||
//
|
||||
//type ConnDirect struct {
|
||||
// additionalOpts []grpc.DialOption
|
||||
// currentServiceAddress string
|
||||
// conns map[string][]*grpc.ClientConn
|
||||
// resolverDirect *ResolverDirect
|
||||
// config *config2.GlobalConfig
|
||||
//}
|
||||
//
|
||||
//func (cd *ConnDirect) GetClientLocalConns() map[string][]*grpc.ClientConn {
|
||||
// return nil
|
||||
//}
|
||||
//
|
||||
//func (cd *ConnDirect) GetUserIdHashGatewayHost(ctx context.Context, userId string) (string, error) {
|
||||
// return "", nil
|
||||
//}
|
||||
//
|
||||
//func (cd *ConnDirect) Register(serviceName, host string, port int, opts ...grpc.DialOption) error {
|
||||
// return nil
|
||||
//}
|
||||
//
|
||||
//func (cd *ConnDirect) UnRegister() error {
|
||||
// return nil
|
||||
//}
|
||||
//
|
||||
//func (cd *ConnDirect) CreateRpcRootNodes(serviceNames []string) error {
|
||||
// return nil
|
||||
//}
|
||||
//
|
||||
//func (cd *ConnDirect) RegisterConf2Registry(key string, conf []byte) error {
|
||||
// return nil
|
||||
//}
|
||||
//
|
||||
//func (cd *ConnDirect) GetConfFromRegistry(key string) ([]byte, error) {
|
||||
// return nil, nil
|
||||
//}
|
||||
//
|
||||
//func (cd *ConnDirect) Close() {
|
||||
//
|
||||
//}
|
||||
//
|
||||
//func NewConnDirect(config *config2.GlobalConfig) (*ConnDirect, error) {
|
||||
// return &ConnDirect{
|
||||
// conns: make(map[string][]*grpc.ClientConn),
|
||||
// resolverDirect: NewResolverDirect(),
|
||||
// config: config,
|
||||
// }, nil
|
||||
//}
|
||||
//
|
||||
//func (cd *ConnDirect) GetConns(ctx context.Context,
|
||||
// serviceName string, opts ...grpc.DialOption) ([]*grpc.ClientConn, error) {
|
||||
//
|
||||
// if conns, exists := cd.conns[serviceName]; exists {
|
||||
// return conns, nil
|
||||
// }
|
||||
// ports := getServiceAddresses(&cd.config.RpcRegisterName,
|
||||
// &cd.config.RpcPort, cd.config.LongConnSvr.OpenImMessageGatewayPort)[serviceName]
|
||||
// var connections []*grpc.ClientConn
|
||||
// for _, port := range ports {
|
||||
// conn, err := cd.dialServiceWithoutResolver(ctx, fmt.Sprintf(cd.config.Rpc.ListenIP+":%d", port), append(cd.additionalOpts, opts...)...)
|
||||
// if err != nil {
|
||||
// return nil, errs.Wrap(fmt.Errorf("connect to port %d failed,serviceName %s, IP %s", port, serviceName, cd.config.Rpc.ListenIP))
|
||||
// }
|
||||
// connections = append(connections, conn)
|
||||
// }
|
||||
//
|
||||
// if len(connections) == 0 {
|
||||
// return nil, errs.New("no connections found for service", "serviceName", serviceName).Wrap()
|
||||
// }
|
||||
// return connections, nil
|
||||
//}
|
||||
//
|
||||
//func (cd *ConnDirect) GetConn(ctx context.Context, serviceName string, opts ...grpc.DialOption) (*grpc.ClientConn, error) {
|
||||
// // Get service addresses
|
||||
// addresses := getServiceAddresses(&cd.config.RpcRegisterName,
|
||||
// &cd.config.RpcPort, cd.config.LongConnSvr.OpenImMessageGatewayPort)
|
||||
// address, ok := addresses[serviceName]
|
||||
// if !ok {
|
||||
// return nil, errs.New("unknown service name", "serviceName", serviceName).Wrap()
|
||||
// }
|
||||
// var result string
|
||||
// for _, addr := range address {
|
||||
// if result != "" {
|
||||
// result = result + "," + fmt.Sprintf(cd.config.Rpc.ListenIP+":%d", addr)
|
||||
// } else {
|
||||
// result = fmt.Sprintf(cd.config.Rpc.ListenIP+":%d", addr)
|
||||
// }
|
||||
// }
|
||||
// // Try to dial a new connection
|
||||
// conn, err := cd.dialService(ctx, result, append(cd.additionalOpts, opts...)...)
|
||||
// if err != nil {
|
||||
// return nil, errs.WrapMsg(err, "address", result)
|
||||
// }
|
||||
//
|
||||
// // Store the new connection
|
||||
// cd.conns[serviceName] = append(cd.conns[serviceName], conn)
|
||||
// return conn, nil
|
||||
//}
|
||||
//
|
||||
//func (cd *ConnDirect) GetSelfConnTarget() string {
|
||||
// return cd.currentServiceAddress
|
||||
//}
|
||||
//
|
||||
//func (cd *ConnDirect) AddOption(opts ...grpc.DialOption) {
|
||||
// cd.additionalOpts = append(cd.additionalOpts, opts...)
|
||||
//}
|
||||
//
|
||||
//func (cd *ConnDirect) CloseConn(conn *grpc.ClientConn) {
|
||||
// if conn != nil {
|
||||
// conn.Close()
|
||||
// }
|
||||
//}
|
||||
//
|
||||
//func (cd *ConnDirect) dialService(ctx context.Context, address string, opts ...grpc.DialOption) (*grpc.ClientConn, error) {
|
||||
// options := append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()))
|
||||
// conn, err := grpc.DialContext(ctx, cd.resolverDirect.Scheme()+":///"+address, options...)
|
||||
//
|
||||
// if err != nil {
|
||||
// return nil, errs.WrapMsg(err, "address", address)
|
||||
// }
|
||||
// return conn, nil
|
||||
//}
|
||||
//
|
||||
//func (cd *ConnDirect) dialServiceWithoutResolver(ctx context.Context, address string, opts ...grpc.DialOption) (*grpc.ClientConn, error) {
|
||||
// options := append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()))
|
||||
// conn, err := grpc.DialContext(ctx, address, options...)
|
||||
//
|
||||
// if err != nil {
|
||||
// return nil, errs.Wrap(err)
|
||||
// }
|
||||
// return conn, nil
|
||||
//}
|
||||
15
pkg/common/discovery/direct/doc.go
Normal file
15
pkg/common/discovery/direct/doc.go
Normal file
@@ -0,0 +1,15 @@
|
||||
// Copyright © 2024 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package direct // import "git.imall.cloud/openim/open-im-server-deploy/pkg/common/discovery/direct"
|
||||
55
pkg/common/discovery/discoveryregister.go
Normal file
55
pkg/common/discovery/discoveryregister.go
Normal file
@@ -0,0 +1,55 @@
|
||||
// Copyright © 2023 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package discovery
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/config"
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/discovery/kubernetes"
|
||||
"github.com/openimsdk/tools/discovery"
|
||||
"github.com/openimsdk/tools/discovery/etcd"
|
||||
"github.com/openimsdk/tools/discovery/standalone"
|
||||
"github.com/openimsdk/tools/errs"
|
||||
"github.com/openimsdk/tools/utils/runtimeenv"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
// NewDiscoveryRegister creates a new service discovery and registry client based on the provided environment type.
|
||||
func NewDiscoveryRegister(confDiscovery *config.Discovery, watchNames []string) (discovery.SvcDiscoveryRegistry, error) {
|
||||
if config.Standalone() {
|
||||
return standalone.GetSvcDiscoveryRegistry(), nil
|
||||
}
|
||||
if runtimeenv.RuntimeEnvironment() == config.KUBERNETES {
|
||||
return kubernetes.NewKubernetesConnManager(confDiscovery.Kubernetes.Namespace, watchNames,
|
||||
grpc.WithDefaultCallOptions(
|
||||
grpc.MaxCallSendMsgSize(1024*1024*20),
|
||||
),
|
||||
)
|
||||
}
|
||||
|
||||
switch confDiscovery.Enable {
|
||||
case config.ETCD:
|
||||
return etcd.NewSvcDiscoveryRegistry(
|
||||
confDiscovery.Etcd.RootDirectory,
|
||||
confDiscovery.Etcd.Address,
|
||||
watchNames,
|
||||
etcd.WithDialTimeout(10*time.Second),
|
||||
etcd.WithMaxCallSendMsgSize(20*1024*1024),
|
||||
etcd.WithUsernameAndPassword(confDiscovery.Etcd.Username, confDiscovery.Etcd.Password))
|
||||
default:
|
||||
return nil, errs.New("unsupported discovery type", "type", confDiscovery.Enable).Wrap()
|
||||
}
|
||||
}
|
||||
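// Usage sketch (names are placeholders): the discovery config decides the
// backend, and watchNames lists the services whose endpoints should be watched.
//
//	// assuming confDiscovery was loaded from the discovery config file
//	reg, err := NewDiscoveryRegister(confDiscovery, []string{"user-rpc-service", "msg-rpc-service"})
//	if err != nil {
//		// handle error
//	}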
60
pkg/common/discovery/discoveryregister_test.go
Normal file
60
pkg/common/discovery/discoveryregister_test.go
Normal file
@@ -0,0 +1,60 @@
|
||||
// Copyright © 2023 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package discovery
|
||||
|
||||
import (
|
||||
"os"
|
||||
)
|
||||
|
||||
func setupTestEnvironment() {
|
||||
os.Setenv("ZOOKEEPER_SCHEMA", "openim")
|
||||
os.Setenv("ZOOKEEPER_ADDRESS", "172.28.0.1")
|
||||
os.Setenv("ZOOKEEPER_PORT", "12181")
|
||||
os.Setenv("ZOOKEEPER_USERNAME", "")
|
||||
os.Setenv("ZOOKEEPER_PASSWORD", "")
|
||||
}
|
||||
|
||||
//func TestNewDiscoveryRegister(t *testing.T) {
|
||||
// setupTestEnvironment()
|
||||
// conf := config.NewGlobalConfig()
|
||||
// tests := []struct {
|
||||
// envType string
|
||||
// gatewayName string
|
||||
// expectedError bool
|
||||
// expectedResult bool
|
||||
// }{
|
||||
// {"zookeeper", "MessageGateway", false, true},
|
||||
// {"k8s", "MessageGateway", false, true},
|
||||
// {"direct", "MessageGateway", false, true},
|
||||
// {"invalid", "MessageGateway", true, false},
|
||||
// }
|
||||
//
|
||||
// for _, test := range tests {
|
||||
// conf.Envs.Discovery = test.envType
|
||||
// conf.RpcRegisterName.OpenImMessageGatewayName = test.gatewayName
|
||||
// client, err := NewDiscoveryRegister(conf)
|
||||
//
|
||||
// if test.expectedError {
|
||||
// assert.Error(t, err)
|
||||
// } else {
|
||||
// assert.NoError(t, err)
|
||||
// if test.expectedResult {
|
||||
// assert.Implements(t, (*discovery.SvcDiscoveryRegistry)(nil), client)
|
||||
// } else {
|
||||
// assert.Nil(t, client)
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
//}
|
||||
15
pkg/common/discovery/doc.go
Normal file
15
pkg/common/discovery/doc.go
Normal file
@@ -0,0 +1,15 @@
|
||||
// Copyright © 2024 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package discovery // import "git.imall.cloud/openim/open-im-server-deploy/pkg/common/discovery"
|
||||
106
pkg/common/discovery/etcd/config_manager.go
Normal file
106
pkg/common/discovery/etcd/config_manager.go
Normal file
@@ -0,0 +1,106 @@
|
||||
package etcd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"os/exec"
|
||||
"runtime"
|
||||
"sync"
|
||||
"syscall"
|
||||
|
||||
"github.com/openimsdk/tools/errs"
|
||||
"github.com/openimsdk/tools/log"
|
||||
"github.com/openimsdk/tools/utils/datautil"
|
||||
clientv3 "go.etcd.io/etcd/client/v3"
|
||||
)
|
||||
|
||||
var (
|
||||
ShutDowns []func() error
|
||||
)
|
||||
|
||||
func RegisterShutDown(shutDown ...func() error) {
|
||||
ShutDowns = append(ShutDowns, shutDown...)
|
||||
}
|
||||
|
||||
type ConfigManager struct {
|
||||
client *clientv3.Client
|
||||
watchConfigNames []string
|
||||
lock sync.Mutex
|
||||
}
|
||||
|
||||
func BuildKey(s string) string {
|
||||
return ConfigKeyPrefix + s
|
||||
}
|
||||
|
||||
func NewConfigManager(client *clientv3.Client, configNames []string) *ConfigManager {
|
||||
return &ConfigManager{
|
||||
client: client,
|
||||
watchConfigNames: datautil.Batch(func(s string) string { return BuildKey(s) }, append(configNames, RestartKey))}
|
||||
}
|
||||
|
||||
func (c *ConfigManager) Watch(ctx context.Context) {
|
||||
chans := make([]clientv3.WatchChan, 0, len(c.watchConfigNames))
|
||||
for _, name := range c.watchConfigNames {
|
||||
chans = append(chans, c.client.Watch(ctx, name, clientv3.WithPrefix()))
|
||||
}
|
||||
|
||||
doWatch := func(watchChan clientv3.WatchChan) {
|
||||
for watchResp := range watchChan {
|
||||
if watchResp.Err() != nil {
|
||||
log.ZError(ctx, "watch err", errs.Wrap(watchResp.Err()))
|
||||
continue
|
||||
}
|
||||
for _, event := range watchResp.Events {
|
||||
if event.IsModify() {
|
||||
if datautil.Contain(string(event.Kv.Key), c.watchConfigNames...) {
|
||||
c.lock.Lock()
|
||||
err := restartServer(ctx)
|
||||
if err != nil {
|
||||
log.ZError(ctx, "restart server err", err)
|
||||
}
|
||||
c.lock.Unlock()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, ch := range chans {
|
||||
go doWatch(ch)
|
||||
}
|
||||
}
|
||||
|
||||
func restartServer(ctx context.Context) error {
|
||||
exePath, err := os.Executable()
|
||||
if err != nil {
|
||||
return errs.New("get executable path fail").Wrap()
|
||||
}
|
||||
|
||||
args := os.Args
|
||||
env := os.Environ()
|
||||
|
||||
cmd := exec.Command(exePath, args[1:]...)
|
||||
cmd.Env = env
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
cmd.Stdin = os.Stdin
|
||||
|
||||
if runtime.GOOS != "windows" {
|
||||
cmd.SysProcAttr = &syscall.SysProcAttr{}
|
||||
}
|
||||
log.ZInfo(ctx, "shutdown server")
|
||||
for _, f := range ShutDowns {
|
||||
if err = f(); err != nil {
|
||||
log.ZError(ctx, "shutdown fail", err)
|
||||
}
|
||||
}
|
||||
|
||||
log.ZInfo(ctx, "restart server")
|
||||
err = cmd.Start()
|
||||
if err != nil {
|
||||
return errs.New("restart server fail").Wrap()
|
||||
}
|
||||
log.ZInfo(ctx, "cmd start over")
|
||||
|
||||
os.Exit(0)
|
||||
return nil
|
||||
}
|
||||
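// Usage sketch (endpoint is a placeholder): watch the configured keys in etcd
// and restart the process when any of them is modified.
//
//	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"etcd:2379"}})
//	if err != nil {
//		// handle error
//	}
//	cm := NewConfigManager(cli, []string{"openim-api.yml", "log.yml"})
//	go cm.Watch(context.Background())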
9
pkg/common/discovery/etcd/const.go
Normal file
9
pkg/common/discovery/etcd/const.go
Normal file
@@ -0,0 +1,9 @@
|
||||
package etcd
|
||||
|
||||
const (
|
||||
ConfigKeyPrefix = "/open-im/config/"
|
||||
RestartKey = "restart"
|
||||
EnableConfigCenterKey = "enable-config-center"
|
||||
Enable = "enable"
|
||||
Disable = "disable"
|
||||
)
|
||||
15
pkg/common/discovery/kubernetes/doc.go
Normal file
15
pkg/common/discovery/kubernetes/doc.go
Normal file
@@ -0,0 +1,15 @@
|
||||
// Copyright © 2024 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package kubernetes // import "git.imall.cloud/openim/open-im-server-deploy/pkg/common/discovery/kubernetes"
|
||||
640
pkg/common/discovery/kubernetes/kubernetes.go
Normal file
640
pkg/common/discovery/kubernetes/kubernetes.go
Normal file
@@ -0,0 +1,640 @@
|
||||
package kubernetes
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/openimsdk/tools/discovery"
|
||||
"github.com/openimsdk/tools/log"
|
||||
"github.com/openimsdk/tools/utils/datautil"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/connectivity"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
"google.golang.org/grpc/keepalive"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
// addrConn stores a connection together with its address, so connections can be reused
|
||||
type addrConn struct {
|
||||
conn *grpc.ClientConn
|
||||
addr string
|
||||
reused bool // marks whether this connection was reused during the last refresh
|
||||
}
|
||||
|
||||
type KubernetesConnManager struct {
|
||||
clientset *kubernetes.Clientset
|
||||
namespace string
|
||||
dialOptions []grpc.DialOption
|
||||
|
||||
rpcTargets map[string]string
|
||||
selfTarget string
|
||||
|
||||
// watchNames limits the Endpoints watch to these services only
|
||||
watchNames []string
|
||||
|
||||
mu sync.RWMutex
|
||||
connMap map[string][]*addrConn
|
||||
}
|
||||
|
||||
// NewKubernetesConnManager creates a new connection manager that uses Kubernetes services for service discovery.
|
||||
func NewKubernetesConnManager(namespace string, watchNames []string, options ...grpc.DialOption) (*KubernetesConnManager, error) {
|
||||
ctx := context.Background()
|
||||
log.ZInfo(ctx, "K8s Discovery: Initializing connection manager", "namespace", namespace, "watchNames", watchNames)
|
||||
|
||||
// Load the in-cluster configuration
|
||||
config, err := rest.InClusterConfig()
|
||||
if err != nil {
|
||||
log.ZError(ctx, "K8s Discovery: Failed to create in-cluster config", err)
|
||||
return nil, fmt.Errorf("failed to create in-cluster config: %v", err)
|
||||
}
|
||||
log.ZDebug(ctx, "K8s Discovery: Successfully created in-cluster config")
|
||||
|
||||
// Create the Kubernetes API client
|
||||
clientset, err := kubernetes.NewForConfig(config)
|
||||
if err != nil {
|
||||
log.ZError(ctx, "K8s Discovery: Failed to create clientset", err)
|
||||
return nil, fmt.Errorf("failed to create clientset: %v", err)
|
||||
}
|
||||
log.ZDebug(ctx, "K8s Discovery: Successfully created clientset")
|
||||
|
||||
// Initialize the connection manager
|
||||
k := &KubernetesConnManager{
|
||||
clientset: clientset,
|
||||
namespace: namespace,
|
||||
dialOptions: options,
|
||||
connMap: make(map[string][]*addrConn),
|
||||
rpcTargets: make(map[string]string),
|
||||
watchNames: watchNames,
|
||||
}
|
||||
|
||||
// Start a background goroutine that watches Endpoints changes
|
||||
log.ZInfo(ctx, "K8s Discovery: Starting Endpoints watcher")
|
||||
go k.watchEndpoints()
|
||||
|
||||
log.ZInfo(ctx, "K8s Discovery: Connection manager initialized successfully")
|
||||
return k, nil
|
||||
}
|
||||
|
||||
// parseServiceName strips the port information from a service name.
|
||||
// For example: user-rpc-service:http-10320 -> user-rpc-service
|
||||
func parseServiceName(serviceName string) string {
|
||||
if idx := strings.Index(serviceName, ":"); idx != -1 {
|
||||
return serviceName[:idx]
|
||||
}
|
||||
return serviceName
|
||||
}
|
||||
|
||||
// initializeConns initializes all gRPC connections for the given service, reusing existing connections where possible
|
||||
func (k *KubernetesConnManager) initializeConns(serviceName string) error {
|
||||
ctx := context.Background()
|
||||
log.ZInfo(ctx, "K8s Discovery: Starting to initialize connections", "serviceName", serviceName)
|
||||
|
||||
// Step 1: get the Service port
|
||||
port, err := k.getServicePort(serviceName)
|
||||
if err != nil {
|
||||
log.ZError(ctx, "K8s Discovery: Failed to get service port", err, "serviceName", serviceName)
|
||||
return fmt.Errorf("failed to get service port: %w", err)
|
||||
}
|
||||
log.ZDebug(ctx, "K8s Discovery: Got service port", "serviceName", serviceName, "port", port)
|
||||
|
||||
// Step 2: snapshot the old connections and build an address-to-connection map for reuse
|
||||
k.mu.Lock()
|
||||
oldList := k.connMap[serviceName]
|
||||
addrMap := make(map[string]*addrConn, len(oldList))
|
||||
for _, ac := range oldList {
|
||||
addrMap[ac.addr] = ac
|
||||
ac.reused = false // reset the reuse flag
|
||||
}
|
||||
k.mu.Unlock()
|
||||
|
||||
log.ZDebug(ctx, "K8s Discovery: Old connections snapshot", "serviceName", serviceName, "count", len(oldList))
|
||||
|
||||
// Step 3: fetch the Endpoints for this Service
|
||||
endpoints, err := k.clientset.CoreV1().Endpoints(k.namespace).Get(
|
||||
ctx,
|
||||
serviceName,
|
||||
metav1.GetOptions{},
|
||||
)
|
||||
if err != nil {
|
||||
log.ZError(ctx, "K8s Discovery: Failed to get endpoints", err, "serviceName", serviceName)
|
||||
return fmt.Errorf("failed to get endpoints for service %s: %w", serviceName, err)
|
||||
}
|
||||
|
||||
// Count the endpoint addresses
|
||||
var totalAddresses int
|
||||
for _, subset := range endpoints.Subsets {
|
||||
totalAddresses += len(subset.Addresses)
|
||||
}
|
||||
log.ZDebug(ctx, "K8s Discovery: Found endpoint addresses", "serviceName", serviceName, "count", totalAddresses)
|
||||
|
||||
// Step 4: create or reuse a gRPC connection for each Pod IP
|
||||
var newList []*addrConn
|
||||
var reusedCount, createdCount int
|
||||
|
||||
for _, subset := range endpoints.Subsets {
|
||||
for _, address := range subset.Addresses {
|
||||
target := fmt.Sprintf("%s:%d", address.IP, port)
|
||||
|
||||
// Check whether an existing connection can be reused
|
||||
if ac, ok := addrMap[target]; ok {
|
||||
// Reuse the existing connection
|
||||
ac.reused = true
|
||||
newList = append(newList, ac)
|
||||
reusedCount++
|
||||
log.ZDebug(ctx, "K8s Discovery: Reusing existing connection", "serviceName", serviceName, "target", target)
|
||||
continue
|
||||
}
|
||||
|
||||
// Create a new connection
|
||||
log.ZDebug(ctx, "K8s Discovery: Creating new connection", "serviceName", serviceName, "target", target)
|
||||
conn, err := grpc.Dial(
|
||||
target,
|
||||
append(k.dialOptions,
|
||||
grpc.WithTransportCredentials(insecure.NewCredentials()),
|
||||
grpc.WithKeepaliveParams(keepalive.ClientParameters{
|
||||
Time: 10 * time.Second,
|
||||
Timeout: 3 * time.Second,
|
||||
PermitWithoutStream: true,
|
||||
}),
|
||||
)...,
|
||||
)
|
||||
if err != nil {
|
||||
log.ZWarn(ctx, "K8s Discovery: Failed to dial endpoint, skipping", err, "serviceName", serviceName, "target", target)
|
||||
// Skip unreachable endpoints instead of aborting the whole initialization
|
||||
continue
|
||||
}
|
||||
|
||||
state := conn.GetState()
|
||||
log.ZDebug(ctx, "K8s Discovery: New connection created", "serviceName", serviceName, "target", target, "state", state.String())
|
||||
|
||||
newList = append(newList, &addrConn{conn: conn, addr: target, reused: false})
|
||||
createdCount++
|
||||
}
|
||||
}
|
||||
|
||||
log.ZInfo(ctx, "K8s Discovery: Connection initialization summary", "serviceName", serviceName,
|
||||
"total", len(newList), "reused", reusedCount, "created", createdCount)
|
||||
|
||||
// Step 5: collect the old connections that were not reused and need to be closed
|
||||
var connsToClose []*addrConn
|
||||
for _, ac := range oldList {
|
||||
if !ac.reused {
|
||||
connsToClose = append(connsToClose, ac)
|
||||
}
|
||||
}
|
||||
|
||||
// Step 6: update the connection map
|
||||
k.mu.Lock()
|
||||
k.connMap[serviceName] = newList
|
||||
k.mu.Unlock()
|
||||
|
||||
log.ZDebug(ctx, "K8s Discovery: Connection map updated", "serviceName", serviceName,
|
||||
"oldCount", len(oldList), "newCount", len(newList), "toClose", len(connsToClose))
|
||||
|
||||
// Step 7: close the unused old connections after a delay
|
||||
if len(connsToClose) > 0 {
|
||||
log.ZInfo(ctx, "K8s Discovery: Scheduling delayed close for unused connections", "serviceName", serviceName, "count", len(connsToClose), "delaySeconds", 5)
|
||||
go func() {
|
||||
time.Sleep(5 * time.Second)
|
||||
log.ZDebug(ctx, "K8s Discovery: Closing unused old connections", "serviceName", serviceName, "count", len(connsToClose))
|
||||
closedCount := 0
|
||||
for _, ac := range connsToClose {
|
||||
if err := ac.conn.Close(); err != nil {
|
||||
log.ZError(ctx, "K8s Discovery: Failed to close old connection", err, "serviceName", serviceName, "addr", ac.addr)
|
||||
} else {
|
||||
closedCount++
|
||||
}
|
||||
}
|
||||
log.ZInfo(ctx, "K8s Discovery: Closed unused connections", "serviceName", serviceName, "closed", closedCount, "total", len(connsToClose))
|
||||
}()
|
||||
}
|
||||
|
||||
log.ZInfo(ctx, "K8s Discovery: Connection initialization completed", "serviceName", serviceName)
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetConns returns gRPC client connections for a given Kubernetes service name.
|
||||
func (k *KubernetesConnManager) GetConns(ctx context.Context, serviceName string, opts ...grpc.DialOption) ([]grpc.ClientConnInterface, error) {
|
||||
// Strip the port information from the service name
|
||||
svcName := parseServiceName(serviceName)
|
||||
log.ZDebug(ctx, "K8s Discovery: GetConns called", "serviceName", serviceName, "parsedName", svcName)
|
||||
|
||||
// Step 1: first cache check (read lock)
|
||||
k.mu.RLock()
|
||||
conns, exists := k.connMap[svcName]
|
||||
k.mu.RUnlock()
|
||||
|
||||
// Step 2: if the cache has connections, check their health
|
||||
if exists && len(conns) > 0 {
|
||||
log.ZDebug(ctx, "K8s Discovery: Found connections in cache, checking health", "serviceName", svcName, "count", len(conns))
|
||||
|
||||
// Check connection health
|
||||
validConns := k.filterValidConns(ctx, svcName, conns)
|
||||
|
||||
// If valid connections remain, update the cache and return them
|
||||
if len(validConns) > 0 {
|
||||
if len(validConns) < len(conns) {
|
||||
log.ZWarn(ctx, "K8s Discovery: Removed invalid connections", nil, "serviceName", svcName, "removed", len(conns)-len(validConns), "remaining", len(validConns))
|
||||
k.mu.Lock()
|
||||
k.connMap[svcName] = validConns
|
||||
k.mu.Unlock()
|
||||
} else {
|
||||
log.ZDebug(ctx, "K8s Discovery: All connections are healthy", "serviceName", svcName, "count", len(validConns))
|
||||
}
|
||||
// Convert to the interface type
|
||||
result := make([]grpc.ClientConnInterface, len(validConns))
|
||||
for i, ac := range validConns {
|
||||
result[i] = ac.conn
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// If all connections are invalid, clear the cache and reinitialize
|
||||
log.ZWarn(ctx, "K8s Discovery: All connections are invalid, reinitializing", nil, "serviceName", svcName)
|
||||
k.mu.Lock()
|
||||
delete(k.connMap, svcName)
|
||||
k.mu.Unlock()
|
||||
} else {
|
||||
log.ZDebug(ctx, "K8s Discovery: No connections in cache, initializing", "serviceName", svcName)
|
||||
}
|
||||
|
||||
// Step 3: no cached connections, or all of them are invalid; reinitialize
|
||||
k.mu.Lock()
|
||||
conns, exists = k.connMap[svcName]
|
||||
if exists && len(conns) > 0 {
|
||||
log.ZDebug(ctx, "K8s Discovery: Connections were initialized by another goroutine", "serviceName", svcName)
|
||||
k.mu.Unlock()
|
||||
result := make([]grpc.ClientConnInterface, len(conns))
|
||||
for i, ac := range conns {
|
||||
result[i] = ac.conn
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
k.mu.Unlock()
|
||||
|
||||
// Initialize new connections
|
||||
log.ZDebug(ctx, "K8s Discovery: Initializing new connections", "serviceName", svcName)
|
||||
if err := k.initializeConns(svcName); err != nil {
|
||||
log.ZError(ctx, "K8s Discovery: Failed to initialize connections", err, "serviceName", svcName)
|
||||
return nil, fmt.Errorf("failed to initialize connections for service %s: %w", svcName, err)
|
||||
}
|
||||
|
||||
// Return the newly initialized connections
|
||||
k.mu.RLock()
|
||||
conns = k.connMap[svcName]
|
||||
k.mu.RUnlock()
|
||||
|
||||
log.ZDebug(ctx, "K8s Discovery: Returning connections", "serviceName", svcName, "count", len(conns))
|
||||
result := make([]grpc.ClientConnInterface, len(conns))
|
||||
for i, ac := range conns {
|
||||
result[i] = ac.conn
|
||||
}
|
||||
return result, nil
|
||||
}
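// Illustrative usage sketch, assuming a *KubernetesConnManager named mgr, a
// package-level round-robin counter next, and a placeholder service name:
//
//	conns, err := mgr.GetConns(ctx, "openim-rpc-msg")
//	if err != nil {
//		return err
//	}
//	cc := conns[int(atomic.AddUint64(&next, 1))%len(conns)] // simple round-robin
//	// cc is a grpc.ClientConnInterface and can back any generated gRPC client.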
|
||||
|
||||
// filterValidConns returns only the connections that are still usable.
|
||||
func (k *KubernetesConnManager) filterValidConns(ctx context.Context, serviceName string, conns []*addrConn) []*addrConn {
|
||||
validConns := make([]*addrConn, 0, len(conns))
|
||||
invalidStates := make(map[string]int)
|
||||
|
||||
for _, ac := range conns {
|
||||
state := ac.conn.GetState()
|
||||
|
||||
// Keep only connections in the Ready or Idle state
|
||||
if state == connectivity.Ready || state == connectivity.Idle {
|
||||
validConns = append(validConns, ac)
|
||||
} else {
|
||||
invalidStates[state.String()]++
|
||||
log.ZDebug(ctx, "K8s Discovery: Connection is invalid, closing", "serviceName", serviceName, "addr", ac.addr, "state", state.String())
|
||||
if err := ac.conn.Close(); err != nil {
|
||||
log.ZError(ctx, "K8s Discovery: Failed to close invalid connection", err, "serviceName", serviceName, "addr", ac.addr)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(invalidStates) > 0 {
|
||||
log.ZWarn(ctx, "K8s Discovery: Found invalid connections", nil, "serviceName", serviceName, "invalidStates", invalidStates)
|
||||
}
|
||||
|
||||
return validConns
|
||||
}
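// Connections in the Connecting, TransientFailure, or Shutdown state are closed
// and dropped here; they are re-established only when the Endpoints watcher
// fires again or when GetConns finds no valid connections left and reinitializes.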
|
||||
|
||||
// GetConn returns a single gRPC client connection for a given Kubernetes service name.
|
||||
// Important: GetConn uses DNS to avoid the connection being forcibly closed
|
||||
func (k *KubernetesConnManager) GetConn(ctx context.Context, serviceName string, opts ...grpc.DialOption) (grpc.ClientConnInterface, error) {
|
||||
// Parse the service name and strip any port information
|
||||
svcName := parseServiceName(serviceName)
|
||||
log.ZDebug(ctx, "K8s Discovery: GetConn called (using DNS)", "serviceName", serviceName, "parsedName", svcName)
|
||||
|
||||
var target string
|
||||
|
||||
// Check whether a custom target is configured
|
||||
if k.rpcTargets[svcName] == "" {
|
||||
// Get the Service port
|
||||
svcPort, err := k.getServicePort(svcName)
|
||||
if err != nil {
|
||||
log.ZError(ctx, "K8s Discovery: Failed to get service port", err, "serviceName", svcName)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Build the Kubernetes DNS name
|
||||
target = fmt.Sprintf("%s.%s.svc.cluster.local:%d", svcName, k.namespace, svcPort)
|
||||
log.ZDebug(ctx, "K8s Discovery: Using DNS target", "serviceName", svcName, "target", target)
|
||||
} else {
|
||||
target = k.rpcTargets[svcName]
|
||||
log.ZDebug(ctx, "K8s Discovery: Using custom target", "serviceName", svcName, "target", target)
|
||||
}
|
||||
|
||||
// Create the gRPC connection
|
||||
log.ZDebug(ctx, "K8s Discovery: Dialing DNS target", "serviceName", svcName, "target", target)
|
||||
conn, err := grpc.DialContext(
|
||||
ctx,
|
||||
target,
|
||||
append([]grpc.DialOption{
|
||||
grpc.WithTransportCredentials(insecure.NewCredentials()),
|
||||
grpc.WithDefaultCallOptions(
|
||||
grpc.MaxCallRecvMsgSize(1024*1024*10),
|
||||
grpc.MaxCallSendMsgSize(1024*1024*20),
|
||||
),
|
||||
grpc.WithKeepaliveParams(keepalive.ClientParameters{
|
||||
Time: 10 * time.Second,
|
||||
Timeout: 3 * time.Second,
|
||||
PermitWithoutStream: true,
|
||||
}),
|
||||
}, k.dialOptions...)...,
|
||||
)
|
||||
if err != nil {
|
||||
log.ZError(ctx, "K8s Discovery: Failed to dial DNS target", err, "serviceName", svcName, "target", target)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.ZDebug(ctx, "K8s Discovery: GetConn completed successfully", "serviceName", svcName)
|
||||
return conn, nil
|
||||
}
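// For example (the names and port are placeholders), a service "openim-rpc-auth"
// in namespace "openim" whose RPC port is 10160 is dialed as
// "openim-rpc-auth.openim.svc.cluster.local:10160", so endpoint churn is handled
// by the cluster's Service routing rather than by this manager.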
|
||||
|
||||
// IsSelfNode checks if the given connection is to the current node
|
||||
func (k *KubernetesConnManager) IsSelfNode(cc grpc.ClientConnInterface) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// watchEndpoints watches for changes to Endpoints resources.
|
||||
func (k *KubernetesConnManager) watchEndpoints() {
|
||||
ctx := context.Background()
|
||||
log.ZInfo(ctx, "K8s Discovery: Starting Endpoints watcher")
|
||||
|
||||
informerFactory := informers.NewSharedInformerFactoryWithOptions(k.clientset, time.Minute*10,
|
||||
informers.WithNamespace(k.namespace))
|
||||
informer := informerFactory.Core().V1().Endpoints().Informer()
|
||||
log.ZDebug(ctx, "K8s Discovery: Endpoints Informer created")
|
||||
|
||||
informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: func(obj interface{}) {
|
||||
k.handleEndpointChange(obj)
|
||||
},
|
||||
UpdateFunc: func(oldObj, newObj interface{}) {
|
||||
oldEndpoint := oldObj.(*v1.Endpoints)
|
||||
newEndpoint := newObj.(*v1.Endpoints)
|
||||
|
||||
if k.endpointsChanged(oldEndpoint, newEndpoint) {
|
||||
k.handleEndpointChange(newObj)
|
||||
}
|
||||
},
|
||||
DeleteFunc: func(obj interface{}) {
|
||||
k.handleEndpointChange(obj)
|
||||
},
|
||||
})
|
||||
|
||||
log.ZDebug(ctx, "K8s Discovery: Starting Informer factory")
|
||||
informerFactory.Start(ctx.Done())
|
||||
|
||||
log.ZDebug(ctx, "K8s Discovery: Waiting for Informer cache to sync")
|
||||
if !cache.WaitForCacheSync(ctx.Done(), informer.HasSynced) {
|
||||
log.ZError(ctx, "K8s Discovery: Failed to sync Informer cache", nil)
|
||||
return
|
||||
}
|
||||
log.ZInfo(ctx, "K8s Discovery: Informer cache synced successfully")
|
||||
|
||||
log.ZInfo(ctx, "K8s Discovery: Endpoints watcher is running")
|
||||
<-ctx.Done()
|
||||
log.ZInfo(ctx, "K8s Discovery: Endpoints watcher stopped")
|
||||
}
|
||||
|
||||
// endpointsChanged reports whether the Endpoints actually changed.
|
||||
func (k *KubernetesConnManager) endpointsChanged(old, new *v1.Endpoints) bool {
|
||||
oldAddresses := make(map[string]bool)
|
||||
for _, subset := range old.Subsets {
|
||||
for _, address := range subset.Addresses {
|
||||
oldAddresses[address.IP] = true
|
||||
}
|
||||
}
|
||||
|
||||
newAddresses := make(map[string]bool)
|
||||
for _, subset := range new.Subsets {
|
||||
for _, address := range subset.Addresses {
|
||||
newAddresses[address.IP] = true
|
||||
}
|
||||
}
|
||||
|
||||
if len(oldAddresses) != len(newAddresses) {
|
||||
return true
|
||||
}
|
||||
|
||||
for ip := range oldAddresses {
|
||||
if !newAddresses[ip] {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
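// Only the set of ready addresses is compared: an update that touches only
// ports or NotReadyAddresses does not trigger a reconnect.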
|
||||
|
||||
// handleEndpointChange handles a change to an Endpoints resource.
|
||||
func (k *KubernetesConnManager) handleEndpointChange(obj interface{}) {
|
||||
ctx := context.Background()
|
||||
|
||||
endpoint, ok := obj.(*v1.Endpoints)
|
||||
if !ok {
|
||||
log.ZError(ctx, "K8s Discovery: Expected *v1.Endpoints", nil, "actualType", fmt.Sprintf("%T", obj))
|
||||
return
|
||||
}
|
||||
|
||||
serviceName := endpoint.Name
|
||||
|
||||
// Only handle services listed in watchNames
|
||||
if len(k.watchNames) > 0 && !datautil.Contain(serviceName, k.watchNames...) {
|
||||
log.ZDebug(ctx, "K8s Discovery: Ignoring Endpoints change (not in watchNames)", "serviceName", serviceName)
|
||||
return
|
||||
}
|
||||
|
||||
log.ZInfo(ctx, "K8s Discovery: Handling Endpoints change", "serviceName", serviceName)
|
||||
|
||||
var totalAddresses int
|
||||
for _, subset := range endpoint.Subsets {
|
||||
totalAddresses += len(subset.Addresses)
|
||||
}
|
||||
log.ZDebug(ctx, "K8s Discovery: Endpoint addresses count", "serviceName", serviceName, "count", totalAddresses)
|
||||
|
||||
if err := k.initializeConns(serviceName); err != nil {
|
||||
log.ZError(ctx, "K8s Discovery: Failed to initialize connections", err, "serviceName", serviceName)
|
||||
} else {
|
||||
log.ZInfo(ctx, "K8s Discovery: Successfully updated connections", "serviceName", serviceName)
|
||||
}
|
||||
}
|
||||
|
||||
// getServicePort returns the RPC port of the Service.
|
||||
func (k *KubernetesConnManager) getServicePort(serviceName string) (int32, error) {
|
||||
ctx := context.Background()
|
||||
log.ZDebug(ctx, "K8s Discovery: Getting service port", "serviceName", serviceName)
|
||||
|
||||
svc, err := k.clientset.CoreV1().Services(k.namespace).Get(
|
||||
ctx,
|
||||
serviceName,
|
||||
metav1.GetOptions{},
|
||||
)
|
||||
if err != nil {
|
||||
log.ZError(ctx, "K8s Discovery: Failed to get service", err, "serviceName", serviceName, "namespace", k.namespace)
|
||||
return 0, fmt.Errorf("failed to get service %s: %w", serviceName, err)
|
||||
}
|
||||
|
||||
if len(svc.Spec.Ports) == 0 {
|
||||
log.ZError(ctx, "K8s Discovery: Service has no ports defined", nil, "serviceName", serviceName)
|
||||
return 0, fmt.Errorf("service %s has no ports defined", serviceName)
|
||||
}
|
||||
|
||||
var svcPort int32
|
||||
for _, port := range svc.Spec.Ports {
|
||||
if port.Port != 10001 {
|
||||
svcPort = port.Port
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if svcPort == 0 {
|
||||
log.ZError(ctx, "K8s Discovery: Service has no RPC port", nil, "serviceName", serviceName)
|
||||
return 0, fmt.Errorf("service %s has no RPC port (all ports are 10001)", serviceName)
|
||||
}
|
||||
|
||||
log.ZDebug(ctx, "K8s Discovery: Got service port", "serviceName", serviceName, "port", svcPort)
|
||||
return svcPort, nil
|
||||
}
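// Port 10001 is skipped above; it is assumed to be the non-RPC (Prometheus
// metrics) port exposed by every Service, so the first other port is treated
// as the RPC port.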
|
||||
|
||||
// Close closes all connections.
|
||||
func (k *KubernetesConnManager) Close() {
|
||||
ctx := context.Background()
|
||||
log.ZInfo(ctx, "K8s Discovery: Closing all connections")
|
||||
|
||||
k.mu.Lock()
|
||||
defer k.mu.Unlock()
|
||||
|
||||
totalConns := 0
|
||||
for serviceName, conns := range k.connMap {
|
||||
log.ZDebug(ctx, "K8s Discovery: Closing connections for service", "serviceName", serviceName, "count", len(conns))
|
||||
for _, ac := range conns {
|
||||
if err := ac.conn.Close(); err != nil {
|
||||
log.ZError(ctx, "K8s Discovery: Failed to close connection", err, "serviceName", serviceName, "addr", ac.addr)
|
||||
}
|
||||
}
|
||||
totalConns += len(conns)
|
||||
}
|
||||
|
||||
log.ZInfo(ctx, "K8s Discovery: Closed all connections", "totalCount", totalConns)
|
||||
k.connMap = make(map[string][]*addrConn)
|
||||
}
|
||||
|
||||
// GetSelfConnTarget returns the connection target for the current service.
|
||||
func (k *KubernetesConnManager) GetSelfConnTarget() string {
|
||||
ctx := context.Background()
|
||||
|
||||
if k.selfTarget == "" {
|
||||
hostName := os.Getenv("HOSTNAME")
|
||||
log.ZDebug(ctx, "K8s Discovery: Getting self connection target", "hostname", hostName)
|
||||
|
||||
pod, err := k.clientset.CoreV1().Pods(k.namespace).Get(ctx, hostName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
log.ZError(ctx, "K8s Discovery: Failed to get pod", err, "hostname", hostName)
|
||||
}
|
||||
|
||||
for pod.Status.PodIP == "" {
|
||||
log.ZDebug(ctx, "K8s Discovery: Waiting for pod IP to be assigned", "hostname", hostName)
|
||||
pod, err = k.clientset.CoreV1().Pods(k.namespace).Get(context.TODO(), hostName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
log.ZError(ctx, "K8s Discovery: Failed to get pod", err)
|
||||
}
|
||||
time.Sleep(3 * time.Second)
|
||||
}
|
||||
|
||||
var selfPort int32
|
||||
for _, port := range pod.Spec.Containers[0].Ports {
|
||||
if port.ContainerPort != 10001 {
|
||||
selfPort = port.ContainerPort
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
k.selfTarget = fmt.Sprintf("%s:%d", pod.Status.PodIP, selfPort)
|
||||
log.ZInfo(ctx, "K8s Discovery: Self connection target", "target", k.selfTarget)
|
||||
}
|
||||
|
||||
return k.selfTarget
|
||||
}
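// Inside a Kubernetes pod, HOSTNAME normally defaults to the pod name, which is
// why the pod can be looked up by it above; the loop then polls every 3 seconds
// until the pod has been assigned an IP.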
|
||||
|
||||
// AddOption appends gRPC dial options to the existing options.
|
||||
func (k *KubernetesConnManager) AddOption(opts ...grpc.DialOption) {
|
||||
k.mu.Lock()
|
||||
defer k.mu.Unlock()
|
||||
k.dialOptions = append(k.dialOptions, opts...)
|
||||
log.ZDebug(context.Background(), "K8s Discovery: Added dial options", "count", len(opts))
|
||||
}
|
||||
|
||||
// CloseConn closes a given gRPC client connection.
|
||||
func (k *KubernetesConnManager) CloseConn(conn *grpc.ClientConn) {
|
||||
log.ZDebug(context.Background(), "K8s Discovery: Closing single connection")
|
||||
conn.Close()
|
||||
}
|
||||
|
||||
func (k *KubernetesConnManager) Register(ctx context.Context, serviceName, host string, port int, opts ...grpc.DialOption) error {
|
||||
// Registration is not needed in a Kubernetes environment; return nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func (k *KubernetesConnManager) UnRegister() error {
|
||||
// Unregistration is not needed in a Kubernetes environment; return nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func (k *KubernetesConnManager) GetUserIdHashGatewayHost(ctx context.Context, userId string) (string, error) {
|
||||
// Not supported in a Kubernetes environment; return an empty value
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// KeyValue interface methods are not supported in a Kubernetes environment and return discovery.ErrNotSupported.
|
||||
// Callers can then use errors.Is(err, discovery.ErrNotSupported) to detect and ignore this.
|
||||
|
||||
func (k *KubernetesConnManager) SetKey(ctx context.Context, key string, value []byte) error {
|
||||
return discovery.ErrNotSupported
|
||||
}
|
||||
|
||||
func (k *KubernetesConnManager) SetWithLease(ctx context.Context, key string, val []byte, ttl int64) error {
|
||||
return discovery.ErrNotSupported
|
||||
}
|
||||
|
||||
func (k *KubernetesConnManager) GetKey(ctx context.Context, key string) ([]byte, error) {
|
||||
return nil, discovery.ErrNotSupported
|
||||
}
|
||||
|
||||
func (k *KubernetesConnManager) GetKeyWithPrefix(ctx context.Context, key string) ([][]byte, error) {
|
||||
return nil, discovery.ErrNotSupported
|
||||
}
|
||||
|
||||
func (k *KubernetesConnManager) WatchKey(ctx context.Context, key string, fn discovery.WatchKeyHandler) error {
|
||||
return discovery.ErrNotSupported
|
||||
}
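// Sketch of how a caller can ignore the unsupported key/value operations when
// running under Kubernetes discovery (variable names are illustrative):
//
//	if err := client.SetKey(ctx, key, val); err != nil && !errors.Is(err, discovery.ErrNotSupported) {
//		return err
//	}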
|
||||
15
pkg/common/ginprometheus/doc.go
Normal file
@@ -0,0 +1,15 @@
|
||||
// Copyright © 2024 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package ginprometheus // import "git.imall.cloud/openim/open-im-server-deploy/pkg/common/ginprometheus"
|
||||
444
pkg/common/ginprometheus/ginprometheus.go
Normal file
@@ -0,0 +1,444 @@
|
||||
// Copyright © 2023 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package ginprometheus
|
||||
|
||||
//
|
||||
//import (
|
||||
// "bytes"
|
||||
// "fmt"
|
||||
// "io"
|
||||
// "net/http"
|
||||
// "os"
|
||||
// "strconv"
|
||||
// "time"
|
||||
//
|
||||
// "github.com/gin-gonic/gin"
|
||||
// "github.com/prometheus/client_golang/prometheus"
|
||||
// "github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
//)
|
||||
//
|
||||
//var defaultMetricPath = "/metrics"
|
||||
//
|
||||
//// counter, counter_vec, gauge, gauge_vec,
|
||||
//// histogram, histogram_vec, summary, summary_vec.
|
||||
//var (
|
||||
// reqCounter = &Metric{
|
||||
// ID: "reqCnt",
|
||||
// Name: "requests_total",
|
||||
// Description: "How many HTTP requests processed, partitioned by status code and HTTP method.",
|
||||
// Type: "counter_vec",
|
||||
// Args: []string{"code", "method", "handler", "host", "url"}}
|
||||
//
|
||||
// reqDuration = &Metric{
|
||||
// ID: "reqDur",
|
||||
// Name: "request_duration_seconds",
|
||||
// Description: "The HTTP request latencies in seconds.",
|
||||
// Type: "histogram_vec",
|
||||
// Args: []string{"code", "method", "url"},
|
||||
// }
|
||||
//
|
||||
// resSize = &Metric{
|
||||
// ID: "resSz",
|
||||
// Name: "response_size_bytes",
|
||||
// Description: "The HTTP response sizes in bytes.",
|
||||
// Type: "summary"}
|
||||
//
|
||||
// reqSize = &Metric{
|
||||
// ID: "reqSz",
|
||||
// Name: "request_size_bytes",
|
||||
// Description: "The HTTP request sizes in bytes.",
|
||||
// Type: "summary"}
|
||||
//
|
||||
// standardMetrics = []*Metric{
|
||||
// reqCounter,
|
||||
// reqDuration,
|
||||
// resSize,
|
||||
// reqSize,
|
||||
// }
|
||||
//)
|
||||
//
|
||||
///*
|
||||
//RequestCounterURLLabelMappingFn is a function which can be supplied to the middleware to control
|
||||
//the cardinality of the request counter's "url" label, which might be required in some contexts.
|
||||
//For instance, if for a "/customer/:name" route you don't want to generate a time series for every
|
||||
//possible customer name, you could use this function:
|
||||
//
|
||||
// func(c *gin.Context) string {
|
||||
// url := c.Request.URL.Path
|
||||
// for _, p := range c.Params {
|
||||
// if p.Key == "name" {
|
||||
// url = strings.Replace(url, p.Value, ":name", 1)
|
||||
// break
|
||||
// }
|
||||
// }
|
||||
// return url
|
||||
// }
|
||||
//
|
||||
//which would map "/customer/alice" and "/customer/bob" to their template "/customer/:name".
|
||||
//*/
|
||||
//type RequestCounterURLLabelMappingFn func(c *gin.Context) string
|
||||
//
|
||||
//// Metric is a definition for the name, description, type, ID, and
|
||||
//// prometheus.Collector type (i.e. CounterVec, Summary, etc) of each metric.
|
||||
//type Metric struct {
|
||||
// MetricCollector prometheus.Collector
|
||||
// ID string
|
||||
// Name string
|
||||
// Description string
|
||||
// Type string
|
||||
// Args []string
|
||||
//}
|
||||
//
|
||||
//// Prometheus contains the metrics gathered by the instance and its path.
|
||||
//type Prometheus struct {
|
||||
// reqCnt *prometheus.CounterVec
|
||||
// reqDur *prometheus.HistogramVec
|
||||
// reqSz, resSz prometheus.Summary
|
||||
// router *gin.Engine
|
||||
// listenAddress string
|
||||
// Ppg PrometheusPushGateway
|
||||
//
|
||||
// MetricsList []*Metric
|
||||
// MetricsPath string
|
||||
//
|
||||
// ReqCntURLLabelMappingFn RequestCounterURLLabelMappingFn
|
||||
//
|
||||
// // gin.Context string to use as a prometheus URL label
|
||||
// URLLabelFromContext string
|
||||
//}
|
||||
//
|
||||
//// PrometheusPushGateway contains the configuration for pushing to a Prometheus pushgateway (optional).
|
||||
//type PrometheusPushGateway struct {
|
||||
//
|
||||
// // Push interval in seconds
|
||||
// PushIntervalSeconds time.Duration
|
||||
//
|
||||
// // Push Gateway URL in format http://domain:port
|
||||
// // where JOBNAME can be any string of your choice
|
||||
// PushGatewayURL string
|
||||
//
|
||||
// // Local metrics URL where metrics are fetched from, this could be omitted in the future
|
||||
// // if implemented using prometheus common/expfmt instead
|
||||
// MetricsURL string
|
||||
//
|
||||
// // pushgateway job name, defaults to "gin"
|
||||
// Job string
|
||||
//}
|
||||
//
|
||||
//// NewPrometheus generates a new set of metrics with a certain subsystem name.
|
||||
//func NewPrometheus(subsystem string, customMetricsList ...[]*Metric) *Prometheus {
|
||||
// if subsystem == "" {
|
||||
// subsystem = "app"
|
||||
// }
|
||||
//
|
||||
// var metricsList []*Metric
|
||||
//
|
||||
// if len(customMetricsList) > 1 {
|
||||
// panic("Too many args. NewPrometheus( string, <optional []*Metric> ).")
|
||||
// } else if len(customMetricsList) == 1 {
|
||||
// metricsList = customMetricsList[0]
|
||||
// }
|
||||
// metricsList = append(metricsList, standardMetrics...)
|
||||
//
|
||||
// p := &Prometheus{
|
||||
// MetricsList: metricsList,
|
||||
// MetricsPath: defaultMetricPath,
|
||||
// ReqCntURLLabelMappingFn: func(c *gin.Context) string {
|
||||
// return c.FullPath() // e.g. /user/:id , /user/:id/info
|
||||
// },
|
||||
// }
|
||||
//
|
||||
// p.registerMetrics(subsystem)
|
||||
//
|
||||
// return p
|
||||
//}
|
||||
//
|
||||
//// SetPushGateway sends metrics to a remote pushgateway exposed on pushGatewayURL
|
||||
//// every pushIntervalSeconds. Metrics are fetched from metricsURL.
|
||||
//func (p *Prometheus) SetPushGateway(pushGatewayURL, metricsURL string, pushIntervalSeconds time.Duration) {
|
||||
// p.Ppg.PushGatewayURL = pushGatewayURL
|
||||
// p.Ppg.MetricsURL = metricsURL
|
||||
// p.Ppg.PushIntervalSeconds = pushIntervalSeconds
|
||||
// p.startPushTicker()
|
||||
//}
|
||||
//
|
||||
//// SetPushGatewayJob job name, defaults to "gin".
|
||||
//func (p *Prometheus) SetPushGatewayJob(j string) {
|
||||
// p.Ppg.Job = j
|
||||
//}
|
||||
//
|
||||
//// SetListenAddress for exposing metrics on address. If not set, it will be exposed at the
|
||||
//// same address of the gin engine that is being used.
|
||||
//func (p *Prometheus) SetListenAddress(address string) {
|
||||
// p.listenAddress = address
|
||||
// if p.listenAddress != "" {
|
||||
// p.router = gin.Default()
|
||||
// }
|
||||
//}
|
||||
//
|
||||
//// SetListenAddressWithRouter for using a separate router to expose metrics. (this keeps things like GET /metrics out of
|
||||
//// your content's access log).
|
||||
//func (p *Prometheus) SetListenAddressWithRouter(listenAddress string, r *gin.Engine) {
|
||||
// p.listenAddress = listenAddress
|
||||
// if len(p.listenAddress) > 0 {
|
||||
// p.router = r
|
||||
// }
|
||||
//}
|
||||
//
|
||||
//// SetMetricsPath set metrics paths.
|
||||
//func (p *Prometheus) SetMetricsPath(e *gin.Engine) error {
|
||||
//
|
||||
// if p.listenAddress != "" {
|
||||
// p.router.GET(p.MetricsPath, prometheusHandler())
|
||||
// return p.runServer()
|
||||
// } else {
|
||||
// e.GET(p.MetricsPath, prometheusHandler())
|
||||
// return nil
|
||||
// }
|
||||
//}
|
||||
//
|
||||
//// SetMetricsPathWithAuth set metrics paths with authentication.
|
||||
//func (p *Prometheus) SetMetricsPathWithAuth(e *gin.Engine, accounts gin.Accounts) error {
|
||||
//
|
||||
// if p.listenAddress != "" {
|
||||
// p.router.GET(p.MetricsPath, gin.BasicAuth(accounts), prometheusHandler())
|
||||
// return p.runServer()
|
||||
// } else {
|
||||
// e.GET(p.MetricsPath, gin.BasicAuth(accounts), prometheusHandler())
|
||||
// return nil
|
||||
// }
|
||||
//
|
||||
//}
|
||||
//
|
||||
//func (p *Prometheus) runServer() error {
|
||||
// return p.router.Run(p.listenAddress)
|
||||
//}
|
||||
//
|
||||
//func (p *Prometheus) getMetrics() []byte {
|
||||
// response, err := http.Get(p.Ppg.MetricsURL)
|
||||
// if err != nil {
|
||||
// return nil
|
||||
// }
|
||||
//
|
||||
// defer response.Body.Close()
|
||||
//
|
||||
// body, _ := io.ReadAll(response.Body)
|
||||
// return body
|
||||
//}
|
||||
//
|
||||
//var hostname, _ = os.Hostname()
|
||||
//
|
||||
//func (p *Prometheus) getPushGatewayURL() string {
|
||||
// if p.Ppg.Job == "" {
|
||||
// p.Ppg.Job = "gin"
|
||||
// }
|
||||
// return p.Ppg.PushGatewayURL + "/metrics/job/" + p.Ppg.Job + "/instance/" + hostname
|
||||
//}
|
||||
//
|
||||
//func (p *Prometheus) sendMetricsToPushGateway(metrics []byte) {
|
||||
// req, err := http.NewRequest("POST", p.getPushGatewayURL(), bytes.NewBuffer(metrics))
|
||||
// if err != nil {
|
||||
// return
|
||||
// }
|
||||
//
|
||||
// client := &http.Client{}
|
||||
// resp, err := client.Do(req)
|
||||
// if err != nil {
|
||||
// fmt.Println("Error sending to push gateway error:", err.Error())
|
||||
// }
|
||||
//
|
||||
// resp.Body.Close()
|
||||
//}
|
||||
//
|
||||
//func (p *Prometheus) startPushTicker() {
|
||||
// ticker := time.NewTicker(time.Second * p.Ppg.PushIntervalSeconds)
|
||||
// go func() {
|
||||
// for range ticker.C {
|
||||
// p.sendMetricsToPushGateway(p.getMetrics())
|
||||
// }
|
||||
// }()
|
||||
//}
|
||||
//
|
||||
//// NewMetric associates prometheus.Collector based on Metric.Type.
|
||||
//func NewMetric(m *Metric, subsystem string) prometheus.Collector {
|
||||
// var metric prometheus.Collector
|
||||
// switch m.Type {
|
||||
// case "counter_vec":
|
||||
// metric = prometheus.NewCounterVec(
|
||||
// prometheus.CounterOpts{
|
||||
// Subsystem: subsystem,
|
||||
// Name: m.Name,
|
||||
// Help: m.Description,
|
||||
// },
|
||||
// m.Args,
|
||||
// )
|
||||
// case "counter":
|
||||
// metric = prometheus.NewCounter(
|
||||
// prometheus.CounterOpts{
|
||||
// Subsystem: subsystem,
|
||||
// Name: m.Name,
|
||||
// Help: m.Description,
|
||||
// },
|
||||
// )
|
||||
// case "gauge_vec":
|
||||
// metric = prometheus.NewGaugeVec(
|
||||
// prometheus.GaugeOpts{
|
||||
// Subsystem: subsystem,
|
||||
// Name: m.Name,
|
||||
// Help: m.Description,
|
||||
// },
|
||||
// m.Args,
|
||||
// )
|
||||
// case "gauge":
|
||||
// metric = prometheus.NewGauge(
|
||||
// prometheus.GaugeOpts{
|
||||
// Subsystem: subsystem,
|
||||
// Name: m.Name,
|
||||
// Help: m.Description,
|
||||
// },
|
||||
// )
|
||||
// case "histogram_vec":
|
||||
// metric = prometheus.NewHistogramVec(
|
||||
// prometheus.HistogramOpts{
|
||||
// Subsystem: subsystem,
|
||||
// Name: m.Name,
|
||||
// Help: m.Description,
|
||||
// },
|
||||
// m.Args,
|
||||
// )
|
||||
// case "histogram":
|
||||
// metric = prometheus.NewHistogram(
|
||||
// prometheus.HistogramOpts{
|
||||
// Subsystem: subsystem,
|
||||
// Name: m.Name,
|
||||
// Help: m.Description,
|
||||
// },
|
||||
// )
|
||||
// case "summary_vec":
|
||||
// metric = prometheus.NewSummaryVec(
|
||||
// prometheus.SummaryOpts{
|
||||
// Subsystem: subsystem,
|
||||
// Name: m.Name,
|
||||
// Help: m.Description,
|
||||
// },
|
||||
// m.Args,
|
||||
// )
|
||||
// case "summary":
|
||||
// metric = prometheus.NewSummary(
|
||||
// prometheus.SummaryOpts{
|
||||
// Subsystem: subsystem,
|
||||
// Name: m.Name,
|
||||
// Help: m.Description,
|
||||
// },
|
||||
// )
|
||||
// }
|
||||
// return metric
|
||||
//}
|
||||
//
|
||||
//func (p *Prometheus) registerMetrics(subsystem string) {
|
||||
// for _, metricDef := range p.MetricsList {
|
||||
// metric := NewMetric(metricDef, subsystem)
|
||||
// if err := prometheus.Register(metric); err != nil {
|
||||
// fmt.Println("could not be registered in Prometheus,metricDef.Name:", metricDef.Name, " error:", err.Error())
|
||||
// }
|
||||
//
|
||||
// switch metricDef {
|
||||
// case reqCounter:
|
||||
// p.reqCnt = metric.(*prometheus.CounterVec)
|
||||
// case reqDuration:
|
||||
// p.reqDur = metric.(*prometheus.HistogramVec)
|
||||
// case resSize:
|
||||
// p.resSz = metric.(prometheus.Summary)
|
||||
// case reqSize:
|
||||
// p.reqSz = metric.(prometheus.Summary)
|
||||
// }
|
||||
// metricDef.MetricCollector = metric
|
||||
// }
|
||||
//}
|
||||
//
|
||||
//// Use adds the middleware to a gin engine.
|
||||
//func (p *Prometheus) Use(e *gin.Engine) error {
|
||||
// e.Use(p.HandlerFunc())
|
||||
// return p.SetMetricsPath(e)
|
||||
//}
|
||||
//
|
||||
//// UseWithAuth adds the middleware to a gin engine with BasicAuth.
|
||||
//func (p *Prometheus) UseWithAuth(e *gin.Engine, accounts gin.Accounts) error {
|
||||
// e.Use(p.HandlerFunc())
|
||||
// return p.SetMetricsPathWithAuth(e, accounts)
|
||||
//}
|
||||
//
|
||||
//// HandlerFunc defines handler function for middleware.
|
||||
//func (p *Prometheus) HandlerFunc() gin.HandlerFunc {
|
||||
// return func(c *gin.Context) {
|
||||
// if c.Request.URL.Path == p.MetricsPath {
|
||||
// c.Next()
|
||||
// return
|
||||
// }
|
||||
//
|
||||
// start := time.Now()
|
||||
// reqSz := computeApproximateRequestSize(c.Request)
|
||||
//
|
||||
// c.Next()
|
||||
//
|
||||
// status := strconv.Itoa(c.Writer.Status())
|
||||
// elapsed := float64(time.Since(start)) / float64(time.Second)
|
||||
// resSz := float64(c.Writer.Size())
|
||||
//
|
||||
// url := p.ReqCntURLLabelMappingFn(c)
|
||||
// if len(p.URLLabelFromContext) > 0 {
|
||||
// u, found := c.Get(p.URLLabelFromContext)
|
||||
// if !found {
|
||||
// u = "unknown"
|
||||
// }
|
||||
// url = u.(string)
|
||||
// }
|
||||
// p.reqDur.WithLabelValues(status, c.Request.Method, url).Observe(elapsed)
|
||||
// p.reqCnt.WithLabelValues(status, c.Request.Method, c.HandlerName(), c.Request.Host, url).Inc()
|
||||
// p.reqSz.Observe(float64(reqSz))
|
||||
// p.resSz.Observe(resSz)
|
||||
// }
|
||||
//}
|
||||
//
|
||||
//func prometheusHandler() gin.HandlerFunc {
|
||||
// h := promhttp.Handler()
|
||||
// return func(c *gin.Context) {
|
||||
// h.ServeHTTP(c.Writer, c.Request)
|
||||
// }
|
||||
//}
|
||||
//
|
||||
//func computeApproximateRequestSize(r *http.Request) int {
|
||||
// var s int
|
||||
// if r.URL != nil {
|
||||
// s = len(r.URL.Path)
|
||||
// }
|
||||
//
|
||||
// s += len(r.Method)
|
||||
// s += len(r.Proto)
|
||||
// for name, values := range r.Header {
|
||||
// s += len(name)
|
||||
// for _, value := range values {
|
||||
// s += len(value)
|
||||
// }
|
||||
// }
|
||||
// s += len(r.Host)
|
||||
//
|
||||
// // r.FormData and r.MultipartForm are assumed to be included in r.URL.
|
||||
//
|
||||
// if r.ContentLength != -1 {
|
||||
// s += int(r.ContentLength)
|
||||
// }
|
||||
// return s
|
||||
//}
|
||||
48
pkg/common/prommetrics/api.go
Normal file
@@ -0,0 +1,48 @@
|
||||
package prommetrics
|
||||
|
||||
import (
|
||||
"net"
|
||||
"strconv"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
)
|
||||
|
||||
var (
|
||||
apiCounter = prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "api_count",
|
||||
Help: "Total number of API calls",
|
||||
},
|
||||
[]string{"path", "method", "code"},
|
||||
)
|
||||
httpCounter = prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "http_count",
|
||||
Help: "Total number of HTTP calls",
|
||||
},
|
||||
[]string{"path", "method", "status"},
|
||||
)
|
||||
)
|
||||
|
||||
func RegistryApi() {
|
||||
registry.MustRegister(apiCounter, httpCounter)
|
||||
}
|
||||
|
||||
func ApiInit(listener net.Listener) error {
|
||||
apiRegistry := prometheus.NewRegistry()
|
||||
cs := append(
|
||||
baseCollector,
|
||||
apiCounter,
|
||||
httpCounter,
|
||||
)
|
||||
return Init(apiRegistry, listener, commonPath, promhttp.HandlerFor(apiRegistry, promhttp.HandlerOpts{}), cs...)
|
||||
}
|
||||
|
||||
func APICall(path string, method string, apiCode int) {
|
||||
apiCounter.With(prometheus.Labels{"path": path, "method": method, "code": strconv.Itoa(apiCode)}).Inc()
|
||||
}
|
||||
|
||||
func HttpCall(path string, method string, status int) {
|
||||
httpCounter.With(prometheus.Labels{"path": path, "method": method, "status": strconv.Itoa(status)}).Inc()
|
||||
}
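// Illustrative call sites (paths, methods, and codes are placeholders): an HTTP
// middleware would typically record each finished request, e.g.
//
//	prommetrics.APICall("/user/get_users_info", "POST", 0)
//	prommetrics.HttpCall("/object/upload", "PUT", 200)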
|
||||
30
pkg/common/prommetrics/grpc_auth.go
Normal file
@@ -0,0 +1,30 @@
|
||||
// Copyright © 2023 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prommetrics
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
var (
|
||||
UserLoginCounter = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Name: "user_login_total",
|
||||
Help: "The number of user login",
|
||||
})
|
||||
)
|
||||
|
||||
func RegistryAuth() {
|
||||
registry.MustRegister(UserLoginCounter)
|
||||
}
|
||||
47
pkg/common/prommetrics/grpc_msg.go
Normal file
@@ -0,0 +1,47 @@
|
||||
// Copyright © 2023 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prommetrics
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
var (
|
||||
SingleChatMsgProcessSuccessCounter = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Name: "single_chat_msg_process_success_total",
|
||||
Help: "The number of single chat msg successful processed",
|
||||
})
|
||||
SingleChatMsgProcessFailedCounter = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Name: "single_chat_msg_process_failed_total",
|
||||
Help: "The number of single chat msg failed processed",
|
||||
})
|
||||
GroupChatMsgProcessSuccessCounter = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Name: "group_chat_msg_process_success_total",
|
||||
Help: "The number of group chat msg successful processed",
|
||||
})
|
||||
GroupChatMsgProcessFailedCounter = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Name: "group_chat_msg_process_failed_total",
|
||||
Help: "The number of group chat msg failed processed",
|
||||
})
|
||||
)
|
||||
|
||||
func RegistryMsg() {
|
||||
registry.MustRegister(
|
||||
SingleChatMsgProcessSuccessCounter,
|
||||
SingleChatMsgProcessFailedCounter,
|
||||
GroupChatMsgProcessSuccessCounter,
|
||||
GroupChatMsgProcessFailedCounter,
|
||||
)
|
||||
}
|
||||
30
pkg/common/prommetrics/grpc_msggateway.go
Normal file
@@ -0,0 +1,30 @@
|
||||
// Copyright © 2023 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prommetrics
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
var (
|
||||
OnlineUserGauge = prometheus.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "online_user_num",
|
||||
Help: "The number of online user num",
|
||||
})
|
||||
)
|
||||
|
||||
func RegistryMsgGateway() {
|
||||
registry.MustRegister(OnlineUserGauge)
|
||||
}
|
||||
37
pkg/common/prommetrics/grpc_push.go
Normal file
@@ -0,0 +1,37 @@
|
||||
// Copyright © 2023 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prommetrics
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
var (
|
||||
MsgOfflinePushFailedCounter = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Name: "msg_offline_push_failed_total",
|
||||
Help: "The number of msg failed offline pushed",
|
||||
})
|
||||
MsgLoneTimePushCounter = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Name: "msg_long_time_push_total",
|
||||
Help: "The number of messages with a push time exceeding 10 seconds",
|
||||
})
|
||||
)
|
||||
|
||||
func RegistryPush() {
|
||||
registry.MustRegister(
|
||||
MsgOfflinePushFailedCounter,
|
||||
MsgLoneTimePushCounter,
|
||||
)
|
||||
}
|
||||
14
pkg/common/prommetrics/grpc_user.go
Normal file
@@ -0,0 +1,14 @@
|
||||
package prommetrics
|
||||
|
||||
import "github.com/prometheus/client_golang/prometheus"
|
||||
|
||||
var (
|
||||
UserRegisterCounter = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Name: "user_register_total",
|
||||
Help: "The number of user registrations",
|
||||
})
|
||||
)
|
||||
|
||||
func RegistryUser() {
|
||||
registry.MustRegister(UserRegisterCounter)
|
||||
}
|
||||
117
pkg/common/prommetrics/prommetrics.go
Normal file
@@ -0,0 +1,117 @@
|
||||
// Copyright © 2023 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prommetrics
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/collectors"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
)
|
||||
|
||||
const commonPath = "/metrics"
|
||||
|
||||
var registry = &prometheusRegistry{prometheus.NewRegistry()}
|
||||
|
||||
type prometheusRegistry struct {
|
||||
*prometheus.Registry
|
||||
}
|
||||
|
||||
func (x *prometheusRegistry) MustRegister(cs ...prometheus.Collector) {
|
||||
for _, c := range cs {
|
||||
if err := x.Registry.Register(c); err != nil {
|
||||
if errors.As(err, &prometheus.AlreadyRegisteredError{}) {
|
||||
continue
|
||||
}
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func init() {
|
||||
registry.MustRegister(
|
||||
collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}),
|
||||
collectors.NewGoCollector(),
|
||||
)
|
||||
}
|
||||
|
||||
var (
|
||||
baseCollector = []prometheus.Collector{
|
||||
collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}),
|
||||
collectors.NewGoCollector(),
|
||||
}
|
||||
)
|
||||
|
||||
func Init(registry *prometheus.Registry, listener net.Listener, path string, handler http.Handler, cs ...prometheus.Collector) error {
|
||||
registry.MustRegister(cs...)
|
||||
srv := http.NewServeMux()
|
||||
srv.Handle(path, handler)
|
||||
return http.Serve(listener, srv)
|
||||
}
|
||||
|
||||
func RegistryAll() {
|
||||
RegistryApi()
|
||||
RegistryAuth()
|
||||
RegistryMsg()
|
||||
RegistryMsgGateway()
|
||||
RegistryPush()
|
||||
RegistryUser()
|
||||
RegistryRpc()
|
||||
RegistryTransfer()
|
||||
}
|
||||
|
||||
func Start(listener net.Listener) error {
|
||||
srv := http.NewServeMux()
|
||||
srv.Handle(commonPath, promhttp.HandlerFor(registry, promhttp.HandlerOpts{}))
|
||||
return http.Serve(listener, srv)
|
||||
}
|
||||
|
||||
const (
|
||||
APIKeyName = "api"
|
||||
MessageTransferKeyName = "message-transfer"
|
||||
|
||||
TTL = 300
|
||||
)
|
||||
|
||||
type Target struct {
|
||||
Target string `json:"target"`
|
||||
Labels map[string]string `json:"labels"`
|
||||
}
|
||||
|
||||
type RespTarget struct {
|
||||
Targets []string `json:"targets"`
|
||||
Labels map[string]string `json:"labels"`
|
||||
}
|
||||
|
||||
func BuildDiscoveryKeyPrefix(name string) string {
|
||||
return fmt.Sprintf("%s/%s/%s", "openim", "prometheus_discovery", name)
|
||||
}
|
||||
|
||||
func BuildDiscoveryKey(name string, index int) string {
|
||||
return fmt.Sprintf("%s/%s/%s/%d", "openim", "prometheus_discovery", name, index)
|
||||
}
|
||||
|
||||
func BuildDefaultTarget(host string, ip int) Target {
|
||||
return Target{
|
||||
Target: fmt.Sprintf("%s:%d", host, ip),
|
||||
Labels: map[string]string{
|
||||
"namespace": "default",
|
||||
},
|
||||
}
|
||||
}
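// For example, BuildDiscoveryKey(APIKeyName, 0) yields
// "openim/prometheus_discovery/api/0", and BuildDefaultTarget("10.0.0.3", 10002)
// yields {Target: "10.0.0.3:10002", Labels: {"namespace": "default"}} (the host
// and port here are placeholders).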
|
||||
77
pkg/common/prommetrics/prommetrics_test.go
Normal file
@@ -0,0 +1,77 @@
|
||||
// Copyright © 2023 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prommetrics
|
||||
|
||||
import "testing"
|
||||
|
||||
//func TestNewGrpcPromObj(t *testing.T) {
|
||||
// // Create a custom metric to pass into the NewGrpcPromObj function.
|
||||
// customMetric := prometheus.NewCounter(prometheus.CounterOpts{
|
||||
// Name: "test_metric",
|
||||
// Help: "This is a test metric.",
|
||||
// })
|
||||
// cusMetrics := []prometheus.Collector{customMetric}
|
||||
//
|
||||
// // Call NewGrpcPromObj with the custom metrics.
|
||||
// reg, grpcMetrics, err := NewGrpcPromObj(cusMetrics)
|
||||
//
|
||||
// // Assert no error was returned.
|
||||
// assert.NoError(t, err)
|
||||
//
|
||||
// // Assert the registry was correctly initialized.
|
||||
// assert.NotNil(t, reg)
|
||||
//
|
||||
// // Assert the grpcMetrics was correctly initialized.
|
||||
// assert.NotNil(t, grpcMetrics)
|
||||
//
|
||||
// // Assert that the custom metric is registered.
|
||||
// mfs, err := reg.Gather()
|
||||
// assert.NoError(t, err)
|
||||
// assert.NotEmpty(t, mfs) // Ensure some metrics are present.
|
||||
// found := false
|
||||
// for _, mf := range mfs {
|
||||
// if *mf.Name == "test_metric" {
|
||||
// found = true
|
||||
// break
|
||||
// }
|
||||
// }
|
||||
// assert.True(t, found, "Custom metric not found in registry")
|
||||
//}
|
||||
|
||||
//func TestGetGrpcCusMetrics(t *testing.T) {
|
||||
// conf := config2.NewGlobalConfig()
|
||||
//
|
||||
// config2.InitConfig(conf, "../../config")
|
||||
// // Test various cases based on the switch statement in the GetGrpcCusMetrics function.
|
||||
// testCases := []struct {
|
||||
// name string
|
||||
// expected int // The expected number of metrics for each case.
|
||||
// }{
|
||||
// {conf.RpcRegisterName.OpenImMessageGatewayName, 1},
|
||||
// }
|
||||
//
|
||||
// for _, tc := range testCases {
|
||||
// t.Run(tc.name, func(t *testing.T) {
|
||||
// metrics := GetGrpcCusMetrics(tc.name, &conf.RpcRegisterName)
|
||||
// assert.Len(t, metrics, tc.expected)
|
||||
// })
|
||||
// }
|
||||
//}
|
||||
|
||||
func TestName(t *testing.T) {
|
||||
RegistryApi()
|
||||
RegistryApi()
|
||||
|
||||
}
|
||||
74
pkg/common/prommetrics/rpc.go
Normal file
@@ -0,0 +1,74 @@
|
||||
package prommetrics
|
||||
|
||||
import (
|
||||
"net"
|
||||
"strconv"
|
||||
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/config"
|
||||
gp "github.com/grpc-ecosystem/go-grpc-prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
)
|
||||
|
||||
const rpcPath = commonPath
|
||||
|
||||
var (
|
||||
grpcMetrics *gp.ServerMetrics
|
||||
rpcCounter = prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "rpc_count",
|
||||
Help: "Total number of RPC calls",
|
||||
},
|
||||
[]string{"name", "path", "code"},
|
||||
)
|
||||
)
|
||||
|
||||
func RegistryRpc() {
|
||||
registry.MustRegister(rpcCounter)
|
||||
}
|
||||
|
||||
func RpcInit(cs []prometheus.Collector, listener net.Listener) error {
|
||||
reg := prometheus.NewRegistry()
|
||||
cs = append(append(
|
||||
baseCollector,
|
||||
rpcCounter,
|
||||
), cs...)
|
||||
return Init(reg, listener, rpcPath, promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg}), cs...)
|
||||
}
|
||||
|
||||
func RPCCall(name string, path string, code int) {
|
||||
rpcCounter.With(prometheus.Labels{"name": name, "path": path, "code": strconv.Itoa(code)}).Inc()
|
||||
}
|
||||
|
||||
func GetGrpcServerMetrics() *gp.ServerMetrics {
|
||||
if grpcMetrics == nil {
|
||||
grpcMetrics = gp.NewServerMetrics()
|
||||
grpcMetrics.EnableHandlingTimeHistogram()
|
||||
}
|
||||
return grpcMetrics
|
||||
}
|
||||
|
||||
func GetGrpcCusMetrics(registerName string, discovery *config.Discovery) []prometheus.Collector {
|
||||
switch registerName {
|
||||
case discovery.RpcService.MessageGateway:
|
||||
return []prometheus.Collector{OnlineUserGauge}
|
||||
case discovery.RpcService.Msg:
|
||||
return []prometheus.Collector{
|
||||
SingleChatMsgProcessSuccessCounter,
|
||||
SingleChatMsgProcessFailedCounter,
|
||||
GroupChatMsgProcessSuccessCounter,
|
||||
GroupChatMsgProcessFailedCounter,
|
||||
}
|
||||
case discovery.RpcService.Push:
|
||||
return []prometheus.Collector{
|
||||
MsgOfflinePushFailedCounter,
|
||||
MsgLoneTimePushCounter,
|
||||
}
|
||||
case discovery.RpcService.Auth:
|
||||
return []prometheus.Collector{UserLoginCounter}
|
||||
case discovery.RpcService.User:
|
||||
return []prometheus.Collector{UserRegisterCounter}
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
68
pkg/common/prommetrics/transfer.go
Normal file
@@ -0,0 +1,68 @@
|
||||
// Copyright © 2023 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prommetrics
|
||||
|
||||
import (
|
||||
"net"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
)
|
||||
|
||||
var (
|
||||
MsgInsertRedisSuccessCounter = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Name: "msg_insert_redis_success_total",
|
||||
Help: "The number of successful insert msg to redis",
|
||||
})
|
||||
MsgInsertRedisFailedCounter = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Name: "msg_insert_redis_failed_total",
|
||||
Help: "The number of failed insert msg to redis",
|
||||
})
|
||||
MsgInsertMongoSuccessCounter = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Name: "msg_insert_mongo_success_total",
|
||||
Help: "The number of successful insert msg to mongo",
|
||||
})
|
||||
MsgInsertMongoFailedCounter = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Name: "msg_insert_mongo_failed_total",
|
||||
Help: "The number of failed insert msg to mongo",
|
||||
})
|
||||
SeqSetFailedCounter = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Name: "seq_set_failed_total",
|
||||
Help: "The number of failed set seq",
|
||||
})
|
||||
)
|
||||
|
||||
func RegistryTransfer() {
|
||||
registry.MustRegister(
|
||||
MsgInsertRedisSuccessCounter,
|
||||
MsgInsertRedisFailedCounter,
|
||||
MsgInsertMongoSuccessCounter,
|
||||
MsgInsertMongoFailedCounter,
|
||||
SeqSetFailedCounter,
|
||||
)
|
||||
}
|
||||
|
||||
func TransferInit(listener net.Listener) error {
|
||||
reg := prometheus.NewRegistry()
|
||||
cs := append(
|
||||
baseCollector,
|
||||
MsgInsertRedisSuccessCounter,
|
||||
MsgInsertRedisFailedCounter,
|
||||
MsgInsertMongoSuccessCounter,
|
||||
MsgInsertMongoFailedCounter,
|
||||
SeqSetFailedCounter,
|
||||
)
|
||||
return Init(reg, listener, commonPath, promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg}), cs...)
|
||||
}
|
||||
104
pkg/common/servererrs/code.go
Normal file
@@ -0,0 +1,104 @@
|
||||
// Copyright © 2023 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package servererrs
|
||||
|
||||
// UnknownCode represents the error code when code is not parsed or parsed code equals 0.
|
||||
const UnknownCode = 1000
|
||||
|
||||
// Error codes for various error scenarios.
|
||||
const (
|
||||
FormattingError = 10001 // Error in formatting
|
||||
HasRegistered = 10002 // user has already registered
|
||||
NotRegistered = 10003 // user is not registered
|
||||
PasswordErr = 10004 // Password error
|
||||
GetIMTokenErr = 10005 // Error in getting IM token
|
||||
RepeatSendCode = 10006 // Repeat sending code
|
||||
MailSendCodeErr = 10007 // Error in sending code via email
|
||||
SmsSendCodeErr = 10008 // Error in sending code via SMS
|
||||
CodeInvalidOrExpired = 10009 // Code is invalid or expired
|
||||
RegisterFailed = 10010 // Registration failed
|
||||
ResetPasswordFailed = 10011 // Resetting password failed
|
||||
RegisterLimit = 10012 // Registration limit exceeded
|
||||
LoginLimit = 10013 // Login limit exceeded
|
||||
InvitationError = 10014 // Error in invitation
|
||||
)
|
||||
|
||||
// General error codes.
|
||||
const (
|
||||
NoError = 0 // No error
|
||||
|
||||
DatabaseError = 90002 // Database error (redis/mysql, etc.)
|
||||
NetworkError = 90004 // Network error
|
||||
DataError = 90007 // Data error
|
||||
|
||||
CallbackError = 80000
|
||||
|
||||
// General error codes.
|
||||
ServerInternalError = 500 // Server internal error
|
||||
ArgsError = 1001 // Input parameter error
|
||||
NoPermissionError = 1002 // Insufficient permission
|
||||
DuplicateKeyError = 1003
|
||||
RecordNotFoundError = 1004 // Record does not exist
|
||||
SecretNotChangedError = 1050 // secret not changed
|
||||
|
||||
// Account error codes.
|
||||
UserIDNotFoundError = 1101 // UserID does not exist or is not registered
|
||||
RegisteredAlreadyError = 1102 // user is already registered
|
||||
|
||||
// Group error codes.
|
||||
GroupIDNotFoundError = 1201 // GroupID does not exist
|
||||
GroupIDExisted = 1202 // GroupID already exists
|
||||
NotInGroupYetError = 1203 // Not in the group yet
|
||||
DismissedAlreadyError = 1204 // Group has already been dismissed
|
||||
GroupTypeNotSupport = 1205
|
||||
GroupRequestHandled = 1206
|
||||
|
||||
// Relationship error codes.
|
||||
CanNotAddYourselfError = 1301 // Cannot add yourself as a friend
|
||||
BlockedByPeer = 1302 // Blocked by the peer
|
||||
NotPeersFriend = 1303 // Not the peer's friend
|
||||
RelationshipAlreadyError = 1304 // Already in a friend relationship
|
||||
|
||||
// Message error codes.
|
||||
MessageHasReadDisable = 1401
|
||||
MutedInGroup = 1402 // Member muted in the group
|
||||
MutedGroup = 1403 // Group is muted
|
||||
MsgAlreadyRevoke = 1404 // Message already revoked
|
||||
MessageContainsLink = 1405 // Message contains link (not allowed for userType=0)
|
||||
ImageContainsQRCode = 1406 // Image contains QR code (not allowed for userType=0)
|
||||
|
||||
// Token error codes.
|
||||
TokenExpiredError = 1501
|
||||
TokenInvalidError = 1502
|
||||
TokenMalformedError = 1503
|
||||
TokenNotValidYetError = 1504
|
||||
TokenUnknownError = 1505
|
||||
TokenKickedError = 1506
|
||||
TokenNotExistError = 1507
|
||||
|
||||
// Long connection gateway error codes.
|
||||
ConnOverMaxNumLimit = 1601
|
||||
ConnArgsErr = 1602
|
||||
PushMsgErr = 1603
|
||||
IOSBackgroundPushErr = 1604
|
||||
|
||||
// S3 error codes.
|
||||
FileUploadedExpiredError = 1701 // Upload expired
|
||||
|
||||
// Red packet error codes.
|
||||
RedPacketFinishedError = 1801 // Red packet has been finished
|
||||
RedPacketExpiredError = 1802 // Red packet has expired
|
||||
RedPacketAlreadyReceivedError = 1803 // User has already received this red packet
|
||||
)
|
||||
1
pkg/common/servererrs/doc.go
Normal file
@@ -0,0 +1 @@
|
||||
package servererrs // import "git.imall.cloud/openim/open-im-server-deploy/pkg/common/servererrs"
|
||||
77
pkg/common/servererrs/predefine.go
Normal file
@@ -0,0 +1,77 @@
|
||||
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package servererrs

import "github.com/openimsdk/tools/errs"

var (
	ErrSecretNotChanged = errs.NewCodeError(SecretNotChangedError, "secret not changed, please change secret in config/share.yml for security reasons")

	ErrDatabase         = errs.NewCodeError(DatabaseError, "DatabaseError")
	ErrNetwork          = errs.NewCodeError(NetworkError, "NetworkError")
	ErrCallback         = errs.NewCodeError(CallbackError, "CallbackError")
	ErrCallbackContinue = errs.NewCodeError(CallbackError, "ErrCallbackContinue")

	ErrInternalServer = errs.NewCodeError(ServerInternalError, "ServerInternalError")
	ErrArgs           = errs.NewCodeError(ArgsError, "ArgsError")
	ErrNoPermission   = errs.NewCodeError(NoPermissionError, "NoPermissionError")
	ErrDuplicateKey   = errs.NewCodeError(DuplicateKeyError, "DuplicateKeyError")
	ErrRecordNotFound = errs.NewCodeError(RecordNotFoundError, "RecordNotFoundError")

	ErrUserIDNotFound  = errs.NewCodeError(UserIDNotFoundError, "UserIDNotFoundError")
	ErrGroupIDNotFound = errs.NewCodeError(GroupIDNotFoundError, "GroupIDNotFoundError")
	ErrGroupIDExisted  = errs.NewCodeError(GroupIDExisted, "GroupIDExisted")

	ErrNotInGroupYet       = errs.NewCodeError(NotInGroupYetError, "NotInGroupYetError")
	ErrDismissedAlready    = errs.NewCodeError(DismissedAlreadyError, "DismissedAlreadyError")
	ErrRegisteredAlready   = errs.NewCodeError(RegisteredAlreadyError, "RegisteredAlreadyError")
	ErrGroupTypeNotSupport = errs.NewCodeError(GroupTypeNotSupport, "")
	ErrGroupRequestHandled = errs.NewCodeError(GroupRequestHandled, "GroupRequestHandled")

	ErrData             = errs.NewCodeError(DataError, "DataError")
	ErrTokenExpired     = errs.NewCodeError(TokenExpiredError, "TokenExpiredError")
	ErrTokenInvalid     = errs.NewCodeError(TokenInvalidError, "TokenInvalidError")
	ErrTokenMalformed   = errs.NewCodeError(TokenMalformedError, "TokenMalformedError")
	ErrTokenNotValidYet = errs.NewCodeError(TokenNotValidYetError, "TokenNotValidYetError")
	ErrTokenUnknown     = errs.NewCodeError(TokenUnknownError, "TokenUnknownError")
	ErrTokenKicked      = errs.NewCodeError(TokenKickedError, "TokenKickedError")
	ErrTokenNotExist    = errs.NewCodeError(TokenNotExistError, "TokenNotExistError")

	ErrMessageHasReadDisable = errs.NewCodeError(MessageHasReadDisable, "MessageHasReadDisable")

	ErrCanNotAddYourself   = errs.NewCodeError(CanNotAddYourselfError, "CanNotAddYourselfError")
	ErrBlockedByPeer       = errs.NewCodeError(BlockedByPeer, "BlockedByPeer")
	ErrNotPeersFriend      = errs.NewCodeError(NotPeersFriend, "NotPeersFriend")
	ErrRelationshipAlready = errs.NewCodeError(RelationshipAlreadyError, "RelationshipAlreadyError")

	ErrMutedInGroup        = errs.NewCodeError(MutedInGroup, "MutedInGroup")
	ErrMutedGroup          = errs.NewCodeError(MutedGroup, "MutedGroup")
	ErrMsgAlreadyRevoke    = errs.NewCodeError(MsgAlreadyRevoke, "MsgAlreadyRevoke")
	ErrMessageContainsLink = errs.NewCodeError(MessageContainsLink, "MessageContainsLink")
	ErrImageContainsQRCode = errs.NewCodeError(ImageContainsQRCode, "ImageContainsQRCode")

	ErrConnOverMaxNumLimit = errs.NewCodeError(ConnOverMaxNumLimit, "ConnOverMaxNumLimit")

	ErrConnArgsErr          = errs.NewCodeError(ConnArgsErr, "args err, need token, sendID, platformID")
	ErrPushMsgErr           = errs.NewCodeError(PushMsgErr, "push msg err")
	ErrIOSBackgroundPushErr = errs.NewCodeError(IOSBackgroundPushErr, "ios background push err")

	ErrFileUploadedExpired = errs.NewCodeError(FileUploadedExpiredError, "FileUploadedExpiredError")

	// Red packet errors.
	ErrRedPacketFinished        = errs.NewCodeError(RedPacketFinishedError, "red packet has been finished")
	ErrRedPacketExpired         = errs.NewCodeError(RedPacketExpiredError, "red packet has expired")
	ErrRedPacketAlreadyReceived = errs.NewCodeError(RedPacketAlreadyReceivedError, "you have already received this red packet")
)
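A minimal usage sketch (assumed call site, not part of this commit): a service method returns one of the predefined errors above so API callers receive the stable numeric code rather than a raw storage error. The groupServer type and the s.db.TakeGroup call are hypothetical placeholders; only servererrs.ErrGroupIDNotFound comes from this file.

func (s *groupServer) getGroupInfo(ctx context.Context, groupID string) (*model.Group, error) {
	group, err := s.db.TakeGroup(ctx, groupID) // hypothetical storage lookup
	if err != nil {
		// Clients see code 1201 (GroupIDNotFoundError) instead of a database error.
		return nil, servererrs.ErrGroupIDNotFound
	}
	return group, nil
}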
58
pkg/common/servererrs/relation.go
Normal file
@@ -0,0 +1,58 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package servererrs

import "github.com/openimsdk/tools/errs"

var Relation = &relation{m: make(map[int]map[int]struct{})}

func init() {
	Relation.Add(errs.RecordNotFoundError, UserIDNotFoundError)
	Relation.Add(errs.RecordNotFoundError, GroupIDNotFoundError)
	Relation.Add(errs.DuplicateKeyError, GroupIDExisted)
}

type relation struct {
	m map[int]map[int]struct{}
}

func (r *relation) Add(codes ...int) {
	if len(codes) < 2 {
		panic("codes length must be at least 2")
	}
	for i := 1; i < len(codes); i++ {
		parent := codes[i-1]
		s, ok := r.m[parent]
		if !ok {
			s = make(map[int]struct{})
			r.m[parent] = s
		}
		for _, code := range codes[i:] {
			s[code] = struct{}{}
		}
	}
}

func (r *relation) Is(parent, child int) bool {
	if parent == child {
		return true
	}
	s, ok := r.m[parent]
	if !ok {
		return false
	}
	_, ok = s[child]
	return ok
}
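A sketch of how the relation table above might be consulted (the caller is assumed, not part of this commit): code that only knows the generic tools-level code can treat the more specific server code as equivalent.

// specificCode is e.g. servererrs.UserIDNotFoundError returned by a user lookup.
func isRecordNotFound(specificCode int) bool {
	// True for errs.RecordNotFoundError itself and for any child registered in init(),
	// such as UserIDNotFoundError or GroupIDNotFoundError.
	return servererrs.Relation.Is(errs.RecordNotFoundError, specificCode)
}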
107
pkg/common/startrpc/circuitbreaker.go
Normal file
@@ -0,0 +1,107 @@
package startrpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/openimsdk/tools/log"
|
||||
"github.com/openimsdk/tools/stability/circuitbreaker"
|
||||
"github.com/openimsdk/tools/stability/circuitbreaker/sre"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
type CircuitBreaker struct {
|
||||
Enable bool `yaml:"enable"`
|
||||
Success float64 `yaml:"success"` // success rate threshold (0.0-1.0)
|
||||
Request int64 `yaml:"request"` // request threshold
|
||||
Bucket int `yaml:"bucket"` // number of buckets
|
||||
Window time.Duration `yaml:"window"` // time window for statistics
|
||||
}
|
||||
|
||||
func NewCircuitBreaker(config *CircuitBreaker) circuitbreaker.CircuitBreaker {
|
||||
if !config.Enable {
|
||||
return nil
|
||||
}
|
||||
|
||||
return sre.NewSREBraker(
|
||||
sre.WithWindow(config.Window),
|
||||
sre.WithBucket(config.Bucket),
|
||||
sre.WithSuccess(config.Success),
|
||||
sre.WithRequest(config.Request),
|
||||
)
|
||||
}
|
||||
|
||||
func UnaryCircuitBreakerInterceptor(breaker circuitbreaker.CircuitBreaker) grpc.ServerOption {
|
||||
if breaker == nil {
|
||||
return grpc.ChainUnaryInterceptor(func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) {
|
||||
return handler(ctx, req)
|
||||
})
|
||||
}
|
||||
|
||||
return grpc.ChainUnaryInterceptor(func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) {
|
||||
if err := breaker.Allow(); err != nil {
|
||||
log.ZWarn(ctx, "rpc circuit breaker open", err, "method", info.FullMethod)
|
||||
return nil, status.Error(codes.Unavailable, "service unavailable due to circuit breaker")
|
||||
}
|
||||
|
||||
resp, err = handler(ctx, req)
|
||||
|
||||
if err != nil {
|
||||
if st, ok := status.FromError(err); ok {
|
||||
switch st.Code() {
|
||||
case codes.OK:
|
||||
breaker.MarkSuccess()
|
||||
case codes.InvalidArgument, codes.NotFound, codes.AlreadyExists, codes.PermissionDenied:
|
||||
breaker.MarkSuccess()
|
||||
default:
|
||||
breaker.MarkFailed()
|
||||
}
|
||||
} else {
|
||||
breaker.MarkFailed()
|
||||
}
|
||||
} else {
|
||||
breaker.MarkSuccess()
|
||||
}
|
||||
|
||||
return resp, err
|
||||
|
||||
})
|
||||
}
|
||||
|
||||
func StreamCircuitBreakerInterceptor(breaker circuitbreaker.CircuitBreaker) grpc.ServerOption {
|
||||
if breaker == nil {
|
||||
return grpc.ChainStreamInterceptor(func(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
|
||||
return handler(srv, ss)
|
||||
})
|
||||
}
|
||||
|
||||
return grpc.ChainStreamInterceptor(func(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
|
||||
if err := breaker.Allow(); err != nil {
|
||||
log.ZWarn(ss.Context(), "rpc circuit breaker open", err, "method", info.FullMethod)
|
||||
return status.Error(codes.Unavailable, "service unavailable due to circuit breaker")
|
||||
}
|
||||
|
||||
err := handler(srv, ss)
|
||||
|
||||
if err != nil {
|
||||
if st, ok := status.FromError(err); ok {
|
||||
switch st.Code() {
|
||||
case codes.OK:
|
||||
breaker.MarkSuccess()
|
||||
case codes.InvalidArgument, codes.NotFound, codes.AlreadyExists, codes.PermissionDenied:
|
||||
breaker.MarkSuccess()
|
||||
default:
|
||||
breaker.MarkFailed()
|
||||
}
|
||||
} else {
|
||||
breaker.MarkFailed()
|
||||
}
|
||||
} else {
|
||||
breaker.MarkSuccess()
|
||||
}
|
||||
|
||||
return err
|
||||
})
|
||||
}
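A minimal wiring sketch for these helpers (threshold values are illustrative, not the project defaults): build one breaker per server and chain both interceptors into grpc.NewServer, in the same startrpc package.

func newServerWithBreaker() *grpc.Server {
	breaker := NewCircuitBreaker(&CircuitBreaker{
		Enable:  true,
		Success: 0.6,             // trip once the success rate falls below 60%
		Request: 100,             // evaluate only after 100 requests in the window
		Bucket:  10,
		Window:  3 * time.Second,
	})
	return grpc.NewServer(
		UnaryCircuitBreakerInterceptor(breaker),
		StreamCircuitBreakerInterceptor(breaker),
	)
}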
15
pkg/common/startrpc/mw.go
Normal file
@@ -0,0 +1,15 @@
package startrpc

import (
	"context"

	"git.imall.cloud/openim/open-im-server-deploy/pkg/authverify"
	"google.golang.org/grpc"
)

func grpcServerIMAdminUserID(imAdminUserID []string) grpc.ServerOption {
	return grpc.ChainUnaryInterceptor(func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) {
		ctx = authverify.WithIMAdminUserIDs(ctx, imAdminUserID)
		return handler(ctx, req)
	})
}
70
pkg/common/startrpc/ratelimit.go
Normal file
@@ -0,0 +1,70 @@
package startrpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/openimsdk/tools/log"
|
||||
"github.com/openimsdk/tools/stability/ratelimit"
|
||||
"github.com/openimsdk/tools/stability/ratelimit/bbr"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
type RateLimiter struct {
|
||||
Enable bool
|
||||
Window time.Duration
|
||||
Bucket int
|
||||
CPUThreshold int64
|
||||
}
|
||||
|
||||
func NewRateLimiter(config *RateLimiter) ratelimit.Limiter {
|
||||
if !config.Enable {
|
||||
return nil
|
||||
}
|
||||
|
||||
return bbr.NewBBRLimiter(
|
||||
bbr.WithWindow(config.Window),
|
||||
bbr.WithBucket(config.Bucket),
|
||||
bbr.WithCPUThreshold(config.CPUThreshold),
|
||||
)
|
||||
}
|
||||
|
||||
func UnaryRateLimitInterceptor(limiter ratelimit.Limiter) grpc.ServerOption {
|
||||
if limiter == nil {
|
||||
return grpc.ChainUnaryInterceptor(func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) {
|
||||
return handler(ctx, req)
|
||||
})
|
||||
}
|
||||
|
||||
return grpc.ChainUnaryInterceptor(func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) {
|
||||
done, err := limiter.Allow()
|
||||
if err != nil {
|
||||
log.ZWarn(ctx, "rpc rate limited", err, "method", info.FullMethod)
|
||||
return nil, status.Errorf(codes.ResourceExhausted, "rpc request rate limit exceeded: %v, please try again later", err)
|
||||
}
|
||||
|
||||
defer done(ratelimit.DoneInfo{})
|
||||
return handler(ctx, req)
|
||||
})
|
||||
}
|
||||
|
||||
func StreamRateLimitInterceptor(limiter ratelimit.Limiter) grpc.ServerOption {
|
||||
if limiter == nil {
|
||||
return grpc.ChainStreamInterceptor(func(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
|
||||
return handler(srv, ss)
|
||||
})
|
||||
}
|
||||
|
||||
return grpc.ChainStreamInterceptor(func(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
|
||||
done, err := limiter.Allow()
|
||||
if err != nil {
|
||||
log.ZWarn(ss.Context(), "rpc rate limited", err, "method", info.FullMethod)
|
||||
return status.Errorf(codes.ResourceExhausted, "rpc request rate limit exceeded: %v, please try again later", err)
|
||||
}
|
||||
defer done(ratelimit.DoneInfo{})
|
||||
|
||||
return handler(srv, ss)
|
||||
})
|
||||
}
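On the caller side a rejected request surfaces as codes.ResourceExhausted; a hedged retry sketch (the client stub and the backoff value are assumptions, not part of this commit):

resp, err := userClient.GetUser(ctx, req) // hypothetical generated stub call
if status.Code(err) == codes.ResourceExhausted {
	// The BBR limiter dropped the call; back off briefly instead of retrying immediately.
	time.Sleep(200 * time.Millisecond)
	resp, err = userClient.GetUser(ctx, req)
}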
321
pkg/common/startrpc/start.go
Normal file
@@ -0,0 +1,321 @@
// Copyright © 2023 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package startrpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"os/signal"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
conf "git.imall.cloud/openim/open-im-server-deploy/pkg/common/config"
|
||||
"github.com/openimsdk/tools/utils/datautil"
|
||||
"github.com/openimsdk/tools/utils/jsonutil"
|
||||
"github.com/openimsdk/tools/utils/network"
|
||||
"google.golang.org/grpc/status"
|
||||
|
||||
kdisc "git.imall.cloud/openim/open-im-server-deploy/pkg/common/discovery"
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/prommetrics"
|
||||
"github.com/openimsdk/tools/discovery"
|
||||
"github.com/openimsdk/tools/errs"
|
||||
"github.com/openimsdk/tools/log"
|
||||
grpccli "github.com/openimsdk/tools/mw/grpc/client"
|
||||
grpcsrv "github.com/openimsdk/tools/mw/grpc/server"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
)
|
||||
|
||||
func init() {
|
||||
prommetrics.RegistryAll()
|
||||
}
|
||||
|
||||
func Start[T any](ctx context.Context, disc *conf.Discovery, circuitBreakerConfig *conf.CircuitBreaker, rateLimiterConfig *conf.RateLimiter, prometheusConfig *conf.Prometheus, listenIP,
|
||||
registerIP string, autoSetPorts bool, rpcPorts []int, index int, rpcRegisterName string, notification *conf.Notification, config T,
|
||||
watchConfigNames []string, watchServiceNames []string,
|
||||
rpcFn func(ctx context.Context, config T, client discovery.SvcDiscoveryRegistry, server grpc.ServiceRegistrar) error,
|
||||
options ...grpc.ServerOption) error {
|
||||
|
||||
if notification != nil {
|
||||
conf.InitNotification(notification)
|
||||
}
|
||||
|
||||
maxRequestBody := getConfigRpcMaxRequestBody(reflect.ValueOf(config))
|
||||
shareConfig := getConfigShare(reflect.ValueOf(config))
|
||||
|
||||
log.ZDebug(ctx, "rpc start", "rpcMaxRequestBody", maxRequestBody, "rpcRegisterName", rpcRegisterName, "registerIP", registerIP, "listenIP", listenIP)
|
||||
|
||||
options = append(options,
|
||||
grpcsrv.GrpcServerMetadataContext(),
|
||||
grpcsrv.GrpcServerErrorConvert(),
|
||||
grpcsrv.GrpcServerLogger(),
|
||||
grpcsrv.GrpcServerRequestValidate(),
|
||||
grpcsrv.GrpcServerPanicCapture(),
|
||||
)
|
||||
if shareConfig != nil && len(shareConfig.IMAdminUser.UserIDs) > 0 {
|
||||
options = append(options, grpcServerIMAdminUserID(shareConfig.IMAdminUser.UserIDs))
|
||||
}
|
||||
var clientOptions []grpc.DialOption
|
||||
if maxRequestBody != nil {
|
||||
if maxRequestBody.RequestMaxBodySize > 0 {
|
||||
options = append(options, grpc.MaxRecvMsgSize(maxRequestBody.RequestMaxBodySize))
|
||||
clientOptions = append(clientOptions, grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(maxRequestBody.RequestMaxBodySize)))
|
||||
}
|
||||
if maxRequestBody.ResponseMaxBodySize > 0 {
|
||||
options = append(options, grpc.MaxSendMsgSize(maxRequestBody.ResponseMaxBodySize))
|
||||
clientOptions = append(clientOptions, grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(maxRequestBody.ResponseMaxBodySize)))
|
||||
}
|
||||
}
|
||||
|
||||
if circuitBreakerConfig != nil && circuitBreakerConfig.Enable {
|
||||
cb := &CircuitBreaker{
|
||||
Enable: circuitBreakerConfig.Enable,
|
||||
Success: circuitBreakerConfig.Success,
|
||||
Request: circuitBreakerConfig.Request,
|
||||
Bucket: circuitBreakerConfig.Bucket,
|
||||
Window: circuitBreakerConfig.Window,
|
||||
}
|
||||
|
||||
breaker := NewCircuitBreaker(cb)
|
||||
|
||||
options = append(options,
|
||||
UnaryCircuitBreakerInterceptor(breaker),
|
||||
StreamCircuitBreakerInterceptor(breaker),
|
||||
)
|
||||
|
||||
log.ZInfo(ctx, "RPC circuit breaker enabled",
|
||||
"service", rpcRegisterName,
|
||||
"window", circuitBreakerConfig.Window,
|
||||
"bucket", circuitBreakerConfig.Bucket,
|
||||
"success", circuitBreakerConfig.Success,
|
||||
"requestThreshold", circuitBreakerConfig.Request)
|
||||
}
|
||||
|
||||
if rateLimiterConfig != nil && rateLimiterConfig.Enable {
|
||||
rl := &RateLimiter{
|
||||
Enable: rateLimiterConfig.Enable,
|
||||
Window: rateLimiterConfig.Window,
|
||||
Bucket: rateLimiterConfig.Bucket,
|
||||
CPUThreshold: rateLimiterConfig.CPUThreshold,
|
||||
}
|
||||
|
||||
limiter := NewRateLimiter(rl)
|
||||
|
||||
options = append(options,
|
||||
UnaryRateLimitInterceptor(limiter),
|
||||
StreamRateLimitInterceptor(limiter),
|
||||
)
|
||||
|
||||
log.ZInfo(ctx, "RPC rate limiter enabled",
|
||||
"service", rpcRegisterName,
|
||||
"window", rateLimiterConfig.Window,
|
||||
"bucket", rateLimiterConfig.Bucket,
|
||||
"cpuThreshold", rateLimiterConfig.CPUThreshold)
|
||||
}
|
||||
|
||||
registerIP, err := network.GetRpcRegisterIP(registerIP)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var prometheusListenAddr string
|
||||
if autoSetPorts {
|
||||
prometheusListenAddr = net.JoinHostPort(listenIP, "0")
|
||||
} else {
|
||||
prometheusPort, err := datautil.GetElemByIndex(prometheusConfig.Ports, index)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
prometheusListenAddr = net.JoinHostPort(listenIP, strconv.Itoa(prometheusPort))
|
||||
}
|
||||
|
||||
watchConfigNames = append(watchConfigNames, conf.LogConfigFileName)
|
||||
|
||||
client, err := kdisc.NewDiscoveryRegister(disc, watchServiceNames)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer client.Close()
|
||||
client.AddOption(
|
||||
grpc.WithTransportCredentials(insecure.NewCredentials()),
|
||||
grpc.WithDefaultServiceConfig(fmt.Sprintf(`{"LoadBalancingPolicy": "%s"}`, "round_robin")),
|
||||
|
||||
grpccli.GrpcClientLogger(),
|
||||
grpccli.GrpcClientContext(),
|
||||
grpccli.GrpcClientErrorConvert(),
|
||||
)
|
||||
if len(clientOptions) > 0 {
|
||||
client.AddOption(clientOptions...)
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancelCause(ctx)
|
||||
|
||||
go func() {
|
||||
sigs := make(chan os.Signal, 1)
|
||||
signal.Notify(sigs, syscall.SIGTERM, syscall.SIGINT, syscall.SIGKILL)
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case val := <-sigs:
|
||||
log.ZDebug(ctx, "recv signal", "signal", val.String())
|
||||
cancel(fmt.Errorf("signal %s", val.String()))
|
||||
}
|
||||
}()
|
||||
|
||||
if prometheusListenAddr != "" {
|
||||
options = append(
|
||||
options,
|
||||
prommetricsUnaryInterceptor(rpcRegisterName),
|
||||
prommetricsStreamInterceptor(rpcRegisterName),
|
||||
)
|
||||
prometheusListener, prometheusPort, err := listenTCP(prometheusListenAddr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.ZDebug(ctx, "prometheus start", "addr", prometheusListener.Addr(), "rpcRegisterName", rpcRegisterName)
|
||||
target, err := jsonutil.JsonMarshal(prommetrics.BuildDefaultTarget(registerIP, prometheusPort))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if autoSetPorts {
|
||||
if err = client.SetWithLease(ctx, prommetrics.BuildDiscoveryKey(rpcRegisterName, index), target, prommetrics.TTL); err != nil {
|
||||
if !errors.Is(err, discovery.ErrNotSupported) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
go func() {
|
||||
err := prommetrics.Start(prometheusListener)
|
||||
if err == nil {
|
||||
err = fmt.Errorf("listener done")
|
||||
}
|
||||
cancel(fmt.Errorf("prommetrics %s %w", rpcRegisterName, err))
|
||||
}()
|
||||
}
|
||||
|
||||
var (
|
||||
rpcServer *grpc.Server
|
||||
rpcGracefulStop chan struct{}
|
||||
)
|
||||
|
||||
onGrpcServiceRegistrar := func(desc *grpc.ServiceDesc, impl any) {
|
||||
if rpcServer != nil {
|
||||
rpcServer.RegisterService(desc, impl)
|
||||
return
|
||||
}
|
||||
var rpcListenAddr string
|
||||
if autoSetPorts {
|
||||
rpcListenAddr = net.JoinHostPort(listenIP, "0")
|
||||
} else {
|
||||
rpcPort, err := datautil.GetElemByIndex(rpcPorts, index)
|
||||
if err != nil {
|
||||
cancel(fmt.Errorf("rpcPorts index out of range %s %w", rpcRegisterName, err))
|
||||
return
|
||||
}
|
||||
rpcListenAddr = net.JoinHostPort(listenIP, strconv.Itoa(rpcPort))
|
||||
}
|
||||
rpcListener, err := net.Listen("tcp", rpcListenAddr)
|
||||
if err != nil {
|
||||
cancel(fmt.Errorf("listen rpc %s %s %w", rpcRegisterName, rpcListenAddr, err))
|
||||
return
|
||||
}
|
||||
|
||||
rpcServer = grpc.NewServer(options...)
|
||||
rpcServer.RegisterService(desc, impl)
|
||||
rpcGracefulStop = make(chan struct{})
|
||||
rpcPort := rpcListener.Addr().(*net.TCPAddr).Port
|
||||
log.ZDebug(ctx, "rpc start register", "rpcRegisterName", rpcRegisterName, "registerIP", registerIP, "rpcPort", rpcPort)
|
||||
grpcOpt := grpc.WithTransportCredentials(insecure.NewCredentials())
|
||||
go func() {
|
||||
<-ctx.Done()
|
||||
rpcServer.GracefulStop()
|
||||
close(rpcGracefulStop)
|
||||
}()
|
||||
if err := client.Register(ctx, rpcRegisterName, registerIP, rpcListener.Addr().(*net.TCPAddr).Port, grpcOpt); err != nil {
|
||||
cancel(fmt.Errorf("rpc register %s %w", rpcRegisterName, err))
|
||||
return
|
||||
}
|
||||
|
||||
go func() {
|
||||
err := rpcServer.Serve(rpcListener)
|
||||
if err == nil {
|
||||
err = fmt.Errorf("serve end")
|
||||
}
|
||||
cancel(fmt.Errorf("rpc %s %w", rpcRegisterName, err))
|
||||
}()
|
||||
}
|
||||
|
||||
err = rpcFn(ctx, config, client, &grpcServiceRegistrar{onRegisterService: onGrpcServiceRegistrar})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
<-ctx.Done()
|
||||
log.ZDebug(ctx, "cmd wait done", "err", context.Cause(ctx))
|
||||
if rpcGracefulStop != nil {
|
||||
timeout := time.NewTimer(time.Second * 15)
|
||||
defer timeout.Stop()
|
||||
select {
|
||||
case <-timeout.C:
|
||||
log.ZWarn(ctx, "rpc graceful stop timeout", nil)
|
||||
case <-rpcGracefulStop:
|
||||
log.ZDebug(ctx, "rpc graceful stop done")
|
||||
}
|
||||
}
|
||||
return context.Cause(ctx)
|
||||
}
|
||||
|
||||
func listenTCP(addr string) (net.Listener, int, error) {
|
||||
listener, err := net.Listen("tcp", addr)
|
||||
if err != nil {
|
||||
return nil, 0, errs.WrapMsg(err, "listen err", "addr", addr)
|
||||
}
|
||||
return listener, listener.Addr().(*net.TCPAddr).Port, nil
|
||||
}
|
||||
|
||||
func prommetricsUnaryInterceptor(rpcRegisterName string) grpc.ServerOption {
|
||||
getCode := func(err error) int {
|
||||
if err == nil {
|
||||
return 0
|
||||
}
|
||||
rpcErr, ok := err.(interface{ GRPCStatus() *status.Status })
|
||||
if !ok {
|
||||
return -1
|
||||
}
|
||||
return int(rpcErr.GRPCStatus().Code())
|
||||
}
|
||||
return grpc.ChainUnaryInterceptor(func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
|
||||
resp, err := handler(ctx, req)
|
||||
prommetrics.RPCCall(rpcRegisterName, info.FullMethod, getCode(err))
|
||||
return resp, err
|
||||
})
|
||||
}
|
||||
|
||||
func prommetricsStreamInterceptor(rpcRegisterName string) grpc.ServerOption {
|
||||
return grpc.ChainStreamInterceptor()
|
||||
}
|
||||
|
||||
type grpcServiceRegistrar struct {
|
||||
onRegisterService func(desc *grpc.ServiceDesc, impl any)
|
||||
}
|
||||
|
||||
func (x *grpcServiceRegistrar) RegisterService(desc *grpc.ServiceDesc, impl any) {
|
||||
x.onRegisterService(desc, impl)
|
||||
}
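A sketch of the rpcFn callback that Start expects (the service and proto names are hypothetical): the callback only touches the grpc.ServiceRegistrar, so the TCP listener, server options, and discovery registration above are all created lazily on the first RegisterService call.

func startUserRPC(ctx context.Context, config *userConfig, client discovery.SvcDiscoveryRegistry, server grpc.ServiceRegistrar) error {
	pbuser.RegisterUserServer(server, &userServer{config: config}) // hypothetical generated registration
	return nil
}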
47
pkg/common/startrpc/tools.go
Normal file
@@ -0,0 +1,47 @@
package startrpc
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
|
||||
conf "git.imall.cloud/openim/open-im-server-deploy/pkg/common/config"
|
||||
)
|
||||
|
||||
func getConfig[T any](value reflect.Value) *T {
|
||||
for value.Kind() == reflect.Pointer {
|
||||
value = value.Elem()
|
||||
}
|
||||
if value.Kind() == reflect.Struct {
|
||||
num := value.NumField()
|
||||
for i := 0; i < num; i++ {
|
||||
field := value.Field(i)
|
||||
for field.Kind() == reflect.Pointer {
|
||||
field = field.Elem()
|
||||
}
|
||||
if field.Kind() == reflect.Struct {
|
||||
if elem, ok := field.Interface().(T); ok {
|
||||
return &elem
|
||||
}
|
||||
if elem := getConfig[T](field); elem != nil {
|
||||
return elem
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getConfigRpcMaxRequestBody(value reflect.Value) *conf.MaxRequestBody {
|
||||
return getConfig[conf.MaxRequestBody](value)
|
||||
}
|
||||
|
||||
func getConfigShare(value reflect.Value) *conf.Share {
|
||||
return getConfig[conf.Share](value)
|
||||
}
|
||||
|
||||
func getConfigRateLimiter(value reflect.Value) *conf.RateLimiter {
|
||||
return getConfig[conf.RateLimiter](value)
|
||||
}
|
||||
|
||||
func getConfigCircuitBreaker(value reflect.Value) *conf.CircuitBreaker {
|
||||
return getConfig[conf.CircuitBreaker](value)
|
||||
}
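A usage sketch for the reflective lookup above: any config struct that embeds conf.Share (directly or behind pointers) can be probed without the caller knowing its concrete type. The demo struct is illustrative only.

type demoConfig struct {
	Share conf.Share
	Extra int
}

func demoShareLookup() bool {
	share := getConfigShare(reflect.ValueOf(&demoConfig{}))
	// share is non-nil because demoConfig carries a conf.Share field.
	return share != nil && len(share.IMAdminUser.UserIDs) == 0
}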
17
pkg/common/storage/cache/batch_handler.go
vendored
Normal file
@@ -0,0 +1,17 @@
package cache

import (
	"context"
)

// BatchDeleter interface defines a set of methods for batch deleting cache and publishing deletion information.
type BatchDeleter interface {
	//ChainExecDel method is used for chain calls and must call Clone to prevent memory pollution.
	ChainExecDel(ctx context.Context) error
	//ExecDelWithKeys method directly takes keys for deletion.
	ExecDelWithKeys(ctx context.Context, keys []string) error
	//Clone method creates a copy of the BatchDeleter to avoid modifying the original object.
	Clone() BatchDeleter
	//AddKeys method adds keys to be deleted.
	AddKeys(keys ...string)
}
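A sketch of the intended call pattern (the concrete BatchDeleter comes from one of the cache implementations below): clone first so the shared instance's key list is never mutated, then queue keys and flush once.

func invalidateKeys(ctx context.Context, base BatchDeleter, keys ...string) error {
	d := base.Clone()  // work on a copy to avoid polluting the shared deleter
	d.AddKeys(keys...) // queue the cache keys to drop
	return d.ChainExecDel(ctx)
}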
27
pkg/common/storage/cache/black.go
vendored
Normal file
@@ -0,0 +1,27 @@
// Copyright © 2023 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
"context"
|
||||
)
|
||||
|
||||
type BlackCache interface {
|
||||
BatchDeleter
|
||||
CloneBlackCache() BlackCache
|
||||
GetBlackIDs(ctx context.Context, userID string) (blackIDs []string, err error)
|
||||
// del user's blackIDs msgCache, exec when a user's black list changed
|
||||
DelBlackIDs(ctx context.Context, userID string) BlackCache
|
||||
}
8
pkg/common/storage/cache/client_config.go
vendored
Normal file
@@ -0,0 +1,8 @@
package cache

import "context"

type ClientConfigCache interface {
	DeleteUserCache(ctx context.Context, userIDs []string) error
	GetUserConfig(ctx context.Context, userID string) (map[string]string, error)
}
65
pkg/common/storage/cache/conversation.go
vendored
Normal file
@@ -0,0 +1,65 @@
// Copyright © 2023 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
relationtb "git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/model"
|
||||
)
|
||||
|
||||
// arg fn will exec when no data in msgCache.
|
||||
type ConversationCache interface {
|
||||
BatchDeleter
|
||||
CloneConversationCache() ConversationCache
|
||||
// get user's conversationIDs from msgCache
|
||||
GetUserConversationIDs(ctx context.Context, ownerUserID string) ([]string, error)
|
||||
GetUserNotNotifyConversationIDs(ctx context.Context, userID string) ([]string, error)
|
||||
GetPinnedConversationIDs(ctx context.Context, userID string) ([]string, error)
|
||||
DelConversationIDs(userIDs ...string) ConversationCache
|
||||
|
||||
GetUserConversationIDsHash(ctx context.Context, ownerUserID string) (hash uint64, err error)
|
||||
DelUserConversationIDsHash(ownerUserIDs ...string) ConversationCache
|
||||
|
||||
// get one conversation from msgCache
|
||||
GetConversation(ctx context.Context, ownerUserID, conversationID string) (*relationtb.Conversation, error)
|
||||
DelConversations(ownerUserID string, conversationIDs ...string) ConversationCache
|
||||
DelUsersConversation(conversationID string, ownerUserIDs ...string) ConversationCache
|
||||
// get one conversation from msgCache
|
||||
GetConversations(ctx context.Context, ownerUserID string,
|
||||
conversationIDs []string) ([]*relationtb.Conversation, error)
|
||||
// get one user's all conversations from msgCache
|
||||
GetUserAllConversations(ctx context.Context, ownerUserID string) ([]*relationtb.Conversation, error)
|
||||
// get user conversation recv msg from msgCache
|
||||
GetUserRecvMsgOpt(ctx context.Context, ownerUserID, conversationID string) (opt int, err error)
|
||||
DelUserRecvMsgOpt(ownerUserID, conversationID string) ConversationCache
|
||||
// get one super group recv msg but do not notification userID list
|
||||
// GetSuperGroupRecvMsgNotNotifyUserIDs(ctx context.Context, groupID string) (userIDs []string, err error)
|
||||
DelSuperGroupRecvMsgNotNotifyUserIDs(groupID string) ConversationCache
|
||||
// get one super group recv msg but do not notification userID list hash
|
||||
// GetSuperGroupRecvMsgNotNotifyUserIDsHash(ctx context.Context, groupID string) (hash uint64, err error)
|
||||
DelSuperGroupRecvMsgNotNotifyUserIDsHash(groupID string) ConversationCache
|
||||
|
||||
// GetUserAllHasReadSeqs(ctx context.Context, ownerUserID string) (map[string]int64, error)
|
||||
DelUserAllHasReadSeqs(ownerUserID string, conversationIDs ...string) ConversationCache
|
||||
|
||||
GetConversationNotReceiveMessageUserIDs(ctx context.Context, conversationID string) ([]string, error)
|
||||
DelConversationNotReceiveMessageUserIDs(conversationIDs ...string) ConversationCache
|
||||
DelConversationNotNotifyMessageUserIDs(userIDs ...string) ConversationCache
|
||||
DelUserPinnedConversations(userIDs ...string) ConversationCache
|
||||
DelConversationVersionUserIDs(userIDs ...string) ConversationCache
|
||||
|
||||
FindMaxConversationUserVersion(ctx context.Context, userID string) (*relationtb.VersionLog, error)
|
||||
}
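The Del* methods return the cache itself, so invalidation after a write is usually expressed as one chain flushed by ChainExecDel; a sketch using only methods from the interface above (IDs are illustrative):

func invalidateConversation(ctx context.Context, c ConversationCache, ownerUserID, conversationID string) error {
	return c.CloneConversationCache().
		DelConversationIDs(ownerUserID).
		DelUserConversationIDsHash(ownerUserID).
		DelConversations(ownerUserID, conversationID).
		ChainExecDel(ctx)
}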
15
pkg/common/storage/cache/doc.go
vendored
Normal file
@@ -0,0 +1,15 @@
// Copyright © 2024 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package cache // import "git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache"
48
pkg/common/storage/cache/friend.go
vendored
Normal file
@@ -0,0 +1,48 @@
// Copyright © 2023 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
relationtb "git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/model"
|
||||
)
|
||||
|
||||
// FriendCache is an interface for caching friend-related data.
|
||||
type FriendCache interface {
|
||||
BatchDeleter
|
||||
CloneFriendCache() FriendCache
|
||||
GetFriendIDs(ctx context.Context, ownerUserID string) (friendIDs []string, err error)
|
||||
// Called when friendID list changed
|
||||
DelFriendIDs(ownerUserID ...string) FriendCache
|
||||
// Get single friendInfo from the cache
|
||||
GetFriend(ctx context.Context, ownerUserID, friendUserID string) (friend *relationtb.Friend, err error)
|
||||
// Delete friend when friend info changed
|
||||
DelFriend(ownerUserID, friendUserID string) FriendCache
|
||||
// Delete friends when friends' info changed
|
||||
DelFriends(ownerUserID string, friendUserIDs []string) FriendCache
|
||||
|
||||
DelOwner(friendUserID string, ownerUserIDs []string) FriendCache
|
||||
|
||||
DelMaxFriendVersion(ownerUserIDs ...string) FriendCache
|
||||
|
||||
//DelSortFriendUserIDs(ownerUserIDs ...string) FriendCache
|
||||
|
||||
//FindSortFriendUserIDs(ctx context.Context, ownerUserID string) ([]string, error)
|
||||
|
||||
//FindFriendIncrVersion(ctx context.Context, ownerUserID string, version uint, limit int) (*relationtb.VersionLog, error)
|
||||
|
||||
FindMaxFriendVersion(ctx context.Context, ownerUserID string) (*relationtb.VersionLog, error)
|
||||
}
70
pkg/common/storage/cache/group.go
vendored
Normal file
@@ -0,0 +1,70 @@
// Copyright © 2023 OpenIM. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/common"
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/model"
|
||||
)
|
||||
|
||||
type GroupHash interface {
|
||||
GetGroupHash(ctx context.Context, groupID string) (uint64, error)
|
||||
}
|
||||
|
||||
type GroupCache interface {
|
||||
BatchDeleter
|
||||
CloneGroupCache() GroupCache
|
||||
GetGroupsInfo(ctx context.Context, groupIDs []string) (groups []*model.Group, err error)
|
||||
GetGroupInfo(ctx context.Context, groupID string) (group *model.Group, err error)
|
||||
DelGroupsInfo(groupIDs ...string) GroupCache
|
||||
|
||||
GetGroupMembersHash(ctx context.Context, groupID string) (hashCode uint64, err error)
|
||||
GetGroupMemberHashMap(ctx context.Context, groupIDs []string) (map[string]*common.GroupSimpleUserID, error)
|
||||
DelGroupMembersHash(groupID string) GroupCache
|
||||
|
||||
GetGroupMemberIDs(ctx context.Context, groupID string) (groupMemberIDs []string, err error)
|
||||
|
||||
DelGroupMemberIDs(groupID string) GroupCache
|
||||
|
||||
GetJoinedGroupIDs(ctx context.Context, userID string) (joinedGroupIDs []string, err error)
|
||||
DelJoinedGroupID(userID ...string) GroupCache
|
||||
|
||||
GetGroupMemberInfo(ctx context.Context, groupID, userID string) (groupMember *model.GroupMember, err error)
|
||||
GetGroupMembersInfo(ctx context.Context, groupID string, userID []string) (groupMembers []*model.GroupMember, err error)
|
||||
GetAllGroupMembersInfo(ctx context.Context, groupID string) (groupMembers []*model.GroupMember, err error)
|
||||
FindGroupMemberUser(ctx context.Context, groupIDs []string, userID string) ([]*model.GroupMember, error)
|
||||
|
||||
GetGroupRoleLevelMemberIDs(ctx context.Context, groupID string, roleLevel int32) ([]string, error)
|
||||
GetGroupOwner(ctx context.Context, groupID string) (*model.GroupMember, error)
|
||||
GetGroupsOwner(ctx context.Context, groupIDs []string) ([]*model.GroupMember, error)
|
||||
DelGroupRoleLevel(groupID string, roleLevel []int32) GroupCache
|
||||
DelGroupAllRoleLevel(groupID string) GroupCache
|
||||
DelGroupMembersInfo(groupID string, userID ...string) GroupCache
|
||||
GetGroupRoleLevelMemberInfo(ctx context.Context, groupID string, roleLevel int32) ([]*model.GroupMember, error)
|
||||
GetGroupRolesLevelMemberInfo(ctx context.Context, groupID string, roleLevels []int32) ([]*model.GroupMember, error)
|
||||
GetGroupMemberNum(ctx context.Context, groupID string) (memberNum int64, err error)
|
||||
DelGroupsMemberNum(groupID ...string) GroupCache
|
||||
|
||||
//FindSortGroupMemberUserIDs(ctx context.Context, groupID string) ([]string, error)
|
||||
//FindSortJoinGroupIDs(ctx context.Context, userID string) ([]string, error)
|
||||
|
||||
DelMaxGroupMemberVersion(groupIDs ...string) GroupCache
|
||||
DelMaxJoinGroupVersion(userIDs ...string) GroupCache
|
||||
FindMaxGroupMemberVersion(ctx context.Context, groupID string) (*model.VersionLog, error)
|
||||
BatchFindMaxGroupMemberVersion(ctx context.Context, groupIDs []string) ([]*model.VersionLog, error)
|
||||
FindMaxJoinGroupVersion(ctx context.Context, userID string) (*model.VersionLog, error)
|
||||
}
50
pkg/common/storage/cache/mcache/minio.go
vendored
Normal file
@@ -0,0 +1,50 @@
package mcache
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache/cachekey"
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/database"
|
||||
"github.com/openimsdk/tools/s3/minio"
|
||||
)
|
||||
|
||||
func NewMinioCache(cache database.Cache) minio.Cache {
|
||||
return &minioCache{
|
||||
cache: cache,
|
||||
expireTime: time.Hour * 24 * 7,
|
||||
}
|
||||
}
|
||||
|
||||
type minioCache struct {
|
||||
cache database.Cache
|
||||
expireTime time.Duration
|
||||
}
|
||||
|
||||
func (g *minioCache) getObjectImageInfoKey(key string) string {
|
||||
return cachekey.GetObjectImageInfoKey(key)
|
||||
}
|
||||
|
||||
func (g *minioCache) getMinioImageThumbnailKey(key string, format string, width int, height int) string {
|
||||
return cachekey.GetMinioImageThumbnailKey(key, format, width, height)
|
||||
}
|
||||
|
||||
func (g *minioCache) DelObjectImageInfoKey(ctx context.Context, keys ...string) error {
|
||||
ks := make([]string, 0, len(keys))
|
||||
for _, key := range keys {
|
||||
ks = append(ks, g.getObjectImageInfoKey(key))
|
||||
}
|
||||
return g.cache.Del(ctx, ks)
|
||||
}
|
||||
|
||||
func (g *minioCache) DelImageThumbnailKey(ctx context.Context, key string, format string, width int, height int) error {
|
||||
return g.cache.Del(ctx, []string{g.getMinioImageThumbnailKey(key, format, width, height)})
|
||||
}
|
||||
|
||||
func (g *minioCache) GetImageObjectKeyInfo(ctx context.Context, key string, fn func(ctx context.Context) (*minio.ImageInfo, error)) (*minio.ImageInfo, error) {
|
||||
return getCache[*minio.ImageInfo](ctx, g.cache, g.getObjectImageInfoKey(key), g.expireTime, fn)
|
||||
}
|
||||
|
||||
func (g *minioCache) GetThumbnailKey(ctx context.Context, key string, format string, width int, height int, minioCache func(ctx context.Context) (string, error)) (string, error) {
|
||||
return getCache[string](ctx, g.cache, g.getMinioImageThumbnailKey(key, format, width, height), g.expireTime, minioCache)
|
||||
}
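A usage sketch for the read-through helper above: the callback runs only on a cache miss, and its result is stored under the image-info key for the 7-day expireTime. The slow-path loader is a hypothetical placeholder.

func imageInfo(ctx context.Context, c minio.Cache, objectKey string) (*minio.ImageInfo, error) {
	return c.GetImageObjectKeyInfo(ctx, objectKey, func(ctx context.Context) (*minio.ImageInfo, error) {
		return statAndParseImage(ctx, objectKey) // hypothetical call that actually hits MinIO
	})
}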
132
pkg/common/storage/cache/mcache/msg_cache.go
vendored
Normal file
@@ -0,0 +1,132 @@
package mcache
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache"
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache/cachekey"
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/database"
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/model"
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/localcache"
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/localcache/lru"
|
||||
"github.com/openimsdk/tools/errs"
|
||||
"github.com/openimsdk/tools/utils/datautil"
|
||||
"github.com/redis/go-redis/v9"
|
||||
)
|
||||
|
||||
var (
|
||||
memMsgCache lru.LRU[string, *model.MsgInfoModel]
|
||||
initMemMsgCache sync.Once
|
||||
)
|
||||
|
||||
func NewMsgCache(cache database.Cache, msgDocDatabase database.Msg) cache.MsgCache {
|
||||
initMemMsgCache.Do(func() {
|
||||
memMsgCache = lru.NewLazyLRU[string, *model.MsgInfoModel](1024*8, time.Hour, time.Second*10, localcache.EmptyTarget{}, nil)
|
||||
})
|
||||
return &msgCache{
|
||||
cache: cache,
|
||||
msgDocDatabase: msgDocDatabase,
|
||||
memMsgCache: memMsgCache,
|
||||
}
|
||||
}
|
||||
|
||||
type msgCache struct {
|
||||
cache database.Cache
|
||||
msgDocDatabase database.Msg
|
||||
memMsgCache lru.LRU[string, *model.MsgInfoModel]
|
||||
}
|
||||
|
||||
func (x *msgCache) getSendMsgKey(id string) string {
|
||||
return cachekey.GetSendMsgKey(id)
|
||||
}
|
||||
|
||||
func (x *msgCache) SetSendMsgStatus(ctx context.Context, id string, status int32) error {
|
||||
return x.cache.Set(ctx, x.getSendMsgKey(id), strconv.Itoa(int(status)), time.Hour*24)
|
||||
}
|
||||
|
||||
func (x *msgCache) GetSendMsgStatus(ctx context.Context, id string) (int32, error) {
|
||||
key := x.getSendMsgKey(id)
|
||||
res, err := x.cache.Get(ctx, []string{key})
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
val, ok := res[key]
|
||||
if !ok {
|
||||
return 0, errs.Wrap(redis.Nil)
|
||||
}
|
||||
status, err := strconv.Atoi(val)
|
||||
if err != nil {
|
||||
return 0, errs.WrapMsg(err, "GetSendMsgStatus strconv.Atoi error", "val", val)
|
||||
}
|
||||
return int32(status), nil
|
||||
}
|
||||
|
||||
func (x *msgCache) getMsgCacheKey(conversationID string, seq int64) string {
|
||||
return cachekey.GetMsgCacheKey(conversationID, seq)
|
||||
|
||||
}
|
||||
|
||||
func (x *msgCache) GetMessageBySeqs(ctx context.Context, conversationID string, seqs []int64) ([]*model.MsgInfoModel, error) {
|
||||
if len(seqs) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
keys := make([]string, 0, len(seqs))
|
||||
keySeq := make(map[string]int64, len(seqs))
|
||||
for _, seq := range seqs {
|
||||
key := x.getMsgCacheKey(conversationID, seq)
|
||||
keys = append(keys, key)
|
||||
keySeq[key] = seq
|
||||
}
|
||||
res, err := x.memMsgCache.GetBatch(keys, func(keys []string) (map[string]*model.MsgInfoModel, error) {
|
||||
findSeqs := make([]int64, 0, len(keys))
|
||||
for _, key := range keys {
|
||||
seq, ok := keySeq[key]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
findSeqs = append(findSeqs, seq)
|
||||
}
|
||||
res, err := x.msgDocDatabase.FindSeqs(ctx, conversationID, findSeqs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
kv := make(map[string]*model.MsgInfoModel)
|
||||
for i := range res {
|
||||
msg := res[i]
|
||||
if msg == nil || msg.Msg == nil || msg.Msg.Seq <= 0 {
|
||||
continue
|
||||
}
|
||||
key := x.getMsgCacheKey(conversationID, msg.Msg.Seq)
|
||||
kv[key] = msg
|
||||
}
|
||||
return kv, nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return datautil.Values(res), nil
|
||||
}
|
||||
|
||||
func (x msgCache) DelMessageBySeqs(ctx context.Context, conversationID string, seqs []int64) error {
|
||||
if len(seqs) == 0 {
|
||||
return nil
|
||||
}
|
||||
for _, seq := range seqs {
|
||||
x.memMsgCache.Del(x.getMsgCacheKey(conversationID, seq))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *msgCache) SetMessageBySeqs(ctx context.Context, conversationID string, msgs []*model.MsgInfoModel) error {
|
||||
for i := range msgs {
|
||||
msg := msgs[i]
|
||||
if msg == nil || msg.Msg == nil || msg.Msg.Seq <= 0 {
|
||||
continue
|
||||
}
|
||||
x.memMsgCache.Set(x.getMsgCacheKey(conversationID, msg.Msg.Seq), msg)
|
||||
}
|
||||
return nil
|
||||
}
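A round-trip sketch for the in-process message cache (the conversation ID and seqs are illustrative): messages written through SetMessageBySeqs are served from the LRU, and misses inside GetMessageBySeqs fall back to msgDocDatabase.FindSeqs.

func warmAndRead(ctx context.Context, c cache.MsgCache, conversationID string, msgs []*model.MsgInfoModel) ([]*model.MsgInfoModel, error) {
	if err := c.SetMessageBySeqs(ctx, conversationID, msgs); err != nil {
		return nil, err
	}
	return c.GetMessageBySeqs(ctx, conversationID, []int64{1, 2, 3})
}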
82
pkg/common/storage/cache/mcache/online.go
vendored
Normal file
@@ -0,0 +1,82 @@
package mcache
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache"
|
||||
)
|
||||
|
||||
var (
|
||||
globalOnlineCache cache.OnlineCache
|
||||
globalOnlineOnce sync.Once
|
||||
)
|
||||
|
||||
func NewOnlineCache() cache.OnlineCache {
|
||||
globalOnlineOnce.Do(func() {
|
||||
globalOnlineCache = &onlineCache{
|
||||
user: make(map[string]map[int32]struct{}),
|
||||
}
|
||||
})
|
||||
return globalOnlineCache
|
||||
}
|
||||
|
||||
type onlineCache struct {
|
||||
lock sync.RWMutex
|
||||
user map[string]map[int32]struct{}
|
||||
}
|
||||
|
||||
func (x *onlineCache) GetOnline(ctx context.Context, userID string) ([]int32, error) {
|
||||
x.lock.RLock()
|
||||
defer x.lock.RUnlock()
|
||||
pSet, ok := x.user[userID]
|
||||
if !ok {
|
||||
return nil, nil
|
||||
}
|
||||
res := make([]int32, 0, len(pSet))
|
||||
for k := range pSet {
|
||||
res = append(res, k)
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (x *onlineCache) SetUserOnline(ctx context.Context, userID string, online, offline []int32) error {
|
||||
x.lock.Lock()
|
||||
defer x.lock.Unlock()
|
||||
pSet, ok := x.user[userID]
|
||||
if ok {
|
||||
for _, p := range offline {
|
||||
delete(pSet, p)
|
||||
}
|
||||
}
|
||||
if len(online) > 0 {
|
||||
if !ok {
|
||||
pSet = make(map[int32]struct{})
|
||||
x.user[userID] = pSet
|
||||
}
|
||||
for _, p := range online {
|
||||
pSet[p] = struct{}{}
|
||||
}
|
||||
}
|
||||
if len(pSet) == 0 {
|
||||
delete(x.user, userID)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *onlineCache) GetAllOnlineUsers(ctx context.Context, cursor uint64) (map[string][]int32, uint64, error) {
|
||||
if cursor != 0 {
|
||||
return nil, 0, nil
|
||||
}
|
||||
x.lock.RLock()
|
||||
defer x.lock.RUnlock()
|
||||
res := make(map[string][]int32)
|
||||
for k, v := range x.user {
|
||||
pSet := make([]int32, 0, len(v))
|
||||
for p := range v {
|
||||
pSet = append(pSet, p)
|
||||
}
|
||||
res[k] = pSet
|
||||
}
|
||||
return res, 0, nil
|
||||
}
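A behavior sketch for the in-memory online cache: platforms passed as online are added, offline ones removed, and a user with no remaining platforms is dropped from the map entirely.

func demoOnline(ctx context.Context) []int32 {
	oc := NewOnlineCache()
	_ = oc.SetUserOnline(ctx, "u1", []int32{1, 2}, nil) // platforms 1 and 2 come online
	_ = oc.SetUserOnline(ctx, "u1", nil, []int32{2})    // platform 2 goes offline
	platforms, _ := oc.GetOnline(ctx, "u1")             // -> []int32{1}
	return platforms
}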
79
pkg/common/storage/cache/mcache/seq_conversation.go
vendored
Normal file
@@ -0,0 +1,79 @@
package mcache
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache"
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/database"
|
||||
)
|
||||
|
||||
func NewSeqConversationCache(sc database.SeqConversation) cache.SeqConversationCache {
|
||||
return &seqConversationCache{
|
||||
sc: sc,
|
||||
}
|
||||
}
|
||||
|
||||
type seqConversationCache struct {
|
||||
sc database.SeqConversation
|
||||
}
|
||||
|
||||
func (x *seqConversationCache) Malloc(ctx context.Context, conversationID string, size int64) (int64, error) {
|
||||
return x.sc.Malloc(ctx, conversationID, size)
|
||||
}
|
||||
|
||||
func (x *seqConversationCache) SetMinSeq(ctx context.Context, conversationID string, seq int64) error {
|
||||
return x.sc.SetMinSeq(ctx, conversationID, seq)
|
||||
}
|
||||
|
||||
func (x *seqConversationCache) GetMinSeq(ctx context.Context, conversationID string) (int64, error) {
|
||||
return x.sc.GetMinSeq(ctx, conversationID)
|
||||
}
|
||||
|
||||
func (x *seqConversationCache) GetMaxSeqs(ctx context.Context, conversationIDs []string) (map[string]int64, error) {
|
||||
res := make(map[string]int64)
|
||||
for _, conversationID := range conversationIDs {
|
||||
seq, err := x.GetMinSeq(ctx, conversationID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
res[conversationID] = seq
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (x *seqConversationCache) GetMaxSeqsWithTime(ctx context.Context, conversationIDs []string) (map[string]database.SeqTime, error) {
|
||||
res := make(map[string]database.SeqTime)
|
||||
for _, conversationID := range conversationIDs {
|
||||
seq, err := x.GetMinSeq(ctx, conversationID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
res[conversationID] = database.SeqTime{Seq: seq}
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (x *seqConversationCache) GetMaxSeq(ctx context.Context, conversationID string) (int64, error) {
|
||||
return x.sc.GetMaxSeq(ctx, conversationID)
|
||||
}
|
||||
|
||||
func (x *seqConversationCache) GetMaxSeqWithTime(ctx context.Context, conversationID string) (database.SeqTime, error) {
|
||||
seq, err := x.GetMinSeq(ctx, conversationID)
|
||||
if err != nil {
|
||||
return database.SeqTime{}, err
|
||||
}
|
||||
return database.SeqTime{Seq: seq}, nil
|
||||
}
|
||||
|
||||
func (x *seqConversationCache) SetMinSeqs(ctx context.Context, seqs map[string]int64) error {
|
||||
for conversationID, seq := range seqs {
|
||||
if err := x.sc.SetMinSeq(ctx, conversationID, seq); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *seqConversationCache) GetCacheMaxSeqWithTime(ctx context.Context, conversationIDs []string) (map[string]database.SeqTime, error) {
|
||||
return x.GetMaxSeqsWithTime(ctx, conversationIDs)
|
||||
}
98
pkg/common/storage/cache/mcache/third.go
vendored
Normal file
@@ -0,0 +1,98 @@
package mcache
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache"
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache/cachekey"
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/database"
|
||||
"github.com/openimsdk/tools/errs"
|
||||
"github.com/redis/go-redis/v9"
|
||||
)
|
||||
|
||||
func NewThirdCache(cache database.Cache) cache.ThirdCache {
|
||||
return &thirdCache{
|
||||
cache: cache,
|
||||
}
|
||||
}
|
||||
|
||||
type thirdCache struct {
|
||||
cache database.Cache
|
||||
}
|
||||
|
||||
func (c *thirdCache) getGetuiTokenKey() string {
|
||||
return cachekey.GetGetuiTokenKey()
|
||||
}
|
||||
|
||||
func (c *thirdCache) getGetuiTaskIDKey() string {
|
||||
return cachekey.GetGetuiTaskIDKey()
|
||||
}
|
||||
|
||||
func (c *thirdCache) getUserBadgeUnreadCountSumKey(userID string) string {
|
||||
return cachekey.GetUserBadgeUnreadCountSumKey(userID)
|
||||
}
|
||||
|
||||
func (c *thirdCache) getFcmAccountTokenKey(account string, platformID int) string {
|
||||
return cachekey.GetFcmAccountTokenKey(account, platformID)
|
||||
}
|
||||
|
||||
func (c *thirdCache) get(ctx context.Context, key string) (string, error) {
|
||||
res, err := c.cache.Get(ctx, []string{key})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if val, ok := res[key]; ok {
|
||||
return val, nil
|
||||
}
|
||||
return "", errs.Wrap(redis.Nil)
|
||||
}
|
||||
|
||||
func (c *thirdCache) SetFcmToken(ctx context.Context, account string, platformID int, fcmToken string, expireTime int64) (err error) {
|
||||
return errs.Wrap(c.cache.Set(ctx, c.getFcmAccountTokenKey(account, platformID), fcmToken, time.Duration(expireTime)*time.Second))
|
||||
}
|
||||
|
||||
func (c *thirdCache) GetFcmToken(ctx context.Context, account string, platformID int) (string, error) {
|
||||
return c.get(ctx, c.getFcmAccountTokenKey(account, platformID))
|
||||
}
|
||||
|
||||
func (c *thirdCache) DelFcmToken(ctx context.Context, account string, platformID int) error {
|
||||
return c.cache.Del(ctx, []string{c.getFcmAccountTokenKey(account, platformID)})
|
||||
}
|
||||
|
||||
func (c *thirdCache) IncrUserBadgeUnreadCountSum(ctx context.Context, userID string) (int, error) {
|
||||
return c.cache.Incr(ctx, c.getUserBadgeUnreadCountSumKey(userID), 1)
|
||||
}
|
||||
|
||||
func (c *thirdCache) SetUserBadgeUnreadCountSum(ctx context.Context, userID string, value int) error {
|
||||
return c.cache.Set(ctx, c.getUserBadgeUnreadCountSumKey(userID), strconv.Itoa(value), 0)
|
||||
}
|
||||
|
||||
func (c *thirdCache) GetUserBadgeUnreadCountSum(ctx context.Context, userID string) (int, error) {
|
||||
str, err := c.get(ctx, c.getUserBadgeUnreadCountSumKey(userID))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
val, err := strconv.Atoi(str)
|
||||
if err != nil {
|
||||
return 0, errs.WrapMsg(err, "strconv.Atoi", "str", str)
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
|
||||
func (c *thirdCache) SetGetuiToken(ctx context.Context, token string, expireTime int64) error {
|
||||
return c.cache.Set(ctx, c.getGetuiTokenKey(), token, time.Duration(expireTime)*time.Second)
|
||||
}
|
||||
|
||||
func (c *thirdCache) GetGetuiToken(ctx context.Context) (string, error) {
|
||||
return c.get(ctx, c.getGetuiTokenKey())
|
||||
}
|
||||
|
||||
func (c *thirdCache) SetGetuiTaskID(ctx context.Context, taskID string, expireTime int64) error {
|
||||
return c.cache.Set(ctx, c.getGetuiTaskIDKey(), taskID, time.Duration(expireTime)*time.Second)
|
||||
}
|
||||
|
||||
func (c *thirdCache) GetGetuiTaskID(ctx context.Context) (string, error) {
|
||||
return c.get(ctx, c.getGetuiTaskIDKey())
|
||||
}
|
||||
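A minimal usage sketch, not part of the commit, of the badge counters defined above; the thirdCache handle and the userID value are assumptions for illustration only:

	// Bump the badge counter after delivering a push, then read and reset it.
	count, err := thirdCache.IncrUserBadgeUnreadCountSum(ctx, "user-123")
	if err != nil {
		return err
	}
	log.ZDebug(ctx, "badge unread sum", "count", count)
	if err := thirdCache.SetUserBadgeUnreadCountSum(ctx, "user-123", 0); err != nil {
		return err
	}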
166
pkg/common/storage/cache/mcache/token.go
vendored
Normal file
@@ -0,0 +1,166 @@
|
||||
package mcache
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache"
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache/cachekey"
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/database"
|
||||
"github.com/openimsdk/tools/errs"
|
||||
"github.com/openimsdk/tools/log"
|
||||
)
|
||||
|
||||
func NewTokenCacheModel(cache database.Cache, accessExpire int64) cache.TokenModel {
|
||||
c := &tokenCache{cache: cache}
|
||||
c.accessExpire = c.getExpireTime(accessExpire)
|
||||
return c
|
||||
}
|
||||
|
||||
type tokenCache struct {
|
||||
cache database.Cache
|
||||
accessExpire time.Duration
|
||||
}
|
||||
|
||||
func (x *tokenCache) getTokenKey(userID string, platformID int, token string) string {
|
||||
return cachekey.GetTokenKey(userID, platformID) + ":" + token
|
||||
}
|
||||
|
||||
func (x *tokenCache) SetTokenFlag(ctx context.Context, userID string, platformID int, token string, flag int) error {
|
||||
return x.cache.Set(ctx, x.getTokenKey(userID, platformID, token), strconv.Itoa(flag), x.accessExpire)
|
||||
}
|
||||
|
||||
// SetTokenFlagEx sets the token flag, applying the cache's configured access expire time.
|
||||
func (x *tokenCache) SetTokenFlagEx(ctx context.Context, userID string, platformID int, token string, flag int) error {
|
||||
return x.SetTokenFlag(ctx, userID, platformID, token, flag)
|
||||
}
|
||||
|
||||
func (x *tokenCache) GetTokensWithoutError(ctx context.Context, userID string, platformID int) (map[string]int, error) {
|
||||
prefix := x.getTokenKey(userID, platformID, "")
|
||||
m, err := x.cache.Prefix(ctx, prefix)
|
||||
if err != nil {
|
||||
return nil, errs.Wrap(err)
|
||||
}
|
||||
mm := make(map[string]int)
|
||||
for k, v := range m {
|
||||
state, err := strconv.Atoi(v)
|
||||
if err != nil {
|
||||
log.ZError(ctx, "token value is not int", err, "value", v, "userID", userID, "platformID", platformID)
|
||||
continue
|
||||
}
|
||||
mm[strings.TrimPrefix(k, prefix)] = state
|
||||
}
|
||||
return mm, nil
|
||||
}
|
||||
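// Illustrative only (not part of the commit): a hedged sketch of reading
// tokens back through the prefix scan above; the userID and platformID
// values are assumptions for the example.
//
//	tokens, err := tokenCache.GetTokensWithoutError(ctx, "user-123", 1)
//	if err != nil {
//		return err
//	}
//	for token, flag := range tokens {
//		log.ZDebug(ctx, "token state", "token", token, "flag", flag)
//	}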
|
||||
func (x *tokenCache) HasTemporaryToken(ctx context.Context, userID string, platformID int, token string) error {
|
||||
key := cachekey.GetTemporaryTokenKey(userID, platformID, token)
|
||||
if _, err := x.cache.Get(ctx, []string{key}); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *tokenCache) GetAllTokensWithoutError(ctx context.Context, userID string) (map[int]map[string]int, error) {
|
||||
prefix := cachekey.UidPidToken + userID + ":"
|
||||
tokens, err := x.cache.Prefix(ctx, prefix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
res := make(map[int]map[string]int)
|
||||
for key, flagStr := range tokens {
|
||||
flag, err := strconv.Atoi(flagStr)
|
||||
if err != nil {
|
||||
log.ZError(ctx, "token value is not int", err, "key", key, "value", flagStr, "userID", userID)
|
||||
continue
|
||||
}
|
||||
arr := strings.SplitN(strings.TrimPrefix(key, prefix), ":", 2)
if len(arr) != 2 {
log.ZError(ctx, "token key format is invalid", nil, "key", key, "value", flagStr, "userID", userID)
continue
}
platformID, err := strconv.Atoi(arr[0])
if err != nil {
log.ZError(ctx, "platformID in token key is not int", err, "key", key, "value", flagStr, "userID", userID)
continue
}
token := arr[1]
if token == "" {
log.ZError(ctx, "token in key is empty", nil, "key", key, "value", flagStr, "userID", userID)
continue
}
|
||||
tk, ok := res[platformID]
|
||||
if !ok {
|
||||
tk = make(map[string]int)
|
||||
res[platformID] = tk
|
||||
}
|
||||
tk[token] = flag
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (x *tokenCache) SetTokenMapByUidPid(ctx context.Context, userID string, platformID int, m map[string]int) error {
|
||||
for token, flag := range m {
|
||||
err := x.SetTokenFlag(ctx, userID, platformID, token, flag)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *tokenCache) BatchSetTokenMapByUidPid(ctx context.Context, tokens map[string]map[string]any) error {
|
||||
for prefix, tokenFlag := range tokens {
|
||||
for token, flag := range tokenFlag {
|
||||
flagStr := fmt.Sprintf("%v", flag)
|
||||
if err := x.cache.Set(ctx, prefix+":"+token, flagStr, x.accessExpire); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *tokenCache) DeleteTokenByUidPid(ctx context.Context, userID string, platformID int, fields []string) error {
|
||||
keys := make([]string, 0, len(fields))
|
||||
for _, token := range fields {
|
||||
keys = append(keys, x.getTokenKey(userID, platformID, token))
|
||||
}
|
||||
return x.cache.Del(ctx, keys)
|
||||
}
|
||||
|
||||
func (x *tokenCache) getExpireTime(t int64) time.Duration {
|
||||
return time.Hour * 24 * time.Duration(t)
|
||||
}
|
||||
|
||||
func (x *tokenCache) DeleteTokenByTokenMap(ctx context.Context, userID string, tokens map[int][]string) error {
|
||||
keys := make([]string, 0, len(tokens))
|
||||
for platformID, ts := range tokens {
|
||||
for _, t := range ts {
|
||||
keys = append(keys, x.getTokenKey(userID, platformID, t))
|
||||
}
|
||||
}
|
||||
return x.cache.Del(ctx, keys)
|
||||
}
|
||||
|
||||
func (x *tokenCache) DeleteAndSetTemporary(ctx context.Context, userID string, platformID int, fields []string) error {
|
||||
keys := make([]string, 0, len(fields))
|
||||
for _, f := range fields {
|
||||
keys = append(keys, x.getTokenKey(userID, platformID, f))
|
||||
}
|
||||
if err := x.cache.Del(ctx, keys); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, f := range fields {
|
||||
k := cachekey.GetTemporaryTokenKey(userID, platformID, f)
|
||||
if err := x.cache.Set(ctx, k, "", x.accessExpire); err != nil {
|
||||
return errs.Wrap(err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
63
pkg/common/storage/cache/mcache/tools.go
vendored
Normal file
@@ -0,0 +1,63 @@
package mcache

import (
	"context"
	"encoding/json"
	"time"

	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/database"
	"github.com/openimsdk/tools/log"
)

// getCache implements a cache-aside read with a short-lived lock to suppress
// concurrent rebuilds: read the cache, and on a miss acquire the lock,
// re-check the cache, then query the source and write the result back.
func getCache[V any](ctx context.Context, cache database.Cache, key string, expireTime time.Duration, fn func(ctx context.Context) (V, error)) (V, error) {
	getDB := func() (V, bool, error) {
		var val V
		res, err := cache.Get(ctx, []string{key})
		if err != nil {
			return val, false, err
		}
		if str, ok := res[key]; ok {
			if err := json.Unmarshal([]byte(str), &val); err != nil {
				// A corrupted cache entry is treated as a miss so the value is rebuilt below.
				log.ZWarn(ctx, "cache value unmarshal failed", err, "key", key, "value", str)
				return val, false, nil
			}
			return val, true, nil
		}
		return val, false, nil
	}
	dbVal, ok, err := getDB()
	if err != nil {
		return dbVal, err
	}
	if ok {
		return dbVal, nil
	}
	lockValue, err := cache.Lock(ctx, key, time.Minute)
	if err != nil {
		return dbVal, err
	}
	defer func() {
		if err := cache.Unlock(ctx, key, lockValue); err != nil {
			log.ZError(ctx, "unlock cache key", err, "key", key, "value", lockValue)
		}
	}()
	dbVal, ok, err = getDB()
	if err != nil {
		return dbVal, err
	}
	if ok {
		return dbVal, nil
	}
	val, err := fn(ctx)
	if err != nil {
		return val, err
	}
	data, err := json.Marshal(val)
	if err != nil {
		return val, err
	}
	if err := cache.Set(ctx, key, string(data), expireTime); err != nil {
		return val, err
	}
	return val, nil
}
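A minimal, illustrative call of getCache above, not part of the commit; the mcacheDB handle (a database.Cache), the key layout and the loader are assumptions for the example:

	// Load a user's client config map, rebuilding it from the database at most
	// once per miss thanks to the lock taken inside getCache.
	cfg, err := getCache(ctx, mcacheDB, "CLIENT_CONFIG:user-123", time.Hour, func(ctx context.Context) (map[string]string, error) {
		return clientConfigDB.Get(ctx, "user-123")
	})
	if err != nil {
		return err
	}
	_ = cfg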
30
pkg/common/storage/cache/msg.go
vendored
Normal file
@@ -0,0 +1,30 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cache

import (
	"context"

	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/model"
)

type MsgCache interface {
	SetSendMsgStatus(ctx context.Context, id string, status int32) error
	GetSendMsgStatus(ctx context.Context, id string) (int32, error)

	GetMessageBySeqs(ctx context.Context, conversationID string, seqs []int64) ([]*model.MsgInfoModel, error)
	DelMessageBySeqs(ctx context.Context, conversationID string, seqs []int64) error
	SetMessageBySeqs(ctx context.Context, conversationID string, msgs []*model.MsgInfoModel) error
}
9
pkg/common/storage/cache/online.go
vendored
Normal file
@@ -0,0 +1,9 @@
package cache

import "context"

type OnlineCache interface {
	GetOnline(ctx context.Context, userID string) ([]int32, error)
	SetUserOnline(ctx context.Context, userID string, online, offline []int32) error
	GetAllOnlineUsers(ctx context.Context, cursor uint64) (map[string][]int32, uint64, error)
}
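A hedged sketch, not part of the commit, of paging through GetAllOnlineUsers with its cursor; it assumes the SCAN-style convention that a returned cursor of 0 marks the end of the iteration, and the onlineCache handle is illustrative:

	var cursor uint64
	for {
		users, next, err := onlineCache.GetAllOnlineUsers(ctx, cursor)
		if err != nil {
			return err
		}
		for userID, platformIDs := range users {
			log.ZDebug(ctx, "online user", "userID", userID, "platformIDs", platformIDs)
		}
		if next == 0 {
			break
		}
		cursor = next
	}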
135
pkg/common/storage/cache/redis/batch.go
vendored
Normal file
@@ -0,0 +1,135 @@
|
||||
package redis
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"time"
|
||||
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache"
|
||||
"github.com/dtm-labs/rockscache"
|
||||
"github.com/openimsdk/tools/errs"
|
||||
"github.com/openimsdk/tools/log"
|
||||
"github.com/redis/go-redis/v9"
|
||||
)
|
||||
|
||||
// GetRocksCacheOptions returns the default configuration options for RocksCache.
|
||||
func GetRocksCacheOptions() *rockscache.Options {
|
||||
opts := rockscache.NewDefaultOptions()
|
||||
opts.LockExpire = rocksCacheTimeout
|
||||
opts.WaitReplicasTimeout = rocksCacheTimeout
|
||||
opts.StrongConsistency = true
|
||||
opts.RandomExpireAdjustment = 0.2
|
||||
|
||||
return &opts
|
||||
}
|
||||
|
||||
func newRocksCacheClient(rdb redis.UniversalClient) *rocksCacheClient {
|
||||
if rdb == nil {
|
||||
return &rocksCacheClient{}
|
||||
}
|
||||
rc := &rocksCacheClient{
|
||||
rdb: rdb,
|
||||
client: rockscache.NewClient(rdb, *GetRocksCacheOptions()),
|
||||
}
|
||||
return rc
|
||||
}
|
||||
|
||||
type rocksCacheClient struct {
|
||||
rdb redis.UniversalClient
|
||||
client *rockscache.Client
|
||||
}
|
||||
|
||||
func (x *rocksCacheClient) GetClient() *rockscache.Client {
|
||||
return x.client
|
||||
}
|
||||
|
||||
func (x *rocksCacheClient) Disable() bool {
|
||||
return x.client == nil
|
||||
}
|
||||
|
||||
func (x *rocksCacheClient) GetRedis() redis.UniversalClient {
|
||||
return x.rdb
|
||||
}
|
||||
|
||||
func (x *rocksCacheClient) GetBatchDeleter(topics ...string) cache.BatchDeleter {
|
||||
return NewBatchDeleterRedis(x, topics)
|
||||
}
|
||||
|
||||
func batchGetCache2[K comparable, V any](ctx context.Context, rcClient *rocksCacheClient, expire time.Duration, ids []K, idKey func(id K) string, vId func(v *V) K, fn func(ctx context.Context, ids []K) ([]*V, error)) ([]*V, error) {
|
||||
if len(ids) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
if rcClient.Disable() {
|
||||
return fn(ctx, ids)
|
||||
}
|
||||
findKeys := make([]string, 0, len(ids))
|
||||
keyId := make(map[string]K)
|
||||
for _, id := range ids {
|
||||
key := idKey(id)
|
||||
if _, ok := keyId[key]; ok {
|
||||
continue
|
||||
}
|
||||
keyId[key] = id
|
||||
findKeys = append(findKeys, key)
|
||||
}
|
||||
slotKeys, err := groupKeysBySlot(ctx, rcClient.GetRedis(), findKeys)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
result := make([]*V, 0, len(findKeys))
|
||||
for _, keys := range slotKeys {
|
||||
indexCache, err := rcClient.GetClient().FetchBatch2(ctx, keys, expire, func(idx []int) (map[int]string, error) {
|
||||
queryIds := make([]K, 0, len(idx))
|
||||
idIndex := make(map[K]int)
|
||||
for _, index := range idx {
|
||||
id := keyId[keys[index]]
|
||||
idIndex[id] = index
|
||||
queryIds = append(queryIds, id)
|
||||
}
|
||||
values, err := fn(ctx, queryIds)
|
||||
if err != nil {
|
||||
log.ZError(ctx, "batchGetCache query database failed", err, "keys", keys, "queryIds", queryIds)
|
||||
return nil, err
|
||||
}
|
||||
if len(values) == 0 {
|
||||
return map[int]string{}, nil
|
||||
}
|
||||
cacheIndex := make(map[int]string)
|
||||
for _, value := range values {
|
||||
id := vId(value)
|
||||
index, ok := idIndex[id]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
bs, err := json.Marshal(value)
|
||||
if err != nil {
|
||||
log.ZError(ctx, "marshal failed", err)
|
||||
return nil, err
|
||||
}
|
||||
cacheIndex[index] = string(bs)
|
||||
}
|
||||
return cacheIndex, nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errs.WrapMsg(err, "FetchBatch2 failed")
|
||||
}
|
||||
for index, data := range indexCache {
|
||||
if data == "" {
|
||||
continue
|
||||
}
|
||||
var value V
|
||||
if err := json.Unmarshal([]byte(data), &value); err != nil {
|
||||
return nil, errs.WrapMsg(err, "Unmarshal failed")
|
||||
}
|
||||
if cb, ok := any(&value).(BatchCacheCallback[K]); ok {
|
||||
cb.BatchCache(keyId[keys[index]])
|
||||
}
|
||||
result = append(result, &value)
|
||||
}
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
type BatchCacheCallback[K comparable] interface {
|
||||
BatchCache(id K)
|
||||
}
|
||||
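A hedged, illustrative call of batchGetCache2 above, not part of the commit; it mirrors how the group cache later in this commit wires the three callbacks, and the rcClient, groupIDs and groupDB names are assumptions here:

	// Fetch many group rows through the cache in one pass, keyed per group ID;
	// the loader is only invoked for IDs whose keys are missing or expired.
	groups, err := batchGetCache2(ctx, rcClient, groupExpireTime, groupIDs,
		func(groupID string) string { return cachekey.GetGroupInfoKey(groupID) }, // id -> cache key
		func(g *model.Group) string { return g.GroupID },                         // cached value -> id
		func(ctx context.Context, ids []string) ([]*model.Group, error) {
			return groupDB.Find(ctx, ids)
		})
	if err != nil {
		return err
	}
	_ = groups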
149
pkg/common/storage/cache/redis/batch_handler.go
vendored
Normal file
@@ -0,0 +1,149 @@
|
||||
package redis
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache"
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/localcache"
|
||||
"github.com/dtm-labs/rockscache"
|
||||
"github.com/openimsdk/tools/errs"
|
||||
"github.com/openimsdk/tools/log"
|
||||
"github.com/openimsdk/tools/utils/datautil"
|
||||
"github.com/redis/go-redis/v9"
|
||||
)
|
||||
|
||||
const (
|
||||
rocksCacheTimeout = 11 * time.Second
|
||||
)
|
||||
|
||||
// BatchDeleterRedis is a concrete implementation of the BatchDeleter interface based on Redis and RocksCache.
|
||||
type BatchDeleterRedis struct {
|
||||
redisClient redis.UniversalClient
|
||||
keys []string
|
||||
rocksClient *rockscache.Client
|
||||
redisPubTopics []string
|
||||
}
|
||||
|
||||
// NewBatchDeleterRedis creates a new BatchDeleterRedis instance.
|
||||
func NewBatchDeleterRedis(rcClient *rocksCacheClient, redisPubTopics []string) *BatchDeleterRedis {
|
||||
return &BatchDeleterRedis{
|
||||
redisClient: rcClient.GetRedis(),
|
||||
rocksClient: rcClient.GetClient(),
|
||||
redisPubTopics: redisPubTopics,
|
||||
}
|
||||
}
|
||||
|
||||
// ExecDelWithKeys directly takes keys for batch deletion and publishes deletion information.
|
||||
func (c *BatchDeleterRedis) ExecDelWithKeys(ctx context.Context, keys []string) error {
|
||||
distinctKeys := datautil.Distinct(keys)
|
||||
return c.execDel(ctx, distinctKeys)
|
||||
}
|
||||
|
||||
// ChainExecDel executes batch deletion for chained calls. Callers must Clone the deleter first to avoid polluting the shared key set.
|
||||
func (c *BatchDeleterRedis) ChainExecDel(ctx context.Context) error {
|
||||
distinctKeys := datautil.Distinct(c.keys)
|
||||
return c.execDel(ctx, distinctKeys)
|
||||
}
|
||||
|
||||
// execDel performs batch deletion and publishes the keys that have been deleted to update the local cache information of other nodes.
|
||||
func (c *BatchDeleterRedis) execDel(ctx context.Context, keys []string) error {
|
||||
if len(keys) > 0 {
|
||||
log.ZDebug(ctx, "delete cache", "topic", c.redisPubTopics, "keys", keys)
|
||||
// Batch delete keys
|
||||
err := ProcessKeysBySlot(ctx, c.redisClient, keys, func(ctx context.Context, slot int64, keys []string) error {
|
||||
return c.rocksClient.TagAsDeletedBatch2(ctx, keys)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Publish the keys that have been deleted to Redis to update the local cache information of other nodes
|
||||
if len(c.redisPubTopics) > 0 && len(keys) > 0 {
|
||||
keysByTopic := localcache.GetPublishKeysByTopic(c.redisPubTopics, keys)
|
||||
for topic, keys := range keysByTopic {
|
||||
if len(keys) > 0 {
|
||||
data, err := json.Marshal(keys)
|
||||
if err != nil {
|
||||
log.ZWarn(ctx, "keys json marshal failed", err, "topic", topic, "keys", keys)
|
||||
} else {
|
||||
if err := c.redisClient.Publish(ctx, topic, string(data)).Err(); err != nil {
|
||||
log.ZWarn(ctx, "redis publish cache delete error", err, "topic", topic, "keys", keys)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Clone creates a copy of BatchDeleterRedis for chain calls to prevent memory pollution.
|
||||
func (c *BatchDeleterRedis) Clone() cache.BatchDeleter {
|
||||
return &BatchDeleterRedis{
|
||||
redisClient: c.redisClient,
|
||||
keys: c.keys,
|
||||
rocksClient: c.rocksClient,
|
||||
redisPubTopics: c.redisPubTopics,
|
||||
}
|
||||
}
|
||||
|
||||
// AddKeys adds keys to be deleted.
|
||||
func (c *BatchDeleterRedis) AddKeys(keys ...string) {
|
||||
c.keys = append(c.keys, keys...)
|
||||
}
|
||||
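// Illustrative only (not part of the commit): the Clone/AddKeys/ChainExecDel
// chain described in the comments above. Cloning first keeps the original
// deleter's key slice untouched; the topic and cache key values below are
// assumptions for the example.
//
//	d := rcClient.GetBatchDeleter("friend_topic").Clone()
//	d.AddKeys(cachekey.GetFriendIDsKey("user-123"), cachekey.GetFriendMaxVersionKey("user-123"))
//	if err := d.ChainExecDel(ctx); err != nil {
//		return err
//	}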
|
||||
type disableBatchDeleter struct{}
|
||||
|
||||
func (x disableBatchDeleter) ChainExecDel(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x disableBatchDeleter) ExecDelWithKeys(ctx context.Context, keys []string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x disableBatchDeleter) Clone() cache.BatchDeleter {
|
||||
return x
|
||||
}
|
||||
|
||||
func (x disableBatchDeleter) AddKeys(keys ...string) {}
|
||||
|
||||
func getCache[T any](ctx context.Context, rcClient *rocksCacheClient, key string, expire time.Duration, fn func(ctx context.Context) (T, error)) (T, error) {
|
||||
if rcClient.Disable() {
|
||||
return fn(ctx)
|
||||
}
|
||||
var t T
|
||||
var write bool
|
||||
v, err := rcClient.GetClient().Fetch2(ctx, key, expire, func() (s string, err error) {
|
||||
t, err = fn(ctx)
|
||||
if err != nil {
|
||||
//log.ZError(ctx, "getCache query database failed", err, "key", key)
|
||||
return "", err
|
||||
}
|
||||
bs, err := json.Marshal(t)
|
||||
if err != nil {
|
||||
return "", errs.WrapMsg(err, "marshal failed")
|
||||
}
|
||||
write = true
|
||||
|
||||
return string(bs), nil
|
||||
})
|
||||
if err != nil {
|
||||
return t, errs.Wrap(err)
|
||||
}
|
||||
if write {
|
||||
return t, nil
|
||||
}
|
||||
if v == "" {
|
||||
return t, errs.ErrRecordNotFound.WrapMsg("cache is not found")
|
||||
}
|
||||
err = json.Unmarshal([]byte(v), &t)
|
||||
if err != nil {
|
||||
errInfo := fmt.Sprintf("cache json.Unmarshal failed, key:%s, value:%s, expire:%s", key, v, expire)
|
||||
return t, errs.WrapMsg(err, errInfo)
|
||||
}
|
||||
|
||||
return t, nil
|
||||
}
|
||||
56
pkg/common/storage/cache/redis/batch_test.go
vendored
Normal file
@@ -0,0 +1,56 @@
|
||||
package redis
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/config"
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/database/mgo"
|
||||
"github.com/openimsdk/tools/db/mongoutil"
|
||||
"github.com/openimsdk/tools/db/redisutil"
|
||||
)
|
||||
|
||||
func TestName(t *testing.T) {
|
||||
//var rocks rockscache.Client
|
||||
//rdb := getRocksCacheRedisClient(&rocks)
|
||||
//t.Log(rdb == nil)
|
||||
|
||||
ctx := context.Background()
|
||||
rdb, err := redisutil.NewRedisClient(ctx, (&config.Redis{
|
||||
Address: []string{"172.16.8.48:16379"},
|
||||
Password: "openIM123",
|
||||
DB: 3,
|
||||
}).Build())
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
mgocli, err := mongoutil.NewMongoDB(ctx, (&config.Mongo{
|
||||
Address: []string{"172.16.8.48:37017"},
|
||||
Database: "openim_v3",
|
||||
Username: "openIM",
|
||||
Password: "openIM123",
|
||||
MaxPoolSize: 100,
|
||||
MaxRetry: 1,
|
||||
}).Build())
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
//userMgo, err := mgo.NewUserMongo(mgocli.GetDB())
|
||||
//if err != nil {
|
||||
// panic(err)
|
||||
//}
|
||||
//rock := rockscache.NewClient(rdb, rockscache.NewDefaultOptions())
|
||||
mgoSeqUser, err := mgo.NewSeqUserMongo(mgocli.GetDB())
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
seqUser := NewSeqUserCacheRedis(rdb, mgoSeqUser)
|
||||
|
||||
res, err := seqUser.GetUserReadSeqs(ctx, "2110910952", []string{"sg_2920732023", "sg_345762580"})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
t.Log(res)
|
||||
|
||||
}
|
||||
65
pkg/common/storage/cache/redis/black.go
vendored
Normal file
@@ -0,0 +1,65 @@
|
||||
package redis
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/config"
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache"
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache/cachekey"
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/database"
|
||||
"github.com/redis/go-redis/v9"
|
||||
)
|
||||
|
||||
const (
|
||||
blackExpireTime = time.Second * 60 * 60 * 12
|
||||
)
|
||||
|
||||
type BlackCacheRedis struct {
|
||||
cache.BatchDeleter
|
||||
expireTime time.Duration
|
||||
rcClient *rocksCacheClient
|
||||
blackDB database.Black
|
||||
}
|
||||
|
||||
func NewBlackCacheRedis(rdb redis.UniversalClient, localCache *config.LocalCache, blackDB database.Black) cache.BlackCache {
|
||||
rc := newRocksCacheClient(rdb)
|
||||
return &BlackCacheRedis{
|
||||
BatchDeleter: rc.GetBatchDeleter(localCache.Friend.Topic),
|
||||
expireTime: blackExpireTime,
|
||||
rcClient: rc,
|
||||
blackDB: blackDB,
|
||||
}
|
||||
}
|
||||
|
||||
func (b *BlackCacheRedis) CloneBlackCache() cache.BlackCache {
|
||||
return &BlackCacheRedis{
|
||||
BatchDeleter: b.BatchDeleter.Clone(),
|
||||
expireTime: b.expireTime,
|
||||
rcClient: b.rcClient,
|
||||
blackDB: b.blackDB,
|
||||
}
|
||||
}
|
||||
|
||||
func (b *BlackCacheRedis) getBlackIDsKey(ownerUserID string) string {
|
||||
return cachekey.GetBlackIDsKey(ownerUserID)
|
||||
}
|
||||
|
||||
func (b *BlackCacheRedis) GetBlackIDs(ctx context.Context, userID string) (blackIDs []string, err error) {
|
||||
return getCache(
|
||||
ctx,
|
||||
b.rcClient,
|
||||
b.getBlackIDsKey(userID),
|
||||
b.expireTime,
|
||||
func(ctx context.Context) ([]string, error) {
|
||||
return b.blackDB.FindBlackUserIDs(ctx, userID)
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
func (b *BlackCacheRedis) DelBlackIDs(_ context.Context, userID string) cache.BlackCache {
|
||||
cache := b.CloneBlackCache()
|
||||
cache.AddKeys(b.getBlackIDsKey(userID))
|
||||
|
||||
return cache
|
||||
}
|
||||
69
pkg/common/storage/cache/redis/client_config.go
vendored
Normal file
@@ -0,0 +1,69 @@
|
||||
package redis
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache"
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache/cachekey"
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/database"
|
||||
"github.com/redis/go-redis/v9"
|
||||
)
|
||||
|
||||
func NewClientConfigCache(rdb redis.UniversalClient, mgo database.ClientConfig) cache.ClientConfigCache {
|
||||
rc := newRocksCacheClient(rdb)
|
||||
return &ClientConfigCache{
|
||||
mgo: mgo,
|
||||
rcClient: rc,
|
||||
delete: rc.GetBatchDeleter(),
|
||||
}
|
||||
}
|
||||
|
||||
type ClientConfigCache struct {
|
||||
mgo database.ClientConfig
|
||||
rcClient *rocksCacheClient
|
||||
delete cache.BatchDeleter
|
||||
}
|
||||
|
||||
func (x *ClientConfigCache) getExpireTime(userID string) time.Duration {
|
||||
if userID == "" {
|
||||
return time.Hour * 24
|
||||
} else {
|
||||
return time.Hour
|
||||
}
|
||||
}
|
||||
|
||||
func (x *ClientConfigCache) getClientConfigKey(userID string) string {
|
||||
return cachekey.GetClientConfigKey(userID)
|
||||
}
|
||||
|
||||
func (x *ClientConfigCache) GetConfig(ctx context.Context, userID string) (map[string]string, error) {
|
||||
return getCache(ctx, x.rcClient, x.getClientConfigKey(userID), x.getExpireTime(userID), func(ctx context.Context) (map[string]string, error) {
|
||||
return x.mgo.Get(ctx, userID)
|
||||
})
|
||||
}
|
||||
|
||||
func (x *ClientConfigCache) DeleteUserCache(ctx context.Context, userIDs []string) error {
|
||||
keys := make([]string, 0, len(userIDs))
|
||||
for _, userID := range userIDs {
|
||||
keys = append(keys, x.getClientConfigKey(userID))
|
||||
}
|
||||
return x.delete.ExecDelWithKeys(ctx, keys)
|
||||
}
|
||||
|
||||
func (x *ClientConfigCache) GetUserConfig(ctx context.Context, userID string) (map[string]string, error) {
|
||||
config, err := x.GetConfig(ctx, "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if userID != "" {
|
||||
userConfig, err := x.GetConfig(ctx, userID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for k, v := range userConfig {
|
||||
config[k] = v
|
||||
}
|
||||
}
|
||||
return config, nil
|
||||
}
|
||||
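A hedged sketch, not part of the commit, of the precedence GetUserConfig above implements: the global configuration (empty userID) is loaded first and per-user keys overwrite it; the clientConfigCache handle and userID are assumptions:

	cfg, err := clientConfigCache.GetUserConfig(ctx, "user-123")
	if err != nil {
		return err
	}
	// cfg now holds the global settings with any "user-123" overrides applied on top.
	_ = cfg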
276
pkg/common/storage/cache/redis/conversation.go
vendored
Normal file
@@ -0,0 +1,276 @@
|
||||
package redis
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math/big"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/config"
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache"
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache/cachekey"
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/database"
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/model"
|
||||
"github.com/openimsdk/tools/utils/datautil"
|
||||
"github.com/openimsdk/tools/utils/encrypt"
|
||||
"github.com/redis/go-redis/v9"
|
||||
)
|
||||
|
||||
const (
|
||||
conversationExpireTime = time.Second * 60 * 60 * 12
|
||||
)
|
||||
|
||||
func NewConversationRedis(rdb redis.UniversalClient, localCache *config.LocalCache, db database.Conversation) cache.ConversationCache {
|
||||
rc := newRocksCacheClient(rdb)
|
||||
return &ConversationRedisCache{
|
||||
BatchDeleter: rc.GetBatchDeleter(localCache.Conversation.Topic),
|
||||
rcClient: rc,
|
||||
conversationDB: db,
|
||||
expireTime: conversationExpireTime,
|
||||
}
|
||||
}
|
||||
|
||||
type ConversationRedisCache struct {
|
||||
cache.BatchDeleter
|
||||
rcClient *rocksCacheClient
|
||||
conversationDB database.Conversation
|
||||
expireTime time.Duration
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) CloneConversationCache() cache.ConversationCache {
|
||||
return &ConversationRedisCache{
|
||||
BatchDeleter: c.BatchDeleter.Clone(),
|
||||
rcClient: c.rcClient,
|
||||
conversationDB: c.conversationDB,
|
||||
expireTime: c.expireTime,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) getConversationKey(ownerUserID, conversationID string) string {
|
||||
return cachekey.GetConversationKey(ownerUserID, conversationID)
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) getConversationIDsKey(ownerUserID string) string {
|
||||
return cachekey.GetConversationIDsKey(ownerUserID)
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) getNotNotifyConversationIDsKey(ownerUserID string) string {
|
||||
return cachekey.GetNotNotifyConversationIDsKey(ownerUserID)
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) getPinnedConversationIDsKey(ownerUserID string) string {
|
||||
return cachekey.GetPinnedConversationIDs(ownerUserID)
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) getSuperGroupRecvNotNotifyUserIDsKey(groupID string) string {
|
||||
return cachekey.GetSuperGroupRecvNotNotifyUserIDsKey(groupID)
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) getRecvMsgOptKey(ownerUserID, conversationID string) string {
|
||||
return cachekey.GetRecvMsgOptKey(ownerUserID, conversationID)
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) getSuperGroupRecvNotNotifyUserIDsHashKey(groupID string) string {
|
||||
return cachekey.GetSuperGroupRecvNotNotifyUserIDsHashKey(groupID)
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) getConversationHasReadSeqKey(ownerUserID, conversationID string) string {
|
||||
return cachekey.GetConversationHasReadSeqKey(ownerUserID, conversationID)
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) getConversationNotReceiveMessageUserIDsKey(conversationID string) string {
|
||||
return cachekey.GetConversationNotReceiveMessageUserIDsKey(conversationID)
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) getUserConversationIDsHashKey(ownerUserID string) string {
|
||||
return cachekey.GetUserConversationIDsHashKey(ownerUserID)
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) getConversationUserMaxVersionKey(ownerUserID string) string {
|
||||
return cachekey.GetConversationUserMaxVersionKey(ownerUserID)
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) GetUserConversationIDs(ctx context.Context, ownerUserID string) ([]string, error) {
|
||||
return getCache(ctx, c.rcClient, c.getConversationIDsKey(ownerUserID), c.expireTime, func(ctx context.Context) ([]string, error) {
|
||||
return c.conversationDB.FindUserIDAllConversationID(ctx, ownerUserID)
|
||||
})
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) GetUserNotNotifyConversationIDs(ctx context.Context, userID string) ([]string, error) {
|
||||
return getCache(ctx, c.rcClient, c.getNotNotifyConversationIDsKey(userID), c.expireTime, func(ctx context.Context) ([]string, error) {
|
||||
return c.conversationDB.FindUserIDAllNotNotifyConversationID(ctx, userID)
|
||||
})
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) GetPinnedConversationIDs(ctx context.Context, userID string) ([]string, error) {
|
||||
return getCache(ctx, c.rcClient, c.getPinnedConversationIDsKey(userID), c.expireTime, func(ctx context.Context) ([]string, error) {
|
||||
return c.conversationDB.FindUserIDAllPinnedConversationID(ctx, userID)
|
||||
})
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) DelConversationIDs(userIDs ...string) cache.ConversationCache {
|
||||
keys := make([]string, 0, len(userIDs))
|
||||
for _, userID := range userIDs {
|
||||
keys = append(keys, c.getConversationIDsKey(userID))
|
||||
}
|
||||
cache := c.CloneConversationCache()
|
||||
cache.AddKeys(keys...)
|
||||
|
||||
return cache
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) GetUserConversationIDsHash(ctx context.Context, ownerUserID string) (hash uint64, err error) {
|
||||
return getCache(
|
||||
ctx,
|
||||
c.rcClient,
|
||||
c.getUserConversationIDsHashKey(ownerUserID),
|
||||
c.expireTime,
|
||||
func(ctx context.Context) (uint64, error) {
|
||||
conversationIDs, err := c.GetUserConversationIDs(ctx, ownerUserID)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
datautil.Sort(conversationIDs, true)
|
||||
bi := big.NewInt(0)
|
||||
bi.SetString(encrypt.Md5(strings.Join(conversationIDs, ";"))[0:8], 16)
|
||||
return bi.Uint64(), nil
|
||||
},
|
||||
)
|
||||
}
|
||||
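// Illustrative only (not part of the commit): a typical consumer compares this
// 64-bit digest against a client-reported value to decide whether the full
// conversation ID list must be re-synced; the convCache handle and variable
// names are assumptions for the example.
//
//	serverHash, err := convCache.GetUserConversationIDsHash(ctx, userID)
//	if err != nil {
//		return err
//	}
//	if serverHash != clientHash {
//		ids, err := convCache.GetUserConversationIDs(ctx, userID)
//		if err != nil {
//			return err
//		}
//		_ = ids // push the refreshed list to the client
//	}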
|
||||
func (c *ConversationRedisCache) DelUserConversationIDsHash(ownerUserIDs ...string) cache.ConversationCache {
|
||||
keys := make([]string, 0, len(ownerUserIDs))
|
||||
for _, ownerUserID := range ownerUserIDs {
|
||||
keys = append(keys, c.getUserConversationIDsHashKey(ownerUserID))
|
||||
}
|
||||
cache := c.CloneConversationCache()
|
||||
cache.AddKeys(keys...)
|
||||
|
||||
return cache
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) GetConversation(ctx context.Context, ownerUserID, conversationID string) (*model.Conversation, error) {
|
||||
return getCache(ctx, c.rcClient, c.getConversationKey(ownerUserID, conversationID), c.expireTime, func(ctx context.Context) (*model.Conversation, error) {
|
||||
return c.conversationDB.Take(ctx, ownerUserID, conversationID)
|
||||
})
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) DelConversations(ownerUserID string, conversationIDs ...string) cache.ConversationCache {
|
||||
keys := make([]string, 0, len(conversationIDs))
|
||||
for _, conversationID := range conversationIDs {
|
||||
keys = append(keys, c.getConversationKey(ownerUserID, conversationID))
|
||||
}
|
||||
cache := c.CloneConversationCache()
|
||||
cache.AddKeys(keys...)
|
||||
|
||||
return cache
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) GetConversations(ctx context.Context, ownerUserID string, conversationIDs []string) ([]*model.Conversation, error) {
|
||||
return batchGetCache2(ctx, c.rcClient, c.expireTime, conversationIDs, func(conversationID string) string {
|
||||
return c.getConversationKey(ownerUserID, conversationID)
|
||||
}, func(conversation *model.Conversation) string {
|
||||
return conversation.ConversationID
|
||||
}, func(ctx context.Context, conversationIDs []string) ([]*model.Conversation, error) {
|
||||
return c.conversationDB.Find(ctx, ownerUserID, conversationIDs)
|
||||
})
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) GetUserAllConversations(ctx context.Context, ownerUserID string) ([]*model.Conversation, error) {
|
||||
conversationIDs, err := c.GetUserConversationIDs(ctx, ownerUserID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return c.GetConversations(ctx, ownerUserID, conversationIDs)
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) GetUserRecvMsgOpt(ctx context.Context, ownerUserID, conversationID string) (opt int, err error) {
|
||||
return getCache(ctx, c.rcClient, c.getRecvMsgOptKey(ownerUserID, conversationID), c.expireTime, func(ctx context.Context) (opt int, err error) {
|
||||
return c.conversationDB.GetUserRecvMsgOpt(ctx, ownerUserID, conversationID)
|
||||
})
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) DelUsersConversation(conversationID string, ownerUserIDs ...string) cache.ConversationCache {
|
||||
keys := make([]string, 0, len(ownerUserIDs))
|
||||
for _, ownerUserID := range ownerUserIDs {
|
||||
keys = append(keys, c.getConversationKey(ownerUserID, conversationID))
|
||||
}
|
||||
cache := c.CloneConversationCache()
|
||||
cache.AddKeys(keys...)
|
||||
|
||||
return cache
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) DelUserRecvMsgOpt(ownerUserID, conversationID string) cache.ConversationCache {
|
||||
cache := c.CloneConversationCache()
|
||||
cache.AddKeys(c.getRecvMsgOptKey(ownerUserID, conversationID))
|
||||
|
||||
return cache
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) DelSuperGroupRecvMsgNotNotifyUserIDs(groupID string) cache.ConversationCache {
|
||||
cache := c.CloneConversationCache()
|
||||
cache.AddKeys(c.getSuperGroupRecvNotNotifyUserIDsKey(groupID))
|
||||
|
||||
return cache
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) DelSuperGroupRecvMsgNotNotifyUserIDsHash(groupID string) cache.ConversationCache {
|
||||
cache := c.CloneConversationCache()
|
||||
cache.AddKeys(c.getSuperGroupRecvNotNotifyUserIDsHashKey(groupID))
|
||||
|
||||
return cache
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) DelUserAllHasReadSeqs(ownerUserID string, conversationIDs ...string) cache.ConversationCache {
|
||||
cache := c.CloneConversationCache()
|
||||
for _, conversationID := range conversationIDs {
|
||||
cache.AddKeys(c.getConversationHasReadSeqKey(ownerUserID, conversationID))
|
||||
}
|
||||
|
||||
return cache
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) GetConversationNotReceiveMessageUserIDs(ctx context.Context, conversationID string) ([]string, error) {
|
||||
return getCache(ctx, c.rcClient, c.getConversationNotReceiveMessageUserIDsKey(conversationID), c.expireTime, func(ctx context.Context) ([]string, error) {
|
||||
return c.conversationDB.GetConversationNotReceiveMessageUserIDs(ctx, conversationID)
|
||||
})
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) DelConversationNotReceiveMessageUserIDs(conversationIDs ...string) cache.ConversationCache {
|
||||
cache := c.CloneConversationCache()
|
||||
for _, conversationID := range conversationIDs {
|
||||
cache.AddKeys(c.getConversationNotReceiveMessageUserIDsKey(conversationID))
|
||||
}
|
||||
return cache
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) DelConversationNotNotifyMessageUserIDs(userIDs ...string) cache.ConversationCache {
|
||||
cache := c.CloneConversationCache()
|
||||
for _, userID := range userIDs {
|
||||
cache.AddKeys(c.getNotNotifyConversationIDsKey(userID))
|
||||
}
|
||||
return cache
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) DelUserPinnedConversations(userIDs ...string) cache.ConversationCache {
|
||||
cache := c.CloneConversationCache()
|
||||
for _, userID := range userIDs {
|
||||
cache.AddKeys(c.getPinnedConversationIDsKey(userID))
|
||||
}
|
||||
return cache
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) DelConversationVersionUserIDs(userIDs ...string) cache.ConversationCache {
|
||||
cache := c.CloneConversationCache()
|
||||
for _, userID := range userIDs {
|
||||
cache.AddKeys(c.getConversationUserMaxVersionKey(userID))
|
||||
}
|
||||
return cache
|
||||
}
|
||||
|
||||
func (c *ConversationRedisCache) FindMaxConversationUserVersion(ctx context.Context, userID string) (*model.VersionLog, error) {
|
||||
return getCache(ctx, c.rcClient, c.getConversationUserMaxVersionKey(userID), c.expireTime, func(ctx context.Context) (*model.VersionLog, error) {
|
||||
return c.conversationDB.FindConversationUserVersion(ctx, userID, 0, 0)
|
||||
})
|
||||
}
|
||||
167
pkg/common/storage/cache/redis/friend.go
vendored
Normal file
@@ -0,0 +1,167 @@
|
||||
package redis
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/config"
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache"
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache/cachekey"
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/database"
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/model"
|
||||
"github.com/openimsdk/tools/utils/datautil"
|
||||
"github.com/redis/go-redis/v9"
|
||||
)
|
||||
|
||||
const (
|
||||
friendExpireTime = time.Second * 60 * 60 * 12
|
||||
)
|
||||
|
||||
// FriendCacheRedis is an implementation of the FriendCache interface using Redis.
|
||||
type FriendCacheRedis struct {
|
||||
cache.BatchDeleter
|
||||
friendDB database.Friend
|
||||
expireTime time.Duration
|
||||
rcClient *rocksCacheClient
|
||||
syncCount int
|
||||
}
|
||||
|
||||
// NewFriendCacheRedis creates a new instance of FriendCacheRedis.
|
||||
func NewFriendCacheRedis(rdb redis.UniversalClient, localCache *config.LocalCache, friendDB database.Friend) cache.FriendCache {
|
||||
rc := newRocksCacheClient(rdb)
|
||||
return &FriendCacheRedis{
|
||||
BatchDeleter: rc.GetBatchDeleter(localCache.Friend.Topic),
|
||||
friendDB: friendDB,
|
||||
expireTime: friendExpireTime,
|
||||
rcClient: rc,
|
||||
}
|
||||
}
|
||||
|
||||
func (f *FriendCacheRedis) CloneFriendCache() cache.FriendCache {
|
||||
return &FriendCacheRedis{
|
||||
BatchDeleter: f.BatchDeleter.Clone(),
|
||||
friendDB: f.friendDB,
|
||||
expireTime: f.expireTime,
|
||||
rcClient: f.rcClient,
|
||||
}
|
||||
}
|
||||
|
||||
// getFriendIDsKey returns the key for storing friend IDs in the cache.
|
||||
func (f *FriendCacheRedis) getFriendIDsKey(ownerUserID string) string {
|
||||
return cachekey.GetFriendIDsKey(ownerUserID)
|
||||
}
|
||||
|
||||
func (f *FriendCacheRedis) getFriendMaxVersionKey(ownerUserID string) string {
|
||||
return cachekey.GetFriendMaxVersionKey(ownerUserID)
|
||||
}
|
||||
|
||||
// getTwoWayFriendsIDsKey returns the key for storing two-way friend IDs in the cache.
|
||||
func (f *FriendCacheRedis) getTwoWayFriendsIDsKey(ownerUserID string) string {
|
||||
return cachekey.GetTwoWayFriendsIDsKey(ownerUserID)
|
||||
}
|
||||
|
||||
// getFriendKey returns the key for storing friend info in the cache.
|
||||
func (f *FriendCacheRedis) getFriendKey(ownerUserID, friendUserID string) string {
|
||||
return cachekey.GetFriendKey(ownerUserID, friendUserID)
|
||||
}
|
||||
|
||||
// GetFriendIDs retrieves friend IDs from the cache or the database if not found.
|
||||
func (f *FriendCacheRedis) GetFriendIDs(ctx context.Context, ownerUserID string) (friendIDs []string, err error) {
|
||||
return getCache(ctx, f.rcClient, f.getFriendIDsKey(ownerUserID), f.expireTime, func(ctx context.Context) ([]string, error) {
|
||||
return f.friendDB.FindFriendUserIDs(ctx, ownerUserID)
|
||||
})
|
||||
}
|
||||
|
||||
// DelFriendIDs deletes friend IDs from the cache.
|
||||
func (f *FriendCacheRedis) DelFriendIDs(ownerUserIDs ...string) cache.FriendCache {
|
||||
newFriendCache := f.CloneFriendCache()
|
||||
keys := make([]string, 0, len(ownerUserIDs))
|
||||
for _, userID := range ownerUserIDs {
|
||||
keys = append(keys, f.getFriendIDsKey(userID))
|
||||
}
|
||||
newFriendCache.AddKeys(keys...)
|
||||
|
||||
return newFriendCache
|
||||
}
|
||||
|
||||
// GetTwoWayFriendIDs retrieves two-way friend IDs from the cache.
|
||||
func (f *FriendCacheRedis) GetTwoWayFriendIDs(ctx context.Context, ownerUserID string) (twoWayFriendIDs []string, err error) {
|
||||
friendIDs, err := f.GetFriendIDs(ctx, ownerUserID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, friendID := range friendIDs {
|
||||
friendFriendID, err := f.GetFriendIDs(ctx, friendID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if datautil.Contain(ownerUserID, friendFriendID...) {
// the relationship is mutual, so record the friend's ID rather than the owner's own ID
twoWayFriendIDs = append(twoWayFriendIDs, friendID)
|
||||
}
|
||||
}
|
||||
|
||||
return twoWayFriendIDs, nil
|
||||
}
|
||||
|
||||
// DelTwoWayFriendIDs deletes two-way friend IDs from the cache.
|
||||
func (f *FriendCacheRedis) DelTwoWayFriendIDs(ctx context.Context, ownerUserID string) cache.FriendCache {
|
||||
newFriendCache := f.CloneFriendCache()
|
||||
newFriendCache.AddKeys(f.getTwoWayFriendsIDsKey(ownerUserID))
|
||||
|
||||
return newFriendCache
|
||||
}
|
||||
|
||||
// GetFriend retrieves friend info from the cache or the database if not found.
|
||||
func (f *FriendCacheRedis) GetFriend(ctx context.Context, ownerUserID, friendUserID string) (friend *model.Friend, err error) {
|
||||
return getCache(ctx, f.rcClient, f.getFriendKey(ownerUserID,
|
||||
friendUserID), f.expireTime, func(ctx context.Context) (*model.Friend, error) {
|
||||
return f.friendDB.Take(ctx, ownerUserID, friendUserID)
|
||||
})
|
||||
}
|
||||
|
||||
// DelFriend deletes friend info from the cache.
|
||||
func (f *FriendCacheRedis) DelFriend(ownerUserID, friendUserID string) cache.FriendCache {
|
||||
newFriendCache := f.CloneFriendCache()
|
||||
newFriendCache.AddKeys(f.getFriendKey(ownerUserID, friendUserID))
|
||||
|
||||
return newFriendCache
|
||||
}
|
||||
|
||||
// DelFriends deletes multiple friend infos from the cache.
|
||||
func (f *FriendCacheRedis) DelFriends(ownerUserID string, friendUserIDs []string) cache.FriendCache {
|
||||
newFriendCache := f.CloneFriendCache()
|
||||
|
||||
for _, friendUserID := range friendUserIDs {
|
||||
key := f.getFriendKey(ownerUserID, friendUserID)
|
||||
newFriendCache.AddKeys(key) // Assuming AddKeys marks the keys for deletion
|
||||
}
|
||||
|
||||
return newFriendCache
|
||||
}
|
||||
|
||||
func (f *FriendCacheRedis) DelOwner(friendUserID string, ownerUserIDs []string) cache.FriendCache {
|
||||
newFriendCache := f.CloneFriendCache()
|
||||
|
||||
for _, ownerUserID := range ownerUserIDs {
|
||||
key := f.getFriendKey(ownerUserID, friendUserID)
|
||||
newFriendCache.AddKeys(key) // Assuming AddKeys marks the keys for deletion
|
||||
}
|
||||
|
||||
return newFriendCache
|
||||
}
|
||||
|
||||
func (f *FriendCacheRedis) DelMaxFriendVersion(ownerUserIDs ...string) cache.FriendCache {
|
||||
newFriendCache := f.CloneFriendCache()
|
||||
for _, ownerUserID := range ownerUserIDs {
|
||||
key := f.getFriendMaxVersionKey(ownerUserID)
|
||||
newFriendCache.AddKeys(key) // Assuming AddKeys marks the keys for deletion
|
||||
}
|
||||
|
||||
return newFriendCache
|
||||
}
|
||||
|
||||
func (f *FriendCacheRedis) FindMaxFriendVersion(ctx context.Context, ownerUserID string) (*model.VersionLog, error) {
|
||||
return getCache(ctx, f.rcClient, f.getFriendMaxVersionKey(ownerUserID), f.expireTime, func(ctx context.Context) (*model.VersionLog, error) {
|
||||
return f.friendDB.FindIncrVersion(ctx, ownerUserID, 0, 0)
|
||||
})
|
||||
}
|
||||
385
pkg/common/storage/cache/redis/group.go
vendored
Normal file
@@ -0,0 +1,385 @@
|
||||
package redis
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/config"
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache"
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache/cachekey"
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/common"
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/database"
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/model"
|
||||
"git.imall.cloud/openim/protocol/constant"
|
||||
"github.com/openimsdk/tools/errs"
|
||||
"github.com/openimsdk/tools/log"
|
||||
"github.com/redis/go-redis/v9"
|
||||
)
|
||||
|
||||
const (
|
||||
groupExpireTime = time.Second * 60 * 60 * 12
|
||||
)
|
||||
|
||||
type GroupCacheRedis struct {
|
||||
cache.BatchDeleter
|
||||
groupDB database.Group
|
||||
groupMemberDB database.GroupMember
|
||||
groupRequestDB database.GroupRequest
|
||||
expireTime time.Duration
|
||||
rcClient *rocksCacheClient
|
||||
groupHash cache.GroupHash
|
||||
}
|
||||
|
||||
func NewGroupCacheRedis(rdb redis.UniversalClient, localCache *config.LocalCache, groupDB database.Group, groupMemberDB database.GroupMember, groupRequestDB database.GroupRequest, hashCode cache.GroupHash) cache.GroupCache {
|
||||
rc := newRocksCacheClient(rdb)
|
||||
return &GroupCacheRedis{
|
||||
BatchDeleter: rc.GetBatchDeleter(localCache.Group.Topic),
|
||||
rcClient: rc,
|
||||
expireTime: groupExpireTime,
|
||||
groupDB: groupDB,
|
||||
groupMemberDB: groupMemberDB,
|
||||
groupRequestDB: groupRequestDB,
|
||||
groupHash: hashCode,
|
||||
}
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) CloneGroupCache() cache.GroupCache {
|
||||
return &GroupCacheRedis{
|
||||
BatchDeleter: g.BatchDeleter.Clone(),
|
||||
rcClient: g.rcClient,
|
||||
expireTime: g.expireTime,
|
||||
groupDB: g.groupDB,
|
||||
groupMemberDB: g.groupMemberDB,
|
||||
groupRequestDB: g.groupRequestDB,
groupHash: g.groupHash,
}
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) getGroupInfoKey(groupID string) string {
|
||||
return cachekey.GetGroupInfoKey(groupID)
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) getJoinedGroupsKey(userID string) string {
|
||||
return cachekey.GetJoinedGroupsKey(userID)
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) getGroupMembersHashKey(groupID string) string {
|
||||
return cachekey.GetGroupMembersHashKey(groupID)
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) getGroupMemberIDsKey(groupID string) string {
|
||||
return cachekey.GetGroupMemberIDsKey(groupID)
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) getGroupMemberInfoKey(groupID, userID string) string {
|
||||
return cachekey.GetGroupMemberInfoKey(groupID, userID)
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) getGroupMemberNumKey(groupID string) string {
|
||||
return cachekey.GetGroupMemberNumKey(groupID)
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) getGroupRoleLevelMemberIDsKey(groupID string, roleLevel int32) string {
|
||||
return cachekey.GetGroupRoleLevelMemberIDsKey(groupID, roleLevel)
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) getGroupMemberMaxVersionKey(groupID string) string {
|
||||
return cachekey.GetGroupMemberMaxVersionKey(groupID)
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) getJoinGroupMaxVersionKey(userID string) string {
|
||||
return cachekey.GetJoinGroupMaxVersionKey(userID)
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) getGroupID(group *model.Group) string {
|
||||
return group.GroupID
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) GetGroupsInfo(ctx context.Context, groupIDs []string) (groups []*model.Group, err error) {
|
||||
return batchGetCache2(ctx, g.rcClient, g.expireTime, groupIDs, g.getGroupInfoKey, g.getGroupID, g.groupDB.Find)
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) GetGroupInfo(ctx context.Context, groupID string) (group *model.Group, err error) {
|
||||
return getCache(ctx, g.rcClient, g.getGroupInfoKey(groupID), g.expireTime, func(ctx context.Context) (*model.Group, error) {
|
||||
return g.groupDB.Take(ctx, groupID)
|
||||
})
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) DelGroupsInfo(groupIDs ...string) cache.GroupCache {
|
||||
newGroupCache := g.CloneGroupCache()
|
||||
keys := make([]string, 0, len(groupIDs))
|
||||
for _, groupID := range groupIDs {
|
||||
keys = append(keys, g.getGroupInfoKey(groupID))
|
||||
}
|
||||
newGroupCache.AddKeys(keys...)
|
||||
|
||||
return newGroupCache
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) DelGroupsOwner(groupIDs ...string) cache.GroupCache {
|
||||
newGroupCache := g.CloneGroupCache()
|
||||
keys := make([]string, 0, len(groupIDs))
|
||||
for _, groupID := range groupIDs {
|
||||
keys = append(keys, g.getGroupRoleLevelMemberIDsKey(groupID, constant.GroupOwner))
|
||||
}
|
||||
newGroupCache.AddKeys(keys...)
|
||||
|
||||
return newGroupCache
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) DelGroupRoleLevel(groupID string, roleLevels []int32) cache.GroupCache {
|
||||
newGroupCache := g.CloneGroupCache()
|
||||
keys := make([]string, 0, len(roleLevels))
|
||||
for _, roleLevel := range roleLevels {
|
||||
keys = append(keys, g.getGroupRoleLevelMemberIDsKey(groupID, roleLevel))
|
||||
}
|
||||
newGroupCache.AddKeys(keys...)
|
||||
return newGroupCache
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) DelGroupAllRoleLevel(groupID string) cache.GroupCache {
|
||||
return g.DelGroupRoleLevel(groupID, []int32{constant.GroupOwner, constant.GroupAdmin, constant.GroupOrdinaryUsers})
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) GetGroupMembersHash(ctx context.Context, groupID string) (hashCode uint64, err error) {
|
||||
if g.groupHash == nil {
|
||||
return 0, errs.ErrInternalServer.WrapMsg("group hash is nil")
|
||||
}
|
||||
return getCache(ctx, g.rcClient, g.getGroupMembersHashKey(groupID), g.expireTime, func(ctx context.Context) (uint64, error) {
|
||||
return g.groupHash.GetGroupHash(ctx, groupID)
|
||||
})
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) GetGroupMemberHashMap(ctx context.Context, groupIDs []string) (map[string]*common.GroupSimpleUserID, error) {
|
||||
if g.groupHash == nil {
|
||||
return nil, errs.ErrInternalServer.WrapMsg("group hash is nil")
|
||||
}
|
||||
res := make(map[string]*common.GroupSimpleUserID)
|
||||
for _, groupID := range groupIDs {
|
||||
hash, err := g.GetGroupMembersHash(ctx, groupID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
log.ZDebug(ctx, "GetGroupMemberHashMap", "groupID", groupID, "hash", hash)
|
||||
num, err := g.GetGroupMemberNum(ctx, groupID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
res[groupID] = &common.GroupSimpleUserID{Hash: hash, MemberNum: uint32(num)}
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) DelGroupMembersHash(groupID string) cache.GroupCache {
|
||||
cache := g.CloneGroupCache()
|
||||
cache.AddKeys(g.getGroupMembersHashKey(groupID))
|
||||
|
||||
return cache
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) GetGroupMemberIDs(ctx context.Context, groupID string) (groupMemberIDs []string, err error) {
|
||||
return getCache(ctx, g.rcClient, g.getGroupMemberIDsKey(groupID), g.expireTime, func(ctx context.Context) ([]string, error) {
|
||||
return g.groupMemberDB.FindMemberUserID(ctx, groupID)
|
||||
})
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) DelGroupMemberIDs(groupID string) cache.GroupCache {
|
||||
cache := g.CloneGroupCache()
|
||||
cache.AddKeys(g.getGroupMemberIDsKey(groupID))
|
||||
|
||||
return cache
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) findUserJoinedGroupID(ctx context.Context, userID string) ([]string, error) {
|
||||
groupIDs, err := g.groupMemberDB.FindUserJoinedGroupID(ctx, userID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return g.groupDB.FindJoinSortGroupID(ctx, groupIDs)
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) GetJoinedGroupIDs(ctx context.Context, userID string) (joinedGroupIDs []string, err error) {
|
||||
return getCache(ctx, g.rcClient, g.getJoinedGroupsKey(userID), g.expireTime, func(ctx context.Context) ([]string, error) {
|
||||
return g.findUserJoinedGroupID(ctx, userID)
|
||||
})
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) DelJoinedGroupID(userIDs ...string) cache.GroupCache {
|
||||
keys := make([]string, 0, len(userIDs))
|
||||
for _, userID := range userIDs {
|
||||
keys = append(keys, g.getJoinedGroupsKey(userID))
|
||||
}
|
||||
cache := g.CloneGroupCache()
|
||||
cache.AddKeys(keys...)
|
||||
|
||||
return cache
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) GetGroupMemberInfo(ctx context.Context, groupID, userID string) (groupMember *model.GroupMember, err error) {
|
||||
return getCache(ctx, g.rcClient, g.getGroupMemberInfoKey(groupID, userID), g.expireTime, func(ctx context.Context) (*model.GroupMember, error) {
|
||||
return g.groupMemberDB.Take(ctx, groupID, userID)
|
||||
})
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) GetGroupMembersInfo(ctx context.Context, groupID string, userIDs []string) ([]*model.GroupMember, error) {
|
||||
return batchGetCache2(ctx, g.rcClient, g.expireTime, userIDs, func(userID string) string {
|
||||
return g.getGroupMemberInfoKey(groupID, userID)
|
||||
}, func(member *model.GroupMember) string {
|
||||
return member.UserID
|
||||
}, func(ctx context.Context, userIDs []string) ([]*model.GroupMember, error) {
|
||||
return g.groupMemberDB.Find(ctx, groupID, userIDs)
|
||||
})
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) GetAllGroupMembersInfo(ctx context.Context, groupID string) (groupMembers []*model.GroupMember, err error) {
|
||||
groupMemberIDs, err := g.GetGroupMemberIDs(ctx, groupID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return g.GetGroupMembersInfo(ctx, groupID, groupMemberIDs)
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) DelGroupMembersInfo(groupID string, userIDs ...string) cache.GroupCache {
|
||||
keys := make([]string, 0, len(userIDs))
|
||||
for _, userID := range userIDs {
|
||||
keys = append(keys, g.getGroupMemberInfoKey(groupID, userID))
|
||||
}
|
||||
cache := g.CloneGroupCache()
|
||||
cache.AddKeys(keys...)
|
||||
|
||||
return cache
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) GetGroupMemberNum(ctx context.Context, groupID string) (memberNum int64, err error) {
|
||||
return getCache(ctx, g.rcClient, g.getGroupMemberNumKey(groupID), g.expireTime, func(ctx context.Context) (int64, error) {
|
||||
return g.groupMemberDB.TakeGroupMemberNum(ctx, groupID)
|
||||
})
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) DelGroupsMemberNum(groupID ...string) cache.GroupCache {
|
||||
keys := make([]string, 0, len(groupID))
|
||||
for _, groupID := range groupID {
|
||||
keys = append(keys, g.getGroupMemberNumKey(groupID))
|
||||
}
|
||||
cache := g.CloneGroupCache()
|
||||
cache.AddKeys(keys...)
|
||||
|
||||
return cache
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) GetGroupOwner(ctx context.Context, groupID string) (*model.GroupMember, error) {
|
||||
members, err := g.GetGroupRoleLevelMemberInfo(ctx, groupID, constant.GroupOwner)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(members) == 0 {
|
||||
return nil, errs.ErrRecordNotFound.WrapMsg(fmt.Sprintf("group %s owner not found", groupID))
|
||||
}
|
||||
return members[0], nil
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) GetGroupsOwner(ctx context.Context, groupIDs []string) ([]*model.GroupMember, error) {
|
||||
members := make([]*model.GroupMember, 0, len(groupIDs))
|
||||
for _, groupID := range groupIDs {
|
||||
items, err := g.GetGroupRoleLevelMemberInfo(ctx, groupID, constant.GroupOwner)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(items) > 0 {
|
||||
members = append(members, items[0])
|
||||
}
|
||||
}
|
||||
return members, nil
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) GetGroupRoleLevelMemberIDs(ctx context.Context, groupID string, roleLevel int32) ([]string, error) {
|
||||
return getCache(ctx, g.rcClient, g.getGroupRoleLevelMemberIDsKey(groupID, roleLevel), g.expireTime, func(ctx context.Context) ([]string, error) {
|
||||
return g.groupMemberDB.FindRoleLevelUserIDs(ctx, groupID, roleLevel)
|
||||
})
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) GetGroupRoleLevelMemberInfo(ctx context.Context, groupID string, roleLevel int32) ([]*model.GroupMember, error) {
|
||||
userIDs, err := g.GetGroupRoleLevelMemberIDs(ctx, groupID, roleLevel)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return g.GetGroupMembersInfo(ctx, groupID, userIDs)
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) GetGroupRolesLevelMemberInfo(ctx context.Context, groupID string, roleLevels []int32) ([]*model.GroupMember, error) {
|
||||
var userIDs []string
|
||||
for _, roleLevel := range roleLevels {
|
||||
ids, err := g.GetGroupRoleLevelMemberIDs(ctx, groupID, roleLevel)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
userIDs = append(userIDs, ids...)
|
||||
}
|
||||
return g.GetGroupMembersInfo(ctx, groupID, userIDs)
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) FindGroupMemberUser(ctx context.Context, groupIDs []string, userID string) ([]*model.GroupMember, error) {
|
||||
if len(groupIDs) == 0 {
|
||||
var err error
|
||||
groupIDs, err = g.GetJoinedGroupIDs(ctx, userID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return batchGetCache2(ctx, g.rcClient, g.expireTime, groupIDs, func(groupID string) string {
|
||||
return g.getGroupMemberInfoKey(groupID, userID)
|
||||
}, func(member *model.GroupMember) string {
|
||||
return member.GroupID
|
||||
}, func(ctx context.Context, groupIDs []string) ([]*model.GroupMember, error) {
|
||||
return g.groupMemberDB.FindInGroup(ctx, userID, groupIDs)
|
||||
})
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) DelMaxGroupMemberVersion(groupIDs ...string) cache.GroupCache {
|
||||
keys := make([]string, 0, len(groupIDs))
|
||||
for _, groupID := range groupIDs {
|
||||
keys = append(keys, g.getGroupMemberMaxVersionKey(groupID))
|
||||
}
|
||||
cache := g.CloneGroupCache()
|
||||
cache.AddKeys(keys...)
|
||||
return cache
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) DelMaxJoinGroupVersion(userIDs ...string) cache.GroupCache {
|
||||
keys := make([]string, 0, len(userIDs))
|
||||
for _, userID := range userIDs {
|
||||
keys = append(keys, g.getJoinGroupMaxVersionKey(userID))
|
||||
}
|
||||
cache := g.CloneGroupCache()
|
||||
cache.AddKeys(keys...)
|
||||
return cache
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) FindMaxGroupMemberVersion(ctx context.Context, groupID string) (*model.VersionLog, error) {
|
||||
return getCache(ctx, g.rcClient, g.getGroupMemberMaxVersionKey(groupID), g.expireTime, func(ctx context.Context) (*model.VersionLog, error) {
|
||||
return g.groupMemberDB.FindMemberIncrVersion(ctx, groupID, 0, 0)
|
||||
})
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) BatchFindMaxGroupMemberVersion(ctx context.Context, groupIDs []string) ([]*model.VersionLog, error) {
|
||||
return batchGetCache2(ctx, g.rcClient, g.expireTime, groupIDs,
|
||||
func(groupID string) string {
|
||||
return g.getGroupMemberMaxVersionKey(groupID)
|
||||
}, func(versionLog *model.VersionLog) string {
|
||||
return versionLog.DID
|
||||
}, func(ctx context.Context, groupIDs []string) ([]*model.VersionLog, error) {
|
||||
// create two zero-valued slices the same length as groupIDs; only zero versions and limits are needed here
|
||||
versions := make([]uint, len(groupIDs))
|
||||
limits := make([]int, len(groupIDs))
|
||||
|
||||
return g.groupMemberDB.BatchFindMemberIncrVersion(ctx, groupIDs, versions, limits)
|
||||
})
|
||||
}
|
||||
|
||||
func (g *GroupCacheRedis) FindMaxJoinGroupVersion(ctx context.Context, userID string) (*model.VersionLog, error) {
|
||||
return getCache(ctx, g.rcClient, g.getJoinGroupMaxVersionKey(userID), g.expireTime, func(ctx context.Context) (*model.VersionLog, error) {
|
||||
return g.groupMemberDB.FindJoinIncrVersion(ctx, userID, 0, 0)
|
||||
})
|
||||
}
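The Del* methods above never touch Redis directly: they clone the cache object and only accumulate keys, leaving the actual deletion to a finalizer invoked once per logical operation. A minimal sketch of that pattern follows, assuming the Del* methods and a ChainExecDel-style finalizer are declared on cache.GroupCache; the finalizer name is an assumption, not part of this diff.

// Sketch only: collect every key affected by a membership change, then delete once.
// ChainExecDel is a hypothetical finalizer; the real executor is defined elsewhere.
func invalidateGroupMember(ctx context.Context, c cache.GroupCache, groupID string, userIDs []string) error {
	return c.DelGroupMemberIDs(groupID).
		DelGroupMembersInfo(groupID, userIDs...).
		DelGroupsMemberNum(groupID).
		DelMaxGroupMemberVersion(groupID).
		ChainExecDel(ctx)
}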
|
||||
127
pkg/common/storage/cache/redis/lua_script.go
vendored
Normal file
@@ -0,0 +1,127 @@
|
||||
package redis
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/servererrs"
|
||||
"github.com/openimsdk/tools/errs"
|
||||
"github.com/openimsdk/tools/log"
|
||||
"github.com/redis/go-redis/v9"
|
||||
)
|
||||
|
||||
var (
|
||||
setBatchWithCommonExpireScript = redis.NewScript(`
|
||||
local expire = tonumber(ARGV[1])
|
||||
for i, key in ipairs(KEYS) do
|
||||
redis.call('SET', key, ARGV[i + 1])
|
||||
redis.call('EXPIRE', key, expire)
|
||||
end
|
||||
return #KEYS
|
||||
`)
|
||||
|
||||
setBatchWithIndividualExpireScript = redis.NewScript(`
|
||||
local n = #KEYS
|
||||
for i = 1, n do
|
||||
redis.call('SET', KEYS[i], ARGV[i])
|
||||
redis.call('EXPIRE', KEYS[i], ARGV[i + n])
|
||||
end
|
||||
return n
|
||||
`)
|
||||
|
||||
deleteBatchScript = redis.NewScript(`
|
||||
for i, key in ipairs(KEYS) do
|
||||
redis.call('DEL', key)
|
||||
end
|
||||
return #KEYS
|
||||
`)
|
||||
|
||||
getBatchScript = redis.NewScript(`
|
||||
local values = {}
|
||||
for i, key in ipairs(KEYS) do
|
||||
local value = redis.call('GET', key)
|
||||
table.insert(values, value)
|
||||
end
|
||||
return values
|
||||
`)
|
||||
)
|
||||
|
||||
func callLua(ctx context.Context, rdb redis.Scripter, script *redis.Script, keys []string, args []any) (any, error) {
|
||||
log.ZDebug(ctx, "callLua args", "scriptHash", script.Hash(), "keys", keys, "args", args)
|
||||
r := script.EvalSha(ctx, rdb, keys, args)
|
||||
if redis.HasErrorPrefix(r.Err(), "NOSCRIPT") {
|
||||
if err := script.Load(ctx, rdb).Err(); err != nil {
|
||||
r = script.Eval(ctx, rdb, keys, args)
|
||||
} else {
|
||||
r = script.EvalSha(ctx, rdb, keys, args)
|
||||
}
|
||||
}
|
||||
v, err := r.Result()
|
||||
if errors.Is(err, redis.Nil) {
|
||||
err = nil
|
||||
}
|
||||
return v, errs.WrapMsg(err, "call lua err", "scriptHash", script.Hash(), "keys", keys, "args", args)
|
||||
}
|
||||
|
||||
func LuaSetBatchWithCommonExpire(ctx context.Context, rdb redis.Scripter, keys []string, values []string, expire int) error {
|
||||
// Check if the lengths of keys and values match
|
||||
if len(keys) != len(values) {
|
||||
return errs.New("keys and values length mismatch").Wrap()
|
||||
}
|
||||
|
||||
// Ensure allocation size does not overflow
|
||||
maxAllowedLen := (1 << 31) - 1 // cap at the maximum positive int32 so the allocation size cannot overflow
|
||||
|
||||
if len(values) > maxAllowedLen-1 {
|
||||
return fmt.Errorf("values length is too large, causing overflow")
|
||||
}
|
||||
var vals = make([]any, 0, 1+len(values))
|
||||
vals = append(vals, expire)
|
||||
for _, v := range values {
|
||||
vals = append(vals, v)
|
||||
}
|
||||
_, err := callLua(ctx, rdb, setBatchWithCommonExpireScript, keys, vals)
|
||||
return err
|
||||
}
|
||||
|
||||
func LuaSetBatchWithIndividualExpire(ctx context.Context, rdb redis.Scripter, keys []string, values []string, expires []int) error {
|
||||
// Check if the lengths of keys, values, and expires match
|
||||
if len(keys) != len(values) || len(keys) != len(expires) {
|
||||
return errs.New("keys, values and expires length mismatch").Wrap()
|
||||
}
|
||||
|
||||
// Ensure the allocation size does not overflow
|
||||
maxAllowedLen := (1 << 31) - 1 // cap at the maximum positive int32 so the allocation size cannot overflow
|
||||
|
||||
if len(values) > maxAllowedLen-1 {
|
||||
return errs.New(fmt.Sprintf("values length %d exceeds the maximum allowed length %d", len(values), maxAllowedLen-1)).Wrap()
|
||||
}
|
||||
var vals = make([]any, 0, len(values)+len(expires))
|
||||
for _, v := range values {
|
||||
vals = append(vals, v)
|
||||
}
|
||||
for _, ex := range expires {
|
||||
vals = append(vals, ex)
|
||||
}
|
||||
_, err := callLua(ctx, rdb, setBatchWithIndividualExpireScript, keys, vals)
|
||||
return err
|
||||
}
|
||||
|
||||
func LuaDeleteBatch(ctx context.Context, rdb redis.Scripter, keys []string) error {
|
||||
_, err := callLua(ctx, rdb, deleteBatchScript, keys, nil)
|
||||
return err
|
||||
}
|
||||
|
||||
func LuaGetBatch(ctx context.Context, rdb redis.Scripter, keys []string) ([]any, error) {
|
||||
v, err := callLua(ctx, rdb, getBatchScript, keys, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
values, ok := v.([]any)
|
||||
if !ok {
|
||||
return nil, servererrs.ErrArgs.WrapMsg("invalid lua get batch result")
|
||||
}
|
||||
return values, nil
|
||||
|
||||
}
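A minimal usage sketch for the four helpers above against a plain client; the address, key names and TTL are placeholders, and the mock-based tests in the next file exercise the same calls.

package main

import (
	"context"
	"fmt"

	rediscache "git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache/redis"
	"github.com/redis/go-redis/v9"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"}) // placeholder address

	keys := []string{"demo:k1", "demo:k2"}
	vals := []string{"v1", "v2"}

	// Write both keys with a shared 60-second TTL, read them back, then delete them.
	if err := rediscache.LuaSetBatchWithCommonExpire(ctx, rdb, keys, vals, 60); err != nil {
		panic(err)
	}
	got, err := rediscache.LuaGetBatch(ctx, rdb, keys)
	if err != nil {
		panic(err)
	}
	fmt.Println(got) // [v1 v2]
	_ = rediscache.LuaDeleteBatch(ctx, rdb, keys)
}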
|
||||
75
pkg/common/storage/cache/redis/lua_script_test.go
vendored
Normal file
@@ -0,0 +1,75 @@
|
||||
package redis
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/go-redis/redismock/v9"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestLuaSetBatchWithCommonExpire(t *testing.T) {
|
||||
rdb, mock := redismock.NewClientMock()
|
||||
ctx := context.Background()
|
||||
|
||||
keys := []string{"key1", "key2"}
|
||||
values := []string{"value1", "value2"}
|
||||
expire := 10
|
||||
|
||||
mock.ExpectEvalSha(setBatchWithCommonExpireScript.Hash(), keys, []any{expire, "value1", "value2"}).SetVal(int64(len(keys)))
|
||||
|
||||
err := LuaSetBatchWithCommonExpire(ctx, rdb, keys, values, expire)
|
||||
require.NoError(t, err)
|
||||
assert.NoError(t, mock.ExpectationsWereMet())
|
||||
}
|
||||
|
||||
func TestLuaSetBatchWithIndividualExpire(t *testing.T) {
|
||||
rdb, mock := redismock.NewClientMock()
|
||||
ctx := context.Background()
|
||||
|
||||
keys := []string{"key1", "key2"}
|
||||
values := []string{"value1", "value2"}
|
||||
expires := []int{10, 20}
|
||||
|
||||
args := make([]any, 0, len(values)+len(expires))
|
||||
for _, v := range values {
|
||||
args = append(args, v)
|
||||
}
|
||||
for _, ex := range expires {
|
||||
args = append(args, ex)
|
||||
}
|
||||
|
||||
mock.ExpectEvalSha(setBatchWithIndividualExpireScript.Hash(), keys, args).SetVal(int64(len(keys)))
|
||||
|
||||
err := LuaSetBatchWithIndividualExpire(ctx, rdb, keys, values, expires)
|
||||
require.NoError(t, err)
|
||||
assert.NoError(t, mock.ExpectationsWereMet())
|
||||
}
|
||||
|
||||
func TestLuaDeleteBatch(t *testing.T) {
|
||||
rdb, mock := redismock.NewClientMock()
|
||||
ctx := context.Background()
|
||||
|
||||
keys := []string{"key1", "key2"}
|
||||
|
||||
mock.ExpectEvalSha(deleteBatchScript.Hash(), keys, []any{}).SetVal(int64(len(keys)))
|
||||
|
||||
err := LuaDeleteBatch(ctx, rdb, keys)
|
||||
require.NoError(t, err)
|
||||
assert.NoError(t, mock.ExpectationsWereMet())
|
||||
}
|
||||
|
||||
func TestLuaGetBatch(t *testing.T) {
|
||||
rdb, mock := redismock.NewClientMock()
|
||||
ctx := context.Background()
|
||||
|
||||
keys := []string{"key1", "key2"}
|
||||
expectedValues := []any{"value1", "value2"}
|
||||
|
||||
mock.ExpectEvalSha(getBatchScript.Hash(), keys, []any{}).SetVal(expectedValues)
|
||||
|
||||
values, err := LuaGetBatch(ctx, rdb, keys)
|
||||
require.NoError(t, err)
|
||||
assert.NoError(t, mock.ExpectationsWereMet())
|
||||
assert.Equal(t, expectedValues, values)
|
||||
}
|
||||
59
pkg/common/storage/cache/redis/minio.go
vendored
Normal file
@@ -0,0 +1,59 @@
|
||||
package redis
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache"
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache/cachekey"
|
||||
"github.com/openimsdk/tools/s3/minio"
|
||||
"github.com/redis/go-redis/v9"
|
||||
)
|
||||
|
||||
func NewMinioCache(rdb redis.UniversalClient) minio.Cache {
|
||||
rc := newRocksCacheClient(rdb)
|
||||
return &minioCacheRedis{
|
||||
BatchDeleter: rc.GetBatchDeleter(),
|
||||
rcClient: rc,
|
||||
expireTime: time.Hour * 24 * 7,
|
||||
}
|
||||
}
|
||||
|
||||
type minioCacheRedis struct {
|
||||
cache.BatchDeleter
|
||||
rcClient *rocksCacheClient
|
||||
expireTime time.Duration
|
||||
}
|
||||
|
||||
func (g *minioCacheRedis) getObjectImageInfoKey(key string) string {
|
||||
return cachekey.GetObjectImageInfoKey(key)
|
||||
}
|
||||
|
||||
func (g *minioCacheRedis) getMinioImageThumbnailKey(key string, format string, width int, height int) string {
|
||||
return cachekey.GetMinioImageThumbnailKey(key, format, width, height)
|
||||
}
|
||||
|
||||
func (g *minioCacheRedis) DelObjectImageInfoKey(ctx context.Context, keys ...string) error {
|
||||
ks := make([]string, 0, len(keys))
|
||||
for _, key := range keys {
|
||||
ks = append(ks, g.getObjectImageInfoKey(key))
|
||||
}
|
||||
return g.BatchDeleter.ExecDelWithKeys(ctx, ks)
|
||||
}
|
||||
|
||||
func (g *minioCacheRedis) DelImageThumbnailKey(ctx context.Context, key string, format string, width int, height int) error {
|
||||
return g.BatchDeleter.ExecDelWithKeys(ctx, []string{g.getMinioImageThumbnailKey(key, format, width, height)})
|
||||
|
||||
}
|
||||
|
||||
func (g *minioCacheRedis) GetImageObjectKeyInfo(ctx context.Context, key string, fn func(ctx context.Context) (*minio.ImageInfo, error)) (*minio.ImageInfo, error) {
|
||||
info, err := getCache(ctx, g.rcClient, g.getObjectImageInfoKey(key), g.expireTime, fn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return info, nil
|
||||
}
|
||||
|
||||
func (g *minioCacheRedis) GetThumbnailKey(ctx context.Context, key string, format string, width int, height int, minioCache func(ctx context.Context) (string, error)) (string, error) {
|
||||
return getCache(ctx, g.rcClient, g.getMinioImageThumbnailKey(key, format, width, height), g.expireTime, minioCache)
|
||||
}
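A short sketch of the read-through usage of GetThumbnailKey: the expensive thumbnail generation is passed as the loader and only runs on a cache miss. This assumes the minio.Cache interface exposes GetThumbnailKey as implemented above; generateThumbnail is a hypothetical stand-in for the real MinIO generation path.

// Sketch: resolve a 128x128 JPEG thumbnail key through the cache.
func getThumbnailURL(ctx context.Context, c minio.Cache, objectKey string) (string, error) {
	return c.GetThumbnailKey(ctx, objectKey, "jpeg", 128, 128, func(ctx context.Context) (string, error) {
		// assumed expensive path: generate and upload the thumbnail, return its key
		return generateThumbnail(ctx, objectKey, "jpeg", 128, 128)
	})
}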
|
||||
94
pkg/common/storage/cache/redis/msg.go
vendored
Normal file
@@ -0,0 +1,94 @@
|
||||
package redis
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"time"
|
||||
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache"
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache/cachekey"
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/database"
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/model"
|
||||
"github.com/openimsdk/tools/errs"
|
||||
"github.com/openimsdk/tools/utils/datautil"
|
||||
"github.com/redis/go-redis/v9"
|
||||
)
|
||||
|
||||
// msgCacheTimeout is the expiration time of the message cache: 86400 seconds (24 hours)
|
||||
const msgCacheTimeout = time.Hour * 24
|
||||
|
||||
func NewMsgCache(client redis.UniversalClient, db database.Msg) cache.MsgCache {
|
||||
return &msgCache{
|
||||
rcClient: newRocksCacheClient(client),
|
||||
msgDocDatabase: db,
|
||||
}
|
||||
}
|
||||
|
||||
type msgCache struct {
|
||||
rcClient *rocksCacheClient
|
||||
msgDocDatabase database.Msg
|
||||
}
|
||||
|
||||
func (c *msgCache) getSendMsgKey(id string) string {
|
||||
return cachekey.GetSendMsgKey(id)
|
||||
}
|
||||
|
||||
func (c *msgCache) SetSendMsgStatus(ctx context.Context, id string, status int32) error {
|
||||
return errs.Wrap(c.rcClient.GetRedis().Set(ctx, c.getSendMsgKey(id), status, time.Hour*24).Err())
|
||||
}
|
||||
|
||||
func (c *msgCache) GetSendMsgStatus(ctx context.Context, id string) (int32, error) {
|
||||
result, err := c.rcClient.GetRedis().Get(ctx, c.getSendMsgKey(id)).Int()
|
||||
return int32(result), errs.Wrap(err)
|
||||
}
|
||||
|
||||
func (c *msgCache) GetMessageBySeqs(ctx context.Context, conversationID string, seqs []int64) ([]*model.MsgInfoModel, error) {
|
||||
if len(seqs) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
getKey := func(seq int64) string {
|
||||
return cachekey.GetMsgCacheKey(conversationID, seq)
|
||||
}
|
||||
getMsgID := func(msg *model.MsgInfoModel) int64 {
|
||||
return msg.Msg.Seq
|
||||
}
|
||||
find := func(ctx context.Context, seqs []int64) ([]*model.MsgInfoModel, error) {
|
||||
return c.msgDocDatabase.FindSeqs(ctx, conversationID, seqs)
|
||||
}
|
||||
return batchGetCache2(ctx, c.rcClient, msgCacheTimeout, seqs, getKey, getMsgID, find)
|
||||
}
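A small sketch of how a reader would drive GetMessageBySeqs: build the seq list for the range it needs and let the cache fill misses from MongoDB via FindSeqs. It assumes cache.MsgCache exposes GetMessageBySeqs as implemented here; the range bounds are placeholders.

// Sketch: fetch messages for seqs [fromSeq, toSeq] through the cache.
func loadMsgRange(ctx context.Context, c cache.MsgCache, conversationID string, fromSeq, toSeq int64) ([]*model.MsgInfoModel, error) {
	seqs := make([]int64, 0, toSeq-fromSeq+1)
	for seq := fromSeq; seq <= toSeq; seq++ {
		seqs = append(seqs, seq)
	}
	return c.GetMessageBySeqs(ctx, conversationID, seqs)
}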
|
||||
|
||||
func (c *msgCache) DelMessageBySeqs(ctx context.Context, conversationID string, seqs []int64) error {
|
||||
if len(seqs) == 0 {
|
||||
return nil
|
||||
}
|
||||
keys := datautil.Slice(seqs, func(seq int64) string {
|
||||
return cachekey.GetMsgCacheKey(conversationID, seq)
|
||||
})
|
||||
slotKeys, err := groupKeysBySlot(ctx, c.rcClient.GetRedis(), keys)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, keys := range slotKeys {
|
||||
if err := c.rcClient.GetClient().TagAsDeletedBatch2(ctx, keys); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *msgCache) SetMessageBySeqs(ctx context.Context, conversationID string, msgs []*model.MsgInfoModel) error {
|
||||
for _, msg := range msgs {
|
||||
if msg == nil || msg.Msg == nil || msg.Msg.Seq <= 0 {
|
||||
continue
|
||||
}
|
||||
data, err := json.Marshal(msg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := c.rcClient.GetClient().RawSet(ctx, cachekey.GetMsgCacheKey(conversationID, msg.Msg.Seq), string(data), msgCacheTimeout); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
161
pkg/common/storage/cache/redis/online.go
vendored
Normal file
@@ -0,0 +1,161 @@
|
||||
package redis
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/config"
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache"
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache/cachekey"
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache/mcache"
|
||||
"git.imall.cloud/openim/protocol/constant"
|
||||
"github.com/openimsdk/tools/errs"
|
||||
"github.com/openimsdk/tools/log"
|
||||
"github.com/redis/go-redis/v9"
|
||||
)
|
||||
|
||||
func NewUserOnline(rdb redis.UniversalClient) cache.OnlineCache {
|
||||
if rdb == nil || config.Standalone() {
|
||||
return mcache.NewOnlineCache()
|
||||
}
|
||||
return &userOnline{
|
||||
rdb: rdb,
|
||||
expire: cachekey.OnlineExpire,
|
||||
channelName: cachekey.OnlineChannel,
|
||||
}
|
||||
}
|
||||
|
||||
type userOnline struct {
|
||||
rdb redis.UniversalClient
|
||||
expire time.Duration
|
||||
channelName string
|
||||
}
|
||||
|
||||
func (s *userOnline) getUserOnlineKey(userID string) string {
|
||||
return cachekey.GetOnlineKey(userID)
|
||||
}
|
||||
|
||||
func (s *userOnline) GetOnline(ctx context.Context, userID string) ([]int32, error) {
|
||||
members, err := s.rdb.ZRangeByScore(ctx, s.getUserOnlineKey(userID), &redis.ZRangeBy{
|
||||
Min: strconv.FormatInt(time.Now().Unix(), 10),
|
||||
Max: "+inf",
|
||||
}).Result()
|
||||
if err != nil {
|
||||
return nil, errs.Wrap(err)
|
||||
}
|
||||
platformIDs := make([]int32, 0, len(members))
|
||||
for _, member := range members {
|
||||
val, err := strconv.Atoi(member)
|
||||
if err != nil {
|
||||
return nil, errs.Wrap(err)
|
||||
}
|
||||
platformIDs = append(platformIDs, int32(val))
|
||||
}
|
||||
return platformIDs, nil
|
||||
}
|
||||
|
||||
func (s *userOnline) GetAllOnlineUsers(ctx context.Context, cursor uint64) (map[string][]int32, uint64, error) {
|
||||
result := make(map[string][]int32)
|
||||
|
||||
keys, nextCursor, err := s.rdb.Scan(ctx, cursor, fmt.Sprintf("%s*", cachekey.OnlineKey), constant.ParamMaxLength).Result()
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
for _, key := range keys {
|
||||
userID := cachekey.GetOnlineKeyUserID(key)
|
||||
strValues, err := s.rdb.ZRange(ctx, key, 0, -1).Result()
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
values := make([]int32, 0, len(strValues))
|
||||
for _, value := range strValues {
|
||||
intValue, err := strconv.Atoi(value)
|
||||
if err != nil {
|
||||
return nil, 0, errs.Wrap(err)
|
||||
}
|
||||
values = append(values, int32(intValue))
|
||||
}
|
||||
|
||||
result[userID] = values
|
||||
}
|
||||
|
||||
return result, nextCursor, nil
|
||||
}
|
||||
|
||||
func (s *userOnline) SetUserOnline(ctx context.Context, userID string, online, offline []int32) error {
|
||||
// Atomically update the online status and the online user count cache with a Lua script
|
||||
script := `
|
||||
local key = KEYS[1]
|
||||
local countKey = KEYS[2]
|
||||
local expire = tonumber(ARGV[1])
|
||||
local now = ARGV[2]
|
||||
local score = ARGV[3]
|
||||
local offlineLen = tonumber(ARGV[4])
|
||||
redis.call("ZREMRANGEBYSCORE", key, "-inf", now)
|
||||
for i = 5, offlineLen+4 do
|
||||
redis.call("ZREM", key, ARGV[i])
|
||||
end
|
||||
local before = redis.call("ZCARD", key)
|
||||
for i = 5+offlineLen, #ARGV do
|
||||
redis.call("ZADD", key, score, ARGV[i])
|
||||
end
|
||||
redis.call("EXPIRE", key, expire)
|
||||
local after = redis.call("ZCARD", key)
|
||||
local current = redis.call("GET", countKey)
|
||||
if not current then
|
||||
current = 0
|
||||
else
|
||||
current = tonumber(current)
|
||||
end
|
||||
if before == 0 and after > 0 then
|
||||
redis.call("SET", countKey, current + 1)
|
||||
elseif before > 0 and after == 0 then
|
||||
local next = current - 1
|
||||
if next < 0 then
|
||||
next = 0
|
||||
end
|
||||
redis.call("SET", countKey, next)
|
||||
end
|
||||
if before ~= after then
|
||||
local members = redis.call("ZRANGE", key, 0, -1)
|
||||
table.insert(members, "1")
|
||||
return members
|
||||
else
|
||||
return {"0"}
|
||||
end
|
||||
`
|
||||
now := time.Now()
|
||||
argv := make([]any, 0, 4+len(online)+len(offline))
|
||||
argv = append(argv, int32(s.expire/time.Second), now.Unix(), now.Add(s.expire).Unix(), int32(len(offline)))
|
||||
for _, platformID := range offline {
|
||||
argv = append(argv, platformID)
|
||||
}
|
||||
for _, platformID := range online {
|
||||
argv = append(argv, platformID)
|
||||
}
|
||||
keys := []string{s.getUserOnlineKey(userID), cachekey.OnlineUserCountKey}
|
||||
platformIDs, err := s.rdb.Eval(ctx, script, keys, argv).StringSlice()
|
||||
if err != nil {
|
||||
log.ZError(ctx, "redis SetUserOnline", err, "userID", userID, "online", online, "offline", offline)
|
||||
return err
|
||||
}
|
||||
if len(platformIDs) == 0 {
|
||||
return errs.ErrInternalServer.WrapMsg("SetUserOnline redis lua invalid return value")
|
||||
}
|
||||
if platformIDs[len(platformIDs)-1] != "0" {
|
||||
log.ZDebug(ctx, "redis SetUserOnline push", "userID", userID, "online", online, "offline", offline, "platformIDs", platformIDs[:len(platformIDs)-1])
|
||||
platformIDs[len(platformIDs)-1] = userID
|
||||
msg := strings.Join(platformIDs, ":")
|
||||
if err := s.rdb.Publish(ctx, s.channelName, msg).Err(); err != nil {
|
||||
return errs.Wrap(err)
|
||||
}
|
||||
} else {
|
||||
log.ZDebug(ctx, "redis SetUserOnline not push", "userID", userID, "online", online, "offline", offline)
|
||||
}
|
||||
return nil
|
||||
}
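A minimal sketch of a gateway handler updating a user's online platforms through the cache above and reading the result back. It assumes cache.OnlineCache declares SetUserOnline and GetOnline as implemented here; the user ID and platform IDs are placeholders.

// Sketch: mark platform 1 online and platform 5 offline, then list online platforms.
func refreshOnlineState(ctx context.Context, oc cache.OnlineCache, userID string) ([]int32, error) {
	if err := oc.SetUserOnline(ctx, userID, []int32{1}, []int32{5}); err != nil {
		return nil, err
	}
	return oc.GetOnline(ctx, userID)
}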
|
||||
149
pkg/common/storage/cache/redis/online_count.go
vendored
Normal file
@@ -0,0 +1,149 @@
|
||||
package redis
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache/cachekey"
|
||||
"git.imall.cloud/openim/protocol/constant"
|
||||
"github.com/openimsdk/tools/errs"
|
||||
"github.com/redis/go-redis/v9"
|
||||
)
|
||||
|
||||
const onlineUserCountHistorySeparator = ":"
|
||||
|
||||
// OnlineUserCountSample is one historical sample of the online user count
|
||||
type OnlineUserCountSample struct {
|
||||
// Timestamp is the sample time as a millisecond Unix timestamp
|
||||
Timestamp int64
|
||||
// Count is the online user count at the sample time
|
||||
Count int64
|
||||
}
|
||||
|
||||
// GetOnlineUserCount reads the cached online user count
|
||||
func GetOnlineUserCount(ctx context.Context, rdb redis.UniversalClient) (int64, error) {
|
||||
if rdb == nil {
|
||||
return 0, errs.ErrInternalServer.WrapMsg("redis client is nil")
|
||||
}
|
||||
val, err := rdb.Get(ctx, cachekey.OnlineUserCountKey).Result()
|
||||
if err != nil {
|
||||
if errors.Is(err, redis.Nil) {
|
||||
return 0, err
|
||||
}
|
||||
return 0, errs.Wrap(err)
|
||||
}
|
||||
count, err := strconv.ParseInt(val, 10, 64)
|
||||
if err != nil {
|
||||
return 0, errs.WrapMsg(err, "parse online user count failed")
|
||||
}
|
||||
return count, nil
|
||||
}
|
||||
|
||||
// RefreshOnlineUserCount recounts online users by scanning the online keys and refreshes the cache
|
||||
func RefreshOnlineUserCount(ctx context.Context, rdb redis.UniversalClient) (int64, error) {
|
||||
if rdb == nil {
|
||||
return 0, errs.ErrInternalServer.WrapMsg("redis client is nil")
|
||||
}
|
||||
var (
|
||||
cursor uint64
|
||||
total int64
|
||||
)
|
||||
now := strconv.FormatInt(time.Now().Unix(), 10)
|
||||
for {
|
||||
keys, nextCursor, err := rdb.Scan(ctx, cursor, fmt.Sprintf("%s*", cachekey.OnlineKey), constant.ParamMaxLength).Result()
|
||||
if err != nil {
|
||||
return 0, errs.Wrap(err)
|
||||
}
|
||||
for _, key := range keys {
|
||||
count, err := rdb.ZCount(ctx, key, now, "+inf").Result()
|
||||
if err != nil {
|
||||
return 0, errs.Wrap(err)
|
||||
}
|
||||
if count > 0 {
|
||||
total++
|
||||
}
|
||||
}
|
||||
cursor = nextCursor
|
||||
if cursor == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if err := rdb.Set(ctx, cachekey.OnlineUserCountKey, total, 0).Err(); err != nil {
|
||||
return 0, errs.Wrap(err)
|
||||
}
|
||||
return total, nil
|
||||
}
|
||||
|
||||
// AppendOnlineUserCountHistory appends one online user count history sample
|
||||
func AppendOnlineUserCountHistory(ctx context.Context, rdb redis.UniversalClient, timestamp int64, count int64) error {
|
||||
if rdb == nil {
|
||||
return errs.ErrInternalServer.WrapMsg("redis client is nil")
|
||||
}
|
||||
if timestamp <= 0 {
|
||||
return errs.ErrArgs.WrapMsg("invalid timestamp")
|
||||
}
|
||||
member := fmt.Sprintf("%d%s%d", timestamp, onlineUserCountHistorySeparator, count)
|
||||
if err := rdb.ZAdd(ctx, cachekey.OnlineUserCountHistoryKey, redis.Z{
|
||||
Score: float64(timestamp),
|
||||
Member: member,
|
||||
}).Err(); err != nil {
|
||||
return errs.Wrap(err)
|
||||
}
|
||||
// Trim old history entries so the sorted set does not grow without bound
|
||||
retentionMs := int64(cachekey.OnlineUserCountHistoryRetention / time.Millisecond)
|
||||
cutoff := timestamp - retentionMs
|
||||
if cutoff > 0 {
|
||||
if err := rdb.ZRemRangeByScore(ctx, cachekey.OnlineUserCountHistoryKey, "0", strconv.FormatInt(cutoff, 10)).Err(); err != nil {
|
||||
return errs.Wrap(err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetOnlineUserCountHistory reads the online user count history samples between startTime and endTime
|
||||
func GetOnlineUserCountHistory(ctx context.Context, rdb redis.UniversalClient, startTime int64, endTime int64) ([]OnlineUserCountSample, error) {
|
||||
if rdb == nil {
|
||||
return nil, errs.ErrInternalServer.WrapMsg("redis client is nil")
|
||||
}
|
||||
if startTime <= 0 || endTime <= 0 || endTime <= startTime {
|
||||
return nil, nil
|
||||
}
|
||||
// Use endTime as the upper bound so samples taken exactly at endTime are included
|
||||
values, err := rdb.ZRangeByScore(ctx, cachekey.OnlineUserCountHistoryKey, &redis.ZRangeBy{
|
||||
Min: strconv.FormatInt(startTime, 10),
|
||||
Max: strconv.FormatInt(endTime, 10),
|
||||
}).Result()
|
||||
if err != nil {
|
||||
if errors.Is(err, redis.Nil) {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, errs.Wrap(err)
|
||||
}
|
||||
if len(values) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
samples := make([]OnlineUserCountSample, 0, len(values))
|
||||
for _, val := range values {
|
||||
parts := strings.SplitN(val, onlineUserCountHistorySeparator, 2)
|
||||
if len(parts) != 2 {
|
||||
continue
|
||||
}
|
||||
ts, err := strconv.ParseInt(parts[0], 10, 64)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
cnt, err := strconv.ParseInt(parts[1], 10, 64)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
samples = append(samples, OnlineUserCountSample{
|
||||
Timestamp: ts,
|
||||
Count: cnt,
|
||||
})
|
||||
}
|
||||
return samples, nil
|
||||
}
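The helpers above are meant to be driven by a periodic job: recount from the online keys, record a timestamped sample, and let the retention trim inside AppendOnlineUserCountHistory bound the set. A sampling-loop sketch follows; the one-minute interval is an assumption.

// Sketch: sample the online user count once per minute and append it to history.
func runOnlineCountSampler(ctx context.Context, rdb redis.UniversalClient) {
	ticker := time.NewTicker(time.Minute) // assumed sampling interval
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			count, err := RefreshOnlineUserCount(ctx, rdb)
			if err != nil {
				continue // sketch: skip this tick on transient errors
			}
			_ = AppendOnlineUserCountHistory(ctx, rdb, time.Now().UnixMilli(), count)
		}
	}
}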
|
||||
52
pkg/common/storage/cache/redis/online_test.go
vendored
Normal file
@@ -0,0 +1,52 @@
|
||||
package redis
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/config"
|
||||
"github.com/openimsdk/tools/db/redisutil"
|
||||
)
|
||||
|
||||
/*
|
||||
address: [ 172.16.8.48:7001, 172.16.8.48:7002, 172.16.8.48:7003, 172.16.8.48:7004, 172.16.8.48:7005, 172.16.8.48:7006 ]
|
||||
username:
|
||||
password: passwd123
|
||||
clusterMode: true
|
||||
db: 0
|
||||
maxRetry: 10
|
||||
*/
|
||||
func TestName111111(t *testing.T) {
|
||||
conf := config.Redis{
|
||||
Address: []string{
|
||||
"172.16.8.124:7001",
|
||||
"172.16.8.124:7002",
|
||||
"172.16.8.124:7003",
|
||||
"172.16.8.124:7004",
|
||||
"172.16.8.124:7005",
|
||||
"172.16.8.124:7006",
|
||||
},
|
||||
RedisMode: "cluster",
|
||||
Password: "passwd123",
|
||||
//Address: []string{"localhost:16379"},
|
||||
//Password: "openIM123",
|
||||
}
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second*1000)
|
||||
defer cancel()
|
||||
rdb, err := redisutil.NewRedisClient(ctx, conf.Build())
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
online := NewUserOnline(rdb)
|
||||
|
||||
userID := "a123456"
|
||||
t.Log(online.GetOnline(ctx, userID))
|
||||
t.Log(online.SetUserOnline(ctx, userID, []int32{1, 2, 3, 4}, nil))
|
||||
t.Log(online.GetOnline(ctx, userID))
|
||||
|
||||
}
|
||||
|
||||
func TestName111(t *testing.T) {
|
||||
|
||||
}
|
||||
211
pkg/common/storage/cache/redis/redis_shard_manager.go
vendored
Normal file
@@ -0,0 +1,211 @@
|
||||
package redis
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/openimsdk/tools/errs"
|
||||
"github.com/openimsdk/tools/log"
|
||||
"github.com/redis/go-redis/v9"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultBatchSize = 50
|
||||
defaultConcurrentLimit = 3
|
||||
)
|
||||
|
||||
// RedisShardManager groups keys by Redis cluster slot and processes them in batches
|
||||
type RedisShardManager struct {
|
||||
redisClient redis.UniversalClient
|
||||
config *Config
|
||||
}
|
||||
type Config struct {
|
||||
batchSize int
|
||||
continueOnError bool
|
||||
concurrentLimit int
|
||||
}
|
||||
|
||||
// Option is a function type for configuring Config
|
||||
type Option func(c *Config)
|
||||
|
||||
//// NewRedisShardManager creates a new RedisShardManager instance
|
||||
//func NewRedisShardManager(redisClient redis.UniversalClient, opts ...Option) *RedisShardManager {
|
||||
// config := &Config{
|
||||
// batchSize: defaultBatchSize, // Default batch size is 50 keys
|
||||
// continueOnError: false,
|
||||
// concurrentLimit: defaultConcurrentLimit, // Default concurrent limit is 3
|
||||
// }
|
||||
// for _, opt := range opts {
|
||||
// opt(config)
|
||||
// }
|
||||
// rsm := &RedisShardManager{
|
||||
// redisClient: redisClient,
|
||||
// config: config,
|
||||
// }
|
||||
// return rsm
|
||||
//}
|
||||
//
|
||||
//// WithBatchSize sets the number of keys to process per batch
|
||||
//func WithBatchSize(size int) Option {
|
||||
// return func(c *Config) {
|
||||
// c.batchSize = size
|
||||
// }
|
||||
//}
|
||||
//
|
||||
//// WithContinueOnError sets whether to continue processing on error
|
||||
//func WithContinueOnError(continueOnError bool) Option {
|
||||
// return func(c *Config) {
|
||||
// c.continueOnError = continueOnError
|
||||
// }
|
||||
//}
|
||||
//
|
||||
//// WithConcurrentLimit sets the concurrency limit
|
||||
//func WithConcurrentLimit(limit int) Option {
|
||||
// return func(c *Config) {
|
||||
// c.concurrentLimit = limit
|
||||
// }
|
||||
//}
|
||||
//
|
||||
//// ProcessKeysBySlot groups keys by their Redis cluster hash slots and processes them using the provided function.
|
||||
//func (rsm *RedisShardManager) ProcessKeysBySlot(
|
||||
// ctx context.Context,
|
||||
// keys []string,
|
||||
// processFunc func(ctx context.Context, slot int64, keys []string) error,
|
||||
//) error {
|
||||
//
|
||||
// // Group keys by slot
|
||||
// slots, err := groupKeysBySlot(ctx, rsm.redisClient, keys)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
//
|
||||
// g, ctx := errgroup.WithContext(ctx)
|
||||
// g.SetLimit(rsm.config.concurrentLimit)
|
||||
//
|
||||
// // Process keys in each slot using the provided function
|
||||
// for slot, singleSlotKeys := range slots {
|
||||
// batches := splitIntoBatches(singleSlotKeys, rsm.config.batchSize)
|
||||
// for _, batch := range batches {
|
||||
// slot, batch := slot, batch // Avoid closure capture issue
|
||||
// g.Go(func() error {
|
||||
// err := processFunc(ctx, slot, batch)
|
||||
// if err != nil {
|
||||
// log.ZWarn(ctx, "Batch processFunc failed", err, "slot", slot, "keys", batch)
|
||||
// if !rsm.config.continueOnError {
|
||||
// return err
|
||||
// }
|
||||
// }
|
||||
// return nil
|
||||
// })
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// if err := g.Wait(); err != nil {
|
||||
// return err
|
||||
// }
|
||||
// return nil
|
||||
//}
|
||||
|
||||
// groupKeysBySlot groups keys by their Redis cluster hash slots.
|
||||
func groupKeysBySlot(ctx context.Context, redisClient redis.UniversalClient, keys []string) (map[int64][]string, error) {
|
||||
slots := make(map[int64][]string)
|
||||
clusterClient, isCluster := redisClient.(*redis.ClusterClient)
|
||||
if isCluster && len(keys) > 1 {
|
||||
pipe := clusterClient.Pipeline()
|
||||
cmds := make([]*redis.IntCmd, len(keys))
|
||||
for i, key := range keys {
|
||||
cmds[i] = pipe.ClusterKeySlot(ctx, key)
|
||||
}
|
||||
_, err := pipe.Exec(ctx)
|
||||
if err != nil {
|
||||
return nil, errs.WrapMsg(err, "get slot err")
|
||||
}
|
||||
|
||||
for i, cmd := range cmds {
|
||||
slot, err := cmd.Result()
|
||||
if err != nil {
|
||||
log.ZWarn(ctx, "some key get slot err", err, "key", keys[i])
|
||||
return nil, errs.WrapMsg(err, "get slot err", "key", keys[i])
|
||||
}
|
||||
slots[slot] = append(slots[slot], keys[i])
|
||||
}
|
||||
} else {
|
||||
// If not a cluster client, put all keys in the same slot (0)
|
||||
slots[0] = keys
|
||||
}
|
||||
|
||||
return slots, nil
|
||||
}
|
||||
|
||||
// splitIntoBatches splits keys into batches of the specified size
|
||||
func splitIntoBatches(keys []string, batchSize int) [][]string {
|
||||
var batches [][]string
|
||||
for batchSize < len(keys) {
|
||||
keys, batches = keys[batchSize:], append(batches, keys[0:batchSize:batchSize])
|
||||
}
|
||||
return append(batches, keys)
|
||||
}
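A tiny worked example of the slicing above: five keys with a batch size of two come back as three batches.

// Sketch: splitIntoBatches([]string{"k1","k2","k3","k4","k5"}, 2)
// returns [["k1","k2"], ["k3","k4"], ["k5"]].
func exampleSplit() [][]string {
	return splitIntoBatches([]string{"k1", "k2", "k3", "k4", "k5"}, 2)
}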
|
||||
|
||||
// ProcessKeysBySlot groups keys by their Redis cluster hash slots and processes them using the provided function.
|
||||
func ProcessKeysBySlot(
|
||||
ctx context.Context,
|
||||
redisClient redis.UniversalClient,
|
||||
keys []string,
|
||||
processFunc func(ctx context.Context, slot int64, keys []string) error,
|
||||
opts ...Option,
|
||||
) error {
|
||||
|
||||
config := &Config{
|
||||
batchSize: defaultBatchSize,
|
||||
continueOnError: false,
|
||||
concurrentLimit: defaultConcurrentLimit,
|
||||
}
|
||||
for _, opt := range opts {
|
||||
opt(config)
|
||||
}
|
||||
|
||||
// Group keys by slot
|
||||
slots, err := groupKeysBySlot(ctx, redisClient, keys)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
g, ctx := errgroup.WithContext(ctx)
|
||||
g.SetLimit(config.concurrentLimit)
|
||||
|
||||
// Process keys in each slot using the provided function
|
||||
for slot, singleSlotKeys := range slots {
|
||||
batches := splitIntoBatches(singleSlotKeys, config.batchSize)
|
||||
for _, batch := range batches {
|
||||
slot, batch := slot, batch // Avoid closure capture issue
|
||||
g.Go(func() error {
|
||||
err := processFunc(ctx, slot, batch)
|
||||
if err != nil {
|
||||
log.ZWarn(ctx, "Batch processFunc failed", err, "slot", slot, "keys", batch)
|
||||
if !config.continueOnError {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
if err := g.Wait(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
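A short usage sketch for ProcessKeysBySlot: each callback invocation only ever sees keys that hash to one cluster slot, so multi-key commands stay on a single node. The key set here is a placeholder; DeleteCacheBySlot below relies on the same structure.

// Sketch: delete an arbitrary key set slot by slot.
func deleteKeysBySlot(ctx context.Context, rdb redis.UniversalClient, keys []string) error {
	return ProcessKeysBySlot(ctx, rdb, keys, func(ctx context.Context, slot int64, batch []string) error {
		return rdb.Del(ctx, batch...).Err()
	})
}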
|
||||
|
||||
func DeleteCacheBySlot(ctx context.Context, rcClient *rocksCacheClient, keys []string) error {
|
||||
switch len(keys) {
|
||||
case 0:
|
||||
return nil
|
||||
case 1:
|
||||
return rcClient.GetClient().TagAsDeletedBatch2(ctx, keys)
|
||||
default:
|
||||
return ProcessKeysBySlot(ctx, rcClient.GetRedis(), keys, func(ctx context.Context, slot int64, keys []string) error {
|
||||
return rcClient.GetClient().TagAsDeletedBatch2(ctx, keys)
|
||||
})
|
||||
}
|
||||
}
|
||||
95
pkg/common/storage/cache/redis/s3.go
vendored
Normal file
@@ -0,0 +1,95 @@
|
||||
package redis
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache"
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache/cachekey"
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/database"
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/model"
|
||||
"github.com/openimsdk/tools/s3"
|
||||
"github.com/openimsdk/tools/s3/cont"
|
||||
"github.com/redis/go-redis/v9"
|
||||
)
|
||||
|
||||
func NewObjectCacheRedis(rdb redis.UniversalClient, objDB database.ObjectInfo) cache.ObjectCache {
|
||||
rc := newRocksCacheClient(rdb)
|
||||
return &objectCacheRedis{
|
||||
BatchDeleter: rc.GetBatchDeleter(),
|
||||
rcClient: rc,
|
||||
expireTime: time.Hour * 12,
|
||||
objDB: objDB,
|
||||
}
|
||||
}
|
||||
|
||||
type objectCacheRedis struct {
|
||||
cache.BatchDeleter
|
||||
objDB database.ObjectInfo
|
||||
rcClient *rocksCacheClient
|
||||
expireTime time.Duration
|
||||
}
|
||||
|
||||
func (g *objectCacheRedis) getObjectKey(engine string, name string) string {
|
||||
return cachekey.GetObjectKey(engine, name)
|
||||
}
|
||||
|
||||
func (g *objectCacheRedis) CloneObjectCache() cache.ObjectCache {
|
||||
return &objectCacheRedis{
|
||||
BatchDeleter: g.BatchDeleter.Clone(),
|
||||
rcClient: g.rcClient,
|
||||
expireTime: g.expireTime,
|
||||
objDB: g.objDB,
|
||||
}
|
||||
}
|
||||
|
||||
func (g *objectCacheRedis) DelObjectName(engine string, names ...string) cache.ObjectCache {
|
||||
objectCache := g.CloneObjectCache()
|
||||
keys := make([]string, 0, len(names))
|
||||
for _, name := range names {
|
||||
keys = append(keys, g.getObjectKey(engine, name))
|
||||
}
|
||||
objectCache.AddKeys(keys...)
|
||||
return objectCache
|
||||
}
|
||||
|
||||
func (g *objectCacheRedis) GetName(ctx context.Context, engine string, name string) (*model.Object, error) {
|
||||
return getCache(ctx, g.rcClient, g.getObjectKey(engine, name), g.expireTime, func(ctx context.Context) (*model.Object, error) {
|
||||
return g.objDB.Take(ctx, engine, name)
|
||||
})
|
||||
}
|
||||
|
||||
func NewS3Cache(rdb redis.UniversalClient, s3 s3.Interface) cont.S3Cache {
|
||||
rc := newRocksCacheClient(rdb)
|
||||
return &s3CacheRedis{
|
||||
BatchDeleter: rc.GetBatchDeleter(),
|
||||
rcClient: rc,
|
||||
expireTime: time.Hour * 12,
|
||||
s3: s3,
|
||||
}
|
||||
}
|
||||
|
||||
type s3CacheRedis struct {
|
||||
cache.BatchDeleter
|
||||
s3 s3.Interface
|
||||
rcClient *rocksCacheClient
|
||||
expireTime time.Duration
|
||||
}
|
||||
|
||||
func (g *s3CacheRedis) getS3Key(engine string, name string) string {
|
||||
return cachekey.GetS3Key(engine, name)
|
||||
}
|
||||
|
||||
func (g *s3CacheRedis) DelS3Key(ctx context.Context, engine string, keys ...string) error {
|
||||
ks := make([]string, 0, len(keys))
|
||||
for _, key := range keys {
|
||||
ks = append(ks, g.getS3Key(engine, key))
|
||||
}
|
||||
return g.BatchDeleter.ExecDelWithKeys(ctx, ks)
|
||||
}
|
||||
|
||||
func (g *s3CacheRedis) GetKey(ctx context.Context, engine string, name string) (*s3.ObjectInfo, error) {
|
||||
return getCache(ctx, g.rcClient, g.getS3Key(engine, name), g.expireTime, func(ctx context.Context) (*s3.ObjectInfo, error) {
|
||||
return g.s3.StatObject(ctx, name)
|
||||
})
|
||||
}
|
||||
521
pkg/common/storage/cache/redis/seq_conversation.go
vendored
Normal file
@@ -0,0 +1,521 @@
|
||||
package redis
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache"
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache/cachekey"
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache/mcache"
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/database"
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/msgprocessor"
|
||||
"github.com/openimsdk/tools/errs"
|
||||
"github.com/openimsdk/tools/log"
|
||||
"github.com/redis/go-redis/v9"
|
||||
)
|
||||
|
||||
func NewSeqConversationCacheRedis(rdb redis.UniversalClient, mgo database.SeqConversation) cache.SeqConversationCache {
|
||||
if rdb == nil {
|
||||
return mcache.NewSeqConversationCache(mgo)
|
||||
}
|
||||
return &seqConversationCacheRedis{
|
||||
mgo: mgo,
|
||||
lockTime: time.Second * 3,
|
||||
dataTime: time.Hour * 24 * 365,
|
||||
minSeqExpireTime: time.Hour,
|
||||
rcClient: newRocksCacheClient(rdb),
|
||||
}
|
||||
}
|
||||
|
||||
type seqConversationCacheRedis struct {
|
||||
mgo database.SeqConversation
|
||||
rcClient *rocksCacheClient
|
||||
lockTime time.Duration
|
||||
dataTime time.Duration
|
||||
minSeqExpireTime time.Duration
|
||||
}
|
||||
|
||||
func (s *seqConversationCacheRedis) getMinSeqKey(conversationID string) string {
|
||||
return cachekey.GetMallocMinSeqKey(conversationID)
|
||||
}
|
||||
|
||||
func (s *seqConversationCacheRedis) SetMinSeq(ctx context.Context, conversationID string, seq int64) error {
|
||||
return s.SetMinSeqs(ctx, map[string]int64{conversationID: seq})
|
||||
}
|
||||
|
||||
func (s *seqConversationCacheRedis) GetMinSeq(ctx context.Context, conversationID string) (int64, error) {
|
||||
return getCache(ctx, s.rcClient, s.getMinSeqKey(conversationID), s.minSeqExpireTime, func(ctx context.Context) (int64, error) {
|
||||
return s.mgo.GetMinSeq(ctx, conversationID)
|
||||
})
|
||||
}
|
||||
|
||||
func (s *seqConversationCacheRedis) getSingleMaxSeq(ctx context.Context, conversationID string) (map[string]int64, error) {
|
||||
seq, err := s.GetMaxSeq(ctx, conversationID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return map[string]int64{conversationID: seq}, nil
|
||||
}
|
||||
|
||||
func (s *seqConversationCacheRedis) getSingleMaxSeqWithTime(ctx context.Context, conversationID string) (map[string]database.SeqTime, error) {
|
||||
seq, err := s.GetMaxSeqWithTime(ctx, conversationID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return map[string]database.SeqTime{conversationID: seq}, nil
|
||||
}
|
||||
|
||||
func (s *seqConversationCacheRedis) batchGetMaxSeq(ctx context.Context, keys []string, keyConversationID map[string]string, seqs map[string]int64) error {
|
||||
result := make([]*redis.StringCmd, len(keys))
|
||||
pipe := s.rcClient.GetRedis().Pipeline()
|
||||
for i, key := range keys {
|
||||
result[i] = pipe.HGet(ctx, key, "CURR")
|
||||
}
|
||||
if _, err := pipe.Exec(ctx); err != nil && !errors.Is(err, redis.Nil) {
|
||||
return errs.Wrap(err)
|
||||
}
|
||||
var notFoundKey []string
|
||||
for i, r := range result {
|
||||
req, err := r.Int64()
|
||||
if err == nil {
|
||||
seqs[keyConversationID[keys[i]]] = req
|
||||
} else if errors.Is(err, redis.Nil) {
|
||||
notFoundKey = append(notFoundKey, keys[i])
|
||||
} else {
|
||||
return errs.Wrap(err)
|
||||
}
|
||||
}
|
||||
for _, key := range notFoundKey {
|
||||
conversationID := keyConversationID[key]
|
||||
seq, err := s.GetMaxSeq(ctx, conversationID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
seqs[conversationID] = seq
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *seqConversationCacheRedis) batchGetMaxSeqWithTime(ctx context.Context, keys []string, keyConversationID map[string]string, seqs map[string]database.SeqTime) error {
|
||||
result := make([]*redis.SliceCmd, len(keys))
|
||||
pipe := s.rcClient.GetRedis().Pipeline()
|
||||
for i, key := range keys {
|
||||
result[i] = pipe.HMGet(ctx, key, "CURR", "TIME")
|
||||
}
|
||||
if _, err := pipe.Exec(ctx); err != nil && !errors.Is(err, redis.Nil) {
|
||||
return errs.Wrap(err)
|
||||
}
|
||||
var notFoundKey []string
|
||||
for i, r := range result {
|
||||
val, err := r.Result()
|
||||
if len(val) != 2 {
|
||||
return errs.WrapMsg(err, "batchGetMaxSeqWithTime invalid result", "key", keys[i], "res", val)
|
||||
}
|
||||
if val[0] == nil {
|
||||
notFoundKey = append(notFoundKey, keys[i])
|
||||
continue
|
||||
}
|
||||
seq, err := s.parseInt64(val[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
mill, err := s.parseInt64(val[1])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
seqs[keyConversationID[keys[i]]] = database.SeqTime{Seq: seq, Time: mill}
|
||||
}
|
||||
for _, key := range notFoundKey {
|
||||
conversationID := keyConversationID[key]
|
||||
seq, err := s.GetMaxSeqWithTime(ctx, conversationID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
seqs[conversationID] = seq
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *seqConversationCacheRedis) GetMaxSeqs(ctx context.Context, conversationIDs []string) (map[string]int64, error) {
|
||||
switch len(conversationIDs) {
|
||||
case 0:
|
||||
return map[string]int64{}, nil
|
||||
case 1:
|
||||
return s.getSingleMaxSeq(ctx, conversationIDs[0])
|
||||
}
|
||||
keys := make([]string, 0, len(conversationIDs))
|
||||
keyConversationID := make(map[string]string, len(conversationIDs))
|
||||
for _, conversationID := range conversationIDs {
|
||||
key := s.getSeqMallocKey(conversationID)
|
||||
if _, ok := keyConversationID[key]; ok {
|
||||
continue
|
||||
}
|
||||
keys = append(keys, key)
|
||||
keyConversationID[key] = conversationID
|
||||
}
|
||||
if len(keys) == 1 {
|
||||
return s.getSingleMaxSeq(ctx, conversationIDs[0])
|
||||
}
|
||||
slotKeys, err := groupKeysBySlot(ctx, s.rcClient.GetRedis(), keys)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
seqs := make(map[string]int64, len(conversationIDs))
|
||||
for _, keys := range slotKeys {
|
||||
if err := s.batchGetMaxSeq(ctx, keys, keyConversationID, seqs); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return seqs, nil
|
||||
}
|
||||
|
||||
func (s *seqConversationCacheRedis) GetMaxSeqsWithTime(ctx context.Context, conversationIDs []string) (map[string]database.SeqTime, error) {
|
||||
switch len(conversationIDs) {
|
||||
case 0:
|
||||
return map[string]database.SeqTime{}, nil
|
||||
case 1:
|
||||
return s.getSingleMaxSeqWithTime(ctx, conversationIDs[0])
|
||||
}
|
||||
keys := make([]string, 0, len(conversationIDs))
|
||||
keyConversationID := make(map[string]string, len(conversationIDs))
|
||||
for _, conversationID := range conversationIDs {
|
||||
key := s.getSeqMallocKey(conversationID)
|
||||
if _, ok := keyConversationID[key]; ok {
|
||||
continue
|
||||
}
|
||||
keys = append(keys, key)
|
||||
keyConversationID[key] = conversationID
|
||||
}
|
||||
if len(keys) == 1 {
|
||||
return s.getSingleMaxSeqWithTime(ctx, conversationIDs[0])
|
||||
}
|
||||
slotKeys, err := groupKeysBySlot(ctx, s.rcClient.GetRedis(), keys)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
seqs := make(map[string]database.SeqTime, len(conversationIDs))
|
||||
for _, keys := range slotKeys {
|
||||
if err := s.batchGetMaxSeqWithTime(ctx, keys, keyConversationID, seqs); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return seqs, nil
|
||||
}
|
||||
|
||||
func (s *seqConversationCacheRedis) getSeqMallocKey(conversationID string) string {
|
||||
return cachekey.GetMallocSeqKey(conversationID)
|
||||
}
|
||||
|
||||
func (s *seqConversationCacheRedis) setSeq(ctx context.Context, key string, owner int64, currSeq int64, lastSeq int64, mill int64) (int64, error) {
|
||||
if lastSeq < currSeq {
|
||||
return 0, errs.New("lastSeq must be greater than currSeq")
|
||||
}
|
||||
// 0: success
|
||||
// 1: success, but the key no longer existed (the lock had expired and nobody else re-locked it)
|
||||
// 2: already locked, but not by yourself
|
||||
script := `
|
||||
local key = KEYS[1]
|
||||
local lockValue = ARGV[1]
|
||||
local dataSecond = ARGV[2]
|
||||
local curr_seq = tonumber(ARGV[3])
|
||||
local last_seq = tonumber(ARGV[4])
|
||||
local mallocTime = ARGV[5]
|
||||
if redis.call("EXISTS", key) == 0 then
|
||||
redis.call("HSET", key, "CURR", curr_seq, "LAST", last_seq, "TIME", mallocTime)
|
||||
redis.call("EXPIRE", key, dataSecond)
|
||||
return 1
|
||||
end
|
||||
if redis.call("HGET", key, "LOCK") ~= lockValue then
|
||||
return 2
|
||||
end
|
||||
redis.call("HDEL", key, "LOCK")
|
||||
redis.call("HSET", key, "CURR", curr_seq, "LAST", last_seq, "TIME", mallocTime)
|
||||
redis.call("EXPIRE", key, dataSecond)
|
||||
return 0
|
||||
`
|
||||
result, err := s.rcClient.GetRedis().Eval(ctx, script, []string{key}, owner, int64(s.dataTime/time.Second), currSeq, lastSeq, mill).Int64()
|
||||
if err != nil {
|
||||
return 0, errs.Wrap(err)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// malloc: size == 0 only reads the current seq; size > 0 allocates that many seqs
|
||||
func (s *seqConversationCacheRedis) malloc(ctx context.Context, key string, size int64) ([]int64, error) {
|
||||
// 0: success
|
||||
// 1: need to obtain and lock
|
||||
// 2: already locked
|
||||
// 3: exceeded the maximum value and locked
|
||||
script := `
|
||||
local key = KEYS[1]
|
||||
local size = tonumber(ARGV[1])
|
||||
local lockSecond = ARGV[2]
|
||||
local dataSecond = ARGV[3]
|
||||
local mallocTime = ARGV[4]
|
||||
local result = {}
|
||||
if redis.call("EXISTS", key) == 0 then
|
||||
local lockValue = math.random(0, 999999999)
|
||||
redis.call("HSET", key, "LOCK", lockValue)
|
||||
redis.call("EXPIRE", key, lockSecond)
|
||||
table.insert(result, 1)
|
||||
table.insert(result, lockValue)
|
||||
table.insert(result, mallocTime)
|
||||
return result
|
||||
end
|
||||
if redis.call("HEXISTS", key, "LOCK") == 1 then
|
||||
table.insert(result, 2)
|
||||
return result
|
||||
end
|
||||
local curr_seq = tonumber(redis.call("HGET", key, "CURR"))
|
||||
local last_seq = tonumber(redis.call("HGET", key, "LAST"))
|
||||
if size == 0 then
|
||||
redis.call("EXPIRE", key, dataSecond)
|
||||
table.insert(result, 0)
|
||||
table.insert(result, curr_seq)
|
||||
table.insert(result, last_seq)
|
||||
local setTime = redis.call("HGET", key, "TIME")
|
||||
if setTime then
|
||||
table.insert(result, setTime)
|
||||
else
|
||||
table.insert(result, 0)
|
||||
end
|
||||
return result
|
||||
end
|
||||
local max_seq = curr_seq + size
|
||||
if max_seq > last_seq then
|
||||
local lockValue = math.random(0, 999999999)
|
||||
redis.call("HSET", key, "LOCK", lockValue)
|
||||
redis.call("HSET", key, "CURR", last_seq)
|
||||
redis.call("HSET", key, "TIME", mallocTime)
|
||||
redis.call("EXPIRE", key, lockSecond)
|
||||
table.insert(result, 3)
|
||||
table.insert(result, curr_seq)
|
||||
table.insert(result, last_seq)
|
||||
table.insert(result, lockValue)
|
||||
table.insert(result, mallocTime)
|
||||
return result
|
||||
end
|
||||
redis.call("HSET", key, "CURR", max_seq)
|
||||
redis.call("HSET", key, "TIME", ARGV[4])
|
||||
redis.call("EXPIRE", key, dataSecond)
|
||||
table.insert(result, 0)
|
||||
table.insert(result, curr_seq)
|
||||
table.insert(result, last_seq)
|
||||
table.insert(result, mallocTime)
|
||||
return result
|
||||
`
|
||||
result, err := s.rcClient.GetRedis().Eval(ctx, script, []string{key}, size, int64(s.lockTime/time.Second), int64(s.dataTime/time.Second), time.Now().UnixMilli()).Int64Slice()
|
||||
if err != nil {
|
||||
return nil, errs.Wrap(err)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (s *seqConversationCacheRedis) wait(ctx context.Context) error {
|
||||
timer := time.NewTimer(time.Second / 4)
|
||||
defer timer.Stop()
|
||||
select {
|
||||
case <-timer.C:
|
||||
return nil
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
}
|
||||
|
||||
func (s *seqConversationCacheRedis) setSeqRetry(ctx context.Context, key string, owner int64, currSeq int64, lastSeq int64, mill int64) {
|
||||
for i := 0; i < 10; i++ {
|
||||
state, err := s.setSeq(ctx, key, owner, currSeq, lastSeq, mill)
|
||||
if err != nil {
|
||||
log.ZError(ctx, "set seq cache failed", err, "key", key, "owner", owner, "currSeq", currSeq, "lastSeq", lastSeq, "count", i+1)
|
||||
if err := s.wait(ctx); err != nil {
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
switch state {
|
||||
case 0: // ideal state
|
||||
case 1:
|
||||
log.ZWarn(ctx, "set seq cache lock not found", nil, "key", key, "owner", owner, "currSeq", currSeq, "lastSeq", lastSeq)
|
||||
case 2:
|
||||
log.ZWarn(ctx, "set seq cache lock to be held by someone else", nil, "key", key, "owner", owner, "currSeq", currSeq, "lastSeq", lastSeq)
|
||||
default:
|
||||
log.ZError(ctx, "set seq cache lock unknown state", nil, "key", key, "owner", owner, "currSeq", currSeq, "lastSeq", lastSeq)
|
||||
}
|
||||
return
|
||||
}
|
||||
log.ZError(ctx, "set seq cache retrying still failed", nil, "key", key, "owner", owner, "currSeq", currSeq, "lastSeq", lastSeq)
|
||||
}
|
||||
|
||||
func (s *seqConversationCacheRedis) getMallocSize(conversationID string, size int64) int64 {
|
||||
if size == 0 {
|
||||
return 0
|
||||
}
|
||||
var basicSize int64
|
||||
if msgprocessor.IsGroupConversationID(conversationID) {
|
||||
basicSize = 100
|
||||
} else {
|
||||
basicSize = 50
|
||||
}
|
||||
basicSize += size
|
||||
return basicSize
|
||||
}
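As a worked example of the sizing above: allocating size 10 for a group conversation reserves 110 seqs in MongoDB (100 basic + 10 requested), a single-chat conversation reserves 60 (50 + 10), and size 0 reserves nothing and only reads the current value.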
|
||||
|
||||
func (s *seqConversationCacheRedis) Malloc(ctx context.Context, conversationID string, size int64) (int64, error) {
|
||||
seq, _, err := s.mallocTime(ctx, conversationID, size)
|
||||
return seq, err
|
||||
}
|
||||
|
||||
func (s *seqConversationCacheRedis) mallocTime(ctx context.Context, conversationID string, size int64) (int64, int64, error) {
|
||||
if size < 0 {
|
||||
return 0, 0, errs.New("size must be greater than 0")
|
||||
}
|
||||
key := s.getSeqMallocKey(conversationID)
|
||||
for i := 0; i < 10; i++ {
|
||||
states, err := s.malloc(ctx, key, size)
|
||||
if err != nil {
|
||||
return 0, 0, err
|
||||
}
|
||||
switch states[0] {
|
||||
case 0: // success
|
||||
return states[1], states[3], nil
|
||||
case 1: // not found
|
||||
mallocSize := s.getMallocSize(conversationID, size)
|
||||
seq, err := s.mgo.Malloc(ctx, conversationID, mallocSize)
|
||||
if err != nil {
|
||||
return 0, 0, err
|
||||
}
|
||||
s.setSeqRetry(ctx, key, states[1], seq+size, seq+mallocSize, states[2])
|
||||
return seq, 0, nil
|
||||
case 2: // locked
|
||||
if err := s.wait(ctx); err != nil {
|
||||
return 0, 0, err
|
||||
}
|
||||
continue
|
||||
case 3: // exceeded cache max value
|
||||
currSeq := states[1]
|
||||
lastSeq := states[2]
|
||||
mill := states[4]
|
||||
mallocSize := s.getMallocSize(conversationID, size)
|
||||
seq, err := s.mgo.Malloc(ctx, conversationID, mallocSize)
|
||||
if err != nil {
|
||||
return 0, 0, err
|
||||
}
|
||||
if lastSeq == seq {
|
||||
s.setSeqRetry(ctx, key, states[3], currSeq+size, seq+mallocSize, mill)
|
||||
return currSeq, states[4], nil
|
||||
} else {
|
||||
log.ZWarn(ctx, "malloc seq not equal cache last seq", nil, "conversationID", conversationID, "currSeq", currSeq, "lastSeq", lastSeq, "mallocSeq", seq)
|
||||
s.setSeqRetry(ctx, key, states[3], seq+size, seq+mallocSize, mill)
|
||||
return seq, mill, nil
|
||||
}
|
||||
default:
|
||||
log.ZError(ctx, "malloc seq unknown state", nil, "state", states[0], "conversationID", conversationID, "size", size)
|
||||
return 0, 0, errs.New(fmt.Sprintf("unknown state: %d", states[0]))
|
||||
}
|
||||
}
|
||||
log.ZError(ctx, "malloc seq retrying still failed", nil, "conversationID", conversationID, "size", size)
|
||||
return 0, 0, errs.New("malloc seq waiting for lock timeout", "conversationID", conversationID, "size", size)
|
||||
}
func (s *seqConversationCacheRedis) GetMaxSeq(ctx context.Context, conversationID string) (int64, error) {
	return s.Malloc(ctx, conversationID, 0)
}

func (s *seqConversationCacheRedis) GetMaxSeqWithTime(ctx context.Context, conversationID string) (database.SeqTime, error) {
	seq, mill, err := s.mallocTime(ctx, conversationID, 0)
	if err != nil {
		return database.SeqTime{}, err
	}
	return database.SeqTime{Seq: seq, Time: mill}, nil
}

func (s *seqConversationCacheRedis) SetMinSeqs(ctx context.Context, seqs map[string]int64) error {
	keys := make([]string, 0, len(seqs))
	for conversationID, seq := range seqs {
		keys = append(keys, s.getMinSeqKey(conversationID))
		if err := s.mgo.SetMinSeq(ctx, conversationID, seq); err != nil {
			return err
		}
	}
	return DeleteCacheBySlot(ctx, s.rcClient, keys)
}

// GetCacheMaxSeqWithTime only reads existing cache entries; if a conversation has no cache entry, none is created.
func (s *seqConversationCacheRedis) GetCacheMaxSeqWithTime(ctx context.Context, conversationIDs []string) (map[string]database.SeqTime, error) {
	if len(conversationIDs) == 0 {
		return map[string]database.SeqTime{}, nil
	}
	key2conversationID := make(map[string]string)
	keys := make([]string, 0, len(conversationIDs))
	for _, conversationID := range conversationIDs {
		key := s.getSeqMallocKey(conversationID)
		if _, ok := key2conversationID[key]; ok {
			continue
		}
		key2conversationID[key] = conversationID
		keys = append(keys, key)
	}
	slotKeys, err := groupKeysBySlot(ctx, s.rcClient.GetRedis(), keys)
	if err != nil {
		return nil, err
	}
	res := make(map[string]database.SeqTime)
	for _, keys := range slotKeys {
		if len(keys) == 0 {
			continue
		}
		pipe := s.rcClient.GetRedis().Pipeline()
		cmds := make([]*redis.SliceCmd, 0, len(keys))
		for _, key := range keys {
			cmds = append(cmds, pipe.HMGet(ctx, key, "CURR", "TIME"))
		}
		if _, err := pipe.Exec(ctx); err != nil {
			return nil, errs.Wrap(err)
		}
		for i, cmd := range cmds {
			val, err := cmd.Result()
			if err != nil {
				return nil, err
			}
			if len(val) != 2 {
				return nil, errs.New("GetCacheMaxSeqWithTime invalid result", "key", keys[i], "res", val)
			}
			if val[0] == nil {
				continue
			}
			seq, err := s.parseInt64(val[0])
			if err != nil {
				return nil, err
			}
			mill, err := s.parseInt64(val[1])
			if err != nil {
				return nil, err
			}
			conversationID := key2conversationID[keys[i]]
			res[conversationID] = database.SeqTime{Seq: seq, Time: mill}
		}
	}
	return res, nil
}

func (s *seqConversationCacheRedis) parseInt64(val any) (int64, error) {
	switch v := val.(type) {
	case nil:
		return 0, nil
	case int:
		return int64(v), nil
	case int64:
		return v, nil
	case string:
		res, err := strconv.ParseInt(v, 10, 64)
		if err != nil {
			return 0, errs.WrapMsg(err, "invalid string not int64", "value", v)
		}
		return res, nil
	default:
		return 0, errs.New("invalid result not int64", "resType", fmt.Sprintf("%T", v), "value", v)
	}
}
144
pkg/common/storage/cache/redis/seq_conversation_test.go
vendored
Normal file
@@ -0,0 +1,144 @@
package redis

import (
	"context"
	"strconv"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/database/mgo"
	"github.com/redis/go-redis/v9"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

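// newTestSeq wires the cache against a locally running MongoDB
// (127.0.0.1:37017) and Redis (127.0.0.1:16379) and panics if either is
// unreachable, so the tests below require both services to be up.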
func newTestSeq() *seqConversationCacheRedis {
	mgocli, err := mongo.Connect(context.Background(), options.Client().ApplyURI("mongodb://openIM:openIM123@127.0.0.1:37017/openim_v3?maxPoolSize=100").SetConnectTimeout(5*time.Second))
	if err != nil {
		panic(err)
	}
	model, err := mgo.NewSeqConversationMongo(mgocli.Database("openim_v3"))
	if err != nil {
		panic(err)
	}
	opt := &redis.Options{
		Addr:     "127.0.0.1:16379",
		Password: "openIM123",
		DB:       1,
	}
	rdb := redis.NewClient(opt)
	if err := rdb.Ping(context.Background()).Err(); err != nil {
		panic(err)
	}
	return NewSeqConversationCacheRedis(rdb, model).(*seqConversationCacheRedis)
}

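// TestSeq is a throughput probe rather than an assertion-based test: 128
// goroutines allocate seqs in a loop until an error occurs, while the main
// goroutine prints the number of seqs allocated per second.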
func TestSeq(t *testing.T) {
	ts := newTestSeq()
	var (
		wg    sync.WaitGroup
		speed atomic.Int64
	)

	const count = 128
	wg.Add(count)
	for i := 0; i < count; i++ {
		index := i + 1
		go func() {
			defer wg.Done()
			var size int64 = 10
			cID := strconv.Itoa(index * 1)
			for i := 1; ; i++ {
				//first, err := ts.mgo.Malloc(context.Background(), cID, size) // mongo
				first, err := ts.Malloc(context.Background(), cID, size) // redis
				if err != nil {
					t.Logf("[%d-%d] %s %s", index, i, cID, err)
					return
				}
				speed.Add(size)
				_ = first
				//t.Logf("[%d] %d -> %d", i, first+1, first+size)
			}
		}()
	}

	done := make(chan struct{})

	go func() {
		wg.Wait()
		close(done)
	}()

	ticker := time.NewTicker(time.Second)

	for {
		select {
		case <-done:
			ticker.Stop()
			return
		case <-ticker.C:
			value := speed.Swap(0)
			t.Logf("speed: %d/s", value)
		}
	}
}

func TestDel(t *testing.T) {
	ts := newTestSeq()
	for i := 1; i < 100; i++ {
		var size int64 = 100
		first, err := ts.Malloc(context.Background(), "100", size)
		if err != nil {
			t.Logf("[%d] %s", i, err)
			return
		}
		t.Logf("[%d] %d -> %d", i, first+1, first+size)
		time.Sleep(time.Second)
	}
}

func TestSeqMalloc(t *testing.T) {
	ts := newTestSeq()
	t.Log(ts.GetMaxSeq(context.Background(), "100"))
}

func TestMinSeq(t *testing.T) {
	ts := newTestSeq()
	t.Log(ts.GetMinSeq(context.Background(), "10000000"))
}

func TestMalloc(t *testing.T) {
	ts := newTestSeq()
	t.Log(ts.mallocTime(context.Background(), "10000000", 100))
}

func TestHMGET(t *testing.T) {
	ts := newTestSeq()
	res, err := ts.GetCacheMaxSeqWithTime(context.Background(), []string{"10000000", "123456"})
	if err != nil {
		panic(err)
	}
	t.Log(res)
}

func TestGetMaxSeqWithTime(t *testing.T) {
	ts := newTestSeq()
	t.Log(ts.GetMaxSeqWithTime(context.Background(), "10000000"))
}

func TestGetMaxSeqWithTime1(t *testing.T) {
	ts := newTestSeq()
	t.Log(ts.GetMaxSeqsWithTime(context.Background(), []string{"10000000", "12345", "111"}))
}

//
//func TestHMGET(t *testing.T) {
//	ts := newTestSeq()
//	res, err := ts.rdb.HMGet(context.Background(), "MALLOC_SEQ:1", "CURR", "TIME1").Result()
//	if err != nil {
//		panic(err)
//	}
//	t.Log(res)
//}
184
pkg/common/storage/cache/redis/seq_user.go
vendored
Normal file
@@ -0,0 +1,184 @@
package redis

import (
	"context"
	"strconv"
	"time"

	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache"
	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache/cachekey"
	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/database"
	"github.com/openimsdk/tools/errs"
	"github.com/redis/go-redis/v9"
)

func NewSeqUserCacheRedis(rdb redis.UniversalClient, mgo database.SeqUser) cache.SeqUser {
	return &seqUserCacheRedis{
		mgo:               mgo,
		readSeqWriteRatio: 100,
		expireTime:        time.Hour * 24 * 7,
		readExpireTime:    time.Hour * 24 * 30,
		rocks:             newRocksCacheClient(rdb),
	}
}

type seqUserCacheRedis struct {
	mgo               database.SeqUser
	rocks             *rocksCacheClient
	expireTime        time.Duration
	readExpireTime    time.Duration
	readSeqWriteRatio int64
}

func (s *seqUserCacheRedis) getSeqUserMaxSeqKey(conversationID string, userID string) string {
	return cachekey.GetSeqUserMaxSeqKey(conversationID, userID)
}

func (s *seqUserCacheRedis) getSeqUserMinSeqKey(conversationID string, userID string) string {
	return cachekey.GetSeqUserMinSeqKey(conversationID, userID)
}

func (s *seqUserCacheRedis) getSeqUserReadSeqKey(conversationID string, userID string) string {
	return cachekey.GetSeqUserReadSeqKey(conversationID, userID)
}

func (s *seqUserCacheRedis) GetUserMaxSeq(ctx context.Context, conversationID string, userID string) (int64, error) {
	return getCache(ctx, s.rocks, s.getSeqUserMaxSeqKey(conversationID, userID), s.expireTime, func(ctx context.Context) (int64, error) {
		return s.mgo.GetUserMaxSeq(ctx, conversationID, userID)
	})
}

func (s *seqUserCacheRedis) SetUserMaxSeq(ctx context.Context, conversationID string, userID string, seq int64) error {
	if err := s.mgo.SetUserMaxSeq(ctx, conversationID, userID, seq); err != nil {
		return err
	}
	return s.rocks.GetClient().TagAsDeleted2(ctx, s.getSeqUserMaxSeqKey(conversationID, userID))
}

func (s *seqUserCacheRedis) GetUserMinSeq(ctx context.Context, conversationID string, userID string) (int64, error) {
	return getCache(ctx, s.rocks, s.getSeqUserMinSeqKey(conversationID, userID), s.expireTime, func(ctx context.Context) (int64, error) {
		return s.mgo.GetUserMinSeq(ctx, conversationID, userID)
	})
}

func (s *seqUserCacheRedis) SetUserMinSeq(ctx context.Context, conversationID string, userID string, seq int64) error {
	return s.SetUserMinSeqs(ctx, userID, map[string]int64{conversationID: seq})
}

func (s *seqUserCacheRedis) GetUserReadSeq(ctx context.Context, conversationID string, userID string) (int64, error) {
	return getCache(ctx, s.rocks, s.getSeqUserReadSeqKey(conversationID, userID), s.readExpireTime, func(ctx context.Context) (int64, error) {
		return s.mgo.GetUserReadSeq(ctx, conversationID, userID)
	})
}

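// SetUserReadSeq advances the cached read seq for the user in the given
// conversation. When Redis is unavailable it falls back to SetUserReadSeqToDB;
// otherwise it only updates the Redis value, and only when the new seq is
// larger than the currently cached one, so the read seq never moves backwards.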
func (s *seqUserCacheRedis) SetUserReadSeq(ctx context.Context, conversationID string, userID string, seq int64) error {
	if s.rocks.GetRedis() == nil {
		return s.SetUserReadSeqToDB(ctx, conversationID, userID, seq)
	}
	dbSeq, err := s.GetUserReadSeq(ctx, conversationID, userID)
	if err != nil {
		return err
	}
	if dbSeq < seq {
		if err := s.rocks.GetClient().RawSet(ctx, s.getSeqUserReadSeqKey(conversationID, userID), strconv.Itoa(int(seq)), s.readExpireTime); err != nil {
			return errs.Wrap(err)
		}
	}
	return nil
}

func (s *seqUserCacheRedis) SetUserReadSeqToDB(ctx context.Context, conversationID string, userID string, seq int64) error {
	return s.mgo.SetUserReadSeq(ctx, conversationID, userID, seq)
}

func (s *seqUserCacheRedis) SetUserMinSeqs(ctx context.Context, userID string, seqs map[string]int64) error {
	keys := make([]string, 0, len(seqs))
	for conversationID, seq := range seqs {
		if err := s.mgo.SetUserMinSeq(ctx, conversationID, userID, seq); err != nil {
			return err
		}
		keys = append(keys, s.getSeqUserMinSeqKey(conversationID, userID))
	}
	return DeleteCacheBySlot(ctx, s.rocks, keys)
}

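// setUserRedisReadSeqs writes read seqs straight into Redis, pipelined per
// hash slot. Each value is stored under the "value" field of the key's hash,
// which appears to match the layout the rockscache client reads back in
// GetUserReadSeq; that correspondence is an observation from this file, not
// documented behavior.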
func (s *seqUserCacheRedis) setUserRedisReadSeqs(ctx context.Context, userID string, seqs map[string]int64) error {
	keys := make([]string, 0, len(seqs))
	keySeq := make(map[string]int64)
	for conversationID, seq := range seqs {
		key := s.getSeqUserReadSeqKey(conversationID, userID)
		keys = append(keys, key)
		keySeq[key] = seq
	}
	slotKeys, err := groupKeysBySlot(ctx, s.rocks.GetRedis(), keys)
	if err != nil {
		return err
	}
	for _, keys := range slotKeys {
		pipe := s.rocks.GetRedis().Pipeline()
		for _, key := range keys {
			pipe.HSet(ctx, key, "value", strconv.FormatInt(keySeq[key], 10))
			pipe.Expire(ctx, key, s.readExpireTime)
		}
		if _, err := pipe.Exec(ctx); err != nil {
			return err
		}
	}
	return nil
}

func (s *seqUserCacheRedis) SetUserReadSeqs(ctx context.Context, userID string, seqs map[string]int64) error {
	if len(seqs) == 0 {
		return nil
	}
	if err := s.setUserRedisReadSeqs(ctx, userID, seqs); err != nil {
		return err
	}
	return nil
}

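// GetUserReadSeqs batch-loads read seqs through the cache, falling back to
// MongoDB for missing conversations. readSeqModel (below) marshals a seq as a
// plain decimal string, which keeps the cached representation consistent with
// the per-key values written by setUserRedisReadSeqs.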
func (s *seqUserCacheRedis) GetUserReadSeqs(ctx context.Context, userID string, conversationIDs []string) (map[string]int64, error) {
	res, err := batchGetCache2(ctx, s.rocks, s.readExpireTime, conversationIDs, func(conversationID string) string {
		return s.getSeqUserReadSeqKey(conversationID, userID)
	}, func(v *readSeqModel) string {
		return v.ConversationID
	}, func(ctx context.Context, conversationIDs []string) ([]*readSeqModel, error) {
		seqs, err := s.mgo.GetUserReadSeqs(ctx, userID, conversationIDs)
		if err != nil {
			return nil, err
		}
		res := make([]*readSeqModel, 0, len(seqs))
		for conversationID, seq := range seqs {
			res = append(res, &readSeqModel{ConversationID: conversationID, Seq: seq})
		}
		return res, nil
	})
	if err != nil {
		return nil, err
	}
	data := make(map[string]int64)
	for _, v := range res {
		data[v.ConversationID] = v.Seq
	}
	return data, nil
}

var _ BatchCacheCallback[string] = (*readSeqModel)(nil)

type readSeqModel struct {
	ConversationID string
	Seq            int64
}

func (r *readSeqModel) BatchCache(conversationID string) {
	r.ConversationID = conversationID
}

func (r *readSeqModel) UnmarshalJSON(bytes []byte) (err error) {
	r.Seq, err = strconv.ParseInt(string(bytes), 10, 64)
	return
}

func (r *readSeqModel) MarshalJSON() ([]byte, error) {
	return []byte(strconv.FormatInt(r.Seq, 10)), nil
}
Some files were not shown because too many files have changed in this diff