pkg/common/storage/cache/batch_handler.go (vendored, new file, +17)
@@ -0,0 +1,17 @@
package cache

import (
    "context"
)

// BatchDeleter interface defines a set of methods for batch deleting cache and publishing deletion information.
type BatchDeleter interface {
    // ChainExecDel method is used for chain calls and must call Clone to prevent memory pollution.
    ChainExecDel(ctx context.Context) error
    // ExecDelWithKeys method directly takes keys for deletion.
    ExecDelWithKeys(ctx context.Context, keys []string) error
    // Clone method creates a copy of the BatchDeleter to avoid modifying the original object.
    Clone() BatchDeleter
    // AddKeys method adds keys to be deleted.
    AddKeys(keys ...string)
}

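The comments above imply a strict call discipline; a minimal usage sketch (the concrete key names are hypothetical, only the Clone-before-AddKeys contract comes from the interface comments):

// deleteUserKeys sketches the intended chain-call pattern: Clone first so the
// shared deleter's key list is not polluted, then queue keys, then run the
// batch deletion (which also publishes the deleted keys).
func deleteUserKeys(ctx context.Context, d BatchDeleter, userID string) error {
    del := d.Clone()               // copy to avoid memory pollution
    del.AddKeys("black:" + userID) // hypothetical cache keys
    del.AddKeys("friend_ids:" + userID)
    return del.ChainExecDel(ctx)
}
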
pkg/common/storage/cache/black.go (vendored, new file, +27)
@@ -0,0 +1,27 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cache

import (
    "context"
)

type BlackCache interface {
    BatchDeleter
    CloneBlackCache() BlackCache
    GetBlackIDs(ctx context.Context, userID string) (blackIDs []string, err error)
    // DelBlackIDs deletes a user's cached blackIDs; execute it whenever the user's black list changes.
    DelBlackIDs(ctx context.Context, userID string) BlackCache
}

pkg/common/storage/cache/client_config.go (vendored, new file, +8)
@@ -0,0 +1,8 @@
package cache

import "context"

type ClientConfigCache interface {
    DeleteUserCache(ctx context.Context, userIDs []string) error
    GetUserConfig(ctx context.Context, userID string) (map[string]string, error)
}

pkg/common/storage/cache/conversation.go (vendored, new file, +65)
@@ -0,0 +1,65 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cache

import (
    "context"

    relationtb "git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/model"
)

// The fn argument is executed when there is no data in the msg cache.
type ConversationCache interface {
    BatchDeleter
    CloneConversationCache() ConversationCache
    // get a user's conversationIDs from the msg cache
    GetUserConversationIDs(ctx context.Context, ownerUserID string) ([]string, error)
    GetUserNotNotifyConversationIDs(ctx context.Context, userID string) ([]string, error)
    GetPinnedConversationIDs(ctx context.Context, userID string) ([]string, error)
    DelConversationIDs(userIDs ...string) ConversationCache

    GetUserConversationIDsHash(ctx context.Context, ownerUserID string) (hash uint64, err error)
    DelUserConversationIDsHash(ownerUserIDs ...string) ConversationCache

    // get one conversation from the msg cache
    GetConversation(ctx context.Context, ownerUserID, conversationID string) (*relationtb.Conversation, error)
    DelConversations(ownerUserID string, conversationIDs ...string) ConversationCache
    DelUsersConversation(conversationID string, ownerUserIDs ...string) ConversationCache
    // get multiple conversations from the msg cache
    GetConversations(ctx context.Context, ownerUserID string, conversationIDs []string) ([]*relationtb.Conversation, error)
    // get all of one user's conversations from the msg cache
    GetUserAllConversations(ctx context.Context, ownerUserID string) ([]*relationtb.Conversation, error)
    // get a user's receive-message option for one conversation from the msg cache
    GetUserRecvMsgOpt(ctx context.Context, ownerUserID, conversationID string) (opt int, err error)
    DelUserRecvMsgOpt(ownerUserID, conversationID string) ConversationCache
    // userID list of a super group's members who receive messages but are not notified
    // GetSuperGroupRecvMsgNotNotifyUserIDs(ctx context.Context, groupID string) (userIDs []string, err error)
    DelSuperGroupRecvMsgNotNotifyUserIDs(groupID string) ConversationCache
    // hash of the userID list of a super group's members who receive messages but are not notified
    // GetSuperGroupRecvMsgNotNotifyUserIDsHash(ctx context.Context, groupID string) (hash uint64, err error)
    DelSuperGroupRecvMsgNotNotifyUserIDsHash(groupID string) ConversationCache

    // GetUserAllHasReadSeqs(ctx context.Context, ownerUserID string) (map[string]int64, error)
    DelUserAllHasReadSeqs(ownerUserID string, conversationIDs ...string) ConversationCache

    GetConversationNotReceiveMessageUserIDs(ctx context.Context, conversationID string) ([]string, error)
    DelConversationNotReceiveMessageUserIDs(conversationIDs ...string) ConversationCache
    DelConversationNotNotifyMessageUserIDs(userIDs ...string) ConversationCache
    DelUserPinnedConversations(userIDs ...string) ConversationCache
    DelConversationVersionUserIDs(userIDs ...string) ConversationCache

    FindMaxConversationUserVersion(ctx context.Context, userID string) (*relationtb.VersionLog, error)
}

pkg/common/storage/cache/doc.go (vendored, new file, +15)
@@ -0,0 +1,15 @@
// Copyright © 2024 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cache // import "git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache"

pkg/common/storage/cache/friend.go (vendored, new file, +48)
@@ -0,0 +1,48 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cache

import (
    "context"

    relationtb "git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/model"
)

// FriendCache is an interface for caching friend-related data.
type FriendCache interface {
    BatchDeleter
    CloneFriendCache() FriendCache
    GetFriendIDs(ctx context.Context, ownerUserID string) (friendIDs []string, err error)
    // Called when the friendID list changes
    DelFriendIDs(ownerUserID ...string) FriendCache
    // Get a single friend's info from the cache
    GetFriend(ctx context.Context, ownerUserID, friendUserID string) (friend *relationtb.Friend, err error)
    // Delete a friend when their info changes
    DelFriend(ownerUserID, friendUserID string) FriendCache
    // Delete multiple friends when their info changes
    DelFriends(ownerUserID string, friendUserIDs []string) FriendCache

    DelOwner(friendUserID string, ownerUserIDs []string) FriendCache

    DelMaxFriendVersion(ownerUserIDs ...string) FriendCache

    //DelSortFriendUserIDs(ownerUserIDs ...string) FriendCache

    //FindSortFriendUserIDs(ctx context.Context, ownerUserID string) ([]string, error)

    //FindFriendIncrVersion(ctx context.Context, ownerUserID string, version uint, limit int) (*relationtb.VersionLog, error)

    FindMaxFriendVersion(ctx context.Context, ownerUserID string) (*relationtb.VersionLog, error)
}

pkg/common/storage/cache/group.go (vendored, new file, +70)
@@ -0,0 +1,70 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cache

import (
    "context"

    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/common"
    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/model"
)

type GroupHash interface {
    GetGroupHash(ctx context.Context, groupID string) (uint64, error)
}

type GroupCache interface {
    BatchDeleter
    CloneGroupCache() GroupCache
    GetGroupsInfo(ctx context.Context, groupIDs []string) (groups []*model.Group, err error)
    GetGroupInfo(ctx context.Context, groupID string) (group *model.Group, err error)
    DelGroupsInfo(groupIDs ...string) GroupCache

    GetGroupMembersHash(ctx context.Context, groupID string) (hashCode uint64, err error)
    GetGroupMemberHashMap(ctx context.Context, groupIDs []string) (map[string]*common.GroupSimpleUserID, error)
    DelGroupMembersHash(groupID string) GroupCache

    GetGroupMemberIDs(ctx context.Context, groupID string) (groupMemberIDs []string, err error)

    DelGroupMemberIDs(groupID string) GroupCache

    GetJoinedGroupIDs(ctx context.Context, userID string) (joinedGroupIDs []string, err error)
    DelJoinedGroupID(userID ...string) GroupCache

    GetGroupMemberInfo(ctx context.Context, groupID, userID string) (groupMember *model.GroupMember, err error)
    GetGroupMembersInfo(ctx context.Context, groupID string, userID []string) (groupMembers []*model.GroupMember, err error)
    GetAllGroupMembersInfo(ctx context.Context, groupID string) (groupMembers []*model.GroupMember, err error)
    FindGroupMemberUser(ctx context.Context, groupIDs []string, userID string) ([]*model.GroupMember, error)

    GetGroupRoleLevelMemberIDs(ctx context.Context, groupID string, roleLevel int32) ([]string, error)
    GetGroupOwner(ctx context.Context, groupID string) (*model.GroupMember, error)
    GetGroupsOwner(ctx context.Context, groupIDs []string) ([]*model.GroupMember, error)
    DelGroupRoleLevel(groupID string, roleLevel []int32) GroupCache
    DelGroupAllRoleLevel(groupID string) GroupCache
    DelGroupMembersInfo(groupID string, userID ...string) GroupCache
    GetGroupRoleLevelMemberInfo(ctx context.Context, groupID string, roleLevel int32) ([]*model.GroupMember, error)
    GetGroupRolesLevelMemberInfo(ctx context.Context, groupID string, roleLevels []int32) ([]*model.GroupMember, error)
    GetGroupMemberNum(ctx context.Context, groupID string) (memberNum int64, err error)
    DelGroupsMemberNum(groupID ...string) GroupCache

    //FindSortGroupMemberUserIDs(ctx context.Context, groupID string) ([]string, error)
    //FindSortJoinGroupIDs(ctx context.Context, userID string) ([]string, error)

    DelMaxGroupMemberVersion(groupIDs ...string) GroupCache
    DelMaxJoinGroupVersion(userIDs ...string) GroupCache
    FindMaxGroupMemberVersion(ctx context.Context, groupID string) (*model.VersionLog, error)
    BatchFindMaxGroupMemberVersion(ctx context.Context, groupIDs []string) ([]*model.VersionLog, error)
    FindMaxJoinGroupVersion(ctx context.Context, userID string) (*model.VersionLog, error)
}

pkg/common/storage/cache/mcache/minio.go (vendored, new file, +50)
@@ -0,0 +1,50 @@
package mcache

import (
    "context"
    "time"

    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache/cachekey"
    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/database"
    "github.com/openimsdk/tools/s3/minio"
)

func NewMinioCache(cache database.Cache) minio.Cache {
    return &minioCache{
        cache:      cache,
        expireTime: time.Hour * 24 * 7,
    }
}

type minioCache struct {
    cache      database.Cache
    expireTime time.Duration
}

func (g *minioCache) getObjectImageInfoKey(key string) string {
    return cachekey.GetObjectImageInfoKey(key)
}

func (g *minioCache) getMinioImageThumbnailKey(key string, format string, width int, height int) string {
    return cachekey.GetMinioImageThumbnailKey(key, format, width, height)
}

func (g *minioCache) DelObjectImageInfoKey(ctx context.Context, keys ...string) error {
    ks := make([]string, 0, len(keys))
    for _, key := range keys {
        ks = append(ks, g.getObjectImageInfoKey(key))
    }
    return g.cache.Del(ctx, ks)
}

func (g *minioCache) DelImageThumbnailKey(ctx context.Context, key string, format string, width int, height int) error {
    return g.cache.Del(ctx, []string{g.getMinioImageThumbnailKey(key, format, width, height)})
}

func (g *minioCache) GetImageObjectKeyInfo(ctx context.Context, key string, fn func(ctx context.Context) (*minio.ImageInfo, error)) (*minio.ImageInfo, error) {
    return getCache[*minio.ImageInfo](ctx, g.cache, g.getObjectImageInfoKey(key), g.expireTime, fn)
}

func (g *minioCache) GetThumbnailKey(ctx context.Context, key string, format string, width int, height int, minioCache func(ctx context.Context) (string, error)) (string, error) {
    return getCache[string](ctx, g.cache, g.getMinioImageThumbnailKey(key, format, width, height), g.expireTime, minioCache)
}

pkg/common/storage/cache/mcache/msg_cache.go (vendored, new file, +132)
@@ -0,0 +1,132 @@
package mcache

import (
    "context"
    "strconv"
    "sync"
    "time"

    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache"
    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache/cachekey"
    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/database"
    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/model"
    "git.imall.cloud/openim/open-im-server-deploy/pkg/localcache"
    "git.imall.cloud/openim/open-im-server-deploy/pkg/localcache/lru"
    "github.com/openimsdk/tools/errs"
    "github.com/openimsdk/tools/utils/datautil"
    "github.com/redis/go-redis/v9"
)

var (
    memMsgCache     lru.LRU[string, *model.MsgInfoModel]
    initMemMsgCache sync.Once
)

func NewMsgCache(cache database.Cache, msgDocDatabase database.Msg) cache.MsgCache {
    initMemMsgCache.Do(func() {
        memMsgCache = lru.NewLazyLRU[string, *model.MsgInfoModel](1024*8, time.Hour, time.Second*10, localcache.EmptyTarget{}, nil)
    })
    return &msgCache{
        cache:          cache,
        msgDocDatabase: msgDocDatabase,
        memMsgCache:    memMsgCache,
    }
}

type msgCache struct {
    cache          database.Cache
    msgDocDatabase database.Msg
    memMsgCache    lru.LRU[string, *model.MsgInfoModel]
}

func (x *msgCache) getSendMsgKey(id string) string {
    return cachekey.GetSendMsgKey(id)
}

func (x *msgCache) SetSendMsgStatus(ctx context.Context, id string, status int32) error {
    return x.cache.Set(ctx, x.getSendMsgKey(id), strconv.Itoa(int(status)), time.Hour*24)
}

func (x *msgCache) GetSendMsgStatus(ctx context.Context, id string) (int32, error) {
    key := x.getSendMsgKey(id)
    res, err := x.cache.Get(ctx, []string{key})
    if err != nil {
        return 0, err
    }
    val, ok := res[key]
    if !ok {
        return 0, errs.Wrap(redis.Nil)
    }
    status, err := strconv.Atoi(val)
    if err != nil {
        return 0, errs.WrapMsg(err, "GetSendMsgStatus strconv.Atoi error", "val", val)
    }
    return int32(status), nil
}

func (x *msgCache) getMsgCacheKey(conversationID string, seq int64) string {
    return cachekey.GetMsgCacheKey(conversationID, seq)
}

func (x *msgCache) GetMessageBySeqs(ctx context.Context, conversationID string, seqs []int64) ([]*model.MsgInfoModel, error) {
    if len(seqs) == 0 {
        return nil, nil
    }
    keys := make([]string, 0, len(seqs))
    keySeq := make(map[string]int64, len(seqs))
    for _, seq := range seqs {
        key := x.getMsgCacheKey(conversationID, seq)
        keys = append(keys, key)
        keySeq[key] = seq
    }
    res, err := x.memMsgCache.GetBatch(keys, func(keys []string) (map[string]*model.MsgInfoModel, error) {
        findSeqs := make([]int64, 0, len(keys))
        for _, key := range keys {
            seq, ok := keySeq[key]
            if !ok {
                continue
            }
            findSeqs = append(findSeqs, seq)
        }
        // Only fetch the seqs that missed the in-memory cache.
        res, err := x.msgDocDatabase.FindSeqs(ctx, conversationID, findSeqs)
        if err != nil {
            return nil, err
        }
        kv := make(map[string]*model.MsgInfoModel)
        for i := range res {
            msg := res[i]
            if msg == nil || msg.Msg == nil || msg.Msg.Seq <= 0 {
                continue
            }
            key := x.getMsgCacheKey(conversationID, msg.Msg.Seq)
            kv[key] = msg
        }
        return kv, nil
    })
    if err != nil {
        return nil, err
    }
    return datautil.Values(res), nil
}

func (x *msgCache) DelMessageBySeqs(ctx context.Context, conversationID string, seqs []int64) error {
    if len(seqs) == 0 {
        return nil
    }
    for _, seq := range seqs {
        x.memMsgCache.Del(x.getMsgCacheKey(conversationID, seq))
    }
    return nil
}

func (x *msgCache) SetMessageBySeqs(ctx context.Context, conversationID string, msgs []*model.MsgInfoModel) error {
    for i := range msgs {
        msg := msgs[i]
        if msg == nil || msg.Msg == nil || msg.Msg.Seq <= 0 {
            continue
        }
        x.memMsgCache.Set(x.getMsgCacheKey(conversationID, msg.Msg.Seq), msg)
    }
    return nil
}

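A usage sketch (the conversation ID and seqs are illustrative): the first call fills the process-wide LRU through the GetBatch loader above, and repeated reads of the same seqs are served from memory until the one-hour TTL lapses:

// exampleRead shows the read path; db and msgDoc come from the caller's wiring.
func exampleRead(ctx context.Context, db database.Cache, msgDoc database.Msg) ([]*model.MsgInfoModel, error) {
    mc := NewMsgCache(db, msgDoc)
    // Misses are loaded via msgDocDatabase.FindSeqs and cached; hits come
    // straight from the shared in-memory LRU.
    return mc.GetMessageBySeqs(ctx, "si_u1_u2", []int64{1, 2, 3})
}
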
pkg/common/storage/cache/mcache/online.go (vendored, new file, +82)
@@ -0,0 +1,82 @@
package mcache

import (
    "context"
    "sync"

    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache"
)

var (
    globalOnlineCache cache.OnlineCache
    globalOnlineOnce  sync.Once
)

func NewOnlineCache() cache.OnlineCache {
    globalOnlineOnce.Do(func() {
        globalOnlineCache = &onlineCache{
            user: make(map[string]map[int32]struct{}),
        }
    })
    return globalOnlineCache
}

type onlineCache struct {
    lock sync.RWMutex
    user map[string]map[int32]struct{}
}

func (x *onlineCache) GetOnline(ctx context.Context, userID string) ([]int32, error) {
    x.lock.RLock()
    defer x.lock.RUnlock()
    pSet, ok := x.user[userID]
    if !ok {
        return nil, nil
    }
    res := make([]int32, 0, len(pSet))
    for k := range pSet {
        res = append(res, k)
    }
    return res, nil
}

func (x *onlineCache) SetUserOnline(ctx context.Context, userID string, online, offline []int32) error {
    x.lock.Lock()
    defer x.lock.Unlock()
    pSet, ok := x.user[userID]
    if ok {
        for _, p := range offline {
            delete(pSet, p)
        }
    }
    if len(online) > 0 {
        if !ok {
            pSet = make(map[int32]struct{})
            x.user[userID] = pSet
        }
        for _, p := range online {
            pSet[p] = struct{}{}
        }
    }
    if len(pSet) == 0 {
        delete(x.user, userID)
    }
    return nil
}

func (x *onlineCache) GetAllOnlineUsers(ctx context.Context, cursor uint64) (map[string][]int32, uint64, error) {
    if cursor != 0 {
        return nil, 0, nil
    }
    x.lock.RLock()
    defer x.lock.RUnlock()
    res := make(map[string][]int32)
    for k, v := range x.user {
        pSet := make([]int32, 0, len(v))
        for p := range v {
            pSet = append(pSet, p)
        }
        res[k] = pSet
    }
    return res, 0, nil
}

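The map-of-sets semantics above can be read off directly; a short sketch with invented user and platform IDs:

// After these calls the user is online on platform 2 only; once the last
// platform goes offline the user's entry is removed from the map entirely.
func exampleOnline(ctx context.Context) {
    oc := NewOnlineCache()
    _ = oc.SetUserOnline(ctx, "u1", []int32{1, 2}, nil) // platforms 1 and 2 online
    _ = oc.SetUserOnline(ctx, "u1", nil, []int32{1})    // platform 1 goes offline
    platforms, _ := oc.GetOnline(ctx, "u1")             // platforms == []int32{2}
    _ = platforms
    _ = oc.SetUserOnline(ctx, "u1", nil, []int32{2}) // set empties; entry deleted
}
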
pkg/common/storage/cache/mcache/seq_conversation.go (vendored, new file, +79)
@@ -0,0 +1,79 @@
package mcache

import (
    "context"

    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache"
    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/database"
)

func NewSeqConversationCache(sc database.SeqConversation) cache.SeqConversationCache {
    return &seqConversationCache{
        sc: sc,
    }
}

type seqConversationCache struct {
    sc database.SeqConversation
}

func (x *seqConversationCache) Malloc(ctx context.Context, conversationID string, size int64) (int64, error) {
    return x.sc.Malloc(ctx, conversationID, size)
}

func (x *seqConversationCache) SetMinSeq(ctx context.Context, conversationID string, seq int64) error {
    return x.sc.SetMinSeq(ctx, conversationID, seq)
}

func (x *seqConversationCache) GetMinSeq(ctx context.Context, conversationID string) (int64, error) {
    return x.sc.GetMinSeq(ctx, conversationID)
}

func (x *seqConversationCache) GetMaxSeqs(ctx context.Context, conversationIDs []string) (map[string]int64, error) {
    res := make(map[string]int64)
    for _, conversationID := range conversationIDs {
        seq, err := x.GetMaxSeq(ctx, conversationID)
        if err != nil {
            return nil, err
        }
        res[conversationID] = seq
    }
    return res, nil
}

func (x *seqConversationCache) GetMaxSeqsWithTime(ctx context.Context, conversationIDs []string) (map[string]database.SeqTime, error) {
    res := make(map[string]database.SeqTime)
    for _, conversationID := range conversationIDs {
        seq, err := x.GetMaxSeq(ctx, conversationID)
        if err != nil {
            return nil, err
        }
        res[conversationID] = database.SeqTime{Seq: seq}
    }
    return res, nil
}

func (x *seqConversationCache) GetMaxSeq(ctx context.Context, conversationID string) (int64, error) {
    return x.sc.GetMaxSeq(ctx, conversationID)
}

func (x *seqConversationCache) GetMaxSeqWithTime(ctx context.Context, conversationID string) (database.SeqTime, error) {
    seq, err := x.GetMaxSeq(ctx, conversationID)
    if err != nil {
        return database.SeqTime{}, err
    }
    return database.SeqTime{Seq: seq}, nil
}

func (x *seqConversationCache) SetMinSeqs(ctx context.Context, seqs map[string]int64) error {
    for conversationID, seq := range seqs {
        if err := x.sc.SetMinSeq(ctx, conversationID, seq); err != nil {
            return err
        }
    }
    return nil
}

func (x *seqConversationCache) GetCacheMaxSeqWithTime(ctx context.Context, conversationIDs []string) (map[string]database.SeqTime, error) {
    return x.GetMaxSeqsWithTime(ctx, conversationIDs)
}

pkg/common/storage/cache/mcache/third.go (vendored, new file, +98)
@@ -0,0 +1,98 @@
package mcache

import (
    "context"
    "strconv"
    "time"

    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache"
    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache/cachekey"
    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/database"
    "github.com/openimsdk/tools/errs"
    "github.com/redis/go-redis/v9"
)

func NewThirdCache(cache database.Cache) cache.ThirdCache {
    return &thirdCache{
        cache: cache,
    }
}

type thirdCache struct {
    cache database.Cache
}

func (c *thirdCache) getGetuiTokenKey() string {
    return cachekey.GetGetuiTokenKey()
}

func (c *thirdCache) getGetuiTaskIDKey() string {
    return cachekey.GetGetuiTaskIDKey()
}

func (c *thirdCache) getUserBadgeUnreadCountSumKey(userID string) string {
    return cachekey.GetUserBadgeUnreadCountSumKey(userID)
}

func (c *thirdCache) getFcmAccountTokenKey(account string, platformID int) string {
    return cachekey.GetFcmAccountTokenKey(account, platformID)
}

func (c *thirdCache) get(ctx context.Context, key string) (string, error) {
    res, err := c.cache.Get(ctx, []string{key})
    if err != nil {
        return "", err
    }
    if val, ok := res[key]; ok {
        return val, nil
    }
    return "", errs.Wrap(redis.Nil)
}

func (c *thirdCache) SetFcmToken(ctx context.Context, account string, platformID int, fcmToken string, expireTime int64) (err error) {
    return errs.Wrap(c.cache.Set(ctx, c.getFcmAccountTokenKey(account, platformID), fcmToken, time.Duration(expireTime)*time.Second))
}

func (c *thirdCache) GetFcmToken(ctx context.Context, account string, platformID int) (string, error) {
    return c.get(ctx, c.getFcmAccountTokenKey(account, platformID))
}

func (c *thirdCache) DelFcmToken(ctx context.Context, account string, platformID int) error {
    return c.cache.Del(ctx, []string{c.getFcmAccountTokenKey(account, platformID)})
}

func (c *thirdCache) IncrUserBadgeUnreadCountSum(ctx context.Context, userID string) (int, error) {
    return c.cache.Incr(ctx, c.getUserBadgeUnreadCountSumKey(userID), 1)
}

func (c *thirdCache) SetUserBadgeUnreadCountSum(ctx context.Context, userID string, value int) error {
    return c.cache.Set(ctx, c.getUserBadgeUnreadCountSumKey(userID), strconv.Itoa(value), 0)
}

func (c *thirdCache) GetUserBadgeUnreadCountSum(ctx context.Context, userID string) (int, error) {
    str, err := c.get(ctx, c.getUserBadgeUnreadCountSumKey(userID))
    if err != nil {
        return 0, err
    }
    val, err := strconv.Atoi(str)
    if err != nil {
        return 0, errs.WrapMsg(err, "strconv.Atoi", "str", str)
    }
    return val, nil
}

func (c *thirdCache) SetGetuiToken(ctx context.Context, token string, expireTime int64) error {
    return c.cache.Set(ctx, c.getGetuiTokenKey(), token, time.Duration(expireTime)*time.Second)
}

func (c *thirdCache) GetGetuiToken(ctx context.Context) (string, error) {
    return c.get(ctx, c.getGetuiTokenKey())
}

func (c *thirdCache) SetGetuiTaskID(ctx context.Context, taskID string, expireTime int64) error {
    return c.cache.Set(ctx, c.getGetuiTaskIDKey(), taskID, time.Duration(expireTime)*time.Second)
}

func (c *thirdCache) GetGetuiTaskID(ctx context.Context) (string, error) {
    return c.get(ctx, c.getGetuiTaskIDKey())
}

pkg/common/storage/cache/mcache/token.go (vendored, new file, +166)
@@ -0,0 +1,166 @@
package mcache

import (
    "context"
    "fmt"
    "strconv"
    "strings"
    "time"

    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache"
    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache/cachekey"
    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/database"
    "github.com/openimsdk/tools/errs"
    "github.com/openimsdk/tools/log"
)

func NewTokenCacheModel(cache database.Cache, accessExpire int64) cache.TokenModel {
    c := &tokenCache{cache: cache}
    c.accessExpire = c.getExpireTime(accessExpire)
    return c
}

type tokenCache struct {
    cache        database.Cache
    accessExpire time.Duration
}

func (x *tokenCache) getTokenKey(userID string, platformID int, token string) string {
    return cachekey.GetTokenKey(userID, platformID) + ":" + token
}

func (x *tokenCache) SetTokenFlag(ctx context.Context, userID string, platformID int, token string, flag int) error {
    return x.cache.Set(ctx, x.getTokenKey(userID, platformID, token), strconv.Itoa(flag), x.accessExpire)
}

// SetTokenFlagEx sets the token and flag with the default access expire time.
func (x *tokenCache) SetTokenFlagEx(ctx context.Context, userID string, platformID int, token string, flag int) error {
    return x.SetTokenFlag(ctx, userID, platformID, token, flag)
}

func (x *tokenCache) GetTokensWithoutError(ctx context.Context, userID string, platformID int) (map[string]int, error) {
    prefix := x.getTokenKey(userID, platformID, "")
    m, err := x.cache.Prefix(ctx, prefix)
    if err != nil {
        return nil, errs.Wrap(err)
    }
    mm := make(map[string]int)
    for k, v := range m {
        state, err := strconv.Atoi(v)
        if err != nil {
            log.ZError(ctx, "token value is not an int", err, "value", v, "userID", userID, "platformID", platformID)
            continue
        }
        mm[strings.TrimPrefix(k, prefix)] = state
    }
    return mm, nil
}

func (x *tokenCache) HasTemporaryToken(ctx context.Context, userID string, platformID int, token string) error {
    key := cachekey.GetTemporaryTokenKey(userID, platformID, token)
    if _, err := x.cache.Get(ctx, []string{key}); err != nil {
        return err
    }
    return nil
}

func (x *tokenCache) GetAllTokensWithoutError(ctx context.Context, userID string) (map[int]map[string]int, error) {
    prefix := cachekey.UidPidToken + userID + ":"
    tokens, err := x.cache.Prefix(ctx, prefix)
    if err != nil {
        return nil, err
    }
    res := make(map[int]map[string]int)
    for key, flagStr := range tokens {
        flag, err := strconv.Atoi(flagStr)
        if err != nil {
            log.ZError(ctx, "token flag is not an int", err, "key", key, "value", flagStr, "userID", userID)
            continue
        }
        arr := strings.SplitN(strings.TrimPrefix(key, prefix), ":", 2)
        if len(arr) != 2 {
            log.ZError(ctx, "token key is malformed", nil, "key", key, "value", flagStr, "userID", userID)
            continue
        }
        platformID, err := strconv.Atoi(arr[0])
        if err != nil {
            log.ZError(ctx, "token platformID is not an int", err, "key", key, "value", flagStr, "userID", userID)
            continue
        }
        token := arr[1]
        if token == "" {
            log.ZError(ctx, "token is empty", nil, "key", key, "value", flagStr, "userID", userID)
            continue
        }
        tk, ok := res[platformID]
        if !ok {
            tk = make(map[string]int)
            res[platformID] = tk
        }
        tk[token] = flag
    }
    return res, nil
}

func (x *tokenCache) SetTokenMapByUidPid(ctx context.Context, userID string, platformID int, m map[string]int) error {
    for token, flag := range m {
        err := x.SetTokenFlag(ctx, userID, platformID, token, flag)
        if err != nil {
            return err
        }
    }
    return nil
}

func (x *tokenCache) BatchSetTokenMapByUidPid(ctx context.Context, tokens map[string]map[string]any) error {
    for prefix, tokenFlag := range tokens {
        for token, flag := range tokenFlag {
            flagStr := fmt.Sprintf("%v", flag)
            if err := x.cache.Set(ctx, prefix+":"+token, flagStr, x.accessExpire); err != nil {
                return err
            }
        }
    }
    return nil
}

func (x *tokenCache) DeleteTokenByUidPid(ctx context.Context, userID string, platformID int, fields []string) error {
    keys := make([]string, 0, len(fields))
    for _, token := range fields {
        keys = append(keys, x.getTokenKey(userID, platformID, token))
    }
    return x.cache.Del(ctx, keys)
}

func (x *tokenCache) getExpireTime(t int64) time.Duration {
    return time.Hour * 24 * time.Duration(t)
}

func (x *tokenCache) DeleteTokenByTokenMap(ctx context.Context, userID string, tokens map[int][]string) error {
    keys := make([]string, 0, len(tokens))
    for platformID, ts := range tokens {
        for _, t := range ts {
            keys = append(keys, x.getTokenKey(userID, platformID, t))
        }
    }
    return x.cache.Del(ctx, keys)
}

func (x *tokenCache) DeleteAndSetTemporary(ctx context.Context, userID string, platformID int, fields []string) error {
    keys := make([]string, 0, len(fields))
    for _, f := range fields {
        keys = append(keys, x.getTokenKey(userID, platformID, f))
    }
    if err := x.cache.Del(ctx, keys); err != nil {
        return err
    }

    for _, f := range fields {
        k := cachekey.GetTemporaryTokenKey(userID, platformID, f)
        if err := x.cache.Set(ctx, k, "", x.accessExpire); err != nil {
            return errs.Wrap(err)
        }
    }

    return nil
}

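The string surgery in GetAllTokensWithoutError only works because of the key shape established by getTokenKey; a worked example of the layout this implies (the exact value of cachekey.UidPidToken is internal to the cachekey package and shown symbolically):

// Key layout implied by getTokenKey and the prefix used above:
//
//   <UidPidToken><userID>:<platformID>:<token>  =>  flag (an int stored as a string)
//
// For userID "u1", platformID 1 and token "abc", SetTokenFlag stores
// cachekey.GetTokenKey("u1", 1) + ":abc". GetAllTokensWithoutError then trims
// the "<UidPidToken>u1:" prefix, and SplitN("1:abc", ":", 2) yields platformID
// "1" and token "abc"; entries that fail to parse are logged and skipped
// rather than failing the whole call.
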
pkg/common/storage/cache/mcache/tools.go (vendored, new file, +63)
@@ -0,0 +1,63 @@
package mcache

import (
    "context"
    "encoding/json"
    "time"

    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/database"
    "github.com/openimsdk/tools/log"
)

func getCache[V any](ctx context.Context, cache database.Cache, key string, expireTime time.Duration, fn func(ctx context.Context) (V, error)) (V, error) {
    getDB := func() (V, bool, error) {
        var val V
        res, err := cache.Get(ctx, []string{key})
        if err != nil {
            return val, false, err
        }
        if str, ok := res[key]; ok {
            if err := json.Unmarshal([]byte(str), &val); err != nil {
                // Treat a corrupt cache entry as a miss and recompute the value.
                return val, false, nil
            }
            return val, true, nil
        }
        return val, false, nil
    }
    dbVal, ok, err := getDB()
    if err != nil {
        return dbVal, err
    }
    if ok {
        return dbVal, nil
    }
    lockValue, err := cache.Lock(ctx, key, time.Minute)
    if err != nil {
        return dbVal, err
    }
    defer func() {
        if err := cache.Unlock(ctx, key, lockValue); err != nil {
            log.ZError(ctx, "unlock cache key", err, "key", key, "value", lockValue)
        }
    }()
    // Re-check under the lock: another process may have filled the cache.
    dbVal, ok, err = getDB()
    if err != nil {
        return dbVal, err
    }
    if ok {
        return dbVal, nil
    }
    val, err := fn(ctx)
    if err != nil {
        return val, err
    }
    data, err := json.Marshal(val)
    if err != nil {
        return val, err
    }
    if err := cache.Set(ctx, key, string(data), expireTime); err != nil {
        return val, err
    }
    return val, nil
}

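A usage sketch of this read-through helper (the key and loader closure are invented stand-ins): a miss takes the per-key lock, re-checks the cache, and only then runs the loader and stores the JSON-encoded result with the given TTL:

// exampleLoad demonstrates the call shape; "example:answer" and the closure
// are hypothetical placeholders for a real key and authoritative lookup.
func exampleLoad(ctx context.Context, db database.Cache) (int64, error) {
    return getCache[int64](ctx, db, "example:answer", time.Minute, func(ctx context.Context) (int64, error) {
        return 42, nil // stand-in for a database query
    })
}
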
pkg/common/storage/cache/msg.go (vendored, new file, +30)
@@ -0,0 +1,30 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cache

import (
    "context"

    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/model"
)

type MsgCache interface {
    SetSendMsgStatus(ctx context.Context, id string, status int32) error
    GetSendMsgStatus(ctx context.Context, id string) (int32, error)

    GetMessageBySeqs(ctx context.Context, conversationID string, seqs []int64) ([]*model.MsgInfoModel, error)
    DelMessageBySeqs(ctx context.Context, conversationID string, seqs []int64) error
    SetMessageBySeqs(ctx context.Context, conversationID string, msgs []*model.MsgInfoModel) error
}

pkg/common/storage/cache/online.go (vendored, new file, +9)
@@ -0,0 +1,9 @@
package cache

import "context"

type OnlineCache interface {
    GetOnline(ctx context.Context, userID string) ([]int32, error)
    SetUserOnline(ctx context.Context, userID string, online, offline []int32) error
    GetAllOnlineUsers(ctx context.Context, cursor uint64) (map[string][]int32, uint64, error)
}

pkg/common/storage/cache/redis/batch.go (vendored, new file, +135)
@@ -0,0 +1,135 @@
package redis

import (
    "context"
    "encoding/json"
    "time"

    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache"
    "github.com/dtm-labs/rockscache"
    "github.com/openimsdk/tools/errs"
    "github.com/openimsdk/tools/log"
    "github.com/redis/go-redis/v9"
)

// GetRocksCacheOptions returns the default configuration options for RocksCache.
func GetRocksCacheOptions() *rockscache.Options {
    opts := rockscache.NewDefaultOptions()
    opts.LockExpire = rocksCacheTimeout
    opts.WaitReplicasTimeout = rocksCacheTimeout
    opts.StrongConsistency = true
    opts.RandomExpireAdjustment = 0.2

    return &opts
}

func newRocksCacheClient(rdb redis.UniversalClient) *rocksCacheClient {
    if rdb == nil {
        return &rocksCacheClient{}
    }
    rc := &rocksCacheClient{
        rdb:    rdb,
        client: rockscache.NewClient(rdb, *GetRocksCacheOptions()),
    }
    return rc
}

type rocksCacheClient struct {
    rdb    redis.UniversalClient
    client *rockscache.Client
}

func (x *rocksCacheClient) GetClient() *rockscache.Client {
    return x.client
}

func (x *rocksCacheClient) Disable() bool {
    return x.client == nil
}

func (x *rocksCacheClient) GetRedis() redis.UniversalClient {
    return x.rdb
}

func (x *rocksCacheClient) GetBatchDeleter(topics ...string) cache.BatchDeleter {
    return NewBatchDeleterRedis(x, topics)
}

func batchGetCache2[K comparable, V any](ctx context.Context, rcClient *rocksCacheClient, expire time.Duration, ids []K, idKey func(id K) string, vId func(v *V) K, fn func(ctx context.Context, ids []K) ([]*V, error)) ([]*V, error) {
    if len(ids) == 0 {
        return nil, nil
    }
    if rcClient.Disable() {
        return fn(ctx, ids)
    }
    findKeys := make([]string, 0, len(ids))
    keyId := make(map[string]K)
    for _, id := range ids {
        key := idKey(id)
        if _, ok := keyId[key]; ok {
            continue
        }
        keyId[key] = id
        findKeys = append(findKeys, key)
    }
    slotKeys, err := groupKeysBySlot(ctx, rcClient.GetRedis(), findKeys)
    if err != nil {
        return nil, err
    }
    result := make([]*V, 0, len(findKeys))
    for _, keys := range slotKeys {
        indexCache, err := rcClient.GetClient().FetchBatch2(ctx, keys, expire, func(idx []int) (map[int]string, error) {
            queryIds := make([]K, 0, len(idx))
            idIndex := make(map[K]int)
            for _, index := range idx {
                id := keyId[keys[index]]
                idIndex[id] = index
                queryIds = append(queryIds, id)
            }
            values, err := fn(ctx, queryIds)
            if err != nil {
                log.ZError(ctx, "batchGetCache query database failed", err, "keys", keys, "queryIds", queryIds)
                return nil, err
            }
            if len(values) == 0 {
                return map[int]string{}, nil
            }
            cacheIndex := make(map[int]string)
            for _, value := range values {
                id := vId(value)
                index, ok := idIndex[id]
                if !ok {
                    continue
                }
                bs, err := json.Marshal(value)
                if err != nil {
                    log.ZError(ctx, "marshal failed", err)
                    return nil, err
                }
                cacheIndex[index] = string(bs)
            }
            return cacheIndex, nil
        })
        if err != nil {
            return nil, errs.WrapMsg(err, "FetchBatch2 failed")
        }
        for index, data := range indexCache {
            if data == "" {
                continue
            }
            var value V
            if err := json.Unmarshal([]byte(data), &value); err != nil {
                return nil, errs.WrapMsg(err, "Unmarshal failed")
            }
            if cb, ok := any(&value).(BatchCacheCallback[K]); ok {
                cb.BatchCache(keyId[keys[index]])
            }
            result = append(result, &value)
        }
    }
    return result, nil
}

type BatchCacheCallback[K comparable] interface {
    BatchCache(id K)
}

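A sketch of how a concrete cache would drive this helper (the value type, key prefix, and loader are invented): ids are deduplicated, grouped by cluster slot via groupKeysBySlot, and each slot batch goes through rockscache with the loader as the database fallback:

type exampleUser struct {
    UserID string `json:"userID"`
    Name   string `json:"name"`
}

// exampleBatchGet queries only the ids whose keys missed the cache; cached
// entries are decoded from the JSON written by batchGetCache2.
func exampleBatchGet(ctx context.Context, rc *rocksCacheClient, ids []string,
    load func(ctx context.Context, ids []string) ([]*exampleUser, error)) ([]*exampleUser, error) {
    return batchGetCache2(ctx, rc, time.Hour, ids,
        func(id string) string { return "example:user:" + id }, // id -> cache key
        func(v *exampleUser) string { return v.UserID },        // cached value -> id
        load)
}
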
pkg/common/storage/cache/redis/batch_handler.go (vendored, new file, +149)
@@ -0,0 +1,149 @@
package redis

import (
    "context"
    "encoding/json"
    "fmt"
    "time"

    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache"
    "git.imall.cloud/openim/open-im-server-deploy/pkg/localcache"
    "github.com/dtm-labs/rockscache"
    "github.com/openimsdk/tools/errs"
    "github.com/openimsdk/tools/log"
    "github.com/openimsdk/tools/utils/datautil"
    "github.com/redis/go-redis/v9"
)

const (
    rocksCacheTimeout = 11 * time.Second
)

// BatchDeleterRedis is a concrete implementation of the BatchDeleter interface based on Redis and RocksCache.
type BatchDeleterRedis struct {
    redisClient    redis.UniversalClient
    keys           []string
    rocksClient    *rockscache.Client
    redisPubTopics []string
}

// NewBatchDeleterRedis creates a new BatchDeleterRedis instance.
func NewBatchDeleterRedis(rcClient *rocksCacheClient, redisPubTopics []string) *BatchDeleterRedis {
    return &BatchDeleterRedis{
        redisClient:    rcClient.GetRedis(),
        rocksClient:    rcClient.GetClient(),
        redisPubTopics: redisPubTopics,
    }
}

// ExecDelWithKeys directly takes keys for batch deletion and publishes deletion information.
func (c *BatchDeleterRedis) ExecDelWithKeys(ctx context.Context, keys []string) error {
    distinctKeys := datautil.Distinct(keys)
    return c.execDel(ctx, distinctKeys)
}

// ChainExecDel is used for chain calls for batch deletion. It must call Clone to prevent memory pollution.
func (c *BatchDeleterRedis) ChainExecDel(ctx context.Context) error {
    distinctKeys := datautil.Distinct(c.keys)
    return c.execDel(ctx, distinctKeys)
}

// execDel performs batch deletion and publishes the deleted keys to update the local cache information of other nodes.
func (c *BatchDeleterRedis) execDel(ctx context.Context, keys []string) error {
    if len(keys) > 0 {
        log.ZDebug(ctx, "delete cache", "topic", c.redisPubTopics, "keys", keys)
        // Batch delete keys
        err := ProcessKeysBySlot(ctx, c.redisClient, keys, func(ctx context.Context, slot int64, keys []string) error {
            return c.rocksClient.TagAsDeletedBatch2(ctx, keys)
        })
        if err != nil {
            return err
        }
        // Publish the deleted keys to Redis to update the local cache information of other nodes
        if len(c.redisPubTopics) > 0 && len(keys) > 0 {
            keysByTopic := localcache.GetPublishKeysByTopic(c.redisPubTopics, keys)
            for topic, keys := range keysByTopic {
                if len(keys) > 0 {
                    data, err := json.Marshal(keys)
                    if err != nil {
                        log.ZWarn(ctx, "keys json marshal failed", err, "topic", topic, "keys", keys)
                    } else {
                        if err := c.redisClient.Publish(ctx, topic, string(data)).Err(); err != nil {
                            log.ZWarn(ctx, "redis publish cache delete error", err, "topic", topic, "keys", keys)
                        }
                    }
                }
            }
        }
    }
    return nil
}

// Clone creates a copy of BatchDeleterRedis for chain calls to prevent memory pollution.
func (c *BatchDeleterRedis) Clone() cache.BatchDeleter {
    return &BatchDeleterRedis{
        redisClient:    c.redisClient,
        keys:           c.keys,
        rocksClient:    c.rocksClient,
        redisPubTopics: c.redisPubTopics,
    }
}

// AddKeys adds keys to be deleted.
func (c *BatchDeleterRedis) AddKeys(keys ...string) {
    c.keys = append(c.keys, keys...)
}

type disableBatchDeleter struct{}

func (x disableBatchDeleter) ChainExecDel(ctx context.Context) error {
    return nil
}

func (x disableBatchDeleter) ExecDelWithKeys(ctx context.Context, keys []string) error {
    return nil
}

func (x disableBatchDeleter) Clone() cache.BatchDeleter {
    return x
}

func (x disableBatchDeleter) AddKeys(keys ...string) {}

func getCache[T any](ctx context.Context, rcClient *rocksCacheClient, key string, expire time.Duration, fn func(ctx context.Context) (T, error)) (T, error) {
    if rcClient.Disable() {
        return fn(ctx)
    }
    var t T
    var write bool
    v, err := rcClient.GetClient().Fetch2(ctx, key, expire, func() (s string, err error) {
        t, err = fn(ctx)
        if err != nil {
            //log.ZError(ctx, "getCache query database failed", err, "key", key)
            return "", err
        }
        bs, err := json.Marshal(t)
        if err != nil {
            return "", errs.WrapMsg(err, "marshal failed")
        }
        write = true

        return string(bs), nil
    })
    if err != nil {
        return t, errs.Wrap(err)
    }
    if write {
        return t, nil
    }
    if v == "" {
        return t, errs.ErrRecordNotFound.WrapMsg("cache is not found")
    }
    err = json.Unmarshal([]byte(v), &t)
    if err != nil {
        errInfo := fmt.Sprintf("cache json.Unmarshal failed, key:%s, value:%s, expire:%s", key, v, expire)
        return t, errs.WrapMsg(err, errInfo)
    }

    return t, nil
}

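execDel above publishes the deleted key lists so other nodes can drop their local copies; a hedged sketch of the consuming side (the handler wiring is invented, only the JSON-encoded []string payload format comes from execDel):

// subscribeInvalidations evicts local-cache entries named in each published
// payload; payloads are the JSON-encoded key slices produced by execDel.
func subscribeInvalidations(ctx context.Context, rdb redis.UniversalClient, topic string, evict func(keys []string)) {
    sub := rdb.Subscribe(ctx, topic)
    defer sub.Close()
    for msg := range sub.Channel() {
        var keys []string
        if err := json.Unmarshal([]byte(msg.Payload), &keys); err != nil {
            log.ZWarn(ctx, "bad invalidation payload", err, "topic", topic)
            continue
        }
        evict(keys) // e.g. drop the keys from the process-local cache
    }
}
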
pkg/common/storage/cache/redis/batch_test.go (vendored, new file, +56)
@@ -0,0 +1,56 @@
package redis

import (
    "context"
    "testing"

    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/config"
    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/database/mgo"
    "github.com/openimsdk/tools/db/mongoutil"
    "github.com/openimsdk/tools/db/redisutil"
)

func TestName(t *testing.T) {
    //var rocks rockscache.Client
    //rdb := getRocksCacheRedisClient(&rocks)
    //t.Log(rdb == nil)

    ctx := context.Background()
    rdb, err := redisutil.NewRedisClient(ctx, (&config.Redis{
        Address:  []string{"172.16.8.48:16379"},
        Password: "openIM123",
        DB:       3,
    }).Build())
    if err != nil {
        panic(err)
    }
    mgocli, err := mongoutil.NewMongoDB(ctx, (&config.Mongo{
        Address:     []string{"172.16.8.48:37017"},
        Database:    "openim_v3",
        Username:    "openIM",
        Password:    "openIM123",
        MaxPoolSize: 100,
        MaxRetry:    1,
    }).Build())
    if err != nil {
        panic(err)
    }
    //userMgo, err := mgo.NewUserMongo(mgocli.GetDB())
    //if err != nil {
    //    panic(err)
    //}
    //rock := rockscache.NewClient(rdb, rockscache.NewDefaultOptions())
    mgoSeqUser, err := mgo.NewSeqUserMongo(mgocli.GetDB())
    if err != nil {
        panic(err)
    }
    seqUser := NewSeqUserCacheRedis(rdb, mgoSeqUser)

    res, err := seqUser.GetUserReadSeqs(ctx, "2110910952", []string{"sg_2920732023", "sg_345762580"})
    if err != nil {
        panic(err)
    }

    t.Log(res)
}

pkg/common/storage/cache/redis/black.go (vendored, new file, +65)
@@ -0,0 +1,65 @@
package redis

import (
    "context"
    "time"

    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/config"
    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache"
    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache/cachekey"
    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/database"
    "github.com/redis/go-redis/v9"
)

const (
    blackExpireTime = time.Second * 60 * 60 * 12
)

type BlackCacheRedis struct {
    cache.BatchDeleter
    expireTime time.Duration
    rcClient   *rocksCacheClient
    blackDB    database.Black
}

func NewBlackCacheRedis(rdb redis.UniversalClient, localCache *config.LocalCache, blackDB database.Black) cache.BlackCache {
    rc := newRocksCacheClient(rdb)
    return &BlackCacheRedis{
        BatchDeleter: rc.GetBatchDeleter(localCache.Friend.Topic),
        expireTime:   blackExpireTime,
        rcClient:     rc,
        blackDB:      blackDB,
    }
}

func (b *BlackCacheRedis) CloneBlackCache() cache.BlackCache {
    return &BlackCacheRedis{
        BatchDeleter: b.BatchDeleter.Clone(),
        expireTime:   b.expireTime,
        rcClient:     b.rcClient,
        blackDB:      b.blackDB,
    }
}

func (b *BlackCacheRedis) getBlackIDsKey(ownerUserID string) string {
    return cachekey.GetBlackIDsKey(ownerUserID)
}

func (b *BlackCacheRedis) GetBlackIDs(ctx context.Context, userID string) (blackIDs []string, err error) {
    return getCache(
        ctx,
        b.rcClient,
        b.getBlackIDsKey(userID),
        b.expireTime,
        func(ctx context.Context) ([]string, error) {
            return b.blackDB.FindBlackUserIDs(ctx, userID)
        },
    )
}

func (b *BlackCacheRedis) DelBlackIDs(_ context.Context, userID string) cache.BlackCache {
    cache := b.CloneBlackCache()
    cache.AddKeys(b.getBlackIDsKey(userID))

    return cache
}

pkg/common/storage/cache/redis/client_config.go (vendored, new file, +69)
@@ -0,0 +1,69 @@
package redis

import (
    "context"
    "time"

    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache"
    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache/cachekey"
    "git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/database"
    "github.com/redis/go-redis/v9"
)

func NewClientConfigCache(rdb redis.UniversalClient, mgo database.ClientConfig) cache.ClientConfigCache {
    rc := newRocksCacheClient(rdb)
    return &ClientConfigCache{
        mgo:      mgo,
        rcClient: rc,
        delete:   rc.GetBatchDeleter(),
    }
}

type ClientConfigCache struct {
    mgo      database.ClientConfig
    rcClient *rocksCacheClient
    delete   cache.BatchDeleter
}

func (x *ClientConfigCache) getExpireTime(userID string) time.Duration {
    if userID == "" {
        return time.Hour * 24
    } else {
        return time.Hour
    }
}

func (x *ClientConfigCache) getClientConfigKey(userID string) string {
    return cachekey.GetClientConfigKey(userID)
}

func (x *ClientConfigCache) GetConfig(ctx context.Context, userID string) (map[string]string, error) {
    return getCache(ctx, x.rcClient, x.getClientConfigKey(userID), x.getExpireTime(userID), func(ctx context.Context) (map[string]string, error) {
        return x.mgo.Get(ctx, userID)
    })
}

func (x *ClientConfigCache) DeleteUserCache(ctx context.Context, userIDs []string) error {
    keys := make([]string, 0, len(userIDs))
    for _, userID := range userIDs {
        keys = append(keys, x.getClientConfigKey(userID))
    }
    return x.delete.ExecDelWithKeys(ctx, keys)
}

func (x *ClientConfigCache) GetUserConfig(ctx context.Context, userID string) (map[string]string, error) {
    config, err := x.GetConfig(ctx, "")
    if err != nil {
        return nil, err
    }
    if userID != "" {
        userConfig, err := x.GetConfig(ctx, userID)
        if err != nil {
            return nil, err
        }
        for k, v := range userConfig {
            config[k] = v
        }
    }
    return config, nil
}

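To make the merge order in GetUserConfig concrete, a self-contained rendering of what it computes (keys and values invented): the global document stored under userID "" is read first and per-user entries overwrite it:

func exampleMergedConfig() map[string]string {
    global := map[string]string{"ui.theme": "light", "log.level": "info"} // config stored under userID ""
    user := map[string]string{"ui.theme": "dark"}                         // config stored under "u1"
    merged := make(map[string]string, len(global))
    for k, v := range global {
        merged[k] = v
    }
    for k, v := range user {
        merged[k] = v // user-level entries win
    }
    return merged // {"ui.theme": "dark", "log.level": "info"}
}
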
276
pkg/common/storage/cache/redis/conversation.go
vendored
Normal file
276
pkg/common/storage/cache/redis/conversation.go
vendored
Normal file
@@ -0,0 +1,276 @@
package redis

import (
	"context"
	"math/big"
	"strings"
	"time"

	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/config"
	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache"
	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache/cachekey"
	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/database"
	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/model"
	"github.com/openimsdk/tools/utils/datautil"
	"github.com/openimsdk/tools/utils/encrypt"
	"github.com/redis/go-redis/v9"
)

const (
	conversationExpireTime = time.Second * 60 * 60 * 12
)

func NewConversationRedis(rdb redis.UniversalClient, localCache *config.LocalCache, db database.Conversation) cache.ConversationCache {
	rc := newRocksCacheClient(rdb)
	return &ConversationRedisCache{
		BatchDeleter:   rc.GetBatchDeleter(localCache.Conversation.Topic),
		rcClient:       rc,
		conversationDB: db,
		expireTime:     conversationExpireTime,
	}
}

type ConversationRedisCache struct {
	cache.BatchDeleter
	rcClient       *rocksCacheClient
	conversationDB database.Conversation
	expireTime     time.Duration
}

func (c *ConversationRedisCache) CloneConversationCache() cache.ConversationCache {
	return &ConversationRedisCache{
		BatchDeleter:   c.BatchDeleter.Clone(),
		rcClient:       c.rcClient,
		conversationDB: c.conversationDB,
		expireTime:     c.expireTime,
	}
}

func (c *ConversationRedisCache) getConversationKey(ownerUserID, conversationID string) string {
	return cachekey.GetConversationKey(ownerUserID, conversationID)
}

func (c *ConversationRedisCache) getConversationIDsKey(ownerUserID string) string {
	return cachekey.GetConversationIDsKey(ownerUserID)
}

func (c *ConversationRedisCache) getNotNotifyConversationIDsKey(ownerUserID string) string {
	return cachekey.GetNotNotifyConversationIDsKey(ownerUserID)
}

func (c *ConversationRedisCache) getPinnedConversationIDsKey(ownerUserID string) string {
	return cachekey.GetPinnedConversationIDs(ownerUserID)
}

func (c *ConversationRedisCache) getSuperGroupRecvNotNotifyUserIDsKey(groupID string) string {
	return cachekey.GetSuperGroupRecvNotNotifyUserIDsKey(groupID)
}

func (c *ConversationRedisCache) getRecvMsgOptKey(ownerUserID, conversationID string) string {
	return cachekey.GetRecvMsgOptKey(ownerUserID, conversationID)
}

func (c *ConversationRedisCache) getSuperGroupRecvNotNotifyUserIDsHashKey(groupID string) string {
	return cachekey.GetSuperGroupRecvNotNotifyUserIDsHashKey(groupID)
}

func (c *ConversationRedisCache) getConversationHasReadSeqKey(ownerUserID, conversationID string) string {
	return cachekey.GetConversationHasReadSeqKey(ownerUserID, conversationID)
}

func (c *ConversationRedisCache) getConversationNotReceiveMessageUserIDsKey(conversationID string) string {
	return cachekey.GetConversationNotReceiveMessageUserIDsKey(conversationID)
}

func (c *ConversationRedisCache) getUserConversationIDsHashKey(ownerUserID string) string {
	return cachekey.GetUserConversationIDsHashKey(ownerUserID)
}

func (c *ConversationRedisCache) getConversationUserMaxVersionKey(ownerUserID string) string {
	return cachekey.GetConversationUserMaxVersionKey(ownerUserID)
}

func (c *ConversationRedisCache) GetUserConversationIDs(ctx context.Context, ownerUserID string) ([]string, error) {
	return getCache(ctx, c.rcClient, c.getConversationIDsKey(ownerUserID), c.expireTime, func(ctx context.Context) ([]string, error) {
		return c.conversationDB.FindUserIDAllConversationID(ctx, ownerUserID)
	})
}

func (c *ConversationRedisCache) GetUserNotNotifyConversationIDs(ctx context.Context, userID string) ([]string, error) {
	return getCache(ctx, c.rcClient, c.getNotNotifyConversationIDsKey(userID), c.expireTime, func(ctx context.Context) ([]string, error) {
		return c.conversationDB.FindUserIDAllNotNotifyConversationID(ctx, userID)
	})
}

func (c *ConversationRedisCache) GetPinnedConversationIDs(ctx context.Context, userID string) ([]string, error) {
	return getCache(ctx, c.rcClient, c.getPinnedConversationIDsKey(userID), c.expireTime, func(ctx context.Context) ([]string, error) {
		return c.conversationDB.FindUserIDAllPinnedConversationID(ctx, userID)
	})
}

func (c *ConversationRedisCache) DelConversationIDs(userIDs ...string) cache.ConversationCache {
	keys := make([]string, 0, len(userIDs))
	for _, userID := range userIDs {
		keys = append(keys, c.getConversationIDsKey(userID))
	}
	cache := c.CloneConversationCache()
	cache.AddKeys(keys...)

	return cache
}

func (c *ConversationRedisCache) GetUserConversationIDsHash(ctx context.Context, ownerUserID string) (hash uint64, err error) {
	return getCache(
		ctx,
		c.rcClient,
		c.getUserConversationIDsHashKey(ownerUserID),
		c.expireTime,
		func(ctx context.Context) (uint64, error) {
			conversationIDs, err := c.GetUserConversationIDs(ctx, ownerUserID)
			if err != nil {
				return 0, err
			}
			datautil.Sort(conversationIDs, true)
			bi := big.NewInt(0)
			bi.SetString(encrypt.Md5(strings.Join(conversationIDs, ";"))[0:8], 16)
			return bi.Uint64(), nil
		},
	)
}
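// A standalone sketch of the hash derivation above, using only the standard
// library (assumption: encrypt.Md5 returns the lowercase hex MD5 digest).
// Sorting first makes the hash independent of conversation order; clients can
// compare this value against their local one to detect conversation changes.
//
//	func conversationIDsHash(ids []string) uint64 {
//		sort.Strings(ids)
//		sum := md5.Sum([]byte(strings.Join(ids, ";")))
//		digest := hex.EncodeToString(sum[:])
//		v, _ := new(big.Int).SetString(digest[:8], 16) // first 8 hex chars -> 32-bit value
//		return v.Uint64()
//	}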
func (c *ConversationRedisCache) DelUserConversationIDsHash(ownerUserIDs ...string) cache.ConversationCache {
	keys := make([]string, 0, len(ownerUserIDs))
	for _, ownerUserID := range ownerUserIDs {
		keys = append(keys, c.getUserConversationIDsHashKey(ownerUserID))
	}
	cache := c.CloneConversationCache()
	cache.AddKeys(keys...)

	return cache
}

func (c *ConversationRedisCache) GetConversation(ctx context.Context, ownerUserID, conversationID string) (*model.Conversation, error) {
	return getCache(ctx, c.rcClient, c.getConversationKey(ownerUserID, conversationID), c.expireTime, func(ctx context.Context) (*model.Conversation, error) {
		return c.conversationDB.Take(ctx, ownerUserID, conversationID)
	})
}

func (c *ConversationRedisCache) DelConversations(ownerUserID string, conversationIDs ...string) cache.ConversationCache {
	keys := make([]string, 0, len(conversationIDs))
	for _, conversationID := range conversationIDs {
		keys = append(keys, c.getConversationKey(ownerUserID, conversationID))
	}
	cache := c.CloneConversationCache()
	cache.AddKeys(keys...)

	return cache
}

func (c *ConversationRedisCache) GetConversations(ctx context.Context, ownerUserID string, conversationIDs []string) ([]*model.Conversation, error) {
	return batchGetCache2(ctx, c.rcClient, c.expireTime, conversationIDs, func(conversationID string) string {
		return c.getConversationKey(ownerUserID, conversationID)
	}, func(conversation *model.Conversation) string {
		return conversation.ConversationID
	}, func(ctx context.Context, conversationIDs []string) ([]*model.Conversation, error) {
		return c.conversationDB.Find(ctx, ownerUserID, conversationIDs)
	})
}

func (c *ConversationRedisCache) GetUserAllConversations(ctx context.Context, ownerUserID string) ([]*model.Conversation, error) {
	conversationIDs, err := c.GetUserConversationIDs(ctx, ownerUserID)
	if err != nil {
		return nil, err
	}
	return c.GetConversations(ctx, ownerUserID, conversationIDs)
}

func (c *ConversationRedisCache) GetUserRecvMsgOpt(ctx context.Context, ownerUserID, conversationID string) (opt int, err error) {
	return getCache(ctx, c.rcClient, c.getRecvMsgOptKey(ownerUserID, conversationID), c.expireTime, func(ctx context.Context) (opt int, err error) {
		return c.conversationDB.GetUserRecvMsgOpt(ctx, ownerUserID, conversationID)
	})
}

func (c *ConversationRedisCache) DelUsersConversation(conversationID string, ownerUserIDs ...string) cache.ConversationCache {
	keys := make([]string, 0, len(ownerUserIDs))
	for _, ownerUserID := range ownerUserIDs {
		keys = append(keys, c.getConversationKey(ownerUserID, conversationID))
	}
	cache := c.CloneConversationCache()
	cache.AddKeys(keys...)

	return cache
}

func (c *ConversationRedisCache) DelUserRecvMsgOpt(ownerUserID, conversationID string) cache.ConversationCache {
	cache := c.CloneConversationCache()
	cache.AddKeys(c.getRecvMsgOptKey(ownerUserID, conversationID))

	return cache
}

func (c *ConversationRedisCache) DelSuperGroupRecvMsgNotNotifyUserIDs(groupID string) cache.ConversationCache {
	cache := c.CloneConversationCache()
	cache.AddKeys(c.getSuperGroupRecvNotNotifyUserIDsKey(groupID))

	return cache
}

func (c *ConversationRedisCache) DelSuperGroupRecvMsgNotNotifyUserIDsHash(groupID string) cache.ConversationCache {
	cache := c.CloneConversationCache()
	cache.AddKeys(c.getSuperGroupRecvNotNotifyUserIDsHashKey(groupID))

	return cache
}

func (c *ConversationRedisCache) DelUserAllHasReadSeqs(ownerUserID string, conversationIDs ...string) cache.ConversationCache {
	cache := c.CloneConversationCache()
	for _, conversationID := range conversationIDs {
		cache.AddKeys(c.getConversationHasReadSeqKey(ownerUserID, conversationID))
	}

	return cache
}

func (c *ConversationRedisCache) GetConversationNotReceiveMessageUserIDs(ctx context.Context, conversationID string) ([]string, error) {
	return getCache(ctx, c.rcClient, c.getConversationNotReceiveMessageUserIDsKey(conversationID), c.expireTime, func(ctx context.Context) ([]string, error) {
		return c.conversationDB.GetConversationNotReceiveMessageUserIDs(ctx, conversationID)
	})
}

func (c *ConversationRedisCache) DelConversationNotReceiveMessageUserIDs(conversationIDs ...string) cache.ConversationCache {
	cache := c.CloneConversationCache()
	for _, conversationID := range conversationIDs {
		cache.AddKeys(c.getConversationNotReceiveMessageUserIDsKey(conversationID))
	}
	return cache
}

func (c *ConversationRedisCache) DelConversationNotNotifyMessageUserIDs(userIDs ...string) cache.ConversationCache {
	cache := c.CloneConversationCache()
	for _, userID := range userIDs {
		cache.AddKeys(c.getNotNotifyConversationIDsKey(userID))
	}
	return cache
}

func (c *ConversationRedisCache) DelUserPinnedConversations(userIDs ...string) cache.ConversationCache {
	cache := c.CloneConversationCache()
	for _, userID := range userIDs {
		cache.AddKeys(c.getPinnedConversationIDsKey(userID))
	}
	return cache
}

func (c *ConversationRedisCache) DelConversationVersionUserIDs(userIDs ...string) cache.ConversationCache {
	cache := c.CloneConversationCache()
	for _, userID := range userIDs {
		cache.AddKeys(c.getConversationUserMaxVersionKey(userID))
	}
	return cache
}

func (c *ConversationRedisCache) FindMaxConversationUserVersion(ctx context.Context, userID string) (*model.VersionLog, error) {
	return getCache(ctx, c.rcClient, c.getConversationUserMaxVersionKey(userID), c.expireTime, func(ctx context.Context) (*model.VersionLog, error) {
		return c.conversationDB.FindConversationUserVersion(ctx, userID, 0, 0)
	})
}
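// The Del* methods above only stage keys on a cloned cache; nothing is removed
// until the chain is executed. A usage sketch (receiver and IDs illustrative):
//
//	err := convCache.DelConversations(ownerID, convID).
//		DelUserConversationIDsHash(ownerID).
//		ChainExecDel(ctx) // deletes all staged keys and publishes the invalidation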
167
pkg/common/storage/cache/redis/friend.go
vendored
Normal file
@@ -0,0 +1,167 @@
package redis

import (
	"context"
	"time"

	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/config"
	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache"
	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache/cachekey"
	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/database"
	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/model"
	"github.com/openimsdk/tools/utils/datautil"
	"github.com/redis/go-redis/v9"
)

const (
	friendExpireTime = time.Second * 60 * 60 * 12
)

// FriendCacheRedis is an implementation of the FriendCache interface using Redis.
type FriendCacheRedis struct {
	cache.BatchDeleter
	friendDB   database.Friend
	expireTime time.Duration
	rcClient   *rocksCacheClient
	syncCount  int
}

// NewFriendCacheRedis creates a new instance of FriendCacheRedis.
func NewFriendCacheRedis(rdb redis.UniversalClient, localCache *config.LocalCache, friendDB database.Friend) cache.FriendCache {
	rc := newRocksCacheClient(rdb)
	return &FriendCacheRedis{
		BatchDeleter: rc.GetBatchDeleter(localCache.Friend.Topic),
		friendDB:     friendDB,
		expireTime:   friendExpireTime,
		rcClient:     rc,
	}
}

func (f *FriendCacheRedis) CloneFriendCache() cache.FriendCache {
	return &FriendCacheRedis{
		BatchDeleter: f.BatchDeleter.Clone(),
		friendDB:     f.friendDB,
		expireTime:   f.expireTime,
		rcClient:     f.rcClient,
	}
}

// getFriendIDsKey returns the key for storing friend IDs in the cache.
func (f *FriendCacheRedis) getFriendIDsKey(ownerUserID string) string {
	return cachekey.GetFriendIDsKey(ownerUserID)
}

func (f *FriendCacheRedis) getFriendMaxVersionKey(ownerUserID string) string {
	return cachekey.GetFriendMaxVersionKey(ownerUserID)
}

// getTwoWayFriendsIDsKey returns the key for storing two-way friend IDs in the cache.
func (f *FriendCacheRedis) getTwoWayFriendsIDsKey(ownerUserID string) string {
	return cachekey.GetTwoWayFriendsIDsKey(ownerUserID)
}

// getFriendKey returns the key for storing friend info in the cache.
func (f *FriendCacheRedis) getFriendKey(ownerUserID, friendUserID string) string {
	return cachekey.GetFriendKey(ownerUserID, friendUserID)
}

// GetFriendIDs retrieves friend IDs from the cache or the database if not found.
func (f *FriendCacheRedis) GetFriendIDs(ctx context.Context, ownerUserID string) (friendIDs []string, err error) {
	return getCache(ctx, f.rcClient, f.getFriendIDsKey(ownerUserID), f.expireTime, func(ctx context.Context) ([]string, error) {
		return f.friendDB.FindFriendUserIDs(ctx, ownerUserID)
	})
}

// DelFriendIDs deletes friend IDs from the cache.
func (f *FriendCacheRedis) DelFriendIDs(ownerUserIDs ...string) cache.FriendCache {
	newFriendCache := f.CloneFriendCache()
	keys := make([]string, 0, len(ownerUserIDs))
	for _, userID := range ownerUserIDs {
		keys = append(keys, f.getFriendIDsKey(userID))
	}
	newFriendCache.AddKeys(keys...)

	return newFriendCache
}
// GetTwoWayFriendIDs retrieves two-way friend IDs from the cache.
func (f *FriendCacheRedis) GetTwoWayFriendIDs(ctx context.Context, ownerUserID string) (twoWayFriendIDs []string, err error) {
	friendIDs, err := f.GetFriendIDs(ctx, ownerUserID)
	if err != nil {
		return nil, err
	}
	for _, friendID := range friendIDs {
		friendFriendIDs, err := f.GetFriendIDs(ctx, friendID)
		if err != nil {
			return nil, err
		}
		if datautil.Contain(ownerUserID, friendFriendIDs...) {
			// The relation is mutual, so record the friend (the original code
			// appended ownerUserID here, which returned the owner once per match).
			twoWayFriendIDs = append(twoWayFriendIDs, friendID)
		}
	}

	return twoWayFriendIDs, nil
}
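// Note: the mutual check above performs one cached friend-list read per friend
// (N+1 lookups on a cold cache); callers with very large friend lists may
// prefer a dedicated database query.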
// DelTwoWayFriendIDs deletes two-way friend IDs from the cache.
func (f *FriendCacheRedis) DelTwoWayFriendIDs(ctx context.Context, ownerUserID string) cache.FriendCache {
	newFriendCache := f.CloneFriendCache()
	newFriendCache.AddKeys(f.getTwoWayFriendsIDsKey(ownerUserID))

	return newFriendCache
}

// GetFriend retrieves friend info from the cache or the database if not found.
func (f *FriendCacheRedis) GetFriend(ctx context.Context, ownerUserID, friendUserID string) (friend *model.Friend, err error) {
	return getCache(ctx, f.rcClient, f.getFriendKey(ownerUserID, friendUserID), f.expireTime, func(ctx context.Context) (*model.Friend, error) {
		return f.friendDB.Take(ctx, ownerUserID, friendUserID)
	})
}

// DelFriend deletes friend info from the cache.
func (f *FriendCacheRedis) DelFriend(ownerUserID, friendUserID string) cache.FriendCache {
	newFriendCache := f.CloneFriendCache()
	newFriendCache.AddKeys(f.getFriendKey(ownerUserID, friendUserID))

	return newFriendCache
}

// DelFriends deletes multiple friend infos from the cache.
func (f *FriendCacheRedis) DelFriends(ownerUserID string, friendUserIDs []string) cache.FriendCache {
	newFriendCache := f.CloneFriendCache()

	for _, friendUserID := range friendUserIDs {
		key := f.getFriendKey(ownerUserID, friendUserID)
		newFriendCache.AddKeys(key) // AddKeys marks the keys for deletion
	}

	return newFriendCache
}

func (f *FriendCacheRedis) DelOwner(friendUserID string, ownerUserIDs []string) cache.FriendCache {
	newFriendCache := f.CloneFriendCache()

	for _, ownerUserID := range ownerUserIDs {
		key := f.getFriendKey(ownerUserID, friendUserID)
		newFriendCache.AddKeys(key) // AddKeys marks the keys for deletion
	}

	return newFriendCache
}

func (f *FriendCacheRedis) DelMaxFriendVersion(ownerUserIDs ...string) cache.FriendCache {
	newFriendCache := f.CloneFriendCache()
	for _, ownerUserID := range ownerUserIDs {
		key := f.getFriendMaxVersionKey(ownerUserID)
		newFriendCache.AddKeys(key) // AddKeys marks the keys for deletion
	}

	return newFriendCache
}

func (f *FriendCacheRedis) FindMaxFriendVersion(ctx context.Context, ownerUserID string) (*model.VersionLog, error) {
	return getCache(ctx, f.rcClient, f.getFriendMaxVersionKey(ownerUserID), f.expireTime, func(ctx context.Context) (*model.VersionLog, error) {
		return f.friendDB.FindIncrVersion(ctx, ownerUserID, 0, 0)
	})
}
385
pkg/common/storage/cache/redis/group.go
vendored
Normal file
@@ -0,0 +1,385 @@
package redis

import (
	"context"
	"fmt"
	"time"

	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/config"
	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache"
	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache/cachekey"
	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/common"
	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/database"
	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/model"
	"git.imall.cloud/openim/protocol/constant"
	"github.com/openimsdk/tools/errs"
	"github.com/openimsdk/tools/log"
	"github.com/redis/go-redis/v9"
)

const (
	groupExpireTime = time.Second * 60 * 60 * 12
)

type GroupCacheRedis struct {
	cache.BatchDeleter
	groupDB        database.Group
	groupMemberDB  database.GroupMember
	groupRequestDB database.GroupRequest
	expireTime     time.Duration
	rcClient       *rocksCacheClient
	groupHash      cache.GroupHash
}

func NewGroupCacheRedis(rdb redis.UniversalClient, localCache *config.LocalCache, groupDB database.Group, groupMemberDB database.GroupMember, groupRequestDB database.GroupRequest, hashCode cache.GroupHash) cache.GroupCache {
	rc := newRocksCacheClient(rdb)
	return &GroupCacheRedis{
		BatchDeleter:   rc.GetBatchDeleter(localCache.Group.Topic),
		rcClient:       rc,
		expireTime:     groupExpireTime,
		groupDB:        groupDB,
		groupMemberDB:  groupMemberDB,
		groupRequestDB: groupRequestDB,
		groupHash:      hashCode,
	}
}
func (g *GroupCacheRedis) CloneGroupCache() cache.GroupCache {
	return &GroupCacheRedis{
		BatchDeleter:   g.BatchDeleter.Clone(),
		rcClient:       g.rcClient,
		expireTime:     g.expireTime,
		groupDB:        g.groupDB,
		groupMemberDB:  g.groupMemberDB,
		groupRequestDB: g.groupRequestDB,
		groupHash:      g.groupHash, // previously dropped on clone, which broke member-hash reads on cloned caches
	}
}
func (g *GroupCacheRedis) getGroupInfoKey(groupID string) string {
	return cachekey.GetGroupInfoKey(groupID)
}

func (g *GroupCacheRedis) getJoinedGroupsKey(userID string) string {
	return cachekey.GetJoinedGroupsKey(userID)
}

func (g *GroupCacheRedis) getGroupMembersHashKey(groupID string) string {
	return cachekey.GetGroupMembersHashKey(groupID)
}

func (g *GroupCacheRedis) getGroupMemberIDsKey(groupID string) string {
	return cachekey.GetGroupMemberIDsKey(groupID)
}

func (g *GroupCacheRedis) getGroupMemberInfoKey(groupID, userID string) string {
	return cachekey.GetGroupMemberInfoKey(groupID, userID)
}

func (g *GroupCacheRedis) getGroupMemberNumKey(groupID string) string {
	return cachekey.GetGroupMemberNumKey(groupID)
}

func (g *GroupCacheRedis) getGroupRoleLevelMemberIDsKey(groupID string, roleLevel int32) string {
	return cachekey.GetGroupRoleLevelMemberIDsKey(groupID, roleLevel)
}

func (g *GroupCacheRedis) getGroupMemberMaxVersionKey(groupID string) string {
	return cachekey.GetGroupMemberMaxVersionKey(groupID)
}

func (g *GroupCacheRedis) getJoinGroupMaxVersionKey(userID string) string {
	return cachekey.GetJoinGroupMaxVersionKey(userID)
}

func (g *GroupCacheRedis) getGroupID(group *model.Group) string {
	return group.GroupID
}

func (g *GroupCacheRedis) GetGroupsInfo(ctx context.Context, groupIDs []string) (groups []*model.Group, err error) {
	return batchGetCache2(ctx, g.rcClient, g.expireTime, groupIDs, g.getGroupInfoKey, g.getGroupID, g.groupDB.Find)
}

func (g *GroupCacheRedis) GetGroupInfo(ctx context.Context, groupID string) (group *model.Group, err error) {
	return getCache(ctx, g.rcClient, g.getGroupInfoKey(groupID), g.expireTime, func(ctx context.Context) (*model.Group, error) {
		return g.groupDB.Take(ctx, groupID)
	})
}

func (g *GroupCacheRedis) DelGroupsInfo(groupIDs ...string) cache.GroupCache {
	newGroupCache := g.CloneGroupCache()
	keys := make([]string, 0, len(groupIDs))
	for _, groupID := range groupIDs {
		keys = append(keys, g.getGroupInfoKey(groupID))
	}
	newGroupCache.AddKeys(keys...)

	return newGroupCache
}

func (g *GroupCacheRedis) DelGroupsOwner(groupIDs ...string) cache.GroupCache {
	newGroupCache := g.CloneGroupCache()
	keys := make([]string, 0, len(groupIDs))
	for _, groupID := range groupIDs {
		keys = append(keys, g.getGroupRoleLevelMemberIDsKey(groupID, constant.GroupOwner))
	}
	newGroupCache.AddKeys(keys...)

	return newGroupCache
}

func (g *GroupCacheRedis) DelGroupRoleLevel(groupID string, roleLevels []int32) cache.GroupCache {
	newGroupCache := g.CloneGroupCache()
	keys := make([]string, 0, len(roleLevels))
	for _, roleLevel := range roleLevels {
		keys = append(keys, g.getGroupRoleLevelMemberIDsKey(groupID, roleLevel))
	}
	newGroupCache.AddKeys(keys...)
	return newGroupCache
}

func (g *GroupCacheRedis) DelGroupAllRoleLevel(groupID string) cache.GroupCache {
	return g.DelGroupRoleLevel(groupID, []int32{constant.GroupOwner, constant.GroupAdmin, constant.GroupOrdinaryUsers})
}

func (g *GroupCacheRedis) GetGroupMembersHash(ctx context.Context, groupID string) (hashCode uint64, err error) {
	if g.groupHash == nil {
		return 0, errs.ErrInternalServer.WrapMsg("group hash is nil")
	}
	return getCache(ctx, g.rcClient, g.getGroupMembersHashKey(groupID), g.expireTime, func(ctx context.Context) (uint64, error) {
		return g.groupHash.GetGroupHash(ctx, groupID)
	})
}

func (g *GroupCacheRedis) GetGroupMemberHashMap(ctx context.Context, groupIDs []string) (map[string]*common.GroupSimpleUserID, error) {
	if g.groupHash == nil {
		return nil, errs.ErrInternalServer.WrapMsg("group hash is nil")
	}
	res := make(map[string]*common.GroupSimpleUserID)
	for _, groupID := range groupIDs {
		hash, err := g.GetGroupMembersHash(ctx, groupID)
		if err != nil {
			return nil, err
		}
		log.ZDebug(ctx, "GetGroupMemberHashMap", "groupID", groupID, "hash", hash)
		num, err := g.GetGroupMemberNum(ctx, groupID)
		if err != nil {
			return nil, err
		}
		res[groupID] = &common.GroupSimpleUserID{Hash: hash, MemberNum: uint32(num)}
	}

	return res, nil
}

func (g *GroupCacheRedis) DelGroupMembersHash(groupID string) cache.GroupCache {
	cache := g.CloneGroupCache()
	cache.AddKeys(g.getGroupMembersHashKey(groupID))

	return cache
}

func (g *GroupCacheRedis) GetGroupMemberIDs(ctx context.Context, groupID string) (groupMemberIDs []string, err error) {
	return getCache(ctx, g.rcClient, g.getGroupMemberIDsKey(groupID), g.expireTime, func(ctx context.Context) ([]string, error) {
		return g.groupMemberDB.FindMemberUserID(ctx, groupID)
	})
}

func (g *GroupCacheRedis) DelGroupMemberIDs(groupID string) cache.GroupCache {
	cache := g.CloneGroupCache()
	cache.AddKeys(g.getGroupMemberIDsKey(groupID))

	return cache
}

func (g *GroupCacheRedis) findUserJoinedGroupID(ctx context.Context, userID string) ([]string, error) {
	groupIDs, err := g.groupMemberDB.FindUserJoinedGroupID(ctx, userID)
	if err != nil {
		return nil, err
	}
	return g.groupDB.FindJoinSortGroupID(ctx, groupIDs)
}

func (g *GroupCacheRedis) GetJoinedGroupIDs(ctx context.Context, userID string) (joinedGroupIDs []string, err error) {
	return getCache(ctx, g.rcClient, g.getJoinedGroupsKey(userID), g.expireTime, func(ctx context.Context) ([]string, error) {
		return g.findUserJoinedGroupID(ctx, userID)
	})
}

func (g *GroupCacheRedis) DelJoinedGroupID(userIDs ...string) cache.GroupCache {
	keys := make([]string, 0, len(userIDs))
	for _, userID := range userIDs {
		keys = append(keys, g.getJoinedGroupsKey(userID))
	}
	cache := g.CloneGroupCache()
	cache.AddKeys(keys...)

	return cache
}

func (g *GroupCacheRedis) GetGroupMemberInfo(ctx context.Context, groupID, userID string) (groupMember *model.GroupMember, err error) {
	return getCache(ctx, g.rcClient, g.getGroupMemberInfoKey(groupID, userID), g.expireTime, func(ctx context.Context) (*model.GroupMember, error) {
		return g.groupMemberDB.Take(ctx, groupID, userID)
	})
}

func (g *GroupCacheRedis) GetGroupMembersInfo(ctx context.Context, groupID string, userIDs []string) ([]*model.GroupMember, error) {
	return batchGetCache2(ctx, g.rcClient, g.expireTime, userIDs, func(userID string) string {
		return g.getGroupMemberInfoKey(groupID, userID)
	}, func(member *model.GroupMember) string {
		return member.UserID
	}, func(ctx context.Context, userIDs []string) ([]*model.GroupMember, error) {
		return g.groupMemberDB.Find(ctx, groupID, userIDs)
	})
}
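// batchGetCache2, used throughout this file, drives multi-key reads with three
// callbacks: a key builder, an ID extractor that maps fetched rows back to the
// requested IDs, and a batch fetch for cache misses. A sketch of the call
// shape, with illustrative names:
//
//	members, err := batchGetCache2(ctx, g.rcClient, g.expireTime, userIDs,
//		func(userID string) string { return g.getGroupMemberInfoKey(groupID, userID) }, // ID -> cache key
//		func(m *model.GroupMember) string { return m.UserID },                          // row -> ID
//		func(ctx context.Context, miss []string) ([]*model.GroupMember, error) {        // fill misses from DB
//			return g.groupMemberDB.Find(ctx, groupID, miss)
//		})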
func (g *GroupCacheRedis) GetAllGroupMembersInfo(ctx context.Context, groupID string) (groupMembers []*model.GroupMember, err error) {
	groupMemberIDs, err := g.GetGroupMemberIDs(ctx, groupID)
	if err != nil {
		return nil, err
	}

	return g.GetGroupMembersInfo(ctx, groupID, groupMemberIDs)
}

func (g *GroupCacheRedis) DelGroupMembersInfo(groupID string, userIDs ...string) cache.GroupCache {
	keys := make([]string, 0, len(userIDs))
	for _, userID := range userIDs {
		keys = append(keys, g.getGroupMemberInfoKey(groupID, userID))
	}
	cache := g.CloneGroupCache()
	cache.AddKeys(keys...)

	return cache
}

func (g *GroupCacheRedis) GetGroupMemberNum(ctx context.Context, groupID string) (memberNum int64, err error) {
	return getCache(ctx, g.rcClient, g.getGroupMemberNumKey(groupID), g.expireTime, func(ctx context.Context) (int64, error) {
		return g.groupMemberDB.TakeGroupMemberNum(ctx, groupID)
	})
}

func (g *GroupCacheRedis) DelGroupsMemberNum(groupIDs ...string) cache.GroupCache {
	keys := make([]string, 0, len(groupIDs))
	for _, groupID := range groupIDs {
		keys = append(keys, g.getGroupMemberNumKey(groupID))
	}
	cache := g.CloneGroupCache()
	cache.AddKeys(keys...)

	return cache
}

func (g *GroupCacheRedis) GetGroupOwner(ctx context.Context, groupID string) (*model.GroupMember, error) {
	members, err := g.GetGroupRoleLevelMemberInfo(ctx, groupID, constant.GroupOwner)
	if err != nil {
		return nil, err
	}
	if len(members) == 0 {
		return nil, errs.ErrRecordNotFound.WrapMsg(fmt.Sprintf("group %s owner not found", groupID))
	}
	return members[0], nil
}

func (g *GroupCacheRedis) GetGroupsOwner(ctx context.Context, groupIDs []string) ([]*model.GroupMember, error) {
	members := make([]*model.GroupMember, 0, len(groupIDs))
	for _, groupID := range groupIDs {
		items, err := g.GetGroupRoleLevelMemberInfo(ctx, groupID, constant.GroupOwner)
		if err != nil {
			return nil, err
		}
		if len(items) > 0 {
			members = append(members, items[0])
		}
	}
	return members, nil
}

func (g *GroupCacheRedis) GetGroupRoleLevelMemberIDs(ctx context.Context, groupID string, roleLevel int32) ([]string, error) {
	return getCache(ctx, g.rcClient, g.getGroupRoleLevelMemberIDsKey(groupID, roleLevel), g.expireTime, func(ctx context.Context) ([]string, error) {
		return g.groupMemberDB.FindRoleLevelUserIDs(ctx, groupID, roleLevel)
	})
}

func (g *GroupCacheRedis) GetGroupRoleLevelMemberInfo(ctx context.Context, groupID string, roleLevel int32) ([]*model.GroupMember, error) {
	userIDs, err := g.GetGroupRoleLevelMemberIDs(ctx, groupID, roleLevel)
	if err != nil {
		return nil, err
	}
	return g.GetGroupMembersInfo(ctx, groupID, userIDs)
}

func (g *GroupCacheRedis) GetGroupRolesLevelMemberInfo(ctx context.Context, groupID string, roleLevels []int32) ([]*model.GroupMember, error) {
	var userIDs []string
	for _, roleLevel := range roleLevels {
		ids, err := g.GetGroupRoleLevelMemberIDs(ctx, groupID, roleLevel)
		if err != nil {
			return nil, err
		}
		userIDs = append(userIDs, ids...)
	}
	return g.GetGroupMembersInfo(ctx, groupID, userIDs)
}

func (g *GroupCacheRedis) FindGroupMemberUser(ctx context.Context, groupIDs []string, userID string) ([]*model.GroupMember, error) {
	if len(groupIDs) == 0 {
		var err error
		groupIDs, err = g.GetJoinedGroupIDs(ctx, userID)
		if err != nil {
			return nil, err
		}
	}
	return batchGetCache2(ctx, g.rcClient, g.expireTime, groupIDs, func(groupID string) string {
		return g.getGroupMemberInfoKey(groupID, userID)
	}, func(member *model.GroupMember) string {
		return member.GroupID
	}, func(ctx context.Context, groupIDs []string) ([]*model.GroupMember, error) {
		return g.groupMemberDB.FindInGroup(ctx, userID, groupIDs)
	})
}

func (g *GroupCacheRedis) DelMaxGroupMemberVersion(groupIDs ...string) cache.GroupCache {
	keys := make([]string, 0, len(groupIDs))
	for _, groupID := range groupIDs {
		keys = append(keys, g.getGroupMemberMaxVersionKey(groupID))
	}
	cache := g.CloneGroupCache()
	cache.AddKeys(keys...)
	return cache
}

func (g *GroupCacheRedis) DelMaxJoinGroupVersion(userIDs ...string) cache.GroupCache {
	keys := make([]string, 0, len(userIDs))
	for _, userID := range userIDs {
		keys = append(keys, g.getJoinGroupMaxVersionKey(userID))
	}
	cache := g.CloneGroupCache()
	cache.AddKeys(keys...)
	return cache
}

func (g *GroupCacheRedis) FindMaxGroupMemberVersion(ctx context.Context, groupID string) (*model.VersionLog, error) {
	return getCache(ctx, g.rcClient, g.getGroupMemberMaxVersionKey(groupID), g.expireTime, func(ctx context.Context) (*model.VersionLog, error) {
		return g.groupMemberDB.FindMemberIncrVersion(ctx, groupID, 0, 0)
	})
}

func (g *GroupCacheRedis) BatchFindMaxGroupMemberVersion(ctx context.Context, groupIDs []string) ([]*model.VersionLog, error) {
	return batchGetCache2(ctx, g.rcClient, g.expireTime, groupIDs,
		func(groupID string) string {
			return g.getGroupMemberMaxVersionKey(groupID)
		}, func(versionLog *model.VersionLog) string {
			return versionLog.DID
		}, func(ctx context.Context, groupIDs []string) ([]*model.VersionLog, error) {
			// versions and limits stay zero-valued: fetch the latest full version for each group.
			versions := make([]uint, len(groupIDs))
			limits := make([]int, len(groupIDs))

			return g.groupMemberDB.BatchFindMemberIncrVersion(ctx, groupIDs, versions, limits)
		})
}

func (g *GroupCacheRedis) FindMaxJoinGroupVersion(ctx context.Context, userID string) (*model.VersionLog, error) {
	return getCache(ctx, g.rcClient, g.getJoinGroupMaxVersionKey(userID), g.expireTime, func(ctx context.Context) (*model.VersionLog, error) {
		return g.groupMemberDB.FindJoinIncrVersion(ctx, userID, 0, 0)
	})
}
127
pkg/common/storage/cache/redis/lua_script.go
vendored
Normal file
@@ -0,0 +1,127 @@
package redis

import (
	"context"
	"errors"
	"fmt"

	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/servererrs"
	"github.com/openimsdk/tools/errs"
	"github.com/openimsdk/tools/log"
	"github.com/redis/go-redis/v9"
)

var (
	setBatchWithCommonExpireScript = redis.NewScript(`
local expire = tonumber(ARGV[1])
for i, key in ipairs(KEYS) do
	redis.call('SET', key, ARGV[i + 1])
	redis.call('EXPIRE', key, expire)
end
return #KEYS
`)

	setBatchWithIndividualExpireScript = redis.NewScript(`
local n = #KEYS
for i = 1, n do
	redis.call('SET', KEYS[i], ARGV[i])
	redis.call('EXPIRE', KEYS[i], ARGV[i + n])
end
return n
`)

	deleteBatchScript = redis.NewScript(`
for i, key in ipairs(KEYS) do
	redis.call('DEL', key)
end
return #KEYS
`)

	getBatchScript = redis.NewScript(`
local values = {}
for i, key in ipairs(KEYS) do
	local value = redis.call('GET', key)
	table.insert(values, value)
end
return values
`)
)
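// Argument layout for the batch-set scripts (KEYS and ARGV are positional):
//
//	setBatchWithCommonExpireScript:     KEYS = k1..kn, ARGV = expire, v1..vn
//	setBatchWithIndividualExpireScript: KEYS = k1..kn, ARGV = v1..vn, e1..en
//
// SET followed by EXPIRE is two commands per key; a single SET with the EX
// option would be equivalent here, at the cost of changing the script hashes.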
func callLua(ctx context.Context, rdb redis.Scripter, script *redis.Script, keys []string, args []any) (any, error) {
	log.ZDebug(ctx, "callLua args", "scriptHash", script.Hash(), "keys", keys, "args", args)
	r := script.EvalSha(ctx, rdb, keys, args)
	if redis.HasErrorPrefix(r.Err(), "NOSCRIPT") {
		// The script is not cached on the server: load it and retry via EvalSha,
		// falling back to a plain Eval if the load itself fails.
		if err := script.Load(ctx, rdb).Err(); err != nil {
			r = script.Eval(ctx, rdb, keys, args)
		} else {
			r = script.EvalSha(ctx, rdb, keys, args)
		}
	}
	v, err := r.Result()
	if errors.Is(err, redis.Nil) {
		err = nil
	}
	return v, errs.WrapMsg(err, "call lua err", "scriptHash", script.Hash(), "keys", keys, "args", args)
}

func LuaSetBatchWithCommonExpire(ctx context.Context, rdb redis.Scripter, keys []string, values []string, expire int) error {
	// Check if the lengths of keys and values match
	if len(keys) != len(values) {
		return errs.New("keys and values length mismatch").Wrap()
	}

	// Ensure the allocation size does not overflow
	maxAllowedLen := (1 << 31) - 1 // 2GB limit (maximum address space for 32-bit systems)
	if len(values) > maxAllowedLen-1 {
		return errs.New(fmt.Sprintf("values length %d exceeds the maximum allowed length %d", len(values), maxAllowedLen-1)).Wrap()
	}
	vals := make([]any, 0, 1+len(values))
	vals = append(vals, expire)
	for _, v := range values {
		vals = append(vals, v)
	}
	_, err := callLua(ctx, rdb, setBatchWithCommonExpireScript, keys, vals)
	return err
}

func LuaSetBatchWithIndividualExpire(ctx context.Context, rdb redis.Scripter, keys []string, values []string, expires []int) error {
	// Check if the lengths of keys, values, and expires match
	if len(keys) != len(values) || len(keys) != len(expires) {
		return errs.New("keys, values and expires length mismatch").Wrap()
	}

	// Ensure the allocation size does not overflow
	maxAllowedLen := (1 << 31) - 1 // 2GB limit (maximum address space for 32-bit systems)
	if len(values) > maxAllowedLen-1 {
		return errs.New(fmt.Sprintf("values length %d exceeds the maximum allowed length %d", len(values), maxAllowedLen-1)).Wrap()
	}
	vals := make([]any, 0, len(values)+len(expires))
	for _, v := range values {
		vals = append(vals, v)
	}
	for _, ex := range expires {
		vals = append(vals, ex)
	}
	_, err := callLua(ctx, rdb, setBatchWithIndividualExpireScript, keys, vals)
	return err
}

func LuaDeleteBatch(ctx context.Context, rdb redis.Scripter, keys []string) error {
	_, err := callLua(ctx, rdb, deleteBatchScript, keys, nil)
	return err
}

func LuaGetBatch(ctx context.Context, rdb redis.Scripter, keys []string) ([]any, error) {
	v, err := callLua(ctx, rdb, getBatchScript, keys, nil)
	if err != nil {
		return nil, err
	}
	values, ok := v.([]any)
	if !ok {
		return nil, servererrs.ErrArgs.WrapMsg("invalid lua get batch result")
	}
	return values, nil
}
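// Usage sketch for the batch helpers above (rdb is any redis.Scripter, such as
// a *redis.Client; keys and values are illustrative):
//
//	keys := []string{"cache:a", "cache:b"}
//	vals := []string{"1", "2"}
//	if err := LuaSetBatchWithCommonExpire(ctx, rdb, keys, vals, 300); err != nil { // 300s TTL
//		// handle error
//	}
//	raw, err := LuaGetBatch(ctx, rdb, keys) // []any; missing keys come back as nil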
75
pkg/common/storage/cache/redis/lua_script_test.go
vendored
Normal file
@@ -0,0 +1,75 @@
package redis

import (
	"context"
	"testing"

	"github.com/go-redis/redismock/v9"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestLuaSetBatchWithCommonExpire(t *testing.T) {
	rdb, mock := redismock.NewClientMock()
	ctx := context.Background()

	keys := []string{"key1", "key2"}
	values := []string{"value1", "value2"}
	expire := 10

	mock.ExpectEvalSha(setBatchWithCommonExpireScript.Hash(), keys, []any{expire, "value1", "value2"}).SetVal(int64(len(keys)))

	err := LuaSetBatchWithCommonExpire(ctx, rdb, keys, values, expire)
	require.NoError(t, err)
	assert.NoError(t, mock.ExpectationsWereMet())
}

func TestLuaSetBatchWithIndividualExpire(t *testing.T) {
	rdb, mock := redismock.NewClientMock()
	ctx := context.Background()

	keys := []string{"key1", "key2"}
	values := []string{"value1", "value2"}
	expires := []int{10, 20}

	args := make([]any, 0, len(values)+len(expires))
	for _, v := range values {
		args = append(args, v)
	}
	for _, ex := range expires {
		args = append(args, ex)
	}

	mock.ExpectEvalSha(setBatchWithIndividualExpireScript.Hash(), keys, args).SetVal(int64(len(keys)))

	err := LuaSetBatchWithIndividualExpire(ctx, rdb, keys, values, expires)
	require.NoError(t, err)
	assert.NoError(t, mock.ExpectationsWereMet())
}

func TestLuaDeleteBatch(t *testing.T) {
	rdb, mock := redismock.NewClientMock()
	ctx := context.Background()

	keys := []string{"key1", "key2"}

	mock.ExpectEvalSha(deleteBatchScript.Hash(), keys, []any{}).SetVal(int64(len(keys)))

	err := LuaDeleteBatch(ctx, rdb, keys)
	require.NoError(t, err)
	assert.NoError(t, mock.ExpectationsWereMet())
}

func TestLuaGetBatch(t *testing.T) {
	rdb, mock := redismock.NewClientMock()
	ctx := context.Background()

	keys := []string{"key1", "key2"}
	expectedValues := []any{"value1", "value2"}

	mock.ExpectEvalSha(getBatchScript.Hash(), keys, []any{}).SetVal(expectedValues)

	values, err := LuaGetBatch(ctx, rdb, keys)
	require.NoError(t, err)
	assert.NoError(t, mock.ExpectationsWereMet())
	assert.Equal(t, expectedValues, values)
}
59
pkg/common/storage/cache/redis/minio.go
vendored
Normal file
@@ -0,0 +1,59 @@
package redis

import (
	"context"
	"time"

	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache"
	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache/cachekey"
	"github.com/openimsdk/tools/s3/minio"
	"github.com/redis/go-redis/v9"
)

func NewMinioCache(rdb redis.UniversalClient) minio.Cache {
	rc := newRocksCacheClient(rdb)
	return &minioCacheRedis{
		BatchDeleter: rc.GetBatchDeleter(),
		rcClient:     rc,
		expireTime:   time.Hour * 24 * 7,
	}
}

type minioCacheRedis struct {
	cache.BatchDeleter
	rcClient   *rocksCacheClient
	expireTime time.Duration
}

func (g *minioCacheRedis) getObjectImageInfoKey(key string) string {
	return cachekey.GetObjectImageInfoKey(key)
}

func (g *minioCacheRedis) getMinioImageThumbnailKey(key string, format string, width int, height int) string {
	return cachekey.GetMinioImageThumbnailKey(key, format, width, height)
}

func (g *minioCacheRedis) DelObjectImageInfoKey(ctx context.Context, keys ...string) error {
	ks := make([]string, 0, len(keys))
	for _, key := range keys {
		ks = append(ks, g.getObjectImageInfoKey(key))
	}
	return g.BatchDeleter.ExecDelWithKeys(ctx, ks)
}

func (g *minioCacheRedis) DelImageThumbnailKey(ctx context.Context, key string, format string, width int, height int) error {
	return g.BatchDeleter.ExecDelWithKeys(ctx, []string{g.getMinioImageThumbnailKey(key, format, width, height)})
}

func (g *minioCacheRedis) GetImageObjectKeyInfo(ctx context.Context, key string, fn func(ctx context.Context) (*minio.ImageInfo, error)) (*minio.ImageInfo, error) {
	info, err := getCache(ctx, g.rcClient, g.getObjectImageInfoKey(key), g.expireTime, fn)
	if err != nil {
		return nil, err
	}
	return info, nil
}

func (g *minioCacheRedis) GetThumbnailKey(ctx context.Context, key string, format string, width int, height int, minioCache func(ctx context.Context) (string, error)) (string, error) {
	return getCache(ctx, g.rcClient, g.getMinioImageThumbnailKey(key, format, width, height), g.expireTime, minioCache)
}
94
pkg/common/storage/cache/redis/msg.go
vendored
Normal file
@@ -0,0 +1,94 @@
package redis

import (
	"context"
	"encoding/json"
	"time"

	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache"
	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache/cachekey"
	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/database"
	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/model"
	"github.com/openimsdk/tools/errs"
	"github.com/openimsdk/tools/utils/datautil"
	"github.com/redis/go-redis/v9"
)

// msgCacheTimeout is the expiration time of the message cache: 86400 seconds (24 hours).
const msgCacheTimeout = time.Hour * 24

func NewMsgCache(client redis.UniversalClient, db database.Msg) cache.MsgCache {
	return &msgCache{
		rcClient:       newRocksCacheClient(client),
		msgDocDatabase: db,
	}
}

type msgCache struct {
	rcClient       *rocksCacheClient
	msgDocDatabase database.Msg
}

func (c *msgCache) getSendMsgKey(id string) string {
	return cachekey.GetSendMsgKey(id)
}

func (c *msgCache) SetSendMsgStatus(ctx context.Context, id string, status int32) error {
	return errs.Wrap(c.rcClient.GetRedis().Set(ctx, c.getSendMsgKey(id), status, time.Hour*24).Err())
}

func (c *msgCache) GetSendMsgStatus(ctx context.Context, id string) (int32, error) {
	result, err := c.rcClient.GetRedis().Get(ctx, c.getSendMsgKey(id)).Int()
	return int32(result), errs.Wrap(err)
}

func (c *msgCache) GetMessageBySeqs(ctx context.Context, conversationID string, seqs []int64) ([]*model.MsgInfoModel, error) {
	if len(seqs) == 0 {
		return nil, nil
	}
	getKey := func(seq int64) string {
		return cachekey.GetMsgCacheKey(conversationID, seq)
	}
	getMsgID := func(msg *model.MsgInfoModel) int64 {
		return msg.Msg.Seq
	}
	find := func(ctx context.Context, seqs []int64) ([]*model.MsgInfoModel, error) {
		return c.msgDocDatabase.FindSeqs(ctx, conversationID, seqs)
	}
	return batchGetCache2(ctx, c.rcClient, msgCacheTimeout, seqs, getKey, getMsgID, find)
}

func (c *msgCache) DelMessageBySeqs(ctx context.Context, conversationID string, seqs []int64) error {
	if len(seqs) == 0 {
		return nil
	}
	keys := datautil.Slice(seqs, func(seq int64) string {
		return cachekey.GetMsgCacheKey(conversationID, seq)
	})
	slotKeys, err := groupKeysBySlot(ctx, c.rcClient.GetRedis(), keys)
	if err != nil {
		return err
	}
	for _, keys := range slotKeys {
		if err := c.rcClient.GetClient().TagAsDeletedBatch2(ctx, keys); err != nil {
			return err
		}
	}
	return nil
}
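// groupKeysBySlot (defined in this package) partitions keys by Redis Cluster
// hash slot before the batched delete: multi-key operations in cluster mode
// must stay within a single slot, or Redis rejects them with a CROSSSLOT error.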
func (c *msgCache) SetMessageBySeqs(ctx context.Context, conversationID string, msgs []*model.MsgInfoModel) error {
	for _, msg := range msgs {
		if msg == nil || msg.Msg == nil || msg.Msg.Seq <= 0 {
			continue
		}
		data, err := json.Marshal(msg)
		if err != nil {
			return err
		}
		if err := c.rcClient.GetClient().RawSet(ctx, cachekey.GetMsgCacheKey(conversationID, msg.Msg.Seq), string(data), msgCacheTimeout); err != nil {
			return err
		}
	}
	return nil
}
161
pkg/common/storage/cache/redis/online.go
vendored
Normal file
@@ -0,0 +1,161 @@
package redis

import (
	"context"
	"fmt"
	"strconv"
	"strings"
	"time"

	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/config"
	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache"
	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache/cachekey"
	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache/mcache"
	"git.imall.cloud/openim/protocol/constant"
	"github.com/openimsdk/tools/errs"
	"github.com/openimsdk/tools/log"
	"github.com/redis/go-redis/v9"
)

func NewUserOnline(rdb redis.UniversalClient) cache.OnlineCache {
	if rdb == nil || config.Standalone() {
		return mcache.NewOnlineCache()
	}
	return &userOnline{
		rdb:         rdb,
		expire:      cachekey.OnlineExpire,
		channelName: cachekey.OnlineChannel,
	}
}

type userOnline struct {
	rdb         redis.UniversalClient
	expire      time.Duration
	channelName string
}

func (s *userOnline) getUserOnlineKey(userID string) string {
	return cachekey.GetOnlineKey(userID)
}

func (s *userOnline) GetOnline(ctx context.Context, userID string) ([]int32, error) {
	members, err := s.rdb.ZRangeByScore(ctx, s.getUserOnlineKey(userID), &redis.ZRangeBy{
		Min: strconv.FormatInt(time.Now().Unix(), 10),
		Max: "+inf",
	}).Result()
	if err != nil {
		return nil, errs.Wrap(err)
	}
	platformIDs := make([]int32, 0, len(members))
	for _, member := range members {
		val, err := strconv.Atoi(member)
		if err != nil {
			return nil, errs.Wrap(err)
		}
		platformIDs = append(platformIDs, int32(val))
	}
	return platformIDs, nil
}

func (s *userOnline) GetAllOnlineUsers(ctx context.Context, cursor uint64) (map[string][]int32, uint64, error) {
	result := make(map[string][]int32)

	keys, nextCursor, err := s.rdb.Scan(ctx, cursor, fmt.Sprintf("%s*", cachekey.OnlineKey), constant.ParamMaxLength).Result()
	if err != nil {
		return nil, 0, err
	}

	for _, key := range keys {
		userID := cachekey.GetOnlineKeyUserID(key)
		strValues, err := s.rdb.ZRange(ctx, key, 0, -1).Result()
		if err != nil {
			return nil, 0, err
		}

		values := make([]int32, 0, len(strValues))
		for _, value := range strValues {
			intValue, err := strconv.Atoi(value)
			if err != nil {
				return nil, 0, errs.Wrap(err)
			}
			values = append(values, int32(intValue))
		}

		result[userID] = values
	}

	return result, nextCursor, nil
}

func (s *userOnline) SetUserOnline(ctx context.Context, userID string, online, offline []int32) error {
	// Atomically update the user's online platform set and the online-user-count
	// cache with a single Lua script.
	script := `
	local key = KEYS[1]
	local countKey = KEYS[2]
	local expire = tonumber(ARGV[1])
	local now = ARGV[2]
	local score = ARGV[3]
	local offlineLen = tonumber(ARGV[4])
	redis.call("ZREMRANGEBYSCORE", key, "-inf", now)
	for i = 5, offlineLen+4 do
		redis.call("ZREM", key, ARGV[i])
	end
	local before = redis.call("ZCARD", key)
	for i = 5+offlineLen, #ARGV do
		redis.call("ZADD", key, score, ARGV[i])
	end
	redis.call("EXPIRE", key, expire)
	local after = redis.call("ZCARD", key)
	local current = redis.call("GET", countKey)
	if not current then
		current = 0
	else
		current = tonumber(current)
	end
	if before == 0 and after > 0 then
		redis.call("SET", countKey, current + 1)
	elseif before > 0 and after == 0 then
		local next = current - 1
		if next < 0 then
			next = 0
		end
		redis.call("SET", countKey, next)
	end
	if before ~= after then
		local members = redis.call("ZRANGE", key, 0, -1)
		table.insert(members, "1")
		return members
	else
		return {"0"}
	end
	`
	now := time.Now()
	argv := make([]any, 0, 4+len(online)+len(offline))
	argv = append(argv, int32(s.expire/time.Second), now.Unix(), now.Add(s.expire).Unix(), int32(len(offline)))
	for _, platformID := range offline {
		argv = append(argv, platformID)
	}
	for _, platformID := range online {
		argv = append(argv, platformID)
	}
	keys := []string{s.getUserOnlineKey(userID), cachekey.OnlineUserCountKey}
	platformIDs, err := s.rdb.Eval(ctx, script, keys, argv).StringSlice()
	if err != nil {
		log.ZError(ctx, "redis SetUserOnline", err, "userID", userID, "online", online, "offline", offline)
		return err
	}
	if len(platformIDs) == 0 {
		return errs.ErrInternalServer.WrapMsg("SetUserOnline redis lua invalid return value")
	}
	if platformIDs[len(platformIDs)-1] != "0" {
		log.ZDebug(ctx, "redis SetUserOnline push", "userID", userID, "online", online, "offline", offline, "platformIDs", platformIDs[:len(platformIDs)-1])
		platformIDs[len(platformIDs)-1] = userID
		msg := strings.Join(platformIDs, ":")
		if err := s.rdb.Publish(ctx, s.channelName, msg).Err(); err != nil {
			return errs.Wrap(err)
		}
	} else {
		log.ZDebug(ctx, "redis SetUserOnline not push", "userID", userID, "online", online, "offline", offline)
	}
	return nil
}
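// The payload published above is the colon-joined member list with the userID
// appended, e.g. "1:5:u100" for platforms 1 and 5. A subscriber sketch
// (names illustrative):
//
//	parts := strings.Split(msg.Payload, ":")
//	userID := parts[len(parts)-1]
//	platformIDs := parts[:len(parts)-1] // still strings; convert with strconv.Atoi as needed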
149
pkg/common/storage/cache/redis/online_count.go
vendored
Normal file
@@ -0,0 +1,149 @@
package redis

import (
	"context"
	"errors"
	"fmt"
	"strconv"
	"strings"
	"time"

	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache/cachekey"
	"git.imall.cloud/openim/protocol/constant"
	"github.com/openimsdk/tools/errs"
	"github.com/redis/go-redis/v9"
)

const onlineUserCountHistorySeparator = ":"

// OnlineUserCountSample is one sampled point of the online-user-count history.
type OnlineUserCountSample struct {
	// Timestamp is the sample time as a millisecond timestamp.
	Timestamp int64
	// Count is the sampled online user count.
	Count int64
}

// GetOnlineUserCount reads the cached online user count. A redis.Nil error is
// returned unwrapped so callers can distinguish a cache miss.
func GetOnlineUserCount(ctx context.Context, rdb redis.UniversalClient) (int64, error) {
	if rdb == nil {
		return 0, errs.ErrInternalServer.WrapMsg("redis client is nil")
	}
	val, err := rdb.Get(ctx, cachekey.OnlineUserCountKey).Result()
	if err != nil {
		if errors.Is(err, redis.Nil) {
			return 0, err
		}
		return 0, errs.Wrap(err)
	}
	count, err := strconv.ParseInt(val, 10, 64)
	if err != nil {
		return 0, errs.WrapMsg(err, "parse online user count failed")
	}
	return count, nil
}

// RefreshOnlineUserCount recounts online users by scanning the online keys and
// rewrites the count cache.
func RefreshOnlineUserCount(ctx context.Context, rdb redis.UniversalClient) (int64, error) {
	if rdb == nil {
		return 0, errs.ErrInternalServer.WrapMsg("redis client is nil")
	}
	var (
		cursor uint64
		total  int64
	)
	now := strconv.FormatInt(time.Now().Unix(), 10)
	for {
		keys, nextCursor, err := rdb.Scan(ctx, cursor, fmt.Sprintf("%s*", cachekey.OnlineKey), constant.ParamMaxLength).Result()
		if err != nil {
			return 0, errs.Wrap(err)
		}
		for _, key := range keys {
			count, err := rdb.ZCount(ctx, key, now, "+inf").Result()
			if err != nil {
				return 0, errs.Wrap(err)
			}
			if count > 0 {
				total++
			}
		}
		cursor = nextCursor
		if cursor == 0 {
			break
		}
	}
	if err := rdb.Set(ctx, cachekey.OnlineUserCountKey, total, 0).Err(); err != nil {
		return 0, errs.Wrap(err)
	}
	return total, nil
}

// AppendOnlineUserCountHistory appends one online-user-count sample to the history.
func AppendOnlineUserCountHistory(ctx context.Context, rdb redis.UniversalClient, timestamp int64, count int64) error {
	if rdb == nil {
		return errs.ErrInternalServer.WrapMsg("redis client is nil")
	}
	if timestamp <= 0 {
		return errs.ErrArgs.WrapMsg("invalid timestamp")
	}
	member := fmt.Sprintf("%d%s%d", timestamp, onlineUserCountHistorySeparator, count)
	if err := rdb.ZAdd(ctx, cachekey.OnlineUserCountHistoryKey, redis.Z{
		Score:  float64(timestamp),
		Member: member,
	}).Err(); err != nil {
		return errs.Wrap(err)
	}
	// Trim samples older than the retention window so the history cannot grow without bound.
	retentionMs := int64(cachekey.OnlineUserCountHistoryRetention / time.Millisecond)
	cutoff := timestamp - retentionMs
	if cutoff > 0 {
		if err := rdb.ZRemRangeByScore(ctx, cachekey.OnlineUserCountHistoryKey, "0", strconv.FormatInt(cutoff, 10)).Err(); err != nil {
			return errs.Wrap(err)
		}
	}
	return nil
}
// GetOnlineUserCountHistory 读取在线人数历史采样
|
||||
func GetOnlineUserCountHistory(ctx context.Context, rdb redis.UniversalClient, startTime int64, endTime int64) ([]OnlineUserCountSample, error) {
|
||||
if rdb == nil {
|
||||
return nil, errs.ErrInternalServer.WrapMsg("redis client is nil")
|
||||
}
|
||||
if startTime <= 0 || endTime <= 0 || endTime <= startTime {
|
||||
return nil, nil
|
||||
}
|
||||
// 包含endTime的数据,使用endTime作为最大值
|
||||
values, err := rdb.ZRangeByScore(ctx, cachekey.OnlineUserCountHistoryKey, &redis.ZRangeBy{
|
||||
Min: strconv.FormatInt(startTime, 10),
|
||||
Max: strconv.FormatInt(endTime, 10),
|
||||
}).Result()
|
||||
if err != nil {
|
||||
if errors.Is(err, redis.Nil) {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, errs.Wrap(err)
|
||||
}
|
||||
if len(values) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
samples := make([]OnlineUserCountSample, 0, len(values))
|
||||
for _, val := range values {
|
||||
parts := strings.SplitN(val, onlineUserCountHistorySeparator, 2)
|
||||
if len(parts) != 2 {
|
||||
continue
|
||||
}
|
||||
ts, err := strconv.ParseInt(parts[0], 10, 64)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
cnt, err := strconv.ParseInt(parts[1], 10, 64)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
samples = append(samples, OnlineUserCountSample{
|
||||
Timestamp: ts,
|
||||
Count: cnt,
|
||||
})
|
||||
}
|
||||
return samples, nil
|
||||
}
|
||||
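Note: these helpers compose into a periodic sampler. A hedged sketch, assumed to live in the same redis package; the one-minute interval and the skip-on-error policy are assumptions, not part of this commit:

// sampleOnlineCount periodically recounts the online users and appends a
// history sample, using RefreshOnlineUserCount and AppendOnlineUserCountHistory
// from online_count.go above.
func sampleOnlineCount(ctx context.Context, rdb redis.UniversalClient) {
	ticker := time.NewTicker(time.Minute)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			count, err := RefreshOnlineUserCount(ctx, rdb)
			if err != nil {
				continue // skip this sample on a transient Redis error
			}
			_ = AppendOnlineUserCountHistory(ctx, rdb, time.Now().UnixMilli(), count)
		}
	}
}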
52
pkg/common/storage/cache/redis/online_test.go
vendored
Normal file
@@ -0,0 +1,52 @@
package redis

import (
	"context"
	"testing"
	"time"

	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/config"
	"github.com/openimsdk/tools/db/redisutil"
)

/*
address: [ 172.16.8.48:7001, 172.16.8.48:7002, 172.16.8.48:7003, 172.16.8.48:7004, 172.16.8.48:7005, 172.16.8.48:7006 ]
username:
password: passwd123
clusterMode: true
db: 0
maxRetry: 10
*/
func TestName111111(t *testing.T) {
	conf := config.Redis{
		Address: []string{
			"172.16.8.124:7001",
			"172.16.8.124:7002",
			"172.16.8.124:7003",
			"172.16.8.124:7004",
			"172.16.8.124:7005",
			"172.16.8.124:7006",
		},
		RedisMode: "cluster",
		Password:  "passwd123",
		//Address: []string{"localhost:16379"},
		//Password: "openIM123",
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*1000)
	defer cancel()
	rdb, err := redisutil.NewRedisClient(ctx, conf.Build())
	if err != nil {
		panic(err)
	}
	online := NewUserOnline(rdb)

	userID := "a123456"
	t.Log(online.GetOnline(ctx, userID))
	t.Log(online.SetUserOnline(ctx, userID, []int32{1, 2, 3, 4}, nil))
	t.Log(online.GetOnline(ctx, userID))
}

func TestName111(t *testing.T) {
}
211
pkg/common/storage/cache/redis/redis_shard_manager.go
vendored
Normal file
@@ -0,0 +1,211 @@
package redis

import (
	"context"

	"github.com/openimsdk/tools/errs"
	"github.com/openimsdk/tools/log"
	"github.com/redis/go-redis/v9"
	"golang.org/x/sync/errgroup"
)

const (
	defaultBatchSize       = 50
	defaultConcurrentLimit = 3
)

// RedisShardManager is a class for sharding and processing keys
type RedisShardManager struct {
	redisClient redis.UniversalClient
	config      *Config
}

type Config struct {
	batchSize       int
	continueOnError bool
	concurrentLimit int
}

// Option is a function type for configuring Config
type Option func(c *Config)
// groupKeysBySlot groups keys by their Redis cluster hash slots.
func groupKeysBySlot(ctx context.Context, redisClient redis.UniversalClient, keys []string) (map[int64][]string, error) {
	slots := make(map[int64][]string)
	clusterClient, isCluster := redisClient.(*redis.ClusterClient)
	if isCluster && len(keys) > 1 {
		pipe := clusterClient.Pipeline()
		cmds := make([]*redis.IntCmd, len(keys))
		for i, key := range keys {
			cmds[i] = pipe.ClusterKeySlot(ctx, key)
		}
		_, err := pipe.Exec(ctx)
		if err != nil {
			return nil, errs.WrapMsg(err, "get slot err")
		}

		for i, cmd := range cmds {
			slot, err := cmd.Result()
			if err != nil {
				log.ZWarn(ctx, "some key get slot err", err, "key", keys[i])
				return nil, errs.WrapMsg(err, "get slot err", "key", keys[i])
			}
			slots[slot] = append(slots[slot], keys[i])
		}
	} else {
		// If not a cluster client, put all keys in the same slot (0)
		slots[0] = keys
	}

	return slots, nil
}

// splitIntoBatches splits keys into batches of the specified size
func splitIntoBatches(keys []string, batchSize int) [][]string {
	var batches [][]string
	for batchSize < len(keys) {
		keys, batches = keys[batchSize:], append(batches, keys[0:batchSize:batchSize])
	}
	return append(batches, keys)
}

// ProcessKeysBySlot groups keys by their Redis cluster hash slots and processes them using the provided function.
func ProcessKeysBySlot(
	ctx context.Context,
	redisClient redis.UniversalClient,
	keys []string,
	processFunc func(ctx context.Context, slot int64, keys []string) error,
	opts ...Option,
) error {
	config := &Config{
		batchSize:       defaultBatchSize,
		continueOnError: false,
		concurrentLimit: defaultConcurrentLimit,
	}
	for _, opt := range opts {
		opt(config)
	}

	// Group keys by slot
	slots, err := groupKeysBySlot(ctx, redisClient, keys)
	if err != nil {
		return err
	}

	g, ctx := errgroup.WithContext(ctx)
	g.SetLimit(config.concurrentLimit)

	// Process keys in each slot using the provided function
	for slot, singleSlotKeys := range slots {
		batches := splitIntoBatches(singleSlotKeys, config.batchSize)
		for _, batch := range batches {
			slot, batch := slot, batch // Avoid closure capture issue
			g.Go(func() error {
				err := processFunc(ctx, slot, batch)
				if err != nil {
					log.ZWarn(ctx, "Batch processFunc failed", err, "slot", slot, "keys", batch)
					if !config.continueOnError {
						return err
					}
				}
				return nil
			})
		}
	}

	if err := g.Wait(); err != nil {
		return err
	}
	return nil
}

func DeleteCacheBySlot(ctx context.Context, rcClient *rocksCacheClient, keys []string) error {
	switch len(keys) {
	case 0:
		return nil
	case 1:
		return rcClient.GetClient().TagAsDeletedBatch2(ctx, keys)
	default:
		return ProcessKeysBySlot(ctx, rcClient.GetRedis(), keys, func(ctx context.Context, slot int64, keys []string) error {
			return rcClient.GetClient().TagAsDeletedBatch2(ctx, keys)
		})
	}
}
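Note: a minimal usage sketch for the exported ProcessKeysBySlot, assumed in-package; the DEL handler and helper name are illustrative:

// deleteKeysSharded removes keys batch-by-batch, slot-by-slot. Against a
// non-cluster client all keys fall into slot 0 and a single goroutine-limited
// pass is made; against a ClusterClient no batch ever spans hash slots.
func deleteKeysSharded(ctx context.Context, rdb redis.UniversalClient, keys []string) error {
	return ProcessKeysBySlot(ctx, rdb, keys, func(ctx context.Context, slot int64, batch []string) error {
		return rdb.Del(ctx, batch...).Err()
	})
}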
95
pkg/common/storage/cache/redis/s3.go
vendored
Normal file
@@ -0,0 +1,95 @@
package redis

import (
	"context"
	"time"

	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache"
	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache/cachekey"
	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/database"
	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/model"
	"github.com/openimsdk/tools/s3"
	"github.com/openimsdk/tools/s3/cont"
	"github.com/redis/go-redis/v9"
)

func NewObjectCacheRedis(rdb redis.UniversalClient, objDB database.ObjectInfo) cache.ObjectCache {
	rc := newRocksCacheClient(rdb)
	return &objectCacheRedis{
		BatchDeleter: rc.GetBatchDeleter(),
		rcClient:     rc,
		expireTime:   time.Hour * 12,
		objDB:        objDB,
	}
}

type objectCacheRedis struct {
	cache.BatchDeleter
	objDB      database.ObjectInfo
	rcClient   *rocksCacheClient
	expireTime time.Duration
}

func (g *objectCacheRedis) getObjectKey(engine string, name string) string {
	return cachekey.GetObjectKey(engine, name)
}

func (g *objectCacheRedis) CloneObjectCache() cache.ObjectCache {
	return &objectCacheRedis{
		BatchDeleter: g.BatchDeleter.Clone(),
		rcClient:     g.rcClient,
		expireTime:   g.expireTime,
		objDB:        g.objDB,
	}
}

func (g *objectCacheRedis) DelObjectName(engine string, names ...string) cache.ObjectCache {
	objectCache := g.CloneObjectCache()
	keys := make([]string, 0, len(names))
	for _, name := range names {
		// Arguments in (engine, name) order, matching getObjectKey's signature.
		keys = append(keys, g.getObjectKey(engine, name))
	}
	objectCache.AddKeys(keys...)
	return objectCache
}

func (g *objectCacheRedis) GetName(ctx context.Context, engine string, name string) (*model.Object, error) {
	return getCache(ctx, g.rcClient, g.getObjectKey(engine, name), g.expireTime, func(ctx context.Context) (*model.Object, error) {
		return g.objDB.Take(ctx, engine, name)
	})
}

func NewS3Cache(rdb redis.UniversalClient, s3 s3.Interface) cont.S3Cache {
	rc := newRocksCacheClient(rdb)
	return &s3CacheRedis{
		BatchDeleter: rc.GetBatchDeleter(),
		rcClient:     rc,
		expireTime:   time.Hour * 12,
		s3:           s3,
	}
}

type s3CacheRedis struct {
	cache.BatchDeleter
	s3         s3.Interface
	rcClient   *rocksCacheClient
	expireTime time.Duration
}

func (g *s3CacheRedis) getS3Key(engine string, name string) string {
	return cachekey.GetS3Key(engine, name)
}

func (g *s3CacheRedis) DelS3Key(ctx context.Context, engine string, keys ...string) error {
	ks := make([]string, 0, len(keys))
	for _, key := range keys {
		ks = append(ks, g.getS3Key(engine, key))
	}
	return g.BatchDeleter.ExecDelWithKeys(ctx, ks)
}

func (g *s3CacheRedis) GetKey(ctx context.Context, engine string, name string) (*s3.ObjectInfo, error) {
	return getCache(ctx, g.rcClient, g.getS3Key(engine, name), g.expireTime, func(ctx context.Context) (*s3.ObjectInfo, error) {
		return g.s3.StatObject(ctx, name)
	})
}
521
pkg/common/storage/cache/redis/seq_conversation.go
vendored
Normal file
@@ -0,0 +1,521 @@
package redis

import (
	"context"
	"errors"
	"fmt"
	"strconv"
	"time"

	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache"
	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache/cachekey"
	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache/mcache"
	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/database"
	"git.imall.cloud/openim/open-im-server-deploy/pkg/msgprocessor"
	"github.com/openimsdk/tools/errs"
	"github.com/openimsdk/tools/log"
	"github.com/redis/go-redis/v9"
)

func NewSeqConversationCacheRedis(rdb redis.UniversalClient, mgo database.SeqConversation) cache.SeqConversationCache {
	if rdb == nil {
		return mcache.NewSeqConversationCache(mgo)
	}
	return &seqConversationCacheRedis{
		mgo:              mgo,
		lockTime:         time.Second * 3,
		dataTime:         time.Hour * 24 * 365,
		minSeqExpireTime: time.Hour,
		rcClient:         newRocksCacheClient(rdb),
	}
}

type seqConversationCacheRedis struct {
	mgo              database.SeqConversation
	rcClient         *rocksCacheClient
	lockTime         time.Duration
	dataTime         time.Duration
	minSeqExpireTime time.Duration
}

func (s *seqConversationCacheRedis) getMinSeqKey(conversationID string) string {
	return cachekey.GetMallocMinSeqKey(conversationID)
}

func (s *seqConversationCacheRedis) SetMinSeq(ctx context.Context, conversationID string, seq int64) error {
	return s.SetMinSeqs(ctx, map[string]int64{conversationID: seq})
}

func (s *seqConversationCacheRedis) GetMinSeq(ctx context.Context, conversationID string) (int64, error) {
	return getCache(ctx, s.rcClient, s.getMinSeqKey(conversationID), s.minSeqExpireTime, func(ctx context.Context) (int64, error) {
		return s.mgo.GetMinSeq(ctx, conversationID)
	})
}

func (s *seqConversationCacheRedis) getSingleMaxSeq(ctx context.Context, conversationID string) (map[string]int64, error) {
	seq, err := s.GetMaxSeq(ctx, conversationID)
	if err != nil {
		return nil, err
	}
	return map[string]int64{conversationID: seq}, nil
}

func (s *seqConversationCacheRedis) getSingleMaxSeqWithTime(ctx context.Context, conversationID string) (map[string]database.SeqTime, error) {
	seq, err := s.GetMaxSeqWithTime(ctx, conversationID)
	if err != nil {
		return nil, err
	}
	return map[string]database.SeqTime{conversationID: seq}, nil
}

func (s *seqConversationCacheRedis) batchGetMaxSeq(ctx context.Context, keys []string, keyConversationID map[string]string, seqs map[string]int64) error {
	result := make([]*redis.StringCmd, len(keys))
	pipe := s.rcClient.GetRedis().Pipeline()
	for i, key := range keys {
		result[i] = pipe.HGet(ctx, key, "CURR")
	}
	if _, err := pipe.Exec(ctx); err != nil && !errors.Is(err, redis.Nil) {
		return errs.Wrap(err)
	}
	var notFoundKey []string
	for i, r := range result {
		req, err := r.Int64()
		if err == nil {
			seqs[keyConversationID[keys[i]]] = req
		} else if errors.Is(err, redis.Nil) {
			notFoundKey = append(notFoundKey, keys[i])
		} else {
			return errs.Wrap(err)
		}
	}
	for _, key := range notFoundKey {
		conversationID := keyConversationID[key]
		seq, err := s.GetMaxSeq(ctx, conversationID)
		if err != nil {
			return err
		}
		seqs[conversationID] = seq
	}
	return nil
}

func (s *seqConversationCacheRedis) batchGetMaxSeqWithTime(ctx context.Context, keys []string, keyConversationID map[string]string, seqs map[string]database.SeqTime) error {
	result := make([]*redis.SliceCmd, len(keys))
	pipe := s.rcClient.GetRedis().Pipeline()
	for i, key := range keys {
		result[i] = pipe.HMGet(ctx, key, "CURR", "TIME")
	}
	if _, err := pipe.Exec(ctx); err != nil && !errors.Is(err, redis.Nil) {
		return errs.Wrap(err)
	}
	var notFoundKey []string
	for i, r := range result {
		val, err := r.Result()
		if err != nil {
			return errs.Wrap(err)
		}
		if len(val) != 2 {
			return errs.New("batchGetMaxSeqWithTime invalid result", "key", keys[i], "res", fmt.Sprintf("%v", val))
		}
		if val[0] == nil {
			notFoundKey = append(notFoundKey, keys[i])
			continue
		}
		seq, err := s.parseInt64(val[0])
		if err != nil {
			return err
		}
		mill, err := s.parseInt64(val[1])
		if err != nil {
			return err
		}
		seqs[keyConversationID[keys[i]]] = database.SeqTime{Seq: seq, Time: mill}
	}
	for _, key := range notFoundKey {
		conversationID := keyConversationID[key]
		seq, err := s.GetMaxSeqWithTime(ctx, conversationID)
		if err != nil {
			return err
		}
		seqs[conversationID] = seq
	}
	return nil
}

func (s *seqConversationCacheRedis) GetMaxSeqs(ctx context.Context, conversationIDs []string) (map[string]int64, error) {
	switch len(conversationIDs) {
	case 0:
		return map[string]int64{}, nil
	case 1:
		return s.getSingleMaxSeq(ctx, conversationIDs[0])
	}
	keys := make([]string, 0, len(conversationIDs))
	keyConversationID := make(map[string]string, len(conversationIDs))
	for _, conversationID := range conversationIDs {
		key := s.getSeqMallocKey(conversationID)
		if _, ok := keyConversationID[key]; ok {
			continue
		}
		keys = append(keys, key)
		keyConversationID[key] = conversationID
	}
	if len(keys) == 1 {
		return s.getSingleMaxSeq(ctx, conversationIDs[0])
	}
	slotKeys, err := groupKeysBySlot(ctx, s.rcClient.GetRedis(), keys)
	if err != nil {
		return nil, err
	}
	seqs := make(map[string]int64, len(conversationIDs))
	for _, keys := range slotKeys {
		if err := s.batchGetMaxSeq(ctx, keys, keyConversationID, seqs); err != nil {
			return nil, err
		}
	}
	return seqs, nil
}

func (s *seqConversationCacheRedis) GetMaxSeqsWithTime(ctx context.Context, conversationIDs []string) (map[string]database.SeqTime, error) {
	switch len(conversationIDs) {
	case 0:
		return map[string]database.SeqTime{}, nil
	case 1:
		return s.getSingleMaxSeqWithTime(ctx, conversationIDs[0])
	}
	keys := make([]string, 0, len(conversationIDs))
	keyConversationID := make(map[string]string, len(conversationIDs))
	for _, conversationID := range conversationIDs {
		key := s.getSeqMallocKey(conversationID)
		if _, ok := keyConversationID[key]; ok {
			continue
		}
		keys = append(keys, key)
		keyConversationID[key] = conversationID
	}
	if len(keys) == 1 {
		return s.getSingleMaxSeqWithTime(ctx, conversationIDs[0])
	}
	slotKeys, err := groupKeysBySlot(ctx, s.rcClient.GetRedis(), keys)
	if err != nil {
		return nil, err
	}
	seqs := make(map[string]database.SeqTime, len(conversationIDs))
	for _, keys := range slotKeys {
		if err := s.batchGetMaxSeqWithTime(ctx, keys, keyConversationID, seqs); err != nil {
			return nil, err
		}
	}
	return seqs, nil
}

func (s *seqConversationCacheRedis) getSeqMallocKey(conversationID string) string {
	return cachekey.GetMallocSeqKey(conversationID)
}

func (s *seqConversationCacheRedis) setSeq(ctx context.Context, key string, owner int64, currSeq int64, lastSeq int64, mill int64) (int64, error) {
	if lastSeq < currSeq {
		return 0, errs.New("lastSeq must be greater than or equal to currSeq")
	}
	// 0: success
	// 1: success, but the lock had expired and was not re-acquired by anyone else
	// 2: already locked by someone else
	script := `
local key = KEYS[1]
local lockValue = ARGV[1]
local dataSecond = ARGV[2]
local curr_seq = tonumber(ARGV[3])
local last_seq = tonumber(ARGV[4])
local mallocTime = ARGV[5]
if redis.call("EXISTS", key) == 0 then
	redis.call("HSET", key, "CURR", curr_seq, "LAST", last_seq, "TIME", mallocTime)
	redis.call("EXPIRE", key, dataSecond)
	return 1
end
if redis.call("HGET", key, "LOCK") ~= lockValue then
	return 2
end
redis.call("HDEL", key, "LOCK")
redis.call("HSET", key, "CURR", curr_seq, "LAST", last_seq, "TIME", mallocTime)
redis.call("EXPIRE", key, dataSecond)
return 0
`
	result, err := s.rcClient.GetRedis().Eval(ctx, script, []string{key}, owner, int64(s.dataTime/time.Second), currSeq, lastSeq, mill).Int64()
	if err != nil {
		return 0, errs.Wrap(err)
	}
	return result, nil
}

// malloc: size == 0 reads the current seq; size > 0 allocates a block of seqs.
func (s *seqConversationCacheRedis) malloc(ctx context.Context, key string, size int64) ([]int64, error) {
	// 0: success
	// 1: key does not exist; the caller must fetch from the DB while holding the lock
	// 2: already locked by someone else
	// 3: allocation exceeded the cached max value; locked
	script := `
local key = KEYS[1]
local size = tonumber(ARGV[1])
local lockSecond = ARGV[2]
local dataSecond = ARGV[3]
local mallocTime = ARGV[4]
local result = {}
if redis.call("EXISTS", key) == 0 then
	local lockValue = math.random(0, 999999999)
	redis.call("HSET", key, "LOCK", lockValue)
	redis.call("EXPIRE", key, lockSecond)
	table.insert(result, 1)
	table.insert(result, lockValue)
	table.insert(result, mallocTime)
	return result
end
if redis.call("HEXISTS", key, "LOCK") == 1 then
	table.insert(result, 2)
	return result
end
local curr_seq = tonumber(redis.call("HGET", key, "CURR"))
local last_seq = tonumber(redis.call("HGET", key, "LAST"))
if size == 0 then
	redis.call("EXPIRE", key, dataSecond)
	table.insert(result, 0)
	table.insert(result, curr_seq)
	table.insert(result, last_seq)
	local setTime = redis.call("HGET", key, "TIME")
	if setTime then
		table.insert(result, setTime)
	else
		table.insert(result, 0)
	end
	return result
end
local max_seq = curr_seq + size
if max_seq > last_seq then
	local lockValue = math.random(0, 999999999)
	redis.call("HSET", key, "LOCK", lockValue)
	redis.call("HSET", key, "CURR", last_seq)
	redis.call("HSET", key, "TIME", mallocTime)
	redis.call("EXPIRE", key, lockSecond)
	table.insert(result, 3)
	table.insert(result, curr_seq)
	table.insert(result, last_seq)
	table.insert(result, lockValue)
	table.insert(result, mallocTime)
	return result
end
redis.call("HSET", key, "CURR", max_seq)
redis.call("HSET", key, "TIME", ARGV[4])
redis.call("EXPIRE", key, dataSecond)
table.insert(result, 0)
table.insert(result, curr_seq)
table.insert(result, last_seq)
table.insert(result, mallocTime)
return result
`
	result, err := s.rcClient.GetRedis().Eval(ctx, script, []string{key}, size, int64(s.lockTime/time.Second), int64(s.dataTime/time.Second), time.Now().UnixMilli()).Int64Slice()
	if err != nil {
		return nil, errs.Wrap(err)
	}
	return result, nil
}

func (s *seqConversationCacheRedis) wait(ctx context.Context) error {
	timer := time.NewTimer(time.Second / 4)
	defer timer.Stop()
	select {
	case <-timer.C:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func (s *seqConversationCacheRedis) setSeqRetry(ctx context.Context, key string, owner int64, currSeq int64, lastSeq int64, mill int64) {
	for i := 0; i < 10; i++ {
		state, err := s.setSeq(ctx, key, owner, currSeq, lastSeq, mill)
		if err != nil {
			log.ZError(ctx, "set seq cache failed", err, "key", key, "owner", owner, "currSeq", currSeq, "lastSeq", lastSeq, "count", i+1)
			if err := s.wait(ctx); err != nil {
				return
			}
			continue
		}
		switch state {
		case 0: // ideal state
		case 1:
			log.ZWarn(ctx, "set seq cache lock not found", nil, "key", key, "owner", owner, "currSeq", currSeq, "lastSeq", lastSeq)
		case 2:
			log.ZWarn(ctx, "set seq cache lock held by someone else", nil, "key", key, "owner", owner, "currSeq", currSeq, "lastSeq", lastSeq)
		default:
			log.ZError(ctx, "set seq cache lock unknown state", nil, "key", key, "owner", owner, "currSeq", currSeq, "lastSeq", lastSeq)
		}
		return
	}
	log.ZError(ctx, "set seq cache retrying still failed", nil, "key", key, "owner", owner, "currSeq", currSeq, "lastSeq", lastSeq)
}

func (s *seqConversationCacheRedis) getMallocSize(conversationID string, size int64) int64 {
	if size == 0 {
		return 0
	}
	var basicSize int64
	if msgprocessor.IsGroupConversationID(conversationID) {
		basicSize = 100
	} else {
		basicSize = 50
	}
	basicSize += size
	return basicSize
}

func (s *seqConversationCacheRedis) Malloc(ctx context.Context, conversationID string, size int64) (int64, error) {
	seq, _, err := s.mallocTime(ctx, conversationID, size)
	return seq, err
}

func (s *seqConversationCacheRedis) mallocTime(ctx context.Context, conversationID string, size int64) (int64, int64, error) {
	if size < 0 {
		return 0, 0, errs.New("size must be non-negative")
	}
	key := s.getSeqMallocKey(conversationID)
	for i := 0; i < 10; i++ {
		states, err := s.malloc(ctx, key, size)
		if err != nil {
			return 0, 0, err
		}
		switch states[0] {
		case 0: // success
			return states[1], states[3], nil
		case 1: // not found
			mallocSize := s.getMallocSize(conversationID, size)
			seq, err := s.mgo.Malloc(ctx, conversationID, mallocSize)
			if err != nil {
				return 0, 0, err
			}
			s.setSeqRetry(ctx, key, states[1], seq+size, seq+mallocSize, states[2])
			return seq, 0, nil
		case 2: // locked
			if err := s.wait(ctx); err != nil {
				return 0, 0, err
			}
			continue
		case 3: // exceeded the cached max value
			currSeq := states[1]
			lastSeq := states[2]
			mill := states[4]
			mallocSize := s.getMallocSize(conversationID, size)
			seq, err := s.mgo.Malloc(ctx, conversationID, mallocSize)
			if err != nil {
				return 0, 0, err
			}
			if lastSeq == seq {
				s.setSeqRetry(ctx, key, states[3], currSeq+size, seq+mallocSize, mill)
				return currSeq, states[4], nil
			} else {
				log.ZWarn(ctx, "malloc seq not equal cache last seq", nil, "conversationID", conversationID, "currSeq", currSeq, "lastSeq", lastSeq, "mallocSeq", seq)
				s.setSeqRetry(ctx, key, states[3], seq+size, seq+mallocSize, mill)
				return seq, mill, nil
			}
		default:
			log.ZError(ctx, "malloc seq unknown state", nil, "state", states[0], "conversationID", conversationID, "size", size)
			return 0, 0, errs.New(fmt.Sprintf("unknown state: %d", states[0]))
		}
	}
	log.ZError(ctx, "malloc seq retrying still failed", nil, "conversationID", conversationID, "size", size)
	return 0, 0, errs.New("malloc seq waiting for lock timeout", "conversationID", conversationID, "size", size)
}

func (s *seqConversationCacheRedis) GetMaxSeq(ctx context.Context, conversationID string) (int64, error) {
	return s.Malloc(ctx, conversationID, 0)
}

func (s *seqConversationCacheRedis) GetMaxSeqWithTime(ctx context.Context, conversationID string) (database.SeqTime, error) {
	seq, mill, err := s.mallocTime(ctx, conversationID, 0)
	if err != nil {
		return database.SeqTime{}, err
	}
	return database.SeqTime{Seq: seq, Time: mill}, nil
}

func (s *seqConversationCacheRedis) SetMinSeqs(ctx context.Context, seqs map[string]int64) error {
	keys := make([]string, 0, len(seqs))
	for conversationID, seq := range seqs {
		keys = append(keys, s.getMinSeqKey(conversationID))
		if err := s.mgo.SetMinSeq(ctx, conversationID, seq); err != nil {
			return err
		}
	}
	return DeleteCacheBySlot(ctx, s.rcClient, keys)
}

// GetCacheMaxSeqWithTime only reads existing cache entries; it does not populate the cache on a miss.
func (s *seqConversationCacheRedis) GetCacheMaxSeqWithTime(ctx context.Context, conversationIDs []string) (map[string]database.SeqTime, error) {
	if len(conversationIDs) == 0 {
		return map[string]database.SeqTime{}, nil
	}
	key2conversationID := make(map[string]string)
	keys := make([]string, 0, len(conversationIDs))
	for _, conversationID := range conversationIDs {
		key := s.getSeqMallocKey(conversationID)
		if _, ok := key2conversationID[key]; ok {
			continue
		}
		key2conversationID[key] = conversationID
		keys = append(keys, key)
	}
	slotKeys, err := groupKeysBySlot(ctx, s.rcClient.GetRedis(), keys)
	if err != nil {
		return nil, err
	}
	res := make(map[string]database.SeqTime)
	for _, keys := range slotKeys {
		if len(keys) == 0 {
			continue
		}
		pipe := s.rcClient.GetRedis().Pipeline()
		cmds := make([]*redis.SliceCmd, 0, len(keys))
		for _, key := range keys {
			cmds = append(cmds, pipe.HMGet(ctx, key, "CURR", "TIME"))
		}
		if _, err := pipe.Exec(ctx); err != nil {
			return nil, errs.Wrap(err)
		}
		for i, cmd := range cmds {
			val, err := cmd.Result()
			if err != nil {
				return nil, err
			}
			if len(val) != 2 {
				return nil, errs.New("GetCacheMaxSeqWithTime invalid result", "key", keys[i], "res", fmt.Sprintf("%v", val))
			}
			if val[0] == nil {
				continue
			}
			seq, err := s.parseInt64(val[0])
			if err != nil {
				return nil, err
			}
			mill, err := s.parseInt64(val[1])
			if err != nil {
				return nil, err
			}
			conversationID := key2conversationID[keys[i]]
			res[conversationID] = database.SeqTime{Seq: seq, Time: mill}
		}
	}
	return res, nil
}

func (s *seqConversationCacheRedis) parseInt64(val any) (int64, error) {
	switch v := val.(type) {
	case nil:
		return 0, nil
	case int:
		return int64(v), nil
	case int64:
		return v, nil
	case string:
		res, err := strconv.ParseInt(v, 10, 64)
		if err != nil {
			return 0, errs.WrapMsg(err, "invalid string not int64", "value", v)
		}
		return res, nil
	default:
		return 0, errs.New("invalid result not int64", "resType", fmt.Sprintf("%T", v), "value", fmt.Sprintf("%v", v))
	}
}
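Note: the allocation contract implied by malloc, and by the "first+1 -> first+size" logging in TestDel below, is that Malloc returns the seq immediately before the allocated block. A hedged in-package sketch; the helper name is illustrative:

// assignSeqs reserves `size` contiguous seqs for a conversation and returns
// them. After first, err := c.Malloc(...), the usable range is first+1 .. first+size.
func assignSeqs(ctx context.Context, c cache.SeqConversationCache, conversationID string, size int64) ([]int64, error) {
	first, err := c.Malloc(ctx, conversationID, size)
	if err != nil {
		return nil, err
	}
	seqs := make([]int64, 0, size)
	for seq := first + 1; seq <= first+size; seq++ {
		seqs = append(seqs, seq)
	}
	return seqs, nil
}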
144
pkg/common/storage/cache/redis/seq_conversation_test.go
vendored
Normal file
@@ -0,0 +1,144 @@
package redis

import (
	"context"
	"strconv"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/database/mgo"
	"github.com/redis/go-redis/v9"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

func newTestSeq() *seqConversationCacheRedis {
	mgocli, err := mongo.Connect(context.Background(), options.Client().ApplyURI("mongodb://openIM:openIM123@127.0.0.1:37017/openim_v3?maxPoolSize=100").SetConnectTimeout(5*time.Second))
	if err != nil {
		panic(err)
	}
	model, err := mgo.NewSeqConversationMongo(mgocli.Database("openim_v3"))
	if err != nil {
		panic(err)
	}
	opt := &redis.Options{
		Addr:     "127.0.0.1:16379",
		Password: "openIM123",
		DB:       1,
	}
	rdb := redis.NewClient(opt)
	if err := rdb.Ping(context.Background()).Err(); err != nil {
		panic(err)
	}
	return NewSeqConversationCacheRedis(rdb, model).(*seqConversationCacheRedis)
}

func TestSeq(t *testing.T) {
	ts := newTestSeq()
	var (
		wg    sync.WaitGroup
		speed atomic.Int64
	)

	const count = 128
	wg.Add(count)
	for i := 0; i < count; i++ {
		index := i + 1
		go func() {
			defer wg.Done()
			var size int64 = 10
			cID := strconv.Itoa(index * 1)
			for i := 1; ; i++ {
				//first, err := ts.mgo.Malloc(context.Background(), cID, size) // mongo
				first, err := ts.Malloc(context.Background(), cID, size) // redis
				if err != nil {
					t.Logf("[%d-%d] %s %s", index, i, cID, err)
					return
				}
				speed.Add(size)
				_ = first
				//t.Logf("[%d] %d -> %d", i, first+1, first+size)
			}
		}()
	}

	done := make(chan struct{})

	go func() {
		wg.Wait()
		close(done)
	}()

	ticker := time.NewTicker(time.Second)

	for {
		select {
		case <-done:
			ticker.Stop()
			return
		case <-ticker.C:
			value := speed.Swap(0)
			t.Logf("speed: %d/s", value)
		}
	}
}

func TestDel(t *testing.T) {
	ts := newTestSeq()
	for i := 1; i < 100; i++ {
		var size int64 = 100
		first, err := ts.Malloc(context.Background(), "100", size)
		if err != nil {
			t.Logf("[%d] %s", i, err)
			return
		}
		t.Logf("[%d] %d -> %d", i, first+1, first+size)
		time.Sleep(time.Second)
	}
}

func TestSeqMalloc(t *testing.T) {
	ts := newTestSeq()
	t.Log(ts.GetMaxSeq(context.Background(), "100"))
}

func TestMinSeq(t *testing.T) {
	ts := newTestSeq()
	t.Log(ts.GetMinSeq(context.Background(), "10000000"))
}

func TestMalloc(t *testing.T) {
	ts := newTestSeq()
	t.Log(ts.mallocTime(context.Background(), "10000000", 100))
}

func TestHMGET(t *testing.T) {
	ts := newTestSeq()
	res, err := ts.GetCacheMaxSeqWithTime(context.Background(), []string{"10000000", "123456"})
	if err != nil {
		panic(err)
	}
	t.Log(res)
}

func TestGetMaxSeqWithTime(t *testing.T) {
	ts := newTestSeq()
	t.Log(ts.GetMaxSeqWithTime(context.Background(), "10000000"))
}

func TestGetMaxSeqWithTime1(t *testing.T) {
	ts := newTestSeq()
	t.Log(ts.GetMaxSeqsWithTime(context.Background(), []string{"10000000", "12345", "111"}))
}
184
pkg/common/storage/cache/redis/seq_user.go
vendored
Normal file
@@ -0,0 +1,184 @@
package redis

import (
	"context"
	"strconv"
	"time"

	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache"
	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache/cachekey"
	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/database"
	"github.com/openimsdk/tools/errs"
	"github.com/redis/go-redis/v9"
)

func NewSeqUserCacheRedis(rdb redis.UniversalClient, mgo database.SeqUser) cache.SeqUser {
	return &seqUserCacheRedis{
		mgo:               mgo,
		readSeqWriteRatio: 100,
		expireTime:        time.Hour * 24 * 7,
		readExpireTime:    time.Hour * 24 * 30,
		rocks:             newRocksCacheClient(rdb),
	}
}

type seqUserCacheRedis struct {
	mgo               database.SeqUser
	rocks             *rocksCacheClient
	expireTime        time.Duration
	readExpireTime    time.Duration
	readSeqWriteRatio int64
}

func (s *seqUserCacheRedis) getSeqUserMaxSeqKey(conversationID string, userID string) string {
	return cachekey.GetSeqUserMaxSeqKey(conversationID, userID)
}

func (s *seqUserCacheRedis) getSeqUserMinSeqKey(conversationID string, userID string) string {
	return cachekey.GetSeqUserMinSeqKey(conversationID, userID)
}

func (s *seqUserCacheRedis) getSeqUserReadSeqKey(conversationID string, userID string) string {
	return cachekey.GetSeqUserReadSeqKey(conversationID, userID)
}

func (s *seqUserCacheRedis) GetUserMaxSeq(ctx context.Context, conversationID string, userID string) (int64, error) {
	return getCache(ctx, s.rocks, s.getSeqUserMaxSeqKey(conversationID, userID), s.expireTime, func(ctx context.Context) (int64, error) {
		return s.mgo.GetUserMaxSeq(ctx, conversationID, userID)
	})
}

func (s *seqUserCacheRedis) SetUserMaxSeq(ctx context.Context, conversationID string, userID string, seq int64) error {
	if err := s.mgo.SetUserMaxSeq(ctx, conversationID, userID, seq); err != nil {
		return err
	}
	return s.rocks.GetClient().TagAsDeleted2(ctx, s.getSeqUserMaxSeqKey(conversationID, userID))
}

func (s *seqUserCacheRedis) GetUserMinSeq(ctx context.Context, conversationID string, userID string) (int64, error) {
	return getCache(ctx, s.rocks, s.getSeqUserMinSeqKey(conversationID, userID), s.expireTime, func(ctx context.Context) (int64, error) {
		return s.mgo.GetUserMinSeq(ctx, conversationID, userID)
	})
}

func (s *seqUserCacheRedis) SetUserMinSeq(ctx context.Context, conversationID string, userID string, seq int64) error {
	return s.SetUserMinSeqs(ctx, userID, map[string]int64{conversationID: seq})
}

func (s *seqUserCacheRedis) GetUserReadSeq(ctx context.Context, conversationID string, userID string) (int64, error) {
	return getCache(ctx, s.rocks, s.getSeqUserReadSeqKey(conversationID, userID), s.readExpireTime, func(ctx context.Context) (int64, error) {
		return s.mgo.GetUserReadSeq(ctx, conversationID, userID)
	})
}

func (s *seqUserCacheRedis) SetUserReadSeq(ctx context.Context, conversationID string, userID string, seq int64) error {
	if s.rocks.GetRedis() == nil {
		return s.SetUserReadSeqToDB(ctx, conversationID, userID, seq)
	}
	dbSeq, err := s.GetUserReadSeq(ctx, conversationID, userID)
	if err != nil {
		return err
	}
	if dbSeq < seq {
		if err := s.rocks.GetClient().RawSet(ctx, s.getSeqUserReadSeqKey(conversationID, userID), strconv.Itoa(int(seq)), s.readExpireTime); err != nil {
			return errs.Wrap(err)
		}
	}
	return nil
}

func (s *seqUserCacheRedis) SetUserReadSeqToDB(ctx context.Context, conversationID string, userID string, seq int64) error {
	return s.mgo.SetUserReadSeq(ctx, conversationID, userID, seq)
}

func (s *seqUserCacheRedis) SetUserMinSeqs(ctx context.Context, userID string, seqs map[string]int64) error {
	keys := make([]string, 0, len(seqs))
	for conversationID, seq := range seqs {
		if err := s.mgo.SetUserMinSeq(ctx, conversationID, userID, seq); err != nil {
			return err
		}
		keys = append(keys, s.getSeqUserMinSeqKey(conversationID, userID))
	}
	return DeleteCacheBySlot(ctx, s.rocks, keys)
}

func (s *seqUserCacheRedis) setUserRedisReadSeqs(ctx context.Context, userID string, seqs map[string]int64) error {
	keys := make([]string, 0, len(seqs))
	keySeq := make(map[string]int64)
	for conversationID, seq := range seqs {
		key := s.getSeqUserReadSeqKey(conversationID, userID)
		keys = append(keys, key)
		keySeq[key] = seq
	}
	slotKeys, err := groupKeysBySlot(ctx, s.rocks.GetRedis(), keys)
	if err != nil {
		return err
	}
	for _, keys := range slotKeys {
		pipe := s.rocks.GetRedis().Pipeline()
		for _, key := range keys {
			pipe.HSet(ctx, key, "value", strconv.FormatInt(keySeq[key], 10))
			pipe.Expire(ctx, key, s.readExpireTime)
		}
		if _, err := pipe.Exec(ctx); err != nil {
			return err
		}
	}
	return nil
}

func (s *seqUserCacheRedis) SetUserReadSeqs(ctx context.Context, userID string, seqs map[string]int64) error {
	if len(seqs) == 0 {
		return nil
	}
	if err := s.setUserRedisReadSeqs(ctx, userID, seqs); err != nil {
		return err
	}
	return nil
}

func (s *seqUserCacheRedis) GetUserReadSeqs(ctx context.Context, userID string, conversationIDs []string) (map[string]int64, error) {
	res, err := batchGetCache2(ctx, s.rocks, s.readExpireTime, conversationIDs, func(conversationID string) string {
		return s.getSeqUserReadSeqKey(conversationID, userID)
	}, func(v *readSeqModel) string {
		return v.ConversationID
	}, func(ctx context.Context, conversationIDs []string) ([]*readSeqModel, error) {
		seqs, err := s.mgo.GetUserReadSeqs(ctx, userID, conversationIDs)
		if err != nil {
			return nil, err
		}
		res := make([]*readSeqModel, 0, len(seqs))
		for conversationID, seq := range seqs {
			res = append(res, &readSeqModel{ConversationID: conversationID, Seq: seq})
		}
		return res, nil
	})
	if err != nil {
		return nil, err
	}
	data := make(map[string]int64)
	for _, v := range res {
		data[v.ConversationID] = v.Seq
	}
	return data, nil
}

var _ BatchCacheCallback[string] = (*readSeqModel)(nil)

type readSeqModel struct {
	ConversationID string
	Seq            int64
}

func (r *readSeqModel) BatchCache(conversationID string) {
	r.ConversationID = conversationID
}

func (r *readSeqModel) UnmarshalJSON(bytes []byte) (err error) {
	r.Seq, err = strconv.ParseInt(string(bytes), 10, 64)
	return
}

func (r *readSeqModel) MarshalJSON() ([]byte, error) {
	return []byte(strconv.FormatInt(r.Seq, 10)), nil
}
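Note: readSeqModel serializes only the bare integer seq; the conversation ID is re-attached after a batch read via BatchCache. A small in-package round-trip sketch (assumes encoding/json is imported; the example key is illustrative):

// exampleReadSeqRoundTrip shows that MarshalJSON emits the seq as a plain
// integer and that BatchCache restores the key-derived conversation ID.
func exampleReadSeqRoundTrip() error {
	m := readSeqModel{ConversationID: "si_100_200", Seq: 42}
	raw, err := json.Marshal(&m) // raw == []byte("42")
	if err != nil {
		return err
	}
	var out readSeqModel
	if err := json.Unmarshal(raw, &out); err != nil { // out.Seq == 42
		return err
	}
	out.BatchCache("si_100_200") // ConversationID is set out-of-band
	return nil
}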
112
pkg/common/storage/cache/redis/seq_user_test.go
vendored
Normal file
@@ -0,0 +1,112 @@
package redis

import (
	"context"
	"fmt"
	"log"
	"strconv"
	"sync/atomic"
	"testing"
	"time"

	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache/cachekey"
	mgo2 "git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/database/mgo"
	"github.com/redis/go-redis/v9"
	"go.mongodb.org/mongo-driver/mongo"
	"go.mongodb.org/mongo-driver/mongo/options"
)

func newTestOnline() *userOnline {
	opt := &redis.Options{
		Addr:     "172.16.8.48:16379",
		Password: "openIM123",
		DB:       0,
	}
	rdb := redis.NewClient(opt)
	if err := rdb.Ping(context.Background()).Err(); err != nil {
		panic(err)
	}
	return &userOnline{rdb: rdb, expire: time.Hour, channelName: "user_online"}
}

func TestOnline(t *testing.T) {
	ts := newTestOnline()
	var count atomic.Int64
	for i := 0; i < 64; i++ {
		go func(userID string) {
			var err error
			for i := 0; ; i++ {
				if i%2 == 0 {
					err = ts.SetUserOnline(context.Background(), userID, []int32{5, 6}, []int32{7, 8, 9})
				} else {
					err = ts.SetUserOnline(context.Background(), userID, []int32{1, 2, 3}, []int32{4, 5, 6})
				}
				if err != nil {
					panic(err)
				}
				count.Add(1)
			}
		}(strconv.Itoa(10000 + i))
	}

	ticker := time.NewTicker(time.Second)
	for range ticker.C {
		t.Log(count.Swap(0))
	}
}

func TestGetOnline(t *testing.T) {
	ts := newTestOnline()
	ctx := context.Background()
	pIDs, err := ts.GetOnline(ctx, "10000")
	if err != nil {
		panic(err)
	}
	t.Log(pIDs)
}

func TestRecvOnline(t *testing.T) {
	ts := newTestOnline()
	ctx := context.Background()
	pubsub := ts.rdb.Subscribe(ctx, cachekey.OnlineChannel)

	_, err := pubsub.Receive(ctx)
	if err != nil {
		log.Fatalf("Could not subscribe: %v", err)
	}

	ch := pubsub.Channel()

	for msg := range ch {
		fmt.Printf("Received message from channel %s: %s\n", msg.Channel, msg.Payload)
	}
}

func TestName1(t *testing.T) {
	opt := &redis.Options{
		Addr:     "172.16.8.48:16379",
		Password: "openIM123",
		DB:       0,
	}
	rdb := redis.NewClient(opt)

	mgo, err := mongo.Connect(context.Background(),
		options.Client().
			ApplyURI("mongodb://openIM:openIM123@172.16.8.48:37017/openim_v3?maxPoolSize=100").
			SetConnectTimeout(5*time.Second))
	if err != nil {
		panic(err)
	}
	model, err := mgo2.NewSeqUserMongo(mgo.Database("openim_v3"))
	if err != nil {
		panic(err)
	}
	seq := NewSeqUserCacheRedis(rdb, model)

	res, err := seq.GetUserReadSeqs(context.Background(), "2110910952", []string{"sg_345762580", "2000", "3000"})
	if err != nil {
		panic(err)
	}
	t.Log(res)
}
90
pkg/common/storage/cache/redis/third.go
vendored
Normal file
@@ -0,0 +1,90 @@
package redis

import (
	"context"
	"time"

	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache"
	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache/cachekey"
	"github.com/openimsdk/tools/errs"
	"github.com/redis/go-redis/v9"
)

func NewThirdCache(rdb redis.UniversalClient) cache.ThirdCache {
	return &thirdCache{rdb: rdb}
}

type thirdCache struct {
	rdb redis.UniversalClient
}

func (c *thirdCache) getGetuiTokenKey() string {
	return cachekey.GetGetuiTokenKey()
}

func (c *thirdCache) getGetuiTaskIDKey() string {
	return cachekey.GetGetuiTaskIDKey()
}

func (c *thirdCache) getUserBadgeUnreadCountSumKey(userID string) string {
	return cachekey.GetUserBadgeUnreadCountSumKey(userID)
}

func (c *thirdCache) getFcmAccountTokenKey(account string, platformID int) string {
	return cachekey.GetFcmAccountTokenKey(account, platformID)
}

func (c *thirdCache) SetFcmToken(ctx context.Context, account string, platformID int, fcmToken string, expireTime int64) (err error) {
	return errs.Wrap(c.rdb.Set(ctx, c.getFcmAccountTokenKey(account, platformID), fcmToken, time.Duration(expireTime)*time.Second).Err())
}

func (c *thirdCache) GetFcmToken(ctx context.Context, account string, platformID int) (string, error) {
	val, err := c.rdb.Get(ctx, c.getFcmAccountTokenKey(account, platformID)).Result()
	if err != nil {
		return "", errs.Wrap(err)
	}
	return val, nil
}

func (c *thirdCache) DelFcmToken(ctx context.Context, account string, platformID int) error {
	return errs.Wrap(c.rdb.Del(ctx, c.getFcmAccountTokenKey(account, platformID)).Err())
}

func (c *thirdCache) IncrUserBadgeUnreadCountSum(ctx context.Context, userID string) (int, error) {
	seq, err := c.rdb.Incr(ctx, c.getUserBadgeUnreadCountSumKey(userID)).Result()
	return int(seq), errs.Wrap(err)
}

func (c *thirdCache) SetUserBadgeUnreadCountSum(ctx context.Context, userID string, value int) error {
	return errs.Wrap(c.rdb.Set(ctx, c.getUserBadgeUnreadCountSumKey(userID), value, 0).Err())
}

func (c *thirdCache) GetUserBadgeUnreadCountSum(ctx context.Context, userID string) (int, error) {
	val, err := c.rdb.Get(ctx, c.getUserBadgeUnreadCountSumKey(userID)).Int()
	return val, errs.Wrap(err)
}

func (c *thirdCache) SetGetuiToken(ctx context.Context, token string, expireTime int64) error {
	return errs.Wrap(c.rdb.Set(ctx, c.getGetuiTokenKey(), token, time.Duration(expireTime)*time.Second).Err())
}

func (c *thirdCache) GetGetuiToken(ctx context.Context) (string, error) {
	val, err := c.rdb.Get(ctx, c.getGetuiTokenKey()).Result()
	if err != nil {
		return "", errs.Wrap(err)
	}
	return val, nil
}

func (c *thirdCache) SetGetuiTaskID(ctx context.Context, taskID string, expireTime int64) error {
	return errs.Wrap(c.rdb.Set(ctx, c.getGetuiTaskIDKey(), taskID, time.Duration(expireTime)*time.Second).Err())
}

func (c *thirdCache) GetGetuiTaskID(ctx context.Context) (string, error) {
	val, err := c.rdb.Get(ctx, c.getGetuiTaskIDKey()).Result()
	if err != nil {
		return "", errs.Wrap(err)
	}
	return val, nil
}
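Note: a hedged usage sketch for the badge counters, assumed in-package; the call sites (push sent / client synced) are assumptions, not part of this commit:

// onPushSent bumps the user's badge when an offline push goes out.
func onPushSent(ctx context.Context, c cache.ThirdCache, userID string) (int, error) {
	return c.IncrUserBadgeUnreadCountSum(ctx, userID)
}

// onClientSynced resets the badge once the client has synced its unread state.
func onClientSynced(ctx context.Context, c cache.ThirdCache, userID string) error {
	return c.SetUserBadgeUnreadCountSum(ctx, userID, 0)
}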
248
pkg/common/storage/cache/redis/token.go
vendored
Normal file
@@ -0,0 +1,248 @@
|
||||
package redis
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/config"
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache"
|
||||
"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache/cachekey"
|
||||
"github.com/openimsdk/tools/errs"
|
||||
"github.com/openimsdk/tools/log"
|
||||
"github.com/openimsdk/tools/utils/datautil"
|
||||
"github.com/redis/go-redis/v9"
|
||||
)
|
||||
|
||||
type tokenCache struct {
|
||||
rdb redis.UniversalClient
|
||||
accessExpire time.Duration
|
||||
localCache *config.LocalCache
|
||||
}
|
||||
|
||||
func NewTokenCacheModel(rdb redis.UniversalClient, localCache *config.LocalCache, accessExpire int64) cache.TokenModel {
|
||||
c := &tokenCache{rdb: rdb, localCache: localCache}
|
||||
c.accessExpire = c.getExpireTime(accessExpire)
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *tokenCache) SetTokenFlag(ctx context.Context, userID string, platformID int, token string, flag int) error {
|
||||
key := cachekey.GetTokenKey(userID, platformID)
|
||||
if err := c.rdb.HSet(ctx, key, token, flag).Err(); err != nil {
|
||||
return errs.Wrap(err)
|
||||
}
|
||||
|
||||
if c.localCache != nil {
|
||||
c.removeLocalTokenCache(ctx, key)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetTokenFlagEx set token and flag with expire time
|
||||
func (c *tokenCache) SetTokenFlagEx(ctx context.Context, userID string, platformID int, token string, flag int) error {
|
||||
key := cachekey.GetTokenKey(userID, platformID)
|
||||
if err := c.rdb.HSet(ctx, key, token, flag).Err(); err != nil {
|
||||
return errs.Wrap(err)
|
||||
}
|
||||
if err := c.rdb.Expire(ctx, key, c.accessExpire).Err(); err != nil {
|
||||
return errs.Wrap(err)
|
||||
}
|
||||
|
||||
if c.localCache != nil {
|
||||
c.removeLocalTokenCache(ctx, key)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *tokenCache) GetTokensWithoutError(ctx context.Context, userID string, platformID int) (map[string]int, error) {
|
||||
m, err := c.rdb.HGetAll(ctx, cachekey.GetTokenKey(userID, platformID)).Result()
|
||||
if err != nil {
|
||||
return nil, errs.Wrap(err)
|
||||
}
|
||||
mm := make(map[string]int)
|
||||
for k, v := range m {
|
||||
state, err := strconv.Atoi(v)
|
||||
if err != nil {
|
||||
return nil, errs.WrapMsg(err, "redis token value is not int", "value", v, "userID", userID, "platformID", platformID)
|
||||
}
|
||||
mm[k] = state
|
||||
}
|
||||
return mm, nil
|
||||
}
|
||||
|
||||
func (c *tokenCache) HasTemporaryToken(ctx context.Context, userID string, platformID int, token string) error {
|
||||
err := c.rdb.Get(ctx, cachekey.GetTemporaryTokenKey(userID, platformID, token)).Err()
|
||||
if err != nil {
|
||||
return errs.Wrap(err)
|
||||
}
|
||||
return nil
|
||||
}

func (c *tokenCache) GetAllTokensWithoutError(ctx context.Context, userID string) (map[int]map[string]int, error) {
	var (
		res     = make(map[int]map[string]int)
		resLock = sync.Mutex{}
	)

	keys := cachekey.GetAllPlatformTokenKey(userID)
	if err := ProcessKeysBySlot(ctx, c.rdb, keys, func(ctx context.Context, slot int64, keys []string) error {
		pipe := c.rdb.Pipeline()
		mapRes := make([]*redis.MapStringStringCmd, len(keys))
		for i, key := range keys {
			mapRes[i] = pipe.HGetAll(ctx, key)
		}
		if _, err := pipe.Exec(ctx); err != nil {
			return err
		}
		for i, m := range mapRes {
			mm := make(map[string]int)
			for k, v := range m.Val() {
				state, err := strconv.Atoi(v)
				if err != nil {
					return errs.WrapMsg(err, "redis token value is not an int", "value", v, "userID", userID)
				}
				mm[k] = state
			}
			resLock.Lock()
			res[cachekey.GetPlatformIDByTokenKey(keys[i])] = mm
			resLock.Unlock()
		}
		return nil
	}); err != nil {
		return nil, err
	}
	return res, nil
}
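
Continuing the earlier sketch (the user ID and fmt output are illustrative), the result maps platformID to a token→flag map, so enumerating every live token for a user is a nested loop:

all, err := tokens.GetAllTokensWithoutError(ctx, "user-1")
if err != nil {
	panic(err)
}
for platformID, tokenFlags := range all {
	for token, flag := range tokenFlags {
		fmt.Println(platformID, token, flag) // flag semantics are defined by the caller
	}
}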

func (c *tokenCache) SetTokenMapByUidPid(ctx context.Context, userID string, platformID int, m map[string]int) error {
	mm := make(map[string]any)
	for k, v := range m {
		mm[k] = v
	}

	key := cachekey.GetTokenKey(userID, platformID)
	if err := c.rdb.HSet(ctx, key, mm).Err(); err != nil {
		return errs.Wrap(err)
	}

	if c.localCache != nil {
		c.removeLocalTokenCache(ctx, key)
	}

	return nil
}

func (c *tokenCache) BatchSetTokenMapByUidPid(ctx context.Context, tokens map[string]map[string]any) error {
	keys := datautil.Keys(tokens)
	if err := ProcessKeysBySlot(ctx, c.rdb, keys, func(ctx context.Context, slot int64, keys []string) error {
		pipe := c.rdb.Pipeline()
		// Write only the keys that belong to this slot; iterating the whole
		// tokens map here would re-issue every HSet once per slot.
		for _, k := range keys {
			pipe.HSet(ctx, k, tokens[k])
		}
		if _, err := pipe.Exec(ctx); err != nil {
			return errs.Wrap(err)
		}
		return nil
	}); err != nil {
		return err
	}

	if c.localCache != nil {
		c.removeLocalTokenCache(ctx, keys...)
	}
	return nil
}

func (c *tokenCache) DeleteTokenByUidPid(ctx context.Context, userID string, platformID int, fields []string) error {
	key := cachekey.GetTokenKey(userID, platformID)
	if err := c.rdb.HDel(ctx, key, fields...).Err(); err != nil {
		return errs.Wrap(err)
	}

	if c.localCache != nil {
		c.removeLocalTokenCache(ctx, key)
	}
	return nil
}

// getExpireTime converts an expire time given in days into a time.Duration.
func (c *tokenCache) getExpireTime(t int64) time.Duration {
	return time.Hour * 24 * time.Duration(t)
}

// DeleteTokenByTokenMap deletes tokens in batch: the map key is a platformID
// and the value is the slice of tokens to remove for that platform.
func (c *tokenCache) DeleteTokenByTokenMap(ctx context.Context, userID string, tokens map[int][]string) error {
	var (
		keys   = make([]string, 0, len(tokens))
		keyMap = make(map[string][]string)
	)
	for k, v := range tokens {
		k1 := cachekey.GetTokenKey(userID, k)
		keys = append(keys, k1)
		keyMap[k1] = v
	}

	if err := ProcessKeysBySlot(ctx, c.rdb, keys, func(ctx context.Context, slot int64, keys []string) error {
		pipe := c.rdb.Pipeline()
		// Delete only the fields of the keys mapped to this slot.
		for _, k := range keys {
			pipe.HDel(ctx, k, keyMap[k]...)
		}
		if _, err := pipe.Exec(ctx); err != nil {
			return errs.Wrap(err)
		}
		return nil
	}); err != nil {
		return err
	}

	// Remove local cache entries for the deleted token keys.
	if c.localCache != nil {
		c.removeLocalTokenCache(ctx, keys...)
	}

	return nil
}

// DeleteAndSetTemporary removes the given token fields from the live hash and
// records each removed token as a short-lived "temporary" key, so that
// HasTemporaryToken can still recognize it until the access expire time passes.
func (c *tokenCache) DeleteAndSetTemporary(ctx context.Context, userID string, platformID int, fields []string) error {
	for _, f := range fields {
		k := cachekey.GetTemporaryTokenKey(userID, platformID, f)
		if err := c.rdb.Set(ctx, k, "", c.accessExpire).Err(); err != nil {
			return errs.Wrap(err)
		}
	}
	key := cachekey.GetTokenKey(userID, platformID)
	if err := c.rdb.HDel(ctx, key, fields...).Err(); err != nil {
		return errs.Wrap(err)
	}

	if c.localCache != nil {
		c.removeLocalTokenCache(ctx, key)
	}
	return nil
}
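
DeleteAndSetTemporary and HasTemporaryToken compose into a soft-revocation flow: a revoked token leaves a marker, so a later check can distinguish "recently revoked" from "never existed". A hedged sketch, continuing the earlier example:

if err := tokens.DeleteAndSetTemporary(ctx, "user-1", 1, []string{"jwt-token"}); err != nil {
	panic(err)
}
// nil error: a temporary marker still exists for the revoked token;
// a wrapped redis.Nil means no such marker.
checkErr := tokens.HasTemporaryToken(ctx, "user-1", 1, "jwt-token")
fmt.Println("recently revoked:", checkErr == nil)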

// removeLocalTokenCache publishes the invalidated keys on the configured local
// cache topic so that other nodes can drop their in-process copies.
func (c *tokenCache) removeLocalTokenCache(ctx context.Context, keys ...string) {
	if len(keys) == 0 {
		return
	}

	topic := c.localCache.Auth.Topic
	if topic == "" {
		return
	}

	data, err := json.Marshal(keys)
	if err != nil {
		log.ZWarn(ctx, "keys json marshal failed", err, "topic", topic, "keys", keys)
		return
	}
	if err := c.rdb.Publish(ctx, topic, string(data)).Err(); err != nil {
		log.ZWarn(ctx, "redis publish cache delete error", err, "topic", topic, "keys", keys)
	}
}
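
On the consuming side (not shown in this file), each node subscribes to the same topic and evicts the published keys from its in-process cache. A minimal sketch using go-redis pub/sub; the topic string and localStore are stand-ins, not names from this codebase:

sub := rdb.Subscribe(ctx, "auth")
defer sub.Close()
for msg := range sub.Channel() {
	var keys []string
	if err := json.Unmarshal([]byte(msg.Payload), &keys); err != nil {
		continue // ignore malformed payloads
	}
	for _, k := range keys {
		localStore.Delete(k) // localStore stands in for the node's in-process cache
	}
}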
107
pkg/common/storage/cache/redis/user.go
vendored
Normal file
@@ -0,0 +1,107 @@
package redis

import (
	"context"
	"time"

	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/config"
	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache"
	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache/cachekey"
	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/database"
	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/model"
	"github.com/dtm-labs/rockscache"
	"github.com/openimsdk/tools/log"
	"github.com/redis/go-redis/v9"
)

const (
	userExpireTime            = time.Second * 60 * 60 * 12
	userOlineStatusExpireTime = time.Second * 60 * 60 * 24
	statusMod                 = 501
)

type UserCacheRedis struct {
	cache.BatchDeleter
	rdb        redis.UniversalClient
	userDB     database.User
	expireTime time.Duration
	rcClient   *rocksCacheClient
}

func NewUserCacheRedis(rdb redis.UniversalClient, localCache *config.LocalCache, userDB database.User, options *rockscache.Options) cache.UserCache {
	rc := newRocksCacheClient(rdb)
	return &UserCacheRedis{
		BatchDeleter: rc.GetBatchDeleter(localCache.User.Topic),
		rdb:          rdb,
		userDB:       userDB,
		expireTime:   userExpireTime,
		rcClient:     rc,
	}
}

func (u *UserCacheRedis) getUserID(user *model.User) string {
	return user.UserID
}

func (u *UserCacheRedis) CloneUserCache() cache.UserCache {
	return &UserCacheRedis{
		BatchDeleter: u.BatchDeleter.Clone(),
		rdb:          u.rdb,
		userDB:       u.userDB,
		expireTime:   u.expireTime,
		rcClient:     u.rcClient,
	}
}

func (u *UserCacheRedis) getUserInfoKey(userID string) string {
	return cachekey.GetUserInfoKey(userID)
}

func (u *UserCacheRedis) getUserGlobalRecvMsgOptKey(userID string) string {
	return cachekey.GetUserGlobalRecvMsgOptKey(userID)
}

func (u *UserCacheRedis) GetUserInfo(ctx context.Context, userID string) (userInfo *model.User, err error) {
	return getCache(ctx, u.rcClient, u.getUserInfoKey(userID), u.expireTime, func(ctx context.Context) (*model.User, error) {
		return u.userDB.Take(ctx, userID)
	})
}

func (u *UserCacheRedis) GetUsersInfo(ctx context.Context, userIDs []string) ([]*model.User, error) {
	log.ZInfo(ctx, "GetUsersInfo start", "userIDs", userIDs)
	return batchGetCache2(ctx, u.rcClient, u.expireTime, userIDs, u.getUserInfoKey, u.getUserID, u.userDB.Find)
}

// DelUsersInfo returns a clone with the users' info keys queued for deletion;
// call ChainExecDel on the returned cache to actually delete them.
func (u *UserCacheRedis) DelUsersInfo(userIDs ...string) cache.UserCache {
	keys := make([]string, 0, len(userIDs))
	for _, userID := range userIDs {
		keys = append(keys, u.getUserInfoKey(userID))
	}
	cache := u.CloneUserCache()
	cache.AddKeys(keys...)

	return cache
}
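
The clone-and-queue pattern pairs with the BatchDeleter chain: the Del* methods clone first (per the ChainExecDel contract), so queued keys never pollute the original cache. A hedged usage sketch, where userCache stands for a constructed cache.UserCache:

// Queue deletions on a clone, then execute them in one batch.
if err := userCache.DelUsersInfo("user-1", "user-2").
	DelUsersGlobalRecvMsgOpt("user-1").
	ChainExecDel(ctx); err != nil {
	// handle error
}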

func (u *UserCacheRedis) GetUserGlobalRecvMsgOpt(ctx context.Context, userID string) (opt int, err error) {
	return getCache(ctx, u.rcClient, u.getUserGlobalRecvMsgOptKey(userID), u.expireTime,
		func(ctx context.Context) (int, error) {
			return u.userDB.GetUserGlobalRecvMsgOpt(ctx, userID)
		},
	)
}

func (u *UserCacheRedis) DelUsersGlobalRecvMsgOpt(userIDs ...string) cache.UserCache {
	keys := make([]string, 0, len(userIDs))
	for _, userID := range userIDs {
		keys = append(keys, u.getUserGlobalRecvMsgOptKey(userID))
	}
	cache := u.CloneUserCache()
	cache.AddKeys(keys...)

	return cache
}
52
pkg/common/storage/cache/s3.go
vendored
Normal file
@@ -0,0 +1,52 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cache

import (
	"context"

	relationtb "git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/model"
	"github.com/openimsdk/tools/s3"
)

type ObjectCache interface {
	BatchDeleter
	CloneObjectCache() ObjectCache
	GetName(ctx context.Context, engine string, name string) (*relationtb.Object, error)
	DelObjectName(engine string, names ...string) ObjectCache
}

type S3Cache interface {
	BatchDeleter
	GetKey(ctx context.Context, engine string, key string) (*s3.ObjectInfo, error)
	DelS3Key(engine string, keys ...string) S3Cache
}

// TODO: integrate minio.Cache with the MinioCache interface.
type MinioCache interface {
	BatchDeleter
	GetImageObjectKeyInfo(ctx context.Context, key string, fn func(ctx context.Context) (*MinioImageInfo, error)) (*MinioImageInfo, error)
	GetThumbnailKey(ctx context.Context, key string, format string, width int, height int, minioCache func(ctx context.Context) (string, error)) (string, error)
	DelObjectImageInfoKey(keys ...string) MinioCache
	DelImageThumbnailKey(key string, format string, width int, height int) MinioCache
}

type MinioImageInfo struct {
	IsImg  bool   `json:"isImg"`
	Width  int    `json:"width"`
	Height int    `json:"height"`
	Format string `json:"format"`
	Etag   string `json:"etag"`
}
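
The MinioCache getters are read-through: the loader closure runs only on a cache miss. A hedged sketch against an arbitrary MinioCache value (imgCache, the object key, and generateThumbnail are illustrative, not names from this codebase):

// Fetch, or compute and cache, the object key of a 128x128 WebP thumbnail.
thumbKey, err := imgCache.GetThumbnailKey(ctx, "photos/cat.png", "webp", 128, 128,
	func(ctx context.Context) (string, error) {
		// Called only on a miss: generate the thumbnail and return its key.
		return generateThumbnail(ctx, "photos/cat.png", "webp", 128, 128)
	})
if err != nil {
	// handle error
}
_ = thumbKey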
19
pkg/common/storage/cache/seq_conversation.go
vendored
Normal file
@@ -0,0 +1,19 @@
package cache

import (
	"context"

	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/database"
)

type SeqConversationCache interface {
	// Malloc reserves a block of `size` sequence numbers for the conversation.
	Malloc(ctx context.Context, conversationID string, size int64) (int64, error)
	GetMaxSeq(ctx context.Context, conversationID string) (int64, error)
	SetMinSeq(ctx context.Context, conversationID string, seq int64) error
	GetMinSeq(ctx context.Context, conversationID string) (int64, error)
	GetMaxSeqs(ctx context.Context, conversationIDs []string) (map[string]int64, error)
	SetMinSeqs(ctx context.Context, seqs map[string]int64) error
	GetCacheMaxSeqWithTime(ctx context.Context, conversationIDs []string) (map[string]database.SeqTime, error)
	GetMaxSeqsWithTime(ctx context.Context, conversationIDs []string) (map[string]database.SeqTime, error)
	GetMaxSeqWithTime(ctx context.Context, conversationID string) (database.SeqTime, error)
}
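
A hedged sketch of how a message writer might consume this interface (seqCache is a constructed SeqConversationCache; the conversation ID, the block-use convention, and assignSeq are assumptions for illustration):

// Reserve 5 seqs for a batch of 5 messages in one round trip.
firstSeq, err := seqCache.Malloc(ctx, "si_user1_user2", 5)
if err != nil {
	// handle error
}
for i := int64(0); i < 5; i++ {
	assignSeq(firstSeq + i) // assignSeq is a hypothetical per-message assignment
}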
16
pkg/common/storage/cache/seq_user.go
vendored
Normal file
@@ -0,0 +1,16 @@
package cache

import "context"

type SeqUser interface {
	GetUserMaxSeq(ctx context.Context, conversationID string, userID string) (int64, error)
	SetUserMaxSeq(ctx context.Context, conversationID string, userID string, seq int64) error
	GetUserMinSeq(ctx context.Context, conversationID string, userID string) (int64, error)
	SetUserMinSeq(ctx context.Context, conversationID string, userID string, seq int64) error
	GetUserReadSeq(ctx context.Context, conversationID string, userID string) (int64, error)
	SetUserReadSeq(ctx context.Context, conversationID string, userID string, seq int64) error
	SetUserReadSeqToDB(ctx context.Context, conversationID string, userID string, seq int64) error
	SetUserMinSeqs(ctx context.Context, userID string, seqs map[string]int64) error
	SetUserReadSeqs(ctx context.Context, userID string, seqs map[string]int64) error
	GetUserReadSeqs(ctx context.Context, userID string, conversationIDs []string) (map[string]int64, error)
}
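
A hedged sketch of deriving a per-conversation unread count from these getters (seqUser and seqCache are constructed instances; the subtraction convention is an assumption for illustration):

// unread = conversation max seq minus the user's read seq, floored at zero.
maxSeq, err := seqCache.GetMaxSeq(ctx, "si_user1_user2")
if err != nil {
	// handle error
}
readSeq, err := seqUser.GetUserReadSeq(ctx, "si_user1_user2", "user-1")
if err != nil {
	// handle error
}
unread := max(maxSeq-readSeq, 0) // built-in max requires Go 1.21+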
18
pkg/common/storage/cache/third.go
vendored
Normal file
@@ -0,0 +1,18 @@
package cache

import (
	"context"
)

type ThirdCache interface {
	SetFcmToken(ctx context.Context, account string, platformID int, fcmToken string, expireTime int64) (err error)
	GetFcmToken(ctx context.Context, account string, platformID int) (string, error)
	DelFcmToken(ctx context.Context, account string, platformID int) error
	IncrUserBadgeUnreadCountSum(ctx context.Context, userID string) (int, error)
	SetUserBadgeUnreadCountSum(ctx context.Context, userID string, value int) error
	GetUserBadgeUnreadCountSum(ctx context.Context, userID string) (int, error)
	SetGetuiToken(ctx context.Context, token string, expireTime int64) error
	GetGetuiToken(ctx context.Context) (string, error)
	SetGetuiTaskID(ctx context.Context, taskID string, expireTime int64) error
	GetGetuiTaskID(ctx context.Context) (string, error)
}
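
As a hedged usage sketch (thirdCache is a constructed ThirdCache; sendOfflinePush is a hypothetical helper), the badge counters support the common bump-and-read flow when delivering an offline push:

// Bump the user's app-badge counter and attach it to the push payload.
badge, err := thirdCache.IncrUserBadgeUnreadCountSum(ctx, "user-1")
if err != nil {
	// handle error
}
sendOfflinePush("user-1", badge)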
19
pkg/common/storage/cache/token.go
vendored
Normal file
@@ -0,0 +1,19 @@
package cache

import (
	"context"
)

type TokenModel interface {
	SetTokenFlag(ctx context.Context, userID string, platformID int, token string, flag int) error
	// SetTokenFlagEx sets the token and flag and applies an expire time to the key.
	SetTokenFlagEx(ctx context.Context, userID string, platformID int, token string, flag int) error
	GetTokensWithoutError(ctx context.Context, userID string, platformID int) (map[string]int, error)
	HasTemporaryToken(ctx context.Context, userID string, platformID int, token string) error
	GetAllTokensWithoutError(ctx context.Context, userID string) (map[int]map[string]int, error)
	SetTokenMapByUidPid(ctx context.Context, userID string, platformID int, m map[string]int) error
	BatchSetTokenMapByUidPid(ctx context.Context, tokens map[string]map[string]any) error
	DeleteTokenByUidPid(ctx context.Context, userID string, platformID int, fields []string) error
	DeleteTokenByTokenMap(ctx context.Context, userID string, tokens map[int][]string) error
	DeleteAndSetTemporary(ctx context.Context, userID string, platformID int, fields []string) error
}
33
pkg/common/storage/cache/user.go
vendored
Normal file
@@ -0,0 +1,33 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cache

import (
	"context"

	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/model"
)

type UserCache interface {
	BatchDeleter
	CloneUserCache() UserCache
	GetUserInfo(ctx context.Context, userID string) (userInfo *model.User, err error)
	GetUsersInfo(ctx context.Context, userIDs []string) ([]*model.User, error)
	DelUsersInfo(userIDs ...string) UserCache
	GetUserGlobalRecvMsgOpt(ctx context.Context, userID string) (opt int, err error)
	DelUsersGlobalRecvMsgOpt(userIDs ...string) UserCache
	//GetUserStatus(ctx context.Context, userIDs []string) ([]*user.OnlineStatus, error)
	//SetUserStatus(ctx context.Context, userID string, status, platformID int32) error
}