Copy project
233
internal/rpc/third/log.go
Normal file
@@ -0,0 +1,233 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package third

import (
	"context"
	"crypto/rand"
	"net/url"
	"strings"
	"time"

	"git.imall.cloud/openim/open-im-server-deploy/pkg/authverify"
	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/servererrs"
	relationtb "git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/model"
	"git.imall.cloud/openim/protocol/constant"
	"git.imall.cloud/openim/protocol/third"
	"github.com/openimsdk/tools/errs"
	"github.com/openimsdk/tools/log"
	"github.com/openimsdk/tools/mcontext"
	"github.com/openimsdk/tools/utils/datautil"
)

func genLogID() string {
	const dataLen = 10
	data := make([]byte, dataLen)
	rand.Read(data)
	chars := []byte("0123456789")
	for i := 0; i < len(data); i++ {
		if i == 0 {
			data[i] = chars[1:][data[i]%9]
		} else {
			data[i] = chars[data[i]%10]
		}
	}
	return string(data)
}

// extractKeyFromLogURL extracts the S3 object key from a log URL.
// URL format: https://s3.jizhying.com/images/openim/data/hash/{hash}?...
// or: https://chatall.oss-ap-southeast-1.aliyuncs.com/openim%2Fdata%2Fhash%2F{hash}
// Key format: openim/data/hash/{hash} (without the bucket name).
// The bucket name is the first segment of the URL path (e.g. images) and must be stripped.
func extractKeyFromLogURL(logURL string, bucketName string) string {
	if logURL == "" {
		return ""
	}
	parsedURL, err := url.Parse(logURL)
	if err != nil {
		return ""
	}
	// Take the path component and strip the leading '/'.
	path := strings.TrimPrefix(parsedURL.Path, "/")
	if path == "" {
		return ""
	}

	// If a bucket name is configured and the path starts with it, strip that prefix.
	if bucketName != "" && strings.HasPrefix(path, bucketName+"/") {
		path = strings.TrimPrefix(path, bucketName+"/")
	} else {
		// No configured bucket name matched; drop the first path segment,
		// assuming it is the bucket name.
		parts := strings.SplitN(path, "/", 2)
		if len(parts) > 1 {
			path = parts[1]
		}
	}

	// URL.Path is already decoded, so the result can be returned directly.
	return path
}

func (t *thirdServer) UploadLogs(ctx context.Context, req *third.UploadLogsReq) (*third.UploadLogsResp, error) {
	var dbLogs []*relationtb.Log
	userID := mcontext.GetOpUserID(ctx)
	platform := constant.PlatformID2Name[int(req.Platform)]
	for _, fileURL := range req.FileURLs {
		log := relationtb.Log{
			Platform: platform,
			UserID: userID,
			CreateTime: time.Now(),
			Url: fileURL.URL,
			FileName: fileURL.Filename,
			AppFramework: req.AppFramework,
			Version: req.Version,
			Ex: req.Ex,
		}
		for i := 0; i < 20; i++ {
			id := genLogID()
			logs, err := t.thirdDatabase.GetLogs(ctx, []string{id}, "")
			if err != nil {
				return nil, err
			}
			if len(logs) == 0 {
				log.LogID = id
				break
			}
		}
		if log.LogID == "" {
			return nil, servererrs.ErrData.WrapMsg("Log id gen error")
		}
		dbLogs = append(dbLogs, &log)
	}
	err := t.thirdDatabase.UploadLogs(ctx, dbLogs)
	if err != nil {
		return nil, err
	}
	return &third.UploadLogsResp{}, nil
}

func (t *thirdServer) DeleteLogs(ctx context.Context, req *third.DeleteLogsReq) (*third.DeleteLogsResp, error) {
	if err := authverify.CheckAdmin(ctx); err != nil {
		return nil, err
	}
	userID := ""
	logs, err := t.thirdDatabase.GetLogs(ctx, req.LogIDs, userID)
	if err != nil {
		return nil, err
	}
	var logIDs []string
	for _, log := range logs {
		logIDs = append(logIDs, log.LogID)
	}
	if ids := datautil.Single(req.LogIDs, logIDs); len(ids) > 0 {
		return nil, errs.ErrRecordNotFound.WrapMsg("logIDs not found", "logIDs", ids)
	}

	// Delete the corresponding S3 files before removing the log records.
	engine := t.config.RpcConfig.Object.Enable
	if engine != "" && t.s3 != nil {
		// Resolve the bucket name (from the minio config).
		bucketName := ""
		if engine == "minio" {
			bucketName = t.config.MinioConfig.Bucket
		}

		for _, logRecord := range logs {
			if logRecord.Url == "" {
				continue
			}
			// Extract the S3 key (without the bucket name) from the URL.
			key := extractKeyFromLogURL(logRecord.Url, bucketName)
			if key == "" {
				log.ZDebug(ctx, "DeleteLogs: cannot extract key from URL, skipping S3 deletion", "logID", logRecord.LogID, "url", logRecord.Url)
				continue
			}
			// Delete the S3 object directly by key.
			log.ZInfo(ctx, "DeleteLogs: attempting to delete S3 file", "logID", logRecord.LogID, "url", logRecord.Url, "key", key, "bucket", bucketName, "engine", engine)
			if err := t.s3.DeleteObject(ctx, key); err != nil {
				// If the S3 delete fails, return the error and keep the database record.
				log.ZError(ctx, "DeleteLogs: S3 file delete failed", err, "logID", logRecord.LogID, "url", logRecord.Url, "key", key, "bucket", bucketName, "engine", engine)
				return nil, errs.WrapMsg(err, "failed to delete S3 file for log", "logID", logRecord.LogID, "url", logRecord.Url, "key", key)
			}
			log.ZInfo(ctx, "DeleteLogs: S3 file delete command executed successfully", "logID", logRecord.LogID, "url", logRecord.Url, "key", key, "bucket", bucketName, "engine", engine)
		}
	}

	err = t.thirdDatabase.DeleteLogs(ctx, req.LogIDs, userID)
	if err != nil {
		return nil, err
	}

	return &third.DeleteLogsResp{}, nil
}

func dbToPbLogInfos(logs []*relationtb.Log) []*third.LogInfo {
	db2pbForLogInfo := func(log *relationtb.Log) *third.LogInfo {
		return &third.LogInfo{
			Filename: log.FileName,
			UserID: log.UserID,
			Platform: log.Platform,
			Url: log.Url,
			CreateTime: log.CreateTime.UnixMilli(),
			LogID: log.LogID,
			SystemType: log.SystemType,
			Version: log.Version,
			Ex: log.Ex,
		}
	}
	return datautil.Slice(logs, db2pbForLogInfo)
}

func (t *thirdServer) SearchLogs(ctx context.Context, req *third.SearchLogsReq) (*third.SearchLogsResp, error) {
	if err := authverify.CheckAdmin(ctx); err != nil {
		return nil, err
	}
	var (
		resp third.SearchLogsResp
		userIDs []string
	)
	if req.StartTime > req.EndTime {
		return nil, errs.ErrArgs.WrapMsg("startTime>endTime")
	}
	if req.StartTime == 0 && req.EndTime == 0 {
		t := time.Date(2019, time.January, 1, 0, 0, 0, 0, time.UTC)
		timestampMills := t.UnixNano() / int64(time.Millisecond)
		req.StartTime = timestampMills
		req.EndTime = time.Now().UnixNano() / int64(time.Millisecond)
	}

	total, logs, err := t.thirdDatabase.SearchLogs(ctx, req.Keyword, time.UnixMilli(req.StartTime), time.UnixMilli(req.EndTime), req.Pagination)
	if err != nil {
		return nil, err
	}
	pbLogs := dbToPbLogInfos(logs)
	for _, log := range logs {
		userIDs = append(userIDs, log.UserID)
	}
	userMap, err := t.userClient.GetUsersInfoMap(ctx, userIDs)
	if err != nil {
		return nil, err
	}
	for _, pbLog := range pbLogs {
		if user, ok := userMap[pbLog.UserID]; ok {
			pbLog.Nickname = user.Nickname
		}
	}
	resp.LogsInfos = pbLogs
	resp.Total = uint32(total)
	return &resp, nil
}
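The key-extraction logic is easiest to see with concrete inputs. Below is a minimal test sketch, not part of the commit, that exercises both branches of extractKeyFromLogURL; the URLs and hash value are made up, and the test is assumed to live in a _test.go file inside package third.

package third

import "testing"

func TestExtractKeyFromLogURL(t *testing.T) {
	// Path-style URL where the configured bucket ("images") is the first path segment.
	got := extractKeyFromLogURL("https://s3.jizhying.com/images/openim/data/hash/abc123?X-Amz-Expires=600", "images")
	if got != "openim/data/hash/abc123" {
		t.Fatalf("unexpected key: %s", got)
	}
	// With no configured bucket name, the first path segment is assumed to be the bucket and dropped.
	got = extractKeyFromLogURL("https://s3.jizhying.com/images/openim/data/hash/abc123", "")
	if got != "openim/data/hash/abc123" {
		t.Fatalf("unexpected key: %s", got)
	}
}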
446
internal/rpc/third/r2.go
Normal file
@@ -0,0 +1,446 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package third

import (
	"context"
	"errors"
	"fmt"
	"net/http"
	"net/url"
	"strconv"
	"strings"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
	awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
	"github.com/aws/aws-sdk-go-v2/credentials"
	aws3 "github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
	"github.com/openimsdk/tools/s3"
)

const (
	minPartSize int64 = 1024 * 1024 * 5 // 5MB
	maxPartSize int64 = 1024 * 1024 * 1024 * 5 // 5GB
	maxNumSize int64 = 10000
)

type R2Config struct {
	Endpoint string
	Region string
	Bucket string
	AccessKeyID string
	SecretAccessKey string
	SessionToken string
}

// NewR2 creates an S3 client that supports Cloudflare R2.
func NewR2(conf R2Config) (*R2, error) {
	if conf.Endpoint == "" {
		return nil, errors.New("endpoint is required for R2")
	}

	// Create an HTTP client with reasonable timeouts.
	httpClient := &http.Client{
		Timeout: 30 * time.Second,
		Transport: &http.Transport{
			MaxIdleConns: 100,
			MaxIdleConnsPerHost: 10,
			IdleConnTimeout: 90 * time.Second,
			TLSHandshakeTimeout: 10 * time.Second,
		},
	}

	cfg := aws.Config{
		Region: conf.Region,
		Credentials: credentials.NewStaticCredentialsProvider(conf.AccessKeyID, conf.SecretAccessKey, conf.SessionToken),
		HTTPClient: httpClient,
	}

	// Create the S3 client with path-style access (required by R2) and a custom endpoint.
	client := aws3.NewFromConfig(cfg, func(o *aws3.Options) {
		o.BaseEndpoint = aws.String(conf.Endpoint)
		o.UsePathStyle = true
	})

	r2 := &R2{
		bucket: conf.Bucket,
		client: client,
		presign: aws3.NewPresignClient(client),
	}

	// Test the connection: list the bucket (verifying it exists and is accessible) with a 5-second timeout.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	fmt.Printf("[R2] Testing connection to bucket '%s' at endpoint '%s'...\n", conf.Bucket, conf.Endpoint)
	_, err := client.ListObjectsV2(ctx, &aws3.ListObjectsV2Input{
		Bucket: aws.String(conf.Bucket),
		MaxKeys: aws.Int32(1),
	})
	if err != nil {
		// Report detailed error information.
		var respErr *awshttp.ResponseError
		if errors.As(err, &respErr) {
			fmt.Printf("[R2] Bucket verification HTTP error:\n")
			fmt.Printf(" Status Code: %d\n", respErr.Response.StatusCode)
			fmt.Printf(" Status: %s\n", respErr.Response.Status)
		}
		fmt.Printf("[R2] Warning: failed to verify R2 bucket '%s' at endpoint '%s': %v\n", conf.Bucket, conf.Endpoint, err)
		fmt.Printf("[R2] Please ensure:\n")
		fmt.Printf(" 1. Bucket '%s' exists in your R2 account\n", conf.Bucket)
		fmt.Printf(" 2. API credentials have correct permissions (Object Read & Write)\n")
		fmt.Printf(" 3. Account ID in endpoint matches your R2 account\n")
	} else {
		fmt.Printf("[R2] Successfully connected to bucket '%s'\n", conf.Bucket)
	}

	return r2, nil
}

type R2 struct {
	bucket string
	client *aws3.Client
	presign *aws3.PresignClient
}

func (r *R2) Engine() string {
	return "aws"
}

func (r *R2) PartLimit() (*s3.PartLimit, error) {
	return &s3.PartLimit{
		MinPartSize: minPartSize,
		MaxPartSize: maxPartSize,
		MaxNumSize: maxNumSize,
	}, nil
}

func (r *R2) formatETag(etag string) string {
	return strings.Trim(etag, `"`)
}

func (r *R2) PartSize(ctx context.Context, size int64) (int64, error) {
	if size <= 0 {
		return 0, errors.New("size must be greater than 0")
	}
	if size > maxPartSize*maxNumSize {
		return 0, fmt.Errorf("size must be less than the maximum allowed limit")
	}
	if size <= minPartSize*maxNumSize {
		return minPartSize, nil
	}
	partSize := size / maxNumSize
	if size%maxNumSize != 0 {
		partSize++
	}
	return partSize, nil
}

func (r *R2) IsNotFound(err error) bool {
	var respErr *awshttp.ResponseError
	if !errors.As(err, &respErr) {
		return false
	}
	if respErr == nil || respErr.Response == nil {
		return false
	}
	return respErr.Response.StatusCode == http.StatusNotFound
}

func (r *R2) PresignedPutObject(ctx context.Context, name string, expire time.Duration, opt *s3.PutOption) (*s3.PresignedPutResult, error) {
	res, err := r.presign.PresignPutObject(ctx, &aws3.PutObjectInput{
		Bucket: aws.String(r.bucket),
		Key: aws.String(name),
	}, aws3.WithPresignExpires(expire), withDisableHTTPPresignerHeaderV4(nil))
	if err != nil {
		return nil, err
	}
	return &s3.PresignedPutResult{URL: res.URL}, nil
}

func (r *R2) DeleteObject(ctx context.Context, name string) error {
	_, err := r.client.DeleteObject(ctx, &aws3.DeleteObjectInput{
		Bucket: aws.String(r.bucket),
		Key: aws.String(name),
	})
	return err
}

func (r *R2) CopyObject(ctx context.Context, src string, dst string) (*s3.CopyObjectInfo, error) {
	res, err := r.client.CopyObject(ctx, &aws3.CopyObjectInput{
		Bucket: aws.String(r.bucket),
		CopySource: aws.String(r.bucket + "/" + src),
		Key: aws.String(dst),
	})
	if err != nil {
		return nil, err
	}
	if res.CopyObjectResult == nil || res.CopyObjectResult.ETag == nil || *res.CopyObjectResult.ETag == "" {
		return nil, errors.New("CopyObject etag is nil")
	}
	return &s3.CopyObjectInfo{
		Key: dst,
		ETag: r.formatETag(*res.CopyObjectResult.ETag),
	}, nil
}

func (r *R2) StatObject(ctx context.Context, name string) (*s3.ObjectInfo, error) {
	res, err := r.client.HeadObject(ctx, &aws3.HeadObjectInput{
		Bucket: aws.String(r.bucket),
		Key: aws.String(name),
	})
	if err != nil {
		return nil, err
	}
	if res.ETag == nil || *res.ETag == "" {
		return nil, errors.New("GetObjectAttributes etag is nil")
	}
	if res.ContentLength == nil {
		return nil, errors.New("GetObjectAttributes object size is nil")
	}
	info := &s3.ObjectInfo{
		ETag: r.formatETag(*res.ETag),
		Key: name,
		Size: *res.ContentLength,
	}
	if res.LastModified == nil {
		info.LastModified = time.Unix(0, 0)
	} else {
		info.LastModified = *res.LastModified
	}
	return info, nil
}

func (r *R2) InitiateMultipartUpload(ctx context.Context, name string, opt *s3.PutOption) (*s3.InitiateMultipartUploadResult, error) {
	startTime := time.Now()
	fmt.Printf("[R2] InitiateMultipartUpload start: bucket=%s, key=%s\n", r.bucket, name)

	input := &aws3.CreateMultipartUploadInput{
		Bucket: aws.String(r.bucket),
		Key: aws.String(name),
	}

	// Add the ContentType to the request if one was provided.
	if opt != nil && opt.ContentType != "" {
		input.ContentType = aws.String(opt.ContentType)
		fmt.Printf("[R2] ContentType: %s\n", opt.ContentType)
	}

	res, err := r.client.CreateMultipartUpload(ctx, input)
	duration := time.Since(startTime)

	if err != nil {
		// Report detailed error information.
		var respErr *awshttp.ResponseError
		if errors.As(err, &respErr) {
			fmt.Printf("[R2] HTTP Response Error after %v:\n", duration)
			fmt.Printf(" Status Code: %d\n", respErr.Response.StatusCode)
			fmt.Printf(" Status: %s\n", respErr.Response.Status)
			if respErr.Response.Body != nil {
				body := make([]byte, 1024)
				n, _ := respErr.Response.Body.Read(body)
				fmt.Printf(" Body: %s\n", string(body[:n]))
			}
		}
		fmt.Printf("[R2] InitiateMultipartUpload failed after %v: %v\n", duration, err)
		return nil, fmt.Errorf("CreateMultipartUpload failed (bucket=%s, key=%s): %w", r.bucket, name, err)
	}
	if res.UploadId == nil || *res.UploadId == "" {
		return nil, errors.New("CreateMultipartUpload upload id is nil")
	}

	fmt.Printf("[R2] InitiateMultipartUpload success after %v: uploadID=%s\n", duration, *res.UploadId)
	return &s3.InitiateMultipartUploadResult{
		Key: name,
		Bucket: r.bucket,
		UploadID: *res.UploadId,
	}, nil
}

func (r *R2) CompleteMultipartUpload(ctx context.Context, uploadID string, name string, parts []s3.Part) (*s3.CompleteMultipartUploadResult, error) {
	params := &aws3.CompleteMultipartUploadInput{
		Bucket: aws.String(r.bucket),
		Key: aws.String(name),
		UploadId: aws.String(uploadID),
		MultipartUpload: &types.CompletedMultipartUpload{
			Parts: make([]types.CompletedPart, 0, len(parts)),
		},
	}
	for _, part := range parts {
		params.MultipartUpload.Parts = append(params.MultipartUpload.Parts, types.CompletedPart{
			ETag: aws.String(part.ETag),
			PartNumber: aws.Int32(int32(part.PartNumber)),
		})
	}
	res, err := r.client.CompleteMultipartUpload(ctx, params)
	if err != nil {
		return nil, err
	}
	if res.ETag == nil || *res.ETag == "" {
		return nil, errors.New("CompleteMultipartUpload etag is nil")
	}
	info := &s3.CompleteMultipartUploadResult{
		Key: name,
		Bucket: r.bucket,
		ETag: r.formatETag(*res.ETag),
	}
	if res.Location != nil {
		info.Location = *res.Location
	}
	return info, nil
}

func (r *R2) AbortMultipartUpload(ctx context.Context, uploadID string, name string) error {
	_, err := r.client.AbortMultipartUpload(ctx, &aws3.AbortMultipartUploadInput{
		Bucket: aws.String(r.bucket),
		Key: aws.String(name),
		UploadId: aws.String(uploadID),
	})
	return err
}

func (r *R2) ListUploadedParts(ctx context.Context, uploadID string, name string, partNumberMarker int, maxParts int) (*s3.ListUploadedPartsResult, error) {
	params := &aws3.ListPartsInput{
		Bucket: aws.String(r.bucket),
		Key: aws.String(name),
		UploadId: aws.String(uploadID),
		PartNumberMarker: aws.String(strconv.Itoa(partNumberMarker)),
		MaxParts: aws.Int32(int32(maxParts)),
	}
	res, err := r.client.ListParts(ctx, params)
	if err != nil {
		return nil, err
	}
	info := &s3.ListUploadedPartsResult{
		Key: name,
		UploadID: uploadID,
		UploadedParts: make([]s3.UploadedPart, 0, len(res.Parts)),
	}
	if res.MaxParts != nil {
		info.MaxParts = int(*res.MaxParts)
	}
	if res.NextPartNumberMarker != nil {
		info.NextPartNumberMarker, _ = strconv.Atoi(*res.NextPartNumberMarker)
	}
	for _, part := range res.Parts {
		var val s3.UploadedPart
		if part.PartNumber != nil {
			val.PartNumber = int(*part.PartNumber)
		}
		if part.LastModified != nil {
			val.LastModified = *part.LastModified
		}
		if part.Size != nil {
			val.Size = *part.Size
		}
		info.UploadedParts = append(info.UploadedParts, val)
	}
	return info, nil
}

func (r *R2) AuthSign(ctx context.Context, uploadID string, name string, expire time.Duration, partNumbers []int) (*s3.AuthSignResult, error) {
	res := &s3.AuthSignResult{
		Parts: make([]s3.SignPart, 0, len(partNumbers)),
	}
	params := &aws3.UploadPartInput{
		Bucket: aws.String(r.bucket),
		Key: aws.String(name),
		UploadId: aws.String(uploadID),
	}
	opt := aws3.WithPresignExpires(expire)
	for _, number := range partNumbers {
		params.PartNumber = aws.Int32(int32(number))
		val, err := r.presign.PresignUploadPart(ctx, params, opt)
		if err != nil {
			return nil, err
		}
		u, err := url.Parse(val.URL)
		if err != nil {
			return nil, err
		}
		query := u.Query()
		u.RawQuery = ""
		urlstr := u.String()
		if res.URL == "" {
			res.URL = urlstr
		}
		if res.URL == urlstr {
			urlstr = ""
		}
		res.Parts = append(res.Parts, s3.SignPart{
			PartNumber: number,
			URL: urlstr,
			Query: query,
			Header: val.SignedHeader,
		})
	}
	return res, nil
}

func (r *R2) AccessURL(ctx context.Context, name string, expire time.Duration, opt *s3.AccessURLOption) (string, error) {
	params := &aws3.GetObjectInput{
		Bucket: aws.String(r.bucket),
		Key: aws.String(name),
	}
	res, err := r.presign.PresignGetObject(ctx, params, aws3.WithPresignExpires(expire), withDisableHTTPPresignerHeaderV4(opt))
	if err != nil {
		return "", err
	}
	return res.URL, nil
}

func (r *R2) FormData(ctx context.Context, name string, size int64, contentType string, duration time.Duration) (*s3.FormData, error) {
	return nil, errors.New("R2 does not currently support form data file uploads")
}

func withDisableHTTPPresignerHeaderV4(opt *s3.AccessURLOption) func(options *aws3.PresignOptions) {
	return func(options *aws3.PresignOptions) {
		options.Presigner = &disableHTTPPresignerHeaderV4{
			opt: opt,
			presigner: options.Presigner,
		}
	}
}

type disableHTTPPresignerHeaderV4 struct {
	opt *s3.AccessURLOption
	presigner aws3.HTTPPresignerV4
}

func (d *disableHTTPPresignerHeaderV4) PresignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) (url string, signedHeader http.Header, err error) {
	optFns = append(optFns, func(options *v4.SignerOptions) {
		options.DisableHeaderHoisting = true
	})
	r.Header.Del("Amz-Sdk-Request")
	d.setOption(r.URL)
	return d.presigner.PresignHTTP(ctx, credentials, r, payloadHash, service, region, signingTime, optFns...)
}

func (d *disableHTTPPresignerHeaderV4) setOption(u *url.URL) {
	if d.opt == nil {
		return
	}
	query := u.Query()
	if d.opt.ContentType != "" {
		query.Set("response-content-type", d.opt.ContentType)
	}
	if d.opt.Filename != "" {
		query.Set("response-content-disposition", `attachment; filename*=UTF-8''`+url.PathEscape(d.opt.Filename))
	}
	u.RawQuery = query.Encode()
}
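For orientation, here is a minimal wiring sketch, not part of the commit, showing how NewR2 would be constructed and used. It is assumed to sit alongside the code above in package third (so only "context" and "time" are needed); the endpoint, bucket, and credential values are placeholders, and the "auto" region is what R2 deployments typically use.

// newR2FromPlaceholders is an illustrative sketch; every literal below is a placeholder,
// not a real account, bucket, or credential.
func newR2FromPlaceholders(ctx context.Context) (string, error) {
	r2, err := NewR2(R2Config{
		Endpoint: "https://<account-id>.r2.cloudflarestorage.com", // R2's S3-compatible endpoint
		Region: "auto",
		Bucket: "openim",
		AccessKeyID: "<access-key-id>",
		SecretAccessKey: "<secret-access-key>",
	})
	if err != nil {
		// NewR2 only fails hard on an empty endpoint; an unreachable bucket is logged as a warning.
		return "", err
	}
	// Presign a 10-minute PUT URL that a client can upload against directly.
	res, err := r2.PresignedPutObject(ctx, "openim/data/hash/abc123", 10*time.Minute, nil)
	if err != nil {
		return "", err
	}
	return res.URL, nil
}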
352
internal/rpc/third/s3.go
Normal file
@@ -0,0 +1,352 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package third

import (
	"context"
	"encoding/base64"
	"encoding/hex"
	"encoding/json"
	"path"
	"strconv"
	"time"

	"git.imall.cloud/openim/open-im-server-deploy/pkg/authverify"

	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/servererrs"
	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/model"
	"git.imall.cloud/openim/protocol/third"
	"github.com/google/uuid"
	"github.com/openimsdk/tools/errs"
	"github.com/openimsdk/tools/log"
	"github.com/openimsdk/tools/mcontext"
	"github.com/openimsdk/tools/s3"
	"github.com/openimsdk/tools/s3/cont"
	"github.com/openimsdk/tools/utils/datautil"
)

func (t *thirdServer) PartLimit(ctx context.Context, req *third.PartLimitReq) (*third.PartLimitResp, error) {
	limit, err := t.s3dataBase.PartLimit()
	if err != nil {
		return nil, err
	}
	return &third.PartLimitResp{
		MinPartSize: limit.MinPartSize,
		MaxPartSize: limit.MaxPartSize,
		MaxNumSize: int32(limit.MaxNumSize),
	}, nil
}

func (t *thirdServer) PartSize(ctx context.Context, req *third.PartSizeReq) (*third.PartSizeResp, error) {
	size, err := t.s3dataBase.PartSize(ctx, req.Size)
	if err != nil {
		return nil, err
	}
	return &third.PartSizeResp{Size: size}, nil
}

func (t *thirdServer) InitiateMultipartUpload(ctx context.Context, req *third.InitiateMultipartUploadReq) (*third.InitiateMultipartUploadResp, error) {
	if err := t.checkUploadName(ctx, req.Name); err != nil {
		return nil, err
	}
	expireTime := time.Now().Add(t.defaultExpire)
	result, err := t.s3dataBase.InitiateMultipartUpload(ctx, req.Hash, req.Size, t.defaultExpire, int(req.MaxParts), req.ContentType)
	if err != nil {
		if haErr, ok := errs.Unwrap(err).(*cont.HashAlreadyExistsError); ok {
			obj := &model.Object{
				Name: req.Name,
				UserID: mcontext.GetOpUserID(ctx),
				Hash: req.Hash,
				Key: haErr.Object.Key,
				Size: haErr.Object.Size,
				ContentType: req.ContentType,
				Group: req.Cause,
				CreateTime: time.Now(),
			}
			if err := t.s3dataBase.SetObject(ctx, obj); err != nil {
				return nil, err
			}

			// Fetch the real OSS URL.
			_, rawURL, err := t.s3dataBase.AccessURL(ctx, obj.Name, t.defaultExpire, nil)
			if err != nil {
				// Fall back to the configured URL if the OSS URL cannot be fetched.
				rawURL = t.apiAddress(req.UrlPrefix, obj.Name)
			}

			return &third.InitiateMultipartUploadResp{
				Url: rawURL,
			}, nil
		}
		return nil, err
	}
	var sign *third.AuthSignParts
	if result.Sign != nil && len(result.Sign.Parts) > 0 {
		sign = &third.AuthSignParts{
			Url: result.Sign.URL,
			Query: toPbMapArray(result.Sign.Query),
			Header: toPbMapArray(result.Sign.Header),
			Parts: make([]*third.SignPart, len(result.Sign.Parts)),
		}
		for i, part := range result.Sign.Parts {
			sign.Parts[i] = &third.SignPart{
				PartNumber: int32(part.PartNumber),
				Url: part.URL,
				Query: toPbMapArray(part.Query),
				Header: toPbMapArray(part.Header),
			}
		}
	}
	return &third.InitiateMultipartUploadResp{
		Upload: &third.UploadInfo{
			UploadID: result.UploadID,
			PartSize: result.PartSize,
			Sign: sign,
			ExpireTime: expireTime.UnixMilli(),
		},
	}, nil
}

func (t *thirdServer) AuthSign(ctx context.Context, req *third.AuthSignReq) (*third.AuthSignResp, error) {
	partNumbers := datautil.Slice(req.PartNumbers, func(partNumber int32) int { return int(partNumber) })
	result, err := t.s3dataBase.AuthSign(ctx, req.UploadID, partNumbers)
	if err != nil {
		return nil, err
	}
	resp := &third.AuthSignResp{
		Url: result.URL,
		Query: toPbMapArray(result.Query),
		Header: toPbMapArray(result.Header),
		Parts: make([]*third.SignPart, len(result.Parts)),
	}
	for i, part := range result.Parts {
		resp.Parts[i] = &third.SignPart{
			PartNumber: int32(part.PartNumber),
			Url: part.URL,
			Query: toPbMapArray(part.Query),
			Header: toPbMapArray(part.Header),
		}
	}
	return resp, nil
}

func (t *thirdServer) CompleteMultipartUpload(ctx context.Context, req *third.CompleteMultipartUploadReq) (*third.CompleteMultipartUploadResp, error) {
	if err := t.checkUploadName(ctx, req.Name); err != nil {
		return nil, err
	}
	result, err := t.s3dataBase.CompleteMultipartUpload(ctx, req.UploadID, req.Parts)
	if err != nil {
		return nil, err
	}
	obj := &model.Object{
		Name: req.Name,
		UserID: mcontext.GetOpUserID(ctx),
		Hash: result.Hash,
		Key: result.Key,
		Size: result.Size,
		ContentType: req.ContentType,
		Group: req.Cause,
		CreateTime: time.Now(),
	}
	if err := t.s3dataBase.SetObject(ctx, obj); err != nil {
		return nil, err
	}
	// Fetch the real OSS URL.
	_, rawURL, err := t.s3dataBase.AccessURL(ctx, obj.Name, t.defaultExpire, nil)
	if err != nil {
		// Fall back to the configured URL if the OSS URL cannot be fetched.
		rawURL = t.apiAddress(req.UrlPrefix, obj.Name)
	}

	return &third.CompleteMultipartUploadResp{
		Url: rawURL,
	}, nil
}

func (t *thirdServer) AccessURL(ctx context.Context, req *third.AccessURLReq) (*third.AccessURLResp, error) {
	opt := &s3.AccessURLOption{}
	if len(req.Query) > 0 {
		switch req.Query["type"] {
		case "":
		case "image":
			opt.Image = &s3.Image{}
			opt.Image.Format = req.Query["format"]
			opt.Image.Width, _ = strconv.Atoi(req.Query["width"])
			opt.Image.Height, _ = strconv.Atoi(req.Query["height"])
			log.ZDebug(ctx, "AccessURL image", "name", req.Name, "option", opt.Image)
		default:
			return nil, errs.ErrArgs.WrapMsg("invalid query type")
		}
	}
	expireTime, rawURL, err := t.s3dataBase.AccessURL(ctx, req.Name, t.defaultExpire, opt)
	if err != nil {
		return nil, err
	}
	return &third.AccessURLResp{
		Url: rawURL,
		ExpireTime: expireTime.UnixMilli(),
	}, nil
}

func (t *thirdServer) InitiateFormData(ctx context.Context, req *third.InitiateFormDataReq) (*third.InitiateFormDataResp, error) {
	if req.Name == "" {
		return nil, errs.ErrArgs.WrapMsg("name is empty")
	}
	if req.Size <= 0 {
		return nil, errs.ErrArgs.WrapMsg("size must be greater than 0")
	}
	if err := t.checkUploadName(ctx, req.Name); err != nil {
		return nil, err
	}
	var duration time.Duration
	opUserID := mcontext.GetOpUserID(ctx)
	var key string
	if authverify.CheckUserIsAdmin(ctx, opUserID) {
		if req.Millisecond <= 0 {
			duration = time.Minute * 10
		} else {
			duration = time.Millisecond * time.Duration(req.Millisecond)
		}
		if req.Absolute {
			key = req.Name
		}
	} else {
		duration = time.Minute * 10
	}
	uid, err := uuid.NewRandom()
	if err != nil {
		return nil, errs.WrapMsg(err, "uuid NewRandom failed")
	}
	if key == "" {
		date := time.Now().Format("20060102")
		key = path.Join(cont.DirectPath, date, opUserID, hex.EncodeToString(uid[:])+path.Ext(req.Name))
	}
	mate := FormDataMate{
		Name: req.Name,
		Size: req.Size,
		ContentType: req.ContentType,
		Group: req.Group,
		Key: key,
	}
	mateData, err := json.Marshal(&mate)
	if err != nil {
		return nil, errs.WrapMsg(err, "marshal failed")
	}
	resp, err := t.s3dataBase.FormData(ctx, key, req.Size, req.ContentType, duration)
	if err != nil {
		return nil, err
	}
	return &third.InitiateFormDataResp{
		Id: base64.RawStdEncoding.EncodeToString(mateData),
		Url: resp.URL,
		File: resp.File,
		Header: toPbMapArray(resp.Header),
		FormData: resp.FormData,
		Expires: resp.Expires.UnixMilli(),
		SuccessCodes: datautil.Slice(resp.SuccessCodes, func(code int) int32 {
			return int32(code)
		}),
	}, nil
}

func (t *thirdServer) CompleteFormData(ctx context.Context, req *third.CompleteFormDataReq) (*third.CompleteFormDataResp, error) {
	if req.Id == "" {
		return nil, errs.ErrArgs.WrapMsg("id is empty")
	}
	data, err := base64.RawStdEncoding.DecodeString(req.Id)
	if err != nil {
		return nil, errs.ErrArgs.WrapMsg("invalid id " + err.Error())
	}
	var mate FormDataMate
	if err := json.Unmarshal(data, &mate); err != nil {
		return nil, errs.ErrArgs.WrapMsg("invalid id " + err.Error())
	}
	if err := t.checkUploadName(ctx, mate.Name); err != nil {
		return nil, err
	}
	info, err := t.s3dataBase.StatObject(ctx, mate.Key)
	if err != nil {
		return nil, err
	}
	if info.Size > 0 && info.Size != mate.Size {
		return nil, servererrs.ErrData.WrapMsg("file size mismatch")
	}
	obj := &model.Object{
		Name: mate.Name,
		UserID: mcontext.GetOpUserID(ctx),
		Hash: "etag_" + info.ETag,
		Key: info.Key,
		Size: info.Size,
		ContentType: mate.ContentType,
		Group: mate.Group,
		CreateTime: time.Now(),
	}
	if err := t.s3dataBase.SetObject(ctx, obj); err != nil {
		return nil, err
	}

	// Fetch the real OSS URL.
	_, rawURL, err := t.s3dataBase.AccessURL(ctx, mate.Name, t.defaultExpire, nil)
	if err != nil {
		// Fall back to the configured URL if the OSS URL cannot be fetched.
		rawURL = t.apiAddress(req.UrlPrefix, mate.Name)
	}

	return &third.CompleteFormDataResp{Url: rawURL}, nil
}

func (t *thirdServer) apiAddress(prefix, name string) string {
	return prefix + name
}

func (t *thirdServer) DeleteOutdatedData(ctx context.Context, req *third.DeleteOutdatedDataReq) (*third.DeleteOutdatedDataResp, error) {
	if err := authverify.CheckAdmin(ctx); err != nil {
		return nil, err
	}
	engine := t.config.RpcConfig.Object.Enable
	expireTime := time.UnixMilli(req.ExpireTime)
	// Find all expired data in the S3 database.
	models, err := t.s3dataBase.FindExpirationObject(ctx, engine, expireTime, req.ObjectGroup, int64(req.Limit))
	if err != nil {
		return nil, err
	}
	for i, obj := range models {
		if err := t.s3dataBase.DeleteSpecifiedData(ctx, engine, []string{obj.Name}); err != nil {
			return nil, errs.Wrap(err)
		}
		if err := t.s3dataBase.DelS3Key(ctx, engine, obj.Name); err != nil {
			return nil, err
		}
		count, err := t.s3dataBase.GetKeyCount(ctx, engine, obj.Key)
		if err != nil {
			return nil, err
		}
		log.ZDebug(ctx, "delete s3 object record", "index", i, "s3", obj, "count", count)
		if count == 0 {
			if err := t.s3.DeleteObject(ctx, obj.Key); err != nil {
				return nil, err
			}
		}
	}
	return &third.DeleteOutdatedDataResp{Count: int32(len(models))}, nil
}

type FormDataMate struct {
	Name string `json:"name"`
	Size int64 `json:"size"`
	ContentType string `json:"contentType"`
	Group string `json:"group"`
	Key string `json:"key"`
}
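The AccessURL handler above only understands an empty query or type=image with format/width/height. A small caller sketch, not part of the commit, shows the expected request shape; it assumes thirdClient is the generated third.ThirdClient gRPC stub, and the object name and sizes are made up.

// fetchThumbnailURL is an illustrative sketch of requesting an image variant via AccessURL.
func fetchThumbnailURL(ctx context.Context, thirdClient third.ThirdClient) (string, error) {
	resp, err := thirdClient.AccessURL(ctx, &third.AccessURLReq{
		Name: "imUserID/picture.png",
		Query: map[string]string{
			"type": "image", // only "" and "image" are accepted; anything else returns ErrArgs
			"format": "jpg",
			"width": "256",
			"height": "256",
		},
	})
	if err != nil {
		return "", err
	}
	return resp.Url, nil
}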
175
internal/rpc/third/third.go
Normal file
@@ -0,0 +1,175 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package third

import (
	"context"
	"fmt"
	"time"

	"git.imall.cloud/openim/open-im-server-deploy/pkg/authverify"
	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache"
	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache/mcache"
	"git.imall.cloud/openim/open-im-server-deploy/pkg/dbbuild"
	"git.imall.cloud/openim/open-im-server-deploy/pkg/rpcli"
	"github.com/openimsdk/tools/s3/disable"

	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/config"
	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/cache/redis"
	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/database/mgo"
	"git.imall.cloud/openim/open-im-server-deploy/pkg/localcache"
	"github.com/openimsdk/tools/s3/aws"
	"github.com/openimsdk/tools/s3/kodo"

	"git.imall.cloud/openim/open-im-server-deploy/pkg/common/storage/controller"
	"git.imall.cloud/openim/protocol/third"
	"github.com/openimsdk/tools/discovery"
	"github.com/openimsdk/tools/s3"
	"github.com/openimsdk/tools/s3/cos"
	"github.com/openimsdk/tools/s3/minio"
	"github.com/openimsdk/tools/s3/oss"
	"google.golang.org/grpc"
)

type thirdServer struct {
	third.UnimplementedThirdServer
	thirdDatabase controller.ThirdDatabase
	s3dataBase controller.S3Database
	defaultExpire time.Duration
	config *Config
	s3 s3.Interface
	userClient *rpcli.UserClient
}

type Config struct {
	RpcConfig config.Third
	RedisConfig config.Redis
	MongodbConfig config.Mongo
	NotificationConfig config.Notification
	Share config.Share
	MinioConfig config.Minio
	LocalCacheConfig config.LocalCache
	Discovery config.Discovery
}

func Start(ctx context.Context, config *Config, client discovery.SvcDiscoveryRegistry, server grpc.ServiceRegistrar) error {
	dbb := dbbuild.NewBuilder(&config.MongodbConfig, &config.RedisConfig)
	mgocli, err := dbb.Mongo(ctx)
	if err != nil {
		return err
	}
	rdb, err := dbb.Redis(ctx)
	if err != nil {
		return err
	}

	logdb, err := mgo.NewLogMongo(mgocli.GetDB())
	if err != nil {
		return err
	}
	s3db, err := mgo.NewS3Mongo(mgocli.GetDB())
	if err != nil {
		return err
	}
	var thirdCache cache.ThirdCache
	if rdb == nil {
		tc, err := mgo.NewCacheMgo(mgocli.GetDB())
		if err != nil {
			return err
		}
		thirdCache = mcache.NewThirdCache(tc)
	} else {
		thirdCache = redis.NewThirdCache(rdb)
	}
	// Select the object-storage backend according to the configured policy.
	var o s3.Interface
	switch enable := config.RpcConfig.Object.Enable; enable {
	case "minio":
		var minioCache minio.Cache
		if rdb == nil {
			mc, err := mgo.NewCacheMgo(mgocli.GetDB())
			if err != nil {
				return err
			}
			minioCache = mcache.NewMinioCache(mc)
		} else {
			minioCache = redis.NewMinioCache(rdb)
		}
		o, err = minio.NewMinio(ctx, minioCache, *config.MinioConfig.Build())
	case "cos":
		o, err = cos.NewCos(*config.RpcConfig.Object.Cos.Build())
	case "oss":
		o, err = oss.NewOSS(*config.RpcConfig.Object.Oss.Build())
	case "kodo":
		o, err = kodo.NewKodo(*config.RpcConfig.Object.Kodo.Build())
	case "aws":
		// Use the custom R2 client to support Cloudflare R2 (which requires a custom endpoint).
		awsConf := config.RpcConfig.Object.Aws
		if awsConf.Endpoint != "" {
			// An endpoint is configured, so use the R2 client.
			o, err = NewR2(R2Config{
				Endpoint: awsConf.Endpoint,
				Region: awsConf.Region,
				Bucket: awsConf.Bucket,
				AccessKeyID: awsConf.AccessKeyID,
				SecretAccessKey: awsConf.SecretAccessKey,
				SessionToken: awsConf.SessionToken,
			})
		} else {
			// Standard AWS S3.
			o, err = aws.NewAws(*awsConf.Build())
		}
	case "":
		o = disable.NewDisable()
	default:
		err = fmt.Errorf("invalid object enable: %s", enable)
	}
	if err != nil {
		return err
	}
	userConn, err := client.GetConn(ctx, config.Discovery.RpcService.User)
	if err != nil {
		return err
	}
	localcache.InitLocalCache(&config.LocalCacheConfig)
	third.RegisterThirdServer(server, &thirdServer{
		thirdDatabase: controller.NewThirdDatabase(thirdCache, logdb),
		s3dataBase: controller.NewS3Database(rdb, o, s3db),
		defaultExpire: time.Hour * 24 * 7,
		config: config,
		s3: o,
		userClient: rpcli.NewUserClient(userConn),
	})
	return nil
}

func (t *thirdServer) FcmUpdateToken(ctx context.Context, req *third.FcmUpdateTokenReq) (resp *third.FcmUpdateTokenResp, err error) {
	err = t.thirdDatabase.FcmUpdateToken(ctx, req.Account, int(req.PlatformID), req.FcmToken, req.ExpireTime)
	if err != nil {
		return nil, err
	}
	return &third.FcmUpdateTokenResp{}, nil
}

func (t *thirdServer) SetAppBadge(ctx context.Context, req *third.SetAppBadgeReq) (resp *third.SetAppBadgeResp, err error) {
	if err := authverify.CheckAccess(ctx, req.UserID); err != nil {
		return nil, err
	}
	err = t.thirdDatabase.SetAppBadge(ctx, req.UserID, int(req.AppUnreadCount))
	if err != nil {
		return nil, err
	}
	return &third.SetAppBadgeResp{}, nil
}
88
internal/rpc/third/tool.go
Normal file
@@ -0,0 +1,88 @@
// Copyright © 2023 OpenIM. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package third

import (
	"context"
	"fmt"
	"strings"
	"unicode/utf8"

	"git.imall.cloud/openim/open-im-server-deploy/pkg/authverify"
	"git.imall.cloud/openim/protocol/third"
	"github.com/openimsdk/tools/errs"
	"github.com/openimsdk/tools/mcontext"
)

func toPbMapArray(m map[string][]string) []*third.KeyValues {
	if len(m) == 0 {
		return nil
	}
	res := make([]*third.KeyValues, 0, len(m))
	for key := range m {
		res = append(res, &third.KeyValues{
			Key: key,
			Values: m[key],
		})
	}
	return res
}

func (t *thirdServer) checkUploadName(ctx context.Context, name string) error {
	if name == "" {
		return errs.ErrArgs.WrapMsg("name is empty")
	}
	if name[0] == '/' {
		return errs.ErrArgs.WrapMsg("name cannot start with `/`")
	}
	if err := checkValidObjectName(name); err != nil {
		return errs.ErrArgs.WrapMsg(err.Error())
	}
	opUserID := mcontext.GetOpUserID(ctx)
	if opUserID == "" {
		return errs.ErrNoPermission.WrapMsg("opUserID is empty")
	}
	if !authverify.CheckUserIsAdmin(ctx, opUserID) {
		if !strings.HasPrefix(name, opUserID+"/") {
			return errs.ErrNoPermission.WrapMsg(fmt.Sprintf("name must start with `%s/`", opUserID))
		}
	}
	return nil
}

func checkValidObjectNamePrefix(objectName string) error {
	if len(objectName) > 1024 {
		return errs.New("object name cannot be longer than 1024 characters")
	}
	if !utf8.ValidString(objectName) {
		return errs.New("object name with non UTF-8 strings are not supported")
	}
	return nil
}

func checkValidObjectName(objectName string) error {
	if strings.TrimSpace(objectName) == "" {
		return errs.New("object name cannot be empty")
	}
	return checkValidObjectNamePrefix(objectName)
}

func putUpdate[T any](update map[string]any, name string, val interface{ GetValuePtr() *T }) {
	ptrVal := val.GetValuePtr()
	if ptrVal == nil {
		return
	}
	update[name] = *ptrVal
}
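The upload-name rules above boil down to: non-empty, no leading slash, valid UTF-8 of at most 1024 characters, and, for non-admin callers, a name under the operator's own `<opUserID>/` prefix. A small test sketch, not part of the commit and assumed to live in a _test.go file in package third (using only "strings" and "testing"), covers the name-validation part:

package third

import (
	"strings"
	"testing"
)

func TestCheckValidObjectName(t *testing.T) {
	// A typical per-user object name is accepted.
	if err := checkValidObjectName("imUserID/openim/data/hash/abc123"); err != nil {
		t.Fatalf("expected name to be accepted: %v", err)
	}
	// A whitespace-only name is rejected.
	if err := checkValidObjectName("   "); err == nil {
		t.Fatal("expected a whitespace-only name to be rejected")
	}
	// Names longer than 1024 characters are rejected.
	if err := checkValidObjectName(strings.Repeat("a", 1025)); err == nil {
		t.Fatal("expected a name longer than 1024 characters to be rejected")
	}
}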