fix: deprecate minio and introduce s3 storage backend

This commit is contained in:
krau
2025-12-04 22:59:23 +08:00
parent 685047e463
commit 91814a83c7
17 changed files with 269 additions and 22 deletions

View File

@@ -6,6 +6,7 @@ import (
"io"
"path"
"strings"
"sync"
"github.com/charmbracelet/log"
config "github.com/krau/SaveAny-Bot/config/storage"
@@ -16,6 +17,10 @@ import (
"github.com/rs/xid"
)
var (
	// deprecatedOnce ensures the Minio deprecation warning is emitted only
	// once per process, however many Minio storages are initialized.
	deprecatedOnce sync.Once
)
type Minio struct {
config config.MinioStorageConfig
client *minio.Client
@@ -23,6 +28,9 @@ type Minio struct {
}
func (m *Minio) Init(ctx context.Context, cfg config.StorageConfig) error {
deprecatedOnce.Do(func() {
log.FromContext(ctx).Warn("Minio storage is deprecated, please use S3 storage type instead.")
})
minioConfig, ok := cfg.(*config.MinioStorageConfig)
if !ok {
return fmt.Errorf("failed to cast minio config")
@@ -73,7 +81,7 @@ func (m *Minio) Save(ctx context.Context, r io.Reader, storagePath string) error
candidate := storagePath
for i := 1; m.Exists(ctx, candidate); i++ {
candidate = fmt.Sprintf("%s_%d%s", base, i, ext)
if i > 1000 {
if i > 100 {
m.logger.Errorf("Too many attempts to find a unique filename for %s", storagePath)
candidate = fmt.Sprintf("%s_%s%s", base, xid.New().String(), ext)
break

130
storage/s3/s3.go Normal file
View File

@@ -0,0 +1,130 @@
package s3
import (
"context"
"fmt"
"io"
"path"
"strings"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/charmbracelet/log"
storconfig "github.com/krau/SaveAny-Bot/config/storage"
"github.com/krau/SaveAny-Bot/pkg/enums/ctxkey"
storenum "github.com/krau/SaveAny-Bot/pkg/enums/storage"
"github.com/rs/xid"
)
// S3 is a storage backend backed by an AWS S3 (or S3-compatible) bucket.
// Its zero value is unusable; Init must be called before any other method.
type S3 struct {
	config storconfig.S3StorageConfig // validated copy of the user-supplied config
	client *s3.Client                 // AWS SDK v2 client, created in Init
	logger *log.Logger                // prefixed logger: "s3[<name>]"
}
// Init casts cfg to an *S3StorageConfig, validates it, builds an AWS SDK v2
// client with static credentials, and verifies the configured bucket is
// reachable. It returns a non-nil error if the config is of the wrong type,
// fails validation, the AWS config cannot be loaded, or the bucket is not
// accessible.
func (m *S3) Init(ctx context.Context, cfg storconfig.StorageConfig) error {
	s3Config, ok := cfg.(*storconfig.S3StorageConfig)
	if !ok {
		return fmt.Errorf("failed to cast s3 config")
	}
	if err := s3Config.Validate(); err != nil {
		return err
	}
	m.config = *s3Config
	m.logger = log.FromContext(ctx).WithPrefix(fmt.Sprintf("s3[%s]", m.config.Name))
	// Static credentials from the config override any ambient AWS credential
	// chain; region is taken from the config as well.
	// NOTE(review): no custom endpoint is configured here, so S3-compatible
	// services (the MinIO migration path) may need a BaseEndpoint option —
	// confirm whether S3StorageConfig carries an endpoint field.
	awsCfg, err := config.LoadDefaultConfig(
		ctx,
		config.WithRegion(m.config.Region),
		config.WithCredentialsProvider(
			credentials.NewStaticCredentialsProvider(
				m.config.AccessKeyID,
				m.config.SecretAccessKey,
				"", // no session token
			),
		),
	)
	if err != nil {
		return fmt.Errorf("failed to load AWS config: %w", err)
	}
	m.client = s3.NewFromConfig(awsCfg)
	// Check if bucket exists (and is reachable with these credentials)
	// before declaring the storage usable.
	_, err = m.client.HeadBucket(ctx, &s3.HeadBucketInput{
		Bucket: aws.String(m.config.BucketName),
	})
	if err != nil {
		return fmt.Errorf("bucket %s not accessible: %w", m.config.BucketName, err)
	}
	return nil
}
// Type reports this backend's storage type identifier (storenum.S3).
func (m *S3) Type() storenum.StorageType {
	return storenum.S3
}
// Name returns the user-configured name of this storage instance.
func (m *S3) Name() string {
	return m.config.Name
}
// JoinStoragePath prefixes p with the configured base path and strips any
// leading slash, producing a valid S3 object key (keys must not start with "/").
func (m *S3) JoinStoragePath(p string) string {
	joined := path.Join(m.config.BasePath, p)
	return strings.TrimPrefix(joined, "/")
}
// Save uploads the contents of r to storagePath in the configured bucket.
// If an object already exists at that key, a numeric suffix is appended
// ("name_1.ext", "name_2.ext", ...); after 100 collisions a random xid
// suffix is used instead so the probe loop stays bounded. An optional
// content-length hint is read from the context under ctxkey.ContentLength.
func (m *S3) Save(ctx context.Context, r io.Reader, storagePath string) error {
	m.logger.Infof("Saving file from reader to %s", storagePath)

	suffix := path.Ext(storagePath)
	stem := strings.TrimSuffix(storagePath, suffix)

	// Probe for a free object key, starting with the requested path.
	key := storagePath
	for attempt := 1; m.Exists(ctx, key); attempt++ {
		if attempt > 100 {
			m.logger.Errorf("Too many attempts for unique filename: %s", storagePath)
			key = fmt.Sprintf("%s_%s%s", stem, xid.New().String(), suffix)
			break
		}
		key = fmt.Sprintf("%s_%d%s", stem, attempt, suffix)
	}

	// Content-length hint, if the caller stashed one in the context;
	// -1 means "unknown" and leaves ContentLength unset on the request.
	var contentLength int64 = -1
	if v := ctx.Value(ctxkey.ContentLength); v != nil {
		if n, ok := v.(int64); ok && n > 0 {
			contentLength = n
		}
	}

	// S3 PutObject needs either an explicit size or a streaming body.
	req := &s3.PutObjectInput{
		Bucket: aws.String(m.config.BucketName),
		Key:    aws.String(key),
		Body:   r,
	}
	if contentLength >= 0 {
		req.ContentLength = &contentLength
	}

	if _, err := m.client.PutObject(ctx, req); err != nil {
		return fmt.Errorf("failed to upload file to S3: %w", err)
	}
	return nil
}
// Exists reports whether an object is present at storagePath in the bucket.
// NOTE(review): any HeadObject error — including auth or network failures,
// not just 404 — is treated as "does not exist"; confirm this best-effort
// behavior is intended, since Save uses it to pick a free key.
func (m *S3) Exists(ctx context.Context, storagePath string) bool {
	m.logger.Debugf("Checking if file exists at %s", storagePath)
	in := &s3.HeadObjectInput{
		Bucket: aws.String(m.config.BucketName),
		Key:    aws.String(storagePath),
	}
	if _, err := m.client.HeadObject(ctx, in); err != nil {
		return false
	}
	return true
}

View File

@@ -10,6 +10,7 @@ import (
"github.com/krau/SaveAny-Bot/storage/alist"
"github.com/krau/SaveAny-Bot/storage/local"
"github.com/krau/SaveAny-Bot/storage/minio"
"github.com/krau/SaveAny-Bot/storage/s3"
"github.com/krau/SaveAny-Bot/storage/telegram"
"github.com/krau/SaveAny-Bot/storage/webdav"
)
@@ -37,6 +38,7 @@ var storageConstructors = map[storenum.StorageType]StorageConstructor{
storenum.Local: func() Storage { return new(local.Local) },
storenum.Webdav: func() Storage { return new(webdav.Webdav) },
storenum.Minio: func() Storage { return new(minio.Minio) },
storenum.S3: func() Storage { return new(s3.S3) },
storenum.Telegram: func() Storage { return new(telegram.Telegram) },
}