Compare commits
5 Commits
rclone
...
copilot/op
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
ce88dc70f4 | ||
|
|
154ea47e6b | ||
|
|
1b9c8cd2ad | ||
|
|
9ee9972dec | ||
|
|
bd70160555 |
@@ -26,7 +26,7 @@ RUN --mount=type=cache,target=/root/.cache/go-build \
|
||||
|
||||
FROM alpine:latest
|
||||
|
||||
RUN apk add --no-cache curl ffmpeg yt-dlp
|
||||
RUN apk add --no-cache curl ffmpeg
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
|
||||
@@ -26,16 +26,12 @@
|
||||
- Multi-user support
|
||||
- Auto organize files based on storage rules
|
||||
- Watch specified chats and auto-save messages, with filters
|
||||
- Transfer files between different storage backends
|
||||
- Integrate with yt-dlp to download and save media from 1000+ websites
|
||||
- Aria2 integration to download files from URLs/magnets and save to storages
|
||||
- Write JS parser plugins to save files from almost any website
|
||||
- Storage backends:
|
||||
- Alist
|
||||
- S3
|
||||
- WebDAV
|
||||
- Local filesystem
|
||||
- Rclone (via command line)
|
||||
- Telegram (re-upload to specified chats)
|
||||
|
||||
## 📦 Quick Start
|
||||
|
||||
@@ -24,16 +24,12 @@
|
||||
- 多用户使用
|
||||
- 基于存储规则的自动整理
|
||||
- 监听并自动转存指定聊天的消息, 支持过滤
|
||||
- 在不同存储端之间转存文件
|
||||
- 集成 yt-dlp, 从所支持的网站下载并转存媒体文件
|
||||
- 集成 Aria2, 支持直链/磁力下载和转存
|
||||
- 使用 js 编写解析器插件以转存任意网站的文件
|
||||
- 存储端支持:
|
||||
- Alist
|
||||
- S3
|
||||
- WebDAV
|
||||
- 本地磁盘
|
||||
- Rclone
|
||||
- Telegram (重传回指定聊天)
|
||||
|
||||
## 快速开始
|
||||
|
||||
@@ -101,8 +101,6 @@ func handleAddCallback(ctx *ext.Context, update *ext.Update) error {
|
||||
shortcut.CreateAndAddAria2TaskWithEdit(ctx, selectedStorage, dirPath, data.Aria2URIs, client, msgID, userID)
|
||||
case tasktype.TaskTypeYtdlp:
|
||||
shortcut.CreateAndAddYtdlpTaskWithEdit(ctx, selectedStorage, dirPath, data.YtdlpURLs, data.YtdlpFlags, msgID, userID)
|
||||
case tasktype.TaskTypeTransfer:
|
||||
return handleTransferCallback(ctx, userID, selectedStorage, dirPath, data, msgID)
|
||||
default:
|
||||
return fmt.Errorf("unexcept task type: %s", data.TaskType)
|
||||
}
|
||||
|
||||
@@ -31,7 +31,6 @@ var CommandHandlers = []DescCommandHandler{
|
||||
{"dl", i18nk.BotMsgCmdDl, handleDlCmd},
|
||||
{"aria2dl", i18nk.BotMsgCmdAria2dl, handleAria2DlCmd},
|
||||
{"ytdlp", i18nk.BotMsgCmdYtdlp, handleYtdlpCmd},
|
||||
{"transfer", i18nk.BotMsgCmdTransfer, handleTransferCmd},
|
||||
{"task", i18nk.BotMsgCmdTask, handleTaskCmd},
|
||||
{"cancel", i18nk.BotMsgCmdCancel, handleCancelCmd},
|
||||
{"config", i18nk.BotMsgCmdConfig, handleConfigCmd},
|
||||
|
||||
@@ -1,257 +0,0 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/celestix/gotgproto/dispatcher"
|
||||
"github.com/celestix/gotgproto/ext"
|
||||
"github.com/charmbracelet/log"
|
||||
"github.com/gotd/td/tg"
|
||||
"github.com/krau/SaveAny-Bot/client/bot/handlers/utils/msgelem"
|
||||
"github.com/krau/SaveAny-Bot/common/i18n"
|
||||
"github.com/krau/SaveAny-Bot/common/i18n/i18nk"
|
||||
"github.com/krau/SaveAny-Bot/common/utils/strutil"
|
||||
"github.com/krau/SaveAny-Bot/common/utils/tgutil"
|
||||
"github.com/krau/SaveAny-Bot/core"
|
||||
"github.com/krau/SaveAny-Bot/core/tasks/transfer"
|
||||
"github.com/krau/SaveAny-Bot/pkg/enums/tasktype"
|
||||
"github.com/krau/SaveAny-Bot/pkg/storagetypes"
|
||||
"github.com/krau/SaveAny-Bot/pkg/tcbdata"
|
||||
"github.com/krau/SaveAny-Bot/storage"
|
||||
"github.com/rs/xid"
|
||||
)
|
||||
|
||||
func handleTransferCmd(ctx *ext.Context, update *ext.Update) error {
|
||||
logger := log.FromContext(ctx)
|
||||
args := strutil.ParseArgsRespectQuotes(update.EffectiveMessage.Text)
|
||||
|
||||
if len(args) < 2 {
|
||||
ctx.Reply(update, ext.ReplyTextString(i18n.T(i18nk.BotMsgTransferUsage, nil)), nil)
|
||||
return dispatcher.EndGroups
|
||||
}
|
||||
|
||||
// Parse source: storage_name:/path
|
||||
sourceParts := strings.SplitN(args[1], ":", 2)
|
||||
if len(sourceParts) != 2 {
|
||||
ctx.Reply(update, ext.ReplyTextString(i18n.T(i18nk.BotMsgTransferErrorInvalidSource, nil)), nil)
|
||||
return dispatcher.EndGroups
|
||||
}
|
||||
sourceStorageName := sourceParts[0]
|
||||
sourcePath := sourceParts[1]
|
||||
|
||||
userID := update.GetUserChat().GetID()
|
||||
|
||||
// Get source storage
|
||||
sourceStorage, err := storage.GetStorageByUserIDAndName(ctx, userID, sourceStorageName)
|
||||
if err != nil {
|
||||
logger.Errorf("Failed to get source storage by user ID and name: %s", err)
|
||||
ctx.Reply(update, ext.ReplyTextString(i18n.T(i18nk.BotMsgTransferErrorStorageNotFound, map[string]any{
|
||||
"StorageName": sourceStorageName,
|
||||
"Error": err,
|
||||
})), nil)
|
||||
return dispatcher.EndGroups
|
||||
}
|
||||
|
||||
// Check if source storage supports listing
|
||||
listable, ok := sourceStorage.(storage.StorageListable)
|
||||
if !ok {
|
||||
ctx.Reply(update, ext.ReplyTextString(i18n.T(i18nk.BotMsgTransferErrorStorageNotListable, map[string]any{
|
||||
"StorageName": sourceStorageName,
|
||||
})), nil)
|
||||
return dispatcher.EndGroups
|
||||
}
|
||||
|
||||
// Check if source storage supports reading
|
||||
_, ok = sourceStorage.(storage.StorageReadable)
|
||||
if !ok {
|
||||
ctx.Reply(update, ext.ReplyTextString(i18n.T(i18nk.BotMsgTransferErrorStorageNotReadable, map[string]any{
|
||||
"StorageName": sourceStorageName,
|
||||
})), nil)
|
||||
return dispatcher.EndGroups
|
||||
}
|
||||
|
||||
// Fetch file list
|
||||
replied, err := ctx.Reply(update, ext.ReplyTextString(i18n.T(i18nk.BotMsgTransferInfoFetchingFiles, nil)), nil)
|
||||
if err != nil {
|
||||
logger.Errorf("Failed to reply: %s", err)
|
||||
return dispatcher.EndGroups
|
||||
}
|
||||
|
||||
files, err := listable.ListFiles(ctx, sourcePath)
|
||||
if err != nil {
|
||||
ctx.EditMessage(update.EffectiveChat().GetID(), &tg.MessagesEditMessageRequest{
|
||||
ID: replied.ID,
|
||||
Message: i18n.T(i18nk.BotMsgTransferErrorListFilesFailed, map[string]any{"Error": err}),
|
||||
})
|
||||
return dispatcher.EndGroups
|
||||
}
|
||||
|
||||
// Optional filter
|
||||
var filter *regexp.Regexp
|
||||
if len(args) >= 3 {
|
||||
filter, err = regexp.Compile(args[2])
|
||||
if err != nil {
|
||||
ctx.EditMessage(update.EffectiveChat().GetID(), &tg.MessagesEditMessageRequest{
|
||||
ID: replied.ID,
|
||||
Message: i18n.T(i18nk.BotMsgTransferErrorInvalidRegex, map[string]any{"Error": err}),
|
||||
})
|
||||
return dispatcher.EndGroups
|
||||
}
|
||||
}
|
||||
|
||||
// Filter files
|
||||
filteredFiles := make([]storagetypes.FileInfo, 0)
|
||||
for _, file := range files {
|
||||
if file.IsDir {
|
||||
continue
|
||||
}
|
||||
if filter != nil && !filter.MatchString(file.Name) {
|
||||
continue
|
||||
}
|
||||
filteredFiles = append(filteredFiles, file)
|
||||
}
|
||||
|
||||
if len(filteredFiles) == 0 {
|
||||
ctx.EditMessage(update.EffectiveChat().GetID(), &tg.MessagesEditMessageRequest{
|
||||
ID: replied.ID,
|
||||
Message: i18n.T(i18nk.BotMsgTransferErrorNoFilesToTransfer, nil),
|
||||
})
|
||||
return dispatcher.EndGroups
|
||||
}
|
||||
|
||||
// Prepare file paths for callback data
|
||||
filePaths := make([]string, 0, len(filteredFiles))
|
||||
var totalSize int64
|
||||
for _, file := range filteredFiles {
|
||||
filePaths = append(filePaths, file.Path)
|
||||
totalSize += file.Size
|
||||
}
|
||||
|
||||
// Build storage selection keyboard
|
||||
markup, err := msgelem.BuildAddSelectStorageKeyboard(storage.GetUserStorages(ctx, userID), tcbdata.Add{
|
||||
TaskType: tasktype.TaskTypeTransfer,
|
||||
TransferSourceStorName: sourceStorageName,
|
||||
TransferSourcePath: sourcePath,
|
||||
TransferFiles: filePaths,
|
||||
})
|
||||
if err != nil {
|
||||
logger.Errorf("Failed to build storage selection keyboard: %s", err)
|
||||
ctx.EditMessage(update.EffectiveChat().GetID(), &tg.MessagesEditMessageRequest{
|
||||
ID: replied.ID,
|
||||
Message: i18n.T(i18nk.BotMsgTransferErrorBuildStorageSelectKeyboardFailed, map[string]any{"Error": err}),
|
||||
})
|
||||
return dispatcher.EndGroups
|
||||
}
|
||||
|
||||
ctx.EditMessage(update.EffectiveChat().GetID(), &tg.MessagesEditMessageRequest{
|
||||
ID: replied.ID,
|
||||
Message: i18n.T(i18nk.BotMsgTransferInfoFilesSelectStorage, map[string]any{
|
||||
"Count": len(filteredFiles),
|
||||
"SizeMB": fmt.Sprintf("%.2f", float64(totalSize)/(1024*1024)),
|
||||
}),
|
||||
ReplyMarkup: markup,
|
||||
})
|
||||
|
||||
return dispatcher.EndGroups
|
||||
}
|
||||
|
||||
func handleTransferCallback(ctx *ext.Context, userID int64, targetStorage storage.Storage, dirPath string, data tcbdata.Add, msgID int) error {
|
||||
logger := log.FromContext(ctx)
|
||||
|
||||
// Get source storage
|
||||
sourceStorage, err := storage.GetStorageByUserIDAndName(ctx, userID, data.TransferSourceStorName)
|
||||
if err != nil {
|
||||
logger.Errorf("Failed to get source storage: %s", err)
|
||||
ctx.EditMessage(userID, &tg.MessagesEditMessageRequest{
|
||||
ID: msgID,
|
||||
Message: i18n.T(i18nk.BotMsgTransferErrorStorageNotFound, map[string]any{"StorageName": data.TransferSourceStorName, "Error": err}),
|
||||
})
|
||||
return dispatcher.EndGroups
|
||||
}
|
||||
|
||||
// Check if source storage supports listing
|
||||
listable, ok := sourceStorage.(storage.StorageListable)
|
||||
if !ok {
|
||||
ctx.EditMessage(userID, &tg.MessagesEditMessageRequest{
|
||||
ID: msgID,
|
||||
Message: i18n.T(i18nk.BotMsgTransferErrorStorageNotListable, map[string]any{"StorageName": data.TransferSourceStorName}),
|
||||
})
|
||||
return dispatcher.EndGroups
|
||||
}
|
||||
|
||||
// Re-fetch files to get FileInfo (since we only stored paths)
|
||||
// This is necessary to get size and other metadata
|
||||
ctx.EditMessage(userID, &tg.MessagesEditMessageRequest{
|
||||
ID: msgID,
|
||||
Message: i18n.T(i18nk.BotMsgTransferInfoFetchingFiles, nil),
|
||||
})
|
||||
|
||||
allFiles, err := listable.ListFiles(ctx, data.TransferSourcePath)
|
||||
if err != nil {
|
||||
ctx.EditMessage(userID, &tg.MessagesEditMessageRequest{
|
||||
ID: msgID,
|
||||
Message: i18n.T(i18nk.BotMsgTransferErrorListFilesFailed, map[string]any{"Error": err}),
|
||||
})
|
||||
return dispatcher.EndGroups
|
||||
}
|
||||
|
||||
// Create a map for quick lookup
|
||||
fileMap := make(map[string]storagetypes.FileInfo)
|
||||
for _, file := range allFiles {
|
||||
fileMap[file.Path] = file
|
||||
}
|
||||
|
||||
// Build task elements for the selected files
|
||||
elems := make([]transfer.TaskElement, 0, len(data.TransferFiles))
|
||||
var totalSize int64
|
||||
for _, filePath := range data.TransferFiles {
|
||||
fileInfo, ok := fileMap[filePath]
|
||||
if !ok {
|
||||
logger.Warnf("File not found in source storage: %s", filePath)
|
||||
continue
|
||||
}
|
||||
elem := transfer.NewTaskElement(sourceStorage, fileInfo, targetStorage, dirPath)
|
||||
elems = append(elems, *elem)
|
||||
totalSize += fileInfo.Size
|
||||
}
|
||||
|
||||
if len(elems) == 0 {
|
||||
ctx.EditMessage(userID, &tg.MessagesEditMessageRequest{
|
||||
ID: msgID,
|
||||
Message: i18n.T(i18nk.BotMsgTransferErrorNoFilesToTransfer, nil),
|
||||
})
|
||||
return dispatcher.EndGroups
|
||||
}
|
||||
|
||||
// Create and add task
|
||||
taskID := xid.New().String()
|
||||
injectCtx := tgutil.ExtWithContext(ctx.Context, ctx)
|
||||
task := transfer.NewTransferTask(
|
||||
taskID,
|
||||
injectCtx,
|
||||
elems,
|
||||
transfer.NewProgressTracker(msgID, userID),
|
||||
true, // IgnoreErrors
|
||||
)
|
||||
|
||||
if err := core.AddTask(injectCtx, task); err != nil {
|
||||
ctx.EditMessage(userID, &tg.MessagesEditMessageRequest{
|
||||
ID: msgID,
|
||||
Message: i18n.T(i18nk.BotMsgTransferErrorAddTaskFailed, map[string]any{"Error": err}),
|
||||
})
|
||||
return dispatcher.EndGroups
|
||||
}
|
||||
|
||||
ctx.EditMessage(userID, &tg.MessagesEditMessageRequest{
|
||||
ID: msgID,
|
||||
Message: i18n.T(i18nk.BotMsgTransferInfoTaskAdded, map[string]any{
|
||||
"Count": len(elems),
|
||||
"SizeMB": fmt.Sprintf("%.2f", float64(totalSize)/(1024*1024)),
|
||||
"TaskID": taskID,
|
||||
}),
|
||||
})
|
||||
|
||||
return dispatcher.EndGroups
|
||||
}
|
||||
@@ -53,10 +53,6 @@ func BuildAddSelectStorageKeyboard(stors []storage.Storage, adddata tcbdata.Add)
|
||||
Aria2URIs: adddata.Aria2URIs,
|
||||
YtdlpURLs: adddata.YtdlpURLs,
|
||||
YtdlpFlags: adddata.YtdlpFlags,
|
||||
|
||||
TransferSourceStorName: adddata.TransferSourceStorName,
|
||||
TransferSourcePath: adddata.TransferSourcePath,
|
||||
TransferFiles: adddata.TransferFiles,
|
||||
}
|
||||
dataid := xid.New().String()
|
||||
err := cache.Set(dataid, data)
|
||||
|
||||
@@ -46,7 +46,7 @@ func CreateAndAddAria2TaskWithEdit(ctx *ext.Context, stor storage.Storage, dirPa
|
||||
logger.Infof("Aria2 download added with GID: %s", gid)
|
||||
|
||||
// Create task with the GID
|
||||
task := aria2dl.NewTask(xid.New().String(), injectCtx, gid, uris, aria2Client, stor, dirPath, aria2dl.NewProgress(msgID, userID))
|
||||
task := aria2dl.NewTask(xid.New().String(), injectCtx, gid, uris, aria2Client, stor, stor.JoinStoragePath(dirPath), aria2dl.NewProgress(msgID, userID))
|
||||
if err := core.AddTask(injectCtx, task); err != nil {
|
||||
logger.Errorf("Failed to add task: %s", err)
|
||||
ctx.EditMessage(userID, &tg.MessagesEditMessageRequest{
|
||||
|
||||
@@ -16,7 +16,7 @@ import (
|
||||
|
||||
func CreateAndAddDirectTaskWithEdit(ctx *ext.Context, stor storage.Storage, dirPath string, links []string, msgID int, userID int64) error {
|
||||
injectCtx := tgutil.ExtWithContext(ctx.Context, ctx)
|
||||
task := directlinks.NewTask(xid.New().String(), injectCtx, links, stor, dirPath, directlinks.NewProgress(msgID, userID))
|
||||
task := directlinks.NewTask(xid.New().String(), injectCtx, links, stor, stor.JoinStoragePath(dirPath), directlinks.NewProgress(msgID, userID))
|
||||
if err := core.AddTask(injectCtx, task); err != nil {
|
||||
log.FromContext(ctx).Errorf("Failed to add task: %s", err)
|
||||
ctx.EditMessage(userID, &tg.MessagesEditMessageRequest{
|
||||
|
||||
@@ -18,7 +18,7 @@ import (
|
||||
|
||||
func CreateAndAddParsedTaskWithEdit(ctx *ext.Context, stor storage.Storage, dirPath string, item *parser.Item, msgID int, userID int64) error {
|
||||
injectCtx := tgutil.ExtWithContext(ctx.Context, ctx)
|
||||
task := parsed.NewTask(xid.New().String(), injectCtx, stor, dirPath, item, parsed.NewProgress(msgID, userID))
|
||||
task := parsed.NewTask(xid.New().String(), injectCtx, stor, stor.JoinStoragePath(dirPath), item, parsed.NewProgress(msgID, userID))
|
||||
if err := core.AddTask(injectCtx, task); err != nil {
|
||||
log.FromContext(ctx).Errorf("Failed to add task: %s", err)
|
||||
ctx.EditMessage(userID, &tg.MessagesEditMessageRequest{
|
||||
|
||||
@@ -59,7 +59,7 @@ func CreateAndAddTGFileTaskWithEdit(ctx *ext.Context, userID int64, stor storage
|
||||
}
|
||||
}
|
||||
startCreateTask:
|
||||
storagePath := path.Join(dirPath, file.Name())
|
||||
storagePath := stor.JoinStoragePath(path.Join(dirPath, file.Name()))
|
||||
injectCtx := tgutil.ExtWithContext(ctx.Context, ctx)
|
||||
taskid := xid.New().String()
|
||||
task, err := tftask.NewTGFileTask(taskid, injectCtx, file, stor, storagePath,
|
||||
@@ -151,7 +151,7 @@ func CreateAndAddBatchTGFileTaskWithEdit(ctx *ext.Context, userID int64, stor st
|
||||
}
|
||||
}
|
||||
if !dirPath.NeedNewForAlbum() {
|
||||
storPath := path.Join(dirPath.String(), file.Name())
|
||||
storPath := fileStor.JoinStoragePath(path.Join(dirPath.String(), file.Name()))
|
||||
elem, err := batchtfile.NewTaskElement(fileStor, storPath, file)
|
||||
if err != nil {
|
||||
logger.Errorf("Failed to create task element: %s", err)
|
||||
@@ -188,7 +188,7 @@ func CreateAndAddBatchTGFileTaskWithEdit(ctx *ext.Context, userID int64, stor st
|
||||
albumDir := strings.TrimSuffix(path.Base(afiles[0].file.Name()), path.Ext(afiles[0].file.Name()))
|
||||
albumStor := afiles[0].storage
|
||||
for _, af := range afiles {
|
||||
afstorPath := path.Join(dirPath, albumDir, af.file.Name())
|
||||
afstorPath := af.storage.JoinStoragePath(path.Join(dirPath, albumDir, af.file.Name()))
|
||||
elem, err := batchtfile.NewTaskElement(albumStor, afstorPath, af.file)
|
||||
if err != nil {
|
||||
logger.Errorf("Failed to create task element for album file: %s", err)
|
||||
|
||||
@@ -32,7 +32,7 @@ func CreateAndAddtelegraphWithEdit(
|
||||
tphpage.Path,
|
||||
pics,
|
||||
stor,
|
||||
dirPath,
|
||||
stor.JoinStoragePath(dirPath),
|
||||
tphutil.DefaultClient(),
|
||||
tphtask.NewProgress(trackMsgID, userID),
|
||||
)
|
||||
|
||||
@@ -38,7 +38,7 @@ func CreateAndAddYtdlpTaskWithEdit(ctx *ext.Context, stor storage.Storage, dirPa
|
||||
urls,
|
||||
flags,
|
||||
stor,
|
||||
dirPath,
|
||||
stor.JoinStoragePath(dirPath),
|
||||
ytdlp.NewProgress(msgID, userID),
|
||||
)
|
||||
|
||||
|
||||
@@ -309,7 +309,7 @@ func listenMediaMessageEvent(ch chan userclient.MediaMessageEvent) {
|
||||
}
|
||||
}
|
||||
startCreateTask:
|
||||
storagePath := path.Join(dirPath, file.Name())
|
||||
storagePath := stor.JoinStoragePath(path.Join(dirPath, file.Name()))
|
||||
injectCtx := tgutil.ExtWithContext(ctx.Context, ctx)
|
||||
taskid := xid.New().String()
|
||||
task, err := coretfile.NewTGFileTask(taskid, injectCtx, file, stor, storagePath, nil)
|
||||
@@ -403,7 +403,7 @@ func processWatchMediaGroup(ctx *ext.Context, user *database.User, stor storage.
|
||||
logger.Infof("Creating album folder for group %d: %s with %d files", groupID, albumDir, len(afiles))
|
||||
|
||||
for _, af := range afiles {
|
||||
afstorPath := path.Join(dirPath, albumDir, af.file.Name())
|
||||
afstorPath := af.storage.JoinStoragePath(path.Join(dirPath, albumDir, af.file.Name()))
|
||||
taskid := xid.New().String()
|
||||
task, err := coretfile.NewTGFileTask(taskid, injectCtx, af.file, albumStor, afstorPath, nil)
|
||||
if err != nil {
|
||||
|
||||
@@ -90,7 +90,7 @@ func Upload(cmd *cobra.Command, args []string) error {
|
||||
fileName := fileInfo.Name()
|
||||
fileSize := fileInfo.Size()
|
||||
|
||||
uploadPath := path.Join(dirPath, fileName)
|
||||
uploadPath := stor.JoinStoragePath(path.Join(dirPath, fileName))
|
||||
|
||||
ctx = context.WithValue(ctx, ctxkey.ContentLength, fileSize)
|
||||
ctx = tgutil.ExtWithContext(ctx, bot.ExtContext())
|
||||
|
||||
@@ -21,7 +21,6 @@ const (
|
||||
BotMsgCmdDl Key = "bot.msg.cmd.dl"
|
||||
BotMsgCmdFnametmpl Key = "bot.msg.cmd.fnametmpl"
|
||||
BotMsgCmdHelp Key = "bot.msg.cmd.help"
|
||||
BotMsgCmdImport Key = "bot.msg.cmd.import"
|
||||
BotMsgCmdLswatch Key = "bot.msg.cmd.lswatch"
|
||||
BotMsgCmdParser Key = "bot.msg.cmd.parser"
|
||||
BotMsgCmdRule Key = "bot.msg.cmd.rule"
|
||||
@@ -31,7 +30,6 @@ const (
|
||||
BotMsgCmdStorage Key = "bot.msg.cmd.storage"
|
||||
BotMsgCmdSyncpeers Key = "bot.msg.cmd.syncpeers"
|
||||
BotMsgCmdTask Key = "bot.msg.cmd.task"
|
||||
BotMsgCmdTransfer Key = "bot.msg.cmd.transfer"
|
||||
BotMsgCmdUnwatch Key = "bot.msg.cmd.unwatch"
|
||||
BotMsgCmdUpdate Key = "bot.msg.cmd.update"
|
||||
BotMsgCmdWatch Key = "bot.msg.cmd.watch"
|
||||
@@ -163,20 +161,6 @@ const (
|
||||
BotMsgProgressTelegraphProgressPrefix Key = "bot.msg.progress.telegraph_progress_prefix"
|
||||
BotMsgProgressTelegraphStartPrefix Key = "bot.msg.progress.telegraph_start_prefix"
|
||||
BotMsgProgressTotalSizePrefix Key = "bot.msg.progress.total_size_prefix"
|
||||
BotMsgProgressTransferAvgSpeedPrefix Key = "bot.msg.progress.transfer_avg_speed_prefix"
|
||||
BotMsgProgressTransferElapsedTimePrefix Key = "bot.msg.progress.transfer_elapsed_time_prefix"
|
||||
BotMsgProgressTransferFailedFilesPrefix Key = "bot.msg.progress.transfer_failed_files_prefix"
|
||||
BotMsgProgressTransferFailedPrefix Key = "bot.msg.progress.transfer_failed_prefix"
|
||||
BotMsgProgressTransferProcessingMore Key = "bot.msg.progress.transfer_processing_more"
|
||||
BotMsgProgressTransferProcessingPrefix Key = "bot.msg.progress.transfer_processing_prefix"
|
||||
BotMsgProgressTransferProgressPrefix Key = "bot.msg.progress.transfer_progress_prefix"
|
||||
BotMsgProgressTransferRemainingTimePrefix Key = "bot.msg.progress.transfer_remaining_time_prefix"
|
||||
BotMsgProgressTransferSpeedPrefix Key = "bot.msg.progress.transfer_speed_prefix"
|
||||
BotMsgProgressTransferStartPrefix Key = "bot.msg.progress.transfer_start_prefix"
|
||||
BotMsgProgressTransferSuccessPrefix Key = "bot.msg.progress.transfer_success_prefix"
|
||||
BotMsgProgressTransferTotalFilesPrefix Key = "bot.msg.progress.transfer_total_files_prefix"
|
||||
BotMsgProgressTransferTotalSizePrefix Key = "bot.msg.progress.transfer_total_size_prefix"
|
||||
BotMsgProgressTransferUploadedPrefix Key = "bot.msg.progress.transfer_uploaded_prefix"
|
||||
BotMsgProgressYtdlpDone Key = "bot.msg.progress.ytdlp_done"
|
||||
BotMsgProgressYtdlpDownloading Key = "bot.msg.progress.ytdlp_downloading"
|
||||
BotMsgProgressYtdlpStart Key = "bot.msg.progress.ytdlp_start"
|
||||
@@ -232,22 +216,6 @@ const (
|
||||
BotMsgTelegraphInfoPicCountPrefix Key = "bot.msg.telegraph.info_pic_count_prefix"
|
||||
BotMsgTelegraphInfoPromptSelectStorage Key = "bot.msg.telegraph.info_prompt_select_storage"
|
||||
BotMsgTelegraphInfoTitlePrefix Key = "bot.msg.telegraph.info_title_prefix"
|
||||
BotMsgTransferErrorAddTaskFailed Key = "bot.msg.transfer.error_add_task_failed"
|
||||
BotMsgTransferErrorBuildStorageSelectKeyboardFailed Key = "bot.msg.transfer.error_build_storage_select_keyboard_failed"
|
||||
BotMsgTransferErrorInvalidRegex Key = "bot.msg.transfer.error_invalid_regex"
|
||||
BotMsgTransferErrorInvalidSource Key = "bot.msg.transfer.error_invalid_source"
|
||||
BotMsgTransferErrorInvalidTarget Key = "bot.msg.transfer.error_invalid_target"
|
||||
BotMsgTransferErrorListFilesFailed Key = "bot.msg.transfer.error_list_files_failed"
|
||||
BotMsgTransferErrorNoFilesToTransfer Key = "bot.msg.transfer.error_no_files_to_transfer"
|
||||
BotMsgTransferErrorStorageNotFound Key = "bot.msg.transfer.error_storage_not_found"
|
||||
BotMsgTransferErrorStorageNotListable Key = "bot.msg.transfer.error_storage_not_listable"
|
||||
BotMsgTransferErrorStorageNotReadable Key = "bot.msg.transfer.error_storage_not_readable"
|
||||
BotMsgTransferErrorTargetNotFound Key = "bot.msg.transfer.error_target_not_found"
|
||||
BotMsgTransferInfoFetchingFiles Key = "bot.msg.transfer.info_fetching_files"
|
||||
BotMsgTransferInfoFilesSelectStorage Key = "bot.msg.transfer.info_files_select_storage"
|
||||
BotMsgTransferInfoTaskAdded Key = "bot.msg.transfer.info_task_added"
|
||||
BotMsgTransferStartStats Key = "bot.msg.transfer.start_stats"
|
||||
BotMsgTransferUsage Key = "bot.msg.transfer.usage"
|
||||
BotMsgUpdateButtonUpgrade Key = "bot.msg.update.button_upgrade"
|
||||
BotMsgUpdateErrorCheckLatestFailed Key = "bot.msg.update.error_check_latest_failed"
|
||||
BotMsgUpdateErrorNoReleaseFound Key = "bot.msg.update.error_no_release_found"
|
||||
|
||||
@@ -29,7 +29,6 @@ bot:
|
||||
/silent - Toggle silent mode
|
||||
/storage - Set default storage
|
||||
/save [custom filename] - Save file
|
||||
/import <storage_name> <dir_path> [channel_id] [filter] - Import files from storage to Telegram
|
||||
/dir - Manage storage directories
|
||||
/rule - Manage rules
|
||||
/config - Modify configuration
|
||||
@@ -53,8 +52,6 @@ bot:
|
||||
dl: "Download files from given links"
|
||||
aria2dl: "Download files using Aria2"
|
||||
ytdlp: "Download video/audio using yt-dlp"
|
||||
import: "Import files from storage to Telegram"
|
||||
transfer: "Transfer files between storages"
|
||||
task: "Manage task queue"
|
||||
cancel: "Cancel task"
|
||||
watch: "Watch chats (UserBot)"
|
||||
@@ -297,28 +294,6 @@ bot:
|
||||
info_urls_select_storage: "Found {{.Count}} links, please select storage"
|
||||
info_downloading: "Downloading via yt-dlp..."
|
||||
error_download_failed: "yt-dlp download failed: {{.Error}}"
|
||||
transfer:
|
||||
usage: |
|
||||
Usage: /transfer <source_storage>:/<source_path> [filter]
|
||||
Examples:
|
||||
/transfer local1:/downloads
|
||||
/transfer alist1:/media/photos
|
||||
/transfer webdav1:/files ".*\.mp4$"
|
||||
error_invalid_source: "Invalid source path format, should be: storage_name:/path"
|
||||
error_invalid_target: "Invalid target path format, should be: storage_name:/path"
|
||||
error_storage_not_found: "Storage '{{.StorageName}}' not found or access denied: {{.Error}}"
|
||||
error_storage_not_listable: "Storage '{{.StorageName}}' does not support listing files"
|
||||
error_storage_not_readable: "Storage '{{.StorageName}}' does not support reading files"
|
||||
error_target_not_found: "Target storage '{{.StorageName}}' not found or access denied: {{.Error}}"
|
||||
info_fetching_files: "Fetching file list..."
|
||||
error_list_files_failed: "Failed to list files: {{.Error}}"
|
||||
error_invalid_regex: "Invalid regular expression: {{.Error}}"
|
||||
error_no_files_to_transfer: "No files to transfer in directory"
|
||||
error_add_task_failed: "Failed to add task: {{.Error}}"
|
||||
info_task_added: "Added {{.Count}} files to transfer queue\nTotal size: {{.SizeMB}} MB\nTask ID: {{.TaskID}}"
|
||||
start_stats: "Total files: {{.Count}}\nTotal size: {{.SizeMB}} MB"
|
||||
info_files_select_storage: "Total {{.Count}} files ({{.SizeMB}} MB), please select target storage"
|
||||
error_build_storage_select_keyboard_failed: "Failed to build storage selection keyboard: {{.Error}}"
|
||||
cancel:
|
||||
usage: "Usage: /cancel <task_id>"
|
||||
error_cancel_failed: "Failed to cancel task: {{.Error}}"
|
||||
@@ -367,20 +342,6 @@ bot:
|
||||
ytdlp_done: "yt-dlp download completed and transferred ({{.Count}} files)\n"
|
||||
downloaded_prefix: "\nDownloaded: "
|
||||
current_speed_prefix: "\nCurrent speed: "
|
||||
transfer_start_prefix: "Transfering: "
|
||||
transfer_progress_prefix: "Transfer progress: "
|
||||
transfer_uploaded_prefix: "\nUploaded: "
|
||||
transfer_speed_prefix: "\nSpeed: "
|
||||
transfer_remaining_time_prefix: "\nRemaining time: "
|
||||
transfer_processing_prefix: "\nProcessing:\n"
|
||||
transfer_processing_more: "...and {{.Count}} more files\n"
|
||||
transfer_failed_prefix: "Transfer failed\n"
|
||||
transfer_success_prefix: "Transfer completed\n"
|
||||
transfer_total_files_prefix: "\nTotal files: "
|
||||
transfer_total_size_prefix: "\nTotal size: "
|
||||
transfer_elapsed_time_prefix: "\nElapsed time: "
|
||||
transfer_avg_speed_prefix: "\nAverage speed: "
|
||||
transfer_failed_files_prefix: "\nFailed files: "
|
||||
syncpeers:
|
||||
start: "Starting to sync peers..."
|
||||
done: "Peer sync completed, total {{.Count}} chats synced"
|
||||
|
||||
@@ -30,7 +30,6 @@ bot:
|
||||
/storage - 设置默认存储位置
|
||||
/save [自定义文件名] - 保存文件
|
||||
/dl <链接1> <链接2> ... - 下载给定链接的文件
|
||||
/import <存储名> <目录路径> [频道ID] [过滤器] - 从存储端导入文件到 Telegram
|
||||
/dir - 管理存储目录
|
||||
/rule - 管理规则
|
||||
/config - 修改配置
|
||||
@@ -54,8 +53,6 @@ bot:
|
||||
dl: "下载给定链接的文件"
|
||||
aria2dl: "使用 Aria2 下载给定链接的文件"
|
||||
ytdlp: "使用 yt-dlp 下载视频/音频"
|
||||
import: "从存储端导入文件到 Telegram"
|
||||
transfer: "在存储端之间传输文件"
|
||||
task: "管理任务队列"
|
||||
cancel: "取消任务"
|
||||
watch: "监听聊天(UserBot)"
|
||||
@@ -298,28 +295,6 @@ bot:
|
||||
info_urls_select_storage: "共 {{.Count}} 个链接, 请选择存储位置"
|
||||
info_downloading: "正在通过 yt-dlp 下载..."
|
||||
error_download_failed: "yt-dlp 下载失败: {{.Error}}"
|
||||
transfer:
|
||||
usage: |
|
||||
用法: /transfer <source_storage>:/<source_path> [filter]
|
||||
示例:
|
||||
/transfer local1:/downloads
|
||||
/transfer alist1:/media/photos
|
||||
/transfer webdav1:/files ".*\.mp4$"
|
||||
error_invalid_source: "源路径格式无效,应为: storage_name:/path"
|
||||
error_invalid_target: "目标路径格式无效,应为: storage_name:/path"
|
||||
error_storage_not_found: "存储端 '{{.StorageName}}' 不存在或您无权访问: {{.Error}}"
|
||||
error_storage_not_listable: "存储端 '{{.StorageName}}' 不支持列举文件功能"
|
||||
error_storage_not_readable: "存储端 '{{.StorageName}}' 不支持读取文件功能"
|
||||
error_target_not_found: "目标存储端 '{{.StorageName}}' 不存在或您无权访问: {{.Error}}"
|
||||
info_fetching_files: "正在获取文件列表..."
|
||||
error_list_files_failed: "获取文件列表失败: {{.Error}}"
|
||||
error_invalid_regex: "正则表达式无效: {{.Error}}"
|
||||
error_no_files_to_transfer: "目录中没有可传输的文件"
|
||||
error_add_task_failed: "添加任务失败: {{.Error}}"
|
||||
info_task_added: "已添加 {{.Count}} 个文件到传输队列\n总大小: {{.SizeMB}} MB\n任务 ID: {{.TaskID}}"
|
||||
start_stats: "总文件数: {{.Count}}\n总大小: {{.SizeMB}} MB"
|
||||
info_files_select_storage: "共 {{.Count}} 个文件 (总大小: {{.SizeMB}} MB),请选择目标存储位置"
|
||||
error_build_storage_select_keyboard_failed: "构建存储选择键盘失败: {{.Error}}"
|
||||
cancel:
|
||||
usage: "用法: /cancel <task_id>"
|
||||
error_cancel_failed: "取消任务失败: {{.Error}}"
|
||||
@@ -368,20 +343,6 @@ bot:
|
||||
ytdlp_done: "yt-dlp 下载完成并已转存 ({{.Count}} 个文件)\n"
|
||||
downloaded_prefix: "\n已下载: "
|
||||
current_speed_prefix: "\n当前速度: "
|
||||
transfer_start_prefix: "正在转存: "
|
||||
transfer_progress_prefix: "转存进度: "
|
||||
transfer_uploaded_prefix: "\n已上传: "
|
||||
transfer_speed_prefix: "\n速度: "
|
||||
transfer_remaining_time_prefix: "\n剩余时间: "
|
||||
transfer_processing_prefix: "\n正在处理:\n"
|
||||
transfer_processing_more: "...和其他 {{.Count}} 个文件\n"
|
||||
transfer_failed_prefix: "转存失败\n"
|
||||
transfer_success_prefix: "转存完成\n"
|
||||
transfer_total_files_prefix: "\n总文件数: "
|
||||
transfer_total_size_prefix: "\n总大小: "
|
||||
transfer_elapsed_time_prefix: "\n耗时: "
|
||||
transfer_avg_speed_prefix: "\n平均速度: "
|
||||
transfer_failed_files_prefix: "\n失败文件数: "
|
||||
syncpeers:
|
||||
start: "正在同步对话列表..."
|
||||
success: "对话列表同步完成, 共同步 {{.Count}} 个对话"
|
||||
@@ -392,4 +353,4 @@ bot:
|
||||
info_adding_aria2_download: "正在添加 Aria2 下载任务..."
|
||||
error_adding_aria2_download: "添加 Aria2 下载任务失败: {{.Error}}"
|
||||
info_aria2_download_added: "Aria2 下载任务已添加, GID: {{.GID}}"
|
||||
info_select_storage: "请选择存储位置, 选择后将添加到 Aria2 下载队列"
|
||||
info_select_storage: "请选择存储位置, 选择后将添加到 Aria2 下载队列"
|
||||
@@ -1,9 +1,6 @@
|
||||
package dlutil
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
import "time"
|
||||
|
||||
var threadsLevels = []struct {
|
||||
threads int
|
||||
@@ -34,23 +31,3 @@ func GetSpeed(downloaded int64, startTime time.Time) float64 {
|
||||
}
|
||||
return float64(downloaded) / elapsed
|
||||
}
|
||||
|
||||
// FormatSize formats a byte size as a human-readable string
|
||||
func FormatSize(bytes int64) string {
|
||||
const (
|
||||
KB = 1024
|
||||
MB = KB * 1024
|
||||
GB = MB * 1024
|
||||
)
|
||||
|
||||
switch {
|
||||
case bytes >= GB:
|
||||
return fmt.Sprintf("%.2f GB", float64(bytes)/float64(GB))
|
||||
case bytes >= MB:
|
||||
return fmt.Sprintf("%.2f MB", float64(bytes)/float64(MB))
|
||||
case bytes >= KB:
|
||||
return fmt.Sprintf("%.2f KB", float64(bytes)/float64(KB))
|
||||
default:
|
||||
return fmt.Sprintf("%d B", bytes)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -16,7 +16,6 @@ var storageFactories = map[storenum.StorageType]func(cfg *BaseConfig) (StorageCo
|
||||
storenum.Minio: createStorageConfig(&MinioStorageConfig{}),
|
||||
storenum.S3: createStorageConfig(&S3StorageConfig{}),
|
||||
storenum.Telegram: createStorageConfig(&TelegramStorageConfig{}),
|
||||
storenum.Rclone: createStorageConfig(&RcloneStorageConfig{}),
|
||||
}
|
||||
|
||||
func createStorageConfig(configType StorageConfig) func(cfg *BaseConfig) (StorageConfig, error) {
|
||||
|
||||
@@ -1,33 +0,0 @@
|
||||
package storage
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
storenum "github.com/krau/SaveAny-Bot/pkg/enums/storage"
|
||||
)
|
||||
|
||||
type RcloneStorageConfig struct {
|
||||
BaseConfig
|
||||
// The name of the remote as defined in rclone config
|
||||
Remote string `toml:"remote" mapstructure:"remote" json:"remote"`
|
||||
BasePath string `toml:"base_path" mapstructure:"base_path" json:"base_path"`
|
||||
// The path to the rclone config file, if not using the default
|
||||
ConfigPath string `toml:"config_path" mapstructure:"config_path" json:"config_path"`
|
||||
// Additional flags to pass to rclone commands
|
||||
Flags []string `toml:"flags" mapstructure:"flags" json:"flags"`
|
||||
}
|
||||
|
||||
func (r *RcloneStorageConfig) Validate() error {
|
||||
if r.Remote == "" {
|
||||
return fmt.Errorf("remote is required for rclone storage")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *RcloneStorageConfig) GetType() storenum.StorageType {
|
||||
return storenum.Rclone
|
||||
}
|
||||
|
||||
func (r *RcloneStorageConfig) GetName() string {
|
||||
return r.Name
|
||||
}
|
||||
@@ -45,17 +45,9 @@ func (t *Task) Execute(ctx context.Context) error {
|
||||
fetchedTotalBytes.Add(resp.ContentLength)
|
||||
file.Size = resp.ContentLength
|
||||
if name := resp.Header.Get("Content-Disposition"); name != "" {
|
||||
// Set file name
|
||||
filename := parseFilename(name)
|
||||
if filename != "" {
|
||||
file.Name = filename
|
||||
}
|
||||
}
|
||||
// extract filename from URL if Content-Disposition is empty or invalid
|
||||
if file.Name == "" {
|
||||
file.Name = parseFilenameFromURL(file.URL)
|
||||
}
|
||||
if file.Name == "" {
|
||||
return fmt.Errorf("failed to determine filename for %s: Content-Disposition header is empty and URL does not contain a valid filename", file.URL)
|
||||
file.Name = filename
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
@@ -76,9 +76,6 @@ func (t *Task) StorageName() string {
|
||||
|
||||
// StoragePath implements TaskInfo.
|
||||
func (t *Task) StoragePath() string {
|
||||
if len(t.files) == 1 {
|
||||
return t.StorPath + "/" + t.files[0].Name
|
||||
}
|
||||
return t.StorPath
|
||||
}
|
||||
|
||||
|
||||
@@ -144,41 +144,6 @@ func tryDecodeGBK(s string) string {
|
||||
return ""
|
||||
}
|
||||
|
||||
// parseFilenameFromURL extracts filename from URL path
|
||||
// This is used as a fallback when Content-Disposition is not available
|
||||
func parseFilenameFromURL(rawURL string) string {
|
||||
parsed, err := url.Parse(rawURL)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
// Get the path part and extract the last segment
|
||||
path := parsed.Path
|
||||
if path == "" {
|
||||
return ""
|
||||
}
|
||||
|
||||
// URL decode the path first
|
||||
decodedPath, err := url.PathUnescape(path)
|
||||
if err != nil {
|
||||
decodedPath = path
|
||||
}
|
||||
|
||||
// Get the last segment of the path
|
||||
lastSlash := strings.LastIndex(decodedPath, "/")
|
||||
if lastSlash == -1 {
|
||||
return decodedPath
|
||||
}
|
||||
filename := decodedPath[lastSlash+1:]
|
||||
|
||||
// Remove query string if somehow still present
|
||||
if idx := strings.Index(filename, "?"); idx != -1 {
|
||||
filename = filename[:idx]
|
||||
}
|
||||
|
||||
return filename
|
||||
}
|
||||
|
||||
// parseFilenameFallback manually parses filename= when mime.ParseMediaType fails
|
||||
func parseFilenameFallback(cd string) string {
|
||||
// Look for filename= (case-insensitive)
|
||||
|
||||
@@ -1,73 +0,0 @@
|
||||
package directlinks
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestParseFilenameFromURL(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
url string
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "simple filename",
|
||||
url: "https://example.com/files/document.pdf",
|
||||
expected: "document.pdf",
|
||||
},
|
||||
{
|
||||
name: "filename with encoded characters",
|
||||
url: "https://example.com/files/%E6%B5%8B%E8%AF%95.zip",
|
||||
expected: "测试.zip",
|
||||
},
|
||||
{
|
||||
name: "filename with query string in URL",
|
||||
url: "https://example.com/files/image.png?token=abc123",
|
||||
expected: "image.png",
|
||||
},
|
||||
{
|
||||
name: "nested path",
|
||||
url: "https://example.com/a/b/c/file.txt",
|
||||
expected: "file.txt",
|
||||
},
|
||||
{
|
||||
name: "URL with port",
|
||||
url: "https://example.com:8080/downloads/archive.tar.gz",
|
||||
expected: "archive.tar.gz",
|
||||
},
|
||||
{
|
||||
name: "empty path",
|
||||
url: "https://example.com",
|
||||
expected: "",
|
||||
},
|
||||
{
|
||||
name: "root path only",
|
||||
url: "https://example.com/",
|
||||
expected: "",
|
||||
},
|
||||
{
|
||||
name: "filename with spaces encoded",
|
||||
url: "https://example.com/my%20file%20name.pdf",
|
||||
expected: "my file name.pdf",
|
||||
},
|
||||
{
|
||||
name: "complex encoded filename",
|
||||
url: "https://example.com/downloads/%E4%B8%AD%E6%96%87%E6%96%87%E4%BB%B6.docx",
|
||||
expected: "中文文件.docx",
|
||||
},
|
||||
{
|
||||
name: "invalid URL",
|
||||
url: "://invalid-url",
|
||||
expected: "",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := parseFilenameFromURL(tt.url)
|
||||
if result != tt.expected {
|
||||
t.Errorf("parseFilenameFromURL(%q) = %q, want %q", tt.url, result, tt.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,142 +0,0 @@
|
||||
package transfer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/charmbracelet/log"
|
||||
"github.com/krau/SaveAny-Bot/config"
|
||||
"github.com/krau/SaveAny-Bot/pkg/enums/ctxkey"
|
||||
"github.com/krau/SaveAny-Bot/storage"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
// Execute implements core.Executable.
|
||||
func (t *Task) Execute(ctx context.Context) error {
|
||||
logger := log.FromContext(ctx).WithPrefix(fmt.Sprintf("transfer[%s]", t.ID))
|
||||
logger.Info("Starting transfer task")
|
||||
t.Progress.OnStart(ctx, t)
|
||||
|
||||
workers := config.C().Workers
|
||||
eg, gctx := errgroup.WithContext(ctx)
|
||||
eg.SetLimit(workers)
|
||||
|
||||
for _, elem := range t.elems {
|
||||
eg.Go(func() error {
|
||||
t.processingMu.RLock()
|
||||
if t.processing[elem.ID] != nil {
|
||||
t.processingMu.RUnlock()
|
||||
return fmt.Errorf("element with ID %s is already being processed", elem.ID)
|
||||
}
|
||||
t.processingMu.RUnlock()
|
||||
|
||||
t.processingMu.Lock()
|
||||
t.processing[elem.ID] = &elem
|
||||
t.processingMu.Unlock()
|
||||
|
||||
defer func() {
|
||||
t.processingMu.Lock()
|
||||
delete(t.processing, elem.ID)
|
||||
t.processingMu.Unlock()
|
||||
}()
|
||||
|
||||
err := t.processElement(gctx, elem)
|
||||
if err != nil && !t.IgnoreErrors {
|
||||
return err
|
||||
}
|
||||
if err != nil {
|
||||
t.processingMu.Lock()
|
||||
t.failed[elem.ID] = err
|
||||
t.processingMu.Unlock()
|
||||
logger.Errorf("Failed to process file %s: %v", elem.FileInfo.Name, err)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
err := eg.Wait()
|
||||
if err != nil {
|
||||
logger.Errorf("Error during transfer processing: %v", err)
|
||||
} else {
|
||||
logger.Info("Transfer task completed successfully")
|
||||
}
|
||||
|
||||
t.Progress.OnDone(ctx, t, err)
|
||||
return err
|
||||
}
|
||||
|
||||
func (t *Task) processElement(ctx context.Context, elem TaskElement) error {
|
||||
logger := log.FromContext(ctx).WithPrefix(fmt.Sprintf("file[%s]", elem.FileInfo.Name))
|
||||
|
||||
// Check whether the source storage supports reading
|
||||
readableStorage, ok := elem.SourceStorage.(storage.StorageReadable)
|
||||
if !ok {
|
||||
return fmt.Errorf("source storage %s does not support reading", elem.SourceStorage.Name())
|
||||
}
|
||||
|
||||
logger.Info("Opening file from source storage")
|
||||
reader, size, err := readableStorage.OpenFile(ctx, elem.SourcePath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to open file: %w", err)
|
||||
}
|
||||
defer reader.Close()
|
||||
|
||||
// Build target storage path: /target_path/filename
|
||||
storagePath := path.Join(elem.TargetPath, elem.FileInfo.Name)
|
||||
|
||||
// Inject file size into context
|
||||
ctx = context.WithValue(ctx, ctxkey.ContentLength, size)
|
||||
|
||||
if config.C().Stream {
|
||||
if err := elem.TargetStorage.Save(ctx, reader, storagePath); err != nil {
|
||||
return fmt.Errorf("failed to upload file to storage: %w", err)
|
||||
}
|
||||
} else {
|
||||
logger.Info("Downloading to temporary file for ReadSeeker support")
|
||||
tempFile, err := t.downloadToTemp(reader, elem.FileInfo.Name)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to download to temp: %w", err)
|
||||
}
|
||||
defer os.Remove(tempFile.Name())
|
||||
defer tempFile.Close()
|
||||
|
||||
if _, err := tempFile.Seek(0, io.SeekStart); err != nil {
|
||||
return fmt.Errorf("failed to seek temp file: %w", err)
|
||||
}
|
||||
|
||||
logger.Infof("Uploading file to storage (size: %d bytes)", size)
|
||||
if err := elem.TargetStorage.Save(ctx, tempFile, storagePath); err != nil {
|
||||
return fmt.Errorf("failed to upload file to storage: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
t.uploaded.Add(size)
|
||||
t.Progress.OnProgress(ctx, t)
|
||||
|
||||
logger.Info("File uploaded successfully")
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *Task) downloadToTemp(reader io.Reader, filename string) (*os.File, error) {
|
||||
tempDir := config.C().Temp.BasePath
|
||||
if tempDir == "" {
|
||||
tempDir = os.TempDir()
|
||||
}
|
||||
|
||||
tempFile, err := os.CreateTemp(tempDir, filepath.Base(filename)+"-*.tmp")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create temp file: %w", err)
|
||||
}
|
||||
|
||||
if _, err := io.Copy(tempFile, reader); err != nil {
|
||||
tempFile.Close()
|
||||
os.Remove(tempFile.Name())
|
||||
return nil, fmt.Errorf("failed to copy to temp file: %w", err)
|
||||
}
|
||||
|
||||
return tempFile, nil
|
||||
}
|
||||
@@ -1,247 +0,0 @@
|
||||
package transfer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/charmbracelet/log"
|
||||
"github.com/gotd/td/telegram/message/entity"
|
||||
"github.com/gotd/td/telegram/message/styling"
|
||||
"github.com/gotd/td/tg"
|
||||
"github.com/krau/SaveAny-Bot/common/i18n"
|
||||
"github.com/krau/SaveAny-Bot/common/i18n/i18nk"
|
||||
"github.com/krau/SaveAny-Bot/common/utils/dlutil"
|
||||
"github.com/krau/SaveAny-Bot/common/utils/tgutil"
|
||||
)
|
||||
|
||||
type ProgressTracker interface {
|
||||
OnStart(ctx context.Context, info TaskInfo)
|
||||
OnProgress(ctx context.Context, info TaskInfo)
|
||||
OnDone(ctx context.Context, info TaskInfo, err error)
|
||||
}
|
||||
|
||||
type Progress struct {
|
||||
MessageID int
|
||||
ChatID int64
|
||||
start time.Time
|
||||
lastUpdatePercent atomic.Int32
|
||||
}
|
||||
|
||||
func NewProgressTracker(messageID int, chatID int64) ProgressTracker {
|
||||
return &Progress{
|
||||
MessageID: messageID,
|
||||
ChatID: chatID,
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Progress) OnStart(ctx context.Context, info TaskInfo) {
|
||||
p.start = time.Now()
|
||||
p.lastUpdatePercent.Store(0)
|
||||
log.FromContext(ctx).Debugf("Transfer task progress tracking started for message %d in chat %d", p.MessageID, p.ChatID)
|
||||
|
||||
sizeMB := float64(info.TotalSize()) / (1024 * 1024)
|
||||
statsText := i18n.T(i18nk.BotMsgTransferStartStats, map[string]any{
|
||||
"SizeMB": fmt.Sprintf("%.2f", sizeMB),
|
||||
"Count": info.Count(),
|
||||
})
|
||||
|
||||
entityBuilder := entity.Builder{}
|
||||
if err := styling.Perform(&entityBuilder,
|
||||
styling.Plain(i18n.T(i18nk.BotMsgProgressTransferStartPrefix, nil)),
|
||||
styling.Code(statsText),
|
||||
); err != nil {
|
||||
log.FromContext(ctx).Errorf("Failed to build entities: %s", err)
|
||||
return
|
||||
}
|
||||
|
||||
text, entities := entityBuilder.Complete()
|
||||
req := &tg.MessagesEditMessageRequest{
|
||||
ID: p.MessageID,
|
||||
}
|
||||
req.SetMessage(text)
|
||||
req.SetEntities(entities)
|
||||
req.SetReplyMarkup(&tg.ReplyInlineMarkup{
|
||||
Rows: []tg.KeyboardButtonRow{
|
||||
{
|
||||
Buttons: []tg.KeyboardButtonClass{
|
||||
tgutil.BuildCancelButton(info.TaskID()),
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
ext := tgutil.ExtFromContext(ctx)
|
||||
if ext != nil {
|
||||
_, err := ext.EditMessage(p.ChatID, req)
|
||||
if err != nil {
|
||||
log.FromContext(ctx).Errorf("Failed to send progress start message: %s", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Progress) OnProgress(ctx context.Context, info TaskInfo) {
|
||||
if !shouldUpdateProgress(info.TotalSize(), info.Uploaded(), int(p.lastUpdatePercent.Load())) {
|
||||
return
|
||||
}
|
||||
percent := int((info.Uploaded() * 100) / info.TotalSize())
|
||||
if p.lastUpdatePercent.Load() == int32(percent) {
|
||||
return
|
||||
}
|
||||
p.lastUpdatePercent.Store(int32(percent))
|
||||
|
||||
log.FromContext(ctx).Debugf("Progress update: %s, %d/%d", info.TaskID(), info.Uploaded(), info.TotalSize())
|
||||
|
||||
entityBuilder := entity.Builder{}
|
||||
var progressText strings.Builder
|
||||
|
||||
progressText.WriteString(i18n.T(i18nk.BotMsgProgressTransferProgressPrefix, nil))
|
||||
fmt.Fprintf(&progressText, "%d%%", percent)
|
||||
progressText.WriteString(i18n.T(i18nk.BotMsgProgressTransferUploadedPrefix, nil))
|
||||
fmt.Fprintf(&progressText, "%.2f MB / %.2f MB",
|
||||
float64(info.Uploaded())/(1024*1024),
|
||||
float64(info.TotalSize())/(1024*1024))
|
||||
|
||||
if p.start.Unix() > 0 {
|
||||
elapsed := time.Since(p.start)
|
||||
speed := float64(info.Uploaded()) / elapsed.Seconds()
|
||||
progressText.WriteString(i18n.T(i18nk.BotMsgProgressTransferSpeedPrefix, nil))
|
||||
progressText.WriteString(dlutil.FormatSize(int64(speed)) + "/s")
|
||||
|
||||
if info.Uploaded() > 0 {
|
||||
remaining := time.Duration(float64(info.TotalSize()-info.Uploaded()) / speed * float64(time.Second))
|
||||
progressText.WriteString(i18n.T(i18nk.BotMsgProgressTransferRemainingTimePrefix, nil))
|
||||
progressText.WriteString(formatDuration(remaining))
|
||||
}
|
||||
}
|
||||
|
||||
processing := info.Processing()
|
||||
if len(processing) > 0 {
|
||||
progressText.WriteString(i18n.T(i18nk.BotMsgProgressTransferProcessingPrefix, nil))
|
||||
for i, elem := range processing {
|
||||
if i >= 3 {
|
||||
progressText.WriteString(i18n.T(i18nk.BotMsgProgressTransferProcessingMore, map[string]any{"Count": len(processing) - 3}))
|
||||
break
|
||||
}
|
||||
fmt.Fprintf(&progressText, "- %s\n", elem.FileName())
|
||||
}
|
||||
}
|
||||
|
||||
if err := styling.Perform(&entityBuilder,
|
||||
styling.Plain(progressText.String()),
|
||||
); err != nil {
|
||||
log.FromContext(ctx).Errorf("Failed to build entities: %s", err)
|
||||
return
|
||||
}
|
||||
|
||||
text, entities := entityBuilder.Complete()
|
||||
req := &tg.MessagesEditMessageRequest{
|
||||
ID: p.MessageID,
|
||||
}
|
||||
req.SetMessage(text)
|
||||
req.SetEntities(entities)
|
||||
req.SetReplyMarkup(&tg.ReplyInlineMarkup{
|
||||
Rows: []tg.KeyboardButtonRow{
|
||||
{
|
||||
Buttons: []tg.KeyboardButtonClass{
|
||||
tgutil.BuildCancelButton(info.TaskID()),
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
ext := tgutil.ExtFromContext(ctx)
|
||||
if ext != nil {
|
||||
ext.EditMessage(p.ChatID, req)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Progress) OnDone(ctx context.Context, info TaskInfo, err error) {
|
||||
log.FromContext(ctx).Debugf("Transfer task progress tracking done for message %d in chat %d", p.MessageID, p.ChatID)
|
||||
|
||||
entityBuilder := entity.Builder{}
|
||||
var resultText strings.Builder
|
||||
|
||||
if err != nil {
|
||||
resultText.WriteString(i18n.T(i18nk.BotMsgProgressTransferFailedPrefix, nil))
|
||||
resultText.WriteString(i18n.T(i18nk.BotMsgProgressErrorPrefix, nil))
|
||||
fmt.Fprintf(&resultText, "%v\n", err)
|
||||
} else {
|
||||
resultText.WriteString(i18n.T(i18nk.BotMsgProgressTransferSuccessPrefix, nil))
|
||||
}
|
||||
|
||||
elapsed := time.Since(p.start)
|
||||
resultText.WriteString(i18n.T(i18nk.BotMsgProgressTransferTotalFilesPrefix, nil))
|
||||
fmt.Fprintf(&resultText, "%d\n", info.Count())
|
||||
resultText.WriteString(i18n.T(i18nk.BotMsgProgressTransferTotalSizePrefix, nil))
|
||||
fmt.Fprintf(&resultText, "%.2f MB\n", float64(info.TotalSize())/(1024*1024))
|
||||
resultText.WriteString(i18n.T(i18nk.BotMsgProgressTransferUploadedPrefix, nil))
|
||||
fmt.Fprintf(&resultText, "%.2f MB\n", float64(info.Uploaded())/(1024*1024))
|
||||
resultText.WriteString(i18n.T(i18nk.BotMsgProgressTransferElapsedTimePrefix, nil))
|
||||
fmt.Fprintf(&resultText, "%s\n", formatDuration(elapsed))
|
||||
|
||||
if elapsed.Seconds() > 0 {
|
||||
avgSpeed := float64(info.Uploaded()) / elapsed.Seconds()
|
||||
resultText.WriteString(i18n.T(i18nk.BotMsgProgressTransferAvgSpeedPrefix, nil))
|
||||
fmt.Fprintf(&resultText, "%s/s\n", dlutil.FormatSize(int64(avgSpeed)))
|
||||
}
|
||||
|
||||
failedFiles := info.FailedFiles()
|
||||
if len(failedFiles) > 0 {
|
||||
resultText.WriteString(i18n.T(i18nk.BotMsgProgressTransferFailedFilesPrefix, nil))
|
||||
fmt.Fprintf(&resultText, "%d\n", len(failedFiles))
|
||||
for i, name := range failedFiles {
|
||||
if i >= 5 {
|
||||
resultText.WriteString(i18n.T(i18nk.BotMsgProgressTransferProcessingMore, map[string]any{"Count": len(failedFiles) - 5}))
|
||||
break
|
||||
}
|
||||
fmt.Fprintf(&resultText, "- %s\n", name)
|
||||
}
|
||||
}
|
||||
|
||||
if err := styling.Perform(&entityBuilder,
|
||||
styling.Plain(resultText.String()),
|
||||
); err != nil {
|
||||
log.FromContext(ctx).Errorf("Failed to build entities: %s", err)
|
||||
return
|
||||
}
|
||||
|
||||
text, entities := entityBuilder.Complete()
|
||||
req := &tg.MessagesEditMessageRequest{
|
||||
ID: p.MessageID,
|
||||
}
|
||||
req.SetMessage(text)
|
||||
req.SetEntities(entities)
|
||||
|
||||
ext := tgutil.ExtFromContext(ctx)
|
||||
if ext != nil {
|
||||
ext.EditMessage(p.ChatID, req)
|
||||
}
|
||||
}
|
||||
|
||||
func shouldUpdateProgress(total, current int64, lastPercent int) bool {
|
||||
if total == 0 {
|
||||
return false
|
||||
}
|
||||
currentPercent := int((current * 100) / total)
|
||||
return currentPercent > lastPercent && currentPercent%5 == 0
|
||||
}
|
||||
|
||||
func formatDuration(d time.Duration) string {
|
||||
d = d.Round(time.Second)
|
||||
h := d / time.Hour
|
||||
d -= h * time.Hour
|
||||
m := d / time.Minute
|
||||
d -= m * time.Minute
|
||||
s := d / time.Second
|
||||
|
||||
if h > 0 {
|
||||
return fmt.Sprintf("%dh%dm%ds", h, m, s)
|
||||
}
|
||||
if m > 0 {
|
||||
return fmt.Sprintf("%dm%ds", m, s)
|
||||
}
|
||||
return fmt.Sprintf("%ds", s)
|
||||
}
|
||||
@@ -1,97 +0,0 @@
|
||||
package transfer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/krau/SaveAny-Bot/core"
|
||||
"github.com/krau/SaveAny-Bot/pkg/enums/tasktype"
|
||||
"github.com/krau/SaveAny-Bot/pkg/storagetypes"
|
||||
"github.com/krau/SaveAny-Bot/storage"
|
||||
"github.com/rs/xid"
|
||||
)
|
||||
|
||||
var _ core.Executable = (*Task)(nil)
|
||||
|
||||
type TaskElement struct {
|
||||
ID string
|
||||
SourceStorage storage.Storage
|
||||
SourcePath string
|
||||
FileInfo storagetypes.FileInfo
|
||||
TargetStorage storage.Storage
|
||||
TargetPath string
|
||||
}
|
||||
|
||||
type Task struct {
|
||||
ID string
|
||||
ctx context.Context
|
||||
elems []TaskElement
|
||||
Progress ProgressTracker
|
||||
IgnoreErrors bool
|
||||
uploaded atomic.Int64
|
||||
totalSize int64
|
||||
processing map[string]TaskElementInfo
|
||||
processingMu sync.RWMutex
|
||||
failed map[string]error
|
||||
}
|
||||
|
||||
// Title implements core.Executable.
|
||||
func (t *Task) Title() string {
|
||||
return fmt.Sprintf("[%s](%d files/%.2fMB)", t.Type(), len(t.elems), float64(t.totalSize)/(1024*1024))
|
||||
}
|
||||
|
||||
// Type implements core.Executable.
|
||||
func (t *Task) Type() tasktype.TaskType {
|
||||
return tasktype.TaskTypeTransfer
|
||||
}
|
||||
|
||||
// TaskID implements core.Executable.
|
||||
func (t *Task) TaskID() string {
|
||||
return t.ID
|
||||
}
|
||||
|
||||
func NewTaskElement(
|
||||
sourceStorage storage.Storage,
|
||||
fileInfo storagetypes.FileInfo,
|
||||
targetStorage storage.Storage,
|
||||
targetPath string,
|
||||
) *TaskElement {
|
||||
id := xid.New().String()
|
||||
return &TaskElement{
|
||||
ID: id,
|
||||
SourceStorage: sourceStorage,
|
||||
SourcePath: fileInfo.Path,
|
||||
FileInfo: fileInfo,
|
||||
TargetStorage: targetStorage,
|
||||
TargetPath: targetPath,
|
||||
}
|
||||
}
|
||||
|
||||
func NewTransferTask(
|
||||
id string,
|
||||
ctx context.Context,
|
||||
elems []TaskElement,
|
||||
progress ProgressTracker,
|
||||
ignoreErrors bool,
|
||||
) *Task {
|
||||
task := &Task{
|
||||
ID: id,
|
||||
ctx: ctx,
|
||||
elems: elems,
|
||||
Progress: progress,
|
||||
uploaded: atomic.Int64{},
|
||||
totalSize: func() int64 {
|
||||
var total int64
|
||||
for _, elem := range elems {
|
||||
total += elem.FileInfo.Size
|
||||
}
|
||||
return total
|
||||
}(),
|
||||
processing: make(map[string]TaskElementInfo),
|
||||
IgnoreErrors: ignoreErrors,
|
||||
failed: make(map[string]error),
|
||||
}
|
||||
return task
|
||||
}
|
||||
@@ -1,73 +0,0 @@
|
||||
package transfer
|
||||
|
||||
type TaskElementInfo interface {
|
||||
FileName() string
|
||||
FileSize() int64
|
||||
GetSourcePath() string
|
||||
SourceStorageName() string
|
||||
}
|
||||
|
||||
func (e *TaskElement) FileName() string {
|
||||
return e.FileInfo.Name
|
||||
}
|
||||
|
||||
func (e *TaskElement) FileSize() int64 {
|
||||
return e.FileInfo.Size
|
||||
}
|
||||
|
||||
func (e *TaskElement) GetSourcePath() string {
|
||||
return e.SourcePath
|
||||
}
|
||||
|
||||
func (e *TaskElement) SourceStorageName() string {
|
||||
return e.SourceStorage.Name()
|
||||
}
|
||||
|
||||
type TaskInfo interface {
|
||||
TaskID() string
|
||||
TotalSize() int64
|
||||
Uploaded() int64
|
||||
Count() int
|
||||
Processing() []TaskElementInfo
|
||||
FailedFiles() []string
|
||||
}
|
||||
|
||||
func (t *Task) TotalSize() int64 {
|
||||
return t.totalSize
|
||||
}
|
||||
|
||||
func (t *Task) Uploaded() int64 {
|
||||
return t.uploaded.Load()
|
||||
}
|
||||
|
||||
func (t *Task) Count() int {
|
||||
return len(t.elems)
|
||||
}
|
||||
|
||||
func (t *Task) Processing() []TaskElementInfo {
|
||||
t.processingMu.RLock()
|
||||
defer t.processingMu.RUnlock()
|
||||
|
||||
result := make([]TaskElementInfo, 0, len(t.processing))
|
||||
for _, elem := range t.processing {
|
||||
result = append(result, elem)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func (t *Task) FailedFiles() []string {
|
||||
t.processingMu.RLock()
|
||||
defer t.processingMu.RUnlock()
|
||||
|
||||
result := make([]string, 0, len(t.failed))
|
||||
for id := range t.failed {
|
||||
// Find the element by ID
|
||||
for _, elem := range t.elems {
|
||||
if elem.ID == id {
|
||||
result = append(result, elem.FileInfo.Name)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
@@ -20,9 +20,6 @@ Save Any Bot is a tool that allows you to save files from Telegram to various st
|
||||
- Multi-user
|
||||
- Automatic organization based on storage rules
|
||||
- Watch specific chats and automatically save messages, with filters
|
||||
- Transfer files between different storage backends
|
||||
- Integrate with yt-dlp to download and save media from 1000+ websites
|
||||
- Aria2 integration to download files from URLs/magnets and save to storages
|
||||
- Write JS parser plugins to save files from almost any website
|
||||
- Supports multiple storage backends:
|
||||
- Alist
|
||||
|
||||
@@ -92,27 +92,6 @@ enable = false
|
||||
session = "data/usersession.db"
|
||||
```
|
||||
|
||||
### Aria2 Configuration
|
||||
|
||||
Aria2 is a powerful download manager that supports HTTP/HTTPS, FTP, BitTorrent, and other protocols. When enabled, the bot can use the `/aria2dl` command to download files via Aria2.
|
||||
|
||||
- `enable`: Whether to enable Aria2 support, default is `false`
|
||||
- `url`: Aria2 RPC address, typically `http://localhost:6800/jsonrpc`
|
||||
- `secret`: Aria2 RPC secret, if you configured `rpc-secret` in Aria2, you need to fill it in here
|
||||
- `remove_after_transfer`: Whether to remove local files downloaded by Aria2 after transfer, default is `true`
|
||||
|
||||
{{< hint info >}}
|
||||
Aria2 needs to be installed and running separately. You can refer to the [Aria2 official documentation](https://aria2.github.io/) to learn how to install and configure Aria2.
|
||||
{{< /hint >}}
|
||||
|
||||
```toml
|
||||
[aria2]
|
||||
enable = true
|
||||
url = "http://localhost:6800/jsonrpc"
|
||||
secret = "your-rpc-secret"
|
||||
remove_after_transfer = true
|
||||
```
|
||||
|
||||
### Storage Endpoints List
|
||||
|
||||
The storage endpoints list is used to define the storage locations supported by the Bot. Each storage endpoint needs to specify a name, type, and related configuration, using the double bracket syntax `[[storages]]`.
|
||||
|
||||
@@ -80,60 +80,4 @@ chat_id = "123456789" # Telegram chat ID, the bot will send files to this chat
|
||||
force_file = false # Force sending as file, default is false
|
||||
skip_large = false # Skip large files, default is false. If enabled, files exceeding Telegram's limit will not be uploaded.
|
||||
spilt_size_mb = 2000 # Split size in MB, default is 2000 MB (2 GB). Files larger than this will be split into multiple parts (zip format). Ignored when skip_large is true.
|
||||
```
|
||||
|
||||
## Rclone
|
||||
|
||||
`type=rclone`
|
||||
|
||||
Supports multiple cloud storage services through the [rclone](https://rclone.org/) command-line tool. You need to install rclone and configure remote storage first.
|
||||
|
||||
```toml
|
||||
# Remote name configured in rclone, can be any remote defined in rclone.conf
|
||||
remote = "mydrive"
|
||||
# Base path in the remote storage, all files will be stored under this path
|
||||
base_path = "/telegram"
|
||||
# Path to rclone config file, optional, leave empty to use default path (~/.config/rclone/rclone.conf)
|
||||
config_path = ""
|
||||
# Additional flags to pass to rclone commands, optional
|
||||
flags = ["--transfers", "4", "--checkers", "8"]
|
||||
```
|
||||
|
||||
### Configuring rclone Remote
|
||||
|
||||
First, you need to configure an rclone remote. Run `rclone config` for interactive configuration, or directly edit the `rclone.conf` file.
|
||||
|
||||
rclone supports many cloud storage services, including but not limited to:
|
||||
- Google Drive
|
||||
- Dropbox
|
||||
- OneDrive
|
||||
- Amazon S3 and compatible services
|
||||
- SFTP
|
||||
- FTP
|
||||
- For more services, please refer to the [rclone official documentation](https://rclone.org/overview/)
|
||||
|
||||
### Usage Examples
|
||||
|
||||
After configuring Google Drive, you can configure the storage like this:
|
||||
|
||||
```toml
|
||||
[[storages]]
|
||||
name = "GoogleDrive"
|
||||
type = "rclone"
|
||||
enable = true
|
||||
remote = "gdrive"
|
||||
base_path = "/SaveAnyBot"
|
||||
```
|
||||
|
||||
If using a custom rclone config file:
|
||||
|
||||
```toml
|
||||
[[storages]]
|
||||
name = "MyRemote"
|
||||
type = "rclone"
|
||||
enable = true
|
||||
remote = "myremote"
|
||||
base_path = "/backup"
|
||||
config_path = "/path/to/rclone.conf"
|
||||
flags = ["--progress"]
|
||||
```
|
||||
@@ -112,142 +112,6 @@ Regex-match the message text. For example:
|
||||
|
||||
This will watch the chat with ID `12345678`, and only save messages whose text contains `hello`.
|
||||
|
||||
## Direct Download Links
|
||||
|
||||
Use the `/dl` command to directly download one or more HTTP/HTTPS files to storage.
|
||||
|
||||
```bash
|
||||
/dl <url1> [url2] [url3] ...
|
||||
```
|
||||
|
||||
Examples:
|
||||
|
||||
```bash
|
||||
/dl https://example.com/file.zip
|
||||
/dl https://example.com/file1.zip https://example.com/file2.zip
|
||||
```
|
||||
|
||||
The bot will validate the link format and then ask you to select the target storage location.
|
||||
|
||||
## Aria2 Download
|
||||
|
||||
{{< hint warning >}}
|
||||
This feature requires enabling Aria2 in the configuration file and configuring the RPC connection.
|
||||
{{< /hint >}}
|
||||
|
||||
Use the `/aria2dl` command to download files via the Aria2 download manager, supporting HTTP/HTTPS, FTP, BitTorrent, and other protocols.
|
||||
|
||||
```bash
|
||||
/aria2dl <uri1> [uri2] [uri3] ...
|
||||
```
|
||||
|
||||
Examples:
|
||||
|
||||
```bash
|
||||
# Download HTTP link
|
||||
/aria2dl https://example.com/file.zip
|
||||
|
||||
# Download magnet link
|
||||
/aria2dl magnet:?xt=urn:btih:...
|
||||
|
||||
# Download torrent file (need to upload .torrent file first)
|
||||
/aria2dl https://example.com/file.torrent
|
||||
```
|
||||
|
||||
Configure Aria2:
|
||||
|
||||
Add to `config.toml`:
|
||||
|
||||
```toml
|
||||
[aria2]
|
||||
enable = true
|
||||
url = "http://localhost:6800/jsonrpc"
|
||||
secret = "your-rpc-secret" # If rpc-secret is configured
|
||||
remove_after_transfer = true # Remove local files after transfer
|
||||
```
|
||||
|
||||
## yt-dlp Video Download
|
||||
|
||||
{{< hint warning >}}
|
||||
This feature requires the yt-dlp command-line tool installed on your system.
|
||||
{{< /hint >}}
|
||||
|
||||
Use the `/ytdlp` command to download videos and audio from supported video websites, including YouTube, Bilibili, Twitter, and 1000+ other sites.
|
||||
|
||||
```bash
|
||||
/ytdlp <url1> [url2] [flags...]
|
||||
```
|
||||
|
||||
Examples:
|
||||
|
||||
```bash
|
||||
# Basic download
|
||||
/ytdlp https://www.youtube.com/watch?v=dQw4w9WgXcQ
|
||||
|
||||
# Download multiple videos
|
||||
/ytdlp https://www.youtube.com/watch?v=video1 https://www.youtube.com/watch?v=video2
|
||||
|
||||
# Use custom parameters
|
||||
/ytdlp https://www.youtube.com/watch?v=dQw4w9WgXcQ -f best
|
||||
/ytdlp https://www.youtube.com/watch?v=dQw4w9WgXcQ --extract-audio --audio-format mp3
|
||||
```
|
||||
|
||||
Common parameters:
|
||||
|
||||
- `-f <format>`: Specify download format (e.g., `best`, `worst`, `bestvideo+bestaudio`)
|
||||
- `--extract-audio`: Extract audio
|
||||
- `--audio-format <format>`: Audio format (e.g., `mp3`, `m4a`, `wav`)
|
||||
- `--write-sub`: Download subtitles
|
||||
- `--write-thumbnail`: Download thumbnail
|
||||
|
||||
For more parameters, see [yt-dlp documentation](https://github.com/yt-dlp/yt-dlp#usage-and-options).
|
||||
|
||||
## Storage Transfer
|
||||
|
||||
Use the `/transfer` command to transfer files directly between different storages without going through Telegram.
|
||||
|
||||
```bash
|
||||
/transfer <source_storage>:/<source_path> [filter]
|
||||
```
|
||||
|
||||
Parameters:
|
||||
|
||||
- `source_storage`: Source storage name
|
||||
- `source_path`: Source path
|
||||
- `filter`: Optional regex filter to transfer only matching files
|
||||
|
||||
Examples:
|
||||
|
||||
```bash
|
||||
# Transfer entire directory
|
||||
/transfer local1:/downloads
|
||||
|
||||
# Transfer files from specified path
|
||||
/transfer alist1:/media/photos
|
||||
|
||||
# Transfer only mp4 files
|
||||
/transfer webdav1:/videos ".*\.mp4$"
|
||||
|
||||
# Transfer image files
|
||||
/transfer local1:/pictures "(?i)\.(jpg|png|gif)$"
|
||||
```
|
||||
|
||||
The bot will:
|
||||
|
||||
1. List all files in the source path
|
||||
2. Apply the filter (if provided)
|
||||
3. Display file count and total size
|
||||
4. Ask you to select the target storage
|
||||
5. Ask you to select the target directory (if configured for that storage)
|
||||
6. Start the transfer task
|
||||
|
||||
Notes:
|
||||
|
||||
- Source storage must support listing and reading
|
||||
- Target storage must support writing
|
||||
- Real-time progress is displayed during transfer
|
||||
- Transfer tasks can be cancelled
|
||||
|
||||
## Save Files Outside Telegram
|
||||
|
||||
Besides files on Telegram, the bot can also save files from other websites via JavaScript plugins or built-in parsers.
|
||||
|
||||
@@ -20,16 +20,12 @@ title: 介绍
|
||||
- 多用户使用
|
||||
- 基于存储规则的自动整理
|
||||
- 监听并自动转存指定聊天的消息, 支持过滤
|
||||
- 在不同存储端之间转存文件
|
||||
- 集成 yt-dlp, 从所支持的网站下载并转存媒体文件
|
||||
- 集成 Aria2, 支持直链/磁力下载和转存
|
||||
- 使用 js 编写解析器插件以转存任意网站的文件
|
||||
- 存储端支持:
|
||||
- Alist
|
||||
- S3
|
||||
- WebDAV
|
||||
- 本地磁盘
|
||||
- Rclone (通过命令行调用)
|
||||
- Telegram (重传回指定聊天)
|
||||
|
||||
## [贡献者](https://github.com/krau/SaveAny-Bot/graphs/contributors)
|
||||
|
||||
@@ -90,27 +90,6 @@ enable = false
|
||||
session = "data/usersession.db"
|
||||
```
|
||||
|
||||
### Aria2 配置
|
||||
|
||||
Aria2 是一个强大的下载管理器,支持 HTTP/HTTPS、FTP、BitTorrent 等多种协议。启用后,Bot 可以使用 `/aria2dl` 命令通过 Aria2 下载文件。
|
||||
|
||||
- `enable`: 是否启用 Aria2 支持,默认为 `false`
|
||||
- `url`: Aria2 RPC 地址,通常为 `http://localhost:6800/jsonrpc`
|
||||
- `secret`: Aria2 RPC 密钥,如果你在 Aria2 中配置了 `rpc-secret`,需要在此填写
|
||||
- `remove_after_transfer`: 转存完成后是否删除 Aria2 下载的本地文件,默认为 `true`
|
||||
|
||||
{{< hint info >}}
|
||||
Aria2 需要单独安装和运行。你可以参考 [Aria2 官方文档](https://aria2.github.io/) 了解如何安装和配置 Aria2。
|
||||
{{< /hint >}}
|
||||
|
||||
```toml
|
||||
[aria2]
|
||||
enable = true
|
||||
url = "http://localhost:6800/jsonrpc"
|
||||
secret = "your-rpc-secret"
|
||||
remove_after_transfer = true
|
||||
```
|
||||
|
||||
### 存储端列表
|
||||
|
||||
存储端列表用于定义 Bot 支持的存储位置, 每个存储端需要指定名称、类型和相关配置, 使用双中括号语法 `[[storages]]` 定义.
|
||||
|
||||
@@ -86,60 +86,4 @@ skip_large = false
|
||||
# 超过该大小的文件将被分割成多个部分上传.(使用 zip 格式)
|
||||
# 当 skip_large 启用时, 该选项无效.
|
||||
spilt_size_mb = 2000
|
||||
```
|
||||
|
||||
## Rclone
|
||||
|
||||
`type=rclone`
|
||||
|
||||
通过 [rclone](https://rclone.org/) 命令行工具支持多种云存储服务. 需要先安装 rclone 并配置好远程存储.
|
||||
|
||||
```toml
|
||||
# rclone 配置的远程名称, 可以是任何在 rclone.conf 中配置的远程
|
||||
remote = "mydrive"
|
||||
# 在远程存储中的基础路径, 所有文件将存储在此路径下
|
||||
base_path = "/telegram"
|
||||
# rclone 配置文件的路径, 可选, 留空使用默认路径 (~/.config/rclone/rclone.conf)
|
||||
config_path = ""
|
||||
# 传递给 rclone 命令的额外参数, 可选
|
||||
flags = ["--transfers", "4", "--checkers", "8"]
|
||||
```
|
||||
|
||||
### 配置 rclone 远程
|
||||
|
||||
首先需要配置 rclone 远程, 运行 `rclone config` 命令进行交互式配置, 或直接编辑 `rclone.conf` 文件.
|
||||
|
||||
rclone 支持多种云存储服务, 包括但不限于:
|
||||
- Google Drive
|
||||
- Dropbox
|
||||
- OneDrive
|
||||
- Amazon S3 及兼容服务
|
||||
- SFTP
|
||||
- FTP
|
||||
- 更多服务请参考 [rclone 官方文档](https://rclone.org/overview/)
|
||||
|
||||
### 使用示例
|
||||
|
||||
配置 Google Drive 后, 可以这样配置存储:
|
||||
|
||||
```toml
|
||||
[[storages]]
|
||||
name = "GoogleDrive"
|
||||
type = "rclone"
|
||||
enable = true
|
||||
remote = "gdrive"
|
||||
base_path = "/SaveAnyBot"
|
||||
```
|
||||
|
||||
如果使用自定义的 rclone 配置文件:
|
||||
|
||||
```toml
|
||||
[[storages]]
|
||||
name = "MyRemote"
|
||||
type = "rclone"
|
||||
enable = true
|
||||
remote = "myremote"
|
||||
base_path = "/backup"
|
||||
config_path = "/path/to/rclone.conf"
|
||||
flags = ["--progress"]
|
||||
```
|
||||
@@ -112,142 +112,6 @@ IS-ALBUM true MyWebdav NEW-FOR-ALBUM
|
||||
|
||||
这将会监听 ID 为 12345678 的聊天, 并且只保存消息文本中包含 "hello" 的消息.
|
||||
|
||||
## 直接下载链接
|
||||
|
||||
使用 `/dl` 命令可以直接下载一个或多个 HTTP/HTTPS 链接的文件到存储中.
|
||||
|
||||
```bash
|
||||
/dl <url1> [url2] [url3] ...
|
||||
```
|
||||
|
||||
示例:
|
||||
|
||||
```bash
|
||||
/dl https://example.com/file.zip
|
||||
/dl https://example.com/file1.zip https://example.com/file2.zip
|
||||
```
|
||||
|
||||
Bot 会验证链接格式, 然后让你选择目标存储位置.
|
||||
|
||||
## Aria2 下载
|
||||
|
||||
{{< hint warning >}}
|
||||
该功能需要在配置文件中启用 Aria2 并配置 RPC 连接.
|
||||
{{< /hint >}}
|
||||
|
||||
使用 `/aria2dl` 命令可以通过 Aria2 下载管理器下载文件, 支持 HTTP/HTTPS、FTP、BitTorrent 等多种协议.
|
||||
|
||||
```bash
|
||||
/aria2dl <uri1> [uri2] [uri3] ...
|
||||
```
|
||||
|
||||
示例:
|
||||
|
||||
```bash
|
||||
# 下载 HTTP 链接
|
||||
/aria2dl https://example.com/file.zip
|
||||
|
||||
# 下载磁力链接
|
||||
/aria2dl magnet:?xt=urn:btih:...
|
||||
|
||||
# 下载种子文件 (需要先上传 .torrent 文件)
|
||||
/aria2dl https://example.com/file.torrent
|
||||
```
|
||||
|
||||
配置 Aria2:
|
||||
|
||||
在 `config.toml` 中添加:
|
||||
|
||||
```toml
|
||||
[aria2]
|
||||
enable = true
|
||||
url = "http://localhost:6800/jsonrpc"
|
||||
secret = "your-rpc-secret" # 如果配置了 rpc-secret
|
||||
remove_after_transfer = true # 转存完成后删除本地文件
|
||||
```
|
||||
|
||||
## yt-dlp 视频下载
|
||||
|
||||
{{< hint warning >}}
|
||||
该功能需要在系统中安装 yt-dlp 命令行工具.
|
||||
{{< /hint >}}
|
||||
|
||||
使用 `/ytdlp` 命令可以下载支持的视频网站的视频和音频, 支持 YouTube、Bilibili、Twitter 等 1000+ 个网站.
|
||||
|
||||
```bash
|
||||
/ytdlp <url1> [url2] [flags...]
|
||||
```
|
||||
|
||||
示例:
|
||||
|
||||
```bash
|
||||
# 基本下载
|
||||
/ytdlp https://www.youtube.com/watch?v=dQw4w9WgXcQ
|
||||
|
||||
# 下载多个视频
|
||||
/ytdlp https://www.youtube.com/watch?v=video1 https://www.youtube.com/watch?v=video2
|
||||
|
||||
# 使用自定义参数
|
||||
/ytdlp https://www.youtube.com/watch?v=dQw4w9WgXcQ -f best
|
||||
/ytdlp https://www.youtube.com/watch?v=dQw4w9WgXcQ --extract-audio --audio-format mp3
|
||||
```
|
||||
|
||||
常用参数:
|
||||
|
||||
- `-f <format>`: 指定下载格式 (如 `best`, `worst`, `bestvideo+bestaudio`)
|
||||
- `--extract-audio`: 提取音频
|
||||
- `--audio-format <format>`: 音频格式 (如 `mp3`, `m4a`, `wav`)
|
||||
- `--write-sub`: 下载字幕
|
||||
- `--write-thumbnail`: 下载缩略图
|
||||
|
||||
更多参数请参考 [yt-dlp 文档](https://github.com/yt-dlp/yt-dlp#usage-and-options).
|
||||
|
||||
## 存储间传输
|
||||
|
||||
使用 `/transfer` 命令可以在不同存储之间直接传输文件, 无需经过 Telegram.
|
||||
|
||||
```bash
|
||||
/transfer <source_storage>:/<source_path> [filter]
|
||||
```
|
||||
|
||||
参数说明:
|
||||
|
||||
- `source_storage`: 源存储名称
|
||||
- `source_path`: 源路径
|
||||
- `filter`: 可选的正则表达式过滤器, 只传输匹配的文件
|
||||
|
||||
示例:
|
||||
|
||||
```bash
|
||||
# 传输整个目录
|
||||
/transfer local1:/downloads
|
||||
|
||||
# 传输指定路径的文件
|
||||
/transfer alist1:/media/photos
|
||||
|
||||
# 只传输 mp4 文件
|
||||
/transfer webdav1:/videos ".*\.mp4$"
|
||||
|
||||
# 传输图片文件
|
||||
/transfer local1:/pictures "(?i)\.(jpg|png|gif)$"
|
||||
```
|
||||
|
||||
Bot 会:
|
||||
|
||||
1. 列出源路径下的所有文件
|
||||
2. 应用过滤器 (如果提供)
|
||||
3. 显示文件数量和总大小
|
||||
4. 让你选择目标存储
|
||||
5. 让你选择目标目录 (如果该存储配置了目录)
|
||||
6. 开始传输任务
|
||||
|
||||
注意:
|
||||
|
||||
- 源存储必须支持列举和读取功能
|
||||
- 目标存储必须支持写入功能
|
||||
- 传输过程显示实时进度
|
||||
- 支持取消正在进行的传输任务
|
||||
|
||||
## 转存 Telegram 之外的文件
|
||||
|
||||
除了 Telegram 上的文件, Bot 还可通过 JavaScript 插件或内置解析器来支持转存其他网站的文件.
|
||||
|
||||
@@ -4,6 +4,6 @@ package storage
|
||||
|
||||
// StorageType
|
||||
/* ENUM(
|
||||
local, webdav, alist, minio, telegram, s3, rclone
|
||||
local, webdav, alist, minio, telegram, s3
|
||||
) */
|
||||
type StorageType string
|
||||
|
||||
@@ -24,8 +24,6 @@ const (
|
||||
Telegram StorageType = "telegram"
|
||||
// S3 is a StorageType of type s3.
|
||||
S3 StorageType = "s3"
|
||||
// Rclone is a StorageType of type rclone.
|
||||
Rclone StorageType = "rclone"
|
||||
)
|
||||
|
||||
var ErrInvalidStorageType = fmt.Errorf("not a valid StorageType, try [%s]", strings.Join(_StorageTypeNames, ", "))
|
||||
@@ -37,7 +35,6 @@ var _StorageTypeNames = []string{
|
||||
string(Minio),
|
||||
string(Telegram),
|
||||
string(S3),
|
||||
string(Rclone),
|
||||
}
|
||||
|
||||
// StorageTypeNames returns a list of possible string values of StorageType.
|
||||
@@ -56,7 +53,6 @@ func StorageTypeValues() []StorageType {
|
||||
Minio,
|
||||
Telegram,
|
||||
S3,
|
||||
Rclone,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -79,7 +75,6 @@ var _StorageTypeValue = map[string]StorageType{
|
||||
"minio": Minio,
|
||||
"telegram": Telegram,
|
||||
"s3": S3,
|
||||
"rclone": Rclone,
|
||||
}
|
||||
|
||||
// ParseStorageType attempts to convert a string to a StorageType.
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
package tasktype
|
||||
|
||||
// ENUM(tgfiles,tphpics,parseditem,directlinks,aria2,ytdlp)
|
||||
//
|
||||
//go:generate go-enum --values --names --flag --nocase
|
||||
// ENUM(tgfiles,tphpics,parseditem,directlinks,aria2,ytdlp,transfer)
|
||||
type TaskType string
|
||||
|
||||
@@ -24,8 +24,6 @@ const (
|
||||
TaskTypeAria2 TaskType = "aria2"
|
||||
// TaskTypeYtdlp is a TaskType of type ytdlp.
|
||||
TaskTypeYtdlp TaskType = "ytdlp"
|
||||
// TaskTypeTransfer is a TaskType of type transfer.
|
||||
TaskTypeTransfer TaskType = "transfer"
|
||||
)
|
||||
|
||||
var ErrInvalidTaskType = fmt.Errorf("not a valid TaskType, try [%s]", strings.Join(_TaskTypeNames, ", "))
|
||||
@@ -37,7 +35,6 @@ var _TaskTypeNames = []string{
|
||||
string(TaskTypeDirectlinks),
|
||||
string(TaskTypeAria2),
|
||||
string(TaskTypeYtdlp),
|
||||
string(TaskTypeTransfer),
|
||||
}
|
||||
|
||||
// TaskTypeNames returns a list of possible string values of TaskType.
|
||||
@@ -56,7 +53,6 @@ func TaskTypeValues() []TaskType {
|
||||
TaskTypeDirectlinks,
|
||||
TaskTypeAria2,
|
||||
TaskTypeYtdlp,
|
||||
TaskTypeTransfer,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -79,7 +75,6 @@ var _TaskTypeValue = map[string]TaskType{
|
||||
"directlinks": TaskTypeDirectlinks,
|
||||
"aria2": TaskTypeAria2,
|
||||
"ytdlp": TaskTypeYtdlp,
|
||||
"transfer": TaskTypeTransfer,
|
||||
}
|
||||
|
||||
// ParseTaskType attempts to convert a string to a TaskType.
|
||||
|
||||
@@ -1,12 +0,0 @@
|
||||
package storagetypes
|
||||
|
||||
import "time"
|
||||
|
||||
// FileInfo represents file metadata
|
||||
type FileInfo struct {
|
||||
Name string
|
||||
Path string
|
||||
Size int64
|
||||
IsDir bool
|
||||
ModTime time.Time
|
||||
}
|
||||
@@ -50,10 +50,6 @@ type Add struct {
|
||||
// ytdlp
|
||||
YtdlpURLs []string
|
||||
YtdlpFlags []string
|
||||
// transfer
|
||||
TransferSourceStorName string
|
||||
TransferSourcePath string
|
||||
TransferFiles []string // file paths relative to source storage
|
||||
}
|
||||
|
||||
type SetDefaultStorage struct {
|
||||
|
||||
@@ -16,7 +16,6 @@ import (
|
||||
config "github.com/krau/SaveAny-Bot/config/storage"
|
||||
"github.com/krau/SaveAny-Bot/pkg/enums/ctxkey"
|
||||
storenum "github.com/krau/SaveAny-Bot/pkg/enums/storage"
|
||||
"github.com/krau/SaveAny-Bot/pkg/storagetypes"
|
||||
)
|
||||
|
||||
type Alist struct {
|
||||
@@ -104,7 +103,7 @@ func (a *Alist) Name() string {
|
||||
|
||||
func (a *Alist) Save(ctx context.Context, reader io.Reader, storagePath string) error {
|
||||
a.logger.Infof("Saving file to %s", storagePath)
|
||||
storagePath = a.JoinStoragePath(storagePath)
|
||||
|
||||
ext := path.Ext(storagePath)
|
||||
base := strings.TrimSuffix(storagePath, ext)
|
||||
candidate := storagePath
|
||||
@@ -216,156 +215,3 @@ func (a *Alist) Exists(ctx context.Context, storagePath string) bool {
|
||||
func (a *Alist) CannotStream() string {
|
||||
return "Alist does not support chunked transfer encoding"
|
||||
}
|
||||
|
||||
// ListFiles implements StorageListable interface
|
||||
func (a *Alist) ListFiles(ctx context.Context, dirPath string) ([]storagetypes.FileInfo, error) {
|
||||
a.logger.Debugf("Listing files in directory: %s", dirPath)
|
||||
|
||||
reqBody := fsListRequest{
|
||||
Path: dirPath,
|
||||
Password: "",
|
||||
Page: 1,
|
||||
PerPage: 0, // 0 means all files
|
||||
Refresh: false,
|
||||
}
|
||||
|
||||
bodyBytes, err := json.Marshal(reqBody)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to marshal request body: %w", err)
|
||||
}
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPost, a.baseURL+"/api/fs/list", bytes.NewBuffer(bodyBytes))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create request: %w", err)
|
||||
}
|
||||
req.Header.Set("Authorization", a.token)
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
|
||||
resp, err := a.client.Do(req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to send request: %w", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, fmt.Errorf("failed to list files: %s", resp.Status)
|
||||
}
|
||||
|
||||
data, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read response body: %w", err)
|
||||
}
|
||||
|
||||
var listResp fsListResponse
|
||||
if err := json.Unmarshal(data, &listResp); err != nil {
|
||||
return nil, fmt.Errorf("failed to unmarshal list response: %w", err)
|
||||
}
|
||||
|
||||
if listResp.Code != http.StatusOK {
|
||||
return nil, fmt.Errorf("failed to list files: %d, %s", listResp.Code, listResp.Message)
|
||||
}
|
||||
|
||||
files := make([]storagetypes.FileInfo, 0, len(listResp.Data.Content))
|
||||
for _, item := range listResp.Data.Content {
|
||||
// Parse modified time; log failures but keep zero value on error.
|
||||
var modTime time.Time
|
||||
if item.Modified != "" {
|
||||
parsedTime, err := time.Parse(time.RFC3339, item.Modified)
|
||||
if err != nil {
|
||||
a.logger.With(
|
||||
"path", path.Join(dirPath, item.Name),
|
||||
"modified_raw", item.Modified,
|
||||
).Warnf("failed to parse modified time for file")
|
||||
} else {
|
||||
modTime = parsedTime
|
||||
}
|
||||
}
|
||||
|
||||
files = append(files, storagetypes.FileInfo{
|
||||
Name: item.Name,
|
||||
Path: path.Join(dirPath, item.Name),
|
||||
Size: item.Size,
|
||||
IsDir: item.IsDir,
|
||||
ModTime: modTime,
|
||||
})
|
||||
}
|
||||
|
||||
a.logger.Debugf("Found %d files in directory %s", len(files), dirPath)
|
||||
return files, nil
|
||||
}
|
||||
|
||||
// OpenFile implements StorageReadable interface
|
||||
func (a *Alist) OpenFile(ctx context.Context, filePath string) (io.ReadCloser, int64, error) {
|
||||
a.logger.Debugf("Opening file: %s", filePath)
|
||||
|
||||
// First, get file info to get the raw_url
|
||||
reqBody := map[string]any{
|
||||
"path": filePath,
|
||||
"password": "",
|
||||
}
|
||||
|
||||
bodyBytes, err := json.Marshal(reqBody)
|
||||
if err != nil {
|
||||
return nil, 0, fmt.Errorf("failed to marshal request body: %w", err)
|
||||
}
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPost, a.baseURL+"/api/fs/get", bytes.NewBuffer(bodyBytes))
|
||||
if err != nil {
|
||||
return nil, 0, fmt.Errorf("failed to create request: %w", err)
|
||||
}
|
||||
req.Header.Set("Authorization", a.token)
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
|
||||
resp, err := a.client.Do(req)
|
||||
if err != nil {
|
||||
return nil, 0, fmt.Errorf("failed to send request: %w", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, 0, fmt.Errorf("failed to get file info: %s", resp.Status)
|
||||
}
|
||||
|
||||
data, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, 0, fmt.Errorf("failed to read response body: %w", err)
|
||||
}
|
||||
|
||||
var getResp fsGetResponse
|
||||
if err := json.Unmarshal(data, &getResp); err != nil {
|
||||
return nil, 0, fmt.Errorf("failed to unmarshal get response: %w", err)
|
||||
}
|
||||
|
||||
if getResp.Code != http.StatusOK {
|
||||
return nil, 0, fmt.Errorf("failed to get file info: %d, %s", getResp.Code, getResp.Message)
|
||||
}
|
||||
|
||||
if getResp.Data.IsDir {
|
||||
return nil, 0, fmt.Errorf("path is a directory, not a file")
|
||||
}
|
||||
|
||||
// Download the file from raw_url
|
||||
downloadURL := getResp.Data.RawURL
|
||||
if downloadURL == "" {
|
||||
// If no raw_url, construct download URL
|
||||
downloadURL = a.baseURL + "/d" + filePath
|
||||
}
|
||||
|
||||
downloadReq, err := http.NewRequestWithContext(ctx, http.MethodGet, downloadURL, nil)
|
||||
if err != nil {
|
||||
return nil, 0, fmt.Errorf("failed to create download request: %w", err)
|
||||
}
|
||||
|
||||
downloadResp, err := a.client.Do(downloadReq)
|
||||
if err != nil {
|
||||
return nil, 0, fmt.Errorf("failed to download file: %w", err)
|
||||
}
|
||||
|
||||
if downloadResp.StatusCode != http.StatusOK {
|
||||
downloadResp.Body.Close()
|
||||
return nil, 0, fmt.Errorf("failed to download file: %s", downloadResp.Status)
|
||||
}
|
||||
|
||||
a.logger.Debugf("Opened file %s, size: %d bytes", filePath, getResp.Data.Size)
|
||||
return downloadResp.Body, getResp.Data.Size, nil
|
||||
}
|
||||
|
||||
@@ -46,46 +46,4 @@ type putResponse struct {
|
||||
type fsGetResponse struct {
|
||||
Code int `json:"code"`
|
||||
Message string `json:"message"`
|
||||
Data struct {
|
||||
Name string `json:"name"`
|
||||
Size int64 `json:"size"`
|
||||
IsDir bool `json:"is_dir"`
|
||||
Modified string `json:"modified"`
|
||||
Created string `json:"created"`
|
||||
Sign string `json:"sign"`
|
||||
Thumb string `json:"thumb"`
|
||||
Type int `json:"type"`
|
||||
RawURL string `json:"raw_url"`
|
||||
Provider string `json:"provider"`
|
||||
} `json:"data"`
|
||||
}
|
||||
|
||||
type fsListRequest struct {
|
||||
Path string `json:"path"`
|
||||
Password string `json:"password"`
|
||||
Page int `json:"page"`
|
||||
PerPage int `json:"per_page"`
|
||||
Refresh bool `json:"refresh"`
|
||||
}
|
||||
|
||||
type fsListResponse struct {
|
||||
Code int `json:"code"`
|
||||
Message string `json:"message"`
|
||||
Data struct {
|
||||
Content []struct {
|
||||
Name string `json:"name"`
|
||||
Size int64 `json:"size"`
|
||||
IsDir bool `json:"is_dir"`
|
||||
Modified string `json:"modified"`
|
||||
Created string `json:"created"`
|
||||
Sign string `json:"sign"`
|
||||
Thumb string `json:"thumb"`
|
||||
Type int `json:"type"`
|
||||
} `json:"content"`
|
||||
Total int64 `json:"total"`
|
||||
Readme string `json:"readme"`
|
||||
Header string `json:"header"`
|
||||
Write bool `json:"write"`
|
||||
Provider string `json:"provider"`
|
||||
} `json:"data"`
|
||||
}
|
||||
|
||||
@@ -6,7 +6,6 @@ import (
|
||||
|
||||
"github.com/charmbracelet/log"
|
||||
"github.com/krau/SaveAny-Bot/config"
|
||||
storenum "github.com/krau/SaveAny-Bot/pkg/enums/storage"
|
||||
)
|
||||
|
||||
var UserStorages = make(map[int64][]Storage)
|
||||
@@ -80,14 +79,3 @@ func LoadStorages(ctx context.Context) {
|
||||
UserStorages[int64(user)] = GetUserStorages(ctx, int64(user))
|
||||
}
|
||||
}
|
||||
|
||||
// GetTelegramStorageByUserID returns the first enabled Telegram storage for the user
|
||||
func GetTelegramStorageByUserID(ctx context.Context, chatID int64) (Storage, error) {
|
||||
storages := GetUserStorages(ctx, chatID)
|
||||
for _, stor := range storages {
|
||||
if stor.Type() == storenum.Telegram {
|
||||
return stor, nil
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("no telegram storage found for user %d", chatID)
|
||||
}
|
||||
|
||||
@@ -12,7 +12,6 @@ import (
|
||||
"github.com/duke-git/lancet/v2/fileutil"
|
||||
config "github.com/krau/SaveAny-Bot/config/storage"
|
||||
storenum "github.com/krau/SaveAny-Bot/pkg/enums/storage"
|
||||
"github.com/krau/SaveAny-Bot/pkg/storagetypes"
|
||||
)
|
||||
|
||||
type Local struct {
|
||||
@@ -51,7 +50,6 @@ func (l *Local) JoinStoragePath(path string) string {
|
||||
|
||||
func (l *Local) Save(ctx context.Context, r io.Reader, storagePath string) error {
|
||||
l.logger.Infof("Saving file to %s", storagePath)
|
||||
storagePath = l.JoinStoragePath(storagePath)
|
||||
|
||||
ext := filepath.Ext(storagePath)
|
||||
base := strings.TrimSuffix(storagePath, ext)
|
||||
@@ -83,51 +81,3 @@ func (l *Local) Exists(ctx context.Context, storagePath string) bool {
|
||||
}
|
||||
return fileutil.IsExist(absPath)
|
||||
}
|
||||
|
||||
// ListFiles implements StorageListable interface
|
||||
func (l *Local) ListFiles(ctx context.Context, dirPath string) ([]storagetypes.FileInfo, error) {
|
||||
absPath := l.JoinStoragePath(dirPath)
|
||||
|
||||
entries, err := os.ReadDir(absPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read directory %s: %w", absPath, err)
|
||||
}
|
||||
|
||||
files := make([]storagetypes.FileInfo, 0, len(entries))
|
||||
for _, entry := range entries {
|
||||
info, err := entry.Info()
|
||||
if err != nil {
|
||||
l.logger.Warnf("Failed to get file info for %s: %v", entry.Name(), err)
|
||||
continue
|
||||
}
|
||||
|
||||
filePath := filepath.Join(dirPath, entry.Name())
|
||||
files = append(files, storagetypes.FileInfo{
|
||||
Name: entry.Name(),
|
||||
Path: filePath,
|
||||
Size: info.Size(),
|
||||
IsDir: entry.IsDir(),
|
||||
ModTime: info.ModTime(),
|
||||
})
|
||||
}
|
||||
|
||||
return files, nil
|
||||
}
|
||||
|
||||
// OpenFile implements StorageReadable interface
|
||||
func (l *Local) OpenFile(ctx context.Context, filePath string) (io.ReadCloser, int64, error) {
|
||||
absPath := l.JoinStoragePath(filePath)
|
||||
|
||||
file, err := os.Open(absPath)
|
||||
if err != nil {
|
||||
return nil, 0, fmt.Errorf("failed to open file %s: %w", absPath, err)
|
||||
}
|
||||
|
||||
stat, err := file.Stat()
|
||||
if err != nil {
|
||||
file.Close()
|
||||
return nil, 0, fmt.Errorf("failed to stat file %s: %w", absPath, err)
|
||||
}
|
||||
|
||||
return file, stat.Size(), nil
|
||||
}
|
||||
|
||||
@@ -77,13 +77,13 @@ func (m *Minio) JoinStoragePath(p string) string {
|
||||
|
||||
func (m *Minio) Save(ctx context.Context, r io.Reader, storagePath string) error {
|
||||
m.logger.Infof("Saving file from reader to %s", storagePath)
|
||||
storagePath = m.JoinStoragePath(storagePath)
|
||||
|
||||
ext := path.Ext(storagePath)
|
||||
base := strings.TrimSuffix(storagePath, ext)
|
||||
candidate := storagePath
|
||||
for i := 1; m.Exists(ctx, candidate); i++ {
|
||||
candidate = fmt.Sprintf("%s_%d%s", base, i, ext)
|
||||
if i > 10 {
|
||||
if i > 100 {
|
||||
m.logger.Errorf("Too many attempts to find a unique filename for %s", storagePath)
|
||||
candidate = fmt.Sprintf("%s_%s%s", base, xid.New().String(), ext)
|
||||
break
|
||||
|
||||
@@ -1,14 +0,0 @@
|
||||
package rclone
|
||||
|
||||
import "errors"
|
||||
|
||||
var (
|
||||
ErrRcloneNotFound = errors.New("rclone: rclone command not found in PATH")
|
||||
ErrRemoteNotFound = errors.New("rclone: remote not found")
|
||||
ErrFailedToSaveFile = errors.New("rclone: failed to save file")
|
||||
ErrFailedToListFiles = errors.New("rclone: failed to list files")
|
||||
ErrFailedToOpenFile = errors.New("rclone: failed to open file")
|
||||
ErrFailedToCheckFile = errors.New("rclone: failed to check file exists")
|
||||
ErrFailedToCreateDir = errors.New("rclone: failed to create directory")
|
||||
ErrCommandFailed = errors.New("rclone: command execution failed")
|
||||
)
|
||||
@@ -1,289 +0,0 @@
|
||||
package rclone
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os/exec"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/charmbracelet/log"
|
||||
config "github.com/krau/SaveAny-Bot/config/storage"
|
||||
storenum "github.com/krau/SaveAny-Bot/pkg/enums/storage"
|
||||
"github.com/krau/SaveAny-Bot/pkg/storagetypes"
|
||||
"github.com/rs/xid"
|
||||
)
|
||||
|
||||
type Rclone struct {
|
||||
config config.RcloneStorageConfig
|
||||
logger *log.Logger
|
||||
}
|
||||
|
||||
func (r *Rclone) Init(ctx context.Context, cfg config.StorageConfig) error {
|
||||
rcloneConfig, ok := cfg.(*config.RcloneStorageConfig)
|
||||
if !ok {
|
||||
return fmt.Errorf("failed to cast rclone config")
|
||||
}
|
||||
if err := rcloneConfig.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
r.config = *rcloneConfig
|
||||
r.logger = log.FromContext(ctx).WithPrefix(fmt.Sprintf("rclone[%s]", r.config.Name))
|
||||
|
||||
// 检查 rclone 是否安装
|
||||
if _, err := exec.LookPath("rclone"); err != nil {
|
||||
return ErrRcloneNotFound
|
||||
}
|
||||
|
||||
args := r.buildBaseArgs()
|
||||
args = append(args, "listremotes")
|
||||
cmd := exec.CommandContext(ctx, "rclone", args...)
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
r.logger.Errorf("Failed to list remotes: %v", err)
|
||||
return fmt.Errorf("failed to verify rclone: %w", err)
|
||||
}
|
||||
|
||||
remoteName := strings.TrimSuffix(r.config.Remote, ":")
|
||||
if !strings.HasSuffix(r.config.Remote, ":") {
|
||||
remoteName = r.config.Remote
|
||||
}
|
||||
|
||||
found := false
|
||||
scanner := bufio.NewScanner(bytes.NewReader(output))
|
||||
for scanner.Scan() {
|
||||
line := strings.TrimSpace(scanner.Text())
|
||||
line = strings.TrimSuffix(line, ":")
|
||||
if line == remoteName {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
r.logger.Errorf("Remote %s not found in rclone config", r.config.Remote)
|
||||
return ErrRemoteNotFound
|
||||
}
|
||||
|
||||
r.logger.Infof("Initialized rclone storage with remote: %s", r.config.Remote)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *Rclone) Type() storenum.StorageType {
|
||||
return storenum.Rclone
|
||||
}
|
||||
|
||||
func (r *Rclone) Name() string {
|
||||
return r.config.Name
|
||||
}
|
||||
|
||||
func (r *Rclone) buildBaseArgs() []string {
|
||||
var args []string
|
||||
if r.config.ConfigPath != "" {
|
||||
args = append(args, "--config", r.config.ConfigPath)
|
||||
}
|
||||
args = append(args, r.config.Flags...)
|
||||
return args
|
||||
}
|
||||
|
||||
func (r *Rclone) getRemotePath(storagePath string) string {
|
||||
remote := r.config.Remote
|
||||
if !strings.HasSuffix(remote, ":") {
|
||||
remote += ":"
|
||||
}
|
||||
basePath := strings.TrimPrefix(r.config.BasePath, "/")
|
||||
fullPath := path.Join(basePath, storagePath)
|
||||
return remote + fullPath
|
||||
}
|
||||
|
||||
func (r *Rclone) Save(ctx context.Context, reader io.Reader, storagePath string) error {
|
||||
r.logger.Infof("Saving file to %s", storagePath)
|
||||
|
||||
ext := path.Ext(storagePath)
|
||||
base := strings.TrimSuffix(storagePath, ext)
|
||||
candidate := storagePath
|
||||
for i := 1; r.Exists(ctx, candidate); i++ {
|
||||
candidate = fmt.Sprintf("%s_%d%s", base, i, ext)
|
||||
if i > 100 {
|
||||
r.logger.Errorf("Too many attempts to find a unique filename for %s", storagePath)
|
||||
candidate = fmt.Sprintf("%s_%s%s", base, xid.New().String(), ext)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
remotePath := r.getRemotePath(candidate)
|
||||
r.logger.Debugf("Remote path: %s", remotePath)
|
||||
|
||||
// Use rclone rcat to read from stdin and upload
|
||||
args := r.buildBaseArgs()
|
||||
args = append(args, "rcat", remotePath)
|
||||
|
||||
cmd := exec.CommandContext(ctx, "rclone", args...)
|
||||
cmd.Stdin = reader
|
||||
|
||||
var stderr bytes.Buffer
|
||||
cmd.Stderr = &stderr
|
||||
|
||||
if err := cmd.Run(); err != nil {
|
||||
r.logger.Errorf("Failed to save file: %v, stderr: %s", err, stderr.String())
|
||||
return fmt.Errorf("%w: %s", ErrFailedToSaveFile, stderr.String())
|
||||
}
|
||||
|
||||
r.logger.Infof("Successfully saved file to %s", candidate)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *Rclone) Exists(ctx context.Context, storagePath string) bool {
|
||||
remotePath := r.getRemotePath(storagePath)
|
||||
|
||||
args := r.buildBaseArgs()
|
||||
args = append(args, "lsf", remotePath)
|
||||
|
||||
cmd := exec.CommandContext(ctx, "rclone", args...)
|
||||
err := cmd.Run()
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// lsjsonItem represents a single entry in the output of `rclone lsjson`
|
||||
type lsjsonItem struct {
|
||||
Path string `json:"Path"`
|
||||
Name string `json:"Name"`
|
||||
Size int64 `json:"Size"`
|
||||
MimeType string `json:"MimeType"`
|
||||
ModTime string `json:"ModTime"`
|
||||
IsDir bool `json:"IsDir"`
|
||||
}
|
||||
|
||||
// ListFiles implements storage.StorageListable
|
||||
func (r *Rclone) ListFiles(ctx context.Context, dirPath string) ([]storagetypes.FileInfo, error) {
|
||||
r.logger.Infof("Listing files in %s", dirPath)
|
||||
|
||||
remotePath := r.getRemotePath(dirPath)
|
||||
|
||||
args := r.buildBaseArgs()
|
||||
args = append(args, "lsjson", remotePath)
|
||||
|
||||
cmd := exec.CommandContext(ctx, "rclone", args...)
|
||||
var stdout, stderr bytes.Buffer
|
||||
cmd.Stdout = &stdout
|
||||
cmd.Stderr = &stderr
|
||||
|
||||
if err := cmd.Run(); err != nil {
|
||||
r.logger.Errorf("Failed to list files: %v, stderr: %s", err, stderr.String())
|
||||
return nil, fmt.Errorf("%w: %s", ErrFailedToListFiles, stderr.String())
|
||||
}
|
||||
|
||||
var items []lsjsonItem
|
||||
if err := json.Unmarshal(stdout.Bytes(), &items); err != nil {
|
||||
r.logger.Errorf("Failed to parse lsjson output: %v", err)
|
||||
return nil, fmt.Errorf("failed to parse lsjson output: %w", err)
|
||||
}
|
||||
|
||||
files := make([]storagetypes.FileInfo, 0, len(items))
|
||||
for _, item := range items {
|
||||
var modTime time.Time
|
||||
if item.ModTime != "" {
|
||||
parsedTime, err := time.Parse(time.RFC3339Nano, item.ModTime)
|
||||
if err != nil {
|
||||
r.logger.Warnf("Failed to parse mod time %q for %s: %v", item.ModTime, item.Name, err)
|
||||
} else {
|
||||
modTime = parsedTime
|
||||
}
|
||||
}
|
||||
|
||||
files = append(files, storagetypes.FileInfo{
|
||||
Name: item.Name,
|
||||
Path: path.Join(dirPath, item.Name),
|
||||
Size: item.Size,
|
||||
IsDir: item.IsDir,
|
||||
ModTime: modTime,
|
||||
})
|
||||
}
|
||||
|
||||
r.logger.Debugf("Found %d files/directories in %s", len(files), dirPath)
|
||||
return files, nil
|
||||
}
|
||||
|
||||
// OpenFile implements storage.StorageReadable
|
||||
func (r *Rclone) OpenFile(ctx context.Context, filePath string) (io.ReadCloser, int64, error) {
|
||||
r.logger.Infof("Opening file %s", filePath)
|
||||
|
||||
remotePath := r.getRemotePath(filePath)
|
||||
|
||||
size, err := r.getFileSize(ctx, remotePath)
|
||||
if err != nil {
|
||||
r.logger.Errorf("Failed to get file size: %v", err)
|
||||
return nil, 0, fmt.Errorf("%w: %v", ErrFailedToOpenFile, err)
|
||||
}
|
||||
|
||||
args := r.buildBaseArgs()
|
||||
args = append(args, "cat", remotePath)
|
||||
|
||||
cmd := exec.CommandContext(ctx, "rclone", args...)
|
||||
|
||||
stdout, err := cmd.StdoutPipe()
|
||||
if err != nil {
|
||||
return nil, 0, fmt.Errorf("failed to create stdout pipe: %w", err)
|
||||
}
|
||||
|
||||
if err := cmd.Start(); err != nil {
|
||||
return nil, 0, fmt.Errorf("failed to start rclone cat: %w", err)
|
||||
}
|
||||
|
||||
reader := &rcloneCatReader{
|
||||
reader: stdout,
|
||||
cmd: cmd,
|
||||
logger: r.logger,
|
||||
}
|
||||
|
||||
r.logger.Debugf("Opened file %s (size: %d bytes)", filePath, size)
|
||||
return reader, size, nil
|
||||
}
|
||||
|
||||
func (r *Rclone) getFileSize(ctx context.Context, remotePath string) (int64, error) {
|
||||
args := r.buildBaseArgs()
|
||||
args = append(args, "lsjson", remotePath)
|
||||
|
||||
cmd := exec.CommandContext(ctx, "rclone", args...)
|
||||
var stdout bytes.Buffer
|
||||
cmd.Stdout = &stdout
|
||||
|
||||
if err := cmd.Run(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
var items []lsjsonItem
|
||||
if err := json.Unmarshal(stdout.Bytes(), &items); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
if len(items) > 0 {
|
||||
return items[0].Size, nil
|
||||
}
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
type rcloneCatReader struct {
|
||||
reader io.ReadCloser
|
||||
cmd *exec.Cmd
|
||||
logger *log.Logger
|
||||
}
|
||||
|
||||
func (r *rcloneCatReader) Read(p []byte) (n int, err error) {
|
||||
return r.reader.Read(p)
|
||||
}
|
||||
|
||||
func (r *rcloneCatReader) Close() error {
|
||||
if err := r.reader.Close(); err != nil {
|
||||
r.logger.Warnf("Failed to close reader: %v", err)
|
||||
}
|
||||
if err := r.cmd.Wait(); err != nil {
|
||||
r.logger.Warnf("rclone cat process exited with error: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -65,7 +65,7 @@ func (m *S3) JoinStoragePath(p string) string {
|
||||
|
||||
func (m *S3) Save(ctx context.Context, r io.Reader, storagePath string) error {
|
||||
m.logger.Infof("Saving file from reader to %s", storagePath)
|
||||
storagePath = m.JoinStoragePath(storagePath)
|
||||
|
||||
ext := path.Ext(storagePath)
|
||||
base := strings.TrimSuffix(storagePath, ext)
|
||||
candidate := storagePath
|
||||
@@ -73,7 +73,7 @@ func (m *S3) Save(ctx context.Context, r io.Reader, storagePath string) error {
|
||||
// Unique filename
|
||||
for i := 1; m.Exists(ctx, candidate); i++ {
|
||||
candidate = fmt.Sprintf("%s_%d%s", base, i, ext)
|
||||
if i > 10 {
|
||||
if i > 100 {
|
||||
m.logger.Errorf("Too many attempts for unique filename: %s", storagePath)
|
||||
candidate = fmt.Sprintf("%s_%s%s", base, xid.New().String(), ext)
|
||||
break
|
||||
|
||||
@@ -7,11 +7,9 @@ import (
|
||||
|
||||
storcfg "github.com/krau/SaveAny-Bot/config/storage"
|
||||
storenum "github.com/krau/SaveAny-Bot/pkg/enums/storage"
|
||||
"github.com/krau/SaveAny-Bot/pkg/storagetypes"
|
||||
"github.com/krau/SaveAny-Bot/storage/alist"
|
||||
"github.com/krau/SaveAny-Bot/storage/local"
|
||||
"github.com/krau/SaveAny-Bot/storage/minio"
|
||||
"github.com/krau/SaveAny-Bot/storage/rclone"
|
||||
"github.com/krau/SaveAny-Bot/storage/s3"
|
||||
"github.com/krau/SaveAny-Bot/storage/telegram"
|
||||
"github.com/krau/SaveAny-Bot/storage/webdav"
|
||||
@@ -22,6 +20,7 @@ type Storage interface {
|
||||
Init(ctx context.Context, cfg storcfg.StorageConfig) error
|
||||
Type() storenum.StorageType
|
||||
Name() string
|
||||
JoinStoragePath(p string) string
|
||||
Save(ctx context.Context, reader io.Reader, storagePath string) error
|
||||
Exists(ctx context.Context, storagePath string) bool
|
||||
}
|
||||
@@ -31,18 +30,6 @@ type StorageCannotStream interface {
|
||||
CannotStream() string
|
||||
}
|
||||
|
||||
// StorageListable 表示支持列举目录内容的存储
|
||||
type StorageListable interface {
|
||||
Storage
|
||||
ListFiles(ctx context.Context, dirPath string) ([]storagetypes.FileInfo, error)
|
||||
}
|
||||
|
||||
// StorageReadable 表示支持读取文件内容的存储
|
||||
type StorageReadable interface {
|
||||
Storage
|
||||
OpenFile(ctx context.Context, filePath string) (io.ReadCloser, int64, error)
|
||||
}
|
||||
|
||||
var Storages = make(map[string]Storage)
|
||||
|
||||
type StorageConstructor func() Storage
|
||||
@@ -54,7 +41,6 @@ var storageConstructors = map[storenum.StorageType]StorageConstructor{
|
||||
storenum.Minio: func() Storage { return new(minio.Minio) },
|
||||
storenum.S3: func() Storage { return new(s3.S3) },
|
||||
storenum.Telegram: func() Storage { return new(telegram.Telegram) },
|
||||
storenum.Rclone: func() Storage { return new(rclone.Rclone) },
|
||||
}
|
||||
|
||||
// NewStorage creates a new storage instance based on the provided config and initializes it
|
||||
|
||||
@@ -99,6 +99,12 @@ func (w *splitWriter) finalize() error {
|
||||
}
|
||||
|
||||
func CreateSplitZip(ctx context.Context, reader io.Reader, size int64, fileName, outputBase string, partSize int64) error {
|
||||
// seek the reader if possible
|
||||
if rs, ok := reader.(io.ReadSeeker); ok {
|
||||
if _, err := rs.Seek(0, io.SeekStart); err != nil {
|
||||
return fmt.Errorf("failed to seek reader: %w", err)
|
||||
}
|
||||
}
|
||||
outputDir := filepath.Dir(outputBase)
|
||||
if err := os.MkdirAll(outputDir, os.ModePerm); err != nil {
|
||||
return fmt.Errorf("failed to create output directory: %w", err)
|
||||
|
||||
@@ -66,12 +66,15 @@ func (t *Telegram) Name() string {
|
||||
return t.config.Name
|
||||
}
|
||||
|
||||
func (t *Telegram) JoinStoragePath(p string) string {
|
||||
return path.Clean(p)
|
||||
}
|
||||
|
||||
func (t *Telegram) Exists(ctx context.Context, storagePath string) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (t *Telegram) Save(ctx context.Context, r io.Reader, storagePath string) error {
|
||||
storagePath = path.Clean(storagePath)
|
||||
tctx := tgutil.ExtFromContext(ctx)
|
||||
if tctx == nil {
|
||||
return fmt.Errorf("failed to get telegram context")
|
||||
@@ -89,6 +92,9 @@ func (t *Telegram) Save(ctx context.Context, r io.Reader, storagePath string) er
|
||||
return nil
|
||||
}
|
||||
rs, seekable := r.(io.ReadSeeker)
|
||||
if !seekable || rs == nil {
|
||||
return fmt.Errorf("reader must implement io.ReadSeeker")
|
||||
}
|
||||
splitSize := t.config.SplitSizeMB * 1024 * 1024
|
||||
if splitSize <= 0 {
|
||||
splitSize = DefaultSplitSize
|
||||
@@ -117,96 +123,88 @@ func (t *Telegram) Save(ctx context.Context, r io.Reader, storagePath string) er
|
||||
}
|
||||
chatID = cid
|
||||
}
|
||||
upler := uploader.NewUploader(tctx.Raw).
|
||||
WithPartSize(tglimit.MaxUploadPartSize).
|
||||
WithThreads(dlutil.BestThreads(size, config.C().Threads))
|
||||
mtype, err := mimetype.DetectReader(rs)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to detect mimetype: %w", err)
|
||||
}
|
||||
if filename == "" {
|
||||
filename = xid.New().String() + mtype.Extension()
|
||||
}
|
||||
peer := tryGetInputPeer(tctx, chatID)
|
||||
if peer == nil || peer.Zero() {
|
||||
return fmt.Errorf("failed to get input peer for chat ID %d", chatID)
|
||||
}
|
||||
var mtype *mimetype.MIME
|
||||
if seekable {
|
||||
var err error
|
||||
mtype, err = mimetype.DetectReader(rs)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to detect mimetype: %w", err)
|
||||
}
|
||||
if filename == "" {
|
||||
filename = xid.New().String() + mtype.Extension()
|
||||
}
|
||||
|
||||
if _, err := rs.Seek(0, io.SeekStart); err != nil {
|
||||
return fmt.Errorf("failed to seek reader: %w", err)
|
||||
}
|
||||
if _, err := rs.Seek(0, io.SeekStart); err != nil {
|
||||
return fmt.Errorf("failed to seek reader: %w", err)
|
||||
}
|
||||
upler := uploader.NewUploader(tctx.Raw).
|
||||
WithPartSize(tglimit.MaxUploadPartSize).
|
||||
WithThreads(dlutil.BestThreads(size, config.C().Threads))
|
||||
if size > splitSize {
|
||||
// large file, use split uploader
|
||||
return t.splitUpload(tctx, r, filename, upler, peer, size, splitSize)
|
||||
return t.splitUpload(tctx, rs, filename, upler, peer, size, splitSize)
|
||||
}
|
||||
|
||||
var file tg.InputFileClass
|
||||
var err error
|
||||
if size <= 0 {
|
||||
file, err = upler.FromReader(ctx, filename, r)
|
||||
if size < 0 {
|
||||
file, err = upler.FromReader(ctx, filename, rs)
|
||||
} else {
|
||||
file, err = upler.Upload(ctx, uploader.NewUpload(filename, r, size))
|
||||
file, err = upler.Upload(ctx, uploader.NewUpload(filename, rs, size))
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to upload file to telegram: %w", err)
|
||||
}
|
||||
caption := styling.Plain(filename)
|
||||
forceFile := t.config.ForceFile
|
||||
|
||||
if mtype != nil && strings.HasPrefix(mtype.String(), "image/") && size >= tglimit.MaxPhotoSize {
|
||||
if strings.HasPrefix(mtype.String(), "image/") && size >= tglimit.MaxPhotoSize {
|
||||
forceFile = true
|
||||
}
|
||||
doc := message.UploadedDocument(file, caption).
|
||||
Filename(filename).
|
||||
ForceFile(forceFile)
|
||||
if mtype != nil {
|
||||
doc = doc.MIME(mtype.String())
|
||||
}
|
||||
ForceFile(forceFile).
|
||||
MIME(mtype.String())
|
||||
|
||||
var media message.MediaOption = doc
|
||||
if mtype != nil && rs != nil {
|
||||
switch mtypeStr := mtype.String(); {
|
||||
case strings.HasPrefix(mtypeStr, "video/"):
|
||||
media = doc.Video().SupportsStreaming()
|
||||
thumb, err := extractThumbFrame(rs)
|
||||
|
||||
switch mtypeStr := mtype.String(); {
|
||||
case strings.HasPrefix(mtypeStr, "video/"):
|
||||
media = doc.Video().SupportsStreaming()
|
||||
thumb, err := extractThumbFrame(rs)
|
||||
if err == nil {
|
||||
thumb, err := upler.FromBytes(ctx, "thumb.jpg", thumb)
|
||||
if err == nil {
|
||||
thumb, err := upler.FromBytes(ctx, "thumb.jpg", thumb)
|
||||
if err == nil {
|
||||
doc = doc.Thumb(thumb)
|
||||
}
|
||||
doc = doc.Thumb(thumb)
|
||||
}
|
||||
rs.Seek(0, io.SeekStart)
|
||||
switch mtypeStr {
|
||||
case "video/mp4":
|
||||
info, err := getMP4Meta(rs)
|
||||
if err != nil {
|
||||
// Fallback to ffprobe if gomedia fails (e.g., malformed MP4)
|
||||
rs.Seek(0, io.SeekStart)
|
||||
info, err = getVideoMetadata(rs)
|
||||
}
|
||||
if err == nil {
|
||||
media = doc.Video().
|
||||
Duration(time.Duration(info.Duration)*time.Second).
|
||||
Resolution(info.Width, info.Height).
|
||||
SupportsStreaming()
|
||||
}
|
||||
default:
|
||||
info, err := getVideoMetadata(rs)
|
||||
if err == nil {
|
||||
media = doc.Video().
|
||||
Duration(time.Duration(info.Duration)*time.Second).
|
||||
Resolution(info.Width, info.Height).
|
||||
SupportsStreaming()
|
||||
}
|
||||
}
|
||||
case strings.HasPrefix(mtypeStr, "audio/"):
|
||||
media = doc.Audio().Title(filename)
|
||||
case strings.HasPrefix(mtypeStr, "image/") && !strings.HasSuffix(mtypeStr, "webp"):
|
||||
media = message.UploadedPhoto(file, caption)
|
||||
}
|
||||
rs.Seek(0, io.SeekStart)
|
||||
switch mtypeStr {
|
||||
case "video/mp4":
|
||||
info, err := getMP4Meta(rs)
|
||||
if err != nil {
|
||||
// Fallback to ffprobe if gomedia fails (e.g., malformed MP4)
|
||||
rs.Seek(0, io.SeekStart)
|
||||
info, err = getVideoMetadata(rs)
|
||||
}
|
||||
if err == nil {
|
||||
media = doc.Video().
|
||||
Duration(time.Duration(info.Duration)*time.Second).
|
||||
Resolution(info.Width, info.Height).
|
||||
SupportsStreaming()
|
||||
}
|
||||
default:
|
||||
info, err := getVideoMetadata(rs)
|
||||
if err == nil {
|
||||
media = doc.Video().
|
||||
Duration(time.Duration(info.Duration)*time.Second).
|
||||
Resolution(info.Width, info.Height).
|
||||
SupportsStreaming()
|
||||
}
|
||||
}
|
||||
case strings.HasPrefix(mtypeStr, "audio/"):
|
||||
media = doc.Audio().Title(filename)
|
||||
case strings.HasPrefix(mtypeStr, "image/") && !strings.HasSuffix(mtypeStr, "webp"):
|
||||
media = message.UploadedPhoto(file, caption)
|
||||
}
|
||||
sender := tctx.Sender
|
||||
_, err = sender.WithUploader(upler).To(peer).Media(ctx, media)
|
||||
@@ -217,7 +215,7 @@ func (t *Telegram) CannotStream() string {
|
||||
return "Telegram storage must use a ReaderSeeker"
|
||||
}
|
||||
|
||||
func (t *Telegram) splitUpload(ctx *ext.Context, r io.Reader, filename string, upler *uploader.Uploader, peer tg.InputPeerClass, fileSize, splitSize int64) error {
|
||||
func (t *Telegram) splitUpload(ctx *ext.Context, rs io.ReadSeeker, filename string, upler *uploader.Uploader, peer tg.InputPeerClass, fileSize, splitSize int64) error {
|
||||
tempId := xid.New().String()
|
||||
outputBase := filepath.Join(config.C().Temp.BasePath, tempId, strings.Split(filename, ".")[0])
|
||||
defer func() {
|
||||
@@ -226,7 +224,7 @@ func (t *Telegram) splitUpload(ctx *ext.Context, r io.Reader, filename string, u
|
||||
log.FromContext(ctx).Warnf("Failed to cleanup temp split files: %s", err)
|
||||
}
|
||||
}()
|
||||
if err := CreateSplitZip(ctx, r, fileSize, filename, outputBase, splitSize); err != nil {
|
||||
if err := CreateSplitZip(ctx, rs, fileSize, filename, outputBase, splitSize); err != nil {
|
||||
return fmt.Errorf("failed to create split zip: %w", err)
|
||||
}
|
||||
matched, err := filepath.Glob(outputBase + ".z*")
|
||||
|
||||
@@ -2,7 +2,6 @@ package webdav
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
@@ -26,40 +25,8 @@ const (
|
||||
WebdavMethodMkcol WebdavMethod = "MKCOL"
|
||||
WebdavMethodPropfind WebdavMethod = "PROPFIND"
|
||||
WebdavMethodPut WebdavMethod = "PUT"
|
||||
WebdavMethodGet WebdavMethod = "GET"
|
||||
)
|
||||
|
||||
// WebDAV XML structures for PROPFIND response

// Multistatus models the root <multistatus> element of a PROPFIND
// response body (HTTP 207 Multi-Status), one Response per resource.
type Multistatus struct {
	XMLName   xml.Name   `xml:"multistatus"`
	Responses []Response `xml:"response"`
}

// Response is a single <response> entry: the resource's href plus the
// property/status block reported for it.
type Response struct {
	Href     string   `xml:"href"` // server-side path of the resource (URL-escaped)
	Propstat Propstat `xml:"propstat"`
}

// Propstat pairs a set of properties with the status that applies to them.
type Propstat struct {
	Prop   Prop   `xml:"prop"`
	Status string `xml:"status"`
}

// Prop carries the subset of DAV properties this client reads.
type Prop struct {
	ResourceType     ResourceType `xml:"resourcetype"`     // collection (directory) marker
	GetContentLength int64        `xml:"getcontentlength"` // size in bytes
	GetLastModified  string       `xml:"getlastmodified"`  // timestamp string (parsed as RFC1123 by callers)
	DisplayName      string       `xml:"displayname"`
}

// ResourceType wraps the <resourcetype> element; a nested <collection>
// element marks the resource as a directory.
type ResourceType struct {
	Collection *struct{} `xml:"collection"` // non-nil iff the resource is a collection
}

// IsCollection reports whether the resource is a WebDAV collection
// (i.e. a directory).
func (rt ResourceType) IsCollection() bool {
	return rt.Collection != nil
}
|
||||
|
||||
func NewClient(baseURL, username, password string, httpClient *http.Client) *Client {
|
||||
if !strings.HasSuffix(baseURL, "/") {
|
||||
baseURL += "/"
|
||||
@@ -164,79 +131,5 @@ func (c *Client) WriteFile(ctx context.Context, remotePath string, content io.Re
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("PUT: %s", resp.Status)
|
||||
}
|
||||
|
||||
// ListDir lists files and directories in the given path
|
||||
func (c *Client) ListDir(ctx context.Context, dirPath string) ([]Response, error) {
|
||||
dirPath = strings.Trim(dirPath, "/")
|
||||
u, err := url.Parse(c.BaseURL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
u.Path = path.Join(u.Path, dirPath)
|
||||
if !strings.HasSuffix(u.Path, "/") {
|
||||
u.Path += "/"
|
||||
}
|
||||
|
||||
resp, err := c.doRequest(ctx, WebdavMethodPropfind, u.String(), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusMultiStatus {
|
||||
return nil, fmt.Errorf("PROPFIND: %s", resp.Status)
|
||||
}
|
||||
|
||||
var multistatus Multistatus
|
||||
if err := xml.NewDecoder(resp.Body).Decode(&multistatus); err != nil {
|
||||
return nil, fmt.Errorf("failed to decode PROPFIND response: %w", err)
|
||||
}
|
||||
|
||||
// Filter out the directory itself from results
|
||||
var results []Response
|
||||
basePath := u.Path
|
||||
for _, r := range multistatus.Responses {
|
||||
decodedHref, err := url.PathUnescape(r.Href)
|
||||
if err != nil {
|
||||
decodedHref = r.Href
|
||||
}
|
||||
// Skip the directory itself
|
||||
if strings.TrimSuffix(decodedHref, "/") == strings.TrimSuffix(basePath, "/") {
|
||||
continue
|
||||
}
|
||||
results = append(results, r)
|
||||
}
|
||||
|
||||
return results, nil
|
||||
}
|
||||
|
||||
// ReadFile downloads a file and returns a ReadCloser
|
||||
func (c *Client) ReadFile(ctx context.Context, filePath string) (io.ReadCloser, int64, error) {
|
||||
filePath = strings.Trim(filePath, "/")
|
||||
u, err := url.Parse(c.BaseURL)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
u.Path = path.Join(u.Path, filePath)
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", u.String(), nil)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
if c.Username != "" && c.Password != "" {
|
||||
req.SetBasicAuth(c.Username, c.Password)
|
||||
}
|
||||
|
||||
resp, err := c.httpClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
resp.Body.Close()
|
||||
return nil, 0, fmt.Errorf("GET: %s", resp.Status)
|
||||
}
|
||||
|
||||
return resp.Body, resp.ContentLength, nil
|
||||
|
||||
}
|
||||
|
||||
@@ -5,7 +5,6 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -13,7 +12,6 @@ import (
|
||||
"github.com/charmbracelet/log"
|
||||
config "github.com/krau/SaveAny-Bot/config/storage"
|
||||
storenum "github.com/krau/SaveAny-Bot/pkg/enums/storage"
|
||||
"github.com/krau/SaveAny-Bot/pkg/storagetypes"
|
||||
"github.com/rs/xid"
|
||||
)
|
||||
|
||||
@@ -53,7 +51,7 @@ func (w *Webdav) JoinStoragePath(p string) string {
|
||||
|
||||
func (w *Webdav) Save(ctx context.Context, r io.Reader, storagePath string) error {
|
||||
w.logger.Infof("Saving file to %s", storagePath)
|
||||
storagePath = w.JoinStoragePath(storagePath)
|
||||
|
||||
ext := path.Ext(storagePath)
|
||||
base := strings.TrimSuffix(storagePath, ext)
|
||||
candidate := storagePath
|
||||
@@ -86,80 +84,3 @@ func (w *Webdav) Exists(ctx context.Context, storagePath string) bool {
|
||||
}
|
||||
return exists
|
||||
}
|
||||
|
||||
// ListFiles implements storage.StorageListable
|
||||
func (w *Webdav) ListFiles(ctx context.Context, dirPath string) ([]storagetypes.FileInfo, error) {
|
||||
w.logger.Infof("Listing files in %s", dirPath)
|
||||
|
||||
// Join with base path
|
||||
fullPath := path.Join(w.config.BasePath, dirPath)
|
||||
|
||||
responses, err := w.client.ListDir(ctx, fullPath)
|
||||
if err != nil {
|
||||
w.logger.Errorf("Failed to list directory %s: %v", fullPath, err)
|
||||
return nil, fmt.Errorf("failed to list directory: %w", err)
|
||||
}
|
||||
|
||||
files := make([]storagetypes.FileInfo, 0, len(responses))
|
||||
for _, resp := range responses {
|
||||
// Parse the href to get the file name
|
||||
decodedHref, err := url.PathUnescape(resp.Href)
|
||||
if err != nil {
|
||||
w.logger.Warnf("Failed to unescape href %q: %v; using original value", resp.Href, err)
|
||||
decodedHref = resp.Href
|
||||
}
|
||||
|
||||
// Extract filename from href
|
||||
name := path.Base(strings.TrimSuffix(decodedHref, "/"))
|
||||
if name == "" || name == "." {
|
||||
continue
|
||||
}
|
||||
|
||||
// Parse modification time
|
||||
var modTime time.Time
|
||||
if resp.Propstat.Prop.GetLastModified != "" {
|
||||
// Try RFC1123 format (standard for WebDAV)
|
||||
parsedTime, err := time.Parse(time.RFC1123, resp.Propstat.Prop.GetLastModified)
|
||||
if err != nil {
|
||||
w.logger.Warnf("Failed to parse last modified time %q for %s: %v", resp.Propstat.Prop.GetLastModified, decodedHref, err)
|
||||
} else {
|
||||
modTime = parsedTime
|
||||
}
|
||||
}
|
||||
|
||||
isDir := resp.Propstat.Prop.ResourceType.IsCollection()
|
||||
|
||||
filePath := strings.TrimPrefix(decodedHref, path.Join("/", strings.Trim(path.Dir(fullPath), "/")))
|
||||
filePath = strings.TrimPrefix(filePath, "/")
|
||||
|
||||
fileInfo := storagetypes.FileInfo{
|
||||
Name: name,
|
||||
Path: path.Join(dirPath, name),
|
||||
Size: resp.Propstat.Prop.GetContentLength,
|
||||
IsDir: isDir,
|
||||
ModTime: modTime,
|
||||
}
|
||||
|
||||
files = append(files, fileInfo)
|
||||
}
|
||||
|
||||
w.logger.Debugf("Found %d files/directories in %s", len(files), dirPath)
|
||||
return files, nil
|
||||
}
|
||||
|
||||
// OpenFile implements storage.StorageReadable
|
||||
func (w *Webdav) OpenFile(ctx context.Context, filePath string) (io.ReadCloser, int64, error) {
|
||||
w.logger.Infof("Opening file %s", filePath)
|
||||
|
||||
// Join with base path
|
||||
fullPath := path.Join(w.config.BasePath, filePath)
|
||||
|
||||
reader, size, err := w.client.ReadFile(ctx, fullPath)
|
||||
if err != nil {
|
||||
w.logger.Errorf("Failed to open file %s: %v", fullPath, err)
|
||||
return nil, 0, fmt.Errorf("failed to open file: %w", err)
|
||||
}
|
||||
|
||||
w.logger.Debugf("Opened file %s (size: %d bytes)", filePath, size)
|
||||
return reader, size, nil
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user