+	github.com/jlaffaye/ftp v0.2.0
+github.com/jlaffaye/ftp v0.2.0/go.mod h1:is2Ds5qkhceAPy2xD6RLI6hmp/qysSoymZ+Z2uTnspI= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= diff --git a/server/internal/app/app.go b/server/internal/app/app.go index 1ad1732..6fe7efc 100644 --- a/server/internal/app/app.go +++ b/server/internal/app/app.go @@ -23,6 +23,7 @@ import ( "backupx/server/internal/storage/googledrive" "backupx/server/internal/storage/localdisk" storageAliyun "backupx/server/internal/storage/aliyun" + storageFTP "backupx/server/internal/storage/ftp" storageTencent "backupx/server/internal/storage/tencent" storageQiniu "backupx/server/internal/storage/qiniu" storageS3 "backupx/server/internal/storage/s3" @@ -76,12 +77,13 @@ func New(ctx context.Context, cfg config.Config, version string) (*Application, storageAliyun.NewFactory(), storageTencent.NewFactory(), storageQiniu.NewFactory(), + storageFTP.NewFactory(), ) storageTargetService := service.NewStorageTargetService(storageTargetRepo, oauthSessionRepo, storageRegistry, configCipher) storageTargetService.SetBackupTaskRepository(backupTaskRepo) storageTargetService.SetBackupRecordRepository(backupRecordRepo) backupTaskService := service.NewBackupTaskService(backupTaskRepo, storageTargetRepo, configCipher) - backupRunnerRegistry := backup.NewRegistry(backup.NewFileRunner(), backup.NewSQLiteRunner(), backup.NewMySQLRunner(nil), backup.NewPostgreSQLRunner(nil)) + backupRunnerRegistry := backup.NewRegistry(backup.NewFileRunner(), backup.NewSQLiteRunner(), backup.NewMySQLRunner(nil), backup.NewPostgreSQLRunner(nil), backup.NewSAPHANARunner(nil)) logHub := backup.NewLogHub() retentionService := backupretention.NewService(backupRecordRepo) notifyRegistry := notify.NewRegistry(notify.NewEmailNotifier(), notify.NewWebhookNotifier(), 
+// Run executes a SAP HANA backup using hdbsql.
+// It connects to the HANA instance, exports schema SQL via system-catalog
+// queries, and writes the output to a plain .sql artifact file.
"-j", // switch off page-by-page scroll output
AND SCHEMA_NAME NOT LIKE '\_%%' ESCAPE '\'
+// hanaInstanceNumber extracts the two-digit instance number from a port.
+// SAP HANA SQL ports follow the pattern 3<instance>15, e.g., 30015 for instance 00.
+func hanaInstanceNumber(port int) string {
+	if port >= 30000 && port < 40000 {
+		padded := "0" + strconv.Itoa((port-30000)/100)
+		return padded[len(padded)-2:]
+	}
+	return "00"
+}
opts = append(opts, ftp.DialWithExplicitTLS(nil)) // BUG: nil tls.Config fails the TLS handshake ("either ServerName or InsecureSkipVerify must be specified"); pass &tls.Config{ServerName: p.config.Host} and import crypto/tls
// NOTE(review): ftp.Stor accepts any io.Reader, so buffering the whole artifact in memory is unnecessary — consider passing reader directly (large backups would otherwise be held fully in RAM, and the bytes import could then be dropped)
entries, err := conn.List(basePath) // NOTE(review): non-recursive — objects stored under subdirectories of basePath (object keys containing '/') will not be listed; retention cleanup may miss them
+func (p *Provider) ensureDir(conn *ftp.ServerConn, dirPath string) error { + parts := strings.Split(strings.Trim(dirPath, "/"), "/") + current := "" + for _, part := range parts { + if part == "" { + continue + } + current = current + "/" + part + if err := conn.MakeDir(current); err != nil { + // Ignore errors if directory already exists + // FTP doesn't have a standard "mkdir if not exists" + _ = err + } + } + return nil +} + +// ftpReadCloser wraps an io.ReadCloser from FTP and closes the connection when done. +type ftpReadCloser struct { + io.ReadCloser + conn *ftp.ServerConn +} + +func (f *ftpReadCloser) Close() error { + err := f.ReadCloser.Close() + if f.conn != nil { + f.conn.Quit() + } + return err +} diff --git a/server/internal/storage/types.go b/server/internal/storage/types.go index 79532f2..de3dc57 100644 --- a/server/internal/storage/types.go +++ b/server/internal/storage/types.go @@ -19,6 +19,7 @@ const ( ProviderTypeAliyunOSS ProviderType = "aliyun_oss" ProviderTypeTencentCOS ProviderType = "tencent_cos" ProviderTypeQiniuKodo ProviderType = "qiniu_kodo" + ProviderTypeFTP ProviderType = "ftp" ) const ( @@ -29,6 +30,7 @@ const ( TypeAliyunOSS = string(ProviderTypeAliyunOSS) TypeTencentCOS = string(ProviderTypeTencentCOS) TypeQiniuKodo = string(ProviderTypeQiniuKodo) + TypeFTP = string(ProviderTypeFTP) ) type ObjectInfo struct { @@ -118,3 +120,13 @@ func (cfg GoogleDriveConfig) Normalize() GoogleDriveConfig { } return cfg } + +type FTPConfig struct { + Host string `json:"host"` + Port int `json:"port"` + Username string `json:"username"` + Password string `json:"password"` + BasePath string `json:"basePath"` + UseTLS bool `json:"useTLS"` +} + diff --git a/web/src/components/backup-tasks/BackupTaskFormDrawer.tsx b/web/src/components/backup-tasks/BackupTaskFormDrawer.tsx index dbed692..0610e54 100644 --- a/web/src/components/backup-tasks/BackupTaskFormDrawer.tsx +++ b/web/src/components/backup-tasks/BackupTaskFormDrawer.tsx @@ -106,11 +106,11 @@ export 
function BackupTaskFormDrawer({ visible, loading, initialValue, storageTa type: value, sourcePath: value === 'file' ? current.sourcePath : '', excludePatterns: value === 'file' ? current.excludePatterns : [], - dbHost: value === 'mysql' || value === 'postgresql' ? current.dbHost : '', - dbPort: value === 'mysql' || value === 'postgresql' ? current.dbPort || getDefaultPort(value) : 0, - dbUser: value === 'mysql' || value === 'postgresql' ? current.dbUser : '', - dbPassword: value === 'mysql' || value === 'postgresql' ? current.dbPassword : '', - dbName: value === 'mysql' || value === 'postgresql' ? current.dbName : '', + dbHost: value === 'mysql' || value === 'postgresql' || value === 'saphana' ? current.dbHost : '', + dbPort: value === 'mysql' || value === 'postgresql' || value === 'saphana' ? current.dbPort || getDefaultPort(value) : 0, + dbUser: value === 'mysql' || value === 'postgresql' || value === 'saphana' ? current.dbUser : '', + dbPassword: value === 'mysql' || value === 'postgresql' || value === 'saphana' ? current.dbPassword : '', + dbName: value === 'mysql' || value === 'postgresql' || value === 'saphana' ? current.dbName : '', dbPath: value === 'sqlite' ? 
current.dbPath : '', })) if (value !== 'file') { diff --git a/web/src/components/backup-tasks/field-config.ts b/web/src/components/backup-tasks/field-config.ts index a764c13..a3c2f0c 100644 --- a/web/src/components/backup-tasks/field-config.ts +++ b/web/src/components/backup-tasks/field-config.ts @@ -5,6 +5,7 @@ export const backupTaskTypeOptions = [ { label: 'MySQL', value: 'mysql' }, { label: 'SQLite', value: 'sqlite' }, { label: 'PostgreSQL', value: 'postgresql' }, + { label: 'SAP HANA', value: 'saphana' }, ] as const export const backupCompressionOptions = [ @@ -22,6 +23,8 @@ export function getBackupTaskTypeLabel(type: BackupTaskType) { return 'SQLite' case 'postgresql': return 'PostgreSQL' + case 'saphana': + return 'SAP HANA' default: return type } @@ -64,7 +67,7 @@ export function isSQLiteBackupTask(type: BackupTaskType) { } export function isDatabaseBackupTask(type: BackupTaskType) { - return type === 'mysql' || type === 'postgresql' + return type === 'mysql' || type === 'postgresql' || type === 'saphana' } export function getDefaultPort(type: BackupTaskType) { @@ -73,6 +76,8 @@ export function getDefaultPort(type: BackupTaskType) { return 3306 case 'postgresql': return 5432 + case 'saphana': + return 30015 default: return 0 } diff --git a/web/src/components/storage-targets/field-config.ts b/web/src/components/storage-targets/field-config.ts index 5789d68..08683c3 100644 --- a/web/src/components/storage-targets/field-config.ts +++ b/web/src/components/storage-targets/field-config.ts @@ -216,6 +216,50 @@ const FIELD_CONFIG_MAP: Record = placeholder: '输入新的 SecretKey', }, ], + ftp: [ + { + key: 'host', + label: '主机地址', + type: 'input', + required: true, + placeholder: 'ftp.example.com', + }, + { + key: 'port', + label: '端口', + type: 'input', + placeholder: '21', + description: '默认 FTP 端口为 21。', + }, + { + key: 'username', + label: '用户名', + type: 'input', + required: true, + placeholder: 'backup_user', + }, + { + key: 'password', + label: '密码', + type: 
'password', + required: true, + sensitive: true, + placeholder: '输入新的 FTP 密码', + }, + { + key: 'basePath', + label: '基础目录', + type: 'input', + placeholder: '/backups', + description: 'FTP 服务器上的目标目录,留空使用根目录。', + }, + { + key: 'useTLS', + label: '使用 TLS (FTPS)', + type: 'switch', + description: '启用 Explicit TLS 加密连接。', + }, + ], } export function getStorageTargetFieldConfigs(type: StorageTargetType) { @@ -238,6 +282,8 @@ export function getStorageTargetTypeLabel(type: StorageTargetType) { return '腾讯云 COS' case 'qiniu_kodo': return '七牛云 Kodo' + case 'ftp': + return 'FTP' default: return type } @@ -251,4 +297,5 @@ export const storageTargetTypeOptions = [ { label: 'S3 Compatible', value: 's3' }, { label: 'Google Drive', value: 'google_drive' }, { label: 'WebDAV', value: 'webdav' }, + { label: 'FTP', value: 'ftp' }, ] as const diff --git a/web/src/types/backup-tasks.ts b/web/src/types/backup-tasks.ts index 217beac..f716d16 100644 --- a/web/src/types/backup-tasks.ts +++ b/web/src/types/backup-tasks.ts @@ -1,4 +1,4 @@ -export type BackupTaskType = 'file' | 'mysql' | 'sqlite' | 'postgresql' +export type BackupTaskType = 'file' | 'mysql' | 'sqlite' | 'postgresql' | 'saphana' export type BackupTaskStatus = 'idle' | 'running' | 'success' | 'failed' export type BackupCompression = 'gzip' | 'none' diff --git a/web/src/types/storage-targets.ts b/web/src/types/storage-targets.ts index 9f6cd0a..38a3913 100644 --- a/web/src/types/storage-targets.ts +++ b/web/src/types/storage-targets.ts @@ -1,4 +1,4 @@ -export type StorageTargetType = 'local_disk' | 'google_drive' | 's3' | 'webdav' | 'aliyun_oss' | 'tencent_cos' | 'qiniu_kodo' +export type StorageTargetType = 'local_disk' | 'google_drive' | 's3' | 'webdav' | 'aliyun_oss' | 'tencent_cos' | 'qiniu_kodo' | 'ftp' export type StorageTestStatus = 'unknown' | 'success' | 'failed' export type StorageFieldType = 'input' | 'password' | 'switch'