From 0daf702d2569c17e6ef7a628114f7c7f90f9e5ed Mon Sep 17 00:00:00 2001
From: Syngnat
Date: Mon, 9 Mar 2026 17:22:26 +0800
Subject: [PATCH] ✨ feat(data-sync): extend cross-database migration paths and
 polish the data-sync interaction
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

- Unify the entry points for same-database sync and cross-database migration,
  adding an explicit workflow switch and risk warnings
- Extend bidirectional ClickHouse <-> PG-like migration, and add migration
  routes from PG-like, ClickHouse, and TDengine to MongoDB
- Flesh out TDengine target-side table-creation planning, regression tests,
  and the requirement-tracking docs
- refs #51
---
 frontend/package.json.md5 | 2 +-
 frontend/src/components/DataSyncModal.tsx | 432 +++++-
 frontend/wailsjs/go/models.ts | 6 +
 internal/app/db_context.go | 6 +
 internal/app/methods_db.go | 61 +
 internal/connection/types.go | 1 +
 internal/db/clickhouse_impl.go | 132 ++
 internal/db/mariadb_impl.go | 18 +-
 internal/db/mysql_impl.go | 25 +-
 internal/db/tdengine_applychanges_test.go | 168 +++
 internal/db/tdengine_impl.go | 78 +
 internal/sync/analyze.go | 118 +-
 internal/sync/migration_clickhouse.go | 741 ++++++++
 internal/sync/migration_kernel_router.go | 379 +++++
 internal/sync/migration_kernel_router_test.go | 447 ++++++
 internal/sync/migration_kernel_types.go | 104 ++
 internal/sync/migration_mongodb.go | 603 ++++++++
 internal/sync/migration_redis.go | 1315 +++++++++++++++++
 internal/sync/migration_runtime_helpers.go | 58 +
 internal/sync/migration_schema_inference.go | 53 +
 internal/sync/migration_tdengine.go | 296 ++++
 internal/sync/migration_tdengine_target.go | 657 ++++++++
 internal/sync/migration_type_resolver.go | 98 ++
 internal/sync/preview.go | 43 +-
 internal/sync/redis_migration_test.go | 490 ++++++
 internal/sync/schema_migration.go | 1014 +++++++++++++
 internal/sync/schema_migration_test.go | 957 ++++++++++++
 internal/sync/schema_sync.go | 11 +-
 internal/sync/sql_helpers.go | 19 +-
 internal/sync/sync_engine.go | 413 +++---
 internal/sync/sync_events.go | 1 -
 31 files changed, 8403 insertions(+), 343 deletions(-)
 create mode 100644 internal/db/tdengine_applychanges_test.go
 create mode 100644 internal/sync/migration_clickhouse.go
 create mode 100644 internal/sync/migration_kernel_router.go
 create mode 100644 internal/sync/migration_kernel_router_test.go
 create mode 100644 internal/sync/migration_kernel_types.go
 create mode 100644 internal/sync/migration_mongodb.go
 create mode 100644 internal/sync/migration_redis.go
 create mode 100644 internal/sync/migration_runtime_helpers.go
 create mode 100644 internal/sync/migration_schema_inference.go
 create mode 100644 internal/sync/migration_tdengine.go
 create mode 100644 internal/sync/migration_tdengine_target.go
 create mode 100644 internal/sync/migration_type_resolver.go
 create mode 100644 internal/sync/redis_migration_test.go
 create mode 100644 internal/sync/schema_migration.go
 create mode 100644 internal/sync/schema_migration_test.go

diff --git a/frontend/package.json.md5 b/frontend/package.json.md5
index a7661c0..0f8f4fe 100755
--- a/frontend/package.json.md5
+++ b/frontend/package.json.md5
@@ -1 +1 @@
-d0f9366af59a6367ad3c7e2d4185ead4
\ No newline at end of file
+5b8157374dae5f9340e31b2d0bd2c00e
\ No newline at end of file
diff --git a/frontend/src/components/DataSyncModal.tsx b/frontend/src/components/DataSyncModal.tsx
index 57c4033..1389be7 100644
--- a/frontend/src/components/DataSyncModal.tsx
+++ b/frontend/src/components/DataSyncModal.tsx
@@ -1,9 +1,11 @@
 import React, { useState, useEffect, useMemo, useRef } from 'react';
-import
{ Modal, Form, Select, Button, message, Steps, Transfer, Card, Alert, Divider, Typography, Progress, Checkbox, Table, Drawer, Tabs } from 'antd'; +import { Modal, Form, Select, Input, Button, message, Steps, Transfer, Card, Alert, Divider, Typography, Progress, Checkbox, Table, Drawer, Tabs, theme as antdTheme } from 'antd'; +import { DatabaseOutlined, RocketOutlined, SwapOutlined, TableOutlined } from '@ant-design/icons'; import { useStore } from '../store'; import { DBGetDatabases, DBGetTables, DataSync, DataSyncAnalyze, DataSyncPreview } from '../../wailsjs/go/app/App'; import { SavedConnection } from '../types'; import { EventsOn } from '../../wailsjs/runtime/runtime'; +import { normalizeOpacityForPlatform, resolveAppearanceValues } from '../utils/appearance'; const { Title, Text } = Typography; const { Step } = Steps; @@ -21,6 +23,12 @@ type TableDiffSummary = { deletes?: number; same?: number; message?: string; + targetTableExists?: boolean; + plannedAction?: string; + warnings?: string[]; + unsupportedObjects?: string[]; + indexesToCreate?: number; + indexesSkipped?: number; }; type TableOps = { insert: boolean; @@ -31,6 +39,8 @@ type TableOps = { selectedDeletePks?: string[]; }; +type WorkflowType = 'sync' | 'migration'; + const quoteSqlIdent = (dbType: string, ident: string): string => { const raw = String(ident || '').trim(); if (!raw) return raw; @@ -76,6 +86,11 @@ const toSqlLiteral = (value: any, dbType: string): string => { return `'${String(value).replace(/'/g, "''")}'`; }; +const resolveRedisDbIndex = (raw?: string): number => { + const value = Number(String(raw || '').trim()); + return Number.isInteger(value) && value >= 0 && value <= 15 ? value : 0; +}; + const buildSqlPreview = ( previewData: any, tableName: string, @@ -145,8 +160,14 @@ const buildSqlPreview = ( const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open, onClose }) => { const connections = useStore((state) => state.connections); + const themeMode = useStore((state) => state.theme); + const appearance = useStore((state) => state.appearance); const [currentStep, setCurrentStep] = useState(0); const [loading, setLoading] = useState(false); + const { token } = antdTheme.useToken(); + const darkMode = themeMode === 'dark'; + const resolvedAppearance = resolveAppearanceValues(appearance); + const effectiveOpacity = normalizeOpacityForPlatform(resolvedAppearance.opacity); // Step 1: Config const [sourceConnId, setSourceConnId] = useState(''); @@ -162,9 +183,13 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open, const [selectedTables, setSelectedTables] = useState([]); // Options + const [workflowType, setWorkflowType] = useState('sync'); const [syncContent, setSyncContent] = useState<'data' | 'schema' | 'both'>('data'); const [syncMode, setSyncMode] = useState('insert_update'); const [autoAddColumns, setAutoAddColumns] = useState(true); + const [targetTableStrategy, setTargetTableStrategy] = useState<'existing_only' | 'auto_create_if_missing' | 'smart'>('existing_only'); + const [createIndexes, setCreateIndexes] = useState(false); + const [mongoCollectionName, setMongoCollectionName] = useState(''); const [showSameTables, setShowSameTables] = useState(false); const [analyzing, setAnalyzing] = useState(false); const [diffTables, setDiffTables] = useState([]); @@ -240,9 +265,12 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open, setSourceDb(''); setTargetDb(''); setSelectedTables([]); + setWorkflowType('sync'); 
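+    // Reset always lands back on the sync workflow; the workflowType effect
+    // below re-applies the safer migration defaults (insert_only / both /
+    // smart / createIndexes) whenever the user switches to migration again.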
setSyncContent('data'); setSyncMode('insert_update'); setAutoAddColumns(true); + setTargetTableStrategy('existing_only'); + setCreateIndexes(false); setShowSameTables(false); setAnalyzing(false); setDiffTables([]); @@ -260,6 +288,30 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open, } }, [open]); + useEffect(() => { + if (workflowType === 'migration') { + if (syncMode === 'insert_update') { + setSyncMode('insert_only'); + } + if (syncContent === 'schema') { + setSyncContent('both'); + } + if (targetTableStrategy === 'existing_only') { + setTargetTableStrategy('smart'); + } + if (!createIndexes) { + setCreateIndexes(true); + } + } else { + if (targetTableStrategy !== 'existing_only') { + setTargetTableStrategy('existing_only'); + } + if (createIndexes) { + setCreateIndexes(false); + } + } + }, [workflowType]); + const handleSourceConnChange = async (connId: string) => { setSourceConnId(connId); setSourceDb(''); @@ -357,6 +409,9 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open, content: syncContent, mode: "insert_update", autoAddColumns, + targetTableStrategy, + createIndexes, + mongoCollectionName: mongoCollectionName.trim(), jobId, }; @@ -407,6 +462,9 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open, content: "data", mode: "insert_update", autoAddColumns, + targetTableStrategy, + createIndexes, + mongoCollectionName: mongoCollectionName.trim(), }; try { @@ -483,6 +541,9 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open, content: syncContent, mode: syncMode, autoAddColumns, + targetTableStrategy, + createIndexes, + mongoCollectionName: mongoCollectionName.trim(), tableOptions, jobId, }; @@ -530,10 +591,132 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open, return buildSqlPreview(previewData, previewTable, targetType, ops); }, [previewData, previewTable, targetConnId, connections, tableOptions]); + const analysisWarnings = useMemo(() => { + const items: string[] = []; + diffTables.forEach((table) => { + (table.warnings || []).forEach((warning) => items.push(`${table.table}: ${warning}`)); + (table.unsupportedObjects || []).forEach((warning) => items.push(`${table.table}: ${warning}`)); + }); + return Array.from(new Set(items)); + }, [diffTables]); + + const isMigrationWorkflow = workflowType === 'migration'; + const sourceConn = useMemo(() => connections.find(c => c.id === sourceConnId), [connections, sourceConnId]); + const targetConn = useMemo(() => connections.find(c => c.id === targetConnId), [connections, targetConnId]); + const sourceType = String(sourceConn?.config?.type || '').toLowerCase(); + const targetType = String(targetConn?.config?.type || '').toLowerCase(); + const isRedisMongoKeyspaceMigration = isMigrationWorkflow && ( + (sourceType === 'redis' && targetType === 'mongodb') || + (sourceType === 'mongodb' && targetType === 'redis') + ); + const defaultMongoCollectionName = useMemo(() => { + if (sourceType === 'redis' && targetType === 'mongodb') { + return `redis_db_${resolveRedisDbIndex(sourceDb || sourceConn?.config?.database)}_keys`; + } + if (sourceType === 'mongodb' && targetType === 'redis') { + return selectedTables[0] || `redis_db_${resolveRedisDbIndex(targetDb || targetConn?.config?.database)}_keys`; + } + return ''; + }, [sourceType, targetType, sourceDb, targetDb, sourceConn, targetConn, selectedTables]); + + const modalPanelStyle = useMemo(() => ({ + background: darkMode + ? 
'linear-gradient(180deg, rgba(16,22,34,0.96) 0%, rgba(10,14,24,0.98) 100%)' + : 'linear-gradient(180deg, rgba(255,255,255,0.98) 0%, rgba(246,248,252,0.98) 100%)', + border: darkMode ? '1px solid rgba(255,255,255,0.08)' : '1px solid rgba(16,24,40,0.08)', + boxShadow: darkMode ? '0 24px 56px rgba(0,0,0,0.36)' : '0 18px 44px rgba(15,23,42,0.14)', + backdropFilter: darkMode ? 'blur(18px)' : 'none', + }), [darkMode]); + + const shellCardStyle = useMemo(() => ({ + borderRadius: 18, + border: darkMode ? '1px solid rgba(255,255,255,0.08)' : '1px solid rgba(15,23,42,0.08)', + background: darkMode ? 'rgba(255,255,255,0.03)' : `rgba(255,255,255,${Math.max(effectiveOpacity, 0.88)})`, + boxShadow: darkMode ? '0 12px 32px rgba(0,0,0,0.22)' : '0 10px 24px rgba(15,23,42,0.08)', + overflow: 'hidden', + }), [darkMode, effectiveOpacity]); + + const heroPanelStyle = useMemo(() => ({ + padding: 18, + borderRadius: 18, + border: darkMode ? '1px solid rgba(255,214,102,0.12)' : '1px solid rgba(24,144,255,0.12)', + background: darkMode + ? 'linear-gradient(135deg, rgba(255,214,102,0.10) 0%, rgba(255,255,255,0.03) 100%)' + : 'linear-gradient(135deg, rgba(24,144,255,0.10) 0%, rgba(255,255,255,0.95) 100%)', + marginBottom: 18, + }), [darkMode]); + + const badgeStyle = useMemo(() => ({ + display: 'inline-flex', + alignItems: 'center', + gap: 6, + padding: '6px 10px', + borderRadius: 999, + border: darkMode ? '1px solid rgba(255,255,255,0.10)' : '1px solid rgba(15,23,42,0.08)', + background: darkMode ? 'rgba(255,255,255,0.04)' : 'rgba(255,255,255,0.86)', + color: darkMode ? 'rgba(255,255,255,0.88)' : '#334155', + fontSize: 12, + fontWeight: 600, + }), [darkMode]); + + const quietPanelStyle = useMemo(() => ({ + padding: 14, + borderRadius: 16, + border: darkMode ? '1px solid rgba(255,255,255,0.08)' : '1px solid rgba(15,23,42,0.08)', + background: darkMode ? 'rgba(255,255,255,0.025)' : 'rgba(248,250,252,0.92)', + }), [darkMode]); + + const modalWorkspaceStyle = useMemo(() => ({ + display: 'flex', + flexDirection: 'column', + height: '100%', + minHeight: 0, + }), []); + + const modalScrollableContentStyle = useMemo(() => ({ + flex: 1, + minHeight: 0, + overflowY: 'auto', + overflowX: 'hidden', + paddingRight: 4, + overscrollBehavior: 'contain', + }), []); + + const modalFooterBarStyle = useMemo(() => ({ + marginTop: 18, + display: 'flex', + justifyContent: 'flex-end', + gap: 8, + paddingTop: 12, + borderTop: darkMode ? '1px solid rgba(255,255,255,0.06)' : '1px solid rgba(15,23,42,0.06)', + flex: '0 0 auto', + }), [darkMode]); + + const renderModalTitle = (title: string, description: string) => ( +
+    <div style={{ display: 'flex', alignItems: 'center', gap: 10 }}>
+      <span style={badgeStyle}>
+        {isMigrationWorkflow ? <RocketOutlined /> : <SwapOutlined />}
+      </span>
+      <div>
+        <div style={{ fontSize: 15, fontWeight: 600 }}>{title}</div>
+        <Text type="secondary" style={{ fontSize: 12 }}>{description}</Text>
+      </div>
+    </div>
+ ); + return ( <> { if (syncing) { @@ -542,23 +725,61 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open, } onClose(); }} - width={800} + width={920} footer={null} destroyOnHidden closable={!syncing} maskClosable={!syncing} + styles={{ + content: modalPanelStyle, + header: { background: 'transparent', borderBottom: 'none', paddingBottom: 10 }, + body: { + paddingTop: 8, + height: 760, + maxHeight: 'calc(100vh - 120px)', + overflow: 'hidden', + display: 'flex', + flexDirection: 'column', + }, + footer: { background: 'transparent', borderTop: 'none', paddingTop: 12 }, + }} > +
+        <div style={modalWorkspaceStyle}>
+          <div style={heroPanelStyle}>
+            <div style={{ display: 'flex', alignItems: 'flex-start', justifyContent: 'space-between', gap: 12 }}>
+              <div>
+                <Title level={4} style={{ margin: 0 }}>
+                  {isMigrationWorkflow ? '跨数据源迁移' : '数据同步'}
+                </Title>
+                <Text type="secondary" style={{ fontSize: 12 }}>
+                  {isMigrationWorkflow
+                    ? '适合把源表迁移到另一套数据库,可按策略自动建表、导入数据并补建可兼容索引。'
+                    : '适合目标表已存在的场景,先做差异分析,再按勾选执行插入、更新或删除。'}
+                </Text>
+              </div>
+            </div>
+            <div style={{ display: 'flex', flexWrap: 'wrap', gap: 8, marginTop: 10 }}>
+              <span style={badgeStyle}>{isMigrationWorkflow ? <RocketOutlined /> : <SwapOutlined />} {isMigrationWorkflow ? '迁移模式' : '同步模式'}</span>
+              <span style={badgeStyle}><DatabaseOutlined /> {sourceConnId ? '已选源连接' : '待选源连接'}</span>
+              <span style={badgeStyle}><TableOutlined /> {selectedTables.length || 0} 张表</span>
+            </div>
+          </div>
+          <div style={modalScrollableContentStyle}>
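+            {/* Step bodies below: 0 = connection & options config,
+                1 = table selection & diff analysis, 2 = execution progress & logs */}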
{/* STEP 1: CONFIG */} {currentStep === 0 && (
-
- +
+
@@ -589,27 +818,94 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open,
- + +
+ + 先明确当前要做的是“已有目标表同步”还是“跨库迁移”,页面会按功能类型自动给出更安全的默认策略。 + +
- + + + + + - + + + + + {isRedisMongoKeyspaceMigration && ( + + setMongoCollectionName(e.target.value)} + placeholder={defaultMongoCollectionName || '请输入 Mongo 集合名'} + allowClear + maxLength={128} + /> + + )} setAutoAddColumns(e.target.checked)}> - 自动补齐目标表缺失字段(仅 MySQL 目标) + 自动补齐目标表缺失字段(当前支持 MySQL 目标及 MySQL → Kingbase) + + setCreateIndexes(e.target.checked)} disabled={!isMigrationWorkflow || targetTableStrategy === 'existing_only'}> + 自动迁移可兼容的普通索引/唯一索引(仅自动建表模式生效) + + + {isMigrationWorkflow && targetTableStrategy !== 'existing_only' && ( + + )} + {!isMigrationWorkflow && ( + + )} {syncContent !== 'schema' && syncMode === 'full_overwrite' && ( void }> = ({ open, {/* STEP 2: TABLES */} {currentStep === 1 && ( -
-
- 请选择需要同步的表: +
+
+
+ 请选择需要同步的表: setShowSameTables(e.target.checked)}> 显示相同表 -
- + ({ key: t, title: t }))} titles={['源表', '已选表']} targetKeys={selectedTables} onChange={(keys) => setSelectedTables(keys as string[])} render={item => item.title} - listStyle={{ width: 350, height: 280, marginTop: 0 }} - locale={{ itemUnit: '项', itemsUnit: '项', searchPlaceholder: '搜索表', notFoundContent: '暂无数据' }} + listStyle={{ width: 390, height: 320, marginTop: 0, borderRadius: 14, overflow: 'hidden' }} + locale={{ itemUnit: '项', itemsUnit: '项', searchPlaceholder: '搜索表…', notFoundContent: '暂无数据' }} /> +
{diffTables.length > 0 && ( -
- 对比结果 +
+ 对比结果 + {analysisWarnings.length > 0 && ( + + {analysisWarnings.slice(0, 8).map((item) =>
  • {item}
  • )} + {analysisWarnings.length > 8 &&
  • 还有 {analysisWarnings.length - 8} 项未展开
  • } + + } + style={{ marginBottom: 12 }} + /> + )} void }> = ({ open, const same = Number(t.same || 0); const msg = String(t.message || '').trim(); const can = !!t.canSync; + const warns = Array.isArray(t.warnings) ? t.warnings.length : 0; + const unsupported = Array.isArray(t.unsupportedObjects) ? t.unsupportedObjects.length : 0; if (showSameTables) return true; if (!can) return true; - if (msg) return true; + if (msg || warns > 0 || unsupported > 0) return true; return ins > 0 || upd > 0 || del > 0 || same === 0; })} columns={[ { title: '表名', dataIndex: 'table', key: 'table', ellipsis: true }, + { + title: '目标表', + key: 'targetTableExists', + width: 90, + render: (_: any, r: any) => r.targetTableExists ? '已存在' : '不存在' + }, + { + title: '计划', + dataIndex: 'plannedAction', + key: 'plannedAction', + width: 220, + ellipsis: true, + render: (v: any) => String(v || '') + }, { title: '插入', key: 'inserts', @@ -670,11 +998,7 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open, const ops = tableOptions[r.table] || { insert: true, update: true, delete: false }; const disabled = !r.canSync || analyzing || Number(r.inserts || 0) === 0; return ( - updateTableOption(r.table, 'insert', e.target.checked)} - > + updateTableOption(r.table, 'insert', e.target.checked)}> {Number(r.inserts || 0)} ); @@ -688,11 +1012,7 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open, const ops = tableOptions[r.table] || { insert: true, update: true, delete: false }; const disabled = !r.canSync || analyzing || Number(r.updates || 0) === 0; return ( - updateTableOption(r.table, 'update', e.target.checked)} - > + updateTableOption(r.table, 'update', e.target.checked)}> {Number(r.updates || 0)} ); @@ -706,18 +1026,28 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open, const ops = tableOptions[r.table] || { insert: true, update: true, delete: false }; const disabled = !r.canSync || analyzing || Number(r.deletes || 0) === 0; return ( - updateTableOption(r.table, 'delete', e.target.checked)} - > + updateTableOption(r.table, 'delete', e.target.checked)}> {Number(r.deletes || 0)} ); } }, { title: '相同', dataIndex: 'same', key: 'same', width: 70, render: (v: any) => Number(v || 0) }, - { title: '消息', dataIndex: 'message', key: 'message', ellipsis: true, render: (v: any) => (v ? String(v) : '') }, + { + title: '风险', + key: 'warnings', + width: 220, + render: (_: any, r: any) => { + const warns = [...(Array.isArray(r.warnings) ? r.warnings : []), ...(Array.isArray(r.unsupportedObjects) ? r.unsupportedObjects : [])]; + if (warns.length === 0) return '-'; + return ( +
    + {warns.slice(0, 2).map((item: string) =>
    {item}
    )} + {warns.length > 2 &&
    还有 {warns.length - 2} 项
    } +
    + ); + } + }, { title: '预览', key: 'preview', @@ -741,7 +1071,8 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open, {/* STEP 3: RESULT */} {currentStep === 2 && ( -
    +
    +
    void }> = ({ open, showIcon /> -
    +
    void }> = ({ open, />
    - 日志 +
    +
    + 执行日志
    { @@ -770,14 +1103,25 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open, const nearBottom = el.scrollHeight - el.scrollTop - el.clientHeight < 40; autoScrollRef.current = nearBottom; }} - style={{ background: '#f5f5f5', padding: 12, height: 300, overflowY: 'auto', fontFamily: 'monospace' }} + style={{ + background: darkMode ? 'rgba(255,255,255,0.03)' : 'rgba(248,250,252,0.92)', + border: darkMode ? '1px solid rgba(255,255,255,0.08)' : '1px solid rgba(15,23,42,0.06)', + borderRadius: 14, + padding: 12, + height: 300, + overflowY: 'auto', + fontFamily: 'SFMono-Regular, ui-monospace, Menlo, Consolas, monospace' + }} > {syncLogs.map((item, i: number) =>
    {renderSyncLogItem(item)}
    )}
    +
    )} -
    +
    + +
    {currentStep === 0 && ( )} @@ -804,14 +1148,16 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open, )}
    +
    { setPreviewOpen(false); setPreviewTable(''); setPreviewData(null); }} width={900} > - {previewLoading && } + {previewLoading && } {!previewLoading && previewData && (
    ; static createFrom(source: any = {}) { @@ -292,6 +295,9 @@ export namespace sync { this.mode = source["mode"]; this.jobId = source["jobId"]; this.autoAddColumns = source["autoAddColumns"]; + this.targetTableStrategy = source["targetTableStrategy"]; + this.createIndexes = source["createIndexes"]; + this.mongoCollectionName = source["mongoCollectionName"]; this.tableOptions = this.convertValues(source["tableOptions"], TableOptions, true); } diff --git a/internal/app/db_context.go b/internal/app/db_context.go index ec56c89..009e405 100644 --- a/internal/app/db_context.go +++ b/internal/app/db_context.go @@ -1,6 +1,7 @@ package app import ( + "strconv" "strings" "GoNavi-Wails/internal/connection" @@ -20,6 +21,11 @@ func normalizeRunConfig(config connection.ConnectionConfig, dbName string) conne case "dameng": // 达梦使用 schema 参数,沿用现有行为:dbName 表示 schema。 runConfig.Database = name + case "redis": + runConfig.Database = name + if idx, err := strconv.Atoi(name); err == nil && idx >= 0 && idx <= 15 { + runConfig.RedisDB = idx + } default: // oracle: dbName 表示 schema/owner,不能覆盖 config.Database(服务名) // sqlite: 无需设置 Database diff --git a/internal/app/methods_db.go b/internal/app/methods_db.go index 24119e1..b28109f 100644 --- a/internal/app/methods_db.go +++ b/internal/app/methods_db.go @@ -3,6 +3,7 @@ package app import ( "context" "fmt" + "strconv" "strings" "time" @@ -547,6 +548,24 @@ func ensureNonNilSlice[T any](items []T) []T { func (a *App) DBGetDatabases(config connection.ConnectionConfig) connection.QueryResult { runConfig := normalizeRunConfig(config, "") + if strings.EqualFold(strings.TrimSpace(runConfig.Type), "redis") { + runConfig.Type = "redis" + client, err := a.getRedisClient(runConfig) + if err != nil { + logger.Error(err, "DBGetDatabases 获取 Redis 连接失败:%s", formatConnSummary(runConfig)) + return connection.QueryResult{Success: false, Message: err.Error()} + } + dbs, err := client.GetDatabases() + if err != nil { + logger.Error(err, "DBGetDatabases 获取 Redis 库列表失败:%s", formatConnSummary(runConfig)) + return connection.QueryResult{Success: false, Message: err.Error()} + } + resData := make([]map[string]string, 0, len(dbs)) + for _, item := range dbs { + resData = append(resData, map[string]string{"Database": strconv.Itoa(item.Index)}) + } + return connection.QueryResult{Success: true, Data: resData} + } dbInst, err := a.getDatabase(runConfig) if err != nil { logger.Error(err, "DBGetDatabases 获取连接失败:%s", formatConnSummary(runConfig)) @@ -579,6 +598,48 @@ func (a *App) DBGetDatabases(config connection.ConnectionConfig) connection.Quer func (a *App) DBGetTables(config connection.ConnectionConfig, dbName string) connection.QueryResult { runConfig := normalizeRunConfig(config, dbName) + if strings.EqualFold(strings.TrimSpace(runConfig.Type), "redis") { + runConfig.Type = "redis" + client, err := a.getRedisClient(runConfig) + if err != nil { + logger.Error(err, "DBGetTables 获取 Redis 连接失败:%s", formatConnSummary(runConfig)) + return connection.QueryResult{Success: false, Message: err.Error()} + } + cursor := uint64(0) + tables := make([]string, 0, 128) + seen := make(map[string]struct{}, 128) + for { + result, err := client.ScanKeys("*", cursor, 1000) + if err != nil { + logger.Error(err, "DBGetTables 扫描 Redis Key 失败:%s", formatConnSummary(runConfig)) + return connection.QueryResult{Success: false, Message: err.Error()} + } + for _, item := range result.Keys { + key := strings.TrimSpace(item.Key) + if key == "" { + continue + } + if _, ok := seen[key]; ok { + continue + } + seen[key] = 
struct{}{} + tables = append(tables, key) + } + if strings.TrimSpace(result.Cursor) == "" || strings.TrimSpace(result.Cursor) == "0" { + break + } + next, err := strconv.ParseUint(strings.TrimSpace(result.Cursor), 10, 64) + if err != nil || next == cursor { + break + } + cursor = next + } + resData := make([]map[string]string, 0, len(tables)) + for _, name := range tables { + resData = append(resData, map[string]string{"Table": name}) + } + return connection.QueryResult{Success: true, Data: resData} + } dbInst, err := a.getDatabase(runConfig) if err != nil { diff --git a/internal/connection/types.go b/internal/connection/types.go index bac9ec7..bddb794 100644 --- a/internal/connection/types.go +++ b/internal/connection/types.go @@ -90,6 +90,7 @@ type IndexDefinition struct { NonUnique int `json:"nonUnique"` SeqInIndex int `json:"seqInIndex"` IndexType string `json:"indexType"` + SubPart int `json:"subPart,omitempty"` } // ForeignKeyDefinition represents a foreign key diff --git a/internal/db/clickhouse_impl.go b/internal/db/clickhouse_impl.go index f1d5811..75a418c 100644 --- a/internal/db/clickhouse_impl.go +++ b/internal/db/clickhouse_impl.go @@ -8,6 +8,7 @@ import ( "fmt" "net" "net/url" + "sort" "strconv" "strings" "time" @@ -678,3 +679,134 @@ func isClickHouseTruthy(value interface{}) bool { return normalized == "1" || normalized == "true" || normalized == "yes" || normalized == "y" } } + +func (c *ClickHouseDB) ApplyChanges(tableName string, changes connection.ChangeSet) error { + if c.conn == nil { + return fmt.Errorf("connection not open") + } + + database, table, err := c.resolveDatabaseAndTable(c.database, tableName) + if err != nil { + return err + } + qualifiedTable := fmt.Sprintf("%s.%s", quoteClickHouseIdentifier(database), quoteClickHouseIdentifier(table)) + + for _, pk := range changes.Deletes { + whereExpr := buildClickHouseWhereClause(pk) + if whereExpr == "" { + continue + } + query := fmt.Sprintf("ALTER TABLE %s DELETE WHERE %s", qualifiedTable, whereExpr) + if _, err := c.conn.Exec(query); err != nil { + return fmt.Errorf("delete error: %v; sql=%s", err, query) + } + } + + for _, update := range changes.Updates { + setExpr := buildClickHouseAssignments(update.Values) + whereExpr := buildClickHouseWhereClause(update.Keys) + if setExpr == "" || whereExpr == "" { + continue + } + query := fmt.Sprintf("ALTER TABLE %s UPDATE %s WHERE %s", qualifiedTable, setExpr, whereExpr) + if _, err := c.conn.Exec(query); err != nil { + return fmt.Errorf("update error: %v; sql=%s", err, query) + } + } + + for _, row := range changes.Inserts { + query, err := buildClickHouseInsertSQL(qualifiedTable, row) + if err != nil { + return err + } + if query == "" { + continue + } + if _, err := c.conn.Exec(query); err != nil { + return fmt.Errorf("insert error: %v; sql=%s", err, query) + } + } + return nil +} + +func buildClickHouseInsertSQL(qualifiedTable string, row map[string]interface{}) (string, error) { + if len(row) == 0 { + return "", nil + } + cols := make([]string, 0, len(row)) + for k := range row { + if strings.TrimSpace(k) == "" { + continue + } + cols = append(cols, k) + } + if len(cols) == 0 { + return "", nil + } + sort.Strings(cols) + quotedCols := make([]string, 0, len(cols)) + values := make([]string, 0, len(cols)) + for _, col := range cols { + quotedCols = append(quotedCols, quoteClickHouseIdentifier(col)) + values = append(values, clickHouseLiteral(row[col])) + } + return fmt.Sprintf("INSERT INTO %s (%s) VALUES (%s)", qualifiedTable, strings.Join(quotedCols, ", "), 
strings.Join(values, ", ")), nil +} + +func buildClickHouseAssignments(values map[string]interface{}) string { + if len(values) == 0 { + return "" + } + cols := make([]string, 0, len(values)) + for k := range values { + if strings.TrimSpace(k) == "" { + continue + } + cols = append(cols, k) + } + sort.Strings(cols) + parts := make([]string, 0, len(cols)) + for _, col := range cols { + parts = append(parts, fmt.Sprintf("%s = %s", quoteClickHouseIdentifier(col), clickHouseLiteral(values[col]))) + } + return strings.Join(parts, ", ") +} + +func buildClickHouseWhereClause(keys map[string]interface{}) string { + if len(keys) == 0 { + return "" + } + cols := make([]string, 0, len(keys)) + for k := range keys { + if strings.TrimSpace(k) == "" { + continue + } + cols = append(cols, k) + } + sort.Strings(cols) + parts := make([]string, 0, len(cols)) + for _, col := range cols { + parts = append(parts, fmt.Sprintf("%s = %s", quoteClickHouseIdentifier(col), clickHouseLiteral(keys[col]))) + } + return strings.Join(parts, " AND ") +} + +func clickHouseLiteral(value interface{}) string { + switch val := value.(type) { + case nil: + return "NULL" + case bool: + if val { + return "1" + } + return "0" + case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, float32, float64: + return fmt.Sprintf("%v", val) + case time.Time: + return fmt.Sprintf("'%s'", val.Format("2006-01-02 15:04:05")) + case []byte: + return fmt.Sprintf("'%s'", strings.ReplaceAll(string(val), "'", "''")) + default: + return fmt.Sprintf("'%s'", strings.ReplaceAll(fmt.Sprintf("%v", val), "'", "''")) + } +} diff --git a/internal/db/mariadb_impl.go b/internal/db/mariadb_impl.go index 1e316ad..6a36400 100644 --- a/internal/db/mariadb_impl.go +++ b/internal/db/mariadb_impl.go @@ -250,12 +250,22 @@ func (m *MariaDB) GetIndexes(dbName, tableName string) ([]connection.IndexDefini } } + subPart := 0 + if val, ok := row["Sub_part"]; ok && val != nil { + if f, ok := val.(float64); ok { + subPart = int(f) + } else if i, ok := val.(int64); ok { + subPart = int(i) + } + } + idx := connection.IndexDefinition{ Name: fmt.Sprintf("%v", row["Key_name"]), ColumnName: fmt.Sprintf("%v", row["Column_name"]), NonUnique: nonUnique, SeqInIndex: seq, IndexType: fmt.Sprintf("%v", row["Index_type"]), + SubPart: subPart, } indexes = append(indexes, idx) } @@ -323,7 +333,7 @@ func (m *MariaDB) ApplyChanges(tableName string, changes connection.ChangeSet) e var args []interface{} for k, v := range pk { wheres = append(wheres, fmt.Sprintf("`%s` = ?", k)) - args = append(args, normalizeMySQLDateTimeValue(v)) + args = append(args, normalizeMySQLComplexValue(normalizeMySQLDateTimeValue(v))) } if len(wheres) == 0 { continue @@ -341,7 +351,7 @@ func (m *MariaDB) ApplyChanges(tableName string, changes connection.ChangeSet) e for k, v := range update.Values { sets = append(sets, fmt.Sprintf("`%s` = ?", k)) - args = append(args, normalizeMySQLDateTimeValue(v)) + args = append(args, normalizeMySQLComplexValue(normalizeMySQLDateTimeValue(v))) } if len(sets) == 0 { @@ -351,7 +361,7 @@ func (m *MariaDB) ApplyChanges(tableName string, changes connection.ChangeSet) e var wheres []string for k, v := range update.Keys { wheres = append(wheres, fmt.Sprintf("`%s` = ?", k)) - args = append(args, normalizeMySQLDateTimeValue(v)) + args = append(args, normalizeMySQLComplexValue(normalizeMySQLDateTimeValue(v))) } if len(wheres) == 0 { @@ -373,7 +383,7 @@ func (m *MariaDB) ApplyChanges(tableName string, changes connection.ChangeSet) e for k, v := range row { cols = 
append(cols, fmt.Sprintf("`%s`", k)) placeholders = append(placeholders, "?") - args = append(args, normalizeMySQLDateTimeValue(v)) + args = append(args, normalizeMySQLComplexValue(normalizeMySQLDateTimeValue(v))) } if len(cols) == 0 { diff --git a/internal/db/mysql_impl.go b/internal/db/mysql_impl.go index 4aefa29..5095f1c 100644 --- a/internal/db/mysql_impl.go +++ b/internal/db/mysql_impl.go @@ -3,6 +3,7 @@ package db import ( "context" "database/sql" + "encoding/json" "fmt" "net/url" "strconv" @@ -441,12 +442,22 @@ func (m *MySQLDB) GetIndexes(dbName, tableName string) ([]connection.IndexDefini } } + subPart := 0 + if val, ok := row["Sub_part"]; ok && val != nil { + if f, ok := val.(float64); ok { + subPart = int(f) + } else if i, ok := val.(int64); ok { + subPart = int(i) + } + } + idx := connection.IndexDefinition{ Name: fmt.Sprintf("%v", row["Key_name"]), ColumnName: fmt.Sprintf("%v", row["Column_name"]), NonUnique: nonUnique, SeqInIndex: seq, IndexType: fmt.Sprintf("%v", row["Index_type"]), + SubPart: subPart, } indexes = append(indexes, idx) } @@ -606,6 +617,18 @@ func (m *MySQLDB) ApplyChanges(tableName string, changes connection.ChangeSet) e return tx.Commit() } +func normalizeMySQLComplexValue(value interface{}) interface{} { + switch v := value.(type) { + case map[string]interface{}, []interface{}: + if data, err := json.Marshal(v); err == nil { + return string(data) + } + return fmt.Sprintf("%v", value) + default: + return value + } +} + func normalizeMySQLDateTimeValue(value interface{}) interface{} { text, ok := value.(string) if !ok { @@ -670,7 +693,7 @@ func (m *MySQLDB) loadColumnTypeMap(tableName string) map[string]string { func normalizeMySQLValueForInsert(columnName string, value interface{}, columnTypeMap map[string]string) (interface{}, bool) { columnType := strings.ToLower(strings.TrimSpace(columnTypeMap[strings.ToLower(strings.TrimSpace(columnName))])) if !isMySQLTemporalColumnType(columnType) { - return value, false + return normalizeMySQLComplexValue(value), false } text, ok := value.(string) if ok && strings.TrimSpace(text) == "" { diff --git a/internal/db/tdengine_applychanges_test.go b/internal/db/tdengine_applychanges_test.go new file mode 100644 index 0000000..8afebd4 --- /dev/null +++ b/internal/db/tdengine_applychanges_test.go @@ -0,0 +1,168 @@ +//go:build gonavi_full_drivers || gonavi_tdengine_driver + +package db + +import ( + "context" + "database/sql" + "database/sql/driver" + "fmt" + "strings" + "sync" + "testing" + + "GoNavi-Wails/internal/connection" +) + +const tdengineRecordingDriverName = "gonavi_tdengine_recording" + +var ( + registerTDengineRecordingDriverOnce sync.Once + tdengineRecordingDriverMu sync.Mutex + tdengineRecordingDriverSeq int + tdengineRecordingDriverStates = map[string]*tdengineRecordingState{} +) + +type tdengineRecordingState struct { + mu sync.Mutex + queries []string + execErr error +} + +func (s *tdengineRecordingState) snapshotQueries() []string { + s.mu.Lock() + defer s.mu.Unlock() + queries := make([]string, len(s.queries)) + copy(queries, s.queries) + return queries +} + +type tdengineRecordingDriver struct{} + +func (tdengineRecordingDriver) Open(name string) (driver.Conn, error) { + tdengineRecordingDriverMu.Lock() + state := tdengineRecordingDriverStates[name] + tdengineRecordingDriverMu.Unlock() + if state == nil { + return nil, fmt.Errorf("recording state not found: %s", name) + } + return &tdengineRecordingConn{state: state}, nil +} + +type tdengineRecordingConn struct { + state *tdengineRecordingState +} + +func 
(c *tdengineRecordingConn) Prepare(query string) (driver.Stmt, error) { + return nil, fmt.Errorf("prepare not supported in tdengine recording driver: %s", query) +} + +func (c *tdengineRecordingConn) Close() error { return nil } + +func (c *tdengineRecordingConn) Begin() (driver.Tx, error) { + return nil, fmt.Errorf("transactions not supported in tdengine recording driver") +} + +func (c *tdengineRecordingConn) ExecContext(_ context.Context, query string, args []driver.NamedValue) (driver.Result, error) { + if len(args) > 0 { + return nil, fmt.Errorf("unexpected exec args: %d", len(args)) + } + c.state.mu.Lock() + defer c.state.mu.Unlock() + if c.state.execErr != nil { + return nil, c.state.execErr + } + c.state.queries = append(c.state.queries, query) + return driver.RowsAffected(1), nil +} + +var _ driver.ExecerContext = (*tdengineRecordingConn)(nil) + +func openTDengineRecordingDB(t *testing.T) (*sql.DB, *tdengineRecordingState) { + t.Helper() + registerTDengineRecordingDriverOnce.Do(func() { + sql.Register(tdengineRecordingDriverName, tdengineRecordingDriver{}) + }) + + tdengineRecordingDriverMu.Lock() + tdengineRecordingDriverSeq++ + dsn := fmt.Sprintf("tdengine-recording-%d", tdengineRecordingDriverSeq) + state := &tdengineRecordingState{} + tdengineRecordingDriverStates[dsn] = state + tdengineRecordingDriverMu.Unlock() + + dbConn, err := sql.Open(tdengineRecordingDriverName, dsn) + if err != nil { + t.Fatalf("打开 recording db 失败: %v", err) + } + + t.Cleanup(func() { + _ = dbConn.Close() + tdengineRecordingDriverMu.Lock() + delete(tdengineRecordingDriverStates, dsn) + tdengineRecordingDriverMu.Unlock() + }) + + return dbConn, state +} + +func TestTDengineApplyChanges_InsertsIntoQualifiedTable(t *testing.T) { + t.Parallel() + + dbConn, state := openTDengineRecordingDB(t) + td := &TDengineDB{conn: dbConn} + + changes := connection.ChangeSet{ + Inserts: []map[string]interface{}{ + { + "ts": "2026-03-09 10:00:00", + "value": 12.5, + "device": "sensor-a", + "enabled": true, + }, + }, + } + + if err := td.ApplyChanges("analytics.metrics", changes); err != nil { + t.Fatalf("ApplyChanges 返回错误: %v", err) + } + + queries := state.snapshotQueries() + if len(queries) != 1 { + t.Fatalf("期望执行 1 条 SQL,实际 %d 条: %#v", len(queries), queries) + } + + want := "INSERT INTO `analytics`.`metrics` (`device`, `enabled`, `ts`, `value`) VALUES ('sensor-a', 1, '2026-03-09 10:00:00', 12.5)" + if queries[0] != want { + t.Fatalf("插入 SQL 不符合预期\nwant: %s\n got: %s", want, queries[0]) + } +} + +func TestTDengineApplyChanges_RejectsMixedUpdatesWithoutPartialWrite(t *testing.T) { + t.Parallel() + + dbConn, state := openTDengineRecordingDB(t) + td := &TDengineDB{conn: dbConn} + + changes := connection.ChangeSet{ + Inserts: []map[string]interface{}{{ + "ts": "2026-03-09 10:00:00", + "value": 12.5, + }}, + Updates: []connection.UpdateRow{{ + Keys: map[string]interface{}{"ts": "2026-03-09 10:00:00"}, + Values: map[string]interface{}{"value": 18.8}, + }}, + } + + err := td.ApplyChanges("metrics", changes) + if err == nil { + t.Fatalf("期望 mixed changes 被拒绝") + } + if !strings.Contains(err.Error(), "UPDATE/DELETE") { + t.Fatalf("错误信息未说明限制边界: %v", err) + } + if queries := state.snapshotQueries(); len(queries) != 0 { + t.Fatalf("期望拒绝 mixed changes 时不执行任何 SQL,实际=%#v", queries) + } +} diff --git a/internal/db/tdengine_impl.go b/internal/db/tdengine_impl.go index 300cfb0..7efcf92 100644 --- a/internal/db/tdengine_impl.go +++ b/internal/db/tdengine_impl.go @@ -7,6 +7,7 @@ import ( "database/sql" "fmt" "net" + "sort" "strconv" 
"strings" "time" @@ -362,6 +363,83 @@ func (t *TDengineDB) GetTriggers(dbName, tableName string) ([]connection.Trigger return []connection.TriggerDefinition{}, nil } +func (t *TDengineDB) ApplyChanges(tableName string, changes connection.ChangeSet) error { + if t.conn == nil { + return fmt.Errorf("connection not open") + } + if strings.TrimSpace(tableName) == "" { + return fmt.Errorf("table name required") + } + if len(changes.Updates) > 0 || len(changes.Deletes) > 0 { + return fmt.Errorf("TDengine 目标端当前仅支持 INSERT 写入,暂不支持 UPDATE/DELETE 差异同步,请改用仅插入或全量覆盖模式") + } + + qualifiedTable := quoteTDengineTable("", tableName) + for _, row := range changes.Inserts { + query, err := buildTDengineInsertSQL(qualifiedTable, row) + if err != nil { + return err + } + if query == "" { + continue + } + if _, err := t.conn.Exec(query); err != nil { + return fmt.Errorf("insert error: %v; sql=%s", err, query) + } + } + return nil +} + +func buildTDengineInsertSQL(qualifiedTable string, row map[string]interface{}) (string, error) { + if strings.TrimSpace(qualifiedTable) == "" { + return "", fmt.Errorf("qualified table required") + } + if len(row) == 0 { + return "", nil + } + + cols := make([]string, 0, len(row)) + for key := range row { + if strings.TrimSpace(key) == "" { + continue + } + cols = append(cols, key) + } + if len(cols) == 0 { + return "", nil + } + sort.Strings(cols) + + quotedCols := make([]string, 0, len(cols)) + values := make([]string, 0, len(cols)) + for _, col := range cols { + quotedCols = append(quotedCols, fmt.Sprintf("`%s`", escapeBacktickIdent(col))) + values = append(values, tdengineLiteral(row[col])) + } + + return fmt.Sprintf("INSERT INTO %s (%s) VALUES (%s)", qualifiedTable, strings.Join(quotedCols, ", "), strings.Join(values, ", ")), nil +} + +func tdengineLiteral(value interface{}) string { + switch val := value.(type) { + case nil: + return "NULL" + case bool: + if val { + return "1" + } + return "0" + case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, float32, float64: + return fmt.Sprintf("%v", val) + case time.Time: + return fmt.Sprintf("'%s'", val.Format("2006-01-02 15:04:05")) + case []byte: + return fmt.Sprintf("'%s'", strings.ReplaceAll(string(val), "'", "''")) + default: + return fmt.Sprintf("'%s'", strings.ReplaceAll(fmt.Sprintf("%v", val), "'", "''")) + } +} + func getValueFromRow(row map[string]interface{}, keys ...string) (interface{}, bool) { if len(row) == 0 { return nil, false diff --git a/internal/sync/analyze.go b/internal/sync/analyze.go index a12a2a0..e1a4af1 100644 --- a/internal/sync/analyze.go +++ b/internal/sync/analyze.go @@ -1,22 +1,27 @@ package sync import ( - "GoNavi-Wails/internal/db" "GoNavi-Wails/internal/logger" "fmt" "strings" ) type TableDiffSummary struct { - Table string `json:"table"` - PKColumn string `json:"pkColumn,omitempty"` - CanSync bool `json:"canSync"` - Inserts int `json:"inserts"` - Updates int `json:"updates"` - Deletes int `json:"deletes"` - Same int `json:"same"` - Message string `json:"message,omitempty"` - HasSchema bool `json:"hasSchema,omitempty"` + Table string `json:"table"` + PKColumn string `json:"pkColumn,omitempty"` + CanSync bool `json:"canSync"` + Inserts int `json:"inserts"` + Updates int `json:"updates"` + Deletes int `json:"deletes"` + Same int `json:"same"` + Message string `json:"message,omitempty"` + HasSchema bool `json:"hasSchema,omitempty"` + TargetTableExists bool `json:"targetTableExists,omitempty"` + PlannedAction string `json:"plannedAction,omitempty"` + Warnings []string 
`json:"warnings,omitempty"` + UnsupportedObjects []string `json:"unsupportedObjects,omitempty"` + IndexesToCreate int `json:"indexesToCreate,omitempty"` + IndexesSkipped int `json:"indexesSkipped,omitempty"` } type SyncAnalyzeResult struct { @@ -27,6 +32,12 @@ type SyncAnalyzeResult struct { func (s *SyncEngine) Analyze(config SyncConfig) SyncAnalyzeResult { result := SyncAnalyzeResult{Success: true, Tables: []TableDiffSummary{}} + if isRedisToMongoKeyspacePair(config) { + return s.analyzeRedisToMongo(config) + } + if isMongoToRedisKeyspacePair(config) { + return s.analyzeMongoToRedis(config) + } contentRaw := strings.ToLower(strings.TrimSpace(config.Content)) syncSchema := false @@ -48,25 +59,23 @@ func (s *SyncEngine) Analyze(config SyncConfig) SyncAnalyzeResult { totalTables := len(config.Tables) s.progress(config.JobID, 0, totalTables, "", "差异分析开始") - sourceDB, err := db.NewDatabase(config.SourceConfig.Type) + sourceDB, err := newSyncDatabase(config.SourceConfig.Type) if err != nil { logger.Error(err, "初始化源数据库驱动失败:类型=%s", config.SourceConfig.Type) return SyncAnalyzeResult{Success: false, Message: "初始化源数据库驱动失败: " + err.Error()} } - targetDB, err := db.NewDatabase(config.TargetConfig.Type) + targetDB, err := newSyncDatabase(config.TargetConfig.Type) if err != nil { logger.Error(err, "初始化目标数据库驱动失败:类型=%s", config.TargetConfig.Type) return SyncAnalyzeResult{Success: false, Message: "初始化目标数据库驱动失败: " + err.Error()} } - // Connect Source if err := sourceDB.Connect(config.SourceConfig); err != nil { logger.Error(err, "源数据库连接失败:%s", formatConnSummaryForSync(config.SourceConfig)) return SyncAnalyzeResult{Success: false, Message: "源数据库连接失败: " + err.Error()} } defer sourceDB.Close() - // Connect Target if err := targetDB.Connect(config.TargetConfig); err != nil { logger.Error(err, "目标数据库连接失败:%s", formatConnSummaryForSync(config.TargetConfig)) return SyncAnalyzeResult{Success: false, Message: "目标数据库连接失败: " + err.Error()} @@ -88,51 +97,76 @@ func (s *SyncEngine) Analyze(config SyncConfig) SyncAnalyzeResult { HasSchema: syncSchema, } - sourceSchema, sourceTable := normalizeSchemaAndTable(config.SourceConfig.Type, config.SourceConfig.Database, tableName) - targetSchema, targetTable := normalizeSchemaAndTable(config.TargetConfig.Type, config.TargetConfig.Database, tableName) - sourceQueryTable := qualifiedNameForQuery(config.SourceConfig.Type, sourceSchema, sourceTable, tableName) - targetQueryTable := qualifiedNameForQuery(config.TargetConfig.Type, targetSchema, targetTable, tableName) - - cols, err := sourceDB.GetColumns(sourceSchema, sourceTable) + plan, cols, _, err := buildSchemaMigrationPlan(config, tableName, sourceDB, targetDB) if err != nil { - summary.Message = "获取源表字段失败: " + err.Error() + summary.Message = err.Error() + result.Tables = append(result.Tables, summary) + return + } + summary.TargetTableExists = plan.TargetTableExists + summary.PlannedAction = plan.PlannedAction + summary.Warnings = append(summary.Warnings, plan.Warnings...) + summary.UnsupportedObjects = append(summary.UnsupportedObjects, plan.UnsupportedObjects...) 
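+			// Everything the planner computed is copied straight onto the
+			// per-table summary, so the analyze result alone can drive the
+			// 目标表 / 计划 / 风险 columns in DataSyncModal without another
+			// round-trip to the backend.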
+ summary.IndexesToCreate = plan.IndexesToCreate + summary.IndexesSkipped = plan.IndexesSkipped + + if !plan.TargetTableExists && !plan.AutoCreate { + summary.Message = firstNonEmpty(plan.PlannedAction, "目标表不存在,无法执行同步") result.Tables = append(result.Tables, summary) return } if !syncData { summary.CanSync = true - summary.Message = "仅同步结构,未执行数据差异分析" + summary.Message = firstNonEmpty(plan.PlannedAction, "仅同步结构,未执行数据差异分析") result.Tables = append(result.Tables, summary) return } + tableMode := normalizeSyncMode(config.Mode) pkCols := make([]string, 0, 2) for _, c := range cols { if c.Key == "PRI" || c.Key == "PK" { pkCols = append(pkCols, c.Name) } } - if len(pkCols) == 0 { - summary.Message = "无主键,不支持数据对比/同步" - result.Tables = append(result.Tables, summary) - return - } - if len(pkCols) > 1 { - summary.Message = fmt.Sprintf("复合主键(%s),暂不支持数据对比/同步", strings.Join(pkCols, ",")) - result.Tables = append(result.Tables, summary) - return - } - summary.PKColumn = pkCols[0] - // Query data for diff - sourceRows, _, err := sourceDB.Query(fmt.Sprintf("SELECT * FROM %s", quoteQualifiedIdentByType(config.SourceConfig.Type, sourceQueryTable))) + sourceRows, _, err := sourceDB.Query(fmt.Sprintf("SELECT * FROM %s", quoteQualifiedIdentByType(config.SourceConfig.Type, plan.SourceQueryTable))) if err != nil { summary.Message = "读取源表失败: " + err.Error() result.Tables = append(result.Tables, summary) return } - targetRows, _, err := targetDB.Query(fmt.Sprintf("SELECT * FROM %s", quoteQualifiedIdentByType(config.TargetConfig.Type, targetQueryTable))) + + if !plan.TargetTableExists && plan.AutoCreate { + summary.CanSync = true + summary.Inserts = len(sourceRows) + summary.Message = firstNonEmpty(plan.PlannedAction, "目标表不存在,执行时将自动建表并导入全部源数据") + result.Tables = append(result.Tables, summary) + return + } + + if tableMode != "insert_update" { + summary.CanSync = true + summary.Inserts = len(sourceRows) + summary.Message = firstNonEmpty(plan.PlannedAction, "当前模式无需差异对比,将按源表数据执行导入") + result.Tables = append(result.Tables, summary) + return + } + + if len(pkCols) == 0 { + summary.Message = "无主键,不支持差异对比同步;如需直接导入请使用仅插入或全量覆盖模式" + result.Tables = append(result.Tables, summary) + return + } + if len(pkCols) > 1 { + summary.Message = fmt.Sprintf("复合主键(%s),暂不支持差异对比同步", strings.Join(pkCols, ",")) + result.Tables = append(result.Tables, summary) + return + } + summary.PKColumn = pkCols[0] + + targetRows, _, err := targetDB.Query(fmt.Sprintf("SELECT * FROM %s", quoteQualifiedIdentByType(config.TargetConfig.Type, plan.TargetQueryTable))) if err != nil { summary.Message = "读取目标表失败: " + err.Error() result.Tables = append(result.Tables, summary) @@ -188,6 +222,9 @@ func (s *SyncEngine) Analyze(config SyncConfig) SyncAnalyzeResult { } summary.CanSync = true + if strings.TrimSpace(summary.Message) == "" { + summary.Message = firstNonEmpty(plan.PlannedAction, "差异分析完成") + } result.Tables = append(result.Tables, summary) }() } @@ -196,3 +233,12 @@ func (s *SyncEngine) Analyze(config SyncConfig) SyncAnalyzeResult { result.Message = fmt.Sprintf("已完成 %d 张表的差异分析", len(result.Tables)) return result } + +func firstNonEmpty(values ...string) string { + for _, value := range values { + if strings.TrimSpace(value) != "" { + return value + } + } + return "" +} diff --git a/internal/sync/migration_clickhouse.go b/internal/sync/migration_clickhouse.go new file mode 100644 index 0000000..d67fcef --- /dev/null +++ b/internal/sync/migration_clickhouse.go @@ -0,0 +1,741 @@ +package sync + +import ( + "GoNavi-Wails/internal/connection" + 
"GoNavi-Wails/internal/db" + "fmt" + "regexp" + "strings" +) + +func buildMySQLToClickHousePlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + plan := SchemaMigrationPlan{} + plan.SourceSchema, plan.SourceTable = normalizeSchemaAndTable(config.SourceConfig.Type, config.SourceConfig.Database, tableName) + plan.TargetSchema, plan.TargetTable = normalizeSchemaAndTable(config.TargetConfig.Type, config.TargetConfig.Database, tableName) + plan.SourceQueryTable = qualifiedNameForQuery(config.SourceConfig.Type, plan.SourceSchema, plan.SourceTable, tableName) + plan.TargetQueryTable = qualifiedNameForQuery(config.TargetConfig.Type, plan.TargetSchema, plan.TargetTable, tableName) + plan.PlannedAction = "使用已有目标表导入" + + sourceCols, sourceExists, err := inspectTableColumns(sourceDB, plan.SourceSchema, plan.SourceTable) + if err != nil { + return plan, nil, nil, fmt.Errorf("获取源表字段失败: %w", err) + } + if !sourceExists { + return plan, nil, nil, fmt.Errorf("源表不存在或无列定义: %s", tableName) + } + + targetCols, targetExists, err := inspectTableColumns(targetDB, plan.TargetSchema, plan.TargetTable) + if err != nil { + return plan, sourceCols, nil, fmt.Errorf("获取目标表字段失败: %w", err) + } + plan.TargetTableExists = targetExists + + strategy := normalizeTargetTableStrategy(config.TargetTableStrategy) + if targetExists { + missing := diffMissingColumnNames(sourceCols, targetCols) + if len(missing) > 0 { + plan.Warnings = append(plan.Warnings, fmt.Sprintf("目标表缺失字段 %d 个:%s", len(missing), strings.Join(missing, ", "))) + } + if config.AutoAddColumns { + addSQL, addWarnings := buildMySQLToClickHouseAddColumnSQL(plan.TargetQueryTable, sourceCols, targetCols) + plan.PreDataSQL = append(plan.PreDataSQL, addSQL...) + plan.Warnings = append(plan.Warnings, addWarnings...) + if len(addSQL) > 0 { + plan.PlannedAction = fmt.Sprintf("补齐缺失字段(%d)后导入", len(addSQL)) + } + } + plan.Warnings = append(plan.Warnings, "ClickHouse 目标端建议优先使用仅插入或全量覆盖;更新/删除语义与传统关系型存在差异") + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + } + + switch strategy { + case "existing_only": + plan.PlannedAction = "目标表不存在,需先手工创建" + plan.Warnings = append(plan.Warnings, "当前策略要求目标表已存在,执行时不会自动建表") + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + case "smart", "auto_create_if_missing": + plan.AutoCreate = true + plan.PlannedAction = "目标表不存在,将自动建表后导入" + createSQL, warnings, unsupported := buildMySQLToClickHouseCreateTableSQL(plan.TargetQueryTable, sourceCols) + plan.CreateTableSQL = createSQL + plan.Warnings = append(plan.Warnings, warnings...) + plan.UnsupportedObjects = append(plan.UnsupportedObjects, unsupported...) 
+ return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + default: + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + } +} + +func buildPGLikeToClickHousePlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + plan := SchemaMigrationPlan{} + sourceType := resolveMigrationDBType(config.SourceConfig) + targetType := resolveMigrationDBType(config.TargetConfig) + plan.SourceSchema, plan.SourceTable = normalizeSchemaAndTable(sourceType, config.SourceConfig.Database, tableName) + plan.TargetSchema, plan.TargetTable = normalizeSchemaAndTable(targetType, config.TargetConfig.Database, tableName) + plan.SourceQueryTable = qualifiedNameForQuery(sourceType, plan.SourceSchema, plan.SourceTable, tableName) + plan.TargetQueryTable = qualifiedNameForQuery(targetType, plan.TargetSchema, plan.TargetTable, tableName) + plan.PlannedAction = "使用已有目标表导入" + + sourceCols, sourceExists, err := inspectTableColumns(sourceDB, plan.SourceSchema, plan.SourceTable) + if err != nil { + return plan, nil, nil, fmt.Errorf("获取源表字段失败: %w", err) + } + if !sourceExists { + return plan, nil, nil, fmt.Errorf("源表不存在或无列定义: %s", tableName) + } + + targetCols, targetExists, err := inspectTableColumns(targetDB, plan.TargetSchema, plan.TargetTable) + if err != nil { + return plan, sourceCols, nil, fmt.Errorf("获取目标表字段失败: %w", err) + } + plan.TargetTableExists = targetExists + + strategy := normalizeTargetTableStrategy(config.TargetTableStrategy) + if targetExists { + missing := diffMissingColumnNames(sourceCols, targetCols) + if len(missing) > 0 { + plan.Warnings = append(plan.Warnings, fmt.Sprintf("目标表缺失字段 %d 个:%s", len(missing), strings.Join(missing, ", "))) + } + if config.AutoAddColumns { + addSQL, addWarnings := buildPGLikeToClickHouseAddColumnSQL(plan.TargetQueryTable, sourceCols, targetCols) + plan.PreDataSQL = append(plan.PreDataSQL, addSQL...) + plan.Warnings = append(plan.Warnings, addWarnings...) + if len(addSQL) > 0 { + plan.PlannedAction = fmt.Sprintf("补齐缺失字段(%d)后导入", len(addSQL)) + } + } + plan.Warnings = append(plan.Warnings, "ClickHouse 目标端建议优先使用仅插入或全量覆盖;更新/删除语义与传统关系型存在差异") + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + } + + switch strategy { + case "existing_only": + plan.PlannedAction = "目标表不存在,需先手工创建" + plan.Warnings = append(plan.Warnings, "当前策略要求目标表已存在,执行时不会自动建表") + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + case "smart", "auto_create_if_missing": + plan.AutoCreate = true + plan.PlannedAction = "目标表不存在,将自动建表后导入" + createSQL, warnings, unsupported := buildPGLikeToClickHouseCreateTableSQL(plan.TargetQueryTable, sourceCols) + plan.CreateTableSQL = createSQL + plan.Warnings = append(plan.Warnings, warnings...) + plan.UnsupportedObjects = append(plan.UnsupportedObjects, unsupported...) 
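diffMissingColumnNames is another helper referenced by all of these builders
but defined elsewhere in the patch. A sketch assuming a case-insensitive name
difference that preserves source-column order, which is consistent with how the
"目标表缺失字段" warning above lists the fields:

```go
package sync

import (
	"strings"

	"GoNavi-Wails/internal/connection"
)

// Sketch only: source columns whose names (compared case-insensitively) do not
// exist on the target, reported in source order so the warning reads stably.
func diffMissingColumnNamesSketch(sourceCols, targetCols []connection.ColumnDefinition) []string {
	have := make(map[string]struct{}, len(targetCols))
	for _, col := range targetCols {
		have[strings.ToLower(strings.TrimSpace(col.Name))] = struct{}{}
	}
	missing := make([]string, 0)
	for _, col := range sourceCols {
		key := strings.ToLower(strings.TrimSpace(col.Name))
		if key == "" {
			continue
		}
		if _, ok := have[key]; !ok {
			missing = append(missing, col.Name)
		}
	}
	return missing
}
```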
+ return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + default: + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + } +} + +func buildClickHouseToMySQLPlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + plan := SchemaMigrationPlan{} + plan.SourceSchema, plan.SourceTable = normalizeSchemaAndTable(config.SourceConfig.Type, config.SourceConfig.Database, tableName) + plan.TargetSchema, plan.TargetTable = normalizeSchemaAndTable(config.TargetConfig.Type, config.TargetConfig.Database, tableName) + plan.SourceQueryTable = qualifiedNameForQuery(config.SourceConfig.Type, plan.SourceSchema, plan.SourceTable, tableName) + plan.TargetQueryTable = qualifiedNameForQuery(config.TargetConfig.Type, plan.TargetSchema, plan.TargetTable, tableName) + plan.PlannedAction = "使用已有目标表导入" + + sourceCols, sourceExists, err := inspectTableColumns(sourceDB, plan.SourceSchema, plan.SourceTable) + if err != nil { + return plan, nil, nil, fmt.Errorf("获取源表字段失败: %w", err) + } + if !sourceExists { + return plan, nil, nil, fmt.Errorf("源表不存在或无列定义: %s", tableName) + } + + targetCols, targetExists, err := inspectTableColumns(targetDB, plan.TargetSchema, plan.TargetTable) + if err != nil { + return plan, sourceCols, nil, fmt.Errorf("获取目标表字段失败: %w", err) + } + plan.TargetTableExists = targetExists + + strategy := normalizeTargetTableStrategy(config.TargetTableStrategy) + if targetExists { + missing := diffMissingColumnNames(sourceCols, targetCols) + if len(missing) > 0 { + plan.Warnings = append(plan.Warnings, fmt.Sprintf("目标表缺失字段 %d 个:%s", len(missing), strings.Join(missing, ", "))) + } + if config.AutoAddColumns { + addSQL, addWarnings := buildClickHouseToMySQLAddColumnSQL(plan.TargetQueryTable, sourceCols, targetCols) + plan.PreDataSQL = append(plan.PreDataSQL, addSQL...) + plan.Warnings = append(plan.Warnings, addWarnings...) + if len(addSQL) > 0 { + plan.PlannedAction = fmt.Sprintf("补齐缺失字段(%d)后导入", len(addSQL)) + } + } + plan.Warnings = append(plan.Warnings, "ClickHouse 源端索引/约束元数据有限,反向迁移将以字段和数据为主") + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + } + + switch strategy { + case "existing_only": + plan.PlannedAction = "目标表不存在,需先手工创建" + plan.Warnings = append(plan.Warnings, "当前策略要求目标表已存在,执行时不会自动建表") + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + case "smart", "auto_create_if_missing": + plan.AutoCreate = true + plan.PlannedAction = "目标表不存在,将自动建表后导入" + createSQL, warnings := buildClickHouseToMySQLCreateTableSQL(plan.TargetQueryTable, sourceCols) + plan.CreateTableSQL = createSQL + plan.Warnings = append(plan.Warnings, warnings...) 
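SchemaMigrationPlan itself is declared in the new migration_kernel_types.go
rather than in this file. Reconstructed purely from the fields these builders
read and write, its shape is roughly the following; the field list is inferred
from usage and the authoritative declaration may carry more:

```go
// Reconstructed from usage in migration_clickhouse.go and analyze.go; see
// migration_kernel_types.go for the authoritative declaration.
type schemaMigrationPlanSketch struct {
	SourceSchema, SourceTable string
	TargetSchema, TargetTable string
	SourceQueryTable          string // qualified name used to SELECT from the source
	TargetQueryTable          string // qualified name used for DDL/DML on the target
	TargetTableExists         bool
	AutoCreate                bool     // strategy allows creating the missing target table
	PlannedAction             string   // human-readable plan echoed into TableDiffSummary
	CreateTableSQL            string   // emitted only when AutoCreate is set
	PreDataSQL                []string // e.g. ALTER TABLE ... ADD COLUMN before data import
	Warnings                  []string
	UnsupportedObjects        []string
	IndexesToCreate           int
	IndexesSkipped            int
}
```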
+ return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + default: + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + } +} + +func buildClickHouseToPGLikePlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + plan := SchemaMigrationPlan{} + sourceType := resolveMigrationDBType(config.SourceConfig) + targetType := resolveMigrationDBType(config.TargetConfig) + plan.SourceSchema, plan.SourceTable = normalizeSchemaAndTable(sourceType, config.SourceConfig.Database, tableName) + plan.TargetSchema, plan.TargetTable = normalizeSchemaAndTable(targetType, config.TargetConfig.Database, tableName) + plan.SourceQueryTable = qualifiedNameForQuery(sourceType, plan.SourceSchema, plan.SourceTable, tableName) + plan.TargetQueryTable = qualifiedNameForQuery(targetType, plan.TargetSchema, plan.TargetTable, tableName) + plan.PlannedAction = "使用已有目标表导入" + + sourceCols, sourceExists, err := inspectTableColumns(sourceDB, plan.SourceSchema, plan.SourceTable) + if err != nil { + return plan, nil, nil, fmt.Errorf("获取源表字段失败: %w", err) + } + if !sourceExists { + return plan, nil, nil, fmt.Errorf("源表不存在或无列定义: %s", tableName) + } + + targetCols, targetExists, err := inspectTableColumns(targetDB, plan.TargetSchema, plan.TargetTable) + if err != nil { + return plan, sourceCols, nil, fmt.Errorf("获取目标表字段失败: %w", err) + } + plan.TargetTableExists = targetExists + + strategy := normalizeTargetTableStrategy(config.TargetTableStrategy) + if targetExists { + missing := diffMissingColumnNames(sourceCols, targetCols) + if len(missing) > 0 { + plan.Warnings = append(plan.Warnings, fmt.Sprintf("目标表缺失字段 %d 个:%s", len(missing), strings.Join(missing, ", "))) + } + if config.AutoAddColumns { + addSQL, addWarnings := buildClickHouseToPGLikeAddColumnSQL(targetType, plan.TargetQueryTable, sourceCols, targetCols) + plan.PreDataSQL = append(plan.PreDataSQL, addSQL...) + plan.Warnings = append(plan.Warnings, addWarnings...) + if len(addSQL) > 0 { + plan.PlannedAction = fmt.Sprintf("补齐缺失字段(%d)后导入", len(addSQL)) + } + } + plan.Warnings = append(plan.Warnings, "ClickHouse 源端索引/约束元数据有限,反向迁移将以字段和数据为主") + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + } + + switch strategy { + case "existing_only": + plan.PlannedAction = "目标表不存在,需先手工创建" + plan.Warnings = append(plan.Warnings, "当前策略要求目标表已存在,执行时不会自动建表") + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + case "smart", "auto_create_if_missing": + plan.AutoCreate = true + plan.PlannedAction = "目标表不存在,将自动建表后导入" + createSQL, warnings, unsupported := buildClickHouseToPGLikeCreateTableSQL(targetType, plan.TargetQueryTable, sourceCols) + plan.CreateTableSQL = createSQL + plan.Warnings = append(plan.Warnings, warnings...) + plan.UnsupportedObjects = append(plan.UnsupportedObjects, unsupported...) 
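dedupeStrings (and the dedupeSchemaMigrationPlan wrapper every builder returns
through) are also defined outside this hunk; since the warnings are user-facing
lists, an order-preserving de-duplication is the natural reading — sketched
here under that assumption:

```go
package sync

// Sketch only: assumed order-preserving de-duplication behind dedupeStrings.
func dedupeStringsSketch(items []string) []string {
	seen := make(map[string]struct{}, len(items))
	out := make([]string, 0, len(items))
	for _, item := range items {
		if _, ok := seen[item]; ok {
			continue
		}
		seen[item] = struct{}{}
		out = append(out, item)
	}
	return out
}
```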
+ return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + default: + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + } +} + +func buildPGLikeToClickHouseAddColumnSQL(targetQueryTable string, sourceCols, targetCols []connection.ColumnDefinition) ([]string, []string) { + targetSet := make(map[string]struct{}, len(targetCols)) + for _, col := range targetCols { + key := strings.ToLower(strings.TrimSpace(col.Name)) + if key == "" { + continue + } + targetSet[key] = struct{}{} + } + var sqlList []string + var warnings []string + for _, col := range sourceCols { + key := strings.ToLower(strings.TrimSpace(col.Name)) + if key == "" { + continue + } + if _, ok := targetSet[key]; ok { + continue + } + colType, mapWarnings := mapPGLikeColumnToClickHouse(col) + warnings = append(warnings, mapWarnings...) + sqlList = append(sqlList, fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s %s", + quoteQualifiedIdentByType("clickhouse", targetQueryTable), + quoteIdentByType("clickhouse", col.Name), + colType, + )) + } + return sqlList, dedupeStrings(warnings) +} + +func buildMySQLToClickHouseAddColumnSQL(targetQueryTable string, sourceCols, targetCols []connection.ColumnDefinition) ([]string, []string) { + targetSet := make(map[string]struct{}, len(targetCols)) + for _, col := range targetCols { + key := strings.ToLower(strings.TrimSpace(col.Name)) + if key == "" { + continue + } + targetSet[key] = struct{}{} + } + var sqlList []string + var warnings []string + for _, col := range sourceCols { + key := strings.ToLower(strings.TrimSpace(col.Name)) + if key == "" { + continue + } + if _, ok := targetSet[key]; ok { + continue + } + colType, mapWarnings := mapMySQLColumnToClickHouse(col) + warnings = append(warnings, mapWarnings...) + sqlList = append(sqlList, fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s %s", + quoteQualifiedIdentByType("clickhouse", targetQueryTable), + quoteIdentByType("clickhouse", col.Name), + colType, + )) + } + return sqlList, dedupeStrings(warnings) +} + +func buildClickHouseToPGLikeAddColumnSQL(targetType string, targetQueryTable string, sourceCols, targetCols []connection.ColumnDefinition) ([]string, []string) { + targetSet := make(map[string]struct{}, len(targetCols)) + for _, col := range targetCols { + key := strings.ToLower(strings.TrimSpace(col.Name)) + if key == "" { + continue + } + targetSet[key] = struct{}{} + } + var sqlList []string + var warnings []string + for _, col := range sourceCols { + key := strings.ToLower(strings.TrimSpace(col.Name)) + if key == "" { + continue + } + if _, ok := targetSet[key]; ok { + continue + } + colType, mapWarnings := mapClickHouseColumnToPGLike(col) + warnings = append(warnings, mapWarnings...) 
+ sqlList = append(sqlList, fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s %s NULL", + quoteQualifiedIdentByType(targetType, targetQueryTable), + quoteIdentByType(targetType, col.Name), + colType, + )) + } + return sqlList, dedupeStrings(warnings) +} + +func buildClickHouseToMySQLAddColumnSQL(targetQueryTable string, sourceCols, targetCols []connection.ColumnDefinition) ([]string, []string) { + targetSet := make(map[string]struct{}, len(targetCols)) + for _, col := range targetCols { + key := strings.ToLower(strings.TrimSpace(col.Name)) + if key == "" { + continue + } + targetSet[key] = struct{}{} + } + var sqlList []string + var warnings []string + for _, col := range sourceCols { + key := strings.ToLower(strings.TrimSpace(col.Name)) + if key == "" { + continue + } + if _, ok := targetSet[key]; ok { + continue + } + colType, mapWarnings := mapClickHouseColumnToMySQL(col) + warnings = append(warnings, mapWarnings...) + sqlList = append(sqlList, fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s %s NULL", + quoteQualifiedIdentByType("mysql", targetQueryTable), + quoteIdentByType("mysql", col.Name), + colType, + )) + } + return sqlList, dedupeStrings(warnings) +} + +func buildPGLikeToClickHouseCreateTableSQL(targetQueryTable string, sourceCols []connection.ColumnDefinition) (string, []string, []string) { + columnDefs := make([]string, 0, len(sourceCols)) + warnings := make([]string, 0) + unsupported := make([]string, 0) + orderByCols := make([]string, 0) + for _, col := range sourceCols { + def, colWarnings := buildPGLikeToClickHouseColumnDefinition(col) + warnings = append(warnings, colWarnings...) + columnDefs = append(columnDefs, fmt.Sprintf("%s %s", quoteIdentByType("clickhouse", col.Name), def)) + if col.Key == "PRI" || col.Key == "PK" { + orderByCols = append(orderByCols, quoteIdentByType("clickhouse", col.Name)) + } + } + orderExpr := "tuple()" + if len(orderByCols) > 0 { + orderExpr = "(" + strings.Join(orderByCols, ", ") + ")" + } else { + warnings = append(warnings, "源表未识别到主键,ClickHouse 将使用 ORDER BY tuple() 建表,后续查询性能可能受影响") + } + warnings = append(warnings, "ClickHouse 不保留关系型外键/唯一约束语义,将仅迁移字段与数据") + createSQL := fmt.Sprintf("CREATE TABLE %s (\n %s\n) ENGINE = MergeTree() ORDER BY %s", quoteQualifiedIdentByType("clickhouse", targetQueryTable), strings.Join(columnDefs, ",\n "), orderExpr) + return createSQL, dedupeStrings(warnings), dedupeStrings(unsupported) +} + +func buildMySQLToClickHouseCreateTableSQL(targetQueryTable string, sourceCols []connection.ColumnDefinition) (string, []string, []string) { + columnDefs := make([]string, 0, len(sourceCols)) + warnings := make([]string, 0) + unsupported := make([]string, 0) + orderByCols := make([]string, 0) + for _, col := range sourceCols { + def, colWarnings := buildMySQLToClickHouseColumnDefinition(col) + warnings = append(warnings, colWarnings...) 
+ columnDefs = append(columnDefs, fmt.Sprintf("%s %s", quoteIdentByType("clickhouse", col.Name), def)) + if col.Key == "PRI" || col.Key == "PK" { + orderByCols = append(orderByCols, quoteIdentByType("clickhouse", col.Name)) + } + } + orderExpr := "tuple()" + if len(orderByCols) > 0 { + orderExpr = "(" + strings.Join(orderByCols, ", ") + ")" + } else { + warnings = append(warnings, "源表未识别到主键,ClickHouse 将使用 ORDER BY tuple() 建表,后续查询性能可能受影响") + } + warnings = append(warnings, "ClickHouse 不保留关系型外键/唯一约束语义,将仅迁移字段与数据") + createSQL := fmt.Sprintf("CREATE TABLE %s (\n %s\n) ENGINE = MergeTree() ORDER BY %s", quoteQualifiedIdentByType("clickhouse", targetQueryTable), strings.Join(columnDefs, ",\n "), orderExpr) + return createSQL, dedupeStrings(warnings), dedupeStrings(unsupported) +} + +func buildClickHouseToPGLikeCreateTableSQL(targetType string, targetQueryTable string, sourceCols []connection.ColumnDefinition) (string, []string, []string) { + columnDefs := make([]string, 0, len(sourceCols)+1) + warnings := make([]string, 0) + unsupported := []string{"ClickHouse ORDER BY/PARTITION/TTL/Projection/物化视图 语义当前不会自动迁移到 PG-like"} + pkCols := make([]string, 0) + for _, col := range sourceCols { + def, colWarnings := buildClickHouseToPGLikeColumnDefinition(col) + warnings = append(warnings, colWarnings...) + columnDefs = append(columnDefs, fmt.Sprintf("%s %s", quoteIdentByType(targetType, col.Name), def)) + if col.Key == "PRI" || col.Key == "PK" { + pkCols = append(pkCols, quoteIdentByType(targetType, col.Name)) + } + } + if len(pkCols) > 0 { + columnDefs = append(columnDefs, fmt.Sprintf("PRIMARY KEY (%s)", strings.Join(pkCols, ", "))) + } else { + warnings = append(warnings, "ClickHouse 源端未返回主键信息,目标 PG-like 表将不自动创建主键") + } + createSQL := fmt.Sprintf("CREATE TABLE %s (\n %s\n)", quoteQualifiedIdentByType(targetType, targetQueryTable), strings.Join(columnDefs, ",\n ")) + return createSQL, dedupeStrings(warnings), dedupeStrings(unsupported) +} + +func buildClickHouseToMySQLCreateTableSQL(targetQueryTable string, sourceCols []connection.ColumnDefinition) (string, []string) { + columnDefs := make([]string, 0, len(sourceCols)+1) + warnings := make([]string, 0) + pkCols := make([]string, 0) + for _, col := range sourceCols { + def, colWarnings := buildClickHouseToMySQLColumnDefinition(col) + warnings = append(warnings, colWarnings...) 
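+		// Whether ClickHouse reports key columns here depends on the source
+		// driver; when it does, they are reused as the MySQL PRIMARY KEY.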
+		columnDefs = append(columnDefs, fmt.Sprintf("%s %s", quoteIdentByType("mysql", col.Name), def))
+		if col.Key == "PRI" || col.Key == "PK" {
+			pkCols = append(pkCols, quoteIdentByType("mysql", col.Name))
+		}
+	}
+	if len(pkCols) > 0 {
+		columnDefs = append(columnDefs, fmt.Sprintf("PRIMARY KEY (%s)", strings.Join(pkCols, ", ")))
+	} else {
+		warnings = append(warnings, "ClickHouse 源端未返回主键信息,目标 MySQL 表将不自动创建主键")
+	}
+	createSQL := fmt.Sprintf("CREATE TABLE %s (\n %s\n)", quoteQualifiedIdentByType("mysql", targetQueryTable), strings.Join(columnDefs, ",\n "))
+	return createSQL, dedupeStrings(warnings)
+}
+
+func buildPGLikeToClickHouseColumnDefinition(col connection.ColumnDefinition) (string, []string) {
+	// ClickHouse column definitions are just the mapped type; nullability is
+	// already encoded by the mapper as Nullable(...).
+	targetType, warnings := mapPGLikeColumnToClickHouse(col)
+	return targetType, dedupeStrings(warnings)
+}
+
+func buildMySQLToClickHouseColumnDefinition(col connection.ColumnDefinition) (string, []string) {
+	targetType, warnings := mapMySQLColumnToClickHouse(col)
+	return targetType, dedupeStrings(warnings)
+}
+
+func buildClickHouseToPGLikeColumnDefinition(col connection.ColumnDefinition) (string, []string) {
+	targetType, warnings := mapClickHouseColumnToPGLike(col)
+	parts := []string{targetType}
+	if strings.EqualFold(strings.TrimSpace(col.Nullable), "NO") {
+		parts = append(parts, "NOT NULL")
+	}
+	return strings.Join(parts, " "), dedupeStrings(warnings)
+}
+
+func buildClickHouseToMySQLColumnDefinition(col connection.ColumnDefinition) (string, []string) {
+	targetType, warnings := mapClickHouseColumnToMySQL(col)
+	parts := []string{targetType}
+	if strings.EqualFold(strings.TrimSpace(col.Nullable), "NO") {
+		parts = append(parts, "NOT NULL")
+	}
+	return strings.Join(parts, " "), dedupeStrings(warnings)
+}
+
+func mapPGLikeColumnToClickHouse(col connection.ColumnDefinition) (string, []string) {
+	raw := strings.ToLower(strings.TrimSpace(col.Type))
+	warnings := make([]string, 0)
+	if raw == "" {
+		return "String", []string{fmt.Sprintf("字段 %s 类型为空,已降级为 String", col.Name)}
+	}
+	baseType := "String"
+	switch {
+	case raw == "boolean" || strings.HasPrefix(raw, "bool"):
+		baseType = "UInt8"
+	case raw == "smallint":
+		baseType = "Int16"
+	case raw == "integer" || raw == "int4":
+		baseType = "Int32"
+	case raw == "bigint" || raw == "int8":
+		baseType = "Int64"
+	case strings.HasPrefix(raw, "numeric"), strings.HasPrefix(raw, "decimal"):
+		baseType = replaceTypeBase(raw, []string{"numeric", "decimal"}, "Decimal")
+	case raw == "real" || raw == "float4":
+		baseType = "Float32"
+	case raw == "double precision" || raw == "float8":
+		baseType = "Float64"
+	case raw == "date":
+		baseType = "Date"
+	case strings.HasPrefix(raw, "timestamp"):
+		// Matching only the "timestamp" prefix keeps "time [with|without] time
+		// zone" out of this branch so it degrades to String below.
+		baseType = "DateTime"
+	case strings.HasPrefix(raw, "time"):
+		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已降级为 String", col.Name, col.Type))
+		baseType = "String"
+	case strings.HasPrefix(raw, "character varying"), strings.HasPrefix(raw, "varchar("), strings.HasPrefix(raw, "character("), strings.HasPrefix(raw, "char("), raw == "character", raw == "text", raw == "uuid":
+		baseType = "String"
+	case raw == "json" || raw == "jsonb" || raw == "bytea":
+		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已降级为 String", col.Name, col.Type))
+		baseType = "String"
+	case strings.HasSuffix(raw, "[]") || strings.HasPrefix(raw, "array"):
+		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已降级为 String", col.Name, col.Type))
+		baseType = "String"
+	case raw == "user-defined":
+		warnings = append(warnings, fmt.Sprintf("字段 %s 为用户自定义类型,已降级为 String", col.Name))
+		baseType = "String"
+	default:
+		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 暂无专门映射,已降级为 String", col.Name, col.Type))
+		baseType = "String"
+	}
+	if strings.EqualFold(strings.TrimSpace(col.Nullable), "YES") && !strings.HasPrefix(strings.ToLower(baseType), "nullable(") {
+		baseType = fmt.Sprintf("Nullable(%s)", baseType)
+	}
+	if strings.Contains(strings.ToLower(strings.TrimSpace(col.Extra)), "identity") || strings.Contains(strings.ToLower(strings.TrimSpace(col.Extra)), "auto_increment") {
+		warnings = append(warnings, fmt.Sprintf("字段 %s 的 identity/自增语义在 ClickHouse 中不保留", col.Name))
+	}
+	return baseType, dedupeStrings(warnings)
+}
+
+func mapMySQLColumnToClickHouse(col connection.ColumnDefinition) (string, []string) {
+	raw := strings.ToLower(strings.TrimSpace(col.Type))
+	warnings := make([]string, 0)
+	if raw == "" {
+		return "String", []string{fmt.Sprintf("字段 %s 类型为空,已降级为 String", col.Name)}
+	}
+	unsigned := strings.Contains(raw, "unsigned")
+	clean := strings.ReplaceAll(raw, " unsigned", "")
+	clean = strings.ReplaceAll(clean, " zerofill", "")
+	baseType := "String"
+	switch {
+	case strings.HasPrefix(clean, "tinyint(1)"):
+		baseType = "UInt8"
+	case strings.HasPrefix(clean, "tinyint"):
+		if unsigned {
+			baseType = "UInt8"
+		} else {
+			baseType = "Int8"
+		}
+	case strings.HasPrefix(clean, "smallint"):
+		if unsigned {
+			baseType = "UInt16"
+		} else {
+			baseType = "Int16"
+		}
+	case strings.HasPrefix(clean, "mediumint"), strings.HasPrefix(clean, "int"), strings.HasPrefix(clean, "integer"):
+		if unsigned {
+			baseType = "UInt32"
+		} else {
+			baseType = "Int32"
+		}
+	case strings.HasPrefix(clean, "bigint"):
+		if unsigned {
+			baseType = "UInt64"
+		} else {
+			baseType = "Int64"
+		}
+	case strings.HasPrefix(clean, "decimal"), strings.HasPrefix(clean, "numeric"):
+		// Match lower-case base names, mirroring the PG-like mapper above.
+		baseType = replaceTypeBase(clean, []string{"decimal", "numeric"}, "Decimal")
+	case strings.HasPrefix(clean, "float"):
+		baseType = "Float32"
+	case strings.HasPrefix(clean, "double"):
+		baseType = "Float64"
+	// datetime/timestamp must be checked before the bare "date" prefix,
+	// otherwise every datetime column would be truncated to Date.
+	case strings.HasPrefix(clean, "datetime"), strings.HasPrefix(clean, "timestamp"):
+		baseType = "DateTime"
+	case strings.HasPrefix(clean, "date"):
+		baseType = "Date"
+	case strings.HasPrefix(clean, "time"):
+		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 time 已降级为 String", col.Name))
+		baseType = "String"
+	case strings.HasPrefix(clean, "json"), strings.HasPrefix(clean, "enum"), strings.HasPrefix(clean, "set"), strings.HasPrefix(clean, "char"), strings.HasPrefix(clean, "varchar"), strings.Contains(clean, "text"):
+		baseType = "String"
+	case strings.Contains(clean, "blob"), strings.Contains(clean, "binary"):
+		warnings = append(warnings, fmt.Sprintf("字段 %s 二进制类型已降级为 String", col.Name))
+		baseType = "String"
+	default:
+		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 暂无专门映射,已降级为 String", col.Name, col.Type))
+		baseType = "String"
+	}
+	if strings.EqualFold(strings.TrimSpace(col.Nullable), "YES") && !strings.HasPrefix(strings.ToLower(baseType), "nullable(") {
+		baseType = fmt.Sprintf("Nullable(%s)", baseType)
+	}
+	if strings.Contains(strings.ToLower(strings.TrimSpace(col.Extra)), "auto_increment") {
+		warnings =
append(warnings, fmt.Sprintf("字段 %s 的 AUTO_INCREMENT 在 ClickHouse 中不保留自增语义", col.Name)) + } + return baseType, dedupeStrings(warnings) +} + +var clickHouseDecimalPattern = regexp.MustCompile(`^(decimal|numeric)\((\d+)\s*,\s*(\d+)\)$`) +var clickHouseStringArgsPattern = regexp.MustCompile(`^fixedstring\((\d+)\)$`) + +func mapClickHouseColumnToPGLike(col connection.ColumnDefinition) (string, []string) { + raw := strings.TrimSpace(col.Type) + lower := strings.ToLower(raw) + warnings := make([]string, 0) + if strings.HasPrefix(lower, "nullable(") && strings.HasSuffix(lower, ")") { + raw = strings.TrimSpace(raw[len("Nullable(") : len(raw)-1]) + lower = strings.ToLower(raw) + } + for { + if strings.HasPrefix(lower, "lowcardinality(") && strings.HasSuffix(lower, ")") { + raw = strings.TrimSpace(raw[len("LowCardinality(") : len(raw)-1]) + lower = strings.ToLower(raw) + continue + } + break + } + switch { + case lower == "bool" || lower == "boolean": + return "boolean", warnings + case lower == "int8": + return "smallint", warnings + case lower == "uint8": + return "smallint", warnings + case lower == "int16": + return "smallint", warnings + case lower == "uint16": + return "integer", warnings + case lower == "int32": + return "integer", warnings + case lower == "uint32": + return "bigint", warnings + case lower == "int64": + return "bigint", warnings + case lower == "uint64": + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已映射为 numeric(20,0) 以避免无符号溢出", col.Name, col.Type)) + return "numeric(20,0)", warnings + case lower == "float32": + return "real", warnings + case lower == "float64": + return "double precision", warnings + case lower == "date": + return "date", warnings + case strings.HasPrefix(lower, "datetime"): + return "timestamp", warnings + case lower == "string": + return "text", warnings + case lower == "uuid": + return "uuid", warnings + case lower == "json", strings.HasPrefix(lower, "map("), strings.HasPrefix(lower, "array("), strings.HasPrefix(lower, "tuple("), strings.HasPrefix(lower, "nested("): + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已降级为 jsonb", col.Name, col.Type)) + return "jsonb", warnings + case strings.HasPrefix(lower, "enum8("), strings.HasPrefix(lower, "enum16("): + warnings = append(warnings, fmt.Sprintf("字段 %s 枚举类型 %s 已降级为 varchar(255)", col.Name, col.Type)) + return "varchar(255)", warnings + case clickHouseDecimalPattern.MatchString(lower): + parts := clickHouseDecimalPattern.FindStringSubmatch(lower) + return fmt.Sprintf("numeric(%s,%s)", parts[2], parts[3]), warnings + case clickHouseStringArgsPattern.MatchString(lower): + parts := clickHouseStringArgsPattern.FindStringSubmatch(lower) + return fmt.Sprintf("varchar(%s)", parts[1]), warnings + default: + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 暂无专门 PG-like 映射,已降级为 text", col.Name, col.Type)) + return "text", warnings + } +} + +func mapClickHouseColumnToMySQL(col connection.ColumnDefinition) (string, []string) { + raw := strings.TrimSpace(col.Type) + lower := strings.ToLower(raw) + warnings := make([]string, 0) + nullable := false + if strings.HasPrefix(lower, "nullable(") && strings.HasSuffix(lower, ")") { + nullable = true + raw = strings.TrimSpace(raw[len("Nullable(") : len(raw)-1]) + lower = strings.ToLower(raw) + } + for { + if strings.HasPrefix(lower, "lowcardinality(") && strings.HasSuffix(lower, ")") { + raw = strings.TrimSpace(raw[len("LowCardinality(") : len(raw)-1]) + lower = strings.ToLower(raw) + continue + } + break + } + _ = nullable + switch { + case lower == "bool" || 
lower == "boolean" || lower == "uint8": + return "tinyint(1)", warnings + case lower == "int8": + return "tinyint", warnings + case lower == "uint16": + return "smallint unsigned", warnings + case lower == "int16": + return "smallint", warnings + case lower == "uint32": + return "int unsigned", warnings + case lower == "int32": + return "int", warnings + case lower == "uint64": + return "bigint unsigned", warnings + case lower == "int64": + return "bigint", warnings + case lower == "float32": + return "float", warnings + case lower == "float64": + return "double", warnings + case lower == "date": + return "date", warnings + case strings.HasPrefix(lower, "datetime"): + return "datetime", warnings + case lower == "string": + return "text", warnings + case lower == "uuid": + return "char(36)", warnings + case lower == "json", strings.HasPrefix(lower, "map("), strings.HasPrefix(lower, "array("), strings.HasPrefix(lower, "tuple("), strings.HasPrefix(lower, "nested("): + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已降级为 json", col.Name, col.Type)) + return "json", warnings + case clickHouseDecimalPattern.MatchString(lower): + parts := clickHouseDecimalPattern.FindStringSubmatch(lower) + return fmt.Sprintf("decimal(%s,%s)", parts[2], parts[3]), warnings + case clickHouseStringArgsPattern.MatchString(lower): + parts := clickHouseStringArgsPattern.FindStringSubmatch(lower) + return fmt.Sprintf("varchar(%s)", parts[1]), warnings + default: + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 暂无专门映射,已降级为 text", col.Name, col.Type)) + return "text", warnings + } +} diff --git a/internal/sync/migration_kernel_router.go b/internal/sync/migration_kernel_router.go new file mode 100644 index 0000000..aa88df2 --- /dev/null +++ b/internal/sync/migration_kernel_router.go @@ -0,0 +1,379 @@ +package sync + +import ( + "GoNavi-Wails/internal/connection" + "GoNavi-Wails/internal/db" + "fmt" + "strings" +) + +type genericLegacyPlanner struct{} + +type mysqlToPGLikePlanner struct{} + +type mysqlToClickHousePlanner struct{} + +type pgLikeToClickHousePlanner struct{} + +type clickHouseToMySQLPlanner struct{} + +type clickHouseToPGLikePlanner struct{} + +type mysqlToMongoPlanner struct{} + +type pgLikeToMongoPlanner struct{} + +type clickHouseToMongoPlanner struct{} + +type tdengineToMongoPlanner struct{} + +type mongoToMySQLPlanner struct{} + +type mongoToPGLikePlanner struct{} + +type pgLikeToMySQLPlanner struct{} + +type tdengineToMySQLPlanner struct{} + +type tdengineToPGLikePlanner struct{} + +type mongoToRelationalPlanner struct{} + +func buildSchemaMigrationPlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + ctx := MigrationBuildContext{ + Config: config, + TableName: tableName, + SourceDB: sourceDB, + TargetDB: targetDB, + } + planner := resolveMigrationPlanner(ctx) + if planner == nil { + return buildSchemaMigrationPlanLegacy(config, tableName, sourceDB, targetDB) + } + return planner.BuildPlan(ctx) +} + +func resolveMigrationPlanner(ctx MigrationBuildContext) MigrationPlanner { + planners := []MigrationPlanner{ + mysqlToPGLikePlanner{}, + mySQLLikeToTDenginePlanner{}, + pgLikeToTDenginePlanner{}, + clickHouseToTDenginePlanner{}, + tdengineToTDenginePlanner{}, + tdengineToPGLikePlanner{}, + tdengineToMySQLPlanner{}, + mysqlToClickHousePlanner{}, + pgLikeToClickHousePlanner{}, + clickHouseToMySQLPlanner{}, + clickHouseToPGLikePlanner{}, + mysqlToMongoPlanner{}, + 
pgLikeToMongoPlanner{}, + clickHouseToMongoPlanner{}, + tdengineToMongoPlanner{}, + mongoToMySQLPlanner{}, + mongoToPGLikePlanner{}, + pgLikeToMySQLPlanner{}, + mongoToRelationalPlanner{}, + genericLegacyPlanner{}, + } + bestLevel := MigrationSupportLevelUnsupported + var bestPlanner MigrationPlanner + for _, planner := range planners { + level := planner.SupportLevel(ctx) + if migrationSupportRank(level) > migrationSupportRank(bestLevel) { + bestLevel = level + bestPlanner = planner + } + } + return bestPlanner +} + +func migrationSupportRank(level MigrationSupportLevel) int { + switch level { + case MigrationSupportLevelFull: + return 4 + case MigrationSupportLevelPlanned: + return 3 + case MigrationSupportLevelPartial: + return 2 + default: + return 1 + } +} + +func isMySQLLikeType(dbType string) bool { + return isMySQLLikeWritableTargetType(dbType) +} + +func classifyMigrationDataModel(dbType string) MigrationDataModel { + switch normalizeMigrationDBType(dbType) { + case "mysql", "mariadb", "postgres", "kingbase", "highgo", "vastbase", "oracle", "sqlserver", "dameng", "sqlite", "duckdb": + return MigrationDataModelRelational + case "mongodb": + return MigrationDataModelDocument + case "clickhouse", "diros", "sphinx": + return MigrationDataModelColumnar + case "tdengine": + return MigrationDataModelTimeSeries + case "redis": + return MigrationDataModelKeyValue + default: + return MigrationDataModelCustom + } +} + +func (genericLegacyPlanner) Name() string { return "generic-legacy-planner" } + +func (genericLegacyPlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel { + _ = ctx + return MigrationSupportLevelPartial +} + +func (genericLegacyPlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + return buildSchemaMigrationPlanLegacy(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB) +} + +func (mysqlToPGLikePlanner) Name() string { return "mysql-pglike-planner" } + +func (mysqlToPGLikePlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel { + sourceType := resolveMigrationDBType(ctx.Config.SourceConfig) + targetType := resolveMigrationDBType(ctx.Config.TargetConfig) + if isMySQLLikeSourceType(sourceType) && isPGLikeTarget(targetType) { + return MigrationSupportLevelFull + } + return MigrationSupportLevelUnsupported +} + +func (mysqlToPGLikePlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + return buildMySQLToPGLikePlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB) +} + +func (tdengineToMySQLPlanner) Name() string { return "tdengine-mysql-planner" } + +func (tdengineToMySQLPlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel { + sourceType := resolveMigrationDBType(ctx.Config.SourceConfig) + targetType := resolveMigrationDBType(ctx.Config.TargetConfig) + if sourceType == "tdengine" && isMySQLLikeWritableTargetType(targetType) { + return MigrationSupportLevelFull + } + return MigrationSupportLevelUnsupported +} + +func (tdengineToMySQLPlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + return buildTDengineToMySQLPlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB) +} + +func (tdengineToPGLikePlanner) Name() string { return "tdengine-pglike-planner" } + +func (tdengineToPGLikePlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel { + sourceType := 
resolveMigrationDBType(ctx.Config.SourceConfig) + targetType := resolveMigrationDBType(ctx.Config.TargetConfig) + if sourceType == "tdengine" && isPGLikeTarget(targetType) { + return MigrationSupportLevelFull + } + return MigrationSupportLevelUnsupported +} + +func (tdengineToPGLikePlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + return buildTDengineToPGLikePlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB) +} + +func (mysqlToClickHousePlanner) Name() string { return "mysql-clickhouse-planner" } + +func (mysqlToClickHousePlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel { + sourceType := resolveMigrationDBType(ctx.Config.SourceConfig) + targetType := resolveMigrationDBType(ctx.Config.TargetConfig) + if isMySQLCoreType(sourceType) && targetType == "clickhouse" { + return MigrationSupportLevelFull + } + return MigrationSupportLevelUnsupported +} + +func (mysqlToClickHousePlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + return buildMySQLToClickHousePlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB) +} + +func (pgLikeToClickHousePlanner) Name() string { return "pglike-clickhouse-planner" } + +func (pgLikeToClickHousePlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel { + sourceType := resolveMigrationDBType(ctx.Config.SourceConfig) + targetType := resolveMigrationDBType(ctx.Config.TargetConfig) + if isPGLikeSource(sourceType) && targetType == "clickhouse" { + return MigrationSupportLevelFull + } + return MigrationSupportLevelUnsupported +} + +func (pgLikeToClickHousePlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + return buildPGLikeToClickHousePlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB) +} + +func (clickHouseToMySQLPlanner) Name() string { return "clickhouse-mysql-planner" } + +func (clickHouseToMySQLPlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel { + sourceType := resolveMigrationDBType(ctx.Config.SourceConfig) + targetType := resolveMigrationDBType(ctx.Config.TargetConfig) + if sourceType == "clickhouse" && isMySQLLikeWritableTargetType(targetType) { + return MigrationSupportLevelFull + } + return MigrationSupportLevelUnsupported +} + +func (clickHouseToMySQLPlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + return buildClickHouseToMySQLPlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB) +} + +func (clickHouseToPGLikePlanner) Name() string { return "clickhouse-pglike-planner" } + +func (clickHouseToPGLikePlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel { + sourceType := resolveMigrationDBType(ctx.Config.SourceConfig) + targetType := resolveMigrationDBType(ctx.Config.TargetConfig) + if sourceType == "clickhouse" && isPGLikeTarget(targetType) { + return MigrationSupportLevelFull + } + return MigrationSupportLevelUnsupported +} + +func (clickHouseToPGLikePlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + return buildClickHouseToPGLikePlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB) +} + +func (mysqlToMongoPlanner) Name() string { return "mysql-mongo-planner" } + +func (mysqlToMongoPlanner) SupportLevel(ctx 
MigrationBuildContext) MigrationSupportLevel { + sourceType := resolveMigrationDBType(ctx.Config.SourceConfig) + targetType := resolveMigrationDBType(ctx.Config.TargetConfig) + if isMySQLCoreType(sourceType) && targetType == "mongodb" { + return MigrationSupportLevelFull + } + return MigrationSupportLevelUnsupported +} + +func (mysqlToMongoPlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + return buildMySQLToMongoPlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB) +} + +func (pgLikeToMongoPlanner) Name() string { return "pglike-mongo-planner" } + +func (pgLikeToMongoPlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel { + sourceType := resolveMigrationDBType(ctx.Config.SourceConfig) + targetType := resolveMigrationDBType(ctx.Config.TargetConfig) + if isPGLikeSource(sourceType) && targetType == "mongodb" { + return MigrationSupportLevelFull + } + return MigrationSupportLevelUnsupported +} + +func (pgLikeToMongoPlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + return buildPGLikeToMongoPlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB) +} + +func (clickHouseToMongoPlanner) Name() string { return "clickhouse-mongo-planner" } + +func (clickHouseToMongoPlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel { + sourceType := resolveMigrationDBType(ctx.Config.SourceConfig) + targetType := resolveMigrationDBType(ctx.Config.TargetConfig) + if sourceType == "clickhouse" && targetType == "mongodb" { + return MigrationSupportLevelFull + } + return MigrationSupportLevelUnsupported +} + +func (clickHouseToMongoPlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + return buildClickHouseToMongoPlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB) +} + +func (tdengineToMongoPlanner) Name() string { return "tdengine-mongo-planner" } + +func (tdengineToMongoPlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel { + sourceType := resolveMigrationDBType(ctx.Config.SourceConfig) + targetType := resolveMigrationDBType(ctx.Config.TargetConfig) + if sourceType == "tdengine" && targetType == "mongodb" { + return MigrationSupportLevelFull + } + return MigrationSupportLevelUnsupported +} + +func (tdengineToMongoPlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + return buildTDengineToMongoPlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB) +} + +func (mongoToMySQLPlanner) Name() string { return "mongo-mysql-planner" } + +func (mongoToMySQLPlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel { + sourceType := resolveMigrationDBType(ctx.Config.SourceConfig) + targetType := resolveMigrationDBType(ctx.Config.TargetConfig) + if sourceType == "mongodb" && isMySQLLikeWritableTargetType(targetType) { + return MigrationSupportLevelFull + } + return MigrationSupportLevelUnsupported +} + +func (mongoToMySQLPlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + return buildMongoToMySQLPlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB) +} + +func (mongoToPGLikePlanner) Name() string { return "mongo-pglike-planner" } + +func (mongoToPGLikePlanner) SupportLevel(ctx MigrationBuildContext) 
MigrationSupportLevel { + sourceType := resolveMigrationDBType(ctx.Config.SourceConfig) + targetType := resolveMigrationDBType(ctx.Config.TargetConfig) + if sourceType == "mongodb" && isPGLikeTarget(targetType) { + return MigrationSupportLevelFull + } + return MigrationSupportLevelUnsupported +} + +func (mongoToPGLikePlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + return buildMongoToPGLikePlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB) +} + +func (pgLikeToMySQLPlanner) Name() string { return "pglike-mysql-planner" } + +func (pgLikeToMySQLPlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel { + sourceType := resolveMigrationDBType(ctx.Config.SourceConfig) + targetType := resolveMigrationDBType(ctx.Config.TargetConfig) + if isPGLikeSource(sourceType) && isMySQLLikeWritableTargetType(targetType) { + return MigrationSupportLevelFull + } + return MigrationSupportLevelUnsupported +} + +func (pgLikeToMySQLPlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + return buildPGLikeToMySQLPlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB) +} + +func (mongoToRelationalPlanner) Name() string { return "mongo-relational-inference-planner" } + +func (mongoToRelationalPlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel { + sourceType := resolveMigrationDBType(ctx.Config.SourceConfig) + targetType := resolveMigrationDBType(ctx.Config.TargetConfig) + if !shouldUseSchemaInference(sourceType, targetType) { + return MigrationSupportLevelUnsupported + } + return MigrationSupportLevelPlanned +} + +func (mongoToRelationalPlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + sourceType := resolveMigrationDBType(ctx.Config.SourceConfig) + targetType := resolveMigrationDBType(ctx.Config.TargetConfig) + inference, err := inferSchemaForPair(sourceType, targetType, ctx.TableName) + if err != nil { + return SchemaMigrationPlan{}, nil, nil, err + } + plan := SchemaMigrationPlan{} + plan.SourceSchema, plan.SourceTable = normalizeSchemaAndTable(sourceType, ctx.Config.SourceConfig.Database, ctx.TableName) + plan.TargetSchema, plan.TargetTable = normalizeSchemaAndTable(targetType, ctx.Config.TargetConfig.Database, ctx.TableName) + plan.SourceQueryTable = qualifiedNameForQuery(sourceType, plan.SourceSchema, plan.SourceTable, ctx.TableName) + plan.TargetQueryTable = qualifiedNameForQuery(targetType, plan.TargetSchema, plan.TargetTable, ctx.TableName) + plan.PlannedAction = "当前库对已进入迁移内核规划阶段,等待 schema 推断与目标方言生成器落地" + for _, issue := range inference.Issues { + msg := strings.TrimSpace(issue.Message) + if msg == "" { + continue + } + plan.Warnings = append(plan.Warnings, msg) + } + plan.Warnings = append(plan.Warnings, fmt.Sprintf("迁移对象=%s,目标类型=%s,当前仅提供规划入口,暂不执行自动建表", inference.Object.Kind, targetType)) + return dedupeSchemaMigrationPlan(plan), nil, nil, nil +} diff --git a/internal/sync/migration_kernel_router_test.go b/internal/sync/migration_kernel_router_test.go new file mode 100644 index 0000000..71e84e7 --- /dev/null +++ b/internal/sync/migration_kernel_router_test.go @@ -0,0 +1,447 @@ +package sync + +import ( + "GoNavi-Wails/internal/connection" + "strings" + "testing" +) + +func TestClassifyMigrationDataModel(t *testing.T) { + t.Parallel() + + cases := map[string]MigrationDataModel{ + "mysql": 
MigrationDataModelRelational, + "postgres": MigrationDataModelRelational, + "kingbase": MigrationDataModelRelational, + "mongodb": MigrationDataModelDocument, + "clickhouse": MigrationDataModelColumnar, + "tdengine": MigrationDataModelTimeSeries, + "redis": MigrationDataModelKeyValue, + "custom": MigrationDataModelCustom, + } + + for input, want := range cases { + input, want := input, want + t.Run(input, func(t *testing.T) { + t.Parallel() + got := classifyMigrationDataModel(input) + if got != want { + t.Fatalf("unexpected data model, input=%s got=%s want=%s", input, got, want) + } + }) + } +} + +func TestResolveMigrationPlanner_PrefersMySQLKingbasePlanner(t *testing.T) { + t.Parallel() + + planner := resolveMigrationPlanner(MigrationBuildContext{ + Config: SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "mysql"}, + TargetConfig: connection.ConnectionConfig{Type: "kingbase"}, + }, + }) + if planner == nil { + t.Fatalf("expected planner") + } + if planner.Name() != "mysql-pglike-planner" { + t.Fatalf("unexpected planner: %s", planner.Name()) + } +} + +func TestResolveMigrationPlanner_UsesSchemaInferencePlannerForMongoToMySQL(t *testing.T) { + t.Parallel() + + planner := resolveMigrationPlanner(MigrationBuildContext{ + Config: SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "mongodb"}, + TargetConfig: connection.ConnectionConfig{Type: "mysql"}, + }, + }) + if planner == nil { + t.Fatalf("expected planner") + } + if planner.Name() != "mongo-mysql-planner" { + t.Fatalf("unexpected planner: %s", planner.Name()) + } +} + +func TestInferSchemaForPair_MongoToMySQLReturnsPlannedWarning(t *testing.T) { + t.Parallel() + + result, err := inferSchemaForPair("mongodb", "mysql", "users") + if err != nil { + t.Fatalf("inferSchemaForPair returned error: %v", err) + } + if !result.NeedsReview { + t.Fatalf("expected needs review") + } + if result.Object.Name != "users" { + t.Fatalf("unexpected object name: %s", result.Object.Name) + } + if len(result.Issues) == 0 || !strings.Contains(result.Issues[0].Message, "schema 推断") { + t.Fatalf("unexpected issues: %+v", result.Issues) + } +} + +func TestResolveMigrationPlanner_UsesPGLikeMySQLPlannerForKingbaseToMySQL(t *testing.T) { + t.Parallel() + + planner := resolveMigrationPlanner(MigrationBuildContext{ + Config: SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "kingbase"}, + TargetConfig: connection.ConnectionConfig{Type: "mysql"}, + }, + }) + if planner == nil { + t.Fatalf("expected planner") + } + if planner.Name() != "pglike-mysql-planner" { + t.Fatalf("unexpected planner: %s", planner.Name()) + } +} + +func TestResolveMigrationPlanner_UsesMySQLPGLikePlannerForMySQLToPostgres(t *testing.T) { + t.Parallel() + + planner := resolveMigrationPlanner(MigrationBuildContext{ + Config: SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "mysql"}, + TargetConfig: connection.ConnectionConfig{Type: "postgres"}, + }, + }) + if planner == nil { + t.Fatalf("expected planner") + } + if planner.Name() != "mysql-pglike-planner" { + t.Fatalf("unexpected planner: %s", planner.Name()) + } +} + +func TestResolveMigrationPlanner_UsesMySQLClickHousePlanner(t *testing.T) { + t.Parallel() + + planner := resolveMigrationPlanner(MigrationBuildContext{ + Config: SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "mysql"}, + TargetConfig: connection.ConnectionConfig{Type: "clickhouse"}, + }, + }) + if planner == nil { + t.Fatalf("expected planner") + } + if planner.Name() != "mysql-clickhouse-planner" { + t.Fatalf("unexpected 
planner: %s", planner.Name()) + } +} + +func TestResolveMigrationPlanner_UsesClickHouseMySQLPlanner(t *testing.T) { + t.Parallel() + + planner := resolveMigrationPlanner(MigrationBuildContext{ + Config: SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "clickhouse"}, + TargetConfig: connection.ConnectionConfig{Type: "mysql"}, + }, + }) + if planner == nil { + t.Fatalf("expected planner") + } + if planner.Name() != "clickhouse-mysql-planner" { + t.Fatalf("unexpected planner: %s", planner.Name()) + } +} + +func TestResolveMigrationPlanner_UsesMySQLMongoPlanner(t *testing.T) { + t.Parallel() + + planner := resolveMigrationPlanner(MigrationBuildContext{ + Config: SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "mysql"}, + TargetConfig: connection.ConnectionConfig{Type: "mongodb"}, + }, + }) + if planner == nil || planner.Name() != "mysql-mongo-planner" { + t.Fatalf("unexpected planner: %v", planner) + } +} + +func TestResolveMigrationPlanner_UsesMongoMySQLPlanner(t *testing.T) { + t.Parallel() + + planner := resolveMigrationPlanner(MigrationBuildContext{ + Config: SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "mongodb"}, + TargetConfig: connection.ConnectionConfig{Type: "mysql"}, + }, + }) + if planner == nil || planner.Name() != "mongo-mysql-planner" { + t.Fatalf("unexpected planner: %v", planner) + } +} + +func TestResolveMigrationPlanner_UsesMongoPGLikePlanner(t *testing.T) { + t.Parallel() + + planner := resolveMigrationPlanner(MigrationBuildContext{ + Config: SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "mongodb"}, + TargetConfig: connection.ConnectionConfig{Type: "postgres"}, + }, + }) + if planner == nil || planner.Name() != "mongo-pglike-planner" { + t.Fatalf("unexpected planner: %v", planner) + } +} + +func TestResolveMigrationPlanner_UsesPGLikeMongoPlanner(t *testing.T) { + t.Parallel() + + planner := resolveMigrationPlanner(MigrationBuildContext{ + Config: SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "postgres"}, + TargetConfig: connection.ConnectionConfig{Type: "mongodb"}, + }, + }) + if planner == nil || planner.Name() != "pglike-mongo-planner" { + t.Fatalf("unexpected planner: %v", planner) + } +} + +func TestResolveMigrationPlanner_UsesClickHouseMongoPlanner(t *testing.T) { + t.Parallel() + + planner := resolveMigrationPlanner(MigrationBuildContext{ + Config: SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "clickhouse"}, + TargetConfig: connection.ConnectionConfig{Type: "mongodb"}, + }, + }) + if planner == nil || planner.Name() != "clickhouse-mongo-planner" { + t.Fatalf("unexpected planner: %v", planner) + } +} + +func TestResolveMigrationPlanner_UsesTDengineMongoPlanner(t *testing.T) { + t.Parallel() + + planner := resolveMigrationPlanner(MigrationBuildContext{ + Config: SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "tdengine"}, + TargetConfig: connection.ConnectionConfig{Type: "mongodb"}, + }, + }) + if planner == nil || planner.Name() != "tdengine-mongo-planner" { + t.Fatalf("unexpected planner: %v", planner) + } +} + +func TestResolveMigrationPlanner_UsesMySQLPGLikePlannerForDirosToPostgres(t *testing.T) { + t.Parallel() + + planner := resolveMigrationPlanner(MigrationBuildContext{ + Config: SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "diros"}, + TargetConfig: connection.ConnectionConfig{Type: "postgres"}, + }, + }) + if planner == nil || planner.Name() != "mysql-pglike-planner" { + t.Fatalf("unexpected planner: %v", planner) + } +} + +func 
TestResolveMigrationPlanner_UsesPGLikeMySQLPlannerForPostgresToDiros(t *testing.T) { + t.Parallel() + + planner := resolveMigrationPlanner(MigrationBuildContext{ + Config: SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "postgres"}, + TargetConfig: connection.ConnectionConfig{Type: "diros"}, + }, + }) + if planner == nil || planner.Name() != "pglike-mysql-planner" { + t.Fatalf("unexpected planner: %v", planner) + } +} + +func TestResolveMigrationPlanner_UsesMySQLPGLikePlannerForMySQLToDuckDB(t *testing.T) { + t.Parallel() + + planner := resolveMigrationPlanner(MigrationBuildContext{ + Config: SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "mysql"}, + TargetConfig: connection.ConnectionConfig{Type: "duckdb"}, + }, + }) + if planner == nil || planner.Name() != "mysql-pglike-planner" { + t.Fatalf("unexpected planner: %v", planner) + } +} + +func TestResolveMigrationPlanner_UsesPGLikeClickHousePlanner(t *testing.T) { + t.Parallel() + + planner := resolveMigrationPlanner(MigrationBuildContext{ + Config: SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "postgres"}, + TargetConfig: connection.ConnectionConfig{Type: "clickhouse"}, + }, + }) + if planner == nil || planner.Name() != "pglike-clickhouse-planner" { + t.Fatalf("unexpected planner: %v", planner) + } +} + +func TestResolveMigrationPlanner_UsesPGLikeMySQLPlannerForDuckDBToMySQL(t *testing.T) { + t.Parallel() + + planner := resolveMigrationPlanner(MigrationBuildContext{ + Config: SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "duckdb"}, + TargetConfig: connection.ConnectionConfig{Type: "mysql"}, + }, + }) + if planner == nil || planner.Name() != "pglike-mysql-planner" { + t.Fatalf("unexpected planner: %v", planner) + } +} + +func TestResolveMigrationPlanner_UsesMySQLPGLikePlannerForSphinxToPostgres(t *testing.T) { + t.Parallel() + + planner := resolveMigrationPlanner(MigrationBuildContext{ + Config: SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "sphinx"}, + TargetConfig: connection.ConnectionConfig{Type: "postgres"}, + }, + }) + if planner == nil || planner.Name() != "mysql-pglike-planner" { + t.Fatalf("unexpected planner: %v", planner) + } +} + +func TestResolveMigrationPlanner_UsesPGLikeMySQLPlannerForCustomKingbaseToMySQL(t *testing.T) { + t.Parallel() + + planner := resolveMigrationPlanner(MigrationBuildContext{ + Config: SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "custom", Driver: "kingbase8"}, + TargetConfig: connection.ConnectionConfig{Type: "mysql"}, + }, + }) + if planner == nil || planner.Name() != "pglike-mysql-planner" { + t.Fatalf("unexpected planner: %v", planner) + } +} + +func TestResolveMigrationPlanner_UsesMySQLPGLikePlannerForMySQLToCustomPostgres(t *testing.T) { + t.Parallel() + + planner := resolveMigrationPlanner(MigrationBuildContext{ + Config: SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "mysql"}, + TargetConfig: connection.ConnectionConfig{Type: "custom", Driver: "postgresql"}, + }, + }) + if planner == nil || planner.Name() != "mysql-pglike-planner" { + t.Fatalf("unexpected planner: %v", planner) + } +} + +func TestResolveMigrationPlanner_UsesTDengineMySQLPlanner(t *testing.T) { + t.Parallel() + + planner := resolveMigrationPlanner(MigrationBuildContext{ + Config: SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "tdengine"}, + TargetConfig: connection.ConnectionConfig{Type: "mysql"}, + }, + }) + if planner == nil || planner.Name() != "tdengine-mysql-planner" { + t.Fatalf("unexpected planner: 
%v", planner) + } +} + +func TestResolveMigrationPlanner_UsesTDenginePGLikePlanner(t *testing.T) { + t.Parallel() + + planner := resolveMigrationPlanner(MigrationBuildContext{ + Config: SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "tdengine"}, + TargetConfig: connection.ConnectionConfig{Type: "kingbase"}, + }, + }) + if planner == nil || planner.Name() != "tdengine-pglike-planner" { + t.Fatalf("unexpected planner: %v", planner) + } +} + +func TestResolveMigrationPlanner_UsesMySQLLikeTDenginePlanner(t *testing.T) { + t.Parallel() + + planner := resolveMigrationPlanner(MigrationBuildContext{ + Config: SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "mysql"}, + TargetConfig: connection.ConnectionConfig{Type: "tdengine"}, + }, + }) + if planner == nil || planner.Name() != "mysqllike-tdengine-planner" { + t.Fatalf("unexpected planner: %v", planner) + } +} + +func TestResolveMigrationPlanner_UsesPGLikeTDenginePlanner(t *testing.T) { + t.Parallel() + + planner := resolveMigrationPlanner(MigrationBuildContext{ + Config: SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "postgres"}, + TargetConfig: connection.ConnectionConfig{Type: "tdengine"}, + }, + }) + if planner == nil || planner.Name() != "pglike-tdengine-planner" { + t.Fatalf("unexpected planner: %v", planner) + } +} + +func TestResolveMigrationPlanner_UsesClickHouseTDenginePlanner(t *testing.T) { + t.Parallel() + + planner := resolveMigrationPlanner(MigrationBuildContext{ + Config: SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "clickhouse"}, + TargetConfig: connection.ConnectionConfig{Type: "tdengine"}, + }, + }) + if planner == nil || planner.Name() != "clickhouse-tdengine-planner" { + t.Fatalf("unexpected planner: %v", planner) + } +} + +func TestResolveMigrationPlanner_UsesClickHousePGLikePlanner(t *testing.T) { + t.Parallel() + + planner := resolveMigrationPlanner(MigrationBuildContext{ + Config: SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "clickhouse"}, + TargetConfig: connection.ConnectionConfig{Type: "postgres"}, + }, + }) + if planner == nil || planner.Name() != "clickhouse-pglike-planner" { + t.Fatalf("unexpected planner: %v", planner) + } +} + +func TestResolveMigrationPlanner_UsesTDengineTDenginePlanner(t *testing.T) { + t.Parallel() + + planner := resolveMigrationPlanner(MigrationBuildContext{ + Config: SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "tdengine"}, + TargetConfig: connection.ConnectionConfig{Type: "tdengine"}, + }, + }) + if planner == nil || planner.Name() != "tdengine-tdengine-planner" { + t.Fatalf("unexpected planner: %v", planner) + } +} diff --git a/internal/sync/migration_kernel_types.go b/internal/sync/migration_kernel_types.go new file mode 100644 index 0000000..f74fdcb --- /dev/null +++ b/internal/sync/migration_kernel_types.go @@ -0,0 +1,104 @@ +package sync + +import ( + "GoNavi-Wails/internal/connection" + "GoNavi-Wails/internal/db" +) + +type MigrationDataModel string + +const ( + MigrationDataModelRelational MigrationDataModel = "relational" + MigrationDataModelDocument MigrationDataModel = "document" + MigrationDataModelColumnar MigrationDataModel = "columnar" + MigrationDataModelTimeSeries MigrationDataModel = "timeseries" + MigrationDataModelKeyValue MigrationDataModel = "keyvalue" + MigrationDataModelCustom MigrationDataModel = "custom" +) + +type MigrationObjectKind string + +const ( + MigrationObjectKindTable MigrationObjectKind = "table" + MigrationObjectKindCollection MigrationObjectKind = "collection" 
+ MigrationObjectKindKeyspace MigrationObjectKind = "keyspace" +) + +type MigrationSupportLevel string + +const ( + MigrationSupportLevelFull MigrationSupportLevel = "full" + MigrationSupportLevelPartial MigrationSupportLevel = "partial" + MigrationSupportLevelPlanned MigrationSupportLevel = "planned" + MigrationSupportLevelUnsupported MigrationSupportLevel = "unsupported" +) + +type CanonicalFieldSpec struct { + Name string + SourceType string + CanonicalType string + Nullable bool + DefaultValue *string + AutoIncrement bool + Comment string + NestedPath string + Confidence float64 +} + +type CanonicalIndexSpec struct { + Name string + Kind string + Columns []string + Expression string + PrefixLength int + Supported bool + DegradeStrategy string + Unique bool +} + +type CanonicalConstraintSpec struct { + Name string + Kind string + Columns []string + RefName string +} + +type CanonicalObjectSpec struct { + Name string + Schema string + Kind MigrationObjectKind + Fields []CanonicalFieldSpec + PrimaryKey []string + Indexes []CanonicalIndexSpec + Constraints []CanonicalConstraintSpec + Comments []string + SourceHints map[string]string +} + +type SchemaInferenceIssue struct { + Field string + Level string + Message string + Resolution string +} + +type SchemaInferenceResult struct { + Object CanonicalObjectSpec + Issues []SchemaInferenceIssue + SampleSize int + Confidence float64 + NeedsReview bool +} + +type MigrationBuildContext struct { + Config SyncConfig + TableName string + SourceDB db.Database + TargetDB db.Database +} + +type MigrationPlanner interface { + Name() string + SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel + BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) +} diff --git a/internal/sync/migration_mongodb.go b/internal/sync/migration_mongodb.go new file mode 100644 index 0000000..23a97c4 --- /dev/null +++ b/internal/sync/migration_mongodb.go @@ -0,0 +1,603 @@ +package sync + +import ( + "GoNavi-Wails/internal/connection" + "GoNavi-Wails/internal/db" + "encoding/json" + "fmt" + "sort" + "strings" + "time" +) + +func buildMySQLToMongoPlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + return buildTabularToMongoPlan(config, tableName, sourceDB, targetDB) +} + +func buildPGLikeToMongoPlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + return buildTabularToMongoPlan(config, tableName, sourceDB, targetDB) +} + +func buildClickHouseToMongoPlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + return buildTabularToMongoPlan(config, tableName, sourceDB, targetDB) +} + +func buildTDengineToMongoPlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + return buildTabularToMongoPlan(config, tableName, sourceDB, targetDB) +} + +func buildTabularToMongoPlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + plan := SchemaMigrationPlan{} + sourceType := 
resolveMigrationDBType(config.SourceConfig) + targetType := resolveMigrationDBType(config.TargetConfig) + plan.SourceSchema, plan.SourceTable = normalizeSchemaAndTable(sourceType, config.SourceConfig.Database, tableName) + plan.TargetSchema, plan.TargetTable = normalizeSchemaAndTable(targetType, config.TargetConfig.Database, tableName) + plan.SourceQueryTable = qualifiedNameForQuery(sourceType, plan.SourceSchema, plan.SourceTable, tableName) + plan.TargetQueryTable = qualifiedNameForQuery(targetType, plan.TargetSchema, plan.TargetTable, tableName) + plan.PlannedAction = "使用已有目标集合导入" + + sourceCols, sourceExists, err := inspectTableColumns(sourceDB, plan.SourceSchema, plan.SourceTable) + if err != nil { + return plan, nil, nil, fmt.Errorf("获取源表字段失败: %w", err) + } + if !sourceExists { + return plan, nil, nil, fmt.Errorf("源表不存在或无列定义: %s", tableName) + } + + targetExists, err := inspectMongoCollection(targetDB, plan.TargetSchema, plan.TargetTable) + if err != nil { + return plan, sourceCols, nil, fmt.Errorf("检查目标集合失败: %w", err) + } + plan.TargetTableExists = targetExists + + strategy := normalizeTargetTableStrategy(config.TargetTableStrategy) + if targetExists { + plan.Warnings = append(plan.Warnings, "MongoDB 为弱 schema 目标,字段结构以写入文档为准,不执行目标列校验") + return dedupeSchemaMigrationPlan(plan), sourceCols, nil, nil + } + + switch strategy { + case "existing_only": + plan.PlannedAction = "目标集合不存在,需先手工创建" + plan.Warnings = append(plan.Warnings, "当前策略要求目标集合已存在,执行时不会自动创建") + return dedupeSchemaMigrationPlan(plan), sourceCols, nil, nil + case "smart", "auto_create_if_missing": + plan.AutoCreate = true + plan.PlannedAction = "目标集合不存在,将自动创建集合后导入" + createCmd, err := buildMongoCreateCollectionCommand(plan.TargetTable) + if err != nil { + return plan, sourceCols, nil, err + } + plan.PreDataSQL = append(plan.PreDataSQL, createCmd) + if config.CreateIndexes { + indexCmds, warnings, unsupported, created, skipped, err := buildMongoIndexCommands(sourceDB, plan.SourceSchema, plan.SourceTable, plan.TargetTable) + if err != nil { + plan.Warnings = append(plan.Warnings, fmt.Sprintf("读取源表索引失败,已跳过索引迁移:%v", err)) + } else { + plan.PostDataSQL = append(plan.PostDataSQL, indexCmds...) + plan.Warnings = append(plan.Warnings, warnings...) + plan.UnsupportedObjects = append(plan.UnsupportedObjects, unsupported...) + plan.IndexesToCreate = created + plan.IndexesSkipped = skipped + } + } + return dedupeSchemaMigrationPlan(plan), sourceCols, nil, nil + default: + return dedupeSchemaMigrationPlan(plan), sourceCols, nil, nil + } +} + +func buildMongoToMySQLPlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + plan := SchemaMigrationPlan{} + plan.SourceSchema, plan.SourceTable = normalizeSchemaAndTable(config.SourceConfig.Type, config.SourceConfig.Database, tableName) + plan.TargetSchema, plan.TargetTable = normalizeSchemaAndTable(config.TargetConfig.Type, config.TargetConfig.Database, tableName) + plan.SourceQueryTable = qualifiedNameForQuery(config.SourceConfig.Type, plan.SourceSchema, plan.SourceTable, tableName) + plan.TargetQueryTable = qualifiedNameForQuery(config.TargetConfig.Type, plan.TargetSchema, plan.TargetTable, tableName) + plan.PlannedAction = "使用已有目标表导入" + + sourceCols, warnings, err := inferMongoCollectionColumns(sourceDB, plan.SourceTable) + if err != nil { + return plan, nil, nil, err + } + plan.Warnings = append(plan.Warnings, warnings...) 
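+	// Columns were inferred from a bounded sample of documents (see
+	// inferMongoCollectionColumns); with no usable fields there is nothing to
+	// migrate, so fail fast instead of creating an empty table.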
+ if len(sourceCols) == 0 { + return plan, nil, nil, fmt.Errorf("源集合未推断出可迁移字段: %s", tableName) + } + + targetCols, targetExists, err := inspectTableColumns(targetDB, plan.TargetSchema, plan.TargetTable) + if err != nil { + return plan, sourceCols, nil, fmt.Errorf("获取目标表字段失败: %w", err) + } + plan.TargetTableExists = targetExists + + strategy := normalizeTargetTableStrategy(config.TargetTableStrategy) + if targetExists { + missing := diffMissingColumnNames(sourceCols, targetCols) + if len(missing) > 0 { + plan.Warnings = append(plan.Warnings, fmt.Sprintf("目标表缺失字段 %d 个:%s", len(missing), strings.Join(missing, ", "))) + } + if config.AutoAddColumns { + addSQL, addWarnings := buildMongoToMySQLAddColumnSQL(plan.TargetQueryTable, sourceCols, targetCols) + plan.PreDataSQL = append(plan.PreDataSQL, addSQL...) + plan.Warnings = append(plan.Warnings, addWarnings...) + if len(addSQL) > 0 { + plan.PlannedAction = fmt.Sprintf("补齐缺失字段(%d)后导入", len(addSQL)) + } + } + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + } + + switch strategy { + case "existing_only": + plan.PlannedAction = "目标表不存在,需先手工创建" + plan.Warnings = append(plan.Warnings, "当前策略要求目标表已存在,执行时不会自动建表") + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + case "smart", "auto_create_if_missing": + plan.AutoCreate = true + plan.PlannedAction = "目标表不存在,将自动建表后导入" + createSQL, postSQL, moreWarnings, unsupported, idxCreate, idxSkip, err := buildMongoToMySQLCreateTablePlan(config, plan.TargetQueryTable, sourceCols, sourceDB, plan.SourceSchema, plan.SourceTable) + if err != nil { + return plan, sourceCols, targetCols, err + } + plan.CreateTableSQL = createSQL + plan.PostDataSQL = append(plan.PostDataSQL, postSQL...) + plan.Warnings = append(plan.Warnings, moreWarnings...) + plan.UnsupportedObjects = append(plan.UnsupportedObjects, unsupported...) 
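+	// idxCreate/idxSkip only report how many index statements were planned versus skipped;
+	// the DDL itself has been queued in PostDataSQL, which is intended to run after the data phase.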
+ plan.IndexesToCreate = idxCreate + plan.IndexesSkipped = idxSkip + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + default: + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + } +} + +func inspectMongoCollection(database db.Database, dbName, collection string) (bool, error) { + items, err := database.GetTables(dbName) + if err != nil { + return false, err + } + target := strings.TrimSpace(collection) + for _, item := range items { + if strings.EqualFold(strings.TrimSpace(item), target) { + return true, nil + } + } + return false, nil +} + +func buildMongoCreateCollectionCommand(collection string) (string, error) { + cmd := map[string]interface{}{"create": strings.TrimSpace(collection)} + data, err := json.Marshal(cmd) + if err != nil { + return "", err + } + return string(data), nil +} + +func buildMongoIndexCommands(sourceDB db.Database, dbName, tableName, targetCollection string) ([]string, []string, []string, int, int, error) { + indexes, err := sourceDB.GetIndexes(dbName, tableName) + if err != nil { + return nil, nil, nil, 0, 0, err + } + grouped := groupIndexDefinitions(indexes) + cmds := make([]string, 0, len(grouped)) + warnings := make([]string, 0) + unsupported := make([]string, 0) + created := 0 + skipped := 0 + for _, idx := range grouped { + name := strings.TrimSpace(idx.Name) + if name == "" || strings.EqualFold(name, "primary") { + continue + } + if len(idx.Columns) == 0 { + skipped++ + unsupported = append(unsupported, fmt.Sprintf("索引 %s 缺少列定义,已跳过", name)) + continue + } + kind := strings.ToLower(strings.TrimSpace(idx.IndexType)) + if idx.SubPart > 0 { + skipped++ + unsupported = append(unsupported, fmt.Sprintf("索引 %s 使用前缀长度,MongoDB 目标暂不支持等价迁移", name)) + continue + } + if kind != "" && kind != "btree" { + warnings = append(warnings, fmt.Sprintf("索引 %s 类型=%s 将按普通索引迁移到 MongoDB", name, idx.IndexType)) + } + keySpec := make(map[string]int) + for _, col := range idx.Columns { + keySpec[col] = 1 + } + command := map[string]interface{}{ + "createIndexes": strings.TrimSpace(targetCollection), + "indexes": []map[string]interface{}{{ + "name": name, + "key": keySpec, + "unique": idx.Unique, + }}, + } + data, err := json.Marshal(command) + if err != nil { + skipped++ + unsupported = append(unsupported, fmt.Sprintf("索引 %s 生成 MongoDB createIndexes 命令失败:%v", name, err)) + continue + } + cmds = append(cmds, string(data)) + created++ + } + return cmds, dedupeStrings(warnings), dedupeStrings(unsupported), created, skipped, nil +} + +func inferMongoCollectionColumns(sourceDB db.Database, collection string) ([]connection.ColumnDefinition, []string, error) { + query := fmt.Sprintf(`{"find":"%s","filter":{},"limit":200}`, strings.TrimSpace(collection)) + rows, _, err := sourceDB.Query(query) + if err != nil { + return nil, nil, fmt.Errorf("读取源集合样本失败: %w", err) + } + if len(rows) == 0 { + return []connection.ColumnDefinition{{Name: "_id", Type: "varchar(64)", Nullable: "NO", Key: "PRI"}}, []string{"源集合暂无样本数据,仅按 `_id` 生成基础主键列"}, nil + } + fieldNames := make(map[string]struct{}) + for _, row := range rows { + for key := range row { + fieldNames[key] = struct{}{} + } + } + orderedFields := make([]string, 0, len(fieldNames)) + for key := range fieldNames { + orderedFields = append(orderedFields, key) + } + sort.Strings(orderedFields) + if containsString(orderedFields, "_id") { + orderedFields = moveStringToFront(orderedFields, "_id") + } + columns := make([]connection.ColumnDefinition, 0, len(orderedFields)) + warnings := make([]string, 0) + for _, field 
:= range orderedFields { + typeName, nullable, fieldWarnings := inferMongoFieldType(rows, field) + warnings = append(warnings, fieldWarnings...) + col := connection.ColumnDefinition{ + Name: field, + Type: typeName, + Nullable: ternaryString(nullable, "YES", "NO"), + Key: "", + Extra: "", + } + if field == "_id" { + col.Key = "PRI" + col.Nullable = "NO" + } + columns = append(columns, col) + } + return columns, dedupeStrings(warnings), nil +} + +func inferMongoFieldType(rows []map[string]interface{}, field string) (string, bool, []string) { + nullable := false + hasString, hasBool, hasInt, hasFloat, hasTime, hasComplex := false, false, false, false, false, false + for _, row := range rows { + value, ok := row[field] + if !ok || value == nil { + nullable = true + continue + } + switch value.(type) { + case bool: + hasBool = true + case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + hasInt = true + case float32, float64: + hasFloat = true + case time.Time: + hasTime = true + case map[string]interface{}, []interface{}: + hasComplex = true + default: + hasString = true + } + } + kinds := 0 + for _, flag := range []bool{hasString, hasBool, hasInt, hasFloat, hasTime, hasComplex} { + if flag { + kinds++ + } + } + warnings := make([]string, 0) + if kinds > 1 { + warnings = append(warnings, fmt.Sprintf("字段 %s 存在多种 BSON 值类型,已按兼容类型降级", field)) + } + if field == "_id" { + return "varchar(64)", false, warnings + } + switch { + case hasComplex: + return "json", nullable, warnings + case hasTime: + return "datetime", nullable, warnings + case hasFloat: + return "double", nullable, warnings + case hasInt: + return "bigint", nullable, warnings + case hasBool: + return "tinyint(1)", nullable, warnings + default: + return "varchar(255)", nullable, warnings + } +} + +func buildMongoToMySQLAddColumnSQL(targetQueryTable string, sourceCols, targetCols []connection.ColumnDefinition) ([]string, []string) { + targetSet := make(map[string]struct{}, len(targetCols)) + for _, col := range targetCols { + key := strings.ToLower(strings.TrimSpace(col.Name)) + if key == "" { + continue + } + targetSet[key] = struct{}{} + } + var sqlList []string + for _, col := range sourceCols { + key := strings.ToLower(strings.TrimSpace(col.Name)) + if key == "" { + continue + } + if _, ok := targetSet[key]; ok { + continue + } + sqlList = append(sqlList, fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s %s NULL", + quoteQualifiedIdentByType("mysql", targetQueryTable), + quoteIdentByType("mysql", col.Name), + strings.TrimSpace(col.Type), + )) + } + return sqlList, nil +} + +func buildMongoToMySQLCreateTablePlan(config SyncConfig, targetQueryTable string, sourceCols []connection.ColumnDefinition, sourceDB db.Database, sourceSchema, sourceTable string) (string, []string, []string, []string, int, int, error) { + columnDefs := make([]string, 0, len(sourceCols)+1) + warnings := make([]string, 0) + unsupported := make([]string, 0) + pkCols := make([]string, 0, 1) + for _, col := range sourceCols { + columnDef := fmt.Sprintf("%s %s", quoteIdentByType("mysql", col.Name), strings.TrimSpace(col.Type)) + if strings.EqualFold(strings.TrimSpace(col.Nullable), "NO") { + columnDef += " NOT NULL" + } + columnDefs = append(columnDefs, columnDef) + if col.Key == "PRI" || col.Key == "PK" { + pkCols = append(pkCols, quoteIdentByType("mysql", col.Name)) + } + } + if len(pkCols) > 0 { + columnDefs = append(columnDefs, fmt.Sprintf("PRIMARY KEY (%s)", strings.Join(pkCols, ", "))) + } else { + warnings = append(warnings, "MongoDB 
源集合未推断出稳定主键,目标表将不自动创建主键") + } + createSQL := fmt.Sprintf("CREATE TABLE %s (\n %s\n)", quoteQualifiedIdentByType("mysql", targetQueryTable), strings.Join(columnDefs, ",\n ")) + if !config.CreateIndexes { + return createSQL, nil, dedupeStrings(warnings), dedupeStrings(unsupported), 0, 0, nil + } + indexes, err := sourceDB.GetIndexes(sourceSchema, sourceTable) + if err != nil { + warnings = append(warnings, fmt.Sprintf("读取源集合索引失败,已跳过索引迁移:%v", err)) + return createSQL, nil, dedupeStrings(warnings), dedupeStrings(unsupported), 0, 0, nil + } + grouped := groupIndexDefinitions(indexes) + postSQL := make([]string, 0, len(grouped)) + created := 0 + skipped := 0 + for _, idx := range grouped { + name := strings.TrimSpace(idx.Name) + if name == "" || strings.EqualFold(name, "_id_") || strings.EqualFold(name, "primary") { + continue + } + if len(idx.Columns) == 0 { + skipped++ + unsupported = append(unsupported, fmt.Sprintf("索引 %s 缺少列定义,已跳过", name)) + continue + } + quotedCols := make([]string, 0, len(idx.Columns)) + for _, col := range idx.Columns { + quotedCols = append(quotedCols, quoteIdentByType("mysql", col)) + } + prefix := "CREATE INDEX" + if idx.Unique { + prefix = "CREATE UNIQUE INDEX" + } + postSQL = append(postSQL, fmt.Sprintf("%s %s ON %s (%s)", prefix, quoteIdentByType("mysql", name), quoteQualifiedIdentByType("mysql", targetQueryTable), strings.Join(quotedCols, ", "))) + created++ + } + return createSQL, postSQL, dedupeStrings(warnings), dedupeStrings(unsupported), created, skipped, nil +} + +func containsString(items []string, target string) bool { + for _, item := range items { + if item == target { + return true + } + } + return false +} + +func moveStringToFront(items []string, target string) []string { + out := make([]string, 0, len(items)) + for _, item := range items { + if item == target { + continue + } + out = append(out, item) + } + return append([]string{target}, out...) +} + +func buildMongoToPGLikePlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + plan := SchemaMigrationPlan{} + targetType := strings.ToLower(strings.TrimSpace(config.TargetConfig.Type)) + plan.SourceSchema, plan.SourceTable = normalizeSchemaAndTable(config.SourceConfig.Type, config.SourceConfig.Database, tableName) + plan.TargetSchema, plan.TargetTable = normalizeSchemaAndTable(config.TargetConfig.Type, config.TargetConfig.Database, tableName) + plan.SourceQueryTable = qualifiedNameForQuery(config.SourceConfig.Type, plan.SourceSchema, plan.SourceTable, tableName) + plan.TargetQueryTable = qualifiedNameForQuery(config.TargetConfig.Type, plan.TargetSchema, plan.TargetTable, tableName) + plan.PlannedAction = "使用已有目标表导入" + + sourceCols, warnings, err := inferMongoCollectionColumns(sourceDB, plan.SourceTable) + if err != nil { + return plan, nil, nil, err + } + plan.Warnings = append(plan.Warnings, warnings...) 
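+	// Type note: mapMongoInferredColumnToPGLike (defined below) rewrites the inferred
+	// MySQL-flavoured types for PG-like targets: json→jsonb, datetime→timestamp,
+	// tinyint(1)→boolean, double→double precision; varchar and bigint pass through unchanged.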
+ if len(sourceCols) == 0 { + return plan, nil, nil, fmt.Errorf("源集合未推断出可迁移字段: %s", tableName) + } + + targetCols, targetExists, err := inspectTableColumns(targetDB, plan.TargetSchema, plan.TargetTable) + if err != nil { + return plan, sourceCols, nil, fmt.Errorf("获取目标表字段失败: %w", err) + } + plan.TargetTableExists = targetExists + + strategy := normalizeTargetTableStrategy(config.TargetTableStrategy) + if targetExists { + missing := diffMissingColumnNames(sourceCols, targetCols) + if len(missing) > 0 { + plan.Warnings = append(plan.Warnings, fmt.Sprintf("目标表缺失字段 %d 个:%s", len(missing), strings.Join(missing, ", "))) + } + if config.AutoAddColumns { + addSQL, addWarnings := buildMongoToPGLikeAddColumnSQL(targetType, plan.TargetQueryTable, sourceCols, targetCols) + plan.PreDataSQL = append(plan.PreDataSQL, addSQL...) + plan.Warnings = append(plan.Warnings, addWarnings...) + if len(addSQL) > 0 { + plan.PlannedAction = fmt.Sprintf("补齐缺失字段(%d)后导入", len(addSQL)) + } + } + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + } + + switch strategy { + case "existing_only": + plan.PlannedAction = "目标表不存在,需先手工创建" + plan.Warnings = append(plan.Warnings, "当前策略要求目标表已存在,执行时不会自动建表") + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + case "smart", "auto_create_if_missing": + plan.AutoCreate = true + plan.PlannedAction = "目标表不存在,将自动建表后导入" + createSQL, postSQL, moreWarnings, unsupported, idxCreate, idxSkip, err := buildMongoToPGLikeCreateTablePlan(targetType, config, plan.TargetQueryTable, sourceCols, sourceDB, plan.SourceSchema, plan.SourceTable) + if err != nil { + return plan, sourceCols, targetCols, err + } + plan.CreateTableSQL = createSQL + plan.PostDataSQL = append(plan.PostDataSQL, postSQL...) + plan.Warnings = append(plan.Warnings, moreWarnings...) + plan.UnsupportedObjects = append(plan.UnsupportedObjects, unsupported...) + plan.IndexesToCreate = idxCreate + plan.IndexesSkipped = idxSkip + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + default: + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + } +} + +func buildMongoToPGLikeAddColumnSQL(targetType string, targetQueryTable string, sourceCols, targetCols []connection.ColumnDefinition) ([]string, []string) { + targetSet := make(map[string]struct{}, len(targetCols)) + for _, col := range targetCols { + key := strings.ToLower(strings.TrimSpace(col.Name)) + if key == "" { + continue + } + targetSet[key] = struct{}{} + } + var sqlList []string + var warnings []string + for _, col := range sourceCols { + key := strings.ToLower(strings.TrimSpace(col.Name)) + if key == "" { + continue + } + if _, ok := targetSet[key]; ok { + continue + } + colType, mapWarnings := mapMongoInferredColumnToPGLike(col) + warnings = append(warnings, mapWarnings...) 
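+		// Hypothetical example: a missing inferred column {Name: "tags", Type: "json"} on a
+		// PostgreSQL-like target yields roughly
+		//   ALTER TABLE <target> ADD COLUMN "tags" jsonb NULL
+		// (exact quoting depends on quoteIdentByType for the target dialect).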
+ sqlList = append(sqlList, fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s %s NULL", + quoteQualifiedIdentByType(targetType, targetQueryTable), + quoteIdentByType(targetType, col.Name), + colType, + )) + } + return sqlList, dedupeStrings(warnings) +} + +func buildMongoToPGLikeCreateTablePlan(targetType string, config SyncConfig, targetQueryTable string, sourceCols []connection.ColumnDefinition, sourceDB db.Database, sourceSchema, sourceTable string) (string, []string, []string, []string, int, int, error) { + columnDefs := make([]string, 0, len(sourceCols)+1) + warnings := make([]string, 0) + unsupported := make([]string, 0) + pkCols := make([]string, 0, 1) + for _, col := range sourceCols { + colType, colWarnings := mapMongoInferredColumnToPGLike(col) + warnings = append(warnings, colWarnings...) + parts := []string{colType} + if strings.EqualFold(strings.TrimSpace(col.Nullable), "NO") { + parts = append(parts, "NOT NULL") + } + columnDefs = append(columnDefs, fmt.Sprintf("%s %s", quoteIdentByType(targetType, col.Name), strings.Join(parts, " "))) + if col.Key == "PRI" || col.Key == "PK" { + pkCols = append(pkCols, quoteIdentByType(targetType, col.Name)) + } + } + if len(pkCols) > 0 { + columnDefs = append(columnDefs, fmt.Sprintf("PRIMARY KEY (%s)", strings.Join(pkCols, ", "))) + } + createSQL := fmt.Sprintf("CREATE TABLE %s (\n %s\n)", quoteQualifiedIdentByType(targetType, targetQueryTable), strings.Join(columnDefs, ",\n ")) + if !config.CreateIndexes { + return createSQL, nil, dedupeStrings(warnings), dedupeStrings(unsupported), 0, 0, nil + } + indexes, err := sourceDB.GetIndexes(sourceSchema, sourceTable) + if err != nil { + warnings = append(warnings, fmt.Sprintf("读取源集合索引失败,已跳过索引迁移:%v", err)) + return createSQL, nil, dedupeStrings(warnings), dedupeStrings(unsupported), 0, 0, nil + } + grouped := groupIndexDefinitions(indexes) + postSQL := make([]string, 0, len(grouped)) + created := 0 + skipped := 0 + for _, idx := range grouped { + name := strings.TrimSpace(idx.Name) + if name == "" || strings.EqualFold(name, "_id_") || strings.EqualFold(name, "primary") { + continue + } + if len(idx.Columns) == 0 { + skipped++ + unsupported = append(unsupported, fmt.Sprintf("索引 %s 缺少列定义,已跳过", name)) + continue + } + quotedCols := make([]string, 0, len(idx.Columns)) + for _, col := range idx.Columns { + quotedCols = append(quotedCols, quoteIdentByType(targetType, col)) + } + prefix := "CREATE INDEX" + if idx.Unique { + prefix = "CREATE UNIQUE INDEX" + } + postSQL = append(postSQL, fmt.Sprintf("%s %s ON %s (%s)", prefix, quoteIdentByType(targetType, name), quoteQualifiedIdentByType(targetType, targetQueryTable), strings.Join(quotedCols, ", "))) + created++ + } + return createSQL, postSQL, dedupeStrings(warnings), dedupeStrings(unsupported), created, skipped, nil +} + +func mapMongoInferredColumnToPGLike(col connection.ColumnDefinition) (string, []string) { + raw := strings.ToLower(strings.TrimSpace(col.Type)) + warnings := make([]string, 0) + switch { + case strings.HasPrefix(raw, "varchar"): + return col.Type, warnings + case raw == "json": + return "jsonb", warnings + case raw == "datetime": + return "timestamp", warnings + case raw == "tinyint(1)": + return "boolean", warnings + case raw == "double": + return "double precision", warnings + case raw == "bigint": + return "bigint", warnings + default: + return col.Type, warnings + } +} diff --git a/internal/sync/migration_redis.go b/internal/sync/migration_redis.go new file mode 100644 index 0000000..84f159f --- /dev/null +++ 
b/internal/sync/migration_redis.go @@ -0,0 +1,1315 @@ +package sync + +import ( + "GoNavi-Wails/internal/connection" + "GoNavi-Wails/internal/db" + redispkg "GoNavi-Wails/internal/redis" + "encoding/json" + "fmt" + "sort" + "strconv" + "strings" +) + +type redisMigrationClient interface { + Connect(config connection.ConnectionConfig) error + Close() error + ScanKeys(pattern string, cursor uint64, count int64) (*redispkg.RedisScanResult, error) + GetKeyType(key string) (string, error) + GetValue(key string) (*redispkg.RedisValue, error) + DeleteKeys(keys []string) (int64, error) + SetTTL(key string, ttl int64) error + SetString(key, value string, ttl int64) error + SetHashField(key, field, value string) error + ListPush(key string, values ...string) error + SetAdd(key string, members ...string) error + ZSetAdd(key string, members ...redispkg.ZSetMember) error + StreamAdd(key string, fields map[string]string, id string) (string, error) +} + +var newSyncDatabase = db.NewDatabase +var newRedisSourceClient = func() redisMigrationClient { return redispkg.NewRedisClient() } + +func isRedisToMongoKeyspacePair(config SyncConfig) bool { + return resolveMigrationDBType(config.SourceConfig) == "redis" && resolveMigrationDBType(config.TargetConfig) == "mongodb" +} + +func resolveRedisDBIndex(config connection.ConnectionConfig) int { + if config.RedisDB >= 0 && config.RedisDB <= 15 { + return config.RedisDB + } + if text := strings.TrimSpace(config.Database); text != "" { + if idx, err := strconv.Atoi(text); err == nil && idx >= 0 && idx <= 15 { + return idx + } + } + return 0 +} + +func withResolvedRedisDB(config connection.ConnectionConfig) connection.ConnectionConfig { + next := config + next.Type = "redis" + next.RedisDB = resolveRedisDBIndex(config) + return next +} + +func resolveMongoCollectionName(config SyncConfig) string { + if name := strings.TrimSpace(config.MongoCollectionName); name != "" { + return name + } + if resolveMigrationDBType(config.SourceConfig) == "redis" { + return fmt.Sprintf("redis_db_%d_keys", resolveRedisDBIndex(config.SourceConfig)) + } + return fmt.Sprintf("redis_db_%d_keys", resolveRedisDBIndex(config.TargetConfig)) +} + +func deriveRedisMongoCollectionName(config SyncConfig) string { + return resolveMongoCollectionName(config) +} + +func buildRedisToMongoPlan(config SyncConfig, keyName string, targetDB db.Database) (SchemaMigrationPlan, error) { + collection := deriveRedisMongoCollectionName(config) + plan := SchemaMigrationPlan{ + SourceSchema: strconv.Itoa(resolveRedisDBIndex(config.SourceConfig)), + SourceTable: keyName, + SourceQueryTable: keyName, + TargetSchema: strings.TrimSpace(config.TargetConfig.Database), + TargetTable: collection, + TargetQueryTable: collection, + PlannedAction: "按 Redis Key 生成 MongoDB 文档导入", + Warnings: []string{"Redis -> MongoDB 按 keyspace 语义迁移,不执行表级 schema 校验", "Redis TTL/集合顺序等语义会按文档字段保留,不保证与原系统完全等价"}, + UnsupportedObjects: []string{"Redis Consumer Group / PubSub / Lua 脚本 / 事务状态当前不迁移"}, + } + exists, err := inspectMongoCollection(targetDB, plan.TargetSchema, collection) + if err != nil { + return plan, fmt.Errorf("检查目标集合失败: %w", err) + } + plan.TargetTableExists = exists + strategy := normalizeTargetTableStrategy(config.TargetTableStrategy) + if exists { + return dedupeSchemaMigrationPlan(plan), nil + } + if strategy == "existing_only" { + plan.PlannedAction = "目标集合不存在,需先手工创建" + plan.Warnings = append(plan.Warnings, "当前策略要求目标集合已存在,执行时不会自动建集合") + return dedupeSchemaMigrationPlan(plan), nil + } + createCommand, err := 
buildMongoCreateCollectionCommand(collection) + if err != nil { + return plan, err + } + plan.AutoCreate = true + plan.PlannedAction = "目标集合不存在,将自动创建集合后导入" + plan.PreDataSQL = []string{createCommand} + return dedupeSchemaMigrationPlan(plan), nil +} + +func listRedisMigrationKeys(client redisMigrationClient, selected []string) ([]string, error) { + if len(selected) > 0 { + return dedupeStrings(selected), nil + } + cursor := uint64(0) + keys := make([]string, 0, 64) + seen := map[string]struct{}{} + for { + result, err := client.ScanKeys("*", cursor, 1000) + if err != nil { + return nil, err + } + if result != nil { + for _, item := range result.Keys { + key := strings.TrimSpace(item.Key) + if key == "" { + continue + } + if _, ok := seen[key]; ok { + continue + } + seen[key] = struct{}{} + keys = append(keys, key) + } + if strings.TrimSpace(result.Cursor) == "" || strings.TrimSpace(result.Cursor) == "0" { + break + } + next, err := strconv.ParseUint(strings.TrimSpace(result.Cursor), 10, 64) + if err != nil || next == cursor { + break + } + cursor = next + continue + } + break + } + sort.Strings(keys) + return keys, nil +} + +func buildRedisMongoDocument(dbIndex int, key string, value *redispkg.RedisValue) map[string]interface{} { + doc := map[string]interface{}{ + "_id": fmt.Sprintf("db%d:%s", dbIndex, key), + "redisDb": dbIndex, + "key": key, + "source": "redis", + } + if value == nil { + return doc + } + doc["type"] = value.Type + doc["ttl"] = value.TTL + doc["length"] = value.Length + doc["value"] = normalizeRedisMongoValue(value.Value) + return doc +} + +func normalizeRedisMongoValue(value interface{}) interface{} { + switch typed := value.(type) { + case nil: + return nil + case []byte: + return string(typed) + case map[string]string: + result := make(map[string]interface{}, len(typed)) + for k, v := range typed { + result[k] = v + } + return result + case []string: + result := make([]interface{}, 0, len(typed)) + for _, item := range typed { + result = append(result, item) + } + return result + case []redispkg.ZSetMember: + result := make([]map[string]interface{}, 0, len(typed)) + for _, item := range typed { + result = append(result, map[string]interface{}{"member": item.Member, "score": item.Score}) + } + return result + case []redispkg.StreamEntry: + result := make([]map[string]interface{}, 0, len(typed)) + for _, item := range typed { + fields := make(map[string]interface{}, len(item.Fields)) + for k, v := range item.Fields { + fields[k] = v + } + result = append(result, map[string]interface{}{"id": item.ID, "fields": fields}) + } + return result + case map[string]interface{}: + result := make(map[string]interface{}, len(typed)) + for k, v := range typed { + result[k] = normalizeRedisMongoValue(v) + } + return result + case []interface{}: + result := make([]interface{}, 0, len(typed)) + for _, item := range typed { + result = append(result, normalizeRedisMongoValue(item)) + } + return result + default: + return typed + } +} + +func buildRedisMongoExistingDocsQuery(collection string, ids []string) (string, error) { + command := map[string]interface{}{ + "find": collection, + "filter": map[string]interface{}{ + "_id": map[string]interface{}{"$in": ids}, + }, + } + data, err := json.Marshal(command) + if err != nil { + return "", err + } + return string(data), nil +} + +func loadExistingRedisMongoDocs(targetDB db.Database, collection string, ids []string) (map[string]map[string]interface{}, error) { + result := make(map[string]map[string]interface{}, len(ids)) + if len(ids) == 0 { + 
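		// Fast path: nothing to look up, so skip the MongoDB round-trip entirely.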
+		return result, nil
+	}
+	query, err := buildRedisMongoExistingDocsQuery(collection, ids)
+	if err != nil {
+		return nil, err
+	}
+	rows, _, err := targetDB.Query(query)
+	if err != nil {
+		return nil, err
+	}
+	for _, row := range rows {
+		id := strings.TrimSpace(fmt.Sprintf("%v", row["_id"]))
+		if id == "" || id == "<nil>" { // skip empty or nil _id values ("<nil>" is fmt's rendering of a nil _id)
+			continue
+		}
+		result[id] = row
+	}
+	return result, nil
+}
+
+func buildRedisMongoChanges(config SyncConfig, keys []string, client redisMigrationClient, targetDB db.Database, collection string) (connection.ChangeSet, []map[string]interface{}, error) {
+	changeSet := connection.ChangeSet{Inserts: []map[string]interface{}{}, Updates: []connection.UpdateRow{}, Deletes: []map[string]interface{}{}}
+	documents := make([]map[string]interface{}, 0, len(keys))
+	dbIndex := resolveRedisDBIndex(config.SourceConfig)
+	for _, key := range keys {
+		value, err := client.GetValue(key)
+		if err != nil {
+			return changeSet, nil, fmt.Errorf("读取 Redis Key 失败: key=%s err=%w", key, err)
+		}
+		documents = append(documents, buildRedisMongoDocument(dbIndex, key, value))
+	}
+	ids := make([]string, 0, len(documents))
+	for _, doc := range documents {
+		ids = append(ids, fmt.Sprintf("%v", doc["_id"]))
+	}
+	existing, err := loadExistingRedisMongoDocs(targetDB, collection, ids)
+	if err != nil {
+		return changeSet, nil, err
+	}
+	mode := normalizeSyncMode(config.Mode)
+	for _, doc := range documents {
+		id := fmt.Sprintf("%v", doc["_id"])
+		existingDoc, ok := existing[id]
+		if !ok {
+			changeSet.Inserts = append(changeSet.Inserts, doc)
+			continue
+		}
+		if mode == "insert_only" {
+			continue
+		}
+		values := cloneMapWithoutKeys(doc, "_id")
+		if sameRedisMongoDocument(existingDoc, doc) {
+			continue
+		}
+		changeSet.Updates = append(changeSet.Updates, connection.UpdateRow{Keys: map[string]interface{}{"_id": id}, Values: values})
+	}
+	return changeSet, documents, nil
+}
+
+func sameRedisMongoDocument(existing map[string]interface{}, desired map[string]interface{}) bool {
+	for k, v := range desired {
+		if k == "_id" {
+			continue
+		}
+		if fmt.Sprintf("%v", normalizeRedisMongoValue(v)) != fmt.Sprintf("%v", normalizeRedisMongoValue(existing[k])) {
+			return false
+		}
+	}
+	return true
+}
+
+func cloneMapWithoutKeys(input map[string]interface{}, skipKeys ...string) map[string]interface{} {
+	skip := make(map[string]struct{}, len(skipKeys))
+	for _, key := range skipKeys {
+		skip[key] = struct{}{}
+	}
+	result := make(map[string]interface{}, len(input))
+	for k, v := range input {
+		if _, ok := skip[k]; ok {
+			continue
+		}
+		result[k] = v
+	}
+	return result
+}
+
+func (s *SyncEngine) runRedisToMongoSync(config SyncConfig, result SyncResult) SyncResult {
+	tables := config.Tables
+	strategy := normalizeTargetTableStrategy(config.TargetTableStrategy)
+	mode := normalizeSyncMode(config.Mode)
+	s.progress(config.JobID, 0, len(tables), "", "开始 Redis 键空间迁移")
+	s.appendLog(config.JobID, &result, "info", fmt.Sprintf("Redis -> MongoDB 键空间迁移;模式:%s;目标策略:%s", mode, strategy))
+	if mode == "full_overwrite" {
+		s.appendLog(config.JobID, &result, "warn", "Redis -> MongoDB 第一版暂不执行集合级 full_overwrite 删除,已降级为 insert_update")
+	}
+
+	sourceClient := newRedisSourceClient()
+	sourceConfig := withResolvedRedisDB(config.SourceConfig)
+	if err := sourceClient.Connect(sourceConfig); err != nil {
+		return s.fail(config.JobID, len(tables), result, "源 Redis 连接失败: "+err.Error())
+	}
+	defer sourceClient.Close()
+
+	targetDB, err := newSyncDatabase(config.TargetConfig.Type)
+	if err != nil {
+		return s.fail(config.JobID, len(tables),
result, "初始化目标数据库驱动失败: "+err.Error()) + } + if err := targetDB.Connect(config.TargetConfig); err != nil { + return s.fail(config.JobID, len(tables), result, "目标数据库连接失败: "+err.Error()) + } + defer targetDB.Close() + + keys, err := listRedisMigrationKeys(sourceClient, config.Tables) + if err != nil { + return s.fail(config.JobID, len(tables), result, "扫描 Redis Key 失败: "+err.Error()) + } + if len(keys) == 0 { + result.Message = "未发现可迁移的 Redis Key" + s.progress(config.JobID, 0, 0, "", "同步完成") + return result + } + totalKeys := len(keys) + collection := deriveRedisMongoCollectionName(config) + plan, err := buildRedisToMongoPlan(config, firstNonEmpty(keys[0], collection), targetDB) + if err != nil { + return s.fail(config.JobID, totalKeys, result, err.Error()) + } + for _, warning := range plan.Warnings { + s.appendLog(config.JobID, &result, "warn", " -> "+warning) + } + for _, unsupported := range plan.UnsupportedObjects { + s.appendLog(config.JobID, &result, "warn", " -> "+unsupported) + } + if strings.TrimSpace(plan.PlannedAction) != "" { + s.appendLog(config.JobID, &result, "info", " -> "+plan.PlannedAction) + } + if !plan.TargetTableExists && !plan.AutoCreate { + result.Message = firstNonEmpty(plan.PlannedAction, "目标集合不存在,当前策略不允许自动创建") + return result + } + if !plan.TargetTableExists && len(plan.PreDataSQL) > 0 { + s.progress(config.JobID, 0, totalKeys, collection, "创建目标集合") + if err := executeSQLStatements(targetDB.Exec, plan.PreDataSQL); err != nil { + return s.fail(config.JobID, totalKeys, result, "创建目标集合失败: "+err.Error()) + } + } + + changeSet, documents, err := buildRedisMongoChanges(config, keys, sourceClient, targetDB, collection) + if err != nil { + return s.fail(config.JobID, totalKeys, result, "构建 Redis 迁移变更失败: "+err.Error()) + } + for idx, key := range keys { + s.appendLog(config.JobID, &result, "info", fmt.Sprintf("正在迁移 Key: %s", key)) + s.progress(config.JobID, idx, totalKeys, key, fmt.Sprintf("迁移 Key(%d/%d)", idx+1, totalKeys)) + } + if len(changeSet.Inserts) == 0 && len(changeSet.Updates) == 0 && len(changeSet.Deletes) == 0 { + s.appendLog(config.JobID, &result, "info", " -> 目标集合中对应文档已是最新状态") + result.TablesSynced = totalKeys + result.Message = fmt.Sprintf("Redis 键空间迁移完成,共处理 %d 个 Key", totalKeys) + s.progress(config.JobID, totalKeys, totalKeys, collection, "同步完成") + return result + } + applier, ok := targetDB.(db.BatchApplier) + if !ok { + return s.fail(config.JobID, totalKeys, result, "目标驱动不支持 MongoDB 文档写入") + } + _ = documents + if err := applier.ApplyChanges(collection, changeSet); err != nil { + return s.fail(config.JobID, totalKeys, result, "应用 Redis 迁移变更失败: "+err.Error()) + } + result.RowsInserted += len(changeSet.Inserts) + result.RowsUpdated += len(changeSet.Updates) + result.RowsDeleted += len(changeSet.Deletes) + result.TablesSynced = totalKeys + result.Message = fmt.Sprintf("Redis 键空间迁移完成,共处理 %d 个 Key", totalKeys) + s.progress(config.JobID, totalKeys, totalKeys, collection, "同步完成") + return result +} + +func (s *SyncEngine) analyzeRedisToMongo(config SyncConfig) SyncAnalyzeResult { + result := SyncAnalyzeResult{Success: true, Tables: []TableDiffSummary{}} + sourceClient := newRedisSourceClient() + sourceConfig := withResolvedRedisDB(config.SourceConfig) + if err := sourceClient.Connect(sourceConfig); err != nil { + return SyncAnalyzeResult{Success: false, Message: "源 Redis 连接失败: " + err.Error()} + } + defer sourceClient.Close() + targetDB, err := newSyncDatabase(config.TargetConfig.Type) + if err != nil { + return SyncAnalyzeResult{Success: false, Message: 
"初始化目标数据库驱动失败: " + err.Error()} + } + if err := targetDB.Connect(config.TargetConfig); err != nil { + return SyncAnalyzeResult{Success: false, Message: "目标数据库连接失败: " + err.Error()} + } + defer targetDB.Close() + keys, err := listRedisMigrationKeys(sourceClient, config.Tables) + if err != nil { + return SyncAnalyzeResult{Success: false, Message: "扫描 Redis Key 失败: " + err.Error()} + } + collection := deriveRedisMongoCollectionName(config) + changeSet, documents, err := buildRedisMongoChanges(config, keys, sourceClient, targetDB, collection) + if err != nil { + return SyncAnalyzeResult{Success: false, Message: "分析 Redis 迁移变更失败: " + err.Error()} + } + insertSet := make(map[string]struct{}, len(changeSet.Inserts)) + updateSet := make(map[string]struct{}, len(changeSet.Updates)) + for _, row := range changeSet.Inserts { + insertSet[fmt.Sprintf("%v", row["_id"])] = struct{}{} + } + for _, row := range changeSet.Updates { + updateSet[fmt.Sprintf("%v", row.Keys["_id"])] = struct{}{} + } + for _, doc := range documents { + key := fmt.Sprintf("%v", doc["key"]) + id := fmt.Sprintf("%v", doc["_id"]) + summary := TableDiffSummary{ + Table: key, + PKColumn: "_id", + CanSync: true, + TargetTableExists: true, + PlannedAction: fmt.Sprintf("迁移到集合 %s", collection), + Warnings: []string{ + "Redis Key 将按文档写入 MongoDB 集合", + }, + } + if _, ok := insertSet[id]; ok { + summary.Inserts = 1 + summary.Message = "执行时将写入新文档" + } else if _, ok := updateSet[id]; ok { + summary.Updates = 1 + summary.Message = "执行时将更新已有文档" + } else { + summary.Same = 1 + summary.Message = "目标集合中对应文档已是最新状态" + } + result.Tables = append(result.Tables, summary) + } + result.Message = fmt.Sprintf("已完成 %d 个 Redis Key 的迁移分析", len(result.Tables)) + return result +} + +func (s *SyncEngine) previewRedisToMongo(config SyncConfig, keyName string, limit int) (TableDiffPreview, error) { + _ = limit + sourceClient := newRedisSourceClient() + sourceConfig := withResolvedRedisDB(config.SourceConfig) + if err := sourceClient.Connect(sourceConfig); err != nil { + return TableDiffPreview{}, fmt.Errorf("源 Redis 连接失败: %w", err) + } + defer sourceClient.Close() + targetDB, err := newSyncDatabase(config.TargetConfig.Type) + if err != nil { + return TableDiffPreview{}, fmt.Errorf("初始化目标数据库驱动失败: %w", err) + } + if err := targetDB.Connect(config.TargetConfig); err != nil { + return TableDiffPreview{}, fmt.Errorf("目标数据库连接失败: %w", err) + } + defer targetDB.Close() + collection := deriveRedisMongoCollectionName(config) + changeSet, documents, err := buildRedisMongoChanges(config, []string{keyName}, sourceClient, targetDB, collection) + if err != nil { + return TableDiffPreview{}, err + } + preview := TableDiffPreview{Table: keyName, PKColumn: "_id", Inserts: []PreviewRow{}, Updates: []PreviewUpdateRow{}, Deletes: []PreviewRow{}} + if len(documents) == 0 { + return preview, nil + } + doc := documents[0] + id := fmt.Sprintf("%v", doc["_id"]) + existingDocs, err := loadExistingRedisMongoDocs(targetDB, collection, []string{id}) + if err != nil { + return TableDiffPreview{}, err + } + if len(changeSet.Inserts) > 0 { + preview.TotalInserts = 1 + preview.Inserts = append(preview.Inserts, PreviewRow{PK: id, Row: doc}) + return preview, nil + } + if len(changeSet.Updates) > 0 { + preview.TotalUpdates = 1 + preview.Updates = append(preview.Updates, PreviewUpdateRow{PK: id, ChangedColumns: sortedMapKeys(changeSet.Updates[0].Values), Source: doc, Target: existingDocs[id]}) + return preview, nil + } + return preview, nil +} + +func sortedMapKeys(values map[string]interface{}) 
[]string { + keys := make([]string, 0, len(values)) + for key := range values { + keys = append(keys, key) + } + sort.Strings(keys) + return keys +} + +func isMongoToRedisKeyspacePair(config SyncConfig) bool { + return resolveMigrationDBType(config.SourceConfig) == "mongodb" && resolveMigrationDBType(config.TargetConfig) == "redis" +} + +type mongoRedisKeyDocument struct { + Key string + Type string + TTL int64 + Value interface{} + SourceRow map[string]interface{} + Desired *redispkg.RedisValue +} + +type mongoRedisKeyDiff struct { + Collection string + Document mongoRedisKeyDocument + Current *redispkg.RedisValue + Exists bool + Action string + ChangedColumns []string +} + +func deriveRedisTargetLabel(config SyncConfig) string { + return fmt.Sprintf("Redis DB %d", resolveRedisDBIndex(config.TargetConfig)) +} + +func deriveDefaultMongoRedisCollection(config SyncConfig) string { + return resolveMongoCollectionName(config) +} + +func listMongoRedisCollections(sourceDB db.Database, config SyncConfig) ([]string, error) { + if len(config.Tables) > 0 { + return dedupeStrings(config.Tables), nil + } + tables, err := sourceDB.GetTables(strings.TrimSpace(config.SourceConfig.Database)) + if err == nil && len(tables) > 0 { + return dedupeStrings(tables), nil + } + return []string{deriveDefaultMongoRedisCollection(config)}, nil +} + +func buildMongoRedisFindQuery(collection string, limit int) (string, error) { + command := map[string]interface{}{ + "find": strings.TrimSpace(collection), + "filter": map[string]interface{}{}, + } + if limit > 0 { + command["limit"] = limit + } + data, err := json.Marshal(command) + if err != nil { + return "", err + } + return string(data), nil +} + +func loadMongoRedisDocuments(sourceDB db.Database, collection string, limit int) ([]map[string]interface{}, error) { + query, err := buildMongoRedisFindQuery(collection, limit) + if err != nil { + return nil, err + } + rows, _, err := sourceDB.Query(query) + if err != nil { + return nil, err + } + return rows, nil +} + +func parseMongoRedisDocument(row map[string]interface{}) (mongoRedisKeyDocument, error) { + key := strings.TrimSpace(asRedisMigrationString(row["key"])) + if key == "" { + if rawID := strings.TrimSpace(asRedisMigrationString(row["_id"])); rawID != "" { + if _, tail, ok := strings.Cut(rawID, ":"); ok { + key = strings.TrimSpace(tail) + } + } + } + if key == "" { + return mongoRedisKeyDocument{}, fmt.Errorf("文档缺少 key 字段") + } + + redisType := strings.ToLower(strings.TrimSpace(asRedisMigrationString(row["type"]))) + if redisType == "" { + return mongoRedisKeyDocument{}, fmt.Errorf("文档缺少 type 字段: key=%s", key) + } + + ttl := normalizeRedisMigrationTTL(asRedisMigrationInt64(row["ttl"], -1)) + desired := &redispkg.RedisValue{Type: redisType, TTL: ttl} + + sourceRow := cloneMapWithoutKeys(row) + sourceRow["key"] = key + sourceRow["type"] = redisType + sourceRow["ttl"] = ttl + + switch redisType { + case "string": + value := asRedisMigrationString(row["value"]) + desired.Value = value + desired.Length = int64(len(value)) + sourceRow["value"] = value + case "hash": + value, err := asRedisMigrationStringMap(row["value"]) + if err != nil { + return mongoRedisKeyDocument{}, fmt.Errorf("key=%s hash 值无效: %w", key, err) + } + desired.Value = value + desired.Length = int64(len(value)) + sourceRow["value"] = normalizeRedisMongoValue(value) + case "list": + value, err := asRedisMigrationStringSlice(row["value"]) + if err != nil { + return mongoRedisKeyDocument{}, fmt.Errorf("key=%s list 值无效: %w", key, err) + } + 
desired.Value = value + desired.Length = int64(len(value)) + sourceRow["value"] = normalizeRedisMongoValue(value) + case "set": + value, err := asRedisMigrationStringSlice(row["value"]) + if err != nil { + return mongoRedisKeyDocument{}, fmt.Errorf("key=%s set 值无效: %w", key, err) + } + sort.Strings(value) + desired.Value = value + desired.Length = int64(len(value)) + sourceRow["value"] = normalizeRedisMongoValue(value) + case "zset": + value, err := asRedisMigrationZSetMembers(row["value"]) + if err != nil { + return mongoRedisKeyDocument{}, fmt.Errorf("key=%s zset 值无效: %w", key, err) + } + sort.Slice(value, func(i, j int) bool { + if value[i].Score == value[j].Score { + return value[i].Member < value[j].Member + } + return value[i].Score < value[j].Score + }) + desired.Value = value + desired.Length = int64(len(value)) + sourceRow["value"] = normalizeRedisMongoValue(value) + case "stream": + value, err := asRedisMigrationStreamEntries(row["value"]) + if err != nil { + return mongoRedisKeyDocument{}, fmt.Errorf("key=%s stream 值无效: %w", key, err) + } + sort.Slice(value, func(i, j int) bool { return value[i].ID < value[j].ID }) + desired.Value = value + desired.Length = int64(len(value)) + sourceRow["value"] = normalizeRedisMongoValue(value) + default: + return mongoRedisKeyDocument{}, fmt.Errorf("key=%s 暂不支持 Redis 类型 %s", key, redisType) + } + + return mongoRedisKeyDocument{Key: key, Type: redisType, TTL: ttl, Value: desired.Value, SourceRow: sourceRow, Desired: desired}, nil +} + +func buildMongoToRedisDiffs(sourceDB db.Database, targetClient redisMigrationClient, collection string, mode string) ([]mongoRedisKeyDiff, error) { + rows, err := loadMongoRedisDocuments(sourceDB, collection, 0) + if err != nil { + return nil, err + } + diffs := make([]mongoRedisKeyDiff, 0, len(rows)) + effectiveMode := normalizeSyncMode(mode) + for _, row := range rows { + doc, err := parseMongoRedisDocument(row) + if err != nil { + return nil, err + } + current, exists, err := loadExistingRedisMigrationValue(targetClient, doc.Key) + if err != nil { + return nil, fmt.Errorf("读取目标 Redis Key 失败: key=%s err=%w", doc.Key, err) + } + action := "insert" + changedColumns := []string{"type", "ttl", "value"} + if exists { + if sameRedisMigrationValue(current, doc.Desired) { + action = "same" + changedColumns = nil + } else if effectiveMode == "insert_only" { + action = "same" + changedColumns = nil + } else { + action = "update" + changedColumns = diffRedisMigrationColumns(current, doc.Desired) + } + } + diffs = append(diffs, mongoRedisKeyDiff{ + Collection: collection, + Document: doc, + Current: current, + Exists: exists, + Action: action, + ChangedColumns: changedColumns, + }) + } + sort.Slice(diffs, func(i, j int) bool { return diffs[i].Document.Key < diffs[j].Document.Key }) + return diffs, nil +} + +func loadExistingRedisMigrationValue(client redisMigrationClient, key string) (*redispkg.RedisValue, bool, error) { + keyType, err := client.GetKeyType(key) + if err != nil { + return nil, false, err + } + keyType = strings.ToLower(strings.TrimSpace(keyType)) + if keyType == "" || keyType == "none" { + return nil, false, nil + } + value, err := client.GetValue(key) + if err != nil { + return nil, false, err + } + if value == nil { + return nil, false, nil + } + value.Type = keyType + value.TTL = normalizeRedisMigrationTTL(value.TTL) + return value, true, nil +} + +func normalizeRedisMigrationTTL(ttl int64) int64 { + if ttl > 0 { + return ttl + } + return -1 +} + +func sameRedisMigrationValue(current 
*redispkg.RedisValue, desired *redispkg.RedisValue) bool { + if current == nil || desired == nil { + return current == nil && desired == nil + } + if strings.ToLower(strings.TrimSpace(current.Type)) != strings.ToLower(strings.TrimSpace(desired.Type)) { + return false + } + if normalizeRedisMigrationTTL(current.TTL) != normalizeRedisMigrationTTL(desired.TTL) { + return false + } + return canonicalRedisMigrationValue(current) == canonicalRedisMigrationValue(desired) +} + +func canonicalRedisMigrationValue(value *redispkg.RedisValue) string { + if value == nil { + return "null" + } + payload := map[string]interface{}{ + "type": strings.ToLower(strings.TrimSpace(value.Type)), + "ttl": normalizeRedisMigrationTTL(value.TTL), + "value": normalizeRedisComparablePayload(strings.ToLower(strings.TrimSpace(value.Type)), value.Value), + } + data, err := json.Marshal(payload) + if err != nil { + return fmt.Sprintf("%v", payload) + } + return string(data) +} + +func normalizeRedisComparablePayload(redisType string, value interface{}) interface{} { + switch redisType { + case "string": + return asRedisMigrationString(value) + case "hash": + mapped, err := asRedisMigrationStringMap(value) + if err != nil { + return fmt.Sprintf("%v", value) + } + return normalizeRedisMongoValue(mapped) + case "list": + items, err := asRedisMigrationStringSlice(value) + if err != nil { + return fmt.Sprintf("%v", value) + } + return normalizeRedisMongoValue(items) + case "set": + items, err := asRedisMigrationStringSlice(value) + if err != nil { + return fmt.Sprintf("%v", value) + } + sort.Strings(items) + return normalizeRedisMongoValue(items) + case "zset": + members, err := asRedisMigrationZSetMembers(value) + if err != nil { + return fmt.Sprintf("%v", value) + } + sort.Slice(members, func(i, j int) bool { + if members[i].Score == members[j].Score { + return members[i].Member < members[j].Member + } + return members[i].Score < members[j].Score + }) + return normalizeRedisMongoValue(members) + case "stream": + entries, err := asRedisMigrationStreamEntries(value) + if err != nil { + return fmt.Sprintf("%v", value) + } + sort.Slice(entries, func(i, j int) bool { return entries[i].ID < entries[j].ID }) + return normalizeRedisMongoValue(entries) + default: + return normalizeRedisMongoValue(value) + } +} + +func diffRedisMigrationColumns(current *redispkg.RedisValue, desired *redispkg.RedisValue) []string { + changed := make([]string, 0, 3) + if current == nil || desired == nil { + return []string{"type", "ttl", "value"} + } + if strings.ToLower(strings.TrimSpace(current.Type)) != strings.ToLower(strings.TrimSpace(desired.Type)) { + changed = append(changed, "type") + } + if normalizeRedisMigrationTTL(current.TTL) != normalizeRedisMigrationTTL(desired.TTL) { + changed = append(changed, "ttl") + } + currentComparable := normalizeRedisComparablePayload(strings.ToLower(strings.TrimSpace(desired.Type)), current.Value) + desiredComparable := normalizeRedisComparablePayload(strings.ToLower(strings.TrimSpace(desired.Type)), desired.Value) + currentJSON, _ := json.Marshal(currentComparable) + desiredJSON, _ := json.Marshal(desiredComparable) + if string(currentJSON) != string(desiredJSON) { + changed = append(changed, "value") + } + return dedupeStrings(changed) +} + +func buildRedisPreviewRow(key string, value *redispkg.RedisValue) map[string]interface{} { + if value == nil { + return map[string]interface{}{"key": key} + } + return map[string]interface{}{ + "key": key, + "type": strings.ToLower(strings.TrimSpace(value.Type)), + "ttl": 
normalizeRedisMigrationTTL(value.TTL), + "value": normalizeRedisComparablePayload(strings.ToLower(strings.TrimSpace(value.Type)), value.Value), + } +} + +func applyMongoRedisDiff(targetClient redisMigrationClient, diff mongoRedisKeyDiff) error { + desired := diff.Document.Desired + if desired == nil { + return fmt.Errorf("空的 Redis 目标值: key=%s", diff.Document.Key) + } + redisType := strings.ToLower(strings.TrimSpace(desired.Type)) + ttl := normalizeRedisMigrationTTL(desired.TTL) + if diff.Exists && diff.Action == "update" && redisType != "string" { + if _, err := targetClient.DeleteKeys([]string{diff.Document.Key}); err != nil { + return err + } + } + + switch redisType { + case "string": + return targetClient.SetString(diff.Document.Key, asRedisMigrationString(desired.Value), ttl) + case "hash": + mapped, err := asRedisMigrationStringMap(desired.Value) + if err != nil { + return err + } + fields := make([]string, 0, len(mapped)) + for field := range mapped { + fields = append(fields, field) + } + sort.Strings(fields) + for _, field := range fields { + if err := targetClient.SetHashField(diff.Document.Key, field, mapped[field]); err != nil { + return err + } + } + return targetClient.SetTTL(diff.Document.Key, ttl) + case "list": + items, err := asRedisMigrationStringSlice(desired.Value) + if err != nil { + return err + } + if len(items) > 0 { + if err := targetClient.ListPush(diff.Document.Key, items...); err != nil { + return err + } + } + return targetClient.SetTTL(diff.Document.Key, ttl) + case "set": + items, err := asRedisMigrationStringSlice(desired.Value) + if err != nil { + return err + } + if len(items) > 0 { + if err := targetClient.SetAdd(diff.Document.Key, items...); err != nil { + return err + } + } + return targetClient.SetTTL(diff.Document.Key, ttl) + case "zset": + members, err := asRedisMigrationZSetMembers(desired.Value) + if err != nil { + return err + } + if len(members) > 0 { + if err := targetClient.ZSetAdd(diff.Document.Key, members...); err != nil { + return err + } + } + return targetClient.SetTTL(diff.Document.Key, ttl) + case "stream": + entries, err := asRedisMigrationStreamEntries(desired.Value) + if err != nil { + return err + } + for _, entry := range entries { + if _, err := targetClient.StreamAdd(diff.Document.Key, entry.Fields, entry.ID); err != nil { + return err + } + } + return targetClient.SetTTL(diff.Document.Key, ttl) + default: + return fmt.Errorf("暂不支持 Redis 类型 %s", redisType) + } +} + +func asRedisMigrationString(value interface{}) string { + switch typed := value.(type) { + case nil: + return "" + case string: + return typed + case []byte: + return string(typed) + default: + return fmt.Sprintf("%v", typed) + } +} + +func asRedisMigrationInt64(value interface{}, defaultValue int64) int64 { + switch typed := value.(type) { + case nil: + return defaultValue + case int: + return int64(typed) + case int8: + return int64(typed) + case int16: + return int64(typed) + case int32: + return int64(typed) + case int64: + return typed + case uint: + return int64(typed) + case uint8: + return int64(typed) + case uint16: + return int64(typed) + case uint32: + return int64(typed) + case uint64: + return int64(typed) + case float32: + return int64(typed) + case float64: + return int64(typed) + case json.Number: + if n, err := typed.Int64(); err == nil { + return n + } + case string: + if n, err := strconv.ParseInt(strings.TrimSpace(typed), 10, 64); err == nil { + return n + } + } + return defaultValue +} + +func asRedisMigrationFloat64(value interface{}) (float64, 
error) { + switch typed := value.(type) { + case float64: + return typed, nil + case float32: + return float64(typed), nil + case int: + return float64(typed), nil + case int8: + return float64(typed), nil + case int16: + return float64(typed), nil + case int32: + return float64(typed), nil + case int64: + return float64(typed), nil + case uint: + return float64(typed), nil + case uint8: + return float64(typed), nil + case uint16: + return float64(typed), nil + case uint32: + return float64(typed), nil + case uint64: + return float64(typed), nil + case json.Number: + return typed.Float64() + case string: + return strconv.ParseFloat(strings.TrimSpace(typed), 64) + default: + return 0, fmt.Errorf("无法转换为 float64: %T", value) + } +} + +func asRedisMigrationStringMap(value interface{}) (map[string]string, error) { + switch typed := value.(type) { + case nil: + return map[string]string{}, nil + case map[string]string: + result := make(map[string]string, len(typed)) + for k, v := range typed { + result[k] = v + } + return result, nil + case map[string]interface{}: + result := make(map[string]string, len(typed)) + for k, v := range typed { + result[k] = asRedisMigrationString(v) + } + return result, nil + default: + return nil, fmt.Errorf("期望对象,实际=%T", value) + } +} + +func asRedisMigrationStringSlice(value interface{}) ([]string, error) { + switch typed := value.(type) { + case nil: + return []string{}, nil + case []string: + result := append([]string(nil), typed...) + return result, nil + case []interface{}: + result := make([]string, 0, len(typed)) + for _, item := range typed { + result = append(result, asRedisMigrationString(item)) + } + return result, nil + default: + return nil, fmt.Errorf("期望数组,实际=%T", value) + } +} + +func asRedisMigrationZSetMembers(value interface{}) ([]redispkg.ZSetMember, error) { + switch typed := value.(type) { + case nil: + return []redispkg.ZSetMember{}, nil + case []redispkg.ZSetMember: + result := append([]redispkg.ZSetMember(nil), typed...) + return result, nil + case []interface{}: + result := make([]redispkg.ZSetMember, 0, len(typed)) + for _, item := range typed { + mapped, ok := item.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("zset 成员格式无效: %T", item) + } + score, err := asRedisMigrationFloat64(mapped["score"]) + if err != nil { + return nil, err + } + result = append(result, redispkg.ZSetMember{Member: asRedisMigrationString(mapped["member"]), Score: score}) + } + return result, nil + default: + return nil, fmt.Errorf("期望 zset 数组,实际=%T", value) + } +} + +func asRedisMigrationStreamEntries(value interface{}) ([]redispkg.StreamEntry, error) { + switch typed := value.(type) { + case nil: + return []redispkg.StreamEntry{}, nil + case []redispkg.StreamEntry: + result := append([]redispkg.StreamEntry(nil), typed...) 
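+		// Appending to a nil slice copies the entries, so callers keep an independent slice.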
+ return result, nil + case []interface{}: + result := make([]redispkg.StreamEntry, 0, len(typed)) + for _, item := range typed { + mapped, ok := item.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("stream 条目格式无效: %T", item) + } + fields, err := asRedisMigrationStringMap(mapped["fields"]) + if err != nil { + return nil, err + } + result = append(result, redispkg.StreamEntry{ID: asRedisMigrationString(mapped["id"]), Fields: fields}) + } + return result, nil + default: + return nil, fmt.Errorf("期望 stream 数组,实际=%T", value) + } +} + +func (s *SyncEngine) runMongoToRedisSync(config SyncConfig, result SyncResult) SyncResult { + collections := dedupeStrings(config.Tables) + sourceDB, err := newSyncDatabase(config.SourceConfig.Type) + if err != nil { + return s.fail(config.JobID, len(collections), result, "初始化源数据库驱动失败: "+err.Error()) + } + if err := sourceDB.Connect(config.SourceConfig); err != nil { + return s.fail(config.JobID, len(collections), result, "源 MongoDB 连接失败: "+err.Error()) + } + defer sourceDB.Close() + if len(collections) == 0 { + collections, err = listMongoRedisCollections(sourceDB, config) + if err != nil { + return s.fail(config.JobID, 0, result, "获取 MongoDB 集合列表失败: "+err.Error()) + } + } + if len(collections) == 0 { + result.Message = "未发现可迁移的 MongoDB 集合" + s.progress(config.JobID, 0, 0, "", "同步完成") + return result + } + + effectiveMode := normalizeSyncMode(config.Mode) + totalCollections := len(collections) + s.progress(config.JobID, 0, totalCollections, "", "开始 MongoDB 键空间迁移") + s.appendLog(config.JobID, &result, "info", fmt.Sprintf("MongoDB -> Redis 键空间迁移;模式:%s;目标:%s", effectiveMode, deriveRedisTargetLabel(config))) + s.appendLog(config.JobID, &result, "warn", "MongoDB -> Redis 第一版仅支持固定文档格式:key/type/ttl/value") + if effectiveMode == "full_overwrite" { + s.appendLog(config.JobID, &result, "warn", "MongoDB -> Redis 第一版暂不执行 Redis DB 级 full_overwrite 删除,已降级为 insert_update") + effectiveMode = "insert_update" + } + + targetClient := newRedisSourceClient() + targetConfig := withResolvedRedisDB(config.TargetConfig) + if err := targetClient.Connect(targetConfig); err != nil { + return s.fail(config.JobID, totalCollections, result, "目标 Redis 连接失败: "+err.Error()) + } + defer targetClient.Close() + + processedKeys := 0 + for idx, collection := range collections { + s.appendLog(config.JobID, &result, "info", fmt.Sprintf("正在同步集合: %s", collection)) + s.progress(config.JobID, idx, totalCollections, collection, fmt.Sprintf("迁移集合(%d/%d)", idx+1, totalCollections)) + diffs, err := buildMongoToRedisDiffs(sourceDB, targetClient, collection, effectiveMode) + if err != nil { + return s.fail(config.JobID, totalCollections, result, fmt.Sprintf("分析集合 %s 失败: %v", collection, err)) + } + for _, diff := range diffs { + processedKeys++ + if diff.Action == "same" { + continue + } + s.appendLog(config.JobID, &result, "info", fmt.Sprintf("正在迁移 Key: %s", diff.Document.Key)) + if err := applyMongoRedisDiff(targetClient, diff); err != nil { + return s.fail(config.JobID, totalCollections, result, fmt.Sprintf("写入 Redis Key %s 失败: %v", diff.Document.Key, err)) + } + switch diff.Action { + case "insert": + result.RowsInserted++ + case "update": + result.RowsUpdated++ + } + } + result.TablesSynced++ + s.progress(config.JobID, idx+1, totalCollections, collection, "集合处理完成") + } + + if processedKeys == 0 { + result.Message = "未发现可迁移的 MongoDB Redis 文档" + return result + } + result.Message = fmt.Sprintf("MongoDB 键空间迁移完成,共处理 %d 个集合、%d 个 Key", result.TablesSynced, processedKeys) + return result +} + +func (s 
*SyncEngine) analyzeMongoToRedis(config SyncConfig) SyncAnalyzeResult { + result := SyncAnalyzeResult{Success: true, Tables: []TableDiffSummary{}} + sourceDB, err := newSyncDatabase(config.SourceConfig.Type) + if err != nil { + return SyncAnalyzeResult{Success: false, Message: "初始化源数据库驱动失败: " + err.Error()} + } + if err := sourceDB.Connect(config.SourceConfig); err != nil { + return SyncAnalyzeResult{Success: false, Message: "源 MongoDB 连接失败: " + err.Error()} + } + defer sourceDB.Close() + + collections, err := listMongoRedisCollections(sourceDB, config) + if err != nil { + return SyncAnalyzeResult{Success: false, Message: "获取 MongoDB 集合列表失败: " + err.Error()} + } + + effectiveMode := normalizeSyncMode(config.Mode) + modeWarning := "" + if effectiveMode == "full_overwrite" { + modeWarning = "MongoDB -> Redis 第一版会将 full_overwrite 降级为 insert_update,避免误删 DB 内其他 Key" + effectiveMode = "insert_update" + } + + targetClient := newRedisSourceClient() + targetConfig := withResolvedRedisDB(config.TargetConfig) + if err := targetClient.Connect(targetConfig); err != nil { + return SyncAnalyzeResult{Success: false, Message: "目标 Redis 连接失败: " + err.Error()} + } + defer targetClient.Close() + + for _, collection := range collections { + summary := TableDiffSummary{ + Table: collection, + PKColumn: "key", + CanSync: true, + TargetTableExists: true, + PlannedAction: fmt.Sprintf("迁移到 %s", deriveRedisTargetLabel(config)), + Warnings: []string{ + "MongoDB 集合中的文档会按 keyspace 语义写入 Redis", + "当前仅支持固定文档格式:key/type/ttl/value", + }, + } + if modeWarning != "" { + summary.Warnings = append(summary.Warnings, modeWarning) + } + diffs, err := buildMongoToRedisDiffs(sourceDB, targetClient, collection, effectiveMode) + if err != nil { + summary.CanSync = false + summary.Message = err.Error() + result.Tables = append(result.Tables, summary) + continue + } + for _, diff := range diffs { + switch diff.Action { + case "insert": + summary.Inserts++ + case "update": + summary.Updates++ + default: + summary.Same++ + } + } + if summary.Inserts == 0 && summary.Updates == 0 { + if summary.Same == 0 { + summary.Message = "集合中未发现可迁移文档" + } else { + summary.Message = "目标 Redis 中对应 Key 已是最新状态" + } + } else { + summary.Message = fmt.Sprintf("执行时将写入 %d 个新 Key、更新 %d 个已有 Key", summary.Inserts, summary.Updates) + } + result.Tables = append(result.Tables, summary) + } + result.Message = fmt.Sprintf("已完成 %d 个 MongoDB 集合的 Redis 迁移分析", len(result.Tables)) + return result +} + +func (s *SyncEngine) previewMongoToRedis(config SyncConfig, collection string, limit int) (TableDiffPreview, error) { + sourceDB, err := newSyncDatabase(config.SourceConfig.Type) + if err != nil { + return TableDiffPreview{}, fmt.Errorf("初始化源数据库驱动失败: %w", err) + } + if err := sourceDB.Connect(config.SourceConfig); err != nil { + return TableDiffPreview{}, fmt.Errorf("源 MongoDB 连接失败: %w", err) + } + defer sourceDB.Close() + + targetClient := newRedisSourceClient() + targetConfig := withResolvedRedisDB(config.TargetConfig) + if err := targetClient.Connect(targetConfig); err != nil { + return TableDiffPreview{}, fmt.Errorf("目标 Redis 连接失败: %w", err) + } + defer targetClient.Close() + + effectiveMode := normalizeSyncMode(config.Mode) + if effectiveMode == "full_overwrite" { + effectiveMode = "insert_update" + } + + diffs, err := buildMongoToRedisDiffs(sourceDB, targetClient, collection, effectiveMode) + if err != nil { + return TableDiffPreview{}, err + } + preview := TableDiffPreview{Table: collection, PKColumn: "key", Inserts: []PreviewRow{}, Updates: []PreviewUpdateRow{}, 
Deletes: []PreviewRow{}}
+	for _, diff := range diffs {
+		switch diff.Action {
+		case "insert":
+			preview.TotalInserts++
+			if len(preview.Inserts) < limit {
+				preview.Inserts = append(preview.Inserts, PreviewRow{PK: diff.Document.Key, Row: diff.Document.SourceRow})
+			}
+		case "update":
+			preview.TotalUpdates++
+			if len(preview.Updates) < limit {
+				preview.Updates = append(preview.Updates, PreviewUpdateRow{PK: diff.Document.Key, ChangedColumns: diff.ChangedColumns, Source: diff.Document.SourceRow, Target: buildRedisPreviewRow(diff.Document.Key, diff.Current)})
+			}
+		}
+	}
+	return preview, nil
+}
diff --git a/internal/sync/migration_runtime_helpers.go b/internal/sync/migration_runtime_helpers.go
new file mode 100644
index 0000000..418080c
--- /dev/null
+++ b/internal/sync/migration_runtime_helpers.go
@@ -0,0 +1,56 @@
+package sync
+
+import (
+	"GoNavi-Wails/internal/connection"
+	"fmt"
+	"strings"
+)
+
+func supportsAutoAddColumnsForPair(sourceType string, targetType string) bool {
+	source := normalizeMigrationDBType(sourceType)
+	target := normalizeMigrationDBType(targetType)
+	if isMySQLLikeWritableTargetType(target) {
+		return isMySQLCoreType(source)
+	}
+	if isPGLikeTarget(target) {
+		return isMySQLLikeSourceType(source)
+	}
+	return false
+}
+
+func buildAddColumnSQLForPair(sourceType string, targetType string, targetQueryTable string, sourceCol connection.ColumnDefinition) (string, error) {
+	source := normalizeMigrationDBType(sourceType)
+	target := normalizeMigrationDBType(targetType)
+	switch {
+	case isMySQLCoreType(source) && isMySQLLikeWritableTargetType(target):
+		colType := sanitizeMySQLColumnType(sourceCol.Type)
+		return fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s %s NULL",
+			quoteQualifiedIdentByType("mysql", targetQueryTable),
+			quoteIdentByType("mysql", sourceCol.Name),
+			colType,
+		), nil
+	case isMySQLLikeSourceType(source) && isPGLikeTarget(target):
+		// 对已有目标表补字段时保守处理:忽略 identity 等映射告警,不补建自增语义。
+		colType, _, _ := mapMySQLColumnToKingbase(sourceCol)
+		return fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s %s NULL",
+			quoteQualifiedIdentByType(target, targetQueryTable),
+			quoteIdentByType(target, sourceCol.Name),
+			colType,
+		), nil
+	default:
+		return "", fmt.Errorf("当前不支持 source=%s target=%s 的自动补字段", sourceType, targetType)
+	}
+}
+
+func executeSQLStatements(execFn func(string) (int64, error), statements []string) error {
+	for _, stmt := range statements {
+		trimmed := strings.TrimSpace(stmt)
+		if trimmed == "" {
+			continue
+		}
+		if _, err := execFn(trimmed); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/internal/sync/migration_schema_inference.go b/internal/sync/migration_schema_inference.go
new file mode 100644
index 0000000..178ef4e
--- /dev/null
+++ b/internal/sync/migration_schema_inference.go
@@ -0,0 +1,53 @@
+package sync
+
+import (
+	"fmt"
+	"strings"
+)
+
+type SchemaInferenceStrategy string
+
+const (
+	SchemaInferenceStrategySample SchemaInferenceStrategy = "sample"
+	SchemaInferenceStrategyStrict SchemaInferenceStrategy = "strict"
+)
+
+func shouldUseSchemaInference(sourceType string, targetType string) bool {
+	sourceModel := classifyMigrationDataModel(sourceType)
+	targetModel := classifyMigrationDataModel(targetType)
+	return sourceModel == MigrationDataModelDocument && targetModel == MigrationDataModelRelational
+}
+
+func inferMigrationObjectKind(sourceType string, targetType string) MigrationObjectKind {
+	sourceModel := classifyMigrationDataModel(sourceType)
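+	// 两端数据模型决定迁移对象形态:文档型按集合、键值型按 keyspace、其余回落为表,见下方 switch 的优先级。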
+ targetModel := classifyMigrationDataModel(targetType) + switch { + case sourceModel == MigrationDataModelDocument || targetModel == MigrationDataModelDocument: + return MigrationObjectKindCollection + case sourceModel == MigrationDataModelKeyValue || targetModel == MigrationDataModelKeyValue: + return MigrationObjectKindKeyspace + default: + return MigrationObjectKindTable + } +} + +func inferSchemaForPair(sourceType string, targetType string, objectName string) (SchemaInferenceResult, error) { + if !shouldUseSchemaInference(sourceType, targetType) { + return SchemaInferenceResult{}, fmt.Errorf("当前迁移对 %s -> %s 不需要 schema 推断", sourceType, targetType) + } + return SchemaInferenceResult{ + Object: CanonicalObjectSpec{ + Name: strings.TrimSpace(objectName), + Kind: MigrationObjectKindCollection, + Fields: []CanonicalFieldSpec{}, + }, + Issues: []SchemaInferenceIssue{ + { + Level: "info", + Message: "MongoDB -> 关系型数据库的 schema 推断能力尚在建设中,当前仅提供内核入口。", + Resolution: "后续将基于样本数据生成列定义与类型降级策略。", + }, + }, + NeedsReview: true, + }, nil +} diff --git a/internal/sync/migration_tdengine.go b/internal/sync/migration_tdengine.go new file mode 100644 index 0000000..7e45e64 --- /dev/null +++ b/internal/sync/migration_tdengine.go @@ -0,0 +1,296 @@ +package sync + +import ( + "GoNavi-Wails/internal/connection" + "GoNavi-Wails/internal/db" + "fmt" + "strconv" + "strings" +) + +func buildTDengineToMySQLPlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + plan := SchemaMigrationPlan{} + sourceType := resolveMigrationDBType(config.SourceConfig) + targetType := resolveMigrationDBType(config.TargetConfig) + plan.SourceSchema, plan.SourceTable = normalizeSchemaAndTable(sourceType, config.SourceConfig.Database, tableName) + plan.TargetSchema, plan.TargetTable = normalizeSchemaAndTable(targetType, config.TargetConfig.Database, tableName) + plan.SourceQueryTable = qualifiedNameForQuery(sourceType, plan.SourceSchema, plan.SourceTable, tableName) + plan.TargetQueryTable = qualifiedNameForQuery(targetType, plan.TargetSchema, plan.TargetTable, tableName) + plan.PlannedAction = "使用已有目标表导入" + + sourceCols, sourceExists, err := inspectTableColumns(sourceDB, plan.SourceSchema, plan.SourceTable) + if err != nil { + return plan, nil, nil, fmt.Errorf("获取源表字段失败: %w", err) + } + if !sourceExists { + return plan, nil, nil, fmt.Errorf("源表不存在或无列定义: %s", tableName) + } + + targetCols, targetExists, err := inspectTableColumns(targetDB, plan.TargetSchema, plan.TargetTable) + if err != nil { + return plan, sourceCols, nil, fmt.Errorf("获取目标表字段失败: %w", err) + } + plan.TargetTableExists = targetExists + plan.Warnings = append(plan.Warnings, tdengineSemanticWarnings(sourceCols)...) 
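+	// 先统一汇总时序语义告警,再按目标表策略决定是校验已有表还是自动建表。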
+ + strategy := normalizeTargetTableStrategy(config.TargetTableStrategy) + if targetExists { + missing := diffMissingColumnNames(sourceCols, targetCols) + if len(missing) > 0 { + plan.Warnings = append(plan.Warnings, fmt.Sprintf("目标表缺失字段 %d 个:%s", len(missing), strings.Join(missing, ", "))) + } + if strategy != "existing_only" { + plan.Warnings = append(plan.Warnings, "TDengine 源端当前不自动补齐已有目标表字段,请先确认目标表结构") + } + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + } + + switch strategy { + case "existing_only": + plan.PlannedAction = "目标表不存在,需先手工创建" + plan.Warnings = append(plan.Warnings, "当前策略要求目标表已存在,执行时不会自动建表") + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + case "smart", "auto_create_if_missing": + plan.AutoCreate = true + plan.PlannedAction = "目标表不存在,将自动建表后导入" + createSQL, warnings, unsupported := buildTDengineToMySQLCreateTableSQL(plan.TargetQueryTable, sourceCols) + plan.CreateTableSQL = createSQL + plan.Warnings = append(plan.Warnings, warnings...) + plan.UnsupportedObjects = append(plan.UnsupportedObjects, unsupported...) + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + default: + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + } +} + +func buildTDengineToPGLikePlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + plan := SchemaMigrationPlan{} + sourceType := resolveMigrationDBType(config.SourceConfig) + targetType := resolveMigrationDBType(config.TargetConfig) + plan.SourceSchema, plan.SourceTable = normalizeSchemaAndTable(sourceType, config.SourceConfig.Database, tableName) + plan.TargetSchema, plan.TargetTable = normalizeSchemaAndTable(targetType, config.TargetConfig.Database, tableName) + plan.SourceQueryTable = qualifiedNameForQuery(sourceType, plan.SourceSchema, plan.SourceTable, tableName) + plan.TargetQueryTable = qualifiedNameForQuery(targetType, plan.TargetSchema, plan.TargetTable, tableName) + plan.PlannedAction = "使用已有目标表导入" + + sourceCols, sourceExists, err := inspectTableColumns(sourceDB, plan.SourceSchema, plan.SourceTable) + if err != nil { + return plan, nil, nil, fmt.Errorf("获取源表字段失败: %w", err) + } + if !sourceExists { + return plan, nil, nil, fmt.Errorf("源表不存在或无列定义: %s", tableName) + } + + targetCols, targetExists, err := inspectTableColumns(targetDB, plan.TargetSchema, plan.TargetTable) + if err != nil { + return plan, sourceCols, nil, fmt.Errorf("获取目标表字段失败: %w", err) + } + plan.TargetTableExists = targetExists + plan.Warnings = append(plan.Warnings, tdengineSemanticWarnings(sourceCols)...) 
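+	// 与 MySQL 目标分支流程一致:已有表仅校验缺失字段,缺表时按策略自动建表或要求手工创建。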
+ + strategy := normalizeTargetTableStrategy(config.TargetTableStrategy) + if targetExists { + missing := diffMissingColumnNames(sourceCols, targetCols) + if len(missing) > 0 { + plan.Warnings = append(plan.Warnings, fmt.Sprintf("目标表缺失字段 %d 个:%s", len(missing), strings.Join(missing, ", "))) + } + if strategy != "existing_only" { + plan.Warnings = append(plan.Warnings, "TDengine 源端当前不自动补齐已有目标表字段,请先确认目标表结构") + } + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + } + + switch strategy { + case "existing_only": + plan.PlannedAction = "目标表不存在,需先手工创建" + plan.Warnings = append(plan.Warnings, "当前策略要求目标表已存在,执行时不会自动建表") + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + case "smart", "auto_create_if_missing": + plan.AutoCreate = true + plan.PlannedAction = "目标表不存在,将自动建表后导入" + createSQL, warnings, unsupported := buildTDengineToPGLikeCreateTableSQL(targetType, plan.TargetQueryTable, sourceCols) + plan.CreateTableSQL = createSQL + plan.Warnings = append(plan.Warnings, warnings...) + plan.UnsupportedObjects = append(plan.UnsupportedObjects, unsupported...) + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + default: + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + } +} + +func buildTDengineToMySQLCreateTableSQL(targetQueryTable string, sourceCols []connection.ColumnDefinition) (string, []string, []string) { + columnDefs := make([]string, 0, len(sourceCols)) + warnings := make([]string, 0) + unsupported := []string{"TDengine 的索引/外键/触发器/超级表/TTL 等时序语义当前不会自动迁移"} + for _, col := range sourceCols { + def, colWarnings := buildTDengineToMySQLColumnDefinition(col) + columnDefs = append(columnDefs, fmt.Sprintf("%s %s", quoteIdentByType("mysql", col.Name), def)) + warnings = append(warnings, colWarnings...) + } + createSQL := fmt.Sprintf("CREATE TABLE %s (\n %s\n)", quoteQualifiedIdentByType("mysql", targetQueryTable), strings.Join(columnDefs, ",\n ")) + return createSQL, dedupeStrings(warnings), dedupeStrings(unsupported) +} + +func buildTDengineToPGLikeCreateTableSQL(targetType string, targetQueryTable string, sourceCols []connection.ColumnDefinition) (string, []string, []string) { + columnDefs := make([]string, 0, len(sourceCols)) + warnings := make([]string, 0) + unsupported := []string{"TDengine 的索引/外键/触发器/超级表/TTL 等时序语义当前不会自动迁移"} + for _, col := range sourceCols { + def, colWarnings := buildTDengineToPGLikeColumnDefinition(col) + columnDefs = append(columnDefs, fmt.Sprintf("%s %s", quoteIdentByType(targetType, col.Name), def)) + warnings = append(warnings, colWarnings...) 
+ } + createSQL := fmt.Sprintf("CREATE TABLE %s (\n %s\n)", quoteQualifiedIdentByType(targetType, targetQueryTable), strings.Join(columnDefs, ",\n ")) + return createSQL, dedupeStrings(warnings), dedupeStrings(unsupported) +} + +func buildTDengineToMySQLColumnDefinition(col connection.ColumnDefinition) (string, []string) { + targetType, warnings := mapTDengineColumnToMySQL(col) + parts := []string{targetType} + if strings.EqualFold(strings.TrimSpace(col.Nullable), "NO") { + parts = append(parts, "NOT NULL") + } else { + parts = append(parts, "NULL") + } + return strings.Join(parts, " "), warnings +} + +func buildTDengineToPGLikeColumnDefinition(col connection.ColumnDefinition) (string, []string) { + targetType, warnings := mapTDengineColumnToPGLike(col) + parts := []string{targetType} + if strings.EqualFold(strings.TrimSpace(col.Nullable), "NO") { + parts = append(parts, "NOT NULL") + } else { + parts = append(parts, "NULL") + } + return strings.Join(parts, " "), warnings +} + +func tdengineSemanticWarnings(sourceCols []connection.ColumnDefinition) []string { + warnings := []string{"TDengine 到关系型目标库当前仅迁移列与数据;超级表、TAG 关联、保留策略等时序语义会降级或丢失"} + for _, col := range sourceCols { + if isTDengineTagColumn(col) { + warnings = append(warnings, fmt.Sprintf("字段 %s 为 TDengine TAG 列,迁移到关系型目标后将降级为普通字段", col.Name)) + } + } + return dedupeStrings(warnings) +} + +func isTDengineTagColumn(col connection.ColumnDefinition) bool { + return strings.EqualFold(strings.TrimSpace(col.Key), "TAG") || strings.Contains(strings.ToUpper(strings.TrimSpace(col.Extra)), "TAG") +} + +func parseTDengineType(raw string) (string, int) { + cleaned := strings.TrimSpace(strings.ToUpper(raw)) + if cleaned == "" { + return "", 0 + } + base := cleaned + length := 0 + if idx := strings.Index(base, "("); idx >= 0 { + end := strings.Index(base[idx+1:], ")") + if end >= 0 { + lengthText := strings.TrimSpace(base[idx+1 : idx+1+end]) + if v, err := strconv.Atoi(lengthText); err == nil { + length = v + } + } + base = strings.TrimSpace(base[:idx]) + } + return base, length +} + +func mapTDengineColumnToMySQL(col connection.ColumnDefinition) (string, []string) { + base, length := parseTDengineType(col.Type) + warnings := make([]string, 0) + if isTDengineTagColumn(col) { + warnings = append(warnings, fmt.Sprintf("字段 %s 为 TDengine TAG 列,已按普通列映射", col.Name)) + } + switch base { + case "BOOL", "BOOLEAN": + return "tinyint(1)", warnings + case "TINYINT": + return "tinyint", warnings + case "UTINYINT": + return "tinyint unsigned", warnings + case "SMALLINT": + return "smallint", warnings + case "USMALLINT": + return "smallint unsigned", warnings + case "INT", "INTEGER": + return "int", warnings + case "UINT": + return "int unsigned", warnings + case "BIGINT": + return "bigint", warnings + case "UBIGINT": + return "bigint unsigned", warnings + case "FLOAT": + return "float", warnings + case "DOUBLE": + return "double", warnings + case "DECIMAL", "NUMERIC": + if length > 0 { + return strings.ToLower(strings.TrimSpace(col.Type)), warnings + } + return "decimal(38,10)", warnings + case "TIMESTAMP": + return "datetime", warnings + case "DATE": + return "date", warnings + case "JSON": + return "json", warnings + case "BINARY", "NCHAR", "VARCHAR", "VARBINARY": + if length > 0 && length <= 65535 { + return fmt.Sprintf("varchar(%d)", length), warnings + } + return "text", warnings + default: + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 暂无专门 MySQL 映射,已降级为 text", col.Name, col.Type)) + return "text", warnings + } +} + +func 
mapTDengineColumnToPGLike(col connection.ColumnDefinition) (string, []string) { + base, length := parseTDengineType(col.Type) + warnings := make([]string, 0) + if isTDengineTagColumn(col) { + warnings = append(warnings, fmt.Sprintf("字段 %s 为 TDengine TAG 列,已按普通列映射", col.Name)) + } + switch base { + case "BOOL", "BOOLEAN": + return "boolean", warnings + case "TINYINT", "UTINYINT", "SMALLINT": + return "smallint", warnings + case "USMALLINT", "INT", "INTEGER": + return "integer", warnings + case "UINT", "BIGINT": + return "bigint", warnings + case "UBIGINT": + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 UBIGINT 已映射为 numeric(20,0) 以避免无符号溢出", col.Name)) + return "numeric(20,0)", warnings + case "FLOAT": + return "real", warnings + case "DOUBLE": + return "double precision", warnings + case "DECIMAL", "NUMERIC": + if length > 0 { + return strings.ToLower(strings.TrimSpace(col.Type)), warnings + } + return "numeric(38,10)", warnings + case "TIMESTAMP": + return "timestamp", warnings + case "DATE": + return "date", warnings + case "JSON": + return "jsonb", warnings + case "BINARY", "NCHAR", "VARCHAR", "VARBINARY": + if length > 0 { + return fmt.Sprintf("varchar(%d)", length), warnings + } + return "text", warnings + default: + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 暂无专门 PG-like 映射,已降级为 text", col.Name, col.Type)) + return "text", warnings + } +} diff --git a/internal/sync/migration_tdengine_target.go b/internal/sync/migration_tdengine_target.go new file mode 100644 index 0000000..50a1839 --- /dev/null +++ b/internal/sync/migration_tdengine_target.go @@ -0,0 +1,657 @@ +package sync + +import ( + "GoNavi-Wails/internal/connection" + "GoNavi-Wails/internal/db" + "fmt" + "strconv" + "strings" +) + +type mySQLLikeToTDenginePlanner struct{} + +type pgLikeToTDenginePlanner struct{} + +type clickHouseToTDenginePlanner struct{} + +type tdengineToTDenginePlanner struct{} + +func (mySQLLikeToTDenginePlanner) Name() string { return "mysqllike-tdengine-planner" } + +func (mySQLLikeToTDenginePlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel { + sourceType := resolveMigrationDBType(ctx.Config.SourceConfig) + targetType := resolveMigrationDBType(ctx.Config.TargetConfig) + if isMySQLLikeSourceType(sourceType) && targetType == "tdengine" { + return MigrationSupportLevelFull + } + return MigrationSupportLevelUnsupported +} + +func (mySQLLikeToTDenginePlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + return buildMySQLLikeToTDenginePlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB) +} + +func (pgLikeToTDenginePlanner) Name() string { return "pglike-tdengine-planner" } + +func (pgLikeToTDenginePlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel { + sourceType := resolveMigrationDBType(ctx.Config.SourceConfig) + targetType := resolveMigrationDBType(ctx.Config.TargetConfig) + if isPGLikeSource(sourceType) && targetType == "tdengine" { + return MigrationSupportLevelFull + } + return MigrationSupportLevelUnsupported +} + +func (pgLikeToTDenginePlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + return buildPGLikeToTDenginePlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB) +} + +func buildMySQLLikeToTDenginePlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, 
[]connection.ColumnDefinition, error) { + return buildSourceToTDenginePlan(config, tableName, sourceDB, targetDB, isMySQLLikeTDengineTimestampCandidate, buildMySQLLikeToTDengineCreateTableSQL) +} + +func buildPGLikeToTDenginePlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + return buildSourceToTDenginePlan(config, tableName, sourceDB, targetDB, isPGLikeTDengineTimestampCandidate, buildPGLikeToTDengineCreateTableSQL) +} + +func buildClickHouseToTDenginePlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + return buildSourceToTDenginePlan(config, tableName, sourceDB, targetDB, isClickHouseTDengineTimestampCandidate, buildClickHouseToTDengineCreateTableSQL) +} + +func buildTDengineToTDenginePlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + return buildSourceToTDenginePlan(config, tableName, sourceDB, targetDB, isTDengineTDengineTimestampCandidate, buildTDengineToTDengineCreateTableSQL) +} + +func (clickHouseToTDenginePlanner) Name() string { return "clickhouse-tdengine-planner" } + +func (clickHouseToTDenginePlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel { + sourceType := resolveMigrationDBType(ctx.Config.SourceConfig) + targetType := resolveMigrationDBType(ctx.Config.TargetConfig) + if sourceType == "clickhouse" && targetType == "tdengine" { + return MigrationSupportLevelFull + } + return MigrationSupportLevelUnsupported +} + +func (clickHouseToTDenginePlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + return buildClickHouseToTDenginePlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB) +} + +func (tdengineToTDenginePlanner) Name() string { return "tdengine-tdengine-planner" } + +func (tdengineToTDenginePlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel { + sourceType := resolveMigrationDBType(ctx.Config.SourceConfig) + targetType := resolveMigrationDBType(ctx.Config.TargetConfig) + if sourceType == "tdengine" && targetType == "tdengine" { + return MigrationSupportLevelFull + } + return MigrationSupportLevelUnsupported +} + +func (tdengineToTDenginePlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + return buildTDengineToTDenginePlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB) +} + +type tdengineTimestampCandidate func(connection.ColumnDefinition) bool + +type tdengineCreateTableBuilder func(string, []connection.ColumnDefinition, int) (string, []string, []string) + +func buildSourceToTDenginePlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database, isTimestamp tdengineTimestampCandidate, buildCreateSQL tdengineCreateTableBuilder) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + plan := SchemaMigrationPlan{} + sourceType := resolveMigrationDBType(config.SourceConfig) + targetType := resolveMigrationDBType(config.TargetConfig) + plan.SourceSchema, plan.SourceTable = normalizeSchemaAndTable(sourceType, config.SourceConfig.Database, tableName) + plan.TargetSchema, plan.TargetTable = 
normalizeSchemaAndTable(targetType, config.TargetConfig.Database, tableName) + plan.SourceQueryTable = qualifiedNameForQuery(sourceType, plan.SourceSchema, plan.SourceTable, tableName) + plan.TargetQueryTable = qualifiedNameForQuery(targetType, plan.TargetSchema, plan.TargetTable, tableName) + plan.PlannedAction = "使用已有目标表导入" + + sourceCols, sourceExists, err := inspectTableColumns(sourceDB, plan.SourceSchema, plan.SourceTable) + if err != nil { + return plan, nil, nil, fmt.Errorf("获取源表字段失败: %w", err) + } + if !sourceExists { + return plan, nil, nil, fmt.Errorf("源表不存在或无列定义: %s", tableName) + } + + plan.Warnings = append(plan.Warnings, tdengineTargetBaseWarnings()...) + timestampIndex := findTDengineTimestampColumn(sourceCols, isTimestamp) + if timestampIndex < 0 { + plan.Warnings = append(plan.Warnings, tdengineTargetMissingTimeWarning()) + } + + targetCols, targetExists, err := inspectTableColumns(targetDB, plan.TargetSchema, plan.TargetTable) + if err != nil { + return plan, sourceCols, nil, fmt.Errorf("获取目标表字段失败: %w", err) + } + plan.TargetTableExists = targetExists + + strategy := normalizeTargetTableStrategy(config.TargetTableStrategy) + if targetExists { + missing := diffMissingColumnNames(sourceCols, targetCols) + if len(missing) > 0 { + plan.Warnings = append(plan.Warnings, fmt.Sprintf("目标表缺失字段 %d 个:%s", len(missing), strings.Join(missing, ", "))) + } + if strategy != "existing_only" { + plan.Warnings = append(plan.Warnings, "TDengine 目标端当前不自动补齐已有目标表字段,请先确认目标表结构") + } + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + } + + switch strategy { + case "existing_only": + plan.PlannedAction = "目标表不存在,需先手工创建" + plan.Warnings = append(plan.Warnings, "当前策略要求目标表已存在,执行时不会自动建表") + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + case "smart", "auto_create_if_missing": + if timestampIndex < 0 { + plan.PlannedAction = "源表未识别到可映射为 TDengine 首列的时间列,无法自动建表" + plan.UnsupportedObjects = append(plan.UnsupportedObjects, "TDengine regular table 首列必须为 TIMESTAMP,当前源表缺少可直接映射的时间列") + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + } + plan.AutoCreate = true + plan.PlannedAction = "目标表不存在,将自动建表后导入" + createSQL, warnings, unsupported := buildCreateSQL(plan.TargetQueryTable, sourceCols, timestampIndex) + plan.CreateTableSQL = createSQL + plan.Warnings = append(plan.Warnings, warnings...) + plan.UnsupportedObjects = append(plan.UnsupportedObjects, unsupported...) 
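+		// 建表语句与告警、不支持项在返回前由 dedupeSchemaMigrationPlan 统一去重。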
+ return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + default: + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + } +} + +func tdengineTargetBaseWarnings() []string { + return []string{ + "TDengine 目标端当前仅支持 INSERT 写入;若存在差异 update/delete,执行期会被拒绝", + "TDengine 目标端 auto-create 当前仅创建基础表;索引、外键、触发器、supertable/TAGS/TTL 不会自动迁移", + } +} + +func tdengineTargetMissingTimeWarning() string { + return "源表缺少可映射的时间列,自动建表将不可用;如需继续,请先人工准备 TDengine 目标表与时间列" +} + +func findTDengineTimestampColumn(sourceCols []connection.ColumnDefinition, candidate tdengineTimestampCandidate) int { + preferred := []string{"ts", "timestamp", "event_time", "eventtime", "created_at", "create_time", "occurred_at"} + for _, name := range preferred { + for idx, col := range sourceCols { + if !candidate(col) { + continue + } + if strings.EqualFold(strings.TrimSpace(col.Name), name) { + return idx + } + } + } + for idx, col := range sourceCols { + if candidate(col) { + return idx + } + } + return -1 +} + +func reorderTDengineColumns(sourceCols []connection.ColumnDefinition, timestampIndex int) []connection.ColumnDefinition { + if timestampIndex <= 0 || timestampIndex >= len(sourceCols) { + cloned := make([]connection.ColumnDefinition, len(sourceCols)) + copy(cloned, sourceCols) + return cloned + } + ordered := make([]connection.ColumnDefinition, 0, len(sourceCols)) + ordered = append(ordered, sourceCols[timestampIndex]) + for idx, col := range sourceCols { + if idx == timestampIndex { + continue + } + ordered = append(ordered, col) + } + return ordered +} + +func buildMySQLLikeToTDengineCreateTableSQL(targetQueryTable string, sourceCols []connection.ColumnDefinition, timestampIndex int) (string, []string, []string) { + ordered := reorderTDengineColumns(sourceCols, timestampIndex) + columnDefs := make([]string, 0, len(ordered)) + warnings := make([]string, 0) + unsupported := []string{"源表索引/外键/触发器/唯一约束/自增语义当前不会自动迁移到 TDengine"} + if timestampIndex != 0 && timestampIndex >= 0 && timestampIndex < len(sourceCols) { + warnings = append(warnings, fmt.Sprintf("TDengine 基础表要求时间列优先,已将字段 %s 调整为首列", sourceCols[timestampIndex].Name)) + } + for idx, col := range ordered { + def, colWarnings := mapMySQLLikeColumnToTDengine(col, idx == 0) + warnings = append(warnings, colWarnings...) + columnDefs = append(columnDefs, fmt.Sprintf("%s %s", quoteIdentByType("tdengine", col.Name), def)) + } + createSQL := fmt.Sprintf("CREATE TABLE %s (\n %s\n)", quoteQualifiedIdentByType("tdengine", targetQueryTable), strings.Join(columnDefs, ",\n ")) + return createSQL, dedupeStrings(warnings), dedupeStrings(unsupported) +} + +func buildPGLikeToTDengineCreateTableSQL(targetQueryTable string, sourceCols []connection.ColumnDefinition, timestampIndex int) (string, []string, []string) { + ordered := reorderTDengineColumns(sourceCols, timestampIndex) + columnDefs := make([]string, 0, len(ordered)) + warnings := make([]string, 0) + unsupported := []string{"源表索引/外键/触发器/唯一约束/identity/sequence 语义当前不会自动迁移到 TDengine"} + if timestampIndex != 0 && timestampIndex >= 0 && timestampIndex < len(sourceCols) { + warnings = append(warnings, fmt.Sprintf("TDengine 基础表要求时间列优先,已将字段 %s 调整为首列", sourceCols[timestampIndex].Name)) + } + for idx, col := range ordered { + def, colWarnings := mapPGLikeColumnToTDengine(col, idx == 0) + warnings = append(warnings, colWarnings...) 
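+		// 列名统一通过 quoteIdentByType 包裹,与其他建表分支保持一致。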
+ columnDefs = append(columnDefs, fmt.Sprintf("%s %s", quoteIdentByType("tdengine", col.Name), def)) + } + createSQL := fmt.Sprintf("CREATE TABLE %s (\n %s\n)", quoteQualifiedIdentByType("tdengine", targetQueryTable), strings.Join(columnDefs, ",\n ")) + return createSQL, dedupeStrings(warnings), dedupeStrings(unsupported) +} + +func buildClickHouseToTDengineCreateTableSQL(targetQueryTable string, sourceCols []connection.ColumnDefinition, timestampIndex int) (string, []string, []string) { + ordered := reorderTDengineColumns(sourceCols, timestampIndex) + columnDefs := make([]string, 0, len(ordered)) + warnings := make([]string, 0) + unsupported := []string{"源表 ORDER BY/PARTITION/TTL/Projection/物化视图 语义当前不会自动迁移到 TDengine"} + if timestampIndex != 0 && timestampIndex >= 0 && timestampIndex < len(sourceCols) { + warnings = append(warnings, fmt.Sprintf("TDengine 基础表要求时间列优先,已将字段 %s 调整为首列", sourceCols[timestampIndex].Name)) + } + for idx, col := range ordered { + def, colWarnings := mapClickHouseColumnToTDengine(col, idx == 0) + warnings = append(warnings, colWarnings...) + columnDefs = append(columnDefs, fmt.Sprintf("%s %s", quoteIdentByType("tdengine", col.Name), def)) + } + createSQL := fmt.Sprintf("CREATE TABLE %s (\n %s\n)", quoteQualifiedIdentByType("tdengine", targetQueryTable), strings.Join(columnDefs, ",\n ")) + return createSQL, dedupeStrings(warnings), dedupeStrings(unsupported) +} + +func buildTDengineToTDengineCreateTableSQL(targetQueryTable string, sourceCols []connection.ColumnDefinition, timestampIndex int) (string, []string, []string) { + ordered := reorderTDengineColumns(sourceCols, timestampIndex) + columnDefs := make([]string, 0, len(ordered)) + warnings := make([]string, 0) + unsupported := []string{"源表 supertable/TAGS/TTL/保留策略/索引 语义当前不会自动迁移到 TDengine regular table"} + if timestampIndex != 0 && timestampIndex >= 0 && timestampIndex < len(sourceCols) { + warnings = append(warnings, fmt.Sprintf("TDengine 基础表要求时间列优先,已将字段 %s 调整为首列", sourceCols[timestampIndex].Name)) + } + for idx, col := range ordered { + def, colWarnings := mapTDengineColumnToTDengine(col, idx == 0) + warnings = append(warnings, colWarnings...) 
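+		// 同库复制同样逐列映射:TAG 列与 JSON 列写入 regular table 时会降级(普通列 / VARCHAR)。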
+		columnDefs = append(columnDefs, fmt.Sprintf("%s %s", quoteIdentByType("tdengine", col.Name), def))
+	}
+	createSQL := fmt.Sprintf("CREATE TABLE %s (\n %s\n)", quoteQualifiedIdentByType("tdengine", targetQueryTable), strings.Join(columnDefs, ",\n "))
+	return createSQL, dedupeStrings(warnings), dedupeStrings(unsupported)
+}
+
+func isMySQLLikeTDengineTimestampCandidate(col connection.ColumnDefinition) bool {
+	raw := strings.ToLower(strings.TrimSpace(col.Type))
+	clean := strings.ReplaceAll(raw, " unsigned", "")
+	clean = strings.ReplaceAll(clean, " zerofill", "")
+	return strings.HasPrefix(clean, "timestamp") || strings.HasPrefix(clean, "datetime")
+}
+
+func isPGLikeTDengineTimestampCandidate(col connection.ColumnDefinition) bool {
+	raw := strings.ToLower(strings.TrimSpace(col.Type))
+	return strings.HasPrefix(raw, "timestamp")
+}
+
+func isClickHouseTDengineTimestampCandidate(col connection.ColumnDefinition) bool {
+	lower, _ := unwrapClickHouseTDengineType(col.Type)
+	return strings.HasPrefix(lower, "datetime")
+}
+
+func isTDengineTDengineTimestampCandidate(col connection.ColumnDefinition) bool {
+	base, _ := parseTDengineType(col.Type)
+	return base == "TIMESTAMP"
+}
+
+func mapMySQLLikeColumnToTDengine(col connection.ColumnDefinition, forceTimestamp bool) (string, []string) {
+	warnings := make([]string, 0)
+	if forceTimestamp {
+		if !isMySQLLikeTDengineTimestampCandidate(col) {
+			warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已提升为 TDengine 首列 TIMESTAMP", col.Name, col.Type))
+		}
+		return "TIMESTAMP", warnings
+	}
+
+	raw := strings.ToLower(strings.TrimSpace(col.Type))
+	if raw == "" {
+		return "VARCHAR(1024)", []string{fmt.Sprintf("字段 %s 类型为空,已降级为 VARCHAR(1024)", col.Name)}
+	}
+	unsigned := strings.Contains(raw, "unsigned")
+	clean := strings.ReplaceAll(raw, " unsigned", "")
+	clean = strings.ReplaceAll(clean, " zerofill", "")
+	isAutoIncrement := strings.Contains(strings.ToLower(strings.TrimSpace(col.Extra)), "auto_increment")
+	if isAutoIncrement {
+		warnings = append(warnings, fmt.Sprintf("字段 %s 自增语义不会迁移到 TDengine", col.Name))
+	}
+	if col.Key == "PRI" || col.Key == "PK" {
+		warnings = append(warnings, fmt.Sprintf("字段 %s 主键语义不会按关系型约束迁移到 TDengine", col.Name))
+	}
+
+	switch {
+	case strings.HasPrefix(clean, "tinyint(1)") && !unsigned && !isAutoIncrement:
+		return "BOOL", warnings
+	case strings.HasPrefix(clean, "tinyint"):
+		if unsigned {
+			return "UTINYINT", warnings
+		}
+		return "TINYINT", warnings
+	case strings.HasPrefix(clean, "smallint"):
+		if unsigned {
+			return "USMALLINT", warnings
+		}
+		return "SMALLINT", warnings
+	case strings.HasPrefix(clean, "mediumint"), strings.HasPrefix(clean, "int"), strings.HasPrefix(clean, "integer"):
+		if unsigned {
+			return "UINT", warnings
+		}
+		return "INT", warnings
+	case strings.HasPrefix(clean, "bigint"):
+		if unsigned {
+			return "UBIGINT", warnings
+		}
+		return "BIGINT", warnings
+	case strings.HasPrefix(clean, "decimal"), strings.HasPrefix(clean, "numeric"):
+		return normalizeTDengineDecimalType(clean), warnings
+	case strings.HasPrefix(clean, "float"):
+		return "FLOAT", warnings
+	case strings.HasPrefix(clean, "double"):
+		return "DOUBLE", warnings
+	// 注意先匹配 timestamp/datetime,再匹配 date,避免 datetime 被 date 前缀误判并产生降级告警。
+	case strings.HasPrefix(clean, "timestamp"), strings.HasPrefix(clean, "datetime"):
+		return "TIMESTAMP", warnings
+	case strings.HasPrefix(clean, "date"):
+		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 date 已降级映射为 TIMESTAMP", col.Name))
+		return "TIMESTAMP", warnings
+	case strings.HasPrefix(clean, "time"):
+		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 暂无稳定 TDengine 时间-only 映射,已降级为 VARCHAR(64)", col.Name, col.Type))
+		return "VARCHAR(64)", warnings
+	case strings.HasPrefix(clean, "char("), strings.HasPrefix(clean, "varchar("):
+		return fmt.Sprintf("VARCHAR(%d)", normalizeTDengineVarcharLength(extractFirstTypeLength(clean), 255)), warnings
+	case strings.HasPrefix(clean, "tinytext"), strings.HasPrefix(clean, "text"), strings.HasPrefix(clean, "mediumtext"), strings.HasPrefix(clean, "longtext"):
+		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已降级为 VARCHAR(4096)", col.Name, col.Type))
+		return "VARCHAR(4096)", warnings
+	case strings.HasPrefix(clean, "json"):
+		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 因 TDengine JSON 仅适用于 TAG,已降级为 VARCHAR(4096)", col.Name, col.Type))
+		return "VARCHAR(4096)", warnings
+	case strings.HasPrefix(clean, "enum"), strings.HasPrefix(clean, "set"):
+		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已降级为 VARCHAR(255)", col.Name, col.Type))
+		return "VARCHAR(255)", warnings
+	case strings.HasPrefix(clean, "binary"), strings.HasPrefix(clean, "varbinary"), strings.HasPrefix(clean, "tinyblob"), strings.HasPrefix(clean, "blob"), strings.HasPrefix(clean, "mediumblob"), strings.HasPrefix(clean, "longblob"):
+		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已按字符串语义降级为 VARCHAR(4096)", col.Name, col.Type))
+		return "VARCHAR(4096)", warnings
+	default:
+		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 暂无专门 TDengine 映射,已降级为 VARCHAR(1024)", col.Name, col.Type))
+		return "VARCHAR(1024)", warnings
+	}
+}
+
+func mapPGLikeColumnToTDengine(col connection.ColumnDefinition, forceTimestamp bool) (string, []string) {
+	warnings := make([]string, 0)
+	if forceTimestamp {
+		if raw := strings.ToLower(strings.TrimSpace(col.Type)); !strings.HasPrefix(raw, "timestamp") {
+			warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已提升为 TDengine 首列 TIMESTAMP", col.Name, col.Type))
+		}
+		return "TIMESTAMP", warnings
+	}
+
+	raw := strings.ToLower(strings.TrimSpace(col.Type))
+	if raw == "" {
+		return "VARCHAR(1024)", []string{fmt.Sprintf("字段 %s 类型为空,已降级为 VARCHAR(1024)", col.Name)}
+	}
+	if col.Key == "PRI" || col.Key == "PK" {
+		warnings = append(warnings, fmt.Sprintf("字段 %s 主键语义不会按关系型约束迁移到 TDengine", col.Name))
+	}
+	if strings.Contains(strings.ToLower(strings.TrimSpace(col.Extra)), "identity") || strings.Contains(strings.ToLower(strings.TrimSpace(col.Extra)), "auto_increment") {
+		warnings = append(warnings, fmt.Sprintf("字段 %s 自增/identity 语义不会迁移到 TDengine", col.Name))
+	}
+
+	switch {
+	case raw == "boolean" || strings.HasPrefix(raw, "bool"):
+		return "BOOL", warnings
+	case raw == "smallint":
+		return "SMALLINT", warnings
+	case raw == "integer" || raw == "int4":
+		return "INT", warnings
+	case raw == "bigint" || raw == "int8":
+		return "BIGINT", warnings
+	case strings.HasPrefix(raw, "numeric"), strings.HasPrefix(raw, "decimal"):
+		return normalizeTDengineDecimalType(raw), warnings
+	case raw == "real" || raw == "float4":
+		return "FLOAT", warnings
+	case raw == "double precision" || raw == "float8":
+		return "DOUBLE", warnings
+	case raw == "date":
+		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 date 已降级映射为 TIMESTAMP", col.Name))
+		return "TIMESTAMP", warnings
+	case strings.HasPrefix(raw, "timestamp"):
+		return "TIMESTAMP", warnings
+	case strings.HasPrefix(raw, "time"):
+		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 暂无稳定 TDengine 时间-only 映射,已降级为 VARCHAR(64)", col.Name, col.Type))
+		return "VARCHAR(64)", warnings
+	case strings.HasPrefix(raw, "character varying("), strings.HasPrefix(raw, "varchar("),
strings.HasPrefix(raw, "character("), strings.HasPrefix(raw, "char("): + return fmt.Sprintf("VARCHAR(%d)", normalizeTDengineVarcharLength(extractFirstTypeLength(raw), 255)), warnings + case raw == "text": + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 text 已降级为 VARCHAR(4096)", col.Name)) + return "VARCHAR(4096)", warnings + case raw == "uuid": + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 uuid 已降级为 VARCHAR(36)", col.Name)) + return "VARCHAR(36)", warnings + case raw == "json" || raw == "jsonb": + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 因 TDengine JSON 仅适用于 TAG,已降级为 VARCHAR(4096)", col.Name, col.Type)) + return "VARCHAR(4096)", warnings + case raw == "bytea": + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 bytea 已按字符串语义降级为 VARCHAR(4096)", col.Name)) + return "VARCHAR(4096)", warnings + case strings.HasSuffix(raw, "[]") || strings.HasPrefix(raw, "array"): + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已降级为 VARCHAR(4096)", col.Name, col.Type)) + return "VARCHAR(4096)", warnings + case raw == "user-defined": + warnings = append(warnings, fmt.Sprintf("字段 %s 为用户自定义类型,已降级为 VARCHAR(1024)", col.Name)) + return "VARCHAR(1024)", warnings + default: + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 暂无专门 TDengine 映射,已降级为 VARCHAR(1024)", col.Name, col.Type)) + return "VARCHAR(1024)", warnings + } +} + +func mapClickHouseColumnToTDengine(col connection.ColumnDefinition, forceTimestamp bool) (string, []string) { + warnings := make([]string, 0) + if forceTimestamp { + if !isClickHouseTDengineTimestampCandidate(col) { + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已提升为 TDengine 首列 TIMESTAMP", col.Name, col.Type)) + } + return "TIMESTAMP", warnings + } + + lower, _ := unwrapClickHouseTDengineType(col.Type) + if lower == "" { + return "VARCHAR(1024)", []string{fmt.Sprintf("字段 %s 类型为空,已降级为 VARCHAR(1024)", col.Name)} + } + + switch { + case lower == "bool" || lower == "boolean": + return "BOOL", warnings + case lower == "int8": + return "TINYINT", warnings + case lower == "uint8": + return "UTINYINT", warnings + case lower == "int16": + return "SMALLINT", warnings + case lower == "uint16": + return "USMALLINT", warnings + case lower == "int32": + return "INT", warnings + case lower == "uint32": + return "UINT", warnings + case lower == "int64": + return "BIGINT", warnings + case lower == "uint64": + return "UBIGINT", warnings + case lower == "float32": + return "FLOAT", warnings + case lower == "float64": + return "DOUBLE", warnings + case lower == "date": + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 date 已降级映射为 TIMESTAMP", col.Name)) + return "TIMESTAMP", warnings + case strings.HasPrefix(lower, "datetime"): + return "TIMESTAMP", warnings + case lower == "string": + return "VARCHAR(1024)", warnings + case lower == "uuid": + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 uuid 已降级为 VARCHAR(36)", col.Name)) + return "VARCHAR(36)", warnings + case lower == "json", strings.HasPrefix(lower, "map("), strings.HasPrefix(lower, "array("), strings.HasPrefix(lower, "tuple("), strings.HasPrefix(lower, "nested("): + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已降级为 VARCHAR(4096)", col.Name, col.Type)) + return "VARCHAR(4096)", warnings + case strings.HasPrefix(lower, "enum8("), strings.HasPrefix(lower, "enum16("): + warnings = append(warnings, fmt.Sprintf("字段 %s 枚举类型 %s 已降级为 VARCHAR(255)", col.Name, col.Type)) + return "VARCHAR(255)", warnings + case clickHouseDecimalPattern.MatchString(lower): + parts := clickHouseDecimalPattern.FindStringSubmatch(lower) + 
return fmt.Sprintf("DECIMAL(%s,%s)", parts[2], parts[3]), warnings
+	case clickHouseStringArgsPattern.MatchString(lower):
+		parts := clickHouseStringArgsPattern.FindStringSubmatch(lower)
+		length, err := strconv.Atoi(parts[1])
+		if err != nil {
+			warnings = append(warnings, fmt.Sprintf("字段 %s FixedString 长度解析失败,已降级为 VARCHAR(255)", col.Name))
+			return "VARCHAR(255)", warnings
+		}
+		return fmt.Sprintf("VARCHAR(%d)", normalizeTDengineVarcharLength(length, 255)), warnings
+	default:
+		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 暂无专门 TDengine 映射,已降级为 VARCHAR(1024)", col.Name, col.Type))
+		return "VARCHAR(1024)", warnings
+	}
+}
+
+func mapTDengineColumnToTDengine(col connection.ColumnDefinition, forceTimestamp bool) (string, []string) {
+	warnings := make([]string, 0)
+	if forceTimestamp {
+		if !isTDengineTDengineTimestampCandidate(col) {
+			warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已提升为 TDengine 首列 TIMESTAMP", col.Name, col.Type))
+		}
+		return "TIMESTAMP", warnings
+	}
+
+	base, length := parseTDengineType(col.Type)
+	if base == "" {
+		return "VARCHAR(1024)", []string{fmt.Sprintf("字段 %s 类型为空,已降级为 VARCHAR(1024)", col.Name)}
+	}
+	if isTDengineTagColumn(col) {
+		warnings = append(warnings, fmt.Sprintf("字段 %s 为 TDengine TAG 列,迁移到 regular table 后将降级为普通字段", col.Name))
+	}
+
+	switch base {
+	case "BOOL", "BOOLEAN":
+		return "BOOL", warnings
+	case "TINYINT":
+		return "TINYINT", warnings
+	case "UTINYINT":
+		return "UTINYINT", warnings
+	case "SMALLINT":
+		return "SMALLINT", warnings
+	case "USMALLINT":
+		return "USMALLINT", warnings
+	case "INT", "INTEGER":
+		return "INT", warnings
+	case "UINT":
+		return "UINT", warnings
+	case "BIGINT":
+		return "BIGINT", warnings
+	case "UBIGINT":
+		return "UBIGINT", warnings
+	case "FLOAT":
+		return "FLOAT", warnings
+	case "DOUBLE":
+		return "DOUBLE", warnings
+	case "DECIMAL", "NUMERIC":
+		return normalizeTDengineDecimalType(col.Type), warnings
+	case "TIMESTAMP":
+		return "TIMESTAMP", warnings
+	case "DATE":
+		return "DATE", warnings
+	case "JSON":
+		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 JSON 在 TDengine regular table 中不保留 TAG 语义,已降级为 VARCHAR(4096)", col.Name))
+		return "VARCHAR(4096)", warnings
+	case "BINARY", "NCHAR", "VARCHAR", "VARBINARY":
+		if length > 0 {
+			return fmt.Sprintf("%s(%d)", base, normalizeTDengineVarcharLength(length, length)), warnings
+		}
+		fallback := 255
+		if base == "VARCHAR" {
+			fallback = 1024
+		}
+		return fmt.Sprintf("%s(%d)", base, fallback), warnings
+	default:
+		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 暂无专门 TDengine 同库映射,已降级为 VARCHAR(1024)", col.Name, col.Type))
+		return "VARCHAR(1024)", warnings
+	}
+}
+
+func unwrapClickHouseTDengineType(raw string) (string, bool) {
+	text := strings.TrimSpace(raw)
+	lower := strings.ToLower(text)
+	nullable := false
+	for {
+		switched := false
+		if strings.HasPrefix(lower, "nullable(") && strings.HasSuffix(lower, ")") {
+			text = strings.TrimSpace(text[len("Nullable(") : len(text)-1])
+			lower = strings.ToLower(text)
+			nullable = true
+			switched = true
+		}
+		if strings.HasPrefix(lower, "lowcardinality(") && strings.HasSuffix(lower, ")") {
+			text = strings.TrimSpace(text[len("LowCardinality(") : len(text)-1])
+			lower = strings.ToLower(text)
+			switched = true
+		}
+		if !switched {
+			break
+		}
+	}
+	return lower, nullable
+}
+
+func normalizeTDengineDecimalType(raw string) string {
+	text := strings.TrimSpace(raw)
+	if text == "" {
+		return "DECIMAL(38,10)"
+	}
+	lower := strings.ToLower(text)
+	if !strings.Contains(lower, "(") {
+		// 裸 numeric/decimal 未携带精度,统一补全默认精度,避免生成缺少精度参数的 TDengine DECIMAL 定义。
+		return "DECIMAL(38,10)"
+	}
+	if strings.HasPrefix(lower, "numeric") {
+		return "DECIMAL" +
text[len("numeric"):] + } + if strings.HasPrefix(lower, "decimal") { + return "DECIMAL" + text[len("decimal"):] + } + return "DECIMAL(38,10)" +} + +func normalizeTDengineVarcharLength(length int, fallback int) int { + if fallback <= 0 { + fallback = 255 + } + if length <= 0 { + return fallback + } + if length > 16384 { + return 16384 + } + return length +} + +func extractFirstTypeLength(raw string) int { + start := strings.Index(raw, "(") + if start < 0 { + return 0 + } + end := strings.Index(raw[start+1:], ")") + if end < 0 { + return 0 + } + inside := strings.TrimSpace(raw[start+1 : start+1+end]) + if inside == "" { + return 0 + } + parts := strings.SplitN(inside, ",", 2) + length, err := strconv.Atoi(strings.TrimSpace(parts[0])) + if err != nil { + return 0 + } + return length +} diff --git a/internal/sync/migration_type_resolver.go b/internal/sync/migration_type_resolver.go new file mode 100644 index 0000000..937e2d7 --- /dev/null +++ b/internal/sync/migration_type_resolver.go @@ -0,0 +1,98 @@ +package sync + +import ( + "GoNavi-Wails/internal/connection" + "strings" +) + +func normalizeMigrationDBType(dbType string) string { + normalized := strings.ToLower(strings.TrimSpace(dbType)) + switch normalized { + case "doris": + return "diros" + case "postgresql": + return "postgres" + case "dm", "dm8": + return "dameng" + case "sqlite3": + return "sqlite" + default: + return normalized + } +} + +func resolveMigrationDBType(config connection.ConnectionConfig) string { + dbType := normalizeMigrationDBType(config.Type) + if dbType != "custom" { + return dbType + } + + driver := strings.ToLower(strings.TrimSpace(config.Driver)) + switch driver { + case "postgresql", "postgres", "pg", "pq", "pgx": + return "postgres" + case "dm", "dameng", "dm8": + return "dameng" + case "sqlite3", "sqlite": + return "sqlite" + case "sphinxql": + return "sphinx" + case "diros", "doris": + return "diros" + case "kingbase", "kingbase8", "kingbasees", "kingbasev8": + return "kingbase" + case "highgo": + return "highgo" + case "vastbase": + return "vastbase" + case "mysql", "mysql2": + return "mysql" + case "mariadb": + return "mariadb" + } + + switch { + case strings.Contains(driver, "postgres"): + return "postgres" + case strings.Contains(driver, "kingbase"): + return "kingbase" + case strings.Contains(driver, "highgo"): + return "highgo" + case strings.Contains(driver, "vastbase"): + return "vastbase" + case strings.Contains(driver, "sqlite"): + return "sqlite" + case strings.Contains(driver, "sphinx"): + return "sphinx" + case strings.Contains(driver, "diros"), strings.Contains(driver, "doris"): + return "diros" + case strings.Contains(driver, "maria"): + return "mariadb" + case strings.Contains(driver, "mysql"): + return "mysql" + case strings.Contains(driver, "dameng"), strings.Contains(driver, "dm"): + return "dameng" + default: + return normalizeMigrationDBType(driver) + } +} + +func isMySQLCoreType(dbType string) bool { + switch normalizeMigrationDBType(dbType) { + case "mysql", "mariadb", "diros": + return true + default: + return false + } +} + +func isMySQLLikeSourceType(dbType string) bool { + if isMySQLCoreType(dbType) { + return true + } + return normalizeMigrationDBType(dbType) == "sphinx" +} + +func isMySQLLikeWritableTargetType(dbType string) bool { + return isMySQLCoreType(dbType) +} diff --git a/internal/sync/preview.go b/internal/sync/preview.go index 7cec537..2ce6434 100644 --- a/internal/sync/preview.go +++ b/internal/sync/preview.go @@ -1,7 +1,7 @@ package sync import ( - 
"GoNavi-Wails/internal/db" + "errors" "fmt" "strings" ) @@ -36,12 +36,18 @@ func (s *SyncEngine) Preview(config SyncConfig, tableName string, limit int) (Ta if limit > 500 { limit = 500 } + if isRedisToMongoKeyspacePair(config) { + return s.previewRedisToMongo(config, tableName, limit) + } + if isMongoToRedisKeyspacePair(config) { + return s.previewMongoToRedis(config, tableName, limit) + } - sourceDB, err := db.NewDatabase(config.SourceConfig.Type) + sourceDB, err := newSyncDatabase(config.SourceConfig.Type) if err != nil { return TableDiffPreview{}, fmt.Errorf("初始化源数据库驱动失败: %w", err) } - targetDB, err := db.NewDatabase(config.TargetConfig.Type) + targetDB, err := newSyncDatabase(config.TargetConfig.Type) if err != nil { return TableDiffPreview{}, fmt.Errorf("初始化目标数据库驱动失败: %w", err) } @@ -56,14 +62,12 @@ func (s *SyncEngine) Preview(config SyncConfig, tableName string, limit int) (Ta } defer targetDB.Close() - sourceSchema, sourceTable := normalizeSchemaAndTable(config.SourceConfig.Type, config.SourceConfig.Database, tableName) - targetSchema, targetTable := normalizeSchemaAndTable(config.TargetConfig.Type, config.TargetConfig.Database, tableName) - sourceQueryTable := qualifiedNameForQuery(config.SourceConfig.Type, sourceSchema, sourceTable, tableName) - targetQueryTable := qualifiedNameForQuery(config.TargetConfig.Type, targetSchema, targetTable, tableName) - - cols, err := sourceDB.GetColumns(sourceSchema, sourceTable) + plan, cols, _, err := buildSchemaMigrationPlan(config, tableName, sourceDB, targetDB) if err != nil { - return TableDiffPreview{}, fmt.Errorf("获取源表字段失败: %w", err) + return TableDiffPreview{}, err + } + if !plan.TargetTableExists && !plan.AutoCreate { + return TableDiffPreview{}, errors.New(firstNonEmpty(plan.PlannedAction, "目标表不存在,无法预览差异")) } pkCols := make([]string, 0, 2) @@ -80,13 +84,17 @@ func (s *SyncEngine) Preview(config SyncConfig, tableName string, limit int) (Ta } pkCol := pkCols[0] - sourceRows, _, err := sourceDB.Query(fmt.Sprintf("SELECT * FROM %s", quoteQualifiedIdentByType(config.SourceConfig.Type, sourceQueryTable))) + sourceRows, _, err := sourceDB.Query(fmt.Sprintf("SELECT * FROM %s", quoteQualifiedIdentByType(resolveMigrationDBType(config.SourceConfig), plan.SourceQueryTable))) if err != nil { return TableDiffPreview{}, fmt.Errorf("读取源表失败: %w", err) } - targetRows, _, err := targetDB.Query(fmt.Sprintf("SELECT * FROM %s", quoteQualifiedIdentByType(config.TargetConfig.Type, targetQueryTable))) - if err != nil { - return TableDiffPreview{}, fmt.Errorf("读取目标表失败: %w", err) + + targetRows := make([]map[string]interface{}, 0) + if plan.TargetTableExists { + targetRows, _, err = targetDB.Query(fmt.Sprintf("SELECT * FROM %s", quoteQualifiedIdentByType(resolveMigrationDBType(config.TargetConfig), plan.TargetQueryTable))) + if err != nil { + return TableDiffPreview{}, fmt.Errorf("读取目标表失败: %w", err) + } } targetMap := make(map[string]map[string]interface{}, len(targetRows)) @@ -133,12 +141,7 @@ func (s *SyncEngine) Preview(config SyncConfig, tableName string, limit int) (Ta if len(changedColumns) > 0 { out.TotalUpdates++ if len(out.Updates) < limit { - out.Updates = append(out.Updates, PreviewUpdateRow{ - PK: pkVal, - ChangedColumns: changedColumns, - Source: sRow, - Target: tRow, - }) + out.Updates = append(out.Updates, PreviewUpdateRow{PK: pkVal, ChangedColumns: changedColumns, Source: sRow, Target: tRow}) } } continue diff --git a/internal/sync/redis_migration_test.go b/internal/sync/redis_migration_test.go new file mode 100644 index 0000000..ac3e7f1 --- 
/dev/null +++ b/internal/sync/redis_migration_test.go @@ -0,0 +1,490 @@ +package sync + +import ( + "GoNavi-Wails/internal/connection" + "GoNavi-Wails/internal/db" + redispkg "GoNavi-Wails/internal/redis" + "fmt" + "sort" + "strings" + "testing" +) + +type fakeRedisMigrationClient struct { + values map[string]*redispkg.RedisValue + scannedKeys []string + connectConfig connection.ConnectionConfig + closed bool +} + +func (f *fakeRedisMigrationClient) Connect(config connection.ConnectionConfig) error { + f.connectConfig = config + return nil +} + +func (f *fakeRedisMigrationClient) Close() error { + f.closed = true + return nil +} + +func (f *fakeRedisMigrationClient) ScanKeys(pattern string, cursor uint64, count int64) (*redispkg.RedisScanResult, error) { + items := make([]redispkg.RedisKeyInfo, 0, len(f.scannedKeys)) + for _, key := range f.scannedKeys { + items = append(items, redispkg.RedisKeyInfo{Key: key, Type: "string", TTL: -1}) + } + return &redispkg.RedisScanResult{Keys: items, Cursor: "0"}, nil +} + +func (f *fakeRedisMigrationClient) GetKeyType(key string) (string, error) { + if value, ok := f.values[key]; ok && value != nil { + return value.Type, nil + } + return "none", nil +} + +func (f *fakeRedisMigrationClient) GetValue(key string) (*redispkg.RedisValue, error) { + if value, ok := f.values[key]; ok { + return value, nil + } + return nil, fmt.Errorf("key not found: %s", key) +} + +func (f *fakeRedisMigrationClient) DeleteKeys(keys []string) (int64, error) { + var deleted int64 + for _, key := range keys { + if _, ok := f.values[key]; ok { + delete(f.values, key) + deleted++ + } + } + return deleted, nil +} + +func (f *fakeRedisMigrationClient) SetTTL(key string, ttl int64) error { + value, ok := f.values[key] + if !ok { + return nil + } + value.TTL = ttl + return nil +} + +func (f *fakeRedisMigrationClient) SetString(key, value string, ttl int64) error { + if f.values == nil { + f.values = map[string]*redispkg.RedisValue{} + } + f.values[key] = &redispkg.RedisValue{Type: "string", TTL: ttl, Value: value, Length: int64(len(value))} + return nil +} + +func (f *fakeRedisMigrationClient) SetHashField(key, field, value string) error { + if f.values == nil { + f.values = map[string]*redispkg.RedisValue{} + } + current, ok := f.values[key] + if !ok || current == nil || current.Type != "hash" { + current = &redispkg.RedisValue{Type: "hash", TTL: -1, Value: map[string]string{}} + f.values[key] = current + } + hash, _ := current.Value.(map[string]string) + if hash == nil { + hash = map[string]string{} + } + hash[field] = value + current.Value = hash + current.Length = int64(len(hash)) + return nil +} + +func (f *fakeRedisMigrationClient) ListPush(key string, values ...string) error { + if f.values == nil { + f.values = map[string]*redispkg.RedisValue{} + } + current, ok := f.values[key] + if !ok || current == nil || current.Type != "list" { + current = &redispkg.RedisValue{Type: "list", TTL: -1, Value: []string{}} + f.values[key] = current + } + list, _ := current.Value.([]string) + list = append(list, values...) 
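+	// 按追加顺序回写列表并同步 Length,近似真实 Redis 客户端的列表写入语义。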
+ current.Value = list + current.Length = int64(len(list)) + return nil +} + +func (f *fakeRedisMigrationClient) SetAdd(key string, members ...string) error { + if f.values == nil { + f.values = map[string]*redispkg.RedisValue{} + } + current, ok := f.values[key] + if !ok || current == nil || current.Type != "set" { + current = &redispkg.RedisValue{Type: "set", TTL: -1, Value: []string{}} + f.values[key] = current + } + setValues, _ := current.Value.([]string) + seen := make(map[string]struct{}, len(setValues)+len(members)) + for _, item := range setValues { + seen[item] = struct{}{} + } + for _, item := range members { + if _, ok := seen[item]; ok { + continue + } + seen[item] = struct{}{} + setValues = append(setValues, item) + } + sort.Strings(setValues) + current.Value = setValues + current.Length = int64(len(setValues)) + return nil +} + +func (f *fakeRedisMigrationClient) ZSetAdd(key string, members ...redispkg.ZSetMember) error { + if f.values == nil { + f.values = map[string]*redispkg.RedisValue{} + } + copied := append([]redispkg.ZSetMember(nil), members...) + sort.Slice(copied, func(i, j int) bool { + if copied[i].Score == copied[j].Score { + return copied[i].Member < copied[j].Member + } + return copied[i].Score < copied[j].Score + }) + f.values[key] = &redispkg.RedisValue{Type: "zset", TTL: -1, Value: copied, Length: int64(len(copied))} + return nil +} + +func (f *fakeRedisMigrationClient) StreamAdd(key string, fields map[string]string, id string) (string, error) { + if f.values == nil { + f.values = map[string]*redispkg.RedisValue{} + } + current, ok := f.values[key] + if !ok || current == nil || current.Type != "stream" { + current = &redispkg.RedisValue{Type: "stream", TTL: -1, Value: []redispkg.StreamEntry{}} + f.values[key] = current + } + entries, _ := current.Value.([]redispkg.StreamEntry) + entryID := id + if entryID == "" { + entryID = fmt.Sprintf("%d-0", len(entries)+1) + } + entries = append(entries, redispkg.StreamEntry{ID: entryID, Fields: fields}) + current.Value = entries + current.Length = int64(len(entries)) + return entryID, nil +} + +type fakeRedisMongoTargetDB struct { + tables []string + queryTable string + queryRows []map[string]interface{} + execs []string + applyTable string + applySet connection.ChangeSet +} + +func (f *fakeRedisMongoTargetDB) Connect(config connection.ConnectionConfig) error { return nil } +func (f *fakeRedisMongoTargetDB) Close() error { return nil } +func (f *fakeRedisMongoTargetDB) Ping() error { return nil } +func (f *fakeRedisMongoTargetDB) Query(query string) ([]map[string]interface{}, []string, error) { + queryTable := strings.TrimSpace(f.queryTable) + if queryTable == "" { + queryTable = "redis_db_0_keys" + } + if strings.Contains(query, fmt.Sprintf(`"find":"%s"`, queryTable)) { + return f.queryRows, []string{"_id", "key", "value"}, nil + } + return nil, nil, nil +} +func (f *fakeRedisMongoTargetDB) Exec(query string) (int64, error) { + f.execs = append(f.execs, query) + return 1, nil +} +func (f *fakeRedisMongoTargetDB) GetDatabases() ([]string, error) { return []string{"app"}, nil } +func (f *fakeRedisMongoTargetDB) GetTables(dbName string) ([]string, error) { + return f.tables, nil +} +func (f *fakeRedisMongoTargetDB) GetCreateStatement(dbName, tableName string) (string, error) { + return "", nil +} +func (f *fakeRedisMongoTargetDB) GetColumns(dbName, tableName string) ([]connection.ColumnDefinition, error) { + return nil, nil +} +func (f *fakeRedisMongoTargetDB) GetAllColumns(dbName string) 
([]connection.ColumnDefinitionWithTable, error) { + return nil, nil +} +func (f *fakeRedisMongoTargetDB) GetIndexes(dbName, tableName string) ([]connection.IndexDefinition, error) { + return nil, nil +} +func (f *fakeRedisMongoTargetDB) GetForeignKeys(dbName, tableName string) ([]connection.ForeignKeyDefinition, error) { + return nil, nil +} +func (f *fakeRedisMongoTargetDB) GetTriggers(dbName, tableName string) ([]connection.TriggerDefinition, error) { + return nil, nil +} +func (f *fakeRedisMongoTargetDB) ApplyChanges(tableName string, changes connection.ChangeSet) error { + f.applyTable = tableName + f.applySet = changes + return nil +} + +type fakeMongoRedisSourceDB struct { + tables []string + rowsByTable map[string][]map[string]interface{} + connectConfig connection.ConnectionConfig +} + +func (f *fakeMongoRedisSourceDB) Connect(config connection.ConnectionConfig) error { + f.connectConfig = config + return nil +} +func (f *fakeMongoRedisSourceDB) Close() error { return nil } +func (f *fakeMongoRedisSourceDB) Ping() error { return nil } +func (f *fakeMongoRedisSourceDB) Query(query string) ([]map[string]interface{}, []string, error) { + for tableName, rows := range f.rowsByTable { + if strings.Contains(query, fmt.Sprintf(`"find":"%s"`, tableName)) { + return rows, []string{"_id", "key", "type", "ttl", "value"}, nil + } + } + return nil, nil, fmt.Errorf("unexpected query: %s", query) +} +func (f *fakeMongoRedisSourceDB) Exec(query string) (int64, error) { return 0, nil } +func (f *fakeMongoRedisSourceDB) GetDatabases() ([]string, error) { return []string{"app"}, nil } +func (f *fakeMongoRedisSourceDB) GetTables(dbName string) ([]string, error) { + return f.tables, nil +} +func (f *fakeMongoRedisSourceDB) GetCreateStatement(dbName, tableName string) (string, error) { + return "", nil +} +func (f *fakeMongoRedisSourceDB) GetColumns(dbName, tableName string) ([]connection.ColumnDefinition, error) { + return nil, nil +} +func (f *fakeMongoRedisSourceDB) GetAllColumns(dbName string) ([]connection.ColumnDefinitionWithTable, error) { + return nil, nil +} +func (f *fakeMongoRedisSourceDB) GetIndexes(dbName, tableName string) ([]connection.IndexDefinition, error) { + return nil, nil +} +func (f *fakeMongoRedisSourceDB) GetForeignKeys(dbName, tableName string) ([]connection.ForeignKeyDefinition, error) { + return nil, nil +} +func (f *fakeMongoRedisSourceDB) GetTriggers(dbName, tableName string) ([]connection.TriggerDefinition, error) { + return nil, nil +} + +func TestRunSync_RedisToMongoAppliesInsertAndUpdate(t *testing.T) { + fakeRedis := &fakeRedisMigrationClient{ + values: map[string]*redispkg.RedisValue{ + "user:1": {Type: "hash", TTL: 120, Length: 2, Value: map[string]string{"name": "alice"}}, + "user:2": {Type: "string", TTL: -1, Length: 1, Value: "online"}, + }, + } + fakeTarget := &fakeRedisMongoTargetDB{ + tables: []string{"redis_db_0_keys"}, + queryRows: []map[string]interface{}{ + {"_id": "db0:user:1", "redisDb": 0, "key": "user:1", "type": "hash", "ttl": 120, "length": int64(2), "value": map[string]interface{}{"name": "old"}}, + }, + } + + oldNewRedisClient := newRedisSourceClient + oldNewDatabase := newSyncDatabase + defer func() { + newRedisSourceClient = oldNewRedisClient + newSyncDatabase = oldNewDatabase + }() + newRedisSourceClient = func() redisMigrationClient { return fakeRedis } + newSyncDatabase = func(dbType string) (db.Database, error) { return fakeTarget, nil } + + engine := NewSyncEngine(Reporter{}) + result := engine.RunSync(SyncConfig{ + SourceConfig: 
connection.ConnectionConfig{Type: "redis", Database: "0"}, + TargetConfig: connection.ConnectionConfig{Type: "mongodb", Database: "app"}, + Tables: []string{"user:1", "user:2"}, + Content: "data", + Mode: "insert_update", + }) + + if !result.Success { + t.Fatalf("expected success, got: %+v", result) + } + if fakeRedis.connectConfig.RedisDB != 0 { + t.Fatalf("expected redis db 0, got %d", fakeRedis.connectConfig.RedisDB) + } + if fakeTarget.applyTable != "redis_db_0_keys" { + t.Fatalf("unexpected apply table: %s", fakeTarget.applyTable) + } + if len(fakeTarget.applySet.Inserts) != 1 || len(fakeTarget.applySet.Updates) != 1 { + t.Fatalf("unexpected change set: %+v", fakeTarget.applySet) + } +} + +func TestRunSync_RedisToMongoUsesConfiguredCollectionName(t *testing.T) { + fakeRedis := &fakeRedisMigrationClient{ + values: map[string]*redispkg.RedisValue{ + "user:1": {Type: "string", TTL: -1, Length: 1, Value: "online"}, + }, + } + fakeTarget := &fakeRedisMongoTargetDB{ + tables: []string{"custom_keyspace_docs"}, + queryTable: "custom_keyspace_docs", + } + + oldNewRedisClient := newRedisSourceClient + oldNewDatabase := newSyncDatabase + defer func() { + newRedisSourceClient = oldNewRedisClient + newSyncDatabase = oldNewDatabase + }() + newRedisSourceClient = func() redisMigrationClient { return fakeRedis } + newSyncDatabase = func(dbType string) (db.Database, error) { return fakeTarget, nil } + + engine := NewSyncEngine(Reporter{}) + result := engine.RunSync(SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "redis", Database: "0"}, + TargetConfig: connection.ConnectionConfig{Type: "mongodb", Database: "app"}, + Tables: []string{"user:1"}, + Content: "data", + Mode: "insert_update", + MongoCollectionName: "custom_keyspace_docs", + }) + + if !result.Success { + t.Fatalf("expected success, got: %+v", result) + } + if fakeTarget.applyTable != "custom_keyspace_docs" { + t.Fatalf("unexpected apply table: %s", fakeTarget.applyTable) + } +} + +func TestPreview_RedisToMongoReturnsDocumentPreview(t *testing.T) { + fakeRedis := &fakeRedisMigrationClient{ + values: map[string]*redispkg.RedisValue{ + "session:1": {Type: "string", TTL: 60, Length: 1, Value: "token"}, + }, + } + fakeTarget := &fakeRedisMongoTargetDB{} + + oldNewRedisClient := newRedisSourceClient + oldNewDatabase := newSyncDatabase + defer func() { + newRedisSourceClient = oldNewRedisClient + newSyncDatabase = oldNewDatabase + }() + newRedisSourceClient = func() redisMigrationClient { return fakeRedis } + newSyncDatabase = func(dbType string) (db.Database, error) { return fakeTarget, nil } + + engine := NewSyncEngine(Reporter{}) + preview, err := engine.Preview(SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "redis", Database: "0"}, + TargetConfig: connection.ConnectionConfig{Type: "mongodb", Database: "app"}, + Tables: []string{"session:1"}, + Content: "data", + Mode: "insert_update", + }, "session:1", 20) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if preview.PKColumn != "_id" { + t.Fatalf("unexpected pk column: %s", preview.PKColumn) + } + if preview.TotalInserts != 1 || len(preview.Inserts) != 1 { + t.Fatalf("unexpected preview: %+v", preview) + } + if preview.Inserts[0].PK != "db0:session:1" { + t.Fatalf("unexpected preview pk: %+v", preview.Inserts[0]) + } +} + +func TestRunSync_MongoToRedisAppliesStringAndHash(t *testing.T) { + fakeSource := &fakeMongoRedisSourceDB{ + tables: []string{"redis_db_0_keys"}, + rowsByTable: map[string][]map[string]interface{}{ + "redis_db_0_keys": { + {"_id": 
"db0:session:1", "key": "session:1", "type": "string", "ttl": int64(60), "value": "token"}, + {"_id": "db0:user:1", "key": "user:1", "type": "hash", "ttl": int64(120), "value": map[string]interface{}{"name": "alice", "role": "admin"}}, + }, + }, + } + fakeRedis := &fakeRedisMigrationClient{ + values: map[string]*redispkg.RedisValue{ + "user:1": {Type: "hash", TTL: 120, Length: 1, Value: map[string]string{"name": "old"}}, + }, + } + + oldNewRedisClient := newRedisSourceClient + oldNewDatabase := newSyncDatabase + defer func() { + newRedisSourceClient = oldNewRedisClient + newSyncDatabase = oldNewDatabase + }() + newRedisSourceClient = func() redisMigrationClient { return fakeRedis } + newSyncDatabase = func(dbType string) (db.Database, error) { return fakeSource, nil } + + engine := NewSyncEngine(Reporter{}) + result := engine.RunSync(SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "mongodb", Database: "app"}, + TargetConfig: connection.ConnectionConfig{Type: "redis", Database: "0"}, + Tables: []string{"redis_db_0_keys"}, + Content: "data", + Mode: "insert_update", + }) + + if !result.Success { + t.Fatalf("expected success, got: %+v", result) + } + if fakeRedis.connectConfig.RedisDB != 0 { + t.Fatalf("expected redis db 0, got %d", fakeRedis.connectConfig.RedisDB) + } + if got := fakeRedis.values["session:1"]; got == nil || got.Type != "string" || got.Value != "token" || got.TTL != 60 { + t.Fatalf("unexpected string value: %+v", got) + } + gotHash, _ := fakeRedis.values["user:1"].Value.(map[string]string) + if gotHash["name"] != "alice" || gotHash["role"] != "admin" { + t.Fatalf("unexpected hash value: %+v", fakeRedis.values["user:1"]) + } + if result.RowsInserted != 1 || result.RowsUpdated != 1 { + t.Fatalf("unexpected sync result: %+v", result) + } +} + +func TestPreview_MongoToRedisReturnsCollectionPreview(t *testing.T) { + fakeSource := &fakeMongoRedisSourceDB{ + tables: []string{"redis_db_0_keys"}, + rowsByTable: map[string][]map[string]interface{}{ + "redis_db_0_keys": { + {"_id": "db0:session:1", "key": "session:1", "type": "string", "ttl": int64(60), "value": "token"}, + }, + }, + } + fakeRedis := &fakeRedisMigrationClient{values: map[string]*redispkg.RedisValue{}} + + oldNewRedisClient := newRedisSourceClient + oldNewDatabase := newSyncDatabase + defer func() { + newRedisSourceClient = oldNewRedisClient + newSyncDatabase = oldNewDatabase + }() + newRedisSourceClient = func() redisMigrationClient { return fakeRedis } + newSyncDatabase = func(dbType string) (db.Database, error) { return fakeSource, nil } + + engine := NewSyncEngine(Reporter{}) + preview, err := engine.Preview(SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "mongodb", Database: "app"}, + TargetConfig: connection.ConnectionConfig{Type: "redis", Database: "0"}, + Tables: []string{"redis_db_0_keys"}, + Content: "data", + Mode: "insert_update", + }, "redis_db_0_keys", 20) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if preview.Table != "redis_db_0_keys" || preview.PKColumn != "key" { + t.Fatalf("unexpected preview header: %+v", preview) + } + if preview.TotalInserts != 1 || len(preview.Inserts) != 1 { + t.Fatalf("unexpected preview rows: %+v", preview) + } + if preview.Inserts[0].PK != "session:1" { + t.Fatalf("unexpected preview pk: %+v", preview.Inserts[0]) + } +} diff --git a/internal/sync/schema_migration.go b/internal/sync/schema_migration.go new file mode 100644 index 0000000..ad6cdc6 --- /dev/null +++ b/internal/sync/schema_migration.go @@ -0,0 +1,1014 @@ +package sync 
+ +import ( + "GoNavi-Wails/internal/connection" + "GoNavi-Wails/internal/db" + "fmt" + "regexp" + "sort" + "strconv" + "strings" +) + +type SchemaMigrationPlan struct { + SourceSchema string + SourceTable string + SourceQueryTable string + TargetSchema string + TargetTable string + TargetQueryTable string + TargetTableExists bool + AutoCreate bool + PlannedAction string + Warnings []string + UnsupportedObjects []string + IndexesToCreate int + IndexesSkipped int + CreateTableSQL string + PreDataSQL []string + PostDataSQL []string +} + +type groupedIndex struct { + Name string + Columns []string + Unique bool + IndexType string + SubPart int +} + +func normalizeTargetTableStrategy(strategy string) string { + switch strings.ToLower(strings.TrimSpace(strategy)) { + case "smart": + return "smart" + case "auto_create_if_missing": + return "auto_create_if_missing" + case "existing_only", "": + return "existing_only" + default: + return "existing_only" + } +} + +func supportsAutoCreateMigration(sourceType, targetType string) bool { + return normalizeMigrationDBType(sourceType) == "mysql" && normalizeMigrationDBType(targetType) == "kingbase" +} + +func inspectTableColumns(database db.Database, schema, table string) ([]connection.ColumnDefinition, bool, error) { + cols, err := database.GetColumns(schema, table) + if err != nil { + if isLikelyTableNotFound(err) { + return nil, false, nil + } + return nil, false, err + } + if len(cols) == 0 { + return cols, false, nil + } + return cols, true, nil +} + +func isLikelyTableNotFound(err error) bool { + if err == nil { + return false + } + text := strings.ToLower(strings.TrimSpace(err.Error())) + if text == "" { + return false + } + keywords := []string{ + "doesn't exist", + "does not exist", + "not exist", + "unknown table", + "未找到表", + "不存在", + "invalid object", + "relation", + } + for _, keyword := range keywords { + if strings.Contains(text, keyword) { + return true + } + } + return false +} + +func buildSchemaMigrationPlanLegacy(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + plan := SchemaMigrationPlan{} + sourceType := resolveMigrationDBType(config.SourceConfig) + targetType := resolveMigrationDBType(config.TargetConfig) + plan.SourceSchema, plan.SourceTable = normalizeSchemaAndTable(sourceType, config.SourceConfig.Database, tableName) + plan.TargetSchema, plan.TargetTable = normalizeSchemaAndTable(targetType, config.TargetConfig.Database, tableName) + plan.SourceQueryTable = qualifiedNameForQuery(sourceType, plan.SourceSchema, plan.SourceTable, tableName) + plan.TargetQueryTable = qualifiedNameForQuery(targetType, plan.TargetSchema, plan.TargetTable, tableName) + plan.PlannedAction = "使用已有目标表导入" + if targetType == "tdengine" { + plan.Warnings = append(plan.Warnings, "TDengine 目标端当前仅支持 INSERT 写入;若存在差异更新/删除,执行期会被拒绝,请优先使用仅插入或全量覆盖模式") + } + + sourceCols, sourceExists, err := inspectTableColumns(sourceDB, plan.SourceSchema, plan.SourceTable) + if err != nil { + return plan, nil, nil, fmt.Errorf("获取源表字段失败: %w", err) + } + if !sourceExists { + return plan, nil, nil, fmt.Errorf("源表不存在或无列定义: %s", tableName) + } + + targetCols, targetExists, err := inspectTableColumns(targetDB, plan.TargetSchema, plan.TargetTable) + if err != nil { + return plan, sourceCols, nil, fmt.Errorf("获取目标表字段失败: %w", err) + } + plan.TargetTableExists = targetExists + + strategy := normalizeTargetTableStrategy(config.TargetTableStrategy) + if targetExists { + 
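// Existing target table: report any missing columns and, when column back-fill is enabled, queue ALTER TABLE ... ADD COLUMN statements ahead of the data phase. +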
missing := diffMissingColumnNames(sourceCols, targetCols) + if len(missing) > 0 { + plan.Warnings = append(plan.Warnings, fmt.Sprintf("目标表缺失字段 %d 个:%s", len(missing), strings.Join(missing, ", "))) + } + if config.AutoAddColumns && isMySQLLikeSourceType(sourceType) && normalizeMigrationDBType(targetType) == "kingbase" { + addSQL, addWarnings := buildMySQLToKingbaseAddColumnSQL(plan.TargetQueryTable, sourceCols, targetCols) + plan.PreDataSQL = append(plan.PreDataSQL, addSQL...) + plan.Warnings = append(plan.Warnings, addWarnings...) + if len(addSQL) > 0 { + plan.PlannedAction = fmt.Sprintf("补齐缺失字段(%d)后导入", len(addSQL)) + } + } + if strategy != "existing_only" { + plan.Warnings = append(plan.Warnings, "目标表已存在,当前仅执行数据导入;不会自动重建已有索引/约束") + } + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + } + + switch strategy { + case "existing_only": + plan.PlannedAction = "目标表不存在,需先手工创建" + plan.Warnings = append(plan.Warnings, "当前策略要求目标表已存在,执行时不会自动建表") + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + case "smart", "auto_create_if_missing": + if !supportsAutoCreateMigration(config.SourceConfig.Type, config.TargetConfig.Type) { + plan.PlannedAction = "当前库对暂不支持自动建表" + plan.Warnings = append(plan.Warnings, fmt.Sprintf("当前仅支持 MySQL -> Kingbase 自动建表,当前组合=%s -> %s", config.SourceConfig.Type, config.TargetConfig.Type)) + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + } + plan.AutoCreate = true + plan.PlannedAction = "目标表不存在,将自动建表后导入" + createSQL, postSQL, warnings, unsupported, idxCreate, idxSkip, err := buildMySQLToKingbaseCreateTablePlan(config, plan.TargetQueryTable, sourceCols, sourceDB, plan.SourceSchema, plan.SourceTable) + if err != nil { + return plan, sourceCols, targetCols, err + } + plan.CreateTableSQL = createSQL + plan.PostDataSQL = append(plan.PostDataSQL, postSQL...) + plan.Warnings = append(plan.Warnings, warnings...) + plan.UnsupportedObjects = append(plan.UnsupportedObjects, unsupported...) 
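+			// Surface the index summary on the plan so callers can report how many indexes will be created versus skipped.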
+ plan.IndexesToCreate = idxCreate + plan.IndexesSkipped = idxSkip + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + default: + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + } +} + +func dedupeSchemaMigrationPlan(plan SchemaMigrationPlan) SchemaMigrationPlan { + plan.Warnings = dedupeStrings(plan.Warnings) + plan.UnsupportedObjects = dedupeStrings(plan.UnsupportedObjects) + return plan +} + +func dedupeStrings(items []string) []string { + if len(items) == 0 { + return items + } + seen := make(map[string]struct{}, len(items)) + out := make([]string, 0, len(items)) + for _, item := range items { + text := strings.TrimSpace(item) + if text == "" { + continue + } + if _, ok := seen[text]; ok { + continue + } + seen[text] = struct{}{} + out = append(out, text) + } + return out +} + +func diffMissingColumnNames(sourceCols, targetCols []connection.ColumnDefinition) []string { + if len(sourceCols) == 0 { + return nil + } + targetSet := make(map[string]struct{}, len(targetCols)) + for _, col := range targetCols { + key := strings.ToLower(strings.TrimSpace(col.Name)) + if key == "" { + continue + } + targetSet[key] = struct{}{} + } + missing := make([]string, 0) + for _, col := range sourceCols { + key := strings.ToLower(strings.TrimSpace(col.Name)) + if key == "" { + continue + } + if _, ok := targetSet[key]; ok { + continue + } + missing = append(missing, col.Name) + } + sort.Strings(missing) + return missing +} + +func buildMySQLToKingbaseAddColumnSQL(targetQueryTable string, sourceCols, targetCols []connection.ColumnDefinition) ([]string, []string) { + targetSet := make(map[string]struct{}, len(targetCols)) + for _, col := range targetCols { + key := strings.ToLower(strings.TrimSpace(col.Name)) + if key == "" { + continue + } + targetSet[key] = struct{}{} + } + + var sqlList []string + var warnings []string + for _, col := range sourceCols { + key := strings.ToLower(strings.TrimSpace(col.Name)) + if key == "" { + continue + } + if _, ok := targetSet[key]; ok { + continue + } + colType, _, mapWarnings := mapMySQLColumnToKingbase(col) + warnings = append(warnings, mapWarnings...) + if col.Extra != "" && strings.Contains(strings.ToLower(col.Extra), "auto_increment") { + warnings = append(warnings, fmt.Sprintf("字段 %s 为自增列,补齐到已有目标表时不会自动补建 identity/sequence", col.Name)) + } + sqlList = append(sqlList, fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s %s NULL", + quoteQualifiedIdentByType("kingbase", targetQueryTable), + quoteIdentByType("kingbase", col.Name), + colType, + )) + } + return sqlList, dedupeStrings(warnings) +} + +func buildMySQLToKingbaseCreateTablePlan(config SyncConfig, targetQueryTable string, sourceCols []connection.ColumnDefinition, sourceDB db.Database, sourceSchema, sourceTable string) (string, []string, []string, []string, int, int, error) { + columnDefs := make([]string, 0, len(sourceCols)+1) + warnings := make([]string, 0) + unsupported := make([]string, 0) + pkCols := make([]string, 0, 2) + + for _, col := range sourceCols { + def, colWarnings := buildMySQLToKingbaseColumnDefinition(col) + warnings = append(warnings, colWarnings...) 
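+		// Render each source column with its Kingbase type; columns flagged PRI/PK feed the composite PRIMARY KEY clause below.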
+ columnDefs = append(columnDefs, fmt.Sprintf("%s %s", quoteIdentByType("kingbase", col.Name), def)) + if col.Key == "PRI" || col.Key == "PK" { + pkCols = append(pkCols, quoteIdentByType("kingbase", col.Name)) + } + } + if len(pkCols) > 0 { + columnDefs = append(columnDefs, fmt.Sprintf("PRIMARY KEY (%s)", strings.Join(pkCols, ", "))) + } + createSQL := fmt.Sprintf("CREATE TABLE %s (\n %s\n)", quoteQualifiedIdentByType("kingbase", targetQueryTable), strings.Join(columnDefs, ",\n ")) + + if !config.CreateIndexes { + return createSQL, nil, dedupeStrings(warnings), dedupeStrings(unsupported), 0, 0, nil + } + + indexes, err := sourceDB.GetIndexes(sourceSchema, sourceTable) + if err != nil { + warnings = append(warnings, fmt.Sprintf("读取源表索引失败,已跳过索引迁移:%v", err)) + return createSQL, nil, dedupeStrings(warnings), dedupeStrings(unsupported), 0, 0, nil + } + grouped := groupIndexDefinitions(indexes) + postSQL := make([]string, 0, len(grouped)) + created := 0 + skipped := 0 + for _, idx := range grouped { + name := strings.TrimSpace(idx.Name) + if name == "" || strings.EqualFold(name, "primary") { + continue + } + if len(idx.Columns) == 0 { + skipped++ + unsupported = append(unsupported, fmt.Sprintf("索引 %s 缺少列定义,已跳过", name)) + continue + } + kind := strings.ToLower(strings.TrimSpace(idx.IndexType)) + if idx.SubPart > 0 { + skipped++ + unsupported = append(unsupported, fmt.Sprintf("索引 %s 使用前缀长度,当前暂不支持迁移", name)) + continue + } + if kind != "" && kind != "btree" { + skipped++ + unsupported = append(unsupported, fmt.Sprintf("索引 %s 类型=%s,当前暂不支持自动迁移", name, idx.IndexType)) + continue + } + quotedCols := make([]string, 0, len(idx.Columns)) + for _, col := range idx.Columns { + quotedCols = append(quotedCols, quoteIdentByType("kingbase", col)) + } + prefix := "CREATE INDEX" + if idx.Unique { + prefix = "CREATE UNIQUE INDEX" + } + postSQL = append(postSQL, fmt.Sprintf("%s %s ON %s (%s)", prefix, quoteIdentByType("kingbase", name), quoteQualifiedIdentByType("kingbase", targetQueryTable), strings.Join(quotedCols, ", "))) + created++ + } + return createSQL, postSQL, dedupeStrings(warnings), dedupeStrings(unsupported), created, skipped, nil +} + +func buildMySQLToKingbaseColumnDefinition(col connection.ColumnDefinition) (string, []string) { + targetType, useIdentity, warnings := mapMySQLColumnToKingbase(col) + parts := []string{targetType} + if useIdentity { + parts = append(parts, "GENERATED BY DEFAULT AS IDENTITY") + } + if !useIdentity { + if defaultSQL, ok, warningText := mapMySQLDefaultToKingbase(col, targetType); warningText != "" { + warnings = append(warnings, warningText) + } else if ok { + parts = append(parts, "DEFAULT "+defaultSQL) + } + } + if strings.EqualFold(strings.TrimSpace(col.Nullable), "NO") { + parts = append(parts, "NOT NULL") + } + return strings.Join(parts, " "), dedupeStrings(warnings) +} + +func mapMySQLColumnToKingbase(col connection.ColumnDefinition) (string, bool, []string) { + raw := strings.ToLower(strings.TrimSpace(col.Type)) + warnings := make([]string, 0) + if raw == "" { + return "text", false, []string{fmt.Sprintf("字段 %s 类型为空,已降级为 text", col.Name)} + } + unsigned := strings.Contains(raw, "unsigned") + clean := strings.ReplaceAll(raw, " unsigned", "") + clean = strings.ReplaceAll(clean, " zerofill", "") + isAutoIncrement := strings.Contains(strings.ToLower(strings.TrimSpace(col.Extra)), "auto_increment") + + switch { + case strings.HasPrefix(clean, "tinyint(1)") && !unsigned && !isAutoIncrement: + return "boolean", false, warnings + case strings.HasPrefix(clean, "tinyint"): + 
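// PG-like targets have no 1-byte integer type, so any non-boolean tinyint maps to smallint regardless of signedness. +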
return "smallint", false, warnings
+	case strings.HasPrefix(clean, "smallint"):
+		return ternaryString(unsigned, "integer", "smallint"), isAutoIncrement, warnings
+	case strings.HasPrefix(clean, "mediumint"):
+		return ternaryString(unsigned, "bigint", "integer"), isAutoIncrement, warnings
+	case strings.HasPrefix(clean, "int") || strings.HasPrefix(clean, "integer"):
+		return ternaryString(unsigned, "bigint", "integer"), isAutoIncrement, warnings
+	case strings.HasPrefix(clean, "bigint"):
+		if unsigned {
+			if isAutoIncrement {
+				warnings = append(warnings, fmt.Sprintf("字段 %s 为 unsigned bigint auto_increment,已降级为 numeric(20,0) 且不保留自增语义", col.Name))
+			}
+			return "numeric(20,0)", false, warnings
+		}
+		return "bigint", isAutoIncrement, warnings
+	case strings.HasPrefix(clean, "decimal"), strings.HasPrefix(clean, "numeric"):
+		return replaceTypeBase(clean, []string{"decimal", "numeric"}, "numeric"), false, warnings
+	case strings.HasPrefix(clean, "float"):
+		return "real", false, warnings
+	case strings.HasPrefix(clean, "double"):
+		return "double precision", false, warnings
+	case strings.HasPrefix(clean, "bit("):
+		if clean == "bit(1)" {
+			return "boolean", false, warnings
+		}
+		return clean, false, warnings
+	case strings.HasPrefix(clean, "bool"), strings.HasPrefix(clean, "boolean"):
+		return "boolean", false, warnings
+	case strings.HasPrefix(clean, "char("), strings.HasPrefix(clean, "varchar("):
+		return clean, false, warnings
+	case strings.HasPrefix(clean, "tinytext"), strings.HasPrefix(clean, "text"), strings.HasPrefix(clean, "mediumtext"), strings.HasPrefix(clean, "longtext"):
+		return "text", false, warnings
+	case strings.HasPrefix(clean, "json"):
+		return "jsonb", false, warnings
+	// datetime/timestamp must be checked before the bare date/time prefixes,
+	// otherwise "datetime" matches "date" and "timestamp" matches "time".
+	case strings.HasPrefix(clean, "datetime"), strings.HasPrefix(clean, "timestamp"):
+		return "timestamp", false, warnings
+	case strings.HasPrefix(clean, "date"):
+		return "date", false, warnings
+	case strings.HasPrefix(clean, "time"):
+		return "time", false, warnings
+	case strings.HasPrefix(clean, "year"):
+		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 year 已映射为 integer", col.Name))
+		return "integer", false, warnings
+	case strings.HasPrefix(clean, "binary"), strings.HasPrefix(clean, "varbinary"), strings.HasPrefix(clean, "tinyblob"), strings.HasPrefix(clean, "blob"), strings.HasPrefix(clean, "mediumblob"), strings.HasPrefix(clean, "longblob"):
+		return "bytea", false, warnings
+	case strings.HasPrefix(clean, "enum"), strings.HasPrefix(clean, "set"):
+		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已降级为 text", col.Name, col.Type))
+		return "text", false, warnings
+	default:
+		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 暂无专门映射,已降级为 text", col.Name, col.Type))
+		return "text", false, warnings
+	}
+}
+
+func replaceTypeBase(raw string, bases []string, target string) string {
+	for _, base := range bases {
+		if strings.HasPrefix(raw, base) {
+			return target + strings.TrimPrefix(raw, base)
+		}
+	}
+	return target
+}
+
+var numericPattern = regexp.MustCompile(`^[+-]?\d+(\.\d+)?$`)
+
+func mapMySQLDefaultToKingbase(col connection.ColumnDefinition, targetType string) (string, bool, string) {
+	if col.Default == nil {
+		return "", false, ""
+	}
+	raw := strings.TrimSpace(*col.Default)
+	if raw == "" {
+		if isStringLikeTargetType(targetType) {
+			return "''", true, ""
+		}
+		return "", false, fmt.Sprintf("字段 %s 的空字符串默认值未保留", col.Name)
+	}
+	lower := strings.ToLower(raw)
+	if lower == "null" {
+		return "", false, ""
+	}
+	if strings.HasPrefix(lower,
"current_timestamp") { + return "CURRENT_TIMESTAMP", true, "" + } + if targetType == "boolean" { + switch lower { + case "1", "true": + return "TRUE", true, "" + case "0", "false": + return "FALSE", true, "" + } + } + if numericPattern.MatchString(raw) && !isStringLikeTargetType(targetType) { + return raw, true, "" + } + if strings.ContainsAny(raw, "()") && !strings.HasPrefix(lower, "current_timestamp") { + return "", false, fmt.Sprintf("字段 %s 的默认值 %s 包含表达式,当前未自动迁移", col.Name, raw) + } + return "'" + strings.ReplaceAll(raw, "'", "''") + "'", true, "" +} + +func isStringLikeTargetType(targetType string) bool { + text := strings.ToLower(strings.TrimSpace(targetType)) + return strings.Contains(text, "char") || strings.Contains(text, "text") || strings.Contains(text, "json") || strings.Contains(text, "bytea") +} + +func ternaryString(ok bool, a, b string) string { + if ok { + return a + } + return b +} + +func groupIndexDefinitions(indexes []connection.IndexDefinition) []groupedIndex { + if len(indexes) == 0 { + return nil + } + groupMap := make(map[string][]connection.IndexDefinition) + order := make([]string, 0) + for _, idx := range indexes { + name := strings.TrimSpace(idx.Name) + if name == "" { + continue + } + if _, ok := groupMap[name]; !ok { + order = append(order, name) + } + groupMap[name] = append(groupMap[name], idx) + } + grouped := make([]groupedIndex, 0, len(groupMap)) + for _, name := range order { + rows := groupMap[name] + sort.SliceStable(rows, func(i, j int) bool { + return rows[i].SeqInIndex < rows[j].SeqInIndex + }) + gi := groupedIndex{Name: name, Unique: true, IndexType: "BTREE"} + for _, row := range rows { + if row.NonUnique != 0 { + gi.Unique = false + } + if strings.TrimSpace(row.IndexType) != "" { + gi.IndexType = row.IndexType + } + if row.SubPart > 0 && gi.SubPart == 0 { + gi.SubPart = row.SubPart + } + col := strings.TrimSpace(row.ColumnName) + if col != "" { + gi.Columns = append(gi.Columns, col) + } + } + grouped = append(grouped, gi) + } + return grouped +} + +func intFromAny(v interface{}) int { + switch typed := v.(type) { + case int: + return typed + case int64: + return int(typed) + case float64: + return int(typed) + case string: + i, _ := strconv.Atoi(strings.TrimSpace(typed)) + return i + default: + return 0 + } +} + +func isPGLikeSource(dbType string) bool { + switch normalizeMigrationDBType(dbType) { + case "postgres", "kingbase", "highgo", "vastbase", "duckdb": + return true + default: + return false + } +} + +func buildPGLikeToMySQLPlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + plan := SchemaMigrationPlan{} + sourceType := resolveMigrationDBType(config.SourceConfig) + targetType := resolveMigrationDBType(config.TargetConfig) + plan.SourceSchema, plan.SourceTable = normalizeSchemaAndTable(sourceType, config.SourceConfig.Database, tableName) + plan.TargetSchema, plan.TargetTable = normalizeSchemaAndTable(targetType, config.TargetConfig.Database, tableName) + plan.SourceQueryTable = qualifiedNameForQuery(sourceType, plan.SourceSchema, plan.SourceTable, tableName) + plan.TargetQueryTable = qualifiedNameForQuery(targetType, plan.TargetSchema, plan.TargetTable, tableName) + plan.PlannedAction = "使用已有目标表导入" + + sourceCols, sourceExists, err := inspectTableColumns(sourceDB, plan.SourceSchema, plan.SourceTable) + if err != nil { + return plan, nil, nil, fmt.Errorf("获取源表字段失败: %w", err) + } + if !sourceExists { + return plan, 
nil, nil, fmt.Errorf("源表不存在或无列定义: %s", tableName) + } + + targetCols, targetExists, err := inspectTableColumns(targetDB, plan.TargetSchema, plan.TargetTable) + if err != nil { + return plan, sourceCols, nil, fmt.Errorf("获取目标表字段失败: %w", err) + } + plan.TargetTableExists = targetExists + + strategy := normalizeTargetTableStrategy(config.TargetTableStrategy) + if targetExists { + missing := diffMissingColumnNames(sourceCols, targetCols) + if len(missing) > 0 { + plan.Warnings = append(plan.Warnings, fmt.Sprintf("目标表缺失字段 %d 个:%s", len(missing), strings.Join(missing, ", "))) + } + if config.AutoAddColumns { + addSQL, addWarnings := buildPGLikeToMySQLAddColumnSQL(plan.TargetQueryTable, sourceCols, targetCols) + plan.PreDataSQL = append(plan.PreDataSQL, addSQL...) + plan.Warnings = append(plan.Warnings, addWarnings...) + if len(addSQL) > 0 { + plan.PlannedAction = fmt.Sprintf("补齐缺失字段(%d)后导入", len(addSQL)) + } + } + if strategy != "existing_only" { + plan.Warnings = append(plan.Warnings, "目标表已存在,当前仅执行数据导入;不会自动重建已有索引/约束") + } + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + } + + switch strategy { + case "existing_only": + plan.PlannedAction = "目标表不存在,需先手工创建" + plan.Warnings = append(plan.Warnings, "当前策略要求目标表已存在,执行时不会自动建表") + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + case "smart", "auto_create_if_missing": + plan.AutoCreate = true + plan.PlannedAction = "目标表不存在,将自动建表后导入" + createSQL, postSQL, warnings, unsupported, idxCreate, idxSkip, err := buildPGLikeToMySQLCreateTablePlan(config, plan.TargetQueryTable, sourceCols, sourceDB, plan.SourceSchema, plan.SourceTable) + if err != nil { + return plan, sourceCols, targetCols, err + } + plan.CreateTableSQL = createSQL + plan.PostDataSQL = append(plan.PostDataSQL, postSQL...) + plan.Warnings = append(plan.Warnings, warnings...) + plan.UnsupportedObjects = append(plan.UnsupportedObjects, unsupported...) + plan.IndexesToCreate = idxCreate + plan.IndexesSkipped = idxSkip + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + default: + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + } +} + +func buildPGLikeToMySQLAddColumnSQL(targetQueryTable string, sourceCols, targetCols []connection.ColumnDefinition) ([]string, []string) { + targetSet := make(map[string]struct{}, len(targetCols)) + for _, col := range targetCols { + key := strings.ToLower(strings.TrimSpace(col.Name)) + if key == "" { + continue + } + targetSet[key] = struct{}{} + } + var sqlList []string + var warnings []string + for _, col := range sourceCols { + key := strings.ToLower(strings.TrimSpace(col.Name)) + if key == "" { + continue + } + if _, ok := targetSet[key]; ok { + continue + } + colType, mapWarnings := mapPGLikeColumnToMySQL(col) + warnings = append(warnings, mapWarnings...) 
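+		// Back-filled columns are added as nullable so the ALTER succeeds even when the target table already holds rows.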
+ if col.Extra != "" && strings.Contains(strings.ToLower(col.Extra), "auto_increment") { + warnings = append(warnings, fmt.Sprintf("字段 %s 为自增列,补齐到已有目标表时不会自动补建 AUTO_INCREMENT 属性", col.Name)) + } + sqlList = append(sqlList, fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s %s NULL", + quoteQualifiedIdentByType("mysql", targetQueryTable), + quoteIdentByType("mysql", col.Name), + colType, + )) + } + return sqlList, dedupeStrings(warnings) +} + +func buildPGLikeToMySQLCreateTablePlan(config SyncConfig, targetQueryTable string, sourceCols []connection.ColumnDefinition, sourceDB db.Database, sourceSchema, sourceTable string) (string, []string, []string, []string, int, int, error) { + columnDefs := make([]string, 0, len(sourceCols)+1) + warnings := make([]string, 0) + unsupported := make([]string, 0) + pkCols := make([]string, 0, 2) + for _, col := range sourceCols { + def, colWarnings := buildPGLikeToMySQLColumnDefinition(col) + warnings = append(warnings, colWarnings...) + columnDefs = append(columnDefs, fmt.Sprintf("%s %s", quoteIdentByType("mysql", col.Name), def)) + if col.Key == "PRI" || col.Key == "PK" { + pkCols = append(pkCols, quoteIdentByType("mysql", col.Name)) + } + } + if len(pkCols) > 0 { + columnDefs = append(columnDefs, fmt.Sprintf("PRIMARY KEY (%s)", strings.Join(pkCols, ", "))) + } + createSQL := fmt.Sprintf("CREATE TABLE %s (\n %s\n)", quoteQualifiedIdentByType("mysql", targetQueryTable), strings.Join(columnDefs, ",\n ")) + if !config.CreateIndexes { + return createSQL, nil, dedupeStrings(warnings), dedupeStrings(unsupported), 0, 0, nil + } + indexes, err := sourceDB.GetIndexes(sourceSchema, sourceTable) + if err != nil { + warnings = append(warnings, fmt.Sprintf("读取源表索引失败,已跳过索引迁移:%v", err)) + return createSQL, nil, dedupeStrings(warnings), dedupeStrings(unsupported), 0, 0, nil + } + grouped := groupIndexDefinitions(indexes) + postSQL := make([]string, 0, len(grouped)) + created := 0 + skipped := 0 + for _, idx := range grouped { + name := strings.TrimSpace(idx.Name) + if name == "" || strings.EqualFold(name, "primary") { + continue + } + if len(idx.Columns) == 0 { + skipped++ + unsupported = append(unsupported, fmt.Sprintf("索引 %s 缺少列定义,已跳过", name)) + continue + } + kind := strings.ToLower(strings.TrimSpace(idx.IndexType)) + if idx.SubPart > 0 { + skipped++ + unsupported = append(unsupported, fmt.Sprintf("索引 %s 使用前缀长度,当前暂不支持迁移", name)) + continue + } + if kind != "" && kind != "btree" { + skipped++ + unsupported = append(unsupported, fmt.Sprintf("索引 %s 类型=%s,当前暂不支持自动迁移", name, idx.IndexType)) + continue + } + quotedCols := make([]string, 0, len(idx.Columns)) + for _, col := range idx.Columns { + quotedCols = append(quotedCols, quoteIdentByType("mysql", col)) + } + prefix := "CREATE INDEX" + if idx.Unique { + prefix = "CREATE UNIQUE INDEX" + } + postSQL = append(postSQL, fmt.Sprintf("%s %s ON %s (%s)", prefix, quoteIdentByType("mysql", name), quoteQualifiedIdentByType("mysql", targetQueryTable), strings.Join(quotedCols, ", "))) + created++ + } + return createSQL, postSQL, dedupeStrings(warnings), dedupeStrings(unsupported), created, skipped, nil +} + +func buildPGLikeToMySQLColumnDefinition(col connection.ColumnDefinition) (string, []string) { + targetType, warnings := mapPGLikeColumnToMySQL(col) + parts := []string{targetType} + if strings.Contains(strings.ToLower(strings.TrimSpace(col.Extra)), "auto_increment") && canUseMySQLAutoIncrement(targetType) { + parts = append(parts, "AUTO_INCREMENT") + } + if defaultSQL, ok, warningText := mapPGLikeDefaultToMySQL(col, targetType); 
warningText != "" { + warnings = append(warnings, warningText) + } else if ok { + parts = append(parts, "DEFAULT "+defaultSQL) + } + if strings.EqualFold(strings.TrimSpace(col.Nullable), "NO") { + parts = append(parts, "NOT NULL") + } + return strings.Join(parts, " "), dedupeStrings(warnings) +} + +func mapPGLikeColumnToMySQL(col connection.ColumnDefinition) (string, []string) { + raw := strings.ToLower(strings.TrimSpace(col.Type)) + warnings := make([]string, 0) + if raw == "" { + return "text", []string{fmt.Sprintf("字段 %s 类型为空,已降级为 text", col.Name)} + } + switch { + case raw == "boolean" || strings.HasPrefix(raw, "bool"): + return "tinyint(1)", warnings + case raw == "smallint": + return "smallint", warnings + case raw == "integer" || raw == "int4": + return "int", warnings + case raw == "bigint" || raw == "int8": + return "bigint", warnings + case strings.HasPrefix(raw, "numeric") || strings.HasPrefix(raw, "decimal"): + return replaceTypeBase(raw, []string{"numeric", "decimal"}, "decimal"), warnings + case raw == "real" || raw == "float4": + return "float", warnings + case raw == "double precision" || raw == "float8": + return "double", warnings + case strings.HasPrefix(raw, "character varying"): + return strings.Replace(raw, "character varying", "varchar", 1), warnings + case strings.HasPrefix(raw, "character("): + return strings.Replace(raw, "character", "char", 1), warnings + case raw == "character": + return "char(1)", warnings + case raw == "text": + return "text", warnings + case raw == "json" || raw == "jsonb": + return "json", warnings + case raw == "bytea": + return "longblob", warnings + case raw == "date": + return "date", warnings + case strings.HasPrefix(raw, "time"): + return "time", warnings + case strings.HasPrefix(raw, "timestamp"): + return "datetime", warnings + case strings.HasPrefix(raw, "uuid"): + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 uuid 已映射为 varchar(36)", col.Name)) + return "varchar(36)", warnings + case strings.Contains(raw, "without time zone") || strings.Contains(raw, "with time zone"): + return "datetime", warnings + case strings.HasPrefix(raw, "json"): + return "json", warnings + case strings.HasSuffix(raw, "[]") || strings.HasPrefix(raw, "array"): + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已降级为 json", col.Name, col.Type)) + return "json", warnings + case raw == "user-defined": + warnings = append(warnings, fmt.Sprintf("字段 %s 为用户自定义类型,已降级为 text", col.Name)) + return "text", warnings + default: + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 暂无专门映射,已降级为 text", col.Name, col.Type)) + return "text", warnings + } +} + +func canUseMySQLAutoIncrement(targetType string) bool { + text := strings.ToLower(strings.TrimSpace(targetType)) + switch { + case strings.HasPrefix(text, "tinyint"), strings.HasPrefix(text, "smallint"), strings.HasPrefix(text, "mediumint"), strings.HasPrefix(text, "int"), strings.HasPrefix(text, "bigint"): + return true + default: + return false + } +} + +func mapPGLikeDefaultToMySQL(col connection.ColumnDefinition, targetType string) (string, bool, string) { + if col.Default == nil { + return "", false, "" + } + raw := strings.TrimSpace(*col.Default) + if raw == "" || strings.EqualFold(raw, "null") { + return "", false, "" + } + lower := strings.ToLower(raw) + if strings.HasPrefix(lower, "nextval(") { + return "", false, "" + } + if strings.Contains(lower, "current_timestamp") || strings.Contains(lower, "now()") { + return "CURRENT_TIMESTAMP", true, "" + } + if targetType == "tinyint(1)" { + switch lower { + case 
"true", "1": + return "1", true, "" + case "false", "0": + return "0", true, "" + } + } + if numericPattern.MatchString(raw) && !isStringLikeTargetType(targetType) { + return raw, true, "" + } + if strings.ContainsAny(raw, "()") && !strings.Contains(lower, "current_timestamp") && !strings.Contains(lower, "now()") { + return "", false, fmt.Sprintf("字段 %s 的默认值 %s 包含表达式,当前未自动迁移", col.Name, raw) + } + return "'" + strings.ReplaceAll(raw, "'", "''") + "'", true, "" +} + +func isPGLikeTarget(dbType string) bool { + switch normalizeMigrationDBType(dbType) { + case "postgres", "kingbase", "highgo", "vastbase", "duckdb": + return true + default: + return false + } +} + +func buildMySQLToPGLikePlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + plan := SchemaMigrationPlan{} + sourceType := resolveMigrationDBType(config.SourceConfig) + targetType := resolveMigrationDBType(config.TargetConfig) + plan.SourceSchema, plan.SourceTable = normalizeSchemaAndTable(sourceType, config.SourceConfig.Database, tableName) + plan.TargetSchema, plan.TargetTable = normalizeSchemaAndTable(targetType, config.TargetConfig.Database, tableName) + plan.SourceQueryTable = qualifiedNameForQuery(sourceType, plan.SourceSchema, plan.SourceTable, tableName) + plan.TargetQueryTable = qualifiedNameForQuery(targetType, plan.TargetSchema, plan.TargetTable, tableName) + plan.PlannedAction = "使用已有目标表导入" + + sourceCols, sourceExists, err := inspectTableColumns(sourceDB, plan.SourceSchema, plan.SourceTable) + if err != nil { + return plan, nil, nil, fmt.Errorf("获取源表字段失败: %w", err) + } + if !sourceExists { + return plan, nil, nil, fmt.Errorf("源表不存在或无列定义: %s", tableName) + } + + targetCols, targetExists, err := inspectTableColumns(targetDB, plan.TargetSchema, plan.TargetTable) + if err != nil { + return plan, sourceCols, nil, fmt.Errorf("获取目标表字段失败: %w", err) + } + plan.TargetTableExists = targetExists + + strategy := normalizeTargetTableStrategy(config.TargetTableStrategy) + if targetExists { + missing := diffMissingColumnNames(sourceCols, targetCols) + if len(missing) > 0 { + plan.Warnings = append(plan.Warnings, fmt.Sprintf("目标表缺失字段 %d 个:%s", len(missing), strings.Join(missing, ", "))) + } + if config.AutoAddColumns { + addSQL, addWarnings := buildMySQLToPGLikeAddColumnSQL(targetType, plan.TargetQueryTable, sourceCols, targetCols) + plan.PreDataSQL = append(plan.PreDataSQL, addSQL...) + plan.Warnings = append(plan.Warnings, addWarnings...) + if len(addSQL) > 0 { + plan.PlannedAction = fmt.Sprintf("补齐缺失字段(%d)后导入", len(addSQL)) + } + } + if strategy != "existing_only" { + plan.Warnings = append(plan.Warnings, "目标表已存在,当前仅执行数据导入;不会自动重建已有索引/约束") + } + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + } + + switch strategy { + case "existing_only": + plan.PlannedAction = "目标表不存在,需先手工创建" + plan.Warnings = append(plan.Warnings, "当前策略要求目标表已存在,执行时不会自动建表") + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + case "smart", "auto_create_if_missing": + plan.AutoCreate = true + plan.PlannedAction = "目标表不存在,将自动建表后导入" + createSQL, postSQL, warnings, unsupported, idxCreate, idxSkip, err := buildMySQLToPGLikeCreateTablePlan(targetType, config, plan.TargetQueryTable, sourceCols, sourceDB, plan.SourceSchema, plan.SourceTable) + if err != nil { + return plan, sourceCols, targetCols, err + } + plan.CreateTableSQL = createSQL + plan.PostDataSQL = append(plan.PostDataSQL, postSQL...) 
+ plan.Warnings = append(plan.Warnings, warnings...) + plan.UnsupportedObjects = append(plan.UnsupportedObjects, unsupported...) + plan.IndexesToCreate = idxCreate + plan.IndexesSkipped = idxSkip + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + default: + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + } +} + +func buildMySQLToPGLikeAddColumnSQL(targetType string, targetQueryTable string, sourceCols, targetCols []connection.ColumnDefinition) ([]string, []string) { + targetSet := make(map[string]struct{}, len(targetCols)) + for _, col := range targetCols { + key := strings.ToLower(strings.TrimSpace(col.Name)) + if key == "" { + continue + } + targetSet[key] = struct{}{} + } + var sqlList []string + var warnings []string + for _, col := range sourceCols { + key := strings.ToLower(strings.TrimSpace(col.Name)) + if key == "" { + continue + } + if _, ok := targetSet[key]; ok { + continue + } + colType, _, mapWarnings := mapMySQLColumnToKingbase(col) + warnings = append(warnings, mapWarnings...) + if col.Extra != "" && strings.Contains(strings.ToLower(col.Extra), "auto_increment") { + warnings = append(warnings, fmt.Sprintf("字段 %s 为自增列,补齐到已有目标表时不会自动补建 identity/sequence", col.Name)) + } + sqlList = append(sqlList, fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s %s NULL", + quoteQualifiedIdentByType(targetType, targetQueryTable), + quoteIdentByType(targetType, col.Name), + colType, + )) + } + return sqlList, dedupeStrings(warnings) +} + +func buildMySQLToPGLikeCreateTablePlan(targetType string, config SyncConfig, targetQueryTable string, sourceCols []connection.ColumnDefinition, sourceDB db.Database, sourceSchema, sourceTable string) (string, []string, []string, []string, int, int, error) { + columnDefs := make([]string, 0, len(sourceCols)+1) + warnings := make([]string, 0) + unsupported := make([]string, 0) + pkCols := make([]string, 0, 2) + for _, col := range sourceCols { + def, colWarnings := buildMySQLToPGLikeColumnDefinition(col) + warnings = append(warnings, colWarnings...) 
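+		// Column definitions reuse the MySQL->Kingbase type mapping; targetType only affects identifier quoting here.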
+ columnDefs = append(columnDefs, fmt.Sprintf("%s %s", quoteIdentByType(targetType, col.Name), def)) + if col.Key == "PRI" || col.Key == "PK" { + pkCols = append(pkCols, quoteIdentByType(targetType, col.Name)) + } + } + if len(pkCols) > 0 { + columnDefs = append(columnDefs, fmt.Sprintf("PRIMARY KEY (%s)", strings.Join(pkCols, ", "))) + } + createSQL := fmt.Sprintf("CREATE TABLE %s (\n %s\n)", quoteQualifiedIdentByType(targetType, targetQueryTable), strings.Join(columnDefs, ",\n ")) + if !config.CreateIndexes { + return createSQL, nil, dedupeStrings(warnings), dedupeStrings(unsupported), 0, 0, nil + } + indexes, err := sourceDB.GetIndexes(sourceSchema, sourceTable) + if err != nil { + warnings = append(warnings, fmt.Sprintf("读取源表索引失败,已跳过索引迁移:%v", err)) + return createSQL, nil, dedupeStrings(warnings), dedupeStrings(unsupported), 0, 0, nil + } + grouped := groupIndexDefinitions(indexes) + postSQL := make([]string, 0, len(grouped)) + created := 0 + skipped := 0 + for _, idx := range grouped { + name := strings.TrimSpace(idx.Name) + if name == "" || strings.EqualFold(name, "primary") { + continue + } + if len(idx.Columns) == 0 { + skipped++ + unsupported = append(unsupported, fmt.Sprintf("索引 %s 缺少列定义,已跳过", name)) + continue + } + kind := strings.ToLower(strings.TrimSpace(idx.IndexType)) + if idx.SubPart > 0 { + skipped++ + unsupported = append(unsupported, fmt.Sprintf("索引 %s 使用前缀长度,当前暂不支持迁移", name)) + continue + } + if kind != "" && kind != "btree" { + skipped++ + unsupported = append(unsupported, fmt.Sprintf("索引 %s 类型=%s,当前暂不支持自动迁移", name, idx.IndexType)) + continue + } + quotedCols := make([]string, 0, len(idx.Columns)) + for _, col := range idx.Columns { + quotedCols = append(quotedCols, quoteIdentByType(targetType, col)) + } + prefix := "CREATE INDEX" + if idx.Unique { + prefix = "CREATE UNIQUE INDEX" + } + postSQL = append(postSQL, fmt.Sprintf("%s %s ON %s (%s)", prefix, quoteIdentByType(targetType, name), quoteQualifiedIdentByType(targetType, targetQueryTable), strings.Join(quotedCols, ", "))) + created++ + } + return createSQL, postSQL, dedupeStrings(warnings), dedupeStrings(unsupported), created, skipped, nil +} + +func buildMySQLToPGLikeColumnDefinition(col connection.ColumnDefinition) (string, []string) { + targetType, useIdentity, warnings := mapMySQLColumnToKingbase(col) + parts := []string{targetType} + if useIdentity { + parts = append(parts, "GENERATED BY DEFAULT AS IDENTITY") + } + if !useIdentity { + if defaultSQL, ok, warningText := mapMySQLDefaultToKingbase(col, targetType); warningText != "" { + warnings = append(warnings, warningText) + } else if ok { + parts = append(parts, "DEFAULT "+defaultSQL) + } + } + if strings.EqualFold(strings.TrimSpace(col.Nullable), "NO") { + parts = append(parts, "NOT NULL") + } + return strings.Join(parts, " "), dedupeStrings(warnings) +} diff --git a/internal/sync/schema_migration_test.go b/internal/sync/schema_migration_test.go new file mode 100644 index 0000000..c946fbe --- /dev/null +++ b/internal/sync/schema_migration_test.go @@ -0,0 +1,957 @@ +package sync + +import ( + "GoNavi-Wails/internal/connection" + "context" + "reflect" + "strings" + "testing" +) + +type fakeMigrationDB struct { + columns map[string][]connection.ColumnDefinition + indexes map[string][]connection.IndexDefinition + queryData map[string][]map[string]interface{} + queryCols map[string][]string +} + +func (f *fakeMigrationDB) Connect(config connection.ConnectionConfig) error { return nil } +func (f *fakeMigrationDB) Close() error { return nil } +func (f 
*fakeMigrationDB) Ping() error { return nil } +func (f *fakeMigrationDB) Query(query string) ([]map[string]interface{}, []string, error) { + if rows, ok := f.queryData[query]; ok { + return rows, f.queryCols[query], nil + } + return nil, nil, nil +} +func (f *fakeMigrationDB) Exec(query string) (int64, error) { return 0, nil } +func (f *fakeMigrationDB) GetDatabases() ([]string, error) { return nil, nil } +func (f *fakeMigrationDB) GetTables(dbName string) ([]string, error) { + return nil, nil +} +func (f *fakeMigrationDB) GetCreateStatement(dbName, tableName string) (string, error) { + return "", nil +} +func (f *fakeMigrationDB) GetColumns(dbName, tableName string) ([]connection.ColumnDefinition, error) { + key := dbName + "." + tableName + if rows, ok := f.columns[key]; ok { + return rows, nil + } + return []connection.ColumnDefinition{}, nil +} +func (f *fakeMigrationDB) GetAllColumns(dbName string) ([]connection.ColumnDefinitionWithTable, error) { + return nil, nil +} +func (f *fakeMigrationDB) GetIndexes(dbName, tableName string) ([]connection.IndexDefinition, error) { + key := dbName + "." + tableName + if rows, ok := f.indexes[key]; ok { + return rows, nil + } + return nil, nil +} +func (f *fakeMigrationDB) GetForeignKeys(dbName, tableName string) ([]connection.ForeignKeyDefinition, error) { + return nil, nil +} +func (f *fakeMigrationDB) GetTriggers(dbName, tableName string) ([]connection.TriggerDefinition, error) { + return nil, nil +} +func (f *fakeMigrationDB) QueryContext(ctx context.Context, query string) ([]map[string]interface{}, []string, error) { + return f.Query(query) +} +func (f *fakeMigrationDB) ExecContext(ctx context.Context, query string) (int64, error) { + return 0, nil +} + +func TestBuildMySQLToKingbaseColumnDefinition_AutoIncrementAndBoolean(t *testing.T) { + t.Parallel() + + def, warnings := buildMySQLToKingbaseColumnDefinition(connection.ColumnDefinition{ + Name: "id", + Type: "int unsigned", + Nullable: "NO", + Extra: "auto_increment", + }) + if !strings.Contains(def, "bigint") || !strings.Contains(def, "GENERATED BY DEFAULT AS IDENTITY") || !strings.Contains(def, "NOT NULL") { + t.Fatalf("unexpected definition: %s", def) + } + if len(warnings) != 0 { + t.Fatalf("unexpected warnings: %v", warnings) + } + + def, warnings = buildMySQLToKingbaseColumnDefinition(connection.ColumnDefinition{ + Name: "enabled", + Type: "tinyint(1)", + Nullable: "YES", + Default: stringPtr("1"), + }) + if !strings.Contains(def, "boolean") || !strings.Contains(def, "DEFAULT TRUE") { + t.Fatalf("unexpected boolean definition: %s", def) + } + if len(warnings) != 0 { + t.Fatalf("unexpected warnings for boolean: %v", warnings) + } +} + +func TestBuildMySQLToKingbaseCreateTablePlan_GeneratesAndSkipsIndexes(t *testing.T) { + t.Parallel() + + sourceDB := &fakeMigrationDB{ + indexes: map[string][]connection.IndexDefinition{ + "shop.orders": { + {Name: "PRIMARY", ColumnName: "id", NonUnique: 0, SeqInIndex: 1, IndexType: "BTREE"}, + {Name: "idx_user_status", ColumnName: "user_id", NonUnique: 1, SeqInIndex: 1, IndexType: "BTREE"}, + {Name: "idx_user_status", ColumnName: "status", NonUnique: 1, SeqInIndex: 2, IndexType: "BTREE"}, + {Name: "idx_name_prefix", ColumnName: "name", NonUnique: 1, SeqInIndex: 1, IndexType: "BTREE", SubPart: 12}, + {Name: "idx_fulltext_note", ColumnName: "note", NonUnique: 1, SeqInIndex: 1, IndexType: "FULLTEXT"}, + }, + }, + } + cols := []connection.ColumnDefinition{ + {Name: "id", Type: "bigint", Nullable: "NO", Key: "PRI", Extra: "auto_increment"}, + {Name: 
"user_id", Type: "bigint", Nullable: "NO"}, + {Name: "status", Type: "varchar(32)", Nullable: "YES"}, + {Name: "name", Type: "varchar(128)", Nullable: "YES"}, + {Name: "note", Type: "text", Nullable: "YES"}, + } + cfg := SyncConfig{CreateIndexes: true} + createSQL, postSQL, warnings, unsupported, idxCreate, idxSkip, err := buildMySQLToKingbaseCreateTablePlan(cfg, "public.orders", cols, sourceDB, "shop", "orders") + if err != nil { + t.Fatalf("buildMySQLToKingbaseCreateTablePlan returned error: %v", err) + } + if !strings.Contains(createSQL, `CREATE TABLE "public"."orders"`) { + t.Fatalf("unexpected create SQL: %s", createSQL) + } + if !strings.Contains(createSQL, `PRIMARY KEY ("id")`) { + t.Fatalf("create SQL missing primary key: %s", createSQL) + } + if idxCreate != 1 || idxSkip != 2 { + t.Fatalf("unexpected index summary: create=%d skip=%d", idxCreate, idxSkip) + } + if len(postSQL) != 1 || !strings.Contains(postSQL[0], `CREATE INDEX "idx_user_status"`) { + t.Fatalf("unexpected post SQL: %v", postSQL) + } + if len(warnings) != 0 { + t.Fatalf("unexpected warnings: %v", warnings) + } + wantUnsupported := []string{ + "索引 idx_name_prefix 使用前缀长度,当前暂不支持迁移", + "索引 idx_fulltext_note 类型=FULLTEXT,当前暂不支持自动迁移", + } + if !reflect.DeepEqual(unsupported, wantUnsupported) { + t.Fatalf("unexpected unsupported objects: got=%v want=%v", unsupported, wantUnsupported) + } +} + +func TestBuildSchemaMigrationPlan_AutoCreateWhenTargetMissing(t *testing.T) { + t.Parallel() + + sourceDB := &fakeMigrationDB{ + columns: map[string][]connection.ColumnDefinition{ + "shop.orders": { + {Name: "id", Type: "bigint", Nullable: "NO", Key: "PRI", Extra: "auto_increment"}, + {Name: "name", Type: "varchar(128)", Nullable: "YES"}, + }, + }, + indexes: map[string][]connection.IndexDefinition{}, + } + targetDB := &fakeMigrationDB{columns: map[string][]connection.ColumnDefinition{}} + cfg := SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "mysql", Database: "shop"}, + TargetConfig: connection.ConnectionConfig{Type: "kingbase", Database: "demo"}, + TargetTableStrategy: "smart", + CreateIndexes: true, + } + plan, sourceCols, targetCols, err := buildSchemaMigrationPlan(cfg, "orders", sourceDB, targetDB) + if err != nil { + t.Fatalf("buildSchemaMigrationPlan returned error: %v", err) + } + if len(sourceCols) != 2 || len(targetCols) != 0 { + t.Fatalf("unexpected columns lengths: source=%d target=%d", len(sourceCols), len(targetCols)) + } + if plan.TargetTableExists { + t.Fatalf("expected target table missing") + } + if !plan.AutoCreate { + t.Fatalf("expected auto create enabled") + } + if !strings.Contains(plan.PlannedAction, "自动建表") { + t.Fatalf("unexpected planned action: %s", plan.PlannedAction) + } + if !strings.Contains(plan.CreateTableSQL, `CREATE TABLE "public"."orders"`) { + t.Fatalf("unexpected create table SQL: %s", plan.CreateTableSQL) + } +} + +func stringPtr(v string) *string { return &v } + +func TestBuildPGLikeToMySQLCreateTablePlan_GeneratesMySQLDDL(t *testing.T) { + t.Parallel() + + sourceDB := &fakeMigrationDB{ + indexes: map[string][]connection.IndexDefinition{ + "public.users": { + {Name: "users_email_key", ColumnName: "email", NonUnique: 0, SeqInIndex: 1, IndexType: "BTREE"}, + {Name: "idx_users_name", ColumnName: "name", NonUnique: 1, SeqInIndex: 1, IndexType: "BTREE"}, + }, + }, + } + cols := []connection.ColumnDefinition{ + {Name: "id", Type: "integer", Nullable: "NO", Key: "PRI", Extra: "auto_increment"}, + {Name: "email", Type: "character varying(120)", Nullable: "NO"}, + {Name: "name", Type: 
"text", Nullable: "YES"}, + {Name: "profile", Type: "jsonb", Nullable: "YES"}, + } + cfg := SyncConfig{CreateIndexes: true} + createSQL, postSQL, warnings, unsupported, idxCreate, idxSkip, err := buildPGLikeToMySQLCreateTablePlan(cfg, "app.users", cols, sourceDB, "public", "users") + if err != nil { + t.Fatalf("buildPGLikeToMySQLCreateTablePlan returned error: %v", err) + } + if !strings.Contains(createSQL, "CREATE TABLE `app`.`users`") { + t.Fatalf("unexpected create SQL: %s", createSQL) + } + if !strings.Contains(createSQL, "`id` int AUTO_INCREMENT NOT NULL") { + t.Fatalf("unexpected id definition: %s", createSQL) + } + if !strings.Contains(createSQL, "`profile` json") { + t.Fatalf("unexpected json definition: %s", createSQL) + } + if idxCreate != 2 || idxSkip != 0 { + t.Fatalf("unexpected index summary: create=%d skip=%d", idxCreate, idxSkip) + } + if len(postSQL) != 2 { + t.Fatalf("unexpected post sql length: %v", postSQL) + } + if len(warnings) != 0 { + t.Fatalf("unexpected warnings: %v", warnings) + } + if len(unsupported) != 0 { + t.Fatalf("unexpected unsupported: %v", unsupported) + } +} + +func TestBuildPGLikeToMySQLPlan_AutoCreateWhenTargetMissing(t *testing.T) { + t.Parallel() + + sourceDB := &fakeMigrationDB{ + columns: map[string][]connection.ColumnDefinition{ + "public.orders": { + {Name: "id", Type: "bigint", Nullable: "NO", Key: "PRI", Extra: "auto_increment"}, + {Name: "amount", Type: "numeric(10,2)", Nullable: "NO"}, + }, + }, + indexes: map[string][]connection.IndexDefinition{}, + } + targetDB := &fakeMigrationDB{columns: map[string][]connection.ColumnDefinition{}} + cfg := SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "kingbase", Database: "public"}, + TargetConfig: connection.ConnectionConfig{Type: "mysql", Database: "app"}, + TargetTableStrategy: "smart", + CreateIndexes: true, + } + plan, sourceCols, targetCols, err := buildPGLikeToMySQLPlan(cfg, "orders", sourceDB, targetDB) + if err != nil { + t.Fatalf("buildPGLikeToMySQLPlan returned error: %v", err) + } + if len(sourceCols) != 2 || len(targetCols) != 0 { + t.Fatalf("unexpected columns lengths: source=%d target=%d", len(sourceCols), len(targetCols)) + } + if plan.TargetTableExists { + t.Fatalf("expected target table missing") + } + if !plan.AutoCreate { + t.Fatalf("expected auto create enabled") + } + if !strings.Contains(plan.CreateTableSQL, "CREATE TABLE `app`.`orders`") { + t.Fatalf("unexpected create table SQL: %s", plan.CreateTableSQL) + } +} + +func TestBuildMySQLToPGLikeCreateTablePlan_GeneratesPostgresDDL(t *testing.T) { + t.Parallel() + + sourceDB := &fakeMigrationDB{ + indexes: map[string][]connection.IndexDefinition{ + "shop.orders": { + {Name: "idx_orders_user", ColumnName: "user_id", NonUnique: 1, SeqInIndex: 1, IndexType: "BTREE"}, + {Name: "idx_orders_user", ColumnName: "status", NonUnique: 1, SeqInIndex: 2, IndexType: "BTREE"}, + }, + }, + } + cols := []connection.ColumnDefinition{ + {Name: "id", Type: "bigint", Nullable: "NO", Key: "PRI", Extra: "auto_increment"}, + {Name: "user_id", Type: "bigint", Nullable: "NO"}, + {Name: "status", Type: "varchar(32)", Nullable: "YES"}, + {Name: "payload", Type: "json", Nullable: "YES"}, + } + cfg := SyncConfig{CreateIndexes: true} + createSQL, postSQL, warnings, unsupported, idxCreate, idxSkip, err := buildMySQLToPGLikeCreateTablePlan("postgres", cfg, "public.orders", cols, sourceDB, "shop", "orders") + if err != nil { + t.Fatalf("buildMySQLToPGLikeCreateTablePlan returned error: %v", err) + } + if !strings.Contains(createSQL, `CREATE TABLE 
"public"."orders"`) { + t.Fatalf("unexpected create SQL: %s", createSQL) + } + if !strings.Contains(createSQL, `GENERATED BY DEFAULT AS IDENTITY`) { + t.Fatalf("missing identity mapping: %s", createSQL) + } + if !strings.Contains(createSQL, `jsonb`) { + t.Fatalf("missing jsonb mapping: %s", createSQL) + } + if idxCreate != 1 || idxSkip != 0 { + t.Fatalf("unexpected index summary: create=%d skip=%d", idxCreate, idxSkip) + } + if len(postSQL) != 1 || !strings.Contains(postSQL[0], `CREATE INDEX "idx_orders_user"`) { + t.Fatalf("unexpected post SQL: %v", postSQL) + } + if len(warnings) != 0 || len(unsupported) != 0 { + t.Fatalf("unexpected warnings/unsupported: warnings=%v unsupported=%v", warnings, unsupported) + } +} + +func TestBuildMySQLToClickHouseCreateTableSQL_GeneratesMergeTree(t *testing.T) { + t.Parallel() + + cols := []connection.ColumnDefinition{ + {Name: "id", Type: "bigint unsigned", Nullable: "NO", Key: "PRI"}, + {Name: "name", Type: "varchar(128)", Nullable: "YES"}, + {Name: "payload", Type: "json", Nullable: "YES"}, + } + createSQL, warnings, unsupported := buildMySQLToClickHouseCreateTableSQL("analytics.orders", cols) + if !strings.Contains(createSQL, "ENGINE = MergeTree()") { + t.Fatalf("unexpected create SQL: %s", createSQL) + } + if !strings.Contains(createSQL, "ORDER BY (`id`)") { + t.Fatalf("unexpected order by: %s", createSQL) + } + if !strings.Contains(createSQL, "`payload` Nullable(String)") { + t.Fatalf("unexpected json mapping: %s", createSQL) + } + if len(warnings) == 0 { + t.Fatalf("expected warnings for clickhouse semantics") + } + if len(unsupported) != 0 { + t.Fatalf("unexpected unsupported: %v", unsupported) + } +} + +func TestBuildClickHouseToMySQLCreateTableSQL_GeneratesMySQLDDL(t *testing.T) { + t.Parallel() + + cols := []connection.ColumnDefinition{ + {Name: "id", Type: "UInt64", Nullable: "NO", Key: "PRI"}, + {Name: "event_time", Type: "DateTime", Nullable: "NO"}, + {Name: "payload", Type: "Map(String, String)", Nullable: "YES"}, + } + createSQL, warnings := buildClickHouseToMySQLCreateTableSQL("app.metrics", cols) + if !strings.Contains(createSQL, "CREATE TABLE `app`.`metrics`") { + t.Fatalf("unexpected create SQL: %s", createSQL) + } + if !strings.Contains(createSQL, "`id` bigint unsigned NOT NULL") { + t.Fatalf("unexpected uint64 mapping: %s", createSQL) + } + if !strings.Contains(createSQL, "`payload` json") { + t.Fatalf("unexpected complex type mapping: %s", createSQL) + } + if len(warnings) == 0 { + t.Fatalf("expected warning for limited clickhouse reverse semantics") + } +} + +func TestBuildMySQLToMongoPlan_AutoCreateCollection(t *testing.T) { + t.Parallel() + + sourceDB := &fakeMigrationDB{ + columns: map[string][]connection.ColumnDefinition{ + "shop.users": { + {Name: "id", Type: "bigint", Nullable: "NO", Key: "PRI"}, + {Name: "name", Type: "varchar(64)", Nullable: "YES"}, + }, + }, + indexes: map[string][]connection.IndexDefinition{ + "shop.users": { + {Name: "idx_users_name", ColumnName: "name", NonUnique: 1, SeqInIndex: 1, IndexType: "BTREE"}, + }, + }, + } + targetDB := &fakeMigrationDB{} + cfg := SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "mysql", Database: "shop"}, + TargetConfig: connection.ConnectionConfig{Type: "mongodb", Database: "app"}, + TargetTableStrategy: "smart", + CreateIndexes: true, + } + plan, sourceCols, targetCols, err := buildMySQLToMongoPlan(cfg, "users", sourceDB, targetDB) + if err != nil { + t.Fatalf("buildMySQLToMongoPlan returned error: %v", err) + } + if len(sourceCols) != 2 || targetCols != nil { 
+ t.Fatalf("unexpected source/target columns: %d / %v", len(sourceCols), targetCols) + } + if !plan.AutoCreate || len(plan.PreDataSQL) == 0 { + t.Fatalf("expected auto create collection command: %+v", plan) + } + if !strings.Contains(plan.PreDataSQL[0], `"create":"users"`) { + t.Fatalf("unexpected create collection command: %v", plan.PreDataSQL) + } + if len(plan.PostDataSQL) != 1 || !strings.Contains(plan.PostDataSQL[0], `"createIndexes":"users"`) { + t.Fatalf("unexpected index commands: %v", plan.PostDataSQL) + } +} + +func TestBuildPGLikeToMongoPlan_AutoCreateCollection(t *testing.T) { + t.Parallel() + + sourceDB := &fakeMigrationDB{ + columns: map[string][]connection.ColumnDefinition{ + "public.orders": { + {Name: "id", Type: "bigint", Nullable: "NO", Key: "PRI"}, + {Name: "name", Type: "varchar(64)", Nullable: "YES"}, + }, + }, + indexes: map[string][]connection.IndexDefinition{ + "public.orders": { + {Name: "idx_orders_name", ColumnName: "name", NonUnique: 1, SeqInIndex: 1, IndexType: "BTREE"}, + }, + }, + } + targetDB := &fakeMigrationDB{} + cfg := SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "postgres", Database: "public"}, + TargetConfig: connection.ConnectionConfig{Type: "mongodb", Database: "app"}, + TargetTableStrategy: "smart", + CreateIndexes: true, + } + plan, sourceCols, targetCols, err := buildPGLikeToMongoPlan(cfg, "orders", sourceDB, targetDB) + if err != nil { + t.Fatalf("buildPGLikeToMongoPlan returned error: %v", err) + } + if len(sourceCols) != 2 || targetCols != nil { + t.Fatalf("unexpected source/target columns: %d / %v", len(sourceCols), targetCols) + } + if !plan.AutoCreate || len(plan.PreDataSQL) == 0 { + t.Fatalf("expected auto create collection command: %+v", plan) + } + if !strings.Contains(plan.PreDataSQL[0], `"create":"orders"`) { + t.Fatalf("unexpected create collection command: %v", plan.PreDataSQL) + } + if len(plan.PostDataSQL) != 1 || !strings.Contains(plan.PostDataSQL[0], `"createIndexes":"orders"`) { + t.Fatalf("unexpected index commands: %v", plan.PostDataSQL) + } +} + +func TestBuildClickHouseToMongoPlan_AutoCreateCollection(t *testing.T) { + t.Parallel() + + sourceDB := &fakeMigrationDB{ + columns: map[string][]connection.ColumnDefinition{ + "analytics.metrics": { + {Name: "id", Type: "UInt64", Nullable: "NO", Key: "PRI"}, + {Name: "host", Type: "String", Nullable: "YES"}, + }, + }, + } + targetDB := &fakeMigrationDB{} + cfg := SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "clickhouse", Database: "analytics"}, + TargetConfig: connection.ConnectionConfig{Type: "mongodb", Database: "app"}, + TargetTableStrategy: "smart", + } + plan, sourceCols, targetCols, err := buildClickHouseToMongoPlan(cfg, "metrics", sourceDB, targetDB) + if err != nil { + t.Fatalf("buildClickHouseToMongoPlan returned error: %v", err) + } + if len(sourceCols) != 2 || targetCols != nil { + t.Fatalf("unexpected source/target columns: %d / %v", len(sourceCols), targetCols) + } + if !plan.AutoCreate || len(plan.PreDataSQL) == 0 { + t.Fatalf("expected auto create collection command: %+v", plan) + } + if !strings.Contains(plan.PreDataSQL[0], `"create":"metrics"`) { + t.Fatalf("unexpected create collection command: %v", plan.PreDataSQL) + } +} + +func TestBuildTDengineToMongoPlan_AutoCreateCollection(t *testing.T) { + t.Parallel() + + sourceDB := &fakeMigrationDB{ + columns: map[string][]connection.ColumnDefinition{ + "src.cpu": { + {Name: "ts", Type: "TIMESTAMP", Nullable: "NO"}, + {Name: "host", Type: "NCHAR(64)", Nullable: "YES"}, + }, + }, + } + 
targetDB := &fakeMigrationDB{} + cfg := SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "tdengine", Database: "src"}, + TargetConfig: connection.ConnectionConfig{Type: "mongodb", Database: "app"}, + TargetTableStrategy: "smart", + } + plan, sourceCols, targetCols, err := buildTDengineToMongoPlan(cfg, "cpu", sourceDB, targetDB) + if err != nil { + t.Fatalf("buildTDengineToMongoPlan returned error: %v", err) + } + if len(sourceCols) != 2 || targetCols != nil { + t.Fatalf("unexpected source/target columns: %d / %v", len(sourceCols), targetCols) + } + if !plan.AutoCreate || len(plan.PreDataSQL) == 0 { + t.Fatalf("expected auto create collection command: %+v", plan) + } + if !strings.Contains(plan.PreDataSQL[0], `"create":"cpu"`) { + t.Fatalf("unexpected create collection command: %v", plan.PreDataSQL) + } +} + +func TestBuildMongoToMySQLPlan_InfersColumnsAndCreatesTable(t *testing.T) { + t.Parallel() + + query := `{"find":"users","filter":{},"limit":200}` + sourceDB := &fakeMigrationDB{ + queryData: map[string][]map[string]interface{}{ + query: { + {"_id": "a1", "name": "alice", "age": int64(18), "profile": map[string]interface{}{"city": "shanghai"}}, + {"_id": "b2", "name": "bob", "profile": map[string]interface{}{"city": "beijing"}}, + }, + }, + queryCols: map[string][]string{query: {"_id", "name", "age", "profile"}}, + indexes: map[string][]connection.IndexDefinition{ + "crm.users": {{Name: "email_1", ColumnName: "name", NonUnique: 1, SeqInIndex: 1, IndexType: "BTREE"}}, + }, + } + targetDB := &fakeMigrationDB{} + cfg := SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "mongodb", Database: "crm"}, + TargetConfig: connection.ConnectionConfig{Type: "mysql", Database: "app"}, + TargetTableStrategy: "smart", + CreateIndexes: true, + } + plan, sourceCols, _, err := buildMongoToMySQLPlan(cfg, "users", sourceDB, targetDB) + if err != nil { + t.Fatalf("buildMongoToMySQLPlan returned error: %v", err) + } + if len(sourceCols) == 0 { + t.Fatalf("expected inferred source cols") + } + if !plan.AutoCreate || !strings.Contains(plan.CreateTableSQL, "CREATE TABLE `app`.`users`") { + t.Fatalf("unexpected create table sql: %s", plan.CreateTableSQL) + } + if !strings.Contains(plan.CreateTableSQL, "`_id` text NOT NULL") && !strings.Contains(plan.CreateTableSQL, "`_id` varchar") { + t.Fatalf("missing inferred _id column: %s", plan.CreateTableSQL) + } + if !strings.Contains(plan.CreateTableSQL, "`profile` json") { + t.Fatalf("expected nested field degrade to json: %s", plan.CreateTableSQL) + } + if len(plan.PostDataSQL) != 1 { + t.Fatalf("expected one post index sql, got=%v", plan.PostDataSQL) + } +} + +func TestBuildTDengineToMySQLPlan_AutoCreateWhenTargetMissing(t *testing.T) { + t.Parallel() + + sourceDB := &fakeMigrationDB{ + columns: map[string][]connection.ColumnDefinition{ + "metrics.cpu": { + {Name: "ts", Type: "TIMESTAMP", Nullable: "NO"}, + {Name: "host", Type: "NCHAR(64)", Nullable: "YES", Key: "TAG", Extra: "TAG"}, + {Name: "usage", Type: "DOUBLE", Nullable: "YES"}, + }, + }, + } + targetDB := &fakeMigrationDB{} + cfg := SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "tdengine", Database: "metrics"}, + TargetConfig: connection.ConnectionConfig{Type: "mysql", Database: "app"}, + TargetTableStrategy: "smart", + } + plan, sourceCols, targetCols, err := buildTDengineToMySQLPlan(cfg, "cpu", sourceDB, targetDB) + if err != nil { + t.Fatalf("buildTDengineToMySQLPlan returned error: %v", err) + } + if len(sourceCols) != 3 || len(targetCols) != 0 { + 
t.Fatalf("unexpected columns lengths: source=%d target=%d", len(sourceCols), len(targetCols)) + } + if !plan.AutoCreate { + t.Fatalf("expected auto create enabled") + } + if !strings.Contains(plan.CreateTableSQL, "CREATE TABLE `app`.`cpu`") { + t.Fatalf("unexpected create table sql: %s", plan.CreateTableSQL) + } + if !strings.Contains(plan.CreateTableSQL, "`ts` datetime") { + t.Fatalf("expected timestamp mapping, got: %s", plan.CreateTableSQL) + } + if !strings.Contains(plan.CreateTableSQL, "`host` varchar(64)") { + t.Fatalf("expected nchar mapping, got: %s", plan.CreateTableSQL) + } + if len(plan.Warnings) == 0 || !strings.Contains(strings.Join(plan.Warnings, " "), "TAG") { + t.Fatalf("expected TAG warning, got: %v", plan.Warnings) + } +} + +func TestBuildTDengineToPGLikePlan_AutoCreateWhenTargetMissing(t *testing.T) { + t.Parallel() + + sourceDB := &fakeMigrationDB{ + columns: map[string][]connection.ColumnDefinition{ + "metrics.cpu": { + {Name: "ts", Type: "TIMESTAMP", Nullable: "NO"}, + {Name: "payload", Type: "JSON", Nullable: "YES"}, + {Name: "host", Type: "BINARY(32)", Nullable: "YES", Key: "TAG", Extra: "TAG"}, + }, + }, + } + targetDB := &fakeMigrationDB{} + cfg := SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "tdengine", Database: "metrics"}, + TargetConfig: connection.ConnectionConfig{Type: "kingbase", Database: "ignored"}, + TargetTableStrategy: "smart", + } + plan, sourceCols, targetCols, err := buildTDengineToPGLikePlan(cfg, "cpu", sourceDB, targetDB) + if err != nil { + t.Fatalf("buildTDengineToPGLikePlan returned error: %v", err) + } + if len(sourceCols) != 3 || len(targetCols) != 0 { + t.Fatalf("unexpected columns lengths: source=%d target=%d", len(sourceCols), len(targetCols)) + } + if !plan.AutoCreate { + t.Fatalf("expected auto create enabled") + } + if !strings.Contains(plan.CreateTableSQL, `CREATE TABLE "public"."cpu"`) { + t.Fatalf("unexpected create table sql: %s", plan.CreateTableSQL) + } + if !strings.Contains(plan.CreateTableSQL, `"ts" timestamp`) { + t.Fatalf("expected timestamp mapping, got: %s", plan.CreateTableSQL) + } + if !strings.Contains(plan.CreateTableSQL, `"payload" jsonb`) { + t.Fatalf("expected json mapping, got: %s", plan.CreateTableSQL) + } + if len(plan.Warnings) == 0 || !strings.Contains(strings.Join(plan.Warnings, " "), "TAG") { + t.Fatalf("expected TAG warning, got: %v", plan.Warnings) + } +} + +func TestBuildSchemaMigrationPlan_TDengineTargetWarnsInsertOnlyBoundary(t *testing.T) { + t.Parallel() + + sourceDB := &fakeMigrationDB{ + columns: map[string][]connection.ColumnDefinition{ + "shop.metrics": { + {Name: "id", Type: "bigint", Nullable: "NO", Key: "PRI"}, + {Name: "ts", Type: "datetime", Nullable: "NO"}, + {Name: "value", Type: "double", Nullable: "YES"}, + }, + }, + } + targetDB := &fakeMigrationDB{ + columns: map[string][]connection.ColumnDefinition{ + "taos.metrics": { + {Name: "id", Type: "bigint", Nullable: "NO"}, + {Name: "ts", Type: "timestamp", Nullable: "NO"}, + {Name: "value", Type: "double", Nullable: "YES"}, + }, + }, + } + cfg := SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "mysql", Database: "shop"}, + TargetConfig: connection.ConnectionConfig{Type: "tdengine", Database: "taos"}, + Mode: "insert_update", + } + + plan, _, _, err := buildSchemaMigrationPlan(cfg, "metrics", sourceDB, targetDB) + if err != nil { + t.Fatalf("buildSchemaMigrationPlan returned error: %v", err) + } + warnings := strings.Join(plan.Warnings, " ") + if !strings.Contains(warnings, "仅支持 INSERT 写入") { + t.Fatalf("expected 
TDengine target warning, got: %v", plan.Warnings) + } +} + +func TestBuildMySQLLikeToTDenginePlan_AutoCreateWhenTargetMissing(t *testing.T) { + t.Parallel() + + sourceDB := &fakeMigrationDB{ + columns: map[string][]connection.ColumnDefinition{ + "shop.metrics": { + {Name: "id", Type: "bigint", Nullable: "NO", Key: "PRI", Extra: "auto_increment"}, + {Name: "ts", Type: "datetime", Nullable: "NO"}, + {Name: "payload", Type: "json", Nullable: "YES"}, + }, + }, + } + targetDB := &fakeMigrationDB{} + cfg := SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "mysql", Database: "shop"}, + TargetConfig: connection.ConnectionConfig{Type: "tdengine", Database: "taos"}, + TargetTableStrategy: "smart", + } + plan, sourceCols, targetCols, err := buildMySQLLikeToTDenginePlan(cfg, "metrics", sourceDB, targetDB) + if err != nil { + t.Fatalf("buildMySQLLikeToTDenginePlan returned error: %v", err) + } + if len(sourceCols) != 3 || len(targetCols) != 0 { + t.Fatalf("unexpected columns lengths: source=%d target=%d", len(sourceCols), len(targetCols)) + } + if !plan.AutoCreate { + t.Fatalf("expected auto create enabled") + } + if !strings.Contains(plan.CreateTableSQL, "CREATE TABLE `taos`.`metrics`") { + t.Fatalf("unexpected create sql: %s", plan.CreateTableSQL) + } + if !strings.Contains(plan.CreateTableSQL, "`ts` TIMESTAMP") { + t.Fatalf("expected ts first column mapped to TIMESTAMP, got: %s", plan.CreateTableSQL) + } + if !strings.Contains(plan.CreateTableSQL, "`payload` VARCHAR(") { + t.Fatalf("expected json degrade to VARCHAR, got: %s", plan.CreateTableSQL) + } + if !strings.Contains(strings.Join(plan.Warnings, " "), "insert-only") && !strings.Contains(strings.Join(plan.Warnings, " "), "INSERT") { + t.Fatalf("expected tdengine target warning, got: %v", plan.Warnings) + } +} + +func TestBuildPGLikeToTDenginePlan_AutoCreateWhenTargetMissing(t *testing.T) { + t.Parallel() + + sourceDB := &fakeMigrationDB{ + columns: map[string][]connection.ColumnDefinition{ + "public.metrics": { + {Name: "event_time", Type: "timestamp without time zone", Nullable: "NO"}, + {Name: "name", Type: "character varying(64)", Nullable: "YES"}, + {Name: "meta", Type: "jsonb", Nullable: "YES"}, + }, + }, + } + targetDB := &fakeMigrationDB{} + cfg := SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "postgres", Database: "ignored"}, + TargetConfig: connection.ConnectionConfig{Type: "tdengine", Database: "taos"}, + TargetTableStrategy: "smart", + } + plan, sourceCols, targetCols, err := buildPGLikeToTDenginePlan(cfg, "metrics", sourceDB, targetDB) + if err != nil { + t.Fatalf("buildPGLikeToTDenginePlan returned error: %v", err) + } + if len(sourceCols) != 3 || len(targetCols) != 0 { + t.Fatalf("unexpected columns lengths: source=%d target=%d", len(sourceCols), len(targetCols)) + } + if !plan.AutoCreate { + t.Fatalf("expected auto create enabled") + } + if !strings.Contains(plan.CreateTableSQL, "CREATE TABLE `taos`.`metrics`") { + t.Fatalf("unexpected create sql: %s", plan.CreateTableSQL) + } + if !strings.Contains(plan.CreateTableSQL, "`event_time` TIMESTAMP") { + t.Fatalf("expected timestamp mapping, got: %s", plan.CreateTableSQL) + } + if !strings.Contains(plan.CreateTableSQL, "`meta` VARCHAR(") { + t.Fatalf("expected jsonb degrade to VARCHAR, got: %s", plan.CreateTableSQL) + } +} + +func TestBuildMySQLLikeToTDenginePlan_RejectsAutoCreateWithoutTimestampColumn(t *testing.T) { + t.Parallel() + + sourceDB := &fakeMigrationDB{ + columns: map[string][]connection.ColumnDefinition{ + "shop.metrics": { + {Name: "id", 
Type: "bigint", Nullable: "NO", Key: "PRI"}, + {Name: "name", Type: "varchar(64)", Nullable: "YES"}, + }, + }, + } + targetDB := &fakeMigrationDB{} + cfg := SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "mysql", Database: "shop"}, + TargetConfig: connection.ConnectionConfig{Type: "tdengine", Database: "taos"}, + TargetTableStrategy: "smart", + } + plan, _, _, err := buildMySQLLikeToTDenginePlan(cfg, "metrics", sourceDB, targetDB) + if err != nil { + t.Fatalf("buildMySQLLikeToTDenginePlan returned error: %v", err) + } + if plan.AutoCreate { + t.Fatalf("expected auto create disabled when source has no timestamp column") + } + if !strings.Contains(plan.PlannedAction, "时间列") { + t.Fatalf("unexpected planned action: %s", plan.PlannedAction) + } + if !strings.Contains(strings.Join(plan.Warnings, " "), "时间列") { + t.Fatalf("expected missing timestamp warning, got: %v", plan.Warnings) + } +} + +func TestBuildClickHouseToTDenginePlan_AutoCreateWhenTargetMissing(t *testing.T) { + t.Parallel() + + sourceDB := &fakeMigrationDB{ + columns: map[string][]connection.ColumnDefinition{ + "analytics.metrics": { + {Name: "event_time", Type: "DateTime64(3)", Nullable: "NO"}, + {Name: "host", Type: "FixedString(64)", Nullable: "YES"}, + {Name: "payload", Type: "Map(String,String)", Nullable: "YES"}, + }, + }, + } + targetDB := &fakeMigrationDB{} + cfg := SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "clickhouse", Database: "analytics"}, + TargetConfig: connection.ConnectionConfig{Type: "tdengine", Database: "taos"}, + TargetTableStrategy: "smart", + } + plan, sourceCols, targetCols, err := buildClickHouseToTDenginePlan(cfg, "metrics", sourceDB, targetDB) + if err != nil { + t.Fatalf("buildClickHouseToTDenginePlan returned error: %v", err) + } + if len(sourceCols) != 3 || len(targetCols) != 0 { + t.Fatalf("unexpected columns lengths: source=%d target=%d", len(sourceCols), len(targetCols)) + } + if !plan.AutoCreate { + t.Fatalf("expected auto create enabled") + } + if !strings.Contains(plan.CreateTableSQL, "CREATE TABLE `taos`.`metrics`") { + t.Fatalf("unexpected create sql: %s", plan.CreateTableSQL) + } + if !strings.Contains(plan.CreateTableSQL, "`event_time` TIMESTAMP") { + t.Fatalf("expected datetime64 mapping, got: %s", plan.CreateTableSQL) + } + if !strings.Contains(plan.CreateTableSQL, "`host` VARCHAR(64)") { + t.Fatalf("expected fixedstring mapping, got: %s", plan.CreateTableSQL) + } + if !strings.Contains(plan.CreateTableSQL, "`payload` VARCHAR(") { + t.Fatalf("expected complex type degrade to VARCHAR, got: %s", plan.CreateTableSQL) + } +} + +func TestBuildClickHouseToPGLikePlan_AutoCreateWhenTargetMissing(t *testing.T) { + t.Parallel() + + sourceDB := &fakeMigrationDB{ + columns: map[string][]connection.ColumnDefinition{ + "analytics.metrics": { + {Name: "id", Type: "UInt64", Nullable: "NO", Key: "PRI"}, + {Name: "event_time", Type: "DateTime64(3)", Nullable: "NO"}, + {Name: "host", Type: "FixedString(64)", Nullable: "YES"}, + {Name: "payload", Type: "Map(String,String)", Nullable: "YES"}, + }, + }, + } + targetDB := &fakeMigrationDB{} + cfg := SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "clickhouse", Database: "analytics"}, + TargetConfig: connection.ConnectionConfig{Type: "postgres", Database: "public"}, + TargetTableStrategy: "smart", + } + plan, sourceCols, targetCols, err := buildClickHouseToPGLikePlan(cfg, "metrics", sourceDB, targetDB) + if err != nil { + t.Fatalf("buildClickHouseToPGLikePlan returned error: %v", err) + } + if len(sourceCols) != 4 
|| len(targetCols) != 0 { + t.Fatalf("unexpected columns lengths: source=%d target=%d", len(sourceCols), len(targetCols)) + } + if !plan.AutoCreate { + t.Fatalf("expected auto create enabled") + } + if !strings.Contains(plan.CreateTableSQL, `CREATE TABLE "public"."metrics"`) { + t.Fatalf("unexpected create sql: %s", plan.CreateTableSQL) + } + if !strings.Contains(plan.CreateTableSQL, `"id" numeric(20,0)`) { + t.Fatalf("expected uint64 safeguard mapping, got: %s", plan.CreateTableSQL) + } + if !strings.Contains(plan.CreateTableSQL, `"event_time" timestamp`) { + t.Fatalf("expected datetime64 mapping, got: %s", plan.CreateTableSQL) + } + if !strings.Contains(plan.CreateTableSQL, `"host" varchar(64)`) { + t.Fatalf("expected fixedstring mapping, got: %s", plan.CreateTableSQL) + } + if !strings.Contains(plan.CreateTableSQL, `"payload" jsonb`) { + t.Fatalf("expected complex type degrade to jsonb, got: %s", plan.CreateTableSQL) + } + if !strings.Contains(plan.CreateTableSQL, `PRIMARY KEY ("id")`) { + t.Fatalf("expected primary key preservation, got: %s", plan.CreateTableSQL) + } +} + +func TestBuildPGLikeToClickHousePlan_AutoCreateWhenTargetMissing(t *testing.T) { + t.Parallel() + + sourceDB := &fakeMigrationDB{ + columns: map[string][]connection.ColumnDefinition{ + "public.orders": { + {Name: "id", Type: "bigint", Nullable: "NO", Key: "PRI"}, + {Name: "created_at", Type: "timestamp without time zone", Nullable: "NO"}, + {Name: "profile", Type: "jsonb", Nullable: "YES"}, + }, + }, + } + targetDB := &fakeMigrationDB{} + cfg := SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "postgres", Database: "public"}, + TargetConfig: connection.ConnectionConfig{Type: "clickhouse", Database: "analytics"}, + TargetTableStrategy: "smart", + } + plan, sourceCols, targetCols, err := buildPGLikeToClickHousePlan(cfg, "orders", sourceDB, targetDB) + if err != nil { + t.Fatalf("buildPGLikeToClickHousePlan returned error: %v", err) + } + if len(sourceCols) != 3 || len(targetCols) != 0 { + t.Fatalf("unexpected columns lengths: source=%d target=%d", len(sourceCols), len(targetCols)) + } + if !plan.AutoCreate { + t.Fatalf("expected auto create enabled") + } + if !strings.Contains(plan.CreateTableSQL, "CREATE TABLE `analytics`.`orders`") { + t.Fatalf("unexpected create sql: %s", plan.CreateTableSQL) + } + if !strings.Contains(plan.CreateTableSQL, "`created_at` DateTime") { + t.Fatalf("expected timestamp mapping, got: %s", plan.CreateTableSQL) + } + if !strings.Contains(plan.CreateTableSQL, "`profile` Nullable(String)") { + t.Fatalf("expected jsonb degrade to Nullable(String), got: %s", plan.CreateTableSQL) + } + if !strings.Contains(plan.CreateTableSQL, "ORDER BY (`id`)") { + t.Fatalf("expected primary key order by, got: %s", plan.CreateTableSQL) + } +} + +func TestBuildTDengineToTDenginePlan_AutoCreateWhenTargetMissing(t *testing.T) { + t.Parallel() + + sourceDB := &fakeMigrationDB{ + columns: map[string][]connection.ColumnDefinition{ + "src.cpu": { + {Name: "ts", Type: "TIMESTAMP", Nullable: "NO"}, + {Name: "host", Type: "NCHAR(64)", Nullable: "YES"}, + {Name: "region", Type: "NCHAR(32)", Nullable: "YES", Key: "TAG"}, + }, + }, + } + targetDB := &fakeMigrationDB{} + cfg := SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "tdengine", Database: "src"}, + TargetConfig: connection.ConnectionConfig{Type: "tdengine", Database: "dst"}, + TargetTableStrategy: "smart", + } + plan, sourceCols, targetCols, err := buildTDengineToTDenginePlan(cfg, "cpu", sourceDB, targetDB) + if err != nil { + 
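All of the tests above drive the plan builders through a shared fakeMigrationDB test double that is defined earlier in schema_migration_test.go and therefore not visible in this hunk. A minimal sketch of the shape the tests assume — field and method names are inferred from usage here, and the import path for connection is a guess:

// Sketch only: the real fakeMigrationDB lives earlier in this file.
type fakeMigrationDB struct {
	columns   map[string][]connection.ColumnDefinition // keyed by "schema.table"
	indexes   map[string][]connection.IndexDefinition  // keyed by "schema.table"
	queryData map[string][]map[string]interface{}      // canned rows, keyed by raw query text
	queryCols map[string][]string                      // column order for each canned query
}

func (f *fakeMigrationDB) GetColumns(schema, table string) ([]connection.ColumnDefinition, error) {
	cols, ok := f.columns[schema+"."+table]
	if !ok {
		// A lookup miss is what makes the planner treat the target table
		// as missing and switch to auto-create planning.
		return nil, fmt.Errorf("table %s.%s not found", schema, table)
	}
	return cols, nil
}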
t.Fatalf("buildTDengineToTDenginePlan returned error: %v", err) + } + if len(sourceCols) != 3 || len(targetCols) != 0 { + t.Fatalf("unexpected columns lengths: source=%d target=%d", len(sourceCols), len(targetCols)) + } + if !plan.AutoCreate { + t.Fatalf("expected auto create enabled") + } + if !strings.Contains(plan.CreateTableSQL, "CREATE TABLE `dst`.`cpu`") { + t.Fatalf("unexpected create sql: %s", plan.CreateTableSQL) + } + if !strings.Contains(plan.CreateTableSQL, "`ts` TIMESTAMP") { + t.Fatalf("expected timestamp preserved, got: %s", plan.CreateTableSQL) + } + if !strings.Contains(plan.CreateTableSQL, "`region` NCHAR(32)") { + t.Fatalf("expected tag degrade to regular nchar column, got: %s", plan.CreateTableSQL) + } + if !strings.Contains(strings.Join(plan.Warnings, " "), "TAG") { + t.Fatalf("expected TAG degrade warning, got: %v", plan.Warnings) + } +} diff --git a/internal/sync/schema_sync.go b/internal/sync/schema_sync.go index 126b623..f93abad 100644 --- a/internal/sync/schema_sync.go +++ b/internal/sync/schema_sync.go @@ -7,15 +7,16 @@ import ( ) func (s *SyncEngine) syncTableSchema(config SyncConfig, res *SyncResult, sourceDB db.Database, targetDB db.Database, tableName string) error { - targetType := strings.ToLower(strings.TrimSpace(config.TargetConfig.Type)) + targetType := resolveMigrationDBType(config.TargetConfig) if targetType != "mysql" { s.appendLog(config.JobID, res, "warn", fmt.Sprintf("目标数据库类型=%s 暂不支持结构同步,已跳过表 %s", config.TargetConfig.Type, tableName)) return nil } - sourceSchema, sourceTable := normalizeSchemaAndTable(config.SourceConfig.Type, config.SourceConfig.Database, tableName) - targetSchema, targetTable := normalizeSchemaAndTable(config.TargetConfig.Type, config.TargetConfig.Database, tableName) - targetQueryTable := qualifiedNameForQuery(config.TargetConfig.Type, targetSchema, targetTable, tableName) + sourceType := resolveMigrationDBType(config.SourceConfig) + sourceSchema, sourceTable := normalizeSchemaAndTable(sourceType, config.SourceConfig.Database, tableName) + targetSchema, targetTable := normalizeSchemaAndTable(targetType, config.TargetConfig.Database, tableName) + targetQueryTable := qualifiedNameForQuery(targetType, targetSchema, targetTable, tableName) // 1) 获取源表字段 sourceCols, err := sourceDB.GetColumns(sourceSchema, sourceTable) @@ -26,7 +27,6 @@ func (s *SyncEngine) syncTableSchema(config SyncConfig, res *SyncResult, sourceD // 2) 确保目标表存在 targetCols, err := targetDB.GetColumns(targetSchema, targetTable) if err != nil { - sourceType := strings.ToLower(strings.TrimSpace(config.SourceConfig.Type)) if sourceType != "mysql" { return fmt.Errorf("目标表不存在且源类型=%s 暂不支持自动建表: %w", config.SourceConfig.Type, err) } @@ -62,7 +62,6 @@ func (s *SyncEngine) syncTableSchema(config SyncConfig, res *SyncResult, sourceD // 3) 补齐目标缺失字段(安全策略:新增字段统一允许 NULL) missing := make([]string, 0) - sourceType := strings.ToLower(strings.TrimSpace(config.SourceConfig.Type)) for _, c := range sourceCols { colName := strings.TrimSpace(c.Name) if colName == "" { diff --git a/internal/sync/sql_helpers.go b/internal/sync/sql_helpers.go index 44b8a8b..af647b9 100644 --- a/internal/sync/sql_helpers.go +++ b/internal/sync/sql_helpers.go @@ -22,7 +22,7 @@ func quoteIdentByType(dbType string, ident string) string { } switch dbType { - case "mysql", "mariadb", "diros", "sphinx": + case "mysql", "mariadb", "diros", "sphinx", "clickhouse", "tdengine": return "`" + strings.ReplaceAll(ident, "`", "``") + "`" case "sqlserver": escaped := strings.ReplaceAll(ident, "]", "]]") @@ -74,8 +74,10 @@ func 
diff --git a/internal/sync/sql_helpers.go b/internal/sync/sql_helpers.go
index 44b8a8b..af647b9 100644
--- a/internal/sync/sql_helpers.go
+++ b/internal/sync/sql_helpers.go
@@ -22,7 +22,7 @@ func quoteIdentByType(dbType string, ident string) string {
 	}
 
 	switch dbType {
-	case "mysql", "mariadb", "diros", "sphinx":
+	case "mysql", "mariadb", "diros", "sphinx", "clickhouse", "tdengine":
 		return "`" + strings.ReplaceAll(ident, "`", "``") + "`"
 	case "sqlserver":
 		escaped := strings.ReplaceAll(ident, "]", "]]")
@@ -74,8 +74,10 @@ func normalizeSchemaAndTable(dbType string, dbName string, tableName string) (st
 	}
 
 	switch strings.ToLower(strings.TrimSpace(dbType)) {
-	case "postgres", "kingbase", "vastbase":
+	case "postgres", "kingbase", "highgo", "vastbase":
 		return "public", rawTable
+	case "duckdb":
+		return "main", rawTable
 	default:
 		return rawDB, rawTable
 	}
@@ -91,7 +93,7 @@ func qualifiedNameForQuery(dbType string, schema string, table string, original
 	}
 
 	switch strings.ToLower(strings.TrimSpace(dbType)) {
-	case "postgres", "kingbase", "vastbase":
+	case "postgres", "kingbase", "highgo", "vastbase":
 		s := strings.TrimSpace(schema)
 		if s == "" {
 			s = "public"
@@ -100,7 +102,16 @@ func qualifiedNameForQuery(dbType string, schema string, table string, original
 			return raw
 		}
 		return s + "." + table
-	case "mysql", "mariadb", "diros", "sphinx":
+	case "duckdb":
+		s := strings.TrimSpace(schema)
+		if s == "" {
+			s = "main"
+		}
+		if table == "" {
+			return raw
+		}
+		return s + "." + table
+	case "mysql", "mariadb", "diros", "sphinx", "clickhouse", "tdengine":
 		s := strings.TrimSpace(schema)
 		if s == "" || table == "" {
 			return table
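The net effect of the sql_helpers.go changes, written out as a hypothetical regression test (not part of this patch): ClickHouse and TDengine identifiers now take MySQL-style backtick quoting, HighGo joins the public-schema family, and DuckDB falls back to its main schema.

func TestDialectNamingConventions(t *testing.T) {
	// Backticks inside the identifier are doubled, then the whole name is wrapped.
	if got := quoteIdentByType("clickhouse", "or`der"); got != "`or``der`" {
		t.Fatalf("unexpected quoting: %s", got)
	}
	if schema, table := normalizeSchemaAndTable("duckdb", "appdb", "events"); schema != "main" || table != "events" {
		t.Fatalf("unexpected duckdb normalization: %s.%s", schema, table)
	}
	if schema, _ := normalizeSchemaAndTable("highgo", "appdb", "events"); schema != "public" {
		t.Fatalf("unexpected highgo schema: %s", schema)
	}
	if got := qualifiedNameForQuery("tdengine", "taos", "cpu", "cpu"); got != "taos.cpu" {
		t.Fatalf("unexpected qualified name: %s", got)
	}
}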
diff --git a/internal/sync/sync_engine.go b/internal/sync/sync_engine.go
index d1d897c..15b5aaa 100644
--- a/internal/sync/sync_engine.go
+++ b/internal/sync/sync_engine.go
@@ -12,14 +12,17 @@ import (
 
 // SyncConfig defines the parameters for a synchronization task
 type SyncConfig struct {
-	SourceConfig   connection.ConnectionConfig `json:"sourceConfig"`
-	TargetConfig   connection.ConnectionConfig `json:"targetConfig"`
-	Tables         []string                    `json:"tables"` // Tables to sync
-	Content        string                      `json:"content,omitempty"` // "data", "schema", "both"
-	Mode           string                      `json:"mode"` // "insert_update", "insert_only", "full_overwrite"
-	JobID          string                      `json:"jobId,omitempty"`
-	AutoAddColumns bool                        `json:"autoAddColumns,omitempty"` // 自动补齐缺失字段(当前仅 MySQL 目标支持)
-	TableOptions   map[string]TableOptions     `json:"tableOptions,omitempty"`
+	SourceConfig        connection.ConnectionConfig `json:"sourceConfig"`
+	TargetConfig        connection.ConnectionConfig `json:"targetConfig"`
+	Tables              []string                    `json:"tables"`
+	Content             string                      `json:"content,omitempty"` // "data", "schema", "both"
+	Mode                string                      `json:"mode"`              // "insert_update", "insert_only", "full_overwrite"
+	JobID               string                      `json:"jobId,omitempty"`
+	AutoAddColumns      bool                        `json:"autoAddColumns,omitempty"` // 自动补齐缺失字段
+	TargetTableStrategy string                      `json:"targetTableStrategy,omitempty"`
+	CreateIndexes       bool                        `json:"createIndexes,omitempty"`
+	MongoCollectionName string                      `json:"mongoCollectionName,omitempty"`
+	TableOptions        map[string]TableOptions     `json:"tableOptions,omitempty"`
 }
 
 // SyncResult holds the result of the sync operation
@@ -45,6 +48,13 @@ func NewSyncEngine(reporter Reporter) *SyncEngine {
 func (s *SyncEngine) RunSync(config SyncConfig) SyncResult {
 	result := SyncResult{Success: true, Logs: []string{}}
 	logger.Infof("开始数据同步:源=%s 目标=%s 表数量=%d", formatConnSummaryForSync(config.SourceConfig), formatConnSummaryForSync(config.TargetConfig), len(config.Tables))
+	if isRedisToMongoKeyspacePair(config) {
+		return s.runRedisToMongoSync(config, result)
+	}
+	if isMongoToRedisKeyspacePair(config) {
+		return s.runMongoToRedisSync(config, result)
+	}
+
 	totalTables := len(config.Tables)
 	s.progress(config.JobID, 0, totalTables, "", "开始同步")
 
@@ -70,6 +80,7 @@ func (s *SyncEngine) RunSync(config SyncConfig) SyncResult {
 		s.appendLog(config.JobID, &result, "warn", fmt.Sprintf("未知同步模式 %q,已自动使用 insert_update", config.Mode))
 	}
 	defaultMode := normalizeSyncMode(config.Mode)
+	strategy := normalizeTargetTableStrategy(config.TargetTableStrategy)
 
 	contentLabel := "仅同步数据"
 	if syncSchema && syncData {
@@ -77,9 +88,9 @@ func (s *SyncEngine) RunSync(config SyncConfig) SyncResult {
 	} else if syncSchema {
 		contentLabel = "仅同步结构"
 	}
-	s.appendLog(config.JobID, &result, "info", fmt.Sprintf("同步内容:%s;模式:%s;自动补字段:%v", contentLabel, defaultMode, config.AutoAddColumns))
+	s.appendLog(config.JobID, &result, "info", fmt.Sprintf("同步内容:%s;模式:%s;自动补字段:%v;目标表策略:%s;创建索引:%v", contentLabel, defaultMode, config.AutoAddColumns, strategy, config.CreateIndexes))
 
-	sourceDB, err := db.NewDatabase(config.SourceConfig.Type)
+	sourceDB, err := newSyncDatabase(config.SourceConfig.Type)
 	if err != nil {
 		logger.Error(err, "初始化源数据库驱动失败:类型=%s", config.SourceConfig.Type)
 		return s.fail(config.JobID, totalTables, result, "初始化源数据库驱动失败: "+err.Error())
@@ -88,7 +99,7 @@ func (s *SyncEngine) RunSync(config SyncConfig) SyncResult {
 		// Custom DB setup would go here if needed
 	}
 
-	targetDB, err := db.NewDatabase(config.TargetConfig.Type)
+	targetDB, err := newSyncDatabase(config.TargetConfig.Type)
 	if err != nil {
 		logger.Error(err, "初始化目标数据库驱动失败:类型=%s", config.TargetConfig.Type)
 		return s.fail(config.JobID, totalTables, result, "初始化目标数据库驱动失败: "+err.Error())
@@ -112,7 +123,6 @@ func (s *SyncEngine) RunSync(config SyncConfig) SyncResult {
 	}
 	defer targetDB.Close()
 
-	// Iterate Tables
 	for i, tableName := range config.Tables {
 		func() {
 			tableMode := defaultMode
@@ -120,30 +130,82 @@ func (s *SyncEngine) RunSync(config SyncConfig) SyncResult {
 			s.progress(config.JobID, i, totalTables, tableName, fmt.Sprintf("同步表(%d/%d)", i+1, totalTables))
 			defer s.progress(config.JobID, i+1, totalTables, tableName, "表处理完成")
 
-			if syncSchema {
-				s.progress(config.JobID, i, totalTables, tableName, "同步表结构")
-				if err := s.syncTableSchema(config, &result, sourceDB, targetDB, tableName); err != nil {
-					s.appendLog(config.JobID, &result, "error", fmt.Sprintf("表结构同步失败:表=%s 错误=%v", tableName, err))
+			plan, cols, targetCols, err := buildSchemaMigrationPlan(config, tableName, sourceDB, targetDB)
+			if err != nil {
+				s.appendLog(config.JobID, &result, "error", fmt.Sprintf("生成迁移计划失败:表=%s 错误=%v", tableName, err))
+				return
+			}
+			for _, warning := range plan.Warnings {
+				s.appendLog(config.JobID, &result, "warn", fmt.Sprintf(" -> %s", warning))
+			}
+			for _, unsupported := range plan.UnsupportedObjects {
+				s.appendLog(config.JobID, &result, "warn", fmt.Sprintf(" -> %s", unsupported))
+			}
+			if strings.TrimSpace(plan.PlannedAction) != "" {
+				s.appendLog(config.JobID, &result, "info", fmt.Sprintf(" -> %s", plan.PlannedAction))
+			}
+
+			if !plan.TargetTableExists && !plan.AutoCreate {
+				s.appendLog(config.JobID, &result, "warn", fmt.Sprintf("表 %s 目标表不存在,当前策略不允许自动建表,已跳过", tableName))
+				return
+			}
+
+			if !plan.TargetTableExists && plan.AutoCreate {
+				s.progress(config.JobID, i, totalTables, tableName, "创建目标表")
+				if len(plan.PreDataSQL) > 0 {
+					if err := executeSQLStatements(targetDB.Exec, plan.PreDataSQL); err != nil {
+						s.appendLog(config.JobID, &result, "error", fmt.Sprintf("预执行建表 SQL 失败:表=%s 错误=%v", tableName, err))
+						return
+					}
+				}
+				if strings.TrimSpace(plan.CreateTableSQL) == "" {
+					s.appendLog(config.JobID, &result, "error", fmt.Sprintf("表 %s 自动建表失败:建表 SQL 为空", tableName))
 					return
 				}
+				if _, err := targetDB.Exec(plan.CreateTableSQL); err != nil {
+					s.appendLog(config.JobID, &result, "error", fmt.Sprintf("创建目标表失败:表=%s 错误=%v", tableName, err))
+					return
+				}
+				s.appendLog(config.JobID, &result, "info", fmt.Sprintf("目标表创建成功:%s", tableName))
+				targetCols, err = targetDB.GetColumns(plan.TargetSchema, plan.TargetTable)
+				if err != nil {
+					s.appendLog(config.JobID, &result, "error", fmt.Sprintf("创建目标表后获取字段失败:表=%s 错误=%v", tableName, err))
+					return
+				}
+			} else if len(plan.PreDataSQL) > 0 {
+				s.progress(config.JobID, i, totalTables, tableName, "同步表结构")
+				if err := executeSQLStatements(targetDB.Exec, plan.PreDataSQL); err != nil {
+					s.appendLog(config.JobID, &result, "error", fmt.Sprintf("同步表结构失败:表=%s 错误=%v", tableName, err))
+					return
+				}
+				targetCols, err = targetDB.GetColumns(plan.TargetSchema, plan.TargetTable)
+				if err != nil {
+					s.appendLog(config.JobID, &result, "warn", fmt.Sprintf("补字段后刷新目标字段失败:表=%s 错误=%v", tableName, err))
+				}
 			}
+
 			if !syncData {
+				if len(plan.PostDataSQL) > 0 {
+					s.progress(config.JobID, i, totalTables, tableName, "创建索引")
+					if err := executeSQLStatements(targetDB.Exec, plan.PostDataSQL); err != nil {
+						s.appendLog(config.JobID, &result, "error", fmt.Sprintf("创建索引失败:表=%s 错误=%v", tableName, err))
+						return
+					}
+				}
 				result.TablesSynced++
 				return
 			}
 
-			sourceSchema, sourceTable := normalizeSchemaAndTable(config.SourceConfig.Type, config.SourceConfig.Database, tableName)
-			targetSchema, targetTable := normalizeSchemaAndTable(config.TargetConfig.Type, config.TargetConfig.Database, tableName)
-			sourceQueryTable := qualifiedNameForQuery(config.SourceConfig.Type, sourceSchema, sourceTable, tableName)
-			targetQueryTable := qualifiedNameForQuery(config.TargetConfig.Type, targetSchema, targetTable, tableName)
-
-			// 1. Get Columns & PKs
-			cols, err := sourceDB.GetColumns(sourceSchema, sourceTable)
-			if err != nil {
-				logger.Error(err, "获取源表列信息失败:表=%s", tableName)
-				s.appendLog(config.JobID, &result, "error", fmt.Sprintf("获取表 %s 的列信息失败: %v", tableName, err))
-				return
+			targetType := resolveMigrationDBType(config.TargetConfig)
+			sourceType := resolveMigrationDBType(config.SourceConfig)
+			targetTable := plan.TargetTable
+			sourceQueryTable, targetQueryTable := plan.SourceQueryTable, plan.TargetQueryTable
+			applyTableName := targetTable
+			switch targetType {
+			case "postgres", "kingbase", "highgo", "vastbase", "sqlserver":
+				applyTableName = targetQueryTable
 			}
+
 			sourceColsByLower := make(map[string]connection.ColumnDefinition, len(cols))
 			for _, col := range cols {
 				if strings.TrimSpace(col.Name) == "" {
@@ -158,25 +220,24 @@ func (s *SyncEngine) RunSync(config SyncConfig) SyncResult {
 					pkCols = append(pkCols, col.Name)
 				}
 			}
-
-			if len(pkCols) == 0 {
-				s.appendLog(config.JobID, &result, "warn", fmt.Sprintf("表 %s 未找到主键,已跳过数据同步(避免产生重复数据)", tableName))
-				return
+			requirePK := tableMode == "insert_update" && plan.TargetTableExists
+			pkCol := ""
+			if requirePK {
+				if len(pkCols) == 0 {
+					s.appendLog(config.JobID, &result, "warn", fmt.Sprintf("表 %s 未找到主键,当前模式需要差异对比,已跳过", tableName))
+					return
+				}
+				if len(pkCols) > 1 {
+					s.appendLog(config.JobID, &result, "warn", fmt.Sprintf("表 %s 为复合主键(%s),当前暂不支持差异同步", tableName, strings.Join(pkCols, ",")))
+					return
+				}
+				pkCol = pkCols[0]
 			}
-			if len(pkCols) > 1 {
-				s.appendLog(config.JobID, &result, "warn", fmt.Sprintf("表 %s 为复合主键(%s),当前暂不支持数据同步", tableName, strings.Join(pkCols, ",")))
-				return
-			}
-			pkCol := pkCols[0]
 
 			opts := TableOptions{Insert: true, Update: true, Delete: false}
 			if config.TableOptions != nil {
 				if t, ok := config.TableOptions[tableName]; ok {
 					opts = t
-					// 默认防护:如用户未设置任意一个字段,保持 insert/update 默认 true、delete 默认 false
-					if !t.Insert && !t.Update && !t.Delete {
-						opts = t
-					}
 				}
 			}
 			if !opts.Insert && !opts.Update && !opts.Delete {
@@ -184,10 +245,8 @@ func (s *SyncEngine) RunSync(config SyncConfig) SyncResult {
 				return
 			}
 
-			// 2. Fetch Data (MEMORY INTENSIVE - PROTOTYPE ONLY)
-			// TODO: Implement paging/streaming
 			s.progress(config.JobID, i, totalTables, tableName, "读取源表数据")
-			sourceRows, _, err := sourceDB.Query(fmt.Sprintf("SELECT * FROM %s", quoteQualifiedIdentByType(config.SourceConfig.Type, sourceQueryTable)))
+			sourceRows, _, err := sourceDB.Query(fmt.Sprintf("SELECT * FROM %s", quoteQualifiedIdentByType(sourceType, sourceQueryTable)))
 			if err != nil {
 				logger.Error(err, "读取源表失败:表=%s", tableName)
 				s.appendLog(config.JobID, &result, "error", fmt.Sprintf("读取源表 %s 失败: %v", tableName, err))
@@ -196,19 +255,19 @@ func (s *SyncEngine) RunSync(config SyncConfig) SyncResult {
 
 			var inserts []map[string]interface{}
 			var updates []connection.UpdateRow
+			var deletes []map[string]interface{}
 
-			if tableMode == "insert_update" {
+			if tableMode == "insert_update" && plan.TargetTableExists {
 				s.progress(config.JobID, i, totalTables, tableName, "读取目标表数据")
-				targetRows, _, err := targetDB.Query(fmt.Sprintf("SELECT * FROM %s", quoteQualifiedIdentByType(config.TargetConfig.Type, targetQueryTable)))
+				targetRows, _, err := targetDB.Query(fmt.Sprintf("SELECT * FROM %s", quoteQualifiedIdentByType(targetType, targetQueryTable)))
 				if err != nil {
 					logger.Error(err, "读取目标表失败:表=%s", tableName)
 					s.appendLog(config.JobID, &result, "error", fmt.Sprintf("读取目标表 %s 失败: %v", tableName, err))
 					return
 				}
 
-				// 3. Compare (In-Memory Hash Map)
 				s.progress(config.JobID, i, totalTables, tableName, "对比差异")
-				targetMap := make(map[string]map[string]interface{})
+				targetMap := make(map[string]map[string]interface{}, len(targetRows))
 				for _, row := range targetRows {
 					if row[pkCol] == nil {
 						continue
@@ -220,7 +279,6 @@ func (s *SyncEngine) RunSync(config SyncConfig) SyncResult {
 					targetMap[pkVal] = row
 				}
 				sourcePKSet := make(map[string]struct{}, len(sourceRows))
-
 				for _, sRow := range sourceRows {
 					if sRow[pkCol] == nil {
 						continue
@@ -230,7 +288,6 @@ func (s *SyncEngine) RunSync(config SyncConfig) SyncResult {
 						continue
 					}
 					sourcePKSet[pkVal] = struct{}{}
-
 					if tRow, exists := targetMap[pkVal]; exists {
 						changes := make(map[string]interface{})
 						for k, v := range sRow {
@@ -239,17 +296,12 @@ func (s *SyncEngine) RunSync(config SyncConfig) SyncResult {
 							}
 						}
 						if len(changes) > 0 {
-							updates = append(updates, connection.UpdateRow{
-								Keys:   map[string]interface{}{pkCol: sRow[pkCol]},
-								Values: changes,
-							})
+							updates = append(updates, connection.UpdateRow{Keys: map[string]interface{}{pkCol: sRow[pkCol]}, Values: changes})
 						}
 					} else {
 						inserts = append(inserts, sRow)
 					}
 				}
-
-				var deletes []map[string]interface{}
 				if opts.Delete {
 					for pkStr, row := range targetMap {
 						if _, ok := sourcePKSet[pkStr]; ok {
@@ -258,150 +310,49 @@ func (s *SyncEngine) RunSync(config SyncConfig) SyncResult {
 						deletes = append(deletes, map[string]interface{}{pkCol: row[pkCol]})
 					}
 				}
-
-				// apply operation selection
 				inserts = filterRowsByPKSelection(pkCol, inserts, opts.Insert, opts.SelectedInsertPKs)
 				updates = filterUpdatesByPKSelection(pkCol, updates, opts.Update, opts.SelectedUpdatePKs)
 				deletes = filterRowsByPKSelection(pkCol, deletes, opts.Delete, opts.SelectedDeletePKs)
-
-				changeSet := connection.ChangeSet{
-					Inserts: inserts,
-					Updates: updates,
-					Deletes: deletes,
+			} else {
+				inserts = sourceRows
+				if !opts.Insert {
+					inserts = nil
 				}
+				if tableMode == "full_overwrite" && plan.TargetTableExists {
+					s.appendLog(config.JobID, &result, "warn", fmt.Sprintf(" -> 全量覆盖模式:即将清空目标表 %s", tableName))
+					s.progress(config.JobID, i, totalTables, tableName, "清空目标表")
+					clearSQL := ""
+					if targetType == "mysql" {
+						clearSQL = fmt.Sprintf("TRUNCATE TABLE %s", quoteQualifiedIdentByType(targetType, targetQueryTable))
+					} else {
+						clearSQL = fmt.Sprintf("DELETE FROM %s", quoteQualifiedIdentByType(targetType, targetQueryTable))
+					}
+					if _, err := targetDB.Exec(clearSQL); err != nil {
+						s.appendLog(config.JobID, &result, "error", fmt.Sprintf(" -> 清空目标表失败: %v", err))
+						return
+					}
+				}
+			}
 
-				// 4. Align schema (target missing columns)
-				s.progress(config.JobID, i, totalTables, tableName, "检查字段一致性")
-				requiredCols := collectRequiredColumns(changeSet.Inserts, changeSet.Updates)
-				targetCols, err := targetDB.GetColumns(targetSchema, targetTable)
+			changeSet := connection.ChangeSet{Inserts: inserts, Updates: updates, Deletes: deletes}
+			s.progress(config.JobID, i, totalTables, tableName, "检查字段一致性")
+			targetColsResolved := targetCols
+			if len(targetColsResolved) == 0 {
+				targetColsResolved, err = targetDB.GetColumns(plan.TargetSchema, plan.TargetTable)
 				if err != nil {
 					s.appendLog(config.JobID, &result, "warn", fmt.Sprintf(" -> 获取目标表字段失败,已跳过字段一致性检查: %v", err))
-				} else {
-					targetColSet := make(map[string]struct{}, len(targetCols))
-					for _, c := range targetCols {
-						name := strings.ToLower(strings.TrimSpace(c.Name))
-						if name == "" {
-							continue
-						}
-						targetColSet[name] = struct{}{}
-					}
-
-					missing := make([]string, 0)
-					for lower, original := range requiredCols {
-						if _, ok := targetColSet[lower]; !ok {
-							missing = append(missing, original)
-						}
-					}
-					sort.Strings(missing)
-
-					if len(missing) > 0 {
-						if config.AutoAddColumns && strings.ToLower(strings.TrimSpace(config.TargetConfig.Type)) == "mysql" {
-							s.appendLog(config.JobID, &result, "warn", fmt.Sprintf(" -> 目标表缺少字段 %d 个,开始自动补齐: %s", len(missing), strings.Join(missing, ", ")))
-							added := 0
-							for _, colName := range missing {
-								colLower := strings.ToLower(strings.TrimSpace(colName))
-								colType := "TEXT"
-								if strings.ToLower(strings.TrimSpace(config.SourceConfig.Type)) == "mysql" {
-									if srcCol, ok := sourceColsByLower[colLower]; ok {
-										colType = sanitizeMySQLColumnType(srcCol.Type)
-									}
-								}
-
-								alterSQL := fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s %s NULL",
-									quoteQualifiedIdentByType("mysql", targetQueryTable),
-									quoteIdentByType("mysql", colName),
-									colType,
-								)
-								if _, err := targetDB.Exec(alterSQL); err != nil {
-									s.appendLog(config.JobID, &result, "error", fmt.Sprintf(" -> 自动补字段失败:字段=%s 错误=%v", colName, err))
-									continue
-								}
-								added++
-							}
-							s.appendLog(config.JobID, &result, "info", fmt.Sprintf(" -> 自动补字段完成:成功=%d 失败=%d", added, len(missing)-added))
-
-							// refresh columns
-							targetCols, err = targetDB.GetColumns(targetSchema, targetTable)
-							if err == nil {
-								targetColSet = make(map[string]struct{}, len(targetCols))
-								for _, c := range targetCols {
-									name := strings.ToLower(strings.TrimSpace(c.Name))
-									if name == "" {
-										continue
-									}
-									targetColSet[name] = struct{}{}
-								}
-							}
-						} else {
-							s.appendLog(config.JobID, &result, "warn", fmt.Sprintf(" -> 目标表缺少字段 %d 个(未开启自动补齐),将自动忽略:%s", len(missing), strings.Join(missing, ", ")))
-						}
-
-						// filter out still-missing columns to avoid apply failure
-						changeSet.Inserts = filterInsertRows(changeSet.Inserts, targetColSet)
-						changeSet.Updates = filterUpdateRows(changeSet.Updates, targetColSet)
-					}
-				}
-
-				// 5. Apply Changes
-				s.progress(config.JobID, i, totalTables, tableName, "应用变更")
-
-				if len(changeSet.Inserts) > 0 || len(changeSet.Updates) > 0 || len(changeSet.Deletes) > 0 {
-					s.appendLog(config.JobID, &result, "info", fmt.Sprintf(" -> 需插入: %d 行, 需更新: %d 行, 需删除: %d 行", len(changeSet.Inserts), len(changeSet.Updates), len(changeSet.Deletes)))
-
-					if applier, ok := targetDB.(db.BatchApplier); ok {
-						if err := applier.ApplyChanges(targetTable, changeSet); err != nil {
-							s.appendLog(config.JobID, &result, "error", fmt.Sprintf(" -> 应用变更失败: %v", err))
-						} else {
-							result.RowsInserted += len(changeSet.Inserts)
-							result.RowsUpdated += len(changeSet.Updates)
-							result.RowsDeleted += len(changeSet.Deletes)
-						}
-					} else {
-						s.appendLog(config.JobID, &result, "warn", " -> 目标驱动不支持应用数据变更 (ApplyChanges).")
-					}
-				} else {
-					s.appendLog(config.JobID, &result, "info", " -> 数据一致,无需变更.")
-				}
-
-				result.TablesSynced++
-				return
-			} else {
-				// insert_only / full_overwrite: do not compare target, just insert source rows
-				inserts = sourceRows
-			}
-
-			// full_overwrite: clear target table first
-			if tableMode == "full_overwrite" {
-				s.appendLog(config.JobID, &result, "warn", fmt.Sprintf(" -> 全量覆盖模式:即将清空目标表 %s", tableName))
-				s.progress(config.JobID, i, totalTables, tableName, "清空目标表")
-				clearSQL := ""
-				if strings.ToLower(strings.TrimSpace(config.TargetConfig.Type)) == "mysql" {
-					clearSQL = fmt.Sprintf("TRUNCATE TABLE %s", quoteQualifiedIdentByType(config.TargetConfig.Type, targetQueryTable))
-				} else {
-					clearSQL = fmt.Sprintf("DELETE FROM %s", quoteQualifiedIdentByType(config.TargetConfig.Type, targetQueryTable))
-				}
-				if _, err := targetDB.Exec(clearSQL); err != nil {
-					s.appendLog(config.JobID, &result, "error", fmt.Sprintf(" -> 清空目标表失败: %v", err))
-					return
 				}
 			}
-
-			// 4. Align schema (target missing columns)
-			s.progress(config.JobID, i, totalTables, tableName, "检查字段一致性")
-			requiredCols := collectRequiredColumns(inserts, updates)
-			targetCols, err := targetDB.GetColumns(targetSchema, targetTable)
-			if err != nil {
-				s.appendLog(config.JobID, &result, "warn", fmt.Sprintf(" -> 获取目标表字段失败,已跳过字段一致性检查: %v", err))
-			} else {
-				targetColSet := make(map[string]struct{}, len(targetCols))
-				for _, c := range targetCols {
+			if len(targetColsResolved) > 0 {
+				targetColSet := make(map[string]struct{}, len(targetColsResolved))
+				for _, c := range targetColsResolved {
 					name := strings.ToLower(strings.TrimSpace(c.Name))
 					if name == "" {
 						continue
 					}
 					targetColSet[name] = struct{}{}
 				}
-
+				requiredCols := collectRequiredColumns(changeSet.Inserts, changeSet.Updates)
 				missing := make([]string, 0)
 				for lower, original := range requiredCols {
 					if _, ok := targetColSet[lower]; !ok {
@@ -409,79 +360,64 @@ func (s *SyncEngine) RunSync(config SyncConfig) SyncResult {
 					}
 				}
 				sort.Strings(missing)
-
 				if len(missing) > 0 {
-					if config.AutoAddColumns && strings.ToLower(strings.TrimSpace(config.TargetConfig.Type)) == "mysql" {
+					if config.AutoAddColumns && supportsAutoAddColumnsForPair(sourceType, targetType) {
 						s.appendLog(config.JobID, &result, "warn", fmt.Sprintf(" -> 目标表缺少字段 %d 个,开始自动补齐: %s", len(missing), strings.Join(missing, ", ")))
 						added := 0
 						for _, colName := range missing {
 							colLower := strings.ToLower(strings.TrimSpace(colName))
-							colType := "TEXT"
-							if strings.ToLower(strings.TrimSpace(config.SourceConfig.Type)) == "mysql" {
-								if srcCol, ok := sourceColsByLower[colLower]; ok {
-									colType = sanitizeMySQLColumnType(srcCol.Type)
-								}
+							srcCol, ok := sourceColsByLower[colLower]
+							if !ok {
+								continue
+							}
+							alterSQL, err := buildAddColumnSQLForPair(sourceType, targetType, targetQueryTable, srcCol)
+							if err != nil {
+								s.appendLog(config.JobID, &result, "error", fmt.Sprintf(" -> 自动补字段失败:字段=%s 错误=%v", colName, err))
+								continue
 							}
-
-							alterSQL := fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s %s NULL",
-								quoteQualifiedIdentByType("mysql", targetQueryTable),
-								quoteIdentByType("mysql", colName),
-								colType,
-							)
 							if _, err := targetDB.Exec(alterSQL); err != nil {
 								s.appendLog(config.JobID, &result, "error", fmt.Sprintf(" -> 自动补字段失败:字段=%s 错误=%v", colName, err))
 								continue
 							}
 							added++
+							targetColSet[colLower] = struct{}{}
 						}
 						s.appendLog(config.JobID, &result, "info", fmt.Sprintf(" -> 自动补字段完成:成功=%d 失败=%d", added, len(missing)-added))
-
-						// refresh columns
-						targetCols, err = targetDB.GetColumns(targetSchema, targetTable)
-						if err == nil {
-							targetColSet = make(map[string]struct{}, len(targetCols))
-							for _, c := range targetCols {
-								name := strings.ToLower(strings.TrimSpace(c.Name))
-								if name == "" {
-									continue
-								}
-								targetColSet[name] = struct{}{}
-							}
-						}
 					} else {
 						s.appendLog(config.JobID, &result, "warn", fmt.Sprintf(" -> 目标表缺少字段 %d 个(未开启自动补齐),将自动忽略:%s", len(missing), strings.Join(missing, ", ")))
 					}
-
-					// filter out still-missing columns to avoid apply failure
-					inserts = filterInsertRows(inserts, targetColSet)
-					updates = filterUpdateRows(updates, targetColSet)
+					changeSet.Inserts = filterInsertRows(changeSet.Inserts, targetColSet)
+					changeSet.Updates = filterUpdateRows(changeSet.Updates, targetColSet)
 				}
 			}
 
-			// 5. Apply Changes
 			s.progress(config.JobID, i, totalTables, tableName, "应用变更")
 
-			changeSet := connection.ChangeSet{
-				Inserts: inserts,
-				Updates: updates,
-			}
-
-			if len(changeSet.Inserts) > 0 || len(changeSet.Updates) > 0 {
-				s.appendLog(config.JobID, &result, "info", fmt.Sprintf(" -> 需插入: %d 行, 需更新: %d 行", len(changeSet.Inserts), len(changeSet.Updates)))
-
+			if len(changeSet.Inserts) > 0 || len(changeSet.Updates) > 0 || len(changeSet.Deletes) > 0 {
+				s.appendLog(config.JobID, &result, "info", fmt.Sprintf(" -> 需插入: %d 行, 需更新: %d 行, 需删除: %d 行", len(changeSet.Inserts), len(changeSet.Updates), len(changeSet.Deletes)))
 				if applier, ok := targetDB.(db.BatchApplier); ok {
-					if err := applier.ApplyChanges(targetTable, changeSet); err != nil {
+					if err := applier.ApplyChanges(applyTableName, changeSet); err != nil {
 						s.appendLog(config.JobID, &result, "error", fmt.Sprintf(" -> 应用变更失败: %v", err))
-					} else {
-						result.RowsInserted += len(changeSet.Inserts)
-						result.RowsUpdated += len(changeSet.Updates)
+						return
 					}
+					result.RowsInserted += len(changeSet.Inserts)
+					result.RowsUpdated += len(changeSet.Updates)
+					result.RowsDeleted += len(changeSet.Deletes)
 				} else {
 					s.appendLog(config.JobID, &result, "warn", " -> 目标驱动不支持应用数据变更 (ApplyChanges).")
+					return
 				}
 			} else {
 				s.appendLog(config.JobID, &result, "info", " -> 数据一致,无需变更.")
 			}
 
+			if len(plan.PostDataSQL) > 0 {
+				s.progress(config.JobID, i, totalTables, tableName, "创建索引")
+				if err := executeSQLStatements(targetDB.Exec, plan.PostDataSQL); err != nil {
+					s.appendLog(config.JobID, &result, "error", fmt.Sprintf("创建索引失败:表=%s 错误=%v", tableName, err))
+					return
+				}
+			}
+
 			result.TablesSynced++
 		}()
 	}
@@ -554,3 +490,26 @@ func (s *SyncEngine) fail(jobID string, totalTables int, res SyncResult, msg str
 	s.progress(jobID, res.TablesSynced, totalTables, "", "同步失败")
 	return res
 }
+
+func (s *SyncEngine) execDDLStatements(jobID string, res *SyncResult, database db.Database, tableName string, stage string, statements []string) error {
+	for _, statement := range statements {
+		sqlText := strings.TrimSpace(statement)
+		if sqlText == "" {
+			continue
+		}
+		if _, err := database.Exec(sqlText); err != nil {
+			return fmt.Errorf("%s失败: %w", stage, err)
+		}
+		s.appendLog(jobID, res, "info", fmt.Sprintf("表 %s %s成功:%s", tableName, stage, shortenSyncSQL(sqlText)))
+	}
+	return nil
+}
+
+func shortenSyncSQL(sqlText string) string {
+	text := strings.TrimSpace(strings.ReplaceAll(strings.ReplaceAll(sqlText, "\n", " "), "\t", " "))
+	text = strings.Join(strings.Fields(text), " ")
+	if len(text) <= 120 {
+		return text
+	}
+	return text[:117] + "..."
+}
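RunSync now funnels all plan DDL (pre-data schema statements, create-table follow-ups, post-data index statements) through an executeSQLStatements helper that ships in the new migration_runtime_helpers.go and is not shown in this excerpt. A minimal sketch of the contract the engine relies on — the Exec result type is an assumption:

// Sketch under assumed types: run each non-empty statement through the
// target driver's Exec, stopping at the first failure.
func executeSQLStatements(exec func(sql string) (int64, error), statements []string) error {
	for _, statement := range statements {
		sqlText := strings.TrimSpace(statement)
		if sqlText == "" {
			continue // tolerate blank entries instead of sending empty DDL
		}
		if _, err := exec(sqlText); err != nil {
			return fmt.Errorf("执行 %s 失败: %w", shortenSyncSQL(sqlText), err)
		}
	}
	return nil
}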
diff --git a/internal/sync/sync_events.go b/internal/sync/sync_events.go
index 1facae7..a7777e5 100644
--- a/internal/sync/sync_events.go
+++ b/internal/sync/sync_events.go
@@ -27,4 +27,3 @@ type Reporter struct {
 	OnLog      func(event SyncLogEvent)
 	OnProgress func(event SyncProgressEvent)
 }
-
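For reference, a cross-database job exercising the new SyncConfig knobs could be configured as below. Values are illustrative only; the field names match the json tags added to SyncConfig above, and the mode choice follows the insert-only warning the planner emits for TDengine targets.

// Hypothetical usage sketch, not part of the patch.
cfg := SyncConfig{
	SourceConfig:        connection.ConnectionConfig{Type: "mysql", Database: "shop"},
	TargetConfig:        connection.ConnectionConfig{Type: "tdengine", Database: "taos"},
	Tables:              []string{"metrics"},
	Content:             "both",
	Mode:                "insert_only", // TDengine targets only take INSERT writes per the plan warnings
	TargetTableStrategy: "smart",
	CreateIndexes:       false, // relational index DDL generally does not apply on the TDengine path
}
result := NewSyncEngine(Reporter{}).RunSync(cfg)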