From e3bf160072ab7bda928aab53f4921c2cd93f0e7f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=A8=E5=9B=BD=E9=94=8B?= <18508478357@163.com> Date: Tue, 3 Feb 2026 17:37:41 +0800 Subject: [PATCH] =?UTF-8?q?=E2=9C=A8=20feat(sync):=20=E6=95=B0=E6=8D=AE?= =?UTF-8?q?=E5=90=8C=E6=AD=A5=E6=94=AF=E6=8C=81=E5=B7=AE=E5=BC=82=E5=AF=B9?= =?UTF-8?q?=E6=AF=94=E3=80=81=E8=A1=8C=E7=BA=A7=E9=80=89=E6=8B=A9=E4=B8=8E?= =?UTF-8?q?=E5=AE=9E=E6=97=B6=E8=BF=9B=E5=BA=A6=E6=97=A5=E5=BF=97?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 新增差异分析/预览接口与前端预览抽屉(插入/更新/删除) - 支持按表勾选插入/更新/删除(删除默认不勾选) - 支持按主键选择行级同步;无主键/复合主键表跳过并提示 - 同步过程实时输出中文日志与进度条,便于定位失败步骤 --- frontend/src/components/DataGrid.tsx | 3 +- frontend/src/components/DataSyncModal.tsx | 695 ++++++++++++++++++++-- frontend/src/components/DataViewer.tsx | 14 +- frontend/wailsjs/go/app/App.d.ts | 4 + frontend/wailsjs/go/app/App.js | 8 + frontend/wailsjs/go/models.ts | 30 + internal/app/methods_sync.go | 92 ++- internal/db/custom_impl.go | 14 +- internal/db/dameng_impl.go | 14 +- internal/db/kingbase_impl.go | 14 +- internal/db/mysql_impl.go | 42 +- internal/db/oracle_impl.go | 14 +- internal/db/postgres_impl.go | 21 +- internal/db/query_value.go | 58 ++ internal/db/sqlite_impl.go | 20 +- internal/sync/analyze.go | 198 ++++++ internal/sync/preview.go | 164 +++++ internal/sync/row_selection.go | 58 ++ internal/sync/schema_align.go | 97 +++ internal/sync/schema_sync.go | 101 ++++ internal/sync/sql_helpers.go | 109 ++++ internal/sync/sync_engine.go | 554 +++++++++++++---- internal/sync/sync_events.go | 30 + internal/sync/table_options.go | 13 + 24 files changed, 2087 insertions(+), 280 deletions(-) create mode 100644 internal/db/query_value.go create mode 100644 internal/sync/analyze.go create mode 100644 internal/sync/preview.go create mode 100644 internal/sync/row_selection.go create mode 100644 internal/sync/schema_align.go create mode 100644 internal/sync/schema_sync.go create mode 100644 
internal/sync/sql_helpers.go create mode 100644 internal/sync/sync_events.go create mode 100644 internal/sync/table_options.go diff --git a/frontend/src/components/DataGrid.tsx b/frontend/src/components/DataGrid.tsx index 427a49f..9567104 100644 --- a/frontend/src/components/DataGrid.tsx +++ b/frontend/src/components/DataGrid.tsx @@ -141,8 +141,7 @@ const EditableCell: React.FC = React.memo(({ return {childNode}; }); -const ContextMenuRow = React.memo(({ children, ...props }: any) => { - const record = props.record; +const ContextMenuRow = React.memo(({ children, record, ...props }: any) => { const context = useContext(DataContext); if (!record || !context) return {children}; diff --git a/frontend/src/components/DataSyncModal.tsx b/frontend/src/components/DataSyncModal.tsx index 3d4b97d..769885a 100644 --- a/frontend/src/components/DataSyncModal.tsx +++ b/frontend/src/components/DataSyncModal.tsx @@ -1,14 +1,36 @@ -import React, { useState, useEffect } from 'react'; -import { Modal, Form, Select, Button, message, Steps, Transfer, Card, Alert, Divider, Typography } from 'antd'; +import React, { useState, useEffect, useRef } from 'react'; +import { Modal, Form, Select, Button, message, Steps, Transfer, Card, Alert, Divider, Typography, Progress, Checkbox, Table, Drawer, Tabs } from 'antd'; import { useStore } from '../store'; -import { DBGetDatabases, DBGetTables, DataSync } from '../../wailsjs/go/app/App'; +import { DBGetDatabases, DBGetTables, DataSync, DataSyncAnalyze, DataSyncPreview } from '../../wailsjs/go/app/App'; import { SavedConnection } from '../types'; -import { connection } from '../../wailsjs/go/models'; +import { EventsOn } from '../../wailsjs/runtime/runtime'; const { Title, Text } = Typography; const { Step } = Steps; const { Option } = Select; +type SyncLogEvent = { jobId: string; level?: string; message?: string; ts?: number }; +type SyncProgressEvent = { jobId: string; percent?: number; current?: number; total?: number; table?: string; stage?: 
string }; +type SyncLogItem = { level: string; message: string; ts?: number }; +type TableDiffSummary = { + table: string; + pkColumn?: string; + canSync?: boolean; + inserts?: number; + updates?: number; + deletes?: number; + same?: number; + message?: string; +}; +type TableOps = { + insert: boolean; + update: boolean; + delete: boolean; + selectedInsertPks?: string[]; + selectedUpdatePks?: string[]; + selectedDeletePks?: string[]; +}; + const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open, onClose }) => { const connections = useStore((state) => state.connections); const [currentStep, setCurrentStep] = useState(0); @@ -27,8 +49,76 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open, const [allTables, setAllTables] = useState([]); const [selectedTables, setSelectedTables] = useState([]); + // Options + const [syncContent, setSyncContent] = useState<'data' | 'schema' | 'both'>('data'); + const [syncMode, setSyncMode] = useState('insert_update'); + const [autoAddColumns, setAutoAddColumns] = useState(true); + const [showSameTables, setShowSameTables] = useState(false); + const [analyzing, setAnalyzing] = useState(false); + const [diffTables, setDiffTables] = useState([]); + const [tableOptions, setTableOptions] = useState>({}); + + const [previewOpen, setPreviewOpen] = useState(false); + const [previewTable, setPreviewTable] = useState(''); + const [previewLoading, setPreviewLoading] = useState(false); + const [previewData, setPreviewData] = useState(null); + // Step 3: Result const [syncResult, setSyncResult] = useState(null); + const [syncing, setSyncing] = useState(false); + const [syncLogs, setSyncLogs] = useState([]); + const [syncProgress, setSyncProgress] = useState<{ percent: number; current: number; total: number; table: string; stage: string }>({ + percent: 0, + current: 0, + total: 0, + table: '', + stage: '' + }); + const jobIdRef = useRef(''); + const logBoxRef = useRef(null); + const 
autoScrollRef = useRef(true); + + const normalizeConnConfig = (conn: SavedConnection, database?: string) => ({ + ...conn.config, + port: Number((conn.config as any).port), + password: conn.config.password || "", + useSSH: conn.config.useSSH || false, + ssh: conn.config.ssh || { host: "", port: 22, user: "", password: "", keyPath: "" }, + database: typeof database === 'string' ? database : (conn.config.database || ""), + }); + + useEffect(() => { + if (!open) return; + + const offLog = EventsOn('sync:log', (event: SyncLogEvent) => { + if (!event || event.jobId !== jobIdRef.current) return; + const msg = String(event.message || '').trim(); + if (!msg) return; + setSyncLogs(prev => [...prev, { level: String(event.level || 'info'), message: msg, ts: event.ts }]); + }); + + const offProgress = EventsOn('sync:progress', (event: SyncProgressEvent) => { + if (!event || event.jobId !== jobIdRef.current) return; + setSyncProgress(prev => ({ + percent: typeof event.percent === 'number' ? event.percent : prev.percent, + current: typeof event.current === 'number' ? event.current : prev.current, + total: typeof event.total === 'number' ? event.total : prev.total, + table: typeof event.table === 'string' ? event.table : prev.table, + stage: typeof event.stage === 'string' ? 
event.stage : prev.stage, + })); + }); + + return () => { + offLog(); + offProgress(); + }; + }, [open]); + + useEffect(() => { + if (!logBoxRef.current) return; + if (!autoScrollRef.current) return; + logBoxRef.current.scrollTop = logBoxRef.current.scrollHeight; + }, [syncLogs]); useEffect(() => { if (open) { @@ -38,7 +128,23 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open, setSourceDb(''); setTargetDb(''); setSelectedTables([]); + setSyncContent('data'); + setSyncMode('insert_update'); + setAutoAddColumns(true); + setShowSameTables(false); + setAnalyzing(false); + setDiffTables([]); + setTableOptions({}); + setPreviewOpen(false); + setPreviewTable(''); + setPreviewLoading(false); + setPreviewData(null); setSyncResult(null); + setSyncing(false); + setSyncLogs([]); + setSyncProgress({ percent: 0, current: 0, total: 0, table: '', stage: '' }); + jobIdRef.current = ''; + autoScrollRef.current = true; } }, [open]); @@ -49,7 +155,7 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open, if (conn) { setLoading(true); try { - const res = await DBGetDatabases(conn.config as any); + const res = await DBGetDatabases(normalizeConnConfig(conn) as any); if (res.success) { setSourceDbs((res.data as any[]).map((r: any) => r.Database || r.database || r.username)); } @@ -65,7 +171,7 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open, if (conn) { setLoading(true); try { - const res = await DBGetDatabases(conn.config as any); + const res = await DBGetDatabases(normalizeConnConfig(conn) as any); if (res.success) { setTargetDbs((res.data as any[]).map((r: any) => r.Database || r.database || r.username)); } @@ -83,7 +189,7 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open, try { const conn = connections.find(c => c.id === sourceConnId); if (conn) { - const config = { ...conn.config, database: sourceDb }; + const config = normalizeConnConfig(conn, sourceDb); 
const res = await DBGetTables(config as any, sourceDb); if (res.success) { // DBGetTables returns [{Table: "name"}, ...] @@ -98,36 +204,221 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open, setLoading(false); }; - const runSync = async () => { + const updateTableOption = (table: string, key: keyof TableOps, value: any) => { + setTableOptions(prev => ({ + ...prev, + [table]: { ...(prev[table] || { insert: true, update: true, delete: false }), [key]: value } + })); + }; + + const analyzeDiff = async () => { + if (selectedTables.length === 0) return; + if (!sourceConnId || !targetConnId) return message.error("Select connections first"); + if (!sourceDb || !targetDb) return message.error("Select databases first"); + setLoading(true); + setAnalyzing(true); + setDiffTables([]); + setTableOptions({}); + setSyncLogs([]); + const sConn = connections.find(c => c.id === sourceConnId)!; const tConn = connections.find(c => c.id === targetConnId)!; + const jobId = `analyze-${Date.now()}-${Math.random().toString(16).slice(2, 8)}`; + jobIdRef.current = jobId; + autoScrollRef.current = true; + setSyncProgress({ percent: 0, current: 0, total: selectedTables.length, table: '', stage: '差异分析' }); + + const config = { + sourceConfig: normalizeConnConfig(sConn, sourceDb), + targetConfig: normalizeConnConfig(tConn, targetDb), + tables: selectedTables, + content: syncContent, + mode: "insert_update", + autoAddColumns, + jobId, + }; + + try { + const res = await DataSyncAnalyze(config as any); + if (res.success) { + const tables = ((res.data as any)?.tables || []) as TableDiffSummary[]; + setDiffTables(tables); + const init: Record = {}; + tables.forEach(t => { + const can = !!t.canSync; + init[t.table] = { + insert: can, + update: can, + delete: false, + selectedInsertPks: [], + selectedUpdatePks: [], + selectedDeletePks: [], + }; + }); + setTableOptions(init); + message.success("差异分析完成"); + } else { + message.error(res.message || "差异分析失败"); + } + } catch 
(e: any) { + message.error("差异分析失败: " + (e?.message || "")); + } + + setLoading(false); + setAnalyzing(false); + }; + + const openPreview = async (table: string) => { + if (!table) return; + const sConn = connections.find(c => c.id === sourceConnId)!; + const tConn = connections.find(c => c.id === targetConnId)!; + + setPreviewOpen(true); + setPreviewTable(table); + setPreviewLoading(true); + setPreviewData(null); + + const config = { + sourceConfig: normalizeConnConfig(sConn, sourceDb), + targetConfig: normalizeConnConfig(tConn, targetDb), + tables: selectedTables, + content: "data", + mode: "insert_update", + autoAddColumns, + }; + + try { + const res = await DataSyncPreview(config as any, table, 200); + if (res.success) { + setPreviewData(res.data); + } else { + message.error(res.message || "加载差异预览失败"); + } + } catch (e: any) { + message.error("加载差异预览失败: " + (e?.message || "")); + } + + setPreviewLoading(false); + }; + + const runSync = async () => { + if (syncContent !== 'schema' && diffTables.length === 0) { + message.error("请先对比差异,再开始同步"); + return; + } + if (syncContent !== 'schema' && syncMode === 'full_overwrite') { + const ok = await new Promise((resolve) => { + Modal.confirm({ + title: '确认全量覆盖', + content: '全量覆盖会清空目标表数据后再插入,请确认已备份目标库。', + okText: '继续执行', + cancelText: '取消', + onOk: () => resolve(true), + onCancel: () => resolve(false), + }); + }); + if (!ok) return; + } + + setLoading(true); + setSyncing(true); + setCurrentStep(2); + setSyncResult(null); + setSyncLogs([]); + + const sConn = connections.find(c => c.id === sourceConnId)!; + const tConn = connections.find(c => c.id === targetConnId)!; + + const jobId = `sync-${Date.now()}-${Math.random().toString(16).slice(2, 8)}`; + jobIdRef.current = jobId; + autoScrollRef.current = true; + setSyncProgress({ + percent: 0, + current: 0, + total: selectedTables.length, + table: '', + stage: '准备开始', + }); const config = { - sourceConfig: { ...sConn.config, database: sourceDb }, - targetConfig: { 
...tConn.config, database: targetDb }, + sourceConfig: { + ...sConn.config, + port: Number((sConn.config as any).port), + password: sConn.config.password || "", + useSSH: sConn.config.useSSH || false, + ssh: sConn.config.ssh || { host: "", port: 22, user: "", password: "", keyPath: "" }, + database: sourceDb, + }, + targetConfig: { + ...tConn.config, + port: Number((tConn.config as any).port), + password: tConn.config.password || "", + useSSH: tConn.config.useSSH || false, + ssh: tConn.config.ssh || { host: "", port: 22, user: "", password: "", keyPath: "" }, + database: targetDb, + }, tables: selectedTables, - mode: "insert_update" + content: syncContent, + mode: syncMode, + autoAddColumns, + tableOptions, + jobId, }; try { const res = await DataSync(config as any); setSyncResult(res); - setCurrentStep(2); + if (Array.isArray(res?.logs) && res.logs.length > 0) { + setSyncLogs(prev => { + if (prev.length > 0) return prev; + return (res.logs as string[]).map((log) => { + const msg = String(log || '').trim(); + if (msg.includes('致命错误') || msg.includes('失败')) return { level: 'error', message: msg }; + if (msg.includes('跳过') || msg.includes('警告')) return { level: 'warn', message: msg }; + return { level: 'info', message: msg }; + }); + }); + } } catch (e) { message.error("Sync execution failed"); + setSyncResult({ success: false, message: "同步执行失败", logs: [] }); } setLoading(false); + setSyncing(false); + }; + + const renderSyncLogItem = (item: SyncLogItem) => { + const level = String(item.level || 'info').toLowerCase(); + const color = level === 'error' ? '#ff4d4f' : (level === 'warn' ? '#faad14' : '#595959'); + const label = level === 'error' ? '错误' : (level === 'warn' ? '警告' : '信息'); + const timeText = typeof item.ts === 'number' ? new Date(item.ts).toLocaleTimeString('zh-CN', { hour12: false }) : ''; + return ( +
+ ● {label} + {timeText && {timeText}} + {item.message} +
+ ); }; return ( + <> { + if (syncing) { + message.warning("同步执行中,暂不支持关闭"); + return; + } + onClose(); + }} + width={800} + footer={null} + destroyOnHidden + closable={!syncing} + maskClosable={!syncing} > @@ -137,34 +428,67 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open, {/* STEP 1: CONFIG */} {currentStep === 0 && ( -
- +
+
+ +
+ + + + + + +
+
+
+ +
+ + + + + + +
+
+
+ +
- - + + + - - + + + -
-
-
- -
- - - - - + + setAutoAddColumns(e.target.checked)}> + 自动补齐目标表缺失字段(仅 MySQL 目标) + + {syncContent !== 'schema' && syncMode === 'full_overwrite' && ( + + )}
@@ -172,32 +496,155 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open, {/* STEP 2: TABLES */} {currentStep === 1 && ( -
- 请选择需要同步的表: +
+
+ 请选择需要同步的表: + setShowSameTables(e.target.checked)}> + 显示相同表 + +
({ key: t, title: t }))} titles={['源表', '已选表']} targetKeys={selectedTables} onChange={(keys) => setSelectedTables(keys as string[])} render={item => item.title} - listStyle={{ width: 350, height: 350, marginTop: 12 }} + listStyle={{ width: 350, height: 280, marginTop: 0 }} locale={{ itemUnit: '项', itemsUnit: '项', searchPlaceholder: '搜索表', notFoundContent: '暂无数据' }} /> + + {diffTables.length > 0 && ( +
+ 对比结果 + r.table} + dataSource={diffTables.filter(t => { + const ins = Number(t.inserts || 0); + const upd = Number(t.updates || 0); + const del = Number(t.deletes || 0); + const same = Number(t.same || 0); + const msg = String(t.message || '').trim(); + const can = !!t.canSync; + if (showSameTables) return true; + if (!can) return true; + if (msg) return true; + return ins > 0 || upd > 0 || del > 0 || same === 0; + })} + columns={[ + { title: '表名', dataIndex: 'table', key: 'table', ellipsis: true }, + { + title: '插入', + key: 'inserts', + width: 90, + render: (_: any, r: any) => { + const ops = tableOptions[r.table] || { insert: true, update: true, delete: false }; + const disabled = !r.canSync || analyzing || Number(r.inserts || 0) === 0; + return ( + updateTableOption(r.table, 'insert', e.target.checked)} + > + {Number(r.inserts || 0)} + + ); + } + }, + { + title: '更新', + key: 'updates', + width: 90, + render: (_: any, r: any) => { + const ops = tableOptions[r.table] || { insert: true, update: true, delete: false }; + const disabled = !r.canSync || analyzing || Number(r.updates || 0) === 0; + return ( + updateTableOption(r.table, 'update', e.target.checked)} + > + {Number(r.updates || 0)} + + ); + } + }, + { + title: '删除', + key: 'deletes', + width: 90, + render: (_: any, r: any) => { + const ops = tableOptions[r.table] || { insert: true, update: true, delete: false }; + const disabled = !r.canSync || analyzing || Number(r.deletes || 0) === 0; + return ( + updateTableOption(r.table, 'delete', e.target.checked)} + > + {Number(r.deletes || 0)} + + ); + } + }, + { title: '相同', dataIndex: 'same', key: 'same', width: 70, render: (v: any) => Number(v || 0) }, + { title: '消息', dataIndex: 'message', key: 'message', ellipsis: true, render: (v: any) => (v ? 
String(v) : '') }, + { + title: '预览', + key: 'preview', + width: 80, + render: (_: any, r: any) => { + const can = !!r.canSync; + const hasDiff = Number(r.inserts || 0) + Number(r.updates || 0) + Number(r.deletes || 0) > 0; + return ( + + ); + } + } + ]} + /> + + )} )} {/* STEP 3: RESULT */} - {currentStep === 2 && syncResult && ( + {currentStep === 2 && (
- + +
+ `${syncProgress.current}/${syncProgress.total}`} + /> +
+ 日志 -
- {syncResult.logs.map((log: string, i: number) =>
{log}
)} +
{ + const el = logBoxRef.current; + if (!el) return; + const nearBottom = el.scrollHeight - el.scrollTop - el.clientHeight < 40; + autoScrollRef.current = nearBottom; + }} + style={{ background: '#f5f5f5', padding: 12, height: 300, overflowY: 'auto', fontFamily: 'monospace' }} + > + {syncLogs.map((item, i: number) =>
{renderSyncLogItem(item)}
)}
)} @@ -206,20 +653,154 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open, {currentStep === 0 && ( )} - {currentStep === 1 && ( - <> - - + {currentStep === 1 && ( + <> + + + )} {currentStep === 2 && ( <> - - + + )}
+ { setPreviewOpen(false); setPreviewTable(''); setPreviewData(null); }} + width={900} + > + {previewLoading && } + {!previewLoading && previewData && ( +
+ + + + 未勾选任何行表示“同步全部插入差异”;如不想执行插入请在对比结果中取消勾选“插入”。 +
r.pk} + dataSource={(previewData.inserts || []).map((r: any) => ({ ...r, key: r.pk }))} + pagination={false} + rowSelection={{ + selectedRowKeys: (tableOptions[previewTable]?.selectedInsertPks || []) as any, + onChange: (keys) => updateTableOption(previewTable, 'selectedInsertPks', keys as string[]), + getCheckboxProps: () => ({ disabled: !tableOptions[previewTable]?.insert }), + }} + columns={[ + { title: previewData.pkColumn || '主键', dataIndex: 'pk', key: 'pk', width: 200, ellipsis: true }, + { title: '数据', dataIndex: 'row', key: 'row', render: (v: any) =>
{JSON.stringify(v, null, 2)}
} + ]} + /> + + ) + }, + { + key: 'update', + label: `更新(${previewData.totalUpdates || 0})`, + children: ( +
+ 未勾选任何行表示“同步全部更新差异”;如不想执行更新请在对比结果中取消勾选“更新”。 +
r.pk} + dataSource={(previewData.updates || []).map((r: any) => ({ ...r, key: r.pk }))} + pagination={false} + rowSelection={{ + selectedRowKeys: (tableOptions[previewTable]?.selectedUpdatePks || []) as any, + onChange: (keys) => updateTableOption(previewTable, 'selectedUpdatePks', keys as string[]), + getCheckboxProps: () => ({ disabled: !tableOptions[previewTable]?.update }), + }} + columns={[ + { title: previewData.pkColumn || '主键', dataIndex: 'pk', key: 'pk', width: 200, ellipsis: true }, + { title: '变更字段', dataIndex: 'changedColumns', key: 'changedColumns', render: (v: any) => Array.isArray(v) ? v.join(', ') : '' }, + { + title: '详情', + key: 'detail', + width: 80, + render: (_: any, r: any) => ( + + ) + } + ]} + /> + + ) + }, + { + key: 'delete', + label: `删除(${previewData.totalDeletes || 0})`, + children: ( +
+ + 未勾选任何行表示“同步全部删除差异”;如不想执行删除请在对比结果中取消勾选“删除”。 +
r.pk} + dataSource={(previewData.deletes || []).map((r: any) => ({ ...r, key: r.pk }))} + pagination={false} + rowSelection={{ + selectedRowKeys: (tableOptions[previewTable]?.selectedDeletePks || []) as any, + onChange: (keys) => updateTableOption(previewTable, 'selectedDeletePks', keys as string[]), + getCheckboxProps: () => ({ disabled: !tableOptions[previewTable]?.delete }), + }} + columns={[ + { title: previewData.pkColumn || '主键', dataIndex: 'pk', key: 'pk', width: 200, ellipsis: true }, + { title: '数据', dataIndex: 'row', key: 'row', render: (v: any) =>
{JSON.stringify(v, null, 2)}
} + ]} + /> + + ) + } + ]} + /> + + )} + + ); }; diff --git a/frontend/src/components/DataViewer.tsx b/frontend/src/components/DataViewer.tsx index db332a4..65e1162 100644 --- a/frontend/src/components/DataViewer.tsx +++ b/frontend/src/components/DataViewer.tsx @@ -1,4 +1,4 @@ -import React, { useEffect, useState, useCallback } from 'react'; +import React, { useEffect, useState, useCallback, useRef } from 'react'; import { message } from 'antd'; import { TabData, ColumnDefinition } from '../types'; import { useStore } from '../store'; @@ -11,6 +11,7 @@ const DataViewer: React.FC<{ tab: TabData }> = ({ tab }) => { const [pkColumns, setPkColumns] = useState([]); const [loading, setLoading] = useState(false); const { connections, addSqlLog } = useStore(); + const fetchSeqRef = useRef(0); const [pagination, setPagination] = useState({ current: 1, @@ -24,11 +25,12 @@ const DataViewer: React.FC<{ tab: TabData }> = ({ tab }) => { const [filterConditions, setFilterConditions] = useState([]); const fetchData = useCallback(async (page = pagination.current, size = pagination.pageSize) => { + const seq = ++fetchSeqRef.current; setLoading(true); const conn = connections.find(c => c.id === tab.connectionId); if (!conn) { message.error("Connection not found"); - setLoading(false); + if (fetchSeqRef.current === seq) setLoading(false); return; } @@ -135,15 +137,15 @@ const DataViewer: React.FC<{ tab: TabData }> = ({ tab }) => { if (fieldNames.length === 0 && resultData.length > 0) { fieldNames = Object.keys(resultData[0]); } + if (fetchSeqRef.current !== seq) return; setColumnNames(fieldNames); - - setData(resultData.map((row: any, i: number) => ({ ...row, key: `row-${i}` }))); - + setData(resultData.map((row: any, i: number) => ({ ...row, key: `row-${i}` }))); setPagination(prev => ({ ...prev, current: page, pageSize: size, total: totalRecords })); } else { message.error(resData.message); } } catch (e: any) { + if (fetchSeqRef.current !== seq) return; message.error("Error fetching 
data: " + e.message); addSqlLog({ id: `log-${Date.now()}-error`, @@ -155,7 +157,7 @@ const DataViewer: React.FC<{ tab: TabData }> = ({ tab }) => { dbName }); } - setLoading(false); + if (fetchSeqRef.current === seq) setLoading(false); }, [connections, tab, sortInfo, filterConditions, pkColumns.length]); // Depend on pkColumns.length to avoid loop? No, pkColumns is updated inside. // Actually, 'pkColumns' state shouldn't trigger re-fetch. diff --git a/frontend/wailsjs/go/app/App.d.ts b/frontend/wailsjs/go/app/App.d.ts index e118dc1..15b34ad 100755 --- a/frontend/wailsjs/go/app/App.d.ts +++ b/frontend/wailsjs/go/app/App.d.ts @@ -29,6 +29,10 @@ export function DBShowCreateTable(arg1:connection.ConnectionConfig,arg2:string,a export function DataSync(arg1:sync.SyncConfig):Promise; +export function DataSyncAnalyze(arg1:sync.SyncConfig):Promise; + +export function DataSyncPreview(arg1:sync.SyncConfig,arg2:string,arg3:number):Promise; + export function ExportData(arg1:Array>,arg2:Array,arg3:string,arg4:string):Promise; export function ExportTable(arg1:connection.ConnectionConfig,arg2:string,arg3:string,arg4:string):Promise; diff --git a/frontend/wailsjs/go/app/App.js b/frontend/wailsjs/go/app/App.js index 2c1d675..1537055 100755 --- a/frontend/wailsjs/go/app/App.js +++ b/frontend/wailsjs/go/app/App.js @@ -54,6 +54,14 @@ export function DataSync(arg1) { return window['go']['app']['App']['DataSync'](arg1); } +export function DataSyncAnalyze(arg1) { + return window['go']['app']['App']['DataSyncAnalyze'](arg1); +} + +export function DataSyncPreview(arg1, arg2, arg3) { + return window['go']['app']['App']['DataSyncPreview'](arg1, arg2, arg3); +} + export function ExportData(arg1, arg2, arg3, arg4) { return window['go']['app']['App']['ExportData'](arg1, arg2, arg3, arg4); } diff --git a/frontend/wailsjs/go/models.ts b/frontend/wailsjs/go/models.ts index 5d44550..f6d5d7c 100755 --- a/frontend/wailsjs/go/models.ts +++ b/frontend/wailsjs/go/models.ts @@ -142,11 +142,37 @@ export 
namespace connection { export namespace sync { + export class TableOptions { + insert?: boolean; + update?: boolean; + delete?: boolean; + selectedInsertPks?: string[]; + selectedUpdatePks?: string[]; + selectedDeletePks?: string[]; + + static createFrom(source: any = {}) { + return new TableOptions(source); + } + + constructor(source: any = {}) { + if ('string' === typeof source) source = JSON.parse(source); + this.insert = source["insert"]; + this.update = source["update"]; + this.delete = source["delete"]; + this.selectedInsertPks = source["selectedInsertPks"]; + this.selectedUpdatePks = source["selectedUpdatePks"]; + this.selectedDeletePks = source["selectedDeletePks"]; + } + } export class SyncConfig { sourceConfig: connection.ConnectionConfig; targetConfig: connection.ConnectionConfig; tables: string[]; + content?: string; mode: string; + jobId?: string; + autoAddColumns?: boolean; + tableOptions?: Record; static createFrom(source: any = {}) { return new SyncConfig(source); @@ -157,7 +183,11 @@ export namespace sync { this.sourceConfig = this.convertValues(source["sourceConfig"], connection.ConnectionConfig); this.targetConfig = this.convertValues(source["targetConfig"], connection.ConnectionConfig); this.tables = source["tables"]; + this.content = source["content"]; this.mode = source["mode"]; + this.jobId = source["jobId"]; + this.autoAddColumns = source["autoAddColumns"]; + this.tableOptions = this.convertValues(source["tableOptions"], TableOptions, true); } convertValues(a: any, classs: any, asMap: boolean = false): any { diff --git a/internal/app/methods_sync.go b/internal/app/methods_sync.go index e3927d5..c60ba2a 100644 --- a/internal/app/methods_sync.go +++ b/internal/app/methods_sync.go @@ -1,11 +1,99 @@ package app import ( + "fmt" + "strings" + "time" + + "GoNavi-Wails/internal/connection" "GoNavi-Wails/internal/sync" + + "github.com/wailsapp/wails/v2/pkg/runtime" ) // DataSync executes a data synchronization task func (a *App) DataSync(config 
sync.SyncConfig) sync.SyncResult { - engine := sync.NewSyncEngine() - return engine.RunSync(config) + jobID := strings.TrimSpace(config.JobID) + if jobID == "" { + jobID = fmt.Sprintf("sync-%d", time.Now().UnixNano()) + config.JobID = jobID + } + + reporter := sync.Reporter{ + OnLog: func(event sync.SyncLogEvent) { + runtime.EventsEmit(a.ctx, sync.EventSyncLog, event) + }, + OnProgress: func(event sync.SyncProgressEvent) { + runtime.EventsEmit(a.ctx, sync.EventSyncProgress, event) + }, + } + + runtime.EventsEmit(a.ctx, sync.EventSyncStart, map[string]any{ + "jobId": jobID, + "total": len(config.Tables), + }) + + engine := sync.NewSyncEngine(reporter) + res := engine.RunSync(config) + + runtime.EventsEmit(a.ctx, sync.EventSyncDone, map[string]any{ + "jobId": jobID, + "result": res, + }) + + return res +} + +// DataSyncAnalyze analyzes differences between source and target for the given tables (dry-run). +func (a *App) DataSyncAnalyze(config sync.SyncConfig) connection.QueryResult { + jobID := strings.TrimSpace(config.JobID) + if jobID == "" { + jobID = fmt.Sprintf("analyze-%d", time.Now().UnixNano()) + config.JobID = jobID + } + + reporter := sync.Reporter{ + OnLog: func(event sync.SyncLogEvent) { + runtime.EventsEmit(a.ctx, sync.EventSyncLog, event) + }, + OnProgress: func(event sync.SyncProgressEvent) { + runtime.EventsEmit(a.ctx, sync.EventSyncProgress, event) + }, + } + + runtime.EventsEmit(a.ctx, sync.EventSyncStart, map[string]any{ + "jobId": jobID, + "total": len(config.Tables), + "type": "analyze", + }) + + engine := sync.NewSyncEngine(reporter) + res := engine.Analyze(config) + + runtime.EventsEmit(a.ctx, sync.EventSyncDone, map[string]any{ + "jobId": jobID, + "result": res, + "type": "analyze", + }) + + if !res.Success { + return connection.QueryResult{Success: false, Message: res.Message, Data: res} + } + return connection.QueryResult{Success: true, Message: res.Message, Data: res} +} + +// DataSyncPreview returns a limited preview of diff rows for one 
table. +func (a *App) DataSyncPreview(config sync.SyncConfig, tableName string, limit int) connection.QueryResult { + jobID := strings.TrimSpace(config.JobID) + if jobID == "" { + jobID = fmt.Sprintf("preview-%d", time.Now().UnixNano()) + config.JobID = jobID + } + + engine := sync.NewSyncEngine(sync.Reporter{}) + preview, err := engine.Preview(config, tableName, limit) + if err != nil { + return connection.QueryResult{Success: false, Message: err.Error()} + } + return connection.QueryResult{Success: true, Message: "OK", Data: preview} } diff --git a/internal/db/custom_impl.go b/internal/db/custom_impl.go index 3d21faa..27b5d78 100644 --- a/internal/db/custom_impl.go +++ b/internal/db/custom_impl.go @@ -88,19 +88,7 @@ func (c *CustomDB) Query(query string) ([]map[string]interface{}, []string, erro entry := make(map[string]interface{}) for i, col := range columns { - var v interface{} - val := values[i] - b, ok := val.([]byte) - if ok { - if b == nil { - v = nil - } else { - v = string(b) - } - } else { - v = val - } - entry[col] = v + entry[col] = normalizeQueryValue(values[i]) } resultData = append(resultData, entry) } diff --git a/internal/db/dameng_impl.go b/internal/db/dameng_impl.go index e8b7b31..b7ef3ec 100644 --- a/internal/db/dameng_impl.go +++ b/internal/db/dameng_impl.go @@ -119,19 +119,7 @@ func (d *DamengDB) Query(query string) ([]map[string]interface{}, []string, erro entry := make(map[string]interface{}) for i, col := range columns { - var v interface{} - val := values[i] - b, ok := val.([]byte) - if ok { - if b == nil { - v = nil - } else { - v = string(b) - } - } else { - v = val - } - entry[col] = v + entry[col] = normalizeQueryValue(values[i]) } resultData = append(resultData, entry) } diff --git a/internal/db/kingbase_impl.go b/internal/db/kingbase_impl.go index a85210f..19782c9 100644 --- a/internal/db/kingbase_impl.go +++ b/internal/db/kingbase_impl.go @@ -150,19 +150,7 @@ func (k *KingbaseDB) Query(query string) ([]map[string]interface{}, 
[]string, er entry := make(map[string]interface{}) for i, col := range columns { - var v interface{} - val := values[i] - b, ok := val.([]byte) - if ok { - if b == nil { - v = nil - } else { - v = string(b) - } - } else { - v = val - } - entry[col] = v + entry[col] = normalizeQueryValue(values[i]) } resultData = append(resultData, entry) } diff --git a/internal/db/mysql_impl.go b/internal/db/mysql_impl.go index fdd11d2..9f34f3a 100644 --- a/internal/db/mysql_impl.go +++ b/internal/db/mysql_impl.go @@ -48,7 +48,7 @@ func (m *MySQLDB) Connect(config connection.ConnectionConfig) error { } m.conn = db m.pingTimeout = getConnectTimeout(config) - + // Force verification if err := m.Ping(); err != nil { return fmt.Errorf("连接建立后验证失败:%w", err) @@ -107,19 +107,7 @@ func (m *MySQLDB) Query(query string) ([]map[string]interface{}, []string, error entry := make(map[string]interface{}) for i, col := range columns { - var v interface{} - val := values[i] - b, ok := val.([]byte) - if ok { - if b == nil { - v = nil - } else { - v = string(b) - } - } else { - v = val - } - entry[col] = v + entry[col] = normalizeQueryValue(values[i]) } resultData = append(resultData, entry) } @@ -159,12 +147,12 @@ func (m *MySQLDB) GetTables(dbName string) ([]string, error) { if dbName != "" { query = fmt.Sprintf("SHOW TABLES FROM `%s`", dbName) } - + data, _, err := m.Query(query) if err != nil { return nil, err } - + var tables []string for _, row := range data { for _, v := range row { @@ -185,7 +173,7 @@ func (m *MySQLDB) GetCreateStatement(dbName, tableName string) (string, error) { if err != nil { return "", err } - + if len(data) > 0 { if val, ok := data[0]["Create Table"]; ok { return fmt.Sprintf("%v", val), nil @@ -215,12 +203,12 @@ func (m *MySQLDB) GetColumns(dbName, tableName string) ([]connection.ColumnDefin Extra: fmt.Sprintf("%v", row["Extra"]), Comment: fmt.Sprintf("%v", row["Comment"]), } - + if row["Default"] != nil { d := fmt.Sprintf("%v", row["Default"]) col.Default = &d } - + 
columns = append(columns, col) } return columns, nil @@ -248,14 +236,14 @@ func (m *MySQLDB) GetIndexes(dbName, tableName string) ([]connection.IndexDefini } } - seq := 0 - if val, ok := row["Seq_in_index"]; ok { + seq := 0 + if val, ok := row["Seq_in_index"]; ok { if f, ok := val.(float64); ok { seq = int(f) } else if i, ok := val.(int64); ok { seq = int(i) } - } + } idx := connection.IndexDefinition{ Name: fmt.Sprintf("%v", row["Key_name"]), @@ -345,12 +333,12 @@ func (m *MySQLDB) ApplyChanges(tableName string, changes connection.ChangeSet) e for _, update := range changes.Updates { var sets []string var args []interface{} - + for k, v := range update.Values { sets = append(sets, fmt.Sprintf("`%s` = ?", k)) args = append(args, v) } - + if len(sets) == 0 { continue } @@ -360,7 +348,7 @@ func (m *MySQLDB) ApplyChanges(tableName string, changes connection.ChangeSet) e wheres = append(wheres, fmt.Sprintf("`%s` = ?", k)) args = append(args, v) } - + if len(wheres) == 0 { return fmt.Errorf("update requires keys") } @@ -376,13 +364,13 @@ func (m *MySQLDB) ApplyChanges(tableName string, changes connection.ChangeSet) e var cols []string var placeholders []string var args []interface{} - + for k, v := range row { cols = append(cols, fmt.Sprintf("`%s`", k)) placeholders = append(placeholders, "?") args = append(args, v) } - + if len(cols) == 0 { continue } diff --git a/internal/db/oracle_impl.go b/internal/db/oracle_impl.go index 95598b8..bd94068 100644 --- a/internal/db/oracle_impl.go +++ b/internal/db/oracle_impl.go @@ -125,19 +125,7 @@ func (o *OracleDB) Query(query string) ([]map[string]interface{}, []string, erro entry := make(map[string]interface{}) for i, col := range columns { - var v interface{} - val := values[i] - b, ok := val.([]byte) - if ok { - if b == nil { - v = nil - } else { - v = string(b) - } - } else { - v = val - } - entry[col] = v + entry[col] = normalizeQueryValue(values[i]) } resultData = append(resultData, entry) } diff --git 
a/internal/db/postgres_impl.go b/internal/db/postgres_impl.go index fba01da..2796977 100644 --- a/internal/db/postgres_impl.go +++ b/internal/db/postgres_impl.go @@ -48,7 +48,7 @@ func (p *PostgresDB) Connect(config connection.ConnectionConfig) error { } p.conn = db p.pingTimeout = getConnectTimeout(config) - + // Force verification if err := p.Ping(); err != nil { return fmt.Errorf("连接建立后验证失败:%w", err) @@ -81,8 +81,7 @@ func (p *PostgresDB) Query(query string) ([]map[string]interface{}, []string, er return nil, nil, fmt.Errorf("connection not open") } - -rows, err := p.conn.Query(query) + rows, err := p.conn.Query(query) if err != nil { return nil, nil, err } @@ -108,19 +107,7 @@ rows, err := p.conn.Query(query) entry := make(map[string]interface{}) for i, col := range columns { - var v interface{} - val := values[i] - b, ok := val.([]byte) - if ok { - if b == nil { - v = nil - } else { - v = string(b) - } - } else { - v = val - } - entry[col] = v + entry[col] = normalizeQueryValue(values[i]) } resultData = append(resultData, entry) } @@ -159,7 +146,7 @@ func (p *PostgresDB) GetTables(dbName string) ([]string, error) { if err != nil { return nil, err } - + var tables []string for _, row := range data { schema, okSchema := row["schemaname"] diff --git a/internal/db/query_value.go b/internal/db/query_value.go new file mode 100644 index 0000000..764ccbf --- /dev/null +++ b/internal/db/query_value.go @@ -0,0 +1,58 @@ +package db + +import ( + "encoding/hex" + "unicode" + "unicode/utf8" +) + +// normalizeQueryValue normalizes driver-returned values for UI/JSON transport. 
+// 当前主要处理 []byte:如果是可读文本则转为 string,否则转为十六进制字符串,避免前端出现“空白值”。 +func normalizeQueryValue(v interface{}) interface{} { + if b, ok := v.([]byte); ok { + return bytesToReadableString(b) + } + return v +} + +func bytesToReadableString(b []byte) interface{} { + if b == nil { + return nil + } + if len(b) == 0 { + return "" + } + + if utf8.Valid(b) { + s := string(b) + if isMostlyPrintable(s) { + return s + } + } + + return "0x" + hex.EncodeToString(b) +} + +func isMostlyPrintable(s string) bool { + if s == "" { + return true + } + + total := 0 + printable := 0 + for _, r := range s { + total++ + switch r { + case '\n', '\r', '\t': + printable++ + continue + default: + } + if unicode.IsPrint(r) { + printable++ + } + } + + // 允许少量不可见字符,避免把正常文本误判为二进制。 + return printable*100 >= total*90 +} diff --git a/internal/db/sqlite_impl.go b/internal/db/sqlite_impl.go index 01daa1e..427cc2c 100644 --- a/internal/db/sqlite_impl.go +++ b/internal/db/sqlite_impl.go @@ -17,14 +17,14 @@ type SQLiteDB struct { } func (s *SQLiteDB) Connect(config connection.ConnectionConfig) error { - dsn := config.Host + dsn := config.Host db, err := sql.Open("sqlite", dsn) if err != nil { return fmt.Errorf("打开数据库连接失败:%w", err) } s.conn = db s.pingTimeout = getConnectTimeout(config) - + // Force verification if err := s.Ping(); err != nil { return fmt.Errorf("连接建立后验证失败:%w", err) @@ -83,19 +83,7 @@ func (s *SQLiteDB) Query(query string) ([]map[string]interface{}, []string, erro entry := make(map[string]interface{}) for i, col := range columns { - var v interface{} - val := values[i] - b, ok := val.([]byte) - if ok { - if b == nil { - v = nil - } else { - v = string(b) - } - } else { - v = val - } - entry[col] = v + entry[col] = normalizeQueryValue(values[i]) } resultData = append(resultData, entry) } @@ -124,7 +112,7 @@ func (s *SQLiteDB) GetTables(dbName string) ([]string, error) { if err != nil { return nil, err } - + var tables []string for _, row := range data { if val, ok := row["name"]; ok { diff --git 
a/internal/sync/analyze.go b/internal/sync/analyze.go new file mode 100644 index 0000000..a12a2a0 --- /dev/null +++ b/internal/sync/analyze.go @@ -0,0 +1,198 @@ +package sync + +import ( + "GoNavi-Wails/internal/db" + "GoNavi-Wails/internal/logger" + "fmt" + "strings" +) + +type TableDiffSummary struct { + Table string `json:"table"` + PKColumn string `json:"pkColumn,omitempty"` + CanSync bool `json:"canSync"` + Inserts int `json:"inserts"` + Updates int `json:"updates"` + Deletes int `json:"deletes"` + Same int `json:"same"` + Message string `json:"message,omitempty"` + HasSchema bool `json:"hasSchema,omitempty"` +} + +type SyncAnalyzeResult struct { + Success bool `json:"success"` + Message string `json:"message"` + Tables []TableDiffSummary `json:"tables"` +} + +func (s *SyncEngine) Analyze(config SyncConfig) SyncAnalyzeResult { + result := SyncAnalyzeResult{Success: true, Tables: []TableDiffSummary{}} + + contentRaw := strings.ToLower(strings.TrimSpace(config.Content)) + syncSchema := false + syncData := true + switch contentRaw { + case "", "data": + syncData = true + case "schema": + syncSchema = true + syncData = false + case "both": + syncSchema = true + syncData = true + default: + s.appendLog(config.JobID, nil, "warn", fmt.Sprintf("未知同步内容 %q,已自动使用仅同步数据", config.Content)) + syncData = true + } + + totalTables := len(config.Tables) + s.progress(config.JobID, 0, totalTables, "", "差异分析开始") + + sourceDB, err := db.NewDatabase(config.SourceConfig.Type) + if err != nil { + logger.Error(err, "初始化源数据库驱动失败:类型=%s", config.SourceConfig.Type) + return SyncAnalyzeResult{Success: false, Message: "初始化源数据库驱动失败: " + err.Error()} + } + targetDB, err := db.NewDatabase(config.TargetConfig.Type) + if err != nil { + logger.Error(err, "初始化目标数据库驱动失败:类型=%s", config.TargetConfig.Type) + return SyncAnalyzeResult{Success: false, Message: "初始化目标数据库驱动失败: " + err.Error()} + } + + // Connect Source + if err := sourceDB.Connect(config.SourceConfig); err != nil { + logger.Error(err, 
"源数据库连接失败:%s", formatConnSummaryForSync(config.SourceConfig)) + return SyncAnalyzeResult{Success: false, Message: "源数据库连接失败: " + err.Error()} + } + defer sourceDB.Close() + + // Connect Target + if err := targetDB.Connect(config.TargetConfig); err != nil { + logger.Error(err, "目标数据库连接失败:%s", formatConnSummaryForSync(config.TargetConfig)) + return SyncAnalyzeResult{Success: false, Message: "目标数据库连接失败: " + err.Error()} + } + defer targetDB.Close() + + for i, tableName := range config.Tables { + func() { + s.progress(config.JobID, i, totalTables, tableName, fmt.Sprintf("分析表(%d/%d)", i+1, totalTables)) + + summary := TableDiffSummary{ + Table: tableName, + CanSync: false, + Inserts: 0, + Updates: 0, + Deletes: 0, + Same: 0, + Message: "", + HasSchema: syncSchema, + } + + sourceSchema, sourceTable := normalizeSchemaAndTable(config.SourceConfig.Type, config.SourceConfig.Database, tableName) + targetSchema, targetTable := normalizeSchemaAndTable(config.TargetConfig.Type, config.TargetConfig.Database, tableName) + sourceQueryTable := qualifiedNameForQuery(config.SourceConfig.Type, sourceSchema, sourceTable, tableName) + targetQueryTable := qualifiedNameForQuery(config.TargetConfig.Type, targetSchema, targetTable, tableName) + + cols, err := sourceDB.GetColumns(sourceSchema, sourceTable) + if err != nil { + summary.Message = "获取源表字段失败: " + err.Error() + result.Tables = append(result.Tables, summary) + return + } + + if !syncData { + summary.CanSync = true + summary.Message = "仅同步结构,未执行数据差异分析" + result.Tables = append(result.Tables, summary) + return + } + + pkCols := make([]string, 0, 2) + for _, c := range cols { + if c.Key == "PRI" || c.Key == "PK" { + pkCols = append(pkCols, c.Name) + } + } + if len(pkCols) == 0 { + summary.Message = "无主键,不支持数据对比/同步" + result.Tables = append(result.Tables, summary) + return + } + if len(pkCols) > 1 { + summary.Message = fmt.Sprintf("复合主键(%s),暂不支持数据对比/同步", strings.Join(pkCols, ",")) + result.Tables = append(result.Tables, summary) + 
return + } + summary.PKColumn = pkCols[0] + + // Query data for diff + sourceRows, _, err := sourceDB.Query(fmt.Sprintf("SELECT * FROM %s", quoteQualifiedIdentByType(config.SourceConfig.Type, sourceQueryTable))) + if err != nil { + summary.Message = "读取源表失败: " + err.Error() + result.Tables = append(result.Tables, summary) + return + } + targetRows, _, err := targetDB.Query(fmt.Sprintf("SELECT * FROM %s", quoteQualifiedIdentByType(config.TargetConfig.Type, targetQueryTable))) + if err != nil { + summary.Message = "读取目标表失败: " + err.Error() + result.Tables = append(result.Tables, summary) + return + } + + pkCol := summary.PKColumn + targetMap := make(map[string]map[string]interface{}, len(targetRows)) + for _, row := range targetRows { + if row[pkCol] == nil { + continue + } + pkVal := strings.TrimSpace(fmt.Sprintf("%v", row[pkCol])) + if pkVal == "" || pkVal == "<nil>" { + continue + } + targetMap[pkVal] = row + } + + sourcePKSet := make(map[string]struct{}, len(sourceRows)) + for _, sRow := range sourceRows { + if sRow[pkCol] == nil { + continue + } + pkVal := strings.TrimSpace(fmt.Sprintf("%v", sRow[pkCol])) + if pkVal == "" || pkVal == "<nil>" { + continue + } + sourcePKSet[pkVal] = struct{}{} + + if tRow, exists := targetMap[pkVal]; exists { + changed := false + for k, v := range sRow { + if fmt.Sprintf("%v", v) != fmt.Sprintf("%v", tRow[k]) { + changed = true + break + } + } + if changed { + summary.Updates++ + } else { + summary.Same++ + } + } else { + summary.Inserts++ + } + } + + for pkVal := range targetMap { + if _, ok := sourcePKSet[pkVal]; !ok { + summary.Deletes++ + } + } + + summary.CanSync = true + result.Tables = append(result.Tables, summary) + }() + } + + s.progress(config.JobID, totalTables, totalTables, "", "差异分析完成") + result.Message = fmt.Sprintf("已完成 %d 张表的差异分析", len(result.Tables)) + return result +} diff --git a/internal/sync/preview.go b/internal/sync/preview.go new file mode 100644 index 0000000..7cec537 --- /dev/null +++ b/internal/sync/preview.go @@ 
-0,0 +1,164 @@ +package sync + +import ( + "GoNavi-Wails/internal/db" + "fmt" + "strings" +) + +type PreviewRow struct { + PK string `json:"pk"` + Row map[string]interface{} `json:"row"` +} + +type PreviewUpdateRow struct { + PK string `json:"pk"` + ChangedColumns []string `json:"changedColumns"` + Source map[string]interface{} `json:"source"` + Target map[string]interface{} `json:"target"` +} + +type TableDiffPreview struct { + Table string `json:"table"` + PKColumn string `json:"pkColumn"` + TotalInserts int `json:"totalInserts"` + TotalUpdates int `json:"totalUpdates"` + TotalDeletes int `json:"totalDeletes"` + Inserts []PreviewRow `json:"inserts"` + Updates []PreviewUpdateRow `json:"updates"` + Deletes []PreviewRow `json:"deletes"` +} + +func (s *SyncEngine) Preview(config SyncConfig, tableName string, limit int) (TableDiffPreview, error) { + if limit <= 0 { + limit = 200 + } + if limit > 500 { + limit = 500 + } + + sourceDB, err := db.NewDatabase(config.SourceConfig.Type) + if err != nil { + return TableDiffPreview{}, fmt.Errorf("初始化源数据库驱动失败: %w", err) + } + targetDB, err := db.NewDatabase(config.TargetConfig.Type) + if err != nil { + return TableDiffPreview{}, fmt.Errorf("初始化目标数据库驱动失败: %w", err) + } + + if err := sourceDB.Connect(config.SourceConfig); err != nil { + return TableDiffPreview{}, fmt.Errorf("源数据库连接失败: %w", err) + } + defer sourceDB.Close() + + if err := targetDB.Connect(config.TargetConfig); err != nil { + return TableDiffPreview{}, fmt.Errorf("目标数据库连接失败: %w", err) + } + defer targetDB.Close() + + sourceSchema, sourceTable := normalizeSchemaAndTable(config.SourceConfig.Type, config.SourceConfig.Database, tableName) + targetSchema, targetTable := normalizeSchemaAndTable(config.TargetConfig.Type, config.TargetConfig.Database, tableName) + sourceQueryTable := qualifiedNameForQuery(config.SourceConfig.Type, sourceSchema, sourceTable, tableName) + targetQueryTable := qualifiedNameForQuery(config.TargetConfig.Type, targetSchema, targetTable, tableName) 
+ + cols, err := sourceDB.GetColumns(sourceSchema, sourceTable) + if err != nil { + return TableDiffPreview{}, fmt.Errorf("获取源表字段失败: %w", err) + } + + pkCols := make([]string, 0, 2) + for _, c := range cols { + if c.Key == "PRI" || c.Key == "PK" { + pkCols = append(pkCols, c.Name) + } + } + if len(pkCols) == 0 { + return TableDiffPreview{}, fmt.Errorf("无主键,不支持数据预览") + } + if len(pkCols) > 1 { + return TableDiffPreview{}, fmt.Errorf("复合主键(%s),暂不支持数据预览", strings.Join(pkCols, ",")) + } + pkCol := pkCols[0] + + sourceRows, _, err := sourceDB.Query(fmt.Sprintf("SELECT * FROM %s", quoteQualifiedIdentByType(config.SourceConfig.Type, sourceQueryTable))) + if err != nil { + return TableDiffPreview{}, fmt.Errorf("读取源表失败: %w", err) + } + targetRows, _, err := targetDB.Query(fmt.Sprintf("SELECT * FROM %s", quoteQualifiedIdentByType(config.TargetConfig.Type, targetQueryTable))) + if err != nil { + return TableDiffPreview{}, fmt.Errorf("读取目标表失败: %w", err) + } + + targetMap := make(map[string]map[string]interface{}, len(targetRows)) + for _, row := range targetRows { + if row[pkCol] == nil { + continue + } + pkVal := strings.TrimSpace(fmt.Sprintf("%v", row[pkCol])) + if pkVal == "" || pkVal == "<nil>" { + continue + } + targetMap[pkVal] = row + } + + out := TableDiffPreview{ + Table: tableName, + PKColumn: pkCol, + TotalInserts: 0, + TotalUpdates: 0, + TotalDeletes: 0, + Inserts: make([]PreviewRow, 0), + Updates: make([]PreviewUpdateRow, 0), + Deletes: make([]PreviewRow, 0), + } + + sourcePKSet := make(map[string]struct{}, len(sourceRows)) + for _, sRow := range sourceRows { + if sRow[pkCol] == nil { + continue + } + pkVal := strings.TrimSpace(fmt.Sprintf("%v", sRow[pkCol])) + if pkVal == "" || pkVal == "<nil>" { + continue + } + sourcePKSet[pkVal] = struct{}{} + + if tRow, exists := targetMap[pkVal]; exists { + changedColumns := make([]string, 0) + for k, v := range sRow { + if fmt.Sprintf("%v", v) != fmt.Sprintf("%v", tRow[k]) { + changedColumns = append(changedColumns, k) + } + } + if 
len(changedColumns) > 0 { + out.TotalUpdates++ + if len(out.Updates) < limit { + out.Updates = append(out.Updates, PreviewUpdateRow{ + PK: pkVal, + ChangedColumns: changedColumns, + Source: sRow, + Target: tRow, + }) + } + } + continue + } + + out.TotalInserts++ + if len(out.Inserts) < limit { + out.Inserts = append(out.Inserts, PreviewRow{PK: pkVal, Row: sRow}) + } + } + + for pkVal, row := range targetMap { + if _, ok := sourcePKSet[pkVal]; ok { + continue + } + out.TotalDeletes++ + if len(out.Deletes) < limit { + out.Deletes = append(out.Deletes, PreviewRow{PK: pkVal, Row: row}) + } + } + + return out, nil +} diff --git a/internal/sync/row_selection.go b/internal/sync/row_selection.go new file mode 100644 index 0000000..a27f0f3 --- /dev/null +++ b/internal/sync/row_selection.go @@ -0,0 +1,58 @@ +package sync + +import ( + "GoNavi-Wails/internal/connection" + "fmt" +) + +func filterRowsByPKSelection(pkCol string, rows []map[string]interface{}, enabled bool, selectedPKs []string) []map[string]interface{} { + if !enabled { + return nil + } + if len(rows) == 0 { + return rows + } + if len(selectedPKs) == 0 { + return rows + } + + set := make(map[string]struct{}, len(selectedPKs)) + for _, pk := range selectedPKs { + set[pk] = struct{}{} + } + + out := make([]map[string]interface{}, 0, len(rows)) + for _, row := range rows { + pkStr := fmt.Sprintf("%v", row[pkCol]) + if _, ok := set[pkStr]; ok { + out = append(out, row) + } + } + return out +} + +func filterUpdatesByPKSelection(pkCol string, updates []connection.UpdateRow, enabled bool, selectedPKs []string) []connection.UpdateRow { + if !enabled { + return nil + } + if len(updates) == 0 { + return updates + } + if len(selectedPKs) == 0 { + return updates + } + + set := make(map[string]struct{}, len(selectedPKs)) + for _, pk := range selectedPKs { + set[pk] = struct{}{} + } + + out := make([]connection.UpdateRow, 0, len(updates)) + for _, u := range updates { + pkStr := fmt.Sprintf("%v", u.Keys[pkCol]) + if _, ok := 
set[pkStr]; ok { + out = append(out, u) + } + } + return out +} diff --git a/internal/sync/schema_align.go b/internal/sync/schema_align.go new file mode 100644 index 0000000..a68135c --- /dev/null +++ b/internal/sync/schema_align.go @@ -0,0 +1,97 @@ +package sync + +import ( + "GoNavi-Wails/internal/connection" + "strings" +) + +func collectRequiredColumns(inserts []map[string]interface{}, updates []connection.UpdateRow) map[string]string { + // key: lower(columnName), value: original columnName + required := make(map[string]string) + for _, row := range inserts { + for k := range row { + key := strings.ToLower(strings.TrimSpace(k)) + if key == "" { + continue + } + if _, exists := required[key]; !exists { + required[key] = k + } + } + } + for _, u := range updates { + for k := range u.Values { + key := strings.ToLower(strings.TrimSpace(k)) + if key == "" { + continue + } + if _, exists := required[key]; !exists { + required[key] = k + } + } + } + return required +} + +func filterInsertRows(inserts []map[string]interface{}, allowedLower map[string]struct{}) []map[string]interface{} { + if len(inserts) == 0 || len(allowedLower) == 0 { + return inserts + } + + out := make([]map[string]interface{}, 0, len(inserts)) + for _, row := range inserts { + if len(row) == 0 { + out = append(out, row) + continue + } + n := make(map[string]interface{}, len(row)) + for k, v := range row { + if _, ok := allowedLower[strings.ToLower(strings.TrimSpace(k))]; ok { + n[k] = v + } + } + out = append(out, n) + } + return out +} + +func filterUpdateRows(updates []connection.UpdateRow, allowedLower map[string]struct{}) []connection.UpdateRow { + if len(updates) == 0 || len(allowedLower) == 0 { + return updates + } + + out := make([]connection.UpdateRow, 0, len(updates)) + for _, u := range updates { + if len(u.Values) == 0 { + continue + } + + values := make(map[string]interface{}, len(u.Values)) + for k, v := range u.Values { + if _, ok := 
allowedLower[strings.ToLower(strings.TrimSpace(k))]; ok { + values[k] = v + } + } + if len(values) == 0 { + continue + } + out = append(out, connection.UpdateRow{ + Keys: u.Keys, + Values: values, + }) + } + return out +} + +func sanitizeMySQLColumnType(t string) string { + tt := strings.TrimSpace(t) + if tt == "" { + return "TEXT" + } + + // 基础防护:避免把元数据中异常内容拼进 SQL。 + if strings.ContainsAny(tt, "`;\n\r") { + return "TEXT" + } + return tt +} diff --git a/internal/sync/schema_sync.go b/internal/sync/schema_sync.go new file mode 100644 index 0000000..126b623 --- /dev/null +++ b/internal/sync/schema_sync.go @@ -0,0 +1,101 @@ +package sync + +import ( + "GoNavi-Wails/internal/db" + "fmt" + "strings" +) + +func (s *SyncEngine) syncTableSchema(config SyncConfig, res *SyncResult, sourceDB db.Database, targetDB db.Database, tableName string) error { + targetType := strings.ToLower(strings.TrimSpace(config.TargetConfig.Type)) + if targetType != "mysql" { + s.appendLog(config.JobID, res, "warn", fmt.Sprintf("目标数据库类型=%s 暂不支持结构同步,已跳过表 %s", config.TargetConfig.Type, tableName)) + return nil + } + + sourceSchema, sourceTable := normalizeSchemaAndTable(config.SourceConfig.Type, config.SourceConfig.Database, tableName) + targetSchema, targetTable := normalizeSchemaAndTable(config.TargetConfig.Type, config.TargetConfig.Database, tableName) + targetQueryTable := qualifiedNameForQuery(config.TargetConfig.Type, targetSchema, targetTable, tableName) + + // 1) 获取源表字段 + sourceCols, err := sourceDB.GetColumns(sourceSchema, sourceTable) + if err != nil { + return fmt.Errorf("获取源表字段失败: %w", err) + } + + // 2) 确保目标表存在 + targetCols, err := targetDB.GetColumns(targetSchema, targetTable) + if err != nil { + sourceType := strings.ToLower(strings.TrimSpace(config.SourceConfig.Type)) + if sourceType != "mysql" { + return fmt.Errorf("目标表不存在且源类型=%s 暂不支持自动建表: %w", config.SourceConfig.Type, err) + } + + s.appendLog(config.JobID, res, "warn", fmt.Sprintf("目标表 %s 不存在,开始尝试创建表结构", tableName)) + createSQL, 
errCreate := sourceDB.GetCreateStatement(sourceSchema, sourceTable) + if errCreate != nil || strings.TrimSpace(createSQL) == "" { + if errCreate == nil { + errCreate = fmt.Errorf("建表语句为空") + } + return fmt.Errorf("获取源表建表语句失败: %w", errCreate) + } + + if _, errExec := targetDB.Exec(createSQL); errExec != nil { + return fmt.Errorf("创建目标表失败: %w", errExec) + } + s.appendLog(config.JobID, res, "info", fmt.Sprintf("目标表创建成功:%s", tableName)) + + targetCols, err = targetDB.GetColumns(targetSchema, targetTable) + if err != nil { + return fmt.Errorf("创建目标表后获取字段失败: %w", err) + } + } + + targetColSet := make(map[string]struct{}, len(targetCols)) + for _, c := range targetCols { + name := strings.ToLower(strings.TrimSpace(c.Name)) + if name == "" { + continue + } + targetColSet[name] = struct{}{} + } + + // 3) 补齐目标缺失字段(安全策略:新增字段统一允许 NULL) + missing := make([]string, 0) + sourceType := strings.ToLower(strings.TrimSpace(config.SourceConfig.Type)) + for _, c := range sourceCols { + colName := strings.TrimSpace(c.Name) + if colName == "" { + continue + } + lower := strings.ToLower(colName) + if _, ok := targetColSet[lower]; ok { + continue + } + missing = append(missing, colName) + + colType := "TEXT" + if sourceType == "mysql" { + colType = sanitizeMySQLColumnType(c.Type) + } + + alterSQL := fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s %s NULL", + quoteQualifiedIdentByType("mysql", targetQueryTable), + quoteIdentByType("mysql", colName), + colType, + ) + if _, err := targetDB.Exec(alterSQL); err != nil { + s.appendLog(config.JobID, res, "error", fmt.Sprintf(" -> 补字段失败:表=%s 字段=%s 错误=%v", tableName, colName, err)) + continue + } + s.appendLog(config.JobID, res, "info", fmt.Sprintf(" -> 已补齐字段:表=%s 字段=%s 类型=%s", tableName, colName, colType)) + } + + if len(missing) == 0 { + s.appendLog(config.JobID, res, "info", fmt.Sprintf("表结构一致:%s", tableName)) + } else { + s.appendLog(config.JobID, res, "info", fmt.Sprintf("表结构同步完成:%s(新增字段 %d 个)", tableName, len(missing))) + } + + return nil +} diff 
--git a/internal/sync/sql_helpers.go b/internal/sync/sql_helpers.go new file mode 100644 index 0000000..53b4bd2 --- /dev/null +++ b/internal/sync/sql_helpers.go @@ -0,0 +1,109 @@ +package sync + +import "strings" + +func normalizeSyncMode(mode string) string { + m := strings.ToLower(strings.TrimSpace(mode)) + switch m { + case "", "insert_update": + return "insert_update" + case "insert_only": + return "insert_only" + case "full_overwrite": + return "full_overwrite" + default: + return "insert_update" + } +} + +func quoteIdentByType(dbType string, ident string) string { + if ident == "" { + return ident + } + + switch dbType { + case "mysql": + return "`" + strings.ReplaceAll(ident, "`", "``") + "`" + default: + return `"` + strings.ReplaceAll(ident, `"`, `""`) + `"` + } +} + +func quoteQualifiedIdentByType(dbType string, ident string) string { + raw := strings.TrimSpace(ident) + if raw == "" { + return raw + } + + parts := strings.Split(raw, ".") + if len(parts) <= 1 { + return quoteIdentByType(dbType, raw) + } + + quotedParts := make([]string, 0, len(parts)) + for _, part := range parts { + part = strings.TrimSpace(part) + if part == "" { + continue + } + quotedParts = append(quotedParts, quoteIdentByType(dbType, part)) + } + + if len(quotedParts) == 0 { + return quoteIdentByType(dbType, raw) + } + return strings.Join(quotedParts, ".") +} + +func normalizeSchemaAndTable(dbType string, dbName string, tableName string) (string, string) { + rawTable := strings.TrimSpace(tableName) + rawDB := strings.TrimSpace(dbName) + if rawTable == "" { + return rawDB, rawTable + } + + if parts := strings.SplitN(rawTable, ".", 2); len(parts) == 2 { + schema := strings.TrimSpace(parts[0]) + table := strings.TrimSpace(parts[1]) + if schema != "" && table != "" { + return schema, table + } + } + + switch strings.ToLower(strings.TrimSpace(dbType)) { + case "postgres", "kingbase": + return "public", rawTable + default: + return rawDB, rawTable + } +} + +func 
qualifiedNameForQuery(dbType string, schema string, table string, original string) string { + raw := strings.TrimSpace(original) + if raw == "" { + return raw + } + if strings.Contains(raw, ".") { + return raw + } + + switch strings.ToLower(strings.TrimSpace(dbType)) { + case "postgres", "kingbase": + s := strings.TrimSpace(schema) + if s == "" { + s = "public" + } + if table == "" { + return raw + } + return s + "." + table + case "mysql": + s := strings.TrimSpace(schema) + if s == "" || table == "" { + return table + } + return s + "." + table + default: + return table + } +} diff --git a/internal/sync/sync_engine.go b/internal/sync/sync_engine.go index d253670..d1d897c 100644 --- a/internal/sync/sync_engine.go +++ b/internal/sync/sync_engine.go @@ -5,15 +5,21 @@ import ( "GoNavi-Wails/internal/db" "GoNavi-Wails/internal/logger" "fmt" + "sort" "strings" + "time" ) // SyncConfig defines the parameters for a synchronization task type SyncConfig struct { - SourceConfig connection.ConnectionConfig `json:"sourceConfig"` - TargetConfig connection.ConnectionConfig `json:"targetConfig"` - Tables []string `json:"tables"` // Tables to sync - Mode string `json:"mode"` // "insert_update", "full_overwrite" + SourceConfig connection.ConnectionConfig `json:"sourceConfig"` + TargetConfig connection.ConnectionConfig `json:"targetConfig"` + Tables []string `json:"tables"` // Tables to sync + Content string `json:"content,omitempty"` // "data", "schema", "both" + Mode string `json:"mode"` // "insert_update", "insert_only", "full_overwrite" + JobID string `json:"jobId,omitempty"` + AutoAddColumns bool `json:"autoAddColumns,omitempty"` // 自动补齐缺失字段(当前仅 MySQL 目标支持) + TableOptions map[string]TableOptions `json:"tableOptions,omitempty"` } // SyncResult holds the result of the sync operation @@ -28,21 +34,55 @@ type SyncResult struct { } type SyncEngine struct { + reporter Reporter } -func NewSyncEngine() *SyncEngine { - return &SyncEngine{} +func NewSyncEngine(reporter Reporter) 
*SyncEngine { + return &SyncEngine{reporter: reporter} } // CompareAndSync performs the synchronization func (s *SyncEngine) RunSync(config SyncConfig) SyncResult { result := SyncResult{Success: true, Logs: []string{}} logger.Infof("开始数据同步:源=%s 目标=%s 表数量=%d", formatConnSummaryForSync(config.SourceConfig), formatConnSummaryForSync(config.TargetConfig), len(config.Tables)) + totalTables := len(config.Tables) + s.progress(config.JobID, 0, totalTables, "", "开始同步") + + contentRaw := strings.ToLower(strings.TrimSpace(config.Content)) + syncSchema := false + syncData := true + switch contentRaw { + case "", "data": + syncData = true + case "schema": + syncSchema = true + syncData = false + case "both": + syncSchema = true + syncData = true + default: + s.appendLog(config.JobID, &result, "warn", fmt.Sprintf("未知同步内容 %q,已自动使用仅同步数据", config.Content)) + syncData = true + } + + modeRaw := strings.ToLower(strings.TrimSpace(config.Mode)) + if modeRaw != "" && modeRaw != "insert_update" && modeRaw != "insert_only" && modeRaw != "full_overwrite" { + s.appendLog(config.JobID, &result, "warn", fmt.Sprintf("未知同步模式 %q,已自动使用 insert_update", config.Mode)) + } + defaultMode := normalizeSyncMode(config.Mode) + + contentLabel := "仅同步数据" + if syncSchema && syncData { + contentLabel = "同步结构+数据" + } else if syncSchema { + contentLabel = "仅同步结构" + } + s.appendLog(config.JobID, &result, "info", fmt.Sprintf("同步内容:%s;模式:%s;自动补字段:%v", contentLabel, defaultMode, config.AutoAddColumns)) sourceDB, err := db.NewDatabase(config.SourceConfig.Type) if err != nil { logger.Error(err, "初始化源数据库驱动失败:类型=%s", config.SourceConfig.Type) - return s.fail(result, "初始化源数据库驱动失败: "+err.Error()) + return s.fail(config.JobID, totalTables, result, "初始化源数据库驱动失败: "+err.Error()) } if config.SourceConfig.Type == "custom" { // Custom DB setup would go here if needed @@ -51,133 +91,402 @@ func (s *SyncEngine) RunSync(config SyncConfig) SyncResult { targetDB, err := db.NewDatabase(config.TargetConfig.Type) if err != nil { 
logger.Error(err, "初始化目标数据库驱动失败:类型=%s", config.TargetConfig.Type) - return s.fail(result, "初始化目标数据库驱动失败: "+err.Error()) + return s.fail(config.JobID, totalTables, result, "初始化目标数据库驱动失败: "+err.Error()) } // Connect Source - result.Logs = append(result.Logs, fmt.Sprintf("正在连接源数据库: %s...", config.SourceConfig.Host)) + s.appendLog(config.JobID, &result, "info", fmt.Sprintf("正在连接源数据库: %s...", config.SourceConfig.Host)) + s.progress(config.JobID, 0, totalTables, "", "连接源数据库") if err := sourceDB.Connect(config.SourceConfig); err != nil { logger.Error(err, "源数据库连接失败:%s", formatConnSummaryForSync(config.SourceConfig)) - return s.fail(result, "源数据库连接失败: "+err.Error()) + return s.fail(config.JobID, totalTables, result, "源数据库连接失败: "+err.Error()) } defer sourceDB.Close() // Connect Target - result.Logs = append(result.Logs, fmt.Sprintf("正在连接目标数据库: %s...", config.TargetConfig.Host)) + s.appendLog(config.JobID, &result, "info", fmt.Sprintf("正在连接目标数据库: %s...", config.TargetConfig.Host)) + s.progress(config.JobID, 0, totalTables, "", "连接目标数据库") if err := targetDB.Connect(config.TargetConfig); err != nil { logger.Error(err, "目标数据库连接失败:%s", formatConnSummaryForSync(config.TargetConfig)) - return s.fail(result, "目标数据库连接失败: "+err.Error()) + return s.fail(config.JobID, totalTables, result, "目标数据库连接失败: "+err.Error()) } defer targetDB.Close() // Iterate Tables - for _, tableName := range config.Tables { - result.Logs = append(result.Logs, fmt.Sprintf("正在同步表: %s", tableName)) + for i, tableName := range config.Tables { + func() { + tableMode := defaultMode + s.appendLog(config.JobID, &result, "info", fmt.Sprintf("正在同步表: %s", tableName)) + s.progress(config.JobID, i, totalTables, tableName, fmt.Sprintf("同步表(%d/%d)", i+1, totalTables)) + defer s.progress(config.JobID, i+1, totalTables, tableName, "表处理完成") - // 1. 
Get Columns & PKs (Naive approach: assume same schema) - cols, err := sourceDB.GetColumns(config.SourceConfig.Database, tableName) - if err != nil { - logger.Error(err, "获取源表列信息失败:表=%s", tableName) - result.Logs = append(result.Logs, fmt.Sprintf("获取表 %s 的列信息失败: %v", tableName, err)) - continue - } - - pkCol := "" - for _, col := range cols { - if col.Key == "PRI" || col.Key == "PK" { - pkCol = col.Name - break + if syncSchema { + s.progress(config.JobID, i, totalTables, tableName, "同步表结构") + if err := s.syncTableSchema(config, &result, sourceDB, targetDB, tableName); err != nil { + s.appendLog(config.JobID, &result, "error", fmt.Sprintf("表结构同步失败:表=%s 错误=%v", tableName, err)) + return + } + } + if !syncData { + result.TablesSynced++ + return } - } - if pkCol == "" { - result.Logs = append(result.Logs, fmt.Sprintf("跳过表 %s: 未找到主键 (同步需要主键)", tableName)) - continue - } + sourceSchema, sourceTable := normalizeSchemaAndTable(config.SourceConfig.Type, config.SourceConfig.Database, tableName) + targetSchema, targetTable := normalizeSchemaAndTable(config.TargetConfig.Type, config.TargetConfig.Database, tableName) + sourceQueryTable := qualifiedNameForQuery(config.SourceConfig.Type, sourceSchema, sourceTable, tableName) + targetQueryTable := qualifiedNameForQuery(config.TargetConfig.Type, targetSchema, targetTable, tableName) - // 2. Fetch Data (MEMORY INTENSIVE - PROTOTYPE ONLY) - // TODO: Implement paging/streaming - sourceRows, _, err := sourceDB.Query(fmt.Sprintf("SELECT * FROM %s", tableName)) - if err != nil { - logger.Error(err, "读取源表失败:表=%s", tableName) - result.Logs = append(result.Logs, fmt.Sprintf("读取源表 %s 失败: %v", tableName, err)) - continue - } + // 1. 
Get Columns & PKs + cols, err := sourceDB.GetColumns(sourceSchema, sourceTable) + if err != nil { + logger.Error(err, "获取源表列信息失败:表=%s", tableName) + s.appendLog(config.JobID, &result, "error", fmt.Sprintf("获取表 %s 的列信息失败: %v", tableName, err)) + return + } + sourceColsByLower := make(map[string]connection.ColumnDefinition, len(cols)) + for _, col := range cols { + if strings.TrimSpace(col.Name) == "" { + continue + } + sourceColsByLower[strings.ToLower(strings.TrimSpace(col.Name))] = col + } - targetRows, _, err := targetDB.Query(fmt.Sprintf("SELECT * FROM %s", tableName)) - if err != nil { - logger.Error(err, "读取目标表失败:表=%s", tableName) - // Table might not exist in target? - // Check if error is "table not found" -> Try to Create? - // For now, assume table exists. - result.Logs = append(result.Logs, fmt.Sprintf("读取目标表 %s 失败: %v", tableName, err)) - continue - } + pkCols := make([]string, 0, 2) + for _, col := range cols { + if col.Key == "PRI" || col.Key == "PK" { + pkCols = append(pkCols, col.Name) + } + } - // 3. Compare (In-Memory Hash Map) - targetMap := make(map[string]map[string]interface{}) - for _, row := range targetRows { - pkVal := fmt.Sprintf("%v", row[pkCol]) - targetMap[pkVal] = row - } + if len(pkCols) == 0 { + s.appendLog(config.JobID, &result, "warn", fmt.Sprintf("表 %s 未找到主键,已跳过数据同步(避免产生重复数据)", tableName)) + return + } + if len(pkCols) > 1 { + s.appendLog(config.JobID, &result, "warn", fmt.Sprintf("表 %s 为复合主键(%s),当前暂不支持数据同步", tableName, strings.Join(pkCols, ","))) + return + } + pkCol := pkCols[0] - var inserts []map[string]interface{} - var updates []connection.UpdateRow - // var deletes []map[string]interface{} // Not implemented in "insert_update" mode usually - - for _, sRow := range sourceRows { - pkVal := fmt.Sprintf("%v", sRow[pkCol]) - - if tRow, exists := targetMap[pkVal]; exists { - // Update? 
Compare values - // Simplified: Compare string representations or iterate keys - // For prototype: assume update if exists - // Optimization: Check diff - changes := make(map[string]interface{}) - for k, v := range sRow { - if fmt.Sprintf("%v", v) != fmt.Sprintf("%v", tRow[k]) { - changes[k] = v + opts := TableOptions{Insert: true, Update: true, Delete: false} + if config.TableOptions != nil { + if t, ok := config.TableOptions[tableName]; ok { + opts = t + // 默认防护:如用户未设置任意一个字段,保持 insert/update 默认 true、delete 默认 false + if !t.Insert && !t.Update && !t.Delete { + opts = t } } - if len(changes) > 0 { - updates = append(updates, connection.UpdateRow{ - Keys: map[string]interface{}{pkCol: pkVal}, - Values: changes, - }) - } - } else { - // Insert - inserts = append(inserts, sRow) } - } + if !opts.Insert && !opts.Update && !opts.Delete { + s.appendLog(config.JobID, &result, "info", fmt.Sprintf("表 %s 未勾选任何操作,已跳过", tableName)) + return + } - // 4. Apply Changes - changeSet := connection.ChangeSet{ - Inserts: inserts, - Updates: updates, - } + // 2. 
Fetch Data (MEMORY INTENSIVE - PROTOTYPE ONLY) + // TODO: Implement paging/streaming + s.progress(config.JobID, i, totalTables, tableName, "读取源表数据") + sourceRows, _, err := sourceDB.Query(fmt.Sprintf("SELECT * FROM %s", quoteQualifiedIdentByType(config.SourceConfig.Type, sourceQueryTable))) + if err != nil { + logger.Error(err, "读取源表失败:表=%s", tableName) + s.appendLog(config.JobID, &result, "error", fmt.Sprintf("读取源表 %s 失败: %v", tableName, err)) + return + } - if len(inserts) > 0 || len(updates) > 0 { - result.Logs = append(result.Logs, fmt.Sprintf(" -> 需插入: %d 行, 需更新: %d 行", len(inserts), len(updates))) + var inserts []map[string]interface{} + var updates []connection.UpdateRow - // We need a BatchApplier interface or assume Database implements ApplyChanges - if applier, ok := targetDB.(db.BatchApplier); ok { - if err := applier.ApplyChanges(tableName, changeSet); err != nil { - result.Logs = append(result.Logs, fmt.Sprintf(" -> 应用变更失败: %v", err)) + if tableMode == "insert_update" { + s.progress(config.JobID, i, totalTables, tableName, "读取目标表数据") + targetRows, _, err := targetDB.Query(fmt.Sprintf("SELECT * FROM %s", quoteQualifiedIdentByType(config.TargetConfig.Type, targetQueryTable))) + if err != nil { + logger.Error(err, "读取目标表失败:表=%s", tableName) + s.appendLog(config.JobID, &result, "error", fmt.Sprintf("读取目标表 %s 失败: %v", tableName, err)) + return + } + + // 3. 
Compare (In-Memory Hash Map) + s.progress(config.JobID, i, totalTables, tableName, "对比差异") + targetMap := make(map[string]map[string]interface{}) + for _, row := range targetRows { + if row[pkCol] == nil { + continue + } + pkVal := fmt.Sprintf("%v", row[pkCol]) + if strings.TrimSpace(pkVal) == "" || pkVal == "" { + continue + } + targetMap[pkVal] = row + } + sourcePKSet := make(map[string]struct{}, len(sourceRows)) + + for _, sRow := range sourceRows { + if sRow[pkCol] == nil { + continue + } + pkVal := fmt.Sprintf("%v", sRow[pkCol]) + if strings.TrimSpace(pkVal) == "" || pkVal == "" { + continue + } + sourcePKSet[pkVal] = struct{}{} + + if tRow, exists := targetMap[pkVal]; exists { + changes := make(map[string]interface{}) + for k, v := range sRow { + if fmt.Sprintf("%v", v) != fmt.Sprintf("%v", tRow[k]) { + changes[k] = v + } + } + if len(changes) > 0 { + updates = append(updates, connection.UpdateRow{ + Keys: map[string]interface{}{pkCol: sRow[pkCol]}, + Values: changes, + }) + } + } else { + inserts = append(inserts, sRow) + } + } + + var deletes []map[string]interface{} + if opts.Delete { + for pkStr, row := range targetMap { + if _, ok := sourcePKSet[pkStr]; ok { + continue + } + deletes = append(deletes, map[string]interface{}{pkCol: row[pkCol]}) + } + } + + // apply operation selection + inserts = filterRowsByPKSelection(pkCol, inserts, opts.Insert, opts.SelectedInsertPKs) + updates = filterUpdatesByPKSelection(pkCol, updates, opts.Update, opts.SelectedUpdatePKs) + deletes = filterRowsByPKSelection(pkCol, deletes, opts.Delete, opts.SelectedDeletePKs) + + changeSet := connection.ChangeSet{ + Inserts: inserts, + Updates: updates, + Deletes: deletes, + } + + // 4. 
Align schema (target missing columns) + s.progress(config.JobID, i, totalTables, tableName, "检查字段一致性") + requiredCols := collectRequiredColumns(changeSet.Inserts, changeSet.Updates) + targetCols, err := targetDB.GetColumns(targetSchema, targetTable) + if err != nil { + s.appendLog(config.JobID, &result, "warn", fmt.Sprintf(" -> 获取目标表字段失败,已跳过字段一致性检查: %v", err)) } else { - result.RowsInserted += len(inserts) - result.RowsUpdated += len(updates) + targetColSet := make(map[string]struct{}, len(targetCols)) + for _, c := range targetCols { + name := strings.ToLower(strings.TrimSpace(c.Name)) + if name == "" { + continue + } + targetColSet[name] = struct{}{} + } + + missing := make([]string, 0) + for lower, original := range requiredCols { + if _, ok := targetColSet[lower]; !ok { + missing = append(missing, original) + } + } + sort.Strings(missing) + + if len(missing) > 0 { + if config.AutoAddColumns && strings.ToLower(strings.TrimSpace(config.TargetConfig.Type)) == "mysql" { + s.appendLog(config.JobID, &result, "warn", fmt.Sprintf(" -> 目标表缺少字段 %d 个,开始自动补齐: %s", len(missing), strings.Join(missing, ", "))) + added := 0 + for _, colName := range missing { + colLower := strings.ToLower(strings.TrimSpace(colName)) + colType := "TEXT" + if strings.ToLower(strings.TrimSpace(config.SourceConfig.Type)) == "mysql" { + if srcCol, ok := sourceColsByLower[colLower]; ok { + colType = sanitizeMySQLColumnType(srcCol.Type) + } + } + + alterSQL := fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s %s NULL", + quoteQualifiedIdentByType("mysql", targetQueryTable), + quoteIdentByType("mysql", colName), + colType, + ) + if _, err := targetDB.Exec(alterSQL); err != nil { + s.appendLog(config.JobID, &result, "error", fmt.Sprintf(" -> 自动补字段失败:字段=%s 错误=%v", colName, err)) + continue + } + added++ + } + s.appendLog(config.JobID, &result, "info", fmt.Sprintf(" -> 自动补字段完成:成功=%d 失败=%d", added, len(missing)-added)) + + // refresh columns + targetCols, err = targetDB.GetColumns(targetSchema, targetTable) + if 
err == nil { + targetColSet = make(map[string]struct{}, len(targetCols)) + for _, c := range targetCols { + name := strings.ToLower(strings.TrimSpace(c.Name)) + if name == "" { + continue + } + targetColSet[name] = struct{}{} + } + } + } else { + s.appendLog(config.JobID, &result, "warn", fmt.Sprintf(" -> 目标表缺少字段 %d 个(未开启自动补齐),将自动忽略:%s", len(missing), strings.Join(missing, ", "))) + } + + // filter out still-missing columns to avoid apply failure + changeSet.Inserts = filterInsertRows(changeSet.Inserts, targetColSet) + changeSet.Updates = filterUpdateRows(changeSet.Updates, targetColSet) + } + } + + // 5. Apply Changes + s.progress(config.JobID, i, totalTables, tableName, "应用变更") + + if len(changeSet.Inserts) > 0 || len(changeSet.Updates) > 0 || len(changeSet.Deletes) > 0 { + s.appendLog(config.JobID, &result, "info", fmt.Sprintf(" -> 需插入: %d 行, 需更新: %d 行, 需删除: %d 行", len(changeSet.Inserts), len(changeSet.Updates), len(changeSet.Deletes))) + + if applier, ok := targetDB.(db.BatchApplier); ok { + if err := applier.ApplyChanges(targetTable, changeSet); err != nil { + s.appendLog(config.JobID, &result, "error", fmt.Sprintf(" -> 应用变更失败: %v", err)) + } else { + result.RowsInserted += len(changeSet.Inserts) + result.RowsUpdated += len(changeSet.Updates) + result.RowsDeleted += len(changeSet.Deletes) + } + } else { + s.appendLog(config.JobID, &result, "warn", " -> 目标驱动不支持应用数据变更 (ApplyChanges).") + } + } else { + s.appendLog(config.JobID, &result, "info", " -> 数据一致,无需变更.") + } + + result.TablesSynced++ + return + } else { + // insert_only / full_overwrite: do not compare target, just insert source rows + inserts = sourceRows + } + + // full_overwrite: clear target table first + if tableMode == "full_overwrite" { + s.appendLog(config.JobID, &result, "warn", fmt.Sprintf(" -> 全量覆盖模式:即将清空目标表 %s", tableName)) + s.progress(config.JobID, i, totalTables, tableName, "清空目标表") + clearSQL := "" + if strings.ToLower(strings.TrimSpace(config.TargetConfig.Type)) == "mysql" { + clearSQL = 
fmt.Sprintf("TRUNCATE TABLE %s", quoteQualifiedIdentByType(config.TargetConfig.Type, targetQueryTable)) + } else { + clearSQL = fmt.Sprintf("DELETE FROM %s", quoteQualifiedIdentByType(config.TargetConfig.Type, targetQueryTable)) + } + if _, err := targetDB.Exec(clearSQL); err != nil { + s.appendLog(config.JobID, &result, "error", fmt.Sprintf(" -> 清空目标表失败: %v", err)) + return + } + } + + // 4. Align schema (target missing columns) + s.progress(config.JobID, i, totalTables, tableName, "检查字段一致性") + requiredCols := collectRequiredColumns(inserts, updates) + targetCols, err := targetDB.GetColumns(targetSchema, targetTable) + if err != nil { + s.appendLog(config.JobID, &result, "warn", fmt.Sprintf(" -> 获取目标表字段失败,已跳过字段一致性检查: %v", err)) + } else { + targetColSet := make(map[string]struct{}, len(targetCols)) + for _, c := range targetCols { + name := strings.ToLower(strings.TrimSpace(c.Name)) + if name == "" { + continue + } + targetColSet[name] = struct{}{} + } + + missing := make([]string, 0) + for lower, original := range requiredCols { + if _, ok := targetColSet[lower]; !ok { + missing = append(missing, original) + } + } + sort.Strings(missing) + + if len(missing) > 0 { + if config.AutoAddColumns && strings.ToLower(strings.TrimSpace(config.TargetConfig.Type)) == "mysql" { + s.appendLog(config.JobID, &result, "warn", fmt.Sprintf(" -> 目标表缺少字段 %d 个,开始自动补齐: %s", len(missing), strings.Join(missing, ", "))) + added := 0 + for _, colName := range missing { + colLower := strings.ToLower(strings.TrimSpace(colName)) + colType := "TEXT" + if strings.ToLower(strings.TrimSpace(config.SourceConfig.Type)) == "mysql" { + if srcCol, ok := sourceColsByLower[colLower]; ok { + colType = sanitizeMySQLColumnType(srcCol.Type) + } + } + + alterSQL := fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s %s NULL", + quoteQualifiedIdentByType("mysql", targetQueryTable), + quoteIdentByType("mysql", colName), + colType, + ) + if _, err := targetDB.Exec(alterSQL); err != nil { + s.appendLog(config.JobID, 
&result, "error", fmt.Sprintf(" -> 自动补字段失败:字段=%s 错误=%v", colName, err)) + continue + } + added++ + } + s.appendLog(config.JobID, &result, "info", fmt.Sprintf(" -> 自动补字段完成:成功=%d 失败=%d", added, len(missing)-added)) + + // refresh columns + targetCols, err = targetDB.GetColumns(targetSchema, targetTable) + if err == nil { + targetColSet = make(map[string]struct{}, len(targetCols)) + for _, c := range targetCols { + name := strings.ToLower(strings.TrimSpace(c.Name)) + if name == "" { + continue + } + targetColSet[name] = struct{}{} + } + } + } else { + s.appendLog(config.JobID, &result, "warn", fmt.Sprintf(" -> 目标表缺少字段 %d 个(未开启自动补齐),将自动忽略:%s", len(missing), strings.Join(missing, ", "))) + } + + // filter out still-missing columns to avoid apply failure + inserts = filterInsertRows(inserts, targetColSet) + updates = filterUpdateRows(updates, targetColSet) + } + } + + // 5. Apply Changes + s.progress(config.JobID, i, totalTables, tableName, "应用变更") + changeSet := connection.ChangeSet{ + Inserts: inserts, + Updates: updates, + } + + if len(changeSet.Inserts) > 0 || len(changeSet.Updates) > 0 { + s.appendLog(config.JobID, &result, "info", fmt.Sprintf(" -> 需插入: %d 行, 需更新: %d 行", len(changeSet.Inserts), len(changeSet.Updates))) + + if applier, ok := targetDB.(db.BatchApplier); ok { + if err := applier.ApplyChanges(targetTable, changeSet); err != nil { + s.appendLog(config.JobID, &result, "error", fmt.Sprintf(" -> 应用变更失败: %v", err)) + } else { + result.RowsInserted += len(changeSet.Inserts) + result.RowsUpdated += len(changeSet.Updates) + } + } else { + s.appendLog(config.JobID, &result, "warn", " -> 目标驱动不支持应用数据变更 (ApplyChanges).") } } else { - result.Logs = append(result.Logs, " -> 目标驱动不支持应用数据变更 (ApplyChanges).") + s.appendLog(config.JobID, &result, "info", " -> 数据一致,无需变更.") } - } else { - result.Logs = append(result.Logs, " -> 数据一致,无需变更.") - } - result.TablesSynced++ + result.TablesSynced++ + }() } + s.progress(config.JobID, totalTables, totalTables, "", "同步完成") return 
result } @@ -196,9 +505,52 @@ func formatConnSummaryForSync(config connection.ConnectionConfig) string { config.Type, config.Host, config.Port, dbName, config.User, timeoutSeconds) } -func (s *SyncEngine) fail(res SyncResult, msg string) SyncResult { +func (s *SyncEngine) appendLog(jobID string, res *SyncResult, level string, msg string) { + if res != nil { + res.Logs = append(res.Logs, msg) + } + if s.reporter.OnLog != nil && strings.TrimSpace(jobID) != "" { + s.reporter.OnLog(SyncLogEvent{ + JobID: jobID, + Level: level, + Message: msg, + Ts: time.Now().UnixMilli(), + }) + } +} + +func (s *SyncEngine) progress(jobID string, current, total int, table string, stage string) { + if s.reporter.OnProgress == nil || strings.TrimSpace(jobID) == "" { + return + } + percent := 0 + if total <= 0 { + if current > 0 { + percent = 100 + } + } else { + if current < 0 { + current = 0 + } + if current > total { + current = total + } + percent = (current * 100) / total + } + s.reporter.OnProgress(SyncProgressEvent{ + JobID: jobID, + Percent: percent, + Current: current, + Total: total, + Table: table, + Stage: stage, + }) +} + +func (s *SyncEngine) fail(jobID string, totalTables int, res SyncResult, msg string) SyncResult { res.Success = false res.Message = msg - res.Logs = append(res.Logs, "致命错误: "+msg) + s.appendLog(jobID, &res, "error", "致命错误: "+msg) + s.progress(jobID, res.TablesSynced, totalTables, "", "同步失败") return res } diff --git a/internal/sync/sync_events.go b/internal/sync/sync_events.go new file mode 100644 index 0000000..1facae7 --- /dev/null +++ b/internal/sync/sync_events.go @@ -0,0 +1,30 @@ +package sync + +const ( + EventSyncStart = "sync:start" + EventSyncProgress = "sync:progress" + EventSyncLog = "sync:log" + EventSyncDone = "sync:done" +) + +type SyncLogEvent struct { + JobID string `json:"jobId"` + Level string `json:"level"` // info/warn/error + Message string `json:"message"` + Ts int64 `json:"ts"` // Unix milli +} + +type SyncProgressEvent struct { + 
JobID string `json:"jobId"` + Percent int `json:"percent"` + Current int `json:"current"` // 已完成表数 + Total int `json:"total"` // 总表数 + Table string `json:"table,omitempty"` + Stage string `json:"stage,omitempty"` +} + +type Reporter struct { + OnLog func(event SyncLogEvent) + OnProgress func(event SyncProgressEvent) +} + diff --git a/internal/sync/table_options.go b/internal/sync/table_options.go new file mode 100644 index 0000000..894f19b --- /dev/null +++ b/internal/sync/table_options.go @@ -0,0 +1,13 @@ +package sync + +// TableOptions controls which operations to apply per table, and optional row selection. +// 注意:如未指定 Selected*PKs,则表示“同步全部该类型差异数据”;如指定为空数组,则同样表示全部。 +type TableOptions struct { + Insert bool `json:"insert,omitempty"` + Update bool `json:"update,omitempty"` + Delete bool `json:"delete,omitempty"` + + SelectedInsertPKs []string `json:"selectedInsertPks,omitempty"` + SelectedUpdatePKs []string `json:"selectedUpdatePks,omitempty"` + SelectedDeletePKs []string `json:"selectedDeletePks,omitempty"` +}