mirror of
https://github.com/Syngnat/GoNavi.git
synced 2026-05-11 23:49:58 +08:00
✨ feat(data-sync): 扩展跨库迁移链路并优化数据同步交互
- 统一同库同步与跨库迁移入口,补充模式区分与风险提示 - 扩展 ClickHouse 与 PG-like 双向迁移,并新增 PG-like、ClickHouse、TDengine 到 MongoDB 的迁移路由 - 完善 TDengine 目标端建表规划、回归测试与需求追踪文档 - refs #51
This commit is contained in:
@@ -1 +1 @@
|
||||
d0f9366af59a6367ad3c7e2d4185ead4
|
||||
5b8157374dae5f9340e31b2d0bd2c00e
|
||||
@@ -1,9 +1,11 @@
|
||||
import React, { useState, useEffect, useMemo, useRef } from 'react';
|
||||
import { Modal, Form, Select, Button, message, Steps, Transfer, Card, Alert, Divider, Typography, Progress, Checkbox, Table, Drawer, Tabs } from 'antd';
|
||||
import { Modal, Form, Select, Input, Button, message, Steps, Transfer, Card, Alert, Divider, Typography, Progress, Checkbox, Table, Drawer, Tabs, theme as antdTheme } from 'antd';
|
||||
import { DatabaseOutlined, RocketOutlined, SwapOutlined, TableOutlined } from '@ant-design/icons';
|
||||
import { useStore } from '../store';
|
||||
import { DBGetDatabases, DBGetTables, DataSync, DataSyncAnalyze, DataSyncPreview } from '../../wailsjs/go/app/App';
|
||||
import { SavedConnection } from '../types';
|
||||
import { EventsOn } from '../../wailsjs/runtime/runtime';
|
||||
import { normalizeOpacityForPlatform, resolveAppearanceValues } from '../utils/appearance';
|
||||
|
||||
const { Title, Text } = Typography;
|
||||
const { Step } = Steps;
|
||||
@@ -21,6 +23,12 @@ type TableDiffSummary = {
|
||||
deletes?: number;
|
||||
same?: number;
|
||||
message?: string;
|
||||
targetTableExists?: boolean;
|
||||
plannedAction?: string;
|
||||
warnings?: string[];
|
||||
unsupportedObjects?: string[];
|
||||
indexesToCreate?: number;
|
||||
indexesSkipped?: number;
|
||||
};
|
||||
type TableOps = {
|
||||
insert: boolean;
|
||||
@@ -31,6 +39,8 @@ type TableOps = {
|
||||
selectedDeletePks?: string[];
|
||||
};
|
||||
|
||||
type WorkflowType = 'sync' | 'migration';
|
||||
|
||||
const quoteSqlIdent = (dbType: string, ident: string): string => {
|
||||
const raw = String(ident || '').trim();
|
||||
if (!raw) return raw;
|
||||
@@ -76,6 +86,11 @@ const toSqlLiteral = (value: any, dbType: string): string => {
|
||||
return `'${String(value).replace(/'/g, "''")}'`;
|
||||
};
|
||||
|
||||
const resolveRedisDbIndex = (raw?: string): number => {
|
||||
const value = Number(String(raw || '').trim());
|
||||
return Number.isInteger(value) && value >= 0 && value <= 15 ? value : 0;
|
||||
};
|
||||
|
||||
const buildSqlPreview = (
|
||||
previewData: any,
|
||||
tableName: string,
|
||||
@@ -145,8 +160,14 @@ const buildSqlPreview = (
|
||||
|
||||
const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open, onClose }) => {
|
||||
const connections = useStore((state) => state.connections);
|
||||
const themeMode = useStore((state) => state.theme);
|
||||
const appearance = useStore((state) => state.appearance);
|
||||
const [currentStep, setCurrentStep] = useState(0);
|
||||
const [loading, setLoading] = useState(false);
|
||||
const { token } = antdTheme.useToken();
|
||||
const darkMode = themeMode === 'dark';
|
||||
const resolvedAppearance = resolveAppearanceValues(appearance);
|
||||
const effectiveOpacity = normalizeOpacityForPlatform(resolvedAppearance.opacity);
|
||||
|
||||
// Step 1: Config
|
||||
const [sourceConnId, setSourceConnId] = useState<string>('');
|
||||
@@ -162,9 +183,13 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open,
|
||||
const [selectedTables, setSelectedTables] = useState<string[]>([]);
|
||||
|
||||
// Options
|
||||
const [workflowType, setWorkflowType] = useState<WorkflowType>('sync');
|
||||
const [syncContent, setSyncContent] = useState<'data' | 'schema' | 'both'>('data');
|
||||
const [syncMode, setSyncMode] = useState<string>('insert_update');
|
||||
const [autoAddColumns, setAutoAddColumns] = useState<boolean>(true);
|
||||
const [targetTableStrategy, setTargetTableStrategy] = useState<'existing_only' | 'auto_create_if_missing' | 'smart'>('existing_only');
|
||||
const [createIndexes, setCreateIndexes] = useState<boolean>(false);
|
||||
const [mongoCollectionName, setMongoCollectionName] = useState<string>('');
|
||||
const [showSameTables, setShowSameTables] = useState<boolean>(false);
|
||||
const [analyzing, setAnalyzing] = useState<boolean>(false);
|
||||
const [diffTables, setDiffTables] = useState<TableDiffSummary[]>([]);
|
||||
@@ -240,9 +265,12 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open,
|
||||
setSourceDb('');
|
||||
setTargetDb('');
|
||||
setSelectedTables([]);
|
||||
setWorkflowType('sync');
|
||||
setSyncContent('data');
|
||||
setSyncMode('insert_update');
|
||||
setAutoAddColumns(true);
|
||||
setTargetTableStrategy('existing_only');
|
||||
setCreateIndexes(false);
|
||||
setShowSameTables(false);
|
||||
setAnalyzing(false);
|
||||
setDiffTables([]);
|
||||
@@ -260,6 +288,30 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open,
|
||||
}
|
||||
}, [open]);
|
||||
|
||||
useEffect(() => {
|
||||
if (workflowType === 'migration') {
|
||||
if (syncMode === 'insert_update') {
|
||||
setSyncMode('insert_only');
|
||||
}
|
||||
if (syncContent === 'schema') {
|
||||
setSyncContent('both');
|
||||
}
|
||||
if (targetTableStrategy === 'existing_only') {
|
||||
setTargetTableStrategy('smart');
|
||||
}
|
||||
if (!createIndexes) {
|
||||
setCreateIndexes(true);
|
||||
}
|
||||
} else {
|
||||
if (targetTableStrategy !== 'existing_only') {
|
||||
setTargetTableStrategy('existing_only');
|
||||
}
|
||||
if (createIndexes) {
|
||||
setCreateIndexes(false);
|
||||
}
|
||||
}
|
||||
}, [workflowType]);
|
||||
|
||||
const handleSourceConnChange = async (connId: string) => {
|
||||
setSourceConnId(connId);
|
||||
setSourceDb('');
|
||||
@@ -357,6 +409,9 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open,
|
||||
content: syncContent,
|
||||
mode: "insert_update",
|
||||
autoAddColumns,
|
||||
targetTableStrategy,
|
||||
createIndexes,
|
||||
mongoCollectionName: mongoCollectionName.trim(),
|
||||
jobId,
|
||||
};
|
||||
|
||||
@@ -407,6 +462,9 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open,
|
||||
content: "data",
|
||||
mode: "insert_update",
|
||||
autoAddColumns,
|
||||
targetTableStrategy,
|
||||
createIndexes,
|
||||
mongoCollectionName: mongoCollectionName.trim(),
|
||||
};
|
||||
|
||||
try {
|
||||
@@ -483,6 +541,9 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open,
|
||||
content: syncContent,
|
||||
mode: syncMode,
|
||||
autoAddColumns,
|
||||
targetTableStrategy,
|
||||
createIndexes,
|
||||
mongoCollectionName: mongoCollectionName.trim(),
|
||||
tableOptions,
|
||||
jobId,
|
||||
};
|
||||
@@ -530,10 +591,132 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open,
|
||||
return buildSqlPreview(previewData, previewTable, targetType, ops);
|
||||
}, [previewData, previewTable, targetConnId, connections, tableOptions]);
|
||||
|
||||
const analysisWarnings = useMemo(() => {
|
||||
const items: string[] = [];
|
||||
diffTables.forEach((table) => {
|
||||
(table.warnings || []).forEach((warning) => items.push(`${table.table}: ${warning}`));
|
||||
(table.unsupportedObjects || []).forEach((warning) => items.push(`${table.table}: ${warning}`));
|
||||
});
|
||||
return Array.from(new Set(items));
|
||||
}, [diffTables]);
|
||||
|
||||
const isMigrationWorkflow = workflowType === 'migration';
|
||||
const sourceConn = useMemo(() => connections.find(c => c.id === sourceConnId), [connections, sourceConnId]);
|
||||
const targetConn = useMemo(() => connections.find(c => c.id === targetConnId), [connections, targetConnId]);
|
||||
const sourceType = String(sourceConn?.config?.type || '').toLowerCase();
|
||||
const targetType = String(targetConn?.config?.type || '').toLowerCase();
|
||||
const isRedisMongoKeyspaceMigration = isMigrationWorkflow && (
|
||||
(sourceType === 'redis' && targetType === 'mongodb') ||
|
||||
(sourceType === 'mongodb' && targetType === 'redis')
|
||||
);
|
||||
const defaultMongoCollectionName = useMemo(() => {
|
||||
if (sourceType === 'redis' && targetType === 'mongodb') {
|
||||
return `redis_db_${resolveRedisDbIndex(sourceDb || sourceConn?.config?.database)}_keys`;
|
||||
}
|
||||
if (sourceType === 'mongodb' && targetType === 'redis') {
|
||||
return selectedTables[0] || `redis_db_${resolveRedisDbIndex(targetDb || targetConn?.config?.database)}_keys`;
|
||||
}
|
||||
return '';
|
||||
}, [sourceType, targetType, sourceDb, targetDb, sourceConn, targetConn, selectedTables]);
|
||||
|
||||
const modalPanelStyle = useMemo(() => ({
|
||||
background: darkMode
|
||||
? 'linear-gradient(180deg, rgba(16,22,34,0.96) 0%, rgba(10,14,24,0.98) 100%)'
|
||||
: 'linear-gradient(180deg, rgba(255,255,255,0.98) 0%, rgba(246,248,252,0.98) 100%)',
|
||||
border: darkMode ? '1px solid rgba(255,255,255,0.08)' : '1px solid rgba(16,24,40,0.08)',
|
||||
boxShadow: darkMode ? '0 24px 56px rgba(0,0,0,0.36)' : '0 18px 44px rgba(15,23,42,0.14)',
|
||||
backdropFilter: darkMode ? 'blur(18px)' : 'none',
|
||||
}), [darkMode]);
|
||||
|
||||
const shellCardStyle = useMemo<React.CSSProperties>(() => ({
|
||||
borderRadius: 18,
|
||||
border: darkMode ? '1px solid rgba(255,255,255,0.08)' : '1px solid rgba(15,23,42,0.08)',
|
||||
background: darkMode ? 'rgba(255,255,255,0.03)' : `rgba(255,255,255,${Math.max(effectiveOpacity, 0.88)})`,
|
||||
boxShadow: darkMode ? '0 12px 32px rgba(0,0,0,0.22)' : '0 10px 24px rgba(15,23,42,0.08)',
|
||||
overflow: 'hidden',
|
||||
}), [darkMode, effectiveOpacity]);
|
||||
|
||||
const heroPanelStyle = useMemo<React.CSSProperties>(() => ({
|
||||
padding: 18,
|
||||
borderRadius: 18,
|
||||
border: darkMode ? '1px solid rgba(255,214,102,0.12)' : '1px solid rgba(24,144,255,0.12)',
|
||||
background: darkMode
|
||||
? 'linear-gradient(135deg, rgba(255,214,102,0.10) 0%, rgba(255,255,255,0.03) 100%)'
|
||||
: 'linear-gradient(135deg, rgba(24,144,255,0.10) 0%, rgba(255,255,255,0.95) 100%)',
|
||||
marginBottom: 18,
|
||||
}), [darkMode]);
|
||||
|
||||
const badgeStyle = useMemo<React.CSSProperties>(() => ({
|
||||
display: 'inline-flex',
|
||||
alignItems: 'center',
|
||||
gap: 6,
|
||||
padding: '6px 10px',
|
||||
borderRadius: 999,
|
||||
border: darkMode ? '1px solid rgba(255,255,255,0.10)' : '1px solid rgba(15,23,42,0.08)',
|
||||
background: darkMode ? 'rgba(255,255,255,0.04)' : 'rgba(255,255,255,0.86)',
|
||||
color: darkMode ? 'rgba(255,255,255,0.88)' : '#334155',
|
||||
fontSize: 12,
|
||||
fontWeight: 600,
|
||||
}), [darkMode]);
|
||||
|
||||
const quietPanelStyle = useMemo<React.CSSProperties>(() => ({
|
||||
padding: 14,
|
||||
borderRadius: 16,
|
||||
border: darkMode ? '1px solid rgba(255,255,255,0.08)' : '1px solid rgba(15,23,42,0.08)',
|
||||
background: darkMode ? 'rgba(255,255,255,0.025)' : 'rgba(248,250,252,0.92)',
|
||||
}), [darkMode]);
|
||||
|
||||
const modalWorkspaceStyle = useMemo<React.CSSProperties>(() => ({
|
||||
display: 'flex',
|
||||
flexDirection: 'column',
|
||||
height: '100%',
|
||||
minHeight: 0,
|
||||
}), []);
|
||||
|
||||
const modalScrollableContentStyle = useMemo<React.CSSProperties>(() => ({
|
||||
flex: 1,
|
||||
minHeight: 0,
|
||||
overflowY: 'auto',
|
||||
overflowX: 'hidden',
|
||||
paddingRight: 4,
|
||||
overscrollBehavior: 'contain',
|
||||
}), []);
|
||||
|
||||
const modalFooterBarStyle = useMemo<React.CSSProperties>(() => ({
|
||||
marginTop: 18,
|
||||
display: 'flex',
|
||||
justifyContent: 'flex-end',
|
||||
gap: 8,
|
||||
paddingTop: 12,
|
||||
borderTop: darkMode ? '1px solid rgba(255,255,255,0.06)' : '1px solid rgba(15,23,42,0.06)',
|
||||
flex: '0 0 auto',
|
||||
}), [darkMode]);
|
||||
|
||||
const renderModalTitle = (title: string, description: string) => (
|
||||
<div style={{ display: 'flex', alignItems: 'flex-start', gap: 12 }}>
|
||||
<div style={{
|
||||
width: 38,
|
||||
height: 38,
|
||||
borderRadius: 14,
|
||||
display: 'grid',
|
||||
placeItems: 'center',
|
||||
background: darkMode ? 'rgba(255,214,102,0.12)' : 'rgba(24,144,255,0.10)',
|
||||
color: darkMode ? '#ffd666' : token.colorPrimary,
|
||||
flexShrink: 0,
|
||||
}}>
|
||||
{isMigrationWorkflow ? <RocketOutlined /> : <SwapOutlined />}
|
||||
</div>
|
||||
<div style={{ minWidth: 0 }}>
|
||||
<div style={{ fontSize: 16, fontWeight: 700, color: darkMode ? '#f8fafc' : '#0f172a' }}>{title}</div>
|
||||
<div style={{ marginTop: 4, fontSize: 12, lineHeight: 1.6, color: darkMode ? 'rgba(255,255,255,0.56)' : 'rgba(15,23,42,0.58)' }}>{description}</div>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
|
||||
return (
|
||||
<>
|
||||
<Modal
|
||||
title="数据同步"
|
||||
title={renderModalTitle(isMigrationWorkflow ? '跨库迁移工作台' : '数据同步工作台', isMigrationWorkflow ? '按源库 → 目标库完成建表、导入与风险预检。' : '按已有目标表完成差异对比、同步执行与结果确认。')}
|
||||
open={open}
|
||||
onCancel={() => {
|
||||
if (syncing) {
|
||||
@@ -542,23 +725,61 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open,
|
||||
}
|
||||
onClose();
|
||||
}}
|
||||
width={800}
|
||||
width={920}
|
||||
footer={null}
|
||||
destroyOnHidden
|
||||
closable={!syncing}
|
||||
maskClosable={!syncing}
|
||||
styles={{
|
||||
content: modalPanelStyle,
|
||||
header: { background: 'transparent', borderBottom: 'none', paddingBottom: 10 },
|
||||
body: {
|
||||
paddingTop: 8,
|
||||
height: 760,
|
||||
maxHeight: 'calc(100vh - 120px)',
|
||||
overflow: 'hidden',
|
||||
display: 'flex',
|
||||
flexDirection: 'column',
|
||||
},
|
||||
footer: { background: 'transparent', borderTop: 'none', paddingTop: 12 },
|
||||
}}
|
||||
>
|
||||
<div style={modalWorkspaceStyle}>
|
||||
<div style={{ flex: '0 0 auto' }}>
|
||||
<div style={heroPanelStyle}>
|
||||
<div style={{ display: 'flex', justifyContent: 'space-between', gap: 12, alignItems: 'flex-start', flexWrap: 'wrap' }}>
|
||||
<div style={{ minWidth: 0 }}>
|
||||
<div style={{ fontSize: 18, fontWeight: 700, color: darkMode ? '#f8fafc' : '#0f172a' }}>{isMigrationWorkflow ? '跨数据源迁移' : '数据同步'}</div>
|
||||
<div style={{ marginTop: 6, fontSize: 13, lineHeight: 1.7, color: darkMode ? 'rgba(255,255,255,0.62)' : 'rgba(15,23,42,0.62)' }}>
|
||||
{isMigrationWorkflow
|
||||
? '适合把源表迁移到另一套数据库,可按策略自动建表、导入数据并补建可兼容索引。'
|
||||
: '适合目标表已存在的场景,先做差异分析,再按勾选执行插入、更新或删除。'}
|
||||
</div>
|
||||
</div>
|
||||
<div style={{ display: 'flex', flexWrap: 'wrap', gap: 8 }}>
|
||||
<span style={badgeStyle}>{isMigrationWorkflow ? <RocketOutlined /> : <SwapOutlined />} {isMigrationWorkflow ? '迁移模式' : '同步模式'}</span>
|
||||
<span style={badgeStyle}><DatabaseOutlined /> {sourceConnId ? '已选源连接' : '待选源连接'}</span>
|
||||
<span style={badgeStyle}><TableOutlined /> {selectedTables.length || 0} 张表</span>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<Steps current={currentStep} style={{ marginBottom: 24 }}>
|
||||
<Step title="配置源与目标" />
|
||||
<Step title="选择表" />
|
||||
<Step title="执行结果" />
|
||||
</Steps>
|
||||
</div>
|
||||
|
||||
<div style={modalScrollableContentStyle}>
|
||||
{/* STEP 1: CONFIG */}
|
||||
{currentStep === 0 && (
|
||||
<div>
|
||||
<div style={{ display: 'flex', gap: 24, justifyContent: 'center' }}>
|
||||
<Card title="源数据库" style={{ width: 350 }}>
|
||||
<div style={{ display: 'grid', gridTemplateColumns: 'minmax(0, 1fr) 44px minmax(0, 1fr)', gap: 18, alignItems: 'stretch' }}>
|
||||
<Card
|
||||
title="源数据库"
|
||||
style={shellCardStyle}
|
||||
styles={{ header: { borderBottom: darkMode ? '1px solid rgba(255,255,255,0.08)' : '1px solid rgba(15,23,42,0.06)', fontWeight: 700 }, body: { padding: 18 } }}
|
||||
>
|
||||
<Form layout="vertical">
|
||||
<Form.Item label="连接">
|
||||
<Select value={sourceConnId} onChange={handleSourceConnChange}>
|
||||
@@ -572,8 +793,16 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open,
|
||||
</Form.Item>
|
||||
</Form>
|
||||
</Card>
|
||||
<div style={{ display: 'flex', alignItems: 'center' }}>至</div>
|
||||
<Card title="目标数据库" style={{ width: 350 }}>
|
||||
<div style={{ display: 'grid', placeItems: 'center' }}>
|
||||
<div style={{ ...badgeStyle, width: 44, height: 44, borderRadius: 14, justifyContent: 'center', padding: 0 }}>
|
||||
<SwapOutlined />
|
||||
</div>
|
||||
</div>
|
||||
<Card
|
||||
title="目标数据库"
|
||||
style={shellCardStyle}
|
||||
styles={{ header: { borderBottom: darkMode ? '1px solid rgba(255,255,255,0.08)' : '1px solid rgba(15,23,42,0.06)', fontWeight: 700 }, body: { padding: 18 } }}
|
||||
>
|
||||
<Form layout="vertical">
|
||||
<Form.Item label="连接">
|
||||
<Select value={targetConnId} onChange={handleTargetConnChange}>
|
||||
@@ -589,27 +818,94 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open,
|
||||
</Card>
|
||||
</div>
|
||||
|
||||
<Card title="同步选项" style={{ marginTop: 16 }}>
|
||||
<Card
|
||||
title={isMigrationWorkflow ? '迁移选项' : '同步选项'}
|
||||
style={{ ...shellCardStyle, marginTop: 18 }}
|
||||
styles={{ header: { borderBottom: darkMode ? '1px solid rgba(255,255,255,0.08)' : '1px solid rgba(15,23,42,0.06)', fontWeight: 700 }, body: { padding: 18 } }}
|
||||
>
|
||||
<div style={{ ...quietPanelStyle, marginBottom: 14 }}>
|
||||
<Text style={{ color: darkMode ? 'rgba(255,255,255,0.72)' : 'rgba(15,23,42,0.68)', lineHeight: 1.7 }}>
|
||||
先明确当前要做的是“已有目标表同步”还是“跨库迁移”,页面会按功能类型自动给出更安全的默认策略。
|
||||
</Text>
|
||||
</div>
|
||||
<Form layout="vertical">
|
||||
<Form.Item label="同步内容">
|
||||
<Form.Item label="功能类型">
|
||||
<Select value={workflowType} onChange={setWorkflowType}>
|
||||
<Option value="sync">数据同步(基于已有目标表做差异同步)</Option>
|
||||
<Option value="migration">跨库迁移(可自动建表后导入)</Option>
|
||||
</Select>
|
||||
</Form.Item>
|
||||
<Alert
|
||||
type={isMigrationWorkflow ? 'info' : 'success'}
|
||||
showIcon
|
||||
style={{ marginBottom: 12 }}
|
||||
message={isMigrationWorkflow
|
||||
? '当前为“跨库迁移”模式:适合将表迁移到另一数据源,可自动建表并导入数据。'
|
||||
: '当前为“数据同步”模式:适合目标表已存在时做增量同步或覆盖导入。'}
|
||||
/>
|
||||
<Form.Item label={isMigrationWorkflow ? '迁移内容' : '同步内容'}>
|
||||
<Select value={syncContent} onChange={setSyncContent}>
|
||||
<Option value="data">仅同步数据</Option>
|
||||
<Option value="schema">仅同步结构</Option>
|
||||
<Option value="both">同步结构 + 数据</Option>
|
||||
</Select>
|
||||
</Form.Item>
|
||||
<Form.Item label="同步模式">
|
||||
<Form.Item label={isMigrationWorkflow ? '迁移模式' : '同步模式'}>
|
||||
<Select value={syncMode} onChange={setSyncMode} disabled={syncContent === 'schema'}>
|
||||
<Option value="insert_update">增量同步(对比差异,按插入/更新/删除勾选执行)</Option>
|
||||
<Option value="insert_only">仅插入(不对比目标;无主键表将跳过)</Option>
|
||||
<Option value="full_overwrite">全量覆盖(清空目标表后插入)</Option>
|
||||
</Select>
|
||||
</Form.Item>
|
||||
<Form.Item label={isMigrationWorkflow ? '目标表处理策略' : '目标表要求'}>
|
||||
<Select value={targetTableStrategy} onChange={setTargetTableStrategy} disabled={!isMigrationWorkflow}>
|
||||
<Option value="existing_only">仅使用已有目标表</Option>
|
||||
<Option value="auto_create_if_missing">目标表不存在时自动建表后导入</Option>
|
||||
<Option value="smart">智能模式(存在则直接导入,不存在则自动建表)</Option>
|
||||
</Select>
|
||||
</Form.Item>
|
||||
{isRedisMongoKeyspaceMigration && (
|
||||
<Form.Item
|
||||
label="Mongo 集合名(可选)"
|
||||
extra={sourceType === 'redis'
|
||||
? '为空时沿用默认集合名;填写后本次 Redis 键空间会统一写入该 Mongo 集合。'
|
||||
: 'MongoDB → Redis 场景下通常直接选择源集合;这里留空即可,未显式选集合时才会回退使用该名称。'}
|
||||
>
|
||||
<Input
|
||||
value={mongoCollectionName}
|
||||
onChange={(e) => setMongoCollectionName(e.target.value)}
|
||||
placeholder={defaultMongoCollectionName || '请输入 Mongo 集合名'}
|
||||
allowClear
|
||||
maxLength={128}
|
||||
/>
|
||||
</Form.Item>
|
||||
)}
|
||||
<Form.Item>
|
||||
<Checkbox checked={autoAddColumns} onChange={(e) => setAutoAddColumns(e.target.checked)}>
|
||||
自动补齐目标表缺失字段(仅 MySQL 目标)
|
||||
自动补齐目标表缺失字段(当前支持 MySQL 目标及 MySQL → Kingbase)
|
||||
</Checkbox>
|
||||
</Form.Item>
|
||||
<Form.Item>
|
||||
<Checkbox checked={createIndexes} onChange={(e) => setCreateIndexes(e.target.checked)} disabled={!isMigrationWorkflow || targetTableStrategy === 'existing_only'}>
|
||||
自动迁移可兼容的普通索引/唯一索引(仅自动建表模式生效)
|
||||
</Checkbox>
|
||||
</Form.Item>
|
||||
{isMigrationWorkflow && targetTableStrategy !== 'existing_only' && (
|
||||
<Alert
|
||||
type="info"
|
||||
showIcon
|
||||
message="自动建表模式首期仅支持 MySQL → Kingbase;将迁移字段、主键、普通/唯一/联合索引,并显式跳过全文、空间、前缀、函数类索引。"
|
||||
style={{ marginBottom: 12 }}
|
||||
/>
|
||||
)}
|
||||
{!isMigrationWorkflow && (
|
||||
<Alert
|
||||
type="info"
|
||||
showIcon
|
||||
message="数据同步模式默认基于已有目标表执行;如需跨数据源建表导入,请切换到“跨库迁移”。"
|
||||
style={{ marginBottom: 12 }}
|
||||
/>
|
||||
)}
|
||||
{syncContent !== 'schema' && syncMode === 'full_overwrite' && (
|
||||
<Alert
|
||||
type="warning"
|
||||
@@ -624,26 +920,42 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open,
|
||||
|
||||
{/* STEP 2: TABLES */}
|
||||
{currentStep === 1 && (
|
||||
<div style={{ display: 'flex', flexDirection: 'column', gap: 12 }}>
|
||||
<div style={{ display: 'flex', justifyContent: 'space-between', alignItems: 'center' }}>
|
||||
<Text type="secondary">请选择需要同步的表:</Text>
|
||||
<div style={{ display: 'flex', flexDirection: 'column', gap: 14 }}>
|
||||
<div style={quietPanelStyle}>
|
||||
<div style={{ display: 'flex', justifyContent: 'space-between', alignItems: 'center', marginBottom: 10 }}>
|
||||
<Text type="secondary">请选择需要同步的表:</Text>
|
||||
<Checkbox checked={showSameTables} onChange={(e) => setShowSameTables(e.target.checked)}>
|
||||
显示相同表
|
||||
</Checkbox>
|
||||
</div>
|
||||
<Transfer
|
||||
</div>
|
||||
<Transfer
|
||||
dataSource={allTables.map(t => ({ key: t, title: t }))}
|
||||
titles={['源表', '已选表']}
|
||||
targetKeys={selectedTables}
|
||||
onChange={(keys) => setSelectedTables(keys as string[])}
|
||||
render={item => item.title}
|
||||
listStyle={{ width: 350, height: 280, marginTop: 0 }}
|
||||
locale={{ itemUnit: '项', itemsUnit: '项', searchPlaceholder: '搜索表', notFoundContent: '暂无数据' }}
|
||||
listStyle={{ width: 390, height: 320, marginTop: 0, borderRadius: 14, overflow: 'hidden' }}
|
||||
locale={{ itemUnit: '项', itemsUnit: '项', searchPlaceholder: '搜索表…', notFoundContent: '暂无数据' }}
|
||||
/>
|
||||
</div>
|
||||
|
||||
{diffTables.length > 0 && (
|
||||
<div>
|
||||
<Divider orientation="left">对比结果</Divider>
|
||||
<div style={quietPanelStyle}>
|
||||
<Divider orientation="left" style={{ marginTop: 0 }}>对比结果</Divider>
|
||||
{analysisWarnings.length > 0 && (
|
||||
<Alert
|
||||
type="warning"
|
||||
showIcon
|
||||
message="预检发现风险或降级项,请在执行前确认"
|
||||
description={
|
||||
<ul style={{ margin: 0, paddingLeft: 18 }}>
|
||||
{analysisWarnings.slice(0, 8).map((item) => <li key={item}>{item}</li>)}
|
||||
{analysisWarnings.length > 8 && <li>还有 {analysisWarnings.length - 8} 项未展开</li>}
|
||||
</ul>
|
||||
}
|
||||
style={{ marginBottom: 12 }}
|
||||
/>
|
||||
)}
|
||||
<Table
|
||||
size="small"
|
||||
pagination={false}
|
||||
@@ -655,13 +967,29 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open,
|
||||
const same = Number(t.same || 0);
|
||||
const msg = String(t.message || '').trim();
|
||||
const can = !!t.canSync;
|
||||
const warns = Array.isArray(t.warnings) ? t.warnings.length : 0;
|
||||
const unsupported = Array.isArray(t.unsupportedObjects) ? t.unsupportedObjects.length : 0;
|
||||
if (showSameTables) return true;
|
||||
if (!can) return true;
|
||||
if (msg) return true;
|
||||
if (msg || warns > 0 || unsupported > 0) return true;
|
||||
return ins > 0 || upd > 0 || del > 0 || same === 0;
|
||||
})}
|
||||
columns={[
|
||||
{ title: '表名', dataIndex: 'table', key: 'table', ellipsis: true },
|
||||
{
|
||||
title: '目标表',
|
||||
key: 'targetTableExists',
|
||||
width: 90,
|
||||
render: (_: any, r: any) => r.targetTableExists ? '已存在' : '不存在'
|
||||
},
|
||||
{
|
||||
title: '计划',
|
||||
dataIndex: 'plannedAction',
|
||||
key: 'plannedAction',
|
||||
width: 220,
|
||||
ellipsis: true,
|
||||
render: (v: any) => String(v || '')
|
||||
},
|
||||
{
|
||||
title: '插入',
|
||||
key: 'inserts',
|
||||
@@ -670,11 +998,7 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open,
|
||||
const ops = tableOptions[r.table] || { insert: true, update: true, delete: false };
|
||||
const disabled = !r.canSync || analyzing || Number(r.inserts || 0) === 0;
|
||||
return (
|
||||
<Checkbox
|
||||
checked={!!ops.insert}
|
||||
disabled={disabled}
|
||||
onChange={(e) => updateTableOption(r.table, 'insert', e.target.checked)}
|
||||
>
|
||||
<Checkbox checked={!!ops.insert} disabled={disabled} onChange={(e) => updateTableOption(r.table, 'insert', e.target.checked)}>
|
||||
{Number(r.inserts || 0)}
|
||||
</Checkbox>
|
||||
);
|
||||
@@ -688,11 +1012,7 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open,
|
||||
const ops = tableOptions[r.table] || { insert: true, update: true, delete: false };
|
||||
const disabled = !r.canSync || analyzing || Number(r.updates || 0) === 0;
|
||||
return (
|
||||
<Checkbox
|
||||
checked={!!ops.update}
|
||||
disabled={disabled}
|
||||
onChange={(e) => updateTableOption(r.table, 'update', e.target.checked)}
|
||||
>
|
||||
<Checkbox checked={!!ops.update} disabled={disabled} onChange={(e) => updateTableOption(r.table, 'update', e.target.checked)}>
|
||||
{Number(r.updates || 0)}
|
||||
</Checkbox>
|
||||
);
|
||||
@@ -706,18 +1026,28 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open,
|
||||
const ops = tableOptions[r.table] || { insert: true, update: true, delete: false };
|
||||
const disabled = !r.canSync || analyzing || Number(r.deletes || 0) === 0;
|
||||
return (
|
||||
<Checkbox
|
||||
checked={!!ops.delete}
|
||||
disabled={disabled}
|
||||
onChange={(e) => updateTableOption(r.table, 'delete', e.target.checked)}
|
||||
>
|
||||
<Checkbox checked={!!ops.delete} disabled={disabled} onChange={(e) => updateTableOption(r.table, 'delete', e.target.checked)}>
|
||||
{Number(r.deletes || 0)}
|
||||
</Checkbox>
|
||||
);
|
||||
}
|
||||
},
|
||||
{ title: '相同', dataIndex: 'same', key: 'same', width: 70, render: (v: any) => Number(v || 0) },
|
||||
{ title: '消息', dataIndex: 'message', key: 'message', ellipsis: true, render: (v: any) => (v ? String(v) : '') },
|
||||
{
|
||||
title: '风险',
|
||||
key: 'warnings',
|
||||
width: 220,
|
||||
render: (_: any, r: any) => {
|
||||
const warns = [...(Array.isArray(r.warnings) ? r.warnings : []), ...(Array.isArray(r.unsupportedObjects) ? r.unsupportedObjects : [])];
|
||||
if (warns.length === 0) return '-';
|
||||
return (
|
||||
<div style={{ color: '#d48806', fontSize: 12, lineHeight: 1.5 }}>
|
||||
{warns.slice(0, 2).map((item: string) => <div key={item}>{item}</div>)}
|
||||
{warns.length > 2 && <div>还有 {warns.length - 2} 项</div>}
|
||||
</div>
|
||||
);
|
||||
}
|
||||
},
|
||||
{
|
||||
title: '预览',
|
||||
key: 'preview',
|
||||
@@ -741,7 +1071,8 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open,
|
||||
|
||||
{/* STEP 3: RESULT */}
|
||||
{currentStep === 2 && (
|
||||
<div>
|
||||
<div style={{ display: 'flex', flexDirection: 'column', gap: 14 }}>
|
||||
<div style={quietPanelStyle}>
|
||||
<Alert
|
||||
message={syncing ? "正在同步" : (syncResult?.success ? "同步完成" : "同步失败")}
|
||||
description={
|
||||
@@ -753,7 +1084,7 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open,
|
||||
showIcon
|
||||
/>
|
||||
|
||||
<div style={{ marginTop: 12 }}>
|
||||
<div style={{ marginTop: 14 }}>
|
||||
<Progress
|
||||
percent={syncProgress.percent}
|
||||
status={syncing ? "active" : (syncResult?.success ? "success" : "exception")}
|
||||
@@ -761,7 +1092,9 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open,
|
||||
/>
|
||||
</div>
|
||||
|
||||
<Divider orientation="left">日志</Divider>
|
||||
</div>
|
||||
<div style={quietPanelStyle}>
|
||||
<Divider orientation="left" style={{ marginTop: 0 }}>执行日志</Divider>
|
||||
<div
|
||||
ref={logBoxRef}
|
||||
onScroll={() => {
|
||||
@@ -770,14 +1103,25 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open,
|
||||
const nearBottom = el.scrollHeight - el.scrollTop - el.clientHeight < 40;
|
||||
autoScrollRef.current = nearBottom;
|
||||
}}
|
||||
style={{ background: '#f5f5f5', padding: 12, height: 300, overflowY: 'auto', fontFamily: 'monospace' }}
|
||||
style={{
|
||||
background: darkMode ? 'rgba(255,255,255,0.03)' : 'rgba(248,250,252,0.92)',
|
||||
border: darkMode ? '1px solid rgba(255,255,255,0.08)' : '1px solid rgba(15,23,42,0.06)',
|
||||
borderRadius: 14,
|
||||
padding: 12,
|
||||
height: 300,
|
||||
overflowY: 'auto',
|
||||
fontFamily: 'SFMono-Regular, ui-monospace, Menlo, Consolas, monospace'
|
||||
}}
|
||||
>
|
||||
{syncLogs.map((item, i: number) => <div key={i}>{renderSyncLogItem(item)}</div>)}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
|
||||
<div style={{ marginTop: 24, textAlign: 'right' }}>
|
||||
</div>
|
||||
|
||||
<div style={modalFooterBarStyle}>
|
||||
{currentStep === 0 && (
|
||||
<Button type="primary" onClick={nextToTables} loading={loading}>下一步</Button>
|
||||
)}
|
||||
@@ -804,14 +1148,16 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open,
|
||||
</>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
</Modal>
|
||||
<Drawer
|
||||
title={`差异预览:${previewTable}`}
|
||||
styles={{ body: { background: darkMode ? 'rgba(9,13,20,0.98)' : '#f8fafc' } }}
|
||||
open={previewOpen}
|
||||
onClose={() => { setPreviewOpen(false); setPreviewTable(''); setPreviewData(null); }}
|
||||
width={900}
|
||||
>
|
||||
{previewLoading && <Alert type="info" showIcon message="正在加载差异预览..." />}
|
||||
{previewLoading && <Alert type="info" showIcon message="正在加载差异预览…" />}
|
||||
{!previewLoading && previewData && (
|
||||
<div>
|
||||
<Alert
|
||||
|
||||
@@ -277,6 +277,9 @@ export namespace sync {
|
||||
mode: string;
|
||||
jobId?: string;
|
||||
autoAddColumns?: boolean;
|
||||
targetTableStrategy?: string;
|
||||
createIndexes?: boolean;
|
||||
mongoCollectionName?: string;
|
||||
tableOptions?: Record<string, TableOptions>;
|
||||
|
||||
static createFrom(source: any = {}) {
|
||||
@@ -292,6 +295,9 @@ export namespace sync {
|
||||
this.mode = source["mode"];
|
||||
this.jobId = source["jobId"];
|
||||
this.autoAddColumns = source["autoAddColumns"];
|
||||
this.targetTableStrategy = source["targetTableStrategy"];
|
||||
this.createIndexes = source["createIndexes"];
|
||||
this.mongoCollectionName = source["mongoCollectionName"];
|
||||
this.tableOptions = this.convertValues(source["tableOptions"], TableOptions, true);
|
||||
}
|
||||
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package app
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"GoNavi-Wails/internal/connection"
|
||||
@@ -20,6 +21,11 @@ func normalizeRunConfig(config connection.ConnectionConfig, dbName string) conne
|
||||
case "dameng":
|
||||
// 达梦使用 schema 参数,沿用现有行为:dbName 表示 schema。
|
||||
runConfig.Database = name
|
||||
case "redis":
|
||||
runConfig.Database = name
|
||||
if idx, err := strconv.Atoi(name); err == nil && idx >= 0 && idx <= 15 {
|
||||
runConfig.RedisDB = idx
|
||||
}
|
||||
default:
|
||||
// oracle: dbName 表示 schema/owner,不能覆盖 config.Database(服务名)
|
||||
// sqlite: 无需设置 Database
|
||||
|
||||
@@ -3,6 +3,7 @@ package app
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@@ -547,6 +548,24 @@ func ensureNonNilSlice[T any](items []T) []T {
|
||||
|
||||
func (a *App) DBGetDatabases(config connection.ConnectionConfig) connection.QueryResult {
|
||||
runConfig := normalizeRunConfig(config, "")
|
||||
if strings.EqualFold(strings.TrimSpace(runConfig.Type), "redis") {
|
||||
runConfig.Type = "redis"
|
||||
client, err := a.getRedisClient(runConfig)
|
||||
if err != nil {
|
||||
logger.Error(err, "DBGetDatabases 获取 Redis 连接失败:%s", formatConnSummary(runConfig))
|
||||
return connection.QueryResult{Success: false, Message: err.Error()}
|
||||
}
|
||||
dbs, err := client.GetDatabases()
|
||||
if err != nil {
|
||||
logger.Error(err, "DBGetDatabases 获取 Redis 库列表失败:%s", formatConnSummary(runConfig))
|
||||
return connection.QueryResult{Success: false, Message: err.Error()}
|
||||
}
|
||||
resData := make([]map[string]string, 0, len(dbs))
|
||||
for _, item := range dbs {
|
||||
resData = append(resData, map[string]string{"Database": strconv.Itoa(item.Index)})
|
||||
}
|
||||
return connection.QueryResult{Success: true, Data: resData}
|
||||
}
|
||||
dbInst, err := a.getDatabase(runConfig)
|
||||
if err != nil {
|
||||
logger.Error(err, "DBGetDatabases 获取连接失败:%s", formatConnSummary(runConfig))
|
||||
@@ -579,6 +598,48 @@ func (a *App) DBGetDatabases(config connection.ConnectionConfig) connection.Quer
|
||||
|
||||
func (a *App) DBGetTables(config connection.ConnectionConfig, dbName string) connection.QueryResult {
|
||||
runConfig := normalizeRunConfig(config, dbName)
|
||||
if strings.EqualFold(strings.TrimSpace(runConfig.Type), "redis") {
|
||||
runConfig.Type = "redis"
|
||||
client, err := a.getRedisClient(runConfig)
|
||||
if err != nil {
|
||||
logger.Error(err, "DBGetTables 获取 Redis 连接失败:%s", formatConnSummary(runConfig))
|
||||
return connection.QueryResult{Success: false, Message: err.Error()}
|
||||
}
|
||||
cursor := uint64(0)
|
||||
tables := make([]string, 0, 128)
|
||||
seen := make(map[string]struct{}, 128)
|
||||
for {
|
||||
result, err := client.ScanKeys("*", cursor, 1000)
|
||||
if err != nil {
|
||||
logger.Error(err, "DBGetTables 扫描 Redis Key 失败:%s", formatConnSummary(runConfig))
|
||||
return connection.QueryResult{Success: false, Message: err.Error()}
|
||||
}
|
||||
for _, item := range result.Keys {
|
||||
key := strings.TrimSpace(item.Key)
|
||||
if key == "" {
|
||||
continue
|
||||
}
|
||||
if _, ok := seen[key]; ok {
|
||||
continue
|
||||
}
|
||||
seen[key] = struct{}{}
|
||||
tables = append(tables, key)
|
||||
}
|
||||
if strings.TrimSpace(result.Cursor) == "" || strings.TrimSpace(result.Cursor) == "0" {
|
||||
break
|
||||
}
|
||||
next, err := strconv.ParseUint(strings.TrimSpace(result.Cursor), 10, 64)
|
||||
if err != nil || next == cursor {
|
||||
break
|
||||
}
|
||||
cursor = next
|
||||
}
|
||||
resData := make([]map[string]string, 0, len(tables))
|
||||
for _, name := range tables {
|
||||
resData = append(resData, map[string]string{"Table": name})
|
||||
}
|
||||
return connection.QueryResult{Success: true, Data: resData}
|
||||
}
|
||||
|
||||
dbInst, err := a.getDatabase(runConfig)
|
||||
if err != nil {
|
||||
|
||||
@@ -90,6 +90,7 @@ type IndexDefinition struct {
|
||||
NonUnique int `json:"nonUnique"`
|
||||
SeqInIndex int `json:"seqInIndex"`
|
||||
IndexType string `json:"indexType"`
|
||||
SubPart int `json:"subPart,omitempty"`
|
||||
}
|
||||
|
||||
// ForeignKeyDefinition represents a foreign key
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"fmt"
|
||||
"net"
|
||||
"net/url"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -678,3 +679,134 @@ func isClickHouseTruthy(value interface{}) bool {
|
||||
return normalized == "1" || normalized == "true" || normalized == "yes" || normalized == "y"
|
||||
}
|
||||
}
|
||||
|
||||
func (c *ClickHouseDB) ApplyChanges(tableName string, changes connection.ChangeSet) error {
|
||||
if c.conn == nil {
|
||||
return fmt.Errorf("connection not open")
|
||||
}
|
||||
|
||||
database, table, err := c.resolveDatabaseAndTable(c.database, tableName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
qualifiedTable := fmt.Sprintf("%s.%s", quoteClickHouseIdentifier(database), quoteClickHouseIdentifier(table))
|
||||
|
||||
for _, pk := range changes.Deletes {
|
||||
whereExpr := buildClickHouseWhereClause(pk)
|
||||
if whereExpr == "" {
|
||||
continue
|
||||
}
|
||||
query := fmt.Sprintf("ALTER TABLE %s DELETE WHERE %s", qualifiedTable, whereExpr)
|
||||
if _, err := c.conn.Exec(query); err != nil {
|
||||
return fmt.Errorf("delete error: %v; sql=%s", err, query)
|
||||
}
|
||||
}
|
||||
|
||||
for _, update := range changes.Updates {
|
||||
setExpr := buildClickHouseAssignments(update.Values)
|
||||
whereExpr := buildClickHouseWhereClause(update.Keys)
|
||||
if setExpr == "" || whereExpr == "" {
|
||||
continue
|
||||
}
|
||||
query := fmt.Sprintf("ALTER TABLE %s UPDATE %s WHERE %s", qualifiedTable, setExpr, whereExpr)
|
||||
if _, err := c.conn.Exec(query); err != nil {
|
||||
return fmt.Errorf("update error: %v; sql=%s", err, query)
|
||||
}
|
||||
}
|
||||
|
||||
for _, row := range changes.Inserts {
|
||||
query, err := buildClickHouseInsertSQL(qualifiedTable, row)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if query == "" {
|
||||
continue
|
||||
}
|
||||
if _, err := c.conn.Exec(query); err != nil {
|
||||
return fmt.Errorf("insert error: %v; sql=%s", err, query)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func buildClickHouseInsertSQL(qualifiedTable string, row map[string]interface{}) (string, error) {
|
||||
if len(row) == 0 {
|
||||
return "", nil
|
||||
}
|
||||
cols := make([]string, 0, len(row))
|
||||
for k := range row {
|
||||
if strings.TrimSpace(k) == "" {
|
||||
continue
|
||||
}
|
||||
cols = append(cols, k)
|
||||
}
|
||||
if len(cols) == 0 {
|
||||
return "", nil
|
||||
}
|
||||
sort.Strings(cols)
|
||||
quotedCols := make([]string, 0, len(cols))
|
||||
values := make([]string, 0, len(cols))
|
||||
for _, col := range cols {
|
||||
quotedCols = append(quotedCols, quoteClickHouseIdentifier(col))
|
||||
values = append(values, clickHouseLiteral(row[col]))
|
||||
}
|
||||
return fmt.Sprintf("INSERT INTO %s (%s) VALUES (%s)", qualifiedTable, strings.Join(quotedCols, ", "), strings.Join(values, ", ")), nil
|
||||
}
|
||||
|
||||
func buildClickHouseAssignments(values map[string]interface{}) string {
|
||||
if len(values) == 0 {
|
||||
return ""
|
||||
}
|
||||
cols := make([]string, 0, len(values))
|
||||
for k := range values {
|
||||
if strings.TrimSpace(k) == "" {
|
||||
continue
|
||||
}
|
||||
cols = append(cols, k)
|
||||
}
|
||||
sort.Strings(cols)
|
||||
parts := make([]string, 0, len(cols))
|
||||
for _, col := range cols {
|
||||
parts = append(parts, fmt.Sprintf("%s = %s", quoteClickHouseIdentifier(col), clickHouseLiteral(values[col])))
|
||||
}
|
||||
return strings.Join(parts, ", ")
|
||||
}
|
||||
|
||||
func buildClickHouseWhereClause(keys map[string]interface{}) string {
|
||||
if len(keys) == 0 {
|
||||
return ""
|
||||
}
|
||||
cols := make([]string, 0, len(keys))
|
||||
for k := range keys {
|
||||
if strings.TrimSpace(k) == "" {
|
||||
continue
|
||||
}
|
||||
cols = append(cols, k)
|
||||
}
|
||||
sort.Strings(cols)
|
||||
parts := make([]string, 0, len(cols))
|
||||
for _, col := range cols {
|
||||
parts = append(parts, fmt.Sprintf("%s = %s", quoteClickHouseIdentifier(col), clickHouseLiteral(keys[col])))
|
||||
}
|
||||
return strings.Join(parts, " AND ")
|
||||
}
|
||||
|
||||
func clickHouseLiteral(value interface{}) string {
|
||||
switch val := value.(type) {
|
||||
case nil:
|
||||
return "NULL"
|
||||
case bool:
|
||||
if val {
|
||||
return "1"
|
||||
}
|
||||
return "0"
|
||||
case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, float32, float64:
|
||||
return fmt.Sprintf("%v", val)
|
||||
case time.Time:
|
||||
return fmt.Sprintf("'%s'", val.Format("2006-01-02 15:04:05"))
|
||||
case []byte:
|
||||
return fmt.Sprintf("'%s'", strings.ReplaceAll(string(val), "'", "''"))
|
||||
default:
|
||||
return fmt.Sprintf("'%s'", strings.ReplaceAll(fmt.Sprintf("%v", val), "'", "''"))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -250,12 +250,22 @@ func (m *MariaDB) GetIndexes(dbName, tableName string) ([]connection.IndexDefini
|
||||
}
|
||||
}
|
||||
|
||||
subPart := 0
|
||||
if val, ok := row["Sub_part"]; ok && val != nil {
|
||||
if f, ok := val.(float64); ok {
|
||||
subPart = int(f)
|
||||
} else if i, ok := val.(int64); ok {
|
||||
subPart = int(i)
|
||||
}
|
||||
}
|
||||
|
||||
idx := connection.IndexDefinition{
|
||||
Name: fmt.Sprintf("%v", row["Key_name"]),
|
||||
ColumnName: fmt.Sprintf("%v", row["Column_name"]),
|
||||
NonUnique: nonUnique,
|
||||
SeqInIndex: seq,
|
||||
IndexType: fmt.Sprintf("%v", row["Index_type"]),
|
||||
SubPart: subPart,
|
||||
}
|
||||
indexes = append(indexes, idx)
|
||||
}
|
||||
@@ -323,7 +333,7 @@ func (m *MariaDB) ApplyChanges(tableName string, changes connection.ChangeSet) e
|
||||
var args []interface{}
|
||||
for k, v := range pk {
|
||||
wheres = append(wheres, fmt.Sprintf("`%s` = ?", k))
|
||||
args = append(args, normalizeMySQLDateTimeValue(v))
|
||||
args = append(args, normalizeMySQLComplexValue(normalizeMySQLDateTimeValue(v)))
|
||||
}
|
||||
if len(wheres) == 0 {
|
||||
continue
|
||||
@@ -341,7 +351,7 @@ func (m *MariaDB) ApplyChanges(tableName string, changes connection.ChangeSet) e
|
||||
|
||||
for k, v := range update.Values {
|
||||
sets = append(sets, fmt.Sprintf("`%s` = ?", k))
|
||||
args = append(args, normalizeMySQLDateTimeValue(v))
|
||||
args = append(args, normalizeMySQLComplexValue(normalizeMySQLDateTimeValue(v)))
|
||||
}
|
||||
|
||||
if len(sets) == 0 {
|
||||
@@ -351,7 +361,7 @@ func (m *MariaDB) ApplyChanges(tableName string, changes connection.ChangeSet) e
|
||||
var wheres []string
|
||||
for k, v := range update.Keys {
|
||||
wheres = append(wheres, fmt.Sprintf("`%s` = ?", k))
|
||||
args = append(args, normalizeMySQLDateTimeValue(v))
|
||||
args = append(args, normalizeMySQLComplexValue(normalizeMySQLDateTimeValue(v)))
|
||||
}
|
||||
|
||||
if len(wheres) == 0 {
|
||||
@@ -373,7 +383,7 @@ func (m *MariaDB) ApplyChanges(tableName string, changes connection.ChangeSet) e
|
||||
for k, v := range row {
|
||||
cols = append(cols, fmt.Sprintf("`%s`", k))
|
||||
placeholders = append(placeholders, "?")
|
||||
args = append(args, normalizeMySQLDateTimeValue(v))
|
||||
args = append(args, normalizeMySQLComplexValue(normalizeMySQLDateTimeValue(v)))
|
||||
}
|
||||
|
||||
if len(cols) == 0 {
|
||||
|
||||
@@ -3,6 +3,7 @@ package db
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strconv"
|
||||
@@ -441,12 +442,22 @@ func (m *MySQLDB) GetIndexes(dbName, tableName string) ([]connection.IndexDefini
|
||||
}
|
||||
}
|
||||
|
||||
subPart := 0
|
||||
if val, ok := row["Sub_part"]; ok && val != nil {
|
||||
if f, ok := val.(float64); ok {
|
||||
subPart = int(f)
|
||||
} else if i, ok := val.(int64); ok {
|
||||
subPart = int(i)
|
||||
}
|
||||
}
|
||||
|
||||
idx := connection.IndexDefinition{
|
||||
Name: fmt.Sprintf("%v", row["Key_name"]),
|
||||
ColumnName: fmt.Sprintf("%v", row["Column_name"]),
|
||||
NonUnique: nonUnique,
|
||||
SeqInIndex: seq,
|
||||
IndexType: fmt.Sprintf("%v", row["Index_type"]),
|
||||
SubPart: subPart,
|
||||
}
|
||||
indexes = append(indexes, idx)
|
||||
}
|
||||
@@ -606,6 +617,18 @@ func (m *MySQLDB) ApplyChanges(tableName string, changes connection.ChangeSet) e
|
||||
return tx.Commit()
|
||||
}
|
||||
|
||||
func normalizeMySQLComplexValue(value interface{}) interface{} {
|
||||
switch v := value.(type) {
|
||||
case map[string]interface{}, []interface{}:
|
||||
if data, err := json.Marshal(v); err == nil {
|
||||
return string(data)
|
||||
}
|
||||
return fmt.Sprintf("%v", value)
|
||||
default:
|
||||
return value
|
||||
}
|
||||
}
|
||||
|
||||
func normalizeMySQLDateTimeValue(value interface{}) interface{} {
|
||||
text, ok := value.(string)
|
||||
if !ok {
|
||||
@@ -670,7 +693,7 @@ func (m *MySQLDB) loadColumnTypeMap(tableName string) map[string]string {
|
||||
func normalizeMySQLValueForInsert(columnName string, value interface{}, columnTypeMap map[string]string) (interface{}, bool) {
|
||||
columnType := strings.ToLower(strings.TrimSpace(columnTypeMap[strings.ToLower(strings.TrimSpace(columnName))]))
|
||||
if !isMySQLTemporalColumnType(columnType) {
|
||||
return value, false
|
||||
return normalizeMySQLComplexValue(value), false
|
||||
}
|
||||
text, ok := value.(string)
|
||||
if ok && strings.TrimSpace(text) == "" {
|
||||
|
||||
168
internal/db/tdengine_applychanges_test.go
Normal file
168
internal/db/tdengine_applychanges_test.go
Normal file
@@ -0,0 +1,168 @@
|
||||
//go:build gonavi_full_drivers || gonavi_tdengine_driver
|
||||
|
||||
package db
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"database/sql/driver"
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"GoNavi-Wails/internal/connection"
|
||||
)
|
||||
|
||||
const tdengineRecordingDriverName = "gonavi_tdengine_recording"
|
||||
|
||||
var (
|
||||
registerTDengineRecordingDriverOnce sync.Once
|
||||
tdengineRecordingDriverMu sync.Mutex
|
||||
tdengineRecordingDriverSeq int
|
||||
tdengineRecordingDriverStates = map[string]*tdengineRecordingState{}
|
||||
)
|
||||
|
||||
type tdengineRecordingState struct {
|
||||
mu sync.Mutex
|
||||
queries []string
|
||||
execErr error
|
||||
}
|
||||
|
||||
func (s *tdengineRecordingState) snapshotQueries() []string {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
queries := make([]string, len(s.queries))
|
||||
copy(queries, s.queries)
|
||||
return queries
|
||||
}
|
||||
|
||||
type tdengineRecordingDriver struct{}
|
||||
|
||||
func (tdengineRecordingDriver) Open(name string) (driver.Conn, error) {
|
||||
tdengineRecordingDriverMu.Lock()
|
||||
state := tdengineRecordingDriverStates[name]
|
||||
tdengineRecordingDriverMu.Unlock()
|
||||
if state == nil {
|
||||
return nil, fmt.Errorf("recording state not found: %s", name)
|
||||
}
|
||||
return &tdengineRecordingConn{state: state}, nil
|
||||
}
|
||||
|
||||
type tdengineRecordingConn struct {
|
||||
state *tdengineRecordingState
|
||||
}
|
||||
|
||||
func (c *tdengineRecordingConn) Prepare(query string) (driver.Stmt, error) {
|
||||
return nil, fmt.Errorf("prepare not supported in tdengine recording driver: %s", query)
|
||||
}
|
||||
|
||||
func (c *tdengineRecordingConn) Close() error { return nil }
|
||||
|
||||
func (c *tdengineRecordingConn) Begin() (driver.Tx, error) {
|
||||
return nil, fmt.Errorf("transactions not supported in tdengine recording driver")
|
||||
}
|
||||
|
||||
func (c *tdengineRecordingConn) ExecContext(_ context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
|
||||
if len(args) > 0 {
|
||||
return nil, fmt.Errorf("unexpected exec args: %d", len(args))
|
||||
}
|
||||
c.state.mu.Lock()
|
||||
defer c.state.mu.Unlock()
|
||||
if c.state.execErr != nil {
|
||||
return nil, c.state.execErr
|
||||
}
|
||||
c.state.queries = append(c.state.queries, query)
|
||||
return driver.RowsAffected(1), nil
|
||||
}
|
||||
|
||||
var _ driver.ExecerContext = (*tdengineRecordingConn)(nil)
|
||||
|
||||
func openTDengineRecordingDB(t *testing.T) (*sql.DB, *tdengineRecordingState) {
|
||||
t.Helper()
|
||||
registerTDengineRecordingDriverOnce.Do(func() {
|
||||
sql.Register(tdengineRecordingDriverName, tdengineRecordingDriver{})
|
||||
})
|
||||
|
||||
tdengineRecordingDriverMu.Lock()
|
||||
tdengineRecordingDriverSeq++
|
||||
dsn := fmt.Sprintf("tdengine-recording-%d", tdengineRecordingDriverSeq)
|
||||
state := &tdengineRecordingState{}
|
||||
tdengineRecordingDriverStates[dsn] = state
|
||||
tdengineRecordingDriverMu.Unlock()
|
||||
|
||||
dbConn, err := sql.Open(tdengineRecordingDriverName, dsn)
|
||||
if err != nil {
|
||||
t.Fatalf("打开 recording db 失败: %v", err)
|
||||
}
|
||||
|
||||
t.Cleanup(func() {
|
||||
_ = dbConn.Close()
|
||||
tdengineRecordingDriverMu.Lock()
|
||||
delete(tdengineRecordingDriverStates, dsn)
|
||||
tdengineRecordingDriverMu.Unlock()
|
||||
})
|
||||
|
||||
return dbConn, state
|
||||
}
|
||||
|
||||
func TestTDengineApplyChanges_InsertsIntoQualifiedTable(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
dbConn, state := openTDengineRecordingDB(t)
|
||||
td := &TDengineDB{conn: dbConn}
|
||||
|
||||
changes := connection.ChangeSet{
|
||||
Inserts: []map[string]interface{}{
|
||||
{
|
||||
"ts": "2026-03-09 10:00:00",
|
||||
"value": 12.5,
|
||||
"device": "sensor-a",
|
||||
"enabled": true,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
if err := td.ApplyChanges("analytics.metrics", changes); err != nil {
|
||||
t.Fatalf("ApplyChanges 返回错误: %v", err)
|
||||
}
|
||||
|
||||
queries := state.snapshotQueries()
|
||||
if len(queries) != 1 {
|
||||
t.Fatalf("期望执行 1 条 SQL,实际 %d 条: %#v", len(queries), queries)
|
||||
}
|
||||
|
||||
want := "INSERT INTO `analytics`.`metrics` (`device`, `enabled`, `ts`, `value`) VALUES ('sensor-a', 1, '2026-03-09 10:00:00', 12.5)"
|
||||
if queries[0] != want {
|
||||
t.Fatalf("插入 SQL 不符合预期\nwant: %s\n got: %s", want, queries[0])
|
||||
}
|
||||
}
|
||||
|
||||
func TestTDengineApplyChanges_RejectsMixedUpdatesWithoutPartialWrite(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
dbConn, state := openTDengineRecordingDB(t)
|
||||
td := &TDengineDB{conn: dbConn}
|
||||
|
||||
changes := connection.ChangeSet{
|
||||
Inserts: []map[string]interface{}{{
|
||||
"ts": "2026-03-09 10:00:00",
|
||||
"value": 12.5,
|
||||
}},
|
||||
Updates: []connection.UpdateRow{{
|
||||
Keys: map[string]interface{}{"ts": "2026-03-09 10:00:00"},
|
||||
Values: map[string]interface{}{"value": 18.8},
|
||||
}},
|
||||
}
|
||||
|
||||
err := td.ApplyChanges("metrics", changes)
|
||||
if err == nil {
|
||||
t.Fatalf("期望 mixed changes 被拒绝")
|
||||
}
|
||||
if !strings.Contains(err.Error(), "UPDATE/DELETE") {
|
||||
t.Fatalf("错误信息未说明限制边界: %v", err)
|
||||
}
|
||||
if queries := state.snapshotQueries(); len(queries) != 0 {
|
||||
t.Fatalf("期望拒绝 mixed changes 时不执行任何 SQL,实际=%#v", queries)
|
||||
}
|
||||
}
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"net"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -362,6 +363,83 @@ func (t *TDengineDB) GetTriggers(dbName, tableName string) ([]connection.Trigger
|
||||
return []connection.TriggerDefinition{}, nil
|
||||
}
|
||||
|
||||
func (t *TDengineDB) ApplyChanges(tableName string, changes connection.ChangeSet) error {
|
||||
if t.conn == nil {
|
||||
return fmt.Errorf("connection not open")
|
||||
}
|
||||
if strings.TrimSpace(tableName) == "" {
|
||||
return fmt.Errorf("table name required")
|
||||
}
|
||||
if len(changes.Updates) > 0 || len(changes.Deletes) > 0 {
|
||||
return fmt.Errorf("TDengine 目标端当前仅支持 INSERT 写入,暂不支持 UPDATE/DELETE 差异同步,请改用仅插入或全量覆盖模式")
|
||||
}
|
||||
|
||||
qualifiedTable := quoteTDengineTable("", tableName)
|
||||
for _, row := range changes.Inserts {
|
||||
query, err := buildTDengineInsertSQL(qualifiedTable, row)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if query == "" {
|
||||
continue
|
||||
}
|
||||
if _, err := t.conn.Exec(query); err != nil {
|
||||
return fmt.Errorf("insert error: %v; sql=%s", err, query)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func buildTDengineInsertSQL(qualifiedTable string, row map[string]interface{}) (string, error) {
|
||||
if strings.TrimSpace(qualifiedTable) == "" {
|
||||
return "", fmt.Errorf("qualified table required")
|
||||
}
|
||||
if len(row) == 0 {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
cols := make([]string, 0, len(row))
|
||||
for key := range row {
|
||||
if strings.TrimSpace(key) == "" {
|
||||
continue
|
||||
}
|
||||
cols = append(cols, key)
|
||||
}
|
||||
if len(cols) == 0 {
|
||||
return "", nil
|
||||
}
|
||||
sort.Strings(cols)
|
||||
|
||||
quotedCols := make([]string, 0, len(cols))
|
||||
values := make([]string, 0, len(cols))
|
||||
for _, col := range cols {
|
||||
quotedCols = append(quotedCols, fmt.Sprintf("`%s`", escapeBacktickIdent(col)))
|
||||
values = append(values, tdengineLiteral(row[col]))
|
||||
}
|
||||
|
||||
return fmt.Sprintf("INSERT INTO %s (%s) VALUES (%s)", qualifiedTable, strings.Join(quotedCols, ", "), strings.Join(values, ", ")), nil
|
||||
}
|
||||
|
||||
func tdengineLiteral(value interface{}) string {
|
||||
switch val := value.(type) {
|
||||
case nil:
|
||||
return "NULL"
|
||||
case bool:
|
||||
if val {
|
||||
return "1"
|
||||
}
|
||||
return "0"
|
||||
case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, float32, float64:
|
||||
return fmt.Sprintf("%v", val)
|
||||
case time.Time:
|
||||
return fmt.Sprintf("'%s'", val.Format("2006-01-02 15:04:05"))
|
||||
case []byte:
|
||||
return fmt.Sprintf("'%s'", strings.ReplaceAll(string(val), "'", "''"))
|
||||
default:
|
||||
return fmt.Sprintf("'%s'", strings.ReplaceAll(fmt.Sprintf("%v", val), "'", "''"))
|
||||
}
|
||||
}
|
||||
|
||||
func getValueFromRow(row map[string]interface{}, keys ...string) (interface{}, bool) {
|
||||
if len(row) == 0 {
|
||||
return nil, false
|
||||
|
||||
@@ -1,22 +1,27 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"GoNavi-Wails/internal/db"
|
||||
"GoNavi-Wails/internal/logger"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type TableDiffSummary struct {
|
||||
Table string `json:"table"`
|
||||
PKColumn string `json:"pkColumn,omitempty"`
|
||||
CanSync bool `json:"canSync"`
|
||||
Inserts int `json:"inserts"`
|
||||
Updates int `json:"updates"`
|
||||
Deletes int `json:"deletes"`
|
||||
Same int `json:"same"`
|
||||
Message string `json:"message,omitempty"`
|
||||
HasSchema bool `json:"hasSchema,omitempty"`
|
||||
Table string `json:"table"`
|
||||
PKColumn string `json:"pkColumn,omitempty"`
|
||||
CanSync bool `json:"canSync"`
|
||||
Inserts int `json:"inserts"`
|
||||
Updates int `json:"updates"`
|
||||
Deletes int `json:"deletes"`
|
||||
Same int `json:"same"`
|
||||
Message string `json:"message,omitempty"`
|
||||
HasSchema bool `json:"hasSchema,omitempty"`
|
||||
TargetTableExists bool `json:"targetTableExists,omitempty"`
|
||||
PlannedAction string `json:"plannedAction,omitempty"`
|
||||
Warnings []string `json:"warnings,omitempty"`
|
||||
UnsupportedObjects []string `json:"unsupportedObjects,omitempty"`
|
||||
IndexesToCreate int `json:"indexesToCreate,omitempty"`
|
||||
IndexesSkipped int `json:"indexesSkipped,omitempty"`
|
||||
}
|
||||
|
||||
type SyncAnalyzeResult struct {
|
||||
@@ -27,6 +32,12 @@ type SyncAnalyzeResult struct {
|
||||
|
||||
func (s *SyncEngine) Analyze(config SyncConfig) SyncAnalyzeResult {
|
||||
result := SyncAnalyzeResult{Success: true, Tables: []TableDiffSummary{}}
|
||||
if isRedisToMongoKeyspacePair(config) {
|
||||
return s.analyzeRedisToMongo(config)
|
||||
}
|
||||
if isMongoToRedisKeyspacePair(config) {
|
||||
return s.analyzeMongoToRedis(config)
|
||||
}
|
||||
|
||||
contentRaw := strings.ToLower(strings.TrimSpace(config.Content))
|
||||
syncSchema := false
|
||||
@@ -48,25 +59,23 @@ func (s *SyncEngine) Analyze(config SyncConfig) SyncAnalyzeResult {
|
||||
totalTables := len(config.Tables)
|
||||
s.progress(config.JobID, 0, totalTables, "", "差异分析开始")
|
||||
|
||||
sourceDB, err := db.NewDatabase(config.SourceConfig.Type)
|
||||
sourceDB, err := newSyncDatabase(config.SourceConfig.Type)
|
||||
if err != nil {
|
||||
logger.Error(err, "初始化源数据库驱动失败:类型=%s", config.SourceConfig.Type)
|
||||
return SyncAnalyzeResult{Success: false, Message: "初始化源数据库驱动失败: " + err.Error()}
|
||||
}
|
||||
targetDB, err := db.NewDatabase(config.TargetConfig.Type)
|
||||
targetDB, err := newSyncDatabase(config.TargetConfig.Type)
|
||||
if err != nil {
|
||||
logger.Error(err, "初始化目标数据库驱动失败:类型=%s", config.TargetConfig.Type)
|
||||
return SyncAnalyzeResult{Success: false, Message: "初始化目标数据库驱动失败: " + err.Error()}
|
||||
}
|
||||
|
||||
// Connect Source
|
||||
if err := sourceDB.Connect(config.SourceConfig); err != nil {
|
||||
logger.Error(err, "源数据库连接失败:%s", formatConnSummaryForSync(config.SourceConfig))
|
||||
return SyncAnalyzeResult{Success: false, Message: "源数据库连接失败: " + err.Error()}
|
||||
}
|
||||
defer sourceDB.Close()
|
||||
|
||||
// Connect Target
|
||||
if err := targetDB.Connect(config.TargetConfig); err != nil {
|
||||
logger.Error(err, "目标数据库连接失败:%s", formatConnSummaryForSync(config.TargetConfig))
|
||||
return SyncAnalyzeResult{Success: false, Message: "目标数据库连接失败: " + err.Error()}
|
||||
@@ -88,51 +97,76 @@ func (s *SyncEngine) Analyze(config SyncConfig) SyncAnalyzeResult {
|
||||
HasSchema: syncSchema,
|
||||
}
|
||||
|
||||
sourceSchema, sourceTable := normalizeSchemaAndTable(config.SourceConfig.Type, config.SourceConfig.Database, tableName)
|
||||
targetSchema, targetTable := normalizeSchemaAndTable(config.TargetConfig.Type, config.TargetConfig.Database, tableName)
|
||||
sourceQueryTable := qualifiedNameForQuery(config.SourceConfig.Type, sourceSchema, sourceTable, tableName)
|
||||
targetQueryTable := qualifiedNameForQuery(config.TargetConfig.Type, targetSchema, targetTable, tableName)
|
||||
|
||||
cols, err := sourceDB.GetColumns(sourceSchema, sourceTable)
|
||||
plan, cols, _, err := buildSchemaMigrationPlan(config, tableName, sourceDB, targetDB)
|
||||
if err != nil {
|
||||
summary.Message = "获取源表字段失败: " + err.Error()
|
||||
summary.Message = err.Error()
|
||||
result.Tables = append(result.Tables, summary)
|
||||
return
|
||||
}
|
||||
summary.TargetTableExists = plan.TargetTableExists
|
||||
summary.PlannedAction = plan.PlannedAction
|
||||
summary.Warnings = append(summary.Warnings, plan.Warnings...)
|
||||
summary.UnsupportedObjects = append(summary.UnsupportedObjects, plan.UnsupportedObjects...)
|
||||
summary.IndexesToCreate = plan.IndexesToCreate
|
||||
summary.IndexesSkipped = plan.IndexesSkipped
|
||||
|
||||
if !plan.TargetTableExists && !plan.AutoCreate {
|
||||
summary.Message = firstNonEmpty(plan.PlannedAction, "目标表不存在,无法执行同步")
|
||||
result.Tables = append(result.Tables, summary)
|
||||
return
|
||||
}
|
||||
|
||||
if !syncData {
|
||||
summary.CanSync = true
|
||||
summary.Message = "仅同步结构,未执行数据差异分析"
|
||||
summary.Message = firstNonEmpty(plan.PlannedAction, "仅同步结构,未执行数据差异分析")
|
||||
result.Tables = append(result.Tables, summary)
|
||||
return
|
||||
}
|
||||
|
||||
tableMode := normalizeSyncMode(config.Mode)
|
||||
pkCols := make([]string, 0, 2)
|
||||
for _, c := range cols {
|
||||
if c.Key == "PRI" || c.Key == "PK" {
|
||||
pkCols = append(pkCols, c.Name)
|
||||
}
|
||||
}
|
||||
if len(pkCols) == 0 {
|
||||
summary.Message = "无主键,不支持数据对比/同步"
|
||||
result.Tables = append(result.Tables, summary)
|
||||
return
|
||||
}
|
||||
if len(pkCols) > 1 {
|
||||
summary.Message = fmt.Sprintf("复合主键(%s),暂不支持数据对比/同步", strings.Join(pkCols, ","))
|
||||
result.Tables = append(result.Tables, summary)
|
||||
return
|
||||
}
|
||||
summary.PKColumn = pkCols[0]
|
||||
|
||||
// Query data for diff
|
||||
sourceRows, _, err := sourceDB.Query(fmt.Sprintf("SELECT * FROM %s", quoteQualifiedIdentByType(config.SourceConfig.Type, sourceQueryTable)))
|
||||
sourceRows, _, err := sourceDB.Query(fmt.Sprintf("SELECT * FROM %s", quoteQualifiedIdentByType(config.SourceConfig.Type, plan.SourceQueryTable)))
|
||||
if err != nil {
|
||||
summary.Message = "读取源表失败: " + err.Error()
|
||||
result.Tables = append(result.Tables, summary)
|
||||
return
|
||||
}
|
||||
targetRows, _, err := targetDB.Query(fmt.Sprintf("SELECT * FROM %s", quoteQualifiedIdentByType(config.TargetConfig.Type, targetQueryTable)))
|
||||
|
||||
if !plan.TargetTableExists && plan.AutoCreate {
|
||||
summary.CanSync = true
|
||||
summary.Inserts = len(sourceRows)
|
||||
summary.Message = firstNonEmpty(plan.PlannedAction, "目标表不存在,执行时将自动建表并导入全部源数据")
|
||||
result.Tables = append(result.Tables, summary)
|
||||
return
|
||||
}
|
||||
|
||||
if tableMode != "insert_update" {
|
||||
summary.CanSync = true
|
||||
summary.Inserts = len(sourceRows)
|
||||
summary.Message = firstNonEmpty(plan.PlannedAction, "当前模式无需差异对比,将按源表数据执行导入")
|
||||
result.Tables = append(result.Tables, summary)
|
||||
return
|
||||
}
|
||||
|
||||
if len(pkCols) == 0 {
|
||||
summary.Message = "无主键,不支持差异对比同步;如需直接导入请使用仅插入或全量覆盖模式"
|
||||
result.Tables = append(result.Tables, summary)
|
||||
return
|
||||
}
|
||||
if len(pkCols) > 1 {
|
||||
summary.Message = fmt.Sprintf("复合主键(%s),暂不支持差异对比同步", strings.Join(pkCols, ","))
|
||||
result.Tables = append(result.Tables, summary)
|
||||
return
|
||||
}
|
||||
summary.PKColumn = pkCols[0]
|
||||
|
||||
targetRows, _, err := targetDB.Query(fmt.Sprintf("SELECT * FROM %s", quoteQualifiedIdentByType(config.TargetConfig.Type, plan.TargetQueryTable)))
|
||||
if err != nil {
|
||||
summary.Message = "读取目标表失败: " + err.Error()
|
||||
result.Tables = append(result.Tables, summary)
|
||||
@@ -188,6 +222,9 @@ func (s *SyncEngine) Analyze(config SyncConfig) SyncAnalyzeResult {
|
||||
}
|
||||
|
||||
summary.CanSync = true
|
||||
if strings.TrimSpace(summary.Message) == "" {
|
||||
summary.Message = firstNonEmpty(plan.PlannedAction, "差异分析完成")
|
||||
}
|
||||
result.Tables = append(result.Tables, summary)
|
||||
}()
|
||||
}
|
||||
@@ -196,3 +233,12 @@ func (s *SyncEngine) Analyze(config SyncConfig) SyncAnalyzeResult {
|
||||
result.Message = fmt.Sprintf("已完成 %d 张表的差异分析", len(result.Tables))
|
||||
return result
|
||||
}
|
||||
|
||||
func firstNonEmpty(values ...string) string {
|
||||
for _, value := range values {
|
||||
if strings.TrimSpace(value) != "" {
|
||||
return value
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
741
internal/sync/migration_clickhouse.go
Normal file
741
internal/sync/migration_clickhouse.go
Normal file
@@ -0,0 +1,741 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"GoNavi-Wails/internal/connection"
|
||||
"GoNavi-Wails/internal/db"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func buildMySQLToClickHousePlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
|
||||
plan := SchemaMigrationPlan{}
|
||||
plan.SourceSchema, plan.SourceTable = normalizeSchemaAndTable(config.SourceConfig.Type, config.SourceConfig.Database, tableName)
|
||||
plan.TargetSchema, plan.TargetTable = normalizeSchemaAndTable(config.TargetConfig.Type, config.TargetConfig.Database, tableName)
|
||||
plan.SourceQueryTable = qualifiedNameForQuery(config.SourceConfig.Type, plan.SourceSchema, plan.SourceTable, tableName)
|
||||
plan.TargetQueryTable = qualifiedNameForQuery(config.TargetConfig.Type, plan.TargetSchema, plan.TargetTable, tableName)
|
||||
plan.PlannedAction = "使用已有目标表导入"
|
||||
|
||||
sourceCols, sourceExists, err := inspectTableColumns(sourceDB, plan.SourceSchema, plan.SourceTable)
|
||||
if err != nil {
|
||||
return plan, nil, nil, fmt.Errorf("获取源表字段失败: %w", err)
|
||||
}
|
||||
if !sourceExists {
|
||||
return plan, nil, nil, fmt.Errorf("源表不存在或无列定义: %s", tableName)
|
||||
}
|
||||
|
||||
targetCols, targetExists, err := inspectTableColumns(targetDB, plan.TargetSchema, plan.TargetTable)
|
||||
if err != nil {
|
||||
return plan, sourceCols, nil, fmt.Errorf("获取目标表字段失败: %w", err)
|
||||
}
|
||||
plan.TargetTableExists = targetExists
|
||||
|
||||
strategy := normalizeTargetTableStrategy(config.TargetTableStrategy)
|
||||
if targetExists {
|
||||
missing := diffMissingColumnNames(sourceCols, targetCols)
|
||||
if len(missing) > 0 {
|
||||
plan.Warnings = append(plan.Warnings, fmt.Sprintf("目标表缺失字段 %d 个:%s", len(missing), strings.Join(missing, ", ")))
|
||||
}
|
||||
if config.AutoAddColumns {
|
||||
addSQL, addWarnings := buildMySQLToClickHouseAddColumnSQL(plan.TargetQueryTable, sourceCols, targetCols)
|
||||
plan.PreDataSQL = append(plan.PreDataSQL, addSQL...)
|
||||
plan.Warnings = append(plan.Warnings, addWarnings...)
|
||||
if len(addSQL) > 0 {
|
||||
plan.PlannedAction = fmt.Sprintf("补齐缺失字段(%d)后导入", len(addSQL))
|
||||
}
|
||||
}
|
||||
plan.Warnings = append(plan.Warnings, "ClickHouse 目标端建议优先使用仅插入或全量覆盖;更新/删除语义与传统关系型存在差异")
|
||||
return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
|
||||
}
|
||||
|
||||
switch strategy {
|
||||
case "existing_only":
|
||||
plan.PlannedAction = "目标表不存在,需先手工创建"
|
||||
plan.Warnings = append(plan.Warnings, "当前策略要求目标表已存在,执行时不会自动建表")
|
||||
return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
|
||||
case "smart", "auto_create_if_missing":
|
||||
plan.AutoCreate = true
|
||||
plan.PlannedAction = "目标表不存在,将自动建表后导入"
|
||||
createSQL, warnings, unsupported := buildMySQLToClickHouseCreateTableSQL(plan.TargetQueryTable, sourceCols)
|
||||
plan.CreateTableSQL = createSQL
|
||||
plan.Warnings = append(plan.Warnings, warnings...)
|
||||
plan.UnsupportedObjects = append(plan.UnsupportedObjects, unsupported...)
|
||||
return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
|
||||
default:
|
||||
return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
|
||||
}
|
||||
}
|
||||
|
||||
// buildPGLikeToClickHousePlan builds the schema-migration plan for copying a
// PG-like (PostgreSQL-family) table into ClickHouse.
//
// Flow: normalize source/target schema+table names, inspect both column sets,
// then either (a) the target table exists — warn about missing columns and,
// when config.AutoAddColumns is set, emit ADD COLUMN pre-data SQL — or
// (b) the target is missing — decide via config.TargetTableStrategy whether
// to auto-create it. A missing target table is never an error here; only
// inspection failures and a missing/empty source table return errors.
// Returns the de-duplicated plan, source columns, and target columns
// (target columns are nil when target inspection failed).
func buildPGLikeToClickHousePlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
	plan := SchemaMigrationPlan{}
	sourceType := resolveMigrationDBType(config.SourceConfig)
	targetType := resolveMigrationDBType(config.TargetConfig)
	plan.SourceSchema, plan.SourceTable = normalizeSchemaAndTable(sourceType, config.SourceConfig.Database, tableName)
	plan.TargetSchema, plan.TargetTable = normalizeSchemaAndTable(targetType, config.TargetConfig.Database, tableName)
	plan.SourceQueryTable = qualifiedNameForQuery(sourceType, plan.SourceSchema, plan.SourceTable, tableName)
	plan.TargetQueryTable = qualifiedNameForQuery(targetType, plan.TargetSchema, plan.TargetTable, tableName)
	// Default action; refined below once target-table existence is known.
	plan.PlannedAction = "使用已有目标表导入"

	sourceCols, sourceExists, err := inspectTableColumns(sourceDB, plan.SourceSchema, plan.SourceTable)
	if err != nil {
		return plan, nil, nil, fmt.Errorf("获取源表字段失败: %w", err)
	}
	if !sourceExists {
		return plan, nil, nil, fmt.Errorf("源表不存在或无列定义: %s", tableName)
	}

	targetCols, targetExists, err := inspectTableColumns(targetDB, plan.TargetSchema, plan.TargetTable)
	if err != nil {
		return plan, sourceCols, nil, fmt.Errorf("获取目标表字段失败: %w", err)
	}
	plan.TargetTableExists = targetExists

	strategy := normalizeTargetTableStrategy(config.TargetTableStrategy)
	if targetExists {
		// Target already exists: report (and optionally repair) columns it lacks.
		missing := diffMissingColumnNames(sourceCols, targetCols)
		if len(missing) > 0 {
			plan.Warnings = append(plan.Warnings, fmt.Sprintf("目标表缺失字段 %d 个:%s", len(missing), strings.Join(missing, ", ")))
		}
		if config.AutoAddColumns {
			addSQL, addWarnings := buildPGLikeToClickHouseAddColumnSQL(plan.TargetQueryTable, sourceCols, targetCols)
			plan.PreDataSQL = append(plan.PreDataSQL, addSQL...)
			plan.Warnings = append(plan.Warnings, addWarnings...)
			if len(addSQL) > 0 {
				plan.PlannedAction = fmt.Sprintf("补齐缺失字段(%d)后导入", len(addSQL))
			}
		}
		plan.Warnings = append(plan.Warnings, "ClickHouse 目标端建议优先使用仅插入或全量覆盖;更新/删除语义与传统关系型存在差异")
		return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
	}

	// Target table does not exist: behavior is strategy-driven.
	switch strategy {
	case "existing_only":
		plan.PlannedAction = "目标表不存在,需先手工创建"
		plan.Warnings = append(plan.Warnings, "当前策略要求目标表已存在,执行时不会自动建表")
		return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
	case "smart", "auto_create_if_missing":
		plan.AutoCreate = true
		plan.PlannedAction = "目标表不存在,将自动建表后导入"
		createSQL, warnings, unsupported := buildPGLikeToClickHouseCreateTableSQL(plan.TargetQueryTable, sourceCols)
		plan.CreateTableSQL = createSQL
		plan.Warnings = append(plan.Warnings, warnings...)
		plan.UnsupportedObjects = append(plan.UnsupportedObjects, unsupported...)
		return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
	default:
		// Unknown strategy: fall back to the default "use existing table" action.
		return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
	}
}
|
||||
|
||||
// buildClickHouseToMySQLPlan builds the schema-migration plan for copying a
// ClickHouse table into MySQL.
//
// Same flow as the other planners: normalize names, inspect both column sets,
// then branch on target existence — warn about (and optionally add) missing
// columns when the target exists, or decide auto-creation via
// config.TargetTableStrategy when it does not.
// NOTE(review): this planner reads config.SourceConfig.Type/TargetConfig.Type
// directly while sibling planners go through resolveMigrationDBType — confirm
// whether the ClickHouse→MySQL pairing intentionally skips that resolution.
func buildClickHouseToMySQLPlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
	plan := SchemaMigrationPlan{}
	plan.SourceSchema, plan.SourceTable = normalizeSchemaAndTable(config.SourceConfig.Type, config.SourceConfig.Database, tableName)
	plan.TargetSchema, plan.TargetTable = normalizeSchemaAndTable(config.TargetConfig.Type, config.TargetConfig.Database, tableName)
	plan.SourceQueryTable = qualifiedNameForQuery(config.SourceConfig.Type, plan.SourceSchema, plan.SourceTable, tableName)
	plan.TargetQueryTable = qualifiedNameForQuery(config.TargetConfig.Type, plan.TargetSchema, plan.TargetTable, tableName)
	// Default action; refined below once target-table existence is known.
	plan.PlannedAction = "使用已有目标表导入"

	sourceCols, sourceExists, err := inspectTableColumns(sourceDB, plan.SourceSchema, plan.SourceTable)
	if err != nil {
		return plan, nil, nil, fmt.Errorf("获取源表字段失败: %w", err)
	}
	if !sourceExists {
		return plan, nil, nil, fmt.Errorf("源表不存在或无列定义: %s", tableName)
	}

	targetCols, targetExists, err := inspectTableColumns(targetDB, plan.TargetSchema, plan.TargetTable)
	if err != nil {
		return plan, sourceCols, nil, fmt.Errorf("获取目标表字段失败: %w", err)
	}
	plan.TargetTableExists = targetExists

	strategy := normalizeTargetTableStrategy(config.TargetTableStrategy)
	if targetExists {
		// Target already exists: report (and optionally repair) columns it lacks.
		missing := diffMissingColumnNames(sourceCols, targetCols)
		if len(missing) > 0 {
			plan.Warnings = append(plan.Warnings, fmt.Sprintf("目标表缺失字段 %d 个:%s", len(missing), strings.Join(missing, ", ")))
		}
		if config.AutoAddColumns {
			addSQL, addWarnings := buildClickHouseToMySQLAddColumnSQL(plan.TargetQueryTable, sourceCols, targetCols)
			plan.PreDataSQL = append(plan.PreDataSQL, addSQL...)
			plan.Warnings = append(plan.Warnings, addWarnings...)
			if len(addSQL) > 0 {
				plan.PlannedAction = fmt.Sprintf("补齐缺失字段(%d)后导入", len(addSQL))
			}
		}
		plan.Warnings = append(plan.Warnings, "ClickHouse 源端索引/约束元数据有限,反向迁移将以字段和数据为主")
		return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
	}

	// Target table does not exist: behavior is strategy-driven.
	switch strategy {
	case "existing_only":
		plan.PlannedAction = "目标表不存在,需先手工创建"
		plan.Warnings = append(plan.Warnings, "当前策略要求目标表已存在,执行时不会自动建表")
		return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
	case "smart", "auto_create_if_missing":
		plan.AutoCreate = true
		plan.PlannedAction = "目标表不存在,将自动建表后导入"
		// Note: this builder returns no "unsupported objects" slice, unlike
		// the PG-like variants.
		createSQL, warnings := buildClickHouseToMySQLCreateTableSQL(plan.TargetQueryTable, sourceCols)
		plan.CreateTableSQL = createSQL
		plan.Warnings = append(plan.Warnings, warnings...)
		return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
	default:
		// Unknown strategy: fall back to the default "use existing table" action.
		return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
	}
}
|
||||
|
||||
// buildClickHouseToPGLikePlan builds the schema-migration plan for copying a
// ClickHouse table into a PG-like (PostgreSQL-family) target.
//
// Flow mirrors the other planners: normalize names, inspect both column sets,
// then branch on target existence — warn about (and optionally add) missing
// columns when the target exists, or decide auto-creation via
// config.TargetTableStrategy when it does not. Only inspection failures and
// a missing source table produce errors.
func buildClickHouseToPGLikePlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
	plan := SchemaMigrationPlan{}
	sourceType := resolveMigrationDBType(config.SourceConfig)
	targetType := resolveMigrationDBType(config.TargetConfig)
	plan.SourceSchema, plan.SourceTable = normalizeSchemaAndTable(sourceType, config.SourceConfig.Database, tableName)
	plan.TargetSchema, plan.TargetTable = normalizeSchemaAndTable(targetType, config.TargetConfig.Database, tableName)
	plan.SourceQueryTable = qualifiedNameForQuery(sourceType, plan.SourceSchema, plan.SourceTable, tableName)
	plan.TargetQueryTable = qualifiedNameForQuery(targetType, plan.TargetSchema, plan.TargetTable, tableName)
	// Default action; refined below once target-table existence is known.
	plan.PlannedAction = "使用已有目标表导入"

	sourceCols, sourceExists, err := inspectTableColumns(sourceDB, plan.SourceSchema, plan.SourceTable)
	if err != nil {
		return plan, nil, nil, fmt.Errorf("获取源表字段失败: %w", err)
	}
	if !sourceExists {
		return plan, nil, nil, fmt.Errorf("源表不存在或无列定义: %s", tableName)
	}

	targetCols, targetExists, err := inspectTableColumns(targetDB, plan.TargetSchema, plan.TargetTable)
	if err != nil {
		return plan, sourceCols, nil, fmt.Errorf("获取目标表字段失败: %w", err)
	}
	plan.TargetTableExists = targetExists

	strategy := normalizeTargetTableStrategy(config.TargetTableStrategy)
	if targetExists {
		// Target already exists: report (and optionally repair) columns it lacks.
		missing := diffMissingColumnNames(sourceCols, targetCols)
		if len(missing) > 0 {
			plan.Warnings = append(plan.Warnings, fmt.Sprintf("目标表缺失字段 %d 个:%s", len(missing), strings.Join(missing, ", ")))
		}
		if config.AutoAddColumns {
			addSQL, addWarnings := buildClickHouseToPGLikeAddColumnSQL(targetType, plan.TargetQueryTable, sourceCols, targetCols)
			plan.PreDataSQL = append(plan.PreDataSQL, addSQL...)
			plan.Warnings = append(plan.Warnings, addWarnings...)
			if len(addSQL) > 0 {
				plan.PlannedAction = fmt.Sprintf("补齐缺失字段(%d)后导入", len(addSQL))
			}
		}
		plan.Warnings = append(plan.Warnings, "ClickHouse 源端索引/约束元数据有限,反向迁移将以字段和数据为主")
		return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
	}

	// Target table does not exist: behavior is strategy-driven.
	switch strategy {
	case "existing_only":
		plan.PlannedAction = "目标表不存在,需先手工创建"
		plan.Warnings = append(plan.Warnings, "当前策略要求目标表已存在,执行时不会自动建表")
		return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
	case "smart", "auto_create_if_missing":
		plan.AutoCreate = true
		plan.PlannedAction = "目标表不存在,将自动建表后导入"
		createSQL, warnings, unsupported := buildClickHouseToPGLikeCreateTableSQL(targetType, plan.TargetQueryTable, sourceCols)
		plan.CreateTableSQL = createSQL
		plan.Warnings = append(plan.Warnings, warnings...)
		plan.UnsupportedObjects = append(plan.UnsupportedObjects, unsupported...)
		return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
	default:
		// Unknown strategy: fall back to the default "use existing table" action.
		return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
	}
}
|
||||
|
||||
func buildPGLikeToClickHouseAddColumnSQL(targetQueryTable string, sourceCols, targetCols []connection.ColumnDefinition) ([]string, []string) {
|
||||
targetSet := make(map[string]struct{}, len(targetCols))
|
||||
for _, col := range targetCols {
|
||||
key := strings.ToLower(strings.TrimSpace(col.Name))
|
||||
if key == "" {
|
||||
continue
|
||||
}
|
||||
targetSet[key] = struct{}{}
|
||||
}
|
||||
var sqlList []string
|
||||
var warnings []string
|
||||
for _, col := range sourceCols {
|
||||
key := strings.ToLower(strings.TrimSpace(col.Name))
|
||||
if key == "" {
|
||||
continue
|
||||
}
|
||||
if _, ok := targetSet[key]; ok {
|
||||
continue
|
||||
}
|
||||
colType, mapWarnings := mapPGLikeColumnToClickHouse(col)
|
||||
warnings = append(warnings, mapWarnings...)
|
||||
sqlList = append(sqlList, fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s %s",
|
||||
quoteQualifiedIdentByType("clickhouse", targetQueryTable),
|
||||
quoteIdentByType("clickhouse", col.Name),
|
||||
colType,
|
||||
))
|
||||
}
|
||||
return sqlList, dedupeStrings(warnings)
|
||||
}
|
||||
|
||||
func buildMySQLToClickHouseAddColumnSQL(targetQueryTable string, sourceCols, targetCols []connection.ColumnDefinition) ([]string, []string) {
|
||||
targetSet := make(map[string]struct{}, len(targetCols))
|
||||
for _, col := range targetCols {
|
||||
key := strings.ToLower(strings.TrimSpace(col.Name))
|
||||
if key == "" {
|
||||
continue
|
||||
}
|
||||
targetSet[key] = struct{}{}
|
||||
}
|
||||
var sqlList []string
|
||||
var warnings []string
|
||||
for _, col := range sourceCols {
|
||||
key := strings.ToLower(strings.TrimSpace(col.Name))
|
||||
if key == "" {
|
||||
continue
|
||||
}
|
||||
if _, ok := targetSet[key]; ok {
|
||||
continue
|
||||
}
|
||||
colType, mapWarnings := mapMySQLColumnToClickHouse(col)
|
||||
warnings = append(warnings, mapWarnings...)
|
||||
sqlList = append(sqlList, fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s %s",
|
||||
quoteQualifiedIdentByType("clickhouse", targetQueryTable),
|
||||
quoteIdentByType("clickhouse", col.Name),
|
||||
colType,
|
||||
))
|
||||
}
|
||||
return sqlList, dedupeStrings(warnings)
|
||||
}
|
||||
|
||||
func buildClickHouseToPGLikeAddColumnSQL(targetType string, targetQueryTable string, sourceCols, targetCols []connection.ColumnDefinition) ([]string, []string) {
|
||||
targetSet := make(map[string]struct{}, len(targetCols))
|
||||
for _, col := range targetCols {
|
||||
key := strings.ToLower(strings.TrimSpace(col.Name))
|
||||
if key == "" {
|
||||
continue
|
||||
}
|
||||
targetSet[key] = struct{}{}
|
||||
}
|
||||
var sqlList []string
|
||||
var warnings []string
|
||||
for _, col := range sourceCols {
|
||||
key := strings.ToLower(strings.TrimSpace(col.Name))
|
||||
if key == "" {
|
||||
continue
|
||||
}
|
||||
if _, ok := targetSet[key]; ok {
|
||||
continue
|
||||
}
|
||||
colType, mapWarnings := mapClickHouseColumnToPGLike(col)
|
||||
warnings = append(warnings, mapWarnings...)
|
||||
sqlList = append(sqlList, fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s %s NULL",
|
||||
quoteQualifiedIdentByType(targetType, targetQueryTable),
|
||||
quoteIdentByType(targetType, col.Name),
|
||||
colType,
|
||||
))
|
||||
}
|
||||
return sqlList, dedupeStrings(warnings)
|
||||
}
|
||||
|
||||
func buildClickHouseToMySQLAddColumnSQL(targetQueryTable string, sourceCols, targetCols []connection.ColumnDefinition) ([]string, []string) {
|
||||
targetSet := make(map[string]struct{}, len(targetCols))
|
||||
for _, col := range targetCols {
|
||||
key := strings.ToLower(strings.TrimSpace(col.Name))
|
||||
if key == "" {
|
||||
continue
|
||||
}
|
||||
targetSet[key] = struct{}{}
|
||||
}
|
||||
var sqlList []string
|
||||
var warnings []string
|
||||
for _, col := range sourceCols {
|
||||
key := strings.ToLower(strings.TrimSpace(col.Name))
|
||||
if key == "" {
|
||||
continue
|
||||
}
|
||||
if _, ok := targetSet[key]; ok {
|
||||
continue
|
||||
}
|
||||
colType, mapWarnings := mapClickHouseColumnToMySQL(col)
|
||||
warnings = append(warnings, mapWarnings...)
|
||||
sqlList = append(sqlList, fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s %s NULL",
|
||||
quoteQualifiedIdentByType("mysql", targetQueryTable),
|
||||
quoteIdentByType("mysql", col.Name),
|
||||
colType,
|
||||
))
|
||||
}
|
||||
return sqlList, dedupeStrings(warnings)
|
||||
}
|
||||
|
||||
func buildPGLikeToClickHouseCreateTableSQL(targetQueryTable string, sourceCols []connection.ColumnDefinition) (string, []string, []string) {
|
||||
columnDefs := make([]string, 0, len(sourceCols))
|
||||
warnings := make([]string, 0)
|
||||
unsupported := make([]string, 0)
|
||||
orderByCols := make([]string, 0)
|
||||
for _, col := range sourceCols {
|
||||
def, colWarnings := buildPGLikeToClickHouseColumnDefinition(col)
|
||||
warnings = append(warnings, colWarnings...)
|
||||
columnDefs = append(columnDefs, fmt.Sprintf("%s %s", quoteIdentByType("clickhouse", col.Name), def))
|
||||
if col.Key == "PRI" || col.Key == "PK" {
|
||||
orderByCols = append(orderByCols, quoteIdentByType("clickhouse", col.Name))
|
||||
}
|
||||
}
|
||||
orderExpr := "tuple()"
|
||||
if len(orderByCols) > 0 {
|
||||
orderExpr = "(" + strings.Join(orderByCols, ", ") + ")"
|
||||
} else {
|
||||
warnings = append(warnings, "源表未识别到主键,ClickHouse 将使用 ORDER BY tuple() 建表,后续查询性能可能受影响")
|
||||
}
|
||||
warnings = append(warnings, "ClickHouse 不保留关系型外键/唯一约束语义,将仅迁移字段与数据")
|
||||
createSQL := fmt.Sprintf("CREATE TABLE %s (\n %s\n) ENGINE = MergeTree() ORDER BY %s", quoteQualifiedIdentByType("clickhouse", targetQueryTable), strings.Join(columnDefs, ",\n "), orderExpr)
|
||||
return createSQL, dedupeStrings(warnings), dedupeStrings(unsupported)
|
||||
}
|
||||
|
||||
func buildMySQLToClickHouseCreateTableSQL(targetQueryTable string, sourceCols []connection.ColumnDefinition) (string, []string, []string) {
|
||||
columnDefs := make([]string, 0, len(sourceCols))
|
||||
warnings := make([]string, 0)
|
||||
unsupported := make([]string, 0)
|
||||
orderByCols := make([]string, 0)
|
||||
for _, col := range sourceCols {
|
||||
def, colWarnings := buildMySQLToClickHouseColumnDefinition(col)
|
||||
warnings = append(warnings, colWarnings...)
|
||||
columnDefs = append(columnDefs, fmt.Sprintf("%s %s", quoteIdentByType("clickhouse", col.Name), def))
|
||||
if col.Key == "PRI" || col.Key == "PK" {
|
||||
orderByCols = append(orderByCols, quoteIdentByType("clickhouse", col.Name))
|
||||
}
|
||||
}
|
||||
orderExpr := "tuple()"
|
||||
if len(orderByCols) > 0 {
|
||||
orderExpr = "(" + strings.Join(orderByCols, ", ") + ")"
|
||||
} else {
|
||||
warnings = append(warnings, "源表未识别到主键,ClickHouse 将使用 ORDER BY tuple() 建表,后续查询性能可能受影响")
|
||||
}
|
||||
warnings = append(warnings, "ClickHouse 不保留关系型外键/唯一约束语义,将仅迁移字段与数据")
|
||||
createSQL := fmt.Sprintf("CREATE TABLE %s (\n %s\n) ENGINE = MergeTree() ORDER BY %s", quoteQualifiedIdentByType("clickhouse", targetQueryTable), strings.Join(columnDefs, ",\n "), orderExpr)
|
||||
return createSQL, dedupeStrings(warnings), dedupeStrings(unsupported)
|
||||
}
|
||||
|
||||
func buildClickHouseToPGLikeCreateTableSQL(targetType string, targetQueryTable string, sourceCols []connection.ColumnDefinition) (string, []string, []string) {
|
||||
columnDefs := make([]string, 0, len(sourceCols)+1)
|
||||
warnings := make([]string, 0)
|
||||
unsupported := []string{"ClickHouse ORDER BY/PARTITION/TTL/Projection/物化视图 语义当前不会自动迁移到 PG-like"}
|
||||
pkCols := make([]string, 0)
|
||||
for _, col := range sourceCols {
|
||||
def, colWarnings := buildClickHouseToPGLikeColumnDefinition(col)
|
||||
warnings = append(warnings, colWarnings...)
|
||||
columnDefs = append(columnDefs, fmt.Sprintf("%s %s", quoteIdentByType(targetType, col.Name), def))
|
||||
if col.Key == "PRI" || col.Key == "PK" {
|
||||
pkCols = append(pkCols, quoteIdentByType(targetType, col.Name))
|
||||
}
|
||||
}
|
||||
if len(pkCols) > 0 {
|
||||
columnDefs = append(columnDefs, fmt.Sprintf("PRIMARY KEY (%s)", strings.Join(pkCols, ", ")))
|
||||
} else {
|
||||
warnings = append(warnings, "ClickHouse 源端未返回主键信息,目标 PG-like 表将不自动创建主键")
|
||||
}
|
||||
createSQL := fmt.Sprintf("CREATE TABLE %s (\n %s\n)", quoteQualifiedIdentByType(targetType, targetQueryTable), strings.Join(columnDefs, ",\n "))
|
||||
return createSQL, dedupeStrings(warnings), dedupeStrings(unsupported)
|
||||
}
|
||||
|
||||
func buildClickHouseToMySQLCreateTableSQL(targetQueryTable string, sourceCols []connection.ColumnDefinition) (string, []string) {
|
||||
columnDefs := make([]string, 0, len(sourceCols)+1)
|
||||
warnings := make([]string, 0)
|
||||
pkCols := make([]string, 0)
|
||||
for _, col := range sourceCols {
|
||||
def, colWarnings := buildClickHouseToMySQLColumnDefinition(col)
|
||||
warnings = append(warnings, colWarnings...)
|
||||
columnDefs = append(columnDefs, fmt.Sprintf("%s %s", quoteIdentByType("mysql", col.Name), def))
|
||||
if col.Key == "PRI" || col.Key == "PK" {
|
||||
pkCols = append(pkCols, quoteIdentByType("mysql", col.Name))
|
||||
}
|
||||
}
|
||||
if len(pkCols) > 0 {
|
||||
columnDefs = append(columnDefs, fmt.Sprintf("PRIMARY KEY (%s)", strings.Join(pkCols, ", ")))
|
||||
} else {
|
||||
warnings = append(warnings, "ClickHouse 源端未返回主键信息,目标 MySQL 表将不自动创建主键")
|
||||
}
|
||||
createSQL := fmt.Sprintf("CREATE TABLE %s (\n %s\n)", quoteQualifiedIdentByType("mysql", targetQueryTable), strings.Join(columnDefs, ",\n "))
|
||||
return createSQL, dedupeStrings(warnings)
|
||||
}
|
||||
|
||||
func buildPGLikeToClickHouseColumnDefinition(col connection.ColumnDefinition) (string, []string) {
|
||||
targetType, warnings := mapPGLikeColumnToClickHouse(col)
|
||||
parts := []string{targetType}
|
||||
return strings.Join(parts, " "), dedupeStrings(warnings)
|
||||
}
|
||||
|
||||
func buildMySQLToClickHouseColumnDefinition(col connection.ColumnDefinition) (string, []string) {
|
||||
targetType, warnings := mapMySQLColumnToClickHouse(col)
|
||||
parts := []string{targetType}
|
||||
if strings.EqualFold(strings.TrimSpace(col.Nullable), "NO") && !strings.HasPrefix(strings.ToLower(targetType), "nullable(") {
|
||||
return strings.Join(parts, " "), dedupeStrings(warnings)
|
||||
}
|
||||
return strings.Join(parts, " "), dedupeStrings(warnings)
|
||||
}
|
||||
|
||||
func buildClickHouseToPGLikeColumnDefinition(col connection.ColumnDefinition) (string, []string) {
|
||||
targetType, warnings := mapClickHouseColumnToPGLike(col)
|
||||
parts := []string{targetType}
|
||||
if strings.EqualFold(strings.TrimSpace(col.Nullable), "NO") {
|
||||
parts = append(parts, "NOT NULL")
|
||||
}
|
||||
return strings.Join(parts, " "), dedupeStrings(warnings)
|
||||
}
|
||||
|
||||
func buildClickHouseToMySQLColumnDefinition(col connection.ColumnDefinition) (string, []string) {
|
||||
targetType, warnings := mapClickHouseColumnToMySQL(col)
|
||||
parts := []string{targetType}
|
||||
if strings.EqualFold(strings.TrimSpace(col.Nullable), "NO") {
|
||||
parts = append(parts, "NOT NULL")
|
||||
}
|
||||
return strings.Join(parts, " "), dedupeStrings(warnings)
|
||||
}
|
||||
|
||||
func mapPGLikeColumnToClickHouse(col connection.ColumnDefinition) (string, []string) {
|
||||
raw := strings.ToLower(strings.TrimSpace(col.Type))
|
||||
warnings := make([]string, 0)
|
||||
if raw == "" {
|
||||
return "String", []string{fmt.Sprintf("字段 %s 类型为空,已降级为 String", col.Name)}
|
||||
}
|
||||
baseType := "String"
|
||||
switch {
|
||||
case raw == "boolean" || strings.HasPrefix(raw, "bool"):
|
||||
baseType = "UInt8"
|
||||
case raw == "smallint":
|
||||
baseType = "Int16"
|
||||
case raw == "integer" || raw == "int4":
|
||||
baseType = "Int32"
|
||||
case raw == "bigint" || raw == "int8":
|
||||
baseType = "Int64"
|
||||
case strings.HasPrefix(raw, "numeric"), strings.HasPrefix(raw, "decimal"):
|
||||
baseType = replaceTypeBase(raw, []string{"numeric", "decimal"}, "Decimal")
|
||||
case raw == "real" || raw == "float4":
|
||||
baseType = "Float32"
|
||||
case raw == "double precision" || raw == "float8":
|
||||
baseType = "Float64"
|
||||
case raw == "date":
|
||||
baseType = "Date"
|
||||
case strings.HasPrefix(raw, "timestamp") || strings.Contains(raw, "without time zone") || strings.Contains(raw, "with time zone"):
|
||||
baseType = "DateTime"
|
||||
case strings.HasPrefix(raw, "time"):
|
||||
warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已降级为 String", col.Name, col.Type))
|
||||
baseType = "String"
|
||||
case strings.HasPrefix(raw, "character varying"), strings.HasPrefix(raw, "varchar("), strings.HasPrefix(raw, "character("), strings.HasPrefix(raw, "char("), raw == "character", raw == "text", raw == "uuid":
|
||||
baseType = "String"
|
||||
case raw == "json" || raw == "jsonb" || raw == "bytea":
|
||||
warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已降级为 String", col.Name, col.Type))
|
||||
baseType = "String"
|
||||
case strings.HasSuffix(raw, "[]") || strings.HasPrefix(raw, "array"):
|
||||
warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已降级为 String", col.Name, col.Type))
|
||||
baseType = "String"
|
||||
case raw == "user-defined":
|
||||
warnings = append(warnings, fmt.Sprintf("字段 %s 为用户自定义类型,已降级为 String", col.Name))
|
||||
baseType = "String"
|
||||
default:
|
||||
warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 暂无专门映射,已降级为 String", col.Name, col.Type))
|
||||
baseType = "String"
|
||||
}
|
||||
if strings.EqualFold(strings.TrimSpace(col.Nullable), "YES") && !strings.HasPrefix(strings.ToLower(baseType), "nullable(") {
|
||||
baseType = fmt.Sprintf("Nullable(%s)", baseType)
|
||||
}
|
||||
if strings.Contains(strings.ToLower(strings.TrimSpace(col.Extra)), "identity") || strings.Contains(strings.ToLower(strings.TrimSpace(col.Extra)), "auto_increment") {
|
||||
warnings = append(warnings, fmt.Sprintf("字段 %s 的 identity/自增语义在 ClickHouse 中不保留", col.Name))
|
||||
}
|
||||
return baseType, dedupeStrings(warnings)
|
||||
}
|
||||
|
||||
func mapMySQLColumnToClickHouse(col connection.ColumnDefinition) (string, []string) {
|
||||
raw := strings.ToLower(strings.TrimSpace(col.Type))
|
||||
warnings := make([]string, 0)
|
||||
if raw == "" {
|
||||
return "String", []string{fmt.Sprintf("字段 %s 类型为空,已降级为 String", col.Name)}
|
||||
}
|
||||
unsigned := strings.Contains(raw, "unsigned")
|
||||
clean := strings.ReplaceAll(raw, " unsigned", "")
|
||||
clean = strings.ReplaceAll(clean, " zerofill", "")
|
||||
baseType := "String"
|
||||
switch {
|
||||
case strings.HasPrefix(clean, "tinyint(1)"):
|
||||
baseType = "UInt8"
|
||||
case strings.HasPrefix(clean, "tinyint"):
|
||||
if unsigned {
|
||||
baseType = "UInt8"
|
||||
} else {
|
||||
baseType = "Int8"
|
||||
}
|
||||
case strings.HasPrefix(clean, "smallint"):
|
||||
if unsigned {
|
||||
baseType = "UInt16"
|
||||
} else {
|
||||
baseType = "Int16"
|
||||
}
|
||||
case strings.HasPrefix(clean, "mediumint"), strings.HasPrefix(clean, "int"), strings.HasPrefix(clean, "integer"):
|
||||
if unsigned {
|
||||
baseType = "UInt32"
|
||||
} else {
|
||||
baseType = "Int32"
|
||||
}
|
||||
case strings.HasPrefix(clean, "bigint"):
|
||||
if unsigned {
|
||||
baseType = "UInt64"
|
||||
} else {
|
||||
baseType = "Int64"
|
||||
}
|
||||
case strings.HasPrefix(clean, "decimal"), strings.HasPrefix(clean, "numeric"):
|
||||
baseType = replaceTypeBase(strings.Title(clean), []string{"Decimal", "Numeric"}, "Decimal")
|
||||
case strings.HasPrefix(clean, "float"):
|
||||
baseType = "Float32"
|
||||
case strings.HasPrefix(clean, "double"):
|
||||
baseType = "Float64"
|
||||
case strings.HasPrefix(clean, "date"):
|
||||
baseType = "Date"
|
||||
case strings.HasPrefix(clean, "datetime"), strings.HasPrefix(clean, "timestamp"):
|
||||
baseType = "DateTime"
|
||||
case strings.HasPrefix(clean, "time"):
|
||||
warnings = append(warnings, fmt.Sprintf("字段 %s 类型 time 已降级为 String", col.Name))
|
||||
baseType = "String"
|
||||
case strings.HasPrefix(clean, "json"), strings.HasPrefix(clean, "enum"), strings.HasPrefix(clean, "set"), strings.HasPrefix(clean, "char"), strings.HasPrefix(clean, "varchar"), strings.Contains(clean, "text"):
|
||||
baseType = "String"
|
||||
case strings.Contains(clean, "blob"), strings.Contains(clean, "binary"):
|
||||
warnings = append(warnings, fmt.Sprintf("字段 %s 二进制类型已降级为 String", col.Name))
|
||||
baseType = "String"
|
||||
default:
|
||||
warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 暂无专门映射,已降级为 String", col.Name, col.Type))
|
||||
baseType = "String"
|
||||
}
|
||||
if strings.EqualFold(strings.TrimSpace(col.Nullable), "YES") && !strings.HasPrefix(strings.ToLower(baseType), "nullable(") {
|
||||
baseType = fmt.Sprintf("Nullable(%s)", baseType)
|
||||
}
|
||||
if strings.Contains(strings.ToLower(strings.TrimSpace(col.Extra)), "auto_increment") {
|
||||
warnings = append(warnings, fmt.Sprintf("字段 %s 的 AUTO_INCREMENT 在 ClickHouse 中不保留自增语义", col.Name))
|
||||
}
|
||||
return baseType, dedupeStrings(warnings)
|
||||
}
|
||||
|
||||
// clickHouseDecimalPattern matches lowercase Decimal/Numeric types with
// explicit precision and scale, e.g. "decimal(10,2)"; capture groups 2 and 3
// hold precision and scale. Inputs are lowercased by the callers before use.
var clickHouseDecimalPattern = regexp.MustCompile(`^(decimal|numeric)\((\d+)\s*,\s*(\d+)\)$`)

// clickHouseStringArgsPattern matches lowercase FixedString types, e.g.
// "fixedstring(36)"; capture group 1 holds the byte length.
var clickHouseStringArgsPattern = regexp.MustCompile(`^fixedstring\((\d+)\)$`)
|
||||
|
||||
// mapClickHouseColumnToPGLike maps a ClickHouse column type to a PG-like
// (PostgreSQL-family) type. A single Nullable(...) wrapper and any number of
// LowCardinality(...) wrappers are stripped before matching; nullability
// itself is handled by the caller via col.Nullable. Unsigned integers are
// widened one step (UInt8→smallint, UInt16→integer, UInt32→bigint,
// UInt64→numeric(20,0)) so values cannot overflow the signed PG type.
// Unmapped types degrade to text with a warning.
func mapClickHouseColumnToPGLike(col connection.ColumnDefinition) (string, []string) {
	raw := strings.TrimSpace(col.Type)
	lower := strings.ToLower(raw)
	warnings := make([]string, 0)
	// Strip one Nullable(...) layer. The slice offset uses len("Nullable(")
	// against the original-cased string, which is safe because the prefix
	// length is the same regardless of casing.
	if strings.HasPrefix(lower, "nullable(") && strings.HasSuffix(lower, ")") {
		raw = strings.TrimSpace(raw[len("Nullable(") : len(raw)-1])
		lower = strings.ToLower(raw)
	}
	// Strip any number of LowCardinality(...) layers.
	for {
		if strings.HasPrefix(lower, "lowcardinality(") && strings.HasSuffix(lower, ")") {
			raw = strings.TrimSpace(raw[len("LowCardinality(") : len(raw)-1])
			lower = strings.ToLower(raw)
			continue
		}
		break
	}
	switch {
	case lower == "bool" || lower == "boolean":
		return "boolean", warnings
	case lower == "int8":
		return "smallint", warnings
	case lower == "uint8":
		return "smallint", warnings
	case lower == "int16":
		return "smallint", warnings
	case lower == "uint16":
		return "integer", warnings
	case lower == "int32":
		return "integer", warnings
	case lower == "uint32":
		return "bigint", warnings
	case lower == "int64":
		return "bigint", warnings
	case lower == "uint64":
		// bigint is signed 64-bit; use numeric(20,0) to hold full UInt64 range.
		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已映射为 numeric(20,0) 以避免无符号溢出", col.Name, col.Type))
		return "numeric(20,0)", warnings
	case lower == "float32":
		return "real", warnings
	case lower == "float64":
		return "double precision", warnings
	case lower == "date":
		return "date", warnings
	case strings.HasPrefix(lower, "datetime"):
		// Covers DateTime and DateTime64(...) variants.
		return "timestamp", warnings
	case lower == "string":
		return "text", warnings
	case lower == "uuid":
		return "uuid", warnings
	case lower == "json", strings.HasPrefix(lower, "map("), strings.HasPrefix(lower, "array("), strings.HasPrefix(lower, "tuple("), strings.HasPrefix(lower, "nested("):
		// Composite/semi-structured types degrade to jsonb.
		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已降级为 jsonb", col.Name, col.Type))
		return "jsonb", warnings
	case strings.HasPrefix(lower, "enum8("), strings.HasPrefix(lower, "enum16("):
		warnings = append(warnings, fmt.Sprintf("字段 %s 枚举类型 %s 已降级为 varchar(255)", col.Name, col.Type))
		return "varchar(255)", warnings
	case clickHouseDecimalPattern.MatchString(lower):
		parts := clickHouseDecimalPattern.FindStringSubmatch(lower)
		return fmt.Sprintf("numeric(%s,%s)", parts[2], parts[3]), warnings
	case clickHouseStringArgsPattern.MatchString(lower):
		parts := clickHouseStringArgsPattern.FindStringSubmatch(lower)
		return fmt.Sprintf("varchar(%s)", parts[1]), warnings
	default:
		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 暂无专门 PG-like 映射,已降级为 text", col.Name, col.Type))
		return "text", warnings
	}
}
|
||||
|
||||
// mapClickHouseColumnToMySQL maps a ClickHouse column type to a MySQL type.
// A single Nullable(...) wrapper and any number of LowCardinality(...)
// wrappers are stripped before matching; unsigned integers map to the
// corresponding "... unsigned" MySQL types. Unmapped types degrade to text
// with a warning.
func mapClickHouseColumnToMySQL(col connection.ColumnDefinition) (string, []string) {
	raw := strings.TrimSpace(col.Type)
	lower := strings.ToLower(raw)
	warnings := make([]string, 0)
	nullable := false
	// Strip one Nullable(...) layer; offset math is case-insensitive because
	// only the prefix length matters.
	if strings.HasPrefix(lower, "nullable(") && strings.HasSuffix(lower, ")") {
		nullable = true
		raw = strings.TrimSpace(raw[len("Nullable(") : len(raw)-1])
		lower = strings.ToLower(raw)
	}
	// Strip any number of LowCardinality(...) layers.
	for {
		if strings.HasPrefix(lower, "lowcardinality(") && strings.HasSuffix(lower, ")") {
			raw = strings.TrimSpace(raw[len("LowCardinality(") : len(raw)-1])
			lower = strings.ToLower(raw)
			continue
		}
		break
	}
	// Nullability is detected but deliberately unused here: the emitted MySQL
	// types are nullable by default, and NOT NULL is appended by the caller
	// (buildClickHouseToMySQLColumnDefinition) from col.Nullable instead.
	// NOTE(review): confirm the wrapper flag and col.Nullable always agree.
	_ = nullable
	switch {
	case lower == "bool" || lower == "boolean" || lower == "uint8":
		return "tinyint(1)", warnings
	case lower == "int8":
		return "tinyint", warnings
	case lower == "uint16":
		return "smallint unsigned", warnings
	case lower == "int16":
		return "smallint", warnings
	case lower == "uint32":
		return "int unsigned", warnings
	case lower == "int32":
		return "int", warnings
	case lower == "uint64":
		return "bigint unsigned", warnings
	case lower == "int64":
		return "bigint", warnings
	case lower == "float32":
		return "float", warnings
	case lower == "float64":
		return "double", warnings
	case lower == "date":
		return "date", warnings
	case strings.HasPrefix(lower, "datetime"):
		// Covers DateTime and DateTime64(...) variants.
		return "datetime", warnings
	case lower == "string":
		return "text", warnings
	case lower == "uuid":
		return "char(36)", warnings
	case lower == "json", strings.HasPrefix(lower, "map("), strings.HasPrefix(lower, "array("), strings.HasPrefix(lower, "tuple("), strings.HasPrefix(lower, "nested("):
		// Composite/semi-structured types degrade to MySQL json.
		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已降级为 json", col.Name, col.Type))
		return "json", warnings
	case clickHouseDecimalPattern.MatchString(lower):
		parts := clickHouseDecimalPattern.FindStringSubmatch(lower)
		return fmt.Sprintf("decimal(%s,%s)", parts[2], parts[3]), warnings
	case clickHouseStringArgsPattern.MatchString(lower):
		parts := clickHouseStringArgsPattern.FindStringSubmatch(lower)
		return fmt.Sprintf("varchar(%s)", parts[1]), warnings
	default:
		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 暂无专门映射,已降级为 text", col.Name, col.Type))
		return "text", warnings
	}
}
|
||||
379
internal/sync/migration_kernel_router.go
Normal file
379
internal/sync/migration_kernel_router.go
Normal file
@@ -0,0 +1,379 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"GoNavi-Wails/internal/connection"
|
||||
"GoNavi-Wails/internal/db"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Planner marker types. Each empty struct implements MigrationPlanner for one
// concrete (source family → target family) migration pair; they carry no
// state, and all inputs arrive through MigrationBuildContext.
type (
	genericLegacyPlanner      struct{}
	mysqlToPGLikePlanner      struct{}
	mysqlToClickHousePlanner  struct{}
	pgLikeToClickHousePlanner struct{}
	clickHouseToMySQLPlanner  struct{}
	clickHouseToPGLikePlanner struct{}
	mysqlToMongoPlanner       struct{}
	pgLikeToMongoPlanner      struct{}
	clickHouseToMongoPlanner  struct{}
	tdengineToMongoPlanner    struct{}
	mongoToMySQLPlanner       struct{}
	mongoToPGLikePlanner      struct{}
	pgLikeToMySQLPlanner      struct{}
	tdengineToMySQLPlanner    struct{}
	tdengineToPGLikePlanner   struct{}
	mongoToRelationalPlanner  struct{}
)
func buildSchemaMigrationPlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
|
||||
ctx := MigrationBuildContext{
|
||||
Config: config,
|
||||
TableName: tableName,
|
||||
SourceDB: sourceDB,
|
||||
TargetDB: targetDB,
|
||||
}
|
||||
planner := resolveMigrationPlanner(ctx)
|
||||
if planner == nil {
|
||||
return buildSchemaMigrationPlanLegacy(config, tableName, sourceDB, targetDB)
|
||||
}
|
||||
return planner.BuildPlan(ctx)
|
||||
}
|
||||
|
||||
func resolveMigrationPlanner(ctx MigrationBuildContext) MigrationPlanner {
|
||||
planners := []MigrationPlanner{
|
||||
mysqlToPGLikePlanner{},
|
||||
mySQLLikeToTDenginePlanner{},
|
||||
pgLikeToTDenginePlanner{},
|
||||
clickHouseToTDenginePlanner{},
|
||||
tdengineToTDenginePlanner{},
|
||||
tdengineToPGLikePlanner{},
|
||||
tdengineToMySQLPlanner{},
|
||||
mysqlToClickHousePlanner{},
|
||||
pgLikeToClickHousePlanner{},
|
||||
clickHouseToMySQLPlanner{},
|
||||
clickHouseToPGLikePlanner{},
|
||||
mysqlToMongoPlanner{},
|
||||
pgLikeToMongoPlanner{},
|
||||
clickHouseToMongoPlanner{},
|
||||
tdengineToMongoPlanner{},
|
||||
mongoToMySQLPlanner{},
|
||||
mongoToPGLikePlanner{},
|
||||
pgLikeToMySQLPlanner{},
|
||||
mongoToRelationalPlanner{},
|
||||
genericLegacyPlanner{},
|
||||
}
|
||||
bestLevel := MigrationSupportLevelUnsupported
|
||||
var bestPlanner MigrationPlanner
|
||||
for _, planner := range planners {
|
||||
level := planner.SupportLevel(ctx)
|
||||
if migrationSupportRank(level) > migrationSupportRank(bestLevel) {
|
||||
bestLevel = level
|
||||
bestPlanner = planner
|
||||
}
|
||||
}
|
||||
return bestPlanner
|
||||
}
|
||||
|
||||
func migrationSupportRank(level MigrationSupportLevel) int {
|
||||
switch level {
|
||||
case MigrationSupportLevelFull:
|
||||
return 4
|
||||
case MigrationSupportLevelPlanned:
|
||||
return 3
|
||||
case MigrationSupportLevelPartial:
|
||||
return 2
|
||||
default:
|
||||
return 1
|
||||
}
|
||||
}
|
||||
|
||||
func isMySQLLikeType(dbType string) bool {
|
||||
return isMySQLLikeWritableTargetType(dbType)
|
||||
}
|
||||
|
||||
func classifyMigrationDataModel(dbType string) MigrationDataModel {
|
||||
switch normalizeMigrationDBType(dbType) {
|
||||
case "mysql", "mariadb", "postgres", "kingbase", "highgo", "vastbase", "oracle", "sqlserver", "dameng", "sqlite", "duckdb":
|
||||
return MigrationDataModelRelational
|
||||
case "mongodb":
|
||||
return MigrationDataModelDocument
|
||||
case "clickhouse", "diros", "sphinx":
|
||||
return MigrationDataModelColumnar
|
||||
case "tdengine":
|
||||
return MigrationDataModelTimeSeries
|
||||
case "redis":
|
||||
return MigrationDataModelKeyValue
|
||||
default:
|
||||
return MigrationDataModelCustom
|
||||
}
|
||||
}
|
||||
|
||||
func (genericLegacyPlanner) Name() string { return "generic-legacy-planner" }
|
||||
|
||||
func (genericLegacyPlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel {
|
||||
_ = ctx
|
||||
return MigrationSupportLevelPartial
|
||||
}
|
||||
|
||||
func (genericLegacyPlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
|
||||
return buildSchemaMigrationPlanLegacy(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB)
|
||||
}
|
||||
|
||||
func (mysqlToPGLikePlanner) Name() string { return "mysql-pglike-planner" }
|
||||
|
||||
func (mysqlToPGLikePlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel {
|
||||
sourceType := resolveMigrationDBType(ctx.Config.SourceConfig)
|
||||
targetType := resolveMigrationDBType(ctx.Config.TargetConfig)
|
||||
if isMySQLLikeSourceType(sourceType) && isPGLikeTarget(targetType) {
|
||||
return MigrationSupportLevelFull
|
||||
}
|
||||
return MigrationSupportLevelUnsupported
|
||||
}
|
||||
|
||||
func (mysqlToPGLikePlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
|
||||
return buildMySQLToPGLikePlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB)
|
||||
}
|
||||
|
||||
func (tdengineToMySQLPlanner) Name() string { return "tdengine-mysql-planner" }
|
||||
|
||||
func (tdengineToMySQLPlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel {
|
||||
sourceType := resolveMigrationDBType(ctx.Config.SourceConfig)
|
||||
targetType := resolveMigrationDBType(ctx.Config.TargetConfig)
|
||||
if sourceType == "tdengine" && isMySQLLikeWritableTargetType(targetType) {
|
||||
return MigrationSupportLevelFull
|
||||
}
|
||||
return MigrationSupportLevelUnsupported
|
||||
}
|
||||
|
||||
func (tdengineToMySQLPlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
|
||||
return buildTDengineToMySQLPlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB)
|
||||
}
|
||||
|
||||
func (tdengineToPGLikePlanner) Name() string { return "tdengine-pglike-planner" }
|
||||
|
||||
func (tdengineToPGLikePlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel {
|
||||
sourceType := resolveMigrationDBType(ctx.Config.SourceConfig)
|
||||
targetType := resolveMigrationDBType(ctx.Config.TargetConfig)
|
||||
if sourceType == "tdengine" && isPGLikeTarget(targetType) {
|
||||
return MigrationSupportLevelFull
|
||||
}
|
||||
return MigrationSupportLevelUnsupported
|
||||
}
|
||||
|
||||
func (tdengineToPGLikePlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
|
||||
return buildTDengineToPGLikePlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB)
|
||||
}
|
||||
|
||||
func (mysqlToClickHousePlanner) Name() string { return "mysql-clickhouse-planner" }
|
||||
|
||||
func (mysqlToClickHousePlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel {
|
||||
sourceType := resolveMigrationDBType(ctx.Config.SourceConfig)
|
||||
targetType := resolveMigrationDBType(ctx.Config.TargetConfig)
|
||||
if isMySQLCoreType(sourceType) && targetType == "clickhouse" {
|
||||
return MigrationSupportLevelFull
|
||||
}
|
||||
return MigrationSupportLevelUnsupported
|
||||
}
|
||||
|
||||
func (mysqlToClickHousePlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
|
||||
return buildMySQLToClickHousePlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB)
|
||||
}
|
||||
|
||||
func (pgLikeToClickHousePlanner) Name() string { return "pglike-clickhouse-planner" }
|
||||
|
||||
func (pgLikeToClickHousePlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel {
|
||||
sourceType := resolveMigrationDBType(ctx.Config.SourceConfig)
|
||||
targetType := resolveMigrationDBType(ctx.Config.TargetConfig)
|
||||
if isPGLikeSource(sourceType) && targetType == "clickhouse" {
|
||||
return MigrationSupportLevelFull
|
||||
}
|
||||
return MigrationSupportLevelUnsupported
|
||||
}
|
||||
|
||||
func (pgLikeToClickHousePlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
|
||||
return buildPGLikeToClickHousePlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB)
|
||||
}
|
||||
|
||||
func (clickHouseToMySQLPlanner) Name() string { return "clickhouse-mysql-planner" }
|
||||
|
||||
func (clickHouseToMySQLPlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel {
|
||||
sourceType := resolveMigrationDBType(ctx.Config.SourceConfig)
|
||||
targetType := resolveMigrationDBType(ctx.Config.TargetConfig)
|
||||
if sourceType == "clickhouse" && isMySQLLikeWritableTargetType(targetType) {
|
||||
return MigrationSupportLevelFull
|
||||
}
|
||||
return MigrationSupportLevelUnsupported
|
||||
}
|
||||
|
||||
func (clickHouseToMySQLPlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
|
||||
return buildClickHouseToMySQLPlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB)
|
||||
}
|
||||
|
||||
func (clickHouseToPGLikePlanner) Name() string { return "clickhouse-pglike-planner" }
|
||||
|
||||
func (clickHouseToPGLikePlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel {
|
||||
sourceType := resolveMigrationDBType(ctx.Config.SourceConfig)
|
||||
targetType := resolveMigrationDBType(ctx.Config.TargetConfig)
|
||||
if sourceType == "clickhouse" && isPGLikeTarget(targetType) {
|
||||
return MigrationSupportLevelFull
|
||||
}
|
||||
return MigrationSupportLevelUnsupported
|
||||
}
|
||||
|
||||
func (clickHouseToPGLikePlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
|
||||
return buildClickHouseToPGLikePlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB)
|
||||
}
|
||||
|
||||
func (mysqlToMongoPlanner) Name() string { return "mysql-mongo-planner" }
|
||||
|
||||
func (mysqlToMongoPlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel {
|
||||
sourceType := resolveMigrationDBType(ctx.Config.SourceConfig)
|
||||
targetType := resolveMigrationDBType(ctx.Config.TargetConfig)
|
||||
if isMySQLCoreType(sourceType) && targetType == "mongodb" {
|
||||
return MigrationSupportLevelFull
|
||||
}
|
||||
return MigrationSupportLevelUnsupported
|
||||
}
|
||||
|
||||
func (mysqlToMongoPlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
|
||||
return buildMySQLToMongoPlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB)
|
||||
}
|
||||
|
||||
func (pgLikeToMongoPlanner) Name() string { return "pglike-mongo-planner" }
|
||||
|
||||
func (pgLikeToMongoPlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel {
|
||||
sourceType := resolveMigrationDBType(ctx.Config.SourceConfig)
|
||||
targetType := resolveMigrationDBType(ctx.Config.TargetConfig)
|
||||
if isPGLikeSource(sourceType) && targetType == "mongodb" {
|
||||
return MigrationSupportLevelFull
|
||||
}
|
||||
return MigrationSupportLevelUnsupported
|
||||
}
|
||||
|
||||
func (pgLikeToMongoPlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
|
||||
return buildPGLikeToMongoPlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB)
|
||||
}
|
||||
|
||||
func (clickHouseToMongoPlanner) Name() string { return "clickhouse-mongo-planner" }
|
||||
|
||||
func (clickHouseToMongoPlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel {
|
||||
sourceType := resolveMigrationDBType(ctx.Config.SourceConfig)
|
||||
targetType := resolveMigrationDBType(ctx.Config.TargetConfig)
|
||||
if sourceType == "clickhouse" && targetType == "mongodb" {
|
||||
return MigrationSupportLevelFull
|
||||
}
|
||||
return MigrationSupportLevelUnsupported
|
||||
}
|
||||
|
||||
func (clickHouseToMongoPlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
|
||||
return buildClickHouseToMongoPlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB)
|
||||
}
|
||||
|
||||
func (tdengineToMongoPlanner) Name() string { return "tdengine-mongo-planner" }
|
||||
|
||||
func (tdengineToMongoPlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel {
|
||||
sourceType := resolveMigrationDBType(ctx.Config.SourceConfig)
|
||||
targetType := resolveMigrationDBType(ctx.Config.TargetConfig)
|
||||
if sourceType == "tdengine" && targetType == "mongodb" {
|
||||
return MigrationSupportLevelFull
|
||||
}
|
||||
return MigrationSupportLevelUnsupported
|
||||
}
|
||||
|
||||
func (tdengineToMongoPlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
|
||||
return buildTDengineToMongoPlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB)
|
||||
}
|
||||
|
||||
func (mongoToMySQLPlanner) Name() string { return "mongo-mysql-planner" }
|
||||
|
||||
func (mongoToMySQLPlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel {
|
||||
sourceType := resolveMigrationDBType(ctx.Config.SourceConfig)
|
||||
targetType := resolveMigrationDBType(ctx.Config.TargetConfig)
|
||||
if sourceType == "mongodb" && isMySQLLikeWritableTargetType(targetType) {
|
||||
return MigrationSupportLevelFull
|
||||
}
|
||||
return MigrationSupportLevelUnsupported
|
||||
}
|
||||
|
||||
func (mongoToMySQLPlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
|
||||
return buildMongoToMySQLPlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB)
|
||||
}
|
||||
|
||||
func (mongoToPGLikePlanner) Name() string { return "mongo-pglike-planner" }
|
||||
|
||||
func (mongoToPGLikePlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel {
|
||||
sourceType := resolveMigrationDBType(ctx.Config.SourceConfig)
|
||||
targetType := resolveMigrationDBType(ctx.Config.TargetConfig)
|
||||
if sourceType == "mongodb" && isPGLikeTarget(targetType) {
|
||||
return MigrationSupportLevelFull
|
||||
}
|
||||
return MigrationSupportLevelUnsupported
|
||||
}
|
||||
|
||||
func (mongoToPGLikePlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
|
||||
return buildMongoToPGLikePlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB)
|
||||
}
|
||||
|
||||
func (pgLikeToMySQLPlanner) Name() string { return "pglike-mysql-planner" }
|
||||
|
||||
func (pgLikeToMySQLPlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel {
|
||||
sourceType := resolveMigrationDBType(ctx.Config.SourceConfig)
|
||||
targetType := resolveMigrationDBType(ctx.Config.TargetConfig)
|
||||
if isPGLikeSource(sourceType) && isMySQLLikeWritableTargetType(targetType) {
|
||||
return MigrationSupportLevelFull
|
||||
}
|
||||
return MigrationSupportLevelUnsupported
|
||||
}
|
||||
|
||||
func (pgLikeToMySQLPlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
|
||||
return buildPGLikeToMySQLPlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB)
|
||||
}
|
||||
|
||||
func (mongoToRelationalPlanner) Name() string { return "mongo-relational-inference-planner" }
|
||||
|
||||
func (mongoToRelationalPlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel {
|
||||
sourceType := resolveMigrationDBType(ctx.Config.SourceConfig)
|
||||
targetType := resolveMigrationDBType(ctx.Config.TargetConfig)
|
||||
if !shouldUseSchemaInference(sourceType, targetType) {
|
||||
return MigrationSupportLevelUnsupported
|
||||
}
|
||||
return MigrationSupportLevelPlanned
|
||||
}
|
||||
|
||||
func (mongoToRelationalPlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
|
||||
sourceType := resolveMigrationDBType(ctx.Config.SourceConfig)
|
||||
targetType := resolveMigrationDBType(ctx.Config.TargetConfig)
|
||||
inference, err := inferSchemaForPair(sourceType, targetType, ctx.TableName)
|
||||
if err != nil {
|
||||
return SchemaMigrationPlan{}, nil, nil, err
|
||||
}
|
||||
plan := SchemaMigrationPlan{}
|
||||
plan.SourceSchema, plan.SourceTable = normalizeSchemaAndTable(sourceType, ctx.Config.SourceConfig.Database, ctx.TableName)
|
||||
plan.TargetSchema, plan.TargetTable = normalizeSchemaAndTable(targetType, ctx.Config.TargetConfig.Database, ctx.TableName)
|
||||
plan.SourceQueryTable = qualifiedNameForQuery(sourceType, plan.SourceSchema, plan.SourceTable, ctx.TableName)
|
||||
plan.TargetQueryTable = qualifiedNameForQuery(targetType, plan.TargetSchema, plan.TargetTable, ctx.TableName)
|
||||
plan.PlannedAction = "当前库对已进入迁移内核规划阶段,等待 schema 推断与目标方言生成器落地"
|
||||
for _, issue := range inference.Issues {
|
||||
msg := strings.TrimSpace(issue.Message)
|
||||
if msg == "" {
|
||||
continue
|
||||
}
|
||||
plan.Warnings = append(plan.Warnings, msg)
|
||||
}
|
||||
plan.Warnings = append(plan.Warnings, fmt.Sprintf("迁移对象=%s,目标类型=%s,当前仅提供规划入口,暂不执行自动建表", inference.Object.Kind, targetType))
|
||||
return dedupeSchemaMigrationPlan(plan), nil, nil, nil
|
||||
}
|
||||
447
internal/sync/migration_kernel_router_test.go
Normal file
447
internal/sync/migration_kernel_router_test.go
Normal file
@@ -0,0 +1,447 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"GoNavi-Wails/internal/connection"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestClassifyMigrationDataModel(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
cases := map[string]MigrationDataModel{
|
||||
"mysql": MigrationDataModelRelational,
|
||||
"postgres": MigrationDataModelRelational,
|
||||
"kingbase": MigrationDataModelRelational,
|
||||
"mongodb": MigrationDataModelDocument,
|
||||
"clickhouse": MigrationDataModelColumnar,
|
||||
"tdengine": MigrationDataModelTimeSeries,
|
||||
"redis": MigrationDataModelKeyValue,
|
||||
"custom": MigrationDataModelCustom,
|
||||
}
|
||||
|
||||
for input, want := range cases {
|
||||
input, want := input, want
|
||||
t.Run(input, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
got := classifyMigrationDataModel(input)
|
||||
if got != want {
|
||||
t.Fatalf("unexpected data model, input=%s got=%s want=%s", input, got, want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveMigrationPlanner_PrefersMySQLKingbasePlanner(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
planner := resolveMigrationPlanner(MigrationBuildContext{
|
||||
Config: SyncConfig{
|
||||
SourceConfig: connection.ConnectionConfig{Type: "mysql"},
|
||||
TargetConfig: connection.ConnectionConfig{Type: "kingbase"},
|
||||
},
|
||||
})
|
||||
if planner == nil {
|
||||
t.Fatalf("expected planner")
|
||||
}
|
||||
if planner.Name() != "mysql-pglike-planner" {
|
||||
t.Fatalf("unexpected planner: %s", planner.Name())
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveMigrationPlanner_UsesSchemaInferencePlannerForMongoToMySQL(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
planner := resolveMigrationPlanner(MigrationBuildContext{
|
||||
Config: SyncConfig{
|
||||
SourceConfig: connection.ConnectionConfig{Type: "mongodb"},
|
||||
TargetConfig: connection.ConnectionConfig{Type: "mysql"},
|
||||
},
|
||||
})
|
||||
if planner == nil {
|
||||
t.Fatalf("expected planner")
|
||||
}
|
||||
if planner.Name() != "mongo-mysql-planner" {
|
||||
t.Fatalf("unexpected planner: %s", planner.Name())
|
||||
}
|
||||
}
|
||||
|
||||
func TestInferSchemaForPair_MongoToMySQLReturnsPlannedWarning(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
result, err := inferSchemaForPair("mongodb", "mysql", "users")
|
||||
if err != nil {
|
||||
t.Fatalf("inferSchemaForPair returned error: %v", err)
|
||||
}
|
||||
if !result.NeedsReview {
|
||||
t.Fatalf("expected needs review")
|
||||
}
|
||||
if result.Object.Name != "users" {
|
||||
t.Fatalf("unexpected object name: %s", result.Object.Name)
|
||||
}
|
||||
if len(result.Issues) == 0 || !strings.Contains(result.Issues[0].Message, "schema 推断") {
|
||||
t.Fatalf("unexpected issues: %+v", result.Issues)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveMigrationPlanner_UsesPGLikeMySQLPlannerForKingbaseToMySQL(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
planner := resolveMigrationPlanner(MigrationBuildContext{
|
||||
Config: SyncConfig{
|
||||
SourceConfig: connection.ConnectionConfig{Type: "kingbase"},
|
||||
TargetConfig: connection.ConnectionConfig{Type: "mysql"},
|
||||
},
|
||||
})
|
||||
if planner == nil {
|
||||
t.Fatalf("expected planner")
|
||||
}
|
||||
if planner.Name() != "pglike-mysql-planner" {
|
||||
t.Fatalf("unexpected planner: %s", planner.Name())
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveMigrationPlanner_UsesMySQLPGLikePlannerForMySQLToPostgres(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
planner := resolveMigrationPlanner(MigrationBuildContext{
|
||||
Config: SyncConfig{
|
||||
SourceConfig: connection.ConnectionConfig{Type: "mysql"},
|
||||
TargetConfig: connection.ConnectionConfig{Type: "postgres"},
|
||||
},
|
||||
})
|
||||
if planner == nil {
|
||||
t.Fatalf("expected planner")
|
||||
}
|
||||
if planner.Name() != "mysql-pglike-planner" {
|
||||
t.Fatalf("unexpected planner: %s", planner.Name())
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveMigrationPlanner_UsesMySQLClickHousePlanner(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
planner := resolveMigrationPlanner(MigrationBuildContext{
|
||||
Config: SyncConfig{
|
||||
SourceConfig: connection.ConnectionConfig{Type: "mysql"},
|
||||
TargetConfig: connection.ConnectionConfig{Type: "clickhouse"},
|
||||
},
|
||||
})
|
||||
if planner == nil {
|
||||
t.Fatalf("expected planner")
|
||||
}
|
||||
if planner.Name() != "mysql-clickhouse-planner" {
|
||||
t.Fatalf("unexpected planner: %s", planner.Name())
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveMigrationPlanner_UsesClickHouseMySQLPlanner(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
planner := resolveMigrationPlanner(MigrationBuildContext{
|
||||
Config: SyncConfig{
|
||||
SourceConfig: connection.ConnectionConfig{Type: "clickhouse"},
|
||||
TargetConfig: connection.ConnectionConfig{Type: "mysql"},
|
||||
},
|
||||
})
|
||||
if planner == nil {
|
||||
t.Fatalf("expected planner")
|
||||
}
|
||||
if planner.Name() != "clickhouse-mysql-planner" {
|
||||
t.Fatalf("unexpected planner: %s", planner.Name())
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveMigrationPlanner_UsesMySQLMongoPlanner(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
planner := resolveMigrationPlanner(MigrationBuildContext{
|
||||
Config: SyncConfig{
|
||||
SourceConfig: connection.ConnectionConfig{Type: "mysql"},
|
||||
TargetConfig: connection.ConnectionConfig{Type: "mongodb"},
|
||||
},
|
||||
})
|
||||
if planner == nil || planner.Name() != "mysql-mongo-planner" {
|
||||
t.Fatalf("unexpected planner: %v", planner)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveMigrationPlanner_UsesMongoMySQLPlanner(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
planner := resolveMigrationPlanner(MigrationBuildContext{
|
||||
Config: SyncConfig{
|
||||
SourceConfig: connection.ConnectionConfig{Type: "mongodb"},
|
||||
TargetConfig: connection.ConnectionConfig{Type: "mysql"},
|
||||
},
|
||||
})
|
||||
if planner == nil || planner.Name() != "mongo-mysql-planner" {
|
||||
t.Fatalf("unexpected planner: %v", planner)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveMigrationPlanner_UsesMongoPGLikePlanner(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
planner := resolveMigrationPlanner(MigrationBuildContext{
|
||||
Config: SyncConfig{
|
||||
SourceConfig: connection.ConnectionConfig{Type: "mongodb"},
|
||||
TargetConfig: connection.ConnectionConfig{Type: "postgres"},
|
||||
},
|
||||
})
|
||||
if planner == nil || planner.Name() != "mongo-pglike-planner" {
|
||||
t.Fatalf("unexpected planner: %v", planner)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveMigrationPlanner_UsesPGLikeMongoPlanner(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
planner := resolveMigrationPlanner(MigrationBuildContext{
|
||||
Config: SyncConfig{
|
||||
SourceConfig: connection.ConnectionConfig{Type: "postgres"},
|
||||
TargetConfig: connection.ConnectionConfig{Type: "mongodb"},
|
||||
},
|
||||
})
|
||||
if planner == nil || planner.Name() != "pglike-mongo-planner" {
|
||||
t.Fatalf("unexpected planner: %v", planner)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveMigrationPlanner_UsesClickHouseMongoPlanner(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
planner := resolveMigrationPlanner(MigrationBuildContext{
|
||||
Config: SyncConfig{
|
||||
SourceConfig: connection.ConnectionConfig{Type: "clickhouse"},
|
||||
TargetConfig: connection.ConnectionConfig{Type: "mongodb"},
|
||||
},
|
||||
})
|
||||
if planner == nil || planner.Name() != "clickhouse-mongo-planner" {
|
||||
t.Fatalf("unexpected planner: %v", planner)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveMigrationPlanner_UsesTDengineMongoPlanner(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
planner := resolveMigrationPlanner(MigrationBuildContext{
|
||||
Config: SyncConfig{
|
||||
SourceConfig: connection.ConnectionConfig{Type: "tdengine"},
|
||||
TargetConfig: connection.ConnectionConfig{Type: "mongodb"},
|
||||
},
|
||||
})
|
||||
if planner == nil || planner.Name() != "tdengine-mongo-planner" {
|
||||
t.Fatalf("unexpected planner: %v", planner)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveMigrationPlanner_UsesMySQLPGLikePlannerForDirosToPostgres(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
planner := resolveMigrationPlanner(MigrationBuildContext{
|
||||
Config: SyncConfig{
|
||||
SourceConfig: connection.ConnectionConfig{Type: "diros"},
|
||||
TargetConfig: connection.ConnectionConfig{Type: "postgres"},
|
||||
},
|
||||
})
|
||||
if planner == nil || planner.Name() != "mysql-pglike-planner" {
|
||||
t.Fatalf("unexpected planner: %v", planner)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveMigrationPlanner_UsesPGLikeMySQLPlannerForPostgresToDiros(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
planner := resolveMigrationPlanner(MigrationBuildContext{
|
||||
Config: SyncConfig{
|
||||
SourceConfig: connection.ConnectionConfig{Type: "postgres"},
|
||||
TargetConfig: connection.ConnectionConfig{Type: "diros"},
|
||||
},
|
||||
})
|
||||
if planner == nil || planner.Name() != "pglike-mysql-planner" {
|
||||
t.Fatalf("unexpected planner: %v", planner)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveMigrationPlanner_UsesMySQLPGLikePlannerForMySQLToDuckDB(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
planner := resolveMigrationPlanner(MigrationBuildContext{
|
||||
Config: SyncConfig{
|
||||
SourceConfig: connection.ConnectionConfig{Type: "mysql"},
|
||||
TargetConfig: connection.ConnectionConfig{Type: "duckdb"},
|
||||
},
|
||||
})
|
||||
if planner == nil || planner.Name() != "mysql-pglike-planner" {
|
||||
t.Fatalf("unexpected planner: %v", planner)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveMigrationPlanner_UsesPGLikeClickHousePlanner(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
planner := resolveMigrationPlanner(MigrationBuildContext{
|
||||
Config: SyncConfig{
|
||||
SourceConfig: connection.ConnectionConfig{Type: "postgres"},
|
||||
TargetConfig: connection.ConnectionConfig{Type: "clickhouse"},
|
||||
},
|
||||
})
|
||||
if planner == nil || planner.Name() != "pglike-clickhouse-planner" {
|
||||
t.Fatalf("unexpected planner: %v", planner)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveMigrationPlanner_UsesPGLikeMySQLPlannerForDuckDBToMySQL(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
planner := resolveMigrationPlanner(MigrationBuildContext{
|
||||
Config: SyncConfig{
|
||||
SourceConfig: connection.ConnectionConfig{Type: "duckdb"},
|
||||
TargetConfig: connection.ConnectionConfig{Type: "mysql"},
|
||||
},
|
||||
})
|
||||
if planner == nil || planner.Name() != "pglike-mysql-planner" {
|
||||
t.Fatalf("unexpected planner: %v", planner)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveMigrationPlanner_UsesMySQLPGLikePlannerForSphinxToPostgres(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
planner := resolveMigrationPlanner(MigrationBuildContext{
|
||||
Config: SyncConfig{
|
||||
SourceConfig: connection.ConnectionConfig{Type: "sphinx"},
|
||||
TargetConfig: connection.ConnectionConfig{Type: "postgres"},
|
||||
},
|
||||
})
|
||||
if planner == nil || planner.Name() != "mysql-pglike-planner" {
|
||||
t.Fatalf("unexpected planner: %v", planner)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveMigrationPlanner_UsesPGLikeMySQLPlannerForCustomKingbaseToMySQL(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
planner := resolveMigrationPlanner(MigrationBuildContext{
|
||||
Config: SyncConfig{
|
||||
SourceConfig: connection.ConnectionConfig{Type: "custom", Driver: "kingbase8"},
|
||||
TargetConfig: connection.ConnectionConfig{Type: "mysql"},
|
||||
},
|
||||
})
|
||||
if planner == nil || planner.Name() != "pglike-mysql-planner" {
|
||||
t.Fatalf("unexpected planner: %v", planner)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveMigrationPlanner_UsesMySQLPGLikePlannerForMySQLToCustomPostgres(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
planner := resolveMigrationPlanner(MigrationBuildContext{
|
||||
Config: SyncConfig{
|
||||
SourceConfig: connection.ConnectionConfig{Type: "mysql"},
|
||||
TargetConfig: connection.ConnectionConfig{Type: "custom", Driver: "postgresql"},
|
||||
},
|
||||
})
|
||||
if planner == nil || planner.Name() != "mysql-pglike-planner" {
|
||||
t.Fatalf("unexpected planner: %v", planner)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveMigrationPlanner_UsesTDengineMySQLPlanner(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
planner := resolveMigrationPlanner(MigrationBuildContext{
|
||||
Config: SyncConfig{
|
||||
SourceConfig: connection.ConnectionConfig{Type: "tdengine"},
|
||||
TargetConfig: connection.ConnectionConfig{Type: "mysql"},
|
||||
},
|
||||
})
|
||||
if planner == nil || planner.Name() != "tdengine-mysql-planner" {
|
||||
t.Fatalf("unexpected planner: %v", planner)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveMigrationPlanner_UsesTDenginePGLikePlanner(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
planner := resolveMigrationPlanner(MigrationBuildContext{
|
||||
Config: SyncConfig{
|
||||
SourceConfig: connection.ConnectionConfig{Type: "tdengine"},
|
||||
TargetConfig: connection.ConnectionConfig{Type: "kingbase"},
|
||||
},
|
||||
})
|
||||
if planner == nil || planner.Name() != "tdengine-pglike-planner" {
|
||||
t.Fatalf("unexpected planner: %v", planner)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveMigrationPlanner_UsesMySQLLikeTDenginePlanner(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
planner := resolveMigrationPlanner(MigrationBuildContext{
|
||||
Config: SyncConfig{
|
||||
SourceConfig: connection.ConnectionConfig{Type: "mysql"},
|
||||
TargetConfig: connection.ConnectionConfig{Type: "tdengine"},
|
||||
},
|
||||
})
|
||||
if planner == nil || planner.Name() != "mysqllike-tdengine-planner" {
|
||||
t.Fatalf("unexpected planner: %v", planner)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveMigrationPlanner_UsesPGLikeTDenginePlanner(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
planner := resolveMigrationPlanner(MigrationBuildContext{
|
||||
Config: SyncConfig{
|
||||
SourceConfig: connection.ConnectionConfig{Type: "postgres"},
|
||||
TargetConfig: connection.ConnectionConfig{Type: "tdengine"},
|
||||
},
|
||||
})
|
||||
if planner == nil || planner.Name() != "pglike-tdengine-planner" {
|
||||
t.Fatalf("unexpected planner: %v", planner)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveMigrationPlanner_UsesClickHouseTDenginePlanner(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
planner := resolveMigrationPlanner(MigrationBuildContext{
|
||||
Config: SyncConfig{
|
||||
SourceConfig: connection.ConnectionConfig{Type: "clickhouse"},
|
||||
TargetConfig: connection.ConnectionConfig{Type: "tdengine"},
|
||||
},
|
||||
})
|
||||
if planner == nil || planner.Name() != "clickhouse-tdengine-planner" {
|
||||
t.Fatalf("unexpected planner: %v", planner)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveMigrationPlanner_UsesClickHousePGLikePlanner(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
planner := resolveMigrationPlanner(MigrationBuildContext{
|
||||
Config: SyncConfig{
|
||||
SourceConfig: connection.ConnectionConfig{Type: "clickhouse"},
|
||||
TargetConfig: connection.ConnectionConfig{Type: "postgres"},
|
||||
},
|
||||
})
|
||||
if planner == nil || planner.Name() != "clickhouse-pglike-planner" {
|
||||
t.Fatalf("unexpected planner: %v", planner)
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveMigrationPlanner_UsesTDengineTDenginePlanner(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
planner := resolveMigrationPlanner(MigrationBuildContext{
|
||||
Config: SyncConfig{
|
||||
SourceConfig: connection.ConnectionConfig{Type: "tdengine"},
|
||||
TargetConfig: connection.ConnectionConfig{Type: "tdengine"},
|
||||
},
|
||||
})
|
||||
if planner == nil || planner.Name() != "tdengine-tdengine-planner" {
|
||||
t.Fatalf("unexpected planner: %v", planner)
|
||||
}
|
||||
}
|
||||
104
internal/sync/migration_kernel_types.go
Normal file
104
internal/sync/migration_kernel_types.go
Normal file
@@ -0,0 +1,104 @@
|
||||
package sync

import (
	"GoNavi-Wails/internal/connection"
	"GoNavi-Wails/internal/db"
)

// MigrationDataModel classifies the logical data model of a migration endpoint
// (relational table, document collection, columnar store, etc.).
type MigrationDataModel string

const (
	MigrationDataModelRelational MigrationDataModel = "relational"
	MigrationDataModelDocument   MigrationDataModel = "document"
	MigrationDataModelColumnar   MigrationDataModel = "columnar"
	MigrationDataModelTimeSeries MigrationDataModel = "timeseries"
	MigrationDataModelKeyValue   MigrationDataModel = "keyvalue"
	MigrationDataModelCustom     MigrationDataModel = "custom"
)

// MigrationObjectKind names the kind of container object being migrated.
type MigrationObjectKind string

const (
	MigrationObjectKindTable      MigrationObjectKind = "table"
	MigrationObjectKindCollection MigrationObjectKind = "collection"
	MigrationObjectKindKeyspace   MigrationObjectKind = "keyspace"
)

// MigrationSupportLevel grades how well a source/target pair is supported by a
// planner: full, partial (with degradations), planned (not yet implemented),
// or unsupported.
type MigrationSupportLevel string

const (
	MigrationSupportLevelFull        MigrationSupportLevel = "full"
	MigrationSupportLevelPartial     MigrationSupportLevel = "partial"
	MigrationSupportLevelPlanned     MigrationSupportLevel = "planned"
	MigrationSupportLevelUnsupported MigrationSupportLevel = "unsupported"
)

// CanonicalFieldSpec is a database-neutral description of one field/column.
type CanonicalFieldSpec struct {
	Name          string
	SourceType    string  // raw type string as reported by the source engine
	CanonicalType string  // engine-neutral type used for cross-engine mapping
	Nullable      bool
	DefaultValue  *string // nil when no default is defined
	AutoIncrement bool
	Comment       string
	NestedPath    string  // presumably a dotted path for nested document fields — TODO confirm
	Confidence    float64 // inference confidence; range not established here — TODO confirm
}

// CanonicalIndexSpec is a database-neutral description of one index.
type CanonicalIndexSpec struct {
	Name            string
	Kind            string
	Columns         []string
	Expression      string
	PrefixLength    int
	Supported       bool   // whether the target engine can represent this index
	DegradeStrategy string // how to degrade when not fully supported
	Unique          bool
}

// CanonicalConstraintSpec is a database-neutral description of one constraint.
type CanonicalConstraintSpec struct {
	Name    string
	Kind    string
	Columns []string
	RefName string
}

// CanonicalObjectSpec describes a whole table/collection in engine-neutral
// terms: its fields, primary key, indexes and constraints.
type CanonicalObjectSpec struct {
	Name        string
	Schema      string
	Kind        MigrationObjectKind
	Fields      []CanonicalFieldSpec
	PrimaryKey  []string
	Indexes     []CanonicalIndexSpec
	Constraints []CanonicalConstraintSpec
	Comments    []string
	SourceHints map[string]string
}

// SchemaInferenceIssue records one problem found while inferring a schema,
// with a suggested resolution.
type SchemaInferenceIssue struct {
	Field      string
	Level      string
	Message    string
	Resolution string
}

// SchemaInferenceResult bundles an inferred object spec with the issues found,
// the sample size used, and whether a human should review the result.
type SchemaInferenceResult struct {
	Object      CanonicalObjectSpec
	Issues      []SchemaInferenceIssue
	SampleSize  int
	Confidence  float64
	NeedsReview bool
}

// MigrationBuildContext carries everything a planner needs to build a plan for
// one table: the sync configuration, the table name, and live handles to the
// source and target databases.
type MigrationBuildContext struct {
	Config    SyncConfig
	TableName string
	SourceDB  db.Database
	TargetDB  db.Database
}

// MigrationPlanner produces a schema migration plan for a specific
// source/target engine pair.
type MigrationPlanner interface {
	Name() string
	SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel
	// BuildPlan returns the plan plus the resolved source and target column
	// definitions (either slice may be nil depending on the pair).
	BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error)
}
|
||||
603
internal/sync/migration_mongodb.go
Normal file
603
internal/sync/migration_mongodb.go
Normal file
@@ -0,0 +1,603 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"GoNavi-Wails/internal/connection"
|
||||
"GoNavi-Wails/internal/db"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// The four thin wrappers below route every tabular source engine (MySQL,
// PG-like, ClickHouse, TDengine) into the shared tabular→MongoDB planner;
// they exist so the planner registry can key on concrete engine pairs.

func buildMySQLToMongoPlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
	return buildTabularToMongoPlan(config, tableName, sourceDB, targetDB)
}

func buildPGLikeToMongoPlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
	return buildTabularToMongoPlan(config, tableName, sourceDB, targetDB)
}

func buildClickHouseToMongoPlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
	return buildTabularToMongoPlan(config, tableName, sourceDB, targetDB)
}

func buildTDengineToMongoPlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
	return buildTabularToMongoPlan(config, tableName, sourceDB, targetDB)
}
|
||||
|
||||
// buildTabularToMongoPlan builds the migration plan for any tabular source
// feeding a MongoDB target. It resolves schema/table naming for both sides,
// verifies the source table exists, checks whether the target collection
// exists, and — depending on the configured target-table strategy — emits a
// collection-create command (and optionally index-create commands) into the
// plan. The target column slice is always nil because MongoDB is schemaless.
func buildTabularToMongoPlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
	plan := SchemaMigrationPlan{}
	sourceType := resolveMigrationDBType(config.SourceConfig)
	targetType := resolveMigrationDBType(config.TargetConfig)
	plan.SourceSchema, plan.SourceTable = normalizeSchemaAndTable(sourceType, config.SourceConfig.Database, tableName)
	plan.TargetSchema, plan.TargetTable = normalizeSchemaAndTable(targetType, config.TargetConfig.Database, tableName)
	plan.SourceQueryTable = qualifiedNameForQuery(sourceType, plan.SourceSchema, plan.SourceTable, tableName)
	plan.TargetQueryTable = qualifiedNameForQuery(targetType, plan.TargetSchema, plan.TargetTable, tableName)
	plan.PlannedAction = "使用已有目标集合导入"

	// The source must be a real table with columns; otherwise fail early.
	sourceCols, sourceExists, err := inspectTableColumns(sourceDB, plan.SourceSchema, plan.SourceTable)
	if err != nil {
		return plan, nil, nil, fmt.Errorf("获取源表字段失败: %w", err)
	}
	if !sourceExists {
		return plan, nil, nil, fmt.Errorf("源表不存在或无列定义: %s", tableName)
	}

	targetExists, err := inspectMongoCollection(targetDB, plan.TargetSchema, plan.TargetTable)
	if err != nil {
		return plan, sourceCols, nil, fmt.Errorf("检查目标集合失败: %w", err)
	}
	plan.TargetTableExists = targetExists

	strategy := normalizeTargetTableStrategy(config.TargetTableStrategy)
	// Existing collection: nothing to create, no column validation possible.
	if targetExists {
		plan.Warnings = append(plan.Warnings, "MongoDB 为弱 schema 目标,字段结构以写入文档为准,不执行目标列校验")
		return dedupeSchemaMigrationPlan(plan), sourceCols, nil, nil
	}

	switch strategy {
	case "existing_only":
		// Strategy forbids creation; surface that as a planned action + warning.
		plan.PlannedAction = "目标集合不存在,需先手工创建"
		plan.Warnings = append(plan.Warnings, "当前策略要求目标集合已存在,执行时不会自动创建")
		return dedupeSchemaMigrationPlan(plan), sourceCols, nil, nil
	case "smart", "auto_create_if_missing":
		plan.AutoCreate = true
		plan.PlannedAction = "目标集合不存在,将自动创建集合后导入"
		createCmd, err := buildMongoCreateCollectionCommand(plan.TargetTable)
		if err != nil {
			return plan, sourceCols, nil, err
		}
		plan.PreDataSQL = append(plan.PreDataSQL, createCmd)
		if config.CreateIndexes {
			// Index migration is best-effort: a read failure downgrades to a
			// warning rather than aborting the whole plan.
			indexCmds, warnings, unsupported, created, skipped, err := buildMongoIndexCommands(sourceDB, plan.SourceSchema, plan.SourceTable, plan.TargetTable)
			if err != nil {
				plan.Warnings = append(plan.Warnings, fmt.Sprintf("读取源表索引失败,已跳过索引迁移:%v", err))
			} else {
				plan.PostDataSQL = append(plan.PostDataSQL, indexCmds...)
				plan.Warnings = append(plan.Warnings, warnings...)
				plan.UnsupportedObjects = append(plan.UnsupportedObjects, unsupported...)
				plan.IndexesToCreate = created
				plan.IndexesSkipped = skipped
			}
		}
		return dedupeSchemaMigrationPlan(plan), sourceCols, nil, nil
	default:
		// Unknown strategy: fall back to import-into-existing semantics.
		return dedupeSchemaMigrationPlan(plan), sourceCols, nil, nil
	}
}
|
||||
|
||||
// buildMongoToMySQLPlan builds the migration plan for a MongoDB source feeding
// a MySQL target. Source columns are inferred from sampled documents; if the
// target table exists, missing columns are reported (and optionally added);
// otherwise the configured strategy decides whether to auto-create the table
// (and optionally its indexes).
func buildMongoToMySQLPlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
	plan := SchemaMigrationPlan{}
	plan.SourceSchema, plan.SourceTable = normalizeSchemaAndTable(config.SourceConfig.Type, config.SourceConfig.Database, tableName)
	plan.TargetSchema, plan.TargetTable = normalizeSchemaAndTable(config.TargetConfig.Type, config.TargetConfig.Database, tableName)
	plan.SourceQueryTable = qualifiedNameForQuery(config.SourceConfig.Type, plan.SourceSchema, plan.SourceTable, tableName)
	plan.TargetQueryTable = qualifiedNameForQuery(config.TargetConfig.Type, plan.TargetSchema, plan.TargetTable, tableName)
	plan.PlannedAction = "使用已有目标表导入"

	// Infer a relational column set from document samples; fail if nothing
	// usable was inferred.
	sourceCols, warnings, err := inferMongoCollectionColumns(sourceDB, plan.SourceTable)
	if err != nil {
		return plan, nil, nil, err
	}
	plan.Warnings = append(plan.Warnings, warnings...)
	if len(sourceCols) == 0 {
		return plan, nil, nil, fmt.Errorf("源集合未推断出可迁移字段: %s", tableName)
	}

	targetCols, targetExists, err := inspectTableColumns(targetDB, plan.TargetSchema, plan.TargetTable)
	if err != nil {
		return plan, sourceCols, nil, fmt.Errorf("获取目标表字段失败: %w", err)
	}
	plan.TargetTableExists = targetExists

	strategy := normalizeTargetTableStrategy(config.TargetTableStrategy)
	if targetExists {
		// Report columns the target is missing; optionally generate ALTERs.
		missing := diffMissingColumnNames(sourceCols, targetCols)
		if len(missing) > 0 {
			plan.Warnings = append(plan.Warnings, fmt.Sprintf("目标表缺失字段 %d 个:%s", len(missing), strings.Join(missing, ", ")))
		}
		if config.AutoAddColumns {
			addSQL, addWarnings := buildMongoToMySQLAddColumnSQL(plan.TargetQueryTable, sourceCols, targetCols)
			plan.PreDataSQL = append(plan.PreDataSQL, addSQL...)
			plan.Warnings = append(plan.Warnings, addWarnings...)
			if len(addSQL) > 0 {
				plan.PlannedAction = fmt.Sprintf("补齐缺失字段(%d)后导入", len(addSQL))
			}
		}
		return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
	}

	switch strategy {
	case "existing_only":
		plan.PlannedAction = "目标表不存在,需先手工创建"
		plan.Warnings = append(plan.Warnings, "当前策略要求目标表已存在,执行时不会自动建表")
		return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
	case "smart", "auto_create_if_missing":
		plan.AutoCreate = true
		plan.PlannedAction = "目标表不存在,将自动建表后导入"
		createSQL, postSQL, moreWarnings, unsupported, idxCreate, idxSkip, err := buildMongoToMySQLCreateTablePlan(config, plan.TargetQueryTable, sourceCols, sourceDB, plan.SourceSchema, plan.SourceTable)
		if err != nil {
			return plan, sourceCols, targetCols, err
		}
		plan.CreateTableSQL = createSQL
		plan.PostDataSQL = append(plan.PostDataSQL, postSQL...)
		plan.Warnings = append(plan.Warnings, moreWarnings...)
		plan.UnsupportedObjects = append(plan.UnsupportedObjects, unsupported...)
		plan.IndexesToCreate = idxCreate
		plan.IndexesSkipped = idxSkip
		return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
	default:
		// Unknown strategy: fall back to import-into-existing semantics.
		return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
	}
}
|
||||
|
||||
func inspectMongoCollection(database db.Database, dbName, collection string) (bool, error) {
|
||||
items, err := database.GetTables(dbName)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
target := strings.TrimSpace(collection)
|
||||
for _, item := range items {
|
||||
if strings.EqualFold(strings.TrimSpace(item), target) {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// buildMongoCreateCollectionCommand renders the JSON runCommand payload that
// creates the given collection; surrounding whitespace is trimmed from the
// collection name before encoding.
func buildMongoCreateCollectionCommand(collection string) (string, error) {
	payload := map[string]interface{}{
		"create": strings.TrimSpace(collection),
	}
	encoded, err := json.Marshal(payload)
	if err != nil {
		return "", err
	}
	return string(encoded), nil
}
|
||||
|
||||
// buildMongoIndexCommands converts the source table's index definitions into
// MongoDB createIndexes command payloads for targetCollection.
//
// Returns, in order: the JSON command strings, de-duplicated warnings,
// de-duplicated descriptions of indexes that could not be migrated, the count
// of indexes that will be created, the count skipped, and any error from
// reading the source indexes. Primary-key indexes are ignored; prefix-length
// indexes are skipped as unsupported; non-btree kinds are migrated as plain
// indexes with a warning.
func buildMongoIndexCommands(sourceDB db.Database, dbName, tableName, targetCollection string) ([]string, []string, []string, int, int, error) {
	indexes, err := sourceDB.GetIndexes(dbName, tableName)
	if err != nil {
		return nil, nil, nil, 0, 0, err
	}
	grouped := groupIndexDefinitions(indexes)
	cmds := make([]string, 0, len(grouped))
	warnings := make([]string, 0)
	unsupported := make([]string, 0)
	created := 0
	skipped := 0
	for _, idx := range grouped {
		name := strings.TrimSpace(idx.Name)
		// The primary key is implicit in MongoDB (_id); nothing to migrate.
		if name == "" || strings.EqualFold(name, "primary") {
			continue
		}
		if len(idx.Columns) == 0 {
			skipped++
			unsupported = append(unsupported, fmt.Sprintf("索引 %s 缺少列定义,已跳过", name))
			continue
		}
		kind := strings.ToLower(strings.TrimSpace(idx.IndexType))
		// Prefix-length indexes (e.g. MySQL `col(10)`) have no Mongo equivalent.
		if idx.SubPart > 0 {
			skipped++
			unsupported = append(unsupported, fmt.Sprintf("索引 %s 使用前缀长度,MongoDB 目标暂不支持等价迁移", name))
			continue
		}
		if kind != "" && kind != "btree" {
			warnings = append(warnings, fmt.Sprintf("索引 %s 类型=%s 将按普通索引迁移到 MongoDB", name, idx.IndexType))
		}
		// Ascending (1) key direction for every column.
		keySpec := make(map[string]int)
		for _, col := range idx.Columns {
			keySpec[col] = 1
		}
		command := map[string]interface{}{
			"createIndexes": strings.TrimSpace(targetCollection),
			"indexes": []map[string]interface{}{{
				"name":   name,
				"key":    keySpec,
				"unique": idx.Unique,
			}},
		}
		data, err := json.Marshal(command)
		if err != nil {
			// Marshal failure affects only this index; keep processing the rest.
			skipped++
			unsupported = append(unsupported, fmt.Sprintf("索引 %s 生成 MongoDB createIndexes 命令失败:%v", name, err))
			continue
		}
		cmds = append(cmds, string(data))
		created++
	}
	return cmds, dedupeStrings(warnings), dedupeStrings(unsupported), created, skipped, nil
}
|
||||
|
||||
// inferMongoCollectionColumns samples up to 200 documents from the collection
// and infers a relational column definition per distinct field. Fields are
// ordered alphabetically with `_id` moved to the front and marked as the
// primary key. Returns the columns, de-duplicated inference warnings, and any
// query error. An empty collection yields a single `_id` primary-key column.
func inferMongoCollectionColumns(sourceDB db.Database, collection string) ([]connection.ColumnDefinition, []string, error) {
	// NOTE(review): the collection name is interpolated into the JSON query
	// unescaped — a name containing a double quote would break the query.
	query := fmt.Sprintf(`{"find":"%s","filter":{},"limit":200}`, strings.TrimSpace(collection))
	rows, _, err := sourceDB.Query(query)
	if err != nil {
		return nil, nil, fmt.Errorf("读取源集合样本失败: %w", err)
	}
	if len(rows) == 0 {
		return []connection.ColumnDefinition{{Name: "_id", Type: "varchar(64)", Nullable: "NO", Key: "PRI"}}, []string{"源集合暂无样本数据,仅按 `_id` 生成基础主键列"}, nil
	}
	// Union of field names across all sampled documents.
	fieldNames := make(map[string]struct{})
	for _, row := range rows {
		for key := range row {
			fieldNames[key] = struct{}{}
		}
	}
	orderedFields := make([]string, 0, len(fieldNames))
	for key := range fieldNames {
		orderedFields = append(orderedFields, key)
	}
	sort.Strings(orderedFields)
	if containsString(orderedFields, "_id") {
		orderedFields = moveStringToFront(orderedFields, "_id")
	}
	columns := make([]connection.ColumnDefinition, 0, len(orderedFields))
	warnings := make([]string, 0)
	for _, field := range orderedFields {
		typeName, nullable, fieldWarnings := inferMongoFieldType(rows, field)
		warnings = append(warnings, fieldWarnings...)
		col := connection.ColumnDefinition{
			Name:     field,
			Type:     typeName,
			Nullable: ternaryString(nullable, "YES", "NO"),
			Key:      "",
			Extra:    "",
		}
		// _id is always the non-null primary key regardless of inference.
		if field == "_id" {
			col.Key = "PRI"
			col.Nullable = "NO"
		}
		columns = append(columns, col)
	}
	return columns, dedupeStrings(warnings), nil
}
|
||||
|
||||
// inferMongoFieldType inspects the sampled documents and picks a MySQL-style
// column type for one field.
//
// Returns the column type, whether the field was missing/null in any sample
// (→ nullable), and a warning when the samples mixed several BSON value
// kinds. `_id` is always varchar(64) NOT NULL. Type precedence for mixed
// fields, widest first: json (complex) > varchar (string) > datetime > double
// > bigint > tinyint(1).
func inferMongoFieldType(rows []map[string]interface{}, field string) (string, bool, []string) {
	nullable := false
	hasString, hasBool, hasInt, hasFloat, hasTime, hasComplex := false, false, false, false, false, false
	for _, row := range rows {
		value, ok := row[field]
		if !ok || value == nil {
			nullable = true
			continue
		}
		switch value.(type) {
		case bool:
			hasBool = true
		case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64:
			hasInt = true
		case float32, float64:
			hasFloat = true
		case time.Time:
			hasTime = true
		case map[string]interface{}, []interface{}:
			hasComplex = true
		default:
			// Anything we cannot classify is treated as a string value.
			hasString = true
		}
	}
	kinds := 0
	for _, flag := range []bool{hasString, hasBool, hasInt, hasFloat, hasTime, hasComplex} {
		if flag {
			kinds++
		}
	}
	warnings := make([]string, 0)
	if kinds > 1 {
		warnings = append(warnings, fmt.Sprintf("字段 %s 存在多种 BSON 值类型,已按兼容类型降级", field))
	}
	if field == "_id" {
		return "varchar(64)", false, warnings
	}
	switch {
	case hasComplex:
		return "json", nullable, warnings
	case hasString:
		// BUGFIX: string must outrank the numeric/temporal kinds. Previously
		// hasTime/hasFloat/hasInt were checked first, so a field mixing strings
		// with e.g. ints degraded to bigint — a type that cannot store the
		// string samples, contradicting the "compatible type" warning above.
		return "varchar(255)", nullable, warnings
	case hasTime:
		return "datetime", nullable, warnings
	case hasFloat:
		return "double", nullable, warnings
	case hasInt:
		return "bigint", nullable, warnings
	case hasBool:
		return "tinyint(1)", nullable, warnings
	default:
		// No concrete values observed at all: fall back to a generic string.
		return "varchar(255)", nullable, warnings
	}
}
|
||||
|
||||
func buildMongoToMySQLAddColumnSQL(targetQueryTable string, sourceCols, targetCols []connection.ColumnDefinition) ([]string, []string) {
|
||||
targetSet := make(map[string]struct{}, len(targetCols))
|
||||
for _, col := range targetCols {
|
||||
key := strings.ToLower(strings.TrimSpace(col.Name))
|
||||
if key == "" {
|
||||
continue
|
||||
}
|
||||
targetSet[key] = struct{}{}
|
||||
}
|
||||
var sqlList []string
|
||||
for _, col := range sourceCols {
|
||||
key := strings.ToLower(strings.TrimSpace(col.Name))
|
||||
if key == "" {
|
||||
continue
|
||||
}
|
||||
if _, ok := targetSet[key]; ok {
|
||||
continue
|
||||
}
|
||||
sqlList = append(sqlList, fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s %s NULL",
|
||||
quoteQualifiedIdentByType("mysql", targetQueryTable),
|
||||
quoteIdentByType("mysql", col.Name),
|
||||
strings.TrimSpace(col.Type),
|
||||
))
|
||||
}
|
||||
return sqlList, nil
|
||||
}
|
||||
|
||||
// buildMongoToMySQLCreateTablePlan renders the CREATE TABLE statement for a
// MySQL target from Mongo-inferred source columns, plus (when
// config.CreateIndexes is set) CREATE INDEX statements derived from the source
// collection's indexes.
//
// Returns, in order: the CREATE TABLE SQL, post-data index SQL, de-duplicated
// warnings, de-duplicated unsupported-object notes, the count of indexes to
// create, the count skipped, and an error (index-read failures degrade to a
// warning instead of an error).
func buildMongoToMySQLCreateTablePlan(config SyncConfig, targetQueryTable string, sourceCols []connection.ColumnDefinition, sourceDB db.Database, sourceSchema, sourceTable string) (string, []string, []string, []string, int, int, error) {
	columnDefs := make([]string, 0, len(sourceCols)+1)
	warnings := make([]string, 0)
	unsupported := make([]string, 0)
	pkCols := make([]string, 0, 1)
	for _, col := range sourceCols {
		columnDef := fmt.Sprintf("%s %s", quoteIdentByType("mysql", col.Name), strings.TrimSpace(col.Type))
		if strings.EqualFold(strings.TrimSpace(col.Nullable), "NO") {
			columnDef += " NOT NULL"
		}
		columnDefs = append(columnDefs, columnDef)
		if col.Key == "PRI" || col.Key == "PK" {
			pkCols = append(pkCols, quoteIdentByType("mysql", col.Name))
		}
	}
	if len(pkCols) > 0 {
		columnDefs = append(columnDefs, fmt.Sprintf("PRIMARY KEY (%s)", strings.Join(pkCols, ", ")))
	} else {
		warnings = append(warnings, "MongoDB 源集合未推断出稳定主键,目标表将不自动创建主键")
	}
	createSQL := fmt.Sprintf("CREATE TABLE %s (\n  %s\n)", quoteQualifiedIdentByType("mysql", targetQueryTable), strings.Join(columnDefs, ",\n  "))
	if !config.CreateIndexes {
		return createSQL, nil, dedupeStrings(warnings), dedupeStrings(unsupported), 0, 0, nil
	}
	indexes, err := sourceDB.GetIndexes(sourceSchema, sourceTable)
	if err != nil {
		// Best-effort: missing index metadata must not block table creation.
		warnings = append(warnings, fmt.Sprintf("读取源集合索引失败,已跳过索引迁移:%v", err))
		return createSQL, nil, dedupeStrings(warnings), dedupeStrings(unsupported), 0, 0, nil
	}
	grouped := groupIndexDefinitions(indexes)
	postSQL := make([]string, 0, len(grouped))
	created := 0
	skipped := 0
	for _, idx := range grouped {
		name := strings.TrimSpace(idx.Name)
		// Skip Mongo's implicit `_id_` index and any primary index.
		if name == "" || strings.EqualFold(name, "_id_") || strings.EqualFold(name, "primary") {
			continue
		}
		if len(idx.Columns) == 0 {
			skipped++
			unsupported = append(unsupported, fmt.Sprintf("索引 %s 缺少列定义,已跳过", name))
			continue
		}
		quotedCols := make([]string, 0, len(idx.Columns))
		for _, col := range idx.Columns {
			quotedCols = append(quotedCols, quoteIdentByType("mysql", col))
		}
		prefix := "CREATE INDEX"
		if idx.Unique {
			prefix = "CREATE UNIQUE INDEX"
		}
		postSQL = append(postSQL, fmt.Sprintf("%s %s ON %s (%s)", prefix, quoteIdentByType("mysql", name), quoteQualifiedIdentByType("mysql", targetQueryTable), strings.Join(quotedCols, ", ")))
		created++
	}
	return createSQL, postSQL, dedupeStrings(warnings), dedupeStrings(unsupported), created, skipped, nil
}
|
||||
|
||||
// containsString reports whether target appears (exact match) in items.
func containsString(items []string, target string) bool {
	found := false
	for i := 0; i < len(items) && !found; i++ {
		found = items[i] == target
	}
	return found
}
|
||||
|
||||
// moveStringToFront returns a new slice with every occurrence of target
// removed and a single target prepended; the remaining elements keep their
// relative order. The input slice is not modified.
func moveStringToFront(items []string, target string) []string {
	rest := make([]string, 0, len(items))
	for _, candidate := range items {
		if candidate != target {
			rest = append(rest, candidate)
		}
	}
	result := make([]string, 0, len(rest)+1)
	result = append(result, target)
	return append(result, rest...)
}
|
||||
|
||||
// buildMongoToPGLikePlan builds the migration plan for a MongoDB source
// feeding a PG-like target (postgres, kingbase, ...). It mirrors
// buildMongoToMySQLPlan but maps inferred MySQL-flavoured column types to
// their PG equivalents and quotes identifiers per the concrete target type.
func buildMongoToPGLikePlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
	plan := SchemaMigrationPlan{}
	targetType := strings.ToLower(strings.TrimSpace(config.TargetConfig.Type))
	plan.SourceSchema, plan.SourceTable = normalizeSchemaAndTable(config.SourceConfig.Type, config.SourceConfig.Database, tableName)
	plan.TargetSchema, plan.TargetTable = normalizeSchemaAndTable(config.TargetConfig.Type, config.TargetConfig.Database, tableName)
	plan.SourceQueryTable = qualifiedNameForQuery(config.SourceConfig.Type, plan.SourceSchema, plan.SourceTable, tableName)
	plan.TargetQueryTable = qualifiedNameForQuery(config.TargetConfig.Type, plan.TargetSchema, plan.TargetTable, tableName)
	plan.PlannedAction = "使用已有目标表导入"

	// Infer a relational column set from document samples.
	sourceCols, warnings, err := inferMongoCollectionColumns(sourceDB, plan.SourceTable)
	if err != nil {
		return plan, nil, nil, err
	}
	plan.Warnings = append(plan.Warnings, warnings...)
	if len(sourceCols) == 0 {
		return plan, nil, nil, fmt.Errorf("源集合未推断出可迁移字段: %s", tableName)
	}

	targetCols, targetExists, err := inspectTableColumns(targetDB, plan.TargetSchema, plan.TargetTable)
	if err != nil {
		return plan, sourceCols, nil, fmt.Errorf("获取目标表字段失败: %w", err)
	}
	plan.TargetTableExists = targetExists

	strategy := normalizeTargetTableStrategy(config.TargetTableStrategy)
	if targetExists {
		// Report columns the target is missing; optionally generate ALTERs.
		missing := diffMissingColumnNames(sourceCols, targetCols)
		if len(missing) > 0 {
			plan.Warnings = append(plan.Warnings, fmt.Sprintf("目标表缺失字段 %d 个:%s", len(missing), strings.Join(missing, ", ")))
		}
		if config.AutoAddColumns {
			addSQL, addWarnings := buildMongoToPGLikeAddColumnSQL(targetType, plan.TargetQueryTable, sourceCols, targetCols)
			plan.PreDataSQL = append(plan.PreDataSQL, addSQL...)
			plan.Warnings = append(plan.Warnings, addWarnings...)
			if len(addSQL) > 0 {
				plan.PlannedAction = fmt.Sprintf("补齐缺失字段(%d)后导入", len(addSQL))
			}
		}
		return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
	}

	switch strategy {
	case "existing_only":
		plan.PlannedAction = "目标表不存在,需先手工创建"
		plan.Warnings = append(plan.Warnings, "当前策略要求目标表已存在,执行时不会自动建表")
		return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
	case "smart", "auto_create_if_missing":
		plan.AutoCreate = true
		plan.PlannedAction = "目标表不存在,将自动建表后导入"
		createSQL, postSQL, moreWarnings, unsupported, idxCreate, idxSkip, err := buildMongoToPGLikeCreateTablePlan(targetType, config, plan.TargetQueryTable, sourceCols, sourceDB, plan.SourceSchema, plan.SourceTable)
		if err != nil {
			return plan, sourceCols, targetCols, err
		}
		plan.CreateTableSQL = createSQL
		plan.PostDataSQL = append(plan.PostDataSQL, postSQL...)
		plan.Warnings = append(plan.Warnings, moreWarnings...)
		plan.UnsupportedObjects = append(plan.UnsupportedObjects, unsupported...)
		plan.IndexesToCreate = idxCreate
		plan.IndexesSkipped = idxSkip
		return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
	default:
		// Unknown strategy: fall back to import-into-existing semantics.
		return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
	}
}
|
||||
|
||||
func buildMongoToPGLikeAddColumnSQL(targetType string, targetQueryTable string, sourceCols, targetCols []connection.ColumnDefinition) ([]string, []string) {
|
||||
targetSet := make(map[string]struct{}, len(targetCols))
|
||||
for _, col := range targetCols {
|
||||
key := strings.ToLower(strings.TrimSpace(col.Name))
|
||||
if key == "" {
|
||||
continue
|
||||
}
|
||||
targetSet[key] = struct{}{}
|
||||
}
|
||||
var sqlList []string
|
||||
var warnings []string
|
||||
for _, col := range sourceCols {
|
||||
key := strings.ToLower(strings.TrimSpace(col.Name))
|
||||
if key == "" {
|
||||
continue
|
||||
}
|
||||
if _, ok := targetSet[key]; ok {
|
||||
continue
|
||||
}
|
||||
colType, mapWarnings := mapMongoInferredColumnToPGLike(col)
|
||||
warnings = append(warnings, mapWarnings...)
|
||||
sqlList = append(sqlList, fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s %s NULL",
|
||||
quoteQualifiedIdentByType(targetType, targetQueryTable),
|
||||
quoteIdentByType(targetType, col.Name),
|
||||
colType,
|
||||
))
|
||||
}
|
||||
return sqlList, dedupeStrings(warnings)
|
||||
}
|
||||
|
||||
// buildMongoToPGLikeCreateTablePlan renders the CREATE TABLE statement for a
// PG-like target from Mongo-inferred source columns, plus (when
// config.CreateIndexes is set) CREATE INDEX statements derived from the
// source collection's indexes.
//
// Returns, in order: the CREATE TABLE SQL, post-data index SQL, de-duplicated
// warnings, de-duplicated unsupported-object notes, the count of indexes to
// create, the count skipped, and an error (index-read failures degrade to a
// warning instead of an error). Note: unlike the MySQL variant, a missing
// primary key produces no warning here.
func buildMongoToPGLikeCreateTablePlan(targetType string, config SyncConfig, targetQueryTable string, sourceCols []connection.ColumnDefinition, sourceDB db.Database, sourceSchema, sourceTable string) (string, []string, []string, []string, int, int, error) {
	columnDefs := make([]string, 0, len(sourceCols)+1)
	warnings := make([]string, 0)
	unsupported := make([]string, 0)
	pkCols := make([]string, 0, 1)
	for _, col := range sourceCols {
		colType, colWarnings := mapMongoInferredColumnToPGLike(col)
		warnings = append(warnings, colWarnings...)
		parts := []string{colType}
		if strings.EqualFold(strings.TrimSpace(col.Nullable), "NO") {
			parts = append(parts, "NOT NULL")
		}
		columnDefs = append(columnDefs, fmt.Sprintf("%s %s", quoteIdentByType(targetType, col.Name), strings.Join(parts, " ")))
		if col.Key == "PRI" || col.Key == "PK" {
			pkCols = append(pkCols, quoteIdentByType(targetType, col.Name))
		}
	}
	if len(pkCols) > 0 {
		columnDefs = append(columnDefs, fmt.Sprintf("PRIMARY KEY (%s)", strings.Join(pkCols, ", ")))
	}
	createSQL := fmt.Sprintf("CREATE TABLE %s (\n  %s\n)", quoteQualifiedIdentByType(targetType, targetQueryTable), strings.Join(columnDefs, ",\n  "))
	if !config.CreateIndexes {
		return createSQL, nil, dedupeStrings(warnings), dedupeStrings(unsupported), 0, 0, nil
	}
	indexes, err := sourceDB.GetIndexes(sourceSchema, sourceTable)
	if err != nil {
		// Best-effort: missing index metadata must not block table creation.
		warnings = append(warnings, fmt.Sprintf("读取源集合索引失败,已跳过索引迁移:%v", err))
		return createSQL, nil, dedupeStrings(warnings), dedupeStrings(unsupported), 0, 0, nil
	}
	grouped := groupIndexDefinitions(indexes)
	postSQL := make([]string, 0, len(grouped))
	created := 0
	skipped := 0
	for _, idx := range grouped {
		name := strings.TrimSpace(idx.Name)
		// Skip Mongo's implicit `_id_` index and any primary index.
		if name == "" || strings.EqualFold(name, "_id_") || strings.EqualFold(name, "primary") {
			continue
		}
		if len(idx.Columns) == 0 {
			skipped++
			unsupported = append(unsupported, fmt.Sprintf("索引 %s 缺少列定义,已跳过", name))
			continue
		}
		quotedCols := make([]string, 0, len(idx.Columns))
		for _, col := range idx.Columns {
			quotedCols = append(quotedCols, quoteIdentByType(targetType, col))
		}
		prefix := "CREATE INDEX"
		if idx.Unique {
			prefix = "CREATE UNIQUE INDEX"
		}
		postSQL = append(postSQL, fmt.Sprintf("%s %s ON %s (%s)", prefix, quoteIdentByType(targetType, name), quoteQualifiedIdentByType(targetType, targetQueryTable), strings.Join(quotedCols, ", ")))
		created++
	}
	return createSQL, postSQL, dedupeStrings(warnings), dedupeStrings(unsupported), created, skipped, nil
}
|
||||
|
||||
func mapMongoInferredColumnToPGLike(col connection.ColumnDefinition) (string, []string) {
|
||||
raw := strings.ToLower(strings.TrimSpace(col.Type))
|
||||
warnings := make([]string, 0)
|
||||
switch {
|
||||
case strings.HasPrefix(raw, "varchar"):
|
||||
return col.Type, warnings
|
||||
case raw == "json":
|
||||
return "jsonb", warnings
|
||||
case raw == "datetime":
|
||||
return "timestamp", warnings
|
||||
case raw == "tinyint(1)":
|
||||
return "boolean", warnings
|
||||
case raw == "double":
|
||||
return "double precision", warnings
|
||||
case raw == "bigint":
|
||||
return "bigint", warnings
|
||||
default:
|
||||
return col.Type, warnings
|
||||
}
|
||||
}
|
||||
1315
internal/sync/migration_redis.go
Normal file
1315
internal/sync/migration_redis.go
Normal file
File diff suppressed because it is too large
Load Diff
58
internal/sync/migration_runtime_helpers.go
Normal file
58
internal/sync/migration_runtime_helpers.go
Normal file
@@ -0,0 +1,58 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"GoNavi-Wails/internal/connection"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func supportsAutoAddColumnsForPair(sourceType string, targetType string) bool {
|
||||
source := normalizeMigrationDBType(sourceType)
|
||||
target := normalizeMigrationDBType(targetType)
|
||||
if isMySQLLikeWritableTargetType(target) {
|
||||
return isMySQLCoreType(source)
|
||||
}
|
||||
if isPGLikeTarget(target) {
|
||||
return isMySQLLikeSourceType(source)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func buildAddColumnSQLForPair(sourceType string, targetType string, targetQueryTable string, sourceCol connection.ColumnDefinition) (string, error) {
|
||||
source := normalizeMigrationDBType(sourceType)
|
||||
target := normalizeMigrationDBType(targetType)
|
||||
switch {
|
||||
case isMySQLCoreType(source) && isMySQLLikeWritableTargetType(target):
|
||||
colType := sanitizeMySQLColumnType(sourceCol.Type)
|
||||
return fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s %s NULL",
|
||||
quoteQualifiedIdentByType("mysql", targetQueryTable),
|
||||
quoteIdentByType("mysql", sourceCol.Name),
|
||||
colType,
|
||||
), nil
|
||||
case isMySQLLikeSourceType(source) && isPGLikeTarget(target):
|
||||
colType, _, warnings := mapMySQLColumnToKingbase(sourceCol)
|
||||
if len(warnings) > 0 && strings.Contains(strings.Join(warnings, " "), "identity") {
|
||||
// 对已有目标表补字段时保守处理,不补建自增语义。
|
||||
}
|
||||
return fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s %s NULL",
|
||||
quoteQualifiedIdentByType(target, targetQueryTable),
|
||||
quoteIdentByType(target, sourceCol.Name),
|
||||
colType,
|
||||
), nil
|
||||
default:
|
||||
return "", fmt.Errorf("当前不支持 source=%s target=%s 的自动补字段", sourceType, targetType)
|
||||
}
|
||||
}
|
||||
|
||||
func executeSQLStatements(execFn func(string) (int64, error), statements []string) error {
|
||||
for _, stmt := range statements {
|
||||
trimmed := strings.TrimSpace(stmt)
|
||||
if trimmed == "" {
|
||||
continue
|
||||
}
|
||||
if _, err := execFn(trimmed); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
53
internal/sync/migration_schema_inference.go
Normal file
53
internal/sync/migration_schema_inference.go
Normal file
@@ -0,0 +1,53 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// SchemaInferenceStrategy selects how a source schema is inferred when the
// source engine has no fixed relational schema (used for document -> relational
// migrations; see shouldUseSchemaInference).
type SchemaInferenceStrategy string

const (
	// SchemaInferenceStrategySample infers the schema from sampled data.
	SchemaInferenceStrategySample SchemaInferenceStrategy = "sample"
	// SchemaInferenceStrategyStrict opts out of sampling; presumably requires
	// an explicitly provided schema — no caller visible here, confirm before
	// relying on exact semantics.
	SchemaInferenceStrategyStrict SchemaInferenceStrategy = "strict"
)
|
||||
|
||||
func shouldUseSchemaInference(sourceType string, targetType string) bool {
|
||||
sourceModel := classifyMigrationDataModel(sourceType)
|
||||
targetModel := classifyMigrationDataModel(targetType)
|
||||
return sourceModel == MigrationDataModelDocument && targetModel == MigrationDataModelRelational
|
||||
}
|
||||
|
||||
func inferMigrationObjectKind(sourceType string, targetType string) MigrationObjectKind {
|
||||
sourceModel := classifyMigrationDataModel(sourceType)
|
||||
targetModel := classifyMigrationDataModel(targetType)
|
||||
switch {
|
||||
case sourceModel == MigrationDataModelDocument || targetModel == MigrationDataModelDocument:
|
||||
return MigrationObjectKindCollection
|
||||
case sourceModel == MigrationDataModelKeyValue || targetModel == MigrationDataModelKeyValue:
|
||||
return MigrationObjectKindKeyspace
|
||||
default:
|
||||
return MigrationObjectKindTable
|
||||
}
|
||||
}
|
||||
|
||||
func inferSchemaForPair(sourceType string, targetType string, objectName string) (SchemaInferenceResult, error) {
|
||||
if !shouldUseSchemaInference(sourceType, targetType) {
|
||||
return SchemaInferenceResult{}, fmt.Errorf("当前迁移对 %s -> %s 不需要 schema 推断", sourceType, targetType)
|
||||
}
|
||||
return SchemaInferenceResult{
|
||||
Object: CanonicalObjectSpec{
|
||||
Name: strings.TrimSpace(objectName),
|
||||
Kind: MigrationObjectKindCollection,
|
||||
Fields: []CanonicalFieldSpec{},
|
||||
},
|
||||
Issues: []SchemaInferenceIssue{
|
||||
{
|
||||
Level: "info",
|
||||
Message: "MongoDB -> 关系型数据库的 schema 推断能力尚在建设中,当前仅提供内核入口。",
|
||||
Resolution: "后续将基于样本数据生成列定义与类型降级策略。",
|
||||
},
|
||||
},
|
||||
NeedsReview: true,
|
||||
}, nil
|
||||
}
|
||||
296
internal/sync/migration_tdengine.go
Normal file
296
internal/sync/migration_tdengine.go
Normal file
@@ -0,0 +1,296 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"GoNavi-Wails/internal/connection"
|
||||
"GoNavi-Wails/internal/db"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// buildTDengineToMySQLPlan plans a single-table TDengine -> MySQL migration.
// It resolves source/target naming, inspects columns on both sides, collects
// TDengine-specific semantic warnings (TAG columns etc.), and — depending on
// config.TargetTableStrategy — decides whether the target table will be
// auto-created.
//
// Returns the plan plus the inspected source and target column definitions.
// A missing source table or an inspection failure is an error; a missing
// target table is not (it drives the strategy switch below).
func buildTDengineToMySQLPlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
	plan := SchemaMigrationPlan{}
	sourceType := resolveMigrationDBType(config.SourceConfig)
	targetType := resolveMigrationDBType(config.TargetConfig)
	// Resolve schema/table names on both sides plus the quoted forms used in queries.
	plan.SourceSchema, plan.SourceTable = normalizeSchemaAndTable(sourceType, config.SourceConfig.Database, tableName)
	plan.TargetSchema, plan.TargetTable = normalizeSchemaAndTable(targetType, config.TargetConfig.Database, tableName)
	plan.SourceQueryTable = qualifiedNameForQuery(sourceType, plan.SourceSchema, plan.SourceTable, tableName)
	plan.TargetQueryTable = qualifiedNameForQuery(targetType, plan.TargetSchema, plan.TargetTable, tableName)
	plan.PlannedAction = "使用已有目标表导入"

	sourceCols, sourceExists, err := inspectTableColumns(sourceDB, plan.SourceSchema, plan.SourceTable)
	if err != nil {
		return plan, nil, nil, fmt.Errorf("获取源表字段失败: %w", err)
	}
	if !sourceExists {
		return plan, nil, nil, fmt.Errorf("源表不存在或无列定义: %s", tableName)
	}

	targetCols, targetExists, err := inspectTableColumns(targetDB, plan.TargetSchema, plan.TargetTable)
	if err != nil {
		return plan, sourceCols, nil, fmt.Errorf("获取目标表字段失败: %w", err)
	}
	plan.TargetTableExists = targetExists
	// Warn about TDengine time-series semantics a relational target cannot keep.
	plan.Warnings = append(plan.Warnings, tdengineSemanticWarnings(sourceCols)...)

	strategy := normalizeTargetTableStrategy(config.TargetTableStrategy)
	if targetExists {
		// Existing target: only report missing columns, never auto-ALTER.
		missing := diffMissingColumnNames(sourceCols, targetCols)
		if len(missing) > 0 {
			plan.Warnings = append(plan.Warnings, fmt.Sprintf("目标表缺失字段 %d 个:%s", len(missing), strings.Join(missing, ", ")))
		}
		if strategy != "existing_only" {
			plan.Warnings = append(plan.Warnings, "TDengine 源端当前不自动补齐已有目标表字段,请先确认目标表结构")
		}
		return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
	}

	switch strategy {
	case "existing_only":
		// Strategy forbids creating tables; the user must create it first.
		plan.PlannedAction = "目标表不存在,需先手工创建"
		plan.Warnings = append(plan.Warnings, "当前策略要求目标表已存在,执行时不会自动建表")
		return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
	case "smart", "auto_create_if_missing":
		// Auto-create path: render CREATE TABLE SQL from the source columns.
		plan.AutoCreate = true
		plan.PlannedAction = "目标表不存在,将自动建表后导入"
		createSQL, warnings, unsupported := buildTDengineToMySQLCreateTableSQL(plan.TargetQueryTable, sourceCols)
		plan.CreateTableSQL = createSQL
		plan.Warnings = append(plan.Warnings, warnings...)
		plan.UnsupportedObjects = append(plan.UnsupportedObjects, unsupported...)
		return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
	default:
		return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
	}
}
|
||||
|
||||
// buildTDengineToPGLikePlan plans a single-table TDengine -> PG-like migration.
// Mirrors buildTDengineToMySQLPlan, but renders PG-like DDL (the create-table
// builder takes the resolved targetType so quoting/type mapping match the
// concrete PG-like engine).
//
// Returns the plan plus the inspected source and target column definitions.
func buildTDengineToPGLikePlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
	plan := SchemaMigrationPlan{}
	sourceType := resolveMigrationDBType(config.SourceConfig)
	targetType := resolveMigrationDBType(config.TargetConfig)
	// Resolve schema/table names on both sides plus the quoted forms used in queries.
	plan.SourceSchema, plan.SourceTable = normalizeSchemaAndTable(sourceType, config.SourceConfig.Database, tableName)
	plan.TargetSchema, plan.TargetTable = normalizeSchemaAndTable(targetType, config.TargetConfig.Database, tableName)
	plan.SourceQueryTable = qualifiedNameForQuery(sourceType, plan.SourceSchema, plan.SourceTable, tableName)
	plan.TargetQueryTable = qualifiedNameForQuery(targetType, plan.TargetSchema, plan.TargetTable, tableName)
	plan.PlannedAction = "使用已有目标表导入"

	sourceCols, sourceExists, err := inspectTableColumns(sourceDB, plan.SourceSchema, plan.SourceTable)
	if err != nil {
		return plan, nil, nil, fmt.Errorf("获取源表字段失败: %w", err)
	}
	if !sourceExists {
		return plan, nil, nil, fmt.Errorf("源表不存在或无列定义: %s", tableName)
	}

	targetCols, targetExists, err := inspectTableColumns(targetDB, plan.TargetSchema, plan.TargetTable)
	if err != nil {
		return plan, sourceCols, nil, fmt.Errorf("获取目标表字段失败: %w", err)
	}
	plan.TargetTableExists = targetExists
	// Warn about TDengine time-series semantics a relational target cannot keep.
	plan.Warnings = append(plan.Warnings, tdengineSemanticWarnings(sourceCols)...)

	strategy := normalizeTargetTableStrategy(config.TargetTableStrategy)
	if targetExists {
		// Existing target: only report missing columns, never auto-ALTER.
		missing := diffMissingColumnNames(sourceCols, targetCols)
		if len(missing) > 0 {
			plan.Warnings = append(plan.Warnings, fmt.Sprintf("目标表缺失字段 %d 个:%s", len(missing), strings.Join(missing, ", ")))
		}
		if strategy != "existing_only" {
			plan.Warnings = append(plan.Warnings, "TDengine 源端当前不自动补齐已有目标表字段,请先确认目标表结构")
		}
		return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
	}

	switch strategy {
	case "existing_only":
		// Strategy forbids creating tables; the user must create it first.
		plan.PlannedAction = "目标表不存在,需先手工创建"
		plan.Warnings = append(plan.Warnings, "当前策略要求目标表已存在,执行时不会自动建表")
		return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
	case "smart", "auto_create_if_missing":
		// Auto-create path: render CREATE TABLE SQL from the source columns.
		plan.AutoCreate = true
		plan.PlannedAction = "目标表不存在,将自动建表后导入"
		createSQL, warnings, unsupported := buildTDengineToPGLikeCreateTableSQL(targetType, plan.TargetQueryTable, sourceCols)
		plan.CreateTableSQL = createSQL
		plan.Warnings = append(plan.Warnings, warnings...)
		plan.UnsupportedObjects = append(plan.UnsupportedObjects, unsupported...)
		return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
	default:
		return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
	}
}
|
||||
|
||||
func buildTDengineToMySQLCreateTableSQL(targetQueryTable string, sourceCols []connection.ColumnDefinition) (string, []string, []string) {
|
||||
columnDefs := make([]string, 0, len(sourceCols))
|
||||
warnings := make([]string, 0)
|
||||
unsupported := []string{"TDengine 的索引/外键/触发器/超级表/TTL 等时序语义当前不会自动迁移"}
|
||||
for _, col := range sourceCols {
|
||||
def, colWarnings := buildTDengineToMySQLColumnDefinition(col)
|
||||
columnDefs = append(columnDefs, fmt.Sprintf("%s %s", quoteIdentByType("mysql", col.Name), def))
|
||||
warnings = append(warnings, colWarnings...)
|
||||
}
|
||||
createSQL := fmt.Sprintf("CREATE TABLE %s (\n %s\n)", quoteQualifiedIdentByType("mysql", targetQueryTable), strings.Join(columnDefs, ",\n "))
|
||||
return createSQL, dedupeStrings(warnings), dedupeStrings(unsupported)
|
||||
}
|
||||
|
||||
func buildTDengineToPGLikeCreateTableSQL(targetType string, targetQueryTable string, sourceCols []connection.ColumnDefinition) (string, []string, []string) {
|
||||
columnDefs := make([]string, 0, len(sourceCols))
|
||||
warnings := make([]string, 0)
|
||||
unsupported := []string{"TDengine 的索引/外键/触发器/超级表/TTL 等时序语义当前不会自动迁移"}
|
||||
for _, col := range sourceCols {
|
||||
def, colWarnings := buildTDengineToPGLikeColumnDefinition(col)
|
||||
columnDefs = append(columnDefs, fmt.Sprintf("%s %s", quoteIdentByType(targetType, col.Name), def))
|
||||
warnings = append(warnings, colWarnings...)
|
||||
}
|
||||
createSQL := fmt.Sprintf("CREATE TABLE %s (\n %s\n)", quoteQualifiedIdentByType(targetType, targetQueryTable), strings.Join(columnDefs, ",\n "))
|
||||
return createSQL, dedupeStrings(warnings), dedupeStrings(unsupported)
|
||||
}
|
||||
|
||||
func buildTDengineToMySQLColumnDefinition(col connection.ColumnDefinition) (string, []string) {
|
||||
targetType, warnings := mapTDengineColumnToMySQL(col)
|
||||
parts := []string{targetType}
|
||||
if strings.EqualFold(strings.TrimSpace(col.Nullable), "NO") {
|
||||
parts = append(parts, "NOT NULL")
|
||||
} else {
|
||||
parts = append(parts, "NULL")
|
||||
}
|
||||
return strings.Join(parts, " "), warnings
|
||||
}
|
||||
|
||||
func buildTDengineToPGLikeColumnDefinition(col connection.ColumnDefinition) (string, []string) {
|
||||
targetType, warnings := mapTDengineColumnToPGLike(col)
|
||||
parts := []string{targetType}
|
||||
if strings.EqualFold(strings.TrimSpace(col.Nullable), "NO") {
|
||||
parts = append(parts, "NOT NULL")
|
||||
} else {
|
||||
parts = append(parts, "NULL")
|
||||
}
|
||||
return strings.Join(parts, " "), warnings
|
||||
}
|
||||
|
||||
func tdengineSemanticWarnings(sourceCols []connection.ColumnDefinition) []string {
|
||||
warnings := []string{"TDengine 到关系型目标库当前仅迁移列与数据;超级表、TAG 关联、保留策略等时序语义会降级或丢失"}
|
||||
for _, col := range sourceCols {
|
||||
if isTDengineTagColumn(col) {
|
||||
warnings = append(warnings, fmt.Sprintf("字段 %s 为 TDengine TAG 列,迁移到关系型目标后将降级为普通字段", col.Name))
|
||||
}
|
||||
}
|
||||
return dedupeStrings(warnings)
|
||||
}
|
||||
|
||||
func isTDengineTagColumn(col connection.ColumnDefinition) bool {
|
||||
return strings.EqualFold(strings.TrimSpace(col.Key), "TAG") || strings.Contains(strings.ToUpper(strings.TrimSpace(col.Extra)), "TAG")
|
||||
}
|
||||
|
||||
// parseTDengineType splits a TDengine type declaration into its upper-cased
// base name and declared length, e.g. "varchar(64)" -> ("VARCHAR", 64).
//
// For multi-argument types such as DECIMAL(10,2) the leading argument (the
// precision) is returned as the length. Previously the comma made the whole
// argument list unparseable by strconv.Atoi, so the length was silently
// reported as 0 and downstream mappers discarded the declared precision.
// Inputs without a usable length yield 0; empty input yields ("", 0).
func parseTDengineType(raw string) (string, int) {
	cleaned := strings.TrimSpace(strings.ToUpper(raw))
	if cleaned == "" {
		return "", 0
	}
	base := cleaned
	length := 0
	if idx := strings.Index(base, "("); idx >= 0 {
		end := strings.Index(base[idx+1:], ")")
		if end >= 0 {
			lengthText := strings.TrimSpace(base[idx+1 : idx+1+end])
			// DECIMAL(p,s) style: only the leading precision counts as length.
			if comma := strings.Index(lengthText, ","); comma >= 0 {
				lengthText = strings.TrimSpace(lengthText[:comma])
			}
			if v, err := strconv.Atoi(lengthText); err == nil {
				length = v
			}
		}
		base = strings.TrimSpace(base[:idx])
	}
	return base, length
}
|
||||
|
||||
// mapTDengineColumnToMySQL maps one TDengine column type to a MySQL column
// type, returning the mapped type plus human-readable mapping warnings.
// TAG columns map like regular columns but get a warning; unknown types
// degrade to text with a warning.
func mapTDengineColumnToMySQL(col connection.ColumnDefinition) (string, []string) {
	base, length := parseTDengineType(col.Type)
	warnings := make([]string, 0)
	if isTDengineTagColumn(col) {
		warnings = append(warnings, fmt.Sprintf("字段 %s 为 TDengine TAG 列,已按普通列映射", col.Name))
	}
	switch base {
	case "BOOL", "BOOLEAN":
		return "tinyint(1)", warnings
	case "TINYINT":
		return "tinyint", warnings
	case "UTINYINT":
		return "tinyint unsigned", warnings
	case "SMALLINT":
		return "smallint", warnings
	case "USMALLINT":
		return "smallint unsigned", warnings
	case "INT", "INTEGER":
		return "int", warnings
	case "UINT":
		return "int unsigned", warnings
	case "BIGINT":
		return "bigint", warnings
	case "UBIGINT":
		return "bigint unsigned", warnings
	case "FLOAT":
		return "float", warnings
	case "DOUBLE":
		return "double", warnings
	case "DECIMAL", "NUMERIC":
		// Keep the original declaration (lower-cased) when a length was
		// parsed; otherwise fall back to a wide default precision.
		if length > 0 {
			return strings.ToLower(strings.TrimSpace(col.Type)), warnings
		}
		return "decimal(38,10)", warnings
	case "TIMESTAMP":
		return "datetime", warnings
	case "DATE":
		return "date", warnings
	case "JSON":
		return "json", warnings
	case "BINARY", "NCHAR", "VARCHAR", "VARBINARY":
		// Preserve the length while it fits MySQL's varchar limit; otherwise text.
		if length > 0 && length <= 65535 {
			return fmt.Sprintf("varchar(%d)", length), warnings
		}
		return "text", warnings
	default:
		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 暂无专门 MySQL 映射,已降级为 text", col.Name, col.Type))
		return "text", warnings
	}
}
|
||||
|
||||
// mapTDengineColumnToPGLike maps one TDengine column type to a PG-like column
// type, returning the mapped type plus mapping warnings. TAG columns map like
// regular columns with a warning; UBIGINT maps to numeric(20,0) because PG
// bigint is signed; unknown types degrade to text with a warning.
func mapTDengineColumnToPGLike(col connection.ColumnDefinition) (string, []string) {
	base, length := parseTDengineType(col.Type)
	warnings := make([]string, 0)
	if isTDengineTagColumn(col) {
		warnings = append(warnings, fmt.Sprintf("字段 %s 为 TDengine TAG 列,已按普通列映射", col.Name))
	}
	switch base {
	case "BOOL", "BOOLEAN":
		return "boolean", warnings
	case "TINYINT", "UTINYINT", "SMALLINT":
		return "smallint", warnings
	case "USMALLINT", "INT", "INTEGER":
		return "integer", warnings
	case "UINT", "BIGINT":
		return "bigint", warnings
	case "UBIGINT":
		// numeric(20,0) holds the full unsigned 64-bit range.
		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 UBIGINT 已映射为 numeric(20,0) 以避免无符号溢出", col.Name))
		return "numeric(20,0)", warnings
	case "FLOAT":
		return "real", warnings
	case "DOUBLE":
		return "double precision", warnings
	case "DECIMAL", "NUMERIC":
		// Keep the original declaration (lower-cased) when a length was
		// parsed; otherwise fall back to a wide default precision.
		if length > 0 {
			return strings.ToLower(strings.TrimSpace(col.Type)), warnings
		}
		return "numeric(38,10)", warnings
	case "TIMESTAMP":
		return "timestamp", warnings
	case "DATE":
		return "date", warnings
	case "JSON":
		return "jsonb", warnings
	case "BINARY", "NCHAR", "VARCHAR", "VARBINARY":
		if length > 0 {
			return fmt.Sprintf("varchar(%d)", length), warnings
		}
		return "text", warnings
	default:
		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 暂无专门 PG-like 映射,已降级为 text", col.Name, col.Type))
		return "text", warnings
	}
}
|
||||
657
internal/sync/migration_tdengine_target.go
Normal file
657
internal/sync/migration_tdengine_target.go
Normal file
@@ -0,0 +1,657 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"GoNavi-Wails/internal/connection"
|
||||
"GoNavi-Wails/internal/db"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// mySQLLikeToTDenginePlanner plans MySQL-like source -> TDengine target migrations.
type mySQLLikeToTDenginePlanner struct{}

// pgLikeToTDenginePlanner plans PG-like source -> TDengine target migrations.
type pgLikeToTDenginePlanner struct{}

// clickHouseToTDenginePlanner plans ClickHouse source -> TDengine target migrations.
type clickHouseToTDenginePlanner struct{}

// tdengineToTDenginePlanner plans TDengine source -> TDengine target migrations.
type tdengineToTDenginePlanner struct{}
|
||||
|
||||
func (mySQLLikeToTDenginePlanner) Name() string { return "mysqllike-tdengine-planner" }
|
||||
|
||||
func (mySQLLikeToTDenginePlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel {
|
||||
sourceType := resolveMigrationDBType(ctx.Config.SourceConfig)
|
||||
targetType := resolveMigrationDBType(ctx.Config.TargetConfig)
|
||||
if isMySQLLikeSourceType(sourceType) && targetType == "tdengine" {
|
||||
return MigrationSupportLevelFull
|
||||
}
|
||||
return MigrationSupportLevelUnsupported
|
||||
}
|
||||
|
||||
func (mySQLLikeToTDenginePlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
|
||||
return buildMySQLLikeToTDenginePlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB)
|
||||
}
|
||||
|
||||
func (pgLikeToTDenginePlanner) Name() string { return "pglike-tdengine-planner" }
|
||||
|
||||
func (pgLikeToTDenginePlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel {
|
||||
sourceType := resolveMigrationDBType(ctx.Config.SourceConfig)
|
||||
targetType := resolveMigrationDBType(ctx.Config.TargetConfig)
|
||||
if isPGLikeSource(sourceType) && targetType == "tdengine" {
|
||||
return MigrationSupportLevelFull
|
||||
}
|
||||
return MigrationSupportLevelUnsupported
|
||||
}
|
||||
|
||||
func (pgLikeToTDenginePlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
|
||||
return buildPGLikeToTDenginePlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB)
|
||||
}
|
||||
|
||||
// buildMySQLLikeToTDenginePlan plans a MySQL-like -> TDengine migration via
// the shared TDengine-target planner, injecting MySQL-specific timestamp
// detection and CREATE TABLE rendering.
func buildMySQLLikeToTDenginePlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
	return buildSourceToTDenginePlan(config, tableName, sourceDB, targetDB, isMySQLLikeTDengineTimestampCandidate, buildMySQLLikeToTDengineCreateTableSQL)
}

// buildPGLikeToTDenginePlan plans a PG-like -> TDengine migration via the
// shared TDengine-target planner with PG-specific hooks.
func buildPGLikeToTDenginePlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
	return buildSourceToTDenginePlan(config, tableName, sourceDB, targetDB, isPGLikeTDengineTimestampCandidate, buildPGLikeToTDengineCreateTableSQL)
}

// buildClickHouseToTDenginePlan plans a ClickHouse -> TDengine migration via
// the shared TDengine-target planner with ClickHouse-specific hooks.
func buildClickHouseToTDenginePlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
	return buildSourceToTDenginePlan(config, tableName, sourceDB, targetDB, isClickHouseTDengineTimestampCandidate, buildClickHouseToTDengineCreateTableSQL)
}

// buildTDengineToTDenginePlan plans a TDengine -> TDengine migration via the
// shared TDengine-target planner with TDengine-specific hooks.
func buildTDengineToTDenginePlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
	return buildSourceToTDenginePlan(config, tableName, sourceDB, targetDB, isTDengineTDengineTimestampCandidate, buildTDengineToTDengineCreateTableSQL)
}
|
||||
|
||||
func (clickHouseToTDenginePlanner) Name() string { return "clickhouse-tdengine-planner" }
|
||||
|
||||
func (clickHouseToTDenginePlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel {
|
||||
sourceType := resolveMigrationDBType(ctx.Config.SourceConfig)
|
||||
targetType := resolveMigrationDBType(ctx.Config.TargetConfig)
|
||||
if sourceType == "clickhouse" && targetType == "tdengine" {
|
||||
return MigrationSupportLevelFull
|
||||
}
|
||||
return MigrationSupportLevelUnsupported
|
||||
}
|
||||
|
||||
func (clickHouseToTDenginePlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
|
||||
return buildClickHouseToTDenginePlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB)
|
||||
}
|
||||
|
||||
func (tdengineToTDenginePlanner) Name() string { return "tdengine-tdengine-planner" }
|
||||
|
||||
func (tdengineToTDenginePlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel {
|
||||
sourceType := resolveMigrationDBType(ctx.Config.SourceConfig)
|
||||
targetType := resolveMigrationDBType(ctx.Config.TargetConfig)
|
||||
if sourceType == "tdengine" && targetType == "tdengine" {
|
||||
return MigrationSupportLevelFull
|
||||
}
|
||||
return MigrationSupportLevelUnsupported
|
||||
}
|
||||
|
||||
func (tdengineToTDenginePlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
|
||||
return buildTDengineToTDenginePlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB)
|
||||
}
|
||||
|
||||
// tdengineTimestampCandidate reports whether a source column can serve as the
// mandatory leading TIMESTAMP column of a TDengine table.
type tdengineTimestampCandidate func(connection.ColumnDefinition) bool

// tdengineCreateTableBuilder renders CREATE TABLE SQL for a TDengine target
// from (target query table, source columns, timestamp column index); it
// returns the SQL plus warnings and unsupported-feature notes.
type tdengineCreateTableBuilder func(string, []connection.ColumnDefinition, int) (string, []string, []string)
|
||||
|
||||
// buildSourceToTDenginePlan is the shared planner for any source engine going
// into a TDengine target. Engine-specific parts are injected: isTimestamp
// decides which source column can become the mandatory leading TIMESTAMP
// column; buildCreateSQL renders the engine-specific CREATE TABLE statement.
//
// It resolves naming, inspects both tables, attaches the fixed TDengine-target
// warnings, and — per config.TargetTableStrategy — decides whether to
// auto-create the target table (only possible when a timestamp column exists).
// Returns the plan plus the inspected source and target column definitions.
func buildSourceToTDenginePlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database, isTimestamp tdengineTimestampCandidate, buildCreateSQL tdengineCreateTableBuilder) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
	plan := SchemaMigrationPlan{}
	sourceType := resolveMigrationDBType(config.SourceConfig)
	targetType := resolveMigrationDBType(config.TargetConfig)
	// Resolve schema/table names on both sides plus the quoted forms used in queries.
	plan.SourceSchema, plan.SourceTable = normalizeSchemaAndTable(sourceType, config.SourceConfig.Database, tableName)
	plan.TargetSchema, plan.TargetTable = normalizeSchemaAndTable(targetType, config.TargetConfig.Database, tableName)
	plan.SourceQueryTable = qualifiedNameForQuery(sourceType, plan.SourceSchema, plan.SourceTable, tableName)
	plan.TargetQueryTable = qualifiedNameForQuery(targetType, plan.TargetSchema, plan.TargetTable, tableName)
	plan.PlannedAction = "使用已有目标表导入"

	sourceCols, sourceExists, err := inspectTableColumns(sourceDB, plan.SourceSchema, plan.SourceTable)
	if err != nil {
		return plan, nil, nil, fmt.Errorf("获取源表字段失败: %w", err)
	}
	if !sourceExists {
		return plan, nil, nil, fmt.Errorf("源表不存在或无列定义: %s", tableName)
	}

	// TDengine-target limitations (INSERT-only, basic auto-create) always apply.
	plan.Warnings = append(plan.Warnings, tdengineTargetBaseWarnings()...)
	timestampIndex := findTDengineTimestampColumn(sourceCols, isTimestamp)
	if timestampIndex < 0 {
		// Without a usable time column the auto-create path is unavailable.
		plan.Warnings = append(plan.Warnings, tdengineTargetMissingTimeWarning())
	}

	targetCols, targetExists, err := inspectTableColumns(targetDB, plan.TargetSchema, plan.TargetTable)
	if err != nil {
		return plan, sourceCols, nil, fmt.Errorf("获取目标表字段失败: %w", err)
	}
	plan.TargetTableExists = targetExists

	strategy := normalizeTargetTableStrategy(config.TargetTableStrategy)
	if targetExists {
		// Existing target: only warn about missing columns, never auto-ALTER.
		missing := diffMissingColumnNames(sourceCols, targetCols)
		if len(missing) > 0 {
			plan.Warnings = append(plan.Warnings, fmt.Sprintf("目标表缺失字段 %d 个:%s", len(missing), strings.Join(missing, ", ")))
		}
		if strategy != "existing_only" {
			plan.Warnings = append(plan.Warnings, "TDengine 目标端当前不自动补齐已有目标表字段,请先确认目标表结构")
		}
		return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
	}

	switch strategy {
	case "existing_only":
		// Strategy forbids creating tables; the user must create it first.
		plan.PlannedAction = "目标表不存在,需先手工创建"
		plan.Warnings = append(plan.Warnings, "当前策略要求目标表已存在,执行时不会自动建表")
		return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
	case "smart", "auto_create_if_missing":
		if timestampIndex < 0 {
			// TDengine requires a TIMESTAMP first column; refuse to auto-create.
			plan.PlannedAction = "源表未识别到可映射为 TDengine 首列的时间列,无法自动建表"
			plan.UnsupportedObjects = append(plan.UnsupportedObjects, "TDengine regular table 首列必须为 TIMESTAMP,当前源表缺少可直接映射的时间列")
			return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
		}
		plan.AutoCreate = true
		plan.PlannedAction = "目标表不存在,将自动建表后导入"
		createSQL, warnings, unsupported := buildCreateSQL(plan.TargetQueryTable, sourceCols, timestampIndex)
		plan.CreateTableSQL = createSQL
		plan.Warnings = append(plan.Warnings, warnings...)
		plan.UnsupportedObjects = append(plan.UnsupportedObjects, unsupported...)
		return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
	default:
		return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
	}
}
|
||||
|
||||
// tdengineTargetBaseWarnings returns the warnings attached to every migration
// into a TDengine target: writes are INSERT-only, and auto-create builds only
// a plain table (no indexes/constraints/supertable semantics).
func tdengineTargetBaseWarnings() []string {
	insertOnly := "TDengine 目标端当前仅支持 INSERT 写入;若存在差异 update/delete,执行期会被拒绝"
	basicCreateOnly := "TDengine 目标端 auto-create 当前仅创建基础表;索引、外键、触发器、supertable/TAGS/TTL 不会自动迁移"
	return []string{insertOnly, basicCreateOnly}
}
|
||||
|
||||
// tdengineTargetMissingTimeWarning is the warning used when the source table
// has no column that can become TDengine's mandatory leading TIMESTAMP column.
func tdengineTargetMissingTimeWarning() string {
	const warning = "源表缺少可映射的时间列,自动建表将不可用;如需继续,请先人工准备 TDengine 目标表与时间列"
	return warning
}
|
||||
|
||||
func findTDengineTimestampColumn(sourceCols []connection.ColumnDefinition, candidate tdengineTimestampCandidate) int {
|
||||
preferred := []string{"ts", "timestamp", "event_time", "eventtime", "created_at", "create_time", "occurred_at"}
|
||||
for _, name := range preferred {
|
||||
for idx, col := range sourceCols {
|
||||
if !candidate(col) {
|
||||
continue
|
||||
}
|
||||
if strings.EqualFold(strings.TrimSpace(col.Name), name) {
|
||||
return idx
|
||||
}
|
||||
}
|
||||
}
|
||||
for idx, col := range sourceCols {
|
||||
if candidate(col) {
|
||||
return idx
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
func reorderTDengineColumns(sourceCols []connection.ColumnDefinition, timestampIndex int) []connection.ColumnDefinition {
|
||||
if timestampIndex <= 0 || timestampIndex >= len(sourceCols) {
|
||||
cloned := make([]connection.ColumnDefinition, len(sourceCols))
|
||||
copy(cloned, sourceCols)
|
||||
return cloned
|
||||
}
|
||||
ordered := make([]connection.ColumnDefinition, 0, len(sourceCols))
|
||||
ordered = append(ordered, sourceCols[timestampIndex])
|
||||
for idx, col := range sourceCols {
|
||||
if idx == timestampIndex {
|
||||
continue
|
||||
}
|
||||
ordered = append(ordered, col)
|
||||
}
|
||||
return ordered
|
||||
}
|
||||
|
||||
func buildMySQLLikeToTDengineCreateTableSQL(targetQueryTable string, sourceCols []connection.ColumnDefinition, timestampIndex int) (string, []string, []string) {
|
||||
ordered := reorderTDengineColumns(sourceCols, timestampIndex)
|
||||
columnDefs := make([]string, 0, len(ordered))
|
||||
warnings := make([]string, 0)
|
||||
unsupported := []string{"源表索引/外键/触发器/唯一约束/自增语义当前不会自动迁移到 TDengine"}
|
||||
if timestampIndex != 0 && timestampIndex >= 0 && timestampIndex < len(sourceCols) {
|
||||
warnings = append(warnings, fmt.Sprintf("TDengine 基础表要求时间列优先,已将字段 %s 调整为首列", sourceCols[timestampIndex].Name))
|
||||
}
|
||||
for idx, col := range ordered {
|
||||
def, colWarnings := mapMySQLLikeColumnToTDengine(col, idx == 0)
|
||||
warnings = append(warnings, colWarnings...)
|
||||
columnDefs = append(columnDefs, fmt.Sprintf("%s %s", quoteIdentByType("tdengine", col.Name), def))
|
||||
}
|
||||
createSQL := fmt.Sprintf("CREATE TABLE %s (\n %s\n)", quoteQualifiedIdentByType("tdengine", targetQueryTable), strings.Join(columnDefs, ",\n "))
|
||||
return createSQL, dedupeStrings(warnings), dedupeStrings(unsupported)
|
||||
}
|
||||
|
||||
func buildPGLikeToTDengineCreateTableSQL(targetQueryTable string, sourceCols []connection.ColumnDefinition, timestampIndex int) (string, []string, []string) {
|
||||
ordered := reorderTDengineColumns(sourceCols, timestampIndex)
|
||||
columnDefs := make([]string, 0, len(ordered))
|
||||
warnings := make([]string, 0)
|
||||
unsupported := []string{"源表索引/外键/触发器/唯一约束/identity/sequence 语义当前不会自动迁移到 TDengine"}
|
||||
if timestampIndex != 0 && timestampIndex >= 0 && timestampIndex < len(sourceCols) {
|
||||
warnings = append(warnings, fmt.Sprintf("TDengine 基础表要求时间列优先,已将字段 %s 调整为首列", sourceCols[timestampIndex].Name))
|
||||
}
|
||||
for idx, col := range ordered {
|
||||
def, colWarnings := mapPGLikeColumnToTDengine(col, idx == 0)
|
||||
warnings = append(warnings, colWarnings...)
|
||||
columnDefs = append(columnDefs, fmt.Sprintf("%s %s", quoteIdentByType("tdengine", col.Name), def))
|
||||
}
|
||||
createSQL := fmt.Sprintf("CREATE TABLE %s (\n %s\n)", quoteQualifiedIdentByType("tdengine", targetQueryTable), strings.Join(columnDefs, ",\n "))
|
||||
return createSQL, dedupeStrings(warnings), dedupeStrings(unsupported)
|
||||
}
|
||||
|
||||
func buildClickHouseToTDengineCreateTableSQL(targetQueryTable string, sourceCols []connection.ColumnDefinition, timestampIndex int) (string, []string, []string) {
|
||||
ordered := reorderTDengineColumns(sourceCols, timestampIndex)
|
||||
columnDefs := make([]string, 0, len(ordered))
|
||||
warnings := make([]string, 0)
|
||||
unsupported := []string{"源表 ORDER BY/PARTITION/TTL/Projection/物化视图 语义当前不会自动迁移到 TDengine"}
|
||||
if timestampIndex != 0 && timestampIndex >= 0 && timestampIndex < len(sourceCols) {
|
||||
warnings = append(warnings, fmt.Sprintf("TDengine 基础表要求时间列优先,已将字段 %s 调整为首列", sourceCols[timestampIndex].Name))
|
||||
}
|
||||
for idx, col := range ordered {
|
||||
def, colWarnings := mapClickHouseColumnToTDengine(col, idx == 0)
|
||||
warnings = append(warnings, colWarnings...)
|
||||
columnDefs = append(columnDefs, fmt.Sprintf("%s %s", quoteIdentByType("tdengine", col.Name), def))
|
||||
}
|
||||
createSQL := fmt.Sprintf("CREATE TABLE %s (\n %s\n)", quoteQualifiedIdentByType("tdengine", targetQueryTable), strings.Join(columnDefs, ",\n "))
|
||||
return createSQL, dedupeStrings(warnings), dedupeStrings(unsupported)
|
||||
}
|
||||
|
||||
func buildTDengineToTDengineCreateTableSQL(targetQueryTable string, sourceCols []connection.ColumnDefinition, timestampIndex int) (string, []string, []string) {
|
||||
ordered := reorderTDengineColumns(sourceCols, timestampIndex)
|
||||
columnDefs := make([]string, 0, len(ordered))
|
||||
warnings := make([]string, 0)
|
||||
unsupported := []string{"源表 supertable/TAGS/TTL/保留策略/索引 语义当前不会自动迁移到 TDengine regular table"}
|
||||
if timestampIndex != 0 && timestampIndex >= 0 && timestampIndex < len(sourceCols) {
|
||||
warnings = append(warnings, fmt.Sprintf("TDengine 基础表要求时间列优先,已将字段 %s 调整为首列", sourceCols[timestampIndex].Name))
|
||||
}
|
||||
for idx, col := range ordered {
|
||||
def, colWarnings := mapTDengineColumnToTDengine(col, idx == 0)
|
||||
warnings = append(warnings, colWarnings...)
|
||||
columnDefs = append(columnDefs, fmt.Sprintf("%s %s", quoteIdentByType("tdengine", col.Name), def))
|
||||
}
|
||||
createSQL := fmt.Sprintf("CREATE TABLE %s (\n %s\n)", quoteQualifiedIdentByType("tdengine", targetQueryTable), strings.Join(columnDefs, ",\n "))
|
||||
return createSQL, dedupeStrings(warnings), dedupeStrings(unsupported)
|
||||
}
|
||||
|
||||
func isMySQLLikeTDengineTimestampCandidate(col connection.ColumnDefinition) bool {
|
||||
raw := strings.ToLower(strings.TrimSpace(col.Type))
|
||||
clean := strings.ReplaceAll(raw, " unsigned", "")
|
||||
clean = strings.ReplaceAll(clean, " zerofill", "")
|
||||
return strings.HasPrefix(clean, "timestamp") || strings.HasPrefix(clean, "datetime")
|
||||
}
|
||||
|
||||
func isPGLikeTDengineTimestampCandidate(col connection.ColumnDefinition) bool {
|
||||
raw := strings.ToLower(strings.TrimSpace(col.Type))
|
||||
return strings.HasPrefix(raw, "timestamp")
|
||||
}
|
||||
|
||||
func isClickHouseTDengineTimestampCandidate(col connection.ColumnDefinition) bool {
|
||||
lower, _ := unwrapClickHouseTDengineType(col.Type)
|
||||
return strings.HasPrefix(lower, "datetime")
|
||||
}
|
||||
|
||||
func isTDengineTDengineTimestampCandidate(col connection.ColumnDefinition) bool {
|
||||
base, _ := parseTDengineType(col.Type)
|
||||
return base == "TIMESTAMP"
|
||||
}
|
||||
|
||||
func mapMySQLLikeColumnToTDengine(col connection.ColumnDefinition, forceTimestamp bool) (string, []string) {
|
||||
warnings := make([]string, 0)
|
||||
if forceTimestamp {
|
||||
if !isMySQLLikeTDengineTimestampCandidate(col) {
|
||||
warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已提升为 TDengine 首列 TIMESTAMP", col.Name, col.Type))
|
||||
}
|
||||
return "TIMESTAMP", warnings
|
||||
}
|
||||
|
||||
raw := strings.ToLower(strings.TrimSpace(col.Type))
|
||||
if raw == "" {
|
||||
return "VARCHAR(1024)", []string{fmt.Sprintf("字段 %s 类型为空,已降级为 VARCHAR(1024)", col.Name)}
|
||||
}
|
||||
unsigned := strings.Contains(raw, "unsigned")
|
||||
clean := strings.ReplaceAll(raw, " unsigned", "")
|
||||
clean = strings.ReplaceAll(clean, " zerofill", "")
|
||||
isAutoIncrement := strings.Contains(strings.ToLower(strings.TrimSpace(col.Extra)), "auto_increment")
|
||||
if isAutoIncrement {
|
||||
warnings = append(warnings, fmt.Sprintf("字段 %s 自增语义不会迁移到 TDengine", col.Name))
|
||||
}
|
||||
if col.Key == "PRI" || col.Key == "PK" {
|
||||
warnings = append(warnings, fmt.Sprintf("字段 %s 主键语义不会按关系型约束迁移到 TDengine", col.Name))
|
||||
}
|
||||
|
||||
switch {
|
||||
case strings.HasPrefix(clean, "tinyint(1)") && !unsigned && !isAutoIncrement:
|
||||
return "BOOL", warnings
|
||||
case strings.HasPrefix(clean, "tinyint"):
|
||||
if unsigned {
|
||||
return "UTINYINT", warnings
|
||||
}
|
||||
return "TINYINT", warnings
|
||||
case strings.HasPrefix(clean, "smallint"):
|
||||
if unsigned {
|
||||
return "USMALLINT", warnings
|
||||
}
|
||||
return "SMALLINT", warnings
|
||||
case strings.HasPrefix(clean, "mediumint"), strings.HasPrefix(clean, "int"), strings.HasPrefix(clean, "integer"):
|
||||
if unsigned {
|
||||
return "UINT", warnings
|
||||
}
|
||||
return "INT", warnings
|
||||
case strings.HasPrefix(clean, "bigint"):
|
||||
if unsigned {
|
||||
return "UBIGINT", warnings
|
||||
}
|
||||
return "BIGINT", warnings
|
||||
case strings.HasPrefix(clean, "decimal"), strings.HasPrefix(clean, "numeric"):
|
||||
return normalizeTDengineDecimalType(clean), warnings
|
||||
case strings.HasPrefix(clean, "float"):
|
||||
return "FLOAT", warnings
|
||||
case strings.HasPrefix(clean, "double"):
|
||||
return "DOUBLE", warnings
|
||||
case strings.HasPrefix(clean, "date"):
|
||||
warnings = append(warnings, fmt.Sprintf("字段 %s 类型 date 已降级映射为 TIMESTAMP", col.Name))
|
||||
return "TIMESTAMP", warnings
|
||||
case strings.HasPrefix(clean, "timestamp"), strings.HasPrefix(clean, "datetime"):
|
||||
return "TIMESTAMP", warnings
|
||||
case strings.HasPrefix(clean, "time"):
|
||||
warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 暂无稳定 TDengine 时间-only 映射,已降级为 VARCHAR(64)", col.Name, col.Type))
|
||||
return "VARCHAR(64)", warnings
|
||||
case strings.HasPrefix(clean, "char("), strings.HasPrefix(clean, "varchar("):
|
||||
return fmt.Sprintf("VARCHAR(%d)", normalizeTDengineVarcharLength(extractFirstTypeLength(clean), 255)), warnings
|
||||
case strings.HasPrefix(clean, "tinytext"), strings.HasPrefix(clean, "text"), strings.HasPrefix(clean, "mediumtext"), strings.HasPrefix(clean, "longtext"):
|
||||
warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已降级为 VARCHAR(4096)", col.Name, col.Type))
|
||||
return "VARCHAR(4096)", warnings
|
||||
case strings.HasPrefix(clean, "json"):
|
||||
warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 因 TDengine JSON 仅适用于 TAG,已降级为 VARCHAR(4096)", col.Name, col.Type))
|
||||
return "VARCHAR(4096)", warnings
|
||||
case strings.HasPrefix(clean, "enum"), strings.HasPrefix(clean, "set"):
|
||||
warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已降级为 VARCHAR(255)", col.Name, col.Type))
|
||||
return "VARCHAR(255)", warnings
|
||||
case strings.HasPrefix(clean, "binary"), strings.HasPrefix(clean, "varbinary"), strings.HasPrefix(clean, "tinyblob"), strings.HasPrefix(clean, "blob"), strings.HasPrefix(clean, "mediumblob"), strings.HasPrefix(clean, "longblob"):
|
||||
warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已按字符串语义降级为 VARCHAR(4096)", col.Name, col.Type))
|
||||
return "VARCHAR(4096)", warnings
|
||||
default:
|
||||
warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 暂无专门 TDengine 映射,已降级为 VARCHAR(1024)", col.Name, col.Type))
|
||||
return "VARCHAR(1024)", warnings
|
||||
}
|
||||
}
|
||||
|
||||
// mapPGLikeColumnToTDengine maps a PostgreSQL-compatible column type to a
// TDengine column type. When forceTimestamp is set (the column was promoted to
// the mandatory first column) the result is always TIMESTAMP. The second
// return value lists human-readable warnings about lossy or semantic-dropping
// conversions.
func mapPGLikeColumnToTDengine(col connection.ColumnDefinition, forceTimestamp bool) (string, []string) {
	warnings := make([]string, 0)
	if forceTimestamp {
		// Warn only when the promoted column was not already a timestamp type.
		if raw := strings.ToLower(strings.TrimSpace(col.Type)); !strings.HasPrefix(raw, "timestamp") {
			warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已提升为 TDengine 首列 TIMESTAMP", col.Name, col.Type))
		}
		return "TIMESTAMP", warnings
	}

	raw := strings.ToLower(strings.TrimSpace(col.Type))
	if raw == "" {
		return "VARCHAR(1024)", []string{fmt.Sprintf("字段 %s 类型为空,已降级为 VARCHAR(1024)", col.Name)}
	}
	// Relational constraints and identity columns have no TDengine
	// counterpart; surface that loss rather than dropping it silently.
	if col.Key == "PRI" || col.Key == "PK" {
		warnings = append(warnings, fmt.Sprintf("字段 %s 主键语义不会按关系型约束迁移到 TDengine", col.Name))
	}
	if strings.Contains(strings.ToLower(strings.TrimSpace(col.Extra)), "identity") || strings.Contains(strings.ToLower(strings.TrimSpace(col.Extra)), "auto_increment") {
		warnings = append(warnings, fmt.Sprintf("字段 %s 自增/identity 语义不会迁移到 TDengine", col.Name))
	}

	// Case order matters: "timestamp" must be matched before the "time" prefix.
	switch {
	case raw == "boolean" || strings.HasPrefix(raw, "bool"):
		return "BOOL", warnings
	case raw == "smallint":
		return "SMALLINT", warnings
	case raw == "integer" || raw == "int4":
		return "INT", warnings
	case raw == "bigint" || raw == "int8":
		return "BIGINT", warnings
	case strings.HasPrefix(raw, "numeric"), strings.HasPrefix(raw, "decimal"):
		return normalizeTDengineDecimalType(raw), warnings
	case raw == "real" || raw == "float4":
		return "FLOAT", warnings
	case raw == "double precision" || raw == "float8":
		return "DOUBLE", warnings
	case raw == "date":
		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 date 已降级映射为 TIMESTAMP", col.Name))
		return "TIMESTAMP", warnings
	case strings.HasPrefix(raw, "timestamp"):
		// Covers "timestamp", "timestamp without time zone" and "timestamptz".
		return "TIMESTAMP", warnings
	case strings.HasPrefix(raw, "time"):
		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 暂无稳定 TDengine 时间-only 映射,已降级为 VARCHAR(64)", col.Name, col.Type))
		return "VARCHAR(64)", warnings
	case strings.HasPrefix(raw, "character varying("), strings.HasPrefix(raw, "varchar("), strings.HasPrefix(raw, "character("), strings.HasPrefix(raw, "char("):
		// Preserve the declared length, clamped by normalizeTDengineVarcharLength.
		return fmt.Sprintf("VARCHAR(%d)", normalizeTDengineVarcharLength(extractFirstTypeLength(raw), 255)), warnings
	case raw == "text":
		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 text 已降级为 VARCHAR(4096)", col.Name))
		return "VARCHAR(4096)", warnings
	case raw == "uuid":
		return "VARCHAR(36)", append(warnings, fmt.Sprintf("字段 %s 类型 uuid 已降级为 VARCHAR(36)", col.Name))
	case raw == "json" || raw == "jsonb":
		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 因 TDengine JSON 仅适用于 TAG,已降级为 VARCHAR(4096)", col.Name, col.Type))
		return "VARCHAR(4096)", warnings
	case raw == "bytea":
		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 bytea 已按字符串语义降级为 VARCHAR(4096)", col.Name))
		return "VARCHAR(4096)", warnings
	case strings.HasSuffix(raw, "[]") || strings.HasPrefix(raw, "array"):
		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已降级为 VARCHAR(4096)", col.Name, col.Type))
		return "VARCHAR(4096)", warnings
	case raw == "user-defined":
		// information_schema reports custom/enum types as "USER-DEFINED".
		warnings = append(warnings, fmt.Sprintf("字段 %s 为用户自定义类型,已降级为 VARCHAR(1024)", col.Name))
		return "VARCHAR(1024)", warnings
	default:
		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 暂无专门 TDengine 映射,已降级为 VARCHAR(1024)", col.Name, col.Type))
		return "VARCHAR(1024)", warnings
	}
}
|
||||
|
||||
// mapClickHouseColumnToTDengine maps a ClickHouse column type to a TDengine
// column type. Nullable(...)/LowCardinality(...) wrappers are stripped first
// via unwrapClickHouseTDengineType. When forceTimestamp is set (the column was
// promoted to the mandatory first column) the result is always TIMESTAMP. The
// second return value lists human-readable warnings about lossy conversions.
func mapClickHouseColumnToTDengine(col connection.ColumnDefinition, forceTimestamp bool) (string, []string) {
	warnings := make([]string, 0)
	if forceTimestamp {
		// Warn only when the promoted column was not already a DateTime type.
		if !isClickHouseTDengineTimestampCandidate(col) {
			warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已提升为 TDengine 首列 TIMESTAMP", col.Name, col.Type))
		}
		return "TIMESTAMP", warnings
	}

	// lower is the wrapper-stripped, lower-cased type; nullability is dropped.
	lower, _ := unwrapClickHouseTDengineType(col.Type)
	if lower == "" {
		return "VARCHAR(1024)", []string{fmt.Sprintf("字段 %s 类型为空,已降级为 VARCHAR(1024)", col.Name)}
	}

	switch {
	case lower == "bool" || lower == "boolean":
		return "BOOL", warnings
	case lower == "int8":
		return "TINYINT", warnings
	case lower == "uint8":
		return "UTINYINT", warnings
	case lower == "int16":
		return "SMALLINT", warnings
	case lower == "uint16":
		return "USMALLINT", warnings
	case lower == "int32":
		return "INT", warnings
	case lower == "uint32":
		return "UINT", warnings
	case lower == "int64":
		return "BIGINT", warnings
	case lower == "uint64":
		return "UBIGINT", warnings
	case lower == "float32":
		return "FLOAT", warnings
	case lower == "float64":
		return "DOUBLE", warnings
	case lower == "date":
		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 date 已降级映射为 TIMESTAMP", col.Name))
		return "TIMESTAMP", warnings
	case strings.HasPrefix(lower, "datetime"):
		// Covers DateTime and DateTime64(...).
		return "TIMESTAMP", warnings
	case lower == "string":
		return "VARCHAR(1024)", warnings
	case lower == "uuid":
		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 uuid 已降级为 VARCHAR(36)", col.Name))
		return "VARCHAR(36)", warnings
	case lower == "json", strings.HasPrefix(lower, "map("), strings.HasPrefix(lower, "array("), strings.HasPrefix(lower, "tuple("), strings.HasPrefix(lower, "nested("):
		// Composite types are serialized to text by the data copier —
		// presumably as JSON; TODO confirm against the copy path.
		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已降级为 VARCHAR(4096)", col.Name, col.Type))
		return "VARCHAR(4096)", warnings
	case strings.HasPrefix(lower, "enum8("), strings.HasPrefix(lower, "enum16("):
		warnings = append(warnings, fmt.Sprintf("字段 %s 枚举类型 %s 已降级为 VARCHAR(255)", col.Name, col.Type))
		return "VARCHAR(255)", warnings
	case clickHouseDecimalPattern.MatchString(lower):
		// clickHouseDecimalPattern is a package-level regex defined elsewhere
		// in this file; groups 2 and 3 are assumed to capture precision and
		// scale — TODO confirm against its definition.
		parts := clickHouseDecimalPattern.FindStringSubmatch(lower)
		return fmt.Sprintf("DECIMAL(%s,%s)", parts[2], parts[3]), warnings
	case clickHouseStringArgsPattern.MatchString(lower):
		// Single numeric-argument string types (e.g. FixedString(N)).
		parts := clickHouseStringArgsPattern.FindStringSubmatch(lower)
		length, err := strconv.Atoi(parts[1])
		if err != nil {
			warnings = append(warnings, fmt.Sprintf("字段 %s FixedString 长度解析失败,已降级为 VARCHAR(255)", col.Name))
			return "VARCHAR(255)", warnings
		}
		return fmt.Sprintf("VARCHAR(%d)", normalizeTDengineVarcharLength(length, 255)), warnings
	default:
		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 暂无专门 TDengine 映射,已降级为 VARCHAR(1024)", col.Name, col.Type))
		return "VARCHAR(1024)", warnings
	}
}
|
||||
|
||||
// mapTDengineColumnToTDengine maps a TDengine source column type onto a target
// TDengine regular-table column type. When forceTimestamp is set (the column
// was promoted to the mandatory first column) the result is always TIMESTAMP.
// The second return value lists warnings about dropped semantics (TAG columns,
// JSON, unknown types).
func mapTDengineColumnToTDengine(col connection.ColumnDefinition, forceTimestamp bool) (string, []string) {
	warnings := make([]string, 0)
	if forceTimestamp {
		// Warn only when the promoted column was not already a TIMESTAMP.
		if !isTDengineTDengineTimestampCandidate(col) {
			warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已提升为 TDengine 首列 TIMESTAMP", col.Name, col.Type))
		}
		return "TIMESTAMP", warnings
	}

	// parseTDengineType splits e.g. "VARCHAR(64)" into base type and length.
	base, length := parseTDengineType(col.Type)
	if base == "" {
		return "VARCHAR(1024)", []string{fmt.Sprintf("字段 %s 类型为空,已降级为 VARCHAR(1024)", col.Name)}
	}
	if isTDengineTagColumn(col) {
		warnings = append(warnings, fmt.Sprintf("字段 %s 为 TDengine TAG 列,迁移到 regular table 后将降级为普通字段", col.Name))
	}

	switch base {
	case "BOOL", "BOOLEAN":
		return "BOOL", warnings
	case "TINYINT":
		return "TINYINT", warnings
	case "UTINYINT":
		return "UTINYINT", warnings
	case "SMALLINT":
		return "SMALLINT", warnings
	case "USMALLINT":
		return "USMALLINT", warnings
	case "INT", "INTEGER":
		return "INT", warnings
	case "UINT":
		return "UINT", warnings
	case "BIGINT":
		return "BIGINT", warnings
	case "UBIGINT":
		return "UBIGINT", warnings
	case "FLOAT":
		return "FLOAT", warnings
	case "DOUBLE":
		return "DOUBLE", warnings
	case "DECIMAL", "NUMERIC":
		return normalizeTDengineDecimalType(col.Type), warnings
	case "TIMESTAMP":
		return "TIMESTAMP", warnings
	case "DATE":
		return "DATE", warnings
	case "JSON":
		// TDengine JSON is TAG-only; a regular-table column cannot keep it.
		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 JSON 在 TDengine regular table 中不保留 TAG 语义,已降级为 VARCHAR(4096)", col.Name))
		return "VARCHAR(4096)", warnings
	case "BINARY", "NCHAR", "VARCHAR", "VARBINARY":
		if length > 0 {
			// Keep the declared length, clamped to TDengine's maximum.
			return fmt.Sprintf("%s(%d)", base, normalizeTDengineVarcharLength(length, length)), warnings
		}
		// No declared length: VARCHAR gets a roomier default than the others.
		fallback := 255
		if base == "VARCHAR" {
			fallback = 1024
		}
		return fmt.Sprintf("%s(%d)", base, fallback), warnings
	default:
		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 暂无专门 TDengine 同库映射,已降级为 VARCHAR(1024)", col.Name, col.Type))
		return "VARCHAR(1024)", warnings
	}
}
|
||||
|
||||
// unwrapClickHouseTDengineType strips Nullable(...) and LowCardinality(...)
// wrappers (in any nesting order) from a ClickHouse type string. It returns
// the lower-cased inner type plus whether a Nullable wrapper was seen.
func unwrapClickHouseTDengineType(raw string) (string, bool) {
	text := strings.TrimSpace(raw)
	lower := strings.ToLower(text)
	nullable := false

	// strip removes one wrapper layer identified by its lower-cased prefix,
	// reporting whether anything changed.
	strip := func(prefix string) bool {
		if !strings.HasPrefix(lower, prefix) || !strings.HasSuffix(lower, ")") {
			return false
		}
		text = strings.TrimSpace(text[len(prefix) : len(text)-1])
		lower = strings.ToLower(text)
		return true
	}

	for {
		stripped := false
		if strip("nullable(") {
			nullable = true
			stripped = true
		}
		if strip("lowcardinality(") {
			stripped = true
		}
		if !stripped {
			return lower, nullable
		}
	}
}
|
||||
|
||||
// normalizeTDengineDecimalType rewrites a decimal/numeric source type into
// TDengine DECIMAL spelling, preserving any explicit "(precision,scale)"
// suffix. Inputs without a recognizable decimal spelling — or a bare
// "decimal"/"numeric" with no precision — fall back to DECIMAL(38,10).
func normalizeTDengineDecimalType(raw string) string {
	text := strings.TrimSpace(raw)
	if text == "" {
		return "DECIMAL(38,10)"
	}
	lower := strings.ToLower(text)
	for _, prefix := range []string{"numeric", "decimal"} {
		if !strings.HasPrefix(lower, prefix) {
			continue
		}
		suffix := text[len(prefix):]
		// BUGFIX: a bare "decimal"/"numeric" (no parenthesized precision)
		// previously produced the incomplete type string "DECIMAL"; use the
		// default precision instead.
		if !strings.HasPrefix(strings.TrimSpace(suffix), "(") {
			return "DECIMAL(38,10)"
		}
		return "DECIMAL" + suffix
	}
	return "DECIMAL(38,10)"
}
|
||||
|
||||
// normalizeTDengineVarcharLength clamps a declared character length for use in
// a TDengine VARCHAR-like column: non-positive lengths become fallback
// (itself defaulting to 255 when non-positive) and the upper bound is 16384.
func normalizeTDengineVarcharLength(length int, fallback int) int {
	if fallback <= 0 {
		fallback = 255
	}
	switch {
	case length <= 0:
		return fallback
	case length > 16384:
		return 16384
	default:
		return length
	}
}
|
||||
|
||||
// extractFirstTypeLength parses the first numeric argument out of a type
// string such as "varchar(255)" or "decimal(10,2)". It returns 0 when there
// is no parenthesized argument list or the first argument is not an integer.
func extractFirstTypeLength(raw string) int {
	openIdx := strings.IndexByte(raw, '(')
	if openIdx < 0 {
		return 0
	}
	rest := raw[openIdx+1:]
	closeIdx := strings.IndexByte(rest, ')')
	if closeIdx < 0 {
		return 0
	}
	args := strings.TrimSpace(rest[:closeIdx])
	if args == "" {
		return 0
	}
	first := strings.TrimSpace(strings.SplitN(args, ",", 2)[0])
	length, err := strconv.Atoi(first)
	if err != nil {
		return 0
	}
	return length
}
|
||||
98
internal/sync/migration_type_resolver.go
Normal file
98
internal/sync/migration_type_resolver.go
Normal file
@@ -0,0 +1,98 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"GoNavi-Wails/internal/connection"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// normalizeMigrationDBType lower-cases and trims a database-type string and
// collapses known aliases onto the canonical names the migration routing uses.
// NOTE(review): "diros" is the canonical spelling used consistently across
// resolveMigrationDBType/isMySQLCoreType; it looks like a transposition of
// "doris" — confirm before renaming anywhere.
func normalizeMigrationDBType(dbType string) string {
	normalized := strings.ToLower(strings.TrimSpace(dbType))
	aliases := map[string]string{
		"doris":      "diros",
		"postgresql": "postgres",
		"dm":         "dameng",
		"dm8":        "dameng",
		"sqlite3":    "sqlite",
	}
	if canonical, ok := aliases[normalized]; ok {
		return canonical
	}
	return normalized
}
|
||||
|
||||
func resolveMigrationDBType(config connection.ConnectionConfig) string {
|
||||
dbType := normalizeMigrationDBType(config.Type)
|
||||
if dbType != "custom" {
|
||||
return dbType
|
||||
}
|
||||
|
||||
driver := strings.ToLower(strings.TrimSpace(config.Driver))
|
||||
switch driver {
|
||||
case "postgresql", "postgres", "pg", "pq", "pgx":
|
||||
return "postgres"
|
||||
case "dm", "dameng", "dm8":
|
||||
return "dameng"
|
||||
case "sqlite3", "sqlite":
|
||||
return "sqlite"
|
||||
case "sphinxql":
|
||||
return "sphinx"
|
||||
case "diros", "doris":
|
||||
return "diros"
|
||||
case "kingbase", "kingbase8", "kingbasees", "kingbasev8":
|
||||
return "kingbase"
|
||||
case "highgo":
|
||||
return "highgo"
|
||||
case "vastbase":
|
||||
return "vastbase"
|
||||
case "mysql", "mysql2":
|
||||
return "mysql"
|
||||
case "mariadb":
|
||||
return "mariadb"
|
||||
}
|
||||
|
||||
switch {
|
||||
case strings.Contains(driver, "postgres"):
|
||||
return "postgres"
|
||||
case strings.Contains(driver, "kingbase"):
|
||||
return "kingbase"
|
||||
case strings.Contains(driver, "highgo"):
|
||||
return "highgo"
|
||||
case strings.Contains(driver, "vastbase"):
|
||||
return "vastbase"
|
||||
case strings.Contains(driver, "sqlite"):
|
||||
return "sqlite"
|
||||
case strings.Contains(driver, "sphinx"):
|
||||
return "sphinx"
|
||||
case strings.Contains(driver, "diros"), strings.Contains(driver, "doris"):
|
||||
return "diros"
|
||||
case strings.Contains(driver, "maria"):
|
||||
return "mariadb"
|
||||
case strings.Contains(driver, "mysql"):
|
||||
return "mysql"
|
||||
case strings.Contains(driver, "dameng"), strings.Contains(driver, "dm"):
|
||||
return "dameng"
|
||||
default:
|
||||
return normalizeMigrationDBType(driver)
|
||||
}
|
||||
}
|
||||
|
||||
func isMySQLCoreType(dbType string) bool {
|
||||
switch normalizeMigrationDBType(dbType) {
|
||||
case "mysql", "mariadb", "diros":
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func isMySQLLikeSourceType(dbType string) bool {
|
||||
if isMySQLCoreType(dbType) {
|
||||
return true
|
||||
}
|
||||
return normalizeMigrationDBType(dbType) == "sphinx"
|
||||
}
|
||||
|
||||
// isMySQLLikeWritableTargetType reports whether the type can be written to
// with the MySQL core dialect. Note: unlike isMySQLLikeSourceType, "sphinx"
// is not accepted as a writable target.
func isMySQLLikeWritableTargetType(dbType string) bool {
	return isMySQLCoreType(dbType)
}
|
||||
@@ -1,7 +1,7 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"GoNavi-Wails/internal/db"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
@@ -36,12 +36,18 @@ func (s *SyncEngine) Preview(config SyncConfig, tableName string, limit int) (Ta
|
||||
if limit > 500 {
|
||||
limit = 500
|
||||
}
|
||||
if isRedisToMongoKeyspacePair(config) {
|
||||
return s.previewRedisToMongo(config, tableName, limit)
|
||||
}
|
||||
if isMongoToRedisKeyspacePair(config) {
|
||||
return s.previewMongoToRedis(config, tableName, limit)
|
||||
}
|
||||
|
||||
sourceDB, err := db.NewDatabase(config.SourceConfig.Type)
|
||||
sourceDB, err := newSyncDatabase(config.SourceConfig.Type)
|
||||
if err != nil {
|
||||
return TableDiffPreview{}, fmt.Errorf("初始化源数据库驱动失败: %w", err)
|
||||
}
|
||||
targetDB, err := db.NewDatabase(config.TargetConfig.Type)
|
||||
targetDB, err := newSyncDatabase(config.TargetConfig.Type)
|
||||
if err != nil {
|
||||
return TableDiffPreview{}, fmt.Errorf("初始化目标数据库驱动失败: %w", err)
|
||||
}
|
||||
@@ -56,14 +62,12 @@ func (s *SyncEngine) Preview(config SyncConfig, tableName string, limit int) (Ta
|
||||
}
|
||||
defer targetDB.Close()
|
||||
|
||||
sourceSchema, sourceTable := normalizeSchemaAndTable(config.SourceConfig.Type, config.SourceConfig.Database, tableName)
|
||||
targetSchema, targetTable := normalizeSchemaAndTable(config.TargetConfig.Type, config.TargetConfig.Database, tableName)
|
||||
sourceQueryTable := qualifiedNameForQuery(config.SourceConfig.Type, sourceSchema, sourceTable, tableName)
|
||||
targetQueryTable := qualifiedNameForQuery(config.TargetConfig.Type, targetSchema, targetTable, tableName)
|
||||
|
||||
cols, err := sourceDB.GetColumns(sourceSchema, sourceTable)
|
||||
plan, cols, _, err := buildSchemaMigrationPlan(config, tableName, sourceDB, targetDB)
|
||||
if err != nil {
|
||||
return TableDiffPreview{}, fmt.Errorf("获取源表字段失败: %w", err)
|
||||
return TableDiffPreview{}, err
|
||||
}
|
||||
if !plan.TargetTableExists && !plan.AutoCreate {
|
||||
return TableDiffPreview{}, errors.New(firstNonEmpty(plan.PlannedAction, "目标表不存在,无法预览差异"))
|
||||
}
|
||||
|
||||
pkCols := make([]string, 0, 2)
|
||||
@@ -80,13 +84,17 @@ func (s *SyncEngine) Preview(config SyncConfig, tableName string, limit int) (Ta
|
||||
}
|
||||
pkCol := pkCols[0]
|
||||
|
||||
sourceRows, _, err := sourceDB.Query(fmt.Sprintf("SELECT * FROM %s", quoteQualifiedIdentByType(config.SourceConfig.Type, sourceQueryTable)))
|
||||
sourceRows, _, err := sourceDB.Query(fmt.Sprintf("SELECT * FROM %s", quoteQualifiedIdentByType(resolveMigrationDBType(config.SourceConfig), plan.SourceQueryTable)))
|
||||
if err != nil {
|
||||
return TableDiffPreview{}, fmt.Errorf("读取源表失败: %w", err)
|
||||
}
|
||||
targetRows, _, err := targetDB.Query(fmt.Sprintf("SELECT * FROM %s", quoteQualifiedIdentByType(config.TargetConfig.Type, targetQueryTable)))
|
||||
if err != nil {
|
||||
return TableDiffPreview{}, fmt.Errorf("读取目标表失败: %w", err)
|
||||
|
||||
targetRows := make([]map[string]interface{}, 0)
|
||||
if plan.TargetTableExists {
|
||||
targetRows, _, err = targetDB.Query(fmt.Sprintf("SELECT * FROM %s", quoteQualifiedIdentByType(resolveMigrationDBType(config.TargetConfig), plan.TargetQueryTable)))
|
||||
if err != nil {
|
||||
return TableDiffPreview{}, fmt.Errorf("读取目标表失败: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
targetMap := make(map[string]map[string]interface{}, len(targetRows))
|
||||
@@ -133,12 +141,7 @@ func (s *SyncEngine) Preview(config SyncConfig, tableName string, limit int) (Ta
|
||||
if len(changedColumns) > 0 {
|
||||
out.TotalUpdates++
|
||||
if len(out.Updates) < limit {
|
||||
out.Updates = append(out.Updates, PreviewUpdateRow{
|
||||
PK: pkVal,
|
||||
ChangedColumns: changedColumns,
|
||||
Source: sRow,
|
||||
Target: tRow,
|
||||
})
|
||||
out.Updates = append(out.Updates, PreviewUpdateRow{PK: pkVal, ChangedColumns: changedColumns, Source: sRow, Target: tRow})
|
||||
}
|
||||
}
|
||||
continue
|
||||
|
||||
490
internal/sync/redis_migration_test.go
Normal file
490
internal/sync/redis_migration_test.go
Normal file
@@ -0,0 +1,490 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"GoNavi-Wails/internal/connection"
|
||||
"GoNavi-Wails/internal/db"
|
||||
redispkg "GoNavi-Wails/internal/redis"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// fakeRedisMigrationClient is an in-memory stand-in for the Redis client used
// by the migration engine in tests.
type fakeRedisMigrationClient struct {
	values        map[string]*redispkg.RedisValue // keyspace snapshot keyed by Redis key
	scannedKeys   []string                        // keys returned by ScanKeys, in order
	connectConfig connection.ConnectionConfig     // last config passed to Connect
	closed        bool                            // set once Close has been called
}
|
||||
|
||||
func (f *fakeRedisMigrationClient) Connect(config connection.ConnectionConfig) error {
|
||||
f.connectConfig = config
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *fakeRedisMigrationClient) Close() error {
|
||||
f.closed = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *fakeRedisMigrationClient) ScanKeys(pattern string, cursor uint64, count int64) (*redispkg.RedisScanResult, error) {
|
||||
items := make([]redispkg.RedisKeyInfo, 0, len(f.scannedKeys))
|
||||
for _, key := range f.scannedKeys {
|
||||
items = append(items, redispkg.RedisKeyInfo{Key: key, Type: "string", TTL: -1})
|
||||
}
|
||||
return &redispkg.RedisScanResult{Keys: items, Cursor: "0"}, nil
|
||||
}
|
||||
|
||||
func (f *fakeRedisMigrationClient) GetKeyType(key string) (string, error) {
|
||||
if value, ok := f.values[key]; ok && value != nil {
|
||||
return value.Type, nil
|
||||
}
|
||||
return "none", nil
|
||||
}
|
||||
|
||||
func (f *fakeRedisMigrationClient) GetValue(key string) (*redispkg.RedisValue, error) {
|
||||
if value, ok := f.values[key]; ok {
|
||||
return value, nil
|
||||
}
|
||||
return nil, fmt.Errorf("key not found: %s", key)
|
||||
}
|
||||
|
||||
func (f *fakeRedisMigrationClient) DeleteKeys(keys []string) (int64, error) {
|
||||
var deleted int64
|
||||
for _, key := range keys {
|
||||
if _, ok := f.values[key]; ok {
|
||||
delete(f.values, key)
|
||||
deleted++
|
||||
}
|
||||
}
|
||||
return deleted, nil
|
||||
}
|
||||
|
||||
func (f *fakeRedisMigrationClient) SetTTL(key string, ttl int64) error {
|
||||
value, ok := f.values[key]
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
value.TTL = ttl
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *fakeRedisMigrationClient) SetString(key, value string, ttl int64) error {
|
||||
if f.values == nil {
|
||||
f.values = map[string]*redispkg.RedisValue{}
|
||||
}
|
||||
f.values[key] = &redispkg.RedisValue{Type: "string", TTL: ttl, Value: value, Length: int64(len(value))}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *fakeRedisMigrationClient) SetHashField(key, field, value string) error {
|
||||
if f.values == nil {
|
||||
f.values = map[string]*redispkg.RedisValue{}
|
||||
}
|
||||
current, ok := f.values[key]
|
||||
if !ok || current == nil || current.Type != "hash" {
|
||||
current = &redispkg.RedisValue{Type: "hash", TTL: -1, Value: map[string]string{}}
|
||||
f.values[key] = current
|
||||
}
|
||||
hash, _ := current.Value.(map[string]string)
|
||||
if hash == nil {
|
||||
hash = map[string]string{}
|
||||
}
|
||||
hash[field] = value
|
||||
current.Value = hash
|
||||
current.Length = int64(len(hash))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *fakeRedisMigrationClient) ListPush(key string, values ...string) error {
|
||||
if f.values == nil {
|
||||
f.values = map[string]*redispkg.RedisValue{}
|
||||
}
|
||||
current, ok := f.values[key]
|
||||
if !ok || current == nil || current.Type != "list" {
|
||||
current = &redispkg.RedisValue{Type: "list", TTL: -1, Value: []string{}}
|
||||
f.values[key] = current
|
||||
}
|
||||
list, _ := current.Value.([]string)
|
||||
list = append(list, values...)
|
||||
current.Value = list
|
||||
current.Length = int64(len(list))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *fakeRedisMigrationClient) SetAdd(key string, members ...string) error {
|
||||
if f.values == nil {
|
||||
f.values = map[string]*redispkg.RedisValue{}
|
||||
}
|
||||
current, ok := f.values[key]
|
||||
if !ok || current == nil || current.Type != "set" {
|
||||
current = &redispkg.RedisValue{Type: "set", TTL: -1, Value: []string{}}
|
||||
f.values[key] = current
|
||||
}
|
||||
setValues, _ := current.Value.([]string)
|
||||
seen := make(map[string]struct{}, len(setValues)+len(members))
|
||||
for _, item := range setValues {
|
||||
seen[item] = struct{}{}
|
||||
}
|
||||
for _, item := range members {
|
||||
if _, ok := seen[item]; ok {
|
||||
continue
|
||||
}
|
||||
seen[item] = struct{}{}
|
||||
setValues = append(setValues, item)
|
||||
}
|
||||
sort.Strings(setValues)
|
||||
current.Value = setValues
|
||||
current.Length = int64(len(setValues))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *fakeRedisMigrationClient) ZSetAdd(key string, members ...redispkg.ZSetMember) error {
|
||||
if f.values == nil {
|
||||
f.values = map[string]*redispkg.RedisValue{}
|
||||
}
|
||||
copied := append([]redispkg.ZSetMember(nil), members...)
|
||||
sort.Slice(copied, func(i, j int) bool {
|
||||
if copied[i].Score == copied[j].Score {
|
||||
return copied[i].Member < copied[j].Member
|
||||
}
|
||||
return copied[i].Score < copied[j].Score
|
||||
})
|
||||
f.values[key] = &redispkg.RedisValue{Type: "zset", TTL: -1, Value: copied, Length: int64(len(copied))}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *fakeRedisMigrationClient) StreamAdd(key string, fields map[string]string, id string) (string, error) {
|
||||
if f.values == nil {
|
||||
f.values = map[string]*redispkg.RedisValue{}
|
||||
}
|
||||
current, ok := f.values[key]
|
||||
if !ok || current == nil || current.Type != "stream" {
|
||||
current = &redispkg.RedisValue{Type: "stream", TTL: -1, Value: []redispkg.StreamEntry{}}
|
||||
f.values[key] = current
|
||||
}
|
||||
entries, _ := current.Value.([]redispkg.StreamEntry)
|
||||
entryID := id
|
||||
if entryID == "" {
|
||||
entryID = fmt.Sprintf("%d-0", len(entries)+1)
|
||||
}
|
||||
entries = append(entries, redispkg.StreamEntry{ID: entryID, Fields: fields})
|
||||
current.Value = entries
|
||||
current.Length = int64(len(entries))
|
||||
return entryID, nil
|
||||
}
|
||||
|
||||
// fakeRedisMongoTargetDB is a stub Mongo-like target database that records
// writes so tests can assert what the migration attempted.
type fakeRedisMongoTargetDB struct {
	tables     []string                 // collections reported by GetTables
	queryTable string                   // collection matched by Query; defaults to "redis_db_0_keys"
	queryRows  []map[string]interface{} // rows returned for the matching find query
	execs      []string                 // every statement passed to Exec
	applyTable string                   // table passed to the last ApplyChanges call
	applySet   connection.ChangeSet     // change set from the last ApplyChanges call
}
|
||||
|
||||
func (f *fakeRedisMongoTargetDB) Connect(config connection.ConnectionConfig) error { return nil }
|
||||
func (f *fakeRedisMongoTargetDB) Close() error { return nil }
|
||||
func (f *fakeRedisMongoTargetDB) Ping() error { return nil }
|
||||
func (f *fakeRedisMongoTargetDB) Query(query string) ([]map[string]interface{}, []string, error) {
|
||||
queryTable := strings.TrimSpace(f.queryTable)
|
||||
if queryTable == "" {
|
||||
queryTable = "redis_db_0_keys"
|
||||
}
|
||||
if strings.Contains(query, fmt.Sprintf(`"find":"%s"`, queryTable)) {
|
||||
return f.queryRows, []string{"_id", "key", "value"}, nil
|
||||
}
|
||||
return nil, nil, nil
|
||||
}
|
||||
func (f *fakeRedisMongoTargetDB) Exec(query string) (int64, error) {
|
||||
f.execs = append(f.execs, query)
|
||||
return 1, nil
|
||||
}
|
||||
func (f *fakeRedisMongoTargetDB) GetDatabases() ([]string, error) { return []string{"app"}, nil }
|
||||
func (f *fakeRedisMongoTargetDB) GetTables(dbName string) ([]string, error) {
|
||||
return f.tables, nil
|
||||
}
|
||||
func (f *fakeRedisMongoTargetDB) GetCreateStatement(dbName, tableName string) (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
func (f *fakeRedisMongoTargetDB) GetColumns(dbName, tableName string) ([]connection.ColumnDefinition, error) {
|
||||
return nil, nil
|
||||
}
|
||||
func (f *fakeRedisMongoTargetDB) GetAllColumns(dbName string) ([]connection.ColumnDefinitionWithTable, error) {
|
||||
return nil, nil
|
||||
}
|
||||
func (f *fakeRedisMongoTargetDB) GetIndexes(dbName, tableName string) ([]connection.IndexDefinition, error) {
|
||||
return nil, nil
|
||||
}
|
||||
func (f *fakeRedisMongoTargetDB) GetForeignKeys(dbName, tableName string) ([]connection.ForeignKeyDefinition, error) {
|
||||
return nil, nil
|
||||
}
|
||||
func (f *fakeRedisMongoTargetDB) GetTriggers(dbName, tableName string) ([]connection.TriggerDefinition, error) {
|
||||
return nil, nil
|
||||
}
|
||||
func (f *fakeRedisMongoTargetDB) ApplyChanges(tableName string, changes connection.ChangeSet) error {
|
||||
f.applyTable = tableName
|
||||
f.applySet = changes
|
||||
return nil
|
||||
}
|
||||
|
||||
type fakeMongoRedisSourceDB struct {
|
||||
tables []string
|
||||
rowsByTable map[string][]map[string]interface{}
|
||||
connectConfig connection.ConnectionConfig
|
||||
}
|
||||
|
||||
func (f *fakeMongoRedisSourceDB) Connect(config connection.ConnectionConfig) error {
|
||||
f.connectConfig = config
|
||||
return nil
|
||||
}
|
||||
func (f *fakeMongoRedisSourceDB) Close() error { return nil }
|
||||
func (f *fakeMongoRedisSourceDB) Ping() error { return nil }
|
||||
func (f *fakeMongoRedisSourceDB) Query(query string) ([]map[string]interface{}, []string, error) {
|
||||
for tableName, rows := range f.rowsByTable {
|
||||
if strings.Contains(query, fmt.Sprintf(`"find":"%s"`, tableName)) {
|
||||
return rows, []string{"_id", "key", "type", "ttl", "value"}, nil
|
||||
}
|
||||
}
|
||||
return nil, nil, fmt.Errorf("unexpected query: %s", query)
|
||||
}
|
||||
func (f *fakeMongoRedisSourceDB) Exec(query string) (int64, error) { return 0, nil }
|
||||
func (f *fakeMongoRedisSourceDB) GetDatabases() ([]string, error) { return []string{"app"}, nil }
|
||||
func (f *fakeMongoRedisSourceDB) GetTables(dbName string) ([]string, error) {
|
||||
return f.tables, nil
|
||||
}
|
||||
func (f *fakeMongoRedisSourceDB) GetCreateStatement(dbName, tableName string) (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
func (f *fakeMongoRedisSourceDB) GetColumns(dbName, tableName string) ([]connection.ColumnDefinition, error) {
|
||||
return nil, nil
|
||||
}
|
||||
func (f *fakeMongoRedisSourceDB) GetAllColumns(dbName string) ([]connection.ColumnDefinitionWithTable, error) {
|
||||
return nil, nil
|
||||
}
|
||||
func (f *fakeMongoRedisSourceDB) GetIndexes(dbName, tableName string) ([]connection.IndexDefinition, error) {
|
||||
return nil, nil
|
||||
}
|
||||
func (f *fakeMongoRedisSourceDB) GetForeignKeys(dbName, tableName string) ([]connection.ForeignKeyDefinition, error) {
|
||||
return nil, nil
|
||||
}
|
||||
func (f *fakeMongoRedisSourceDB) GetTriggers(dbName, tableName string) ([]connection.TriggerDefinition, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func TestRunSync_RedisToMongoAppliesInsertAndUpdate(t *testing.T) {
|
||||
fakeRedis := &fakeRedisMigrationClient{
|
||||
values: map[string]*redispkg.RedisValue{
|
||||
"user:1": {Type: "hash", TTL: 120, Length: 2, Value: map[string]string{"name": "alice"}},
|
||||
"user:2": {Type: "string", TTL: -1, Length: 1, Value: "online"},
|
||||
},
|
||||
}
|
||||
fakeTarget := &fakeRedisMongoTargetDB{
|
||||
tables: []string{"redis_db_0_keys"},
|
||||
queryRows: []map[string]interface{}{
|
||||
{"_id": "db0:user:1", "redisDb": 0, "key": "user:1", "type": "hash", "ttl": 120, "length": int64(2), "value": map[string]interface{}{"name": "old"}},
|
||||
},
|
||||
}
|
||||
|
||||
oldNewRedisClient := newRedisSourceClient
|
||||
oldNewDatabase := newSyncDatabase
|
||||
defer func() {
|
||||
newRedisSourceClient = oldNewRedisClient
|
||||
newSyncDatabase = oldNewDatabase
|
||||
}()
|
||||
newRedisSourceClient = func() redisMigrationClient { return fakeRedis }
|
||||
newSyncDatabase = func(dbType string) (db.Database, error) { return fakeTarget, nil }
|
||||
|
||||
engine := NewSyncEngine(Reporter{})
|
||||
result := engine.RunSync(SyncConfig{
|
||||
SourceConfig: connection.ConnectionConfig{Type: "redis", Database: "0"},
|
||||
TargetConfig: connection.ConnectionConfig{Type: "mongodb", Database: "app"},
|
||||
Tables: []string{"user:1", "user:2"},
|
||||
Content: "data",
|
||||
Mode: "insert_update",
|
||||
})
|
||||
|
||||
if !result.Success {
|
||||
t.Fatalf("expected success, got: %+v", result)
|
||||
}
|
||||
if fakeRedis.connectConfig.RedisDB != 0 {
|
||||
t.Fatalf("expected redis db 0, got %d", fakeRedis.connectConfig.RedisDB)
|
||||
}
|
||||
if fakeTarget.applyTable != "redis_db_0_keys" {
|
||||
t.Fatalf("unexpected apply table: %s", fakeTarget.applyTable)
|
||||
}
|
||||
if len(fakeTarget.applySet.Inserts) != 1 || len(fakeTarget.applySet.Updates) != 1 {
|
||||
t.Fatalf("unexpected change set: %+v", fakeTarget.applySet)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunSync_RedisToMongoUsesConfiguredCollectionName(t *testing.T) {
|
||||
fakeRedis := &fakeRedisMigrationClient{
|
||||
values: map[string]*redispkg.RedisValue{
|
||||
"user:1": {Type: "string", TTL: -1, Length: 1, Value: "online"},
|
||||
},
|
||||
}
|
||||
fakeTarget := &fakeRedisMongoTargetDB{
|
||||
tables: []string{"custom_keyspace_docs"},
|
||||
queryTable: "custom_keyspace_docs",
|
||||
}
|
||||
|
||||
oldNewRedisClient := newRedisSourceClient
|
||||
oldNewDatabase := newSyncDatabase
|
||||
defer func() {
|
||||
newRedisSourceClient = oldNewRedisClient
|
||||
newSyncDatabase = oldNewDatabase
|
||||
}()
|
||||
newRedisSourceClient = func() redisMigrationClient { return fakeRedis }
|
||||
newSyncDatabase = func(dbType string) (db.Database, error) { return fakeTarget, nil }
|
||||
|
||||
engine := NewSyncEngine(Reporter{})
|
||||
result := engine.RunSync(SyncConfig{
|
||||
SourceConfig: connection.ConnectionConfig{Type: "redis", Database: "0"},
|
||||
TargetConfig: connection.ConnectionConfig{Type: "mongodb", Database: "app"},
|
||||
Tables: []string{"user:1"},
|
||||
Content: "data",
|
||||
Mode: "insert_update",
|
||||
MongoCollectionName: "custom_keyspace_docs",
|
||||
})
|
||||
|
||||
if !result.Success {
|
||||
t.Fatalf("expected success, got: %+v", result)
|
||||
}
|
||||
if fakeTarget.applyTable != "custom_keyspace_docs" {
|
||||
t.Fatalf("unexpected apply table: %s", fakeTarget.applyTable)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPreview_RedisToMongoReturnsDocumentPreview(t *testing.T) {
|
||||
fakeRedis := &fakeRedisMigrationClient{
|
||||
values: map[string]*redispkg.RedisValue{
|
||||
"session:1": {Type: "string", TTL: 60, Length: 1, Value: "token"},
|
||||
},
|
||||
}
|
||||
fakeTarget := &fakeRedisMongoTargetDB{}
|
||||
|
||||
oldNewRedisClient := newRedisSourceClient
|
||||
oldNewDatabase := newSyncDatabase
|
||||
defer func() {
|
||||
newRedisSourceClient = oldNewRedisClient
|
||||
newSyncDatabase = oldNewDatabase
|
||||
}()
|
||||
newRedisSourceClient = func() redisMigrationClient { return fakeRedis }
|
||||
newSyncDatabase = func(dbType string) (db.Database, error) { return fakeTarget, nil }
|
||||
|
||||
engine := NewSyncEngine(Reporter{})
|
||||
preview, err := engine.Preview(SyncConfig{
|
||||
SourceConfig: connection.ConnectionConfig{Type: "redis", Database: "0"},
|
||||
TargetConfig: connection.ConnectionConfig{Type: "mongodb", Database: "app"},
|
||||
Tables: []string{"session:1"},
|
||||
Content: "data",
|
||||
Mode: "insert_update",
|
||||
}, "session:1", 20)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if preview.PKColumn != "_id" {
|
||||
t.Fatalf("unexpected pk column: %s", preview.PKColumn)
|
||||
}
|
||||
if preview.TotalInserts != 1 || len(preview.Inserts) != 1 {
|
||||
t.Fatalf("unexpected preview: %+v", preview)
|
||||
}
|
||||
if preview.Inserts[0].PK != "db0:session:1" {
|
||||
t.Fatalf("unexpected preview pk: %+v", preview.Inserts[0])
|
||||
}
|
||||
}
|
||||
|
||||
func TestRunSync_MongoToRedisAppliesStringAndHash(t *testing.T) {
|
||||
fakeSource := &fakeMongoRedisSourceDB{
|
||||
tables: []string{"redis_db_0_keys"},
|
||||
rowsByTable: map[string][]map[string]interface{}{
|
||||
"redis_db_0_keys": {
|
||||
{"_id": "db0:session:1", "key": "session:1", "type": "string", "ttl": int64(60), "value": "token"},
|
||||
{"_id": "db0:user:1", "key": "user:1", "type": "hash", "ttl": int64(120), "value": map[string]interface{}{"name": "alice", "role": "admin"}},
|
||||
},
|
||||
},
|
||||
}
|
||||
fakeRedis := &fakeRedisMigrationClient{
|
||||
values: map[string]*redispkg.RedisValue{
|
||||
"user:1": {Type: "hash", TTL: 120, Length: 1, Value: map[string]string{"name": "old"}},
|
||||
},
|
||||
}
|
||||
|
||||
oldNewRedisClient := newRedisSourceClient
|
||||
oldNewDatabase := newSyncDatabase
|
||||
defer func() {
|
||||
newRedisSourceClient = oldNewRedisClient
|
||||
newSyncDatabase = oldNewDatabase
|
||||
}()
|
||||
newRedisSourceClient = func() redisMigrationClient { return fakeRedis }
|
||||
newSyncDatabase = func(dbType string) (db.Database, error) { return fakeSource, nil }
|
||||
|
||||
engine := NewSyncEngine(Reporter{})
|
||||
result := engine.RunSync(SyncConfig{
|
||||
SourceConfig: connection.ConnectionConfig{Type: "mongodb", Database: "app"},
|
||||
TargetConfig: connection.ConnectionConfig{Type: "redis", Database: "0"},
|
||||
Tables: []string{"redis_db_0_keys"},
|
||||
Content: "data",
|
||||
Mode: "insert_update",
|
||||
})
|
||||
|
||||
if !result.Success {
|
||||
t.Fatalf("expected success, got: %+v", result)
|
||||
}
|
||||
if fakeRedis.connectConfig.RedisDB != 0 {
|
||||
t.Fatalf("expected redis db 0, got %d", fakeRedis.connectConfig.RedisDB)
|
||||
}
|
||||
if got := fakeRedis.values["session:1"]; got == nil || got.Type != "string" || got.Value != "token" || got.TTL != 60 {
|
||||
t.Fatalf("unexpected string value: %+v", got)
|
||||
}
|
||||
gotHash, _ := fakeRedis.values["user:1"].Value.(map[string]string)
|
||||
if gotHash["name"] != "alice" || gotHash["role"] != "admin" {
|
||||
t.Fatalf("unexpected hash value: %+v", fakeRedis.values["user:1"])
|
||||
}
|
||||
if result.RowsInserted != 1 || result.RowsUpdated != 1 {
|
||||
t.Fatalf("unexpected sync result: %+v", result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPreview_MongoToRedisReturnsCollectionPreview(t *testing.T) {
|
||||
fakeSource := &fakeMongoRedisSourceDB{
|
||||
tables: []string{"redis_db_0_keys"},
|
||||
rowsByTable: map[string][]map[string]interface{}{
|
||||
"redis_db_0_keys": {
|
||||
{"_id": "db0:session:1", "key": "session:1", "type": "string", "ttl": int64(60), "value": "token"},
|
||||
},
|
||||
},
|
||||
}
|
||||
fakeRedis := &fakeRedisMigrationClient{values: map[string]*redispkg.RedisValue{}}
|
||||
|
||||
oldNewRedisClient := newRedisSourceClient
|
||||
oldNewDatabase := newSyncDatabase
|
||||
defer func() {
|
||||
newRedisSourceClient = oldNewRedisClient
|
||||
newSyncDatabase = oldNewDatabase
|
||||
}()
|
||||
newRedisSourceClient = func() redisMigrationClient { return fakeRedis }
|
||||
newSyncDatabase = func(dbType string) (db.Database, error) { return fakeSource, nil }
|
||||
|
||||
engine := NewSyncEngine(Reporter{})
|
||||
preview, err := engine.Preview(SyncConfig{
|
||||
SourceConfig: connection.ConnectionConfig{Type: "mongodb", Database: "app"},
|
||||
TargetConfig: connection.ConnectionConfig{Type: "redis", Database: "0"},
|
||||
Tables: []string{"redis_db_0_keys"},
|
||||
Content: "data",
|
||||
Mode: "insert_update",
|
||||
}, "redis_db_0_keys", 20)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if preview.Table != "redis_db_0_keys" || preview.PKColumn != "key" {
|
||||
t.Fatalf("unexpected preview header: %+v", preview)
|
||||
}
|
||||
if preview.TotalInserts != 1 || len(preview.Inserts) != 1 {
|
||||
t.Fatalf("unexpected preview rows: %+v", preview)
|
||||
}
|
||||
if preview.Inserts[0].PK != "session:1" {
|
||||
t.Fatalf("unexpected preview pk: %+v", preview.Inserts[0])
|
||||
}
|
||||
}
|
||||
1014
internal/sync/schema_migration.go
Normal file
1014
internal/sync/schema_migration.go
Normal file
File diff suppressed because it is too large
Load Diff
957
internal/sync/schema_migration_test.go
Normal file
957
internal/sync/schema_migration_test.go
Normal file
@@ -0,0 +1,957 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"GoNavi-Wails/internal/connection"
|
||||
"context"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
type fakeMigrationDB struct {
|
||||
columns map[string][]connection.ColumnDefinition
|
||||
indexes map[string][]connection.IndexDefinition
|
||||
queryData map[string][]map[string]interface{}
|
||||
queryCols map[string][]string
|
||||
}
|
||||
|
||||
func (f *fakeMigrationDB) Connect(config connection.ConnectionConfig) error { return nil }
|
||||
func (f *fakeMigrationDB) Close() error { return nil }
|
||||
func (f *fakeMigrationDB) Ping() error { return nil }
|
||||
func (f *fakeMigrationDB) Query(query string) ([]map[string]interface{}, []string, error) {
|
||||
if rows, ok := f.queryData[query]; ok {
|
||||
return rows, f.queryCols[query], nil
|
||||
}
|
||||
return nil, nil, nil
|
||||
}
|
||||
func (f *fakeMigrationDB) Exec(query string) (int64, error) { return 0, nil }
|
||||
func (f *fakeMigrationDB) GetDatabases() ([]string, error) { return nil, nil }
|
||||
func (f *fakeMigrationDB) GetTables(dbName string) ([]string, error) {
|
||||
return nil, nil
|
||||
}
|
||||
func (f *fakeMigrationDB) GetCreateStatement(dbName, tableName string) (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
func (f *fakeMigrationDB) GetColumns(dbName, tableName string) ([]connection.ColumnDefinition, error) {
|
||||
key := dbName + "." + tableName
|
||||
if rows, ok := f.columns[key]; ok {
|
||||
return rows, nil
|
||||
}
|
||||
return []connection.ColumnDefinition{}, nil
|
||||
}
|
||||
func (f *fakeMigrationDB) GetAllColumns(dbName string) ([]connection.ColumnDefinitionWithTable, error) {
|
||||
return nil, nil
|
||||
}
|
||||
func (f *fakeMigrationDB) GetIndexes(dbName, tableName string) ([]connection.IndexDefinition, error) {
|
||||
key := dbName + "." + tableName
|
||||
if rows, ok := f.indexes[key]; ok {
|
||||
return rows, nil
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
func (f *fakeMigrationDB) GetForeignKeys(dbName, tableName string) ([]connection.ForeignKeyDefinition, error) {
|
||||
return nil, nil
|
||||
}
|
||||
func (f *fakeMigrationDB) GetTriggers(dbName, tableName string) ([]connection.TriggerDefinition, error) {
|
||||
return nil, nil
|
||||
}
|
||||
func (f *fakeMigrationDB) QueryContext(ctx context.Context, query string) ([]map[string]interface{}, []string, error) {
|
||||
return f.Query(query)
|
||||
}
|
||||
func (f *fakeMigrationDB) ExecContext(ctx context.Context, query string) (int64, error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func TestBuildMySQLToKingbaseColumnDefinition_AutoIncrementAndBoolean(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
def, warnings := buildMySQLToKingbaseColumnDefinition(connection.ColumnDefinition{
|
||||
Name: "id",
|
||||
Type: "int unsigned",
|
||||
Nullable: "NO",
|
||||
Extra: "auto_increment",
|
||||
})
|
||||
if !strings.Contains(def, "bigint") || !strings.Contains(def, "GENERATED BY DEFAULT AS IDENTITY") || !strings.Contains(def, "NOT NULL") {
|
||||
t.Fatalf("unexpected definition: %s", def)
|
||||
}
|
||||
if len(warnings) != 0 {
|
||||
t.Fatalf("unexpected warnings: %v", warnings)
|
||||
}
|
||||
|
||||
def, warnings = buildMySQLToKingbaseColumnDefinition(connection.ColumnDefinition{
|
||||
Name: "enabled",
|
||||
Type: "tinyint(1)",
|
||||
Nullable: "YES",
|
||||
Default: stringPtr("1"),
|
||||
})
|
||||
if !strings.Contains(def, "boolean") || !strings.Contains(def, "DEFAULT TRUE") {
|
||||
t.Fatalf("unexpected boolean definition: %s", def)
|
||||
}
|
||||
if len(warnings) != 0 {
|
||||
t.Fatalf("unexpected warnings for boolean: %v", warnings)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildMySQLToKingbaseCreateTablePlan_GeneratesAndSkipsIndexes(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
sourceDB := &fakeMigrationDB{
|
||||
indexes: map[string][]connection.IndexDefinition{
|
||||
"shop.orders": {
|
||||
{Name: "PRIMARY", ColumnName: "id", NonUnique: 0, SeqInIndex: 1, IndexType: "BTREE"},
|
||||
{Name: "idx_user_status", ColumnName: "user_id", NonUnique: 1, SeqInIndex: 1, IndexType: "BTREE"},
|
||||
{Name: "idx_user_status", ColumnName: "status", NonUnique: 1, SeqInIndex: 2, IndexType: "BTREE"},
|
||||
{Name: "idx_name_prefix", ColumnName: "name", NonUnique: 1, SeqInIndex: 1, IndexType: "BTREE", SubPart: 12},
|
||||
{Name: "idx_fulltext_note", ColumnName: "note", NonUnique: 1, SeqInIndex: 1, IndexType: "FULLTEXT"},
|
||||
},
|
||||
},
|
||||
}
|
||||
cols := []connection.ColumnDefinition{
|
||||
{Name: "id", Type: "bigint", Nullable: "NO", Key: "PRI", Extra: "auto_increment"},
|
||||
{Name: "user_id", Type: "bigint", Nullable: "NO"},
|
||||
{Name: "status", Type: "varchar(32)", Nullable: "YES"},
|
||||
{Name: "name", Type: "varchar(128)", Nullable: "YES"},
|
||||
{Name: "note", Type: "text", Nullable: "YES"},
|
||||
}
|
||||
cfg := SyncConfig{CreateIndexes: true}
|
||||
createSQL, postSQL, warnings, unsupported, idxCreate, idxSkip, err := buildMySQLToKingbaseCreateTablePlan(cfg, "public.orders", cols, sourceDB, "shop", "orders")
|
||||
if err != nil {
|
||||
t.Fatalf("buildMySQLToKingbaseCreateTablePlan returned error: %v", err)
|
||||
}
|
||||
if !strings.Contains(createSQL, `CREATE TABLE "public"."orders"`) {
|
||||
t.Fatalf("unexpected create SQL: %s", createSQL)
|
||||
}
|
||||
if !strings.Contains(createSQL, `PRIMARY KEY ("id")`) {
|
||||
t.Fatalf("create SQL missing primary key: %s", createSQL)
|
||||
}
|
||||
if idxCreate != 1 || idxSkip != 2 {
|
||||
t.Fatalf("unexpected index summary: create=%d skip=%d", idxCreate, idxSkip)
|
||||
}
|
||||
if len(postSQL) != 1 || !strings.Contains(postSQL[0], `CREATE INDEX "idx_user_status"`) {
|
||||
t.Fatalf("unexpected post SQL: %v", postSQL)
|
||||
}
|
||||
if len(warnings) != 0 {
|
||||
t.Fatalf("unexpected warnings: %v", warnings)
|
||||
}
|
||||
wantUnsupported := []string{
|
||||
"索引 idx_name_prefix 使用前缀长度,当前暂不支持迁移",
|
||||
"索引 idx_fulltext_note 类型=FULLTEXT,当前暂不支持自动迁移",
|
||||
}
|
||||
if !reflect.DeepEqual(unsupported, wantUnsupported) {
|
||||
t.Fatalf("unexpected unsupported objects: got=%v want=%v", unsupported, wantUnsupported)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildSchemaMigrationPlan_AutoCreateWhenTargetMissing(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
sourceDB := &fakeMigrationDB{
|
||||
columns: map[string][]connection.ColumnDefinition{
|
||||
"shop.orders": {
|
||||
{Name: "id", Type: "bigint", Nullable: "NO", Key: "PRI", Extra: "auto_increment"},
|
||||
{Name: "name", Type: "varchar(128)", Nullable: "YES"},
|
||||
},
|
||||
},
|
||||
indexes: map[string][]connection.IndexDefinition{},
|
||||
}
|
||||
targetDB := &fakeMigrationDB{columns: map[string][]connection.ColumnDefinition{}}
|
||||
cfg := SyncConfig{
|
||||
SourceConfig: connection.ConnectionConfig{Type: "mysql", Database: "shop"},
|
||||
TargetConfig: connection.ConnectionConfig{Type: "kingbase", Database: "demo"},
|
||||
TargetTableStrategy: "smart",
|
||||
CreateIndexes: true,
|
||||
}
|
||||
plan, sourceCols, targetCols, err := buildSchemaMigrationPlan(cfg, "orders", sourceDB, targetDB)
|
||||
if err != nil {
|
||||
t.Fatalf("buildSchemaMigrationPlan returned error: %v", err)
|
||||
}
|
||||
if len(sourceCols) != 2 || len(targetCols) != 0 {
|
||||
t.Fatalf("unexpected columns lengths: source=%d target=%d", len(sourceCols), len(targetCols))
|
||||
}
|
||||
if plan.TargetTableExists {
|
||||
t.Fatalf("expected target table missing")
|
||||
}
|
||||
if !plan.AutoCreate {
|
||||
t.Fatalf("expected auto create enabled")
|
||||
}
|
||||
if !strings.Contains(plan.PlannedAction, "自动建表") {
|
||||
t.Fatalf("unexpected planned action: %s", plan.PlannedAction)
|
||||
}
|
||||
if !strings.Contains(plan.CreateTableSQL, `CREATE TABLE "public"."orders"`) {
|
||||
t.Fatalf("unexpected create table SQL: %s", plan.CreateTableSQL)
|
||||
}
|
||||
}
|
||||
|
||||
// stringPtr returns a pointer to a copy of v — handy for populating
// optional *string fields in test fixtures.
func stringPtr(v string) *string {
	s := v
	return &s
}
func TestBuildPGLikeToMySQLCreateTablePlan_GeneratesMySQLDDL(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
sourceDB := &fakeMigrationDB{
|
||||
indexes: map[string][]connection.IndexDefinition{
|
||||
"public.users": {
|
||||
{Name: "users_email_key", ColumnName: "email", NonUnique: 0, SeqInIndex: 1, IndexType: "BTREE"},
|
||||
{Name: "idx_users_name", ColumnName: "name", NonUnique: 1, SeqInIndex: 1, IndexType: "BTREE"},
|
||||
},
|
||||
},
|
||||
}
|
||||
cols := []connection.ColumnDefinition{
|
||||
{Name: "id", Type: "integer", Nullable: "NO", Key: "PRI", Extra: "auto_increment"},
|
||||
{Name: "email", Type: "character varying(120)", Nullable: "NO"},
|
||||
{Name: "name", Type: "text", Nullable: "YES"},
|
||||
{Name: "profile", Type: "jsonb", Nullable: "YES"},
|
||||
}
|
||||
cfg := SyncConfig{CreateIndexes: true}
|
||||
createSQL, postSQL, warnings, unsupported, idxCreate, idxSkip, err := buildPGLikeToMySQLCreateTablePlan(cfg, "app.users", cols, sourceDB, "public", "users")
|
||||
if err != nil {
|
||||
t.Fatalf("buildPGLikeToMySQLCreateTablePlan returned error: %v", err)
|
||||
}
|
||||
if !strings.Contains(createSQL, "CREATE TABLE `app`.`users`") {
|
||||
t.Fatalf("unexpected create SQL: %s", createSQL)
|
||||
}
|
||||
if !strings.Contains(createSQL, "`id` int AUTO_INCREMENT NOT NULL") {
|
||||
t.Fatalf("unexpected id definition: %s", createSQL)
|
||||
}
|
||||
if !strings.Contains(createSQL, "`profile` json") {
|
||||
t.Fatalf("unexpected json definition: %s", createSQL)
|
||||
}
|
||||
if idxCreate != 2 || idxSkip != 0 {
|
||||
t.Fatalf("unexpected index summary: create=%d skip=%d", idxCreate, idxSkip)
|
||||
}
|
||||
if len(postSQL) != 2 {
|
||||
t.Fatalf("unexpected post sql length: %v", postSQL)
|
||||
}
|
||||
if len(warnings) != 0 {
|
||||
t.Fatalf("unexpected warnings: %v", warnings)
|
||||
}
|
||||
if len(unsupported) != 0 {
|
||||
t.Fatalf("unexpected unsupported: %v", unsupported)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildPGLikeToMySQLPlan_AutoCreateWhenTargetMissing(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
sourceDB := &fakeMigrationDB{
|
||||
columns: map[string][]connection.ColumnDefinition{
|
||||
"public.orders": {
|
||||
{Name: "id", Type: "bigint", Nullable: "NO", Key: "PRI", Extra: "auto_increment"},
|
||||
{Name: "amount", Type: "numeric(10,2)", Nullable: "NO"},
|
||||
},
|
||||
},
|
||||
indexes: map[string][]connection.IndexDefinition{},
|
||||
}
|
||||
targetDB := &fakeMigrationDB{columns: map[string][]connection.ColumnDefinition{}}
|
||||
cfg := SyncConfig{
|
||||
SourceConfig: connection.ConnectionConfig{Type: "kingbase", Database: "public"},
|
||||
TargetConfig: connection.ConnectionConfig{Type: "mysql", Database: "app"},
|
||||
TargetTableStrategy: "smart",
|
||||
CreateIndexes: true,
|
||||
}
|
||||
plan, sourceCols, targetCols, err := buildPGLikeToMySQLPlan(cfg, "orders", sourceDB, targetDB)
|
||||
if err != nil {
|
||||
t.Fatalf("buildPGLikeToMySQLPlan returned error: %v", err)
|
||||
}
|
||||
if len(sourceCols) != 2 || len(targetCols) != 0 {
|
||||
t.Fatalf("unexpected columns lengths: source=%d target=%d", len(sourceCols), len(targetCols))
|
||||
}
|
||||
if plan.TargetTableExists {
|
||||
t.Fatalf("expected target table missing")
|
||||
}
|
||||
if !plan.AutoCreate {
|
||||
t.Fatalf("expected auto create enabled")
|
||||
}
|
||||
if !strings.Contains(plan.CreateTableSQL, "CREATE TABLE `app`.`orders`") {
|
||||
t.Fatalf("unexpected create table SQL: %s", plan.CreateTableSQL)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildMySQLToPGLikeCreateTablePlan_GeneratesPostgresDDL(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
sourceDB := &fakeMigrationDB{
|
||||
indexes: map[string][]connection.IndexDefinition{
|
||||
"shop.orders": {
|
||||
{Name: "idx_orders_user", ColumnName: "user_id", NonUnique: 1, SeqInIndex: 1, IndexType: "BTREE"},
|
||||
{Name: "idx_orders_user", ColumnName: "status", NonUnique: 1, SeqInIndex: 2, IndexType: "BTREE"},
|
||||
},
|
||||
},
|
||||
}
|
||||
cols := []connection.ColumnDefinition{
|
||||
{Name: "id", Type: "bigint", Nullable: "NO", Key: "PRI", Extra: "auto_increment"},
|
||||
{Name: "user_id", Type: "bigint", Nullable: "NO"},
|
||||
{Name: "status", Type: "varchar(32)", Nullable: "YES"},
|
||||
{Name: "payload", Type: "json", Nullable: "YES"},
|
||||
}
|
||||
cfg := SyncConfig{CreateIndexes: true}
|
||||
createSQL, postSQL, warnings, unsupported, idxCreate, idxSkip, err := buildMySQLToPGLikeCreateTablePlan("postgres", cfg, "public.orders", cols, sourceDB, "shop", "orders")
|
||||
if err != nil {
|
||||
t.Fatalf("buildMySQLToPGLikeCreateTablePlan returned error: %v", err)
|
||||
}
|
||||
if !strings.Contains(createSQL, `CREATE TABLE "public"."orders"`) {
|
||||
t.Fatalf("unexpected create SQL: %s", createSQL)
|
||||
}
|
||||
if !strings.Contains(createSQL, `GENERATED BY DEFAULT AS IDENTITY`) {
|
||||
t.Fatalf("missing identity mapping: %s", createSQL)
|
||||
}
|
||||
if !strings.Contains(createSQL, `jsonb`) {
|
||||
t.Fatalf("missing jsonb mapping: %s", createSQL)
|
||||
}
|
||||
if idxCreate != 1 || idxSkip != 0 {
|
||||
t.Fatalf("unexpected index summary: create=%d skip=%d", idxCreate, idxSkip)
|
||||
}
|
||||
if len(postSQL) != 1 || !strings.Contains(postSQL[0], `CREATE INDEX "idx_orders_user"`) {
|
||||
t.Fatalf("unexpected post SQL: %v", postSQL)
|
||||
}
|
||||
if len(warnings) != 0 || len(unsupported) != 0 {
|
||||
t.Fatalf("unexpected warnings/unsupported: warnings=%v unsupported=%v", warnings, unsupported)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildMySQLToClickHouseCreateTableSQL_GeneratesMergeTree(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
cols := []connection.ColumnDefinition{
|
||||
{Name: "id", Type: "bigint unsigned", Nullable: "NO", Key: "PRI"},
|
||||
{Name: "name", Type: "varchar(128)", Nullable: "YES"},
|
||||
{Name: "payload", Type: "json", Nullable: "YES"},
|
||||
}
|
||||
createSQL, warnings, unsupported := buildMySQLToClickHouseCreateTableSQL("analytics.orders", cols)
|
||||
if !strings.Contains(createSQL, "ENGINE = MergeTree()") {
|
||||
t.Fatalf("unexpected create SQL: %s", createSQL)
|
||||
}
|
||||
if !strings.Contains(createSQL, "ORDER BY (`id`)") {
|
||||
t.Fatalf("unexpected order by: %s", createSQL)
|
||||
}
|
||||
if !strings.Contains(createSQL, "`payload` Nullable(String)") {
|
||||
t.Fatalf("unexpected json mapping: %s", createSQL)
|
||||
}
|
||||
if len(warnings) == 0 {
|
||||
t.Fatalf("expected warnings for clickhouse semantics")
|
||||
}
|
||||
if len(unsupported) != 0 {
|
||||
t.Fatalf("unexpected unsupported: %v", unsupported)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildClickHouseToMySQLCreateTableSQL_GeneratesMySQLDDL(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
cols := []connection.ColumnDefinition{
|
||||
{Name: "id", Type: "UInt64", Nullable: "NO", Key: "PRI"},
|
||||
{Name: "event_time", Type: "DateTime", Nullable: "NO"},
|
||||
{Name: "payload", Type: "Map(String, String)", Nullable: "YES"},
|
||||
}
|
||||
createSQL, warnings := buildClickHouseToMySQLCreateTableSQL("app.metrics", cols)
|
||||
if !strings.Contains(createSQL, "CREATE TABLE `app`.`metrics`") {
|
||||
t.Fatalf("unexpected create SQL: %s", createSQL)
|
||||
}
|
||||
if !strings.Contains(createSQL, "`id` bigint unsigned NOT NULL") {
|
||||
t.Fatalf("unexpected uint64 mapping: %s", createSQL)
|
||||
}
|
||||
if !strings.Contains(createSQL, "`payload` json") {
|
||||
t.Fatalf("unexpected complex type mapping: %s", createSQL)
|
||||
}
|
||||
if len(warnings) == 0 {
|
||||
t.Fatalf("expected warning for limited clickhouse reverse semantics")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildMySQLToMongoPlan_AutoCreateCollection(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
sourceDB := &fakeMigrationDB{
|
||||
columns: map[string][]connection.ColumnDefinition{
|
||||
"shop.users": {
|
||||
{Name: "id", Type: "bigint", Nullable: "NO", Key: "PRI"},
|
||||
{Name: "name", Type: "varchar(64)", Nullable: "YES"},
|
||||
},
|
||||
},
|
||||
indexes: map[string][]connection.IndexDefinition{
|
||||
"shop.users": {
|
||||
{Name: "idx_users_name", ColumnName: "name", NonUnique: 1, SeqInIndex: 1, IndexType: "BTREE"},
|
||||
},
|
||||
},
|
||||
}
|
||||
targetDB := &fakeMigrationDB{}
|
||||
cfg := SyncConfig{
|
||||
SourceConfig: connection.ConnectionConfig{Type: "mysql", Database: "shop"},
|
||||
TargetConfig: connection.ConnectionConfig{Type: "mongodb", Database: "app"},
|
||||
TargetTableStrategy: "smart",
|
||||
CreateIndexes: true,
|
||||
}
|
||||
plan, sourceCols, targetCols, err := buildMySQLToMongoPlan(cfg, "users", sourceDB, targetDB)
|
||||
if err != nil {
|
||||
t.Fatalf("buildMySQLToMongoPlan returned error: %v", err)
|
||||
}
|
||||
if len(sourceCols) != 2 || targetCols != nil {
|
||||
t.Fatalf("unexpected source/target columns: %d / %v", len(sourceCols), targetCols)
|
||||
}
|
||||
if !plan.AutoCreate || len(plan.PreDataSQL) == 0 {
|
||||
t.Fatalf("expected auto create collection command: %+v", plan)
|
||||
}
|
||||
if !strings.Contains(plan.PreDataSQL[0], `"create":"users"`) {
|
||||
t.Fatalf("unexpected create collection command: %v", plan.PreDataSQL)
|
||||
}
|
||||
if len(plan.PostDataSQL) != 1 || !strings.Contains(plan.PostDataSQL[0], `"createIndexes":"users"`) {
|
||||
t.Fatalf("unexpected index commands: %v", plan.PostDataSQL)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildPGLikeToMongoPlan_AutoCreateCollection(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
sourceDB := &fakeMigrationDB{
|
||||
columns: map[string][]connection.ColumnDefinition{
|
||||
"public.orders": {
|
||||
{Name: "id", Type: "bigint", Nullable: "NO", Key: "PRI"},
|
||||
{Name: "name", Type: "varchar(64)", Nullable: "YES"},
|
||||
},
|
||||
},
|
||||
indexes: map[string][]connection.IndexDefinition{
|
||||
"public.orders": {
|
||||
{Name: "idx_orders_name", ColumnName: "name", NonUnique: 1, SeqInIndex: 1, IndexType: "BTREE"},
|
||||
},
|
||||
},
|
||||
}
|
||||
targetDB := &fakeMigrationDB{}
|
||||
cfg := SyncConfig{
|
||||
SourceConfig: connection.ConnectionConfig{Type: "postgres", Database: "public"},
|
||||
TargetConfig: connection.ConnectionConfig{Type: "mongodb", Database: "app"},
|
||||
TargetTableStrategy: "smart",
|
||||
CreateIndexes: true,
|
||||
}
|
||||
plan, sourceCols, targetCols, err := buildPGLikeToMongoPlan(cfg, "orders", sourceDB, targetDB)
|
||||
if err != nil {
|
||||
t.Fatalf("buildPGLikeToMongoPlan returned error: %v", err)
|
||||
}
|
||||
if len(sourceCols) != 2 || targetCols != nil {
|
||||
t.Fatalf("unexpected source/target columns: %d / %v", len(sourceCols), targetCols)
|
||||
}
|
||||
if !plan.AutoCreate || len(plan.PreDataSQL) == 0 {
|
||||
t.Fatalf("expected auto create collection command: %+v", plan)
|
||||
}
|
||||
if !strings.Contains(plan.PreDataSQL[0], `"create":"orders"`) {
|
||||
t.Fatalf("unexpected create collection command: %v", plan.PreDataSQL)
|
||||
}
|
||||
if len(plan.PostDataSQL) != 1 || !strings.Contains(plan.PostDataSQL[0], `"createIndexes":"orders"`) {
|
||||
t.Fatalf("unexpected index commands: %v", plan.PostDataSQL)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildClickHouseToMongoPlan_AutoCreateCollection(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
sourceDB := &fakeMigrationDB{
|
||||
columns: map[string][]connection.ColumnDefinition{
|
||||
"analytics.metrics": {
|
||||
{Name: "id", Type: "UInt64", Nullable: "NO", Key: "PRI"},
|
||||
{Name: "host", Type: "String", Nullable: "YES"},
|
||||
},
|
||||
},
|
||||
}
|
||||
targetDB := &fakeMigrationDB{}
|
||||
cfg := SyncConfig{
|
||||
SourceConfig: connection.ConnectionConfig{Type: "clickhouse", Database: "analytics"},
|
||||
TargetConfig: connection.ConnectionConfig{Type: "mongodb", Database: "app"},
|
||||
TargetTableStrategy: "smart",
|
||||
}
|
||||
plan, sourceCols, targetCols, err := buildClickHouseToMongoPlan(cfg, "metrics", sourceDB, targetDB)
|
||||
if err != nil {
|
||||
t.Fatalf("buildClickHouseToMongoPlan returned error: %v", err)
|
||||
}
|
||||
if len(sourceCols) != 2 || targetCols != nil {
|
||||
t.Fatalf("unexpected source/target columns: %d / %v", len(sourceCols), targetCols)
|
||||
}
|
||||
if !plan.AutoCreate || len(plan.PreDataSQL) == 0 {
|
||||
t.Fatalf("expected auto create collection command: %+v", plan)
|
||||
}
|
||||
if !strings.Contains(plan.PreDataSQL[0], `"create":"metrics"`) {
|
||||
t.Fatalf("unexpected create collection command: %v", plan.PreDataSQL)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildTDengineToMongoPlan_AutoCreateCollection(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
sourceDB := &fakeMigrationDB{
|
||||
columns: map[string][]connection.ColumnDefinition{
|
||||
"src.cpu": {
|
||||
{Name: "ts", Type: "TIMESTAMP", Nullable: "NO"},
|
||||
{Name: "host", Type: "NCHAR(64)", Nullable: "YES"},
|
||||
},
|
||||
},
|
||||
}
|
||||
targetDB := &fakeMigrationDB{}
|
||||
cfg := SyncConfig{
|
||||
SourceConfig: connection.ConnectionConfig{Type: "tdengine", Database: "src"},
|
||||
TargetConfig: connection.ConnectionConfig{Type: "mongodb", Database: "app"},
|
||||
TargetTableStrategy: "smart",
|
||||
}
|
||||
plan, sourceCols, targetCols, err := buildTDengineToMongoPlan(cfg, "cpu", sourceDB, targetDB)
|
||||
if err != nil {
|
||||
t.Fatalf("buildTDengineToMongoPlan returned error: %v", err)
|
||||
}
|
||||
if len(sourceCols) != 2 || targetCols != nil {
|
||||
t.Fatalf("unexpected source/target columns: %d / %v", len(sourceCols), targetCols)
|
||||
}
|
||||
if !plan.AutoCreate || len(plan.PreDataSQL) == 0 {
|
||||
t.Fatalf("expected auto create collection command: %+v", plan)
|
||||
}
|
||||
if !strings.Contains(plan.PreDataSQL[0], `"create":"cpu"`) {
|
||||
t.Fatalf("unexpected create collection command: %v", plan.PreDataSQL)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildMongoToMySQLPlan_InfersColumnsAndCreatesTable(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
query := `{"find":"users","filter":{},"limit":200}`
|
||||
sourceDB := &fakeMigrationDB{
|
||||
queryData: map[string][]map[string]interface{}{
|
||||
query: {
|
||||
{"_id": "a1", "name": "alice", "age": int64(18), "profile": map[string]interface{}{"city": "shanghai"}},
|
||||
{"_id": "b2", "name": "bob", "profile": map[string]interface{}{"city": "beijing"}},
|
||||
},
|
||||
},
|
||||
queryCols: map[string][]string{query: {"_id", "name", "age", "profile"}},
|
||||
indexes: map[string][]connection.IndexDefinition{
|
||||
"crm.users": {{Name: "email_1", ColumnName: "name", NonUnique: 1, SeqInIndex: 1, IndexType: "BTREE"}},
|
||||
},
|
||||
}
|
||||
targetDB := &fakeMigrationDB{}
|
||||
cfg := SyncConfig{
|
||||
SourceConfig: connection.ConnectionConfig{Type: "mongodb", Database: "crm"},
|
||||
TargetConfig: connection.ConnectionConfig{Type: "mysql", Database: "app"},
|
||||
TargetTableStrategy: "smart",
|
||||
CreateIndexes: true,
|
||||
}
|
||||
plan, sourceCols, _, err := buildMongoToMySQLPlan(cfg, "users", sourceDB, targetDB)
|
||||
if err != nil {
|
||||
t.Fatalf("buildMongoToMySQLPlan returned error: %v", err)
|
||||
}
|
||||
if len(sourceCols) == 0 {
|
||||
t.Fatalf("expected inferred source cols")
|
||||
}
|
||||
if !plan.AutoCreate || !strings.Contains(plan.CreateTableSQL, "CREATE TABLE `app`.`users`") {
|
||||
t.Fatalf("unexpected create table sql: %s", plan.CreateTableSQL)
|
||||
}
|
||||
if !strings.Contains(plan.CreateTableSQL, "`_id` text NOT NULL") && !strings.Contains(plan.CreateTableSQL, "`_id` varchar") {
|
||||
t.Fatalf("missing inferred _id column: %s", plan.CreateTableSQL)
|
||||
}
|
||||
if !strings.Contains(plan.CreateTableSQL, "`profile` json") {
|
||||
t.Fatalf("expected nested field degrade to json: %s", plan.CreateTableSQL)
|
||||
}
|
||||
if len(plan.PostDataSQL) != 1 {
|
||||
t.Fatalf("expected one post index sql, got=%v", plan.PostDataSQL)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildTDengineToMySQLPlan_AutoCreateWhenTargetMissing(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
sourceDB := &fakeMigrationDB{
|
||||
columns: map[string][]connection.ColumnDefinition{
|
||||
"metrics.cpu": {
|
||||
{Name: "ts", Type: "TIMESTAMP", Nullable: "NO"},
|
||||
{Name: "host", Type: "NCHAR(64)", Nullable: "YES", Key: "TAG", Extra: "TAG"},
|
||||
{Name: "usage", Type: "DOUBLE", Nullable: "YES"},
|
||||
},
|
||||
},
|
||||
}
|
||||
targetDB := &fakeMigrationDB{}
|
||||
cfg := SyncConfig{
|
||||
SourceConfig: connection.ConnectionConfig{Type: "tdengine", Database: "metrics"},
|
||||
TargetConfig: connection.ConnectionConfig{Type: "mysql", Database: "app"},
|
||||
TargetTableStrategy: "smart",
|
||||
}
|
||||
plan, sourceCols, targetCols, err := buildTDengineToMySQLPlan(cfg, "cpu", sourceDB, targetDB)
|
||||
if err != nil {
|
||||
t.Fatalf("buildTDengineToMySQLPlan returned error: %v", err)
|
||||
}
|
||||
if len(sourceCols) != 3 || len(targetCols) != 0 {
|
||||
t.Fatalf("unexpected columns lengths: source=%d target=%d", len(sourceCols), len(targetCols))
|
||||
}
|
||||
if !plan.AutoCreate {
|
||||
t.Fatalf("expected auto create enabled")
|
||||
}
|
||||
if !strings.Contains(plan.CreateTableSQL, "CREATE TABLE `app`.`cpu`") {
|
||||
t.Fatalf("unexpected create table sql: %s", plan.CreateTableSQL)
|
||||
}
|
||||
if !strings.Contains(plan.CreateTableSQL, "`ts` datetime") {
|
||||
t.Fatalf("expected timestamp mapping, got: %s", plan.CreateTableSQL)
|
||||
}
|
||||
if !strings.Contains(plan.CreateTableSQL, "`host` varchar(64)") {
|
||||
t.Fatalf("expected nchar mapping, got: %s", plan.CreateTableSQL)
|
||||
}
|
||||
if len(plan.Warnings) == 0 || !strings.Contains(strings.Join(plan.Warnings, " "), "TAG") {
|
||||
t.Fatalf("expected TAG warning, got: %v", plan.Warnings)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildTDengineToPGLikePlan_AutoCreateWhenTargetMissing(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
sourceDB := &fakeMigrationDB{
|
||||
columns: map[string][]connection.ColumnDefinition{
|
||||
"metrics.cpu": {
|
||||
{Name: "ts", Type: "TIMESTAMP", Nullable: "NO"},
|
||||
{Name: "payload", Type: "JSON", Nullable: "YES"},
|
||||
{Name: "host", Type: "BINARY(32)", Nullable: "YES", Key: "TAG", Extra: "TAG"},
|
||||
},
|
||||
},
|
||||
}
|
||||
targetDB := &fakeMigrationDB{}
|
||||
cfg := SyncConfig{
|
||||
SourceConfig: connection.ConnectionConfig{Type: "tdengine", Database: "metrics"},
|
||||
TargetConfig: connection.ConnectionConfig{Type: "kingbase", Database: "ignored"},
|
||||
TargetTableStrategy: "smart",
|
||||
}
|
||||
plan, sourceCols, targetCols, err := buildTDengineToPGLikePlan(cfg, "cpu", sourceDB, targetDB)
|
||||
if err != nil {
|
||||
t.Fatalf("buildTDengineToPGLikePlan returned error: %v", err)
|
||||
}
|
||||
if len(sourceCols) != 3 || len(targetCols) != 0 {
|
||||
t.Fatalf("unexpected columns lengths: source=%d target=%d", len(sourceCols), len(targetCols))
|
||||
}
|
||||
if !plan.AutoCreate {
|
||||
t.Fatalf("expected auto create enabled")
|
||||
}
|
||||
if !strings.Contains(plan.CreateTableSQL, `CREATE TABLE "public"."cpu"`) {
|
||||
t.Fatalf("unexpected create table sql: %s", plan.CreateTableSQL)
|
||||
}
|
||||
if !strings.Contains(plan.CreateTableSQL, `"ts" timestamp`) {
|
||||
t.Fatalf("expected timestamp mapping, got: %s", plan.CreateTableSQL)
|
||||
}
|
||||
if !strings.Contains(plan.CreateTableSQL, `"payload" jsonb`) {
|
||||
t.Fatalf("expected json mapping, got: %s", plan.CreateTableSQL)
|
||||
}
|
||||
if len(plan.Warnings) == 0 || !strings.Contains(strings.Join(plan.Warnings, " "), "TAG") {
|
||||
t.Fatalf("expected TAG warning, got: %v", plan.Warnings)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildSchemaMigrationPlan_TDengineTargetWarnsInsertOnlyBoundary(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
sourceDB := &fakeMigrationDB{
|
||||
columns: map[string][]connection.ColumnDefinition{
|
||||
"shop.metrics": {
|
||||
{Name: "id", Type: "bigint", Nullable: "NO", Key: "PRI"},
|
||||
{Name: "ts", Type: "datetime", Nullable: "NO"},
|
||||
{Name: "value", Type: "double", Nullable: "YES"},
|
||||
},
|
||||
},
|
||||
}
|
||||
targetDB := &fakeMigrationDB{
|
||||
columns: map[string][]connection.ColumnDefinition{
|
||||
"taos.metrics": {
|
||||
{Name: "id", Type: "bigint", Nullable: "NO"},
|
||||
{Name: "ts", Type: "timestamp", Nullable: "NO"},
|
||||
{Name: "value", Type: "double", Nullable: "YES"},
|
||||
},
|
||||
},
|
||||
}
|
||||
cfg := SyncConfig{
|
||||
SourceConfig: connection.ConnectionConfig{Type: "mysql", Database: "shop"},
|
||||
TargetConfig: connection.ConnectionConfig{Type: "tdengine", Database: "taos"},
|
||||
Mode: "insert_update",
|
||||
}
|
||||
|
||||
plan, _, _, err := buildSchemaMigrationPlan(cfg, "metrics", sourceDB, targetDB)
|
||||
if err != nil {
|
||||
t.Fatalf("buildSchemaMigrationPlan returned error: %v", err)
|
||||
}
|
||||
warnings := strings.Join(plan.Warnings, " ")
|
||||
if !strings.Contains(warnings, "仅支持 INSERT 写入") {
|
||||
t.Fatalf("expected TDengine target warning, got: %v", plan.Warnings)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildMySQLLikeToTDenginePlan_AutoCreateWhenTargetMissing(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
sourceDB := &fakeMigrationDB{
|
||||
columns: map[string][]connection.ColumnDefinition{
|
||||
"shop.metrics": {
|
||||
{Name: "id", Type: "bigint", Nullable: "NO", Key: "PRI", Extra: "auto_increment"},
|
||||
{Name: "ts", Type: "datetime", Nullable: "NO"},
|
||||
{Name: "payload", Type: "json", Nullable: "YES"},
|
||||
},
|
||||
},
|
||||
}
|
||||
targetDB := &fakeMigrationDB{}
|
||||
cfg := SyncConfig{
|
||||
SourceConfig: connection.ConnectionConfig{Type: "mysql", Database: "shop"},
|
||||
TargetConfig: connection.ConnectionConfig{Type: "tdengine", Database: "taos"},
|
||||
TargetTableStrategy: "smart",
|
||||
}
|
||||
plan, sourceCols, targetCols, err := buildMySQLLikeToTDenginePlan(cfg, "metrics", sourceDB, targetDB)
|
||||
if err != nil {
|
||||
t.Fatalf("buildMySQLLikeToTDenginePlan returned error: %v", err)
|
||||
}
|
||||
if len(sourceCols) != 3 || len(targetCols) != 0 {
|
||||
t.Fatalf("unexpected columns lengths: source=%d target=%d", len(sourceCols), len(targetCols))
|
||||
}
|
||||
if !plan.AutoCreate {
|
||||
t.Fatalf("expected auto create enabled")
|
||||
}
|
||||
if !strings.Contains(plan.CreateTableSQL, "CREATE TABLE `taos`.`metrics`") {
|
||||
t.Fatalf("unexpected create sql: %s", plan.CreateTableSQL)
|
||||
}
|
||||
if !strings.Contains(plan.CreateTableSQL, "`ts` TIMESTAMP") {
|
||||
t.Fatalf("expected ts first column mapped to TIMESTAMP, got: %s", plan.CreateTableSQL)
|
||||
}
|
||||
if !strings.Contains(plan.CreateTableSQL, "`payload` VARCHAR(") {
|
||||
t.Fatalf("expected json degrade to VARCHAR, got: %s", plan.CreateTableSQL)
|
||||
}
|
||||
if !strings.Contains(strings.Join(plan.Warnings, " "), "insert-only") && !strings.Contains(strings.Join(plan.Warnings, " "), "INSERT") {
|
||||
t.Fatalf("expected tdengine target warning, got: %v", plan.Warnings)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildPGLikeToTDenginePlan_AutoCreateWhenTargetMissing(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
sourceDB := &fakeMigrationDB{
|
||||
columns: map[string][]connection.ColumnDefinition{
|
||||
"public.metrics": {
|
||||
{Name: "event_time", Type: "timestamp without time zone", Nullable: "NO"},
|
||||
{Name: "name", Type: "character varying(64)", Nullable: "YES"},
|
||||
{Name: "meta", Type: "jsonb", Nullable: "YES"},
|
||||
},
|
||||
},
|
||||
}
|
||||
targetDB := &fakeMigrationDB{}
|
||||
cfg := SyncConfig{
|
||||
SourceConfig: connection.ConnectionConfig{Type: "postgres", Database: "ignored"},
|
||||
TargetConfig: connection.ConnectionConfig{Type: "tdengine", Database: "taos"},
|
||||
TargetTableStrategy: "smart",
|
||||
}
|
||||
plan, sourceCols, targetCols, err := buildPGLikeToTDenginePlan(cfg, "metrics", sourceDB, targetDB)
|
||||
if err != nil {
|
||||
t.Fatalf("buildPGLikeToTDenginePlan returned error: %v", err)
|
||||
}
|
||||
if len(sourceCols) != 3 || len(targetCols) != 0 {
|
||||
t.Fatalf("unexpected columns lengths: source=%d target=%d", len(sourceCols), len(targetCols))
|
||||
}
|
||||
if !plan.AutoCreate {
|
||||
t.Fatalf("expected auto create enabled")
|
||||
}
|
||||
if !strings.Contains(plan.CreateTableSQL, "CREATE TABLE `taos`.`metrics`") {
|
||||
t.Fatalf("unexpected create sql: %s", plan.CreateTableSQL)
|
||||
}
|
||||
if !strings.Contains(plan.CreateTableSQL, "`event_time` TIMESTAMP") {
|
||||
t.Fatalf("expected timestamp mapping, got: %s", plan.CreateTableSQL)
|
||||
}
|
||||
if !strings.Contains(plan.CreateTableSQL, "`meta` VARCHAR(") {
|
||||
t.Fatalf("expected jsonb degrade to VARCHAR, got: %s", plan.CreateTableSQL)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildMySQLLikeToTDenginePlan_RejectsAutoCreateWithoutTimestampColumn(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
sourceDB := &fakeMigrationDB{
|
||||
columns: map[string][]connection.ColumnDefinition{
|
||||
"shop.metrics": {
|
||||
{Name: "id", Type: "bigint", Nullable: "NO", Key: "PRI"},
|
||||
{Name: "name", Type: "varchar(64)", Nullable: "YES"},
|
||||
},
|
||||
},
|
||||
}
|
||||
targetDB := &fakeMigrationDB{}
|
||||
cfg := SyncConfig{
|
||||
SourceConfig: connection.ConnectionConfig{Type: "mysql", Database: "shop"},
|
||||
TargetConfig: connection.ConnectionConfig{Type: "tdengine", Database: "taos"},
|
||||
TargetTableStrategy: "smart",
|
||||
}
|
||||
plan, _, _, err := buildMySQLLikeToTDenginePlan(cfg, "metrics", sourceDB, targetDB)
|
||||
if err != nil {
|
||||
t.Fatalf("buildMySQLLikeToTDenginePlan returned error: %v", err)
|
||||
}
|
||||
if plan.AutoCreate {
|
||||
t.Fatalf("expected auto create disabled when source has no timestamp column")
|
||||
}
|
||||
if !strings.Contains(plan.PlannedAction, "时间列") {
|
||||
t.Fatalf("unexpected planned action: %s", plan.PlannedAction)
|
||||
}
|
||||
if !strings.Contains(strings.Join(plan.Warnings, " "), "时间列") {
|
||||
t.Fatalf("expected missing timestamp warning, got: %v", plan.Warnings)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildClickHouseToTDenginePlan_AutoCreateWhenTargetMissing(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
sourceDB := &fakeMigrationDB{
|
||||
columns: map[string][]connection.ColumnDefinition{
|
||||
"analytics.metrics": {
|
||||
{Name: "event_time", Type: "DateTime64(3)", Nullable: "NO"},
|
||||
{Name: "host", Type: "FixedString(64)", Nullable: "YES"},
|
||||
{Name: "payload", Type: "Map(String,String)", Nullable: "YES"},
|
||||
},
|
||||
},
|
||||
}
|
||||
targetDB := &fakeMigrationDB{}
|
||||
cfg := SyncConfig{
|
||||
SourceConfig: connection.ConnectionConfig{Type: "clickhouse", Database: "analytics"},
|
||||
TargetConfig: connection.ConnectionConfig{Type: "tdengine", Database: "taos"},
|
||||
TargetTableStrategy: "smart",
|
||||
}
|
||||
plan, sourceCols, targetCols, err := buildClickHouseToTDenginePlan(cfg, "metrics", sourceDB, targetDB)
|
||||
if err != nil {
|
||||
t.Fatalf("buildClickHouseToTDenginePlan returned error: %v", err)
|
||||
}
|
||||
if len(sourceCols) != 3 || len(targetCols) != 0 {
|
||||
t.Fatalf("unexpected columns lengths: source=%d target=%d", len(sourceCols), len(targetCols))
|
||||
}
|
||||
if !plan.AutoCreate {
|
||||
t.Fatalf("expected auto create enabled")
|
||||
}
|
||||
if !strings.Contains(plan.CreateTableSQL, "CREATE TABLE `taos`.`metrics`") {
|
||||
t.Fatalf("unexpected create sql: %s", plan.CreateTableSQL)
|
||||
}
|
||||
if !strings.Contains(plan.CreateTableSQL, "`event_time` TIMESTAMP") {
|
||||
t.Fatalf("expected datetime64 mapping, got: %s", plan.CreateTableSQL)
|
||||
}
|
||||
if !strings.Contains(plan.CreateTableSQL, "`host` VARCHAR(64)") {
|
||||
t.Fatalf("expected fixedstring mapping, got: %s", plan.CreateTableSQL)
|
||||
}
|
||||
if !strings.Contains(plan.CreateTableSQL, "`payload` VARCHAR(") {
|
||||
t.Fatalf("expected complex type degrade to VARCHAR, got: %s", plan.CreateTableSQL)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildClickHouseToPGLikePlan_AutoCreateWhenTargetMissing(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
sourceDB := &fakeMigrationDB{
|
||||
columns: map[string][]connection.ColumnDefinition{
|
||||
"analytics.metrics": {
|
||||
{Name: "id", Type: "UInt64", Nullable: "NO", Key: "PRI"},
|
||||
{Name: "event_time", Type: "DateTime64(3)", Nullable: "NO"},
|
||||
{Name: "host", Type: "FixedString(64)", Nullable: "YES"},
|
||||
{Name: "payload", Type: "Map(String,String)", Nullable: "YES"},
|
||||
},
|
||||
},
|
||||
}
|
||||
targetDB := &fakeMigrationDB{}
|
||||
cfg := SyncConfig{
|
||||
SourceConfig: connection.ConnectionConfig{Type: "clickhouse", Database: "analytics"},
|
||||
TargetConfig: connection.ConnectionConfig{Type: "postgres", Database: "public"},
|
||||
TargetTableStrategy: "smart",
|
||||
}
|
||||
plan, sourceCols, targetCols, err := buildClickHouseToPGLikePlan(cfg, "metrics", sourceDB, targetDB)
|
||||
if err != nil {
|
||||
t.Fatalf("buildClickHouseToPGLikePlan returned error: %v", err)
|
||||
}
|
||||
if len(sourceCols) != 4 || len(targetCols) != 0 {
|
||||
t.Fatalf("unexpected columns lengths: source=%d target=%d", len(sourceCols), len(targetCols))
|
||||
}
|
||||
if !plan.AutoCreate {
|
||||
t.Fatalf("expected auto create enabled")
|
||||
}
|
||||
if !strings.Contains(plan.CreateTableSQL, `CREATE TABLE "public"."metrics"`) {
|
||||
t.Fatalf("unexpected create sql: %s", plan.CreateTableSQL)
|
||||
}
|
||||
if !strings.Contains(plan.CreateTableSQL, `"id" numeric(20,0)`) {
|
||||
t.Fatalf("expected uint64 safeguard mapping, got: %s", plan.CreateTableSQL)
|
||||
}
|
||||
if !strings.Contains(plan.CreateTableSQL, `"event_time" timestamp`) {
|
||||
t.Fatalf("expected datetime64 mapping, got: %s", plan.CreateTableSQL)
|
||||
}
|
||||
if !strings.Contains(plan.CreateTableSQL, `"host" varchar(64)`) {
|
||||
t.Fatalf("expected fixedstring mapping, got: %s", plan.CreateTableSQL)
|
||||
}
|
||||
if !strings.Contains(plan.CreateTableSQL, `"payload" jsonb`) {
|
||||
t.Fatalf("expected complex type degrade to jsonb, got: %s", plan.CreateTableSQL)
|
||||
}
|
||||
if !strings.Contains(plan.CreateTableSQL, `PRIMARY KEY ("id")`) {
|
||||
t.Fatalf("expected primary key preservation, got: %s", plan.CreateTableSQL)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildPGLikeToClickHousePlan_AutoCreateWhenTargetMissing(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
sourceDB := &fakeMigrationDB{
|
||||
columns: map[string][]connection.ColumnDefinition{
|
||||
"public.orders": {
|
||||
{Name: "id", Type: "bigint", Nullable: "NO", Key: "PRI"},
|
||||
{Name: "created_at", Type: "timestamp without time zone", Nullable: "NO"},
|
||||
{Name: "profile", Type: "jsonb", Nullable: "YES"},
|
||||
},
|
||||
},
|
||||
}
|
||||
targetDB := &fakeMigrationDB{}
|
||||
cfg := SyncConfig{
|
||||
SourceConfig: connection.ConnectionConfig{Type: "postgres", Database: "public"},
|
||||
TargetConfig: connection.ConnectionConfig{Type: "clickhouse", Database: "analytics"},
|
||||
TargetTableStrategy: "smart",
|
||||
}
|
||||
plan, sourceCols, targetCols, err := buildPGLikeToClickHousePlan(cfg, "orders", sourceDB, targetDB)
|
||||
if err != nil {
|
||||
t.Fatalf("buildPGLikeToClickHousePlan returned error: %v", err)
|
||||
}
|
||||
if len(sourceCols) != 3 || len(targetCols) != 0 {
|
||||
t.Fatalf("unexpected columns lengths: source=%d target=%d", len(sourceCols), len(targetCols))
|
||||
}
|
||||
if !plan.AutoCreate {
|
||||
t.Fatalf("expected auto create enabled")
|
||||
}
|
||||
if !strings.Contains(plan.CreateTableSQL, "CREATE TABLE `analytics`.`orders`") {
|
||||
t.Fatalf("unexpected create sql: %s", plan.CreateTableSQL)
|
||||
}
|
||||
if !strings.Contains(plan.CreateTableSQL, "`created_at` DateTime") {
|
||||
t.Fatalf("expected timestamp mapping, got: %s", plan.CreateTableSQL)
|
||||
}
|
||||
if !strings.Contains(plan.CreateTableSQL, "`profile` Nullable(String)") {
|
||||
t.Fatalf("expected jsonb degrade to Nullable(String), got: %s", plan.CreateTableSQL)
|
||||
}
|
||||
if !strings.Contains(plan.CreateTableSQL, "ORDER BY (`id`)") {
|
||||
t.Fatalf("expected primary key order by, got: %s", plan.CreateTableSQL)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildTDengineToTDenginePlan_AutoCreateWhenTargetMissing(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
sourceDB := &fakeMigrationDB{
|
||||
columns: map[string][]connection.ColumnDefinition{
|
||||
"src.cpu": {
|
||||
{Name: "ts", Type: "TIMESTAMP", Nullable: "NO"},
|
||||
{Name: "host", Type: "NCHAR(64)", Nullable: "YES"},
|
||||
{Name: "region", Type: "NCHAR(32)", Nullable: "YES", Key: "TAG"},
|
||||
},
|
||||
},
|
||||
}
|
||||
targetDB := &fakeMigrationDB{}
|
||||
cfg := SyncConfig{
|
||||
SourceConfig: connection.ConnectionConfig{Type: "tdengine", Database: "src"},
|
||||
TargetConfig: connection.ConnectionConfig{Type: "tdengine", Database: "dst"},
|
||||
TargetTableStrategy: "smart",
|
||||
}
|
||||
plan, sourceCols, targetCols, err := buildTDengineToTDenginePlan(cfg, "cpu", sourceDB, targetDB)
|
||||
if err != nil {
|
||||
t.Fatalf("buildTDengineToTDenginePlan returned error: %v", err)
|
||||
}
|
||||
if len(sourceCols) != 3 || len(targetCols) != 0 {
|
||||
t.Fatalf("unexpected columns lengths: source=%d target=%d", len(sourceCols), len(targetCols))
|
||||
}
|
||||
if !plan.AutoCreate {
|
||||
t.Fatalf("expected auto create enabled")
|
||||
}
|
||||
if !strings.Contains(plan.CreateTableSQL, "CREATE TABLE `dst`.`cpu`") {
|
||||
t.Fatalf("unexpected create sql: %s", plan.CreateTableSQL)
|
||||
}
|
||||
if !strings.Contains(plan.CreateTableSQL, "`ts` TIMESTAMP") {
|
||||
t.Fatalf("expected timestamp preserved, got: %s", plan.CreateTableSQL)
|
||||
}
|
||||
if !strings.Contains(plan.CreateTableSQL, "`region` NCHAR(32)") {
|
||||
t.Fatalf("expected tag degrade to regular nchar column, got: %s", plan.CreateTableSQL)
|
||||
}
|
||||
if !strings.Contains(strings.Join(plan.Warnings, " "), "TAG") {
|
||||
t.Fatalf("expected TAG degrade warning, got: %v", plan.Warnings)
|
||||
}
|
||||
}
|
||||
@@ -7,15 +7,16 @@ import (
|
||||
)
|
||||
|
||||
func (s *SyncEngine) syncTableSchema(config SyncConfig, res *SyncResult, sourceDB db.Database, targetDB db.Database, tableName string) error {
|
||||
targetType := strings.ToLower(strings.TrimSpace(config.TargetConfig.Type))
|
||||
targetType := resolveMigrationDBType(config.TargetConfig)
|
||||
if targetType != "mysql" {
|
||||
s.appendLog(config.JobID, res, "warn", fmt.Sprintf("目标数据库类型=%s 暂不支持结构同步,已跳过表 %s", config.TargetConfig.Type, tableName))
|
||||
return nil
|
||||
}
|
||||
|
||||
sourceSchema, sourceTable := normalizeSchemaAndTable(config.SourceConfig.Type, config.SourceConfig.Database, tableName)
|
||||
targetSchema, targetTable := normalizeSchemaAndTable(config.TargetConfig.Type, config.TargetConfig.Database, tableName)
|
||||
targetQueryTable := qualifiedNameForQuery(config.TargetConfig.Type, targetSchema, targetTable, tableName)
|
||||
sourceType := resolveMigrationDBType(config.SourceConfig)
|
||||
sourceSchema, sourceTable := normalizeSchemaAndTable(sourceType, config.SourceConfig.Database, tableName)
|
||||
targetSchema, targetTable := normalizeSchemaAndTable(targetType, config.TargetConfig.Database, tableName)
|
||||
targetQueryTable := qualifiedNameForQuery(targetType, targetSchema, targetTable, tableName)
|
||||
|
||||
// 1) 获取源表字段
|
||||
sourceCols, err := sourceDB.GetColumns(sourceSchema, sourceTable)
|
||||
@@ -26,7 +27,6 @@ func (s *SyncEngine) syncTableSchema(config SyncConfig, res *SyncResult, sourceD
|
||||
// 2) 确保目标表存在
|
||||
targetCols, err := targetDB.GetColumns(targetSchema, targetTable)
|
||||
if err != nil {
|
||||
sourceType := strings.ToLower(strings.TrimSpace(config.SourceConfig.Type))
|
||||
if sourceType != "mysql" {
|
||||
return fmt.Errorf("目标表不存在且源类型=%s 暂不支持自动建表: %w", config.SourceConfig.Type, err)
|
||||
}
|
||||
@@ -62,7 +62,6 @@ func (s *SyncEngine) syncTableSchema(config SyncConfig, res *SyncResult, sourceD
|
||||
|
||||
// 3) 补齐目标缺失字段(安全策略:新增字段统一允许 NULL)
|
||||
missing := make([]string, 0)
|
||||
sourceType := strings.ToLower(strings.TrimSpace(config.SourceConfig.Type))
|
||||
for _, c := range sourceCols {
|
||||
colName := strings.TrimSpace(c.Name)
|
||||
if colName == "" {
|
||||
|
||||
@@ -22,7 +22,7 @@ func quoteIdentByType(dbType string, ident string) string {
|
||||
}
|
||||
|
||||
switch dbType {
|
||||
case "mysql", "mariadb", "diros", "sphinx":
|
||||
case "mysql", "mariadb", "diros", "sphinx", "clickhouse", "tdengine":
|
||||
return "`" + strings.ReplaceAll(ident, "`", "``") + "`"
|
||||
case "sqlserver":
|
||||
escaped := strings.ReplaceAll(ident, "]", "]]")
|
||||
@@ -74,8 +74,10 @@ func normalizeSchemaAndTable(dbType string, dbName string, tableName string) (st
|
||||
}
|
||||
|
||||
switch strings.ToLower(strings.TrimSpace(dbType)) {
|
||||
case "postgres", "kingbase", "vastbase":
|
||||
case "postgres", "kingbase", "highgo", "vastbase":
|
||||
return "public", rawTable
|
||||
case "duckdb":
|
||||
return "main", rawTable
|
||||
default:
|
||||
return rawDB, rawTable
|
||||
}
|
||||
@@ -91,7 +93,7 @@ func qualifiedNameForQuery(dbType string, schema string, table string, original
|
||||
}
|
||||
|
||||
switch strings.ToLower(strings.TrimSpace(dbType)) {
|
||||
case "postgres", "kingbase", "vastbase":
|
||||
case "postgres", "kingbase", "highgo", "vastbase":
|
||||
s := strings.TrimSpace(schema)
|
||||
if s == "" {
|
||||
s = "public"
|
||||
@@ -100,7 +102,16 @@ func qualifiedNameForQuery(dbType string, schema string, table string, original
|
||||
return raw
|
||||
}
|
||||
return s + "." + table
|
||||
case "mysql", "mariadb", "diros", "sphinx":
|
||||
case "duckdb":
|
||||
s := strings.TrimSpace(schema)
|
||||
if s == "" {
|
||||
s = "main"
|
||||
}
|
||||
if table == "" {
|
||||
return raw
|
||||
}
|
||||
return s + "." + table
|
||||
case "mysql", "mariadb", "diros", "sphinx", "clickhouse", "tdengine":
|
||||
s := strings.TrimSpace(schema)
|
||||
if s == "" || table == "" {
|
||||
return table
|
||||
|
||||
@@ -12,14 +12,17 @@ import (
|
||||
|
||||
// SyncConfig defines the parameters for a synchronization task
|
||||
type SyncConfig struct {
|
||||
SourceConfig connection.ConnectionConfig `json:"sourceConfig"`
|
||||
TargetConfig connection.ConnectionConfig `json:"targetConfig"`
|
||||
Tables []string `json:"tables"` // Tables to sync
|
||||
Content string `json:"content,omitempty"` // "data", "schema", "both"
|
||||
Mode string `json:"mode"` // "insert_update", "insert_only", "full_overwrite"
|
||||
JobID string `json:"jobId,omitempty"`
|
||||
AutoAddColumns bool `json:"autoAddColumns,omitempty"` // 自动补齐缺失字段(当前仅 MySQL 目标支持)
|
||||
TableOptions map[string]TableOptions `json:"tableOptions,omitempty"`
|
||||
SourceConfig connection.ConnectionConfig `json:"sourceConfig"`
|
||||
TargetConfig connection.ConnectionConfig `json:"targetConfig"`
|
||||
Tables []string `json:"tables"`
|
||||
Content string `json:"content,omitempty"` // "data", "schema", "both"
|
||||
Mode string `json:"mode"` // "insert_update", "insert_only", "full_overwrite"
|
||||
JobID string `json:"jobId,omitempty"`
|
||||
AutoAddColumns bool `json:"autoAddColumns,omitempty"` // 自动补齐缺失字段
|
||||
TargetTableStrategy string `json:"targetTableStrategy,omitempty"`
|
||||
CreateIndexes bool `json:"createIndexes,omitempty"`
|
||||
MongoCollectionName string `json:"mongoCollectionName,omitempty"`
|
||||
TableOptions map[string]TableOptions `json:"tableOptions,omitempty"`
|
||||
}
|
||||
|
||||
// SyncResult holds the result of the sync operation
|
||||
@@ -45,6 +48,13 @@ func NewSyncEngine(reporter Reporter) *SyncEngine {
|
||||
func (s *SyncEngine) RunSync(config SyncConfig) SyncResult {
|
||||
result := SyncResult{Success: true, Logs: []string{}}
|
||||
logger.Infof("开始数据同步:源=%s 目标=%s 表数量=%d", formatConnSummaryForSync(config.SourceConfig), formatConnSummaryForSync(config.TargetConfig), len(config.Tables))
|
||||
if isRedisToMongoKeyspacePair(config) {
|
||||
return s.runRedisToMongoSync(config, result)
|
||||
}
|
||||
if isMongoToRedisKeyspacePair(config) {
|
||||
return s.runMongoToRedisSync(config, result)
|
||||
}
|
||||
|
||||
totalTables := len(config.Tables)
|
||||
s.progress(config.JobID, 0, totalTables, "", "开始同步")
|
||||
|
||||
@@ -70,6 +80,7 @@ func (s *SyncEngine) RunSync(config SyncConfig) SyncResult {
|
||||
s.appendLog(config.JobID, &result, "warn", fmt.Sprintf("未知同步模式 %q,已自动使用 insert_update", config.Mode))
|
||||
}
|
||||
defaultMode := normalizeSyncMode(config.Mode)
|
||||
strategy := normalizeTargetTableStrategy(config.TargetTableStrategy)
|
||||
|
||||
contentLabel := "仅同步数据"
|
||||
if syncSchema && syncData {
|
||||
@@ -77,9 +88,9 @@ func (s *SyncEngine) RunSync(config SyncConfig) SyncResult {
|
||||
} else if syncSchema {
|
||||
contentLabel = "仅同步结构"
|
||||
}
|
||||
s.appendLog(config.JobID, &result, "info", fmt.Sprintf("同步内容:%s;模式:%s;自动补字段:%v", contentLabel, defaultMode, config.AutoAddColumns))
|
||||
s.appendLog(config.JobID, &result, "info", fmt.Sprintf("同步内容:%s;模式:%s;自动补字段:%v;目标表策略:%s;创建索引:%v", contentLabel, defaultMode, config.AutoAddColumns, strategy, config.CreateIndexes))
|
||||
|
||||
sourceDB, err := db.NewDatabase(config.SourceConfig.Type)
|
||||
sourceDB, err := newSyncDatabase(config.SourceConfig.Type)
|
||||
if err != nil {
|
||||
logger.Error(err, "初始化源数据库驱动失败:类型=%s", config.SourceConfig.Type)
|
||||
return s.fail(config.JobID, totalTables, result, "初始化源数据库驱动失败: "+err.Error())
|
||||
@@ -88,7 +99,7 @@ func (s *SyncEngine) RunSync(config SyncConfig) SyncResult {
|
||||
// Custom DB setup would go here if needed
|
||||
}
|
||||
|
||||
targetDB, err := db.NewDatabase(config.TargetConfig.Type)
|
||||
targetDB, err := newSyncDatabase(config.TargetConfig.Type)
|
||||
if err != nil {
|
||||
logger.Error(err, "初始化目标数据库驱动失败:类型=%s", config.TargetConfig.Type)
|
||||
return s.fail(config.JobID, totalTables, result, "初始化目标数据库驱动失败: "+err.Error())
|
||||
@@ -112,7 +123,6 @@ func (s *SyncEngine) RunSync(config SyncConfig) SyncResult {
|
||||
}
|
||||
defer targetDB.Close()
|
||||
|
||||
// Iterate Tables
|
||||
for i, tableName := range config.Tables {
|
||||
func() {
|
||||
tableMode := defaultMode
|
||||
@@ -120,30 +130,82 @@ func (s *SyncEngine) RunSync(config SyncConfig) SyncResult {
|
||||
s.progress(config.JobID, i, totalTables, tableName, fmt.Sprintf("同步表(%d/%d)", i+1, totalTables))
|
||||
defer s.progress(config.JobID, i+1, totalTables, tableName, "表处理完成")
|
||||
|
||||
if syncSchema {
|
||||
s.progress(config.JobID, i, totalTables, tableName, "同步表结构")
|
||||
if err := s.syncTableSchema(config, &result, sourceDB, targetDB, tableName); err != nil {
|
||||
s.appendLog(config.JobID, &result, "error", fmt.Sprintf("表结构同步失败:表=%s 错误=%v", tableName, err))
|
||||
plan, cols, targetCols, err := buildSchemaMigrationPlan(config, tableName, sourceDB, targetDB)
|
||||
if err != nil {
|
||||
s.appendLog(config.JobID, &result, "error", fmt.Sprintf("生成迁移计划失败:表=%s 错误=%v", tableName, err))
|
||||
return
|
||||
}
|
||||
for _, warning := range plan.Warnings {
|
||||
s.appendLog(config.JobID, &result, "warn", fmt.Sprintf(" -> %s", warning))
|
||||
}
|
||||
for _, unsupported := range plan.UnsupportedObjects {
|
||||
s.appendLog(config.JobID, &result, "warn", fmt.Sprintf(" -> %s", unsupported))
|
||||
}
|
||||
if strings.TrimSpace(plan.PlannedAction) != "" {
|
||||
s.appendLog(config.JobID, &result, "info", fmt.Sprintf(" -> %s", plan.PlannedAction))
|
||||
}
|
||||
|
||||
if !plan.TargetTableExists && !plan.AutoCreate {
|
||||
s.appendLog(config.JobID, &result, "warn", fmt.Sprintf("表 %s 目标表不存在,当前策略不允许自动建表,已跳过", tableName))
|
||||
return
|
||||
}
|
||||
|
||||
if !plan.TargetTableExists && plan.AutoCreate {
|
||||
s.progress(config.JobID, i, totalTables, tableName, "创建目标表")
|
||||
if len(plan.PreDataSQL) > 0 {
|
||||
if err := executeSQLStatements(targetDB.Exec, plan.PreDataSQL); err != nil {
|
||||
s.appendLog(config.JobID, &result, "error", fmt.Sprintf("预执行建表 SQL 失败:表=%s 错误=%v", tableName, err))
|
||||
return
|
||||
}
|
||||
}
|
||||
if strings.TrimSpace(plan.CreateTableSQL) == "" {
|
||||
s.appendLog(config.JobID, &result, "error", fmt.Sprintf("表 %s 自动建表失败:建表 SQL 为空", tableName))
|
||||
return
|
||||
}
|
||||
if _, err := targetDB.Exec(plan.CreateTableSQL); err != nil {
|
||||
s.appendLog(config.JobID, &result, "error", fmt.Sprintf("创建目标表失败:表=%s 错误=%v", tableName, err))
|
||||
return
|
||||
}
|
||||
s.appendLog(config.JobID, &result, "info", fmt.Sprintf("目标表创建成功:%s", tableName))
|
||||
targetCols, err = targetDB.GetColumns(plan.TargetSchema, plan.TargetTable)
|
||||
if err != nil {
|
||||
s.appendLog(config.JobID, &result, "error", fmt.Sprintf("创建目标表后获取字段失败:表=%s 错误=%v", tableName, err))
|
||||
return
|
||||
}
|
||||
} else if len(plan.PreDataSQL) > 0 {
|
||||
s.progress(config.JobID, i, totalTables, tableName, "同步表结构")
|
||||
if err := executeSQLStatements(targetDB.Exec, plan.PreDataSQL); err != nil {
|
||||
s.appendLog(config.JobID, &result, "error", fmt.Sprintf("同步表结构失败:表=%s 错误=%v", tableName, err))
|
||||
return
|
||||
}
|
||||
targetCols, err = targetDB.GetColumns(plan.TargetSchema, plan.TargetTable)
|
||||
if err != nil {
|
||||
s.appendLog(config.JobID, &result, "warn", fmt.Sprintf("补字段后刷新目标字段失败:表=%s 错误=%v", tableName, err))
|
||||
}
|
||||
}
|
||||
|
||||
if !syncData {
|
||||
if len(plan.PostDataSQL) > 0 {
|
||||
s.progress(config.JobID, i, totalTables, tableName, "创建索引")
|
||||
if err := executeSQLStatements(targetDB.Exec, plan.PostDataSQL); err != nil {
|
||||
s.appendLog(config.JobID, &result, "error", fmt.Sprintf("创建索引失败:表=%s 错误=%v", tableName, err))
|
||||
return
|
||||
}
|
||||
}
|
||||
result.TablesSynced++
|
||||
return
|
||||
}
|
||||
|
||||
sourceSchema, sourceTable := normalizeSchemaAndTable(config.SourceConfig.Type, config.SourceConfig.Database, tableName)
|
||||
targetSchema, targetTable := normalizeSchemaAndTable(config.TargetConfig.Type, config.TargetConfig.Database, tableName)
|
||||
sourceQueryTable := qualifiedNameForQuery(config.SourceConfig.Type, sourceSchema, sourceTable, tableName)
|
||||
targetQueryTable := qualifiedNameForQuery(config.TargetConfig.Type, targetSchema, targetTable, tableName)
|
||||
|
||||
// 1. Get Columns & PKs
|
||||
cols, err := sourceDB.GetColumns(sourceSchema, sourceTable)
|
||||
if err != nil {
|
||||
logger.Error(err, "获取源表列信息失败:表=%s", tableName)
|
||||
s.appendLog(config.JobID, &result, "error", fmt.Sprintf("获取表 %s 的列信息失败: %v", tableName, err))
|
||||
return
|
||||
targetType := resolveMigrationDBType(config.TargetConfig)
|
||||
sourceType := resolveMigrationDBType(config.SourceConfig)
|
||||
targetTable := plan.TargetTable
|
||||
sourceQueryTable, targetQueryTable := plan.SourceQueryTable, plan.TargetQueryTable
|
||||
applyTableName := targetTable
|
||||
switch targetType {
|
||||
case "postgres", "kingbase", "highgo", "vastbase", "sqlserver":
|
||||
applyTableName = targetQueryTable
|
||||
}
|
||||
|
||||
sourceColsByLower := make(map[string]connection.ColumnDefinition, len(cols))
|
||||
for _, col := range cols {
|
||||
if strings.TrimSpace(col.Name) == "" {
|
||||
@@ -158,25 +220,24 @@ func (s *SyncEngine) RunSync(config SyncConfig) SyncResult {
|
||||
pkCols = append(pkCols, col.Name)
|
||||
}
|
||||
}
|
||||
|
||||
if len(pkCols) == 0 {
|
||||
s.appendLog(config.JobID, &result, "warn", fmt.Sprintf("表 %s 未找到主键,已跳过数据同步(避免产生重复数据)", tableName))
|
||||
return
|
||||
requirePK := tableMode == "insert_update" && plan.TargetTableExists
|
||||
pkCol := ""
|
||||
if requirePK {
|
||||
if len(pkCols) == 0 {
|
||||
s.appendLog(config.JobID, &result, "warn", fmt.Sprintf("表 %s 未找到主键,当前模式需要差异对比,已跳过", tableName))
|
||||
return
|
||||
}
|
||||
if len(pkCols) > 1 {
|
||||
s.appendLog(config.JobID, &result, "warn", fmt.Sprintf("表 %s 为复合主键(%s),当前暂不支持差异同步", tableName, strings.Join(pkCols, ",")))
|
||||
return
|
||||
}
|
||||
pkCol = pkCols[0]
|
||||
}
|
||||
if len(pkCols) > 1 {
|
||||
s.appendLog(config.JobID, &result, "warn", fmt.Sprintf("表 %s 为复合主键(%s),当前暂不支持数据同步", tableName, strings.Join(pkCols, ",")))
|
||||
return
|
||||
}
|
||||
pkCol := pkCols[0]
|
||||
|
||||
opts := TableOptions{Insert: true, Update: true, Delete: false}
|
||||
if config.TableOptions != nil {
|
||||
if t, ok := config.TableOptions[tableName]; ok {
|
||||
opts = t
|
||||
// 默认防护:如用户未设置任意一个字段,保持 insert/update 默认 true、delete 默认 false
|
||||
if !t.Insert && !t.Update && !t.Delete {
|
||||
opts = t
|
||||
}
|
||||
}
|
||||
}
|
||||
if !opts.Insert && !opts.Update && !opts.Delete {
|
||||
@@ -184,10 +245,8 @@ func (s *SyncEngine) RunSync(config SyncConfig) SyncResult {
|
||||
return
|
||||
}
|
||||
|
||||
// 2. Fetch Data (MEMORY INTENSIVE - PROTOTYPE ONLY)
|
||||
// TODO: Implement paging/streaming
|
||||
s.progress(config.JobID, i, totalTables, tableName, "读取源表数据")
|
||||
sourceRows, _, err := sourceDB.Query(fmt.Sprintf("SELECT * FROM %s", quoteQualifiedIdentByType(config.SourceConfig.Type, sourceQueryTable)))
|
||||
sourceRows, _, err := sourceDB.Query(fmt.Sprintf("SELECT * FROM %s", quoteQualifiedIdentByType(sourceType, sourceQueryTable)))
|
||||
if err != nil {
|
||||
logger.Error(err, "读取源表失败:表=%s", tableName)
|
||||
s.appendLog(config.JobID, &result, "error", fmt.Sprintf("读取源表 %s 失败: %v", tableName, err))
|
||||
@@ -196,19 +255,19 @@ func (s *SyncEngine) RunSync(config SyncConfig) SyncResult {
|
||||
|
||||
var inserts []map[string]interface{}
|
||||
var updates []connection.UpdateRow
|
||||
var deletes []map[string]interface{}
|
||||
|
||||
if tableMode == "insert_update" {
|
||||
if tableMode == "insert_update" && plan.TargetTableExists {
|
||||
s.progress(config.JobID, i, totalTables, tableName, "读取目标表数据")
|
||||
targetRows, _, err := targetDB.Query(fmt.Sprintf("SELECT * FROM %s", quoteQualifiedIdentByType(config.TargetConfig.Type, targetQueryTable)))
|
||||
targetRows, _, err := targetDB.Query(fmt.Sprintf("SELECT * FROM %s", quoteQualifiedIdentByType(targetType, targetQueryTable)))
|
||||
if err != nil {
|
||||
logger.Error(err, "读取目标表失败:表=%s", tableName)
|
||||
s.appendLog(config.JobID, &result, "error", fmt.Sprintf("读取目标表 %s 失败: %v", tableName, err))
|
||||
return
|
||||
}
|
||||
|
||||
// 3. Compare (In-Memory Hash Map)
|
||||
s.progress(config.JobID, i, totalTables, tableName, "对比差异")
|
||||
targetMap := make(map[string]map[string]interface{})
|
||||
targetMap := make(map[string]map[string]interface{}, len(targetRows))
|
||||
for _, row := range targetRows {
|
||||
if row[pkCol] == nil {
|
||||
continue
|
||||
@@ -220,7 +279,6 @@ func (s *SyncEngine) RunSync(config SyncConfig) SyncResult {
|
||||
targetMap[pkVal] = row
|
||||
}
|
||||
sourcePKSet := make(map[string]struct{}, len(sourceRows))
|
||||
|
||||
for _, sRow := range sourceRows {
|
||||
if sRow[pkCol] == nil {
|
||||
continue
|
||||
@@ -230,7 +288,6 @@ func (s *SyncEngine) RunSync(config SyncConfig) SyncResult {
|
||||
continue
|
||||
}
|
||||
sourcePKSet[pkVal] = struct{}{}
|
||||
|
||||
if tRow, exists := targetMap[pkVal]; exists {
|
||||
changes := make(map[string]interface{})
|
||||
for k, v := range sRow {
|
||||
@@ -239,17 +296,12 @@ func (s *SyncEngine) RunSync(config SyncConfig) SyncResult {
|
||||
}
|
||||
}
|
||||
if len(changes) > 0 {
|
||||
updates = append(updates, connection.UpdateRow{
|
||||
Keys: map[string]interface{}{pkCol: sRow[pkCol]},
|
||||
Values: changes,
|
||||
})
|
||||
updates = append(updates, connection.UpdateRow{Keys: map[string]interface{}{pkCol: sRow[pkCol]}, Values: changes})
|
||||
}
|
||||
} else {
|
||||
inserts = append(inserts, sRow)
|
||||
}
|
||||
}
|
||||
|
||||
var deletes []map[string]interface{}
|
||||
if opts.Delete {
|
||||
for pkStr, row := range targetMap {
|
||||
if _, ok := sourcePKSet[pkStr]; ok {
|
||||
@@ -258,150 +310,49 @@ func (s *SyncEngine) RunSync(config SyncConfig) SyncResult {
|
||||
deletes = append(deletes, map[string]interface{}{pkCol: row[pkCol]})
|
||||
}
|
||||
}
|
||||
|
||||
// apply operation selection
|
||||
inserts = filterRowsByPKSelection(pkCol, inserts, opts.Insert, opts.SelectedInsertPKs)
|
||||
updates = filterUpdatesByPKSelection(pkCol, updates, opts.Update, opts.SelectedUpdatePKs)
|
||||
deletes = filterRowsByPKSelection(pkCol, deletes, opts.Delete, opts.SelectedDeletePKs)
|
||||
|
||||
changeSet := connection.ChangeSet{
|
||||
Inserts: inserts,
|
||||
Updates: updates,
|
||||
Deletes: deletes,
|
||||
} else {
|
||||
inserts = sourceRows
|
||||
if !opts.Insert {
|
||||
inserts = nil
|
||||
}
|
||||
if tableMode == "full_overwrite" && plan.TargetTableExists {
|
||||
s.appendLog(config.JobID, &result, "warn", fmt.Sprintf(" -> 全量覆盖模式:即将清空目标表 %s", tableName))
|
||||
s.progress(config.JobID, i, totalTables, tableName, "清空目标表")
|
||||
clearSQL := ""
|
||||
if targetType == "mysql" {
|
||||
clearSQL = fmt.Sprintf("TRUNCATE TABLE %s", quoteQualifiedIdentByType(targetType, targetQueryTable))
|
||||
} else {
|
||||
clearSQL = fmt.Sprintf("DELETE FROM %s", quoteQualifiedIdentByType(targetType, targetQueryTable))
|
||||
}
|
||||
if _, err := targetDB.Exec(clearSQL); err != nil {
|
||||
s.appendLog(config.JobID, &result, "error", fmt.Sprintf(" -> 清空目标表失败: %v", err))
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// 4. Align schema (target missing columns)
|
||||
s.progress(config.JobID, i, totalTables, tableName, "检查字段一致性")
|
||||
requiredCols := collectRequiredColumns(changeSet.Inserts, changeSet.Updates)
|
||||
targetCols, err := targetDB.GetColumns(targetSchema, targetTable)
|
||||
changeSet := connection.ChangeSet{Inserts: inserts, Updates: updates, Deletes: deletes}
|
||||
s.progress(config.JobID, i, totalTables, tableName, "检查字段一致性")
|
||||
targetColsResolved := targetCols
|
||||
if len(targetColsResolved) == 0 {
|
||||
targetColsResolved, err = targetDB.GetColumns(plan.TargetSchema, plan.TargetTable)
|
||||
if err != nil {
|
||||
s.appendLog(config.JobID, &result, "warn", fmt.Sprintf(" -> 获取目标表字段失败,已跳过字段一致性检查: %v", err))
|
||||
} else {
|
||||
targetColSet := make(map[string]struct{}, len(targetCols))
|
||||
for _, c := range targetCols {
|
||||
name := strings.ToLower(strings.TrimSpace(c.Name))
|
||||
if name == "" {
|
||||
continue
|
||||
}
|
||||
targetColSet[name] = struct{}{}
|
||||
}
|
||||
|
||||
missing := make([]string, 0)
|
||||
for lower, original := range requiredCols {
|
||||
if _, ok := targetColSet[lower]; !ok {
|
||||
missing = append(missing, original)
|
||||
}
|
||||
}
|
||||
sort.Strings(missing)
|
||||
|
||||
if len(missing) > 0 {
|
||||
if config.AutoAddColumns && strings.ToLower(strings.TrimSpace(config.TargetConfig.Type)) == "mysql" {
|
||||
s.appendLog(config.JobID, &result, "warn", fmt.Sprintf(" -> 目标表缺少字段 %d 个,开始自动补齐: %s", len(missing), strings.Join(missing, ", ")))
|
||||
added := 0
|
||||
for _, colName := range missing {
|
||||
colLower := strings.ToLower(strings.TrimSpace(colName))
|
||||
colType := "TEXT"
|
||||
if strings.ToLower(strings.TrimSpace(config.SourceConfig.Type)) == "mysql" {
|
||||
if srcCol, ok := sourceColsByLower[colLower]; ok {
|
||||
colType = sanitizeMySQLColumnType(srcCol.Type)
|
||||
}
|
||||
}
|
||||
|
||||
alterSQL := fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s %s NULL",
|
||||
quoteQualifiedIdentByType("mysql", targetQueryTable),
|
||||
quoteIdentByType("mysql", colName),
|
||||
colType,
|
||||
)
|
||||
if _, err := targetDB.Exec(alterSQL); err != nil {
|
||||
s.appendLog(config.JobID, &result, "error", fmt.Sprintf(" -> 自动补字段失败:字段=%s 错误=%v", colName, err))
|
||||
continue
|
||||
}
|
||||
added++
|
||||
}
|
||||
s.appendLog(config.JobID, &result, "info", fmt.Sprintf(" -> 自动补字段完成:成功=%d 失败=%d", added, len(missing)-added))
|
||||
|
||||
// refresh columns
|
||||
targetCols, err = targetDB.GetColumns(targetSchema, targetTable)
|
||||
if err == nil {
|
||||
targetColSet = make(map[string]struct{}, len(targetCols))
|
||||
for _, c := range targetCols {
|
||||
name := strings.ToLower(strings.TrimSpace(c.Name))
|
||||
if name == "" {
|
||||
continue
|
||||
}
|
||||
targetColSet[name] = struct{}{}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
s.appendLog(config.JobID, &result, "warn", fmt.Sprintf(" -> 目标表缺少字段 %d 个(未开启自动补齐),将自动忽略:%s", len(missing), strings.Join(missing, ", ")))
|
||||
}
|
||||
|
||||
// filter out still-missing columns to avoid apply failure
|
||||
changeSet.Inserts = filterInsertRows(changeSet.Inserts, targetColSet)
|
||||
changeSet.Updates = filterUpdateRows(changeSet.Updates, targetColSet)
|
||||
}
|
||||
}
|
||||
|
||||
// 5. Apply Changes
|
||||
s.progress(config.JobID, i, totalTables, tableName, "应用变更")
|
||||
|
||||
if len(changeSet.Inserts) > 0 || len(changeSet.Updates) > 0 || len(changeSet.Deletes) > 0 {
|
||||
s.appendLog(config.JobID, &result, "info", fmt.Sprintf(" -> 需插入: %d 行, 需更新: %d 行, 需删除: %d 行", len(changeSet.Inserts), len(changeSet.Updates), len(changeSet.Deletes)))
|
||||
|
||||
if applier, ok := targetDB.(db.BatchApplier); ok {
|
||||
if err := applier.ApplyChanges(targetTable, changeSet); err != nil {
|
||||
s.appendLog(config.JobID, &result, "error", fmt.Sprintf(" -> 应用变更失败: %v", err))
|
||||
} else {
|
||||
result.RowsInserted += len(changeSet.Inserts)
|
||||
result.RowsUpdated += len(changeSet.Updates)
|
||||
result.RowsDeleted += len(changeSet.Deletes)
|
||||
}
|
||||
} else {
|
||||
s.appendLog(config.JobID, &result, "warn", " -> 目标驱动不支持应用数据变更 (ApplyChanges).")
|
||||
}
|
||||
} else {
|
||||
s.appendLog(config.JobID, &result, "info", " -> 数据一致,无需变更.")
|
||||
}
|
||||
|
||||
result.TablesSynced++
|
||||
return
|
||||
} else {
|
||||
// insert_only / full_overwrite: do not compare target, just insert source rows
|
||||
inserts = sourceRows
|
||||
}
|
||||
|
||||
// full_overwrite: clear target table first
|
||||
if tableMode == "full_overwrite" {
|
||||
s.appendLog(config.JobID, &result, "warn", fmt.Sprintf(" -> 全量覆盖模式:即将清空目标表 %s", tableName))
|
||||
s.progress(config.JobID, i, totalTables, tableName, "清空目标表")
|
||||
clearSQL := ""
|
||||
if strings.ToLower(strings.TrimSpace(config.TargetConfig.Type)) == "mysql" {
|
||||
clearSQL = fmt.Sprintf("TRUNCATE TABLE %s", quoteQualifiedIdentByType(config.TargetConfig.Type, targetQueryTable))
|
||||
} else {
|
||||
clearSQL = fmt.Sprintf("DELETE FROM %s", quoteQualifiedIdentByType(config.TargetConfig.Type, targetQueryTable))
|
||||
}
|
||||
if _, err := targetDB.Exec(clearSQL); err != nil {
|
||||
s.appendLog(config.JobID, &result, "error", fmt.Sprintf(" -> 清空目标表失败: %v", err))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// 4. Align schema (target missing columns)
|
||||
s.progress(config.JobID, i, totalTables, tableName, "检查字段一致性")
|
||||
requiredCols := collectRequiredColumns(inserts, updates)
|
||||
targetCols, err := targetDB.GetColumns(targetSchema, targetTable)
|
||||
if err != nil {
|
||||
s.appendLog(config.JobID, &result, "warn", fmt.Sprintf(" -> 获取目标表字段失败,已跳过字段一致性检查: %v", err))
|
||||
} else {
|
||||
targetColSet := make(map[string]struct{}, len(targetCols))
|
||||
for _, c := range targetCols {
|
||||
if len(targetColsResolved) > 0 {
|
||||
targetColSet := make(map[string]struct{}, len(targetColsResolved))
|
||||
for _, c := range targetColsResolved {
|
||||
name := strings.ToLower(strings.TrimSpace(c.Name))
|
||||
if name == "" {
|
||||
continue
|
||||
}
|
||||
targetColSet[name] = struct{}{}
|
||||
}
|
||||
|
||||
requiredCols := collectRequiredColumns(changeSet.Inserts, changeSet.Updates)
|
||||
missing := make([]string, 0)
|
||||
for lower, original := range requiredCols {
|
||||
if _, ok := targetColSet[lower]; !ok {
|
||||
@@ -409,79 +360,64 @@ func (s *SyncEngine) RunSync(config SyncConfig) SyncResult {
|
||||
}
|
||||
}
|
||||
sort.Strings(missing)
|
||||
|
||||
if len(missing) > 0 {
|
||||
if config.AutoAddColumns && strings.ToLower(strings.TrimSpace(config.TargetConfig.Type)) == "mysql" {
|
||||
if config.AutoAddColumns && supportsAutoAddColumnsForPair(sourceType, targetType) {
|
||||
s.appendLog(config.JobID, &result, "warn", fmt.Sprintf(" -> 目标表缺少字段 %d 个,开始自动补齐: %s", len(missing), strings.Join(missing, ", ")))
|
||||
added := 0
|
||||
for _, colName := range missing {
|
||||
colLower := strings.ToLower(strings.TrimSpace(colName))
|
||||
colType := "TEXT"
|
||||
if strings.ToLower(strings.TrimSpace(config.SourceConfig.Type)) == "mysql" {
|
||||
if srcCol, ok := sourceColsByLower[colLower]; ok {
|
||||
colType = sanitizeMySQLColumnType(srcCol.Type)
|
||||
}
|
||||
srcCol, ok := sourceColsByLower[colLower]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
alterSQL, err := buildAddColumnSQLForPair(sourceType, targetType, targetQueryTable, srcCol)
|
||||
if err != nil {
|
||||
s.appendLog(config.JobID, &result, "error", fmt.Sprintf(" -> 自动补字段失败:字段=%s 错误=%v", colName, err))
|
||||
continue
|
||||
}
|
||||
|
||||
alterSQL := fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s %s NULL",
|
||||
quoteQualifiedIdentByType("mysql", targetQueryTable),
|
||||
quoteIdentByType("mysql", colName),
|
||||
colType,
|
||||
)
|
||||
if _, err := targetDB.Exec(alterSQL); err != nil {
|
||||
s.appendLog(config.JobID, &result, "error", fmt.Sprintf(" -> 自动补字段失败:字段=%s 错误=%v", colName, err))
|
||||
continue
|
||||
}
|
||||
added++
|
||||
targetColSet[colLower] = struct{}{}
|
||||
}
|
||||
s.appendLog(config.JobID, &result, "info", fmt.Sprintf(" -> 自动补字段完成:成功=%d 失败=%d", added, len(missing)-added))
|
||||
|
||||
// refresh columns
|
||||
targetCols, err = targetDB.GetColumns(targetSchema, targetTable)
|
||||
if err == nil {
|
||||
targetColSet = make(map[string]struct{}, len(targetCols))
|
||||
for _, c := range targetCols {
|
||||
name := strings.ToLower(strings.TrimSpace(c.Name))
|
||||
if name == "" {
|
||||
continue
|
||||
}
|
||||
targetColSet[name] = struct{}{}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
s.appendLog(config.JobID, &result, "warn", fmt.Sprintf(" -> 目标表缺少字段 %d 个(未开启自动补齐),将自动忽略:%s", len(missing), strings.Join(missing, ", ")))
|
||||
}
|
||||
|
||||
// filter out still-missing columns to avoid apply failure
|
||||
inserts = filterInsertRows(inserts, targetColSet)
|
||||
updates = filterUpdateRows(updates, targetColSet)
|
||||
changeSet.Inserts = filterInsertRows(changeSet.Inserts, targetColSet)
|
||||
changeSet.Updates = filterUpdateRows(changeSet.Updates, targetColSet)
|
||||
}
|
||||
}
|
||||
|
||||
// 5. Apply Changes
|
||||
s.progress(config.JobID, i, totalTables, tableName, "应用变更")
|
||||
changeSet := connection.ChangeSet{
|
||||
Inserts: inserts,
|
||||
Updates: updates,
|
||||
}
|
||||
|
||||
if len(changeSet.Inserts) > 0 || len(changeSet.Updates) > 0 {
|
||||
s.appendLog(config.JobID, &result, "info", fmt.Sprintf(" -> 需插入: %d 行, 需更新: %d 行", len(changeSet.Inserts), len(changeSet.Updates)))
|
||||
|
||||
if len(changeSet.Inserts) > 0 || len(changeSet.Updates) > 0 || len(changeSet.Deletes) > 0 {
|
||||
s.appendLog(config.JobID, &result, "info", fmt.Sprintf(" -> 需插入: %d 行, 需更新: %d 行, 需删除: %d 行", len(changeSet.Inserts), len(changeSet.Updates), len(changeSet.Deletes)))
|
||||
if applier, ok := targetDB.(db.BatchApplier); ok {
|
||||
if err := applier.ApplyChanges(targetTable, changeSet); err != nil {
|
||||
if err := applier.ApplyChanges(applyTableName, changeSet); err != nil {
|
||||
s.appendLog(config.JobID, &result, "error", fmt.Sprintf(" -> 应用变更失败: %v", err))
|
||||
} else {
|
||||
result.RowsInserted += len(changeSet.Inserts)
|
||||
result.RowsUpdated += len(changeSet.Updates)
|
||||
return
|
||||
}
|
||||
result.RowsInserted += len(changeSet.Inserts)
|
||||
result.RowsUpdated += len(changeSet.Updates)
|
||||
result.RowsDeleted += len(changeSet.Deletes)
|
||||
} else {
|
||||
s.appendLog(config.JobID, &result, "warn", " -> 目标驱动不支持应用数据变更 (ApplyChanges).")
|
||||
return
|
||||
}
|
||||
} else {
|
||||
s.appendLog(config.JobID, &result, "info", " -> 数据一致,无需变更.")
|
||||
}
|
||||
|
||||
if len(plan.PostDataSQL) > 0 {
|
||||
s.progress(config.JobID, i, totalTables, tableName, "创建索引")
|
||||
if err := executeSQLStatements(targetDB.Exec, plan.PostDataSQL); err != nil {
|
||||
s.appendLog(config.JobID, &result, "error", fmt.Sprintf("创建索引失败:表=%s 错误=%v", tableName, err))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
result.TablesSynced++
|
||||
}()
|
||||
}
|
||||
@@ -554,3 +490,26 @@ func (s *SyncEngine) fail(jobID string, totalTables int, res SyncResult, msg str
|
||||
s.progress(jobID, res.TablesSynced, totalTables, "", "同步失败")
|
||||
return res
|
||||
}
|
||||
|
||||
func (s *SyncEngine) execDDLStatements(jobID string, res *SyncResult, database db.Database, tableName string, stage string, statements []string) error {
|
||||
for _, statement := range statements {
|
||||
sqlText := strings.TrimSpace(statement)
|
||||
if sqlText == "" {
|
||||
continue
|
||||
}
|
||||
if _, err := database.Exec(sqlText); err != nil {
|
||||
return fmt.Errorf("%s失败: %w", stage, err)
|
||||
}
|
||||
s.appendLog(jobID, res, "info", fmt.Sprintf("表 %s %s成功:%s", tableName, stage, shortenSyncSQL(sqlText)))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func shortenSyncSQL(sqlText string) string {
|
||||
text := strings.TrimSpace(strings.ReplaceAll(strings.ReplaceAll(sqlText, "\n", " "), "\t", " "))
|
||||
text = strings.Join(strings.Fields(text), " ")
|
||||
if len(text) <= 120 {
|
||||
return text
|
||||
}
|
||||
return text[:117] + "..."
|
||||
}
|
||||
|
||||
@@ -27,4 +27,3 @@ type Reporter struct {
|
||||
OnLog func(event SyncLogEvent)
|
||||
OnProgress func(event SyncProgressEvent)
|
||||
}
|
||||
|
||||
|
||||
Reference in New Issue
Block a user