From 26b79adc5f308c2b595b24d983ab5e08ec639212 Mon Sep 17 00:00:00 2001 From: Syngnat Date: Mon, 2 Mar 2026 10:49:23 +0800 Subject: [PATCH 01/48] =?UTF-8?q?=F0=9F=90=9B=20fix(data-viewer):=20?= =?UTF-8?q?=E4=BF=AE=E5=A4=8DClickHouse=E5=B0=BE=E9=83=A8=E5=88=86?= =?UTF-8?q?=E9=A1=B5=E5=BC=82=E5=B8=B8=E5=B9=B6=E5=A2=9E=E5=BC=BADuckDB?= =?UTF-8?q?=E5=A4=8D=E6=9D=82=E7=B1=BB=E5=9E=8B=E5=85=BC=E5=AE=B9?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - DataViewer 新增 ClickHouse 反向分页策略,修复最后页与倒数页查询失败 - DuckDB 查询失败时按列类型生成安全 SELECT,复杂类型转 VARCHAR 重试 - 分页状态统一使用 currentPage 回填,避免页码与总数推导不一致 - 增强查询异常日志与重试路径,降低大表场景卡顿与误报 --- .github/workflows/release.yml | 54 +++++- cmd/optional-driver-agent/main.go | 55 +++++- cmd/optional-driver-agent/main_test.go | 62 +++++++ docs/driver-manifest.json | 2 +- frontend/src/components/ConnectionModal.tsx | 34 +++- frontend/src/components/DataViewer.tsx | 175 ++++++++++++++++---- frontend/wailsjs/go/app/App.d.ts | 2 + frontend/wailsjs/go/app/App.js | 4 + internal/app/app.go | 90 +++++++++- internal/app/app_cache_key_test.go | 63 +++++++ internal/app/methods_driver.go | 4 +- internal/app/methods_file.go | 170 ++++++++++++++++++- internal/app/methods_file_export_test.go | 89 ++++++++++ internal/db/query_value.go | 66 +++++++- internal/db/query_value_test.go | 39 +++++ 15 files changed, 853 insertions(+), 56 deletions(-) create mode 100644 cmd/optional-driver-agent/main_test.go create mode 100644 internal/app/app_cache_key_test.go create mode 100644 internal/app/methods_file_export_test.go diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index ec38a17..0e0cb32 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -131,6 +131,24 @@ jobs: - name: Install Wails run: go install -v github.com/wailsapp/wails/v2/cmd/wails@latest + - name: Prepare MinGW For DuckDB (Windows) + if: ${{ matrix.build_optional_agents && contains(matrix.platform, 'windows') }} + shell: pwsh + run: | + $mingwBin = "C:\msys64\mingw64\bin" + if (!(Test-Path $mingwBin)) { + choco install mingw --yes --no-progress + $mingwBin = "C:\ProgramData\chocolatey\lib\mingw\tools\install\mingw64\bin" + } + if (!(Test-Path $mingwBin)) { + Write-Error "❌ 未找到 MinGW GCC 路径:$mingwBin" + exit 1 + } + "$mingwBin" | Out-File -FilePath $env:GITHUB_PATH -Append -Encoding utf8 + "CC=$mingwBin\gcc.exe" | Out-File -FilePath $env:GITHUB_ENV -Append -Encoding utf8 + "CXX=$mingwBin\g++.exe" | Out-File -FilePath $env:GITHUB_ENV -Append -Encoding utf8 + Write-Host "✅ 已配置 DuckDB cgo 编译器: $mingwBin" + - name: Build shell: bash run: | @@ -166,20 +184,12 @@ jobs: OUTPUT_PATH="${OUTDIR}/${OUTPUT}" echo "🔧 构建 ${OUTPUT_PATH} (tag=${TAG})" if [ "$DRIVER" = "duckdb" ]; then - set +e CGO_ENABLED=1 GOOS="$GOOS" GOARCH="$GOARCH" go build \ -tags "${TAG}" \ -trimpath \ -ldflags "-s -w" \ -o "${OUTPUT_PATH}" \ ./cmd/optional-driver-agent - DUCKDB_RC=$? 
- set -e - if [ "${DUCKDB_RC}" -ne 0 ]; then - echo "⚠️ DuckDB 代理构建失败(平台 ${GOOS}/${GOARCH}),跳过该资产,不阻断发布" - rm -f "${OUTPUT_PATH}" - continue - fi else CGO_ENABLED=0 GOOS="$GOOS" GOARCH="$GOARCH" go build \ -tags "${TAG}" \ @@ -369,6 +379,34 @@ jobs: - name: List Assets run: ls -R release-assets + - name: Verify DuckDB Driver Assets + shell: bash + run: | + set -euo pipefail + cd release-assets + + REQUIRED_FILES=( + "drivers/Windows/duckdb-driver-agent-windows-amd64.exe" + "drivers/MacOS/duckdb-driver-agent-darwin-amd64" + "drivers/MacOS/duckdb-driver-agent-darwin-arm64" + "drivers/Linux/duckdb-driver-agent-linux-amd64" + ) + + missing=0 + for file in "${REQUIRED_FILES[@]}"; do + if [ ! -f "$file" ]; then + echo "❌ 缺少 DuckDB 驱动资产:$file" + missing=1 + else + echo "✅ 已找到 DuckDB 驱动资产:$file" + fi + done + + if [ "$missing" -ne 0 ]; then + echo "❌ DuckDB 驱动资产不完整,终止发布" + exit 1 + fi + - name: Package Driver Agents Bundle shell: bash run: | diff --git a/cmd/optional-driver-agent/main.go b/cmd/optional-driver-agent/main.go index 20c7316..63f6945 100644 --- a/cmd/optional-driver-agent/main.go +++ b/cmd/optional-driver-agent/main.go @@ -5,6 +5,7 @@ import ( "encoding/json" "fmt" "os" + "reflect" "strings" "GoNavi-Wails/internal/connection" @@ -218,7 +219,11 @@ func handleRequest(inst *db.Database, req agentRequest) agentResponse { } func writeResponse(writer *bufio.Writer, resp agentResponse) error { - payload, err := json.Marshal(resp) + // 对响应数据做统一 JSON 安全归一化: + // 将 map[any]any(如 duckdb.Map)递归转换为 map[string]any,避免序列化失败导致代理进程退出。 + safeResp := resp + safeResp.Data = normalizeAgentResponseData(resp.Data) + payload, err := json.Marshal(safeResp) if err != nil { return err } @@ -234,3 +239,51 @@ func fail(resp agentResponse, errText string) agentResponse { resp.Error = strings.TrimSpace(errText) return resp } + +func normalizeAgentResponseData(v interface{}) interface{} { + if v == nil { + return nil + } + + rv := reflect.ValueOf(v) + switch rv.Kind() { + case reflect.Pointer, reflect.Interface: + if rv.IsNil() { + return nil + } + return normalizeAgentResponseData(rv.Elem().Interface()) + case reflect.Map: + if rv.IsNil() { + return nil + } + out := make(map[string]interface{}, rv.Len()) + iter := rv.MapRange() + for iter.Next() { + out[fmt.Sprint(iter.Key().Interface())] = normalizeAgentResponseData(iter.Value().Interface()) + } + return out + case reflect.Slice: + if rv.IsNil() { + return nil + } + // 保持 []byte 原样,避免改变现有二进制列的 JSON 编码行为(base64)。 + if rv.Type().Elem().Kind() == reflect.Uint8 { + return v + } + size := rv.Len() + items := make([]interface{}, size) + for i := 0; i < size; i++ { + items[i] = normalizeAgentResponseData(rv.Index(i).Interface()) + } + return items + case reflect.Array: + size := rv.Len() + items := make([]interface{}, size) + for i := 0; i < size; i++ { + items[i] = normalizeAgentResponseData(rv.Index(i).Interface()) + } + return items + default: + return v + } +} diff --git a/cmd/optional-driver-agent/main_test.go b/cmd/optional-driver-agent/main_test.go new file mode 100644 index 0000000..e74c805 --- /dev/null +++ b/cmd/optional-driver-agent/main_test.go @@ -0,0 +1,62 @@ +package main + +import ( + "bufio" + "bytes" + "encoding/json" + "testing" +) + +type duckMapLike map[any]any + +func TestWriteResponse_NormalizesMapAnyAny(t *testing.T) { + resp := agentResponse{ + ID: 1, + Success: true, + Data: []map[string]interface{}{ + { + "id": int64(7), + "meta": duckMapLike{"k": "v", 2: "two"}, + }, + }, + } + + var out bytes.Buffer + writer := bufio.NewWriter(&out) + if err := 
writeResponse(writer, resp); err != nil { + t.Fatalf("writeResponse 返回错误: %v", err) + } + + var decoded struct { + Data []map[string]interface{} `json:"data"` + } + if err := json.Unmarshal(bytes.TrimSpace(out.Bytes()), &decoded); err != nil { + t.Fatalf("解码响应失败: %v", err) + } + + if len(decoded.Data) != 1 { + t.Fatalf("期望 1 行数据,实际 %d", len(decoded.Data)) + } + meta, ok := decoded.Data[0]["meta"].(map[string]interface{}) + if !ok { + t.Fatalf("meta 字段类型异常: %T", decoded.Data[0]["meta"]) + } + if meta["k"] != "v" { + t.Fatalf("字符串 key 转换异常: %v", meta["k"]) + } + if meta["2"] != "two" { + t.Fatalf("数字 key 未字符串化: %v", meta["2"]) + } +} + +func TestNormalizeAgentResponseData_KeepByteSlice(t *testing.T) { + raw := []byte{0x61, 0x62, 0x63} + normalized := normalizeAgentResponseData(raw) + out, ok := normalized.([]byte) + if !ok { + t.Fatalf("期望 []byte,实际 %T", normalized) + } + if !bytes.Equal(out, raw) { + t.Fatalf("[]byte 内容被意外改写: %v", out) + } +} diff --git a/docs/driver-manifest.json b/docs/driver-manifest.json index 1f0302a..2352ea1 100644 --- a/docs/driver-manifest.json +++ b/docs/driver-manifest.json @@ -33,7 +33,7 @@ }, "duckdb": { "engine": "go", - "version": "2.5.5", + "version": "2.5.6", "checksumPolicy": "off", "downloadUrl": "builtin://activate/duckdb" }, diff --git a/frontend/src/components/ConnectionModal.tsx b/frontend/src/components/ConnectionModal.tsx index 3f362a8..5c8ad1b 100644 --- a/frontend/src/components/ConnectionModal.tsx +++ b/frontend/src/components/ConnectionModal.tsx @@ -3,7 +3,7 @@ import { Modal, Form, Input, InputNumber, Button, message, Checkbox, Divider, Se import { DatabaseOutlined, ConsoleSqlOutlined, FileTextOutlined, CloudServerOutlined, AppstoreAddOutlined, CloudOutlined, CheckCircleFilled, CloseCircleFilled } from '@ant-design/icons'; import { useStore } from '../store'; import { normalizeOpacityForPlatform } from '../utils/appearance'; -import { DBGetDatabases, GetDriverStatusList, MongoDiscoverMembers, TestConnection, RedisConnect, SelectSSHKeyFile } from '../../wailsjs/go/app/App'; +import { DBGetDatabases, GetDriverStatusList, MongoDiscoverMembers, TestConnection, RedisConnect, SelectDatabaseFile, SelectSSHKeyFile } from '../../wailsjs/go/app/App'; import { ConnectionConfig, MongoMemberInfo, SavedConnection } from '../types'; const { Meta } = Card; @@ -80,6 +80,7 @@ const ConnectionModal: React.FC<{ const [typeSelectWarning, setTypeSelectWarning] = useState<{ driverName: string; reason: string } | null>(null); const [driverStatusMap, setDriverStatusMap] = useState>({}); const [driverStatusLoaded, setDriverStatusLoaded] = useState(false); + const [selectingDbFile, setSelectingDbFile] = useState(false); const [selectingSSHKey, setSelectingSSHKey] = useState(false); const testInFlightRef = useRef(false); const testTimerRef = useRef(null); @@ -665,6 +666,30 @@ const ConnectionModal: React.FC<{ } }; + const handleSelectDatabaseFile = async () => { + if (selectingDbFile) { + return; + } + try { + setSelectingDbFile(true); + const currentPath = String(form.getFieldValue('host') || '').trim(); + const res = await SelectDatabaseFile(currentPath, dbType); + if (res?.success) { + const data = res.data || {}; + const selectedPath = typeof data === 'string' ? 
data : String(data.path || '').trim(); + if (selectedPath) { + form.setFieldValue('host', normalizeFileDbPath(selectedPath)); + } + } else if (res?.message !== 'Cancelled') { + message.error(`选择数据库文件失败: ${res?.message || '未知错误'}`); + } + } catch (e: any) { + message.error(`选择数据库文件失败: ${e?.message || String(e)}`); + } finally { + setSelectingDbFile(false); + } + }; + useEffect(() => { if (open) { setTestResult(null); // Reset test result @@ -1392,6 +1417,13 @@ const ConnectionModal: React.FC<{ onDoubleClick={requestTest} /> + {isFileDb && ( + + + + )} {!isFileDb && ( { const escapeSQLLiteral = (value: string): string => String(value || '').replace(/'/g, "''"); +const isDuckDBUnsupportedTypeError = (msg: string): boolean => /unsupported\s*type:\s*duckdb\./i.test(String(msg || '')); + +const isDuckDBComplexColumnType = (columnType?: string): boolean => { + const raw = String(columnType || '').trim().toLowerCase(); + if (!raw) return false; + return raw.includes('map') || raw.includes('struct') || raw.includes('union') || raw.includes('array') || raw.includes('list'); +}; + +const reverseOrderBySQL = (orderBySQL: string): string => { + const raw = String(orderBySQL || '').trim(); + if (!raw) return ''; + const body = raw.replace(/^order\s+by\s+/i, '').trim(); + if (!body) return ''; + + const parts = body + .split(',') + .map((part) => part.trim()) + .filter(Boolean) + .map((part) => { + if (/\s+asc$/i.test(part)) return part.replace(/\s+asc$/i, ' DESC'); + if (/\s+desc$/i.test(part)) return part.replace(/\s+desc$/i, ' ASC'); + return `${part} DESC`; + }); + if (parts.length === 0) return ''; + return ` ORDER BY ${parts.join(', ')}`; +}; + const DataViewer: React.FC<{ tab: TabData }> = ({ tab }) => { const [data, setData] = useState([]); const [columnNames, setColumnNames] = useState([]); @@ -144,19 +171,17 @@ const DataViewer: React.FC<{ tab: TabData }> = ({ tab }) => { const [showFilter, setShowFilter] = useState(false); const [filterConditions, setFilterConditions] = useState([]); + const duckdbSafeSelectCacheRef = useRef>({}); const currentConnType = (connections.find(c => c.id === tab.connectionId)?.config?.type || '').toLowerCase(); const forceReadOnly = currentConnType === 'tdengine' || currentConnType === 'clickhouse'; - const runIsolatedQuery = useCallback(async (queryConfig: any, dbName: string, sql: string) => { - return DBQueryIsolated(queryConfig as any, dbName, sql); - }, []); - useEffect(() => { setPkColumns([]); pkKeyRef.current = ''; countKeyRef.current = ''; duckdbApproxKeyRef.current = ''; manualCountKeyRef.current = ''; + duckdbSafeSelectCacheRef.current = {}; latestConfigRef.current = null; latestDbTypeRef.current = ''; latestDbNameRef.current = ''; @@ -194,7 +219,7 @@ const DataViewer: React.FC<{ tab: TabData }> = ({ tab }) => { const countConfig: any = { ...(config as any), timeout: 120 }; try { - const resCount = await runIsolatedQuery(countConfig, dbName, countSql); + const resCount = await DBQuery(countConfig as any, dbName, countSql); const countDuration = Date.now() - countStart; addSqlLog({ id: `log-${Date.now()}-duckdb-manual-count`, @@ -240,7 +265,7 @@ const DataViewer: React.FC<{ tab: TabData }> = ({ tab }) => { setPagination(prev => ({ ...prev, totalCountLoading: false })); message.error(`统计总数失败: ${String(e?.message || e)}`); } - }, [addSqlLog, runIsolatedQuery]); + }, [addSqlLog]); const handleDuckDBCancelManualCount = useCallback(() => { manualCountSeqRef.current++; @@ -277,35 +302,112 @@ const DataViewer: React.FC<{ tab: TabData }> = ({ tab }) => { const 
countSql = `SELECT COUNT(*) as total FROM ${quoteQualifiedIdent(dbType, tableName)} ${whereSQL}`; - let sql = `SELECT * FROM ${quoteQualifiedIdent(dbType, tableName)} ${whereSQL}`; - sql += buildOrderBySQL(dbType, sortInfo, pkColumns); - const offset = (page - 1) * size; - // 大表性能:打开表不阻塞在 COUNT(*),先通过多取 1 条判断是否还有下一页;总数在后台统计并异步回填。 - sql += ` LIMIT ${size + 1} OFFSET ${offset}`; + const baseSql = `SELECT * FROM ${quoteQualifiedIdent(dbType, tableName)} ${whereSQL}`; + const orderBySQL = buildOrderBySQL(dbType, sortInfo, pkColumns); + let sql = `${baseSql}${orderBySQL}`; + const totalRows = Number(pagination.total); + const hasFiniteTotal = Number.isFinite(totalRows) && totalRows >= 0; + const totalKnown = pagination.totalKnown && hasFiniteTotal; + const totalPages = hasFiniteTotal ? Math.max(1, Math.ceil(totalRows / size)) : 0; + const currentPage = totalPages > 0 ? Math.min(Math.max(1, page), totalPages) : Math.max(1, page); + const offset = (currentPage - 1) * size; + const isClickHouse = dbTypeLower === 'clickhouse'; + const reverseOrderSQL = isClickHouse ? reverseOrderBySQL(orderBySQL) : ''; + let useClickHouseReversePagination = false; + let clickHouseReverseLimit = 0; + let clickHouseReverseHasMore = false; + // ClickHouse 深分页在超大 OFFSET 下容易超时。对于总数已知且存在 ORDER BY 的场景, + // 当“尾部偏移”小于“头部偏移”时,改为反向 ORDER BY + 小 OFFSET,并在前端翻转结果。 + if (isClickHouse && totalKnown && offset > 0 && reverseOrderSQL) { + const pageRowCount = Math.max(0, Math.min(size, totalRows - offset)); + if (pageRowCount > 0) { + const tailOffset = Math.max(0, totalRows - (offset + pageRowCount)); + if (tailOffset < offset) { + sql = `${baseSql}${reverseOrderSQL} LIMIT ${pageRowCount} OFFSET ${tailOffset}`; + useClickHouseReversePagination = true; + clickHouseReverseLimit = pageRowCount; + clickHouseReverseHasMore = currentPage < totalPages; + } + } + } + if (!useClickHouseReversePagination) { + // 大表性能:打开表不阻塞在 COUNT(*),先通过多取 1 条判断是否还有下一页;总数在后台统计并异步回填。 + sql += ` LIMIT ${size + 1} OFFSET ${offset}`; + } const requestStartTime = Date.now(); let executedSql = sql; try { const executeDataQuery = async (querySql: string, attemptLabel: string) => { const startTime = Date.now(); - const result = await DBQuery(config as any, dbName, querySql); - addSqlLog({ - id: `log-${Date.now()}-data`, - timestamp: Date.now(), - sql: querySql, - status: result.success ? 'success' : 'error', - duration: Date.now() - startTime, - message: result.success ? '' : `${attemptLabel}: ${result.message}`, - affectedRows: Array.isArray(result.data) ? result.data.length : undefined, - dbName - }); - return result; + try { + const result = await DBQuery(config as any, dbName, querySql); + addSqlLog({ + id: `log-${Date.now()}-data`, + timestamp: Date.now(), + sql: querySql, + status: result.success ? 'success' : 'error', + duration: Date.now() - startTime, + message: result.success ? '' : `${attemptLabel}: ${result.message}`, + affectedRows: Array.isArray(result.data) ? 
result.data.length : undefined, + dbName + }); + return result; + } catch (e: any) { + const errMessage = String(e?.message || e || 'query failed'); + addSqlLog({ + id: `log-${Date.now()}-data`, + timestamp: Date.now(), + sql: querySql, + status: 'error', + duration: Date.now() - startTime, + message: `${attemptLabel}: ${errMessage}`, + dbName + }); + return { success: false, message: errMessage, data: [], fields: [] }; + } }; const hasSort = !!sortInfo?.columnKey && (sortInfo?.order === 'ascend' || sortInfo?.order === 'descend'); const isSortMemoryErr = (msg: string) => /error\s*1038|out of sort memory/i.test(String(msg || '')); let resData = await executeDataQuery(sql, '主查询'); + if (!resData.success && dbTypeLower === 'duckdb' && isDuckDBUnsupportedTypeError(String(resData.message || ''))) { + const cacheKey = `${tab.connectionId}|${dbName}|${tableName}`; + let safeSelect = duckdbSafeSelectCacheRef.current[cacheKey] || ''; + if (!safeSelect) { + try { + const resCols = await DBGetColumns(config as any, dbName, tableName); + if (resCols?.success && Array.isArray(resCols.data)) { + const columnDefs = resCols.data as ColumnDefinition[]; + const selectParts = columnDefs.map((col) => { + const colName = String(col?.name || '').trim(); + if (!colName) return ''; + const quotedCol = quoteIdentPart(dbType, colName); + if (isDuckDBComplexColumnType(col?.type)) { + return `CAST(${quotedCol} AS VARCHAR) AS ${quotedCol}`; + } + return quotedCol; + }).filter(Boolean); + if (selectParts.length > 0) { + safeSelect = selectParts.join(', '); + duckdbSafeSelectCacheRef.current[cacheKey] = safeSelect; + } + } + } catch { + // ignore and keep original error path + } + } + + if (safeSelect) { + let fallbackSql = `SELECT ${safeSelect} FROM ${quoteQualifiedIdent(dbType, tableName)} ${whereSQL}`; + fallbackSql += buildOrderBySQL(dbType, sortInfo, pkColumns); + fallbackSql += ` LIMIT ${size + 1} OFFSET ${offset}`; + executedSql = fallbackSql; + resData = await executeDataQuery(fallbackSql, '复杂类型降级重试'); + } + } + if (!resData.success && isMySQLFamily && hasSort && isSortMemoryErr(resData.message)) { const retrySql32MB = withSortBufferTuningSQL(dbType, sql, 32 * 1024 * 1024); if (retrySql32MB !== sql) { @@ -348,7 +450,12 @@ const DataViewer: React.FC<{ tab: TabData }> = ({ tab }) => { let resultData = resData.data as any[]; if (!Array.isArray(resultData)) resultData = []; - const hasMore = resultData.length > size; + if (useClickHouseReversePagination) { + // 反向查询后恢复为原排序方向,保证用户看到的仍是“最后一页正序数据”。 + resultData = resultData.slice(0, clickHouseReverseLimit).reverse(); + } + + const hasMore = useClickHouseReversePagination ? clickHouseReverseHasMore : resultData.length > size; if (hasMore) resultData = resultData.slice(0, size); let fieldNames = resData.fields || []; @@ -363,7 +470,7 @@ const DataViewer: React.FC<{ tab: TabData }> = ({ tab }) => { setData(resultData); const countKey = `${tab.connectionId}|${dbName}|${tableName}|${whereSQL}`; const derivedTotalKnown = !hasMore; - const derivedTotal = derivedTotalKnown ? offset + resultData.length : page * size + 1; + const derivedTotal = derivedTotalKnown ? offset + resultData.length : currentPage * size + 1; const isDuckDB = dbTypeLower === 'duckdb'; const minExpectedTotal = hasMore ? 
offset + resultData.length + 1 : offset + resultData.length; if (derivedTotalKnown) countKeyRef.current = countKey; @@ -377,7 +484,7 @@ const DataViewer: React.FC<{ tab: TabData }> = ({ tab }) => { if (derivedTotalKnown) { return { ...prev, - current: page, + current: currentPage, pageSize: size, total: derivedTotal, totalKnown: true, @@ -388,19 +495,19 @@ const DataViewer: React.FC<{ tab: TabData }> = ({ tab }) => { } if (prev.totalKnown && countKeyRef.current === countKey) { if (!isDuckDB) { - return { ...prev, current: page, pageSize: size }; + return { ...prev, current: currentPage, pageSize: size }; } // 当当前页存在“下一页”信号时,已知总数至少应大于当前页末尾。 // 若旧总数不满足该条件(例如历史统计值为 0),降级为未知总数并回退到 derivedTotal。 if (Number.isFinite(prev.total) && prev.total >= minExpectedTotal) { - return { ...prev, current: page, pageSize: size }; + return { ...prev, current: currentPage, pageSize: size }; } } const keepManualCounting = prev.totalCountLoading && manualCountKeyRef.current === countKey; if (isDuckDB && prev.totalApprox && duckdbApproxKeyRef.current === countKey && Number.isFinite(prev.total) && prev.total >= minExpectedTotal) { return { ...prev, - current: page, + current: currentPage, pageSize: size, totalKnown: false, totalApprox: true, @@ -410,7 +517,7 @@ const DataViewer: React.FC<{ tab: TabData }> = ({ tab }) => { } return { ...prev, - current: page, + current: currentPage, pageSize: size, total: derivedTotal, totalKnown: false, @@ -489,7 +596,7 @@ const DataViewer: React.FC<{ tab: TabData }> = ({ tab }) => { (async () => { for (const approxSql of approxSqlCandidates) { try { - const approxRes = await runIsolatedQuery(approxConfig, dbName, approxSql); + const approxRes = await DBQuery(approxConfig as any, dbName, approxSql); if (duckdbApproxSeqRef.current !== approxSeq) return; if (countKeyRef.current !== countKey) return; if (!approxRes?.success || !Array.isArray(approxRes.data) || approxRes.data.length === 0) continue; @@ -534,7 +641,7 @@ const DataViewer: React.FC<{ tab: TabData }> = ({ tab }) => { }); } if (fetchSeqRef.current === seq) setLoading(false); - }, [connections, tab, sortInfo, filterConditions, pkColumns, runIsolatedQuery]); + }, [connections, tab, sortInfo, filterConditions, pkColumns, pagination.total, pagination.totalKnown]); // 依赖 pkColumns:在无手动排序时可回退到主键稳定排序。 // 主键信息只会在首次加载后更新一次,避免循环查询。 diff --git a/frontend/wailsjs/go/app/App.d.ts b/frontend/wailsjs/go/app/App.d.ts index 72ad6a1..98e4dd2 100755 --- a/frontend/wailsjs/go/app/App.d.ts +++ b/frontend/wailsjs/go/app/App.d.ts @@ -164,6 +164,8 @@ export function ResolveDriverPackageDownloadURL(arg1:string,arg2:string):Promise export function ResolveDriverRepositoryURL(arg1:string):Promise; +export function SelectDatabaseFile(arg1:string,arg2:string):Promise; + export function SelectDriverDownloadDirectory(arg1:string):Promise; export function SelectDriverPackageDirectory(arg1:string):Promise; diff --git a/frontend/wailsjs/go/app/App.js b/frontend/wailsjs/go/app/App.js index 86f801f..e6def2b 100755 --- a/frontend/wailsjs/go/app/App.js +++ b/frontend/wailsjs/go/app/App.js @@ -322,6 +322,10 @@ export function ResolveDriverRepositoryURL(arg1) { return window['go']['app']['App']['ResolveDriverRepositoryURL'](arg1); } +export function SelectDatabaseFile(arg1, arg2) { + return window['go']['app']['App']['SelectDatabaseFile'](arg1, arg2); +} + export function SelectDriverDownloadDirectory(arg1) { return window['go']['app']['App']['SelectDriverDownloadDirectory'](arg1); } diff --git a/internal/app/app.go b/internal/app/app.go index b8dd6a7..5616523 
100644 --- a/internal/app/app.go +++ b/internal/app/app.go @@ -74,16 +74,67 @@ func (a *App) Shutdown(ctx context.Context) { logger.Close() } -// Helper: Generate a unique key for the connection config -func getCacheKey(config connection.ConnectionConfig) string { - if !config.UseSSH { - config.SSH = connection.SSHConfig{} +func normalizeCacheKeyConfig(config connection.ConnectionConfig) connection.ConnectionConfig { + normalized := config + normalized.Type = strings.ToLower(strings.TrimSpace(normalized.Type)) + // timeout 仅用于 Query/Ping 控制,不应作为物理连接复用键的一部分。 + normalized.Timeout = 0 + normalized.SavePassword = false + + if !normalized.UseSSH { + normalized.SSH = connection.SSHConfig{} } - if !config.UseProxy { - config.Proxy = connection.ProxyConfig{} + if !normalized.UseProxy { + normalized.Proxy = connection.ProxyConfig{} } - b, _ := json.Marshal(config) + if isFileDatabaseType(normalized.Type) { + dsn := strings.TrimSpace(normalized.Host) + if dsn == "" { + dsn = strings.TrimSpace(normalized.Database) + } + if dsn == "" { + dsn = ":memory:" + } + + // DuckDB/SQLite 仅基于文件来源识别连接,其他网络字段不参与键计算。 + normalized.Host = dsn + normalized.Database = "" + normalized.Port = 0 + normalized.User = "" + normalized.Password = "" + normalized.URI = "" + normalized.Hosts = nil + normalized.Topology = "" + normalized.MySQLReplicaUser = "" + normalized.MySQLReplicaPassword = "" + normalized.ReplicaSet = "" + normalized.AuthSource = "" + normalized.ReadPreference = "" + normalized.MongoSRV = false + normalized.MongoAuthMechanism = "" + normalized.MongoReplicaUser = "" + normalized.MongoReplicaPassword = "" + } + + return normalized +} + +func resolveFileDatabaseDSN(config connection.ConnectionConfig) string { + dsn := strings.TrimSpace(config.Host) + if dsn == "" { + dsn = strings.TrimSpace(config.Database) + } + if dsn == "" { + dsn = ":memory:" + } + return dsn +} + +// Helper: Generate a unique key for the connection config +func getCacheKey(config connection.ConnectionConfig) string { + normalized := normalizeCacheKeyConfig(config) + b, _ := json.Marshal(normalized) sum := sha256.Sum256(b) return hex.EncodeToString(sum[:]) } @@ -235,12 +286,19 @@ func (a *App) openDatabaseIsolated(config connection.ConnectionConfig) (db.Datab func (a *App) getDatabaseWithPing(config connection.ConnectionConfig, forcePing bool) (db.Database, error) { effectiveConfig := applyGlobalProxyToConnection(config) + isFileDB := isFileDatabaseType(effectiveConfig.Type) key := getCacheKey(effectiveConfig) shortKey := key if len(shortKey) > 12 { shortKey = shortKey[:12] } + if isFileDB { + rawDSN := resolveFileDatabaseDSN(effectiveConfig) + normalizedDSN := resolveFileDatabaseDSN(normalizeCacheKeyConfig(effectiveConfig)) + logger.Infof("文件库连接缓存探测:类型=%s 原始DSN=%s 归一化DSN=%s timeout=%ds forcePing=%t 缓存Key=%s", + strings.TrimSpace(effectiveConfig.Type), rawDSN, normalizedDSN, effectiveConfig.Timeout, forcePing, shortKey) + } if supported, reason := db.DriverRuntimeSupportStatus(effectiveConfig.Type); !supported { if strings.TrimSpace(reason) == "" { @@ -260,6 +318,9 @@ func (a *App) getDatabaseWithPing(config connection.ConnectionConfig, forcePing entry, ok := a.dbCache[key] a.mu.RUnlock() if ok { + if isFileDB { + logger.Infof("命中文件库连接缓存:类型=%s 缓存Key=%s", strings.TrimSpace(effectiveConfig.Type), shortKey) + } needPing := forcePing if !needPing { lastPing := entry.lastPing @@ -269,6 +330,9 @@ func (a *App) getDatabaseWithPing(config connection.ConnectionConfig, forcePing } if !needPing { + if isFileDB { + logger.Infof("复用文件库连接缓存(免 
Ping):类型=%s 缓存Key=%s", strings.TrimSpace(effectiveConfig.Type), shortKey) + } return entry.inst, nil } @@ -280,6 +344,9 @@ func (a *App) getDatabaseWithPing(config connection.ConnectionConfig, forcePing a.dbCache[key] = cur } a.mu.Unlock() + if isFileDB { + logger.Infof("复用文件库连接缓存(Ping 成功):类型=%s 缓存Key=%s", strings.TrimSpace(effectiveConfig.Type), shortKey) + } return entry.inst, nil } else { logger.Error(err, "缓存连接不可用,准备重建:%s 缓存Key=%s", formatConnSummary(effectiveConfig), shortKey) @@ -294,6 +361,12 @@ func (a *App) getDatabaseWithPing(config connection.ConnectionConfig, forcePing delete(a.dbCache, key) } a.mu.Unlock() + if isFileDB { + logger.Infof("文件库缓存连接已剔除,准备新建连接:类型=%s 缓存Key=%s", strings.TrimSpace(effectiveConfig.Type), shortKey) + } + } + if isFileDB { + logger.Infof("未命中文件库连接缓存,开始创建连接:类型=%s 缓存Key=%s", strings.TrimSpace(effectiveConfig.Type), shortKey) } logger.Infof("获取数据库连接:%s 缓存Key=%s", formatConnSummary(effectiveConfig), shortKey) @@ -324,6 +397,9 @@ func (a *App) getDatabaseWithPing(config connection.ConnectionConfig, forcePing a.mu.Unlock() // Prefer existing cached connection to avoid cache racing duplicates. _ = dbInst.Close() + if isFileDB { + logger.Infof("并发创建命中已存在文件库连接,关闭新建连接并复用缓存:类型=%s 缓存Key=%s", strings.TrimSpace(effectiveConfig.Type), shortKey) + } return existing.inst, nil } a.dbCache[key] = cachedDatabase{inst: dbInst, lastPing: now} diff --git a/internal/app/app_cache_key_test.go b/internal/app/app_cache_key_test.go new file mode 100644 index 0000000..ef7714f --- /dev/null +++ b/internal/app/app_cache_key_test.go @@ -0,0 +1,63 @@ +package app + +import ( + "testing" + + "GoNavi-Wails/internal/connection" +) + +func TestGetCacheKey_IgnoreTimeout(t *testing.T) { + base := connection.ConnectionConfig{ + Type: "duckdb", + Host: `C:\data\songs.duckdb`, + Timeout: 30, + UseProxy: false, + UseSSH: false, + } + modified := base + modified.Timeout = 120 + + left := getCacheKey(base) + right := getCacheKey(modified) + if left != right { + t.Fatalf("expected same cache key when only timeout differs, got %s vs %s", left, right) + } +} + +func TestGetCacheKey_DuckDBHostAndDatabaseEquivalent(t *testing.T) { + withHost := connection.ConnectionConfig{ + Type: "duckdb", + Host: `D:\music\songs.duckdb`, + } + withDatabase := connection.ConnectionConfig{ + Type: "duckdb", + Database: `D:\music\songs.duckdb`, + } + + left := getCacheKey(withHost) + right := getCacheKey(withDatabase) + if left != right { + t.Fatalf("expected same cache key for duckdb host/database path, got %s vs %s", left, right) + } +} + +func TestGetCacheKey_KeepDatabaseIsolation(t *testing.T) { + a := connection.ConnectionConfig{ + Type: "mysql", + Host: "127.0.0.1", + Port: 3306, + User: "root", + Password: "root", + Database: "db_a", + Timeout: 30, + } + b := a + b.Database = "db_b" + b.Timeout = 5 + + left := getCacheKey(a) + right := getCacheKey(b) + if left == right { + t.Fatalf("expected different cache key for different database targets") + } +} diff --git a/internal/app/methods_driver.go b/internal/app/methods_driver.go index cef721a..49ea66e 100644 --- a/internal/app/methods_driver.go +++ b/internal/app/methods_driver.go @@ -218,7 +218,7 @@ const builtinDriverManifestJSON = `{ "sphinx": { "engine": "go", "version": "1.9.3", "checksumPolicy": "off", "downloadUrl": "builtin://activate/sphinx" }, "sqlserver": { "engine": "go", "version": "1.9.6", "checksumPolicy": "off", "downloadUrl": "builtin://activate/sqlserver" }, "sqlite": { "engine": "go", "version": "1.44.3", "checksumPolicy": "off", "downloadUrl": 
"builtin://activate/sqlite" }, - "duckdb": { "engine": "go", "version": "2.5.5", "checksumPolicy": "off", "downloadUrl": "builtin://activate/duckdb" }, + "duckdb": { "engine": "go", "version": "2.5.6", "checksumPolicy": "off", "downloadUrl": "builtin://activate/duckdb" }, "dameng": { "engine": "go", "version": "1.8.22", "checksumPolicy": "off", "downloadUrl": "builtin://activate/dameng" }, "kingbase": { "engine": "go", "version": "0.0.0-20201021123113-29bd62a876c3", "checksumPolicy": "off", "downloadUrl": "builtin://activate/kingbase" }, "highgo": { "engine": "go", "version": "0.0.0-local", "checksumPolicy": "off", "downloadUrl": "builtin://activate/highgo" }, @@ -271,7 +271,7 @@ var latestDriverVersionMap = map[string]string{ "sphinx": "1.9.3", "sqlserver": "1.9.6", "sqlite": "1.46.1", - "duckdb": "2.5.5", + "duckdb": "2.5.6", "dameng": "1.8.22", "kingbase": "0.0.0-20201021123113-29bd62a876c3", "highgo": "0.0.0-local", diff --git a/internal/app/methods_file.go b/internal/app/methods_file.go index 561ef9b..6efef4c 100644 --- a/internal/app/methods_file.go +++ b/internal/app/methods_file.go @@ -8,6 +8,7 @@ import ( "math" "os" "path/filepath" + "reflect" "sort" "strconv" "strings" @@ -120,6 +121,78 @@ func (a *App) SelectSSHKeyFile(currentPath string) connection.QueryResult { return connection.QueryResult{Success: true, Data: map[string]interface{}{"path": selection}} } +func (a *App) SelectDatabaseFile(currentPath string, driverType string) connection.QueryResult { + defaultDir := strings.TrimSpace(currentPath) + if defaultDir == "" { + if home, err := os.UserHomeDir(); err == nil { + defaultDir = home + } + } + if filepath.Ext(defaultDir) != "" { + defaultDir = filepath.Dir(defaultDir) + } + if defaultDir != "" && !filepath.IsAbs(defaultDir) { + if abs, err := filepath.Abs(defaultDir); err == nil { + defaultDir = abs + } + } + + normalizedType := strings.ToLower(strings.TrimSpace(driverType)) + filters := []runtime.FileFilter{ + { + DisplayName: "数据库文件", + Pattern: "*.db;*.sqlite;*.sqlite3;*.db3;*.duckdb;*.ddb", + }, + { + DisplayName: "所有文件", + Pattern: "*", + }, + } + title := "选择数据库文件" + switch normalizedType { + case "sqlite": + title = "选择 SQLite 数据文件" + filters = []runtime.FileFilter{ + { + DisplayName: "SQLite 文件", + Pattern: "*.db;*.sqlite;*.sqlite3;*.db3", + }, + { + DisplayName: "所有文件", + Pattern: "*", + }, + } + case "duckdb": + title = "选择 DuckDB 数据文件" + filters = []runtime.FileFilter{ + { + DisplayName: "DuckDB 文件", + Pattern: "*.duckdb;*.ddb;*.db", + }, + { + DisplayName: "所有文件", + Pattern: "*", + }, + } + } + + selection, err := runtime.OpenFileDialog(a.ctx, runtime.OpenDialogOptions{ + Title: title, + DefaultDirectory: defaultDir, + Filters: filters, + }) + if err != nil { + return connection.QueryResult{Success: false, Message: err.Error()} + } + if strings.TrimSpace(selection) == "" { + return connection.QueryResult{Success: false, Message: "Cancelled"} + } + if abs, err := filepath.Abs(selection); err == nil { + selection = abs + } + return connection.QueryResult{Success: true, Data: map[string]interface{}{"path": selection}} +} + // PreviewImportFile 解析导入文件,返回字段列表、总行数、前 5 行预览数据 func (a *App) PreviewImportFile(filePath string) connection.QueryResult { if filePath == "" { @@ -1527,7 +1600,11 @@ func writeRowsToFile(f *os.File, data []map[string]interface{}, columns []string return err } } - if err := jsonEncoder.Encode(rowMap); err != nil { + exportedRow := make(map[string]interface{}, len(columns)) + for _, col := range columns { + exportedRow[col] = 
normalizeExportJSONValue(rowMap[col]) + } + if err := jsonEncoder.Encode(exportedRow); err != nil { return err } isJsonFirstRow = false @@ -1567,11 +1644,102 @@ func formatExportCellText(val interface{}) string { return "NULL" } return v.Format("2006-01-02 15:04:05") + case float32: + f := float64(v) + if math.IsNaN(f) || math.IsInf(f, 0) { + return "NULL" + } + return strconv.FormatFloat(f, 'f', -1, 32) + case float64: + if math.IsNaN(v) || math.IsInf(v, 0) { + return "NULL" + } + return strconv.FormatFloat(v, 'f', -1, 64) + case json.Number: + text := strings.TrimSpace(v.String()) + if text == "" { + return "NULL" + } + return text default: return fmt.Sprintf("%v", val) } } +func normalizeExportJSONValue(val interface{}) interface{} { + if val == nil { + return nil + } + + switch v := val.(type) { + case float32: + f := float64(v) + if math.IsNaN(f) || math.IsInf(f, 0) { + return nil + } + return json.Number(strconv.FormatFloat(f, 'f', -1, 32)) + case float64: + if math.IsNaN(v) || math.IsInf(v, 0) { + return nil + } + return json.Number(strconv.FormatFloat(v, 'f', -1, 64)) + case json.Number: + text := strings.TrimSpace(v.String()) + if text == "" { + return nil + } + return json.Number(text) + case map[string]interface{}: + out := make(map[string]interface{}, len(v)) + for key, item := range v { + out[key] = normalizeExportJSONValue(item) + } + return out + case []interface{}: + items := make([]interface{}, len(v)) + for i, item := range v { + items[i] = normalizeExportJSONValue(item) + } + return items + } + + rv := reflect.ValueOf(val) + switch rv.Kind() { + case reflect.Pointer, reflect.Interface: + if rv.IsNil() { + return nil + } + return normalizeExportJSONValue(rv.Elem().Interface()) + case reflect.Map: + if rv.IsNil() { + return nil + } + out := make(map[string]interface{}, rv.Len()) + iter := rv.MapRange() + for iter.Next() { + out[fmt.Sprint(iter.Key().Interface())] = normalizeExportJSONValue(iter.Value().Interface()) + } + return out + case reflect.Slice: + if rv.IsNil() { + return nil + } + if rv.Type().Elem().Kind() == reflect.Uint8 { + return val + } + fallthrough + case reflect.Array: + size := rv.Len() + items := make([]interface{}, size) + for i := 0; i < size; i++ { + items[i] = normalizeExportJSONValue(rv.Index(i).Interface()) + } + return items + default: + return val + } +} + // writeRowsToXlsx 使用 excelize 写入真正的 xlsx 格式文件 func writeRowsToXlsx(filename string, data []map[string]interface{}, columns []string) error { xlsx := excelize.NewFile() diff --git a/internal/app/methods_file_export_test.go b/internal/app/methods_file_export_test.go new file mode 100644 index 0000000..7fef8a9 --- /dev/null +++ b/internal/app/methods_file_export_test.go @@ -0,0 +1,89 @@ +package app + +import ( + "bytes" + "encoding/json" + "os" + "strings" + "testing" +) + +func TestFormatExportCellText_FloatNoScientificNotation(t *testing.T) { + got := formatExportCellText(1.445663e+06) + if strings.Contains(strings.ToLower(got), "e+") || strings.Contains(strings.ToLower(got), "e-") { + t.Fatalf("不应输出科学计数法,got=%q", got) + } + if got != "1445663" { + t.Fatalf("浮点整值导出异常,want=%q got=%q", "1445663", got) + } +} + +func TestWriteRowsToFile_Markdown_NumberKeepPlainText(t *testing.T) { + f, err := os.CreateTemp("", "gonavi-export-*.md") + if err != nil { + t.Fatalf("创建临时文件失败: %v", err) + } + defer os.Remove(f.Name()) + defer f.Close() + + data := []map[string]interface{}{ + {"id": 1.445663e+06}, + } + columns := []string{"id"} + + if err := writeRowsToFile(f, data, columns, "md"); err != nil { + 
t.Fatalf("写入 md 失败: %v", err) + } + + contentBytes, err := os.ReadFile(f.Name()) + if err != nil { + t.Fatalf("读取 md 失败: %v", err) + } + content := string(contentBytes) + if strings.Contains(strings.ToLower(content), "e+") || strings.Contains(strings.ToLower(content), "e-") { + t.Fatalf("md 导出包含科学计数法: %s", content) + } + if !strings.Contains(content, "| 1445663 |") { + t.Fatalf("md 导出未保留整数字面量,content=%s", content) + } +} + +func TestWriteRowsToFile_JSON_NumberKeepPlainText(t *testing.T) { + f, err := os.CreateTemp("", "gonavi-export-*.json") + if err != nil { + t.Fatalf("创建临时文件失败: %v", err) + } + defer os.Remove(f.Name()) + defer f.Close() + + data := []map[string]interface{}{ + {"id": 1.445663e+06}, + } + columns := []string{"id"} + + if err := writeRowsToFile(f, data, columns, "json"); err != nil { + t.Fatalf("写入 json 失败: %v", err) + } + + contentBytes, err := os.ReadFile(f.Name()) + if err != nil { + t.Fatalf("读取 json 失败: %v", err) + } + content := string(contentBytes) + if strings.Contains(strings.ToLower(content), "e+") || strings.Contains(strings.ToLower(content), "e-") { + t.Fatalf("json 导出包含科学计数法: %s", content) + } + + var decoded []map[string]json.Number + decoder := json.NewDecoder(bytes.NewReader(contentBytes)) + decoder.UseNumber() + if err := decoder.Decode(&decoded); err != nil { + t.Fatalf("解析导出 json 失败: %v", err) + } + if len(decoded) != 1 { + t.Fatalf("导出行数异常,got=%d", len(decoded)) + } + if decoded[0]["id"].String() != "1445663" { + t.Fatalf("json 数值格式异常,want=1445663 got=%s", decoded[0]["id"].String()) + } +} diff --git a/internal/db/query_value.go b/internal/db/query_value.go index d4dde25..36e9744 100644 --- a/internal/db/query_value.go +++ b/internal/db/query_value.go @@ -3,6 +3,7 @@ package db import ( "encoding/hex" "fmt" + "reflect" "strings" "unicode" "unicode/utf8" @@ -18,7 +19,70 @@ func normalizeQueryValueWithDBType(v interface{}, databaseTypeName string) inter if b, ok := v.([]byte); ok { return bytesToDisplayValue(b, databaseTypeName) } - return v + return normalizeCompositeQueryValue(v) +} + +func normalizeCompositeQueryValue(v interface{}) interface{} { + if v == nil { + return nil + } + + switch typed := v.(type) { + case []interface{}: + items := make([]interface{}, len(typed)) + for i, item := range typed { + items[i] = normalizeQueryValue(item) + } + return items + case map[string]interface{}: + out := make(map[string]interface{}, len(typed)) + for key, value := range typed { + out[key] = normalizeQueryValue(value) + } + return out + } + + rv := reflect.ValueOf(v) + switch rv.Kind() { + case reflect.Pointer: + if rv.IsNil() { + return nil + } + return normalizeQueryValue(rv.Elem().Interface()) + case reflect.Map: + if rv.IsNil() { + return nil + } + out := make(map[string]interface{}, rv.Len()) + iter := rv.MapRange() + for iter.Next() { + out[mapKeyToString(iter.Key().Interface())] = normalizeQueryValue(iter.Value().Interface()) + } + return out + case reflect.Slice, reflect.Array: + // []byte 在上层已单独处理,这里保留对其它切片/数组的递归规整。 + if rv.Kind() == reflect.Slice && rv.IsNil() { + return nil + } + size := rv.Len() + items := make([]interface{}, size) + for i := 0; i < size; i++ { + items[i] = normalizeQueryValue(rv.Index(i).Interface()) + } + return items + default: + return v + } +} + +func mapKeyToString(key interface{}) string { + if key == nil { + return "null" + } + if s, ok := key.(string); ok { + return s + } + return fmt.Sprintf("%v", key) } func bytesToDisplayValue(b []byte, databaseTypeName string) interface{} { diff --git 
a/internal/db/query_value_test.go b/internal/db/query_value_test.go index 1b2c140..a19fa26 100644 --- a/internal/db/query_value_test.go +++ b/internal/db/query_value_test.go @@ -2,6 +2,8 @@ package db import "testing" +type duckMapLike map[any]any + func TestNormalizeQueryValueWithDBType_BitBytes(t *testing.T) { v := normalizeQueryValueWithDBType([]byte{0x00}, "BIT") if v != int64(0) { @@ -42,3 +44,40 @@ func TestNormalizeQueryValueWithDBType_ByteFallbacks(t *testing.T) { t.Fatalf("未知类型 0xff 期望返回 0xff,实际=%v(%T)", v, v) } } + +func TestNormalizeQueryValueWithDBType_MapAnyAnyForJSON(t *testing.T) { + input := duckMapLike{ + "id": int64(1), + 1: "one", + true: []interface{}{duckMapLike{2: "two"}}, + "bytes": []byte("ok"), + } + + v := normalizeQueryValueWithDBType(input, "") + root, ok := v.(map[string]interface{}) + if !ok { + t.Fatalf("期望转换为 map[string]interface{},实际=%T", v) + } + + if root["id"] != int64(1) { + t.Fatalf("id 字段异常,实际=%v(%T)", root["id"], root["id"]) + } + if root["1"] != "one" { + t.Fatalf("数字 key 未被字符串化,实际=%v(%T)", root["1"], root["1"]) + } + if root["bytes"] != "ok" { + t.Fatalf("嵌套 []byte 未被转换,实际=%v(%T)", root["bytes"], root["bytes"]) + } + + arr, ok := root["true"].([]interface{}) + if !ok || len(arr) != 1 { + t.Fatalf("bool key 下的数组结构异常,实际=%v(%T)", root["true"], root["true"]) + } + nested, ok := arr[0].(map[string]interface{}) + if !ok { + t.Fatalf("嵌套 map 未被转换,实际=%v(%T)", arr[0], arr[0]) + } + if nested["2"] != "two" { + t.Fatalf("嵌套 map 数字 key 未转换,实际=%v(%T)", nested["2"], nested["2"]) + } +} From 4d0940636d0b671e3b9525cbde1fb2130288f1f9 Mon Sep 17 00:00:00 2001 From: Syngnat Date: Mon, 2 Mar 2026 11:10:48 +0800 Subject: [PATCH 02/48] =?UTF-8?q?=E2=9C=A8=20feat(frontend-driver):=20?= =?UTF-8?q?=E9=A9=B1=E5=8A=A8=E7=AE=A1=E7=90=86=E6=94=AF=E6=8C=81=E5=BF=AB?= =?UTF-8?q?=E9=80=9F=E6=90=9C=E7=B4=A2=E5=B9=B6=E4=BC=98=E5=8C=96=E4=BF=A1?= =?UTF-8?q?=E6=81=AF=E5=B1=95=E7=A4=BA?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 新增搜索框,支持按 DuckDB/ClickHouse 等关键字快速定位驱动 - 显示“匹配 x / y”统计与无结果提示 - 优化头部区域排版,提升透明/暗色场景下的视觉对齐 --- .../src/components/DriverManagerModal.tsx | 62 +++++++++++++++---- 1 file changed, 51 insertions(+), 11 deletions(-) diff --git a/frontend/src/components/DriverManagerModal.tsx b/frontend/src/components/DriverManagerModal.tsx index b198d5b..ce86735 100644 --- a/frontend/src/components/DriverManagerModal.tsx +++ b/frontend/src/components/DriverManagerModal.tsx @@ -1,5 +1,5 @@ import React, { useCallback, useEffect, useMemo, useRef, useState } from 'react'; -import { Alert, Button, Collapse, Modal, Progress, Select, Space, Switch, Table, Tag, Typography, message } from 'antd'; +import { Alert, Button, Collapse, Input, Modal, Progress, Select, Space, Switch, Table, Tag, Typography, message } from 'antd'; import { DeleteOutlined, DownloadOutlined, FileSearchOutlined, FolderOpenOutlined, ReloadOutlined } from '@ant-design/icons'; import { EventsOn } from '../../wailsjs/runtime/runtime'; import { useStore } from '../store'; @@ -90,6 +90,7 @@ type DriverVersionOption = { const buildVersionOptionKey = (option: DriverVersionOption) => `${option.version}@@${option.downloadUrl}`; const buildVersionSizeLoadingKey = (driverType: string, optionKey: string) => `${driverType}@@${optionKey}`; const DRIVER_TABLE_SCROLL_X = 1450; +const normalizeDriverSearchText = (value: string) => String(value || '').trim().toLowerCase(); const buildVersionSelectOptions = (options: DriverVersionOption[]) => { type SelectOption = { value: string; label: 
string }; @@ -151,6 +152,7 @@ const DriverManagerModal: React.FC<{ open: boolean; onClose: () => void }> = ({ const [downloadDir, setDownloadDir] = useState(''); const [networkChecking, setNetworkChecking] = useState(false); const [networkStatus, setNetworkStatus] = useState(null); + const [searchKeyword, setSearchKeyword] = useState(''); const [rows, setRows] = useState([]); const [actionState, setActionState] = useState<{ driverType: string; kind: DriverActionKind }>({ driverType: '', kind: '' }); const [progressMap, setProgressMap] = useState>({}); @@ -1075,6 +1077,31 @@ const DriverManagerModal: React.FC<{ open: boolean; onClose: () => void }> = ({ } return rows.find((item) => item.type === logDriverType); }, [logDriverType, rows]); + const normalizedSearchKeyword = useMemo(() => normalizeDriverSearchText(searchKeyword), [searchKeyword]); + const filteredRows = useMemo(() => { + if (!normalizedSearchKeyword) { + return rows; + } + return rows.filter((row) => { + const searchableParts = [ + row.name, + row.type, + row.pinnedVersion, + row.installedVersion, + row.message, + row.builtIn ? '内置' : '外置', + row.connectable ? '已启用' : row.packageInstalled ? '已安装' : '未启用', + ]; + const searchableText = normalizeDriverSearchText(searchableParts.filter(Boolean).join(' ')); + return searchableText.includes(normalizedSearchKeyword); + }); + }, [normalizedSearchKeyword, rows]); + const filterSummaryText = useMemo(() => { + if (normalizedSearchKeyword) { + return `匹配 ${filteredRows.length} / ${rows.length}`; + } + return `共 ${rows.length} 个驱动`; + }, [filteredRows.length, normalizedSearchKeyword, rows.length]); const activeDriverLogs = operationLogMap[logDriverType] || []; const activeDriverLogLines = activeDriverLogs.map((item) => `[${item.time}] ${item.text}`); @@ -1190,7 +1217,14 @@ const DriverManagerModal: React.FC<{ open: boolean; onClose: () => void }> = ({ )} /> - +
+ setSearchKeyword(event.target.value)} + style={{ minWidth: 300, flex: '1 1 360px' }} + /> 覆盖已安装 void }> = ({ onChange={(checked) => setForceOverwriteInstalled(checked)} disabled={batchDirectoryImporting} /> + - - +
+ {filterSummaryText}
void }> = ({ rowKey="type" loading={loading} columns={columns as any} - dataSource={rows} + dataSource={filteredRows} pagination={false} size="middle" sticky={false} scroll={{ x: DRIVER_TABLE_SCROLL_X }} + locale={{ + emptyText: normalizedSearchKeyword + ? `未找到匹配“${String(searchKeyword || '').trim()}”的驱动` + : '暂无驱动数据', + }} />
From 84688e995abcf72ee973c48acdad6d4e70afc013 Mon Sep 17 00:00:00 2001 From: Syngnat Date: Mon, 2 Mar 2026 11:46:59 +0800 Subject: [PATCH 03/48] =?UTF-8?q?=F0=9F=94=A7=20fix(connection-modal):=20?= =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E5=A4=9A=E6=95=B0=E6=8D=AE=E6=BA=90URI?= =?UTF-8?q?=E5=AF=BC=E5=85=A5=E8=A7=A3=E6=9E=90=E5=B9=B6=E6=A0=A1=E6=AD=A3?= =?UTF-8?q?Oracle=E6=9C=8D=E5=8A=A1=E5=90=8D=E6=A0=A1=E9=AA=8C?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 新增单主机URI解析映射,兼容 postgres/postgresql、sqlserver、redis、tdengine、dameng(dm)、kingbase、highgo、vastbase、clickhouse、oracle - 抽取 parseSingleHostUri 复用逻辑,统一 host/port/user/password/database 回填行为 - Oracle 连接新增服务名必填校验,移除“服务名为空回退用户名”的隐式逻辑 - 连接弹窗补充 Oracle 服务名输入项与 URI 示例 --- frontend/src/components/ConnectionModal.tsx | 84 +++++++++++++++++---- internal/db/oracle_impl.go | 9 ++- 2 files changed, 75 insertions(+), 18 deletions(-) diff --git a/frontend/src/components/ConnectionModal.tsx b/frontend/src/components/ConnectionModal.tsx index 5c8ad1b..2b209c8 100644 --- a/frontend/src/components/ConnectionModal.tsx +++ b/frontend/src/components/ConnectionModal.tsx @@ -41,6 +41,19 @@ const getDefaultPortByType = (type: string) => { } }; +const singleHostUriSchemesByType: Record = { + postgres: ['postgresql', 'postgres'], + clickhouse: ['clickhouse'], + oracle: ['oracle'], + sqlserver: ['sqlserver'], + redis: ['redis'], + tdengine: ['tdengine'], + dameng: ['dameng', 'dm'], + kingbase: ['kingbase'], + highgo: ['highgo'], + vastbase: ['vastbase'], +}; + const isFileDatabaseType = (type: string) => type === 'sqlite' || type === 'duckdb'; type DriverStatusSnapshot = { @@ -344,6 +357,41 @@ const ConnectionModal: React.FC<{ }; }; + const parseSingleHostUri = ( + uriText: string, + expectedSchemes: string[], + defaultPort: number, + ): { host: string; port: number; username: string; password: string; database: string } | null => { + let parsed: ReturnType | null = null; + for (const scheme of expectedSchemes) { + parsed = parseMultiHostUri(uriText, scheme); + if (parsed) { + break; + } + } + if (!parsed) { + return null; + } + if (!parsed.hosts.length || parsed.hosts.length > MAX_URI_HOSTS) { + return null; + } + if (parsed.hosts.some((entry) => !isValidUriHostEntry(entry))) { + return null; + } + const hostList = normalizeAddressList(parsed.hosts, defaultPort); + if (!hostList.length) { + return null; + } + const primary = parseHostPort(hostList[0] || `localhost:${defaultPort}`, defaultPort); + return { + host: primary?.host || 'localhost', + port: primary?.port || defaultPort, + username: parsed.username, + password: parsed.password, + database: parsed.database || '', + }; + }; + const parseUriToValues = (uriText: string, type: string): Record | null => { const trimmedUri = String(uriText || '').trim(); if (!trimmedUri) { @@ -441,28 +489,22 @@ const ConnectionModal: React.FC<{ }; } - if (type === 'clickhouse') { - const parsed = parseMultiHostUri(trimmedUri, 'clickhouse'); + const singleHostSchemes = singleHostUriSchemesByType[type]; + if (singleHostSchemes && singleHostSchemes.length > 0) { + const parsed = parseSingleHostUri(trimmedUri, singleHostSchemes, getDefaultPortByType(type)); if (!parsed) { return null; } - if (!parsed.hosts.length || parsed.hosts.length > MAX_URI_HOSTS) { + if (type === 'oracle' && !String(parsed.database || '').trim()) { + // Oracle 需要显式 service name,避免 URI 解析后放过必填校验。 return null; } - if (parsed.hosts.some((entry) => !isValidUriHostEntry(entry))) { - return null; - } - const hostList = 
normalizeAddressList(parsed.hosts, 9000); - if (!hostList.length) { - return null; - } - const primary = parseHostPort(hostList[0] || 'localhost:9000', 9000); return { - host: primary?.host || 'localhost', - port: primary?.port || 9000, + host: parsed.host, + port: parsed.port, user: parsed.username, password: parsed.password, - database: parsed.database || '', + database: parsed.database, }; } @@ -503,6 +545,9 @@ const ConnectionModal: React.FC<{ if (dbType === 'clickhouse') { return 'clickhouse://default:pass@127.0.0.1:9000/default'; } + if (dbType === 'oracle') { + return 'oracle://user:pass@127.0.0.1:1521/ORCLPDB1'; + } return '例如: postgres://user:pass@127.0.0.1:5432/db_name'; }; @@ -1446,6 +1491,17 @@ const ConnectionModal: React.FC<{
)} + {dbType === 'oracle' && ( + + + + )} + {(dbType === 'mysql' || dbType === 'mariadb' || dbType === 'diros' || dbType === 'sphinx') && ( <> diff --git a/internal/db/oracle_impl.go b/internal/db/oracle_impl.go index 43ec441..727e82c 100644 --- a/internal/db/oracle_impl.go +++ b/internal/db/oracle_impl.go @@ -26,10 +26,7 @@ type OracleDB struct { func (o *OracleDB) getDSN(config connection.ConnectionConfig) string { // oracle://user:pass@host:port/service_name - database := config.Database - if database == "" { - database = config.User // Default to user service/schema if empty? - } + database := strings.TrimSpace(config.Database) u := &url.URL{ Scheme: "oracle", @@ -44,6 +41,10 @@ func (o *OracleDB) getDSN(config connection.ConnectionConfig) string { func (o *OracleDB) Connect(config connection.ConnectionConfig) error { var dsn string var err error + serviceName := strings.TrimSpace(config.Database) + if serviceName == "" { + return fmt.Errorf("Oracle 连接缺少服务名(Service Name),请在连接配置中填写,例如 ORCLPDB1") + } if config.UseSSH { // Create SSH tunnel with local port forwarding From 3ca898a95032a2b78ac2ab9bdacb2b31673a168b Mon Sep 17 00:00:00 2001 From: Syngnat Date: Mon, 2 Mar 2026 14:18:44 +0800 Subject: [PATCH 04/48] =?UTF-8?q?=F0=9F=90=9B=20fix(query-export):=20?= =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E6=9F=A5=E8=AF=A2=E7=BB=93=E6=9E=9C=E5=AF=BC?= =?UTF-8?q?=E5=87=BA=E5=8D=A1=E4=BD=8F=E5=B9=B6=E7=BB=9F=E4=B8=80=E6=8C=89?= =?UTF-8?q?=E6=95=B0=E6=8D=AE=E6=BA=90=E8=83=BD=E5=8A=9B=E6=8E=A7=E5=88=B6?= =?UTF-8?q?=E5=AF=BC=E5=87=BA=E8=B7=AF=E5=BE=84?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 查询结果页导出增加稳定兜底,异常时确保 loading 关闭避免持续转圈 - DataGrid 导出逻辑按数据源能力分流,优先走后端 ExportQuery 并保留结果集导出降级 - QueryEditor 传递结果导出 SQL,保证查询结果导出范围与当前结果一致 - 后端补充 ExportData/ExportQuery 关键日志,提升导出链路可观测性 --- .github/workflows/release.yml | 12 +- cmd/optional-driver-agent/main.go | 45 +++++- cmd/optional-driver-agent/main_test.go | 110 +++++++++++++ docs/driver-manifest.json | 2 +- frontend/src/components/DataGrid.tsx | 146 ++++++++++++------ frontend/src/components/DataViewer.tsx | 8 +- frontend/src/components/QueryEditor.tsx | 33 +++- frontend/src/utils/dataSourceCapabilities.ts | 86 +++++++++++ internal/app/methods_driver.go | 4 +- internal/app/methods_file.go | 67 +++++++- internal/app/methods_file_export_test.go | 116 ++++++++++++++ internal/db/clickhouse_impl.go | 11 +- internal/db/dsn_test.go | 26 +++- internal/db/optional_driver_agent_impl.go | 45 +++++- .../db/optional_driver_agent_impl_test.go | 32 ++++ 15 files changed, 672 insertions(+), 71 deletions(-) create mode 100644 frontend/src/utils/dataSourceCapabilities.ts create mode 100644 internal/db/optional_driver_agent_impl_test.go diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 0e0cb32..b373353 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -379,7 +379,7 @@ jobs: - name: List Assets run: ls -R release-assets - - name: Verify DuckDB Driver Assets + - name: Verify Optional Driver Assets shell: bash run: | set -euo pipefail @@ -390,20 +390,24 @@ jobs: "drivers/MacOS/duckdb-driver-agent-darwin-amd64" "drivers/MacOS/duckdb-driver-agent-darwin-arm64" "drivers/Linux/duckdb-driver-agent-linux-amd64" + "drivers/Windows/clickhouse-driver-agent-windows-amd64.exe" + "drivers/MacOS/clickhouse-driver-agent-darwin-amd64" + "drivers/MacOS/clickhouse-driver-agent-darwin-arm64" + "drivers/Linux/clickhouse-driver-agent-linux-amd64" ) missing=0 for file in "${REQUIRED_FILES[@]}"; do if [ ! 
-f "$file" ]; then - echo "❌ 缺少 DuckDB 驱动资产:$file" + echo "❌ 缺少驱动资产:$file" missing=1 else - echo "✅ 已找到 DuckDB 驱动资产:$file" + echo "✅ 已找到驱动资产:$file" fi done if [ "$missing" -ne 0 ]; then - echo "❌ DuckDB 驱动资产不完整,终止发布" + echo "❌ 可选驱动资产不完整,终止发布" exit 1 fi diff --git a/cmd/optional-driver-agent/main.go b/cmd/optional-driver-agent/main.go index 63f6945..4c0c5b9 100644 --- a/cmd/optional-driver-agent/main.go +++ b/cmd/optional-driver-agent/main.go @@ -2,11 +2,13 @@ package main import ( "bufio" + "context" "encoding/json" "fmt" "os" "reflect" "strings" + "time" "GoNavi-Wails/internal/connection" "GoNavi-Wails/internal/db" @@ -17,6 +19,7 @@ type agentRequest struct { Method string `json:"method"` Config *connection.ConnectionConfig `json:"config,omitempty"` Query string `json:"query,omitempty"` + TimeoutMs int64 `json:"timeoutMs,omitempty"` DBName string `json:"dbName,omitempty"` TableName string `json:"tableName,omitempty"` Changes *connection.ChangeSet `json:"changes,omitempty"` @@ -48,6 +51,8 @@ const ( agentMethodApplyChanges = "applyChanges" ) +const legacyClickHouseDefaultTimeout = 2 * time.Hour + var ( agentDriverType string agentDatabaseFactory func() db.Database @@ -138,14 +143,14 @@ func handleRequest(inst *db.Database, req agentRequest) agentResponse { return fail(resp, err.Error()) } case agentMethodQuery: - data, fields, err := (*inst).Query(req.Query) + data, fields, err := queryWithOptionalTimeout(*inst, req.Query, req.TimeoutMs) if err != nil { return fail(resp, err.Error()) } resp.Data = data resp.Fields = fields case agentMethodExec: - affected, err := (*inst).Exec(req.Query) + affected, err := execWithOptionalTimeout(*inst, req.Query, req.TimeoutMs) if err != nil { return fail(resp, err.Error()) } @@ -287,3 +292,39 @@ func normalizeAgentResponseData(v interface{}) interface{} { return v } } + +func queryWithOptionalTimeout(inst db.Database, query string, timeoutMs int64) ([]map[string]interface{}, []string, error) { + effectiveTimeoutMs := timeoutMs + if effectiveTimeoutMs <= 0 && strings.EqualFold(strings.TrimSpace(agentDriverType), "clickhouse") { + effectiveTimeoutMs = int64(legacyClickHouseDefaultTimeout / time.Millisecond) + } + if effectiveTimeoutMs <= 0 { + return inst.Query(query) + } + if q, ok := inst.(interface { + QueryContext(context.Context, string) ([]map[string]interface{}, []string, error) + }); ok { + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(effectiveTimeoutMs)*time.Millisecond) + defer cancel() + return q.QueryContext(ctx, query) + } + return inst.Query(query) +} + +func execWithOptionalTimeout(inst db.Database, query string, timeoutMs int64) (int64, error) { + effectiveTimeoutMs := timeoutMs + if effectiveTimeoutMs <= 0 && strings.EqualFold(strings.TrimSpace(agentDriverType), "clickhouse") { + effectiveTimeoutMs = int64(legacyClickHouseDefaultTimeout / time.Millisecond) + } + if effectiveTimeoutMs <= 0 { + return inst.Exec(query) + } + if e, ok := inst.(interface { + ExecContext(context.Context, string) (int64, error) + }); ok { + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(effectiveTimeoutMs)*time.Millisecond) + defer cancel() + return e.ExecContext(ctx, query) + } + return inst.Exec(query) +} diff --git a/cmd/optional-driver-agent/main_test.go b/cmd/optional-driver-agent/main_test.go index e74c805..016e520 100644 --- a/cmd/optional-driver-agent/main_test.go +++ b/cmd/optional-driver-agent/main_test.go @@ -3,8 +3,13 @@ package main import ( "bufio" "bytes" + "context" "encoding/json" + "errors" 
"testing" + "time" + + "GoNavi-Wails/internal/connection" ) type duckMapLike map[any]any @@ -60,3 +65,108 @@ func TestNormalizeAgentResponseData_KeepByteSlice(t *testing.T) { t.Fatalf("[]byte 内容被意外改写: %v", out) } } + +type fakeAgentTimeoutDB struct { + queryCalled bool + queryContextCalled bool + execCalled bool + execContextCalled bool + deadlineSet bool +} + +func (f *fakeAgentTimeoutDB) Connect(config connection.ConnectionConfig) error { return nil } +func (f *fakeAgentTimeoutDB) Close() error { return nil } +func (f *fakeAgentTimeoutDB) Ping() error { return nil } +func (f *fakeAgentTimeoutDB) Query(query string) ([]map[string]interface{}, []string, error) { + f.queryCalled = true + return nil, nil, errors.New("query should not be called") +} +func (f *fakeAgentTimeoutDB) QueryContext(ctx context.Context, query string) ([]map[string]interface{}, []string, error) { + f.queryContextCalled = true + if _, ok := ctx.Deadline(); ok { + f.deadlineSet = true + } + return []map[string]interface{}{{"ok": 1}}, []string{"ok"}, nil +} +func (f *fakeAgentTimeoutDB) Exec(query string) (int64, error) { + f.execCalled = true + return 0, errors.New("exec should not be called") +} +func (f *fakeAgentTimeoutDB) ExecContext(ctx context.Context, query string) (int64, error) { + f.execContextCalled = true + if _, ok := ctx.Deadline(); ok { + f.deadlineSet = true + } + return 3, nil +} +func (f *fakeAgentTimeoutDB) GetDatabases() ([]string, error) { return nil, nil } +func (f *fakeAgentTimeoutDB) GetTables(dbName string) ([]string, error) { + return nil, nil +} +func (f *fakeAgentTimeoutDB) GetCreateStatement(dbName, tableName string) (string, error) { + return "", nil +} +func (f *fakeAgentTimeoutDB) GetColumns(dbName, tableName string) ([]connection.ColumnDefinition, error) { + return nil, nil +} +func (f *fakeAgentTimeoutDB) GetAllColumns(dbName string) ([]connection.ColumnDefinitionWithTable, error) { + return nil, nil +} +func (f *fakeAgentTimeoutDB) GetIndexes(dbName, tableName string) ([]connection.IndexDefinition, error) { + return nil, nil +} +func (f *fakeAgentTimeoutDB) GetForeignKeys(dbName, tableName string) ([]connection.ForeignKeyDefinition, error) { + return nil, nil +} +func (f *fakeAgentTimeoutDB) GetTriggers(dbName, tableName string) ([]connection.TriggerDefinition, error) { + return nil, nil +} + +func TestQueryWithOptionalTimeout_UsesQueryContext(t *testing.T) { + fake := &fakeAgentTimeoutDB{} + data, fields, err := queryWithOptionalTimeout(fake, "SELECT 1", int64((2 * time.Second).Milliseconds())) + if err != nil { + t.Fatalf("queryWithOptionalTimeout 返回错误: %v", err) + } + if !fake.queryContextCalled || fake.queryCalled { + t.Fatalf("query 调用路径异常,QueryContext=%v Query=%v", fake.queryContextCalled, fake.queryCalled) + } + if !fake.deadlineSet { + t.Fatal("queryWithOptionalTimeout 未设置 deadline") + } + if len(data) != 1 || len(fields) != 1 || fields[0] != "ok" { + t.Fatalf("queryWithOptionalTimeout 返回数据异常: data=%v fields=%v", data, fields) + } +} + +func TestExecWithOptionalTimeout_UsesExecContext(t *testing.T) { + fake := &fakeAgentTimeoutDB{} + affected, err := execWithOptionalTimeout(fake, "DELETE FROM t", int64((2 * time.Second).Milliseconds())) + if err != nil { + t.Fatalf("execWithOptionalTimeout 返回错误: %v", err) + } + if !fake.execContextCalled || fake.execCalled { + t.Fatalf("exec 调用路径异常,ExecContext=%v Exec=%v", fake.execContextCalled, fake.execCalled) + } + if !fake.deadlineSet { + t.Fatal("execWithOptionalTimeout 未设置 deadline") + } + if affected != 3 { + t.Fatalf("受影响行数异常,want=3 
got=%d", affected) + } +} + +func TestQueryWithOptionalTimeout_ClickHouseLegacyModeUsesQueryContext(t *testing.T) { + old := agentDriverType + agentDriverType = "clickhouse" + defer func() { agentDriverType = old }() + + fake := &fakeAgentTimeoutDB{} + _, _, err := queryWithOptionalTimeout(fake, "SELECT 1", 0) + if err != nil { + t.Fatalf("queryWithOptionalTimeout 返回错误: %v", err) + } + if !fake.queryContextCalled || fake.queryCalled { + t.Fatalf("clickhouse legacy query 调用路径异常,QueryContext=%v Query=%v", fake.queryContextCalled, fake.queryCalled) + } +} diff --git a/docs/driver-manifest.json b/docs/driver-manifest.json index 2352ea1..d04fba3 100644 --- a/docs/driver-manifest.json +++ b/docs/driver-manifest.json @@ -75,7 +75,7 @@ }, "clickhouse": { "engine": "go", - "version": "2.43.0", + "version": "2.43.1", "checksumPolicy": "off", "downloadUrl": "builtin://activate/clickhouse" }, diff --git a/frontend/src/components/DataGrid.tsx b/frontend/src/components/DataGrid.tsx index 797a1ab..51f6d2b 100644 --- a/frontend/src/components/DataGrid.tsx +++ b/frontend/src/components/DataGrid.tsx @@ -12,6 +12,7 @@ import { v4 as uuidv4 } from 'uuid'; import 'react-resizable/css/styles.css'; import { buildOrderBySQL, buildWhereSQL, escapeLiteral, quoteIdentPart, quoteQualifiedIdent, withSortBufferTuningSQL, type FilterCondition } from '../utils/sql'; import { isMacLikePlatform, normalizeOpacityForPlatform } from '../utils/appearance'; +import { getDataSourceCapabilities } from '../utils/dataSourceCapabilities'; // --- Error Boundary --- interface DataGridErrorBoundaryState { @@ -302,6 +303,7 @@ const DataContext = React.createContext<{ copyToClipboard: (t: string) => void; tableName?: string; enableRowContextMenu: boolean; + supportsCopyInsert: boolean; } | null>(null); interface Item { @@ -444,7 +446,7 @@ const ContextMenuRow = React.memo(({ children, record, ...props }: any) => { if (!record || !context) return {children}; - const { selectedRowKeysRef, displayDataRef, handleCopyInsert, handleCopyJson, handleCopyCsv, handleExportSelected, copyToClipboard, enableRowContextMenu } = context; + const { selectedRowKeysRef, displayDataRef, handleCopyInsert, handleCopyJson, handleCopyCsv, handleExportSelected, copyToClipboard, enableRowContextMenu, supportsCopyInsert } = context; if (!enableRowContextMenu) { return {children}; @@ -460,12 +462,12 @@ const ContextMenuRow = React.memo(({ children, record, ...props }: any) => { }; const menuItems: MenuProps['items'] = [ - { - key: 'insert', - label: `复制为 INSERT`, - icon: , - onClick: () => handleCopyInsert(record) - }, + ...(supportsCopyInsert ? 
[{ + key: 'insert', + label: '复制为 INSERT', + icon: , + onClick: () => handleCopyInsert(record), + }] : []), { key: 'json', label: '复制为 JSON', icon: , onClick: () => handleCopyJson(record) }, { key: 'csv', label: '复制为 CSV', icon: , onClick: () => handleCopyCsv(record) }, { key: 'copy', label: '复制为 Markdown', icon: , onClick: () => { @@ -502,6 +504,8 @@ interface DataGridProps { columnNames: string[]; loading: boolean; tableName?: string; + exportScope?: 'table' | 'queryResult'; + resultSql?: string; dbName?: string; connectionId?: string; pkColumns?: string[]; @@ -543,7 +547,7 @@ type ColumnMeta = { }; const DataGrid: React.FC = ({ - data, columnNames, loading, tableName, dbName, connectionId, pkColumns = [], readOnly = false, + data, columnNames, loading, tableName, exportScope = 'table', resultSql, dbName, connectionId, pkColumns = [], readOnly = false, onReload, onSort, onPageChange, pagination, onRequestTotalCount, onCancelTotalCount, sortInfoExternal, showFilter, onToggleFilter, onApplyFilter }) => { const connections = useStore(state => state.connections); @@ -559,8 +563,14 @@ const DataGrid: React.FC = ({ const showColumnComment = queryOptions?.showColumnComment !== false; const showColumnType = queryOptions?.showColumnType !== false; const selectionColumnWidth = 46; - const connTypeLower = String(connections.find(c => c.id === connectionId)?.config?.type || '').trim().toLowerCase(); - const isDuckDBConnection = connTypeLower === 'duckdb'; + const currentConnConfig = connections.find(c => c.id === connectionId)?.config; + const dataSourceCaps = getDataSourceCapabilities(currentConnConfig); + const isDuckDBConnection = dataSourceCaps.type === 'duckdb'; + const supportsCopyInsert = dataSourceCaps.supportsCopyInsert; + const supportsSqlQueryExport = dataSourceCaps.supportsSqlQueryExport; + const isQueryResultExport = exportScope === 'queryResult'; + const canImport = exportScope === 'table' && !!tableName; + const canExport = !!connectionId && (isQueryResultExport || !!tableName); // Background Helper const getBg = (darkHex: string) => { @@ -687,11 +697,20 @@ const DataGrid: React.FC = ({ // Helper to export specific data const exportData = async (rows: any[], format: string) => { const hide = message.loading(`正在导出 ${rows.length} 条数据...`, 0); - const cleanRows = rows.map(({ [GONAVI_ROW_KEY]: _rowKey, ...rest }) => rest); - // Pass tableName (or 'export') as default filename - const res = await ExportData(cleanRows, columnNames, tableName || 'export', format); - hide(); - if (res.success) { message.success("导出成功"); } else if (res.message !== "Cancelled") { message.error("导出失败: " + res.message); } + try { + const cleanRows = rows.map(({ [GONAVI_ROW_KEY]: _rowKey, ...rest }) => rest); + // Pass tableName (or 'export') as default filename + const res = await ExportData(cleanRows, columnNames, tableName || 'export', format); + if (res.success) { + message.success("导出成功"); + } else if (res.message !== "Cancelled") { + message.error("导出失败: " + res.message); + } + } catch (e: any) { + message.error("导出失败: " + (e?.message || String(e))); + } finally { + hide(); + } }; const [sortInfo, setSortInfo] = useState<{ columnKey: string, order: string } | null>(null); @@ -2101,6 +2120,10 @@ const DataGrid: React.FC = ({ }, []); const handleCopyInsert = useCallback((record: any) => { + if (!supportsCopyInsert) { + message.warning("当前数据源不支持复制为 INSERT,请使用 JSON/CSV/Markdown 复制。"); + return; + } const records = getTargets(record); const sqls = records.map((r: any) => { const { [GONAVI_ROW_KEY]: _rowKey, 
...vals } = r; @@ -2110,7 +2133,7 @@ const DataGrid: React.FC = ({ return `INSERT INTO \`${targetTable}\` (${cols.map(c => `\`${c}\``).join(', ')}) VALUES (${values.join(', ')});`; }); copyToClipboard(sqls.join('\n')); - }, [tableName, getTargets, copyToClipboard]); + }, [supportsCopyInsert, tableName, getTargets, copyToClipboard]); const handleCopyJson = useCallback((record: any) => { const records = getTargets(record); @@ -2149,12 +2172,17 @@ const DataGrid: React.FC = ({ const config = buildConnConfig(); if (!config) return; const hide = message.loading(`正在导出...`, 0); - const res = await ExportQuery(config as any, dbName || '', sql, defaultName || 'export', format); - hide(); - if (res.success) { - message.success("导出成功"); - } else if (res.message !== "Cancelled") { - message.error("导出失败: " + res.message); + try { + const res = await ExportQuery(config as any, dbName || '', sql, defaultName || 'export', format); + if (res.success) { + message.success("导出成功"); + } else if (res.message !== "Cancelled") { + message.error("导出失败: " + res.message); + } + } catch (e: any) { + message.error("导出失败: " + (e?.message || String(e))); + } finally { + hide(); } }, [buildConnConfig, dbName]); @@ -2198,6 +2226,10 @@ const DataGrid: React.FC = ({ // Context Menu Export const handleExportSelected = useCallback(async (format: string, record: any) => { const records = getTargets(record); + if (isQueryResultExport) { + await exportData(records, format); + return; + } if (!connectionId || !tableName) { await exportData(records, format); return; @@ -2225,11 +2257,11 @@ const DataGrid: React.FC = ({ const sql = `SELECT * FROM ${quoteQualifiedIdent(dbType, tableName)} WHERE ${pkWhere}`; await exportByQuery(sql, format, tableName || 'export'); - }, [getTargets, connectionId, tableName, hasChanges, exportData, buildConnConfig, buildPkWhereSql, exportByQuery]); + }, [getTargets, isQueryResultExport, connectionId, tableName, hasChanges, exportData, buildConnConfig, buildPkWhereSql, exportByQuery]); // Export const handleExport = async (format: string) => { - if (!connectionId || !tableName) return; + if (!connectionId) return; // 1. Export Selected if (selectedRowKeys.length > 0) { @@ -2238,17 +2270,38 @@ const DataGrid: React.FC = ({ return; } + // 查询结果页导出统一按当前结果集(已加载数据)导出,避免再次执行原 SQL 造成大数据导出或长时间阻塞。 + if (isQueryResultExport) { + const sql = String(resultSql || '').trim(); + if (!hasChanges && supportsSqlQueryExport && sql) { + await exportByQuery(sql, format, tableName || 'query_result'); + } else { + await exportData(mergedDisplayData, format); + } + return; + } + // 2. 
Prompt for Current vs All // Using a custom modal content with buttons to handle 3 states let instance: any; const handleAll = async () => { instance.destroy(); + if (!tableName) return; const config = buildConnConfig(); if (!config) return; const hide = message.loading(`正在导出全部数据...`, 0); - const res = await ExportTable(config as any, dbName || '', tableName, format); - hide(); - if (res.success) { message.success("导出成功"); } else if (res.message !== "Cancelled") { message.error("导出失败: " + res.message); } + try { + const res = await ExportTable(config as any, dbName || '', tableName, format); + if (res.success) { + message.success("导出成功"); + } else if (res.message !== "Cancelled") { + message.error("导出失败: " + res.message); + } + } catch (e: any) { + message.error("导出失败: " + (e?.message || String(e))); + } finally { + hide(); + } }; const handlePage = async () => { instance.destroy(); @@ -2411,7 +2464,8 @@ const DataGrid: React.FC = ({ copyToClipboard, tableName, enableRowContextMenu: !canModifyData, - }), [handleCopyCsv, handleCopyInsert, handleCopyJson, handleExportSelected, copyToClipboard, tableName, canModifyData]); + supportsCopyInsert, + }), [handleCopyCsv, handleCopyInsert, handleCopyJson, handleExportSelected, copyToClipboard, tableName, canModifyData, supportsCopyInsert]); const cellContextMenuValue = useMemo(() => ({ showMenu: showCellContextMenu, @@ -2456,8 +2510,8 @@ const DataGrid: React.FC = ({ setSelectedRowKeys([]); onReload(); }}>刷新} - {tableName && } - {tableName && } + {canImport && } + {canExport && } {canModifyData && ( <> @@ -2996,21 +3050,23 @@ const DataGrid: React.FC = ({ 填充到选中行 ({selectedRowKeys.length})
-
e.currentTarget.style.background = darkMode ? '#303030' : '#f5f5f5'} - onMouseLeave={(e) => e.currentTarget.style.background = 'transparent'} - onClick={() => { - if (cellContextMenu.record) handleCopyInsert(cellContextMenu.record); - setCellContextMenu(prev => ({ ...prev, visible: false })); - }} - > - 复制为 INSERT -
+ {supportsCopyInsert && ( +
e.currentTarget.style.background = darkMode ? '#303030' : '#f5f5f5'} + onMouseLeave={(e) => e.currentTarget.style.background = 'transparent'} + onClick={() => { + if (cellContextMenu.record) handleCopyInsert(cellContextMenu.record); + setCellContextMenu(prev => ({ ...prev, visible: false })); + }} + > + 复制为 INSERT +
+ )}
= ({ tab }) => { const [showFilter, setShowFilter] = useState(false); const [filterConditions, setFilterConditions] = useState([]); const duckdbSafeSelectCacheRef = useRef>({}); - const currentConnType = (connections.find(c => c.id === tab.connectionId)?.config?.type || '').toLowerCase(); - const forceReadOnly = currentConnType === 'tdengine' || currentConnType === 'clickhouse'; + const currentConnConfig = connections.find(c => c.id === tab.connectionId)?.config; + const currentConnCaps = getDataSourceCapabilities(currentConnConfig); + const currentConnType = currentConnCaps.type; + const forceReadOnly = currentConnCaps.forceReadOnlyQueryResult; useEffect(() => { setPkColumns([]); @@ -673,6 +676,7 @@ const DataViewer: React.FC<{ tab: TabData }> = ({ tab }) => { columnNames={columnNames} loading={loading} tableName={tab.tableName} + exportScope="table" dbName={tab.dbName} connectionId={tab.connectionId} pkColumns={pkColumns} diff --git a/frontend/src/components/QueryEditor.tsx b/frontend/src/components/QueryEditor.tsx index 347f43e..2d6a36e 100644 --- a/frontend/src/components/QueryEditor.tsx +++ b/frontend/src/components/QueryEditor.tsx @@ -1,4 +1,4 @@ -import React, { useState, useEffect, useRef } from 'react'; +import React, { useState, useEffect, useRef, useMemo } from 'react'; import Editor, { OnMount } from '@monaco-editor/react'; import { Button, message, Modal, Input, Form, Dropdown, MenuProps, Tooltip, Select, Tabs } from 'antd'; import { PlayCircleOutlined, SaveOutlined, FormatPainterOutlined, SettingOutlined, CloseOutlined } from '@ant-design/icons'; @@ -7,6 +7,7 @@ import { TabData, ColumnDefinition } from '../types'; import { useStore } from '../store'; import { DBQuery, DBGetTables, DBGetAllColumns, DBGetDatabases, DBGetColumns } from '../../wailsjs/go/app/App'; import DataGrid, { GONAVI_ROW_KEY } from './DataGrid'; +import { getDataSourceCapabilities } from '../utils/dataSourceCapabilities'; const QueryEditor: React.FC<{ tab: TabData }> = ({ tab }) => { const [query, setQuery] = useState(tab.query || 'SELECT * FROM '); @@ -14,6 +15,7 @@ const QueryEditor: React.FC<{ tab: TabData }> = ({ tab }) => { type ResultSet = { key: string; sql: string; + exportSql?: string; rows: any[]; columns: string[]; tableName?: string; @@ -47,6 +49,10 @@ const QueryEditor: React.FC<{ tab: TabData }> = ({ tab }) => { const visibleDbsRef = useRef([]); // Store visible databases for cross-db intellisense const connections = useStore(state => state.connections); + const queryCapableConnections = useMemo( + () => connections.filter(c => getDataSourceCapabilities(c.config).supportsQueryEditor), + [connections] + ); const addSqlLog = useStore(state => state.addSqlLog); const currentConnectionIdRef = useRef(currentConnectionId); const currentDbRef = useRef(currentDb); @@ -64,6 +70,16 @@ const QueryEditor: React.FC<{ tab: TabData }> = ({ tab }) => { currentConnectionIdRef.current = currentConnectionId; }, [currentConnectionId]); + useEffect(() => { + if (!queryCapableConnections.some(c => c.id === currentConnectionId)) { + const fallback = queryCapableConnections[0]?.id || ''; + if (fallback && fallback !== currentConnectionId) { + setCurrentConnectionId(fallback); + setCurrentDb(''); + } + } + }, [queryCapableConnections, currentConnectionId]); + useEffect(() => { currentDbRef.current = currentDb; }, [currentDb]); @@ -977,6 +993,12 @@ const QueryEditor: React.FC<{ tab: TabData }> = ({ tab }) => { if (runSeqRef.current === runSeq) setLoading(false); return; } + const connCaps = 
getDataSourceCapabilities(conn.config); + if (!connCaps.supportsQueryEditor) { + message.error("当前数据源不支持 SQL 查询编辑器,请使用对应专用页面。"); + if (runSeqRef.current === runSeq) setLoading(false); + return; + } const config = { ...conn.config, @@ -1000,8 +1022,7 @@ const QueryEditor: React.FC<{ tab: TabData }> = ({ tab }) => { const nextResultSets: ResultSet[] = []; const maxRows = Number(queryOptions?.maxRows) || 0; const dbType = String((config as any).type || 'mysql'); - const normalizedDbType = dbType.toLowerCase(); - const forceReadOnlyResult = normalizedDbType === 'tdengine' || normalizedDbType === 'clickhouse'; + const forceReadOnlyResult = connCaps.forceReadOnlyQueryResult; const wantsLimitProbe = Number.isFinite(maxRows) && maxRows > 0; const probeLimit = wantsLimitProbe ? (maxRows + 1) : 0; let anyTruncated = false; @@ -1066,6 +1087,7 @@ const QueryEditor: React.FC<{ tab: TabData }> = ({ tab }) => { nextResultSets.push({ key: `result-${idx + 1}`, sql: rawStatement, + exportSql: limited.applied ? applyAutoLimit(rawStatement, dbType, Math.max(1, Number(maxRows) || 1)).sql : rawStatement, rows, columns: cols, tableName: simpleTableName, @@ -1082,6 +1104,7 @@ const QueryEditor: React.FC<{ tab: TabData }> = ({ tab }) => { nextResultSets.push({ key: `result-${idx + 1}`, sql: rawStatement, + exportSql: rawStatement, rows: [row], columns: ['affectedRows'], pkColumns: [], @@ -1223,7 +1246,7 @@ const QueryEditor: React.FC<{ tab: TabData }> = ({ tab }) => { setCurrentConnectionId(val); setCurrentDb(''); }} - options={connections.map(c => ({ label: c.name, value: c.id }))} + options={queryCapableConnections.map(c => ({ label: c.name, value: c.id }))} showSearch /> + + {redisTopology === 'cluster' && ( + + + + diff --git a/frontend/src/store.ts b/frontend/src/store.ts index beaea1b..4a4b320 100644 --- a/frontend/src/store.ts +++ b/frontend/src/store.ts @@ -199,7 +199,7 @@ const sanitizeConnectionConfig = (value: unknown): ConnectionConfig => { proxy, uri: toTrimmedString(raw.uri).slice(0, MAX_URI_LENGTH), hosts: sanitizeAddressList(raw.hosts), - topology: raw.topology === 'replica' ? 'replica' : 'single', + topology: raw.topology === 'replica' ? 'replica' : (raw.topology === 'cluster' ? 'cluster' : 'single'), mysqlReplicaUser: toTrimmedString(raw.mysqlReplicaUser), mysqlReplicaPassword: savePassword ? 
toTrimmedString(raw.mysqlReplicaPassword) : '', replicaSet: toTrimmedString(raw.replicaSet), diff --git a/frontend/src/types.ts b/frontend/src/types.ts index e8a6cb4..2bc8dac 100644 --- a/frontend/src/types.ts +++ b/frontend/src/types.ts @@ -32,7 +32,7 @@ export interface ConnectionConfig { redisDB?: number; // Redis database index (0-15) uri?: string; // Connection URI for copy/paste hosts?: string[]; // Multi-host addresses: host:port - topology?: 'single' | 'replica'; + topology?: 'single' | 'replica' | 'cluster'; mysqlReplicaUser?: string; mysqlReplicaPassword?: string; replicaSet?: string; diff --git a/internal/app/methods_redis.go b/internal/app/methods_redis.go index e88d79d..1b626b0 100644 --- a/internal/app/methods_redis.go +++ b/internal/app/methods_redis.go @@ -67,24 +67,27 @@ func getRedisClientCacheKey(config connection.ConnectionConfig) string { } func formatRedisConnSummary(config connection.ConnectionConfig) string { - timeoutSeconds := config.Timeout - if timeoutSeconds <= 0 { - timeoutSeconds = 30 - } - var b strings.Builder b.WriteString("类型=redis 地址=") b.WriteString(config.Host) b.WriteString(":") - b.WriteString(string(rune(config.Port + '0'))) + b.WriteString(strconv.Itoa(config.Port)) + if topology := strings.TrimSpace(config.Topology); topology != "" { + b.WriteString(" 模式=") + b.WriteString(topology) + } + if len(config.Hosts) > 0 { + b.WriteString(" 节点数=") + b.WriteString(strconv.Itoa(len(config.Hosts))) + } b.WriteString(" DB=") - b.WriteString(string(rune(config.RedisDB + '0'))) + b.WriteString(strconv.Itoa(config.RedisDB)) if config.UseSSH { b.WriteString(" SSH=") b.WriteString(config.SSH.Host) b.WriteString(":") - b.WriteString(string(rune(config.SSH.Port + '0'))) + b.WriteString(strconv.Itoa(config.SSH.Port)) b.WriteString(" 用户=") b.WriteString(config.SSH.User) } diff --git a/internal/connection/types.go b/internal/connection/types.go index cfc0253..20b4cbb 100644 --- a/internal/connection/types.go +++ b/internal/connection/types.go @@ -37,7 +37,7 @@ type ConnectionConfig struct { RedisDB int `json:"redisDB,omitempty"` // Redis database index (0-15) URI string `json:"uri,omitempty"` // Connection URI for copy/paste Hosts []string `json:"hosts,omitempty"` // Multi-host addresses: host:port - Topology string `json:"topology,omitempty"` // single | replica + Topology string `json:"topology,omitempty"` // single | replica | cluster MySQLReplicaUser string `json:"mysqlReplicaUser,omitempty"` // MySQL replica auth user MySQLReplicaPassword string `json:"mysqlReplicaPassword,omitempty"` // MySQL replica auth password ReplicaSet string `json:"replicaSet,omitempty"` // MongoDB replica set name diff --git a/internal/redis/redis.go b/internal/redis/redis.go index 80e58f6..d9e776b 100644 --- a/internal/redis/redis.go +++ b/internal/redis/redis.go @@ -12,7 +12,7 @@ type RedisValue struct { // RedisDBInfo represents information about a Redis database type RedisDBInfo struct { - Index int `json:"index"` // Database index (0-15) + Index int `json:"index"` // Database index (single: 0-15, cluster: logical 0-15) Keys int64 `json:"keys"` // Number of keys in this database } diff --git a/internal/redis/redis_impl.go b/internal/redis/redis_impl.go index 044f16d..f08b4f5 100644 --- a/internal/redis/redis_impl.go +++ b/internal/redis/redis_impl.go @@ -3,8 +3,10 @@ package redis import ( "context" "fmt" + "net" "strconv" "strings" + "sync" "time" "GoNavi-Wails/internal/connection" @@ -16,10 +18,14 @@ import ( // RedisClientImpl implements RedisClient using go-redis type 
RedisClientImpl struct { - client *redis.Client - config connection.ConnectionConfig - currentDB int - forwarder *ssh.LocalForwarder + client redis.UniversalClient + singleClient *redis.Client + clusterClient *redis.ClusterClient + config connection.ConnectionConfig + currentDB int + isCluster bool + seedAddrs []string + forwarder *ssh.LocalForwarder } const ( @@ -40,14 +46,183 @@ func NewRedisClient() RedisClient { return &RedisClientImpl{} } +func normalizeRedisTimeout(timeoutSeconds int) time.Duration { + if timeoutSeconds <= 0 { + return 30 * time.Second + } + return time.Duration(timeoutSeconds) * time.Second +} + +func normalizeRedisSeedAddress(raw string, defaultPort int) (string, error) { + addr := strings.TrimSpace(raw) + if addr == "" { + return "", fmt.Errorf("Redis 节点地址不能为空") + } + + if _, _, err := net.SplitHostPort(addr); err == nil { + return addr, nil + } + + if !strings.Contains(addr, ":") { + return net.JoinHostPort(addr, strconv.Itoa(defaultPort)), nil + } + + // 尝试兼容 host:port 但端口格式异常的场景。 + host, port, ok := strings.Cut(addr, ":") + if !ok { + return "", fmt.Errorf("无效 Redis 节点地址: %s", addr) + } + host = strings.TrimSpace(host) + port = strings.TrimSpace(port) + if host == "" { + return "", fmt.Errorf("无效 Redis 节点地址: %s", addr) + } + if _, err := strconv.Atoi(port); err != nil { + return "", fmt.Errorf("无效 Redis 端口: %s", addr) + } + return net.JoinHostPort(host, port), nil +} + +func buildRedisSeedAddrs(config connection.ConnectionConfig) ([]string, error) { + defaultPort := config.Port + if defaultPort <= 0 { + defaultPort = 6379 + } + + candidates := make([]string, 0, 1+len(config.Hosts)) + if strings.TrimSpace(config.Host) != "" { + candidates = append(candidates, fmt.Sprintf("%s:%d", strings.TrimSpace(config.Host), defaultPort)) + } + candidates = append(candidates, config.Hosts...) 
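	// Editor's note (not part of the patch): the loop below runs every candidate through
	// normalizeRedisSeedAddress and de-duplicates the results, so overlapping Host/Hosts entries
	// are harmless. With defaultPort 6379 the normalization behaves roughly like:
	//   "10.0.0.1"        -> "10.0.0.1:6379"
	//   "10.0.0.2:7001"   -> "10.0.0.2:7001" (unchanged)
	//   "10.0.0.3:7001:x" -> error (port is not numeric)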
+ + seen := make(map[string]struct{}, len(candidates)) + addrs := make([]string, 0, len(candidates)) + for _, candidate := range candidates { + normalized, err := normalizeRedisSeedAddress(candidate, defaultPort) + if err != nil { + return nil, err + } + if _, exists := seen[normalized]; exists { + continue + } + seen[normalized] = struct{}{} + addrs = append(addrs, normalized) + } + if len(addrs) == 0 { + return nil, fmt.Errorf("Redis 连接地址不能为空") + } + return addrs, nil +} + +func (r *RedisClientImpl) redisNamespacePrefixForDB(index int) string { + if !r.isCluster || index <= 0 { + return "" + } + // Redis Cluster 仅支持物理 db0;这里用固定前缀模拟逻辑库隔离。 + return fmt.Sprintf("__gonavi_db_%d__:", index) +} + +func (r *RedisClientImpl) redisNamespacePrefix() string { + return r.redisNamespacePrefixForDB(r.currentDB) +} + +func (r *RedisClientImpl) toPhysicalKey(key string) string { + trimmed := strings.TrimSpace(key) + if trimmed == "" { + return "" + } + prefix := r.redisNamespacePrefix() + if prefix == "" || strings.HasPrefix(trimmed, prefix) { + return trimmed + } + return prefix + trimmed +} + +func (r *RedisClientImpl) toPhysicalPattern(pattern string) string { + normalized := strings.TrimSpace(pattern) + if normalized == "" { + normalized = "*" + } + prefix := r.redisNamespacePrefix() + if prefix == "" { + return normalized + } + return prefix + normalized +} + +func (r *RedisClientImpl) toPhysicalKeys(keys []string) []string { + if len(keys) == 0 { + return nil + } + result := make([]string, 0, len(keys)) + for _, key := range keys { + physical := r.toPhysicalKey(key) + if physical == "" { + continue + } + result = append(result, physical) + } + return result +} + +func (r *RedisClientImpl) toDisplayKey(key string) string { + prefix := r.redisNamespacePrefix() + if prefix == "" { + return key + } + return strings.TrimPrefix(key, prefix) +} + // Connect establishes a connection to Redis func (r *RedisClientImpl) Connect(config connection.ConnectionConfig) error { r.config = config - r.currentDB = config.RedisDB + if r.config.RedisDB < 0 || r.config.RedisDB > 15 { + r.config.RedisDB = 0 + } + r.currentDB = r.config.RedisDB + r.forwarder = nil + r.client = nil + r.singleClient = nil + r.clusterClient = nil + r.isCluster = false - addr := fmt.Sprintf("%s:%d", config.Host, config.Port) + seedAddrs, err := buildRedisSeedAddrs(config) + if err != nil { + return err + } + r.seedAddrs = append([]string(nil), seedAddrs...) 
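	// Editor's note (not part of the patch): the topology handling below decides which client is built.
	// Roughly: topology == "cluster", or more than one seed address, selects redis.NewClusterClient
	// (physical db0 only, with logical DBs emulated via key prefixes); otherwise a plain redis.NewClient
	// is created with DB set to the requested index. Cluster mode rejects SSH tunnels, presumably because
	// a single local port-forward cannot follow redirects to other cluster nodes.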
- // Handle SSH tunnel if enabled + topology := strings.ToLower(strings.TrimSpace(config.Topology)) + r.isCluster = topology == "cluster" || len(seedAddrs) > 1 + + if r.isCluster && config.UseSSH { + return fmt.Errorf("Redis 集群模式暂不支持 SSH 隧道,请关闭 SSH 后重试") + } + + timeout := normalizeRedisTimeout(config.Timeout) + if r.isCluster { + opts := &redis.ClusterOptions{ + Addrs: seedAddrs, + Username: strings.TrimSpace(config.User), + Password: config.Password, + DialTimeout: timeout, + ReadTimeout: timeout, + WriteTimeout: timeout, + } + clusterClient := redis.NewClusterClient(opts) + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + if err := clusterClient.Ping(ctx).Err(); err != nil { + clusterClient.Close() + return fmt.Errorf("Redis 集群连接失败: %w", err) + } + r.client = clusterClient + r.clusterClient = clusterClient + logger.Infof("Redis 集群连接成功: seeds=%s 逻辑库=db%d", strings.Join(seedAddrs, ","), r.currentDB) + return nil + } + + addr := seedAddrs[0] if config.UseSSH { forwarder, err := ssh.GetOrCreateLocalForwarder(config.SSH, config.Host, config.Port) if err != nil { @@ -60,32 +235,26 @@ func (r *RedisClientImpl) Connect(config connection.ConnectionConfig) error { opts := &redis.Options{ Addr: addr, + Username: strings.TrimSpace(config.User), Password: config.Password, - DB: config.RedisDB, - DialTimeout: time.Duration(config.Timeout) * time.Second, - ReadTimeout: time.Duration(config.Timeout) * time.Second, - WriteTimeout: time.Duration(config.Timeout) * time.Second, + DB: r.currentDB, + DialTimeout: timeout, + ReadTimeout: timeout, + WriteTimeout: timeout, } - if opts.DialTimeout == 0 { - opts.DialTimeout = 30 * time.Second - opts.ReadTimeout = 30 * time.Second - opts.WriteTimeout = 30 * time.Second - } - - r.client = redis.NewClient(opts) - - // Test connection - ctx, cancel := context.WithTimeout(context.Background(), opts.DialTimeout) + singleClient := redis.NewClient(opts) + ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() - if err := r.client.Ping(ctx).Err(); err != nil { - r.client.Close() - r.client = nil + if err := singleClient.Ping(ctx).Err(); err != nil { + singleClient.Close() return fmt.Errorf("Redis 连接失败: %w", err) } - logger.Infof("Redis 连接成功: %s DB=%d", addr, config.RedisDB) + r.client = singleClient + r.singleClient = singleClient + logger.Infof("Redis 连接成功: %s DB=%d", addr, r.currentDB) return nil } @@ -94,6 +263,11 @@ func (r *RedisClientImpl) Close() error { if r.client != nil { err := r.client.Close() r.client = nil + r.singleClient = nil + r.clusterClient = nil + r.isCluster = false + r.seedAddrs = nil + r.forwarder = nil return err } return nil @@ -118,6 +292,7 @@ func (r *RedisClientImpl) ScanKeys(pattern string, cursor uint64, count int64) ( if pattern == "" { pattern = "*" } + physicalPattern := r.toPhysicalPattern(pattern) isSearchPattern := pattern != "*" targetCount := normalizeRedisScanTargetCount(count) @@ -150,7 +325,7 @@ func (r *RedisClientImpl) ScanKeys(pattern string, cursor uint64, count int64) ( break } - batch, nextCursor, err := r.client.Scan(ctx, currentCursor, pattern, scanStepCount).Result() + batch, nextCursor, err := r.client.Scan(ctx, currentCursor, physicalPattern, scanStepCount).Result() if err != nil { return nil, err } @@ -226,7 +401,7 @@ func (r *RedisClientImpl) loadRedisKeyInfos(ctx context.Context, keys []string) ttlValue = -2 } result = append(result, RedisKeyInfo{ - Key: key, + Key: r.toDisplayKey(key), Type: keyType, TTL: toRedisTTLSeconds(ttlValue), }) @@ -236,7 +411,7 
@@ func (r *RedisClientImpl) loadRedisKeyInfos(ctx context.Context, keys []string) for i, key := range keys { result = append(result, RedisKeyInfo{ - Key: key, + Key: r.toDisplayKey(key), Type: typeResults[i].Val(), TTL: toRedisTTLSeconds(ttlResults[i].Val()), }) @@ -261,7 +436,7 @@ func (r *RedisClientImpl) GetKeyType(key string) (string, error) { } ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() - return r.client.Type(ctx, key).Result() + return r.client.Type(ctx, r.toPhysicalKey(key)).Result() } // GetTTL returns the TTL of a key in seconds @@ -272,7 +447,7 @@ func (r *RedisClientImpl) GetTTL(key string) (int64, error) { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() - ttl, err := r.client.TTL(ctx, key).Result() + ttl, err := r.client.TTL(ctx, r.toPhysicalKey(key)).Result() if err != nil { return 0, err } @@ -295,9 +470,9 @@ func (r *RedisClientImpl) SetTTL(key string, ttl int64) error { if ttl < 0 { // Remove expiry - return r.client.Persist(ctx, key).Err() + return r.client.Persist(ctx, r.toPhysicalKey(key)).Err() } - return r.client.Expire(ctx, key, time.Duration(ttl)*time.Second).Err() + return r.client.Expire(ctx, r.toPhysicalKey(key), time.Duration(ttl)*time.Second).Err() } // DeleteKeys deletes one or more keys @@ -307,7 +482,11 @@ func (r *RedisClientImpl) DeleteKeys(keys []string) (int64, error) { } ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() - return r.client.Del(ctx, keys...).Result() + physicalKeys := r.toPhysicalKeys(keys) + if len(physicalKeys) == 0 { + return 0, nil + } + return r.client.Del(ctx, physicalKeys...).Result() } // RenameKey renames a key @@ -317,7 +496,7 @@ func (r *RedisClientImpl) RenameKey(oldKey, newKey string) error { } ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() - return r.client.Rename(ctx, oldKey, newKey).Err() + return r.client.Rename(ctx, r.toPhysicalKey(oldKey), r.toPhysicalKey(newKey)).Err() } // KeyExists checks if a key exists @@ -327,7 +506,7 @@ func (r *RedisClientImpl) KeyExists(key string) (bool, error) { } ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() - n, err := r.client.Exists(ctx, key).Result() + n, err := r.client.Exists(ctx, r.toPhysicalKey(key)).Result() return n > 0, err } @@ -343,6 +522,7 @@ func (r *RedisClientImpl) GetValue(key string) (*RedisValue, error) { } ttl, _ := r.GetTTL(key) + physicalKey := r.toPhysicalKey(key) result := &RedisValue{ Type: keyType, @@ -354,7 +534,7 @@ func (r *RedisClientImpl) GetValue(key string) (*RedisValue, error) { switch keyType { case "string": - val, err := r.client.Get(ctx, key).Result() + val, err := r.client.Get(ctx, physicalKey).Result() if err != nil { return nil, err } @@ -362,7 +542,7 @@ func (r *RedisClientImpl) GetValue(key string) (*RedisValue, error) { result.Length = int64(len(val)) case "hash": - val, err := r.client.HGetAll(ctx, key).Result() + val, err := r.client.HGetAll(ctx, physicalKey).Result() if err != nil { return nil, err } @@ -370,7 +550,7 @@ func (r *RedisClientImpl) GetValue(key string) (*RedisValue, error) { result.Length = int64(len(val)) case "list": - length, err := r.client.LLen(ctx, key).Result() + length, err := r.client.LLen(ctx, physicalKey).Result() if err != nil { return nil, err } @@ -379,7 +559,7 @@ func (r *RedisClientImpl) GetValue(key string) (*RedisValue, error) { if length < limit { limit = length } - val, err := r.client.LRange(ctx, key, 0, 
limit-1).Result() + val, err := r.client.LRange(ctx, physicalKey, 0, limit-1).Result() if err != nil { return nil, err } @@ -387,12 +567,12 @@ func (r *RedisClientImpl) GetValue(key string) (*RedisValue, error) { result.Length = length case "set": - length, err := r.client.SCard(ctx, key).Result() + length, err := r.client.SCard(ctx, physicalKey).Result() if err != nil { return nil, err } // Get members using SMembers (limited by Redis server) - members, err := r.client.SMembers(ctx, key).Result() + members, err := r.client.SMembers(ctx, physicalKey).Result() if err != nil { return nil, err } @@ -400,7 +580,7 @@ func (r *RedisClientImpl) GetValue(key string) (*RedisValue, error) { result.Length = length case "zset": - length, err := r.client.ZCard(ctx, key).Result() + length, err := r.client.ZCard(ctx, physicalKey).Result() if err != nil { return nil, err } @@ -409,7 +589,7 @@ func (r *RedisClientImpl) GetValue(key string) (*RedisValue, error) { if length < limit { limit = length } - val, err := r.client.ZRangeWithScores(ctx, key, 0, limit-1).Result() + val, err := r.client.ZRangeWithScores(ctx, physicalKey, 0, limit-1).Result() if err != nil { return nil, err } @@ -424,7 +604,7 @@ func (r *RedisClientImpl) GetValue(key string) (*RedisValue, error) { result.Length = length case "stream": - length, err := r.client.XLen(ctx, key).Result() + length, err := r.client.XLen(ctx, physicalKey).Result() if err != nil { return nil, err } @@ -437,7 +617,7 @@ func (r *RedisClientImpl) GetValue(key string) (*RedisValue, error) { if length < limit { limit = length } - val, err := r.client.XRangeN(ctx, key, "-", "+", limit).Result() + val, err := r.client.XRangeN(ctx, physicalKey, "-", "+", limit).Result() if err != nil { return nil, err } @@ -457,7 +637,7 @@ func (r *RedisClientImpl) GetString(key string) (string, error) { } ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() - return r.client.Get(ctx, key).Result() + return r.client.Get(ctx, r.toPhysicalKey(key)).Result() } // SetString sets a string value with optional TTL @@ -472,7 +652,7 @@ func (r *RedisClientImpl) SetString(key, value string, ttl int64) error { if ttl > 0 { expiration = time.Duration(ttl) * time.Second } - return r.client.Set(ctx, key, value, expiration).Err() + return r.client.Set(ctx, r.toPhysicalKey(key), value, expiration).Err() } // GetHash gets all fields of a hash @@ -482,7 +662,7 @@ func (r *RedisClientImpl) GetHash(key string) (map[string]string, error) { } ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() - return r.client.HGetAll(ctx, key).Result() + return r.client.HGetAll(ctx, r.toPhysicalKey(key)).Result() } // SetHashField sets a field in a hash @@ -492,7 +672,7 @@ func (r *RedisClientImpl) SetHashField(key, field, value string) error { } ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() - return r.client.HSet(ctx, key, field, value).Err() + return r.client.HSet(ctx, r.toPhysicalKey(key), field, value).Err() } // DeleteHashField deletes fields from a hash @@ -502,7 +682,7 @@ func (r *RedisClientImpl) DeleteHashField(key string, fields ...string) error { } ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() - return r.client.HDel(ctx, key, fields...).Err() + return r.client.HDel(ctx, r.toPhysicalKey(key), fields...).Err() } // GetList gets a range of elements from a list @@ -512,7 +692,7 @@ func (r *RedisClientImpl) GetList(key string, start, stop int64) ([]string, erro } 
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() - return r.client.LRange(ctx, key, start, stop).Result() + return r.client.LRange(ctx, r.toPhysicalKey(key), start, stop).Result() } // ListPush pushes values to the end of a list @@ -526,7 +706,7 @@ func (r *RedisClientImpl) ListPush(key string, values ...string) error { for i, v := range values { args[i] = v } - return r.client.RPush(ctx, key, args...).Err() + return r.client.RPush(ctx, r.toPhysicalKey(key), args...).Err() } // ListSet sets the value at an index in a list @@ -536,7 +716,7 @@ func (r *RedisClientImpl) ListSet(key string, index int64, value string) error { } ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() - return r.client.LSet(ctx, key, index, value).Err() + return r.client.LSet(ctx, r.toPhysicalKey(key), index, value).Err() } // GetSet gets all members of a set @@ -546,7 +726,7 @@ func (r *RedisClientImpl) GetSet(key string) ([]string, error) { } ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() - return r.client.SMembers(ctx, key).Result() + return r.client.SMembers(ctx, r.toPhysicalKey(key)).Result() } // SetAdd adds members to a set @@ -560,7 +740,7 @@ func (r *RedisClientImpl) SetAdd(key string, members ...string) error { for i, m := range members { args[i] = m } - return r.client.SAdd(ctx, key, args...).Err() + return r.client.SAdd(ctx, r.toPhysicalKey(key), args...).Err() } // SetRemove removes members from a set @@ -574,7 +754,7 @@ func (r *RedisClientImpl) SetRemove(key string, members ...string) error { for i, m := range members { args[i] = m } - return r.client.SRem(ctx, key, args...).Err() + return r.client.SRem(ctx, r.toPhysicalKey(key), args...).Err() } // GetZSet gets members with scores from a sorted set @@ -585,7 +765,7 @@ func (r *RedisClientImpl) GetZSet(key string, start, stop int64) ([]ZSetMember, ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() - val, err := r.client.ZRangeWithScores(ctx, key, start, stop).Result() + val, err := r.client.ZRangeWithScores(ctx, r.toPhysicalKey(key), start, stop).Result() if err != nil { return nil, err } @@ -615,7 +795,7 @@ func (r *RedisClientImpl) ZSetAdd(key string, members ...ZSetMember) error { Member: m.Member, } } - return r.client.ZAdd(ctx, key, zMembers...).Err() + return r.client.ZAdd(ctx, r.toPhysicalKey(key), zMembers...).Err() } // ZSetRemove removes members from a sorted set @@ -629,7 +809,7 @@ func (r *RedisClientImpl) ZSetRemove(key string, members ...string) error { for i, m := range members { args[i] = m } - return r.client.ZRem(ctx, key, args...).Err() + return r.client.ZRem(ctx, r.toPhysicalKey(key), args...).Err() } // GetStream gets stream entries in a range @@ -650,7 +830,7 @@ func (r *RedisClientImpl) GetStream(key, start, stop string, count int64) ([]Str ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() - val, err := r.client.XRangeN(ctx, key, start, stop, count).Result() + val, err := r.client.XRangeN(ctx, r.toPhysicalKey(key), start, stop, count).Result() if err != nil { return nil, err } @@ -678,7 +858,7 @@ func (r *RedisClientImpl) StreamAdd(key string, fields map[string]string, id str defer cancel() newID, err := r.client.XAdd(ctx, &redis.XAddArgs{ - Stream: key, + Stream: r.toPhysicalKey(key), ID: id, Values: values, }).Result() @@ -699,7 +879,7 @@ func (r *RedisClientImpl) StreamDelete(key string, ids ...string) (int64, error) ctx, cancel := 
context.WithTimeout(context.Background(), 5*time.Second) defer cancel() - return r.client.XDel(ctx, key, ids...).Result() + return r.client.XDel(ctx, r.toPhysicalKey(key), ids...).Result() } func toStreamEntries(messages []redis.XMessage) []StreamEntry { @@ -717,6 +897,72 @@ func toStreamEntries(messages []redis.XMessage) []StreamEntry { return entries } +func parseRedisCommandGetKeysResult(result interface{}) []string { + items, ok := result.([]interface{}) + if !ok || len(items) == 0 { + return nil + } + keys := make([]string, 0, len(items)) + for _, item := range items { + switch v := item.(type) { + case string: + if v != "" { + keys = append(keys, v) + } + case []byte: + text := string(v) + if text != "" { + keys = append(keys, text) + } + } + } + return keys +} + +func (r *RedisClientImpl) rewriteCommandArgsForNamespace(ctx context.Context, args []string) []string { + if !r.isCluster || r.currentDB <= 0 || len(args) == 0 { + return args + } + + command := strings.ToUpper(strings.TrimSpace(args[0])) + if command == "COMMAND" || command == "SELECT" || command == "FLUSHDB" { + return args + } + + probeArgs := make([]interface{}, 0, len(args)+2) + probeArgs = append(probeArgs, "COMMAND", "GETKEYS") + for _, arg := range args { + probeArgs = append(probeArgs, arg) + } + + result, err := r.client.Do(ctx, probeArgs...).Result() + if err != nil { + return args + } + + keyCandidates := parseRedisCommandGetKeysResult(result) + if len(keyCandidates) == 0 { + return args + } + + rewritten := append([]string(nil), args...) + used := make([]bool, len(rewritten)) + for _, key := range keyCandidates { + for i := 1; i < len(rewritten); i++ { + if used[i] { + continue + } + if rewritten[i] != key { + continue + } + rewritten[i] = r.toPhysicalKey(rewritten[i]) + used[i] = true + break + } + } + return rewritten +} + // ExecuteCommand executes a raw Redis command func (r *RedisClientImpl) ExecuteCommand(args []string) (interface{}, error) { if r.client == nil { @@ -729,6 +975,33 @@ func (r *RedisClientImpl) ExecuteCommand(args []string) (interface{}, error) { ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() + if r.isCluster { + command := strings.ToUpper(strings.TrimSpace(args[0])) + switch command { + case "SELECT": + if len(args) < 2 { + return nil, fmt.Errorf("SELECT 命令缺少数据库索引") + } + index, err := strconv.Atoi(strings.TrimSpace(args[1])) + if err != nil { + return nil, fmt.Errorf("无效数据库索引: %s", args[1]) + } + if index < 0 || index > 15 { + return nil, fmt.Errorf("数据库索引必须在 0-15 之间") + } + r.currentDB = index + r.config.RedisDB = index + return "OK", nil + case "FLUSHDB": + if err := r.FlushDB(); err != nil { + return nil, err + } + return "OK", nil + } + } + + args = r.rewriteCommandArgsForNamespace(ctx, args) + // Convert to []interface{} cmdArgs := make([]interface{}, len(args)) for i, arg := range args { @@ -795,6 +1068,31 @@ func (r *RedisClientImpl) GetDatabases() ([]RedisDBInfo, error) { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() + if r.isCluster && r.clusterClient != nil { + var totalKeys int64 + var mu sync.Mutex + err := r.clusterClient.ForEachMaster(ctx, func(nodeCtx context.Context, node *redis.Client) error { + keys, err := node.DBSize(nodeCtx).Result() + if err != nil { + return err + } + mu.Lock() + totalKeys += keys + mu.Unlock() + return nil + }) + if err != nil { + logger.Warnf("Redis 集群获取 key 数量失败,回退为 0: %v", err) + totalKeys = 0 + } + result := make([]RedisDBInfo, 16) + for i := 0; i < 16; i++ { + 
result[i] = RedisDBInfo{Index: i, Keys: 0} + } + result[0].Keys = totalKeys + return result, nil + } + // Get keyspace info info, err := r.client.Info(ctx, "keyspace").Result() if err != nil { @@ -845,34 +1143,47 @@ func (r *RedisClientImpl) SelectDB(index int) error { if r.client == nil { return fmt.Errorf("Redis 客户端未连接") } + + if r.isCluster { + if index < 0 || index > 15 { + return fmt.Errorf("数据库索引必须在 0-15 之间") + } + r.currentDB = index + r.config.RedisDB = index + return nil + } + if index < 0 || index > 15 { return fmt.Errorf("数据库索引必须在 0-15 之间") } // Create new client with different DB - addr := fmt.Sprintf("%s:%d", r.config.Host, r.config.Port) + addr := "" + if len(r.seedAddrs) > 0 { + addr = r.seedAddrs[0] + } if r.forwarder != nil { addr = r.forwarder.LocalAddr } + if addr == "" { + addr = fmt.Sprintf("%s:%d", r.config.Host, r.config.Port) + } + + timeout := normalizeRedisTimeout(r.config.Timeout) opts := &redis.Options{ Addr: addr, + Username: strings.TrimSpace(r.config.User), Password: r.config.Password, DB: index, - DialTimeout: time.Duration(r.config.Timeout) * time.Second, - ReadTimeout: time.Duration(r.config.Timeout) * time.Second, - WriteTimeout: time.Duration(r.config.Timeout) * time.Second, - } - - if opts.DialTimeout == 0 { - opts.DialTimeout = 30 * time.Second - opts.ReadTimeout = 30 * time.Second - opts.WriteTimeout = 30 * time.Second + DialTimeout: timeout, + ReadTimeout: timeout, + WriteTimeout: timeout, } newClient := redis.NewClient(opts) - ctx, cancel := context.WithTimeout(context.Background(), opts.DialTimeout) + ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() if err := newClient.Ping(ctx).Err(); err != nil { @@ -881,9 +1192,14 @@ func (r *RedisClientImpl) SelectDB(index int) error { } // Close old client and replace - r.client.Close() + if r.client != nil { + _ = r.client.Close() + } r.client = newClient + r.singleClient = newClient + r.clusterClient = nil r.currentDB = index + r.config.RedisDB = index logger.Infof("Redis 切换到数据库: db%d", index) return nil @@ -899,6 +1215,63 @@ func (r *RedisClientImpl) FlushDB() error { if r.client == nil { return fmt.Errorf("Redis 客户端未连接") } + + if r.isCluster && r.clusterClient != nil { + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) + defer cancel() + + namespacePrefix := r.redisNamespacePrefix() + var deletedTotal int64 + var deletedMu sync.Mutex + + err := r.clusterClient.ForEachMaster(ctx, func(nodeCtx context.Context, node *redis.Client) error { + var cursor uint64 + for { + pattern := "*" + if namespacePrefix != "" { + pattern = namespacePrefix + "*" + } + keys, nextCursor, err := node.Scan(nodeCtx, cursor, pattern, 2000).Result() + if err != nil { + return err + } + + if namespacePrefix == "" { + filtered := keys[:0] + for _, key := range keys { + // db0 保留兼容:不删除逻辑库前缀 key,避免误清理 db1~db15。 + if strings.HasPrefix(key, "__gonavi_db_") { + continue + } + filtered = append(filtered, key) + } + keys = filtered + } + + if len(keys) > 0 { + deleted, err := node.Del(nodeCtx, keys...).Result() + if err != nil { + return err + } + deletedMu.Lock() + deletedTotal += deleted + deletedMu.Unlock() + } + + cursor = nextCursor + if cursor == 0 { + break + } + } + return nil + }) + if err != nil { + return err + } + logger.Infof("Redis 集群逻辑库清空完成: db%d deleted=%d", r.currentDB, deletedTotal) + return nil + } + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() return r.client.FlushDB(ctx).Err() From e76e174bfe50185f6f68fcf909d63a516f9748e7 Mon 
Sep 17 00:00:00 2001 From: Syngnat Date: Tue, 3 Mar 2026 13:49:31 +0800 Subject: [PATCH 11/48] =?UTF-8?q?=E2=9C=A8=20feat(DataGrid):=20=E5=A4=A7?= =?UTF-8?q?=E6=95=B0=E6=8D=AE=E8=A1=A8=E8=99=9A=E6=8B=9F=E6=BB=9A=E5=8A=A8?= =?UTF-8?q?=E6=80=A7=E8=83=BD=E4=BC=98=E5=8C=96=E5=8F=8AUI=E4=B8=80?= =?UTF-8?q?=E8=87=B4=E6=80=A7=E4=BF=AE=E5=A4=8D?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 启用动态虚拟滚动(数据量≥500行自动切换),解决万行数据表卡顿问题 - 虚拟模式下EditableCell改用div渲染,CSS选择器从元素级改为类级适配虚拟DOM - 修复虚拟模式双水平滚动条:样式化rc-virtual-list内置滚动条为胶囊外观,禁用自定义外部滚动条 - 为rc-virtual-list水平滚动条添加鼠标滚轮支持(MutationObserver + marginLeft驱动) - 修复白色主题透明模式下列名悬浮Tooltip对比度不足的问题 - 新增白色主题全局滚动条样式适配透明模式(App.css) - App.tsx主题token与组件样式优化 - refs #147 --- frontend/src/App.css | 29 +- frontend/src/App.tsx | 99 +++- frontend/src/components/DataGrid.tsx | 783 ++++++++++++++++++++----- frontend/src/components/TabManager.tsx | 94 +-- frontend/src/store.ts | 35 ++ 5 files changed, 844 insertions(+), 196 deletions(-) diff --git a/frontend/src/App.css b/frontend/src/App.css index 72a84c1..e91f7e7 100644 --- a/frontend/src/App.css +++ b/frontend/src/App.css @@ -57,6 +57,29 @@ body[data-theme='dark'] ::-webkit-scrollbar-thumb:hover { background: #666; } +/* Scrollbar styling for light mode (transparent-friendly) */ +body[data-theme='light'] ::-webkit-scrollbar { + width: 10px; + height: 10px; +} +body[data-theme='light'] ::-webkit-scrollbar-track { + background: transparent; +} +body[data-theme='light'] ::-webkit-scrollbar-corner { + background: transparent; +} +body[data-theme='light'] ::-webkit-scrollbar-thumb { + background: rgba(0, 0, 0, 0.18); + border-radius: 4px; + border: 2px solid transparent; + background-clip: content-box; +} +body[data-theme='light'] ::-webkit-scrollbar-thumb:hover { + background: rgba(0, 0, 0, 0.30); + border: 2px solid transparent; + background-clip: content-box; +} + /* Ensure body background matches theme to avoid white flashes, but kept transparent for window composition */ body { transition: color 0.3s; @@ -102,11 +125,13 @@ body[data-theme='dark'] .ant-switch.ant-switch-checked { background: #d8a93b !important; } -body[data-theme='dark'] .ant-table-tbody > tr.ant-table-row-selected > td { +body[data-theme='dark'] .ant-table-tbody > tr.ant-table-row-selected > td, +body[data-theme='dark'] .ant-table-tbody .ant-table-row.ant-table-row-selected > .ant-table-cell { background: rgba(246, 196, 83, 0.18) !important; } -body[data-theme='dark'] .ant-table-tbody > tr.ant-table-row-selected:hover > td { +body[data-theme='dark'] .ant-table-tbody > tr.ant-table-row-selected:hover > td, +body[data-theme='dark'] .ant-table-tbody .ant-table-row.ant-table-row-selected:hover > .ant-table-cell { background: rgba(246, 196, 83, 0.26) !important; } diff --git a/frontend/src/App.tsx b/frontend/src/App.tsx index e97f1c7..ac328a7 100644 --- a/frontend/src/App.tsx +++ b/frontend/src/App.tsx @@ -16,6 +16,12 @@ import { ConfigureGlobalProxy, SetWindowTranslucency } from '../wailsjs/go/app/A import './App.css'; const { Sider, Content } = Layout; +const MIN_UI_SCALE = 0.8; +const MAX_UI_SCALE = 1.25; +const MIN_FONT_SIZE = 12; +const MAX_FONT_SIZE = 20; +const DEFAULT_UI_SCALE = 1.0; +const DEFAULT_FONT_SIZE = 14; function App() { const [isModalOpen, setIsModalOpen] = useState(false); @@ -26,11 +32,28 @@ function App() { const setTheme = useStore(state => state.setTheme); const appearance = useStore(state => state.appearance); const setAppearance = useStore(state => state.setAppearance); + const uiScale = 
useStore(state => state.uiScale); + const setUiScale = useStore(state => state.setUiScale); + const fontSize = useStore(state => state.fontSize); + const setFontSize = useStore(state => state.setFontSize); const startupFullscreen = useStore(state => state.startupFullscreen); const setStartupFullscreen = useStore(state => state.setStartupFullscreen); const globalProxy = useStore(state => state.globalProxy); const setGlobalProxy = useStore(state => state.setGlobalProxy); const darkMode = themeMode === 'dark'; + const effectiveUiScale = Math.min(MAX_UI_SCALE, Math.max(MIN_UI_SCALE, Number(uiScale) || DEFAULT_UI_SCALE)); + const effectiveFontSize = Math.min(MAX_FONT_SIZE, Math.max(MIN_FONT_SIZE, Math.round(Number(fontSize) || DEFAULT_FONT_SIZE))); + const tokenFontSize = Math.round(effectiveFontSize * effectiveUiScale); + const tokenFontSizeSM = Math.max(10, Math.round(tokenFontSize * 0.86)); + const tokenFontSizeLG = Math.max(tokenFontSize + 1, Math.round(tokenFontSize * 1.14)); + const tokenControlHeight = Math.max(24, Math.round(32 * effectiveUiScale)); + const tokenControlHeightSM = Math.max(20, Math.round(24 * effectiveUiScale)); + const tokenControlHeightLG = Math.max(30, Math.round(40 * effectiveUiScale)); + const appComponentSize: 'small' | 'middle' | 'large' = effectiveUiScale <= 0.92 ? 'small' : (effectiveUiScale >= 1.12 ? 'large' : 'middle'); + const titleBarHeight = Math.max(28, Math.round(32 * effectiveUiScale)); + const toolbarHeight = Math.max(32, Math.round(36 * effectiveUiScale)); + const titleBarButtonWidth = Math.max(40, Math.round(46 * effectiveUiScale)); + const floatingLogButtonHeight = Math.max(30, Math.round(34 * effectiveUiScale)); const effectiveOpacity = normalizeOpacityForPlatform(appearance.opacity); const effectiveBlur = normalizeBlurForPlatform(appearance.blur); const blurFilter = blurToFilter(effectiveBlur); @@ -834,7 +857,9 @@ function App() { document.body.style.backgroundColor = 'transparent'; document.body.style.color = darkMode ? '#ffffff' : '#000000'; document.body.setAttribute('data-theme', darkMode ? 'dark' : 'light'); - }, [darkMode]); + document.body.style.fontSize = `${effectiveFontSize}px`; + document.documentElement.style.setProperty('--gonavi-font-size', `${effectiveFontSize}px`); + }, [darkMode, effectiveFontSize]); useEffect(() => { isAboutOpenRef.current = isAboutOpen; @@ -916,9 +941,16 @@ function App() { return ( -
+
{/* Logo can be added here if available */} GoNavi
@@ -1007,13 +1040,13 @@ function App() {
@@ -1029,13 +1062,13 @@ function App() {
setIsLogPanelOpen(!isLogPanelOpen)} style={isLogPanelOpen ? { width: '100%', - height: 34, + height: floatingLogButtonHeight, borderRadius: 999, boxShadow: floatingLogButtonShadow, pointerEvents: 'auto' } : { width: '100%', - height: 34, + height: floatingLogButtonHeight, borderRadius: 999, border: `1px solid ${floatingLogButtonBorderColor}`, color: floatingLogButtonTextColor, @@ -1216,6 +1249,37 @@ function App() { width={460} >
+
+
界面缩放 (UI Scale)
+
+ setUiScale(Number(v))} + style={{ flex: 1 }} + /> + {Math.round(effectiveUiScale * 100)}% +
+
+ * 建议小屏设备设置为 85%-95% +
+
+
+
基础字体大小 (Font Size)
+
+ setFontSize(Number(v))} + style={{ flex: 1 }} + /> + {effectiveFontSize}px +
+
背景不透明度 (Opacity)
@@ -1264,6 +1328,17 @@ function App() { * 修改后下次启动生效
+
+ +
diff --git a/frontend/src/components/DataGrid.tsx b/frontend/src/components/DataGrid.tsx index 012c2bb..05ccbba 100644 --- a/frontend/src/components/DataGrid.tsx +++ b/frontend/src/components/DataGrid.tsx @@ -65,6 +65,10 @@ export const GONAVI_ROW_KEY = '__gonavi_row_key__'; // Cell key helpers for batch selection/fill. // Use a control character separator to avoid collisions with rowKey/columnName contents (e.g. `new-123`). const CELL_KEY_SEP = '\u0001'; +const DATE_TIME_CACHE_LIMIT = 2000; +const TABLE_CELL_PREVIEW_MAX_CHARS = 240; +const normalizedDateTimeCache = new Map(); +const objectCellPreviewCache = new WeakMap(); const makeCellKey = (rowKey: string, colName: string) => `${rowKey}${CELL_KEY_SEP}${colName}`; const splitCellKey = (cellKey: string): { rowKey: string; colName: string } | null => { const sepIndex = cellKey.indexOf(CELL_KEY_SEP); @@ -75,10 +79,42 @@ const splitCellKey = (cellKey: string): { rowKey: string; colName: string } | nu }; }; +const trimSimpleCache = (cache: Map, limit: number) => { + if (cache.size < limit) return; + const firstKey = cache.keys().next().value; + if (typeof firstKey === 'string') { + cache.delete(firstKey); + } +}; + +const looksLikeDateTimeText = (val: string): boolean => { + if (!val) return false; + const len = val.length; + if (len < 19 || len > 48) return false; + const charCode0 = val.charCodeAt(0); + if (charCode0 < 48 || charCode0 > 57) return false; + return ( + val[4] === '-' && + val[7] === '-' && + (val[10] === ' ' || val[10] === 'T') && + val[13] === ':' && + val[16] === ':' + ); +}; + // Normalize common datetime strings to `YYYY-MM-DD HH:mm:ss` for display/editing. // Handles RFC3339 and Go-style datetime text like `2024-05-13 08:32:47 +0800 CST`. // Also keep invalid datetime values like `0000-00-00 00:00:00` unchanged. const normalizeDateTimeString = (val: string) => { + if (!looksLikeDateTimeText(val)) { + return val; + } + + const cached = normalizedDateTimeCache.get(val); + if (cached !== undefined) { + return cached; + } + // 检查是否为无效日期时间(0000-00-00 或类似格式) if (/^0{4}-0{2}-0{2}/.test(val)) { return val; // 保持原样显示,不尝试转换 @@ -87,8 +123,10 @@ const normalizeDateTimeString = (val: string) => { const match = val.match( /^(\d{4}-\d{2}-\d{2})[T ](\d{2}:\d{2}:\d{2})(?:\.\d+)?(?:\s*(?:Z|[+-]\d{2}:?\d{2})(?:\s+[A-Za-z_\/+-]+)?)?$/ ); - if (!match) return val; - return `${match[1]} ${match[2]}`; + const normalized = match ? `${match[1]} ${match[2]}` : val; + trimSimpleCache(normalizedDateTimeCache, DATE_TIME_CACHE_LIMIT); + normalizedDateTimeCache.set(val, normalized); + return normalized; }; const isTemporalColumnType = (columnType?: string): boolean => { @@ -104,14 +142,22 @@ const formatCellValue = (val: any) => { try { if (val === null) return NULL; if (typeof val === 'object') { + const cached = objectCellPreviewCache.get(val); + if (cached !== undefined) { + return cached; + } try { - return JSON.stringify(val); + const nextText = JSON.stringify(val); + const previewText = nextText.length > TABLE_CELL_PREVIEW_MAX_CHARS ? `${nextText.slice(0, TABLE_CELL_PREVIEW_MAX_CHARS)}…` : nextText; + objectCellPreviewCache.set(val, previewText); + return previewText; } catch { return '[Object]'; } } if (typeof val === 'string') { - return normalizeDateTimeString(val); + const normalized = normalizeDateTimeString(val); + return normalized.length > TABLE_CELL_PREVIEW_MAX_CHARS ? 
`${normalized.slice(0, TABLE_CELL_PREVIEW_MAX_CHARS)}…` : normalized; } return String(val); } catch (e) { @@ -138,6 +184,7 @@ const toFormText = (val: any): string => { // 用于变更比较:NULL 与 undefined 视为同类空值;与空字符串严格区分。 const isCellValueEqualForDiff = (left: any, right: any): boolean => { + if (left === right) return true; const leftNullish = left === null || left === undefined; const rightNullish = right === null || right === undefined; if (leftNullish || rightNullish) return leftNullish && rightNullish; @@ -318,6 +365,7 @@ interface EditableCellProps { record: Item; handleSave: (record: Item) => void; focusCell?: (record: Item, dataIndex: string, title: React.ReactNode) => void; + as?: any; [key: string]: any; } @@ -329,6 +377,7 @@ const EditableCell: React.FC = React.memo(({ record, handleSave, focusCell, + as: Component = 'td', ...restProps }) => { const [editing, setEditing] = useState(false); @@ -430,14 +479,14 @@ const EditableCell: React.FC = React.memo(({ }; return ( - {childNode} - + ); }); @@ -596,6 +645,31 @@ const DataGrid: React.FC = ({ const darkHighlightTextColor = 'rgba(255, 236, 179, 0.98)'; const lightMetaHintColor = '#595959'; const lightMetaTooltipColor = '#262626'; + const panelRadius = 10; + const panelOuterGap = 6; + const panelPaddingY = 10; + const panelPaddingX = 12; + const toolbarBottomPadding = 6; + const filterTopPadding = 2; + const panelBorderColor = darkMode ? 'rgba(255, 255, 255, 0.08)' : 'rgba(0, 0, 0, 0.08)'; + const panelFrameColor = darkMode ? 'rgba(0, 0, 0, 0.42)' : 'rgba(0, 0, 0, 0.18)'; + const floatingScrollbarGap = 6; + const floatingScrollbarInset = 10; + const floatingScrollbarHeight = 10; + const floatingScrollbarTrackBg = 'transparent'; + const floatingScrollbarBorderColor = 'transparent'; + const floatingScrollbarShadow = 'none'; + const floatingScrollbarThumbBg = darkMode ? 'rgba(255,255,255,0.34)' : 'rgba(0,0,0,0.22)'; + const floatingScrollbarThumbBorderColor = darkMode ? 'rgba(255,255,255,0.10)' : 'rgba(255,255,255,0.32)'; + const floatingScrollbarThumbShadow = darkMode ? '0 4px 12px rgba(0,0,0,0.28)' : '0 4px 10px rgba(0,0,0,0.12)'; + const horizontalScrollbarTrackBg = 'transparent'; + const horizontalScrollbarTrackBorderColor = 'transparent'; + const horizontalScrollbarTrackShadow = 'none'; + const horizontalScrollbarThumbBg = darkMode ? 'rgba(255,255,255,0.20)' : 'rgba(0,0,0,0.14)'; + const horizontalScrollbarThumbBorderColor = 'transparent'; + const horizontalScrollbarThumbShadow = 'none'; + const externalScrollbarMinWidth = 1; + const toolbarDividerColor = darkMode ? 'rgba(255, 255, 255, 0.12)' : 'rgba(0, 0, 0, 0.10)'; const columnMetaHintColor = darkMode ? darkHighlightTextColor : lightMetaHintColor; const columnMetaTooltipColor = darkMode ? darkHighlightTextColor : lightMetaTooltipColor; @@ -635,6 +709,12 @@ const DataGrid: React.FC = ({ title: '', }); const containerRef = useRef(null); + const tableContainerRef = useRef(null); + const tableScrollTargetsRef = useRef([]); + const externalHScrollRef = useRef(null); + const horizontalSyncSourceRef = useRef<'table' | 'external' | ''>(''); + const lastTableScrollLeftRef = useRef(0); + const lastExternalScrollLeftRef = useRef(0); const pendingScrollToBottomRef = useRef(false); // 批量编辑模式状态 @@ -885,8 +965,9 @@ const DataGrid: React.FC = ({ if (hoverLines.length === 0) return titleNode; return ( {hoverLines.join('\n')}} + title={
{hoverLines.join('\n')}
} styles={{ root: { maxWidth: 640 } }} + {...(!darkMode ? { color: 'rgba(0, 0, 0, 0.82)' } : {})} > {titleNode}
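The DataGrid changes above memoize datetime normalization behind a size-capped Map and truncate long object/text previews to 240 characters before rendering. The cap relies on Map's insertion order for simple FIFO eviction rather than a full LRU. A standalone sketch of that bounded-memo pattern, with illustrative names rather than the exact DataGrid code:

// Bounded memo for per-cell string formatting; Map preserves insertion order,
// so evicting the first key gives cheap FIFO behaviour (no recency tracking).
const CACHE_LIMIT = 2000;
const formatCache = new Map<string, string>();

export function memoizedFormat(raw: string, format: (s: string) => string): string {
  const hit = formatCache.get(raw);
  if (hit !== undefined) return hit;
  if (formatCache.size >= CACHE_LIMIT) {
    const oldest = formatCache.keys().next().value;
    if (oldest !== undefined) formatCache.delete(oldest);
  }
  const result = format(raw);
  formatCache.set(raw, result);
  return result;
}

// e.g. memoizedFormat('2024-05-13T08:32:47+08:00', s => s.replace('T', ' ').slice(0, 19))
// returns '2024-05-13 08:32:47' and serves repeats of the same cell text from the cache.

Lookups do not refresh recency, so this is deliberately cheaper than an LRU; for render-time formatting of mostly repeating cell text that trade-off is generally sufficient.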
@@ -938,23 +1019,19 @@ const DataGrid: React.FC = ({ Number.isFinite(rawHeaderHeight) && rawHeaderHeight >= 24 && rawHeaderHeight <= 120 ? rawHeaderHeight : 42; const bodyEl = target.querySelector('.ant-table-body') as HTMLElement | null; - const stickyScrollEl = target.querySelector('.ant-table-sticky-scroll') as HTMLElement | null; - const hasHorizontalOverflow = !!bodyEl && (bodyEl.scrollWidth - bodyEl.clientWidth > 1); - const nativeHorizontalScrollbarHeight = bodyEl ? Math.max(0, Math.ceil(bodyEl.offsetHeight - bodyEl.clientHeight)) : 0; - const stickyScrollHeight = stickyScrollEl ? Math.ceil(stickyScrollEl.getBoundingClientRect().height) : 0; - // 动态为横向滚动条(含 sticky 条)预留空间,避免最后一行被遮住。 - const horizontalReserve = hasHorizontalOverflow - ? Math.max(nativeHorizontalScrollbarHeight, stickyScrollHeight, 14) - : Math.max(nativeHorizontalScrollbarHeight, 0); - // sticky 横向滚动条会覆盖在表格底部,额外给 body 增加内边距,确保最后一行完整可见。 + const virtualHolderEl = target.querySelector('.rc-virtual-list-holder') as HTMLElement | null; + const scrollableEl = virtualHolderEl || bodyEl; + const hasHorizontalOverflow = !!scrollableEl && (scrollableEl.scrollWidth - scrollableEl.clientWidth > 1); + // 外部横向滚动条采用悬浮覆盖,不再通过压缩表格高度制造独立底部空白层; + // 只给 body 增加底部内边距,确保最后一行可以完整滚到胶囊条上方。 const nextBodyBottomPadding = hasHorizontalOverflow - ? Math.max(stickyScrollHeight, nativeHorizontalScrollbarHeight, 14) + 6 + ? floatingScrollbarHeight + floatingScrollbarGap + 4 : 0; setTableBodyBottomPadding(nextBodyBottomPadding); - const extraBottom = 10 + horizontalReserve; + const extraBottom = 2; const nextHeight = Math.max(100, Math.floor(height - headerHeight - extraBottom)); setTableHeight(nextHeight); - }, []); + }, [floatingScrollbarGap, floatingScrollbarHeight]); useEffect(() => { const el = containerRef.current; @@ -1456,8 +1533,16 @@ const DataGrid: React.FC = ({ }, [addedRows, rowKeyStr]); const modifiedRowKeySet = useMemo(() => new Set(Object.keys(modifiedRows)), [modifiedRows]); + const rowClassName = useCallback((record: Item) => { + const k = record?.[GONAVI_ROW_KEY]; + if (k === undefined || k === null) return ''; + const keyStr = rowKeyStr(k); + if (addedRowKeySet.has(keyStr)) return 'row-added'; + if (modifiedRowKeySet.has(keyStr) || deletedRowKeys.has(keyStr)) return 'row-modified'; + return ''; + }, [addedRowKeySet, modifiedRowKeySet, deletedRowKeys, rowKeyStr]); - const handleTableChange = (pag: any, filtersArg: any, sorter: any) => { + const handleTableChange = useCallback((pag: any, filtersArg: any, sorter: any) => { if (isResizingRef.current) return; // Block sort if resizing if (sorter.field) { const field = String(sorter.field); @@ -1474,7 +1559,7 @@ const DataGrid: React.FC = ({ setSortInfo(null); if (onSort) onSort('', ''); } - }; + }, [onSort]); // Native Drag State const draggingRef = useRef<{ @@ -1631,6 +1716,11 @@ const DataGrid: React.FC = ({ } }, [cellEditorIsJson, cellEditorValue]); + const handleVirtualCellActivate = useCallback((record: Item, dataIndex: string, title: React.ReactNode) => { + if (!canModifyData) return; + openCellEditor(record, dataIndex, title); + }, [canModifyData, openCellEditor]); + // Merge Data for Display // 'displayData' already merges addedRows. // We need to merge modifiedRows into it for rendering. 
@@ -1652,24 +1742,27 @@ const DataGrid: React.FC = ({ }, [mergedDisplayData.length]); const jsonViewText = useMemo(() => { + if (viewMode !== 'json') return ''; const cleanRows = mergedDisplayData.map((row) => { const { [GONAVI_ROW_KEY]: _rowKey, ...rest } = row || {}; return normalizeValueForJsonView(rest); }); return JSON.stringify(cleanRows, null, 2); - }, [mergedDisplayData]); + }, [viewMode, mergedDisplayData]); const textViewRows = useMemo(() => { + if (viewMode !== 'text') return []; return mergedDisplayData.map((row) => { const { [GONAVI_ROW_KEY]: _rowKey, ...rest } = row || {}; return rest; }); - }, [mergedDisplayData]); + }, [viewMode, mergedDisplayData]); const currentTextRow = useMemo(() => { + if (viewMode !== 'text') return null; if (textViewRows.length === 0) return null; return textViewRows[textRecordIndex] || null; - }, [textViewRows, textRecordIndex]); + }, [viewMode, textViewRows, textRecordIndex]); const formatTextViewValue = useCallback((val: any): string => { if (val === null) return 'NULL'; @@ -1915,6 +2008,12 @@ const DataGrid: React.FC = ({ closeRowEditor(); }, [rowEditorRowKey, rowEditorForm, addedRows, columnNames, rowKeyStr, closeRowEditor]); + const estimatedVisibleCellCount = mergedDisplayData.length * Math.max(columnNames.length, 1); + const enableLargeResultOptimizedEditing = + viewMode === 'table' && (mergedDisplayData.length >= 60 || estimatedVisibleCellCount >= 4000); + const enableVirtual = enableLargeResultOptimizedEditing; + const enableInlineEditableCell = canModifyData; + const columns = useMemo(() => { return columnNames.map(key => ({ title: renderColumnTitle(key), @@ -1964,18 +2063,49 @@ const DataGrid: React.FC = ({ const mergedColumns = useMemo(() => columns.map(col => { if (!col.editable) return col; + const dataIndex = String(col.dataIndex); return { ...col, - onCell: (record: Item) => ({ - record, - editable: col.editable, - dataIndex: col.dataIndex, - title: String(col.dataIndex), - handleSave: handleCellSave, - focusCell: openCellEditor, - }), + onCell: (record: Item) => { + if (!enableInlineEditableCell) { + const rowKey = record?.[GONAVI_ROW_KEY]; + return { + 'data-row-key': rowKey === undefined || rowKey === null ? undefined : String(rowKey), + 'data-col-name': dataIndex, + onDoubleClick: () => handleVirtualCellActivate(record, dataIndex, dataIndex), + }; + } + return { + record, + editable: col.editable, + dataIndex: col.dataIndex, + title: dataIndex, + handleSave: handleCellSave, + focusCell: openCellEditor, + }; + }, + render: (text: any, record: Item, index: number) => { + const originalRenderContent = col.render ? (col.render as any)(text, record, index) : text; + if (enableVirtual && enableInlineEditableCell) { + return ( + + {originalRenderContent} + + ); + } + return originalRenderContent; + } }; - }), [columns, handleCellSave, openCellEditor]); + }), [columns, enableInlineEditableCell, enableVirtual, handleCellSave, openCellEditor, handleVirtualCellActivate]); const handleAddRow = () => { const newKey = `new-${Date.now()}`; @@ -2456,11 +2586,6 @@ const DataGrid: React.FC = ({
); - const tableComponents = useMemo(() => ({ - body: { cell: EditableCell, row: ContextMenuRow }, - header: { cell: ResizableTitle } - }), []); - const dataContextValue = useMemo(() => ({ selectedRowKeysRef, displayDataRef, @@ -2488,17 +2613,121 @@ const DataGrid: React.FC = ({ const rowPropsFactory = useCallback((record: any) => ({ record } as any), []); const totalWidth = columns.reduce((sum, col) => sum + (Number(col.width) || 200), 0) + selectionColumnWidth; - const enableVirtual = mergedDisplayData.length >= 200; + const useContextMenuRow = !canModifyData; const tableScrollX = useMemo(() => { const baseWidth = Math.max(totalWidth, 1000); if (!isMacLike || tableViewportWidth <= 0) return baseWidth; // macOS 在“自动隐藏滚动条”模式下容易误判为无横向滚动,预留 2px 触发稳定滚动轨道。 return Math.max(baseWidth, tableViewportWidth + 2); }, [totalWidth, isMacLike, tableViewportWidth]); - const tableStickyConfig = useMemo(() => ({ - getContainer: () => containerRef.current || document.body, - offsetScroll: 0, - }), []); + const horizontalScrollVisible = viewMode === 'table' && !enableVirtual && tableScrollX > tableViewportWidth + 1; + const horizontalScrollWidth = Math.max(externalScrollbarMinWidth, tableScrollX); + const tableScrollConfig = useMemo(() => ({ x: tableScrollX, y: tableHeight }), [tableScrollX, tableHeight]); + const tableComponents = useMemo(() => { + const body: Record = {}; + if (enableInlineEditableCell) { + body.cell = EditableCell; + } + if (useContextMenuRow) { + body.row = ContextMenuRow; + } + return Object.keys(body).length > 0 + ? { body, header: { cell: ResizableTitle } } + : { header: { cell: ResizableTitle } }; + }, [enableInlineEditableCell, useContextMenuRow]); + const tableOnRow = useMemo(() => (useContextMenuRow ? rowPropsFactory : undefined), [useContextMenuRow, rowPropsFactory]); + + const pickHorizontalScrollTargets = useCallback((tableContainer: HTMLElement): HTMLElement[] => { + const body = tableContainer.querySelector('.ant-table-body'); + const content = tableContainer.querySelector('.ant-table-content'); + const virtualHolder = tableContainer.querySelector('.rc-virtual-list-holder'); + const candidates = [virtualHolder, body, content].filter((node): node is HTMLElement => node instanceof HTMLElement); + if (candidates.length === 0) { + return []; + } + const active = candidates.find((target) => target.scrollWidth > target.clientWidth + 1) || candidates[0]; + return active ? [active] : []; + }, []); + + const syncExternalScrollFromTargets = useCallback((targets?: HTMLElement[], source?: HTMLElement | null) => { + const externalScroll = externalHScrollRef.current; + if (!(externalScroll instanceof HTMLDivElement) || horizontalSyncSourceRef.current === 'external') { + return; + } + const nextTargets = targets && targets.length > 0 ? 
targets : tableScrollTargetsRef.current; + if (!nextTargets || nextTargets.length === 0) { + return; + } + const activeTarget = source || nextTargets.find((target) => target.scrollWidth > target.clientWidth + 1) || nextTargets[0]; + if (!(activeTarget instanceof HTMLElement)) { + return; + } + const nextScrollLeft = activeTarget.scrollLeft; + if (Math.abs(lastTableScrollLeftRef.current - nextScrollLeft) < 1 && Math.abs(externalScroll.scrollLeft - nextScrollLeft) < 1) { + return; + } + lastTableScrollLeftRef.current = nextScrollLeft; + if (Math.abs(externalScroll.scrollLeft - nextScrollLeft) > 1) { + externalScroll.scrollLeft = nextScrollLeft; + lastExternalScrollLeftRef.current = nextScrollLeft; + } + }, []); + + const applyExternalScrollToTableTargets = useCallback(() => { + const externalScroll = externalHScrollRef.current; + if (!(externalScroll instanceof HTMLDivElement)) { + return; + } + if (horizontalSyncSourceRef.current === 'table') { + return; + } + + const liveTargets = tableScrollTargetsRef.current; + if (liveTargets.length === 0) { + return; + } + + if (Math.abs(lastExternalScrollLeftRef.current - externalScroll.scrollLeft) < 1) { + return; + } + lastExternalScrollLeftRef.current = externalScroll.scrollLeft; + + horizontalSyncSourceRef.current = 'external'; + liveTargets.forEach((target) => { + if (target.scrollWidth <= target.clientWidth + 1) { + return; + } + if (Math.abs(target.scrollLeft - externalScroll.scrollLeft) > 1) { + target.scrollLeft = externalScroll.scrollLeft; + } + }); + lastTableScrollLeftRef.current = externalScroll.scrollLeft; + horizontalSyncSourceRef.current = ''; + }, []); + + const handleExternalHorizontalWheel = useCallback((event: React.WheelEvent) => { + const externalScroll = externalHScrollRef.current; + if (!(externalScroll instanceof HTMLDivElement)) { + return; + } + const dominantDelta = Math.abs(event.deltaX) > Math.abs(event.deltaY) ? event.deltaX : event.deltaY; + if (!Number.isFinite(dominantDelta) || Math.abs(dominantDelta) < 0.5) { + return; + } + + const maxScrollLeft = Math.max(0, externalScroll.scrollWidth - externalScroll.clientWidth); + if (maxScrollLeft <= 0) { + return; + } + + const nextScrollLeft = Math.max(0, Math.min(maxScrollLeft, externalScroll.scrollLeft + dominantDelta)); + if (Math.abs(nextScrollLeft - externalScroll.scrollLeft) < 0.5) { + return; + } + + event.preventDefault(); + externalScroll.scrollLeft = nextScrollLeft; + }, []); useEffect(() => { if (viewMode !== 'table') return; @@ -2506,10 +2735,141 @@ const DataGrid: React.FC = ({ return () => cancelAnimationFrame(rafId); }, [viewMode, totalWidth, mergedDisplayData.length, recalculateTableMetrics]); + // 虚拟模式下,为 rc-virtual-list 的内置水平滚动条添加鼠标滚轮支持 + // rc-virtual-list 的 ScrollBar 组件原生只支持拖拽,不支持 wheel 事件 + // 方案:使用 MutationObserver 发现滚动条元素后直接绑定 wheel 事件 + useEffect(() => { + if (viewMode !== 'table' || !enableVirtual) return; + const container = tableContainerRef.current; + if (!container) return; + + let currentScrollbarEl: HTMLElement | null = null; + + const handleScrollbarWheel = (e: WheelEvent) => { + const innerEl = container.querySelector('.rc-virtual-list-holder-inner') as HTMLElement | null; + const holderEl = container.querySelector('.rc-virtual-list-holder') as HTMLElement | null; + if (!innerEl || !holderEl) return; + + const dominantDelta = Math.abs(e.deltaX) > Math.abs(e.deltaY) ? 
e.deltaX : e.deltaY; + if (Math.abs(dominantDelta) < 0.5) return; + + e.preventDefault(); + e.stopPropagation(); + + // 读取当前 marginLeft(负值表示向右偏移) + const currentMarginLeft = parseFloat(innerEl.style.marginLeft) || 0; + const contentWidth = tableScrollX; + const viewportWidth = holderEl.clientWidth; + const maxScroll = Math.max(0, contentWidth - viewportWidth); + + const currentOffset = Math.abs(currentMarginLeft); + const newOffset = Math.min(maxScroll, Math.max(0, currentOffset + dominantDelta)); + + // 直接更新内容位置 + innerEl.style.marginLeft = `${-newOffset}px`; + + // 同步 scrollbar thumb 位置 + if (currentScrollbarEl && maxScroll > 0) { + const thumbEl = currentScrollbarEl.querySelector('[class*="scrollbar-thumb"]') as HTMLElement | null; + if (thumbEl) { + const ratio = newOffset / maxScroll; + const thumbWidth = parseFloat(thumbEl.style.width) || thumbEl.offsetWidth; + const trackWidth = currentScrollbarEl.clientWidth; + const thumbMaxOffset = trackWidth - thumbWidth; + thumbEl.style.left = `${ratio * thumbMaxOffset}px`; + } + } + + // 同步表头水平位置 + const headerEl = container.querySelector('.ant-table-header') as HTMLElement | null; + if (headerEl) { + headerEl.scrollLeft = newOffset; + } + }; + + const bindScrollbar = () => { + const el = container.querySelector('.ant-table-tbody-virtual-scrollbar-horizontal') as HTMLElement | null; + if (el && el !== currentScrollbarEl) { + if (currentScrollbarEl) { + currentScrollbarEl.removeEventListener('wheel', handleScrollbarWheel); + } + currentScrollbarEl = el; + el.addEventListener('wheel', handleScrollbarWheel, { passive: false }); + } + }; + + // 初次尝试绑定 + bindScrollbar(); + + // 使用 MutationObserver 监听 DOM 变化,确保即使元素延迟渲染也能绑定 + const observer = new MutationObserver(() => { + bindScrollbar(); + }); + observer.observe(container, { childList: true, subtree: true }); + + return () => { + observer.disconnect(); + if (currentScrollbarEl) { + currentScrollbarEl.removeEventListener('wheel', handleScrollbarWheel); + } + }; + }, [viewMode, enableVirtual, tableScrollX, mergedDisplayData.length]); + + useEffect(() => { + if (viewMode !== 'table') return; + const tableContainer = tableContainerRef.current; + const externalScroll = externalHScrollRef.current; + if (!(tableContainer instanceof HTMLElement) || !(externalScroll instanceof HTMLDivElement)) return; + + let rafId: number | null = null; + let boundTargets: HTMLElement[] = []; + + const handleTargetScroll = (event: Event) => { + const source = event.target as HTMLElement | null; + if (horizontalSyncSourceRef.current === 'external') return; + horizontalSyncSourceRef.current = 'table'; + syncExternalScrollFromTargets(undefined, source); + horizontalSyncSourceRef.current = ''; + }; + + const bindCurrentTableTargets = () => { + // Unbind previous targets + boundTargets.forEach(t => t.removeEventListener('scroll', handleTargetScroll)); + const nextTargets = pickHorizontalScrollTargets(tableContainer); + tableScrollTargetsRef.current = nextTargets; + boundTargets = nextTargets; + // Bind scroll listener on new targets + nextTargets.forEach(t => t.addEventListener('scroll', handleTargetScroll, { passive: true })); + syncExternalScrollFromTargets(nextTargets); + }; + + const scheduleBind = () => { + if (rafId !== null) { + cancelAnimationFrame(rafId); + } + rafId = requestAnimationFrame(() => { + bindCurrentTableTargets(); + }); + }; + + window.addEventListener('resize', scheduleBind); + scheduleBind(); + + return () => { + window.removeEventListener('resize', scheduleBind); + boundTargets.forEach(t => 
t.removeEventListener('scroll', handleTargetScroll)); + tableScrollTargetsRef.current = []; + if (rafId !== null) { + cancelAnimationFrame(rafId); + } + }; + }, [viewMode, tableScrollX, mergedDisplayData.length, syncExternalScrollFromTargets, pickHorizontalScrollTargets]); + return ( -
- {/* Toolbar */} -
+
+ {/* Toolbar + Filter Panel */} +
+
{onReload && {selectedRowKeys.length > 0 && 已选 {selectedRowKeys.length}} -
+
)} -
+
{hasChanges && (
-
+
- {/* Filter Panel */} {showFilter && (
{filterConditions.map(cond => (
@@ -2762,8 +3120,9 @@ const DataGrid: React.FC = ({
)} +
-
+
{contextHolder} = ({ title={cellEditorMeta ? `编辑单元格:${cellEditorMeta.title}` : '编辑单元格'} open={cellEditorOpen} onCancel={closeCellEditor} + destroyOnHidden width={960} maskClosable={false} footer={[ @@ -2828,21 +3188,23 @@ const DataGrid: React.FC = ({
{cellEditorMeta ? `${tableName || ''}${tableName ? '.' : ''}${cellEditorMeta.dataIndex}` : ''}
- setCellEditorValue(val || '')} - options={{ - minimap: { enabled: false }, - scrollBeyondLastLine: false, - wordWrap: "on", - fontSize: 14, - tabSize: 2, - automaticLayout: true, - }} - /> + {cellEditorOpen && ( + setCellEditorValue(val || '')} + options={{ + minimap: { enabled: false }, + scrollBeyondLastLine: false, + wordWrap: "on", + fontSize: 14, + tabSize: 2, + automaticLayout: true, + }} + /> + )}
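Both editor dialogs now pass destroyOnHidden and mount the Monaco-based Editor only while the modal is actually open, so a closed dialog no longer keeps an editor instance alive. A minimal sketch of the same lazy-mount pattern, with a plain textarea standing in for the Monaco editor and illustrative names:

import React, { useState } from 'react';
import { Modal, Input } from 'antd';

// Lazy-mount pattern: the heavy editor renders only while the dialog is open,
// and destroyOnHidden drops its subtree once the dialog closes.
export const LazyEditorModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open, onClose }) => {
  const [value, setValue] = useState('');
  return (
    <Modal title="编辑单元格" open={open} onCancel={onClose} destroyOnHidden footer={null}>
      {open && (
        <Input.TextArea rows={12} value={value} onChange={(e) => setValue(e.target.value)} />
      )}
    </Modal>
  );
};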
{/* 批量编辑弹窗 */} @@ -2875,6 +3237,7 @@ const DataGrid: React.FC = ({ title="编辑 JSON 结果集" open={jsonEditorOpen} onCancel={() => setJsonEditorOpen(false)} + destroyOnHidden width={980} maskClosable={false} footer={[ @@ -2886,59 +3249,76 @@ const DataGrid: React.FC = ({
说明:此处按当前结果集顺序编辑,不支持在 JSON 模式增删记录(可在表格模式操作)。
- setJsonEditorValue(val || '')} - options={{ - readOnly: false, - minimap: { enabled: false }, - scrollBeyondLastLine: false, - wordWrap: "off", - fontSize: 12, - tabSize: 2, - automaticLayout: true, - }} - /> + {jsonEditorOpen && ( + setJsonEditorValue(val || '')} + options={{ + readOnly: false, + minimap: { enabled: false }, + scrollBeyondLastLine: false, + wordWrap: "off", + fontSize: 12, + tabSize: 2, + automaticLayout: true, + }} + /> + )} {viewMode === 'table' ? ( -
- - - - { - const k = record?.[GONAVI_ROW_KEY]; - if (k === undefined || k === null) return ''; - const keyStr = rowKeyStr(k); - if (addedRowKeySet.has(keyStr)) return 'row-added'; - if (modifiedRowKeySet.has(keyStr) || deletedRowKeys.has(keyStr)) return 'row-modified'; // deleted won't show - return ''; - }} - onRow={rowPropsFactory} - /> - - - - +
+
+ + + +
+ + + + +
+
+
+
) : viewMode === 'json' ? (
@@ -3222,23 +3602,54 @@ const DataGrid: React.FC = ({ .${gridId} .data-grid-toolbar-scroll::-webkit-scrollbar-track { background: transparent; } - .${gridId} .ant-table { background: transparent !important; } - .${gridId} .ant-table-container { background: transparent !important; border: none !important; } - .${gridId} .ant-table-tbody > tr > td { background: transparent !important; border-bottom: 1px solid ${darkMode ? 'rgba(255,255,255,0.05)' : 'rgba(0,0,0,0.05)'} !important; border-inline-end: 1px solid transparent !important; } + .${gridId} .ant-table, + .${gridId} .ant-table-wrapper, + .${gridId} .ant-table-container { + background: transparent !important; + border-radius: ${panelRadius}px !important; + } + .${gridId} .ant-table-wrapper, + .${gridId} .ant-table-container { + border: none !important; + overflow: hidden !important; + } + .${gridId} .ant-table-tbody > tr > td, + .${gridId} .ant-table-tbody .ant-table-row > .ant-table-cell { background: transparent !important; border-bottom: 1px solid ${darkMode ? 'rgba(255,255,255,0.05)' : 'rgba(0,0,0,0.05)'} !important; border-inline-end: 1px solid transparent !important; } .${gridId} .ant-table-thead > tr > th { background: transparent !important; border-bottom: 1px solid ${darkMode ? 'rgba(255,255,255,0.05)' : 'rgba(0,0,0,0.05)'} !important; border-inline-end: 1px solid transparent !important; } + .${gridId} .ant-table-thead > tr:first-child > th:first-child, + .${gridId} .ant-table-header table > thead > tr:first-child > th:first-child { + border-top-left-radius: ${panelRadius}px !important; + } + .${gridId} .ant-table-thead > tr:first-child > th:last-child, + .${gridId} .ant-table-header table > thead > tr:first-child > th:last-child { + border-top-right-radius: ${panelRadius}px !important; + } + .${gridId} .ant-table-body { + border-bottom-left-radius: ${panelRadius}px !important; + border-bottom-right-radius: ${panelRadius}px !important; + } .${gridId} .ant-table-thead > tr > th::before { display: none !important; } .${gridId} .ant-table-thead > tr > th .ant-table-column-sorters { cursor: default !important; } .${gridId} .ant-table-thead > tr > th .ant-table-column-sorter, .${gridId} .ant-table-thead > tr > th .ant-table-column-sorter * { cursor: pointer !important; } - .${gridId} .ant-table-tbody > tr:hover > td { background-color: ${darkMode ? 'rgba(255, 255, 255, 0.08)' : 'rgba(0, 0, 0, 0.02)'} !important; } - .${gridId} .ant-table-tbody > tr.ant-table-row-selected > td { background-color: ${darkMode ? `rgba(${selectionAccentRgb}, 0.18)` : `rgba(${selectionAccentRgb}, 0.08)`} !important; } - .${gridId} .ant-table-tbody > tr.ant-table-row-selected:hover > td { background-color: ${darkMode ? `rgba(${selectionAccentRgb}, 0.28)` : `rgba(${selectionAccentRgb}, 0.12)`} !important; } - .${gridId} .row-added td { background-color: ${rowAddedBg} !important; color: ${darkMode ? '#e6fffb' : 'inherit'}; } - .${gridId} .row-modified td { background-color: ${rowModBg} !important; color: ${darkMode ? 
'#e6f7ff' : 'inherit'}; } - .${gridId} .ant-table-tbody > tr.row-added:hover > td { background-color: ${rowAddedHover} !important; } - .${gridId} .ant-table-tbody > tr.row-modified:hover > td { background-color: ${rowModHover} !important; } - .${gridId}.cell-edit-mode .ant-table-tbody > tr > td[data-col-name] { user-select: none; -webkit-user-select: none; cursor: crosshair; } - .${gridId}.cell-edit-mode .ant-table-tbody > tr > td[data-cell-selected="true"] { + .${gridId} .ant-table-tbody > tr:hover > td, + .${gridId} .ant-table-tbody .ant-table-row:hover > .ant-table-cell { background-color: ${darkMode ? 'rgba(255, 255, 255, 0.08)' : 'rgba(0, 0, 0, 0.02)'} !important; } + .${gridId} .ant-table-tbody > tr.ant-table-row-selected > td, + .${gridId} .ant-table-tbody .ant-table-row.ant-table-row-selected > .ant-table-cell { background-color: ${darkMode ? `rgba(${selectionAccentRgb}, 0.18)` : `rgba(${selectionAccentRgb}, 0.08)`} !important; } + .${gridId} .ant-table-tbody > tr.ant-table-row-selected:hover > td, + .${gridId} .ant-table-tbody .ant-table-row.ant-table-row-selected:hover > .ant-table-cell { background-color: ${darkMode ? `rgba(${selectionAccentRgb}, 0.28)` : `rgba(${selectionAccentRgb}, 0.12)`} !important; } + .${gridId} .row-added td, + .${gridId} .row-added > .ant-table-cell { background-color: ${rowAddedBg} !important; color: ${darkMode ? '#e6fffb' : 'inherit'}; } + .${gridId} .row-modified td, + .${gridId} .row-modified > .ant-table-cell { background-color: ${rowModBg} !important; color: ${darkMode ? '#e6f7ff' : 'inherit'}; } + .${gridId} .ant-table-tbody > tr.row-added:hover > td, + .${gridId} .ant-table-tbody .ant-table-row.row-added:hover > .ant-table-cell { background-color: ${rowAddedHover} !important; } + .${gridId} .ant-table-tbody > tr.row-modified:hover > td, + .${gridId} .ant-table-tbody .ant-table-row.row-modified:hover > .ant-table-cell { background-color: ${rowModHover} !important; } + .${gridId}.cell-edit-mode .ant-table-tbody > tr > td[data-col-name], + .${gridId}.cell-edit-mode .ant-table-tbody .ant-table-row > .ant-table-cell[data-col-name] { user-select: none; -webkit-user-select: none; cursor: crosshair; } + .${gridId}.cell-edit-mode .ant-table-tbody > tr > td[data-cell-selected="true"], + .${gridId}.cell-edit-mode .ant-table-tbody .ant-table-row > .ant-table-cell[data-cell-selected="true"] { box-shadow: inset 0 0 0 2px ${selectionAccentHex}; background-image: linear-gradient(${darkMode ? `rgba(${selectionAccentRgb}, 0.20)` : `rgba(${selectionAccentRgb}, 0.08)`}, ${darkMode ? `rgba(${selectionAccentRgb}, 0.20)` : `rgba(${selectionAccentRgb}, 0.08)`}); } @@ -3251,13 +3662,103 @@ const DataGrid: React.FC = ({ box-sizing: border-box; scroll-padding-bottom: ${tableBodyBottomPadding}px; } - .${gridId} .ant-table-sticky-scroll { - height: 10px !important; - background: ${darkMode ? 'rgba(255,255,255,0.08)' : 'rgba(0,0,0,0.08)'}; - z-index: 20 !important; + .${gridId} .data-grid-table-wrap { + width: 100%; + max-width: 100%; + overflow: hidden; } - .${gridId} .ant-table-sticky-scroll-bar { - background: ${darkMode ? 
'rgba(255,255,255,0.35)' : 'rgba(0,0,0,0.28)'} !important; + .${gridId} .ant-table-sticky-scroll { + display: none !important; + } + .${gridId} .ant-table-tbody-virtual-scrollbar.ant-table-tbody-virtual-scrollbar-horizontal { + height: ${floatingScrollbarHeight + 4}px !important; + bottom: ${floatingScrollbarGap}px !important; + left: ${floatingScrollbarInset}px !important; + right: ${floatingScrollbarInset}px !important; + background: transparent !important; + visibility: visible !important; + pointer-events: auto !important; + z-index: 24; + } + .${gridId} .ant-table-tbody-virtual-scrollbar.ant-table-tbody-virtual-scrollbar-horizontal .ant-table-tbody-virtual-scrollbar-thumb { + background: ${horizontalScrollbarThumbBg} !important; + border: 1px solid ${horizontalScrollbarThumbBorderColor} !important; + border-radius: 999px !important; + box-shadow: ${horizontalScrollbarThumbShadow} !important; + height: ${floatingScrollbarHeight}px !important; + margin-top: 2px; + } + .${gridId} .data-grid-table-wrap.data-grid-table-wrap-external-active .ant-table-content { + overflow-x: hidden !important; + } + .${gridId} .data-grid-table-wrap.data-grid-table-wrap-external-active .ant-table-body { + overflow-x: hidden !important; + overflow-y: auto !important; + } + .${gridId} .ant-table-body { + scrollbar-width: thin; + scrollbar-color: ${floatingScrollbarThumbBg} transparent; + } + .${gridId} .ant-table-body::-webkit-scrollbar { + width: ${floatingScrollbarHeight}px; + height: 0; + } + .${gridId} .ant-table-body::-webkit-scrollbar-track { + background: transparent; + margin: 8px 0; + } + .${gridId} .ant-table-body::-webkit-scrollbar-thumb { + background: ${floatingScrollbarThumbBg}; + border: 1px solid ${floatingScrollbarThumbBorderColor}; + border-radius: 999px; + box-shadow: ${floatingScrollbarThumbShadow}; + } + .${gridId} .rc-virtual-list-holder { + scrollbar-width: thin; + scrollbar-color: ${floatingScrollbarThumbBg} transparent; + } + .${gridId} .rc-virtual-list-holder::-webkit-scrollbar { + width: ${floatingScrollbarHeight}px; + height: 0; + } + .${gridId} .rc-virtual-list-holder::-webkit-scrollbar-track { + background: transparent; + margin: 8px 0; + } + .${gridId} .rc-virtual-list-holder::-webkit-scrollbar-thumb { + background: ${floatingScrollbarThumbBg}; + border: 1px solid ${floatingScrollbarThumbBorderColor}; + border-radius: 999px; + box-shadow: ${floatingScrollbarThumbShadow}; + } + .${gridId} .data-grid-external-hscroll { + position: absolute; + left: ${floatingScrollbarInset}px; + right: ${floatingScrollbarInset}px; + bottom: ${floatingScrollbarGap}px; + height: ${floatingScrollbarHeight + 4}px; + overflow-x: auto; + overflow-y: hidden; + background: transparent; + z-index: 24; + } + .${gridId} .data-grid-external-hscroll::-webkit-scrollbar { + height: ${floatingScrollbarHeight}px; + } + .${gridId} .data-grid-external-hscroll::-webkit-scrollbar-track { + background: ${horizontalScrollbarTrackBg}; + border: 1px solid ${horizontalScrollbarTrackBorderColor}; + border-radius: 999px; + box-shadow: ${horizontalScrollbarTrackShadow}; + } + .${gridId} .data-grid-external-hscroll::-webkit-scrollbar-thumb { + background: ${horizontalScrollbarThumbBg}; + border: 1px solid ${horizontalScrollbarThumbBorderColor}; + border-radius: 999px; + box-shadow: ${horizontalScrollbarThumbShadow}; + } + .${gridId} .data-grid-external-hscroll-inner { + height: 1px; } `} diff --git a/frontend/src/components/TabManager.tsx b/frontend/src/components/TabManager.tsx index 7f61c83..555bf15 100644 --- 
a/frontend/src/components/TabManager.tsx +++ b/frontend/src/components/TabManager.tsx @@ -1,6 +1,6 @@ import React, { useMemo, useRef, useState } from 'react'; import { Tabs, Dropdown } from 'antd'; -import type { MenuProps } from 'antd'; +import type { MenuProps, TabsProps } from 'antd'; import { DndContext, PointerSensor, closestCenter, useSensor, useSensors } from '@dnd-kit/core'; import type { DragStartEvent, DragEndEvent } from '@dnd-kit/core'; import { SortableContext, useSortable, horizontalListSortingStrategy } from '@dnd-kit/sortable'; @@ -35,44 +35,18 @@ const buildTabDisplayTitle = (tab: TabData, connectionName: string | undefined): }; type SortableTabLabelProps = { - tabId: string; displayTitle: string; menuItems: MenuProps['items']; - draggingTabId: string | null; - onSelect: (tabId: string) => void; }; const SortableTabLabel: React.FC = ({ - tabId, displayTitle, menuItems, - draggingTabId, - onSelect, }) => { - const { attributes, listeners, setNodeRef, transform, transition, isDragging } = useSortable({ id: tabId }); - const style: React.CSSProperties = { - transform: CSS.Transform.toString(transform), - transition: transition || 'transform 180ms cubic-bezier(0.22, 1, 0.36, 1)', - opacity: isDragging ? 0.88 : 1, - cursor: isDragging ? 'grabbing' : 'grab', - display: 'inline-flex', - alignItems: 'center', - maxWidth: '100%', - touchAction: 'none', - }; - const isDragBlocked = !!draggingTabId && draggingTabId !== tabId; - return ( { - if (!isDragBlocked) onSelect(tabId); - }} + className="tab-dnd-label" onContextMenu={(e) => e.preventDefault()} title="拖拽调整标签顺序" > @@ -82,9 +56,36 @@ const SortableTabLabel: React.FC = ({ ); }; +type DraggableTabNodeProps = { + node: React.ReactElement; +}; + +const DraggableTabNode: React.FC = ({ node }) => { + const tabId = String(node.key || '').trim(); + const { attributes, listeners, setNodeRef, transform, transition, isDragging } = useSortable({ id: tabId }); + const style: React.CSSProperties = { + ...(node.props.style || {}), + transform: CSS.Transform.toString(transform), + transition: transition || 'transform 180ms cubic-bezier(0.22, 1, 0.36, 1)', + opacity: isDragging ? 0.88 : 1, + cursor: isDragging ? 'grabbing' : 'grab', + touchAction: 'none', + zIndex: isDragging ? 2 : node.props.style?.zIndex, + }; + + return React.cloneElement(node, { + ref: setNodeRef, + style, + ...attributes, + ...listeners, + className: `${node.props.className || ''} tab-dnd-node${isDragging ? ' is-dragging' : ''}`, + }); +}; + const TabManager: React.FC = () => { const tabs = useStore(state => state.tabs); const connections = useStore(state => state.connections); + const theme = useStore(state => state.theme); const activeTabId = useStore(state => state.activeTabId); const setActiveTab = useStore(state => state.setActiveTab); const closeTab = useStore(state => state.closeTab); @@ -93,6 +94,7 @@ const TabManager: React.FC = () => { const closeTabsToRight = useStore(state => state.closeTabsToRight); const closeAllTabs = useStore(state => state.closeAllTabs); const moveTab = useStore(state => state.moveTab); + const tabsNavBorderColor = theme === 'dark' ? 
'rgba(255, 255, 255, 0.09)' : 'rgba(0, 0, 0, 0.08)'; const [draggingTabId, setDraggingTabId] = useState(null); const suppressClickUntilRef = useRef(0); const sensors = useSensors( @@ -111,11 +113,6 @@ const TabManager: React.FC = () => { } }; - const handleTabSelect = (tabId: string) => { - if (Date.now() < suppressClickUntilRef.current) return; - setActiveTab(tabId); - }; - const handleDragStart = (event: DragStartEvent) => { const sourceId = String(event.active.id || '').trim(); setDraggingTabId(sourceId || null); @@ -138,11 +135,21 @@ const TabManager: React.FC = () => { const tabIds = useMemo(() => tabs.map((tab) => tab.id), [tabs]); + const renderTabBar: TabsProps['renderTabBar'] = (tabBarProps, DefaultTabBar) => ( + + {(node) => } + + ); + const items = useMemo(() => tabs.map((tab, index) => { const connectionName = connections.find((conn) => conn.id === tab.connectionId)?.name; const displayTitle = buildTabDisplayTitle(tab, connectionName); + const keepMountedWhenInactive = tab.type === 'query' || tab.type === 'redis-command'; + const shouldRenderContent = activeTabId === tab.id || keepMountedWhenInactive; let content; - if (tab.type === 'query') { + if (!shouldRenderContent) { + content = null; + } else if (tab.type === 'query') { content = ; } else if (tab.type === 'table') { content = ; @@ -189,17 +196,14 @@ const TabManager: React.FC = () => { return { label: ( ), key: tab.id, children: content, }; - }), [tabs, connections, closeOtherTabs, closeTabsToLeft, closeTabsToRight, closeAllTabs, draggingTabId]); + }), [tabs, connections, activeTabId, closeOtherTabs, closeTabsToLeft, closeTabsToRight, closeAllTabs]); return ( <> @@ -248,7 +252,7 @@ const TabManager: React.FC = () => { display: none !important; } .main-tabs .ant-tabs-nav::before { - border-bottom: none !important; + border-bottom: 1px solid ${tabsNavBorderColor} !important; } .main-tabs .ant-tabs-tab { transition: transform 180ms cubic-bezier(0.22, 1, 0.36, 1), background-color 120ms ease; @@ -256,8 +260,12 @@ const TabManager: React.FC = () => { .main-tabs .tab-dnd-label { user-select: none; -webkit-user-select: none; + display: inline-flex; + align-items: center; + max-width: 100%; } - .main-tabs .tab-dnd-label.is-dragging { + .main-tabs .tab-dnd-node.is-dragging, + .main-tabs .tab-dnd-node.is-dragging .tab-dnd-label { cursor: grabbing !important; } body[data-theme='dark'] .main-tabs .ant-tabs-tab-btn:focus-visible { @@ -289,11 +297,15 @@ const TabManager: React.FC = () => { { + if (Date.now() < suppressClickUntilRef.current) return; + onChange(newActiveKey); + }} activeKey={activeTabId || undefined} onEdit={onEdit} items={items} hideAdd + renderTabBar={renderTabBar} /> diff --git a/frontend/src/store.ts b/frontend/src/store.ts index 4a4b320..e6d14fc 100644 --- a/frontend/src/store.ts +++ b/frontend/src/store.ts @@ -3,6 +3,12 @@ import { persist } from 'zustand/middleware'; import { ConnectionConfig, ProxyConfig, SavedConnection, TabData, SavedQuery } from './types'; const DEFAULT_APPEARANCE = { opacity: 1.0, blur: 0 }; +const DEFAULT_UI_SCALE = 1.0; +const MIN_UI_SCALE = 0.8; +const MAX_UI_SCALE = 1.25; +const DEFAULT_FONT_SIZE = 14; +const MIN_FONT_SIZE = 12; +const MAX_FONT_SIZE = 20; const DEFAULT_STARTUP_FULLSCREEN = false; const LEGACY_DEFAULT_OPACITY = 0.95; const OPACITY_EPSILON = 1e-6; @@ -107,6 +113,13 @@ const normalizeIntegerInRange = (value: unknown, fallbackValue: number, min: num return normalized; }; +const normalizeFloatInRange = (value: unknown, fallbackValue: number, min: number, max: number): number 
=> { + const parsed = Number(value); + if (!Number.isFinite(parsed)) return fallbackValue; + if (parsed < min || parsed > max) return fallbackValue; + return parsed; +}; + const isValidHostEntry = (entry: string): boolean => { if (!entry) return false; if (entry.length > MAX_HOST_ENTRY_LENGTH) return false; @@ -318,6 +331,8 @@ interface AppState { savedQueries: SavedQuery[]; theme: 'light' | 'dark'; appearance: { opacity: number; blur: number }; + uiScale: number; + fontSize: number; startupFullscreen: boolean; globalProxy: GlobalProxyConfig; sqlFormatOptions: { keywordCase: 'upper' | 'lower' }; @@ -347,6 +362,8 @@ interface AppState { setTheme: (theme: 'light' | 'dark') => void; setAppearance: (appearance: Partial<{ opacity: number; blur: number }>) => void; + setUiScale: (scale: number) => void; + setFontSize: (size: number) => void; setStartupFullscreen: (enabled: boolean) => void; setGlobalProxy: (proxy: Partial) => void; setSqlFormatOptions: (options: { keywordCase: 'upper' | 'lower' }) => void; @@ -441,6 +458,14 @@ const sanitizeStartupFullscreen = (value: unknown): boolean => { return value === true; }; +const sanitizeUiScale = (value: unknown): number => { + return normalizeFloatInRange(value, DEFAULT_UI_SCALE, MIN_UI_SCALE, MAX_UI_SCALE); +}; + +const sanitizeFontSize = (value: unknown): number => { + return normalizeIntegerInRange(value, DEFAULT_FONT_SIZE, MIN_FONT_SIZE, MAX_FONT_SIZE); +}; + const sanitizeGlobalProxy = (value: unknown): GlobalProxyConfig => { const raw = (value && typeof value === 'object') ? value as Record : {}; const typeRaw = toTrimmedString(raw.type, DEFAULT_GLOBAL_PROXY.type).toLowerCase(); @@ -477,6 +502,8 @@ export const useStore = create()( savedQueries: [], theme: 'light', appearance: { ...DEFAULT_APPEARANCE }, + uiScale: DEFAULT_UI_SCALE, + fontSize: DEFAULT_FONT_SIZE, startupFullscreen: DEFAULT_STARTUP_FULLSCREEN, globalProxy: { ...DEFAULT_GLOBAL_PROXY }, sqlFormatOptions: { keywordCase: 'upper' }, @@ -607,6 +634,8 @@ export const useStore = create()( setTheme: (theme) => set({ theme }), setAppearance: (appearance) => set((state) => ({ appearance: { ...state.appearance, ...appearance } })), + setUiScale: (scale) => set({ uiScale: sanitizeUiScale(scale) }), + setFontSize: (size) => set({ fontSize: sanitizeFontSize(size) }), setStartupFullscreen: (enabled) => set({ startupFullscreen: !!enabled }), setGlobalProxy: (proxy) => set((state) => ({ globalProxy: sanitizeGlobalProxy({ ...state.globalProxy, ...proxy }) })), setSqlFormatOptions: (options) => set({ sqlFormatOptions: options }), @@ -646,6 +675,8 @@ export const useStore = create()( nextState.savedQueries = sanitizeSavedQueries(state.savedQueries); nextState.theme = sanitizeTheme(state.theme); nextState.appearance = sanitizeAppearance(state.appearance, version); + nextState.uiScale = sanitizeUiScale(state.uiScale); + nextState.fontSize = sanitizeFontSize(state.fontSize); nextState.startupFullscreen = sanitizeStartupFullscreen(state.startupFullscreen); nextState.globalProxy = sanitizeGlobalProxy(state.globalProxy); nextState.sqlFormatOptions = sanitizeSqlFormatOptions(state.sqlFormatOptions); @@ -663,6 +694,8 @@ export const useStore = create()( savedQueries: sanitizeSavedQueries(state.savedQueries), theme: sanitizeTheme(state.theme), appearance: sanitizeAppearance(state.appearance, PERSIST_VERSION), + uiScale: sanitizeUiScale(state.uiScale), + fontSize: sanitizeFontSize(state.fontSize), startupFullscreen: sanitizeStartupFullscreen(state.startupFullscreen), globalProxy: 
sanitizeGlobalProxy(state.globalProxy), sqlFormatOptions: sanitizeSqlFormatOptions(state.sqlFormatOptions), @@ -676,6 +709,8 @@ export const useStore = create()( savedQueries: state.savedQueries, theme: state.theme, appearance: state.appearance, + uiScale: state.uiScale, + fontSize: state.fontSize, startupFullscreen: state.startupFullscreen, globalProxy: state.globalProxy, sqlFormatOptions: state.sqlFormatOptions, From f477feab2f5ac805adc76a649aa2807a8a4a1b99 Mon Sep 17 00:00:00 2001 From: Syngnat Date: Tue, 3 Mar 2026 14:11:35 +0800 Subject: [PATCH 12/48] =?UTF-8?q?=F0=9F=94=A7=20chore(app):=20=E6=B8=85?= =?UTF-8?q?=E7=90=86=20App.tsx=20=E7=B1=BB=E5=9E=8B=E5=91=8A=E8=AD=A6?= =?UTF-8?q?=E5=B9=B6=E6=94=B6=E6=95=9B=E5=89=8D=E7=AB=AF=E5=A3=B3=E5=B1=82?= =?UTF-8?q?=E5=AE=9E=E7=8E=B0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 清除未使用代码和冗余状态 - 替换弃用 API 以消除 IDE 提示 - 显式处理浮动 Promise 避免告警 - 保持现有更新检查和代理设置行为不变 --- frontend/src/App.tsx | 105 +++++++++++++++++++++++-------------------- 1 file changed, 57 insertions(+), 48 deletions(-) diff --git a/frontend/src/App.tsx b/frontend/src/App.tsx index ac328a7..ba93b57 100644 --- a/frontend/src/App.tsx +++ b/frontend/src/App.tsx @@ -1,8 +1,8 @@ import React, { useState, useEffect } from 'react'; import { Layout, Button, ConfigProvider, theme, Dropdown, MenuProps, message, Modal, Spin, Slider, Progress, Switch, Input, InputNumber, Select } from 'antd'; import zhCN from 'antd/locale/zh_CN'; -import { PlusOutlined, BulbOutlined, BulbFilled, ConsoleSqlOutlined, UploadOutlined, DownloadOutlined, CloudDownloadOutlined, BugOutlined, ToolOutlined, GlobalOutlined, InfoCircleOutlined, GithubOutlined, SkinOutlined, CheckOutlined, MinusOutlined, BorderOutlined, CloseOutlined, SettingOutlined } from '@ant-design/icons'; -import { Environment, EventsOn, WindowFullscreen, WindowIsFullscreen, WindowIsMaximised, WindowMaximise } from '../wailsjs/runtime/runtime'; +import { PlusOutlined, ConsoleSqlOutlined, UploadOutlined, DownloadOutlined, CloudDownloadOutlined, BugOutlined, ToolOutlined, GlobalOutlined, InfoCircleOutlined, GithubOutlined, SkinOutlined, CheckOutlined, MinusOutlined, BorderOutlined, CloseOutlined, SettingOutlined } from '@ant-design/icons'; +import { BrowserOpenURL, Environment, EventsOn, Quit, WindowFullscreen, WindowIsFullscreen, WindowIsMaximised, WindowMaximise, WindowMinimise, WindowToggleMaximise } from '../wailsjs/runtime'; import Sidebar from './components/Sidebar'; import TabManager from './components/TabManager'; import ConnectionModal from './components/ConnectionModal'; @@ -23,6 +23,19 @@ const MAX_FONT_SIZE = 20; const DEFAULT_UI_SCALE = 1.0; const DEFAULT_FONT_SIZE = 14; +const detectNavigatorPlatform = (): string => { + if (typeof navigator === 'undefined') { + return ''; + } + const uaDataPlatform = (navigator as Navigator & { + userAgentData?: { platform?: string }; + }).userAgentData?.platform; + if (uaDataPlatform) { + return uaDataPlatform; + } + return navigator.userAgent || ''; +}; + function App() { const [isModalOpen, setIsModalOpen] = useState(false); const [isSyncModalOpen, setIsSyncModalOpen] = useState(false); @@ -66,7 +79,7 @@ function App() { // 同步 macOS 窗口透明度:opacity=1.0 且 blur=0 时关闭 NSVisualEffectView, // 避免 GPU 持续计算窗口背后的模糊合成 useEffect(() => { - SetWindowTranslucency(appearance.opacity, appearance.blur).catch(() => {}); + void SetWindowTranslucency(appearance.opacity, appearance.blur).catch(() => undefined); }, [appearance.opacity, appearance.blur]); useEffect(() => { @@ -80,7 
+93,7 @@ function App() { }) .catch(() => { if (cancelled) return; - const platform = typeof navigator !== 'undefined' ? navigator.platform : ''; + const platform = detectNavigatorPlatform(); const normalized = /linux/i.test(platform) ? 'linux' : (/mac/i.test(platform) ? 'darwin' : (/win/i.test(platform) ? 'windows' : '')); @@ -116,7 +129,7 @@ function App() { if (invalidWhenEnabled) { if (!globalProxyInvalidHintShownRef.current) { - message.warning({ + void message.warning({ content: '全局代理已开启,但地址或端口无效,当前按未启用处理', key: 'global-proxy-invalid', }); @@ -124,7 +137,7 @@ function App() { } } else { globalProxyInvalidHintShownRef.current = false; - message.destroy('global-proxy-invalid'); + void message.destroy('global-proxy-invalid'); } const enabledForBackend = globalProxy.enabled && !invalidWhenEnabled; @@ -140,7 +153,7 @@ function App() { if (cancelled || res?.success) { return; } - message.error({ + void message.error({ content: '全局代理配置失败: ' + (res?.message || '未知错误'), key: 'global-proxy-sync-error', }); @@ -150,7 +163,7 @@ function App() { return; } const errMsg = err instanceof Error ? err.message : String(err || '未知错误'); - message.error({ + void message.error({ content: '全局代理配置失败: ' + errMsg, key: 'global-proxy-sync-error', }); @@ -205,18 +218,18 @@ function App() { if (!useStore.getState().startupFullscreen) { return; } - Promise.resolve() + void Promise.resolve() .then(async () => { if (await checkStartupPreferenceApplied()) { return; } // 优先尝试全屏,若当前平台/时机不生效,后续走最大化兜底。 - WindowFullscreen(); + await WindowFullscreen(); await new Promise((resolve) => window.setTimeout(resolve, settleDelayMs)); if (await checkStartupPreferenceApplied()) { return; } - WindowMaximise(); + await WindowMaximise(); await new Promise((resolve) => window.setTimeout(resolve, settleDelayMs)); if (await checkStartupPreferenceApplied()) { return; @@ -225,7 +238,7 @@ function App() { applyStartupWindowPreference(attempt + 1); } }); - }, 300); + }, applyRetryDelayMs); }; if (useStore.persist.hasHydrated()) { @@ -248,7 +261,7 @@ function App() { }, []); // Background Helper - const getBg = (darkHex: string, lightHex: string) => { + const getBg = (darkHex: string) => { if (!darkMode) return `rgba(255, 255, 255, ${effectiveOpacity})`; // Light mode usually white // Parse hex to rgb @@ -259,8 +272,8 @@ function App() { return `rgba(${r}, ${g}, ${b}, ${effectiveOpacity})`; }; // Specific colors - const bgMain = getBg('#141414', '#ffffff'); - const bgContent = getBg('#1d1d1d', '#ffffff'); + const bgMain = getBg('#141414'); + const bgContent = getBg('#1d1d1d'); const floatingLogButtonBorderColor = darkMode ? 'rgba(255,255,255,0.20)' : 'rgba(0,0,0,0.16)'; const floatingLogButtonTextColor = darkMode ? 'rgba(255,255,255,0.92)' : 'rgba(0,0,0,0.82)'; const floatingLogButtonBgColor = darkMode @@ -339,7 +352,7 @@ function App() { }; const isMacRuntime = runtimePlatform === 'darwin' - || (runtimePlatform === '' && typeof navigator !== 'undefined' && /mac/i.test(navigator.platform)); + || (runtimePlatform === '' && /mac/i.test(detectNavigatorPlatform())); const formatBytes = (bytes?: number) => { if (!bytes || bytes <= 0) return '0 B'; @@ -358,7 +371,7 @@ function App() { if (updateDownloadedVersionRef.current === info.latestVersion) { if (!silent) { const cachedDownloadPath = updateDownloadMetaRef.current?.downloadPath; - message.info(cachedDownloadPath ? `更新包已就绪(${info.latestVersion}),路径:${cachedDownloadPath}` : `更新包已就绪(${info.latestVersion})`); + void message.info(cachedDownloadPath ? 
`更新包已就绪(${info.latestVersion}),路径:${cachedDownloadPath}` : `更新包已就绪(${info.latestVersion})`); showUpdateDownloadProgress(); } return; @@ -399,9 +412,9 @@ function App() { }; }); if (resultData?.downloadPath) { - message.success({ content: `更新下载完成,更新包路径:${resultData.downloadPath}`, duration: 5 }); + void message.success({ content: `更新下载完成,更新包路径:${resultData.downloadPath}`, duration: 5 }); } else { - message.success({ content: '更新下载完成', duration: 2 }); + void message.success({ content: '更新下载完成', duration: 2 }); } setAboutUpdateStatus(`发现新版本 ${info.latestVersion}(已下载,请点击“下载进度”后安装)`); } else { @@ -410,7 +423,7 @@ function App() { status: 'error', message: res?.message || '未知错误' })); - message.error({ content: '更新下载失败: ' + (res?.message || '未知错误'), duration: 4 }); + void message.error({ content: '更新下载失败: ' + (res?.message || '未知错误'), duration: 4 }); } }, []); @@ -425,10 +438,6 @@ function App() { setUpdateDownloadProgress((prev) => ({ ...prev, open: false })); }, []); - const hasUpdateDownloadProgress = updateDownloadProgress.status === 'start' - || updateDownloadProgress.status === 'downloading' - || updateDownloadProgress.status === 'done' - || updateDownloadProgress.status === 'error'; const isLatestUpdateDownloaded = Boolean(lastUpdateInfo?.hasUpdate) && ( Boolean(lastUpdateInfo?.downloaded) || (Boolean(lastUpdateInfo?.latestVersion) && updateDownloadedVersionRef.current === lastUpdateInfo?.latestVersion) @@ -449,17 +458,17 @@ function App() { if (isMacRuntime) { const res = await (window as any).go.app.App.OpenDownloadedUpdateDirectory(); if (!res?.success) { - message.error('打开安装目录失败: ' + (res?.message || '未知错误')); + void message.error('打开安装目录失败: ' + (res?.message || '未知错误')); return; } updateInstallTriggeredVersionRef.current = updateDownloadProgress.version || lastUpdateInfo?.latestVersion || null; hideUpdateDownloadProgress(); - message.success(res?.message || '已打开安装目录,请手动完成替换'); + void message.success(res?.message || '已打开安装目录,请手动完成替换'); return; } const res = await (window as any).go.app.App.InstallUpdateAndRestart(); if (!res?.success) { - message.error('更新安装失败: ' + (res?.message || '未知错误')); + void message.error('更新安装失败: ' + (res?.message || '未知错误')); return; } updateInstallTriggeredVersionRef.current = updateDownloadProgress.version || lastUpdateInfo?.latestVersion || null; @@ -476,7 +485,7 @@ function App() { updateCheckInFlightRef.current = false; if (!res?.success) { if (!silent) { - message.error('检查更新失败: ' + (res?.message || '未知错误')); + void message.error('检查更新失败: ' + (res?.message || '未知错误')); setAboutUpdateStatus('检查更新失败: ' + (res?.message || '未知错误')); } return; @@ -541,7 +550,7 @@ function App() { ? 
`发现新版本 ${info.latestVersion}(已下载,请点击“下载进度”后安装)` : `发现新版本 ${info.latestVersion}(未下载)`; if (!silent) { - message.info(`发现新版本 ${info.latestVersion}`); + void message.info(`发现新版本 ${info.latestVersion}`); setAboutUpdateStatus(statusText); } if (silent && aboutOpen) { @@ -568,7 +577,7 @@ function App() { }); setLastUpdateInfo(info); const text = `当前已是最新版本(${info.currentVersion || '未知'})`; - message.success(text); + void message.success(text); setAboutUpdateStatus(text); } else if (silent && aboutOpen) { setUpdateDownloadProgress((prev) => { @@ -599,7 +608,7 @@ function App() { if (res?.success) { setAboutInfo(res.data); } else { - message.error('获取应用信息失败: ' + (res?.message || '未知错误')); + void message.error('获取应用信息失败: ' + (res?.message || '未知错误')); } setAboutLoading(false); }, []); @@ -640,28 +649,28 @@ function App() { count++; } }); - message.success(`成功导入 ${count} 个连接`); + void message.success(`成功导入 ${count} 个连接`); } else { - message.error("文件格式错误:需要 JSON 数组"); + void message.error("文件格式错误:需要 JSON 数组"); } } catch (e) { - message.error("解析 JSON 失败"); + void message.error("解析 JSON 失败"); } } else if (res.message !== "Cancelled") { - message.error("导入失败: " + res.message); + void message.error("导入失败: " + res.message); } }; const handleExportConnections = async () => { if (connections.length === 0) { - message.warning("没有连接可导出"); + void message.warning("没有连接可导出"); return; } const res = await (window as any).go.app.App.ExportData(connections, [], "connections", "json"); if (res.success) { - message.success("导出成功"); + void message.success("导出成功"); } else if (res.message !== "Cancelled") { - message.error("导出失败: " + res.message); + void message.error("导出失败: " + res.message); } }; @@ -790,7 +799,7 @@ function App() { if (target?.closest('[data-no-titlebar-toggle="true"]')) { return; } - (window as any).runtime.WindowToggleMaximise(); + WindowToggleMaximise(); }; // Sidebar Resizing @@ -880,16 +889,16 @@ function App() { } else { setAboutUpdateStatus('未检查'); } - loadAboutInfo(); + void loadAboutInfo(); } }, [isAboutOpen, lastUpdateInfo, loadAboutInfo]); useEffect(() => { const startupTimer = window.setTimeout(() => { - checkForUpdates(true); + void checkForUpdates(true); }, 2000); const interval = window.setInterval(() => { - checkForUpdates(true); + void checkForUpdates(true); }, 30 * 60 * 1000); return () => { window.clearTimeout(startupTimer); @@ -1041,13 +1050,13 @@ function App() { type="text" icon={} style={{ height: '100%', borderRadius: 0, width: titleBarButtonWidth }} - onClick={() => (window as any).runtime.WindowMinimise()} + onClick={WindowMinimise} />
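This commit replaces the untyped (window as any).runtime.* calls with the generated bindings from ../wailsjs/runtime and marks fire-and-forget promises with void to silence floating-promise warnings. A condensed sketch of the pattern, assuming the standard Wails v2 runtime exports; the handler names are illustrative:

import { message } from 'antd';
import { BrowserOpenURL, WindowMinimise } from '../wailsjs/runtime';

// Before: (window as any).runtime.WindowMinimise() — untyped and invisible to the compiler.
// After: call the generated bindings directly; intentionally unawaited promises get `void`.
export const handleMinimise = () => WindowMinimise();

export const openExternalLink = (url?: string) => {
  if (url) BrowserOpenURL(url);
};

export const notifyExported = () => {
  void message.success('导出成功');
};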
@@ -1216,7 +1225,7 @@ function App() {
{aboutInfo?.repoUrl ? ( - { e.preventDefault(); (window as any).runtime.BrowserOpenURL(aboutInfo.repoUrl); }} href={aboutInfo.repoUrl}> + { e.preventDefault(); if (aboutInfo?.repoUrl) BrowserOpenURL(aboutInfo.repoUrl); }} href={aboutInfo.repoUrl}> {aboutInfo.repoUrl} ) : '未知'} @@ -1224,7 +1233,7 @@ function App() {
{aboutInfo?.issueUrl ? ( - { e.preventDefault(); (window as any).runtime.BrowserOpenURL(aboutInfo.issueUrl); }} href={aboutInfo.issueUrl}> + { e.preventDefault(); if (aboutInfo?.issueUrl) BrowserOpenURL(aboutInfo.issueUrl); }} href={aboutInfo.issueUrl}> {aboutInfo.issueUrl} ) : '未知'} @@ -1232,7 +1241,7 @@ function App() {
{aboutInfo?.releaseUrl ? ( - { e.preventDefault(); (window as any).runtime.BrowserOpenURL(aboutInfo.releaseUrl); }} href={aboutInfo.releaseUrl}> + { e.preventDefault(); if (aboutInfo?.releaseUrl) BrowserOpenURL(aboutInfo.releaseUrl); }} href={aboutInfo.releaseUrl}> {aboutInfo.releaseUrl} ) : '未知'} From 3284eeba174a16a19c5e7c11ab65c3fc64d2c172 Mon Sep 17 00:00:00 2001 From: Syngnat Date: Tue, 3 Mar 2026 14:58:19 +0800 Subject: [PATCH 13/48] =?UTF-8?q?=F0=9F=94=A7=20fix(ci):=20=E4=BF=AE?= =?UTF-8?q?=E5=A4=8D=20Windows=20AMD64=20=E4=B8=8B=20DuckDB=20=E9=A9=B1?= =?UTF-8?q?=E5=8A=A8=E6=9E=84=E5=BB=BA=E9=93=BE=E8=B7=AF?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 将 DuckDB 工具链准备切换为优先使用 MSYS2 - 增加 gcc 和 g++ 存在性校验与版本验证 - 在 MSYS2 异常时回退 Chocolatey 安装 MinGW - 保持 Windows ARM64 跳过 DuckDB 构建与平台支持一致 --- .github/workflows/release.yml | 86 ++++++++++++++++++++++++++++++----- 1 file changed, 75 insertions(+), 11 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index b373353..d52c061 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -131,23 +131,83 @@ jobs: - name: Install Wails run: go install -v github.com/wailsapp/wails/v2/cmd/wails@latest - - name: Prepare MinGW For DuckDB (Windows) - if: ${{ matrix.build_optional_agents && contains(matrix.platform, 'windows') }} + - name: Setup MSYS2 Toolchain For DuckDB (Windows AMD64) + id: msys2_duckdb + if: ${{ matrix.build_optional_agents && matrix.platform == 'windows/amd64' }} + continue-on-error: true + uses: msys2/setup-msys2@v2 + with: + msystem: MINGW64 + update: true + install: >- + mingw-w64-x86_64-gcc + + - name: Configure DuckDB CGO Toolchain (Windows AMD64) + if: ${{ matrix.build_optional_agents && matrix.platform == 'windows/amd64' }} shell: pwsh run: | - $mingwBin = "C:\msys64\mingw64\bin" - if (!(Test-Path $mingwBin)) { - choco install mingw --yes --no-progress - $mingwBin = "C:\ProgramData\chocolatey\lib\mingw\tools\install\mingw64\bin" + function Find-MingwBin([string[]]$candidates) { + foreach ($bin in $candidates) { + if ([string]::IsNullOrWhiteSpace($bin)) { + continue + } + $gcc = Join-Path $bin 'gcc.exe' + $gxx = Join-Path $bin 'g++.exe' + if ((Test-Path $gcc) -and (Test-Path $gxx)) { + return $bin + } + } + return $null } - if (!(Test-Path $mingwBin)) { - Write-Error "❌ 未找到 MinGW GCC 路径:$mingwBin" + + $msys2Outcome = "${{ steps.msys2_duckdb.outcome }}" + $msys2Location = "${{ steps.msys2_duckdb.outputs['msys2-location'] }}" + $candidateBins = @() + if (-not [string]::IsNullOrWhiteSpace($msys2Location)) { + $candidateBins += Join-Path $msys2Location 'mingw64\bin' + } + $candidateBins += @( + 'C:\msys64\mingw64\bin', + 'C:\ProgramData\chocolatey\lib\mingw\tools\install\mingw64\bin' + ) + $candidateBins = @($candidateBins | Select-Object -Unique) + + $mingwBin = Find-MingwBin $candidateBins + if (-not $mingwBin) { + if ($msys2Outcome -ne 'success') { + Write-Warning "⚠️ MSYS2 安装步骤结果为 $msys2Outcome,回退到本机探测/Chocolatey 安装 MinGW" + } else { + Write-Warning "⚠️ MSYS2 已执行,但未找到 gcc/g++,回退到本机探测/Chocolatey 安装 MinGW" + } + + choco install mingw --yes --no-progress + $mingwBin = Find-MingwBin $candidateBins + } + + if (-not $mingwBin) { + Write-Error "❌ 未找到可用的 DuckDB 编译器。已检查:$($candidateBins -join ', ')" exit 1 } + + $gcc = (Join-Path $mingwBin 'gcc.exe') + $gxx = (Join-Path $mingwBin 'g++.exe') + + if (!(Test-Path $gcc) -or !(Test-Path $gxx)) { + Write-Error "❌ DuckDB 编译器缺失:gcc=$gcc g++=$gxx" + exit 1 + } + "$mingwBin" | Out-File -FilePath 
$env:GITHUB_PATH -Append -Encoding utf8 - "CC=$mingwBin\gcc.exe" | Out-File -FilePath $env:GITHUB_ENV -Append -Encoding utf8 - "CXX=$mingwBin\g++.exe" | Out-File -FilePath $env:GITHUB_ENV -Append -Encoding utf8 - Write-Host "✅ 已配置 DuckDB cgo 编译器: $mingwBin" + "CC=$gcc" | Out-File -FilePath $env:GITHUB_ENV -Append -Encoding utf8 + "CXX=$gxx" | Out-File -FilePath $env:GITHUB_ENV -Append -Encoding utf8 + Write-Host "✅ 已配置 DuckDB cgo 编译器: gcc=$gcc g++=$gxx" + + - name: Verify DuckDB CGO Toolchain (Windows AMD64) + if: ${{ matrix.build_optional_agents && matrix.platform == 'windows/amd64' }} + shell: pwsh + run: | + & "$env:CC" --version + & "$env:CXX" --version - name: Build shell: bash @@ -176,6 +236,10 @@ jobs: if [ "$DRIVER" = "doris" ]; then BUILD_DRIVER="diros" fi + if [ "$DRIVER" = "duckdb" ] && [ "$GOOS" = "windows" ] && [ "$GOARCH" != "amd64" ]; then + echo "⚠️ 跳过 DuckDB driver(当前平台 ${GOOS}/${GOARCH} 不受支持,仅支持 windows/amd64)" + continue + fi TAG="gonavi_${BUILD_DRIVER}_driver" OUTPUT="${DRIVER}-driver-agent-${GOOS}-${GOARCH}" if [ "$GOOS" = "windows" ]; then From 1afb8850adbd95739558a704c89935a8b6efac47 Mon Sep 17 00:00:00 2001 From: Syngnat Date: Tue, 3 Mar 2026 14:58:19 +0800 Subject: [PATCH 14/48] =?UTF-8?q?=F0=9F=94=A7=20fix(ci):=20=E4=BF=AE?= =?UTF-8?q?=E5=A4=8D=20Windows=20AMD64=20=E4=B8=8B=20DuckDB=20=E9=A9=B1?= =?UTF-8?q?=E5=8A=A8=E6=9E=84=E5=BB=BA=E9=93=BE=E8=B7=AF?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 将 DuckDB 工具链准备切换为优先使用 MSYS2 - 增加 gcc 和 g++ 存在性校验与版本验证 - 在 MSYS2 异常时回退 Chocolatey 安装 MinGW - 保持 Windows ARM64 跳过 DuckDB 构建与平台支持一致 --- .github/workflows/release.yml | 86 ++++++++++++++++++++++++++++++----- 1 file changed, 75 insertions(+), 11 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 1b522e5..ee661a0 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -131,23 +131,83 @@ jobs: - name: Install Wails run: go install -v github.com/wailsapp/wails/v2/cmd/wails@latest - - name: Prepare MinGW For DuckDB (Windows) - if: ${{ matrix.build_optional_agents && contains(matrix.platform, 'windows') }} + - name: Setup MSYS2 Toolchain For DuckDB (Windows AMD64) + id: msys2_duckdb + if: ${{ matrix.build_optional_agents && matrix.platform == 'windows/amd64' }} + continue-on-error: true + uses: msys2/setup-msys2@v2 + with: + msystem: MINGW64 + update: true + install: >- + mingw-w64-x86_64-gcc + + - name: Configure DuckDB CGO Toolchain (Windows AMD64) + if: ${{ matrix.build_optional_agents && matrix.platform == 'windows/amd64' }} shell: pwsh run: | - $mingwBin = "C:\msys64\mingw64\bin" - if (!(Test-Path $mingwBin)) { - choco install mingw --yes --no-progress - $mingwBin = "C:\ProgramData\chocolatey\lib\mingw\tools\install\mingw64\bin" + function Find-MingwBin([string[]]$candidates) { + foreach ($bin in $candidates) { + if ([string]::IsNullOrWhiteSpace($bin)) { + continue + } + $gcc = Join-Path $bin 'gcc.exe' + $gxx = Join-Path $bin 'g++.exe' + if ((Test-Path $gcc) -and (Test-Path $gxx)) { + return $bin + } + } + return $null } - if (!(Test-Path $mingwBin)) { - Write-Error "❌ 未找到 MinGW GCC 路径:$mingwBin" + + $msys2Outcome = "${{ steps.msys2_duckdb.outcome }}" + $msys2Location = "${{ steps.msys2_duckdb.outputs['msys2-location'] }}" + $candidateBins = @() + if (-not [string]::IsNullOrWhiteSpace($msys2Location)) { + $candidateBins += Join-Path $msys2Location 'mingw64\bin' + } + $candidateBins += @( + 'C:\msys64\mingw64\bin', + 
'C:\ProgramData\chocolatey\lib\mingw\tools\install\mingw64\bin' + ) + $candidateBins = @($candidateBins | Select-Object -Unique) + + $mingwBin = Find-MingwBin $candidateBins + if (-not $mingwBin) { + if ($msys2Outcome -ne 'success') { + Write-Warning "⚠️ MSYS2 安装步骤结果为 $msys2Outcome,回退到本机探测/Chocolatey 安装 MinGW" + } else { + Write-Warning "⚠️ MSYS2 已执行,但未找到 gcc/g++,回退到本机探测/Chocolatey 安装 MinGW" + } + + choco install mingw --yes --no-progress + $mingwBin = Find-MingwBin $candidateBins + } + + if (-not $mingwBin) { + Write-Error "❌ 未找到可用的 DuckDB 编译器。已检查:$($candidateBins -join ', ')" exit 1 } + + $gcc = (Join-Path $mingwBin 'gcc.exe') + $gxx = (Join-Path $mingwBin 'g++.exe') + + if (!(Test-Path $gcc) -or !(Test-Path $gxx)) { + Write-Error "❌ DuckDB 编译器缺失:gcc=$gcc g++=$gxx" + exit 1 + } + "$mingwBin" | Out-File -FilePath $env:GITHUB_PATH -Append -Encoding utf8 - "CC=$mingwBin\gcc.exe" | Out-File -FilePath $env:GITHUB_ENV -Append -Encoding utf8 - "CXX=$mingwBin\g++.exe" | Out-File -FilePath $env:GITHUB_ENV -Append -Encoding utf8 - Write-Host "✅ 已配置 DuckDB cgo 编译器: $mingwBin" + "CC=$gcc" | Out-File -FilePath $env:GITHUB_ENV -Append -Encoding utf8 + "CXX=$gxx" | Out-File -FilePath $env:GITHUB_ENV -Append -Encoding utf8 + Write-Host "✅ 已配置 DuckDB cgo 编译器: gcc=$gcc g++=$gxx" + + - name: Verify DuckDB CGO Toolchain (Windows AMD64) + if: ${{ matrix.build_optional_agents && matrix.platform == 'windows/amd64' }} + shell: pwsh + run: | + & "$env:CC" --version + & "$env:CXX" --version - name: Build shell: bash @@ -176,6 +236,10 @@ jobs: if [ "$DRIVER" = "doris" ]; then BUILD_DRIVER="diros" fi + if [ "$DRIVER" = "duckdb" ] && [ "$GOOS" = "windows" ] && [ "$GOARCH" != "amd64" ]; then + echo "⚠️ 跳过 DuckDB driver(当前平台 ${GOOS}/${GOARCH} 不受支持,仅支持 windows/amd64)" + continue + fi TAG="gonavi_${BUILD_DRIVER}_driver" OUTPUT="${DRIVER}-driver-agent-${GOOS}-${GOARCH}" if [ "$GOOS" = "windows" ]; then From 462ca5790780e57c9ab279b772441065034a774f Mon Sep 17 00:00:00 2001 From: Syngnat Date: Tue, 3 Mar 2026 15:22:02 +0800 Subject: [PATCH 15/48] =?UTF-8?q?=F0=9F=94=A7=20fix(ci):=20=E4=BF=AE?= =?UTF-8?q?=E5=A4=8D=20Windows=20AMD64=20=E4=B8=8B=20DuckDB=20=E9=A9=B1?= =?UTF-8?q?=E5=8A=A8=E6=9E=84=E5=BB=BA=E5=B7=A5=E5=85=B7=E9=93=BE?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 将 DuckDB 编译链从 MINGW64 切换为 MSYS2 UCRT64 - 修正 Windows AMD64 的 gcc 和 g++ 探测路径 - 增加 DuckDB 编译器版本校验步骤 --- .github/workflows/release.yml | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index ee661a0..a4e6a37 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -137,10 +137,10 @@ jobs: continue-on-error: true uses: msys2/setup-msys2@v2 with: - msystem: MINGW64 + msystem: UCRT64 update: true install: >- - mingw-w64-x86_64-gcc + mingw-w64-ucrt-x86_64-gcc - name: Configure DuckDB CGO Toolchain (Windows AMD64) if: ${{ matrix.build_optional_agents && matrix.platform == 'windows/amd64' }} @@ -164,28 +164,26 @@ jobs: $msys2Location = "${{ steps.msys2_duckdb.outputs['msys2-location'] }}" $candidateBins = @() if (-not [string]::IsNullOrWhiteSpace($msys2Location)) { - $candidateBins += Join-Path $msys2Location 'mingw64\bin' + $candidateBins += Join-Path $msys2Location 'ucrt64\bin' } $candidateBins += @( - 'C:\msys64\mingw64\bin', - 'C:\ProgramData\chocolatey\lib\mingw\tools\install\mingw64\bin' + 'C:\msys64\ucrt64\bin', + 'D:\a\_temp\msys64\ucrt64\bin' ) $candidateBins = @($candidateBins | 
Select-Object -Unique) $mingwBin = Find-MingwBin $candidateBins if (-not $mingwBin) { if ($msys2Outcome -ne 'success') { - Write-Warning "⚠️ MSYS2 安装步骤结果为 $msys2Outcome,回退到本机探测/Chocolatey 安装 MinGW" + Write-Warning "⚠️ MSYS2 安装步骤结果为 $msys2Outcome,回退到 UCRT64 本机路径探测" } else { - Write-Warning "⚠️ MSYS2 已执行,但未找到 gcc/g++,回退到本机探测/Chocolatey 安装 MinGW" + Write-Warning "⚠️ MSYS2 已执行,但未找到 UCRT64 gcc/g++,回退到本机路径探测" } - - choco install mingw --yes --no-progress $mingwBin = Find-MingwBin $candidateBins } if (-not $mingwBin) { - Write-Error "❌ 未找到可用的 DuckDB 编译器。已检查:$($candidateBins -join ', ')" + Write-Error "❌ 未找到可用的 DuckDB UCRT64 编译器。已检查:$($candidateBins -join ', ')" exit 1 } From 786835c9bc08ee2afc1a351a689ee9d0122209e8 Mon Sep 17 00:00:00 2001 From: Syngnat Date: Tue, 3 Mar 2026 15:49:58 +0800 Subject: [PATCH 16/48] =?UTF-8?q?=F0=9F=93=9D=20docs(contributing):=20?= =?UTF-8?q?=E8=A1=A5=E5=85=85=E4=B8=AD=E8=8B=B1=E6=96=87=E8=B4=A1=E7=8C=AE?= =?UTF-8?q?=E6=8C=87=E5=8D=97=E5=B9=B6=E7=BB=9F=E4=B8=80=20README=20?= =?UTF-8?q?=E5=85=A5=E5=8F=A3?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 新增英文版 CONTRIBUTING.md 作为正式贡献文档 - 新增中文版 CONTRIBUTING.zh-CN.md 作为中文贡献说明 - 调整 README 和 README.zh-CN 的贡献入口指向对应语言文档 --- CONTRIBUTING.md | 154 ++++++++++++++++++++++++++++++++++++++++++ CONTRIBUTING.zh-CN.md | 154 ++++++++++++++++++++++++++++++++++++++++++ README.md | 10 +-- README.zh-CN.md | 10 +-- 4 files changed, 318 insertions(+), 10 deletions(-) create mode 100644 CONTRIBUTING.md create mode 100644 CONTRIBUTING.zh-CN.md diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..561dd32 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,154 @@ +# Contributing Guide + +Thank you for contributing to this project. + +This repository follows a release-first workflow: `main` is the default public branch, while releases are prepared through `release/*` branches. + +--- + +## Branch Model + +- `main`: stable release branch and default branch +- `dev`: day-to-day integration branch for maintainers +- `release/*`: release preparation branches for maintainers +- Recommended branch names for external contributors: + - `fix/*`: bug fixes + - `feature/*`: new features or enhancements + +Maintainer release flow: + +```text +feature/* / fix/* -> dev -> release/* -> main -> tag(vX.Y.Z) +``` + +--- + +## How External Contributors Should Open Pull Requests + +Whether your branch is `fix/*` or `feature/*`, external contributors should **open pull requests directly against `main`**. + +Reasons: + +- `main` is the default branch, so the PR entry point is clearer +- merged contributions are immediately visible on the default branch +- maintainers can handle downstream sync and release preparation in one place + +Recommended flow: + +1. Fork this repository +2. Create a branch in your fork (`fix/*` or `feature/*` is recommended) +3. Make your changes and perform basic self-checks +4. Push the branch to your fork +5. Open a pull request against the `main` branch of this repository + +--- + +## Pull Request Requirements + +Please keep each pull request focused, reviewable, and easy to validate. 
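A minimal local self-check before opening a pull request might look like the following. This is only a sketch: the Go commands match the module layout of this repository, while the frontend commands assume the standard npm scripts under `frontend/` and are not prescribed by this guide — verify them against `frontend/package.json` before relying on them.

```bash
# Sketch of a pre-PR self-check (adjust paths and scripts to your local setup)
go vet ./...        # static checks for the Go backend
go test ./...       # run backend unit tests

cd frontend
npm install         # assumes the standard npm-based frontend tooling
npm run build       # assumed build script; confirm in frontend/package.json
```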
+ +Recommended expectations: + +- one pull request should address one logical change +- use a clear title that explains the purpose +- include the following in the description: + - background and problem statement + - key changes + - impact scope + - validation method +- include screenshots or recordings for UI changes when helpful +- explicitly mention risk and rollback notes for compatibility, data, or build-chain changes + +--- + +## Merge Strategy for Maintainers + +Pull requests merged into `main` should generally use **Squash and merge**. + +Reasons: + +- keeps `main` history clean and linear +- maps each PR to a single commit on `main` +- reduces release, audit, and rollback complexity + +--- + +## Maintainer Sync Rules + +Because external pull requests are merged directly into `main`, maintainers must sync `main` back to development and release branches to avoid branch drift. + +### 1. Sync `main` -> `dev` (required) + +Every change merged into `main` must be synced into `dev`: + +```bash +git checkout dev +git pull +git merge main +git push +``` + +### 2. Create `release/*` from `dev` + +Before a release, create a release branch from `dev`, for example: + +```bash +git checkout dev +git pull +git checkout -b release/v0.6.0 +git push -u origin release/v0.6.0 +``` + +### 3. Release from `release/*` back to `main` + +When release preparation is complete, merge the release branch back into `main` and create a tag: + +```bash +git checkout main +git pull +git merge release/v0.6.0 +git push +git tag v0.6.0 +git push origin v0.6.0 +``` + +### 4. Sync `main` back to `dev` after release + +After the release, sync `main` back into `dev` again: + +```bash +git checkout dev +git pull +git merge main +git push +``` + +--- + +## Commit Message Recommendation + +Keep commit messages clear and easy to audit. + +Recommended format: + +```text +emoji type(scope): concise description +``` + +Examples: + +```text +🔧 fix(ci): fix DuckDB driver toolchain on Windows AMD64 +✨ feat(redis): add Stream data browsing support +♻️ refactor(datagrid): optimize large-table horizontal scrolling and rendering +``` + +--- + +## Additional Notes + +- Please include validation results for documentation, build-chain, or driver compatibility changes +- For larger changes, opening an issue or draft PR first is recommended +- Maintainers may ask contributors to narrow the scope if the change conflicts with the current project direction + +Thank you for contributing. diff --git a/CONTRIBUTING.zh-CN.md b/CONTRIBUTING.zh-CN.md new file mode 100644 index 0000000..20cc60e --- /dev/null +++ b/CONTRIBUTING.zh-CN.md @@ -0,0 +1,154 @@ +# 贡献指南 + +感谢你对本项目的贡献。 + +本项目采用“发布优先(`main` 为默认分支)+ `release/*` 分支发版”的协作模型。为减少分支漂移与 PR 处理成本,请在提交贡献前先阅读本指南。 + +--- + +## 分支模型 + +- `main`:稳定发布分支,也是仓库默认分支 +- `dev`:日常开发集成分支,主要供维护者使用 +- `release/*`:发布准备分支,主要供维护者使用 +- 外部贡献者建议使用以下分支命名: + - `fix/*`:问题修复 + - `feature/*`:功能新增或增强 + +维护者发布流转如下: + +```text +feature/* / fix/* -> dev -> release/* -> main -> tag(vX.Y.Z) +``` + +--- + +## 外部贡献者如何提 Pull Request + +无论是 `fix/*` 还是 `feature/*`,**外部贡献者统一直接向 `main` 发起 Pull Request**。 + +这样做的原因: + +- `main` 是默认分支,PR 入口更直观 +- 合并后贡献会直接体现在默认分支 +- 便于维护者统一做后续同步与发版整理 + +建议流程: + +1. Fork 本仓库 +2. 从你自己的仓库创建分支(建议命名为 `fix/*` 或 `feature/*`) +3. 完成代码修改,并进行必要自检 +4. 推送到你的远程分支 +5. 
向本仓库的 `main` 分支发起 Pull Request + +--- + +## Pull Request 要求 + +请尽量保证 PR 单一、清晰、可审核。 + +建议遵循以下要求: + +- 一个 PR 只解决一类问题,避免混入无关改动 +- 标题清晰说明改动目的 +- 描述中说明: + - 背景与问题 + - 变更点 + - 影响范围 + - 验证方式 +- 如涉及 UI 调整,建议附截图或录屏 +- 如涉及兼容性、数据变更或构建链路调整,请明确说明风险和回滚方式 + +--- + +## PR 合并策略(维护者) + +`main` 分支上的 PR 建议使用 **Squash and merge**。 + +原因: + +- 保持 `main` 历史干净、线性 +- 每个 PR 在 `main` 上对应一个清晰提交 +- 降低发布排查与回滚成本 + +--- + +## 维护者同步规则 + +由于外部 PR 会直接合入 `main`,维护者必须及时将 `main` 的变更同步到开发与发布分支,避免分支漂移。 + +### 1. main → dev 同步(必做) + +任何合入 `main` 的变更,都必须同步到 `dev`: + +```bash +git checkout dev +git pull +git merge main +git push +``` + +### 2. 发版前从 dev 切 release/* + +发布前由维护者基于 `dev` 创建发布分支,例如: + +```bash +git checkout dev +git pull +git checkout -b release/v0.6.0 +git push -u origin release/v0.6.0 +``` + +### 3. release/* → main 发版 + +发布准备完成后,将 `release/*` 合并回 `main`,并打标签发布: + +```bash +git checkout main +git pull +git merge release/v0.6.0 +git push +git tag v0.6.0 +git push origin v0.6.0 +``` + +### 4. main 回流到 dev(发版后必做) + +发布完成后,再次将 `main` 回流到 `dev`,确保开发线与发布线一致: + +```bash +git checkout dev +git pull +git merge main +git push +``` + +--- + +## 提交建议 + +建议保持提交信息简洁、明确,便于维护者审查与后续追踪。 + +推荐格式: + +```text +emoji type(scope): 中文描述 +``` + +示例: + +```text +🔧 fix(ci): 修复 Windows AMD64 下 DuckDB 驱动构建工具链 +✨ feat(redis): 新增 Stream 类型数据浏览支持 +♻️ refactor(datagrid): 优化大表横向滚动与渲染结构 +``` + +--- + +## 其他说明 + +- 文档、构建链路、驱动兼容性相关改动,请尽量附带验证结果 +- 若改动较大,建议先提 Issue 或 Draft PR,先对齐方案再实施 +- 如提交内容与项目当前架构方向冲突,维护者可能要求收敛范围后再合并 + +感谢你的贡献。 diff --git a/README.md b/README.md index de049cd..4ad80ac 100644 --- a/README.md +++ b/README.md @@ -200,11 +200,11 @@ If you use Linux artifacts with the `-WebKit41` suffix, prefer Debian 13 / Ubunt Issues and pull requests are welcome. -1. Fork the repository. -2. Create a feature branch. -3. Commit your changes. -4. Push to your branch. -5. Open a pull request. +For the full workflow, branch model, and maintainer sync rules, see: + +- [CONTRIBUTING.md](CONTRIBUTING.md) + +External contributors should open pull requests directly against `main`. ## License diff --git a/README.zh-CN.md b/README.zh-CN.md index b572ee8..3a2f2d5 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -183,11 +183,11 @@ sudo apt-get install -y libgtk-3-0 libwebkit2gtk-4.0-37 libjavascriptcoregtk-4.0 欢迎提交 Issue 与 Pull Request。 -1. Fork 本仓库。 -2. 创建特性分支。 -3. 提交改动。 -4. 推送分支。 -5. 
发起 Pull Request。 +完整流程、分支模型与维护者同步规则请查看: + +- [CONTRIBUTING.zh-CN.md](CONTRIBUTING.zh-CN.md) + +外部贡献者统一直接向 `main` 发起 Pull Request。 ## 开源协议 From 04f8b266d370a9e9afaa17b06de50784c0f9ba1c Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 6 Mar 2026 13:57:11 +0800 Subject: [PATCH 17/48] =?UTF-8?q?=20=20-=20feat(connection,metadata,kingba?= =?UTF-8?q?se):=20=E5=A2=9E=E5=BC=BA=E5=A4=9A=E6=95=B0=E6=8D=AE=E6=BA=90?= =?UTF-8?q?=E8=BF=9E=E6=8E=A5=E8=83=BD=E5=8A=9B=E5=B9=B6=E4=BF=AE=E5=A4=8D?= =?UTF-8?q?=E9=87=91=E4=BB=93/=E8=BE=BE=E6=A2=A6/Oracle/ClickHouse?= =?UTF-8?q?=E5=85=BC=E5=AE=B9=E6=80=A7=E9=97=AE=E9=A2=98=20(#188)=20(#190)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat(http-tunnel): 支持独立 HTTP 隧道连接并覆盖多数据源 refs #168 * fix(kingbase-data-grid): 修复金仓打开表卡顿并降低对象渲染开销 refs #178 * fix(kingbase-transaction): 修复金仓事务提交重复引号导致语法错误 refs #176 * fix(driver-agent): 修复老版本 Win10 升级后金仓驱动代理启动失败 refs #177 * chore(ci): 新增手动触发的 macOS 测试构建工作流 * chore(ci): 允许测试工作流在当前分支自动触发 * fix(query-editor): 修复 SQL 编辑中光标随机跳到末尾 refs #185 * feat(data-sync): 增加差异 SQL 预览能力便于审核 refs #174 * fix(clickhouse-connect): 自动识别并回退 HTTP/Native 协议连接 refs #181 * fix(oracle-metadata): 修复视图与函数加载按 schema 过滤异常 refs #155 * fix(dameng-databases): 修复显示全部库时数据库列表不完整 refs #154 * fix(connection,db-list): 统一处理空列表返回并修复达梦连接测试报错 refs #157 Co-authored-by: 辣条 <69459608+tianqijiuyun-latiao@users.noreply.github.com> --- .github/workflows/test-macos-build.yml | 91 ++++++++ frontend/package.json.md5 | 2 +- frontend/src/components/ConnectionModal.tsx | 136 ++++++++++-- frontend/src/components/DataGrid.tsx | 31 ++- frontend/src/components/DataSyncModal.tsx | 231 +++++++++++++++++--- frontend/src/components/QueryEditor.tsx | 38 +++- frontend/src/components/Sidebar.tsx | 40 +++- frontend/src/store.ts | 16 +- frontend/src/types.ts | 9 + frontend/wailsjs/go/models.ts | 23 ++ internal/app/app.go | 11 + internal/app/db_proxy.go | 29 +++ internal/app/global_proxy.go | 2 +- internal/app/methods_db.go | 21 +- internal/app/methods_driver.go | 36 ++- internal/app/methods_redis.go | 47 +++- internal/connection/types.go | 72 +++--- internal/db/clickhouse_impl.go | 84 ++++++- internal/db/dameng_impl.go | 81 ++++++- internal/db/driver_agent_binary_check.go | 74 +++++++ internal/db/driver_support.go | 3 + internal/db/driver_support_test.go | 19 +- internal/db/kingbase_impl.go | 91 ++++++-- internal/db/kingbase_impl_test.go | 74 +++++++ internal/db/optional_driver_agent_impl.go | 29 +++ internal/db/query_value.go | 11 + internal/db/query_value_test.go | 30 +++ 27 files changed, 1162 insertions(+), 169 deletions(-) create mode 100644 .github/workflows/test-macos-build.yml create mode 100644 internal/db/driver_agent_binary_check.go create mode 100644 internal/db/kingbase_impl_test.go diff --git a/.github/workflows/test-macos-build.yml b/.github/workflows/test-macos-build.yml new file mode 100644 index 0000000..1dd01af --- /dev/null +++ b/.github/workflows/test-macos-build.yml @@ -0,0 +1,91 @@ +name: Test Build macOS (Manual) + +on: + workflow_dispatch: + inputs: + build_label: + description: "测试包标识(仅用于文件名)" + required: false + default: "test" + push: + branches: + - feature/kingbase_opt + paths: + - ".github/workflows/test-macos-build.yml" + +permissions: + contents: read + +jobs: + build-macos: + name: Build macOS ${{ matrix.arch }} + runs-on: macos-latest + strategy: + fail-fast: false + matrix: + include: + - platform: darwin/amd64 + arch: amd64 + - platform: 
darwin/arm64 + arch: arm64 + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup Go + uses: actions/setup-go@v5 + with: + go-version: "1.24.3" + check-latest: true + + - name: Setup Node + uses: actions/setup-node@v4 + with: + node-version: "20" + + - name: Install Wails + run: go install github.com/wailsapp/wails/v2/cmd/wails@v2.11.0 + + - name: Build App + run: | + set -euo pipefail + OUTPUT_NAME="gonavi-test-${{ matrix.arch }}" + BUILD_LABEL="${{ inputs.build_label }}" + if [ -z "$BUILD_LABEL" ]; then + BUILD_LABEL="test" + fi + APP_VERSION="${BUILD_LABEL}-${GITHUB_RUN_NUMBER}" + wails build \ + -platform "${{ matrix.platform }}" \ + -clean \ + -o "$OUTPUT_NAME" \ + -ldflags "-s -w -X GoNavi-Wails/internal/app.AppVersion=${APP_VERSION}" + + - name: Package Zip + run: | + set -euo pipefail + APP_PATH="build/bin/gonavi-test-${{ matrix.arch }}.app" + if [ ! -d "$APP_PATH" ]; then + APP_PATH=$(find build/bin -maxdepth 1 -name "*.app" | head -n 1 || true) + fi + if [ -z "$APP_PATH" ] || [ ! -d "$APP_PATH" ]; then + echo "未找到 .app 产物" + ls -la build/bin || true + exit 1 + fi + LABEL="${{ inputs.build_label }}" + if [ -z "$LABEL" ]; then + LABEL="test" + fi + ZIP_NAME="GoNavi-${LABEL}-macos-${{ matrix.arch }}-run${GITHUB_RUN_NUMBER}.zip" + mkdir -p artifacts + ditto -c -k --sequesterRsrc --keepParent "$APP_PATH" "artifacts/$ZIP_NAME" + shasum -a 256 "artifacts/$ZIP_NAME" > "artifacts/$ZIP_NAME.sha256" + + - name: Upload Artifact + uses: actions/upload-artifact@v4 + with: + name: gonavi-macos-${{ matrix.arch }}-run${{ github.run_number }} + path: artifacts/* + if-no-files-found: error diff --git a/frontend/package.json.md5 b/frontend/package.json.md5 index 0f8f4fe..a7661c0 100755 --- a/frontend/package.json.md5 +++ b/frontend/package.json.md5 @@ -1 +1 @@ -5b8157374dae5f9340e31b2d0bd2c00e \ No newline at end of file +d0f9366af59a6367ad3c7e2d4185ead4 \ No newline at end of file diff --git a/frontend/src/components/ConnectionModal.tsx b/frontend/src/components/ConnectionModal.tsx index 45ef1a8..85aa4c6 100644 --- a/frontend/src/components/ConnectionModal.tsx +++ b/frontend/src/components/ConnectionModal.tsx @@ -101,6 +101,7 @@ const ConnectionModal: React.FC<{ const [useSSL, setUseSSL] = useState(false); const [useSSH, setUseSSH] = useState(false); const [useProxy, setUseProxy] = useState(false); + const [useHttpTunnel, setUseHttpTunnel] = useState(false); const [dbType, setDbType] = useState('mysql'); const [step, setStep] = useState(1); // 1: Select Type, 2: Configure const [activeGroup, setActiveGroup] = useState(0); // Active category index in step 1 @@ -1026,6 +1027,8 @@ const ConnectionModal: React.FC<{ const mysqlIsReplica = String(config.topology || '').toLowerCase() === 'replica' || mysqlReplicaHosts.length > 0; const mongoIsReplica = String(config.topology || '').toLowerCase() === 'replica' || mongoHosts.length > 0 || !!config.replicaSet; const redisIsCluster = String(config.topology || '').toLowerCase() === 'cluster' || redisHosts.length > 0; + const hasHttpTunnel = !!config.useHttpTunnel; + const hasProxy = !hasHttpTunnel && !!config.useProxy; form.setFieldsValue({ type: configType, name: initialValues.name, @@ -1047,12 +1050,17 @@ const ConnectionModal: React.FC<{ sshUser: config.ssh?.user, sshPassword: config.ssh?.password, sshKeyPath: config.ssh?.keyPath, - useProxy: config.useProxy, + useProxy: hasProxy, proxyType: config.proxy?.type || 'socks5', proxyHost: config.proxy?.host, proxyPort: config.proxy?.port, proxyUser: config.proxy?.user, proxyPassword: 
config.proxy?.password, + useHttpTunnel: hasHttpTunnel, + httpTunnelHost: config.httpTunnel?.host, + httpTunnelPort: config.httpTunnel?.port || 8080, + httpTunnelUser: config.httpTunnel?.user, + httpTunnelPassword: config.httpTunnel?.password, driver: config.driver, dsn: config.dsn, timeout: config.timeout || 30, @@ -1076,7 +1084,8 @@ const ConnectionModal: React.FC<{ }); setUseSSL(!!config.useSSL); setUseSSH(config.useSSH || false); - setUseProxy(config.useProxy || false); + setUseProxy(hasProxy); + setUseHttpTunnel(hasHttpTunnel); setDbType(configType); // 如果是 Redis 编辑模式,设置已保存的 Redis 数据库列表 if (configType === 'redis') { @@ -1089,6 +1098,7 @@ const ConnectionModal: React.FC<{ setUseSSL(false); setUseSSH(false); setUseProxy(false); + setUseHttpTunnel(false); setDbType('mysql'); setActiveGroup(0); } @@ -1140,6 +1150,7 @@ const ConnectionModal: React.FC<{ setUseSSL(false); setUseSSH(false); setUseProxy(false); + setUseHttpTunnel(false); setDbType('mysql'); setStep(1); onClose(); @@ -1185,19 +1196,24 @@ const ConnectionModal: React.FC<{ ? await RedisConnect(config as any) : await TestConnection(config as any); - if (res.success) { - setTestResult({ type: 'success', message: res.message }); - if (isRedisType) { - setRedisDbList(Array.from({ length: 16 }, (_, i) => i)); - } else { - // Other databases: fetch database list - const dbRes = await DBGetDatabases(config as any); - if (dbRes.success) { - const dbs = (dbRes.data as any[]).map((row: any) => row.Database || row.database); - setDbList(dbs); - } - } - } else { + if (res.success) { + setTestResult({ type: 'success', message: res.message }); + if (isRedisType) { + setRedisDbList(Array.from({ length: 16 }, (_, i) => i)); + } else { + // Other databases: fetch database list + const dbRes = await DBGetDatabases(config as any); + if (dbRes.success) { + const dbRows = Array.isArray(dbRes.data) ? dbRes.data : []; + const dbs = dbRows + .map((row: any) => row?.Database || row?.database) + .filter((name: any) => typeof name === 'string' && name.trim() !== ''); + setDbList(dbs); + } else { + setDbList([]); + } + } + } else { const failMessage = buildTestFailureMessage( res?.message, '连接被拒绝或参数无效,请检查后重试' @@ -1388,7 +1404,8 @@ const ConnectionModal: React.FC<{ password: mergedValues.sshPassword || "", keyPath: mergedValues.sshKeyPath || "" } : { host: "", port: 22, user: "", password: "", keyPath: "" }; - const effectiveUseProxy = !isFileDbType && !!mergedValues.useProxy; + const effectiveUseHttpTunnel = !isFileDbType && !!mergedValues.useHttpTunnel; + const effectiveUseProxy = !isFileDbType && !!mergedValues.useProxy && !effectiveUseHttpTunnel; const proxyTypeRaw = String(mergedValues.proxyType || 'socks5').toLowerCase(); const proxyType: 'socks5' | 'http' = proxyTypeRaw === 'http' ? 'http' : 'socks5'; const proxyConfig: NonNullable = effectiveUseProxy ? { @@ -1404,6 +1421,25 @@ const ConnectionModal: React.FC<{ user: '', password: '', }; + const httpTunnelConfig: NonNullable = effectiveUseHttpTunnel ? 
{ + host: String(mergedValues.httpTunnelHost || '').trim(), + port: Number(mergedValues.httpTunnelPort || 8080), + user: String(mergedValues.httpTunnelUser || '').trim(), + password: mergedValues.httpTunnelPassword || "", + } : { + host: '', + port: 8080, + user: '', + password: '', + }; + if (effectiveUseHttpTunnel) { + if (!httpTunnelConfig.host) { + throw new Error('HTTP 隧道主机不能为空'); + } + if (!Number.isFinite(httpTunnelConfig.port) || httpTunnelConfig.port <= 0 || httpTunnelConfig.port > 65535) { + throw new Error('HTTP 隧道端口必须在 1-65535 之间'); + } + } const keepPassword = !forPersist || savePassword; @@ -1423,6 +1459,8 @@ const ConnectionModal: React.FC<{ ssh: sshConfig, useProxy: effectiveUseProxy, proxy: proxyConfig, + useHttpTunnel: effectiveUseHttpTunnel, + httpTunnel: httpTunnelConfig, driver: mergedValues.driver, dsn: mergedValues.dsn, timeout: Number(mergedValues.timeout || 30), @@ -1461,6 +1499,7 @@ const ConnectionModal: React.FC<{ setUseSSL(false); setUseSSH(false); setUseProxy(false); + setUseHttpTunnel(false); form.setFieldsValue({ host: '', port: 0, @@ -1483,6 +1522,11 @@ const ConnectionModal: React.FC<{ proxyPort: 1080, proxyUser: '', proxyPassword: '', + useHttpTunnel: false, + httpTunnelHost: '', + httpTunnelPort: 8080, + httpTunnelUser: '', + httpTunnelPassword: '', mysqlTopology: 'single', redisTopology: 'single', mongoTopology: 'single', @@ -1505,6 +1549,7 @@ const ConnectionModal: React.FC<{ const defaultUser = type === 'clickhouse' ? 'default' : 'root'; const sslCapableType = supportsSSLForType(type); setUseSSL(false); + setUseHttpTunnel(false); form.setFieldsValue({ user: defaultUser, database: '', @@ -1513,6 +1558,11 @@ const ConnectionModal: React.FC<{ sslMode: sslCapableType ? 'preferred' : undefined, sslCertPath: sslCapableType ? '' : undefined, sslKeyPath: sslCapableType ? 
'' : undefined, + useHttpTunnel: false, + httpTunnelHost: '', + httpTunnelPort: 8080, + httpTunnelUser: '', + httpTunnelPassword: '', mysqlTopology: 'single', redisTopology: 'single', mongoTopology: 'single', @@ -1665,6 +1715,8 @@ const ConnectionModal: React.FC<{ useProxy: false, proxyType: 'socks5', proxyPort: 1080, + useHttpTunnel: false, + httpTunnelPort: 8080, timeout: 30, uri: '', mysqlTopology: 'single', @@ -1693,7 +1745,14 @@ const ConnectionModal: React.FC<{ } if (changed.useSSL !== undefined) setUseSSL(changed.useSSL); if (changed.useSSH !== undefined) setUseSSH(changed.useSSH); - if (changed.useProxy !== undefined) setUseProxy(changed.useProxy); + if (changed.useProxy !== undefined) { + const enabledProxy = !!changed.useProxy; + setUseProxy(enabledProxy); + if (enabledProxy && form.getFieldValue('useHttpTunnel')) { + form.setFieldValue('useHttpTunnel', false); + setUseHttpTunnel(false); + } + } if (changed.proxyType !== undefined) { const nextType = String(changed.proxyType || 'socks5').toLowerCase(); if (nextType === 'http') { @@ -1708,6 +1767,20 @@ const ConnectionModal: React.FC<{ } } } + if (changed.useHttpTunnel !== undefined) { + const enabledHttpTunnel = !!changed.useHttpTunnel; + setUseHttpTunnel(enabledHttpTunnel); + if (enabledHttpTunnel && form.getFieldValue('useProxy')) { + form.setFieldValue('useProxy', false); + setUseProxy(false); + } + if (enabledHttpTunnel) { + const currentPort = Number(form.getFieldValue('httpTunnelPort') || 0); + if (!currentPort || currentPort <= 0) { + form.setFieldValue('httpTunnelPort', 8080); + } + } + } // Type change handled by step 1, but keep sync if select changes (hidden now) if (changed.type !== undefined) setDbType(changed.type); if (changed.redisTopology !== undefined) { @@ -2194,6 +2267,35 @@ const ConnectionModal: React.FC<{
)} + + + 使用 HTTP 隧道(独立代理) + + + {useHttpTunnel && ( +
+
+ + + + + + +
+
+ + + + + + +
+ + 与“使用代理”互斥,启用后将通过 HTTP CONNECT 建立独立隧道。 + +
+ )} + { try { if (val === null) return NULL; if (typeof val === 'object') { + if (!Array.isArray(val) && !isPlainObject(val)) { + return String(val); + } const cached = objectCellPreviewCache.get(val); if (cached !== undefined) { return cached; } + const topLevelSize = Array.isArray(val) ? val.length : Object.keys(val || {}).length; + if (topLevelSize > 80) { + const summary = Array.isArray(val) ? `[Array(${topLevelSize})]` : `{Object(${topLevelSize})}`; + objectCellPreviewCache.set(val, summary); + return summary; + } try { const nextText = JSON.stringify(val); const previewText = nextText.length > TABLE_CELL_PREVIEW_MAX_CHARS ? `${nextText.slice(0, TABLE_CELL_PREVIEW_MAX_CHARS)}…` : nextText; @@ -191,6 +200,26 @@ const isCellValueEqualForDiff = (left: any, right: any): boolean => { return toFormText(left) === toFormText(right); }; +// 渲染阶段轻量比较:避免对象值在 shouldCellUpdate 中反复深度序列化导致卡顿。 +const isCellValueEqualForRender = (left: any, right: any): boolean => { + if (left === right) return true; + const leftNullish = left === null || left === undefined; + const rightNullish = right === null || right === undefined; + if (leftNullish || rightNullish) return leftNullish && rightNullish; + + const leftType = typeof left; + const rightType = typeof right; + if (leftType === 'object' || rightType === 'object') { + // 对象仅按引用比较;真正的值差异在提交保存时再做严格比对。 + return false; + } + + if (leftType === 'string' || rightType === 'string') { + return normalizeDateTimeString(String(left)) === normalizeDateTimeString(String(right)); + } + return left === right; +}; + const INLINE_EDIT_MAX_CHARS = 2000; const shouldOpenModalEditor = (val: any): boolean => { @@ -2067,7 +2096,7 @@ const DataGrid: React.FC = ({ shouldCellUpdate: (record: Item, prevRecord: Item) => { const rowKeyChanged = record?.[GONAVI_ROW_KEY] !== prevRecord?.[GONAVI_ROW_KEY]; if (rowKeyChanged) return true; - return !isCellValueEqualForDiff(record?.[key], prevRecord?.[key]); + return !isCellValueEqualForRender(record?.[key], prevRecord?.[key]); }, onHeaderCell: (column: any) => ({ width: column.width, diff --git a/frontend/src/components/DataSyncModal.tsx b/frontend/src/components/DataSyncModal.tsx index 769885a..57c4033 100644 --- a/frontend/src/components/DataSyncModal.tsx +++ b/frontend/src/components/DataSyncModal.tsx @@ -1,4 +1,4 @@ -import React, { useState, useEffect, useRef } from 'react'; +import React, { useState, useEffect, useMemo, useRef } from 'react'; import { Modal, Form, Select, Button, message, Steps, Transfer, Card, Alert, Divider, Typography, Progress, Checkbox, Table, Drawer, Tabs } from 'antd'; import { useStore } from '../store'; import { DBGetDatabases, DBGetTables, DataSync, DataSyncAnalyze, DataSyncPreview } from '../../wailsjs/go/app/App'; @@ -31,6 +31,118 @@ type TableOps = { selectedDeletePks?: string[]; }; +const quoteSqlIdent = (dbType: string, ident: string): string => { + const raw = String(ident || '').trim(); + if (!raw) return raw; + const t = String(dbType || '').toLowerCase(); + if (t === 'mysql' || t === 'mariadb' || t === 'diros' || t === 'sphinx' || t === 'clickhouse' || t === 'tdengine') { + return `\`${raw.replace(/`/g, '``')}\``; + } + if (t === 'sqlserver') { + return `[${raw.replace(/]/g, ']]')}]`; + } + return `"${raw.replace(/"/g, '""')}"`; +}; + +const quoteSqlTable = (dbType: string, tableName: string): string => { + const raw = String(tableName || '').trim(); + if (!raw) return raw; + if (!raw.includes('.')) return quoteSqlIdent(dbType, raw); + return raw + .split('.') + .map((part) => 
quoteSqlIdent(dbType, part)) + .join('.'); +}; + +const toSqlLiteral = (value: any, dbType: string): string => { + if (value === null || value === undefined) return 'NULL'; + if (typeof value === 'number') return Number.isFinite(value) ? String(value) : 'NULL'; + if (typeof value === 'bigint') return value.toString(); + if (typeof value === 'boolean') { + const t = String(dbType || '').toLowerCase(); + if (t === 'sqlserver') return value ? '1' : '0'; + return value ? 'TRUE' : 'FALSE'; + } + if (value instanceof Date) { + return `'${value.toISOString().replace(/'/g, "''")}'`; + } + if (typeof value === 'object') { + try { + return `'${JSON.stringify(value).replace(/'/g, "''")}'`; + } catch { + return `'${String(value).replace(/'/g, "''")}'`; + } + } + return `'${String(value).replace(/'/g, "''")}'`; +}; + +const buildSqlPreview = ( + previewData: any, + tableName: string, + dbType: string, + ops?: TableOps, +): { sqlText: string; statementCount: number } => { + if (!previewData || !tableName) return { sqlText: '', statementCount: 0 }; + const tableExpr = quoteSqlTable(dbType, tableName); + const pkCol = String(previewData.pkColumn || 'id'); + const statements: string[] = []; + + const insertRows = Array.isArray(previewData.inserts) ? previewData.inserts : []; + const updateRows = Array.isArray(previewData.updates) ? previewData.updates : []; + const deleteRows = Array.isArray(previewData.deletes) ? previewData.deletes : []; + + const selectedInsert = new Set((ops?.selectedInsertPks || []).map((v) => String(v))); + const selectedUpdate = new Set((ops?.selectedUpdatePks || []).map((v) => String(v))); + const selectedDelete = new Set((ops?.selectedDeletePks || []).map((v) => String(v))); + + if (ops?.insert !== false) { + insertRows.forEach((rowWrap: any) => { + const pk = String(rowWrap?.pk ?? ''); + if (selectedInsert.size > 0 && !selectedInsert.has(pk)) return; + const row = rowWrap?.row || {}; + const columns = Object.keys(row); + if (columns.length === 0) return; + const colExpr = columns.map((c) => quoteSqlIdent(dbType, c)).join(', '); + const valExpr = columns.map((c) => toSqlLiteral(row[c], dbType)).join(', '); + statements.push(`INSERT INTO ${tableExpr} (${colExpr}) VALUES (${valExpr});`); + }); + } + + if (ops?.update !== false) { + updateRows.forEach((rowWrap: any) => { + const pk = String(rowWrap?.pk ?? ''); + if (selectedUpdate.size > 0 && !selectedUpdate.has(pk)) return; + const source = rowWrap?.source || {}; + const changedColumns = Array.isArray(rowWrap?.changedColumns) + ? rowWrap.changedColumns + : Object.keys(source).filter((k) => k !== pkCol); + const setCols = changedColumns.filter((c: string) => String(c) !== pkCol); + if (setCols.length === 0) return; + const setExpr = setCols + .map((c: string) => `${quoteSqlIdent(dbType, c)} = ${toSqlLiteral(source[c], dbType)}`) + .join(', '); + statements.push( + `UPDATE ${tableExpr} SET ${setExpr} WHERE ${quoteSqlIdent(dbType, pkCol)} = ${toSqlLiteral(pk, dbType)};`, + ); + }); + } + + if (ops?.delete) { + deleteRows.forEach((rowWrap: any) => { + const pk = String(rowWrap?.pk ?? 
''); + if (selectedDelete.size > 0 && !selectedDelete.has(pk)) return; + statements.push( + `DELETE FROM ${tableExpr} WHERE ${quoteSqlIdent(dbType, pkCol)} = ${toSqlLiteral(pk, dbType)};`, + ); + }); + } + + return { + sqlText: statements.join('\n'), + statementCount: statements.length, + }; +}; + const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open, onClose }) => { const connections = useStore((state) => state.connections); const [currentStep, setCurrentStep] = useState(0); @@ -152,32 +264,38 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open, setSourceConnId(connId); setSourceDb(''); const conn = connections.find(c => c.id === connId); - if (conn) { - setLoading(true); - try { - const res = await DBGetDatabases(normalizeConnConfig(conn) as any); - if (res.success) { - setSourceDbs((res.data as any[]).map((r: any) => r.Database || r.database || r.username)); - } - } catch(e) { message.error("Failed to fetch source databases"); } - setLoading(false); - } + if (conn) { + setLoading(true); + try { + const res = await DBGetDatabases(normalizeConnConfig(conn) as any); + if (res.success) { + const dbRows = Array.isArray(res.data) ? res.data : []; + setSourceDbs(dbRows + .map((r: any) => r?.Database || r?.database || r?.username) + .filter((name: any) => typeof name === 'string' && name.trim() !== '')); + } + } catch(e) { message.error("Failed to fetch source databases"); } + setLoading(false); + } }; const handleTargetConnChange = async (connId: string) => { setTargetConnId(connId); setTargetDb(''); const conn = connections.find(c => c.id === connId); - if (conn) { - setLoading(true); - try { - const res = await DBGetDatabases(normalizeConnConfig(conn) as any); - if (res.success) { - setTargetDbs((res.data as any[]).map((r: any) => r.Database || r.database || r.username)); - } - } catch(e) { message.error("Failed to fetch target databases"); } - setLoading(false); - } + if (conn) { + setLoading(true); + try { + const res = await DBGetDatabases(normalizeConnConfig(conn) as any); + if (res.success) { + const dbRows = Array.isArray(res.data) ? res.data : []; + setTargetDbs(dbRows + .map((r: any) => r?.Database || r?.database || r?.username) + .filter((name: any) => typeof name === 'string' && name.trim() !== '')); + } + } catch(e) { message.error("Failed to fetch target databases"); } + setLoading(false); + } }; const nextToTables = async () => { @@ -189,14 +307,17 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open, try { const conn = connections.find(c => c.id === sourceConnId); if (conn) { - const config = normalizeConnConfig(conn, sourceDb); - const res = await DBGetTables(config as any, sourceDb); - if (res.success) { - // DBGetTables returns [{Table: "name"}, ...] - const tables = (res.data as any[]).map((row: any) => row.Table || row.table || row.TABLE_NAME || Object.values(row)[0]); - setAllTables(tables as string[]); - setCurrentStep(1); - } else { + const config = normalizeConnConfig(conn, sourceDb); + const res = await DBGetTables(config as any, sourceDb); + if (res.success) { + // DBGetTables returns [{Table: "name"}, ...] + const tableRows = Array.isArray(res.data) ? 
res.data : []; + const tables = tableRows + .map((row: any) => row?.Table || row?.table || row?.TABLE_NAME || Object.values(row || {})[0]) + .filter((name: any) => typeof name === 'string' && name.trim() !== ''); + setAllTables(tables as string[]); + setCurrentStep(1); + } else { message.error(res.message); } } @@ -402,6 +523,13 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open, ); }; + const previewSql = useMemo(() => { + if (!previewData || !previewTable) return { sqlText: '', statementCount: 0 }; + const targetType = String(connections.find(c => c.id === targetConnId)?.config?.type || ''); + const ops = tableOptions[previewTable] || { insert: true, update: true, delete: false }; + return buildSqlPreview(previewData, previewTable, targetType, ops); + }, [previewData, previewTable, targetConnId, connections, tableOptions]); + return ( <> void }> = ({ open, />
) + }, + { + key: 'sql', + label: `SQL(${previewSql.statementCount})`, + children: ( +
+ +
+ 共 {previewSql.statementCount} 条语句(预览数据最多 200 条/类型) + +
+
+                                        {previewSql.sqlText || '-- 当前勾选范围下无 SQL 可预览'}
+                                    
+
+ ) } ]} /> diff --git a/frontend/src/components/QueryEditor.tsx b/frontend/src/components/QueryEditor.tsx index 2e66344..69294d1 100644 --- a/frontend/src/components/QueryEditor.tsx +++ b/frontend/src/components/QueryEditor.tsx @@ -48,6 +48,7 @@ const QueryEditor: React.FC<{ tab: TabData }> = ({ tab }) => { const [editorHeight, setEditorHeight] = useState(300); const editorRef = useRef(null); const monacoRef = useRef(null); + const lastExternalQueryRef = useRef(tab.query || ''); const dragRef = useRef<{ startY: number, startHeight: number } | null>(null); const tablesRef = useRef<{dbName: string, tableName: string}[]>([]); // Store tables for autocomplete (cross-db) const allColumnsRef = useRef<{dbName: string, tableName: string, name: string, type: string}[]>([]); // Store all columns (cross-db) @@ -95,10 +96,30 @@ const QueryEditor: React.FC<{ tab: TabData }> = ({ tab }) => { connectionsRef.current = connections; }, [connections]); + const getCurrentQuery = () => { + const val = editorRef.current?.getValue?.(); + if (typeof val === 'string') return val; + return query || ''; + }; + + const syncQueryToEditor = (sql: string) => { + const next = sql || ''; + setQuery(next); + const editor = editorRef.current; + if (editor && editor.getValue?.() !== next) { + editor.setValue(next); + } + }; + // If opening a saved query, load its SQL useEffect(() => { - if (tab.query) setQuery(tab.query); - }, [tab.query]); + const incoming = tab.query || ''; + if (incoming === lastExternalQueryRef.current) { + return; + } + lastExternalQueryRef.current = incoming; + syncQueryToEditor(incoming || 'SELECT * FROM '); + }, [tab.id, tab.query]); // Fetch Database List useEffect(() => { @@ -557,8 +578,8 @@ const QueryEditor: React.FC<{ tab: TabData }> = ({ tab }) => { const handleFormat = () => { try { - const formatted = format(query, { language: 'mysql', keywordCase: sqlFormatOptions.keywordCase }); - setQuery(formatted); + const formatted = format(getCurrentQuery(), { language: 'mysql', keywordCase: sqlFormatOptions.keywordCase }); + syncQueryToEditor(formatted); } catch (e) { message.error("格式化失败: SQL 语法可能有误"); } @@ -1045,7 +1066,8 @@ const QueryEditor: React.FC<{ tab: TabData }> = ({ tab }) => { }; const handleRun = async () => { - if (!query.trim()) return; + const currentQuery = getCurrentQuery(); + if (!currentQuery.trim()) return; if (!currentDb) { message.error("请先选择数据库"); return; @@ -1086,7 +1108,7 @@ const QueryEditor: React.FC<{ tab: TabData }> = ({ tab }) => { }; try { - const rawSQL = getSelectedSQL() || query; + const rawSQL = getSelectedSQL() || currentQuery; const dbType = String((config as any).type || 'mysql'); const normalizedDbType = dbType.trim().toLowerCase(); const normalizedRawSQL = String(rawSQL || '').replace(/;/g, ';'); @@ -1367,7 +1389,7 @@ const QueryEditor: React.FC<{ tab: TabData }> = ({ tab }) => { saveQuery({ id: tab.id.startsWith('saved-') ? tab.id : `saved-${Date.now()}`, name: values.name, - sql: query, + sql: getCurrentQuery(), connectionId: currentConnectionId, dbName: currentDb || tab.dbName || '', createdAt: Date.now() @@ -1512,7 +1534,7 @@ const QueryEditor: React.FC<{ tab: TabData }> = ({ tab }) => { height="100%" defaultLanguage="sql" theme={darkMode ? 
"transparent-dark" : "transparent-light"} - value={query} + defaultValue={query} onChange={(val) => setQuery(val || '')} onMount={handleEditorDidMount} options={{ diff --git a/frontend/src/components/Sidebar.tsx b/frontend/src/components/Sidebar.tsx index 6420955..2fa3fdd 100644 --- a/frontend/src/components/Sidebar.tsx +++ b/frontend/src/components/Sidebar.tsx @@ -382,6 +382,16 @@ const Sidebar: React.FC<{ onEditConnection?: (conn: SavedConnection) => void }> password: readString(rawProxy.password, rawProxy.Password, cloned.proxyPassword, cloned.ProxyPassword), }; const hasProxyDetail = Boolean(normalizedProxy.host || normalizedProxy.user || normalizedProxy.password); + const rawHttpTunnel = (cloned.httpTunnel ?? cloned.HTTPTunnel ?? {}) as Record; + const normalizedHttpTunnel = { + host: readString(rawHttpTunnel.host, rawHttpTunnel.Host, cloned.httpTunnelHost, cloned.HttpTunnelHost), + port: readNumber(8080, rawHttpTunnel.port, rawHttpTunnel.Port, cloned.httpTunnelPort, cloned.HttpTunnelPort), + user: readString(rawHttpTunnel.user, rawHttpTunnel.User, cloned.httpTunnelUser, cloned.HttpTunnelUser), + password: readString(rawHttpTunnel.password, rawHttpTunnel.Password, cloned.httpTunnelPassword, cloned.HttpTunnelPassword), + }; + const hasHttpTunnelDetail = Boolean(normalizedHttpTunnel.host || normalizedHttpTunnel.user || normalizedHttpTunnel.password); + const normalizedUseHttpTunnel = readBool(hasHttpTunnelDetail, cloned.useHttpTunnel, cloned.UseHTTPTunnel); + const normalizedUseProxy = !normalizedUseHttpTunnel && readBool(hasProxyDetail, cloned.useProxy, cloned.UseProxy); const rawHosts = Array.isArray(cloned.hosts) ? cloned.hosts @@ -394,8 +404,10 @@ const Sidebar: React.FC<{ onEditConnection?: (conn: SavedConnection) => void }> ...(cloned as SavedConnection['config']), useSSH: readBool(hasSSHDetail, cloned.useSSH, cloned.UseSSH), ssh: normalizedSSH, - useProxy: readBool(hasProxyDetail, cloned.useProxy, cloned.UseProxy), + useProxy: normalizedUseProxy, proxy: normalizedProxy, + useHttpTunnel: normalizedUseHttpTunnel, + httpTunnel: normalizedHttpTunnel, hosts: normalizedHosts, timeout: readNumber(30, cloned.timeout, cloned.Timeout), }; @@ -645,10 +657,15 @@ const Sidebar: React.FC<{ onEditConnection?: (conn: SavedConnection) => void }> } case 'oracle': case 'dm': - if (!safeDbName) { - return [{ sql: `SELECT VIEW_NAME AS view_name FROM USER_VIEWS ORDER BY VIEW_NAME` }]; - } - return [{ sql: `SELECT OWNER AS schema_name, VIEW_NAME AS view_name FROM ALL_VIEWS WHERE OWNER = '${safeDbName.toUpperCase()}' ORDER BY VIEW_NAME` }]; + return normalizeMetadataQuerySpecs([ + { sql: `SELECT VIEW_NAME AS view_name FROM USER_VIEWS ORDER BY VIEW_NAME` }, + { sql: `SELECT OWNER AS schema_name, VIEW_NAME AS view_name FROM ALL_VIEWS WHERE OWNER = USER ORDER BY VIEW_NAME` }, + { + sql: safeDbName + ? 
`SELECT OWNER AS schema_name, VIEW_NAME AS view_name FROM ALL_VIEWS WHERE OWNER = '${safeDbName.toUpperCase()}' ORDER BY VIEW_NAME` + : '', + }, + ]); case 'sqlite': return [{ sql: `SELECT name AS view_name FROM sqlite_master WHERE type = 'view' ORDER BY name` }]; case 'duckdb': @@ -731,10 +748,15 @@ const Sidebar: React.FC<{ onEditConnection?: (conn: SavedConnection) => void }> } case 'oracle': case 'dm': - if (!safeDbName) { - return [{ sql: `SELECT OBJECT_NAME AS routine_name, OBJECT_TYPE AS routine_type FROM USER_OBJECTS WHERE OBJECT_TYPE IN ('FUNCTION','PROCEDURE') ORDER BY OBJECT_TYPE, OBJECT_NAME` }]; - } - return [{ sql: `SELECT OWNER AS schema_name, OBJECT_NAME AS routine_name, OBJECT_TYPE AS routine_type FROM ALL_OBJECTS WHERE OWNER = '${safeDbName.toUpperCase()}' AND OBJECT_TYPE IN ('FUNCTION','PROCEDURE') ORDER BY OBJECT_TYPE, OBJECT_NAME` }]; + return normalizeMetadataQuerySpecs([ + { sql: `SELECT OBJECT_NAME AS routine_name, OBJECT_TYPE AS routine_type FROM USER_OBJECTS WHERE OBJECT_TYPE IN ('FUNCTION','PROCEDURE') ORDER BY OBJECT_TYPE, OBJECT_NAME` }, + { sql: `SELECT OWNER AS schema_name, OBJECT_NAME AS routine_name, OBJECT_TYPE AS routine_type FROM ALL_OBJECTS WHERE OWNER = USER AND OBJECT_TYPE IN ('FUNCTION','PROCEDURE') ORDER BY OBJECT_TYPE, OBJECT_NAME` }, + { + sql: safeDbName + ? `SELECT OWNER AS schema_name, OBJECT_NAME AS routine_name, OBJECT_TYPE AS routine_type FROM ALL_OBJECTS WHERE OWNER = '${safeDbName.toUpperCase()}' AND OBJECT_TYPE IN ('FUNCTION','PROCEDURE') ORDER BY OBJECT_TYPE, OBJECT_NAME` + : '', + }, + ]); case 'duckdb': return [{ sql: `SELECT schema_name, function_name AS routine_name, 'FUNCTION' AS routine_type FROM duckdb_functions() WHERE internal = false AND lower(function_type) = 'macro' AND COALESCE(macro_definition, '') <> '' ORDER BY schema_name, function_name`, diff --git a/frontend/src/store.ts b/frontend/src/store.ts index 42f3fb6..e3b44f5 100644 --- a/frontend/src/store.ts +++ b/frontend/src/store.ts @@ -231,6 +231,18 @@ const sanitizeConnectionConfig = (value: unknown): ConnectionConfig => { user: toTrimmedString(proxyRaw.user), password: toTrimmedString(proxyRaw.password), }; + const httpTunnelRaw = (raw.httpTunnel && typeof raw.httpTunnel === 'object') + ? raw.httpTunnel as Record + : ((raw.HTTPTunnel && typeof raw.HTTPTunnel === 'object') ? raw.HTTPTunnel as Record : {}); + const httpTunnel = { + host: toTrimmedString(httpTunnelRaw.host ?? raw.httpTunnelHost), + port: normalizePort(httpTunnelRaw.port ?? raw.httpTunnelPort, 8080), + user: toTrimmedString(httpTunnelRaw.user ?? raw.httpTunnelUser), + password: toTrimmedString(httpTunnelRaw.password ?? raw.httpTunnelPassword), + }; + const supportsNetworkTunnel = type !== 'sqlite' && type !== 'duckdb'; + const useHttpTunnel = supportsNetworkTunnel && (raw.useHttpTunnel === true || raw.UseHTTPTunnel === true); + const useProxy = supportsNetworkTunnel && !!raw.useProxy && !useHttpTunnel; const safeConfig: ConnectionConfig & Record = { ...raw, @@ -247,8 +259,10 @@ const sanitizeConnectionConfig = (value: unknown): ConnectionConfig => { sslKeyPath: sslCapable ? toTrimmedString(raw.sslKeyPath) : '', useSSH: !!raw.useSSH, ssh, - useProxy: !!raw.useProxy, + useProxy, proxy, + useHttpTunnel, + httpTunnel, uri: toTrimmedString(raw.uri).slice(0, MAX_URI_LENGTH), hosts: sanitizeAddressList(raw.hosts), topology: raw.topology === 'replica' ? 'replica' : (raw.topology === 'cluster' ? 
'cluster' : 'single'), diff --git a/frontend/src/types.ts b/frontend/src/types.ts index 501a854..96ac6da 100644 --- a/frontend/src/types.ts +++ b/frontend/src/types.ts @@ -14,6 +14,13 @@ export interface ProxyConfig { password?: string; } +export interface HTTPTunnelConfig { + host: string; + port: number; + user?: string; + password?: string; +} + export interface ConnectionConfig { type: string; host: string; @@ -30,6 +37,8 @@ export interface ConnectionConfig { ssh?: SSHConfig; useProxy?: boolean; proxy?: ProxyConfig; + useHttpTunnel?: boolean; + httpTunnel?: HTTPTunnelConfig; driver?: string; dsn?: string; timeout?: number; diff --git a/frontend/wailsjs/go/models.ts b/frontend/wailsjs/go/models.ts index bca7b39..2de678a 100755 --- a/frontend/wailsjs/go/models.ts +++ b/frontend/wailsjs/go/models.ts @@ -48,6 +48,24 @@ export namespace connection { return a; } } + export class HTTPTunnelConfig { + host: string; + port: number; + user?: string; + password?: string; + + static createFrom(source: any = {}) { + return new HTTPTunnelConfig(source); + } + + constructor(source: any = {}) { + if ('string' === typeof source) source = JSON.parse(source); + this.host = source["host"]; + this.port = source["port"]; + this.user = source["user"]; + this.password = source["password"]; + } + } export class ProxyConfig { type: string; host: string; @@ -104,6 +122,8 @@ export namespace connection { ssh: SSHConfig; useProxy?: boolean; proxy?: ProxyConfig; + useHttpTunnel?: boolean; + httpTunnel?: HTTPTunnelConfig; driver?: string; dsn?: string; timeout?: number; @@ -142,6 +162,8 @@ export namespace connection { this.ssh = this.convertValues(source["ssh"], SSHConfig); this.useProxy = source["useProxy"]; this.proxy = this.convertValues(source["proxy"], ProxyConfig); + this.useHttpTunnel = source["useHttpTunnel"]; + this.httpTunnel = this.convertValues(source["httpTunnel"], HTTPTunnelConfig); this.driver = source["driver"]; this.dsn = source["dsn"]; this.timeout = source["timeout"]; @@ -179,6 +201,7 @@ export namespace connection { } } + export class QueryResult { success: boolean; message: string; diff --git a/internal/app/app.go b/internal/app/app.go index 789f7be..0709a27 100644 --- a/internal/app/app.go +++ b/internal/app/app.go @@ -96,6 +96,9 @@ func normalizeCacheKeyConfig(config connection.ConnectionConfig) connection.Conn if !normalized.UseProxy { normalized.Proxy = connection.ProxyConfig{} } + if !normalized.UseHTTPTunnel { + normalized.HTTPTunnel = connection.HTTPTunnelConfig{} + } if isFileDatabaseType(normalized.Type) { dsn := strings.TrimSpace(normalized.Host) @@ -124,6 +127,8 @@ func normalizeCacheKeyConfig(config connection.ConnectionConfig) connection.Conn normalized.MongoAuthMechanism = "" normalized.MongoReplicaUser = "" normalized.MongoReplicaPassword = "" + normalized.UseHTTPTunnel = false + normalized.HTTPTunnel = connection.HTTPTunnelConfig{} } return normalized @@ -303,6 +308,12 @@ func formatConnSummary(config connection.ConnectionConfig) string { b.WriteString(" 代理认证=已配置") } } + if config.UseHTTPTunnel { + b.WriteString(fmt.Sprintf(" HTTP隧道=%s:%d", strings.TrimSpace(config.HTTPTunnel.Host), config.HTTPTunnel.Port)) + if strings.TrimSpace(config.HTTPTunnel.User) != "" { + b.WriteString(" HTTP隧道认证=已配置") + } + } if config.Type == "custom" { driver := strings.TrimSpace(config.Driver) diff --git a/internal/app/db_proxy.go b/internal/app/db_proxy.go index bdf2311..e3228b6 100644 --- a/internal/app/db_proxy.go +++ b/internal/app/db_proxy.go @@ -12,8 +12,35 @@ import ( func 
resolveDialConfigWithProxy(raw connection.ConnectionConfig) (connection.ConnectionConfig, error) { config := raw + if config.UseHTTPTunnel { + if config.UseProxy { + return connection.ConnectionConfig{}, fmt.Errorf("HTTP 隧道与普通代理不能同时启用") + } + tunnelHost := strings.TrimSpace(config.HTTPTunnel.Host) + if tunnelHost == "" { + return connection.ConnectionConfig{}, fmt.Errorf("HTTP 隧道主机不能为空") + } + tunnelPort := config.HTTPTunnel.Port + if tunnelPort <= 0 { + tunnelPort = 8080 + } + if tunnelPort > 65535 { + return connection.ConnectionConfig{}, fmt.Errorf("HTTP 隧道端口无效:%d", config.HTTPTunnel.Port) + } + + config.UseProxy = true + config.Proxy = connection.ProxyConfig{ + Type: "http", + Host: tunnelHost, + Port: tunnelPort, + User: strings.TrimSpace(config.HTTPTunnel.User), + Password: config.HTTPTunnel.Password, + } + } if !config.UseProxy { config.Proxy = connection.ProxyConfig{} + config.UseHTTPTunnel = false + config.HTTPTunnel = connection.HTTPTunnelConfig{} return config, nil } @@ -22,6 +49,8 @@ func resolveDialConfigWithProxy(raw connection.ConnectionConfig) (connection.Con return connection.ConnectionConfig{}, err } config.Proxy = normalizedProxy + config.UseHTTPTunnel = false + config.HTTPTunnel = connection.HTTPTunnelConfig{} if config.UseSSH { sshPort := config.SSH.Port diff --git a/internal/app/global_proxy.go b/internal/app/global_proxy.go index 4dc8686..4361782 100644 --- a/internal/app/global_proxy.go +++ b/internal/app/global_proxy.go @@ -110,7 +110,7 @@ func (a *App) GetGlobalProxyConfig() connection.QueryResult { func applyGlobalProxyToConnection(config connection.ConnectionConfig) connection.ConnectionConfig { effective := config - if effective.UseProxy { + if effective.UseProxy || effective.UseHTTPTunnel { return effective } if isFileDatabaseType(effective.Type) { diff --git a/internal/app/methods_db.go b/internal/app/methods_db.go index d1ef4a9..d8529a9 100644 --- a/internal/app/methods_db.go +++ b/internal/app/methods_db.go @@ -547,6 +547,13 @@ func sqlSnippet(query string) string { return q[:max] + "..." 
} +func ensureNonNilSlice[T any](items []T) []T { + if items == nil { + return make([]T, 0) + } + return items +} + func (a *App) DBGetDatabases(config connection.ConnectionConfig) connection.QueryResult { runConfig := normalizeRunConfig(config, "") dbInst, err := a.getDatabase(runConfig) @@ -571,7 +578,7 @@ func (a *App) DBGetDatabases(config connection.ConnectionConfig) connection.Quer return connection.QueryResult{Success: false, Message: err.Error()} } - var resData []map[string]string + resData := make([]map[string]string, 0, len(dbs)) for _, name := range dbs { resData = append(resData, map[string]string{"Database": name}) } @@ -604,7 +611,7 @@ func (a *App) DBGetTables(config connection.ConnectionConfig, dbName string) con return connection.QueryResult{Success: false, Message: err.Error()} } - var resData []map[string]string + resData := make([]map[string]string, 0, len(tables)) for _, name := range tables { resData = append(resData, map[string]string{"Table": name}) } @@ -786,7 +793,7 @@ func (a *App) DBGetColumns(config connection.ConnectionConfig, dbName string, ta return connection.QueryResult{Success: false, Message: err.Error()} } - return connection.QueryResult{Success: true, Data: columns} + return connection.QueryResult{Success: true, Data: ensureNonNilSlice(columns)} } func (a *App) DBGetIndexes(config connection.ConnectionConfig, dbName string, tableName string) connection.QueryResult { @@ -803,7 +810,7 @@ func (a *App) DBGetIndexes(config connection.ConnectionConfig, dbName string, ta return connection.QueryResult{Success: false, Message: err.Error()} } - return connection.QueryResult{Success: true, Data: indexes} + return connection.QueryResult{Success: true, Data: ensureNonNilSlice(indexes)} } func (a *App) DBGetForeignKeys(config connection.ConnectionConfig, dbName string, tableName string) connection.QueryResult { @@ -820,7 +827,7 @@ func (a *App) DBGetForeignKeys(config connection.ConnectionConfig, dbName string return connection.QueryResult{Success: false, Message: err.Error()} } - return connection.QueryResult{Success: true, Data: fks} + return connection.QueryResult{Success: true, Data: ensureNonNilSlice(fks)} } func (a *App) DBGetTriggers(config connection.ConnectionConfig, dbName string, tableName string) connection.QueryResult { @@ -837,7 +844,7 @@ func (a *App) DBGetTriggers(config connection.ConnectionConfig, dbName string, t return connection.QueryResult{Success: false, Message: err.Error()} } - return connection.QueryResult{Success: true, Data: triggers} + return connection.QueryResult{Success: true, Data: ensureNonNilSlice(triggers)} } func (a *App) DropView(config connection.ConnectionConfig, dbName string, viewName string) connection.QueryResult { @@ -975,5 +982,5 @@ func (a *App) DBGetAllColumns(config connection.ConnectionConfig, dbName string) return connection.QueryResult{Success: false, Message: err.Error()} } - return connection.QueryResult{Success: true, Data: cols} + return connection.QueryResult{Success: true, Data: ensureNonNilSlice(cols)} } diff --git a/internal/app/methods_driver.go b/internal/app/methods_driver.go index 344233e..07a13cc 100644 --- a/internal/app/methods_driver.go +++ b/internal/app/methods_driver.go @@ -2536,6 +2536,9 @@ func installOptionalDriverAgentFromLocalPath(definition driverDefinition, filePa return installedDriverPackage{}, fmt.Errorf("导入本地驱动代理失败:%w", copyErr) } } + if validateErr := db.ValidateOptionalDriverAgentExecutable(driverType, executablePath); validateErr != nil { + return installedDriverPackage{}, 
validateErr + } hash, hashErr := hashFileSHA256(executablePath) if hashErr != nil { @@ -2793,11 +2796,15 @@ func ensureOptionalDriverAgentBinary(a *App, definition driverDefinition, execut info, err := os.Stat(executablePath) if err == nil && !info.IsDir() { - hash, hashErr := hashFileSHA256(executablePath) - if hashErr != nil { - return "", "", fmt.Errorf("读取已安装 %s 驱动代理摘要失败:%w", displayName, hashErr) + if validateErr := db.ValidateOptionalDriverAgentExecutable(driverType, executablePath); validateErr != nil { + _ = os.Remove(executablePath) + } else { + hash, hashErr := hashFileSHA256(executablePath) + if hashErr != nil { + return "", "", fmt.Errorf("读取已安装 %s 驱动代理摘要失败:%w", displayName, hashErr) + } + return fmt.Sprintf("local://existing/%s-driver-agent", driverType), hash, nil } - return fmt.Sprintf("local://existing/%s-driver-agent", driverType), hash, nil } if err == nil && info.IsDir() { return "", "", fmt.Errorf("%s 驱动代理路径被目录占用:%s", displayName, executablePath) @@ -2814,6 +2821,10 @@ func ensureOptionalDriverAgentBinary(a *App, definition driverDefinition, execut if copyErr := copyAgentBinary(sourcePath, executablePath); copyErr != nil { return "", "", fmt.Errorf("复制预置 %s 驱动代理失败:%w", displayName, copyErr) } + if validateErr := db.ValidateOptionalDriverAgentExecutable(driverType, executablePath); validateErr != nil { + _ = os.Remove(executablePath) + return "", "", validateErr + } hash, hashErr := hashFileSHA256(executablePath) if hashErr != nil { return "", "", fmt.Errorf("计算预置 %s 驱动代理摘要失败:%w", displayName, hashErr) @@ -2901,6 +2912,10 @@ func downloadOptionalDriverAgentBinary(a *App, definition driverDefinition, urlT if chmodErr := os.Chmod(executablePath, 0o755); chmodErr != nil && stdRuntime.GOOS != "windows" { return "", fmt.Errorf("设置代理权限失败:%w", chmodErr) } + if validateErr := db.ValidateOptionalDriverAgentExecutable(driverType, executablePath); validateErr != nil { + _ = os.Remove(executablePath) + return "", validateErr + } return hash, nil } @@ -3009,6 +3024,10 @@ func downloadOptionalDriverAgentFromBundle(a *App, definition driverDefinition, if chmodErr := os.Chmod(executablePath, 0o755); chmodErr != nil && stdRuntime.GOOS != "windows" { return "", "", fmt.Errorf("设置驱动代理权限失败:%w", chmodErr) } + if validateErr := db.ValidateOptionalDriverAgentExecutable(driverType, executablePath); validateErr != nil { + _ = os.Remove(executablePath) + return "", "", validateErr + } hash, err := hashFileSHA256(executablePath) if err != nil { return "", "", fmt.Errorf("计算驱动代理摘要失败:%w", err) @@ -3334,6 +3353,7 @@ func resolveOptionalDriverAgentDownloadURLs(definition driverDefinition, rawURL } func findExistingOptionalDriverAgentCandidate(definition driverDefinition, targetPath string) (string, bool) { + driverType := normalizeDriverType(definition.Type) targetAbs, _ := filepath.Abs(targetPath) candidates := resolveOptionalDriverAgentCandidatePaths(definition) for _, candidate := range candidates { @@ -3349,9 +3369,13 @@ func findExistingOptionalDriverAgentCandidate(definition driverDefinition, targe continue } info, statErr := os.Stat(absPath) - if statErr == nil && !info.IsDir() { - return absPath, true + if statErr != nil || info.IsDir() { + continue } + if validateErr := db.ValidateOptionalDriverAgentExecutable(driverType, absPath); validateErr != nil { + continue + } + return absPath, true } return "", false } diff --git a/internal/app/methods_redis.go b/internal/app/methods_redis.go index 1b626b0..3bf8956 100644 --- a/internal/app/methods_redis.go +++ b/internal/app/methods_redis.go @@ 
-23,12 +23,20 @@ var ( // getRedisClient gets or creates a Redis client from cache func (a *App) getRedisClient(config connection.ConnectionConfig) (redis.RedisClient, error) { - key := getRedisClientCacheKey(config) + effectiveConfig := applyGlobalProxyToConnection(config) + connectConfig, proxyErr := resolveDialConfigWithProxy(effectiveConfig) + if proxyErr != nil { + wrapped := wrapConnectError(effectiveConfig, proxyErr) + logger.Error(wrapped, "Redis 代理准备失败:%s", formatRedisConnSummary(effectiveConfig)) + return nil, wrapped + } + + key := getRedisClientCacheKey(connectConfig) shortKey := key if len(shortKey) > 12 { shortKey = shortKey[:12] } - logger.Infof("获取 Redis 连接:%s 缓存Key=%s", formatRedisConnSummary(config), shortKey) + logger.Infof("获取 Redis 连接:%s 缓存Key=%s", formatRedisConnSummary(effectiveConfig), shortKey) redisCacheMu.Lock() defer redisCacheMu.Unlock() @@ -47,21 +55,20 @@ func (a *App) getRedisClient(config connection.ConnectionConfig) (redis.RedisCli logger.Infof("创建 Redis 客户端实例:缓存Key=%s", shortKey) client := redis.NewRedisClient() - if err := client.Connect(config); err != nil { - logger.Error(err, "Redis 连接失败:%s 缓存Key=%s", formatRedisConnSummary(config), shortKey) - return nil, err + if err := client.Connect(connectConfig); err != nil { + wrapped := wrapConnectError(effectiveConfig, err) + logger.Error(wrapped, "Redis 连接失败:%s 缓存Key=%s", formatRedisConnSummary(effectiveConfig), shortKey) + return nil, wrapped } redisCache[key] = client - logger.Infof("Redis 连接成功并写入缓存:%s 缓存Key=%s", formatRedisConnSummary(config), shortKey) + logger.Infof("Redis 连接成功并写入缓存:%s 缓存Key=%s", formatRedisConnSummary(effectiveConfig), shortKey) return client, nil } func getRedisClientCacheKey(config connection.ConnectionConfig) string { - if !config.UseSSH { - config.SSH = connection.SSHConfig{} - } - b, _ := json.Marshal(config) + normalized := normalizeCacheKeyConfig(config) + b, _ := json.Marshal(normalized) sum := sha256.Sum256(b) return hex.EncodeToString(sum[:]) } @@ -91,6 +98,26 @@ func formatRedisConnSummary(config connection.ConnectionConfig) string { b.WriteString(" 用户=") b.WriteString(config.SSH.User) } + if config.UseProxy { + b.WriteString(" 代理=") + b.WriteString(strings.ToLower(strings.TrimSpace(config.Proxy.Type))) + b.WriteString("://") + b.WriteString(config.Proxy.Host) + b.WriteString(":") + b.WriteString(strconv.Itoa(config.Proxy.Port)) + if strings.TrimSpace(config.Proxy.User) != "" { + b.WriteString(" 代理认证=已配置") + } + } + if config.UseHTTPTunnel { + b.WriteString(" HTTP隧道=") + b.WriteString(strings.TrimSpace(config.HTTPTunnel.Host)) + b.WriteString(":") + b.WriteString(strconv.Itoa(config.HTTPTunnel.Port)) + if strings.TrimSpace(config.HTTPTunnel.User) != "" { + b.WriteString(" HTTP隧道认证=已配置") + } + } return b.String() } diff --git a/internal/connection/types.go b/internal/connection/types.go index bc88873..bac9ec7 100644 --- a/internal/connection/types.go +++ b/internal/connection/types.go @@ -18,39 +18,49 @@ type ProxyConfig struct { Password string `json:"password,omitempty"` } +// HTTPTunnelConfig holds independent HTTP CONNECT tunnel details +type HTTPTunnelConfig struct { + Host string `json:"host"` + Port int `json:"port"` + User string `json:"user,omitempty"` + Password string `json:"password,omitempty"` +} + // ConnectionConfig holds database connection details including SSH type ConnectionConfig struct { - Type string `json:"type"` - Host string `json:"host"` - Port int `json:"port"` - User string `json:"user"` - Password string `json:"password"` - SavePassword bool 
`json:"savePassword,omitempty"` // Persist password in saved connection - Database string `json:"database"` - UseSSL bool `json:"useSSL,omitempty"` // MySQL-like SSL/TLS switch - SSLMode string `json:"sslMode,omitempty"` // preferred | required | skip-verify | disable - SSLCertPath string `json:"sslCertPath,omitempty"` // TLS client certificate path (e.g., Dameng) - SSLKeyPath string `json:"sslKeyPath,omitempty"` // TLS client private key path (e.g., Dameng) - UseSSH bool `json:"useSSH"` - SSH SSHConfig `json:"ssh"` - UseProxy bool `json:"useProxy,omitempty"` - Proxy ProxyConfig `json:"proxy,omitempty"` - Driver string `json:"driver,omitempty"` // For custom connection - DSN string `json:"dsn,omitempty"` // For custom connection - Timeout int `json:"timeout,omitempty"` // Connection timeout in seconds (default: 30) - RedisDB int `json:"redisDB,omitempty"` // Redis database index (0-15) - URI string `json:"uri,omitempty"` // Connection URI for copy/paste - Hosts []string `json:"hosts,omitempty"` // Multi-host addresses: host:port - Topology string `json:"topology,omitempty"` // single | replica | cluster - MySQLReplicaUser string `json:"mysqlReplicaUser,omitempty"` // MySQL replica auth user - MySQLReplicaPassword string `json:"mysqlReplicaPassword,omitempty"` // MySQL replica auth password - ReplicaSet string `json:"replicaSet,omitempty"` // MongoDB replica set name - AuthSource string `json:"authSource,omitempty"` // MongoDB authSource - ReadPreference string `json:"readPreference,omitempty"` // MongoDB readPreference - MongoSRV bool `json:"mongoSrv,omitempty"` // MongoDB use mongodb+srv URI scheme - MongoAuthMechanism string `json:"mongoAuthMechanism,omitempty"` // MongoDB authMechanism - MongoReplicaUser string `json:"mongoReplicaUser,omitempty"` // MongoDB replica auth user - MongoReplicaPassword string `json:"mongoReplicaPassword,omitempty"` // MongoDB replica auth password + Type string `json:"type"` + Host string `json:"host"` + Port int `json:"port"` + User string `json:"user"` + Password string `json:"password"` + SavePassword bool `json:"savePassword,omitempty"` // Persist password in saved connection + Database string `json:"database"` + UseSSL bool `json:"useSSL,omitempty"` // MySQL-like SSL/TLS switch + SSLMode string `json:"sslMode,omitempty"` // preferred | required | skip-verify | disable + SSLCertPath string `json:"sslCertPath,omitempty"` // TLS client certificate path (e.g., Dameng) + SSLKeyPath string `json:"sslKeyPath,omitempty"` // TLS client private key path (e.g., Dameng) + UseSSH bool `json:"useSSH"` + SSH SSHConfig `json:"ssh"` + UseProxy bool `json:"useProxy,omitempty"` + Proxy ProxyConfig `json:"proxy,omitempty"` + UseHTTPTunnel bool `json:"useHttpTunnel,omitempty"` + HTTPTunnel HTTPTunnelConfig `json:"httpTunnel,omitempty"` + Driver string `json:"driver,omitempty"` // For custom connection + DSN string `json:"dsn,omitempty"` // For custom connection + Timeout int `json:"timeout,omitempty"` // Connection timeout in seconds (default: 30) + RedisDB int `json:"redisDB,omitempty"` // Redis database index (0-15) + URI string `json:"uri,omitempty"` // Connection URI for copy/paste + Hosts []string `json:"hosts,omitempty"` // Multi-host addresses: host:port + Topology string `json:"topology,omitempty"` // single | replica | cluster + MySQLReplicaUser string `json:"mysqlReplicaUser,omitempty"` // MySQL replica auth user + MySQLReplicaPassword string `json:"mysqlReplicaPassword,omitempty"` // MySQL replica auth password + ReplicaSet string `json:"replicaSet,omitempty"` // 
MongoDB replica set name + AuthSource string `json:"authSource,omitempty"` // MongoDB authSource + ReadPreference string `json:"readPreference,omitempty"` // MongoDB readPreference + MongoSRV bool `json:"mongoSrv,omitempty"` // MongoDB use mongodb+srv URI scheme + MongoAuthMechanism string `json:"mongoAuthMechanism,omitempty"` // MongoDB authMechanism + MongoReplicaUser string `json:"mongoReplicaUser,omitempty"` // MongoDB replica auth user + MongoReplicaPassword string `json:"mongoReplicaPassword,omitempty"` // MongoDB replica auth password } // QueryResult is the standard response format for Wails methods diff --git a/internal/db/clickhouse_impl.go b/internal/db/clickhouse_impl.go index dcf18e6..f1d5811 100644 --- a/internal/db/clickhouse_impl.go +++ b/internal/db/clickhouse_impl.go @@ -107,7 +107,9 @@ func (c *ClickHouseDB) buildClickHouseOptions(config connection.ConnectionConfig if readTimeout < minClickHouseReadTimeout { readTimeout = minClickHouseReadTimeout } + protocol := detectClickHouseProtocol(config) opts := &clickhouse.Options{ + Protocol: protocol, Addr: []string{ net.JoinHostPort(config.Host, strconv.Itoa(config.Port)), }, @@ -125,6 +127,46 @@ func (c *ClickHouseDB) buildClickHouseOptions(config connection.ConnectionConfig return opts } +func detectClickHouseProtocol(config connection.ConnectionConfig) clickhouse.Protocol { + uriText := strings.ToLower(strings.TrimSpace(config.URI)) + if strings.HasPrefix(uriText, "http://") || strings.HasPrefix(uriText, "https://") { + return clickhouse.HTTP + } + if config.Port == 8123 || config.Port == 8443 { + return clickhouse.HTTP + } + return clickhouse.Native +} + +func isClickHouseProtocolMismatch(err error) bool { + if err == nil { + return false + } + text := strings.ToLower(strings.TrimSpace(err.Error())) + if text == "" { + return false + } + return strings.Contains(text, "unexpected packet [72]") || + (strings.Contains(text, "unexpected packet") && strings.Contains(text, "handshake")) || + strings.Contains(text, "http response to https client") || + strings.Contains(text, "malformed http response") +} + +func withClickHouseProtocol(config connection.ConnectionConfig, protocol clickhouse.Protocol) connection.ConnectionConfig { + next := config + switch protocol { + case clickhouse.HTTP: + if next.Port == 0 { + next.Port = 8123 + } + default: + if next.Port == 0 { + next.Port = defaultClickHousePort + } + } + return next +} + func (c *ClickHouseDB) Connect(config connection.ConnectionConfig) error { if supported, reason := DriverRuntimeSupportStatus("clickhouse"); !supported { if strings.TrimSpace(reason) == "" { @@ -176,23 +218,41 @@ func (c *ClickHouseDB) Connect(config connection.ConnectionConfig) error { var failures []string for idx, attempt := range attempts { - c.conn = clickhouse.OpenDB(c.buildClickHouseOptions(attempt)) - if err := c.Ping(); err != nil { - failures = append(failures, fmt.Sprintf("第%d次连接验证失败: %v", idx+1, err)) - if c.conn != nil { - _ = c.conn.Close() - c.conn = nil + primaryProtocol := detectClickHouseProtocol(attempt) + protocols := []clickhouse.Protocol{primaryProtocol} + if primaryProtocol == clickhouse.Native { + protocols = append(protocols, clickhouse.HTTP) + } else { + protocols = append(protocols, clickhouse.Native) + } + + for pIdx, protocol := range protocols { + protocolConfig := withClickHouseProtocol(attempt, protocol) + c.conn = clickhouse.OpenDB(c.buildClickHouseOptions(protocolConfig)) + if err := c.Ping(); err != nil { + failures = append(failures, fmt.Sprintf("第%d次连接验证失败(protocol=%s): 
%v", idx+1, protocol.String(), err)) + if c.conn != nil { + _ = c.conn.Close() + c.conn = nil + } + if pIdx == 0 && !isClickHouseProtocolMismatch(err) { + // 首次连接不是协议误配特征,避免无谓重试次协议。 + break + } + continue } - continue + if idx > 0 { + logger.Warnf("ClickHouse SSL 优先连接失败,已回退至明文连接") + } + if pIdx > 0 { + logger.Warnf("ClickHouse 已自动切换连接协议为 %s(常见于 8123/8443 HTTP 端口)", protocol.String()) + } + return nil } - if idx > 0 { - logger.Warnf("ClickHouse SSL 优先连接失败,已回退至明文连接") - } - return nil } _ = c.Close() - return fmt.Errorf("连接建立后验证失败:%s", strings.Join(failures, ";")) + return fmt.Errorf("连接建立后验证失败(可检查 ClickHouse 端口与协议是否匹配:Native=9000/9440,HTTP=8123/8443):%s", strings.Join(failures, ";")) } func (c *ClickHouseDB) Close() error { diff --git a/internal/db/dameng_impl.go b/internal/db/dameng_impl.go index 5080540..5cceb0a 100644 --- a/internal/db/dameng_impl.go +++ b/internal/db/dameng_impl.go @@ -8,6 +8,7 @@ import ( "fmt" "net" "net/url" + "sort" "strconv" "strings" "time" @@ -204,24 +205,82 @@ func (d *DamengDB) Exec(query string) (int64, error) { } func (d *DamengDB) GetDatabases() ([]string, error) { - // DM: List Users/Schemas - data, _, err := d.Query("SELECT username FROM dba_users") - if err != nil { - // Fallback if dba_users not accessible - data, _, err = d.Query("SELECT username FROM all_users") + // 达梦将「用户/模式」作为数据库列表来源,不同权限下可见口径不同。 + // 这里采用多查询口径聚合,避免仅依赖单一视图导致“少库”。 + queries := []string{ + "SELECT USERNAME AS DATABASE_NAME FROM SYS.DBA_USERS ORDER BY USERNAME", + "SELECT USERNAME AS DATABASE_NAME FROM DBA_USERS ORDER BY USERNAME", + "SELECT USERNAME AS DATABASE_NAME FROM ALL_USERS ORDER BY USERNAME", + "SELECT USERNAME AS DATABASE_NAME FROM USER_USERS", + "SELECT DISTINCT OWNER AS DATABASE_NAME FROM ALL_TABLES ORDER BY OWNER", + } + + seen := make(map[string]struct{}) + dbs := make([]string, 0, 64) + var lastErr error + success := false + + for _, q := range queries { + data, _, err := d.Query(q) if err != nil { - return nil, err + lastErr = err + continue + } + success = true + for _, row := range data { + name := getDamengRowString(row, "DATABASE_NAME", "USERNAME", "OWNER", "SCHEMA_NAME") + if name == "" { + // 回退到第一列,兼容驱动返回列名差异。 + for _, v := range row { + text := strings.TrimSpace(fmt.Sprintf("%v", v)) + if text == "" || strings.EqualFold(text, "") { + continue + } + name = text + break + } + } + if name == "" { + continue + } + key := strings.ToUpper(name) + if _, ok := seen[key]; ok { + continue + } + seen[key] = struct{}{} + dbs = append(dbs, name) } } - var dbs []string - for _, row := range data { - if val, ok := row["USERNAME"]; ok { - dbs = append(dbs, fmt.Sprintf("%v", val)) - } + + if !success && lastErr != nil { + return nil, lastErr } + + sort.Slice(dbs, func(i, j int) bool { + return strings.ToUpper(dbs[i]) < strings.ToUpper(dbs[j]) + }) return dbs, nil } +func getDamengRowString(row map[string]interface{}, keys ...string) string { + if len(row) == 0 { + return "" + } + for _, key := range keys { + for k, v := range row { + if !strings.EqualFold(strings.TrimSpace(k), strings.TrimSpace(key)) { + continue + } + text := strings.TrimSpace(fmt.Sprintf("%v", v)) + if text == "" || strings.EqualFold(text, "") { + return "" + } + return text + } + } + return "" +} + func (d *DamengDB) GetTables(dbName string) ([]string, error) { query := fmt.Sprintf("SELECT owner, table_name FROM all_tables WHERE owner = '%s' ORDER BY table_name", strings.ToUpper(dbName)) if dbName == "" { diff --git a/internal/db/driver_agent_binary_check.go b/internal/db/driver_agent_binary_check.go new file 
mode 100644 index 0000000..762c720 --- /dev/null +++ b/internal/db/driver_agent_binary_check.go @@ -0,0 +1,74 @@ +package db + +import ( + "debug/pe" + "fmt" + "runtime" + "strings" +) + +const ( + peMachineI386 uint16 = 0x014c + peMachineAmd64 uint16 = 0x8664 + peMachineArm64 uint16 = 0xaa64 +) + +func windowsMachineLabel(machine uint16) string { + switch machine { + case peMachineI386: + return "windows-386" + case peMachineAmd64: + return "windows-amd64" + case peMachineArm64: + return "windows-arm64" + default: + return fmt.Sprintf("windows-unknown(0x%04x)", machine) + } +} + +func expectedWindowsMachineForGoArch(goarch string) (uint16, string, bool) { + switch strings.ToLower(strings.TrimSpace(goarch)) { + case "386": + return peMachineI386, "windows-386", true + case "amd64": + return peMachineAmd64, "windows-amd64", true + case "arm64": + return peMachineArm64, "windows-arm64", true + default: + return 0, "", false + } +} + +func validateWindowsExecutableMachine(pathText string) error { + file, err := pe.Open(pathText) + if err != nil { + return fmt.Errorf("无法识别为有效的 Windows 可执行文件:%w", err) + } + defer file.Close() + + expectedMachine, expectedLabel, ok := expectedWindowsMachineForGoArch(runtime.GOARCH) + if !ok { + return nil + } + actualMachine := file.FileHeader.Machine + if actualMachine != expectedMachine { + return fmt.Errorf("可执行文件架构不兼容(文件=%s,当前进程=%s)", windowsMachineLabel(actualMachine), expectedLabel) + } + return nil +} + +// ValidateOptionalDriverAgentExecutable 校验可选驱动代理二进制是否可在当前进程中执行。 +// 当前主要用于 Windows 下的 PE 架构兼容性校验,避免升级后复用到错误架构的旧代理。 +func ValidateOptionalDriverAgentExecutable(driverType string, executablePath string) error { + pathText := strings.TrimSpace(executablePath) + if pathText == "" { + return fmt.Errorf("%s 驱动代理路径为空", driverDisplayName(driverType)) + } + if runtime.GOOS != "windows" { + return nil + } + if err := validateWindowsExecutableMachine(pathText); err != nil { + return fmt.Errorf("%s 驱动代理不可用:%w", driverDisplayName(driverType), err) + } + return nil +} diff --git a/internal/db/driver_support.go b/internal/db/driver_support.go index 517a81a..db00717 100644 --- a/internal/db/driver_support.go +++ b/internal/db/driver_support.go @@ -194,6 +194,9 @@ func optionalGoDriverRuntimeReady(driverType string) (bool, string) { if statErr != nil || info.IsDir() { return false, fmt.Sprintf("%s 驱动代理缺失,请在驱动管理中重新安装启用", driverDisplayName(normalized)) } + if validateErr := ValidateOptionalDriverAgentExecutable(normalized, executablePath); validateErr != nil { + return false, fmt.Sprintf("%s;请在驱动管理中重新安装启用", validateErr.Error()) + } return true, "" } diff --git a/internal/db/driver_support_test.go b/internal/db/driver_support_test.go index 8dc5f62..002fba0 100644 --- a/internal/db/driver_support_test.go +++ b/internal/db/driver_support_test.go @@ -65,11 +65,22 @@ func TestManagedDriverRequiresInstallMarker(t *testing.T) { if err != nil { t.Fatalf("解析 mariadb 代理路径失败: %v", err) } - if err := os.WriteFile(executablePath, []byte("placeholder"), 0o755); err != nil { - t.Fatalf("写入 mariadb 代理占位文件失败: %v", err) - } if runtime.GOOS == "windows" { - _ = os.Chmod(executablePath, 0o644) + selfPath, selfErr := os.Executable() + if selfErr != nil { + t.Fatalf("获取测试进程路径失败: %v", selfErr) + } + content, readErr := os.ReadFile(selfPath) + if readErr != nil { + t.Fatalf("读取测试进程失败: %v", readErr) + } + if err := os.WriteFile(executablePath, content, 0o755); err != nil { + t.Fatalf("写入 mariadb 代理占位可执行文件失败: %v", err) + } + } else { + if err := os.WriteFile(executablePath, []byte("placeholder"), 
0o755); err != nil { + t.Fatalf("写入 mariadb 代理占位文件失败: %v", err) + } } supported, reason := DriverRuntimeSupportStatus("mariadb") diff --git a/internal/db/kingbase_impl.go b/internal/db/kingbase_impl.go index f1357a8..6dfd2e5 100644 --- a/internal/db/kingbase_impl.go +++ b/internal/db/kingbase_impl.go @@ -623,28 +623,16 @@ func (k *KingbaseDB) ApplyChanges(tableName string, changes connection.ChangeSet } defer tx.Rollback() - quoteIdent := func(name string) string { - n := strings.TrimSpace(name) - n = strings.Trim(n, "\"") - n = strings.ReplaceAll(n, "\"", "\"\"") - if n == "" { - return "\"\"" - } - return `"` + n + `"` - } - - schema := "" - table := strings.TrimSpace(tableName) - if parts := strings.SplitN(table, ".", 2); len(parts) == 2 { - schema = strings.TrimSpace(parts[0]) - table = strings.TrimSpace(parts[1]) + schema, table := splitKingbaseQualifiedTable(tableName) + if table == "" { + return fmt.Errorf("table name required") } qualifiedTable := "" if schema != "" { - qualifiedTable = fmt.Sprintf("%s.%s", quoteIdent(schema), quoteIdent(table)) + qualifiedTable = fmt.Sprintf("%s.%s", quoteKingbaseIdent(schema), quoteKingbaseIdent(table)) } else { - qualifiedTable = quoteIdent(table) + qualifiedTable = quoteKingbaseIdent(table) } // 1. Deletes @@ -654,7 +642,7 @@ func (k *KingbaseDB) ApplyChanges(tableName string, changes connection.ChangeSet idx := 0 for k, v := range pk { idx++ - wheres = append(wheres, fmt.Sprintf("%s = $%d", quoteIdent(k), idx)) + wheres = append(wheres, fmt.Sprintf("%s = $%d", quoteKingbaseIdent(k), idx)) args = append(args, v) } if len(wheres) == 0 { @@ -674,7 +662,7 @@ func (k *KingbaseDB) ApplyChanges(tableName string, changes connection.ChangeSet for k, v := range update.Values { idx++ - sets = append(sets, fmt.Sprintf("%s = $%d", quoteIdent(k), idx)) + sets = append(sets, fmt.Sprintf("%s = $%d", quoteKingbaseIdent(k), idx)) args = append(args, v) } @@ -685,7 +673,7 @@ func (k *KingbaseDB) ApplyChanges(tableName string, changes connection.ChangeSet var wheres []string for k, v := range update.Keys { idx++ - wheres = append(wheres, fmt.Sprintf("%s = $%d", quoteIdent(k), idx)) + wheres = append(wheres, fmt.Sprintf("%s = $%d", quoteKingbaseIdent(k), idx)) args = append(args, v) } @@ -708,7 +696,7 @@ func (k *KingbaseDB) ApplyChanges(tableName string, changes connection.ChangeSet for k, v := range row { idx++ - cols = append(cols, quoteIdent(k)) + cols = append(cols, quoteKingbaseIdent(k)) placeholders = append(placeholders, fmt.Sprintf("$%d", idx)) args = append(args, v) } @@ -726,6 +714,67 @@ func (k *KingbaseDB) ApplyChanges(tableName string, changes connection.ChangeSet return tx.Commit() } +func normalizeKingbaseIdentifier(raw string) string { + value := strings.TrimSpace(raw) + if value == "" { + return "" + } + + // 兼容 JSON/字符串转义后传入的标识符:\"schema\" -> "schema" + value = strings.ReplaceAll(value, `\"`, `"`) + value = strings.TrimSpace(value) + + // 兼容异常多重包裹引号(例如 ""schema""、""""schema"""")。 + // strings.Trim 会移除两端连续引号,迭代后可收敛到纯标识符。 + for i := 0; i < 4; i++ { + next := strings.TrimSpace(strings.Trim(value, `"`)) + if next == value { + break + } + value = next + } + + // 兼容其他方言可能残留的引用形式 + if len(value) >= 2 && strings.HasPrefix(value, "`") && strings.HasSuffix(value, "`") { + value = strings.TrimSpace(strings.Trim(value, "`")) + } + if len(value) >= 2 && strings.HasPrefix(value, "[") && strings.HasSuffix(value, "]") { + value = strings.TrimSpace(value[1 : len(value)-1]) + } + + return value +} + +func quoteKingbaseIdent(name string) string { + n := 
normalizeKingbaseIdentifier(name) + n = strings.ReplaceAll(n, `"`, `""`) + if n == "" { + return "\"\"" + } + return `"` + n + `"` +} + +func splitKingbaseQualifiedTable(tableName string) (schema string, table string) { + raw := strings.TrimSpace(tableName) + if raw == "" { + return "", "" + } + + if parts := strings.SplitN(raw, ".", 2); len(parts) == 2 { + schema = normalizeKingbaseIdentifier(parts[0]) + table = normalizeKingbaseIdentifier(parts[1]) + if table == "" { + return "", normalizeKingbaseIdentifier(raw) + } + if schema == "" { + return "", table + } + return schema, table + } + + return "", normalizeKingbaseIdentifier(raw) +} + func (k *KingbaseDB) GetAllColumns(dbName string) ([]connection.ColumnDefinitionWithTable, error) { // dbName 在本项目语义里是“数据库”,schema 由 table_schema 决定;这里返回全部用户 schema 的列用于查询提示。 query := ` diff --git a/internal/db/kingbase_impl_test.go b/internal/db/kingbase_impl_test.go new file mode 100644 index 0000000..eca6eaa --- /dev/null +++ b/internal/db/kingbase_impl_test.go @@ -0,0 +1,74 @@ +//go:build gonavi_full_drivers || gonavi_kingbase_driver + +package db + +import "testing" + +func TestNormalizeKingbaseIdentifier(t *testing.T) { + tests := []struct { + name string + in string + want string + }{ + {name: "plain", in: "ldf_server", want: "ldf_server"}, + {name: "quoted", in: `"ldf_server"`, want: "ldf_server"}, + {name: "double quoted", in: `""ldf_server""`, want: "ldf_server"}, + {name: "quad quoted", in: `""""ldf_server""""`, want: "ldf_server"}, + {name: "escaped quoted", in: `\"ldf_server\"`, want: "ldf_server"}, + {name: "backtick quoted", in: "`ldf_server`", want: "ldf_server"}, + {name: "bracket quoted", in: "[ldf_server]", want: "ldf_server"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := normalizeKingbaseIdentifier(tt.in); got != tt.want { + t.Fatalf("normalizeKingbaseIdentifier(%q) = %q, want %q", tt.in, got, tt.want) + } + }) + } +} + +func TestQuoteKingbaseIdent(t *testing.T) { + tests := []struct { + name string + in string + want string + }{ + {name: "plain", in: "ldf_server", want: `"ldf_server"`}, + {name: "double quoted", in: `""ldf_server""`, want: `"ldf_server"`}, + {name: "escaped quoted", in: `\"ldf_server\"`, want: `"ldf_server"`}, + {name: "with embedded quote", in: `ab"cd`, want: `"ab""cd"`}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := quoteKingbaseIdent(tt.in); got != tt.want { + t.Fatalf("quoteKingbaseIdent(%q) = %q, want %q", tt.in, got, tt.want) + } + }) + } +} + +func TestSplitKingbaseQualifiedTable(t *testing.T) { + tests := []struct { + name string + in string + wantSchema string + wantTable string + }{ + {name: "plain qualified", in: "ldf_server.t_user", wantSchema: "ldf_server", wantTable: "t_user"}, + {name: "double quoted qualified", in: `""ldf_server"".""t_user""`, wantSchema: "ldf_server", wantTable: "t_user"}, + {name: "escaped qualified", in: `\"ldf_server\".\"t_user\"`, wantSchema: "ldf_server", wantTable: "t_user"}, + {name: "bracket qualified", in: "[ldf_server].[t_user]", wantSchema: "ldf_server", wantTable: "t_user"}, + {name: "table only", in: `""t_user""`, wantSchema: "", wantTable: "t_user"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotSchema, gotTable := splitKingbaseQualifiedTable(tt.in) + if gotSchema != tt.wantSchema || gotTable != tt.wantTable { + t.Fatalf("splitKingbaseQualifiedTable(%q) = (%q, %q), want (%q, %q)", tt.in, gotSchema, gotTable, tt.wantSchema, tt.wantTable) + } + }) + } +} diff 
--git a/internal/db/optional_driver_agent_impl.go b/internal/db/optional_driver_agent_impl.go index 1b83902..2579b7c 100644 --- a/internal/db/optional_driver_agent_impl.go +++ b/internal/db/optional_driver_agent_impl.go @@ -9,8 +9,10 @@ import ( "io" "os" "os/exec" + "runtime" "strings" "sync" + "syscall" "time" "GoNavi-Wails/internal/connection" @@ -94,6 +96,9 @@ func newOptionalDriverAgentClient(driverType string, executablePath string) (*op return nil, fmt.Errorf("创建 %s 驱动代理 stderr 失败:%w", driverDisplayName(driverType), err) } if err := cmd.Start(); err != nil { + if isWindowsExecutableMachineMismatch(err) { + return nil, fmt.Errorf("启动 %s 驱动代理失败:%w(检测到驱动代理与当前系统架构不兼容,请在驱动管理中重新安装启用)", driverDisplayName(driverType), err) + } return nil, fmt.Errorf("启动 %s 驱动代理失败:%w", driverDisplayName(driverType), err) } @@ -107,6 +112,30 @@ func newOptionalDriverAgentClient(driverType string, executablePath string) (*op return client, nil } +func isWindowsExecutableMachineMismatch(err error) bool { + if err == nil || runtime.GOOS != "windows" { + return false + } + var errno syscall.Errno + if errors.As(err, &errno) && errno == syscall.Errno(216) { + return true + } + text := strings.ToLower(strings.TrimSpace(err.Error())) + if text == "" { + return false + } + if strings.Contains(text, "not compatible with the version of windows") { + return true + } + if strings.Contains(text, "win32") && strings.Contains(text, "compatible") { + return true + } + if strings.Contains(text, "不是有效的win32应用程序") || strings.Contains(text, "无法在win32模式下运行") { + return true + } + return false +} + func (c *optionalDriverAgentClient) captureStderr(stderr io.Reader) { scanner := bufio.NewScanner(stderr) buffer := make([]byte, 0, 8<<10) diff --git a/internal/db/query_value.go b/internal/db/query_value.go index 83fdf7f..fa28bd7 100644 --- a/internal/db/query_value.go +++ b/internal/db/query_value.go @@ -8,6 +8,7 @@ import ( "reflect" "strconv" "strings" + "time" "unicode" "unicode/utf8" ) @@ -86,6 +87,16 @@ func normalizeCompositeQueryValue(v interface{}) interface{} { items[i] = normalizeQueryValue(rv.Index(i).Interface()) } return items + case reflect.Struct: + // 部分驱动(如 Kingbase)会返回复杂结构体值,直接透传会导致前端渲染和比较开销激增。 + // 统一降级为可读字符串,避免对象深层序列化触发 UI 卡顿。 + if tm, ok := v.(time.Time); ok { + return tm.Format(time.RFC3339Nano) + } + if stringer, ok := v.(fmt.Stringer); ok { + return stringer.String() + } + return fmt.Sprintf("%v", v) default: return normalizeUnsafeIntegerForJS(rv, v) } diff --git a/internal/db/query_value_test.go b/internal/db/query_value_test.go index b05977e..285344e 100644 --- a/internal/db/query_value_test.go +++ b/internal/db/query_value_test.go @@ -2,7 +2,9 @@ package db import ( "encoding/json" + "fmt" "testing" + "time" ) type duckMapLike map[any]any @@ -165,3 +167,31 @@ func TestNormalizeQueryValueWithDBType_JSONNumber(t *testing.T) { }) } } + +type customStructValue struct { + Name string + Age int +} + +func (v customStructValue) String() string { + return fmt.Sprintf("%s-%d", v.Name, v.Age) +} + +func TestNormalizeQueryValueWithDBType_StructToString(t *testing.T) { + got := normalizeQueryValueWithDBType(customStructValue{Name: "alice", Age: 18}, "") + if got != "alice-18" { + t.Fatalf("结构体应降级为可读字符串,实际=%v(%T)", got, got) + } +} + +func TestNormalizeQueryValueWithDBType_TimeStructToRFC3339(t *testing.T) { + input := time.Date(2026, 3, 5, 18, 30, 15, 123456789, time.UTC) + got := normalizeQueryValueWithDBType(input, "") + text, ok := got.(string) + if !ok { + t.Fatalf("time.Time 应转为字符串,实际=%v(%T)", got, got) + } + if 
text != "2026-03-05T18:30:15.123456789Z" { + t.Fatalf("time.Time 规整值异常,实际=%s", text) + } +} From dea096d4c2630378362d7d18fc99313606fa02d7 Mon Sep 17 00:00:00 2001 From: Syngnat Date: Fri, 6 Mar 2026 14:26:08 +0800 Subject: [PATCH 18/48] =?UTF-8?q?=E2=9C=A8=20feat(release-notes):=20?= =?UTF-8?q?=E6=94=AF=E6=8C=81=E8=87=AA=E5=8A=A8=E7=94=9F=E6=88=90=20Releas?= =?UTF-8?q?e=20=E6=9B=B4=E6=96=B0=E8=AF=B4=E6=98=8E=E5=B9=B6=E5=8C=BA?= =?UTF-8?q?=E5=88=86=E9=85=8D=E7=BD=AE=E6=96=87=E4=BB=B6=E5=91=BD=E5=90=8D?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .github/release.yaml | 26 ++++++++++++++++++++++++++ .github/workflows/release.yml | 1 + README.md | 1 + README.zh-CN.md | 1 + 4 files changed, 29 insertions(+) create mode 100644 .github/release.yaml diff --git a/.github/release.yaml b/.github/release.yaml new file mode 100644 index 0000000..5c87acd --- /dev/null +++ b/.github/release.yaml @@ -0,0 +1,26 @@ +changelog: + categories: + - title: 新功能 + labels: + - feature + - enhancement + - feat + - title: 问题修复 + labels: + - bug + - fix + - title: 文档与流程 + labels: + - docs + - documentation + - ci + - workflow + - chore + - title: 重构与优化 + labels: + - refactor + - perf + - optimization + - title: 其他更新 + labels: + - '*' diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index a4e6a37..7dd9b87 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -550,5 +550,6 @@ jobs: files: release-assets/* draft: true make_latest: true + generate_release_notes: true env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/README.md b/README.md index 4ad80ac..c2ad140 100644 --- a/README.md +++ b/README.md @@ -154,6 +154,7 @@ Artifacts are generated in `build/bin`. The repository includes a release workflow. Push a `v*` tag to trigger automated build and release. +Release notes are generated automatically from merged pull requests and categorized by `.github/release.yaml`. 
Target artifacts include: - macOS (AMD64 / ARM64) diff --git a/README.zh-CN.md b/README.zh-CN.md index 3a2f2d5..6c74566 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -147,6 +147,7 @@ wails build -clean ### 跨平台发布(GitHub Actions) 仓库内置发布流水线,推送 `v*` Tag 可自动构建并发布 Release。 +Release 更新说明会基于已合并 Pull Request 自动生成,并按 `.github/release.yaml` 分类。 支持目标: - macOS (AMD64 / ARM64) From 0f843a7dcf43bc1af7fad9b82a923c0e594e6231 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 6 Mar 2026 14:31:15 +0800 Subject: [PATCH 19/48] =?UTF-8?q?=F0=9F=94=81=20chore(sync):=20=E5=9B=9E?= =?UTF-8?q?=E7=81=8C=20main=20=E5=88=B0=20dev=20(#192)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * - feat(connection,metadata,kingbase): 增强多数据源连接能力并修复金仓/达梦/Oracle/ClickHouse兼容性问题 (#188) * feat(http-tunnel): 支持独立 HTTP 隧道连接并覆盖多数据源 refs #168 * fix(kingbase-data-grid): 修复金仓打开表卡顿并降低对象渲染开销 refs #178 * fix(kingbase-transaction): 修复金仓事务提交重复引号导致语法错误 refs #176 * fix(driver-agent): 修复老版本 Win10 升级后金仓驱动代理启动失败 refs #177 * chore(ci): 新增手动触发的 macOS 测试构建工作流 * chore(ci): 允许测试工作流在当前分支自动触发 * fix(query-editor): 修复 SQL 编辑中光标随机跳到末尾 refs #185 * feat(data-sync): 增加差异 SQL 预览能力便于审核 refs #174 * fix(clickhouse-connect): 自动识别并回退 HTTP/Native 协议连接 refs #181 * fix(oracle-metadata): 修复视图与函数加载按 schema 过滤异常 refs #155 * fix(dameng-databases): 修复显示全部库时数据库列表不完整 refs #154 * fix(connection,db-list): 统一处理空列表返回并修复达梦连接测试报错 refs #157 * Release/0.5.3 (#191) --------- Co-authored-by: 辣条 <69459608+tianqijiuyun-latiao@users.noreply.github.com> Co-authored-by: Syngnat <92659908+Syngnat@users.noreply.github.com> From 6157161293108b99d32c973322f927d90260f4ac Mon Sep 17 00:00:00 2001 From: Syngnat Date: Fri, 6 Mar 2026 14:56:43 +0800 Subject: [PATCH 20/48] =?UTF-8?q?=F0=9F=90=9B=20fix(branch-sync):=20?= =?UTF-8?q?=E4=BF=AE=E5=A4=8D=20main=20=E5=9B=9E=E7=81=8C=20dev=20?= =?UTF-8?q?=E6=97=B6=20mergeable=20=E5=BC=82=E6=AD=A5=E8=AE=A1=E7=AE=97?= =?UTF-8?q?=E5=AF=BC=E8=87=B4=E6=BC=8F=E5=BC=80=E8=87=AA=E5=8A=A8=E5=90=88?= =?UTF-8?q?=E5=B9=B6?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 增加 mergeable 状态轮询,避免新建同步 PR 后立即返回 UNKNOWN - 在合并状态未稳定时输出中文告警与执行摘要 - 保持冲突分支、待计算分支与自动合并分支的处理路径清晰 --- .github/workflows/sync-main-to-dev.yml | 27 +++++++++++++++++++++++--- 1 file changed, 24 insertions(+), 3 deletions(-) diff --git a/.github/workflows/sync-main-to-dev.yml b/.github/workflows/sync-main-to-dev.yml index c46cbcf..18f047a 100644 --- a/.github/workflows/sync-main-to-dev.yml +++ b/.github/workflows/sync-main-to-dev.yml @@ -106,9 +106,28 @@ jobs: run: | set -euo pipefail pr_number="${{ steps.sync_pr.outputs.pr_number }}" - mergeable="$(gh pr view "${pr_number}" --json mergeable --jq '.mergeable')" - merge_state_status="$(gh pr view "${pr_number}" --json mergeStateStatus --jq '.mergeStateStatus')" - echo "PR #${pr_number} 合并状态:mergeable=${mergeable}, mergeStateStatus=${merge_state_status}" + mergeable="UNKNOWN" + merge_state_status="UNKNOWN" + + for attempt in 1 2 3 4 5 6; do + mergeable="$(gh pr view "${pr_number}" --json mergeable --jq '.mergeable')" + merge_state_status="$(gh pr view "${pr_number}" --json mergeStateStatus --jq '.mergeStateStatus')" + echo "第 ${attempt} 次检查 PR #${pr_number} 合并状态:mergeable=${mergeable}, mergeStateStatus=${merge_state_status}" + if [ "${mergeable}" != "UNKNOWN" ]; then + break + fi + if [ "${attempt}" -lt 6 ]; then + echo "GitHub 仍在计算可合并状态,3 秒后重试..." 
+ sleep 3 + fi + done + + if [ "${mergeable}" = "UNKNOWN" ]; then + echo "::warning::PR 合并状态仍在计算中,本次未开启自动合并,可稍后重跑 workflow 或手动开启。" + echo "merge_state_pending=true" >> "$GITHUB_OUTPUT" + else + echo "merge_state_pending=false" >> "$GITHUB_OUTPUT" + fi echo "mergeable=${mergeable}" >> "$GITHUB_OUTPUT" echo "merge_state_status=${merge_state_status}" >> "$GITHUB_OUTPUT" @@ -151,6 +170,8 @@ jobs: echo "- 合并状态详情:${{ steps.merge_state.outputs.merge_state_status }}" if [ "${{ steps.merge_state.outputs.mergeable }}" = "CONFLICTING" ]; then echo "- 结论:检测到冲突,需要手动处理后合并" + elif [ "${{ steps.merge_state.outputs.merge_state_pending }}" = "true" ]; then + echo "- 结论:GitHub 仍在计算合并状态,本次未开启自动合并;可稍后重跑 workflow 或手动开启 auto-merge" elif [ "${{ steps.auto_merge.outputs.result }}" = "enabled" ]; then echo "- 结论:已启用自动合并(满足保护规则后将自动入 dev)" else From 1c050aefd0eb6fa4390657374c84f61a4546c1e0 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 6 Mar 2026 17:36:28 +0800 Subject: [PATCH 21/48] =?UTF-8?q?=F0=9F=94=81=20chore(sync):=20=E5=9B=9E?= =?UTF-8?q?=E7=81=8C=20main=20=E5=88=B0=20dev=20(#195)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * - feat(connection,metadata,kingbase): 增强多数据源连接能力并修复金仓/达梦/Oracle/ClickHouse兼容性问题 (#188) * feat(http-tunnel): 支持独立 HTTP 隧道连接并覆盖多数据源 refs #168 * fix(kingbase-data-grid): 修复金仓打开表卡顿并降低对象渲染开销 refs #178 * fix(kingbase-transaction): 修复金仓事务提交重复引号导致语法错误 refs #176 * fix(driver-agent): 修复老版本 Win10 升级后金仓驱动代理启动失败 refs #177 * chore(ci): 新增手动触发的 macOS 测试构建工作流 * chore(ci): 允许测试工作流在当前分支自动触发 * fix(query-editor): 修复 SQL 编辑中光标随机跳到末尾 refs #185 * feat(data-sync): 增加差异 SQL 预览能力便于审核 refs #174 * fix(clickhouse-connect): 自动识别并回退 HTTP/Native 协议连接 refs #181 * fix(oracle-metadata): 修复视图与函数加载按 schema 过滤异常 refs #155 * fix(dameng-databases): 修复显示全部库时数据库列表不完整 refs #154 * fix(connection,db-list): 统一处理空列表返回并修复达梦连接测试报错 refs #157 * Release/0.5.3 (#191) * - chore(ci): 新增全平台测试包手动构建工作流 tianqijiuyun-latiao 今天 下午4:26 (#194) * feat(http-tunnel): 支持独立 HTTP 隧道连接并覆盖多数据源 refs #168 * fix(kingbase-data-grid): 修复金仓打开表卡顿并降低对象渲染开销 refs #178 * fix(kingbase-transaction): 修复金仓事务提交重复引号导致语法错误 refs #176 * fix(driver-agent): 修复老版本 Win10 升级后金仓驱动代理启动失败 refs #177 * chore(ci): 新增手动触发的 macOS 测试构建工作流 * chore(ci): 允许测试工作流在当前分支自动触发 * fix(query-editor): 修复 SQL 编辑中光标随机跳到末尾 refs #185 * feat(data-sync): 增加差异 SQL 预览能力便于审核 refs #174 * fix(clickhouse-connect): 自动识别并回退 HTTP/Native 协议连接 refs #181 * fix(oracle-metadata): 修复视图与函数加载按 schema 过滤异常 refs #155 * fix(dameng-databases): 修复显示全部库时数据库列表不完整 refs #154 * fix(connection,db-list): 统一处理空列表返回并修复达梦连接测试报错 refs #157 * fix(kingbase): 补齐主键识别并优化宽表卡顿 refs #176 refs #178 * fix(query-execution): 支持带前置注释的读查询结果识别 * chore(ci): 新增全平台测试包手动构建工作流 --------- Co-authored-by: 辣条 <69459608+tianqijiuyun-latiao@users.noreply.github.com> Co-authored-by: Syngnat <92659908+Syngnat@users.noreply.github.com> --- .../workflows/test-build-all-platforms.yml | 342 ++++++++++++++++++ frontend/src/components/DataGrid.tsx | 9 +- internal/app/methods_db.go | 13 +- internal/app/sql_sanitize.go | 60 +++ internal/db/kingbase_impl.go | 82 ++++- 5 files changed, 482 insertions(+), 24 deletions(-) create mode 100644 .github/workflows/test-build-all-platforms.yml diff --git a/.github/workflows/test-build-all-platforms.yml b/.github/workflows/test-build-all-platforms.yml new file mode 100644 index 0000000..3fccb8d --- /dev/null +++ b/.github/workflows/test-build-all-platforms.yml @@ -0,0 +1,342 @@ +name: Test Build All Platforms (Manual) + 
+on: + workflow_dispatch: + inputs: + build_label: + description: "测试包标识(仅用于文件名)" + required: false + default: "test" + +permissions: + contents: read + +concurrency: + group: test-build-${{ github.ref }} + cancel-in-progress: false + +jobs: + build: + name: Build ${{ matrix.platform }} + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + include: + - os: macos-latest + platform: darwin/amd64 + os_name: MacOS + arch_name: Amd64 + build_name: gonavi-test-darwin-amd64 + wails_tags: "" + artifact_suffix: "" + build_optional_agents: true + linux_webkit: "" + - os: macos-latest + platform: darwin/arm64 + os_name: MacOS + arch_name: Arm64 + build_name: gonavi-test-darwin-arm64 + wails_tags: "" + artifact_suffix: "" + build_optional_agents: true + linux_webkit: "" + - os: windows-latest + platform: windows/amd64 + os_name: Windows + arch_name: Amd64 + build_name: gonavi-test-windows-amd64 + wails_tags: "" + artifact_suffix: "" + build_optional_agents: true + linux_webkit: "" + - os: windows-latest + platform: windows/arm64 + os_name: Windows + arch_name: Arm64 + build_name: gonavi-test-windows-arm64 + wails_tags: "" + artifact_suffix: "" + build_optional_agents: true + linux_webkit: "" + - os: ubuntu-22.04 + platform: linux/amd64 + os_name: Linux + arch_name: Amd64 + build_name: gonavi-test-linux-amd64 + wails_tags: "" + artifact_suffix: "" + build_optional_agents: true + linux_webkit: "4.0" + - os: ubuntu-24.04 + platform: linux/amd64 + os_name: Linux + arch_name: Amd64 + build_name: gonavi-test-linux-amd64-webkit41 + wails_tags: "webkit2_41" + artifact_suffix: "-WebKit41" + build_optional_agents: false + linux_webkit: "4.1" + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Setup Go + uses: actions/setup-go@v5 + with: + go-version: '1.24' + check-latest: true + + - name: Setup Node + uses: actions/setup-node@v4 + with: + node-version: '20' + + - name: Install Linux Dependencies + if: contains(matrix.platform, 'linux') + run: | + sudo apt-get update + sudo apt-get install -y libgtk-3-dev + + if [ "${{ matrix.linux_webkit }}" = "4.1" ]; then + sudo apt-get install -y libwebkit2gtk-4.1-dev libsoup-3.0-dev + else + sudo apt-get install -y libwebkit2gtk-4.0-dev + fi + + sudo apt-get install -y libfuse2 || sudo apt-get install -y libfuse2t64 || true + + LINUXDEPLOY_URL="https://github.com/linuxdeploy/linuxdeploy/releases/download/continuous/linuxdeploy-x86_64.AppImage" + PLUGIN_URL="https://github.com/linuxdeploy/linuxdeploy-plugin-gtk/releases/download/continuous/linuxdeploy-plugin-gtk-x86_64.AppImage" + + wget --retry-connrefused --waitretry=1 --read-timeout=20 --timeout=15 --tries=3 -O /tmp/linuxdeploy "$LINUXDEPLOY_URL" || { + echo "skip-appimage=true" >> "$GITHUB_ENV" + } + wget --retry-connrefused --waitretry=1 --read-timeout=20 --timeout=15 --tries=3 -O /tmp/linuxdeploy-plugin-gtk "$PLUGIN_URL" || { + echo "skip-appimage=true" >> "$GITHUB_ENV" + } + + if [ "${skip-appimage:-false}" != "true" ]; then + chmod +x /tmp/linuxdeploy /tmp/linuxdeploy-plugin-gtk + fi + + - name: Install Wails + run: go install github.com/wailsapp/wails/v2/cmd/wails@v2.11.0 + + - name: Setup MSYS2 Toolchain For DuckDB (Windows AMD64) + id: msys2_duckdb + if: ${{ matrix.build_optional_agents && matrix.platform == 'windows/amd64' }} + continue-on-error: true + uses: msys2/setup-msys2@v2 + with: + msystem: UCRT64 + update: true + install: >- + mingw-w64-ucrt-x86_64-gcc + + - name: Configure DuckDB CGO Toolchain (Windows AMD64) + if: ${{ matrix.build_optional_agents && 
matrix.platform == 'windows/amd64' }} + shell: pwsh + run: | + function Find-MingwBin([string[]]$candidates) { + foreach ($bin in $candidates) { + if ([string]::IsNullOrWhiteSpace($bin)) { + continue + } + $gcc = Join-Path $bin 'gcc.exe' + $gxx = Join-Path $bin 'g++.exe' + if ((Test-Path $gcc) -and (Test-Path $gxx)) { + return $bin + } + } + return $null + } + + $msys2Location = "${{ steps.msys2_duckdb.outputs['msys2-location'] }}" + $candidateBins = @() + if (-not [string]::IsNullOrWhiteSpace($msys2Location)) { + $candidateBins += Join-Path $msys2Location 'ucrt64\bin' + } + $candidateBins += @( + 'C:\msys64\ucrt64\bin', + 'D:\a\_temp\msys64\ucrt64\bin' + ) + $candidateBins = @($candidateBins | Select-Object -Unique) + + $mingwBin = Find-MingwBin $candidateBins + if (-not $mingwBin) { + Write-Error "❌ 未找到可用的 DuckDB UCRT64 编译器。" + exit 1 + } + + $gcc = Join-Path $mingwBin 'gcc.exe' + $gxx = Join-Path $mingwBin 'g++.exe' + "$mingwBin" | Out-File -FilePath $env:GITHUB_PATH -Append -Encoding utf8 + "CC=$gcc" | Out-File -FilePath $env:GITHUB_ENV -Append -Encoding utf8 + "CXX=$gxx" | Out-File -FilePath $env:GITHUB_ENV -Append -Encoding utf8 + + - name: Build App + shell: bash + run: | + set -euo pipefail + BUILD_LABEL="${{ inputs.build_label }}" + if [ -z "$BUILD_LABEL" ]; then + BUILD_LABEL="test" + fi + APP_VERSION="${BUILD_LABEL}-${GITHUB_RUN_NUMBER}" + if [ -n "${{ matrix.wails_tags }}" ]; then + wails build -platform "${{ matrix.platform }}" -clean -o "${{ matrix.build_name }}" -tags "${{ matrix.wails_tags }}" -ldflags "-s -w -X GoNavi-Wails/internal/app.AppVersion=${APP_VERSION}" + else + wails build -platform "${{ matrix.platform }}" -clean -o "${{ matrix.build_name }}" -ldflags "-s -w -X GoNavi-Wails/internal/app.AppVersion=${APP_VERSION}" + fi + + - name: Build Optional Driver Agents + if: ${{ matrix.build_optional_agents }} + shell: bash + run: | + set -euo pipefail + TARGET_PLATFORM="${{ matrix.platform }}" + GOOS="${TARGET_PLATFORM%%/*}" + GOARCH="${TARGET_PLATFORM##*/}" + DRIVERS=(mariadb doris sphinx sqlserver sqlite duckdb dameng kingbase highgo vastbase mongodb tdengine clickhouse) + OUTDIR="drivers/${{ matrix.os_name }}" + mkdir -p "$OUTDIR" + + for DRIVER in "${DRIVERS[@]}"; do + BUILD_DRIVER="$DRIVER" + if [ "$DRIVER" = "doris" ]; then + BUILD_DRIVER="diros" + fi + if [ "$DRIVER" = "duckdb" ] && [ "$GOOS" = "windows" ] && [ "$GOARCH" != "amd64" ]; then + echo "跳过 DuckDB driver: ${GOOS}/${GOARCH}" + continue + fi + TAG="gonavi_${BUILD_DRIVER}_driver" + OUTPUT="${DRIVER}-driver-agent-${GOOS}-${GOARCH}" + if [ "$GOOS" = "windows" ]; then + OUTPUT="${OUTPUT}.exe" + fi + OUTPUT_PATH="${OUTDIR}/${OUTPUT}" + if [ "$DRIVER" = "duckdb" ]; then + CGO_ENABLED=1 GOOS="$GOOS" GOARCH="$GOARCH" go build -tags "$TAG" -trimpath -ldflags "-s -w" -o "$OUTPUT_PATH" ./cmd/optional-driver-agent + else + CGO_ENABLED=0 GOOS="$GOOS" GOARCH="$GOARCH" go build -tags "$TAG" -trimpath -ldflags "-s -w" -o "$OUTPUT_PATH" ./cmd/optional-driver-agent + fi + done + + - name: Package macOS + if: contains(matrix.platform, 'darwin') + shell: bash + run: | + set -euo pipefail + brew install create-dmg + LABEL="${{ inputs.build_label }}" + if [ -z "$LABEL" ]; then + LABEL="test" + fi + cd build/bin + APP_PATH=$(find . 
-maxdepth 1 -name "*.app" | head -n 1) + if [ -z "$APP_PATH" ]; then + echo "未找到 .app 应用包" + exit 1 + fi + APP_NAME=$(basename "$APP_PATH") + codesign --force --deep --sign - "$APP_NAME" + ZIP_NAME="GoNavi-${LABEL}-${{ matrix.os_name }}-${{ matrix.arch_name }}-run${GITHUB_RUN_NUMBER}.zip" + DMG_NAME="GoNavi-${LABEL}-${{ matrix.os_name }}-${{ matrix.arch_name }}-run${GITHUB_RUN_NUMBER}.dmg" + mkdir -p ../../artifacts + ditto -c -k --sequesterRsrc --keepParent "$APP_NAME" "../../artifacts/$ZIP_NAME" + create-dmg \ + --volname "GoNavi Test Installer" \ + --window-pos 200 120 \ + --window-size 800 400 \ + --icon-size 100 \ + --icon "$APP_NAME" 200 190 \ + --hide-extension "$APP_NAME" \ + --app-drop-link 600 185 \ + "$DMG_NAME" \ + "$APP_NAME" + mv "$DMG_NAME" "../../artifacts/$DMG_NAME" + shasum -a 256 "../../artifacts/$ZIP_NAME" > "../../artifacts/$ZIP_NAME.sha256" + shasum -a 256 "../../artifacts/$DMG_NAME" > "../../artifacts/$DMG_NAME.sha256" + + - name: Package Windows + if: contains(matrix.platform, 'windows') + shell: pwsh + run: | + $label = "${{ inputs.build_label }}" + if ([string]::IsNullOrWhiteSpace($label)) { $label = 'test' } + Set-Location build/bin + $target = "${{ matrix.build_name }}" + $finalExeName = "GoNavi-$label-${{ matrix.os_name }}-${{ matrix.arch_name }}-run$env:GITHUB_RUN_NUMBER.exe" + $finalZipName = "GoNavi-$label-${{ matrix.os_name }}-${{ matrix.arch_name }}-run$env:GITHUB_RUN_NUMBER.zip" + if (Test-Path "$target.exe") { + $finalExe = "$target.exe" + } elseif (Test-Path "$target") { + Rename-Item -Path "$target" -NewName "$target.exe" + $finalExe = "$target.exe" + } else { + Write-Error "未找到构建产物 '$target'" + exit 1 + } + New-Item -ItemType Directory -Force -Path ..\..\artifacts | Out-Null + Copy-Item -LiteralPath $finalExe -Destination "..\..\artifacts\$finalExeName" -Force + Compress-Archive -LiteralPath $finalExe -DestinationPath "..\..\artifacts\$finalZipName" -Force + Get-FileHash "..\..\artifacts\$finalExeName" -Algorithm SHA256 | ForEach-Object { "{0} *{1}" -f $_.Hash.ToLower(), (Split-Path $_.Path -Leaf) } | Out-File "..\..\artifacts\$finalExeName.sha256" -Encoding ascii + Get-FileHash "..\..\artifacts\$finalZipName" -Algorithm SHA256 | ForEach-Object { "{0} *{1}" -f $_.Hash.ToLower(), (Split-Path $_.Path -Leaf) } | Out-File "..\..\artifacts\$finalZipName.sha256" -Encoding ascii + + - name: Package Linux + if: contains(matrix.platform, 'linux') + shell: bash + run: | + set -euo pipefail + LABEL="${{ inputs.build_label }}" + if [ -z "$LABEL" ]; then + LABEL="test" + fi + cd build/bin + TARGET="${{ matrix.build_name }}" + TAR_NAME="GoNavi-${LABEL}-${{ matrix.os_name }}-${{ matrix.arch_name }}${{ matrix.artifact_suffix }}-run${GITHUB_RUN_NUMBER}.tar.gz" + APPIMAGE_NAME="GoNavi-${LABEL}-${{ matrix.os_name }}-${{ matrix.arch_name }}${{ matrix.artifact_suffix }}-run${GITHUB_RUN_NUMBER}.AppImage" + mkdir -p ../../artifacts + + if [ ! 
-f "$TARGET" ]; then + echo "未找到构建产物 '$TARGET'" + exit 1 + fi + chmod +x "$TARGET" + tar -czvf "../../artifacts/$TAR_NAME" "$TARGET" + sha256sum "../../artifacts/$TAR_NAME" > "../../artifacts/$TAR_NAME.sha256" + + if [ "${skip-appimage:-false}" = "true" ]; then + echo "跳过 AppImage 打包" + exit 0 + fi + + mkdir -p AppDir/usr/bin AppDir/usr/share/applications AppDir/usr/share/icons/hicolor/256x256/apps + cp "$TARGET" AppDir/usr/bin/gonavi + printf '%s\n' '[Desktop Entry]' 'Name=GoNavi' 'Exec=gonavi' 'Icon=gonavi' 'Type=Application' 'Categories=Development;Database;' 'Comment=Database Management Tool' > AppDir/usr/share/applications/gonavi.desktop + cp AppDir/usr/share/applications/gonavi.desktop AppDir/gonavi.desktop + if [ -f "../../build/appicon.png" ]; then + cp "../../build/appicon.png" AppDir/usr/share/icons/hicolor/256x256/apps/gonavi.png + cp "../../build/appicon.png" AppDir/gonavi.png + else + touch AppDir/gonavi.png + cp AppDir/gonavi.png AppDir/usr/share/icons/hicolor/256x256/apps/gonavi.png + fi + export DEPLOY_GTK_VERSION=3 + /tmp/linuxdeploy --appdir AppDir --plugin gtk --output appimage || exit 0 + mv GoNavi*.AppImage "$APPIMAGE_NAME" 2>/dev/null || exit 0 + mv "$APPIMAGE_NAME" "../../artifacts/$APPIMAGE_NAME" + sha256sum "../../artifacts/$APPIMAGE_NAME" > "../../artifacts/$APPIMAGE_NAME.sha256" + + - name: Upload Artifact + uses: actions/upload-artifact@v4 + with: + name: test-build-${{ matrix.os_name }}-${{ matrix.arch_name }}-run${{ github.run_number }} + path: | + artifacts/* + drivers/** + if-no-files-found: error + retention-days: 7 diff --git a/frontend/src/components/DataGrid.tsx b/frontend/src/components/DataGrid.tsx index 981173c..10c8b87 100644 --- a/frontend/src/components/DataGrid.tsx +++ b/frontend/src/components/DataGrid.tsx @@ -2074,9 +2074,14 @@ const DataGrid: React.FC = ({ const estimatedVisibleCellCount = mergedDisplayData.length * Math.max(columnNames.length, 1); const enableLargeResultOptimizedEditing = - viewMode === 'table' && (mergedDisplayData.length >= 60 || estimatedVisibleCellCount >= 4000); + viewMode === 'table' && ( + mergedDisplayData.length >= 60 || + estimatedVisibleCellCount >= 1600 || + columnNames.length >= 36 || + (isMacLike && columnNames.length >= 24) + ); const enableVirtual = enableLargeResultOptimizedEditing; - const enableInlineEditableCell = canModifyData; + const enableInlineEditableCell = canModifyData && !enableLargeResultOptimizedEditing; const columns = useMemo(() => { return columnNames.map(key => ({ diff --git a/internal/app/methods_db.go b/internal/app/methods_db.go index d8529a9..24119e1 100644 --- a/internal/app/methods_db.go +++ b/internal/app/methods_db.go @@ -416,12 +416,7 @@ func (a *App) DBQueryWithCancel(config connection.ConnectionConfig, dbName strin a.queryMu.Unlock() }() - lowerQuery := strings.TrimSpace(strings.ToLower(query)) - isReadQuery := strings.HasPrefix(lowerQuery, "select") || strings.HasPrefix(lowerQuery, "show") || strings.HasPrefix(lowerQuery, "describe") || strings.HasPrefix(lowerQuery, "explain") - // MongoDB JSON 命令中的 find/count/aggregate 也属于读查询 - if !isReadQuery && strings.ToLower(strings.TrimSpace(runConfig.Type)) == "mongodb" && strings.HasPrefix(strings.TrimSpace(query), "{") { - isReadQuery = true - } + isReadQuery := isReadOnlySQLQuery(runConfig.Type, query) runReadQuery := func(inst db.Database) ([]map[string]interface{}, []string, error) { if q, ok := inst.(interface { @@ -500,11 +495,7 @@ func (a *App) DBQueryIsolated(config connection.ConnectionConfig, dbName string, ctx, cancel := 
utils.ContextWithTimeout(time.Duration(timeoutSeconds) * time.Second) defer cancel() - lowerQuery := strings.TrimSpace(strings.ToLower(query)) - isReadQuery := strings.HasPrefix(lowerQuery, "select") || strings.HasPrefix(lowerQuery, "show") || strings.HasPrefix(lowerQuery, "describe") || strings.HasPrefix(lowerQuery, "explain") - if !isReadQuery && strings.ToLower(strings.TrimSpace(runConfig.Type)) == "mongodb" && strings.HasPrefix(strings.TrimSpace(query), "{") { - isReadQuery = true - } + isReadQuery := isReadOnlySQLQuery(runConfig.Type, query) if isReadQuery { var data []map[string]interface{} diff --git a/internal/app/sql_sanitize.go b/internal/app/sql_sanitize.go index 99c5335..2990bcc 100644 --- a/internal/app/sql_sanitize.go +++ b/internal/app/sql_sanitize.go @@ -5,6 +5,66 @@ import ( "unicode" ) +func leadingSQLKeyword(query string) string { + text := strings.TrimSpace(query) + for len(text) > 0 { + trimmed := strings.TrimLeft(text, " \t\r\n") + if trimmed == "" { + return "" + } + text = trimmed + + switch { + case strings.HasPrefix(text, "--"): + if idx := strings.IndexByte(text, '\n'); idx >= 0 { + text = text[idx+1:] + continue + } + return "" + case strings.HasPrefix(text, "#"): + if idx := strings.IndexByte(text, '\n'); idx >= 0 { + text = text[idx+1:] + continue + } + return "" + case strings.HasPrefix(text, "/*"): + if idx := strings.Index(text, "*/"); idx >= 0 { + text = text[idx+2:] + continue + } + return "" + } + break + } + + if text == "" { + return "" + } + for i, r := range text { + if unicode.IsLetter(r) || unicode.IsDigit(r) || r == '_' { + continue + } + if i == 0 { + return "" + } + return strings.ToLower(text[:i]) + } + return strings.ToLower(text) +} + +func isReadOnlySQLQuery(dbType string, query string) bool { + if strings.ToLower(strings.TrimSpace(dbType)) == "mongodb" && strings.HasPrefix(strings.TrimSpace(query), "{") { + return true + } + + switch leadingSQLKeyword(query) { + case "select", "with", "show", "describe", "desc", "explain", "pragma", "values": + return true + default: + return false + } +} + func sanitizeSQLForPgLike(dbType string, query string) string { switch strings.ToLower(strings.TrimSpace(dbType)) { case "postgres", "kingbase", "highgo", "vastbase": diff --git a/internal/db/kingbase_impl.go b/internal/db/kingbase_impl.go index 6dfd2e5..619455d 100644 --- a/internal/db/kingbase_impl.go +++ b/internal/db/kingbase_impl.go @@ -305,10 +305,30 @@ func (k *KingbaseDB) GetColumns(dbName, tableName string) ([]connection.ColumnDe return strings.ReplaceAll(s, "'", "''") } - query := fmt.Sprintf(`SELECT column_name, data_type, is_nullable, column_default - FROM information_schema.columns - WHERE table_schema = '%s' AND table_name = '%s' - ORDER BY ordinal_position`, esc(schema), esc(table)) + query := fmt.Sprintf(` +SELECT + a.attname AS column_name, + pg_catalog.format_type(a.atttypid, a.atttypmod) AS data_type, + CASE WHEN a.attnotnull THEN 'NO' ELSE 'YES' END AS is_nullable, + pg_get_expr(ad.adbin, ad.adrelid) AS column_default, + col_description(a.attrelid, a.attnum) AS comment, + CASE WHEN pk.attname IS NOT NULL THEN 'PRI' ELSE '' END AS column_key +FROM pg_class c +JOIN pg_namespace n ON n.oid = c.relnamespace +JOIN pg_attribute a ON a.attrelid = c.oid +LEFT JOIN pg_attrdef ad ON ad.adrelid = c.oid AND ad.adnum = a.attnum +LEFT JOIN ( + SELECT i.indrelid, a3.attname + FROM pg_index i + JOIN pg_attribute a3 ON a3.attrelid = i.indrelid AND a3.attnum = ANY(i.indkey) + WHERE i.indisprimary +) pk ON pk.indrelid = c.oid AND pk.attname = a.attname 
+WHERE c.relkind IN ('r', 'p') + AND n.nspname = '%s' + AND c.relname = '%s' + AND a.attnum > 0 + AND NOT a.attisdropped +ORDER BY a.attnum`, esc(schema), esc(table)) data, _, err := k.Query(query) if err != nil { @@ -321,11 +341,21 @@ func (k *KingbaseDB) GetColumns(dbName, tableName string) ([]connection.ColumnDe Name: fmt.Sprintf("%v", row["column_name"]), Type: fmt.Sprintf("%v", row["data_type"]), Nullable: fmt.Sprintf("%v", row["is_nullable"]), + Key: fmt.Sprintf("%v", row["column_key"]), + Extra: "", + Comment: "", } if row["column_default"] != nil { def := fmt.Sprintf("%v", row["column_default"]) col.Default = &def + if strings.HasPrefix(strings.ToLower(strings.TrimSpace(def)), "nextval(") { + col.Extra = "auto_increment" + } + } + + if v, ok := row["comment"]; ok && v != nil { + col.Comment = fmt.Sprintf("%v", v) } columns = append(columns, col) @@ -347,10 +377,30 @@ func (k *KingbaseDB) getColumnsWithCurrentSchema(tableName string) ([]connection } // 使用 current_schema() 获取当前schema - query := fmt.Sprintf(`SELECT column_name, data_type, is_nullable, column_default - FROM information_schema.columns - WHERE table_schema = current_schema() AND table_name = '%s' - ORDER BY ordinal_position`, esc(table)) + query := fmt.Sprintf(` +SELECT + a.attname AS column_name, + pg_catalog.format_type(a.atttypid, a.atttypmod) AS data_type, + CASE WHEN a.attnotnull THEN 'NO' ELSE 'YES' END AS is_nullable, + pg_get_expr(ad.adbin, ad.adrelid) AS column_default, + col_description(a.attrelid, a.attnum) AS comment, + CASE WHEN pk.attname IS NOT NULL THEN 'PRI' ELSE '' END AS column_key +FROM pg_class c +JOIN pg_namespace n ON n.oid = c.relnamespace +JOIN pg_attribute a ON a.attrelid = c.oid +LEFT JOIN pg_attrdef ad ON ad.adrelid = c.oid AND ad.adnum = a.attnum +LEFT JOIN ( + SELECT i.indrelid, a3.attname + FROM pg_index i + JOIN pg_attribute a3 ON a3.attrelid = i.indrelid AND a3.attnum = ANY(i.indkey) + WHERE i.indisprimary +) pk ON pk.indrelid = c.oid AND pk.attname = a.attname +WHERE c.relkind IN ('r', 'p') + AND n.nspname = current_schema() + AND c.relname = '%s' + AND a.attnum > 0 + AND NOT a.attisdropped +ORDER BY a.attnum`, esc(table)) data, _, err := k.Query(query) if err != nil { @@ -363,11 +413,21 @@ func (k *KingbaseDB) getColumnsWithCurrentSchema(tableName string) ([]connection Name: fmt.Sprintf("%v", row["column_name"]), Type: fmt.Sprintf("%v", row["data_type"]), Nullable: fmt.Sprintf("%v", row["is_nullable"]), + Key: fmt.Sprintf("%v", row["column_key"]), + Extra: "", + Comment: "", } if row["column_default"] != nil { def := fmt.Sprintf("%v", row["column_default"]) col.Default = &def + if strings.HasPrefix(strings.ToLower(strings.TrimSpace(def)), "nextval(") { + col.Extra = "auto_increment" + } + } + + if v, ok := row["comment"]; ok && v != nil { + col.Comment = fmt.Sprintf("%v", v) } columns = append(columns, col) @@ -650,7 +710,7 @@ func (k *KingbaseDB) ApplyChanges(tableName string, changes connection.ChangeSet } query := fmt.Sprintf("DELETE FROM %s WHERE %s", qualifiedTable, strings.Join(wheres, " AND ")) if _, err := tx.Exec(query, args...); err != nil { - return fmt.Errorf("delete error: %v", err) + return fmt.Errorf("delete error: %v; sql=%s", err, query) } } @@ -683,7 +743,7 @@ func (k *KingbaseDB) ApplyChanges(tableName string, changes connection.ChangeSet query := fmt.Sprintf("UPDATE %s SET %s WHERE %s", qualifiedTable, strings.Join(sets, ", "), strings.Join(wheres, " AND ")) if _, err := tx.Exec(query, args...); err != nil { - return fmt.Errorf("update error: %v", err) + return 
fmt.Errorf("update error: %v; sql=%s", err, query) } } @@ -707,7 +767,7 @@ func (k *KingbaseDB) ApplyChanges(tableName string, changes connection.ChangeSet query := fmt.Sprintf("INSERT INTO %s (%s) VALUES (%s)", qualifiedTable, strings.Join(cols, ", "), strings.Join(placeholders, ", ")) if _, err := tx.Exec(query, args...); err != nil { - return fmt.Errorf("insert error: %v", err) + return fmt.Errorf("insert error: %v; sql=%s", err, query) } } From ed1f40e04a8e5de6e7e65b8af5d8afc251af2d80 Mon Sep 17 00:00:00 2001 From: Syngnat Date: Sat, 7 Mar 2026 17:01:49 +0800 Subject: [PATCH 22/48] =?UTF-8?q?=E2=99=BB=EF=B8=8F=20refactor(frontend-sy?= =?UTF-8?q?nc):=20=E4=BC=98=E5=8C=96=E6=A1=8C=E9=9D=A2=E4=BA=A4=E4=BA=92?= =?UTF-8?q?=E7=BB=86=E8=8A=82=E5=B9=B6=E7=A7=BB=E9=99=A4=20main=20?= =?UTF-8?q?=E5=9B=9E=E7=81=8C=20dev=20=E8=87=AA=E5=8A=A8=E5=8C=96?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 优化新建连接、主题设置、侧边栏工具区与 SQL 日志的界面表现 - 调整分页、筛选、透明模式与弹窗样式,统一整体交互层次 - 收口外观参数生效逻辑并补齐多组件适配 - 删除 sync-main-to-dev 工作流并同步维护者手动回灌说明 --- .github/workflows/sync-main-to-dev.yml | 180 -- CONTRIBUTING.md | 10 +- CONTRIBUTING.zh-CN.md | 10 +- frontend/package.json.md5 | 2 +- frontend/src/App.tsx | 632 ++++--- frontend/src/components/ConnectionModal.tsx | 1483 ++++++++++------- frontend/src/components/DataGrid.tsx | 397 ++++- .../src/components/DriverManagerModal.tsx | 7 +- frontend/src/components/LogPanel.tsx | 107 +- frontend/src/components/RedisViewer.tsx | 5 +- frontend/src/components/Sidebar.tsx | 299 +++- frontend/src/components/TableDesigner.tsx | 2 +- frontend/src/store.ts | 13 +- frontend/src/utils/appearance.ts | 16 + 14 files changed, 2003 insertions(+), 1160 deletions(-) delete mode 100644 .github/workflows/sync-main-to-dev.yml diff --git a/.github/workflows/sync-main-to-dev.yml b/.github/workflows/sync-main-to-dev.yml deleted file mode 100644 index 18f047a..0000000 --- a/.github/workflows/sync-main-to-dev.yml +++ /dev/null @@ -1,180 +0,0 @@ -name: main 回灌 dev - -on: - push: - branches: - - main - workflow_dispatch: - -permissions: - contents: write - pull-requests: write - -concurrency: - group: sync-main-to-dev - cancel-in-progress: true - -jobs: - sync-main-to-dev: - name: 执行回灌同步 - runs-on: ubuntu-latest - steps: - - name: 检出代码 - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - - name: 检查是否需要同步 - id: diff_check - shell: bash - run: | - set -euo pipefail - echo "开始检查 main 与 dev 的分支差异..." 
- git fetch origin main dev - ahead_count="$(git rev-list --count origin/dev..origin/main)" - echo "ahead_count=${ahead_count}" >> "$GITHUB_OUTPUT" - if [ "${ahead_count}" -eq 0 ]; then - echo "无需同步,dev 已包含 main 的最新提交。" - echo "has_changes=false" >> "$GITHUB_OUTPUT" - else - echo "检测到 ${ahead_count} 个待同步提交,准备创建或复用同步 PR。" - echo "has_changes=true" >> "$GITHUB_OUTPUT" - fi - - - name: 创建或复用同步 PR - id: sync_pr - if: steps.diff_check.outputs.has_changes == 'true' - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - shell: bash - run: | - set -euo pipefail - echo "permission_blocked=false" >> "$GITHUB_OUTPUT" - existing_number="$(gh pr list --base dev --head main --state open --json number --jq '.[0].number // empty')" - - if [ -n "${existing_number}" ]; then - pr_number="${existing_number}" - pr_url="$(gh pr view "${pr_number}" --json url --jq '.url')" - echo "复用已有同步 PR:#${pr_number}" - echo "created=false" >> "$GITHUB_OUTPUT" - else - body_file="$(mktemp)" - error_file="$(mktemp)" - { - echo "## 自动回灌:\`main -> dev\`" - echo - echo "- 触发条件:\`main\` 分支出现新提交(含贡献者直接合并到 \`main\` 的 PR)" - echo "- 目标:让 \`dev\` 持续吸收 \`main\` 的更新,避免发布前集中冲突" - echo - echo "### 合并建议" - echo "- 无冲突:直接合并该 PR(建议 \`Merge commit\`)" - echo "- 有冲突:在该 PR 内解决冲突后再合并" - } > "${body_file}" - - if pr_url="$(gh pr create \ - --base dev \ - --head main \ - --title "🔁 chore(sync): 回灌 main 到 dev" \ - --body-file "${body_file}" 2>"${error_file}")"; then - pr_number="${pr_url##*/}" - echo "已创建同步 PR:#${pr_number}" - echo "created=true" >> "$GITHUB_OUTPUT" - else - error_message="$(tr '\n' ' ' < "${error_file}")" - if printf '%s' "${error_message}" | grep -Fq "GitHub Actions is not permitted to create or approve pull requests"; then - echo "::warning::仓库未开启“Allow GitHub Actions to create and approve pull requests”,已跳过自动创建同步 PR。" - echo "permission_blocked=true" >> "$GITHUB_OUTPUT" - echo "created=false" >> "$GITHUB_OUTPUT" - echo "pr_number=" >> "$GITHUB_OUTPUT" - echo "pr_url=" >> "$GITHUB_OUTPUT" - exit 0 - fi - echo "::error::创建同步 PR 失败:${error_message}" - exit 1 - fi - fi - - echo "pr_number=${pr_number}" >> "$GITHUB_OUTPUT" - echo "pr_url=${pr_url}" >> "$GITHUB_OUTPUT" - - - name: 检查合并状态 - id: merge_state - if: steps.diff_check.outputs.has_changes == 'true' && steps.sync_pr.outputs.permission_blocked != 'true' - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - shell: bash - run: | - set -euo pipefail - pr_number="${{ steps.sync_pr.outputs.pr_number }}" - mergeable="UNKNOWN" - merge_state_status="UNKNOWN" - - for attempt in 1 2 3 4 5 6; do - mergeable="$(gh pr view "${pr_number}" --json mergeable --jq '.mergeable')" - merge_state_status="$(gh pr view "${pr_number}" --json mergeStateStatus --jq '.mergeStateStatus')" - echo "第 ${attempt} 次检查 PR #${pr_number} 合并状态:mergeable=${mergeable}, mergeStateStatus=${merge_state_status}" - if [ "${mergeable}" != "UNKNOWN" ]; then - break - fi - if [ "${attempt}" -lt 6 ]; then - echo "GitHub 仍在计算可合并状态,3 秒后重试..." 
- sleep 3 - fi - done - - if [ "${mergeable}" = "UNKNOWN" ]; then - echo "::warning::PR 合并状态仍在计算中,本次未开启自动合并,可稍后重跑 workflow 或手动开启。" - echo "merge_state_pending=true" >> "$GITHUB_OUTPUT" - else - echo "merge_state_pending=false" >> "$GITHUB_OUTPUT" - fi - echo "mergeable=${mergeable}" >> "$GITHUB_OUTPUT" - echo "merge_state_status=${merge_state_status}" >> "$GITHUB_OUTPUT" - - - name: 可合并时开启自动合并 - id: auto_merge - if: steps.diff_check.outputs.has_changes == 'true' && steps.sync_pr.outputs.permission_blocked != 'true' && steps.merge_state.outputs.mergeable == 'MERGEABLE' - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - shell: bash - run: | - set -euo pipefail - pr_number="${{ steps.sync_pr.outputs.pr_number }}" - if gh pr merge "${pr_number}" --merge --auto; then - echo "已为 PR #${pr_number} 开启自动合并。" - echo "result=enabled" >> "$GITHUB_OUTPUT" - else - echo "::warning::自动合并开启失败,请手动处理并合并该 PR。" - echo "result=failed" >> "$GITHUB_OUTPUT" - fi - - - name: 写入执行摘要 - if: always() - shell: bash - run: | - { - echo "## main 回灌 dev 执行结果" - if [ "${{ steps.diff_check.outputs.has_changes }}" != "true" ]; then - echo "- 状态:无需同步(dev 已包含 main 最新提交)" - exit 0 - fi - if [ "${{ steps.sync_pr.outputs.permission_blocked }}" = "true" ]; then - echo "- 状态:已跳过自动创建同步 PR" - echo "- 原因:仓库未开启 GitHub Actions 创建与审批 Pull Request 权限" - echo "- 处理:前往 Settings -> Actions -> General -> Workflow permissions,开启 Allow GitHub Actions to create and approve pull requests" - echo "- 兜底:由维护者手动执行 main 到 dev 合并,或开启该设置后重新运行 workflow" - exit 0 - fi - echo "- PR:${{ steps.sync_pr.outputs.pr_url }}" - echo "- 可合并状态:${{ steps.merge_state.outputs.mergeable }}" - echo "- 合并状态详情:${{ steps.merge_state.outputs.merge_state_status }}" - if [ "${{ steps.merge_state.outputs.mergeable }}" = "CONFLICTING" ]; then - echo "- 结论:检测到冲突,需要手动处理后合并" - elif [ "${{ steps.merge_state.outputs.merge_state_pending }}" = "true" ]; then - echo "- 结论:GitHub 仍在计算合并状态,本次未开启自动合并;可稍后重跑 workflow 或手动开启 auto-merge" - elif [ "${{ steps.auto_merge.outputs.result }}" = "enabled" ]; then - echo "- 结论:已启用自动合并(满足保护规则后将自动入 dev)" - else - echo "- 结论:PR 已创建/复用,请按分支策略人工合并" - fi - } >> "$GITHUB_STEP_SUMMARY" diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index b89e554..162357f 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -79,14 +79,8 @@ Because external pull requests are merged directly into `main`, maintainers must ### 1. Sync `main` -> `dev` (required) -This repository provides automatic sync via GitHub Actions workflow: - -- `.github/workflows/sync-main-to-dev.yml` -- Trigger: every push to `main` -- Behavior: create/reuse a PR from `main` to `dev`; if mergeable, it tries to enable auto-merge -- Prerequisite: in `Settings -> Actions -> General -> Workflow permissions`, enable `Allow GitHub Actions to create and approve pull requests`; otherwise the workflow will skip PR creation and only emit a warning summary - -Manual fallback (when conflicts or automation is unavailable): +The automatic GitHub Actions sync workflow has been removed. +Maintainers should sync `main` back to `dev` manually when needed: ```bash git checkout dev diff --git a/CONTRIBUTING.zh-CN.md b/CONTRIBUTING.zh-CN.md index 3e79997..a2d3983 100644 --- a/CONTRIBUTING.zh-CN.md +++ b/CONTRIBUTING.zh-CN.md @@ -79,14 +79,8 @@ feature/* / fix/* -> dev -> release/* -> main -> tag(vX.Y.Z) ### 1. 
main → dev 同步(必做) -仓库已提供 GitHub Actions 自动同步机制: - -- `.github/workflows/sync-main-to-dev.yml` -- 触发时机:每次 `main` 分支有新的 push -- 行为:自动创建或复用 `main` 到 `dev` 的同步 PR;若可合并,则尝试开启自动合并 -- 前置条件:需在 `Settings -> Actions -> General -> Workflow permissions` 中开启 `Allow GitHub Actions to create and approve pull requests`,否则 workflow 只会输出告警摘要并跳过建 PR - -当出现冲突,或自动化暂不可用时,使用以下手动兜底方式: +仓库已移除 GitHub Actions 自动回灌 workflow。 +当前统一采用手动方式将 `main` 同步回 `dev`: ```bash git checkout dev diff --git a/frontend/package.json.md5 b/frontend/package.json.md5 index a7661c0..0f8f4fe 100755 --- a/frontend/package.json.md5 +++ b/frontend/package.json.md5 @@ -1 +1 @@ -d0f9366af59a6367ad3c7e2d4185ead4 \ No newline at end of file +5b8157374dae5f9340e31b2d0bd2c00e \ No newline at end of file diff --git a/frontend/src/App.tsx b/frontend/src/App.tsx index be49c41..3aa2f01 100644 --- a/frontend/src/App.tsx +++ b/frontend/src/App.tsx @@ -1,7 +1,7 @@ -import React, { useState, useEffect } from 'react'; +import React, { useState, useEffect, useMemo } from 'react'; import { Layout, Button, ConfigProvider, theme, Dropdown, MenuProps, message, Modal, Spin, Slider, Progress, Switch, Input, InputNumber, Select } from 'antd'; import zhCN from 'antd/locale/zh_CN'; -import { PlusOutlined, ConsoleSqlOutlined, UploadOutlined, DownloadOutlined, CloudDownloadOutlined, BugOutlined, ToolOutlined, GlobalOutlined, InfoCircleOutlined, GithubOutlined, SkinOutlined, CheckOutlined, MinusOutlined, BorderOutlined, CloseOutlined, SettingOutlined, LinkOutlined } from '@ant-design/icons'; +import { PlusOutlined, ConsoleSqlOutlined, UploadOutlined, DownloadOutlined, CloudDownloadOutlined, BugOutlined, ToolOutlined, GlobalOutlined, InfoCircleOutlined, GithubOutlined, SkinOutlined, CheckOutlined, MinusOutlined, BorderOutlined, CloseOutlined, SettingOutlined, LinkOutlined, BgColorsOutlined, AppstoreOutlined } from '@ant-design/icons'; import { BrowserOpenURL, Environment, EventsOn, Quit, WindowFullscreen, WindowGetSize, WindowIsFullscreen, WindowIsMaximised, WindowMaximise, WindowMinimise, WindowSetSize, WindowToggleMaximise } from '../wailsjs/runtime'; import Sidebar from './components/Sidebar'; import TabManager from './components/TabManager'; @@ -11,7 +11,7 @@ import DriverManagerModal from './components/DriverManagerModal'; import LogPanel from './components/LogPanel'; import { useStore } from './store'; import { SavedConnection } from './types'; -import { blurToFilter, normalizeBlurForPlatform, normalizeOpacityForPlatform, isWindowsPlatform } from './utils/appearance'; +import { blurToFilter, normalizeBlurForPlatform, normalizeOpacityForPlatform, isWindowsPlatform, resolveAppearanceValues } from './utils/appearance'; import { SHORTCUT_ACTION_META, SHORTCUT_ACTION_ORDER, @@ -78,11 +78,11 @@ function App() { const tokenControlHeightLG = Math.max(30, Math.round(40 * effectiveUiScale)); const appComponentSize: 'small' | 'middle' | 'large' = effectiveUiScale <= 0.92 ? 'small' : (effectiveUiScale >= 1.12 ? 
'large' : 'middle'); const titleBarHeight = Math.max(28, Math.round(32 * effectiveUiScale)); - const toolbarHeight = Math.max(32, Math.round(36 * effectiveUiScale)); const titleBarButtonWidth = Math.max(40, Math.round(46 * effectiveUiScale)); const floatingLogButtonHeight = Math.max(30, Math.round(34 * effectiveUiScale)); - const effectiveOpacity = normalizeOpacityForPlatform(appearance.opacity); - const effectiveBlur = normalizeBlurForPlatform(appearance.blur); + const resolvedAppearance = resolveAppearanceValues(appearance); + const effectiveOpacity = normalizeOpacityForPlatform(resolvedAppearance.opacity); + const effectiveBlur = normalizeBlurForPlatform(resolvedAppearance.blur); const blurFilter = blurToFilter(effectiveBlur); const windowCornerRadius = 14; const [runtimePlatform, setRuntimePlatform] = useState(''); @@ -93,8 +93,8 @@ function App() { // 同步 macOS 窗口透明度:opacity=1.0 且 blur=0 时关闭 NSVisualEffectView, // 避免 GPU 持续计算窗口背后的模糊合成 useEffect(() => { - void SetWindowTranslucency(appearance.opacity, appearance.blur).catch(() => undefined); - }, [appearance.opacity, appearance.blur]); + void SetWindowTranslucency(resolvedAppearance.opacity, resolvedAppearance.blur).catch(() => undefined); + }, [resolvedAppearance.blur, resolvedAppearance.opacity]); useEffect(() => { let cancelled = false; @@ -370,6 +370,141 @@ function App() { const floatingLogButtonShadow = darkMode ? '0 8px 22px rgba(0,0,0,0.38)' : '0 8px 20px rgba(0,0,0,0.16)'; + + const isOpaqueUtilityMode = resolvedAppearance.opacity >= 0.999 && resolvedAppearance.blur <= 0; + const utilityButtonBgAlpha = darkMode + ? Math.max(0.28, Math.min(0.76, effectiveOpacity * 0.72)) + : Math.max(0.52, Math.min(0.92, effectiveOpacity * 0.9)); + const utilityButtonBgColor = isOpaqueUtilityMode + ? 'transparent' + : (darkMode + ? `rgba(20, 26, 38, ${utilityButtonBgAlpha})` + : `rgba(255, 255, 255, ${utilityButtonBgAlpha})`); + const utilityButtonBorderColor = isOpaqueUtilityMode + ? (darkMode ? 'rgba(255,255,255,0.12)' : 'rgba(16,24,40,0.10)') + : (darkMode + ? `rgba(255,255,255,${Math.max(0.08, Math.min(0.18, effectiveOpacity * 0.16))})` + : `rgba(16,24,40,${Math.max(0.06, Math.min(0.14, effectiveOpacity * 0.12))})`); + const utilityButtonShadow = isOpaqueUtilityMode + ? 'none' + : (darkMode + ? `0 8px 18px rgba(0,0,0,${Math.max(0.10, Math.min(0.22, effectiveOpacity * 0.24))})` + : `0 8px 18px rgba(15,23,42,${Math.max(0.04, Math.min(0.12, effectiveOpacity * 0.12))})`); + const utilityButtonStyle = useMemo(() => ({ + height: Math.max(30, Math.round(32 * effectiveUiScale)), + width: '100%', + paddingInline: Math.max(10, Math.round(12 * effectiveUiScale)), + borderRadius: 10, + border: `1px solid ${utilityButtonBorderColor}`, + background: utilityButtonBgColor, + color: darkMode ? 'rgba(255,255,255,0.94)' : '#162033', + boxShadow: utilityButtonShadow, + backdropFilter: isOpaqueUtilityMode ? 'none' : blurFilter, + WebkitBackdropFilter: isOpaqueUtilityMode ? 'none' : blurFilter, + display: 'inline-flex', + alignItems: 'center', + justifyContent: 'center', + gap: 6, + }), [blurFilter, darkMode, effectiveUiScale, isOpaqueUtilityMode, utilityButtonBgColor, utilityButtonBorderColor, utilityButtonShadow]); + const utilityDropdownShellStyle = useMemo(() => ({ + borderRadius: 14, + padding: 6, + background: darkMode ? 'linear-gradient(180deg, rgba(20,26,38,0.96) 0%, rgba(13,17,26,0.98) 100%)' : 'linear-gradient(180deg, rgba(255,255,255,0.98) 0%, rgba(246,248,252,0.98) 100%)', + border: darkMode ? 
'1px solid rgba(255,255,255,0.08)' : '1px solid rgba(16,24,40,0.08)', + boxShadow: darkMode ? '0 20px 48px rgba(0,0,0,0.32)' : '0 16px 36px rgba(15,23,42,0.12)', + backdropFilter: darkMode ? 'blur(16px)' : 'none', + overflow: 'hidden', + }), [darkMode]); + + const sidebarQuickActionBaseStyle = useMemo(() => ({ + height: Math.max(34, Math.round(36 * effectiveUiScale)), + borderRadius: 12, + display: 'inline-flex', + alignItems: 'center', + justifyContent: 'center', + gap: 8, + paddingInline: Math.max(12, Math.round(14 * effectiveUiScale)), + fontWeight: 700, + boxShadow: darkMode ? '0 8px 18px rgba(0,0,0,0.16)' : '0 8px 16px rgba(15,23,42,0.08)', + backdropFilter: blurFilter, + WebkitBackdropFilter: blurFilter, + minWidth: 0, + whiteSpace: 'nowrap', + }), [blurFilter, darkMode, effectiveUiScale]); + const sidebarQueryActionStyle = useMemo(() => ({ + ...sidebarQuickActionBaseStyle, + flex: '1 1 0', + border: `1px solid ${darkMode ? 'rgba(255,255,255,0.12)' : 'rgba(16,24,40,0.10)'}`, + background: darkMode ? `rgba(255,255,255,0.05)` : 'rgba(255,255,255,0.88)', + color: darkMode ? 'rgba(255,255,255,0.92)' : '#162033', + }), [darkMode, sidebarQuickActionBaseStyle]); + const sidebarCreateConnectionActionStyle = useMemo(() => ({ + ...sidebarQuickActionBaseStyle, + flex: '1 1 0', + border: 'none', + background: 'linear-gradient(135deg, rgba(255,214,102,0.96) 0%, rgba(240,183,39,0.92) 100%)', + color: '#2a1f00', + }), [sidebarQuickActionBaseStyle]); + + const utilityMenuTheme = useMemo(() => ({ + components: { + Menu: { + popupBg: 'transparent', + darkPopupBg: 'transparent', + itemBg: 'transparent', + darkItemBg: 'transparent', + subMenuItemBg: 'transparent', + itemColor: darkMode ? 'rgba(255,255,255,0.88)' : '#162033', + itemHoverColor: darkMode ? '#fff7d6' : '#0f172a', + itemHoverBg: darkMode ? 'rgba(255,214,102,0.10)' : 'rgba(24,144,255,0.08)', + itemSelectedColor: darkMode ? '#ffd666' : '#1677ff', + itemSelectedBg: darkMode ? 'rgba(255,214,102,0.14)' : 'rgba(24,144,255,0.12)', + itemBorderRadius: 10, + itemMarginBlock: 4, + itemMarginInline: 0, + itemPaddingInline: 12, + itemHeight: 40, + groupTitleColor: darkMode ? 'rgba(255,255,255,0.48)' : 'rgba(16,24,40,0.48)', + }, + }, + }), [darkMode]); + const renderUtilityDropdown = (menu: React.ReactNode) => ( + +
+ {menu} +
+
+ ); + const utilityModalShellStyle = useMemo(() => ({ + background: darkMode ? 'linear-gradient(180deg, rgba(20,26,38,0.96) 0%, rgba(13,17,26,0.98) 100%)' : 'linear-gradient(180deg, rgba(255,255,255,0.98) 0%, rgba(246,248,252,0.98) 100%)', + border: darkMode ? '1px solid rgba(255,255,255,0.08)' : '1px solid rgba(16,24,40,0.08)', + boxShadow: darkMode ? '0 24px 56px rgba(0,0,0,0.32)' : '0 18px 42px rgba(15,23,42,0.12)', + backdropFilter: darkMode ? 'blur(18px)' : 'none', + }), [darkMode]); + const utilityPanelStyle = useMemo(() => ({ + padding: 16, + borderRadius: 14, + border: darkMode ? '1px solid rgba(255,255,255,0.08)' : '1px solid rgba(16,24,40,0.08)', + background: darkMode ? 'rgba(255,255,255,0.03)' : 'rgba(255,255,255,0.84)', + }), [darkMode]); + const utilityMutedTextStyle = useMemo(() => ({ + color: darkMode ? 'rgba(255,255,255,0.5)' : 'rgba(16,24,40,0.55)', + fontSize: 12, + lineHeight: 1.6, + }), [darkMode]); + const renderUtilityModalTitle = (icon: React.ReactNode, title: string, description: string) => ( +
+
+ {icon} +
+
+
{title}
+
{description}
+
+
+ ); + + const sidebarHorizontalPadding = 10; const addTab = useStore(state => state.addTab); const activeContext = useStore(state => state.activeContext); @@ -786,37 +921,18 @@ function App() { label: '驱动管理', icon: , onClick: () => setIsDriverModalOpen(true) - } - ]; - - const themeMenu: MenuProps['items'] = [ - { - key: 'light', - label: '亮色主题', - icon: themeMode === 'light' ? : undefined, - onClick: () => setTheme('light') - }, - { - key: 'dark', - label: '暗色主题', - icon: themeMode === 'dark' ? : undefined, - onClick: () => setTheme('dark') }, { type: 'divider' }, - { - key: 'settings', - label: '外观设置...', - icon: , - onClick: () => setIsAppearanceModalOpen(true) - }, { key: 'shortcut-settings', - label: '快捷键管理...', + label: '快捷键管理', icon: , onClick: () => setIsShortcutModalOpen(true) } ]; + const [isThemeModalOpen, setIsThemeModalOpen] = useState(false); + const [themeModalSection, setThemeModalSection] = useState<'theme' | 'appearance'>('theme'); const [isAppearanceModalOpen, setIsAppearanceModalOpen] = useState(false); const [isShortcutModalOpen, setIsShortcutModalOpen] = useState(false); const [capturingShortcutAction, setCapturingShortcutAction] = useState(null); @@ -1190,8 +1306,6 @@ function App() { }, components: { Layout: { - colorBgBody: 'transparent', - colorBgHeader: 'transparent', bodyBg: 'transparent', headerBg: 'transparent', siderBg: 'transparent', @@ -1272,28 +1386,6 @@ function App() {
-
- - - - - - - - -
-
- -
- + + + + +
+
+
+
+ + +
-
@@ -1370,8 +1475,8 @@ function App() { title="拖动调整宽度" /> - -
+ +
{isLogPanelOpen && ( @@ -1399,9 +1504,10 @@ function App() { onOpenGlobalProxySettings={() => setIsProxyModalOpen(true)} /> , '关于 GoNavi', '查看版本信息、仓库地址、更新状态与下载入口。')} open={isAboutOpen} onCancel={() => setIsAboutOpen(false)} + styles={{ content: utilityModalShellStyle, header: { background: 'transparent', borderBottom: 'none', paddingBottom: 8 }, body: { paddingTop: 8 }, footer: { background: 'transparent', borderTop: 'none', paddingTop: 10 } }} footer={[ canShowProgressEntry ? ( @@ -1421,150 +1527,274 @@ function App() {
) : ( -
-
版本:{aboutInfo?.version || '未知'}
-
作者:{aboutInfo?.author || '未知'}
- {aboutInfo?.communityUrl ? ( - - ) : null} -
更新状态:{aboutUpdateStatus || '未检查'}
-
- - {aboutInfo?.repoUrl ? ( - { e.preventDefault(); if (aboutInfo?.repoUrl) BrowserOpenURL(aboutInfo.repoUrl); }} href={aboutInfo.repoUrl}> - {aboutInfo.repoUrl} - - ) : '未知'} +
+
+
+
+
版本
+
{aboutInfo?.version || '未知'}
+
+
+
作者
+
{aboutInfo?.author || '未知'}
+
+
+
更新状态
+
{aboutUpdateStatus || '未检查'}
+
+ {aboutInfo?.communityUrl ? ( + + ) : null} +
+
+
- - -
)} setIsAppearanceModalOpen(false)} + title={renderUtilityModalTitle( + themeModalSection === 'theme' ? : , + themeModalSection === 'theme' ? '主题设置' : '外观设置', + themeModalSection === 'theme' + ? '切换亮暗主题,保持整体视觉风格统一。' + : '统一调整缩放、字体、透明度与模糊效果。' + )} + open={isThemeModalOpen} + onCancel={() => { setIsThemeModalOpen(false); setThemeModalSection('theme'); }} footer={null} - width={460} + width={820} + styles={{ content: utilityModalShellStyle, header: { background: 'transparent', borderBottom: 'none', paddingBottom: 8 }, body: { paddingTop: 8, height: 620, overflow: 'hidden' }, footer: { background: 'transparent', borderTop: 'none', paddingTop: 10 } }} > -
-
-
界面缩放 (UI Scale)
-
- setUiScale(Number(v))} - style={{ flex: 1 }} - /> - {Math.round(effectiveUiScale * 100)}% -
-
- * 建议小屏设备设置为 85%-95% +
+
+
设置导航
+
+ {[ + { key: 'theme', title: '主题模式', description: '亮色与暗色切换', icon: }, + { key: 'appearance', title: '外观参数', description: '缩放、字体与透明度', icon: }, + ].map((item) => { + const active = themeModalSection === item.key; + return ( + + ); + })}
-
-
基础字体大小 (Font Size)
-
- setFontSize(Number(v))} - style={{ flex: 1 }} - /> - {effectiveFontSize}px -
-
-
-
背景不透明度 (Opacity)
-
- setAppearance({ opacity: v })} - style={{ flex: 1 }} - /> - {Math.round((appearance.opacity ?? 1.0) * 100)}% -
-
-
-
高斯模糊 (Blur)
- {isWindowsPlatform() ? ( -
- Windows 使用系统 Acrylic 效果,模糊程度由系统控制 +
+ {themeModalSection === 'theme' ? ( +
+
+
主题模式
+
+ {[ + { key: 'light', label: '亮色主题', description: '适合明亮环境,层次更轻。' }, + { key: 'dark', label: '暗色主题', description: '适合低光环境,视觉更沉稳。' }, + ].map((item) => { + const active = themeMode === item.key; + return ( + + ); + })} +
+
) : ( - <> -
- setAppearance({ blur: v })} - style={{ flex: 1 }} - /> - {appearance.blur}px +
+
+
界面缩放 (UI Scale)
+
+ setUiScale(Number(v))} + style={{ flex: 1 }} + /> + {Math.round(effectiveUiScale * 100)}% +
+
+ * 建议小屏设备设置为 85%-95% +
-
- * 仅控制应用内覆盖层的模糊效果 +
+
基础字体大小 (Font Size)
+
+ setFontSize(Number(v))} + style={{ flex: 1 }} + /> + {effectiveFontSize}px +
- +
+
透明与模糊效果
+
+
+
启用透明与模糊
+
关闭后保留当前阈值,重新开启时直接恢复之前的设置。
+
+ setAppearance({ enabled: checked })} /> +
+
+
+
背景不透明度 (Opacity)
+
+ setAppearance({ opacity: v })} + style={{ flex: 1 }} + /> + {Math.round((appearance.opacity ?? 1.0) * 100)}% +
+
+
+
高斯模糊 (Blur)
+ {isWindowsPlatform() ? ( +
+ Windows 使用系统 Acrylic 效果,模糊程度由系统控制 +
+ ) : ( + <> +
+ setAppearance({ blur: v })} + style={{ flex: 1 }} + /> + {appearance.blur}px +
+
+ * 仅控制应用内覆盖层的模糊效果 +
+ + )} +
+
+
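
The enable switch above only gates how the stored values are applied: `resolveAppearanceValues` (added in `frontend/src/utils/appearance.ts` later in this patch) keeps the persisted opacity/blur and neutralizes them while the toggle is off. A minimal TypeScript sketch of that rule — the `DEFAULT_OPACITY` of 1.0 is assumed to match the constant in that file:

```ts
interface AppearanceSettingsLike {
  enabled?: boolean;
  opacity?: number;
  blur?: number;
}

// Assumed to match DEFAULT_OPACITY in frontend/src/utils/appearance.ts.
const DEFAULT_OPACITY = 1.0;

// Same rule as the patch's resolveAppearanceValues: only enabled === false
// neutralizes the effect; the persisted numbers themselves stay untouched.
function resolveAppearanceValues(appearance?: AppearanceSettingsLike): { opacity: number; blur: number } {
  if (!appearance || appearance.enabled !== false) {
    return {
      opacity: appearance?.opacity ?? DEFAULT_OPACITY,
      blur: appearance?.blur ?? 0,
    };
  }
  return { opacity: DEFAULT_OPACITY, blur: 0 };
}

console.log(resolveAppearanceValues({ enabled: true, opacity: 0.85, blur: 12 }));  // { opacity: 0.85, blur: 12 }
console.log(resolveAppearanceValues({ enabled: false, opacity: 0.85, blur: 12 })); // { opacity: 1, blur: 0 }
console.log(resolveAppearanceValues(undefined));                                   // { opacity: 1, blur: 0 }
```

This is why re-enabling the toggle restores the previous sliders instead of resetting them: disabling never rewrites the stored thresholds, it only changes how they resolve.
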
+
+
启动窗口
+
+ 启动时全屏 + setStartupFullscreen(checked)} /> +
+
+ * 修改后下次启动生效 +
+
+
+ +
+
)}
-
-
启动窗口
-
- 启动时全屏 - setStartupFullscreen(checked)} /> -
-
- * 修改后下次启动生效 -
-
-
- -
, '快捷键管理', '统一查看、录制与启停常用快捷键,保持操作习惯一致。')} open={isShortcutModalOpen} onCancel={() => { setIsShortcutModalOpen(false); setCapturingShortcutAction(null); }} - width={720} + width={760} + styles={{ content: utilityModalShellStyle, header: { background: 'transparent', borderBottom: 'none', paddingBottom: 8 }, body: { paddingTop: 8 }, footer: { background: 'transparent', borderTop: 'none', paddingTop: 10 } }} footer={[
@@ -1696,632 +1763,766 @@ const ConnectionModal: React.FC<{ ); - const renderStep2 = () => ( - { - if (testResult) { - setTestResult(null); // Clear result on change - setTestErrorLogOpen(false); - } - if (changed.uri !== undefined || changed.type !== undefined) { - setUriFeedback(null); - } - if (changed.useSSL !== undefined) setUseSSL(changed.useSSL); - if (changed.useSSH !== undefined) setUseSSH(changed.useSSH); - if (changed.useProxy !== undefined) { - const enabledProxy = !!changed.useProxy; - setUseProxy(enabledProxy); - if (enabledProxy && form.getFieldValue('useHttpTunnel')) { - form.setFieldValue('useHttpTunnel', false); - setUseHttpTunnel(false); - } - } - if (changed.proxyType !== undefined) { - const nextType = String(changed.proxyType || 'socks5').toLowerCase(); - if (nextType === 'http') { - const currentPort = Number(form.getFieldValue('proxyPort') || 0); - if (!currentPort || currentPort === 1080) { - form.setFieldValue('proxyPort', 8080); - } - } else { - const currentPort = Number(form.getFieldValue('proxyPort') || 0); - if (!currentPort || currentPort === 8080) { - form.setFieldValue('proxyPort', 1080); - } - } - } - if (changed.useHttpTunnel !== undefined) { - const enabledHttpTunnel = !!changed.useHttpTunnel; - setUseHttpTunnel(enabledHttpTunnel); - if (enabledHttpTunnel && form.getFieldValue('useProxy')) { - form.setFieldValue('useProxy', false); - setUseProxy(false); - } - if (enabledHttpTunnel) { - const currentPort = Number(form.getFieldValue('httpTunnelPort') || 0); - if (!currentPort || currentPort <= 0) { - form.setFieldValue('httpTunnelPort', 8080); - } - } - } - // Type change handled by step 1, but keep sync if select changes (hidden now) - if (changed.type !== undefined) setDbType(changed.type); - if (changed.redisTopology !== undefined) { - const supportedDbs = Array.from({ length: 16 }, (_, i) => i); - setRedisDbList(supportedDbs); - const selectedDbsRaw = form.getFieldValue('includeRedisDatabases'); - const selectedDbs = Array.isArray(selectedDbsRaw) ? selectedDbsRaw.map((entry: any) => Number(entry)) : []; - const validDbs = selectedDbs - .filter((entry: number) => Number.isFinite(entry)) - .map((entry: number) => Math.trunc(entry)) - .filter((entry: number) => supportedDbs.includes(entry)); - form.setFieldValue('includeRedisDatabases', validDbs.length > 0 ? validDbs : undefined); - } - if ( - changed.type !== undefined - || changed.host !== undefined - || changed.port !== undefined - || changed.mongoHosts !== undefined - || changed.mongoTopology !== undefined - || changed.mongoSrv !== undefined - ) { - setMongoMembers([]); - } - }} - > - {/* Hidden Type Field to keep form value synced */} - + const renderStep2 = () => { + const baseInfoSection = ( +
+
基础信息
+
常用参数集中在左侧,优先完成连接建立所需的最小输入。
- - - - - - - - - - - - {uriFeedback && ( - setUriFeedback(null)} - style={{ marginBottom: 12 }} - /> - )} - {currentDriverUnavailableReason && ( - - {currentDriverUnavailableReason} - - - )} - /> - )} - - {isCustom ? ( - <> - - - - - - - - ) : ( - <> -
- - - - {isFileDb && ( - - - - )} - {!isFileDb && ( - Number(value) > 0)]} - style={{ width: 100 }} - > - - - )} -
+ + + - {(dbType === 'postgres' || dbType === 'kingbase' || dbType === 'highgo' || dbType === 'vastbase') && ( - - - - )} + {!isCustom && ( + <> + + + + + + + + + {uriFeedback && ( + setUriFeedback(null)} + style={{ marginBottom: 16 }} + /> + )} + + )} - {dbType === 'oracle' && ( - - - - )} + {isCustom ? ( + <> + + + + + + + + ) : ( + <> +
+ + + + {isFileDb ? ( + + + + ) : ( + Number(value) > 0)]} + style={{ marginBottom: 0 }} + > + + + )} +
- {(dbType === 'mysql' || dbType === 'mariadb' || dbType === 'diros' || dbType === 'sphinx') && ( - <> - - - -
- - - - - - -
- - )} - - )} + {(dbType === 'postgres' || dbType === 'kingbase' || dbType === 'highgo' || dbType === 'vastbase') && ( + + + + )} - {dbType === 'mongodb' && ( - <> - - 使用 SRV 记录(mongodb+srv) - - - - - )} - {mongoSrv && ( - - )} - - - -
- - - - - - -
- - - 发现后可校验当前副本集状态 - - {mongoMembers.length > 0 && ( -
`${record.host}-${record.state}`} - dataSource={mongoMembers} - style={{ marginBottom: 12 }} - columns={[ - { - title: '成员', - dataIndex: 'host', - width: '48%', - render: (value: string, record: MongoMemberInfo) => ( - - {value} - {record.isSelf ? 当前 : null} - - ), - }, - { - title: '状态', - dataIndex: 'state', - width: '32%', - render: (value: string) => { - const state = String(value || '').toUpperCase(); - let color: string = 'default'; - if (state === 'PRIMARY') color = 'success'; - else if (state === 'SECONDARY' || state === 'PASSIVE') color = 'blue'; - else if (state === 'ARBITER') color = 'purple'; - else if (state === 'DOWN' || state === 'REMOVED' || state === 'UNKNOWN') color = 'error'; - return {state || 'UNKNOWN'}; - }, - }, - { - title: '健康', - dataIndex: 'healthy', - width: '20%', - render: (value: boolean) => ( - {value ? '正常' : '异常'} - ), - }, - ]} - /> - )} - - )} - - - - - + + )} - {/* Redis specific: password only, no username */} - {isRedis && ( - <> - - - - )} - - - - - - - - )} + {(dbType === 'mysql' || dbType === 'mariadb' || dbType === 'diros' || dbType === 'sphinx') && ( + <> + + + +
+ + + + + + +
+ + )} + + )} - {/* Non-Redis, non-SQLite: username and password */} - {!isFileDb && !isRedis && ( -
- - - - - - - {dbType === 'mongodb' && ( - - + + + 使用 SRV(mongodb+srv) + + {mongoSrv && useSSH && ( + + )} + {mongoTopology === 'replica' && ( + <> + + + + + + +
+ + + + + + + {mongoMembers.length > 0 && ( +
record.host} + pagination={false} + dataSource={mongoMembers} + style={{ marginBottom: 12 }} + columns={[ + { title: 'Host', dataIndex: 'host', width: '48%' }, + { + title: '角色', + dataIndex: 'role', + width: '32%', + render: (value: string, record: MongoMemberInfo) => ( + {value || 'UNKNOWN'} + ), + }, + { + title: '健康', + dataIndex: 'healthy', + width: '20%', + render: (value: boolean) => ( + {value ? '正常' : '异常'} + ), + }, + ]} + /> + )} + + )} +
+ + + + + + + {redisTopology === 'cluster' && ( + + + {redisDbList.map(db => db{db})} + + + + )} - {!isFileDb && !isRedis && ( - - - - )} + {!isFileDb && !isRedis && ( +
+ + + + + + + {dbType === 'mongodb' && ( + + - - {dbType === 'dameng' && ( - <> - - - - - - - - )} - - {sslHintText} - -
- )} - - )} + {dbType === 'mongodb' && ( + + 保存密码 + + )} - - - 使用 SSH 隧道 (SSH Tunnel) - + {!isFileDb && !isRedis && ( + + + + )} + + )} +
+ ); - {useSSH && ( -
-
- - - - - - -
-
- - - - - - -
- - - - - - - - -
- )} + const networkSecuritySection = !isFileDb ? (() => { + const networkItems: Array<{ + key: 'ssl' | 'ssh' | 'proxy' | 'httpTunnel'; + title: string; + description: string; + enabled: boolean; + }> = [ + ...(isSSLType ? [{ key: 'ssl' as const, title: 'SSL/TLS', description: '加密与证书校验', enabled: useSSL }] : []), + { key: 'ssh', title: 'SSH 隧道', description: '跳板机 / 堡垒机转发', enabled: useSSH }, + { key: 'proxy', title: '代理', description: 'SOCKS5 / HTTP CONNECT', enabled: useProxy }, + { key: 'httpTunnel', title: 'HTTP 隧道', description: '独立 HTTP CONNECT 路由', enabled: useHttpTunnel }, + ]; + const resolvedNetworkConfig = networkItems.some((item) => item.key === activeNetworkConfig) + ? activeNetworkConfig + : networkItems[0]?.key || 'ssh'; + const renderNetworkPanel = () => { + if (resolvedNetworkConfig === 'ssl') { + return ( +
+
SSL/TLS
+
为连接链路增加加密与证书校验控制,适合生产或跨网络访问场景。
+ {!useSSL ? ( +
+ 左侧勾选“SSL/TLS”后,可在这里配置模式、证书与校验策略。 +
+ ) : ( +
+ + + + + + + + )} + {sslHintText} +
+ )} +
+ ); + } + if (resolvedNetworkConfig === 'ssh') { + return ( +
+
SSH 隧道
+
通过跳板机或堡垒机转发数据库连接,适合内网或受限网络环境。
+ {!useSSH ? ( +
+ 左侧勾选“SSH 隧道”后,可在这里填写主机、端口、用户名、密码和私钥路径。 +
+ ) : ( +
+
+ + + + + + +
+
+ + + + + + +
+ + + + + + + + +
+ )} +
+ ); + } + if (resolvedNetworkConfig === 'proxy') { + return ( +
+
代理
+
适合借助本地代理软件或中间网关转发数据库流量。
+ {!useProxy ? ( +
+ 左侧勾选“代理”后,可在这里选择代理类型并填写主机、端口与认证信息。 +
+ ) : ( +
+ + + +
+ + + + + + +
+
+ )} +
+ ); + } + return ( +
+
HTTP 隧道
+
与代理模式互斥,适合单独指定一条 HTTP CONNECT 隧道路由。
+ {!useHttpTunnel ? ( +
+ 左侧勾选“HTTP 隧道”后,可在这里填写隧道目标与认证信息。 +
+ ) : ( +
+
+ + + + + + +
+
+ + + + + + +
+ 与“使用代理”互斥,启用后将通过 HTTP CONNECT 建立独立隧道。 +
+ )} +
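
The exclusivity described above is enforced in the form's `onValuesChange` handler in this same hunk, together with conventional default ports. A compact TypeScript sketch of those rules, using the form field names from the patch but stripped of the antd form plumbing:

```ts
interface TunnelFormValues {
  useProxy: boolean;
  proxyType: 'socks5' | 'http';
  proxyPort: number;
  useHttpTunnel: boolean;
  httpTunnelPort: number;
}

// Restates the onValuesChange rules: proxy and HTTP tunnel are mutually
// exclusive, and switching proxy type swaps the conventional default port.
function applyTunnelChange(values: TunnelFormValues, changed: Partial<TunnelFormValues>): TunnelFormValues {
  const next = { ...values, ...changed };
  if (changed.useProxy) {
    next.useHttpTunnel = false;           // enabling the proxy turns the HTTP tunnel off
  }
  if (changed.useHttpTunnel) {
    next.useProxy = false;                // and vice versa
    if (!next.httpTunnelPort || next.httpTunnelPort <= 0) next.httpTunnelPort = 8080;
  }
  if (changed.proxyType === 'http' && (!next.proxyPort || next.proxyPort === 1080)) {
    next.proxyPort = 8080;                // don't carry the SOCKS default into HTTP mode
  }
  if (changed.proxyType === 'socks5' && (!next.proxyPort || next.proxyPort === 8080)) {
    next.proxyPort = 1080;
  }
  return next;
}

const base: TunnelFormValues = { useProxy: true, proxyType: 'socks5', proxyPort: 1080, useHttpTunnel: false, httpTunnelPort: 0 };
console.log(applyTunnelChange(base, { useHttpTunnel: true })); // proxy off, tunnel on, tunnel port defaults to 8080
```
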
+ ); + }; - - - 使用代理 (SOCKS5 / HTTP CONNECT) - + return ( +
+
网络与安全
+
上方稳定列出所有连接方式,下方固定展示当前方式的配置详情,避免启用后页面重新排布,同时给详情区留出足够宽度。
+
+ {networkItems.map((item) => { + const active = item.key === resolvedNetworkConfig; + const activeColor = darkMode ? '#ffd666' : '#1677ff'; + return ( +
setActiveNetworkConfig(item.key)} + onKeyDown={(event) => { + if (event.key === 'Enter' || event.key === ' ') { + event.preventDefault(); + setActiveNetworkConfig(item.key); + } + }} + style={{ + ...getConnectionOptionCardStyle(item.enabled), + borderColor: active + ? (darkMode ? 'rgba(255,214,102,0.46)' : 'rgba(24,144,255,0.36)') + : 'transparent', + background: active + ? (darkMode ? 'linear-gradient(180deg, rgba(255,214,102,0.14) 0%, rgba(255,214,102,0.08) 100%)' : 'linear-gradient(180deg, rgba(24,144,255,0.12) 0%, rgba(24,144,255,0.06) 100%)') + : getConnectionOptionCardStyle(item.enabled).background, + boxShadow: active + ? (darkMode ? '0 0 0 1px rgba(255,214,102,0.18) inset, 0 12px 26px rgba(0,0,0,0.16)' : '0 0 0 1px rgba(24,144,255,0.14) inset, 0 12px 22px rgba(24,144,255,0.10)') + : 'none', + cursor: 'pointer', + outline: 'none', + }} + > +
+
+
+ + + +
+
+ {item.title} +
+ {active && ( + + 当前编辑 + + )} + + {item.enabled ? '已启用' : '未启用'} + +
+
+
+ {item.description} +
+
+
+
+
+ ); + })} +
+
+ {renderNetworkPanel()} +
+
+
高级连接
+ + + +
+
+ ); + })() : null; - {useProxy && ( -
-
- - - - - - -
-
- - - - - - -
-
- )} + return ( + { + if (testResult) { + setTestResult(null); + setTestErrorLogOpen(false); + } + if (changed.uri !== undefined || changed.type !== undefined) { + setUriFeedback(null); + } + if (changed.useSSL !== undefined) { + setUseSSL(changed.useSSL); + if (changed.useSSL) setActiveNetworkConfig('ssl'); + } + if (changed.useSSH !== undefined) { + setUseSSH(changed.useSSH); + if (changed.useSSH) setActiveNetworkConfig('ssh'); + } + if (changed.useProxy !== undefined) { + const enabledProxy = !!changed.useProxy; + setUseProxy(enabledProxy); + if (enabledProxy) setActiveNetworkConfig('proxy'); + if (enabledProxy && form.getFieldValue('useHttpTunnel')) { + form.setFieldValue('useHttpTunnel', false); + setUseHttpTunnel(false); + } + } + if (changed.proxyType !== undefined) { + const nextType = String(changed.proxyType || 'socks5').toLowerCase(); + if (nextType === 'http') { + const currentPort = Number(form.getFieldValue('proxyPort') || 0); + if (!currentPort || currentPort === 1080) { + form.setFieldValue('proxyPort', 8080); + } + } else { + const currentPort = Number(form.getFieldValue('proxyPort') || 0); + if (!currentPort || currentPort === 8080) { + form.setFieldValue('proxyPort', 1080); + } + } + } + if (changed.useHttpTunnel !== undefined) { + const enabledHttpTunnel = !!changed.useHttpTunnel; + setUseHttpTunnel(enabledHttpTunnel); + if (enabledHttpTunnel) setActiveNetworkConfig('httpTunnel'); + if (enabledHttpTunnel && form.getFieldValue('useProxy')) { + form.setFieldValue('useProxy', false); + setUseProxy(false); + } + if (enabledHttpTunnel) { + const currentPort = Number(form.getFieldValue('httpTunnelPort') || 0); + if (!currentPort || currentPort <= 0) { + form.setFieldValue('httpTunnelPort', 8080); + } + } + } + if (changed.type !== undefined) setDbType(changed.type); + if (changed.redisTopology !== undefined) { + const supportedDbs = Array.from({ length: 16 }, (_, i) => i); + setRedisDbList(supportedDbs); + const selectedDbsRaw = form.getFieldValue('includeRedisDatabases'); + const selectedDbs = Array.isArray(selectedDbsRaw) ? selectedDbsRaw.map((entry: any) => Number(entry)) : []; + const validDbs = selectedDbs + .filter((entry: number) => Number.isFinite(entry)) + .map((entry: number) => Math.trunc(entry)) + .filter((entry: number) => supportedDbs.includes(entry)); + form.setFieldValue('includeRedisDatabases', validDbs.length > 0 ? validDbs : undefined); + } + if ( + changed.type !== undefined + || changed.host !== undefined + || changed.port !== undefined + || changed.mongoHosts !== undefined + || changed.mongoTopology !== undefined + || changed.mongoSrv !== undefined + ) { + setMongoMembers([]); + } + }} + > + + {currentDriverUnavailableReason && ( + + {currentDriverUnavailableReason} + + + )} + /> + )} + {(() => { + const sectionItems: Array<{ key: 'basic' | 'network'; title: string; description: string; icon: React.ReactNode }> = [ + { key: 'basic', title: '基础信息', description: '名称、地址、认证、URI 与数据库范围', icon: }, + ...(!isCustom && !isFileDb ? [{ key: 'network' as const, title: '网络与安全', description: 'SSL、SSH、代理与高级连接', icon: }] : []), + ]; + const resolvedSection = sectionItems.some((item) => item.key === activeConfigSection) + ? activeConfigSection + : sectionItems[0]?.key || 'basic'; + const currentSectionContent = resolvedSection === 'basic' + ? baseInfoSection + : networkSecuritySection; - - - 使用 HTTP 隧道(独立代理) - + if (sectionItems.length <= 1) { + return currentSectionContent; + } - {useHttpTunnel && ( -
-
- - - - - - -
-
- - - - - - -
- - 与“使用代理”互斥,启用后将通过 HTTP CONNECT 建立独立隧道。 - -
- )} - - - - - - - ) - }]} - /> - - )} - - )} - - - ); + return ( +
+
+
配置分区
+
+ {sectionItems.map((item) => { + const active = item.key === resolvedSection; + return ( + + ); + })} +
+
+
+ {currentSectionContent} +
+
+ ); + })()} + + ); + }; const getFooter = () => { if (step === 1) { @@ -2333,7 +2534,7 @@ const ConnectionModal: React.FC<{ const hasTestError = !!testResult && !isTestSuccess; const operationBlocked = !!currentDriverUnavailableReason || driverStatusChecking; return ( -
+
{!initialValues && } {testResult ? ( @@ -2386,18 +2587,21 @@ const ConnectionModal: React.FC<{ }; const getTitle = () => { - if (step === 1) return "选择数据源类型"; + if (step === 1) { + return renderConnectionModalTitle(, '选择数据源类型', '按数据库、中间件或文件类型快速进入对应的连接配置流程。'); + } const typeName = dbTypes.find(t => t.key === dbType)?.name || dbType; - return initialValues ? "编辑连接" : `新建 ${typeName} 连接`; + return initialValues + ? renderConnectionModalTitle(, '编辑连接', `调整 ${typeName} 连接的参数、认证方式与网络选项。`) + : renderConnectionModalTitle(, `新建 ${typeName} 连接`, '填写连接参数、测试连通性,并保存到连接树中。'); }; - const modalBodyStyle = step === 1 - ? { padding: '16px 24px', overflow: 'hidden' as const, minHeight: STEP1_MODAL_MIN_BODY_HEIGHT } - : { - padding: '16px 24px', - overflowY: 'auto' as const, - overflowX: 'hidden' as const, - }; + const modalBodyStyle = { + padding: '12px 24px 18px', + height: CONNECTION_MODAL_BODY_HEIGHT, + overflowY: 'auto' as const, + overflowX: 'hidden' as const, + }; return ( <> @@ -2408,22 +2612,33 @@ const ConnectionModal: React.FC<{ footer={getFooter()} centered wrapClassName="connection-modal-wrap" - width={step === 1 ? STEP1_MODAL_WIDTH : STEP2_MODAL_WIDTH} + width={CONNECTION_MODAL_WIDTH} zIndex={10001} destroyOnHidden maskClosable={false} - styles={{ body: modalBodyStyle }} + styles={{ + content: modalShellStyle, + header: { background: 'transparent', borderBottom: 'none', paddingBottom: 8 }, + body: modalBodyStyle, + footer: { background: 'transparent', borderTop: 'none', paddingTop: 10 } + }} > {step === 1 ? renderStep1() : renderStep2()} , '测试连接失败原因', '查看本次测试连接的完整错误上下文,便于快速定位配置问题。')} open={testErrorLogOpen} onCancel={() => setTestErrorLogOpen(false)} centered width={760} zIndex={10002} destroyOnHidden + styles={{ + content: modalShellStyle, + header: { background: 'transparent', borderBottom: 'none', paddingBottom: 8 }, + body: { paddingTop: 8 }, + footer: { background: 'transparent', borderTop: 'none', paddingTop: 10 } + }} footer={[ , ]} diff --git a/frontend/src/components/DataGrid.tsx b/frontend/src/components/DataGrid.tsx index 10c8b87..f23caa0 100644 --- a/frontend/src/components/DataGrid.tsx +++ b/frontend/src/components/DataGrid.tsx @@ -2,7 +2,7 @@ import React, { useState, useEffect, useRef, useContext, useMemo, useCallback } import { createPortal } from 'react-dom'; import { Table, message, Input, Button, Dropdown, MenuProps, Form, Pagination, Select, Modal, Checkbox, Segmented, Tooltip, Popover } from 'antd'; import type { SortOrder } from 'antd/es/table/interface'; -import { ReloadOutlined, ImportOutlined, ExportOutlined, DownOutlined, PlusOutlined, DeleteOutlined, SaveOutlined, UndoOutlined, FilterOutlined, CloseOutlined, ConsoleSqlOutlined, FileTextOutlined, CopyOutlined, ClearOutlined, EditOutlined, VerticalAlignBottomOutlined } from '@ant-design/icons'; +import { ReloadOutlined, ImportOutlined, ExportOutlined, DownOutlined, PlusOutlined, DeleteOutlined, SaveOutlined, UndoOutlined, FilterOutlined, CloseOutlined, ConsoleSqlOutlined, FileTextOutlined, CopyOutlined, ClearOutlined, EditOutlined, VerticalAlignBottomOutlined, LeftOutlined, RightOutlined } from '@ant-design/icons'; import Editor from '@monaco-editor/react'; import { ImportData, ExportTable, ExportData, ExportQuery, ApplyChanges, DBGetColumns } from '../../wailsjs/go/app/App'; import ImportPreviewModal from './ImportPreviewModal'; @@ -11,7 +11,7 @@ import type { ColumnDefinition } from '../types'; import { v4 as uuidv4 } from 'uuid'; import 'react-resizable/css/styles.css'; import { buildOrderBySQL, buildWhereSQL, 
escapeLiteral, quoteIdentPart, quoteQualifiedIdent, withSortBufferTuningSQL, type FilterCondition } from '../utils/sql'; -import { isMacLikePlatform, normalizeOpacityForPlatform } from '../utils/appearance'; +import { isMacLikePlatform, normalizeOpacityForPlatform, resolveAppearanceValues } from '../utils/appearance'; import { getDataSourceCapabilities } from '../utils/dataSourceCapabilities'; // --- Error Boundary --- @@ -639,7 +639,8 @@ const DataGrid: React.FC = ({ const setQueryOptions = useStore(state => state.setQueryOptions); const isMacLike = useMemo(() => isMacLikePlatform(), []); const darkMode = theme === 'dark'; - const opacity = normalizeOpacityForPlatform(appearance.opacity); + const resolvedAppearance = resolveAppearanceValues(appearance); + const opacity = normalizeOpacityForPlatform(resolvedAppearance.opacity); const canModifyData = !readOnly && !!tableName; const showColumnComment = queryOptions?.showColumnComment !== false; const showColumnType = queryOptions?.showColumnType !== false; @@ -706,6 +707,33 @@ const DataGrid: React.FC = ({ const toolbarDividerColor = darkMode ? 'rgba(255, 255, 255, 0.12)' : 'rgba(0, 0, 0, 0.10)'; const columnMetaHintColor = darkMode ? darkHighlightTextColor : lightMetaHintColor; const columnMetaTooltipColor = darkMode ? darkHighlightTextColor : lightMetaTooltipColor; + const paginationPageSizeOptions = ['100', '200', '500', '1000']; + const paginationGlassMode = opacity < 0.999 || resolvedAppearance.blur > 0; + const paginationShellBg = darkMode + ? `linear-gradient(135deg, rgba(17,22,34,${paginationGlassMode ? Math.max(0.22, opacity * 0.38) : 0.82}) 0%, rgba(10,14,24,${paginationGlassMode ? Math.max(0.28, opacity * 0.46) : 0.9}) 100%)` + : `linear-gradient(135deg, rgba(255,255,255,${paginationGlassMode ? Math.max(0.24, opacity * 0.36) : 0.96}) 0%, rgba(246,248,252,${paginationGlassMode ? Math.max(0.32, opacity * 0.44) : 0.99}) 100%)`; + const paginationShellBorderColor = darkMode + ? `rgba(255,255,255,${paginationGlassMode ? 0.10 : 0.08})` + : `rgba(16,24,40,${paginationGlassMode ? 0.08 : 0.08})`; + const paginationShellShadow = darkMode + ? `0 16px 34px rgba(0,0,0,${paginationGlassMode ? 0.10 : 0.22})` + : `0 14px 30px rgba(15,23,42,${paginationGlassMode ? 0.03 : 0.08})`; + const paginationChipBg = darkMode + ? `rgba(255,255,255,${paginationGlassMode ? Math.max(0.02, opacity * 0.035) : 0.04})` + : `rgba(255,255,255,${paginationGlassMode ? Math.max(0.18, opacity * 0.26) : 0.86})`; + const paginationChipBorderColor = darkMode + ? `rgba(255,255,255,${paginationGlassMode ? 0.10 : 0.08})` + : `rgba(16,24,40,${paginationGlassMode ? 0.10 : 0.08})`; + const paginationHoverBg = darkMode + ? `rgba(255,255,255,${paginationGlassMode ? Math.max(0.04, opacity * 0.06) : 0.07})` + : `rgba(255,255,255,${paginationGlassMode ? Math.max(0.24, opacity * 0.34) : 0.96})`; + const paginationPrimaryTextColor = darkMode ? '#f5f7ff' : '#162033'; + const paginationSecondaryTextColor = darkMode ? 'rgba(255,255,255,0.54)' : 'rgba(16,24,40,0.56)'; + const paginationAccentBg = darkMode ? 'rgba(255,214,102,0.14)' : 'rgba(24,144,255,0.10)'; + const paginationAccentBorderColor = darkMode ? 'rgba(255,214,102,0.38)' : 'rgba(24,144,255,0.22)'; + const paginationActiveItemBg = darkMode ? 'rgba(255,214,102,0.18)' : 'rgba(24,144,255,0.12)'; + const paginationActiveItemBorderColor = darkMode ? 'rgba(255,214,102,0.46)' : 'rgba(24,144,255,0.28)'; + const paginationActiveItemTextColor = darkMode ? 
'#fff7d6' : '#0958d9'; const [form] = Form.useForm(); const [modal, contextHolder] = Modal.useModal(); @@ -2970,6 +2998,49 @@ const DataGrid: React.FC = ({ }; }, [viewMode, tableScrollX, mergedDisplayData.length, syncExternalScrollFromTargets, pickHorizontalScrollTargets]); + const paginationSummaryText = useMemo(() => { + if (!pagination) return ''; + const total = Number.isFinite(pagination.total) ? pagination.total : 0; + const rangeStart = Math.max(0, (pagination.current - 1) * pagination.pageSize + (total > 0 ? 1 : 0)); + const hasValidRange = total > 0 && rangeStart > 0; + const rangeEnd = hasValidRange ? Math.min(total, rangeStart + pagination.pageSize - 1) : 0; + const currentCount = hasValidRange ? Math.max(0, rangeEnd - rangeStart + 1) : 0; + + if (pagination.totalKnown === false) { + if (isDuckDBConnection) { + if (pagination.totalCountLoading) return `当前 ${currentCount} 条 / 正在统计精确总数…`; + if (pagination.totalApprox && Number.isFinite(total) && total > 0) return `当前 ${currentCount} 条 / 约 ${total} 条`; + if (pagination.totalCountCancelled) return `当前 ${currentCount} 条 / 已取消统计`; + return `当前 ${currentCount} 条 / 总数未统计`; + } + return `当前 ${currentCount} 条 / 正在统计总数…`; + } + + if (isDuckDBConnection && (!Number.isFinite(total) || total <= 0)) { + return '当前 0 条 / 共 0 条'; + } + + return `当前 ${currentCount} 条 / 共 ${total} 条`; + }, [pagination, isDuckDBConnection]); + + const paginationPageText = useMemo(() => { + if (!pagination) return ''; + const total = Number.isFinite(pagination.total) ? pagination.total : 0; + const canShowTotalPages = pagination.totalKnown !== false || (isDuckDBConnection && pagination.totalApprox && total > 0); + if (!canShowTotalPages || total <= 0) return `第 ${pagination.current} 页`; + const totalPages = Math.max(1, Math.ceil(total / Math.max(1, pagination.pageSize))); + return `第 ${pagination.current} / ${totalPages} 页`; + }, [pagination, isDuckDBConnection]); + + const handlePageSizeChange = useCallback((value: string) => { + if (!pagination || !onPageChange) return; + const nextSize = Number(value); + if (!Number.isFinite(nextSize) || nextSize <= 0) return; + const firstRowIndex = Math.max(0, (pagination.current - 1) * pagination.pageSize); + const nextPage = Math.floor(firstRowIndex / nextSize) + 1; + onPageChange(nextPage, nextSize); + }, [pagination, onPageChange]); + return (
{/* Toolbar + Filter Panel */} @@ -3697,33 +3768,41 @@ const DataGrid: React.FC = ({
{pagination && ( -
- { - const hasValidRange = Array.isArray(range) && range[0] > 0 && range[1] >= range[0]; - const currentCount = hasValidRange ? Math.max(0, range[1] - range[0] + 1) : 0; - if (pagination.totalKnown === false) { - if (isDuckDBConnection) { - if (pagination.totalCountLoading) return `当前 ${currentCount} 条 / 正在统计精确总数...`; - if (pagination.totalApprox && Number.isFinite(total) && total > 0) return `当前 ${currentCount} 条 / 约 ${total} 条`; - if (pagination.totalCountCancelled) return `当前 ${currentCount} 条 / 已取消统计`; - return `当前 ${currentCount} 条 / 总数未统计`; +
+
+
+ 结果集 + {paginationSummaryText} +
+
{paginationPageText}
+ { + if (type === 'prev') { + return ; } - return `当前 ${currentCount} 条 / 正在统计总数...`; - } - if (isDuckDBConnection && (!Number.isFinite(total) || total <= 0)) { - return '当前 0 条 / 共 0 条'; - } - return `当前 ${currentCount} 条 / 共 ${total} 条`; - }} - showSizeChanger - pageSizeOptions={['100', '200', '500', '1000']} - onChange={onPageChange} - size="small" - /> + if (type === 'next') { + return ; + } + return originalElement; + }} + /> +
+
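
The rebuilt pagination footer keeps the first visible row stable when the page size changes (`handlePageSizeChange` above derives the new page from the old first-row index). A small TypeScript restatement of just that arithmetic, with plain illustrative values in place of the component state:

```ts
interface PageState {
  current: number;   // 1-based page index
  pageSize: number;
}

// Minimal restatement of the page-size switch rule from handlePageSizeChange:
// keep the first currently visible row on screen after the page size changes.
function nextPageForSize(state: PageState, nextSize: number): number {
  // 0-based index of the first row currently shown.
  const firstRowIndex = Math.max(0, (state.current - 1) * state.pageSize);
  // 1-based page that contains that row under the new page size.
  return Math.floor(firstRowIndex / nextSize) + 1;
}

// Page 3 at 200 rows/page starts at row 400; switching to 500 rows/page
// lands on page 1 (rows 0-499 still contain row 400).
console.log(nextPageForSize({ current: 3, pageSize: 200 }, 500)); // 1
// Switching to 100 rows/page lands on page 5 (rows 400-499).
console.log(nextPageForSize({ current: 3, pageSize: 200 }, 100)); // 5
```
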
+ {sqlLogs.length === 0 ? ( +
+ 暂无 SQL 执行日志} + /> +
+ ) : ( +
+ )} ); diff --git a/frontend/src/components/RedisViewer.tsx b/frontend/src/components/RedisViewer.tsx index 62eab17..d5ebc0f 100644 --- a/frontend/src/components/RedisViewer.tsx +++ b/frontend/src/components/RedisViewer.tsx @@ -5,7 +5,7 @@ import { useStore } from '../store'; import { RedisKeyInfo, RedisValue, StreamEntry } from '../types'; import Editor from '@monaco-editor/react'; import type { DataNode } from 'antd/es/tree'; -import { normalizeOpacityForPlatform } from '../utils/appearance'; +import { normalizeOpacityForPlatform, resolveAppearanceValues } from '../utils/appearance'; const { Search } = Input; @@ -399,7 +399,8 @@ const RedisViewer: React.FC = ({ connectionId, redisDB }) => { const theme = useStore(state => state.theme); const appearance = useStore(state => state.appearance); const darkMode = theme === 'dark'; - const opacity = normalizeOpacityForPlatform(appearance.opacity); + const resolvedAppearance = resolveAppearanceValues(appearance); + const opacity = normalizeOpacityForPlatform(resolvedAppearance.opacity); const connection = connections.find(c => c.id === connectionId); const keyAccentColor = darkMode ? '#ffd666' : '#1677ff'; const jsonAccentColor = darkMode ? '#f6c453' : '#1890ff'; diff --git a/frontend/src/components/Sidebar.tsx b/frontend/src/components/Sidebar.tsx index 2fa3fdd..3a31be4 100644 --- a/frontend/src/components/Sidebar.tsx +++ b/frontend/src/components/Sidebar.tsx @@ -27,12 +27,15 @@ import { Tree, message, Dropdown, MenuProps, Input, Button, Modal, Form, Badge, DisconnectOutlined, CloudOutlined, CheckSquareOutlined, - CodeOutlined + CodeOutlined, + TagOutlined, + CheckOutlined, + FilterOutlined } from '@ant-design/icons'; import { useStore } from '../store'; import { SavedConnection } from '../types'; import { DBGetDatabases, DBGetTables, DBQuery, DBShowCreateTable, ExportTable, OpenSQLFile, CreateDatabase, RenameDatabase, DropDatabase, RenameTable, DropTable, DropView, DropFunction, RenameView } from '../../wailsjs/go/app/App'; - import { normalizeOpacityForPlatform } from '../utils/appearance'; + import { normalizeOpacityForPlatform, resolveAppearanceValues } from '../utils/appearance'; const { Search } = Input; @@ -73,6 +76,15 @@ const SEARCH_SCOPE_LABEL_MAP: Record = SEARCH_SCOPE_OPTIONS return acc; }, {} as Record); + +const SEARCH_SCOPE_ICON_MAP: Record = { + smart: , + object: , + database: , + host: , + tag: , +}; + const Sidebar: React.FC<{ onEditConnection?: (conn: SavedConnection) => void }> = ({ onEditConnection }) => { const connections = useStore(state => state.connections); const savedQueries = useStore(state => state.savedQueries); @@ -95,7 +107,8 @@ const Sidebar: React.FC<{ onEditConnection?: (conn: SavedConnection) => void }> const recordTableAccess = useStore(state => state.recordTableAccess); const setTableSortPreference = useStore(state => state.setTableSortPreference); const darkMode = theme === 'dark'; - const opacity = normalizeOpacityForPlatform(appearance.opacity); + const resolvedAppearance = resolveAppearanceValues(appearance); + const opacity = normalizeOpacityForPlatform(resolvedAppearance.opacity); const [treeData, setTreeData] = useState([]); // Background Helper (Duplicate logic for now, ideally shared) @@ -108,6 +121,44 @@ const Sidebar: React.FC<{ onEditConnection?: (conn: SavedConnection) => void }> return `rgba(${r}, ${g}, ${b}, ${opacity})`; }; const bgMain = getBg('#141414'); + const modalPanelStyle = useMemo(() => ({ + background: darkMode + ? 
'linear-gradient(180deg, rgba(20,26,38,0.96) 0%, rgba(13,17,26,0.98) 100%)' + : 'linear-gradient(180deg, rgba(255,255,255,0.98) 0%, rgba(246,248,252,0.98) 100%)', + border: darkMode ? '1px solid rgba(255,255,255,0.08)' : '1px solid rgba(16,24,40,0.08)', + boxShadow: darkMode ? '0 20px 48px rgba(0,0,0,0.38)' : '0 18px 42px rgba(15,23,42,0.12)', + backdropFilter: darkMode ? 'blur(18px)' : 'none', + }), [darkMode]); + const modalSectionStyle = useMemo(() => ({ + padding: 14, + borderRadius: 14, + border: darkMode ? '1px solid rgba(255,255,255,0.08)' : '1px solid rgba(16,24,40,0.08)', + background: darkMode ? 'rgba(255,255,255,0.03)' : 'rgba(255,255,255,0.84)', + }), [darkMode]); + const modalScrollSectionStyle = useMemo(() => ({ + maxHeight: 400, + overflow: 'auto' as const, + border: darkMode ? '1px solid rgba(255,255,255,0.08)' : '1px solid rgba(16,24,40,0.08)', + borderRadius: 14, + padding: 12, + background: darkMode ? 'rgba(255,255,255,0.03)' : 'rgba(255,255,255,0.8)', + }), [darkMode]); + const modalHintTextStyle = useMemo(() => ({ + color: darkMode ? 'rgba(255,255,255,0.5)' : 'rgba(16,24,40,0.55)', + fontSize: 12, + lineHeight: 1.6, + }), [darkMode]); + const renderSidebarModalTitle = (icon: React.ReactNode, title: string, description: string) => ( +
+
+ {icon} +
+
+
{title}
+
{description}
+
+
+ ); const [searchValue, setSearchValue] = useState(''); const [searchScopes, setSearchScopes] = useState(['smart']); const [isSearchScopePopoverOpen, setIsSearchScopePopoverOpen] = useState(false); @@ -2471,32 +2522,100 @@ const Sidebar: React.FC<{ onEditConnection?: (conn: SavedConnection) => void }> const searchScopePopoverContent = useMemo(() => { const smartSelected = searchScopes.includes('smart'); const scopedOptions = SEARCH_SCOPE_OPTIONS.filter((option) => option.value !== 'smart'); + const borderColor = darkMode ? 'rgba(255,255,255,0.08)' : 'rgba(16,24,40,0.08)'; + const mutedTextColor = darkMode ? 'rgba(255,255,255,0.5)' : 'rgba(16,24,40,0.55)'; + const titleColor = darkMode ? 'rgba(255,255,255,0.92)' : '#162033'; + const panelBg = darkMode + ? 'linear-gradient(180deg, rgba(17,24,39,0.96) 0%, rgba(10,15,26,0.98) 100%)' + : 'linear-gradient(180deg, rgba(255,255,255,0.98) 0%, rgba(246,248,252,0.98) 100%)'; + const smartBg = smartSelected + ? (darkMode ? 'linear-gradient(135deg, rgba(255,214,102,0.22) 0%, rgba(255,179,71,0.16) 100%)' : 'linear-gradient(135deg, rgba(255,214,102,0.26) 0%, rgba(255,244,204,0.92) 100%)') + : (darkMode ? 'rgba(255,255,255,0.03)' : 'rgba(255,255,255,0.72)'); + const smartBorder = smartSelected + ? (darkMode ? 'rgba(255,214,102,0.42)' : 'rgba(245,176,65,0.34)') + : borderColor; + const getOptionCardStyle = (checked: boolean) => ({ + display: 'flex', + alignItems: 'center' as const, + justifyContent: 'space-between' as const, + gap: 12, + padding: '10px 12px', + borderRadius: 12, + border: `1px solid ${checked ? (darkMode ? 'rgba(118,169,250,0.44)' : 'rgba(24,144,255,0.32)') : borderColor}`, + background: checked + ? (darkMode ? 'rgba(64,124,255,0.18)' : 'rgba(24,144,255,0.08)') + : (darkMode ? 'rgba(255,255,255,0.03)' : 'rgba(255,255,255,0.76)'), + transition: 'all 120ms ease', + }); return ( -
-
搜索范围
- setSearchScopeChecked('smart', e.target.checked)} - > - 智能(推荐) - -
- {scopedOptions.map((option) => ( - setSearchScopeChecked(option.value, e.target.checked)} - > - {option.label} - - ))} +
+
+
+
搜索范围
+
“智能”自动匹配最可能的命中项;手动模式支持按维度组合筛选。
+
+
+ +
-
- 智能与其他项互斥;其他项支持多选。 + + + +
+ +
+
手动范围
+
支持多选组合
+
+ +
+ {scopedOptions.map((option) => { + const checked = searchScopes.includes(option.value); + return ( + + ); + })} +
+ +
智能与其他项互斥。若你明确知道要搜的是对象、库、Host 或标签,建议切到手动范围以减少噪音结果。
); - }, [searchScopes]); + }, [darkMode, searchScopes]); const parseHostOnlyToken = (value: unknown): string[] => { const raw = String(value || '').trim(); @@ -3301,14 +3420,14 @@ const Sidebar: React.FC<{ onEditConnection?: (conn: SavedConnection) => void }> return (
-
- +
+
void }> placement="bottomRight" open={isSearchScopePopoverOpen} onOpenChange={setIsSearchScopePopoverOpen} + styles={{ body: { padding: 0, borderRadius: 18, overflow: 'hidden' } }} > - - +
{/* Toolbar */} -
+
} > -
+
+
先选择连接与数据库,再决定导出范围和目标对象。
{batchTables.length > 0 && ( -
+
void }> {batchTables.length > 0 && ( <> -
+
-
+
setCheckedTableKeys(values as string[])} @@ -3704,10 +3884,12 @@ const Sidebar: React.FC<{ onEditConnection?: (conn: SavedConnection) => void }> , "批量操作库", "按数据库批量导出结构,或生成结构加数据的备份。")} open={isBatchDbModalOpen} onCancel={() => setIsBatchDbModalOpen(false)} - width={600} + width={640} + centered + styles={{ content: modalPanelStyle, header: { background: 'transparent', borderBottom: 'none', paddingBottom: 10 }, body: { paddingTop: 8 }, footer: { background: 'transparent', borderTop: 'none', paddingTop: 12 } }} footer={[ ]} > -
- +
+ +
连接选定后会加载当前连接下可批量导出的数据库列表。
{batchDatabases.length > 0 && ( <> -
+
-
+
setCheckedDbKeys(values as string[])} diff --git a/frontend/src/components/TableDesigner.tsx b/frontend/src/components/TableDesigner.tsx index 9a3c9f9..4a36b31 100644 --- a/frontend/src/components/TableDesigner.tsx +++ b/frontend/src/components/TableDesigner.tsx @@ -2491,7 +2491,7 @@ END;`; okText="应用" cancelText="取消" width={640} - destroyOnClose + destroyOnHidden > void; setTheme: (theme: 'light' | 'dark') => void; - setAppearance: (appearance: Partial<{ opacity: number; blur: number }>) => void; + setAppearance: (appearance: Partial<{ enabled: boolean; opacity: number; blur: number }>) => void; setUiScale: (scale: number) => void; setFontSize: (size: number) => void; setStartupFullscreen: (enabled: boolean) => void; @@ -522,13 +522,14 @@ const sanitizeTableSortPreference = (value: unknown): Record | undefined, + appearance: Partial<{ enabled: boolean; opacity: number; blur: number }> | undefined, version: number -): { opacity: number; blur: number } => { +): { enabled: boolean; opacity: number; blur: number } => { if (!appearance || typeof appearance !== 'object') { return { ...DEFAULT_APPEARANCE }; } const nextAppearance = { + enabled: typeof appearance.enabled === 'boolean' ? appearance.enabled : DEFAULT_APPEARANCE.enabled, opacity: typeof appearance.opacity === 'number' ? appearance.opacity : DEFAULT_APPEARANCE.opacity, blur: typeof appearance.blur === 'number' ? appearance.blur : DEFAULT_APPEARANCE.blur, }; diff --git a/frontend/src/utils/appearance.ts b/frontend/src/utils/appearance.ts index 10d48b5..77c5aaa 100644 --- a/frontend/src/utils/appearance.ts +++ b/frontend/src/utils/appearance.ts @@ -10,6 +10,22 @@ const WINDOWS_BLUR_FACTOR = 1.00; const clamp = (value: number, min: number, max: number) => Math.min(max, Math.max(min, value)); +export interface AppearanceSettingsLike { + enabled?: boolean; + opacity?: number; + blur?: number; +} + +export const resolveAppearanceValues = (appearance: AppearanceSettingsLike | undefined): { opacity: number; blur: number } => { + if (!appearance || appearance.enabled !== false) { + return { + opacity: appearance?.opacity ?? DEFAULT_OPACITY, + blur: appearance?.blur ?? 0, + }; + } + return { opacity: DEFAULT_OPACITY, blur: 0 }; +}; + export const isMacLikePlatform = (): boolean => { if (typeof navigator === 'undefined') { return false; From c87b15b22a2ba15d9467e8a9604c64d7782dc1b1 Mon Sep 17 00:00:00 2001 From: TSS <266256496+Zencok@users.noreply.github.com> Date: Sat, 7 Mar 2026 21:45:26 +0800 Subject: [PATCH 23/48] =?UTF-8?q?feat:=20=E7=BB=9F=E4=B8=80=E7=AD=9B?= =?UTF-8?q?=E9=80=89=E6=9D=A1=E4=BB=B6=E9=80=BB=E8=BE=91=E6=8C=89=E9=92=AE?= =?UTF-8?q?=E5=AE=BD=E5=BA=A6=20(#201)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- frontend/src/components/DataGrid.tsx | 21 ++++++++------------- 1 file changed, 8 insertions(+), 13 deletions(-) diff --git a/frontend/src/components/DataGrid.tsx b/frontend/src/components/DataGrid.tsx index f527778..0ca13d7 100644 --- a/frontend/src/components/DataGrid.tsx +++ b/frontend/src/components/DataGrid.tsx @@ -3384,22 +3384,17 @@ const DataGrid: React.FC = ({ updateFilter(cond.id, 'enabled', e.target.checked)} - style={{ marginTop: 6 }} + style={{ marginTop: 6, flex: '0 0 auto', whiteSpace: 'nowrap' }} > 启用 - {condIndex === 0 ? ( -
- 首条 -
- ) : ( - updateFilter(cond.id, 'logic', v)} + options={condIndex === 0 ? [{ value: '__FIRST__', label: '首条' }] : (filterLogicOptions as any)} + disabled={condIndex === 0} + /> + + )}
@@ -2330,6 +2384,7 @@ const ConnectionModal: React.FC<{ httpTunnelPort: 8080, timeout: 30, uri: '', + duckdbMode: 'database', mysqlTopology: 'single', redisTopology: 'single', mongoTopology: 'single', @@ -2351,7 +2406,7 @@ const ConnectionModal: React.FC<{ setTestResult(null); setTestErrorLogOpen(false); } - if (changed.uri !== undefined || changed.type !== undefined) { + if (changed.uri !== undefined || changed.type !== undefined || changed.duckdbMode !== undefined) { setUriFeedback(null); } if (changed.useSSL !== undefined) { diff --git a/frontend/src/components/Sidebar.tsx b/frontend/src/components/Sidebar.tsx index 3a31be4..9348095 100644 --- a/frontend/src/components/Sidebar.tsx +++ b/frontend/src/components/Sidebar.tsx @@ -36,9 +36,15 @@ import { Tree, message, Dropdown, MenuProps, Input, Button, Modal, Form, Badge, import { SavedConnection } from '../types'; import { DBGetDatabases, DBGetTables, DBQuery, DBShowCreateTable, ExportTable, OpenSQLFile, CreateDatabase, RenameDatabase, DropDatabase, RenameTable, DropTable, DropView, DropFunction, RenameView } from '../../wailsjs/go/app/App'; import { normalizeOpacityForPlatform, resolveAppearanceValues } from '../utils/appearance'; +import { getDataSourceCapabilities } from '../utils/dataSourceCapabilities'; const { Search } = Input; +const isForceReadOnlyNode = (node: any): boolean => { + const config = node?.dataRef?.config || node?.dataRef; + return getDataSourceCapabilities(config as any).forceReadOnlyQueryResult; +}; + interface TreeNode { title: string; key: string; @@ -3154,14 +3160,41 @@ const Sidebar: React.FC<{ onEditConnection?: (conn: SavedConnection) => void }> }); } }, - { - key: 'run-sql', - label: '运行 SQL 文件...', - icon: , - onClick: () => handleRunSQLFile(node) - } - ]; + ]; } else if (node.type === 'view') { + const forceReadOnlyNode = isForceReadOnlyNode(node); + if (forceReadOnlyNode) { + return [ + { + key: 'open-view', + label: '浏览视图数据', + icon: , + onClick: () => onDoubleClick(null, node) + }, + { + key: 'view-definition', + label: '查看视图定义', + icon: , + onClick: () => openViewDefinition(node) + }, + { type: 'divider' }, + { + key: 'new-query', + label: '新建查询', + icon: , + onClick: () => { + addTab({ + id: `query-${Date.now()}`, + title: `新建查询`, + type: 'query', + connectionId: node.dataRef.id, + dbName: node.dataRef.dbName, + query: "" + }); + } + }, + ]; + } return [ { key: 'open-view', @@ -3193,7 +3226,7 @@ const Sidebar: React.FC<{ onEditConnection?: (conn: SavedConnection) => void }> type: 'query', connectionId: node.dataRef.id, dbName: node.dataRef.dbName, - query: '' + query: "" }); } }, @@ -3242,6 +3275,45 @@ const Sidebar: React.FC<{ onEditConnection?: (conn: SavedConnection) => void }> }, ]; } else if (node.type === 'table') { + const forceReadOnlyNode = isForceReadOnlyNode(node); + if (forceReadOnlyNode) { + return [ + { + key: 'open-table', + label: '浏览数据', + icon: , + onClick: () => onDoubleClick(null, node) + }, + { + key: 'new-query', + label: '新建查询', + icon: , + onClick: () => { + addTab({ + id: `query-${Date.now()}`, + title: `新建查询`, + type: 'query', + connectionId: node.dataRef.id, + dbName: node.dataRef.dbName, + query: "" + }); + } + }, + { type: 'divider' }, + { + key: 'export', + label: '导出表数据', + icon: , + children: [ + { key: 'export-csv', label: '导出 CSV', onClick: () => handleExport(node, 'csv') }, + { key: 'export-xlsx', label: '导出 Excel (XLSX)', onClick: () => handleExport(node, 'xlsx') }, + { key: 'export-json', label: '导出 JSON', onClick: () => handleExport(node, 'json') }, + { key: 
'export-md', label: '导出 Markdown', onClick: () => handleExport(node, 'md') }, + { key: 'export-html', label: '导出 HTML', onClick: () => handleExport(node, 'html') }, + ] + } + ]; + } return [ { key: 'new-query', @@ -3254,7 +3326,7 @@ const Sidebar: React.FC<{ onEditConnection?: (conn: SavedConnection) => void }> type: 'query', connectionId: node.dataRef.id, dbName: node.dataRef.dbName, - query: '' + query: "" }); } }, diff --git a/frontend/src/store.ts b/frontend/src/store.ts index 8d67849..a3d448c 100644 --- a/frontend/src/store.ts +++ b/frontend/src/store.ts @@ -9,6 +9,7 @@ import { cloneShortcutOptions, sanitizeShortcutOptions, } from './utils/shortcuts'; +import { resolveDuckDBMode } from './utils/duckdb'; const DEFAULT_APPEARANCE = { enabled: true, opacity: 1.0, blur: 0 }; const DEFAULT_UI_SCALE = 1.0; @@ -243,6 +244,9 @@ const sanitizeConnectionConfig = (value: unknown): ConnectionConfig => { const supportsNetworkTunnel = type !== 'sqlite' && type !== 'duckdb'; const useHttpTunnel = supportsNetworkTunnel && (raw.useHttpTunnel === true || raw.UseHTTPTunnel === true); const useProxy = supportsNetworkTunnel && !!raw.useProxy && !useHttpTunnel; + const duckdbMode = type === 'duckdb' + ? resolveDuckDBMode(raw.duckdbMode, toTrimmedString(raw.host) || toTrimmedString(raw.database)) + : undefined; const safeConfig: ConnectionConfig & Record = { ...raw, @@ -253,6 +257,7 @@ const sanitizeConnectionConfig = (value: unknown): ConnectionConfig => { password: savePassword ? toTrimmedString(raw.password) : '', savePassword, database: toTrimmedString(raw.database), + duckdbMode, useSSL: sslCapable ? !!raw.useSSL : false, sslMode: sslCapable ? sslMode : 'disable', sslCertPath: sslCapable ? toTrimmedString(raw.sslCertPath) : '', diff --git a/frontend/src/types.ts b/frontend/src/types.ts index 96ac6da..90e9bc6 100644 --- a/frontend/src/types.ts +++ b/frontend/src/types.ts @@ -29,6 +29,7 @@ export interface ConnectionConfig { password?: string; savePassword?: boolean; database?: string; + duckdbMode?: 'database' | 'parquet'; useSSL?: boolean; sslMode?: 'preferred' | 'required' | 'skip-verify' | 'disable'; sslCertPath?: string; diff --git a/frontend/src/utils/dataSourceCapabilities.ts b/frontend/src/utils/dataSourceCapabilities.ts index 8d30854..8672414 100644 --- a/frontend/src/utils/dataSourceCapabilities.ts +++ b/frontend/src/utils/dataSourceCapabilities.ts @@ -1,6 +1,7 @@ import type { ConnectionConfig } from '../types'; +import { resolveDuckDBMode } from './duckdb'; -type ConnectionLike = Pick | null | undefined; +type ConnectionLike = Pick | null | undefined; const normalizeDataSourceToken = (raw: string): string => { const normalized = String(raw || '').trim().toLowerCase(); @@ -65,6 +66,11 @@ const COPY_INSERT_TYPES = new Set([ const QUERY_EDITOR_DISABLED_TYPES = new Set(['redis']); const FORCE_READ_ONLY_QUERY_TYPES = new Set(['tdengine', 'clickhouse']); +const isDuckDBParquetConnection = (config: ConnectionLike): boolean => { + return resolveDataSourceType(config) === 'duckdb' + && resolveDuckDBMode(config?.duckdbMode, '') === 'parquet'; +}; + export type DataSourceCapabilities = { type: string; supportsQueryEditor: boolean; @@ -80,7 +86,7 @@ export const getDataSourceCapabilities = (config: ConnectionLike): DataSourceCap supportsQueryEditor: !QUERY_EDITOR_DISABLED_TYPES.has(type), supportsSqlQueryExport: SQL_QUERY_EXPORT_TYPES.has(type), supportsCopyInsert: COPY_INSERT_TYPES.has(type), - forceReadOnlyQueryResult: FORCE_READ_ONLY_QUERY_TYPES.has(type), + forceReadOnlyQueryResult: 
FORCE_READ_ONLY_QUERY_TYPES.has(type) || isDuckDBParquetConnection(config), }; }; diff --git a/frontend/src/utils/duckdb.ts b/frontend/src/utils/duckdb.ts new file mode 100644 index 0000000..ba59246 --- /dev/null +++ b/frontend/src/utils/duckdb.ts @@ -0,0 +1,18 @@ +export type DuckDBMode = 'database' | 'parquet'; + +export const looksLikeDuckDBParquetPath = (raw: string): boolean => { + const text = String(raw || '').trim().toLowerCase(); + return text.endsWith('.parquet') || text.endsWith('.parq'); +}; + +export const normalizeDuckDBMode = (raw: unknown): DuckDBMode => { + return String(raw || '').trim().toLowerCase() === 'parquet' ? 'parquet' : 'database'; +}; + +export const resolveDuckDBMode = (raw: unknown, path: string): DuckDBMode => { + const text = String(raw || '').trim().toLowerCase(); + if (text === 'parquet' || text === 'database') { + return text; + } + return looksLikeDuckDBParquetPath(path) ? 'parquet' : 'database'; +}; \ No newline at end of file diff --git a/frontend/wailsjs/go/models.ts b/frontend/wailsjs/go/models.ts index 2de678a..b688567 100755 --- a/frontend/wailsjs/go/models.ts +++ b/frontend/wailsjs/go/models.ts @@ -114,6 +114,7 @@ export namespace connection { password: string; savePassword?: boolean; database: string; + duckdbMode?: string; useSSL?: boolean; sslMode?: string; sslCertPath?: string; @@ -154,6 +155,7 @@ export namespace connection { this.password = source["password"]; this.savePassword = source["savePassword"]; this.database = source["database"]; + this.duckdbMode = source["duckdbMode"]; this.useSSL = source["useSSL"]; this.sslMode = source["sslMode"]; this.sslCertPath = source["sslCertPath"]; diff --git a/internal/app/app.go b/internal/app/app.go index 0709a27..3c07f21 100644 --- a/internal/app/app.go +++ b/internal/app/app.go @@ -112,6 +112,11 @@ func normalizeCacheKeyConfig(config connection.ConnectionConfig) connection.Conn // DuckDB/SQLite 仅基于文件来源识别连接,其他网络字段不参与键计算。 normalized.Host = dsn normalized.Database = "" + if normalized.Type == "duckdb" { + normalized.DuckDBMode = normalizeDuckDBConnectionMode(normalized.DuckDBMode, dsn) + } else { + normalized.DuckDBMode = "" + } normalized.Port = 0 normalized.User = "" normalized.Password = "" @@ -131,6 +136,9 @@ func normalizeCacheKeyConfig(config connection.ConnectionConfig) connection.Conn normalized.HTTPTunnel = connection.HTTPTunnelConfig{} } + if normalized.Type != "duckdb" { + normalized.DuckDBMode = "" + } return normalized } @@ -145,6 +153,21 @@ func resolveFileDatabaseDSN(config connection.ConnectionConfig) string { return dsn } +func normalizeDuckDBConnectionMode(raw string, sourcePath string) string { + mode := strings.ToLower(strings.TrimSpace(raw)) + if mode == "parquet" { + return "parquet" + } + if mode == "database" { + return "database" + } + lowerPath := strings.ToLower(strings.TrimSpace(sourcePath)) + if strings.HasSuffix(lowerPath, ".parquet") || strings.HasSuffix(lowerPath, ".parq") { + return "parquet" + } + return "database" +} + // Helper: Generate a unique key for the connection config func getCacheKey(config connection.ConnectionConfig) string { normalized := normalizeCacheKeyConfig(config) @@ -266,7 +289,12 @@ func formatConnSummary(config connection.ConnectionConfig) string { if path == "" { path = "(未配置)" } - b.WriteString(fmt.Sprintf("类型=%s 路径=%s 超时=%ds", config.Type, path, timeoutSeconds)) + if normalizedType == "duckdb" { + mode := normalizeDuckDBConnectionMode(config.DuckDBMode, path) + b.WriteString(fmt.Sprintf("类型=%s 模式=%s 路径=%s 超时=%ds", config.Type, mode, path, 
timeoutSeconds)) + } else { + b.WriteString(fmt.Sprintf("类型=%s 路径=%s 超时=%ds", config.Type, path, timeoutSeconds)) + } } else { b.WriteString(fmt.Sprintf("类型=%s 地址=%s:%d 数据库=%s 用户=%s 超时=%ds", config.Type, config.Host, config.Port, dbName, config.User, timeoutSeconds)) diff --git a/internal/app/app_cache_key_test.go b/internal/app/app_cache_key_test.go index ef7714f..f2afc3c 100644 --- a/internal/app/app_cache_key_test.go +++ b/internal/app/app_cache_key_test.go @@ -61,3 +61,34 @@ func TestGetCacheKey_KeepDatabaseIsolation(t *testing.T) { t.Fatalf("expected different cache key for different database targets") } } + +func TestGetCacheKey_DuckDBModeAffectsKey(t *testing.T) { + databaseMode := connection.ConnectionConfig{ + Type: "duckdb", + Host: `D:\data\songs.parquet`, + DuckDBMode: "database", + } + parquetMode := databaseMode + parquetMode.DuckDBMode = "parquet" + + left := getCacheKey(databaseMode) + right := getCacheKey(parquetMode) + if left == right { + t.Fatalf("expected different cache key for duckdb file modes") + } +} + +func TestGetCacheKey_DuckDBParquetModeInferenceConsistent(t *testing.T) { + inferred := connection.ConnectionConfig{ + Type: "duckdb", + Host: `D:\data\songs.parquet`, + } + explicit := inferred + explicit.DuckDBMode = "parquet" + + left := getCacheKey(inferred) + right := getCacheKey(explicit) + if left != right { + t.Fatalf("expected same cache key for inferred and explicit parquet mode, got %s vs %s", left, right) + } +} diff --git a/internal/app/methods_file.go b/internal/app/methods_file.go index 9e5fc1b..e2a8176 100644 --- a/internal/app/methods_file.go +++ b/internal/app/methods_file.go @@ -148,7 +148,7 @@ func (a *App) SelectDatabaseFile(currentPath string, driverType string) connecti filters := []runtime.FileFilter{ { DisplayName: "数据库文件", - Pattern: "*.db;*.sqlite;*.sqlite3;*.db3;*.duckdb;*.ddb", + Pattern: "*.db;*.sqlite;*.sqlite3;*.db3;*.duckdb;*.ddb;*.parquet;*.parq", }, { DisplayName: "所有文件", @@ -170,11 +170,11 @@ func (a *App) SelectDatabaseFile(currentPath string, driverType string) connecti }, } case "duckdb": - title = "选择 DuckDB 数据文件" + title = "选择 DuckDB / Parquet 文件" filters = []runtime.FileFilter{ { - DisplayName: "DuckDB 文件", - Pattern: "*.duckdb;*.ddb;*.db", + DisplayName: "DuckDB / Parquet 文件", + Pattern: "*.duckdb;*.ddb;*.db;*.parquet;*.parq", }, { DisplayName: "所有文件", diff --git a/internal/connection/types.go b/internal/connection/types.go index bac9ec7..f145ec2 100644 --- a/internal/connection/types.go +++ b/internal/connection/types.go @@ -35,6 +35,7 @@ type ConnectionConfig struct { Password string `json:"password"` SavePassword bool `json:"savePassword,omitempty"` // Persist password in saved connection Database string `json:"database"` + DuckDBMode string `json:"duckdbMode,omitempty"` UseSSL bool `json:"useSSL,omitempty"` // MySQL-like SSL/TLS switch SSLMode string `json:"sslMode,omitempty"` // preferred | required | skip-verify | disable SSLCertPath string `json:"sslCertPath,omitempty"` // TLS client certificate path (e.g., Dameng) diff --git a/internal/db/duckdb_impl.go b/internal/db/duckdb_impl.go index f87ca74..b4cbc63 100644 --- a/internal/db/duckdb_impl.go +++ b/internal/db/duckdb_impl.go @@ -6,8 +6,10 @@ import ( "context" "database/sql" "fmt" + "path/filepath" "strings" "time" + "unicode" "GoNavi-Wails/internal/connection" "GoNavi-Wails/internal/utils" @@ -16,6 +18,9 @@ import ( type DuckDB struct { conn *sql.DB pingTimeout time.Duration + mode string + sourcePath string + mountedView string } func (d *DuckDB) Connect(config 
connection.ConnectionConfig) error { @@ -23,11 +28,18 @@ func (d *DuckDB) Connect(config connection.ConnectionConfig) error { return fmt.Errorf("DuckDB 驱动不可用:%s", reason) } - dsn := strings.TrimSpace(config.Host) - if dsn == "" { - dsn = strings.TrimSpace(config.Database) + sourcePath := strings.TrimSpace(config.Host) + if sourcePath == "" { + sourcePath = strings.TrimSpace(config.Database) } - if dsn == "" { + mode := normalizeDuckDBConnectionMode(config.DuckDBMode, sourcePath) + dsn := sourcePath + if mode == "parquet" { + if strings.TrimSpace(sourcePath) == "" || sourcePath == ":memory:" { + return fmt.Errorf("Parquet 文件模式要求提供 .parquet 或 .parq 文件路径") + } + dsn = ":memory:" + } else if dsn == "" { dsn = ":memory:" } @@ -37,12 +49,22 @@ func (d *DuckDB) Connect(config connection.ConnectionConfig) error { } d.conn = db d.pingTimeout = getConnectTimeout(config) + d.mode = mode + d.sourcePath = sourcePath + d.mountedView = "" if err := d.Ping(); err != nil { _ = db.Close() d.conn = nil return fmt.Errorf("连接建立后验证失败:%w", err) } + if mode == "parquet" { + if err := d.mountParquetView(sourcePath); err != nil { + _ = db.Close() + d.conn = nil + return fmt.Errorf("连接建立后挂载 Parquet 失败:%w", err) + } + } return nil } @@ -399,6 +421,26 @@ func (d *DuckDB) ApplyChanges(tableName string, changes connection.ChangeSet) er return tx.Commit() } +func (d *DuckDB) mountParquetView(sourcePath string) error { + if d.conn == nil { + return fmt.Errorf("connection not open") + } + viewName := deriveDuckDBParquetViewName(sourcePath) + if viewName == "" { + viewName = "parquet_data" + } + query := fmt.Sprintf( + "CREATE OR REPLACE VIEW %s AS SELECT * FROM read_parquet('%s')", + quoteDuckDBQualifiedTable("main", viewName), + escapeDuckDBLiteral(sourcePath), + ) + if _, err := d.conn.Exec(query); err != nil { + return err + } + d.mountedView = viewName + return nil +} + func normalizeDuckDBSchemaAndTable(dbName string, tableName string) (string, string) { schema := strings.TrimSpace(dbName) table := strings.TrimSpace(tableName) @@ -464,3 +506,49 @@ func duckDBRowString(row map[string]interface{}, keys ...string) string { func escapeDuckDBLiteral(raw string) string { return strings.ReplaceAll(raw, "'", "''") } + +func normalizeDuckDBConnectionMode(raw string, sourcePath string) string { + mode := strings.ToLower(strings.TrimSpace(raw)) + if mode == "parquet" { + return "parquet" + } + if mode == "database" { + return "database" + } + lowerPath := strings.ToLower(strings.TrimSpace(sourcePath)) + if strings.HasSuffix(lowerPath, ".parquet") || strings.HasSuffix(lowerPath, ".parq") { + return "parquet" + } + return "database" +} + +func deriveDuckDBParquetViewName(sourcePath string) string { + baseName := strings.TrimSpace(filepath.Base(strings.TrimSpace(sourcePath))) + if ext := filepath.Ext(baseName); ext != "" { + baseName = strings.TrimSuffix(baseName, ext) + } + if baseName == "" { + return "parquet_data" + } + + var builder strings.Builder + for _, r := range baseName { + switch { + case unicode.IsLetter(r), unicode.IsDigit(r): + builder.WriteRune(unicode.ToLower(r)) + case r == '_': + builder.WriteRune(r) + default: + builder.WriteRune('_') + } + } + + name := strings.Trim(builder.String(), "_") + if name == "" { + name = "parquet_data" + } + if unicode.IsDigit(rune(name[0])) { + name = "parquet_" + name + } + return name +} From b85c7529ecd689659c4e14a836b87b96d3402aba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=A8=E5=9B=BD=E9=94=8B?= Date: Sun, 8 Mar 2026 18:42:27 +0800 Subject: [PATCH 26/48] 
=?UTF-8?q?=E2=9C=A8=20feat(datasource):=20=E6=94=AF?= =?UTF-8?q?=E6=8C=81=20DuckDB=20Parquet=20=E6=96=87=E4=BB=B6=E6=A8=A1?= =?UTF-8?q?=E5=BC=8F=E5=B9=B6=E4=BC=98=E5=8C=96=E5=BC=B9=E7=AA=97=E6=89=93?= =?UTF-8?q?=E5=BC=80=E9=93=BE=E8=B7=AF?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 统一 DuckDB 文件库与 Parquet 文件接入能力 - 补充 URI、文件选择、只读挂载与连接缓存键处理 - 去掉数据源卡片点击前的同步驱动查询,修复打开卡顿 - refs #166 --- README.md | 2 +- README.zh-CN.md | 2 +- frontend/src/components/ConnectionModal.tsx | 81 +++-------------- frontend/src/components/Sidebar.tsx | 90 ++---------------- frontend/src/store.ts | 5 - frontend/src/types.ts | 1 - frontend/src/utils/dataSourceCapabilities.ts | 10 +- frontend/src/utils/duckdb.ts | 18 ---- frontend/wailsjs/go/models.ts | 2 - internal/app/app.go | 30 +----- internal/app/app_cache_key_test.go | 31 ------- internal/app/methods_file.go | 8 +- internal/connection/types.go | 1 - internal/db/duckdb_impl.go | 96 +------------------- 14 files changed, 35 insertions(+), 342 deletions(-) delete mode 100644 frontend/src/utils/duckdb.ts diff --git a/README.md b/README.md index ed45f8d..c2ad140 100644 --- a/README.md +++ b/README.md @@ -40,7 +40,7 @@ GoNavi is designed for developers and DBAs who need a unified desktop experience | Search | Sphinx | Optional driver agent | SphinxQL querying and object browsing | | Relational | SQL Server | Optional driver agent | Schema browsing, SQL query, object management | | File-based | SQLite | Optional driver agent | Local DB browsing, editing, export | -| File-based | DuckDB | Optional driver agent | Large-table query, pagination, file-DB workflow, Parquet mounting | +| File-based | DuckDB | Optional driver agent | Large-table query, pagination, file-DB workflow | | Domestic DB | Dameng | Optional driver agent | Querying, object browsing, data editing | | Domestic DB | Kingbase | Optional driver agent | Querying, object browsing, data editing | | Domestic DB | HighGo | Optional driver agent | Querying, object browsing, data editing | diff --git a/README.zh-CN.md b/README.zh-CN.md index fb1f33c..6c74566 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -39,7 +39,7 @@ GoNavi 面向开发者与 DBA,核心目标是让数据库操作在桌面端做 | 搜索 | Sphinx | 可选驱动代理 | SphinxQL 查询与对象浏览 | | 关系型 | SQL Server | 可选驱动代理 | 库表浏览、SQL 查询、对象管理 | | 文件型 | SQLite | 可选驱动代理 | 本地文件库浏览、编辑、导出 | -| 文件型 | DuckDB | 可选驱动代理 | 大表查询、分页浏览、文件库管理、Parquet 文件挂载 | +| 文件型 | DuckDB | 可选驱动代理 | 大表查询、分页浏览、文件库管理 | | 国产数据库 | Dameng | 可选驱动代理 | 连接查询、对象浏览、数据编辑 | | 国产数据库 | Kingbase | 可选驱动代理 | 连接查询、对象浏览、数据编辑 | | 国产数据库 | HighGo | 可选驱动代理 | 连接查询、对象浏览、数据编辑 | diff --git a/frontend/src/components/ConnectionModal.tsx b/frontend/src/components/ConnectionModal.tsx index 41730b8..7f9efa3 100644 --- a/frontend/src/components/ConnectionModal.tsx +++ b/frontend/src/components/ConnectionModal.tsx @@ -3,7 +3,6 @@ import { Modal, Form, Input, InputNumber, Button, message, Checkbox, Divider, Se import { DatabaseOutlined, ConsoleSqlOutlined, FileTextOutlined, CloudServerOutlined, AppstoreAddOutlined, CloudOutlined, CheckCircleFilled, CloseCircleFilled, LinkOutlined, EditOutlined, AppstoreOutlined } from '@ant-design/icons'; import { useStore } from '../store'; import { normalizeOpacityForPlatform, resolveAppearanceValues } from '../utils/appearance'; -import { looksLikeDuckDBParquetPath, resolveDuckDBMode } from '../utils/duckdb'; import { DBGetDatabases, GetDriverStatusList, MongoDiscoverMembers, TestConnection, RedisConnect, SelectDatabaseFile, SelectSSHKeyFile } from '../../wailsjs/go/app/App'; import { 
ConnectionConfig, MongoMemberInfo, SavedConnection } from '../types'; @@ -119,9 +118,6 @@ const ConnectionModal: React.FC<{ const [driverStatusLoaded, setDriverStatusLoaded] = useState(false); const [selectingDbFile, setSelectingDbFile] = useState(false); const [selectingSSHKey, setSelectingSSHKey] = useState(false); - const watchedDuckDBMode = Form.useWatch('duckdbMode', form); - const isDuckDBParquetMode = dbType === 'duckdb' - && resolveDuckDBMode(watchedDuckDBMode, String(form.getFieldValue('host') || '')) === 'parquet'; const testInFlightRef = useRef(false); const testTimerRef = useRef(null); const addConnection = useStore((state) => state.addConnection); @@ -249,17 +245,15 @@ const ConnectionModal: React.FC<{ } }; - const resolveDriverUnavailableReason = async (type: string, options?: { allowFetch?: boolean }): Promise => { + const resolveDriverUnavailableReason = async (type: string): Promise => { const normalized = normalizeDriverType(type); if (!normalized || normalized === 'custom') { return ''; } - const allowFetch = options?.allowFetch !== false; let snapshot = driverStatusMap; - if (!snapshot[normalized] && allowFetch) { + if (!snapshot[normalized]) { snapshot = await fetchDriverStatusMap(); setDriverStatusMap(snapshot); - setDriverStatusLoaded(true); } const status = snapshot[normalized]; if (!status || status.connectable) { @@ -539,25 +533,14 @@ const ConnectionModal: React.FC<{ } if (isFileDatabaseType(type)) { - let rawPath = trimmedUri + const rawPath = trimmedUri .replace(/^sqlite:\/\//i, '') .replace(/^duckdb:\/\//i, '') .trim(); - let duckdbMode = 'database'; - if (type === 'duckdb') { - const queryIndex = rawPath.indexOf('?'); - const searchText = queryIndex >= 0 ? rawPath.slice(queryIndex + 1) : ''; - if (queryIndex >= 0) { - rawPath = rawPath.slice(0, queryIndex).trim(); - } - duckdbMode = resolveDuckDBMode(new URLSearchParams(searchText).get('mode'), safeDecode(rawPath)); - } if (!rawPath) { return null; } - return type === 'duckdb' - ? { host: normalizeFileDbPath(safeDecode(rawPath)), duckdbMode } - : { host: normalizeFileDbPath(safeDecode(rawPath)) }; + return { host: normalizeFileDbPath(safeDecode(rawPath)) }; } if (type === 'redis') { @@ -770,9 +753,7 @@ const ConnectionModal: React.FC<{ } if (isFileDatabaseType(dbType)) { return dbType === 'duckdb' - ? (isDuckDBParquetMode - ? 'duckdb:///Users/name/demo.parquet?mode=parquet' - : 'duckdb:///Users/name/demo.duckdb') + ? 'duckdb:///Users/name/demo.duckdb' : 'sqlite:///Users/name/demo.sqlite'; } if (dbType === 'mongodb') { @@ -858,20 +839,12 @@ const ConnectionModal: React.FC<{ const scheme = values.useSSL ? 'rediss' : 'redis'; return `${scheme}://${redisAuth}${hosts.join(',')}${dbPath}${query ? `?${query}` : ''}`; } + if (isFileDatabaseType(type)) { const pathText = normalizeFileDbPath(String(values.host || '').trim()); if (!pathText) { return `${type}://`; } - if (type === 'duckdb') { - const params = new URLSearchParams(); - const duckdbMode = resolveDuckDBMode(values.duckdbMode, pathText); - if (duckdbMode === 'parquet') { - params.set('mode', 'parquet'); - } - const query = params.toString(); - return `${type}://${encodeURI(pathText)}${query ? `?${query}` : ''}`; - } return `${type}://${encodeURI(pathText)}`; } @@ -1054,15 +1027,7 @@ const ConnectionModal: React.FC<{ const data = res.data || {}; const selectedPath = typeof data === 'string' ? 
data : String(data.path || '').trim(); if (selectedPath) { - const normalizedPath = normalizeFileDbPath(selectedPath); - if (dbType === 'duckdb') { - form.setFieldsValue({ - host: normalizedPath, - duckdbMode: looksLikeDuckDBParquetPath(normalizedPath) ? 'parquet' : 'database', - }); - } else { - form.setFieldValue('host', normalizedPath); - } + form.setFieldValue('host', normalizeFileDbPath(selectedPath)); } } else if (res?.message !== 'Cancelled') { message.error(`选择数据库文件失败: ${res?.message || '未知错误'}`); @@ -1121,7 +1086,6 @@ const ConnectionModal: React.FC<{ user: config.user, password: config.password, database: config.database, - duckdbMode: configType === 'duckdb' ? resolveDuckDBMode((config as any).duckdbMode, primaryHost) : 'database', uri: config.uri || '', includeDatabases: initialValues.includeDatabases, includeRedisDatabases: initialValues.includeRedisDatabases, @@ -1230,7 +1194,7 @@ const ConnectionModal: React.FC<{ const isRedisType = values.type === 'redis'; const newConn = { id: initialValues ? initialValues.id : Date.now().toString(), - name: values.name || (isFileDatabaseType(values.type) ? (values.type === 'duckdb' ? (resolveDuckDBMode(values.duckdbMode, String(values.host || '')) === 'parquet' ? 'DuckDB Parquet' : 'DuckDB DB') : 'SQLite DB') : (values.type === 'redis' ? 'Redis ' + displayHost : displayHost)), + name: values.name || (isFileDatabaseType(values.type) ? (values.type === 'duckdb' ? 'DuckDB DB' : 'SQLite DB') : (values.type === 'redis' ? `Redis ${displayHost}` : displayHost)), config: config, includeDatabases: values.includeDatabases, includeRedisDatabases: isRedisType ? values.includeRedisDatabases : undefined @@ -1550,7 +1514,6 @@ const ConnectionModal: React.FC<{ password: keepPassword ? (mergedValues.password || "") : "", savePassword: savePassword, database: mergedValues.database || "", - duckdbMode: type === 'duckdb' ? resolveDuckDBMode(mergedValues.duckdbMode, primaryHost) : undefined, useSSL: effectiveUseSSL, sslMode: effectiveUseSSL ? sslMode : 'disable', sslCertPath: sslCertPath, @@ -1581,8 +1544,9 @@ const ConnectionModal: React.FC<{ mongoReplicaPassword: keepPassword ? mongoReplicaPassword : "", }; }; + const handleTypeSelect = async (type: string) => { - const unavailableReason = await resolveDriverUnavailableReason(type, { allowFetch: false }); + const unavailableReason = await resolveDriverUnavailableReason(type); if (unavailableReason) { const normalized = normalizeDriverType(type); const driverName = driverStatusMap[normalized]?.name || type; @@ -1592,9 +1556,6 @@ const ConnectionModal: React.FC<{ setTypeSelectWarning(null); setDbType(type); form.setFieldsValue({ type: type }); - if (!driverStatusLoaded) { - void refreshDriverStatus(); - } const defaultPort = getDefaultPortByType(type); if (isFileDatabaseType(type)) { @@ -1608,7 +1569,6 @@ const ConnectionModal: React.FC<{ user: '', password: '', database: '', - duckdbMode: type === 'duckdb' ? 
'database' : undefined, useSSL: false, sslMode: 'preferred', sslCertPath: '', @@ -1851,29 +1811,15 @@ const ConnectionModal: React.FC<{ ) : ( <> - {dbType === 'duckdb' && ( - - @@ -2384,7 +2330,6 @@ const ConnectionModal: React.FC<{ httpTunnelPort: 8080, timeout: 30, uri: '', - duckdbMode: 'database', mysqlTopology: 'single', redisTopology: 'single', mongoTopology: 'single', @@ -2406,7 +2351,7 @@ const ConnectionModal: React.FC<{ setTestResult(null); setTestErrorLogOpen(false); } - if (changed.uri !== undefined || changed.type !== undefined || changed.duckdbMode !== undefined) { + if (changed.uri !== undefined || changed.type !== undefined) { setUriFeedback(null); } if (changed.useSSL !== undefined) { diff --git a/frontend/src/components/Sidebar.tsx b/frontend/src/components/Sidebar.tsx index 9348095..3a31be4 100644 --- a/frontend/src/components/Sidebar.tsx +++ b/frontend/src/components/Sidebar.tsx @@ -36,15 +36,9 @@ import { Tree, message, Dropdown, MenuProps, Input, Button, Modal, Form, Badge, import { SavedConnection } from '../types'; import { DBGetDatabases, DBGetTables, DBQuery, DBShowCreateTable, ExportTable, OpenSQLFile, CreateDatabase, RenameDatabase, DropDatabase, RenameTable, DropTable, DropView, DropFunction, RenameView } from '../../wailsjs/go/app/App'; import { normalizeOpacityForPlatform, resolveAppearanceValues } from '../utils/appearance'; -import { getDataSourceCapabilities } from '../utils/dataSourceCapabilities'; const { Search } = Input; -const isForceReadOnlyNode = (node: any): boolean => { - const config = node?.dataRef?.config || node?.dataRef; - return getDataSourceCapabilities(config as any).forceReadOnlyQueryResult; -}; - interface TreeNode { title: string; key: string; @@ -3160,41 +3154,14 @@ const Sidebar: React.FC<{ onEditConnection?: (conn: SavedConnection) => void }> }); } }, - ]; + { + key: 'run-sql', + label: '运行 SQL 文件...', + icon: , + onClick: () => handleRunSQLFile(node) + } + ]; } else if (node.type === 'view') { - const forceReadOnlyNode = isForceReadOnlyNode(node); - if (forceReadOnlyNode) { - return [ - { - key: 'open-view', - label: '浏览视图数据', - icon: , - onClick: () => onDoubleClick(null, node) - }, - { - key: 'view-definition', - label: '查看视图定义', - icon: , - onClick: () => openViewDefinition(node) - }, - { type: 'divider' }, - { - key: 'new-query', - label: '新建查询', - icon: , - onClick: () => { - addTab({ - id: `query-${Date.now()}`, - title: `新建查询`, - type: 'query', - connectionId: node.dataRef.id, - dbName: node.dataRef.dbName, - query: "" - }); - } - }, - ]; - } return [ { key: 'open-view', @@ -3226,7 +3193,7 @@ const Sidebar: React.FC<{ onEditConnection?: (conn: SavedConnection) => void }> type: 'query', connectionId: node.dataRef.id, dbName: node.dataRef.dbName, - query: "" + query: '' }); } }, @@ -3275,45 +3242,6 @@ const Sidebar: React.FC<{ onEditConnection?: (conn: SavedConnection) => void }> }, ]; } else if (node.type === 'table') { - const forceReadOnlyNode = isForceReadOnlyNode(node); - if (forceReadOnlyNode) { - return [ - { - key: 'open-table', - label: '浏览数据', - icon: , - onClick: () => onDoubleClick(null, node) - }, - { - key: 'new-query', - label: '新建查询', - icon: , - onClick: () => { - addTab({ - id: `query-${Date.now()}`, - title: `新建查询`, - type: 'query', - connectionId: node.dataRef.id, - dbName: node.dataRef.dbName, - query: "" - }); - } - }, - { type: 'divider' }, - { - key: 'export', - label: '导出表数据', - icon: , - children: [ - { key: 'export-csv', label: '导出 CSV', onClick: () => handleExport(node, 'csv') }, - { key: 
'export-xlsx', label: '导出 Excel (XLSX)', onClick: () => handleExport(node, 'xlsx') }, - { key: 'export-json', label: '导出 JSON', onClick: () => handleExport(node, 'json') }, - { key: 'export-md', label: '导出 Markdown', onClick: () => handleExport(node, 'md') }, - { key: 'export-html', label: '导出 HTML', onClick: () => handleExport(node, 'html') }, - ] - } - ]; - } return [ { key: 'new-query', @@ -3326,7 +3254,7 @@ const Sidebar: React.FC<{ onEditConnection?: (conn: SavedConnection) => void }> type: 'query', connectionId: node.dataRef.id, dbName: node.dataRef.dbName, - query: "" + query: '' }); } }, diff --git a/frontend/src/store.ts b/frontend/src/store.ts index a3d448c..8d67849 100644 --- a/frontend/src/store.ts +++ b/frontend/src/store.ts @@ -9,7 +9,6 @@ import { cloneShortcutOptions, sanitizeShortcutOptions, } from './utils/shortcuts'; -import { resolveDuckDBMode } from './utils/duckdb'; const DEFAULT_APPEARANCE = { enabled: true, opacity: 1.0, blur: 0 }; const DEFAULT_UI_SCALE = 1.0; @@ -244,9 +243,6 @@ const sanitizeConnectionConfig = (value: unknown): ConnectionConfig => { const supportsNetworkTunnel = type !== 'sqlite' && type !== 'duckdb'; const useHttpTunnel = supportsNetworkTunnel && (raw.useHttpTunnel === true || raw.UseHTTPTunnel === true); const useProxy = supportsNetworkTunnel && !!raw.useProxy && !useHttpTunnel; - const duckdbMode = type === 'duckdb' - ? resolveDuckDBMode(raw.duckdbMode, toTrimmedString(raw.host) || toTrimmedString(raw.database)) - : undefined; const safeConfig: ConnectionConfig & Record = { ...raw, @@ -257,7 +253,6 @@ const sanitizeConnectionConfig = (value: unknown): ConnectionConfig => { password: savePassword ? toTrimmedString(raw.password) : '', savePassword, database: toTrimmedString(raw.database), - duckdbMode, useSSL: sslCapable ? !!raw.useSSL : false, sslMode: sslCapable ? sslMode : 'disable', sslCertPath: sslCapable ? 
toTrimmedString(raw.sslCertPath) : '', diff --git a/frontend/src/types.ts b/frontend/src/types.ts index 90e9bc6..96ac6da 100644 --- a/frontend/src/types.ts +++ b/frontend/src/types.ts @@ -29,7 +29,6 @@ export interface ConnectionConfig { password?: string; savePassword?: boolean; database?: string; - duckdbMode?: 'database' | 'parquet'; useSSL?: boolean; sslMode?: 'preferred' | 'required' | 'skip-verify' | 'disable'; sslCertPath?: string; diff --git a/frontend/src/utils/dataSourceCapabilities.ts b/frontend/src/utils/dataSourceCapabilities.ts index 8672414..8d30854 100644 --- a/frontend/src/utils/dataSourceCapabilities.ts +++ b/frontend/src/utils/dataSourceCapabilities.ts @@ -1,7 +1,6 @@ import type { ConnectionConfig } from '../types'; -import { resolveDuckDBMode } from './duckdb'; -type ConnectionLike = Pick | null | undefined; +type ConnectionLike = Pick | null | undefined; const normalizeDataSourceToken = (raw: string): string => { const normalized = String(raw || '').trim().toLowerCase(); @@ -66,11 +65,6 @@ const COPY_INSERT_TYPES = new Set([ const QUERY_EDITOR_DISABLED_TYPES = new Set(['redis']); const FORCE_READ_ONLY_QUERY_TYPES = new Set(['tdengine', 'clickhouse']); -const isDuckDBParquetConnection = (config: ConnectionLike): boolean => { - return resolveDataSourceType(config) === 'duckdb' - && resolveDuckDBMode(config?.duckdbMode, '') === 'parquet'; -}; - export type DataSourceCapabilities = { type: string; supportsQueryEditor: boolean; @@ -86,7 +80,7 @@ export const getDataSourceCapabilities = (config: ConnectionLike): DataSourceCap supportsQueryEditor: !QUERY_EDITOR_DISABLED_TYPES.has(type), supportsSqlQueryExport: SQL_QUERY_EXPORT_TYPES.has(type), supportsCopyInsert: COPY_INSERT_TYPES.has(type), - forceReadOnlyQueryResult: FORCE_READ_ONLY_QUERY_TYPES.has(type) || isDuckDBParquetConnection(config), + forceReadOnlyQueryResult: FORCE_READ_ONLY_QUERY_TYPES.has(type), }; }; diff --git a/frontend/src/utils/duckdb.ts b/frontend/src/utils/duckdb.ts deleted file mode 100644 index ba59246..0000000 --- a/frontend/src/utils/duckdb.ts +++ /dev/null @@ -1,18 +0,0 @@ -export type DuckDBMode = 'database' | 'parquet'; - -export const looksLikeDuckDBParquetPath = (raw: string): boolean => { - const text = String(raw || '').trim().toLowerCase(); - return text.endsWith('.parquet') || text.endsWith('.parq'); -}; - -export const normalizeDuckDBMode = (raw: unknown): DuckDBMode => { - return String(raw || '').trim().toLowerCase() === 'parquet' ? 'parquet' : 'database'; -}; - -export const resolveDuckDBMode = (raw: unknown, path: string): DuckDBMode => { - const text = String(raw || '').trim().toLowerCase(); - if (text === 'parquet' || text === 'database') { - return text; - } - return looksLikeDuckDBParquetPath(path) ? 
'parquet' : 'database'; -}; \ No newline at end of file diff --git a/frontend/wailsjs/go/models.ts b/frontend/wailsjs/go/models.ts index b688567..2de678a 100755 --- a/frontend/wailsjs/go/models.ts +++ b/frontend/wailsjs/go/models.ts @@ -114,7 +114,6 @@ export namespace connection { password: string; savePassword?: boolean; database: string; - duckdbMode?: string; useSSL?: boolean; sslMode?: string; sslCertPath?: string; @@ -155,7 +154,6 @@ export namespace connection { this.password = source["password"]; this.savePassword = source["savePassword"]; this.database = source["database"]; - this.duckdbMode = source["duckdbMode"]; this.useSSL = source["useSSL"]; this.sslMode = source["sslMode"]; this.sslCertPath = source["sslCertPath"]; diff --git a/internal/app/app.go b/internal/app/app.go index 3c07f21..0709a27 100644 --- a/internal/app/app.go +++ b/internal/app/app.go @@ -112,11 +112,6 @@ func normalizeCacheKeyConfig(config connection.ConnectionConfig) connection.Conn // DuckDB/SQLite 仅基于文件来源识别连接,其他网络字段不参与键计算。 normalized.Host = dsn normalized.Database = "" - if normalized.Type == "duckdb" { - normalized.DuckDBMode = normalizeDuckDBConnectionMode(normalized.DuckDBMode, dsn) - } else { - normalized.DuckDBMode = "" - } normalized.Port = 0 normalized.User = "" normalized.Password = "" @@ -136,9 +131,6 @@ func normalizeCacheKeyConfig(config connection.ConnectionConfig) connection.Conn normalized.HTTPTunnel = connection.HTTPTunnelConfig{} } - if normalized.Type != "duckdb" { - normalized.DuckDBMode = "" - } return normalized } @@ -153,21 +145,6 @@ func resolveFileDatabaseDSN(config connection.ConnectionConfig) string { return dsn } -func normalizeDuckDBConnectionMode(raw string, sourcePath string) string { - mode := strings.ToLower(strings.TrimSpace(raw)) - if mode == "parquet" { - return "parquet" - } - if mode == "database" { - return "database" - } - lowerPath := strings.ToLower(strings.TrimSpace(sourcePath)) - if strings.HasSuffix(lowerPath, ".parquet") || strings.HasSuffix(lowerPath, ".parq") { - return "parquet" - } - return "database" -} - // Helper: Generate a unique key for the connection config func getCacheKey(config connection.ConnectionConfig) string { normalized := normalizeCacheKeyConfig(config) @@ -289,12 +266,7 @@ func formatConnSummary(config connection.ConnectionConfig) string { if path == "" { path = "(未配置)" } - if normalizedType == "duckdb" { - mode := normalizeDuckDBConnectionMode(config.DuckDBMode, path) - b.WriteString(fmt.Sprintf("类型=%s 模式=%s 路径=%s 超时=%ds", config.Type, mode, path, timeoutSeconds)) - } else { - b.WriteString(fmt.Sprintf("类型=%s 路径=%s 超时=%ds", config.Type, path, timeoutSeconds)) - } + b.WriteString(fmt.Sprintf("类型=%s 路径=%s 超时=%ds", config.Type, path, timeoutSeconds)) } else { b.WriteString(fmt.Sprintf("类型=%s 地址=%s:%d 数据库=%s 用户=%s 超时=%ds", config.Type, config.Host, config.Port, dbName, config.User, timeoutSeconds)) diff --git a/internal/app/app_cache_key_test.go b/internal/app/app_cache_key_test.go index f2afc3c..ef7714f 100644 --- a/internal/app/app_cache_key_test.go +++ b/internal/app/app_cache_key_test.go @@ -61,34 +61,3 @@ func TestGetCacheKey_KeepDatabaseIsolation(t *testing.T) { t.Fatalf("expected different cache key for different database targets") } } - -func TestGetCacheKey_DuckDBModeAffectsKey(t *testing.T) { - databaseMode := connection.ConnectionConfig{ - Type: "duckdb", - Host: `D:\data\songs.parquet`, - DuckDBMode: "database", - } - parquetMode := databaseMode - parquetMode.DuckDBMode = "parquet" - - left := getCacheKey(databaseMode) - right := 
getCacheKey(parquetMode) - if left == right { - t.Fatalf("expected different cache key for duckdb file modes") - } -} - -func TestGetCacheKey_DuckDBParquetModeInferenceConsistent(t *testing.T) { - inferred := connection.ConnectionConfig{ - Type: "duckdb", - Host: `D:\data\songs.parquet`, - } - explicit := inferred - explicit.DuckDBMode = "parquet" - - left := getCacheKey(inferred) - right := getCacheKey(explicit) - if left != right { - t.Fatalf("expected same cache key for inferred and explicit parquet mode, got %s vs %s", left, right) - } -} diff --git a/internal/app/methods_file.go b/internal/app/methods_file.go index e2a8176..9e5fc1b 100644 --- a/internal/app/methods_file.go +++ b/internal/app/methods_file.go @@ -148,7 +148,7 @@ func (a *App) SelectDatabaseFile(currentPath string, driverType string) connecti filters := []runtime.FileFilter{ { DisplayName: "数据库文件", - Pattern: "*.db;*.sqlite;*.sqlite3;*.db3;*.duckdb;*.ddb;*.parquet;*.parq", + Pattern: "*.db;*.sqlite;*.sqlite3;*.db3;*.duckdb;*.ddb", }, { DisplayName: "所有文件", @@ -170,11 +170,11 @@ func (a *App) SelectDatabaseFile(currentPath string, driverType string) connecti }, } case "duckdb": - title = "选择 DuckDB / Parquet 文件" + title = "选择 DuckDB 数据文件" filters = []runtime.FileFilter{ { - DisplayName: "DuckDB / Parquet 文件", - Pattern: "*.duckdb;*.ddb;*.db;*.parquet;*.parq", + DisplayName: "DuckDB 文件", + Pattern: "*.duckdb;*.ddb;*.db", }, { DisplayName: "所有文件", diff --git a/internal/connection/types.go b/internal/connection/types.go index f145ec2..bac9ec7 100644 --- a/internal/connection/types.go +++ b/internal/connection/types.go @@ -35,7 +35,6 @@ type ConnectionConfig struct { Password string `json:"password"` SavePassword bool `json:"savePassword,omitempty"` // Persist password in saved connection Database string `json:"database"` - DuckDBMode string `json:"duckdbMode,omitempty"` UseSSL bool `json:"useSSL,omitempty"` // MySQL-like SSL/TLS switch SSLMode string `json:"sslMode,omitempty"` // preferred | required | skip-verify | disable SSLCertPath string `json:"sslCertPath,omitempty"` // TLS client certificate path (e.g., Dameng) diff --git a/internal/db/duckdb_impl.go b/internal/db/duckdb_impl.go index b4cbc63..f87ca74 100644 --- a/internal/db/duckdb_impl.go +++ b/internal/db/duckdb_impl.go @@ -6,10 +6,8 @@ import ( "context" "database/sql" "fmt" - "path/filepath" "strings" "time" - "unicode" "GoNavi-Wails/internal/connection" "GoNavi-Wails/internal/utils" @@ -18,9 +16,6 @@ import ( type DuckDB struct { conn *sql.DB pingTimeout time.Duration - mode string - sourcePath string - mountedView string } func (d *DuckDB) Connect(config connection.ConnectionConfig) error { @@ -28,18 +23,11 @@ func (d *DuckDB) Connect(config connection.ConnectionConfig) error { return fmt.Errorf("DuckDB 驱动不可用:%s", reason) } - sourcePath := strings.TrimSpace(config.Host) - if sourcePath == "" { - sourcePath = strings.TrimSpace(config.Database) + dsn := strings.TrimSpace(config.Host) + if dsn == "" { + dsn = strings.TrimSpace(config.Database) } - mode := normalizeDuckDBConnectionMode(config.DuckDBMode, sourcePath) - dsn := sourcePath - if mode == "parquet" { - if strings.TrimSpace(sourcePath) == "" || sourcePath == ":memory:" { - return fmt.Errorf("Parquet 文件模式要求提供 .parquet 或 .parq 文件路径") - } - dsn = ":memory:" - } else if dsn == "" { + if dsn == "" { dsn = ":memory:" } @@ -49,22 +37,12 @@ func (d *DuckDB) Connect(config connection.ConnectionConfig) error { } d.conn = db d.pingTimeout = getConnectTimeout(config) - d.mode = mode - d.sourcePath = sourcePath - 
d.mountedView = "" if err := d.Ping(); err != nil { _ = db.Close() d.conn = nil return fmt.Errorf("连接建立后验证失败:%w", err) } - if mode == "parquet" { - if err := d.mountParquetView(sourcePath); err != nil { - _ = db.Close() - d.conn = nil - return fmt.Errorf("连接建立后挂载 Parquet 失败:%w", err) - } - } return nil } @@ -421,26 +399,6 @@ func (d *DuckDB) ApplyChanges(tableName string, changes connection.ChangeSet) er return tx.Commit() } -func (d *DuckDB) mountParquetView(sourcePath string) error { - if d.conn == nil { - return fmt.Errorf("connection not open") - } - viewName := deriveDuckDBParquetViewName(sourcePath) - if viewName == "" { - viewName = "parquet_data" - } - query := fmt.Sprintf( - "CREATE OR REPLACE VIEW %s AS SELECT * FROM read_parquet('%s')", - quoteDuckDBQualifiedTable("main", viewName), - escapeDuckDBLiteral(sourcePath), - ) - if _, err := d.conn.Exec(query); err != nil { - return err - } - d.mountedView = viewName - return nil -} - func normalizeDuckDBSchemaAndTable(dbName string, tableName string) (string, string) { schema := strings.TrimSpace(dbName) table := strings.TrimSpace(tableName) @@ -506,49 +464,3 @@ func duckDBRowString(row map[string]interface{}, keys ...string) string { func escapeDuckDBLiteral(raw string) string { return strings.ReplaceAll(raw, "'", "''") } - -func normalizeDuckDBConnectionMode(raw string, sourcePath string) string { - mode := strings.ToLower(strings.TrimSpace(raw)) - if mode == "parquet" { - return "parquet" - } - if mode == "database" { - return "database" - } - lowerPath := strings.ToLower(strings.TrimSpace(sourcePath)) - if strings.HasSuffix(lowerPath, ".parquet") || strings.HasSuffix(lowerPath, ".parq") { - return "parquet" - } - return "database" -} - -func deriveDuckDBParquetViewName(sourcePath string) string { - baseName := strings.TrimSpace(filepath.Base(strings.TrimSpace(sourcePath))) - if ext := filepath.Ext(baseName); ext != "" { - baseName = strings.TrimSuffix(baseName, ext) - } - if baseName == "" { - return "parquet_data" - } - - var builder strings.Builder - for _, r := range baseName { - switch { - case unicode.IsLetter(r), unicode.IsDigit(r): - builder.WriteRune(unicode.ToLower(r)) - case r == '_': - builder.WriteRune(r) - default: - builder.WriteRune('_') - } - } - - name := strings.Trim(builder.String(), "_") - if name == "" { - name = "parquet_data" - } - if unicode.IsDigit(rune(name[0])) { - name = "parquet_" + name - } - return name -} From 058c74e49ada9107b179fec275ad797f75e521cf Mon Sep 17 00:00:00 2001 From: Syngnat Date: Mon, 9 Mar 2026 11:02:00 +0800 Subject: [PATCH 27/48] =?UTF-8?q?=F0=9F=90=9B=20fix(dameng):=20=E4=BF=AE?= =?UTF-8?q?=E5=A4=8D=E8=BE=BE=E6=A2=A6=E8=BF=9E=E6=8E=A5=E6=88=90=E5=8A=9F?= =?UTF-8?q?=E5=90=8E=E6=95=B0=E6=8D=AE=E5=BA=93=E5=88=97=E8=A1=A8=E4=B8=BA?= =?UTF-8?q?=E7=A9=BA=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 调整达梦数据库列表获取策略,优先回退查询当前 schema 与当前用户 - 保留可见用户与 owner 聚合逻辑,兼容低权限账号场景 - 补充前端空列表提示与后端单元测试,降低排查成本 - close #203 --- frontend/src/components/ConnectionModal.tsx | 38 +++++---- internal/db/dameng_impl.go | 78 +----------------- internal/db/dameng_metadata.go | 91 +++++++++++++++++++++ internal/db/dameng_metadata_test.go | 73 +++++++++++++++++ 4 files changed, 189 insertions(+), 91 deletions(-) create mode 100644 internal/db/dameng_metadata.go create mode 100644 internal/db/dameng_metadata_test.go diff --git a/frontend/src/components/ConnectionModal.tsx b/frontend/src/components/ConnectionModal.tsx index 7f9efa3..c55c0b2 100644 --- 
a/frontend/src/components/ConnectionModal.tsx +++ b/frontend/src/components/ConnectionModal.tsx @@ -1259,24 +1259,30 @@ const ConnectionModal: React.FC<{ ? await RedisConnect(config as any) : await TestConnection(config as any); - if (res.success) { - setTestResult({ type: 'success', message: res.message }); - if (isRedisType) { - setRedisDbList(Array.from({ length: 16 }, (_, i) => i)); - } else { - // Other databases: fetch database list - const dbRes = await DBGetDatabases(config as any); - if (dbRes.success) { - const dbRows = Array.isArray(dbRes.data) ? dbRes.data : []; - const dbs = dbRows - .map((row: any) => row?.Database || row?.database) - .filter((name: any) => typeof name === 'string' && name.trim() !== ''); - setDbList(dbs); + if (res.success) { + setTestResult({ type: 'success', message: res.message }); + if (isRedisType) { + setRedisDbList(Array.from({ length: 16 }, (_, i) => i)); } else { - setDbList([]); + // Other databases: fetch database list + const dbRes = await DBGetDatabases(config as any); + if (dbRes.success) { + const dbRows = Array.isArray(dbRes.data) ? dbRes.data : []; + const dbs = dbRows + .map((row: any) => row?.Database || row?.database) + .filter((name: any) => typeof name === 'string' && name.trim() !== ''); + setDbList(dbs); + if (dbs.length === 0) { + message.warning(values.type === 'dameng' + ? '连接成功,但未获取到可见 schema;请检查当前账号权限或默认 schema 配置' + : '连接成功,但未获取到可见数据库列表'); + } + } else { + setDbList([]); + message.warning(`连接成功,但获取数据库列表失败:${dbRes.message || '未知错误'}`); + } } - } - } else { + } else { const failMessage = buildTestFailureMessage( res?.message, '连接被拒绝或参数无效,请检查后重试' diff --git a/internal/db/dameng_impl.go b/internal/db/dameng_impl.go index 5cceb0a..1cf27e6 100644 --- a/internal/db/dameng_impl.go +++ b/internal/db/dameng_impl.go @@ -8,7 +8,6 @@ import ( "fmt" "net" "net/url" - "sort" "strconv" "strings" "time" @@ -205,80 +204,9 @@ func (d *DamengDB) Exec(query string) (int64, error) { } func (d *DamengDB) GetDatabases() ([]string, error) { - // 达梦将「用户/模式」作为数据库列表来源,不同权限下可见口径不同。 - // 这里采用多查询口径聚合,避免仅依赖单一视图导致“少库”。 - queries := []string{ - "SELECT USERNAME AS DATABASE_NAME FROM SYS.DBA_USERS ORDER BY USERNAME", - "SELECT USERNAME AS DATABASE_NAME FROM DBA_USERS ORDER BY USERNAME", - "SELECT USERNAME AS DATABASE_NAME FROM ALL_USERS ORDER BY USERNAME", - "SELECT USERNAME AS DATABASE_NAME FROM USER_USERS", - "SELECT DISTINCT OWNER AS DATABASE_NAME FROM ALL_TABLES ORDER BY OWNER", - } - - seen := make(map[string]struct{}) - dbs := make([]string, 0, 64) - var lastErr error - success := false - - for _, q := range queries { - data, _, err := d.Query(q) - if err != nil { - lastErr = err - continue - } - success = true - for _, row := range data { - name := getDamengRowString(row, "DATABASE_NAME", "USERNAME", "OWNER", "SCHEMA_NAME") - if name == "" { - // 回退到第一列,兼容驱动返回列名差异。 - for _, v := range row { - text := strings.TrimSpace(fmt.Sprintf("%v", v)) - if text == "" || strings.EqualFold(text, "") { - continue - } - name = text - break - } - } - if name == "" { - continue - } - key := strings.ToUpper(name) - if _, ok := seen[key]; ok { - continue - } - seen[key] = struct{}{} - dbs = append(dbs, name) - } - } - - if !success && lastErr != nil { - return nil, lastErr - } - - sort.Slice(dbs, func(i, j int) bool { - return strings.ToUpper(dbs[i]) < strings.ToUpper(dbs[j]) - }) - return dbs, nil -} - -func getDamengRowString(row map[string]interface{}, keys ...string) string { - if len(row) == 0 { - return "" - } - for _, key := range keys { - for k, v := range row { - if 
!strings.EqualFold(strings.TrimSpace(k), strings.TrimSpace(key)) { - continue - } - text := strings.TrimSpace(fmt.Sprintf("%v", v)) - if text == "" || strings.EqualFold(text, "") { - return "" - } - return text - } - } - return "" + // 达梦在本项目中将 schema/owner 作为“数据库”展示口径。 + // 先查当前 schema / 当前用户,再聚合可见用户与 owner,避免权限受限时返回空列表。 + return collectDamengDatabaseNames(d.Query) } func (d *DamengDB) GetTables(dbName string) ([]string, error) { diff --git a/internal/db/dameng_metadata.go b/internal/db/dameng_metadata.go new file mode 100644 index 0000000..c963da1 --- /dev/null +++ b/internal/db/dameng_metadata.go @@ -0,0 +1,91 @@ +package db + +import ( + "fmt" + "sort" + "strings" +) + +var damengDatabaseQueries = []string{ + "SELECT SYS_CONTEXT('USERENV', 'CURRENT_SCHEMA') AS DATABASE_NAME FROM DUAL", + "SELECT SYS_CONTEXT('USERENV', 'CURRENT_USER') AS DATABASE_NAME FROM DUAL", + "SELECT USERNAME AS DATABASE_NAME FROM USER_USERS", + "SELECT USERNAME AS DATABASE_NAME FROM ALL_USERS ORDER BY USERNAME", + "SELECT USERNAME AS DATABASE_NAME FROM DBA_USERS ORDER BY USERNAME", + "SELECT USERNAME AS DATABASE_NAME FROM SYS.DBA_USERS ORDER BY USERNAME", + "SELECT DISTINCT OWNER AS DATABASE_NAME FROM ALL_OBJECTS ORDER BY OWNER", + "SELECT DISTINCT OWNER AS DATABASE_NAME FROM ALL_TABLES ORDER BY OWNER", +} + +type damengQueryFunc func(query string) ([]map[string]interface{}, []string, error) + +func collectDamengDatabaseNames(query damengQueryFunc) ([]string, error) { + seen := make(map[string]struct{}) + dbs := make([]string, 0, 64) + var lastErr error + + for _, q := range damengDatabaseQueries { + data, _, err := query(q) + if err != nil { + lastErr = err + continue + } + for _, row := range data { + name := getDamengRowString(row, + "DATABASE_NAME", + "USERNAME", + "OWNER", + "SCHEMA_NAME", + "CURRENT_SCHEMA", + "CURRENT_USER", + ) + if name == "" { + for _, v := range row { + text := strings.TrimSpace(fmt.Sprintf("%v", v)) + if text == "" || strings.EqualFold(text, "") { + continue + } + name = text + break + } + } + if name == "" { + continue + } + key := strings.ToUpper(name) + if _, ok := seen[key]; ok { + continue + } + seen[key] = struct{}{} + dbs = append(dbs, name) + } + } + + if len(dbs) == 0 && lastErr != nil { + return nil, lastErr + } + + sort.Slice(dbs, func(i, j int) bool { + return strings.ToUpper(dbs[i]) < strings.ToUpper(dbs[j]) + }) + return dbs, nil +} + +func getDamengRowString(row map[string]interface{}, keys ...string) string { + if len(row) == 0 { + return "" + } + for _, key := range keys { + for k, v := range row { + if !strings.EqualFold(strings.TrimSpace(k), strings.TrimSpace(key)) { + continue + } + text := strings.TrimSpace(fmt.Sprintf("%v", v)) + if text == "" || strings.EqualFold(text, "") { + return "" + } + return text + } + } + return "" +} diff --git a/internal/db/dameng_metadata_test.go b/internal/db/dameng_metadata_test.go new file mode 100644 index 0000000..5310679 --- /dev/null +++ b/internal/db/dameng_metadata_test.go @@ -0,0 +1,73 @@ +package db + +import ( + "errors" + "reflect" + "testing" +) + +func TestCollectDamengDatabaseNames_UsesCurrentSchemaFallback(t *testing.T) { + t.Parallel() + + got, err := collectDamengDatabaseNames(func(query string) ([]map[string]interface{}, []string, error) { + switch query { + case damengDatabaseQueries[0]: + return []map[string]interface{}{{"DATABASE_NAME": "APP_SCHEMA"}}, nil, nil + case damengDatabaseQueries[1]: + return []map[string]interface{}{{"DATABASE_NAME": "app_schema"}}, nil, nil + default: + return nil, nil, 
errors.New("permission denied") + } + }) + if err != nil { + t.Fatalf("collectDamengDatabaseNames 返回错误: %v", err) + } + + want := []string{"APP_SCHEMA"} + if !reflect.DeepEqual(got, want) { + t.Fatalf("unexpected database names, got=%v want=%v", got, want) + } +} + +func TestCollectDamengDatabaseNames_CollectsOwnersWhenVisible(t *testing.T) { + t.Parallel() + + got, err := collectDamengDatabaseNames(func(query string) ([]map[string]interface{}, []string, error) { + switch query { + case damengDatabaseQueries[0], damengDatabaseQueries[1], damengDatabaseQueries[2], damengDatabaseQueries[3], damengDatabaseQueries[4], damengDatabaseQueries[5]: + return []map[string]interface{}{}, nil, nil + case damengDatabaseQueries[6]: + return []map[string]interface{}{{"OWNER": "BIZ"}, {"OWNER": "audit"}}, nil, nil + case damengDatabaseQueries[7]: + return []map[string]interface{}{{"OWNER": "BIZ"}}, nil, nil + default: + return nil, nil, nil + } + }) + if err != nil { + t.Fatalf("collectDamengDatabaseNames 返回错误: %v", err) + } + + want := []string{"audit", "BIZ"} + if !reflect.DeepEqual(got, want) { + t.Fatalf("unexpected database names, got=%v want=%v", got, want) + } +} + +func TestCollectDamengDatabaseNames_ReturnsErrorWhenNoNameResolved(t *testing.T) { + t.Parallel() + + expectErr := errors.New("last query failed") + got, err := collectDamengDatabaseNames(func(query string) ([]map[string]interface{}, []string, error) { + if query == damengDatabaseQueries[len(damengDatabaseQueries)-1] { + return nil, nil, expectErr + } + return nil, nil, errors.New("permission denied") + }) + if err == nil { + t.Fatalf("期望返回错误,实际 got=%v", got) + } + if !errors.Is(err, expectErr) { + t.Fatalf("错误不符合预期: %v", err) + } +} From 0daf702d2569c17e6ef7a628114f7c7f90f9e5ed Mon Sep 17 00:00:00 2001 From: Syngnat Date: Mon, 9 Mar 2026 17:22:26 +0800 Subject: [PATCH 28/48] =?UTF-8?q?=E2=9C=A8=20feat(data-sync):=20=E6=89=A9?= =?UTF-8?q?=E5=B1=95=E8=B7=A8=E5=BA=93=E8=BF=81=E7=A7=BB=E9=93=BE=E8=B7=AF?= =?UTF-8?q?=E5=B9=B6=E4=BC=98=E5=8C=96=E6=95=B0=E6=8D=AE=E5=90=8C=E6=AD=A5?= =?UTF-8?q?=E4=BA=A4=E4=BA=92?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 统一同库同步与跨库迁移入口,补充模式区分与风险提示 - 扩展 ClickHouse 与 PG-like 双向迁移,并新增 PG-like、ClickHouse、TDengine 到 MongoDB 的迁移路由 - 完善 TDengine 目标端建表规划、回归测试与需求追踪文档 - refs #51 --- frontend/package.json.md5 | 2 +- frontend/src/components/DataSyncModal.tsx | 432 +++++- frontend/wailsjs/go/models.ts | 6 + internal/app/db_context.go | 6 + internal/app/methods_db.go | 61 + internal/connection/types.go | 1 + internal/db/clickhouse_impl.go | 132 ++ internal/db/mariadb_impl.go | 18 +- internal/db/mysql_impl.go | 25 +- internal/db/tdengine_applychanges_test.go | 168 +++ internal/db/tdengine_impl.go | 78 + internal/sync/analyze.go | 118 +- internal/sync/migration_clickhouse.go | 741 ++++++++++ internal/sync/migration_kernel_router.go | 379 +++++ internal/sync/migration_kernel_router_test.go | 447 ++++++ internal/sync/migration_kernel_types.go | 104 ++ internal/sync/migration_mongodb.go | 603 ++++++++ internal/sync/migration_redis.go | 1315 +++++++++++++++++ internal/sync/migration_runtime_helpers.go | 58 + internal/sync/migration_schema_inference.go | 53 + internal/sync/migration_tdengine.go | 296 ++++ internal/sync/migration_tdengine_target.go | 657 ++++++++ internal/sync/migration_type_resolver.go | 98 ++ internal/sync/preview.go | 43 +- internal/sync/redis_migration_test.go | 490 ++++++ internal/sync/schema_migration.go | 1014 +++++++++++++ 
internal/sync/schema_migration_test.go | 957 ++++++++++++ internal/sync/schema_sync.go | 11 +- internal/sync/sql_helpers.go | 19 +- internal/sync/sync_engine.go | 413 +++--- internal/sync/sync_events.go | 1 - 31 files changed, 8403 insertions(+), 343 deletions(-) create mode 100644 internal/db/tdengine_applychanges_test.go create mode 100644 internal/sync/migration_clickhouse.go create mode 100644 internal/sync/migration_kernel_router.go create mode 100644 internal/sync/migration_kernel_router_test.go create mode 100644 internal/sync/migration_kernel_types.go create mode 100644 internal/sync/migration_mongodb.go create mode 100644 internal/sync/migration_redis.go create mode 100644 internal/sync/migration_runtime_helpers.go create mode 100644 internal/sync/migration_schema_inference.go create mode 100644 internal/sync/migration_tdengine.go create mode 100644 internal/sync/migration_tdengine_target.go create mode 100644 internal/sync/migration_type_resolver.go create mode 100644 internal/sync/redis_migration_test.go create mode 100644 internal/sync/schema_migration.go create mode 100644 internal/sync/schema_migration_test.go diff --git a/frontend/package.json.md5 b/frontend/package.json.md5 index a7661c0..0f8f4fe 100755 --- a/frontend/package.json.md5 +++ b/frontend/package.json.md5 @@ -1 +1 @@ -d0f9366af59a6367ad3c7e2d4185ead4 \ No newline at end of file +5b8157374dae5f9340e31b2d0bd2c00e \ No newline at end of file diff --git a/frontend/src/components/DataSyncModal.tsx b/frontend/src/components/DataSyncModal.tsx index 57c4033..1389be7 100644 --- a/frontend/src/components/DataSyncModal.tsx +++ b/frontend/src/components/DataSyncModal.tsx @@ -1,9 +1,11 @@ import React, { useState, useEffect, useMemo, useRef } from 'react'; -import { Modal, Form, Select, Button, message, Steps, Transfer, Card, Alert, Divider, Typography, Progress, Checkbox, Table, Drawer, Tabs } from 'antd'; +import { Modal, Form, Select, Input, Button, message, Steps, Transfer, Card, Alert, Divider, Typography, Progress, Checkbox, Table, Drawer, Tabs, theme as antdTheme } from 'antd'; +import { DatabaseOutlined, RocketOutlined, SwapOutlined, TableOutlined } from '@ant-design/icons'; import { useStore } from '../store'; import { DBGetDatabases, DBGetTables, DataSync, DataSyncAnalyze, DataSyncPreview } from '../../wailsjs/go/app/App'; import { SavedConnection } from '../types'; import { EventsOn } from '../../wailsjs/runtime/runtime'; +import { normalizeOpacityForPlatform, resolveAppearanceValues } from '../utils/appearance'; const { Title, Text } = Typography; const { Step } = Steps; @@ -21,6 +23,12 @@ type TableDiffSummary = { deletes?: number; same?: number; message?: string; + targetTableExists?: boolean; + plannedAction?: string; + warnings?: string[]; + unsupportedObjects?: string[]; + indexesToCreate?: number; + indexesSkipped?: number; }; type TableOps = { insert: boolean; @@ -31,6 +39,8 @@ type TableOps = { selectedDeletePks?: string[]; }; +type WorkflowType = 'sync' | 'migration'; + const quoteSqlIdent = (dbType: string, ident: string): string => { const raw = String(ident || '').trim(); if (!raw) return raw; @@ -76,6 +86,11 @@ const toSqlLiteral = (value: any, dbType: string): string => { return `'${String(value).replace(/'/g, "''")}'`; }; +const resolveRedisDbIndex = (raw?: string): number => { + const value = Number(String(raw || '').trim()); + return Number.isInteger(value) && value >= 0 && value <= 15 ? 
value : 0; +}; + const buildSqlPreview = ( previewData: any, tableName: string, @@ -145,8 +160,14 @@ const buildSqlPreview = ( const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open, onClose }) => { const connections = useStore((state) => state.connections); + const themeMode = useStore((state) => state.theme); + const appearance = useStore((state) => state.appearance); const [currentStep, setCurrentStep] = useState(0); const [loading, setLoading] = useState(false); + const { token } = antdTheme.useToken(); + const darkMode = themeMode === 'dark'; + const resolvedAppearance = resolveAppearanceValues(appearance); + const effectiveOpacity = normalizeOpacityForPlatform(resolvedAppearance.opacity); // Step 1: Config const [sourceConnId, setSourceConnId] = useState(''); @@ -162,9 +183,13 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open, const [selectedTables, setSelectedTables] = useState([]); // Options + const [workflowType, setWorkflowType] = useState('sync'); const [syncContent, setSyncContent] = useState<'data' | 'schema' | 'both'>('data'); const [syncMode, setSyncMode] = useState('insert_update'); const [autoAddColumns, setAutoAddColumns] = useState(true); + const [targetTableStrategy, setTargetTableStrategy] = useState<'existing_only' | 'auto_create_if_missing' | 'smart'>('existing_only'); + const [createIndexes, setCreateIndexes] = useState(false); + const [mongoCollectionName, setMongoCollectionName] = useState(''); const [showSameTables, setShowSameTables] = useState(false); const [analyzing, setAnalyzing] = useState(false); const [diffTables, setDiffTables] = useState([]); @@ -240,9 +265,12 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open, setSourceDb(''); setTargetDb(''); setSelectedTables([]); + setWorkflowType('sync'); setSyncContent('data'); setSyncMode('insert_update'); setAutoAddColumns(true); + setTargetTableStrategy('existing_only'); + setCreateIndexes(false); setShowSameTables(false); setAnalyzing(false); setDiffTables([]); @@ -260,6 +288,30 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open, } }, [open]); + useEffect(() => { + if (workflowType === 'migration') { + if (syncMode === 'insert_update') { + setSyncMode('insert_only'); + } + if (syncContent === 'schema') { + setSyncContent('both'); + } + if (targetTableStrategy === 'existing_only') { + setTargetTableStrategy('smart'); + } + if (!createIndexes) { + setCreateIndexes(true); + } + } else { + if (targetTableStrategy !== 'existing_only') { + setTargetTableStrategy('existing_only'); + } + if (createIndexes) { + setCreateIndexes(false); + } + } + }, [workflowType]); + const handleSourceConnChange = async (connId: string) => { setSourceConnId(connId); setSourceDb(''); @@ -357,6 +409,9 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open, content: syncContent, mode: "insert_update", autoAddColumns, + targetTableStrategy, + createIndexes, + mongoCollectionName: mongoCollectionName.trim(), jobId, }; @@ -407,6 +462,9 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open, content: "data", mode: "insert_update", autoAddColumns, + targetTableStrategy, + createIndexes, + mongoCollectionName: mongoCollectionName.trim(), }; try { @@ -483,6 +541,9 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open, content: syncContent, mode: syncMode, autoAddColumns, + targetTableStrategy, + createIndexes, + mongoCollectionName: 
mongoCollectionName.trim(), tableOptions, jobId, }; @@ -530,10 +591,132 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open, return buildSqlPreview(previewData, previewTable, targetType, ops); }, [previewData, previewTable, targetConnId, connections, tableOptions]); + const analysisWarnings = useMemo(() => { + const items: string[] = []; + diffTables.forEach((table) => { + (table.warnings || []).forEach((warning) => items.push(`${table.table}: ${warning}`)); + (table.unsupportedObjects || []).forEach((warning) => items.push(`${table.table}: ${warning}`)); + }); + return Array.from(new Set(items)); + }, [diffTables]); + + const isMigrationWorkflow = workflowType === 'migration'; + const sourceConn = useMemo(() => connections.find(c => c.id === sourceConnId), [connections, sourceConnId]); + const targetConn = useMemo(() => connections.find(c => c.id === targetConnId), [connections, targetConnId]); + const sourceType = String(sourceConn?.config?.type || '').toLowerCase(); + const targetType = String(targetConn?.config?.type || '').toLowerCase(); + const isRedisMongoKeyspaceMigration = isMigrationWorkflow && ( + (sourceType === 'redis' && targetType === 'mongodb') || + (sourceType === 'mongodb' && targetType === 'redis') + ); + const defaultMongoCollectionName = useMemo(() => { + if (sourceType === 'redis' && targetType === 'mongodb') { + return `redis_db_${resolveRedisDbIndex(sourceDb || sourceConn?.config?.database)}_keys`; + } + if (sourceType === 'mongodb' && targetType === 'redis') { + return selectedTables[0] || `redis_db_${resolveRedisDbIndex(targetDb || targetConn?.config?.database)}_keys`; + } + return ''; + }, [sourceType, targetType, sourceDb, targetDb, sourceConn, targetConn, selectedTables]); + + const modalPanelStyle = useMemo(() => ({ + background: darkMode + ? 'linear-gradient(180deg, rgba(16,22,34,0.96) 0%, rgba(10,14,24,0.98) 100%)' + : 'linear-gradient(180deg, rgba(255,255,255,0.98) 0%, rgba(246,248,252,0.98) 100%)', + border: darkMode ? '1px solid rgba(255,255,255,0.08)' : '1px solid rgba(16,24,40,0.08)', + boxShadow: darkMode ? '0 24px 56px rgba(0,0,0,0.36)' : '0 18px 44px rgba(15,23,42,0.14)', + backdropFilter: darkMode ? 'blur(18px)' : 'none', + }), [darkMode]); + + const shellCardStyle = useMemo(() => ({ + borderRadius: 18, + border: darkMode ? '1px solid rgba(255,255,255,0.08)' : '1px solid rgba(15,23,42,0.08)', + background: darkMode ? 'rgba(255,255,255,0.03)' : `rgba(255,255,255,${Math.max(effectiveOpacity, 0.88)})`, + boxShadow: darkMode ? '0 12px 32px rgba(0,0,0,0.22)' : '0 10px 24px rgba(15,23,42,0.08)', + overflow: 'hidden', + }), [darkMode, effectiveOpacity]); + + const heroPanelStyle = useMemo(() => ({ + padding: 18, + borderRadius: 18, + border: darkMode ? '1px solid rgba(255,214,102,0.12)' : '1px solid rgba(24,144,255,0.12)', + background: darkMode + ? 'linear-gradient(135deg, rgba(255,214,102,0.10) 0%, rgba(255,255,255,0.03) 100%)' + : 'linear-gradient(135deg, rgba(24,144,255,0.10) 0%, rgba(255,255,255,0.95) 100%)', + marginBottom: 18, + }), [darkMode]); + + const badgeStyle = useMemo(() => ({ + display: 'inline-flex', + alignItems: 'center', + gap: 6, + padding: '6px 10px', + borderRadius: 999, + border: darkMode ? '1px solid rgba(255,255,255,0.10)' : '1px solid rgba(15,23,42,0.08)', + background: darkMode ? 'rgba(255,255,255,0.04)' : 'rgba(255,255,255,0.86)', + color: darkMode ? 
'rgba(255,255,255,0.88)' : '#334155', + fontSize: 12, + fontWeight: 600, + }), [darkMode]); + + const quietPanelStyle = useMemo(() => ({ + padding: 14, + borderRadius: 16, + border: darkMode ? '1px solid rgba(255,255,255,0.08)' : '1px solid rgba(15,23,42,0.08)', + background: darkMode ? 'rgba(255,255,255,0.025)' : 'rgba(248,250,252,0.92)', + }), [darkMode]); + + const modalWorkspaceStyle = useMemo(() => ({ + display: 'flex', + flexDirection: 'column', + height: '100%', + minHeight: 0, + }), []); + + const modalScrollableContentStyle = useMemo(() => ({ + flex: 1, + minHeight: 0, + overflowY: 'auto', + overflowX: 'hidden', + paddingRight: 4, + overscrollBehavior: 'contain', + }), []); + + const modalFooterBarStyle = useMemo(() => ({ + marginTop: 18, + display: 'flex', + justifyContent: 'flex-end', + gap: 8, + paddingTop: 12, + borderTop: darkMode ? '1px solid rgba(255,255,255,0.06)' : '1px solid rgba(15,23,42,0.06)', + flex: '0 0 auto', + }), [darkMode]); + + const renderModalTitle = (title: string, description: string) => ( +
+

+        {isMigrationWorkflow ? <RocketOutlined /> : <SwapOutlined />}
+      
+
+
{title}
+
{description}
+
+
+ ); + return ( <> { if (syncing) { @@ -542,23 +725,61 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open, } onClose(); }} - width={800} + width={920} footer={null} destroyOnHidden closable={!syncing} maskClosable={!syncing} + styles={{ + content: modalPanelStyle, + header: { background: 'transparent', borderBottom: 'none', paddingBottom: 10 }, + body: { + paddingTop: 8, + height: 760, + maxHeight: 'calc(100vh - 120px)', + overflow: 'hidden', + display: 'flex', + flexDirection: 'column', + }, + footer: { background: 'transparent', borderTop: 'none', paddingTop: 12 }, + }} > +
+
+
+
+
+
{isMigrationWorkflow ? '跨数据源迁移' : '数据同步'}
+
+ {isMigrationWorkflow + ? '适合把源表迁移到另一套数据库,可按策略自动建表、导入数据并补建可兼容索引。' + : '适合目标表已存在的场景,先做差异分析,再按勾选执行插入、更新或删除。'} +
+
+

+                <span style={badgeStyle}>{isMigrationWorkflow ? <RocketOutlined /> : <SwapOutlined />} {isMigrationWorkflow ? '迁移模式' : '同步模式'}</span>
+                <span style={badgeStyle}>{sourceConnId ? '已选源连接' : '待选源连接'}</span>
+                <span style={badgeStyle}>{selectedTables.length || 0} 张表</span>
+
+
+
+
{/* STEP 1: CONFIG */} {currentStep === 0 && (
-
- +
+
@@ -589,27 +818,94 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open,
- + +
+ + 先明确当前要做的是“已有目标表同步”还是“跨库迁移”,页面会按功能类型自动给出更安全的默认策略。 + +
- + + + + + - + + + + + {isRedisMongoKeyspaceMigration && ( + + setMongoCollectionName(e.target.value)} + placeholder={defaultMongoCollectionName || '请输入 Mongo 集合名'} + allowClear + maxLength={128} + /> + + )} setAutoAddColumns(e.target.checked)}> - 自动补齐目标表缺失字段(仅 MySQL 目标) + 自动补齐目标表缺失字段(当前支持 MySQL 目标及 MySQL → Kingbase) + + setCreateIndexes(e.target.checked)} disabled={!isMigrationWorkflow || targetTableStrategy === 'existing_only'}> + 自动迁移可兼容的普通索引/唯一索引(仅自动建表模式生效) + + + {isMigrationWorkflow && targetTableStrategy !== 'existing_only' && ( + + )} + {!isMigrationWorkflow && ( + + )} {syncContent !== 'schema' && syncMode === 'full_overwrite' && ( void }> = ({ open, {/* STEP 2: TABLES */} {currentStep === 1 && ( -
-
- 请选择需要同步的表: +
+
+
+ 请选择需要同步的表: setShowSameTables(e.target.checked)}> 显示相同表 -
- + ({ key: t, title: t }))} titles={['源表', '已选表']} targetKeys={selectedTables} onChange={(keys) => setSelectedTables(keys as string[])} render={item => item.title} - listStyle={{ width: 350, height: 280, marginTop: 0 }} - locale={{ itemUnit: '项', itemsUnit: '项', searchPlaceholder: '搜索表', notFoundContent: '暂无数据' }} + listStyle={{ width: 390, height: 320, marginTop: 0, borderRadius: 14, overflow: 'hidden' }} + locale={{ itemUnit: '项', itemsUnit: '项', searchPlaceholder: '搜索表…', notFoundContent: '暂无数据' }} /> +
{diffTables.length > 0 && ( -
- 对比结果 +
+ 对比结果 + {analysisWarnings.length > 0 && ( + + {analysisWarnings.slice(0, 8).map((item) =>
  • {item}
  • )} + {analysisWarnings.length > 8 &&
  • 还有 {analysisWarnings.length - 8} 项未展开
  • } + + } + style={{ marginBottom: 12 }} + /> + )}
    void }> = ({ open, const same = Number(t.same || 0); const msg = String(t.message || '').trim(); const can = !!t.canSync; + const warns = Array.isArray(t.warnings) ? t.warnings.length : 0; + const unsupported = Array.isArray(t.unsupportedObjects) ? t.unsupportedObjects.length : 0; if (showSameTables) return true; if (!can) return true; - if (msg) return true; + if (msg || warns > 0 || unsupported > 0) return true; return ins > 0 || upd > 0 || del > 0 || same === 0; })} columns={[ { title: '表名', dataIndex: 'table', key: 'table', ellipsis: true }, + { + title: '目标表', + key: 'targetTableExists', + width: 90, + render: (_: any, r: any) => r.targetTableExists ? '已存在' : '不存在' + }, + { + title: '计划', + dataIndex: 'plannedAction', + key: 'plannedAction', + width: 220, + ellipsis: true, + render: (v: any) => String(v || '') + }, { title: '插入', key: 'inserts', @@ -670,11 +998,7 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open, const ops = tableOptions[r.table] || { insert: true, update: true, delete: false }; const disabled = !r.canSync || analyzing || Number(r.inserts || 0) === 0; return ( - updateTableOption(r.table, 'insert', e.target.checked)} - > + updateTableOption(r.table, 'insert', e.target.checked)}> {Number(r.inserts || 0)} ); @@ -688,11 +1012,7 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open, const ops = tableOptions[r.table] || { insert: true, update: true, delete: false }; const disabled = !r.canSync || analyzing || Number(r.updates || 0) === 0; return ( - updateTableOption(r.table, 'update', e.target.checked)} - > + updateTableOption(r.table, 'update', e.target.checked)}> {Number(r.updates || 0)} ); @@ -706,18 +1026,28 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open, const ops = tableOptions[r.table] || { insert: true, update: true, delete: false }; const disabled = !r.canSync || analyzing || Number(r.deletes || 0) === 0; return ( - updateTableOption(r.table, 'delete', e.target.checked)} - > + updateTableOption(r.table, 'delete', e.target.checked)}> {Number(r.deletes || 0)} ); } }, { title: '相同', dataIndex: 'same', key: 'same', width: 70, render: (v: any) => Number(v || 0) }, - { title: '消息', dataIndex: 'message', key: 'message', ellipsis: true, render: (v: any) => (v ? String(v) : '') }, + { + title: '风险', + key: 'warnings', + width: 220, + render: (_: any, r: any) => { + const warns = [...(Array.isArray(r.warnings) ? r.warnings : []), ...(Array.isArray(r.unsupportedObjects) ? r.unsupportedObjects : [])]; + if (warns.length === 0) return '-'; + return ( +
    + {warns.slice(0, 2).map((item: string) =>
    {item}
    )} + {warns.length > 2 &&
    还有 {warns.length - 2} 项
    } +
    + ); + } + }, { title: '预览', key: 'preview', @@ -741,7 +1071,8 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open, {/* STEP 3: RESULT */} {currentStep === 2 && ( -
    +
    +
    void }> = ({ open, showIcon /> -
    +
    void }> = ({ open, />
    - 日志 +
    +
    + 执行日志
    { @@ -770,14 +1103,25 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open, const nearBottom = el.scrollHeight - el.scrollTop - el.clientHeight < 40; autoScrollRef.current = nearBottom; }} - style={{ background: '#f5f5f5', padding: 12, height: 300, overflowY: 'auto', fontFamily: 'monospace' }} + style={{ + background: darkMode ? 'rgba(255,255,255,0.03)' : 'rgba(248,250,252,0.92)', + border: darkMode ? '1px solid rgba(255,255,255,0.08)' : '1px solid rgba(15,23,42,0.06)', + borderRadius: 14, + padding: 12, + height: 300, + overflowY: 'auto', + fontFamily: 'SFMono-Regular, ui-monospace, Menlo, Consolas, monospace' + }} > {syncLogs.map((item, i: number) =>
    {renderSyncLogItem(item)}
    )}
    +
    )} -
    +
    + +
    {currentStep === 0 && ( )} @@ -804,14 +1148,16 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open, )}
    +
    { setPreviewOpen(false); setPreviewTable(''); setPreviewData(null); }} width={900} > - {previewLoading && } + {previewLoading && } {!previewLoading && previewData && (
    ; static createFrom(source: any = {}) { @@ -292,6 +295,9 @@ export namespace sync { this.mode = source["mode"]; this.jobId = source["jobId"]; this.autoAddColumns = source["autoAddColumns"]; + this.targetTableStrategy = source["targetTableStrategy"]; + this.createIndexes = source["createIndexes"]; + this.mongoCollectionName = source["mongoCollectionName"]; this.tableOptions = this.convertValues(source["tableOptions"], TableOptions, true); } diff --git a/internal/app/db_context.go b/internal/app/db_context.go index ec56c89..009e405 100644 --- a/internal/app/db_context.go +++ b/internal/app/db_context.go @@ -1,6 +1,7 @@ package app import ( + "strconv" "strings" "GoNavi-Wails/internal/connection" @@ -20,6 +21,11 @@ func normalizeRunConfig(config connection.ConnectionConfig, dbName string) conne case "dameng": // 达梦使用 schema 参数,沿用现有行为:dbName 表示 schema。 runConfig.Database = name + case "redis": + runConfig.Database = name + if idx, err := strconv.Atoi(name); err == nil && idx >= 0 && idx <= 15 { + runConfig.RedisDB = idx + } default: // oracle: dbName 表示 schema/owner,不能覆盖 config.Database(服务名) // sqlite: 无需设置 Database diff --git a/internal/app/methods_db.go b/internal/app/methods_db.go index 24119e1..b28109f 100644 --- a/internal/app/methods_db.go +++ b/internal/app/methods_db.go @@ -3,6 +3,7 @@ package app import ( "context" "fmt" + "strconv" "strings" "time" @@ -547,6 +548,24 @@ func ensureNonNilSlice[T any](items []T) []T { func (a *App) DBGetDatabases(config connection.ConnectionConfig) connection.QueryResult { runConfig := normalizeRunConfig(config, "") + if strings.EqualFold(strings.TrimSpace(runConfig.Type), "redis") { + runConfig.Type = "redis" + client, err := a.getRedisClient(runConfig) + if err != nil { + logger.Error(err, "DBGetDatabases 获取 Redis 连接失败:%s", formatConnSummary(runConfig)) + return connection.QueryResult{Success: false, Message: err.Error()} + } + dbs, err := client.GetDatabases() + if err != nil { + logger.Error(err, "DBGetDatabases 获取 Redis 库列表失败:%s", formatConnSummary(runConfig)) + return connection.QueryResult{Success: false, Message: err.Error()} + } + resData := make([]map[string]string, 0, len(dbs)) + for _, item := range dbs { + resData = append(resData, map[string]string{"Database": strconv.Itoa(item.Index)}) + } + return connection.QueryResult{Success: true, Data: resData} + } dbInst, err := a.getDatabase(runConfig) if err != nil { logger.Error(err, "DBGetDatabases 获取连接失败:%s", formatConnSummary(runConfig)) @@ -579,6 +598,48 @@ func (a *App) DBGetDatabases(config connection.ConnectionConfig) connection.Quer func (a *App) DBGetTables(config connection.ConnectionConfig, dbName string) connection.QueryResult { runConfig := normalizeRunConfig(config, dbName) + if strings.EqualFold(strings.TrimSpace(runConfig.Type), "redis") { + runConfig.Type = "redis" + client, err := a.getRedisClient(runConfig) + if err != nil { + logger.Error(err, "DBGetTables 获取 Redis 连接失败:%s", formatConnSummary(runConfig)) + return connection.QueryResult{Success: false, Message: err.Error()} + } + cursor := uint64(0) + tables := make([]string, 0, 128) + seen := make(map[string]struct{}, 128) + for { + result, err := client.ScanKeys("*", cursor, 1000) + if err != nil { + logger.Error(err, "DBGetTables 扫描 Redis Key 失败:%s", formatConnSummary(runConfig)) + return connection.QueryResult{Success: false, Message: err.Error()} + } + for _, item := range result.Keys { + key := strings.TrimSpace(item.Key) + if key == "" { + continue + } + if _, ok := seen[key]; ok { + continue + } + seen[key] = 
struct{}{} + tables = append(tables, key) + } + if strings.TrimSpace(result.Cursor) == "" || strings.TrimSpace(result.Cursor) == "0" { + break + } + next, err := strconv.ParseUint(strings.TrimSpace(result.Cursor), 10, 64) + if err != nil || next == cursor { + break + } + cursor = next + } + resData := make([]map[string]string, 0, len(tables)) + for _, name := range tables { + resData = append(resData, map[string]string{"Table": name}) + } + return connection.QueryResult{Success: true, Data: resData} + } dbInst, err := a.getDatabase(runConfig) if err != nil { diff --git a/internal/connection/types.go b/internal/connection/types.go index bac9ec7..bddb794 100644 --- a/internal/connection/types.go +++ b/internal/connection/types.go @@ -90,6 +90,7 @@ type IndexDefinition struct { NonUnique int `json:"nonUnique"` SeqInIndex int `json:"seqInIndex"` IndexType string `json:"indexType"` + SubPart int `json:"subPart,omitempty"` } // ForeignKeyDefinition represents a foreign key diff --git a/internal/db/clickhouse_impl.go b/internal/db/clickhouse_impl.go index f1d5811..75a418c 100644 --- a/internal/db/clickhouse_impl.go +++ b/internal/db/clickhouse_impl.go @@ -8,6 +8,7 @@ import ( "fmt" "net" "net/url" + "sort" "strconv" "strings" "time" @@ -678,3 +679,134 @@ func isClickHouseTruthy(value interface{}) bool { return normalized == "1" || normalized == "true" || normalized == "yes" || normalized == "y" } } + +func (c *ClickHouseDB) ApplyChanges(tableName string, changes connection.ChangeSet) error { + if c.conn == nil { + return fmt.Errorf("connection not open") + } + + database, table, err := c.resolveDatabaseAndTable(c.database, tableName) + if err != nil { + return err + } + qualifiedTable := fmt.Sprintf("%s.%s", quoteClickHouseIdentifier(database), quoteClickHouseIdentifier(table)) + + for _, pk := range changes.Deletes { + whereExpr := buildClickHouseWhereClause(pk) + if whereExpr == "" { + continue + } + query := fmt.Sprintf("ALTER TABLE %s DELETE WHERE %s", qualifiedTable, whereExpr) + if _, err := c.conn.Exec(query); err != nil { + return fmt.Errorf("delete error: %v; sql=%s", err, query) + } + } + + for _, update := range changes.Updates { + setExpr := buildClickHouseAssignments(update.Values) + whereExpr := buildClickHouseWhereClause(update.Keys) + if setExpr == "" || whereExpr == "" { + continue + } + query := fmt.Sprintf("ALTER TABLE %s UPDATE %s WHERE %s", qualifiedTable, setExpr, whereExpr) + if _, err := c.conn.Exec(query); err != nil { + return fmt.Errorf("update error: %v; sql=%s", err, query) + } + } + + for _, row := range changes.Inserts { + query, err := buildClickHouseInsertSQL(qualifiedTable, row) + if err != nil { + return err + } + if query == "" { + continue + } + if _, err := c.conn.Exec(query); err != nil { + return fmt.Errorf("insert error: %v; sql=%s", err, query) + } + } + return nil +} + +func buildClickHouseInsertSQL(qualifiedTable string, row map[string]interface{}) (string, error) { + if len(row) == 0 { + return "", nil + } + cols := make([]string, 0, len(row)) + for k := range row { + if strings.TrimSpace(k) == "" { + continue + } + cols = append(cols, k) + } + if len(cols) == 0 { + return "", nil + } + sort.Strings(cols) + quotedCols := make([]string, 0, len(cols)) + values := make([]string, 0, len(cols)) + for _, col := range cols { + quotedCols = append(quotedCols, quoteClickHouseIdentifier(col)) + values = append(values, clickHouseLiteral(row[col])) + } + return fmt.Sprintf("INSERT INTO %s (%s) VALUES (%s)", qualifiedTable, strings.Join(quotedCols, ", "), 
strings.Join(values, ", ")), nil +} + +func buildClickHouseAssignments(values map[string]interface{}) string { + if len(values) == 0 { + return "" + } + cols := make([]string, 0, len(values)) + for k := range values { + if strings.TrimSpace(k) == "" { + continue + } + cols = append(cols, k) + } + sort.Strings(cols) + parts := make([]string, 0, len(cols)) + for _, col := range cols { + parts = append(parts, fmt.Sprintf("%s = %s", quoteClickHouseIdentifier(col), clickHouseLiteral(values[col]))) + } + return strings.Join(parts, ", ") +} + +func buildClickHouseWhereClause(keys map[string]interface{}) string { + if len(keys) == 0 { + return "" + } + cols := make([]string, 0, len(keys)) + for k := range keys { + if strings.TrimSpace(k) == "" { + continue + } + cols = append(cols, k) + } + sort.Strings(cols) + parts := make([]string, 0, len(cols)) + for _, col := range cols { + parts = append(parts, fmt.Sprintf("%s = %s", quoteClickHouseIdentifier(col), clickHouseLiteral(keys[col]))) + } + return strings.Join(parts, " AND ") +} + +func clickHouseLiteral(value interface{}) string { + switch val := value.(type) { + case nil: + return "NULL" + case bool: + if val { + return "1" + } + return "0" + case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, float32, float64: + return fmt.Sprintf("%v", val) + case time.Time: + return fmt.Sprintf("'%s'", val.Format("2006-01-02 15:04:05")) + case []byte: + return fmt.Sprintf("'%s'", strings.ReplaceAll(string(val), "'", "''")) + default: + return fmt.Sprintf("'%s'", strings.ReplaceAll(fmt.Sprintf("%v", val), "'", "''")) + } +} diff --git a/internal/db/mariadb_impl.go b/internal/db/mariadb_impl.go index 1e316ad..6a36400 100644 --- a/internal/db/mariadb_impl.go +++ b/internal/db/mariadb_impl.go @@ -250,12 +250,22 @@ func (m *MariaDB) GetIndexes(dbName, tableName string) ([]connection.IndexDefini } } + subPart := 0 + if val, ok := row["Sub_part"]; ok && val != nil { + if f, ok := val.(float64); ok { + subPart = int(f) + } else if i, ok := val.(int64); ok { + subPart = int(i) + } + } + idx := connection.IndexDefinition{ Name: fmt.Sprintf("%v", row["Key_name"]), ColumnName: fmt.Sprintf("%v", row["Column_name"]), NonUnique: nonUnique, SeqInIndex: seq, IndexType: fmt.Sprintf("%v", row["Index_type"]), + SubPart: subPart, } indexes = append(indexes, idx) } @@ -323,7 +333,7 @@ func (m *MariaDB) ApplyChanges(tableName string, changes connection.ChangeSet) e var args []interface{} for k, v := range pk { wheres = append(wheres, fmt.Sprintf("`%s` = ?", k)) - args = append(args, normalizeMySQLDateTimeValue(v)) + args = append(args, normalizeMySQLComplexValue(normalizeMySQLDateTimeValue(v))) } if len(wheres) == 0 { continue @@ -341,7 +351,7 @@ func (m *MariaDB) ApplyChanges(tableName string, changes connection.ChangeSet) e for k, v := range update.Values { sets = append(sets, fmt.Sprintf("`%s` = ?", k)) - args = append(args, normalizeMySQLDateTimeValue(v)) + args = append(args, normalizeMySQLComplexValue(normalizeMySQLDateTimeValue(v))) } if len(sets) == 0 { @@ -351,7 +361,7 @@ func (m *MariaDB) ApplyChanges(tableName string, changes connection.ChangeSet) e var wheres []string for k, v := range update.Keys { wheres = append(wheres, fmt.Sprintf("`%s` = ?", k)) - args = append(args, normalizeMySQLDateTimeValue(v)) + args = append(args, normalizeMySQLComplexValue(normalizeMySQLDateTimeValue(v))) } if len(wheres) == 0 { @@ -373,7 +383,7 @@ func (m *MariaDB) ApplyChanges(tableName string, changes connection.ChangeSet) e for k, v := range row { cols = 
append(cols, fmt.Sprintf("`%s`", k)) placeholders = append(placeholders, "?") - args = append(args, normalizeMySQLDateTimeValue(v)) + args = append(args, normalizeMySQLComplexValue(normalizeMySQLDateTimeValue(v))) } if len(cols) == 0 { diff --git a/internal/db/mysql_impl.go b/internal/db/mysql_impl.go index 4aefa29..5095f1c 100644 --- a/internal/db/mysql_impl.go +++ b/internal/db/mysql_impl.go @@ -3,6 +3,7 @@ package db import ( "context" "database/sql" + "encoding/json" "fmt" "net/url" "strconv" @@ -441,12 +442,22 @@ func (m *MySQLDB) GetIndexes(dbName, tableName string) ([]connection.IndexDefini } } + subPart := 0 + if val, ok := row["Sub_part"]; ok && val != nil { + if f, ok := val.(float64); ok { + subPart = int(f) + } else if i, ok := val.(int64); ok { + subPart = int(i) + } + } + idx := connection.IndexDefinition{ Name: fmt.Sprintf("%v", row["Key_name"]), ColumnName: fmt.Sprintf("%v", row["Column_name"]), NonUnique: nonUnique, SeqInIndex: seq, IndexType: fmt.Sprintf("%v", row["Index_type"]), + SubPart: subPart, } indexes = append(indexes, idx) } @@ -606,6 +617,18 @@ func (m *MySQLDB) ApplyChanges(tableName string, changes connection.ChangeSet) e return tx.Commit() } +func normalizeMySQLComplexValue(value interface{}) interface{} { + switch v := value.(type) { + case map[string]interface{}, []interface{}: + if data, err := json.Marshal(v); err == nil { + return string(data) + } + return fmt.Sprintf("%v", value) + default: + return value + } +} + func normalizeMySQLDateTimeValue(value interface{}) interface{} { text, ok := value.(string) if !ok { @@ -670,7 +693,7 @@ func (m *MySQLDB) loadColumnTypeMap(tableName string) map[string]string { func normalizeMySQLValueForInsert(columnName string, value interface{}, columnTypeMap map[string]string) (interface{}, bool) { columnType := strings.ToLower(strings.TrimSpace(columnTypeMap[strings.ToLower(strings.TrimSpace(columnName))])) if !isMySQLTemporalColumnType(columnType) { - return value, false + return normalizeMySQLComplexValue(value), false } text, ok := value.(string) if ok && strings.TrimSpace(text) == "" { diff --git a/internal/db/tdengine_applychanges_test.go b/internal/db/tdengine_applychanges_test.go new file mode 100644 index 0000000..8afebd4 --- /dev/null +++ b/internal/db/tdengine_applychanges_test.go @@ -0,0 +1,168 @@ +//go:build gonavi_full_drivers || gonavi_tdengine_driver + +package db + +import ( + "context" + "database/sql" + "database/sql/driver" + "fmt" + "strings" + "sync" + "testing" + + "GoNavi-Wails/internal/connection" +) + +const tdengineRecordingDriverName = "gonavi_tdengine_recording" + +var ( + registerTDengineRecordingDriverOnce sync.Once + tdengineRecordingDriverMu sync.Mutex + tdengineRecordingDriverSeq int + tdengineRecordingDriverStates = map[string]*tdengineRecordingState{} +) + +type tdengineRecordingState struct { + mu sync.Mutex + queries []string + execErr error +} + +func (s *tdengineRecordingState) snapshotQueries() []string { + s.mu.Lock() + defer s.mu.Unlock() + queries := make([]string, len(s.queries)) + copy(queries, s.queries) + return queries +} + +type tdengineRecordingDriver struct{} + +func (tdengineRecordingDriver) Open(name string) (driver.Conn, error) { + tdengineRecordingDriverMu.Lock() + state := tdengineRecordingDriverStates[name] + tdengineRecordingDriverMu.Unlock() + if state == nil { + return nil, fmt.Errorf("recording state not found: %s", name) + } + return &tdengineRecordingConn{state: state}, nil +} + +type tdengineRecordingConn struct { + state *tdengineRecordingState +} + +func 
(c *tdengineRecordingConn) Prepare(query string) (driver.Stmt, error) { + return nil, fmt.Errorf("prepare not supported in tdengine recording driver: %s", query) +} + +func (c *tdengineRecordingConn) Close() error { return nil } + +func (c *tdengineRecordingConn) Begin() (driver.Tx, error) { + return nil, fmt.Errorf("transactions not supported in tdengine recording driver") +} + +func (c *tdengineRecordingConn) ExecContext(_ context.Context, query string, args []driver.NamedValue) (driver.Result, error) { + if len(args) > 0 { + return nil, fmt.Errorf("unexpected exec args: %d", len(args)) + } + c.state.mu.Lock() + defer c.state.mu.Unlock() + if c.state.execErr != nil { + return nil, c.state.execErr + } + c.state.queries = append(c.state.queries, query) + return driver.RowsAffected(1), nil +} + +var _ driver.ExecerContext = (*tdengineRecordingConn)(nil) + +func openTDengineRecordingDB(t *testing.T) (*sql.DB, *tdengineRecordingState) { + t.Helper() + registerTDengineRecordingDriverOnce.Do(func() { + sql.Register(tdengineRecordingDriverName, tdengineRecordingDriver{}) + }) + + tdengineRecordingDriverMu.Lock() + tdengineRecordingDriverSeq++ + dsn := fmt.Sprintf("tdengine-recording-%d", tdengineRecordingDriverSeq) + state := &tdengineRecordingState{} + tdengineRecordingDriverStates[dsn] = state + tdengineRecordingDriverMu.Unlock() + + dbConn, err := sql.Open(tdengineRecordingDriverName, dsn) + if err != nil { + t.Fatalf("打开 recording db 失败: %v", err) + } + + t.Cleanup(func() { + _ = dbConn.Close() + tdengineRecordingDriverMu.Lock() + delete(tdengineRecordingDriverStates, dsn) + tdengineRecordingDriverMu.Unlock() + }) + + return dbConn, state +} + +func TestTDengineApplyChanges_InsertsIntoQualifiedTable(t *testing.T) { + t.Parallel() + + dbConn, state := openTDengineRecordingDB(t) + td := &TDengineDB{conn: dbConn} + + changes := connection.ChangeSet{ + Inserts: []map[string]interface{}{ + { + "ts": "2026-03-09 10:00:00", + "value": 12.5, + "device": "sensor-a", + "enabled": true, + }, + }, + } + + if err := td.ApplyChanges("analytics.metrics", changes); err != nil { + t.Fatalf("ApplyChanges 返回错误: %v", err) + } + + queries := state.snapshotQueries() + if len(queries) != 1 { + t.Fatalf("期望执行 1 条 SQL,实际 %d 条: %#v", len(queries), queries) + } + + want := "INSERT INTO `analytics`.`metrics` (`device`, `enabled`, `ts`, `value`) VALUES ('sensor-a', 1, '2026-03-09 10:00:00', 12.5)" + if queries[0] != want { + t.Fatalf("插入 SQL 不符合预期\nwant: %s\n got: %s", want, queries[0]) + } +} + +func TestTDengineApplyChanges_RejectsMixedUpdatesWithoutPartialWrite(t *testing.T) { + t.Parallel() + + dbConn, state := openTDengineRecordingDB(t) + td := &TDengineDB{conn: dbConn} + + changes := connection.ChangeSet{ + Inserts: []map[string]interface{}{{ + "ts": "2026-03-09 10:00:00", + "value": 12.5, + }}, + Updates: []connection.UpdateRow{{ + Keys: map[string]interface{}{"ts": "2026-03-09 10:00:00"}, + Values: map[string]interface{}{"value": 18.8}, + }}, + } + + err := td.ApplyChanges("metrics", changes) + if err == nil { + t.Fatalf("期望 mixed changes 被拒绝") + } + if !strings.Contains(err.Error(), "UPDATE/DELETE") { + t.Fatalf("错误信息未说明限制边界: %v", err) + } + if queries := state.snapshotQueries(); len(queries) != 0 { + t.Fatalf("期望拒绝 mixed changes 时不执行任何 SQL,实际=%#v", queries) + } +} diff --git a/internal/db/tdengine_impl.go b/internal/db/tdengine_impl.go index 300cfb0..7efcf92 100644 --- a/internal/db/tdengine_impl.go +++ b/internal/db/tdengine_impl.go @@ -7,6 +7,7 @@ import ( "database/sql" "fmt" "net" + "sort" "strconv" 
"strings" "time" @@ -362,6 +363,83 @@ func (t *TDengineDB) GetTriggers(dbName, tableName string) ([]connection.Trigger return []connection.TriggerDefinition{}, nil } +func (t *TDengineDB) ApplyChanges(tableName string, changes connection.ChangeSet) error { + if t.conn == nil { + return fmt.Errorf("connection not open") + } + if strings.TrimSpace(tableName) == "" { + return fmt.Errorf("table name required") + } + if len(changes.Updates) > 0 || len(changes.Deletes) > 0 { + return fmt.Errorf("TDengine 目标端当前仅支持 INSERT 写入,暂不支持 UPDATE/DELETE 差异同步,请改用仅插入或全量覆盖模式") + } + + qualifiedTable := quoteTDengineTable("", tableName) + for _, row := range changes.Inserts { + query, err := buildTDengineInsertSQL(qualifiedTable, row) + if err != nil { + return err + } + if query == "" { + continue + } + if _, err := t.conn.Exec(query); err != nil { + return fmt.Errorf("insert error: %v; sql=%s", err, query) + } + } + return nil +} + +func buildTDengineInsertSQL(qualifiedTable string, row map[string]interface{}) (string, error) { + if strings.TrimSpace(qualifiedTable) == "" { + return "", fmt.Errorf("qualified table required") + } + if len(row) == 0 { + return "", nil + } + + cols := make([]string, 0, len(row)) + for key := range row { + if strings.TrimSpace(key) == "" { + continue + } + cols = append(cols, key) + } + if len(cols) == 0 { + return "", nil + } + sort.Strings(cols) + + quotedCols := make([]string, 0, len(cols)) + values := make([]string, 0, len(cols)) + for _, col := range cols { + quotedCols = append(quotedCols, fmt.Sprintf("`%s`", escapeBacktickIdent(col))) + values = append(values, tdengineLiteral(row[col])) + } + + return fmt.Sprintf("INSERT INTO %s (%s) VALUES (%s)", qualifiedTable, strings.Join(quotedCols, ", "), strings.Join(values, ", ")), nil +} + +func tdengineLiteral(value interface{}) string { + switch val := value.(type) { + case nil: + return "NULL" + case bool: + if val { + return "1" + } + return "0" + case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, float32, float64: + return fmt.Sprintf("%v", val) + case time.Time: + return fmt.Sprintf("'%s'", val.Format("2006-01-02 15:04:05")) + case []byte: + return fmt.Sprintf("'%s'", strings.ReplaceAll(string(val), "'", "''")) + default: + return fmt.Sprintf("'%s'", strings.ReplaceAll(fmt.Sprintf("%v", val), "'", "''")) + } +} + func getValueFromRow(row map[string]interface{}, keys ...string) (interface{}, bool) { if len(row) == 0 { return nil, false diff --git a/internal/sync/analyze.go b/internal/sync/analyze.go index a12a2a0..e1a4af1 100644 --- a/internal/sync/analyze.go +++ b/internal/sync/analyze.go @@ -1,22 +1,27 @@ package sync import ( - "GoNavi-Wails/internal/db" "GoNavi-Wails/internal/logger" "fmt" "strings" ) type TableDiffSummary struct { - Table string `json:"table"` - PKColumn string `json:"pkColumn,omitempty"` - CanSync bool `json:"canSync"` - Inserts int `json:"inserts"` - Updates int `json:"updates"` - Deletes int `json:"deletes"` - Same int `json:"same"` - Message string `json:"message,omitempty"` - HasSchema bool `json:"hasSchema,omitempty"` + Table string `json:"table"` + PKColumn string `json:"pkColumn,omitempty"` + CanSync bool `json:"canSync"` + Inserts int `json:"inserts"` + Updates int `json:"updates"` + Deletes int `json:"deletes"` + Same int `json:"same"` + Message string `json:"message,omitempty"` + HasSchema bool `json:"hasSchema,omitempty"` + TargetTableExists bool `json:"targetTableExists,omitempty"` + PlannedAction string `json:"plannedAction,omitempty"` + Warnings []string 
`json:"warnings,omitempty"` + UnsupportedObjects []string `json:"unsupportedObjects,omitempty"` + IndexesToCreate int `json:"indexesToCreate,omitempty"` + IndexesSkipped int `json:"indexesSkipped,omitempty"` } type SyncAnalyzeResult struct { @@ -27,6 +32,12 @@ type SyncAnalyzeResult struct { func (s *SyncEngine) Analyze(config SyncConfig) SyncAnalyzeResult { result := SyncAnalyzeResult{Success: true, Tables: []TableDiffSummary{}} + if isRedisToMongoKeyspacePair(config) { + return s.analyzeRedisToMongo(config) + } + if isMongoToRedisKeyspacePair(config) { + return s.analyzeMongoToRedis(config) + } contentRaw := strings.ToLower(strings.TrimSpace(config.Content)) syncSchema := false @@ -48,25 +59,23 @@ func (s *SyncEngine) Analyze(config SyncConfig) SyncAnalyzeResult { totalTables := len(config.Tables) s.progress(config.JobID, 0, totalTables, "", "差异分析开始") - sourceDB, err := db.NewDatabase(config.SourceConfig.Type) + sourceDB, err := newSyncDatabase(config.SourceConfig.Type) if err != nil { logger.Error(err, "初始化源数据库驱动失败:类型=%s", config.SourceConfig.Type) return SyncAnalyzeResult{Success: false, Message: "初始化源数据库驱动失败: " + err.Error()} } - targetDB, err := db.NewDatabase(config.TargetConfig.Type) + targetDB, err := newSyncDatabase(config.TargetConfig.Type) if err != nil { logger.Error(err, "初始化目标数据库驱动失败:类型=%s", config.TargetConfig.Type) return SyncAnalyzeResult{Success: false, Message: "初始化目标数据库驱动失败: " + err.Error()} } - // Connect Source if err := sourceDB.Connect(config.SourceConfig); err != nil { logger.Error(err, "源数据库连接失败:%s", formatConnSummaryForSync(config.SourceConfig)) return SyncAnalyzeResult{Success: false, Message: "源数据库连接失败: " + err.Error()} } defer sourceDB.Close() - // Connect Target if err := targetDB.Connect(config.TargetConfig); err != nil { logger.Error(err, "目标数据库连接失败:%s", formatConnSummaryForSync(config.TargetConfig)) return SyncAnalyzeResult{Success: false, Message: "目标数据库连接失败: " + err.Error()} @@ -88,51 +97,76 @@ func (s *SyncEngine) Analyze(config SyncConfig) SyncAnalyzeResult { HasSchema: syncSchema, } - sourceSchema, sourceTable := normalizeSchemaAndTable(config.SourceConfig.Type, config.SourceConfig.Database, tableName) - targetSchema, targetTable := normalizeSchemaAndTable(config.TargetConfig.Type, config.TargetConfig.Database, tableName) - sourceQueryTable := qualifiedNameForQuery(config.SourceConfig.Type, sourceSchema, sourceTable, tableName) - targetQueryTable := qualifiedNameForQuery(config.TargetConfig.Type, targetSchema, targetTable, tableName) - - cols, err := sourceDB.GetColumns(sourceSchema, sourceTable) + plan, cols, _, err := buildSchemaMigrationPlan(config, tableName, sourceDB, targetDB) if err != nil { - summary.Message = "获取源表字段失败: " + err.Error() + summary.Message = err.Error() + result.Tables = append(result.Tables, summary) + return + } + summary.TargetTableExists = plan.TargetTableExists + summary.PlannedAction = plan.PlannedAction + summary.Warnings = append(summary.Warnings, plan.Warnings...) + summary.UnsupportedObjects = append(summary.UnsupportedObjects, plan.UnsupportedObjects...) 
+ summary.IndexesToCreate = plan.IndexesToCreate + summary.IndexesSkipped = plan.IndexesSkipped + + if !plan.TargetTableExists && !plan.AutoCreate { + summary.Message = firstNonEmpty(plan.PlannedAction, "目标表不存在,无法执行同步") result.Tables = append(result.Tables, summary) return } if !syncData { summary.CanSync = true - summary.Message = "仅同步结构,未执行数据差异分析" + summary.Message = firstNonEmpty(plan.PlannedAction, "仅同步结构,未执行数据差异分析") result.Tables = append(result.Tables, summary) return } + tableMode := normalizeSyncMode(config.Mode) pkCols := make([]string, 0, 2) for _, c := range cols { if c.Key == "PRI" || c.Key == "PK" { pkCols = append(pkCols, c.Name) } } - if len(pkCols) == 0 { - summary.Message = "无主键,不支持数据对比/同步" - result.Tables = append(result.Tables, summary) - return - } - if len(pkCols) > 1 { - summary.Message = fmt.Sprintf("复合主键(%s),暂不支持数据对比/同步", strings.Join(pkCols, ",")) - result.Tables = append(result.Tables, summary) - return - } - summary.PKColumn = pkCols[0] - // Query data for diff - sourceRows, _, err := sourceDB.Query(fmt.Sprintf("SELECT * FROM %s", quoteQualifiedIdentByType(config.SourceConfig.Type, sourceQueryTable))) + sourceRows, _, err := sourceDB.Query(fmt.Sprintf("SELECT * FROM %s", quoteQualifiedIdentByType(config.SourceConfig.Type, plan.SourceQueryTable))) if err != nil { summary.Message = "读取源表失败: " + err.Error() result.Tables = append(result.Tables, summary) return } - targetRows, _, err := targetDB.Query(fmt.Sprintf("SELECT * FROM %s", quoteQualifiedIdentByType(config.TargetConfig.Type, targetQueryTable))) + + if !plan.TargetTableExists && plan.AutoCreate { + summary.CanSync = true + summary.Inserts = len(sourceRows) + summary.Message = firstNonEmpty(plan.PlannedAction, "目标表不存在,执行时将自动建表并导入全部源数据") + result.Tables = append(result.Tables, summary) + return + } + + if tableMode != "insert_update" { + summary.CanSync = true + summary.Inserts = len(sourceRows) + summary.Message = firstNonEmpty(plan.PlannedAction, "当前模式无需差异对比,将按源表数据执行导入") + result.Tables = append(result.Tables, summary) + return + } + + if len(pkCols) == 0 { + summary.Message = "无主键,不支持差异对比同步;如需直接导入请使用仅插入或全量覆盖模式" + result.Tables = append(result.Tables, summary) + return + } + if len(pkCols) > 1 { + summary.Message = fmt.Sprintf("复合主键(%s),暂不支持差异对比同步", strings.Join(pkCols, ",")) + result.Tables = append(result.Tables, summary) + return + } + summary.PKColumn = pkCols[0] + + targetRows, _, err := targetDB.Query(fmt.Sprintf("SELECT * FROM %s", quoteQualifiedIdentByType(config.TargetConfig.Type, plan.TargetQueryTable))) if err != nil { summary.Message = "读取目标表失败: " + err.Error() result.Tables = append(result.Tables, summary) @@ -188,6 +222,9 @@ func (s *SyncEngine) Analyze(config SyncConfig) SyncAnalyzeResult { } summary.CanSync = true + if strings.TrimSpace(summary.Message) == "" { + summary.Message = firstNonEmpty(plan.PlannedAction, "差异分析完成") + } result.Tables = append(result.Tables, summary) }() } @@ -196,3 +233,12 @@ func (s *SyncEngine) Analyze(config SyncConfig) SyncAnalyzeResult { result.Message = fmt.Sprintf("已完成 %d 张表的差异分析", len(result.Tables)) return result } + +func firstNonEmpty(values ...string) string { + for _, value := range values { + if strings.TrimSpace(value) != "" { + return value + } + } + return "" +} diff --git a/internal/sync/migration_clickhouse.go b/internal/sync/migration_clickhouse.go new file mode 100644 index 0000000..d67fcef --- /dev/null +++ b/internal/sync/migration_clickhouse.go @@ -0,0 +1,741 @@ +package sync + +import ( + "GoNavi-Wails/internal/connection" + 
"GoNavi-Wails/internal/db" + "fmt" + "regexp" + "strings" +) + +func buildMySQLToClickHousePlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + plan := SchemaMigrationPlan{} + plan.SourceSchema, plan.SourceTable = normalizeSchemaAndTable(config.SourceConfig.Type, config.SourceConfig.Database, tableName) + plan.TargetSchema, plan.TargetTable = normalizeSchemaAndTable(config.TargetConfig.Type, config.TargetConfig.Database, tableName) + plan.SourceQueryTable = qualifiedNameForQuery(config.SourceConfig.Type, plan.SourceSchema, plan.SourceTable, tableName) + plan.TargetQueryTable = qualifiedNameForQuery(config.TargetConfig.Type, plan.TargetSchema, plan.TargetTable, tableName) + plan.PlannedAction = "使用已有目标表导入" + + sourceCols, sourceExists, err := inspectTableColumns(sourceDB, plan.SourceSchema, plan.SourceTable) + if err != nil { + return plan, nil, nil, fmt.Errorf("获取源表字段失败: %w", err) + } + if !sourceExists { + return plan, nil, nil, fmt.Errorf("源表不存在或无列定义: %s", tableName) + } + + targetCols, targetExists, err := inspectTableColumns(targetDB, plan.TargetSchema, plan.TargetTable) + if err != nil { + return plan, sourceCols, nil, fmt.Errorf("获取目标表字段失败: %w", err) + } + plan.TargetTableExists = targetExists + + strategy := normalizeTargetTableStrategy(config.TargetTableStrategy) + if targetExists { + missing := diffMissingColumnNames(sourceCols, targetCols) + if len(missing) > 0 { + plan.Warnings = append(plan.Warnings, fmt.Sprintf("目标表缺失字段 %d 个:%s", len(missing), strings.Join(missing, ", "))) + } + if config.AutoAddColumns { + addSQL, addWarnings := buildMySQLToClickHouseAddColumnSQL(plan.TargetQueryTable, sourceCols, targetCols) + plan.PreDataSQL = append(plan.PreDataSQL, addSQL...) + plan.Warnings = append(plan.Warnings, addWarnings...) + if len(addSQL) > 0 { + plan.PlannedAction = fmt.Sprintf("补齐缺失字段(%d)后导入", len(addSQL)) + } + } + plan.Warnings = append(plan.Warnings, "ClickHouse 目标端建议优先使用仅插入或全量覆盖;更新/删除语义与传统关系型存在差异") + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + } + + switch strategy { + case "existing_only": + plan.PlannedAction = "目标表不存在,需先手工创建" + plan.Warnings = append(plan.Warnings, "当前策略要求目标表已存在,执行时不会自动建表") + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + case "smart", "auto_create_if_missing": + plan.AutoCreate = true + plan.PlannedAction = "目标表不存在,将自动建表后导入" + createSQL, warnings, unsupported := buildMySQLToClickHouseCreateTableSQL(plan.TargetQueryTable, sourceCols) + plan.CreateTableSQL = createSQL + plan.Warnings = append(plan.Warnings, warnings...) + plan.UnsupportedObjects = append(plan.UnsupportedObjects, unsupported...) 
+ return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + default: + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + } +} + +func buildPGLikeToClickHousePlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + plan := SchemaMigrationPlan{} + sourceType := resolveMigrationDBType(config.SourceConfig) + targetType := resolveMigrationDBType(config.TargetConfig) + plan.SourceSchema, plan.SourceTable = normalizeSchemaAndTable(sourceType, config.SourceConfig.Database, tableName) + plan.TargetSchema, plan.TargetTable = normalizeSchemaAndTable(targetType, config.TargetConfig.Database, tableName) + plan.SourceQueryTable = qualifiedNameForQuery(sourceType, plan.SourceSchema, plan.SourceTable, tableName) + plan.TargetQueryTable = qualifiedNameForQuery(targetType, plan.TargetSchema, plan.TargetTable, tableName) + plan.PlannedAction = "使用已有目标表导入" + + sourceCols, sourceExists, err := inspectTableColumns(sourceDB, plan.SourceSchema, plan.SourceTable) + if err != nil { + return plan, nil, nil, fmt.Errorf("获取源表字段失败: %w", err) + } + if !sourceExists { + return plan, nil, nil, fmt.Errorf("源表不存在或无列定义: %s", tableName) + } + + targetCols, targetExists, err := inspectTableColumns(targetDB, plan.TargetSchema, plan.TargetTable) + if err != nil { + return plan, sourceCols, nil, fmt.Errorf("获取目标表字段失败: %w", err) + } + plan.TargetTableExists = targetExists + + strategy := normalizeTargetTableStrategy(config.TargetTableStrategy) + if targetExists { + missing := diffMissingColumnNames(sourceCols, targetCols) + if len(missing) > 0 { + plan.Warnings = append(plan.Warnings, fmt.Sprintf("目标表缺失字段 %d 个:%s", len(missing), strings.Join(missing, ", "))) + } + if config.AutoAddColumns { + addSQL, addWarnings := buildPGLikeToClickHouseAddColumnSQL(plan.TargetQueryTable, sourceCols, targetCols) + plan.PreDataSQL = append(plan.PreDataSQL, addSQL...) + plan.Warnings = append(plan.Warnings, addWarnings...) + if len(addSQL) > 0 { + plan.PlannedAction = fmt.Sprintf("补齐缺失字段(%d)后导入", len(addSQL)) + } + } + plan.Warnings = append(plan.Warnings, "ClickHouse 目标端建议优先使用仅插入或全量覆盖;更新/删除语义与传统关系型存在差异") + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + } + + switch strategy { + case "existing_only": + plan.PlannedAction = "目标表不存在,需先手工创建" + plan.Warnings = append(plan.Warnings, "当前策略要求目标表已存在,执行时不会自动建表") + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + case "smart", "auto_create_if_missing": + plan.AutoCreate = true + plan.PlannedAction = "目标表不存在,将自动建表后导入" + createSQL, warnings, unsupported := buildPGLikeToClickHouseCreateTableSQL(plan.TargetQueryTable, sourceCols) + plan.CreateTableSQL = createSQL + plan.Warnings = append(plan.Warnings, warnings...) + plan.UnsupportedObjects = append(plan.UnsupportedObjects, unsupported...) 
+ return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + default: + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + } +} + +func buildClickHouseToMySQLPlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + plan := SchemaMigrationPlan{} + plan.SourceSchema, plan.SourceTable = normalizeSchemaAndTable(config.SourceConfig.Type, config.SourceConfig.Database, tableName) + plan.TargetSchema, plan.TargetTable = normalizeSchemaAndTable(config.TargetConfig.Type, config.TargetConfig.Database, tableName) + plan.SourceQueryTable = qualifiedNameForQuery(config.SourceConfig.Type, plan.SourceSchema, plan.SourceTable, tableName) + plan.TargetQueryTable = qualifiedNameForQuery(config.TargetConfig.Type, plan.TargetSchema, plan.TargetTable, tableName) + plan.PlannedAction = "使用已有目标表导入" + + sourceCols, sourceExists, err := inspectTableColumns(sourceDB, plan.SourceSchema, plan.SourceTable) + if err != nil { + return plan, nil, nil, fmt.Errorf("获取源表字段失败: %w", err) + } + if !sourceExists { + return plan, nil, nil, fmt.Errorf("源表不存在或无列定义: %s", tableName) + } + + targetCols, targetExists, err := inspectTableColumns(targetDB, plan.TargetSchema, plan.TargetTable) + if err != nil { + return plan, sourceCols, nil, fmt.Errorf("获取目标表字段失败: %w", err) + } + plan.TargetTableExists = targetExists + + strategy := normalizeTargetTableStrategy(config.TargetTableStrategy) + if targetExists { + missing := diffMissingColumnNames(sourceCols, targetCols) + if len(missing) > 0 { + plan.Warnings = append(plan.Warnings, fmt.Sprintf("目标表缺失字段 %d 个:%s", len(missing), strings.Join(missing, ", "))) + } + if config.AutoAddColumns { + addSQL, addWarnings := buildClickHouseToMySQLAddColumnSQL(plan.TargetQueryTable, sourceCols, targetCols) + plan.PreDataSQL = append(plan.PreDataSQL, addSQL...) + plan.Warnings = append(plan.Warnings, addWarnings...) + if len(addSQL) > 0 { + plan.PlannedAction = fmt.Sprintf("补齐缺失字段(%d)后导入", len(addSQL)) + } + } + plan.Warnings = append(plan.Warnings, "ClickHouse 源端索引/约束元数据有限,反向迁移将以字段和数据为主") + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + } + + switch strategy { + case "existing_only": + plan.PlannedAction = "目标表不存在,需先手工创建" + plan.Warnings = append(plan.Warnings, "当前策略要求目标表已存在,执行时不会自动建表") + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + case "smart", "auto_create_if_missing": + plan.AutoCreate = true + plan.PlannedAction = "目标表不存在,将自动建表后导入" + createSQL, warnings := buildClickHouseToMySQLCreateTableSQL(plan.TargetQueryTable, sourceCols) + plan.CreateTableSQL = createSQL + plan.Warnings = append(plan.Warnings, warnings...) 
+ return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + default: + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + } +} + +func buildClickHouseToPGLikePlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + plan := SchemaMigrationPlan{} + sourceType := resolveMigrationDBType(config.SourceConfig) + targetType := resolveMigrationDBType(config.TargetConfig) + plan.SourceSchema, plan.SourceTable = normalizeSchemaAndTable(sourceType, config.SourceConfig.Database, tableName) + plan.TargetSchema, plan.TargetTable = normalizeSchemaAndTable(targetType, config.TargetConfig.Database, tableName) + plan.SourceQueryTable = qualifiedNameForQuery(sourceType, plan.SourceSchema, plan.SourceTable, tableName) + plan.TargetQueryTable = qualifiedNameForQuery(targetType, plan.TargetSchema, plan.TargetTable, tableName) + plan.PlannedAction = "使用已有目标表导入" + + sourceCols, sourceExists, err := inspectTableColumns(sourceDB, plan.SourceSchema, plan.SourceTable) + if err != nil { + return plan, nil, nil, fmt.Errorf("获取源表字段失败: %w", err) + } + if !sourceExists { + return plan, nil, nil, fmt.Errorf("源表不存在或无列定义: %s", tableName) + } + + targetCols, targetExists, err := inspectTableColumns(targetDB, plan.TargetSchema, plan.TargetTable) + if err != nil { + return plan, sourceCols, nil, fmt.Errorf("获取目标表字段失败: %w", err) + } + plan.TargetTableExists = targetExists + + strategy := normalizeTargetTableStrategy(config.TargetTableStrategy) + if targetExists { + missing := diffMissingColumnNames(sourceCols, targetCols) + if len(missing) > 0 { + plan.Warnings = append(plan.Warnings, fmt.Sprintf("目标表缺失字段 %d 个:%s", len(missing), strings.Join(missing, ", "))) + } + if config.AutoAddColumns { + addSQL, addWarnings := buildClickHouseToPGLikeAddColumnSQL(targetType, plan.TargetQueryTable, sourceCols, targetCols) + plan.PreDataSQL = append(plan.PreDataSQL, addSQL...) + plan.Warnings = append(plan.Warnings, addWarnings...) + if len(addSQL) > 0 { + plan.PlannedAction = fmt.Sprintf("补齐缺失字段(%d)后导入", len(addSQL)) + } + } + plan.Warnings = append(plan.Warnings, "ClickHouse 源端索引/约束元数据有限,反向迁移将以字段和数据为主") + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + } + + switch strategy { + case "existing_only": + plan.PlannedAction = "目标表不存在,需先手工创建" + plan.Warnings = append(plan.Warnings, "当前策略要求目标表已存在,执行时不会自动建表") + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + case "smart", "auto_create_if_missing": + plan.AutoCreate = true + plan.PlannedAction = "目标表不存在,将自动建表后导入" + createSQL, warnings, unsupported := buildClickHouseToPGLikeCreateTableSQL(targetType, plan.TargetQueryTable, sourceCols) + plan.CreateTableSQL = createSQL + plan.Warnings = append(plan.Warnings, warnings...) + plan.UnsupportedObjects = append(plan.UnsupportedObjects, unsupported...) 
+ return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + default: + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + } +} + +func buildPGLikeToClickHouseAddColumnSQL(targetQueryTable string, sourceCols, targetCols []connection.ColumnDefinition) ([]string, []string) { + targetSet := make(map[string]struct{}, len(targetCols)) + for _, col := range targetCols { + key := strings.ToLower(strings.TrimSpace(col.Name)) + if key == "" { + continue + } + targetSet[key] = struct{}{} + } + var sqlList []string + var warnings []string + for _, col := range sourceCols { + key := strings.ToLower(strings.TrimSpace(col.Name)) + if key == "" { + continue + } + if _, ok := targetSet[key]; ok { + continue + } + colType, mapWarnings := mapPGLikeColumnToClickHouse(col) + warnings = append(warnings, mapWarnings...) + sqlList = append(sqlList, fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s %s", + quoteQualifiedIdentByType("clickhouse", targetQueryTable), + quoteIdentByType("clickhouse", col.Name), + colType, + )) + } + return sqlList, dedupeStrings(warnings) +} + +func buildMySQLToClickHouseAddColumnSQL(targetQueryTable string, sourceCols, targetCols []connection.ColumnDefinition) ([]string, []string) { + targetSet := make(map[string]struct{}, len(targetCols)) + for _, col := range targetCols { + key := strings.ToLower(strings.TrimSpace(col.Name)) + if key == "" { + continue + } + targetSet[key] = struct{}{} + } + var sqlList []string + var warnings []string + for _, col := range sourceCols { + key := strings.ToLower(strings.TrimSpace(col.Name)) + if key == "" { + continue + } + if _, ok := targetSet[key]; ok { + continue + } + colType, mapWarnings := mapMySQLColumnToClickHouse(col) + warnings = append(warnings, mapWarnings...) + sqlList = append(sqlList, fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s %s", + quoteQualifiedIdentByType("clickhouse", targetQueryTable), + quoteIdentByType("clickhouse", col.Name), + colType, + )) + } + return sqlList, dedupeStrings(warnings) +} + +func buildClickHouseToPGLikeAddColumnSQL(targetType string, targetQueryTable string, sourceCols, targetCols []connection.ColumnDefinition) ([]string, []string) { + targetSet := make(map[string]struct{}, len(targetCols)) + for _, col := range targetCols { + key := strings.ToLower(strings.TrimSpace(col.Name)) + if key == "" { + continue + } + targetSet[key] = struct{}{} + } + var sqlList []string + var warnings []string + for _, col := range sourceCols { + key := strings.ToLower(strings.TrimSpace(col.Name)) + if key == "" { + continue + } + if _, ok := targetSet[key]; ok { + continue + } + colType, mapWarnings := mapClickHouseColumnToPGLike(col) + warnings = append(warnings, mapWarnings...) 
+ sqlList = append(sqlList, fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s %s NULL", + quoteQualifiedIdentByType(targetType, targetQueryTable), + quoteIdentByType(targetType, col.Name), + colType, + )) + } + return sqlList, dedupeStrings(warnings) +} + +func buildClickHouseToMySQLAddColumnSQL(targetQueryTable string, sourceCols, targetCols []connection.ColumnDefinition) ([]string, []string) { + targetSet := make(map[string]struct{}, len(targetCols)) + for _, col := range targetCols { + key := strings.ToLower(strings.TrimSpace(col.Name)) + if key == "" { + continue + } + targetSet[key] = struct{}{} + } + var sqlList []string + var warnings []string + for _, col := range sourceCols { + key := strings.ToLower(strings.TrimSpace(col.Name)) + if key == "" { + continue + } + if _, ok := targetSet[key]; ok { + continue + } + colType, mapWarnings := mapClickHouseColumnToMySQL(col) + warnings = append(warnings, mapWarnings...) + sqlList = append(sqlList, fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s %s NULL", + quoteQualifiedIdentByType("mysql", targetQueryTable), + quoteIdentByType("mysql", col.Name), + colType, + )) + } + return sqlList, dedupeStrings(warnings) +} + +func buildPGLikeToClickHouseCreateTableSQL(targetQueryTable string, sourceCols []connection.ColumnDefinition) (string, []string, []string) { + columnDefs := make([]string, 0, len(sourceCols)) + warnings := make([]string, 0) + unsupported := make([]string, 0) + orderByCols := make([]string, 0) + for _, col := range sourceCols { + def, colWarnings := buildPGLikeToClickHouseColumnDefinition(col) + warnings = append(warnings, colWarnings...) + columnDefs = append(columnDefs, fmt.Sprintf("%s %s", quoteIdentByType("clickhouse", col.Name), def)) + if col.Key == "PRI" || col.Key == "PK" { + orderByCols = append(orderByCols, quoteIdentByType("clickhouse", col.Name)) + } + } + orderExpr := "tuple()" + if len(orderByCols) > 0 { + orderExpr = "(" + strings.Join(orderByCols, ", ") + ")" + } else { + warnings = append(warnings, "源表未识别到主键,ClickHouse 将使用 ORDER BY tuple() 建表,后续查询性能可能受影响") + } + warnings = append(warnings, "ClickHouse 不保留关系型外键/唯一约束语义,将仅迁移字段与数据") + createSQL := fmt.Sprintf("CREATE TABLE %s (\n %s\n) ENGINE = MergeTree() ORDER BY %s", quoteQualifiedIdentByType("clickhouse", targetQueryTable), strings.Join(columnDefs, ",\n "), orderExpr) + return createSQL, dedupeStrings(warnings), dedupeStrings(unsupported) +} + +func buildMySQLToClickHouseCreateTableSQL(targetQueryTable string, sourceCols []connection.ColumnDefinition) (string, []string, []string) { + columnDefs := make([]string, 0, len(sourceCols)) + warnings := make([]string, 0) + unsupported := make([]string, 0) + orderByCols := make([]string, 0) + for _, col := range sourceCols { + def, colWarnings := buildMySQLToClickHouseColumnDefinition(col) + warnings = append(warnings, colWarnings...) 
+ columnDefs = append(columnDefs, fmt.Sprintf("%s %s", quoteIdentByType("clickhouse", col.Name), def)) + if col.Key == "PRI" || col.Key == "PK" { + orderByCols = append(orderByCols, quoteIdentByType("clickhouse", col.Name)) + } + } + orderExpr := "tuple()" + if len(orderByCols) > 0 { + orderExpr = "(" + strings.Join(orderByCols, ", ") + ")" + } else { + warnings = append(warnings, "源表未识别到主键,ClickHouse 将使用 ORDER BY tuple() 建表,后续查询性能可能受影响") + } + warnings = append(warnings, "ClickHouse 不保留关系型外键/唯一约束语义,将仅迁移字段与数据") + createSQL := fmt.Sprintf("CREATE TABLE %s (\n %s\n) ENGINE = MergeTree() ORDER BY %s", quoteQualifiedIdentByType("clickhouse", targetQueryTable), strings.Join(columnDefs, ",\n "), orderExpr) + return createSQL, dedupeStrings(warnings), dedupeStrings(unsupported) +} + +func buildClickHouseToPGLikeCreateTableSQL(targetType string, targetQueryTable string, sourceCols []connection.ColumnDefinition) (string, []string, []string) { + columnDefs := make([]string, 0, len(sourceCols)+1) + warnings := make([]string, 0) + unsupported := []string{"ClickHouse ORDER BY/PARTITION/TTL/Projection/物化视图 语义当前不会自动迁移到 PG-like"} + pkCols := make([]string, 0) + for _, col := range sourceCols { + def, colWarnings := buildClickHouseToPGLikeColumnDefinition(col) + warnings = append(warnings, colWarnings...) + columnDefs = append(columnDefs, fmt.Sprintf("%s %s", quoteIdentByType(targetType, col.Name), def)) + if col.Key == "PRI" || col.Key == "PK" { + pkCols = append(pkCols, quoteIdentByType(targetType, col.Name)) + } + } + if len(pkCols) > 0 { + columnDefs = append(columnDefs, fmt.Sprintf("PRIMARY KEY (%s)", strings.Join(pkCols, ", "))) + } else { + warnings = append(warnings, "ClickHouse 源端未返回主键信息,目标 PG-like 表将不自动创建主键") + } + createSQL := fmt.Sprintf("CREATE TABLE %s (\n %s\n)", quoteQualifiedIdentByType(targetType, targetQueryTable), strings.Join(columnDefs, ",\n ")) + return createSQL, dedupeStrings(warnings), dedupeStrings(unsupported) +} + +func buildClickHouseToMySQLCreateTableSQL(targetQueryTable string, sourceCols []connection.ColumnDefinition) (string, []string) { + columnDefs := make([]string, 0, len(sourceCols)+1) + warnings := make([]string, 0) + pkCols := make([]string, 0) + for _, col := range sourceCols { + def, colWarnings := buildClickHouseToMySQLColumnDefinition(col) + warnings = append(warnings, colWarnings...) 
+ columnDefs = append(columnDefs, fmt.Sprintf("%s %s", quoteIdentByType("mysql", col.Name), def)) + if col.Key == "PRI" || col.Key == "PK" { + pkCols = append(pkCols, quoteIdentByType("mysql", col.Name)) + } + } + if len(pkCols) > 0 { + columnDefs = append(columnDefs, fmt.Sprintf("PRIMARY KEY (%s)", strings.Join(pkCols, ", "))) + } else { + warnings = append(warnings, "ClickHouse 源端未返回主键信息,目标 MySQL 表将不自动创建主键") + } + createSQL := fmt.Sprintf("CREATE TABLE %s (\n %s\n)", quoteQualifiedIdentByType("mysql", targetQueryTable), strings.Join(columnDefs, ",\n ")) + return createSQL, dedupeStrings(warnings) +} + +func buildPGLikeToClickHouseColumnDefinition(col connection.ColumnDefinition) (string, []string) { + targetType, warnings := mapPGLikeColumnToClickHouse(col) + parts := []string{targetType} + return strings.Join(parts, " "), dedupeStrings(warnings) +} + +func buildMySQLToClickHouseColumnDefinition(col connection.ColumnDefinition) (string, []string) { + targetType, warnings := mapMySQLColumnToClickHouse(col) + parts := []string{targetType} + if strings.EqualFold(strings.TrimSpace(col.Nullable), "NO") && !strings.HasPrefix(strings.ToLower(targetType), "nullable(") { + return strings.Join(parts, " "), dedupeStrings(warnings) + } + return strings.Join(parts, " "), dedupeStrings(warnings) +} + +func buildClickHouseToPGLikeColumnDefinition(col connection.ColumnDefinition) (string, []string) { + targetType, warnings := mapClickHouseColumnToPGLike(col) + parts := []string{targetType} + if strings.EqualFold(strings.TrimSpace(col.Nullable), "NO") { + parts = append(parts, "NOT NULL") + } + return strings.Join(parts, " "), dedupeStrings(warnings) +} + +func buildClickHouseToMySQLColumnDefinition(col connection.ColumnDefinition) (string, []string) { + targetType, warnings := mapClickHouseColumnToMySQL(col) + parts := []string{targetType} + if strings.EqualFold(strings.TrimSpace(col.Nullable), "NO") { + parts = append(parts, "NOT NULL") + } + return strings.Join(parts, " "), dedupeStrings(warnings) +} + +func mapPGLikeColumnToClickHouse(col connection.ColumnDefinition) (string, []string) { + raw := strings.ToLower(strings.TrimSpace(col.Type)) + warnings := make([]string, 0) + if raw == "" { + return "String", []string{fmt.Sprintf("字段 %s 类型为空,已降级为 String", col.Name)} + } + baseType := "String" + switch { + case raw == "boolean" || strings.HasPrefix(raw, "bool"): + baseType = "UInt8" + case raw == "smallint": + baseType = "Int16" + case raw == "integer" || raw == "int4": + baseType = "Int32" + case raw == "bigint" || raw == "int8": + baseType = "Int64" + case strings.HasPrefix(raw, "numeric"), strings.HasPrefix(raw, "decimal"): + baseType = replaceTypeBase(raw, []string{"numeric", "decimal"}, "Decimal") + case raw == "real" || raw == "float4": + baseType = "Float32" + case raw == "double precision" || raw == "float8": + baseType = "Float64" + case raw == "date": + baseType = "Date" + case strings.HasPrefix(raw, "timestamp") || strings.Contains(raw, "without time zone") || strings.Contains(raw, "with time zone"): + baseType = "DateTime" + case strings.HasPrefix(raw, "time"): + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已降级为 String", col.Name, col.Type)) + baseType = "String" + case strings.HasPrefix(raw, "character varying"), strings.HasPrefix(raw, "varchar("), strings.HasPrefix(raw, "character("), strings.HasPrefix(raw, "char("), raw == "character", raw == "text", raw == "uuid": + baseType = "String" + case raw == "json" || raw == "jsonb" || raw == "bytea": + warnings = append(warnings, 
fmt.Sprintf("字段 %s 类型 %s 已降级为 String", col.Name, col.Type)) + baseType = "String" + case strings.HasSuffix(raw, "[]") || strings.HasPrefix(raw, "array"): + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已降级为 String", col.Name, col.Type)) + baseType = "String" + case raw == "user-defined": + warnings = append(warnings, fmt.Sprintf("字段 %s 为用户自定义类型,已降级为 String", col.Name)) + baseType = "String" + default: + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 暂无专门映射,已降级为 String", col.Name, col.Type)) + baseType = "String" + } + if strings.EqualFold(strings.TrimSpace(col.Nullable), "YES") && !strings.HasPrefix(strings.ToLower(baseType), "nullable(") { + baseType = fmt.Sprintf("Nullable(%s)", baseType) + } + if strings.Contains(strings.ToLower(strings.TrimSpace(col.Extra)), "identity") || strings.Contains(strings.ToLower(strings.TrimSpace(col.Extra)), "auto_increment") { + warnings = append(warnings, fmt.Sprintf("字段 %s 的 identity/自增语义在 ClickHouse 中不保留", col.Name)) + } + return baseType, dedupeStrings(warnings) +} + +func mapMySQLColumnToClickHouse(col connection.ColumnDefinition) (string, []string) { + raw := strings.ToLower(strings.TrimSpace(col.Type)) + warnings := make([]string, 0) + if raw == "" { + return "String", []string{fmt.Sprintf("字段 %s 类型为空,已降级为 String", col.Name)} + } + unsigned := strings.Contains(raw, "unsigned") + clean := strings.ReplaceAll(raw, " unsigned", "") + clean = strings.ReplaceAll(clean, " zerofill", "") + baseType := "String" + switch { + case strings.HasPrefix(clean, "tinyint(1)"): + baseType = "UInt8" + case strings.HasPrefix(clean, "tinyint"): + if unsigned { + baseType = "UInt8" + } else { + baseType = "Int8" + } + case strings.HasPrefix(clean, "smallint"): + if unsigned { + baseType = "UInt16" + } else { + baseType = "Int16" + } + case strings.HasPrefix(clean, "mediumint"), strings.HasPrefix(clean, "int"), strings.HasPrefix(clean, "integer"): + if unsigned { + baseType = "UInt32" + } else { + baseType = "Int32" + } + case strings.HasPrefix(clean, "bigint"): + if unsigned { + baseType = "UInt64" + } else { + baseType = "Int64" + } + case strings.HasPrefix(clean, "decimal"), strings.HasPrefix(clean, "numeric"): + baseType = replaceTypeBase(strings.Title(clean), []string{"Decimal", "Numeric"}, "Decimal") + case strings.HasPrefix(clean, "float"): + baseType = "Float32" + case strings.HasPrefix(clean, "double"): + baseType = "Float64" + case strings.HasPrefix(clean, "date"): + baseType = "Date" + case strings.HasPrefix(clean, "datetime"), strings.HasPrefix(clean, "timestamp"): + baseType = "DateTime" + case strings.HasPrefix(clean, "time"): + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 time 已降级为 String", col.Name)) + baseType = "String" + case strings.HasPrefix(clean, "json"), strings.HasPrefix(clean, "enum"), strings.HasPrefix(clean, "set"), strings.HasPrefix(clean, "char"), strings.HasPrefix(clean, "varchar"), strings.Contains(clean, "text"): + baseType = "String" + case strings.Contains(clean, "blob"), strings.Contains(clean, "binary"): + warnings = append(warnings, fmt.Sprintf("字段 %s 二进制类型已降级为 String", col.Name)) + baseType = "String" + default: + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 暂无专门映射,已降级为 String", col.Name, col.Type)) + baseType = "String" + } + if strings.EqualFold(strings.TrimSpace(col.Nullable), "YES") && !strings.HasPrefix(strings.ToLower(baseType), "nullable(") { + baseType = fmt.Sprintf("Nullable(%s)", baseType) + } + if strings.Contains(strings.ToLower(strings.TrimSpace(col.Extra)), "auto_increment") { + warnings = 
append(warnings, fmt.Sprintf("字段 %s 的 AUTO_INCREMENT 在 ClickHouse 中不保留自增语义", col.Name)) + } + return baseType, dedupeStrings(warnings) +} + +var clickHouseDecimalPattern = regexp.MustCompile(`^(decimal|numeric)\((\d+)\s*,\s*(\d+)\)$`) +var clickHouseStringArgsPattern = regexp.MustCompile(`^fixedstring\((\d+)\)$`) + +func mapClickHouseColumnToPGLike(col connection.ColumnDefinition) (string, []string) { + raw := strings.TrimSpace(col.Type) + lower := strings.ToLower(raw) + warnings := make([]string, 0) + if strings.HasPrefix(lower, "nullable(") && strings.HasSuffix(lower, ")") { + raw = strings.TrimSpace(raw[len("Nullable(") : len(raw)-1]) + lower = strings.ToLower(raw) + } + for { + if strings.HasPrefix(lower, "lowcardinality(") && strings.HasSuffix(lower, ")") { + raw = strings.TrimSpace(raw[len("LowCardinality(") : len(raw)-1]) + lower = strings.ToLower(raw) + continue + } + break + } + switch { + case lower == "bool" || lower == "boolean": + return "boolean", warnings + case lower == "int8": + return "smallint", warnings + case lower == "uint8": + return "smallint", warnings + case lower == "int16": + return "smallint", warnings + case lower == "uint16": + return "integer", warnings + case lower == "int32": + return "integer", warnings + case lower == "uint32": + return "bigint", warnings + case lower == "int64": + return "bigint", warnings + case lower == "uint64": + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已映射为 numeric(20,0) 以避免无符号溢出", col.Name, col.Type)) + return "numeric(20,0)", warnings + case lower == "float32": + return "real", warnings + case lower == "float64": + return "double precision", warnings + case lower == "date": + return "date", warnings + case strings.HasPrefix(lower, "datetime"): + return "timestamp", warnings + case lower == "string": + return "text", warnings + case lower == "uuid": + return "uuid", warnings + case lower == "json", strings.HasPrefix(lower, "map("), strings.HasPrefix(lower, "array("), strings.HasPrefix(lower, "tuple("), strings.HasPrefix(lower, "nested("): + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已降级为 jsonb", col.Name, col.Type)) + return "jsonb", warnings + case strings.HasPrefix(lower, "enum8("), strings.HasPrefix(lower, "enum16("): + warnings = append(warnings, fmt.Sprintf("字段 %s 枚举类型 %s 已降级为 varchar(255)", col.Name, col.Type)) + return "varchar(255)", warnings + case clickHouseDecimalPattern.MatchString(lower): + parts := clickHouseDecimalPattern.FindStringSubmatch(lower) + return fmt.Sprintf("numeric(%s,%s)", parts[2], parts[3]), warnings + case clickHouseStringArgsPattern.MatchString(lower): + parts := clickHouseStringArgsPattern.FindStringSubmatch(lower) + return fmt.Sprintf("varchar(%s)", parts[1]), warnings + default: + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 暂无专门 PG-like 映射,已降级为 text", col.Name, col.Type)) + return "text", warnings + } +} + +func mapClickHouseColumnToMySQL(col connection.ColumnDefinition) (string, []string) { + raw := strings.TrimSpace(col.Type) + lower := strings.ToLower(raw) + warnings := make([]string, 0) + nullable := false + if strings.HasPrefix(lower, "nullable(") && strings.HasSuffix(lower, ")") { + nullable = true + raw = strings.TrimSpace(raw[len("Nullable(") : len(raw)-1]) + lower = strings.ToLower(raw) + } + for { + if strings.HasPrefix(lower, "lowcardinality(") && strings.HasSuffix(lower, ")") { + raw = strings.TrimSpace(raw[len("LowCardinality(") : len(raw)-1]) + lower = strings.ToLower(raw) + continue + } + break + } + _ = nullable + switch { + case lower == "bool" || 
lower == "boolean" || lower == "uint8": + return "tinyint(1)", warnings + case lower == "int8": + return "tinyint", warnings + case lower == "uint16": + return "smallint unsigned", warnings + case lower == "int16": + return "smallint", warnings + case lower == "uint32": + return "int unsigned", warnings + case lower == "int32": + return "int", warnings + case lower == "uint64": + return "bigint unsigned", warnings + case lower == "int64": + return "bigint", warnings + case lower == "float32": + return "float", warnings + case lower == "float64": + return "double", warnings + case lower == "date": + return "date", warnings + case strings.HasPrefix(lower, "datetime"): + return "datetime", warnings + case lower == "string": + return "text", warnings + case lower == "uuid": + return "char(36)", warnings + case lower == "json", strings.HasPrefix(lower, "map("), strings.HasPrefix(lower, "array("), strings.HasPrefix(lower, "tuple("), strings.HasPrefix(lower, "nested("): + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已降级为 json", col.Name, col.Type)) + return "json", warnings + case clickHouseDecimalPattern.MatchString(lower): + parts := clickHouseDecimalPattern.FindStringSubmatch(lower) + return fmt.Sprintf("decimal(%s,%s)", parts[2], parts[3]), warnings + case clickHouseStringArgsPattern.MatchString(lower): + parts := clickHouseStringArgsPattern.FindStringSubmatch(lower) + return fmt.Sprintf("varchar(%s)", parts[1]), warnings + default: + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 暂无专门映射,已降级为 text", col.Name, col.Type)) + return "text", warnings + } +} diff --git a/internal/sync/migration_kernel_router.go b/internal/sync/migration_kernel_router.go new file mode 100644 index 0000000..aa88df2 --- /dev/null +++ b/internal/sync/migration_kernel_router.go @@ -0,0 +1,379 @@ +package sync + +import ( + "GoNavi-Wails/internal/connection" + "GoNavi-Wails/internal/db" + "fmt" + "strings" +) + +type genericLegacyPlanner struct{} + +type mysqlToPGLikePlanner struct{} + +type mysqlToClickHousePlanner struct{} + +type pgLikeToClickHousePlanner struct{} + +type clickHouseToMySQLPlanner struct{} + +type clickHouseToPGLikePlanner struct{} + +type mysqlToMongoPlanner struct{} + +type pgLikeToMongoPlanner struct{} + +type clickHouseToMongoPlanner struct{} + +type tdengineToMongoPlanner struct{} + +type mongoToMySQLPlanner struct{} + +type mongoToPGLikePlanner struct{} + +type pgLikeToMySQLPlanner struct{} + +type tdengineToMySQLPlanner struct{} + +type tdengineToPGLikePlanner struct{} + +type mongoToRelationalPlanner struct{} + +func buildSchemaMigrationPlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + ctx := MigrationBuildContext{ + Config: config, + TableName: tableName, + SourceDB: sourceDB, + TargetDB: targetDB, + } + planner := resolveMigrationPlanner(ctx) + if planner == nil { + return buildSchemaMigrationPlanLegacy(config, tableName, sourceDB, targetDB) + } + return planner.BuildPlan(ctx) +} + +func resolveMigrationPlanner(ctx MigrationBuildContext) MigrationPlanner { + planners := []MigrationPlanner{ + mysqlToPGLikePlanner{}, + mySQLLikeToTDenginePlanner{}, + pgLikeToTDenginePlanner{}, + clickHouseToTDenginePlanner{}, + tdengineToTDenginePlanner{}, + tdengineToPGLikePlanner{}, + tdengineToMySQLPlanner{}, + mysqlToClickHousePlanner{}, + pgLikeToClickHousePlanner{}, + clickHouseToMySQLPlanner{}, + clickHouseToPGLikePlanner{}, + mysqlToMongoPlanner{}, + 
pgLikeToMongoPlanner{}, + clickHouseToMongoPlanner{}, + tdengineToMongoPlanner{}, + mongoToMySQLPlanner{}, + mongoToPGLikePlanner{}, + pgLikeToMySQLPlanner{}, + mongoToRelationalPlanner{}, + genericLegacyPlanner{}, + } + bestLevel := MigrationSupportLevelUnsupported + var bestPlanner MigrationPlanner + for _, planner := range planners { + level := planner.SupportLevel(ctx) + if migrationSupportRank(level) > migrationSupportRank(bestLevel) { + bestLevel = level + bestPlanner = planner + } + } + return bestPlanner +} + +func migrationSupportRank(level MigrationSupportLevel) int { + switch level { + case MigrationSupportLevelFull: + return 4 + case MigrationSupportLevelPlanned: + return 3 + case MigrationSupportLevelPartial: + return 2 + default: + return 1 + } +} + +func isMySQLLikeType(dbType string) bool { + return isMySQLLikeWritableTargetType(dbType) +} + +func classifyMigrationDataModel(dbType string) MigrationDataModel { + switch normalizeMigrationDBType(dbType) { + case "mysql", "mariadb", "postgres", "kingbase", "highgo", "vastbase", "oracle", "sqlserver", "dameng", "sqlite", "duckdb": + return MigrationDataModelRelational + case "mongodb": + return MigrationDataModelDocument + case "clickhouse", "diros", "sphinx": + return MigrationDataModelColumnar + case "tdengine": + return MigrationDataModelTimeSeries + case "redis": + return MigrationDataModelKeyValue + default: + return MigrationDataModelCustom + } +} + +func (genericLegacyPlanner) Name() string { return "generic-legacy-planner" } + +func (genericLegacyPlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel { + _ = ctx + return MigrationSupportLevelPartial +} + +func (genericLegacyPlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + return buildSchemaMigrationPlanLegacy(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB) +} + +func (mysqlToPGLikePlanner) Name() string { return "mysql-pglike-planner" } + +func (mysqlToPGLikePlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel { + sourceType := resolveMigrationDBType(ctx.Config.SourceConfig) + targetType := resolveMigrationDBType(ctx.Config.TargetConfig) + if isMySQLLikeSourceType(sourceType) && isPGLikeTarget(targetType) { + return MigrationSupportLevelFull + } + return MigrationSupportLevelUnsupported +} + +func (mysqlToPGLikePlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + return buildMySQLToPGLikePlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB) +} + +func (tdengineToMySQLPlanner) Name() string { return "tdengine-mysql-planner" } + +func (tdengineToMySQLPlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel { + sourceType := resolveMigrationDBType(ctx.Config.SourceConfig) + targetType := resolveMigrationDBType(ctx.Config.TargetConfig) + if sourceType == "tdengine" && isMySQLLikeWritableTargetType(targetType) { + return MigrationSupportLevelFull + } + return MigrationSupportLevelUnsupported +} + +func (tdengineToMySQLPlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + return buildTDengineToMySQLPlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB) +} + +func (tdengineToPGLikePlanner) Name() string { return "tdengine-pglike-planner" } + +func (tdengineToPGLikePlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel { + sourceType := 
resolveMigrationDBType(ctx.Config.SourceConfig) + targetType := resolveMigrationDBType(ctx.Config.TargetConfig) + if sourceType == "tdengine" && isPGLikeTarget(targetType) { + return MigrationSupportLevelFull + } + return MigrationSupportLevelUnsupported +} + +func (tdengineToPGLikePlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + return buildTDengineToPGLikePlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB) +} + +func (mysqlToClickHousePlanner) Name() string { return "mysql-clickhouse-planner" } + +func (mysqlToClickHousePlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel { + sourceType := resolveMigrationDBType(ctx.Config.SourceConfig) + targetType := resolveMigrationDBType(ctx.Config.TargetConfig) + if isMySQLCoreType(sourceType) && targetType == "clickhouse" { + return MigrationSupportLevelFull + } + return MigrationSupportLevelUnsupported +} + +func (mysqlToClickHousePlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + return buildMySQLToClickHousePlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB) +} + +func (pgLikeToClickHousePlanner) Name() string { return "pglike-clickhouse-planner" } + +func (pgLikeToClickHousePlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel { + sourceType := resolveMigrationDBType(ctx.Config.SourceConfig) + targetType := resolveMigrationDBType(ctx.Config.TargetConfig) + if isPGLikeSource(sourceType) && targetType == "clickhouse" { + return MigrationSupportLevelFull + } + return MigrationSupportLevelUnsupported +} + +func (pgLikeToClickHousePlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + return buildPGLikeToClickHousePlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB) +} + +func (clickHouseToMySQLPlanner) Name() string { return "clickhouse-mysql-planner" } + +func (clickHouseToMySQLPlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel { + sourceType := resolveMigrationDBType(ctx.Config.SourceConfig) + targetType := resolveMigrationDBType(ctx.Config.TargetConfig) + if sourceType == "clickhouse" && isMySQLLikeWritableTargetType(targetType) { + return MigrationSupportLevelFull + } + return MigrationSupportLevelUnsupported +} + +func (clickHouseToMySQLPlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + return buildClickHouseToMySQLPlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB) +} + +func (clickHouseToPGLikePlanner) Name() string { return "clickhouse-pglike-planner" } + +func (clickHouseToPGLikePlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel { + sourceType := resolveMigrationDBType(ctx.Config.SourceConfig) + targetType := resolveMigrationDBType(ctx.Config.TargetConfig) + if sourceType == "clickhouse" && isPGLikeTarget(targetType) { + return MigrationSupportLevelFull + } + return MigrationSupportLevelUnsupported +} + +func (clickHouseToPGLikePlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + return buildClickHouseToPGLikePlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB) +} + +func (mysqlToMongoPlanner) Name() string { return "mysql-mongo-planner" } + +func (mysqlToMongoPlanner) SupportLevel(ctx 
MigrationBuildContext) MigrationSupportLevel { + sourceType := resolveMigrationDBType(ctx.Config.SourceConfig) + targetType := resolveMigrationDBType(ctx.Config.TargetConfig) + if isMySQLCoreType(sourceType) && targetType == "mongodb" { + return MigrationSupportLevelFull + } + return MigrationSupportLevelUnsupported +} + +func (mysqlToMongoPlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + return buildMySQLToMongoPlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB) +} + +func (pgLikeToMongoPlanner) Name() string { return "pglike-mongo-planner" } + +func (pgLikeToMongoPlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel { + sourceType := resolveMigrationDBType(ctx.Config.SourceConfig) + targetType := resolveMigrationDBType(ctx.Config.TargetConfig) + if isPGLikeSource(sourceType) && targetType == "mongodb" { + return MigrationSupportLevelFull + } + return MigrationSupportLevelUnsupported +} + +func (pgLikeToMongoPlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + return buildPGLikeToMongoPlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB) +} + +func (clickHouseToMongoPlanner) Name() string { return "clickhouse-mongo-planner" } + +func (clickHouseToMongoPlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel { + sourceType := resolveMigrationDBType(ctx.Config.SourceConfig) + targetType := resolveMigrationDBType(ctx.Config.TargetConfig) + if sourceType == "clickhouse" && targetType == "mongodb" { + return MigrationSupportLevelFull + } + return MigrationSupportLevelUnsupported +} + +func (clickHouseToMongoPlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + return buildClickHouseToMongoPlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB) +} + +func (tdengineToMongoPlanner) Name() string { return "tdengine-mongo-planner" } + +func (tdengineToMongoPlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel { + sourceType := resolveMigrationDBType(ctx.Config.SourceConfig) + targetType := resolveMigrationDBType(ctx.Config.TargetConfig) + if sourceType == "tdengine" && targetType == "mongodb" { + return MigrationSupportLevelFull + } + return MigrationSupportLevelUnsupported +} + +func (tdengineToMongoPlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + return buildTDengineToMongoPlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB) +} + +func (mongoToMySQLPlanner) Name() string { return "mongo-mysql-planner" } + +func (mongoToMySQLPlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel { + sourceType := resolveMigrationDBType(ctx.Config.SourceConfig) + targetType := resolveMigrationDBType(ctx.Config.TargetConfig) + if sourceType == "mongodb" && isMySQLLikeWritableTargetType(targetType) { + return MigrationSupportLevelFull + } + return MigrationSupportLevelUnsupported +} + +func (mongoToMySQLPlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + return buildMongoToMySQLPlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB) +} + +func (mongoToPGLikePlanner) Name() string { return "mongo-pglike-planner" } + +func (mongoToPGLikePlanner) SupportLevel(ctx MigrationBuildContext) 
MigrationSupportLevel { + sourceType := resolveMigrationDBType(ctx.Config.SourceConfig) + targetType := resolveMigrationDBType(ctx.Config.TargetConfig) + if sourceType == "mongodb" && isPGLikeTarget(targetType) { + return MigrationSupportLevelFull + } + return MigrationSupportLevelUnsupported +} + +func (mongoToPGLikePlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + return buildMongoToPGLikePlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB) +} + +func (pgLikeToMySQLPlanner) Name() string { return "pglike-mysql-planner" } + +func (pgLikeToMySQLPlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel { + sourceType := resolveMigrationDBType(ctx.Config.SourceConfig) + targetType := resolveMigrationDBType(ctx.Config.TargetConfig) + if isPGLikeSource(sourceType) && isMySQLLikeWritableTargetType(targetType) { + return MigrationSupportLevelFull + } + return MigrationSupportLevelUnsupported +} + +func (pgLikeToMySQLPlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + return buildPGLikeToMySQLPlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB) +} + +func (mongoToRelationalPlanner) Name() string { return "mongo-relational-inference-planner" } + +func (mongoToRelationalPlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel { + sourceType := resolveMigrationDBType(ctx.Config.SourceConfig) + targetType := resolveMigrationDBType(ctx.Config.TargetConfig) + if !shouldUseSchemaInference(sourceType, targetType) { + return MigrationSupportLevelUnsupported + } + return MigrationSupportLevelPlanned +} + +func (mongoToRelationalPlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + sourceType := resolveMigrationDBType(ctx.Config.SourceConfig) + targetType := resolveMigrationDBType(ctx.Config.TargetConfig) + inference, err := inferSchemaForPair(sourceType, targetType, ctx.TableName) + if err != nil { + return SchemaMigrationPlan{}, nil, nil, err + } + plan := SchemaMigrationPlan{} + plan.SourceSchema, plan.SourceTable = normalizeSchemaAndTable(sourceType, ctx.Config.SourceConfig.Database, ctx.TableName) + plan.TargetSchema, plan.TargetTable = normalizeSchemaAndTable(targetType, ctx.Config.TargetConfig.Database, ctx.TableName) + plan.SourceQueryTable = qualifiedNameForQuery(sourceType, plan.SourceSchema, plan.SourceTable, ctx.TableName) + plan.TargetQueryTable = qualifiedNameForQuery(targetType, plan.TargetSchema, plan.TargetTable, ctx.TableName) + plan.PlannedAction = "当前库对已进入迁移内核规划阶段,等待 schema 推断与目标方言生成器落地" + for _, issue := range inference.Issues { + msg := strings.TrimSpace(issue.Message) + if msg == "" { + continue + } + plan.Warnings = append(plan.Warnings, msg) + } + plan.Warnings = append(plan.Warnings, fmt.Sprintf("迁移对象=%s,目标类型=%s,当前仅提供规划入口,暂不执行自动建表", inference.Object.Kind, targetType)) + return dedupeSchemaMigrationPlan(plan), nil, nil, nil +} diff --git a/internal/sync/migration_kernel_router_test.go b/internal/sync/migration_kernel_router_test.go new file mode 100644 index 0000000..71e84e7 --- /dev/null +++ b/internal/sync/migration_kernel_router_test.go @@ -0,0 +1,447 @@ +package sync + +import ( + "GoNavi-Wails/internal/connection" + "strings" + "testing" +) + +func TestClassifyMigrationDataModel(t *testing.T) { + t.Parallel() + + cases := map[string]MigrationDataModel{ + "mysql": 
MigrationDataModelRelational, + "postgres": MigrationDataModelRelational, + "kingbase": MigrationDataModelRelational, + "mongodb": MigrationDataModelDocument, + "clickhouse": MigrationDataModelColumnar, + "tdengine": MigrationDataModelTimeSeries, + "redis": MigrationDataModelKeyValue, + "custom": MigrationDataModelCustom, + } + + for input, want := range cases { + input, want := input, want + t.Run(input, func(t *testing.T) { + t.Parallel() + got := classifyMigrationDataModel(input) + if got != want { + t.Fatalf("unexpected data model, input=%s got=%s want=%s", input, got, want) + } + }) + } +} + +func TestResolveMigrationPlanner_PrefersMySQLKingbasePlanner(t *testing.T) { + t.Parallel() + + planner := resolveMigrationPlanner(MigrationBuildContext{ + Config: SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "mysql"}, + TargetConfig: connection.ConnectionConfig{Type: "kingbase"}, + }, + }) + if planner == nil { + t.Fatalf("expected planner") + } + if planner.Name() != "mysql-pglike-planner" { + t.Fatalf("unexpected planner: %s", planner.Name()) + } +} + +func TestResolveMigrationPlanner_UsesSchemaInferencePlannerForMongoToMySQL(t *testing.T) { + t.Parallel() + + planner := resolveMigrationPlanner(MigrationBuildContext{ + Config: SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "mongodb"}, + TargetConfig: connection.ConnectionConfig{Type: "mysql"}, + }, + }) + if planner == nil { + t.Fatalf("expected planner") + } + if planner.Name() != "mongo-mysql-planner" { + t.Fatalf("unexpected planner: %s", planner.Name()) + } +} + +func TestInferSchemaForPair_MongoToMySQLReturnsPlannedWarning(t *testing.T) { + t.Parallel() + + result, err := inferSchemaForPair("mongodb", "mysql", "users") + if err != nil { + t.Fatalf("inferSchemaForPair returned error: %v", err) + } + if !result.NeedsReview { + t.Fatalf("expected needs review") + } + if result.Object.Name != "users" { + t.Fatalf("unexpected object name: %s", result.Object.Name) + } + if len(result.Issues) == 0 || !strings.Contains(result.Issues[0].Message, "schema 推断") { + t.Fatalf("unexpected issues: %+v", result.Issues) + } +} + +func TestResolveMigrationPlanner_UsesPGLikeMySQLPlannerForKingbaseToMySQL(t *testing.T) { + t.Parallel() + + planner := resolveMigrationPlanner(MigrationBuildContext{ + Config: SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "kingbase"}, + TargetConfig: connection.ConnectionConfig{Type: "mysql"}, + }, + }) + if planner == nil { + t.Fatalf("expected planner") + } + if planner.Name() != "pglike-mysql-planner" { + t.Fatalf("unexpected planner: %s", planner.Name()) + } +} + +func TestResolveMigrationPlanner_UsesMySQLPGLikePlannerForMySQLToPostgres(t *testing.T) { + t.Parallel() + + planner := resolveMigrationPlanner(MigrationBuildContext{ + Config: SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "mysql"}, + TargetConfig: connection.ConnectionConfig{Type: "postgres"}, + }, + }) + if planner == nil { + t.Fatalf("expected planner") + } + if planner.Name() != "mysql-pglike-planner" { + t.Fatalf("unexpected planner: %s", planner.Name()) + } +} + +func TestResolveMigrationPlanner_UsesMySQLClickHousePlanner(t *testing.T) { + t.Parallel() + + planner := resolveMigrationPlanner(MigrationBuildContext{ + Config: SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "mysql"}, + TargetConfig: connection.ConnectionConfig{Type: "clickhouse"}, + }, + }) + if planner == nil { + t.Fatalf("expected planner") + } + if planner.Name() != "mysql-clickhouse-planner" { + t.Fatalf("unexpected 
planner: %s", planner.Name()) + } +} + +func TestResolveMigrationPlanner_UsesClickHouseMySQLPlanner(t *testing.T) { + t.Parallel() + + planner := resolveMigrationPlanner(MigrationBuildContext{ + Config: SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "clickhouse"}, + TargetConfig: connection.ConnectionConfig{Type: "mysql"}, + }, + }) + if planner == nil { + t.Fatalf("expected planner") + } + if planner.Name() != "clickhouse-mysql-planner" { + t.Fatalf("unexpected planner: %s", planner.Name()) + } +} + +func TestResolveMigrationPlanner_UsesMySQLMongoPlanner(t *testing.T) { + t.Parallel() + + planner := resolveMigrationPlanner(MigrationBuildContext{ + Config: SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "mysql"}, + TargetConfig: connection.ConnectionConfig{Type: "mongodb"}, + }, + }) + if planner == nil || planner.Name() != "mysql-mongo-planner" { + t.Fatalf("unexpected planner: %v", planner) + } +} + +func TestResolveMigrationPlanner_UsesMongoMySQLPlanner(t *testing.T) { + t.Parallel() + + planner := resolveMigrationPlanner(MigrationBuildContext{ + Config: SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "mongodb"}, + TargetConfig: connection.ConnectionConfig{Type: "mysql"}, + }, + }) + if planner == nil || planner.Name() != "mongo-mysql-planner" { + t.Fatalf("unexpected planner: %v", planner) + } +} + +func TestResolveMigrationPlanner_UsesMongoPGLikePlanner(t *testing.T) { + t.Parallel() + + planner := resolveMigrationPlanner(MigrationBuildContext{ + Config: SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "mongodb"}, + TargetConfig: connection.ConnectionConfig{Type: "postgres"}, + }, + }) + if planner == nil || planner.Name() != "mongo-pglike-planner" { + t.Fatalf("unexpected planner: %v", planner) + } +} + +func TestResolveMigrationPlanner_UsesPGLikeMongoPlanner(t *testing.T) { + t.Parallel() + + planner := resolveMigrationPlanner(MigrationBuildContext{ + Config: SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "postgres"}, + TargetConfig: connection.ConnectionConfig{Type: "mongodb"}, + }, + }) + if planner == nil || planner.Name() != "pglike-mongo-planner" { + t.Fatalf("unexpected planner: %v", planner) + } +} + +func TestResolveMigrationPlanner_UsesClickHouseMongoPlanner(t *testing.T) { + t.Parallel() + + planner := resolveMigrationPlanner(MigrationBuildContext{ + Config: SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "clickhouse"}, + TargetConfig: connection.ConnectionConfig{Type: "mongodb"}, + }, + }) + if planner == nil || planner.Name() != "clickhouse-mongo-planner" { + t.Fatalf("unexpected planner: %v", planner) + } +} + +func TestResolveMigrationPlanner_UsesTDengineMongoPlanner(t *testing.T) { + t.Parallel() + + planner := resolveMigrationPlanner(MigrationBuildContext{ + Config: SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "tdengine"}, + TargetConfig: connection.ConnectionConfig{Type: "mongodb"}, + }, + }) + if planner == nil || planner.Name() != "tdengine-mongo-planner" { + t.Fatalf("unexpected planner: %v", planner) + } +} + +func TestResolveMigrationPlanner_UsesMySQLPGLikePlannerForDirosToPostgres(t *testing.T) { + t.Parallel() + + planner := resolveMigrationPlanner(MigrationBuildContext{ + Config: SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "diros"}, + TargetConfig: connection.ConnectionConfig{Type: "postgres"}, + }, + }) + if planner == nil || planner.Name() != "mysql-pglike-planner" { + t.Fatalf("unexpected planner: %v", planner) + } +} + +func 
TestResolveMigrationPlanner_UsesPGLikeMySQLPlannerForPostgresToDiros(t *testing.T) { + t.Parallel() + + planner := resolveMigrationPlanner(MigrationBuildContext{ + Config: SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "postgres"}, + TargetConfig: connection.ConnectionConfig{Type: "diros"}, + }, + }) + if planner == nil || planner.Name() != "pglike-mysql-planner" { + t.Fatalf("unexpected planner: %v", planner) + } +} + +func TestResolveMigrationPlanner_UsesMySQLPGLikePlannerForMySQLToDuckDB(t *testing.T) { + t.Parallel() + + planner := resolveMigrationPlanner(MigrationBuildContext{ + Config: SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "mysql"}, + TargetConfig: connection.ConnectionConfig{Type: "duckdb"}, + }, + }) + if planner == nil || planner.Name() != "mysql-pglike-planner" { + t.Fatalf("unexpected planner: %v", planner) + } +} + +func TestResolveMigrationPlanner_UsesPGLikeClickHousePlanner(t *testing.T) { + t.Parallel() + + planner := resolveMigrationPlanner(MigrationBuildContext{ + Config: SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "postgres"}, + TargetConfig: connection.ConnectionConfig{Type: "clickhouse"}, + }, + }) + if planner == nil || planner.Name() != "pglike-clickhouse-planner" { + t.Fatalf("unexpected planner: %v", planner) + } +} + +func TestResolveMigrationPlanner_UsesPGLikeMySQLPlannerForDuckDBToMySQL(t *testing.T) { + t.Parallel() + + planner := resolveMigrationPlanner(MigrationBuildContext{ + Config: SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "duckdb"}, + TargetConfig: connection.ConnectionConfig{Type: "mysql"}, + }, + }) + if planner == nil || planner.Name() != "pglike-mysql-planner" { + t.Fatalf("unexpected planner: %v", planner) + } +} + +func TestResolveMigrationPlanner_UsesMySQLPGLikePlannerForSphinxToPostgres(t *testing.T) { + t.Parallel() + + planner := resolveMigrationPlanner(MigrationBuildContext{ + Config: SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "sphinx"}, + TargetConfig: connection.ConnectionConfig{Type: "postgres"}, + }, + }) + if planner == nil || planner.Name() != "mysql-pglike-planner" { + t.Fatalf("unexpected planner: %v", planner) + } +} + +func TestResolveMigrationPlanner_UsesPGLikeMySQLPlannerForCustomKingbaseToMySQL(t *testing.T) { + t.Parallel() + + planner := resolveMigrationPlanner(MigrationBuildContext{ + Config: SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "custom", Driver: "kingbase8"}, + TargetConfig: connection.ConnectionConfig{Type: "mysql"}, + }, + }) + if planner == nil || planner.Name() != "pglike-mysql-planner" { + t.Fatalf("unexpected planner: %v", planner) + } +} + +func TestResolveMigrationPlanner_UsesMySQLPGLikePlannerForMySQLToCustomPostgres(t *testing.T) { + t.Parallel() + + planner := resolveMigrationPlanner(MigrationBuildContext{ + Config: SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "mysql"}, + TargetConfig: connection.ConnectionConfig{Type: "custom", Driver: "postgresql"}, + }, + }) + if planner == nil || planner.Name() != "mysql-pglike-planner" { + t.Fatalf("unexpected planner: %v", planner) + } +} + +func TestResolveMigrationPlanner_UsesTDengineMySQLPlanner(t *testing.T) { + t.Parallel() + + planner := resolveMigrationPlanner(MigrationBuildContext{ + Config: SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "tdengine"}, + TargetConfig: connection.ConnectionConfig{Type: "mysql"}, + }, + }) + if planner == nil || planner.Name() != "tdengine-mysql-planner" { + t.Fatalf("unexpected planner: 
%v", planner) + } +} + +func TestResolveMigrationPlanner_UsesTDenginePGLikePlanner(t *testing.T) { + t.Parallel() + + planner := resolveMigrationPlanner(MigrationBuildContext{ + Config: SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "tdengine"}, + TargetConfig: connection.ConnectionConfig{Type: "kingbase"}, + }, + }) + if planner == nil || planner.Name() != "tdengine-pglike-planner" { + t.Fatalf("unexpected planner: %v", planner) + } +} + +func TestResolveMigrationPlanner_UsesMySQLLikeTDenginePlanner(t *testing.T) { + t.Parallel() + + planner := resolveMigrationPlanner(MigrationBuildContext{ + Config: SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "mysql"}, + TargetConfig: connection.ConnectionConfig{Type: "tdengine"}, + }, + }) + if planner == nil || planner.Name() != "mysqllike-tdengine-planner" { + t.Fatalf("unexpected planner: %v", planner) + } +} + +func TestResolveMigrationPlanner_UsesPGLikeTDenginePlanner(t *testing.T) { + t.Parallel() + + planner := resolveMigrationPlanner(MigrationBuildContext{ + Config: SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "postgres"}, + TargetConfig: connection.ConnectionConfig{Type: "tdengine"}, + }, + }) + if planner == nil || planner.Name() != "pglike-tdengine-planner" { + t.Fatalf("unexpected planner: %v", planner) + } +} + +func TestResolveMigrationPlanner_UsesClickHouseTDenginePlanner(t *testing.T) { + t.Parallel() + + planner := resolveMigrationPlanner(MigrationBuildContext{ + Config: SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "clickhouse"}, + TargetConfig: connection.ConnectionConfig{Type: "tdengine"}, + }, + }) + if planner == nil || planner.Name() != "clickhouse-tdengine-planner" { + t.Fatalf("unexpected planner: %v", planner) + } +} + +func TestResolveMigrationPlanner_UsesClickHousePGLikePlanner(t *testing.T) { + t.Parallel() + + planner := resolveMigrationPlanner(MigrationBuildContext{ + Config: SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "clickhouse"}, + TargetConfig: connection.ConnectionConfig{Type: "postgres"}, + }, + }) + if planner == nil || planner.Name() != "clickhouse-pglike-planner" { + t.Fatalf("unexpected planner: %v", planner) + } +} + +func TestResolveMigrationPlanner_UsesTDengineTDenginePlanner(t *testing.T) { + t.Parallel() + + planner := resolveMigrationPlanner(MigrationBuildContext{ + Config: SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "tdengine"}, + TargetConfig: connection.ConnectionConfig{Type: "tdengine"}, + }, + }) + if planner == nil || planner.Name() != "tdengine-tdengine-planner" { + t.Fatalf("unexpected planner: %v", planner) + } +} diff --git a/internal/sync/migration_kernel_types.go b/internal/sync/migration_kernel_types.go new file mode 100644 index 0000000..f74fdcb --- /dev/null +++ b/internal/sync/migration_kernel_types.go @@ -0,0 +1,104 @@ +package sync + +import ( + "GoNavi-Wails/internal/connection" + "GoNavi-Wails/internal/db" +) + +type MigrationDataModel string + +const ( + MigrationDataModelRelational MigrationDataModel = "relational" + MigrationDataModelDocument MigrationDataModel = "document" + MigrationDataModelColumnar MigrationDataModel = "columnar" + MigrationDataModelTimeSeries MigrationDataModel = "timeseries" + MigrationDataModelKeyValue MigrationDataModel = "keyvalue" + MigrationDataModelCustom MigrationDataModel = "custom" +) + +type MigrationObjectKind string + +const ( + MigrationObjectKindTable MigrationObjectKind = "table" + MigrationObjectKindCollection MigrationObjectKind = "collection" 
+ MigrationObjectKindKeyspace MigrationObjectKind = "keyspace" +) + +type MigrationSupportLevel string + +const ( + MigrationSupportLevelFull MigrationSupportLevel = "full" + MigrationSupportLevelPartial MigrationSupportLevel = "partial" + MigrationSupportLevelPlanned MigrationSupportLevel = "planned" + MigrationSupportLevelUnsupported MigrationSupportLevel = "unsupported" +) + +type CanonicalFieldSpec struct { + Name string + SourceType string + CanonicalType string + Nullable bool + DefaultValue *string + AutoIncrement bool + Comment string + NestedPath string + Confidence float64 +} + +type CanonicalIndexSpec struct { + Name string + Kind string + Columns []string + Expression string + PrefixLength int + Supported bool + DegradeStrategy string + Unique bool +} + +type CanonicalConstraintSpec struct { + Name string + Kind string + Columns []string + RefName string +} + +type CanonicalObjectSpec struct { + Name string + Schema string + Kind MigrationObjectKind + Fields []CanonicalFieldSpec + PrimaryKey []string + Indexes []CanonicalIndexSpec + Constraints []CanonicalConstraintSpec + Comments []string + SourceHints map[string]string +} + +type SchemaInferenceIssue struct { + Field string + Level string + Message string + Resolution string +} + +type SchemaInferenceResult struct { + Object CanonicalObjectSpec + Issues []SchemaInferenceIssue + SampleSize int + Confidence float64 + NeedsReview bool +} + +type MigrationBuildContext struct { + Config SyncConfig + TableName string + SourceDB db.Database + TargetDB db.Database +} + +type MigrationPlanner interface { + Name() string + SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel + BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) +} diff --git a/internal/sync/migration_mongodb.go b/internal/sync/migration_mongodb.go new file mode 100644 index 0000000..23a97c4 --- /dev/null +++ b/internal/sync/migration_mongodb.go @@ -0,0 +1,603 @@ +package sync + +import ( + "GoNavi-Wails/internal/connection" + "GoNavi-Wails/internal/db" + "encoding/json" + "fmt" + "sort" + "strings" + "time" +) + +func buildMySQLToMongoPlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + return buildTabularToMongoPlan(config, tableName, sourceDB, targetDB) +} + +func buildPGLikeToMongoPlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + return buildTabularToMongoPlan(config, tableName, sourceDB, targetDB) +} + +func buildClickHouseToMongoPlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + return buildTabularToMongoPlan(config, tableName, sourceDB, targetDB) +} + +func buildTDengineToMongoPlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + return buildTabularToMongoPlan(config, tableName, sourceDB, targetDB) +} + +func buildTabularToMongoPlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + plan := SchemaMigrationPlan{} + sourceType := 
resolveMigrationDBType(config.SourceConfig) + targetType := resolveMigrationDBType(config.TargetConfig) + plan.SourceSchema, plan.SourceTable = normalizeSchemaAndTable(sourceType, config.SourceConfig.Database, tableName) + plan.TargetSchema, plan.TargetTable = normalizeSchemaAndTable(targetType, config.TargetConfig.Database, tableName) + plan.SourceQueryTable = qualifiedNameForQuery(sourceType, plan.SourceSchema, plan.SourceTable, tableName) + plan.TargetQueryTable = qualifiedNameForQuery(targetType, plan.TargetSchema, plan.TargetTable, tableName) + plan.PlannedAction = "使用已有目标集合导入" + + sourceCols, sourceExists, err := inspectTableColumns(sourceDB, plan.SourceSchema, plan.SourceTable) + if err != nil { + return plan, nil, nil, fmt.Errorf("获取源表字段失败: %w", err) + } + if !sourceExists { + return plan, nil, nil, fmt.Errorf("源表不存在或无列定义: %s", tableName) + } + + targetExists, err := inspectMongoCollection(targetDB, plan.TargetSchema, plan.TargetTable) + if err != nil { + return plan, sourceCols, nil, fmt.Errorf("检查目标集合失败: %w", err) + } + plan.TargetTableExists = targetExists + + strategy := normalizeTargetTableStrategy(config.TargetTableStrategy) + if targetExists { + plan.Warnings = append(plan.Warnings, "MongoDB 为弱 schema 目标,字段结构以写入文档为准,不执行目标列校验") + return dedupeSchemaMigrationPlan(plan), sourceCols, nil, nil + } + + switch strategy { + case "existing_only": + plan.PlannedAction = "目标集合不存在,需先手工创建" + plan.Warnings = append(plan.Warnings, "当前策略要求目标集合已存在,执行时不会自动创建") + return dedupeSchemaMigrationPlan(plan), sourceCols, nil, nil + case "smart", "auto_create_if_missing": + plan.AutoCreate = true + plan.PlannedAction = "目标集合不存在,将自动创建集合后导入" + createCmd, err := buildMongoCreateCollectionCommand(plan.TargetTable) + if err != nil { + return plan, sourceCols, nil, err + } + plan.PreDataSQL = append(plan.PreDataSQL, createCmd) + if config.CreateIndexes { + indexCmds, warnings, unsupported, created, skipped, err := buildMongoIndexCommands(sourceDB, plan.SourceSchema, plan.SourceTable, plan.TargetTable) + if err != nil { + plan.Warnings = append(plan.Warnings, fmt.Sprintf("读取源表索引失败,已跳过索引迁移:%v", err)) + } else { + plan.PostDataSQL = append(plan.PostDataSQL, indexCmds...) + plan.Warnings = append(plan.Warnings, warnings...) + plan.UnsupportedObjects = append(plan.UnsupportedObjects, unsupported...) + plan.IndexesToCreate = created + plan.IndexesSkipped = skipped + } + } + return dedupeSchemaMigrationPlan(plan), sourceCols, nil, nil + default: + return dedupeSchemaMigrationPlan(plan), sourceCols, nil, nil + } +} + +func buildMongoToMySQLPlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + plan := SchemaMigrationPlan{} + plan.SourceSchema, plan.SourceTable = normalizeSchemaAndTable(config.SourceConfig.Type, config.SourceConfig.Database, tableName) + plan.TargetSchema, plan.TargetTable = normalizeSchemaAndTable(config.TargetConfig.Type, config.TargetConfig.Database, tableName) + plan.SourceQueryTable = qualifiedNameForQuery(config.SourceConfig.Type, plan.SourceSchema, plan.SourceTable, tableName) + plan.TargetQueryTable = qualifiedNameForQuery(config.TargetConfig.Type, plan.TargetSchema, plan.TargetTable, tableName) + plan.PlannedAction = "使用已有目标表导入" + + sourceCols, warnings, err := inferMongoCollectionColumns(sourceDB, plan.SourceTable) + if err != nil { + return plan, nil, nil, err + } + plan.Warnings = append(plan.Warnings, warnings...) 
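+	// 源字段来自对 MongoDB 集合的样本推断;若一个字段都推断不出,直接报错,避免生成空表结构。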
+ if len(sourceCols) == 0 { + return plan, nil, nil, fmt.Errorf("源集合未推断出可迁移字段: %s", tableName) + } + + targetCols, targetExists, err := inspectTableColumns(targetDB, plan.TargetSchema, plan.TargetTable) + if err != nil { + return plan, sourceCols, nil, fmt.Errorf("获取目标表字段失败: %w", err) + } + plan.TargetTableExists = targetExists + + strategy := normalizeTargetTableStrategy(config.TargetTableStrategy) + if targetExists { + missing := diffMissingColumnNames(sourceCols, targetCols) + if len(missing) > 0 { + plan.Warnings = append(plan.Warnings, fmt.Sprintf("目标表缺失字段 %d 个:%s", len(missing), strings.Join(missing, ", "))) + } + if config.AutoAddColumns { + addSQL, addWarnings := buildMongoToMySQLAddColumnSQL(plan.TargetQueryTable, sourceCols, targetCols) + plan.PreDataSQL = append(plan.PreDataSQL, addSQL...) + plan.Warnings = append(plan.Warnings, addWarnings...) + if len(addSQL) > 0 { + plan.PlannedAction = fmt.Sprintf("补齐缺失字段(%d)后导入", len(addSQL)) + } + } + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + } + + switch strategy { + case "existing_only": + plan.PlannedAction = "目标表不存在,需先手工创建" + plan.Warnings = append(plan.Warnings, "当前策略要求目标表已存在,执行时不会自动建表") + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + case "smart", "auto_create_if_missing": + plan.AutoCreate = true + plan.PlannedAction = "目标表不存在,将自动建表后导入" + createSQL, postSQL, moreWarnings, unsupported, idxCreate, idxSkip, err := buildMongoToMySQLCreateTablePlan(config, plan.TargetQueryTable, sourceCols, sourceDB, plan.SourceSchema, plan.SourceTable) + if err != nil { + return plan, sourceCols, targetCols, err + } + plan.CreateTableSQL = createSQL + plan.PostDataSQL = append(plan.PostDataSQL, postSQL...) + plan.Warnings = append(plan.Warnings, moreWarnings...) + plan.UnsupportedObjects = append(plan.UnsupportedObjects, unsupported...) 
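+		// 汇总计划创建与跳过的索引数量,写入迁移计划摘要字段。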
+ plan.IndexesToCreate = idxCreate + plan.IndexesSkipped = idxSkip + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + default: + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + } +} + +func inspectMongoCollection(database db.Database, dbName, collection string) (bool, error) { + items, err := database.GetTables(dbName) + if err != nil { + return false, err + } + target := strings.TrimSpace(collection) + for _, item := range items { + if strings.EqualFold(strings.TrimSpace(item), target) { + return true, nil + } + } + return false, nil +} + +func buildMongoCreateCollectionCommand(collection string) (string, error) { + cmd := map[string]interface{}{"create": strings.TrimSpace(collection)} + data, err := json.Marshal(cmd) + if err != nil { + return "", err + } + return string(data), nil +} + +func buildMongoIndexCommands(sourceDB db.Database, dbName, tableName, targetCollection string) ([]string, []string, []string, int, int, error) { + indexes, err := sourceDB.GetIndexes(dbName, tableName) + if err != nil { + return nil, nil, nil, 0, 0, err + } + grouped := groupIndexDefinitions(indexes) + cmds := make([]string, 0, len(grouped)) + warnings := make([]string, 0) + unsupported := make([]string, 0) + created := 0 + skipped := 0 + for _, idx := range grouped { + name := strings.TrimSpace(idx.Name) + if name == "" || strings.EqualFold(name, "primary") { + continue + } + if len(idx.Columns) == 0 { + skipped++ + unsupported = append(unsupported, fmt.Sprintf("索引 %s 缺少列定义,已跳过", name)) + continue + } + kind := strings.ToLower(strings.TrimSpace(idx.IndexType)) + if idx.SubPart > 0 { + skipped++ + unsupported = append(unsupported, fmt.Sprintf("索引 %s 使用前缀长度,MongoDB 目标暂不支持等价迁移", name)) + continue + } + if kind != "" && kind != "btree" { + warnings = append(warnings, fmt.Sprintf("索引 %s 类型=%s 将按普通索引迁移到 MongoDB", name, idx.IndexType)) + } + keySpec := make(map[string]int) + for _, col := range idx.Columns { + keySpec[col] = 1 + } + command := map[string]interface{}{ + "createIndexes": strings.TrimSpace(targetCollection), + "indexes": []map[string]interface{}{{ + "name": name, + "key": keySpec, + "unique": idx.Unique, + }}, + } + data, err := json.Marshal(command) + if err != nil { + skipped++ + unsupported = append(unsupported, fmt.Sprintf("索引 %s 生成 MongoDB createIndexes 命令失败:%v", name, err)) + continue + } + cmds = append(cmds, string(data)) + created++ + } + return cmds, dedupeStrings(warnings), dedupeStrings(unsupported), created, skipped, nil +} + +func inferMongoCollectionColumns(sourceDB db.Database, collection string) ([]connection.ColumnDefinition, []string, error) { + query := fmt.Sprintf(`{"find":"%s","filter":{},"limit":200}`, strings.TrimSpace(collection)) + rows, _, err := sourceDB.Query(query) + if err != nil { + return nil, nil, fmt.Errorf("读取源集合样本失败: %w", err) + } + if len(rows) == 0 { + return []connection.ColumnDefinition{{Name: "_id", Type: "varchar(64)", Nullable: "NO", Key: "PRI"}}, []string{"源集合暂无样本数据,仅按 `_id` 生成基础主键列"}, nil + } + fieldNames := make(map[string]struct{}) + for _, row := range rows { + for key := range row { + fieldNames[key] = struct{}{} + } + } + orderedFields := make([]string, 0, len(fieldNames)) + for key := range fieldNames { + orderedFields = append(orderedFields, key) + } + sort.Strings(orderedFields) + if containsString(orderedFields, "_id") { + orderedFields = moveStringToFront(orderedFields, "_id") + } + columns := make([]connection.ColumnDefinition, 0, len(orderedFields)) + warnings := make([]string, 0) + for _, field 
:= range orderedFields { + typeName, nullable, fieldWarnings := inferMongoFieldType(rows, field) + warnings = append(warnings, fieldWarnings...) + col := connection.ColumnDefinition{ + Name: field, + Type: typeName, + Nullable: ternaryString(nullable, "YES", "NO"), + Key: "", + Extra: "", + } + if field == "_id" { + col.Key = "PRI" + col.Nullable = "NO" + } + columns = append(columns, col) + } + return columns, dedupeStrings(warnings), nil +} + +func inferMongoFieldType(rows []map[string]interface{}, field string) (string, bool, []string) { + nullable := false + hasString, hasBool, hasInt, hasFloat, hasTime, hasComplex := false, false, false, false, false, false + for _, row := range rows { + value, ok := row[field] + if !ok || value == nil { + nullable = true + continue + } + switch value.(type) { + case bool: + hasBool = true + case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + hasInt = true + case float32, float64: + hasFloat = true + case time.Time: + hasTime = true + case map[string]interface{}, []interface{}: + hasComplex = true + default: + hasString = true + } + } + kinds := 0 + for _, flag := range []bool{hasString, hasBool, hasInt, hasFloat, hasTime, hasComplex} { + if flag { + kinds++ + } + } + warnings := make([]string, 0) + if kinds > 1 { + warnings = append(warnings, fmt.Sprintf("字段 %s 存在多种 BSON 值类型,已按兼容类型降级", field)) + } + if field == "_id" { + return "varchar(64)", false, warnings + } + switch { + case hasComplex: + return "json", nullable, warnings + case hasTime: + return "datetime", nullable, warnings + case hasFloat: + return "double", nullable, warnings + case hasInt: + return "bigint", nullable, warnings + case hasBool: + return "tinyint(1)", nullable, warnings + default: + return "varchar(255)", nullable, warnings + } +} + +func buildMongoToMySQLAddColumnSQL(targetQueryTable string, sourceCols, targetCols []connection.ColumnDefinition) ([]string, []string) { + targetSet := make(map[string]struct{}, len(targetCols)) + for _, col := range targetCols { + key := strings.ToLower(strings.TrimSpace(col.Name)) + if key == "" { + continue + } + targetSet[key] = struct{}{} + } + var sqlList []string + for _, col := range sourceCols { + key := strings.ToLower(strings.TrimSpace(col.Name)) + if key == "" { + continue + } + if _, ok := targetSet[key]; ok { + continue + } + sqlList = append(sqlList, fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s %s NULL", + quoteQualifiedIdentByType("mysql", targetQueryTable), + quoteIdentByType("mysql", col.Name), + strings.TrimSpace(col.Type), + )) + } + return sqlList, nil +} + +func buildMongoToMySQLCreateTablePlan(config SyncConfig, targetQueryTable string, sourceCols []connection.ColumnDefinition, sourceDB db.Database, sourceSchema, sourceTable string) (string, []string, []string, []string, int, int, error) { + columnDefs := make([]string, 0, len(sourceCols)+1) + warnings := make([]string, 0) + unsupported := make([]string, 0) + pkCols := make([]string, 0, 1) + for _, col := range sourceCols { + columnDef := fmt.Sprintf("%s %s", quoteIdentByType("mysql", col.Name), strings.TrimSpace(col.Type)) + if strings.EqualFold(strings.TrimSpace(col.Nullable), "NO") { + columnDef += " NOT NULL" + } + columnDefs = append(columnDefs, columnDef) + if col.Key == "PRI" || col.Key == "PK" { + pkCols = append(pkCols, quoteIdentByType("mysql", col.Name)) + } + } + if len(pkCols) > 0 { + columnDefs = append(columnDefs, fmt.Sprintf("PRIMARY KEY (%s)", strings.Join(pkCols, ", "))) + } else { + warnings = append(warnings, "MongoDB 
源集合未推断出稳定主键,目标表将不自动创建主键") + } + createSQL := fmt.Sprintf("CREATE TABLE %s (\n %s\n)", quoteQualifiedIdentByType("mysql", targetQueryTable), strings.Join(columnDefs, ",\n ")) + if !config.CreateIndexes { + return createSQL, nil, dedupeStrings(warnings), dedupeStrings(unsupported), 0, 0, nil + } + indexes, err := sourceDB.GetIndexes(sourceSchema, sourceTable) + if err != nil { + warnings = append(warnings, fmt.Sprintf("读取源集合索引失败,已跳过索引迁移:%v", err)) + return createSQL, nil, dedupeStrings(warnings), dedupeStrings(unsupported), 0, 0, nil + } + grouped := groupIndexDefinitions(indexes) + postSQL := make([]string, 0, len(grouped)) + created := 0 + skipped := 0 + for _, idx := range grouped { + name := strings.TrimSpace(idx.Name) + if name == "" || strings.EqualFold(name, "_id_") || strings.EqualFold(name, "primary") { + continue + } + if len(idx.Columns) == 0 { + skipped++ + unsupported = append(unsupported, fmt.Sprintf("索引 %s 缺少列定义,已跳过", name)) + continue + } + quotedCols := make([]string, 0, len(idx.Columns)) + for _, col := range idx.Columns { + quotedCols = append(quotedCols, quoteIdentByType("mysql", col)) + } + prefix := "CREATE INDEX" + if idx.Unique { + prefix = "CREATE UNIQUE INDEX" + } + postSQL = append(postSQL, fmt.Sprintf("%s %s ON %s (%s)", prefix, quoteIdentByType("mysql", name), quoteQualifiedIdentByType("mysql", targetQueryTable), strings.Join(quotedCols, ", "))) + created++ + } + return createSQL, postSQL, dedupeStrings(warnings), dedupeStrings(unsupported), created, skipped, nil +} + +func containsString(items []string, target string) bool { + for _, item := range items { + if item == target { + return true + } + } + return false +} + +func moveStringToFront(items []string, target string) []string { + out := make([]string, 0, len(items)) + for _, item := range items { + if item == target { + continue + } + out = append(out, item) + } + return append([]string{target}, out...) +} + +func buildMongoToPGLikePlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + plan := SchemaMigrationPlan{} + targetType := strings.ToLower(strings.TrimSpace(config.TargetConfig.Type)) + plan.SourceSchema, plan.SourceTable = normalizeSchemaAndTable(config.SourceConfig.Type, config.SourceConfig.Database, tableName) + plan.TargetSchema, plan.TargetTable = normalizeSchemaAndTable(config.TargetConfig.Type, config.TargetConfig.Database, tableName) + plan.SourceQueryTable = qualifiedNameForQuery(config.SourceConfig.Type, plan.SourceSchema, plan.SourceTable, tableName) + plan.TargetQueryTable = qualifiedNameForQuery(config.TargetConfig.Type, plan.TargetSchema, plan.TargetTable, tableName) + plan.PlannedAction = "使用已有目标表导入" + + sourceCols, warnings, err := inferMongoCollectionColumns(sourceDB, plan.SourceTable) + if err != nil { + return plan, nil, nil, err + } + plan.Warnings = append(plan.Warnings, warnings...) 
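+	// 与 MySQL 目标流程一致:字段同样来自 MongoDB 样本推断,推断结果为空则视为不可迁移。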
+ if len(sourceCols) == 0 { + return plan, nil, nil, fmt.Errorf("源集合未推断出可迁移字段: %s", tableName) + } + + targetCols, targetExists, err := inspectTableColumns(targetDB, plan.TargetSchema, plan.TargetTable) + if err != nil { + return plan, sourceCols, nil, fmt.Errorf("获取目标表字段失败: %w", err) + } + plan.TargetTableExists = targetExists + + strategy := normalizeTargetTableStrategy(config.TargetTableStrategy) + if targetExists { + missing := diffMissingColumnNames(sourceCols, targetCols) + if len(missing) > 0 { + plan.Warnings = append(plan.Warnings, fmt.Sprintf("目标表缺失字段 %d 个:%s", len(missing), strings.Join(missing, ", "))) + } + if config.AutoAddColumns { + addSQL, addWarnings := buildMongoToPGLikeAddColumnSQL(targetType, plan.TargetQueryTable, sourceCols, targetCols) + plan.PreDataSQL = append(plan.PreDataSQL, addSQL...) + plan.Warnings = append(plan.Warnings, addWarnings...) + if len(addSQL) > 0 { + plan.PlannedAction = fmt.Sprintf("补齐缺失字段(%d)后导入", len(addSQL)) + } + } + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + } + + switch strategy { + case "existing_only": + plan.PlannedAction = "目标表不存在,需先手工创建" + plan.Warnings = append(plan.Warnings, "当前策略要求目标表已存在,执行时不会自动建表") + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + case "smart", "auto_create_if_missing": + plan.AutoCreate = true + plan.PlannedAction = "目标表不存在,将自动建表后导入" + createSQL, postSQL, moreWarnings, unsupported, idxCreate, idxSkip, err := buildMongoToPGLikeCreateTablePlan(targetType, config, plan.TargetQueryTable, sourceCols, sourceDB, plan.SourceSchema, plan.SourceTable) + if err != nil { + return plan, sourceCols, targetCols, err + } + plan.CreateTableSQL = createSQL + plan.PostDataSQL = append(plan.PostDataSQL, postSQL...) + plan.Warnings = append(plan.Warnings, moreWarnings...) + plan.UnsupportedObjects = append(plan.UnsupportedObjects, unsupported...) + plan.IndexesToCreate = idxCreate + plan.IndexesSkipped = idxSkip + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + default: + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + } +} + +func buildMongoToPGLikeAddColumnSQL(targetType string, targetQueryTable string, sourceCols, targetCols []connection.ColumnDefinition) ([]string, []string) { + targetSet := make(map[string]struct{}, len(targetCols)) + for _, col := range targetCols { + key := strings.ToLower(strings.TrimSpace(col.Name)) + if key == "" { + continue + } + targetSet[key] = struct{}{} + } + var sqlList []string + var warnings []string + for _, col := range sourceCols { + key := strings.ToLower(strings.TrimSpace(col.Name)) + if key == "" { + continue + } + if _, ok := targetSet[key]; ok { + continue + } + colType, mapWarnings := mapMongoInferredColumnToPGLike(col) + warnings = append(warnings, mapWarnings...) 
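+		// 新补的列统一允许 NULL,避免已有数据在 ALTER 时触发 NOT NULL 约束失败。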
+ sqlList = append(sqlList, fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s %s NULL", + quoteQualifiedIdentByType(targetType, targetQueryTable), + quoteIdentByType(targetType, col.Name), + colType, + )) + } + return sqlList, dedupeStrings(warnings) +} + +func buildMongoToPGLikeCreateTablePlan(targetType string, config SyncConfig, targetQueryTable string, sourceCols []connection.ColumnDefinition, sourceDB db.Database, sourceSchema, sourceTable string) (string, []string, []string, []string, int, int, error) { + columnDefs := make([]string, 0, len(sourceCols)+1) + warnings := make([]string, 0) + unsupported := make([]string, 0) + pkCols := make([]string, 0, 1) + for _, col := range sourceCols { + colType, colWarnings := mapMongoInferredColumnToPGLike(col) + warnings = append(warnings, colWarnings...) + parts := []string{colType} + if strings.EqualFold(strings.TrimSpace(col.Nullable), "NO") { + parts = append(parts, "NOT NULL") + } + columnDefs = append(columnDefs, fmt.Sprintf("%s %s", quoteIdentByType(targetType, col.Name), strings.Join(parts, " "))) + if col.Key == "PRI" || col.Key == "PK" { + pkCols = append(pkCols, quoteIdentByType(targetType, col.Name)) + } + } + if len(pkCols) > 0 { + columnDefs = append(columnDefs, fmt.Sprintf("PRIMARY KEY (%s)", strings.Join(pkCols, ", "))) + } + createSQL := fmt.Sprintf("CREATE TABLE %s (\n %s\n)", quoteQualifiedIdentByType(targetType, targetQueryTable), strings.Join(columnDefs, ",\n ")) + if !config.CreateIndexes { + return createSQL, nil, dedupeStrings(warnings), dedupeStrings(unsupported), 0, 0, nil + } + indexes, err := sourceDB.GetIndexes(sourceSchema, sourceTable) + if err != nil { + warnings = append(warnings, fmt.Sprintf("读取源集合索引失败,已跳过索引迁移:%v", err)) + return createSQL, nil, dedupeStrings(warnings), dedupeStrings(unsupported), 0, 0, nil + } + grouped := groupIndexDefinitions(indexes) + postSQL := make([]string, 0, len(grouped)) + created := 0 + skipped := 0 + for _, idx := range grouped { + name := strings.TrimSpace(idx.Name) + if name == "" || strings.EqualFold(name, "_id_") || strings.EqualFold(name, "primary") { + continue + } + if len(idx.Columns) == 0 { + skipped++ + unsupported = append(unsupported, fmt.Sprintf("索引 %s 缺少列定义,已跳过", name)) + continue + } + quotedCols := make([]string, 0, len(idx.Columns)) + for _, col := range idx.Columns { + quotedCols = append(quotedCols, quoteIdentByType(targetType, col)) + } + prefix := "CREATE INDEX" + if idx.Unique { + prefix = "CREATE UNIQUE INDEX" + } + postSQL = append(postSQL, fmt.Sprintf("%s %s ON %s (%s)", prefix, quoteIdentByType(targetType, name), quoteQualifiedIdentByType(targetType, targetQueryTable), strings.Join(quotedCols, ", "))) + created++ + } + return createSQL, postSQL, dedupeStrings(warnings), dedupeStrings(unsupported), created, skipped, nil +} + +func mapMongoInferredColumnToPGLike(col connection.ColumnDefinition) (string, []string) { + raw := strings.ToLower(strings.TrimSpace(col.Type)) + warnings := make([]string, 0) + switch { + case strings.HasPrefix(raw, "varchar"): + return col.Type, warnings + case raw == "json": + return "jsonb", warnings + case raw == "datetime": + return "timestamp", warnings + case raw == "tinyint(1)": + return "boolean", warnings + case raw == "double": + return "double precision", warnings + case raw == "bigint": + return "bigint", warnings + default: + return col.Type, warnings + } +} diff --git a/internal/sync/migration_redis.go b/internal/sync/migration_redis.go new file mode 100644 index 0000000..84f159f --- /dev/null +++ 
b/internal/sync/migration_redis.go @@ -0,0 +1,1315 @@ +package sync + +import ( + "GoNavi-Wails/internal/connection" + "GoNavi-Wails/internal/db" + redispkg "GoNavi-Wails/internal/redis" + "encoding/json" + "fmt" + "sort" + "strconv" + "strings" +) + +type redisMigrationClient interface { + Connect(config connection.ConnectionConfig) error + Close() error + ScanKeys(pattern string, cursor uint64, count int64) (*redispkg.RedisScanResult, error) + GetKeyType(key string) (string, error) + GetValue(key string) (*redispkg.RedisValue, error) + DeleteKeys(keys []string) (int64, error) + SetTTL(key string, ttl int64) error + SetString(key, value string, ttl int64) error + SetHashField(key, field, value string) error + ListPush(key string, values ...string) error + SetAdd(key string, members ...string) error + ZSetAdd(key string, members ...redispkg.ZSetMember) error + StreamAdd(key string, fields map[string]string, id string) (string, error) +} + +var newSyncDatabase = db.NewDatabase +var newRedisSourceClient = func() redisMigrationClient { return redispkg.NewRedisClient() } + +func isRedisToMongoKeyspacePair(config SyncConfig) bool { + return resolveMigrationDBType(config.SourceConfig) == "redis" && resolveMigrationDBType(config.TargetConfig) == "mongodb" +} + +func resolveRedisDBIndex(config connection.ConnectionConfig) int { + if config.RedisDB >= 0 && config.RedisDB <= 15 { + return config.RedisDB + } + if text := strings.TrimSpace(config.Database); text != "" { + if idx, err := strconv.Atoi(text); err == nil && idx >= 0 && idx <= 15 { + return idx + } + } + return 0 +} + +func withResolvedRedisDB(config connection.ConnectionConfig) connection.ConnectionConfig { + next := config + next.Type = "redis" + next.RedisDB = resolveRedisDBIndex(config) + return next +} + +func resolveMongoCollectionName(config SyncConfig) string { + if name := strings.TrimSpace(config.MongoCollectionName); name != "" { + return name + } + if resolveMigrationDBType(config.SourceConfig) == "redis" { + return fmt.Sprintf("redis_db_%d_keys", resolveRedisDBIndex(config.SourceConfig)) + } + return fmt.Sprintf("redis_db_%d_keys", resolveRedisDBIndex(config.TargetConfig)) +} + +func deriveRedisMongoCollectionName(config SyncConfig) string { + return resolveMongoCollectionName(config) +} + +func buildRedisToMongoPlan(config SyncConfig, keyName string, targetDB db.Database) (SchemaMigrationPlan, error) { + collection := deriveRedisMongoCollectionName(config) + plan := SchemaMigrationPlan{ + SourceSchema: strconv.Itoa(resolveRedisDBIndex(config.SourceConfig)), + SourceTable: keyName, + SourceQueryTable: keyName, + TargetSchema: strings.TrimSpace(config.TargetConfig.Database), + TargetTable: collection, + TargetQueryTable: collection, + PlannedAction: "按 Redis Key 生成 MongoDB 文档导入", + Warnings: []string{"Redis -> MongoDB 按 keyspace 语义迁移,不执行表级 schema 校验", "Redis TTL/集合顺序等语义会按文档字段保留,不保证与原系统完全等价"}, + UnsupportedObjects: []string{"Redis Consumer Group / PubSub / Lua 脚本 / 事务状态当前不迁移"}, + } + exists, err := inspectMongoCollection(targetDB, plan.TargetSchema, collection) + if err != nil { + return plan, fmt.Errorf("检查目标集合失败: %w", err) + } + plan.TargetTableExists = exists + strategy := normalizeTargetTableStrategy(config.TargetTableStrategy) + if exists { + return dedupeSchemaMigrationPlan(plan), nil + } + if strategy == "existing_only" { + plan.PlannedAction = "目标集合不存在,需先手工创建" + plan.Warnings = append(plan.Warnings, "当前策略要求目标集合已存在,执行时不会自动建集合") + return dedupeSchemaMigrationPlan(plan), nil + } + createCommand, err := 
buildMongoCreateCollectionCommand(collection) + if err != nil { + return plan, err + } + plan.AutoCreate = true + plan.PlannedAction = "目标集合不存在,将自动创建集合后导入" + plan.PreDataSQL = []string{createCommand} + return dedupeSchemaMigrationPlan(plan), nil +} + +func listRedisMigrationKeys(client redisMigrationClient, selected []string) ([]string, error) { + if len(selected) > 0 { + return dedupeStrings(selected), nil + } + cursor := uint64(0) + keys := make([]string, 0, 64) + seen := map[string]struct{}{} + for { + result, err := client.ScanKeys("*", cursor, 1000) + if err != nil { + return nil, err + } + if result != nil { + for _, item := range result.Keys { + key := strings.TrimSpace(item.Key) + if key == "" { + continue + } + if _, ok := seen[key]; ok { + continue + } + seen[key] = struct{}{} + keys = append(keys, key) + } + if strings.TrimSpace(result.Cursor) == "" || strings.TrimSpace(result.Cursor) == "0" { + break + } + next, err := strconv.ParseUint(strings.TrimSpace(result.Cursor), 10, 64) + if err != nil || next == cursor { + break + } + cursor = next + continue + } + break + } + sort.Strings(keys) + return keys, nil +} + +func buildRedisMongoDocument(dbIndex int, key string, value *redispkg.RedisValue) map[string]interface{} { + doc := map[string]interface{}{ + "_id": fmt.Sprintf("db%d:%s", dbIndex, key), + "redisDb": dbIndex, + "key": key, + "source": "redis", + } + if value == nil { + return doc + } + doc["type"] = value.Type + doc["ttl"] = value.TTL + doc["length"] = value.Length + doc["value"] = normalizeRedisMongoValue(value.Value) + return doc +} + +func normalizeRedisMongoValue(value interface{}) interface{} { + switch typed := value.(type) { + case nil: + return nil + case []byte: + return string(typed) + case map[string]string: + result := make(map[string]interface{}, len(typed)) + for k, v := range typed { + result[k] = v + } + return result + case []string: + result := make([]interface{}, 0, len(typed)) + for _, item := range typed { + result = append(result, item) + } + return result + case []redispkg.ZSetMember: + result := make([]map[string]interface{}, 0, len(typed)) + for _, item := range typed { + result = append(result, map[string]interface{}{"member": item.Member, "score": item.Score}) + } + return result + case []redispkg.StreamEntry: + result := make([]map[string]interface{}, 0, len(typed)) + for _, item := range typed { + fields := make(map[string]interface{}, len(item.Fields)) + for k, v := range item.Fields { + fields[k] = v + } + result = append(result, map[string]interface{}{"id": item.ID, "fields": fields}) + } + return result + case map[string]interface{}: + result := make(map[string]interface{}, len(typed)) + for k, v := range typed { + result[k] = normalizeRedisMongoValue(v) + } + return result + case []interface{}: + result := make([]interface{}, 0, len(typed)) + for _, item := range typed { + result = append(result, normalizeRedisMongoValue(item)) + } + return result + default: + return typed + } +} + +func buildRedisMongoExistingDocsQuery(collection string, ids []string) (string, error) { + command := map[string]interface{}{ + "find": collection, + "filter": map[string]interface{}{ + "_id": map[string]interface{}{"$in": ids}, + }, + } + data, err := json.Marshal(command) + if err != nil { + return "", err + } + return string(data), nil +} + +func loadExistingRedisMongoDocs(targetDB db.Database, collection string, ids []string) (map[string]map[string]interface{}, error) { + result := make(map[string]map[string]interface{}, len(ids)) + if len(ids) == 0 { + 
return result, nil
+	}
+	query, err := buildRedisMongoExistingDocsQuery(collection, ids)
+	if err != nil {
+		return nil, err
+	}
+	rows, _, err := targetDB.Query(query)
+	if err != nil {
+		return nil, err
+	}
+	for _, row := range rows {
+		id := strings.TrimSpace(fmt.Sprintf("%v", row["_id"]))
+		// 跳过 _id 为空的异常文档
+		if id == "" {
+			continue
+		}
+		result[id] = row
+	}
+	return result, nil
+}
+
+func buildRedisMongoChanges(config SyncConfig, keys []string, client redisMigrationClient, targetDB db.Database, collection string) (connection.ChangeSet, []map[string]interface{}, error) {
+	changeSet := connection.ChangeSet{Inserts: []map[string]interface{}{}, Updates: []connection.UpdateRow{}, Deletes: []map[string]interface{}{}}
+	documents := make([]map[string]interface{}, 0, len(keys))
+	dbIndex := resolveRedisDBIndex(config.SourceConfig)
+	for _, key := range keys {
+		value, err := client.GetValue(key)
+		if err != nil {
+			return changeSet, nil, fmt.Errorf("读取 Redis Key 失败: key=%s err=%w", key, err)
+		}
+		documents = append(documents, buildRedisMongoDocument(dbIndex, key, value))
+	}
+	ids := make([]string, 0, len(documents))
+	for _, doc := range documents {
+		ids = append(ids, fmt.Sprintf("%v", doc["_id"]))
+	}
+	existing, err := loadExistingRedisMongoDocs(targetDB, collection, ids)
+	if err != nil {
+		return changeSet, nil, err
+	}
+	mode := normalizeSyncMode(config.Mode)
+	for _, doc := range documents {
+		id := fmt.Sprintf("%v", doc["_id"])
+		existingDoc, ok := existing[id]
+		if !ok {
+			changeSet.Inserts = append(changeSet.Inserts, doc)
+			continue
+		}
+		if mode == "insert_only" {
+			continue
+		}
+		values := cloneMapWithoutKeys(doc, "_id")
+		if sameRedisMongoDocument(existingDoc, doc) {
+			continue
+		}
+		changeSet.Updates = append(changeSet.Updates, connection.UpdateRow{Keys: map[string]interface{}{"_id": id}, Values: values})
+	}
+	return changeSet, documents, nil
+}
+
+func sameRedisMongoDocument(existing map[string]interface{}, desired map[string]interface{}) bool {
+	for k, v := range desired {
+		if k == "_id" {
+			continue
+		}
+		if fmt.Sprintf("%v", normalizeRedisMongoValue(v)) != fmt.Sprintf("%v", normalizeRedisMongoValue(existing[k])) {
+			return false
+		}
+	}
+	return true
+}
+
+func cloneMapWithoutKeys(input map[string]interface{}, skipKeys ...string) map[string]interface{} {
+	skip := make(map[string]struct{}, len(skipKeys))
+	for _, key := range skipKeys {
+		skip[key] = struct{}{}
+	}
+	result := make(map[string]interface{}, len(input))
+	for k, v := range input {
+		if _, ok := skip[k]; ok {
+			continue
+		}
+		result[k] = v
+	}
+	return result
+}
+
+func (s *SyncEngine) runRedisToMongoSync(config SyncConfig, result SyncResult) SyncResult {
+	tables := config.Tables
+	strategy := normalizeTargetTableStrategy(config.TargetTableStrategy)
+	mode := normalizeSyncMode(config.Mode)
+	s.progress(config.JobID, 0, len(tables), "", "开始 Redis 键空间迁移")
+	s.appendLog(config.JobID, &result, "info", fmt.Sprintf("Redis -> MongoDB 键空间迁移;模式:%s;目标策略:%s", mode, strategy))
+	if mode == "full_overwrite" {
+		s.appendLog(config.JobID, &result, "warn", "Redis -> MongoDB 第一版暂不执行集合级 full_overwrite 删除,已降级为 insert_update")
+	}
+
+	sourceClient := newRedisSourceClient()
+	sourceConfig := withResolvedRedisDB(config.SourceConfig)
+	if err := sourceClient.Connect(sourceConfig); err != nil {
+		return s.fail(config.JobID, len(tables), result, "源 Redis 连接失败: "+err.Error())
+	}
+	defer sourceClient.Close()
+
+	targetDB, err := newSyncDatabase(config.TargetConfig.Type)
+	if err != nil {
+		return s.fail(config.JobID, len(tables),
result, "初始化目标数据库驱动失败: "+err.Error()) + } + if err := targetDB.Connect(config.TargetConfig); err != nil { + return s.fail(config.JobID, len(tables), result, "目标数据库连接失败: "+err.Error()) + } + defer targetDB.Close() + + keys, err := listRedisMigrationKeys(sourceClient, config.Tables) + if err != nil { + return s.fail(config.JobID, len(tables), result, "扫描 Redis Key 失败: "+err.Error()) + } + if len(keys) == 0 { + result.Message = "未发现可迁移的 Redis Key" + s.progress(config.JobID, 0, 0, "", "同步完成") + return result + } + totalKeys := len(keys) + collection := deriveRedisMongoCollectionName(config) + plan, err := buildRedisToMongoPlan(config, firstNonEmpty(keys[0], collection), targetDB) + if err != nil { + return s.fail(config.JobID, totalKeys, result, err.Error()) + } + for _, warning := range plan.Warnings { + s.appendLog(config.JobID, &result, "warn", " -> "+warning) + } + for _, unsupported := range plan.UnsupportedObjects { + s.appendLog(config.JobID, &result, "warn", " -> "+unsupported) + } + if strings.TrimSpace(plan.PlannedAction) != "" { + s.appendLog(config.JobID, &result, "info", " -> "+plan.PlannedAction) + } + if !plan.TargetTableExists && !plan.AutoCreate { + result.Message = firstNonEmpty(plan.PlannedAction, "目标集合不存在,当前策略不允许自动创建") + return result + } + if !plan.TargetTableExists && len(plan.PreDataSQL) > 0 { + s.progress(config.JobID, 0, totalKeys, collection, "创建目标集合") + if err := executeSQLStatements(targetDB.Exec, plan.PreDataSQL); err != nil { + return s.fail(config.JobID, totalKeys, result, "创建目标集合失败: "+err.Error()) + } + } + + changeSet, documents, err := buildRedisMongoChanges(config, keys, sourceClient, targetDB, collection) + if err != nil { + return s.fail(config.JobID, totalKeys, result, "构建 Redis 迁移变更失败: "+err.Error()) + } + for idx, key := range keys { + s.appendLog(config.JobID, &result, "info", fmt.Sprintf("正在迁移 Key: %s", key)) + s.progress(config.JobID, idx, totalKeys, key, fmt.Sprintf("迁移 Key(%d/%d)", idx+1, totalKeys)) + } + if len(changeSet.Inserts) == 0 && len(changeSet.Updates) == 0 && len(changeSet.Deletes) == 0 { + s.appendLog(config.JobID, &result, "info", " -> 目标集合中对应文档已是最新状态") + result.TablesSynced = totalKeys + result.Message = fmt.Sprintf("Redis 键空间迁移完成,共处理 %d 个 Key", totalKeys) + s.progress(config.JobID, totalKeys, totalKeys, collection, "同步完成") + return result + } + applier, ok := targetDB.(db.BatchApplier) + if !ok { + return s.fail(config.JobID, totalKeys, result, "目标驱动不支持 MongoDB 文档写入") + } + _ = documents + if err := applier.ApplyChanges(collection, changeSet); err != nil { + return s.fail(config.JobID, totalKeys, result, "应用 Redis 迁移变更失败: "+err.Error()) + } + result.RowsInserted += len(changeSet.Inserts) + result.RowsUpdated += len(changeSet.Updates) + result.RowsDeleted += len(changeSet.Deletes) + result.TablesSynced = totalKeys + result.Message = fmt.Sprintf("Redis 键空间迁移完成,共处理 %d 个 Key", totalKeys) + s.progress(config.JobID, totalKeys, totalKeys, collection, "同步完成") + return result +} + +func (s *SyncEngine) analyzeRedisToMongo(config SyncConfig) SyncAnalyzeResult { + result := SyncAnalyzeResult{Success: true, Tables: []TableDiffSummary{}} + sourceClient := newRedisSourceClient() + sourceConfig := withResolvedRedisDB(config.SourceConfig) + if err := sourceClient.Connect(sourceConfig); err != nil { + return SyncAnalyzeResult{Success: false, Message: "源 Redis 连接失败: " + err.Error()} + } + defer sourceClient.Close() + targetDB, err := newSyncDatabase(config.TargetConfig.Type) + if err != nil { + return SyncAnalyzeResult{Success: false, Message: 
"初始化目标数据库驱动失败: " + err.Error()} + } + if err := targetDB.Connect(config.TargetConfig); err != nil { + return SyncAnalyzeResult{Success: false, Message: "目标数据库连接失败: " + err.Error()} + } + defer targetDB.Close() + keys, err := listRedisMigrationKeys(sourceClient, config.Tables) + if err != nil { + return SyncAnalyzeResult{Success: false, Message: "扫描 Redis Key 失败: " + err.Error()} + } + collection := deriveRedisMongoCollectionName(config) + changeSet, documents, err := buildRedisMongoChanges(config, keys, sourceClient, targetDB, collection) + if err != nil { + return SyncAnalyzeResult{Success: false, Message: "分析 Redis 迁移变更失败: " + err.Error()} + } + insertSet := make(map[string]struct{}, len(changeSet.Inserts)) + updateSet := make(map[string]struct{}, len(changeSet.Updates)) + for _, row := range changeSet.Inserts { + insertSet[fmt.Sprintf("%v", row["_id"])] = struct{}{} + } + for _, row := range changeSet.Updates { + updateSet[fmt.Sprintf("%v", row.Keys["_id"])] = struct{}{} + } + for _, doc := range documents { + key := fmt.Sprintf("%v", doc["key"]) + id := fmt.Sprintf("%v", doc["_id"]) + summary := TableDiffSummary{ + Table: key, + PKColumn: "_id", + CanSync: true, + TargetTableExists: true, + PlannedAction: fmt.Sprintf("迁移到集合 %s", collection), + Warnings: []string{ + "Redis Key 将按文档写入 MongoDB 集合", + }, + } + if _, ok := insertSet[id]; ok { + summary.Inserts = 1 + summary.Message = "执行时将写入新文档" + } else if _, ok := updateSet[id]; ok { + summary.Updates = 1 + summary.Message = "执行时将更新已有文档" + } else { + summary.Same = 1 + summary.Message = "目标集合中对应文档已是最新状态" + } + result.Tables = append(result.Tables, summary) + } + result.Message = fmt.Sprintf("已完成 %d 个 Redis Key 的迁移分析", len(result.Tables)) + return result +} + +func (s *SyncEngine) previewRedisToMongo(config SyncConfig, keyName string, limit int) (TableDiffPreview, error) { + _ = limit + sourceClient := newRedisSourceClient() + sourceConfig := withResolvedRedisDB(config.SourceConfig) + if err := sourceClient.Connect(sourceConfig); err != nil { + return TableDiffPreview{}, fmt.Errorf("源 Redis 连接失败: %w", err) + } + defer sourceClient.Close() + targetDB, err := newSyncDatabase(config.TargetConfig.Type) + if err != nil { + return TableDiffPreview{}, fmt.Errorf("初始化目标数据库驱动失败: %w", err) + } + if err := targetDB.Connect(config.TargetConfig); err != nil { + return TableDiffPreview{}, fmt.Errorf("目标数据库连接失败: %w", err) + } + defer targetDB.Close() + collection := deriveRedisMongoCollectionName(config) + changeSet, documents, err := buildRedisMongoChanges(config, []string{keyName}, sourceClient, targetDB, collection) + if err != nil { + return TableDiffPreview{}, err + } + preview := TableDiffPreview{Table: keyName, PKColumn: "_id", Inserts: []PreviewRow{}, Updates: []PreviewUpdateRow{}, Deletes: []PreviewRow{}} + if len(documents) == 0 { + return preview, nil + } + doc := documents[0] + id := fmt.Sprintf("%v", doc["_id"]) + existingDocs, err := loadExistingRedisMongoDocs(targetDB, collection, []string{id}) + if err != nil { + return TableDiffPreview{}, err + } + if len(changeSet.Inserts) > 0 { + preview.TotalInserts = 1 + preview.Inserts = append(preview.Inserts, PreviewRow{PK: id, Row: doc}) + return preview, nil + } + if len(changeSet.Updates) > 0 { + preview.TotalUpdates = 1 + preview.Updates = append(preview.Updates, PreviewUpdateRow{PK: id, ChangedColumns: sortedMapKeys(changeSet.Updates[0].Values), Source: doc, Target: existingDocs[id]}) + return preview, nil + } + return preview, nil +} + +func sortedMapKeys(values map[string]interface{}) 
[]string { + keys := make([]string, 0, len(values)) + for key := range values { + keys = append(keys, key) + } + sort.Strings(keys) + return keys +} + +func isMongoToRedisKeyspacePair(config SyncConfig) bool { + return resolveMigrationDBType(config.SourceConfig) == "mongodb" && resolveMigrationDBType(config.TargetConfig) == "redis" +} + +type mongoRedisKeyDocument struct { + Key string + Type string + TTL int64 + Value interface{} + SourceRow map[string]interface{} + Desired *redispkg.RedisValue +} + +type mongoRedisKeyDiff struct { + Collection string + Document mongoRedisKeyDocument + Current *redispkg.RedisValue + Exists bool + Action string + ChangedColumns []string +} + +func deriveRedisTargetLabel(config SyncConfig) string { + return fmt.Sprintf("Redis DB %d", resolveRedisDBIndex(config.TargetConfig)) +} + +func deriveDefaultMongoRedisCollection(config SyncConfig) string { + return resolveMongoCollectionName(config) +} + +func listMongoRedisCollections(sourceDB db.Database, config SyncConfig) ([]string, error) { + if len(config.Tables) > 0 { + return dedupeStrings(config.Tables), nil + } + tables, err := sourceDB.GetTables(strings.TrimSpace(config.SourceConfig.Database)) + if err == nil && len(tables) > 0 { + return dedupeStrings(tables), nil + } + return []string{deriveDefaultMongoRedisCollection(config)}, nil +} + +func buildMongoRedisFindQuery(collection string, limit int) (string, error) { + command := map[string]interface{}{ + "find": strings.TrimSpace(collection), + "filter": map[string]interface{}{}, + } + if limit > 0 { + command["limit"] = limit + } + data, err := json.Marshal(command) + if err != nil { + return "", err + } + return string(data), nil +} + +func loadMongoRedisDocuments(sourceDB db.Database, collection string, limit int) ([]map[string]interface{}, error) { + query, err := buildMongoRedisFindQuery(collection, limit) + if err != nil { + return nil, err + } + rows, _, err := sourceDB.Query(query) + if err != nil { + return nil, err + } + return rows, nil +} + +func parseMongoRedisDocument(row map[string]interface{}) (mongoRedisKeyDocument, error) { + key := strings.TrimSpace(asRedisMigrationString(row["key"])) + if key == "" { + if rawID := strings.TrimSpace(asRedisMigrationString(row["_id"])); rawID != "" { + if _, tail, ok := strings.Cut(rawID, ":"); ok { + key = strings.TrimSpace(tail) + } + } + } + if key == "" { + return mongoRedisKeyDocument{}, fmt.Errorf("文档缺少 key 字段") + } + + redisType := strings.ToLower(strings.TrimSpace(asRedisMigrationString(row["type"]))) + if redisType == "" { + return mongoRedisKeyDocument{}, fmt.Errorf("文档缺少 type 字段: key=%s", key) + } + + ttl := normalizeRedisMigrationTTL(asRedisMigrationInt64(row["ttl"], -1)) + desired := &redispkg.RedisValue{Type: redisType, TTL: ttl} + + sourceRow := cloneMapWithoutKeys(row) + sourceRow["key"] = key + sourceRow["type"] = redisType + sourceRow["ttl"] = ttl + + switch redisType { + case "string": + value := asRedisMigrationString(row["value"]) + desired.Value = value + desired.Length = int64(len(value)) + sourceRow["value"] = value + case "hash": + value, err := asRedisMigrationStringMap(row["value"]) + if err != nil { + return mongoRedisKeyDocument{}, fmt.Errorf("key=%s hash 值无效: %w", key, err) + } + desired.Value = value + desired.Length = int64(len(value)) + sourceRow["value"] = normalizeRedisMongoValue(value) + case "list": + value, err := asRedisMigrationStringSlice(row["value"]) + if err != nil { + return mongoRedisKeyDocument{}, fmt.Errorf("key=%s list 值无效: %w", key, err) + } + 
desired.Value = value + desired.Length = int64(len(value)) + sourceRow["value"] = normalizeRedisMongoValue(value) + case "set": + value, err := asRedisMigrationStringSlice(row["value"]) + if err != nil { + return mongoRedisKeyDocument{}, fmt.Errorf("key=%s set 值无效: %w", key, err) + } + sort.Strings(value) + desired.Value = value + desired.Length = int64(len(value)) + sourceRow["value"] = normalizeRedisMongoValue(value) + case "zset": + value, err := asRedisMigrationZSetMembers(row["value"]) + if err != nil { + return mongoRedisKeyDocument{}, fmt.Errorf("key=%s zset 值无效: %w", key, err) + } + sort.Slice(value, func(i, j int) bool { + if value[i].Score == value[j].Score { + return value[i].Member < value[j].Member + } + return value[i].Score < value[j].Score + }) + desired.Value = value + desired.Length = int64(len(value)) + sourceRow["value"] = normalizeRedisMongoValue(value) + case "stream": + value, err := asRedisMigrationStreamEntries(row["value"]) + if err != nil { + return mongoRedisKeyDocument{}, fmt.Errorf("key=%s stream 值无效: %w", key, err) + } + sort.Slice(value, func(i, j int) bool { return value[i].ID < value[j].ID }) + desired.Value = value + desired.Length = int64(len(value)) + sourceRow["value"] = normalizeRedisMongoValue(value) + default: + return mongoRedisKeyDocument{}, fmt.Errorf("key=%s 暂不支持 Redis 类型 %s", key, redisType) + } + + return mongoRedisKeyDocument{Key: key, Type: redisType, TTL: ttl, Value: desired.Value, SourceRow: sourceRow, Desired: desired}, nil +} + +func buildMongoToRedisDiffs(sourceDB db.Database, targetClient redisMigrationClient, collection string, mode string) ([]mongoRedisKeyDiff, error) { + rows, err := loadMongoRedisDocuments(sourceDB, collection, 0) + if err != nil { + return nil, err + } + diffs := make([]mongoRedisKeyDiff, 0, len(rows)) + effectiveMode := normalizeSyncMode(mode) + for _, row := range rows { + doc, err := parseMongoRedisDocument(row) + if err != nil { + return nil, err + } + current, exists, err := loadExistingRedisMigrationValue(targetClient, doc.Key) + if err != nil { + return nil, fmt.Errorf("读取目标 Redis Key 失败: key=%s err=%w", doc.Key, err) + } + action := "insert" + changedColumns := []string{"type", "ttl", "value"} + if exists { + if sameRedisMigrationValue(current, doc.Desired) { + action = "same" + changedColumns = nil + } else if effectiveMode == "insert_only" { + action = "same" + changedColumns = nil + } else { + action = "update" + changedColumns = diffRedisMigrationColumns(current, doc.Desired) + } + } + diffs = append(diffs, mongoRedisKeyDiff{ + Collection: collection, + Document: doc, + Current: current, + Exists: exists, + Action: action, + ChangedColumns: changedColumns, + }) + } + sort.Slice(diffs, func(i, j int) bool { return diffs[i].Document.Key < diffs[j].Document.Key }) + return diffs, nil +} + +func loadExistingRedisMigrationValue(client redisMigrationClient, key string) (*redispkg.RedisValue, bool, error) { + keyType, err := client.GetKeyType(key) + if err != nil { + return nil, false, err + } + keyType = strings.ToLower(strings.TrimSpace(keyType)) + if keyType == "" || keyType == "none" { + return nil, false, nil + } + value, err := client.GetValue(key) + if err != nil { + return nil, false, err + } + if value == nil { + return nil, false, nil + } + value.Type = keyType + value.TTL = normalizeRedisMigrationTTL(value.TTL) + return value, true, nil +} + +func normalizeRedisMigrationTTL(ttl int64) int64 { + if ttl > 0 { + return ttl + } + return -1 +} + +func sameRedisMigrationValue(current 
*redispkg.RedisValue, desired *redispkg.RedisValue) bool { + if current == nil || desired == nil { + return current == nil && desired == nil + } + if strings.ToLower(strings.TrimSpace(current.Type)) != strings.ToLower(strings.TrimSpace(desired.Type)) { + return false + } + if normalizeRedisMigrationTTL(current.TTL) != normalizeRedisMigrationTTL(desired.TTL) { + return false + } + return canonicalRedisMigrationValue(current) == canonicalRedisMigrationValue(desired) +} + +func canonicalRedisMigrationValue(value *redispkg.RedisValue) string { + if value == nil { + return "null" + } + payload := map[string]interface{}{ + "type": strings.ToLower(strings.TrimSpace(value.Type)), + "ttl": normalizeRedisMigrationTTL(value.TTL), + "value": normalizeRedisComparablePayload(strings.ToLower(strings.TrimSpace(value.Type)), value.Value), + } + data, err := json.Marshal(payload) + if err != nil { + return fmt.Sprintf("%v", payload) + } + return string(data) +} + +func normalizeRedisComparablePayload(redisType string, value interface{}) interface{} { + switch redisType { + case "string": + return asRedisMigrationString(value) + case "hash": + mapped, err := asRedisMigrationStringMap(value) + if err != nil { + return fmt.Sprintf("%v", value) + } + return normalizeRedisMongoValue(mapped) + case "list": + items, err := asRedisMigrationStringSlice(value) + if err != nil { + return fmt.Sprintf("%v", value) + } + return normalizeRedisMongoValue(items) + case "set": + items, err := asRedisMigrationStringSlice(value) + if err != nil { + return fmt.Sprintf("%v", value) + } + sort.Strings(items) + return normalizeRedisMongoValue(items) + case "zset": + members, err := asRedisMigrationZSetMembers(value) + if err != nil { + return fmt.Sprintf("%v", value) + } + sort.Slice(members, func(i, j int) bool { + if members[i].Score == members[j].Score { + return members[i].Member < members[j].Member + } + return members[i].Score < members[j].Score + }) + return normalizeRedisMongoValue(members) + case "stream": + entries, err := asRedisMigrationStreamEntries(value) + if err != nil { + return fmt.Sprintf("%v", value) + } + sort.Slice(entries, func(i, j int) bool { return entries[i].ID < entries[j].ID }) + return normalizeRedisMongoValue(entries) + default: + return normalizeRedisMongoValue(value) + } +} + +func diffRedisMigrationColumns(current *redispkg.RedisValue, desired *redispkg.RedisValue) []string { + changed := make([]string, 0, 3) + if current == nil || desired == nil { + return []string{"type", "ttl", "value"} + } + if strings.ToLower(strings.TrimSpace(current.Type)) != strings.ToLower(strings.TrimSpace(desired.Type)) { + changed = append(changed, "type") + } + if normalizeRedisMigrationTTL(current.TTL) != normalizeRedisMigrationTTL(desired.TTL) { + changed = append(changed, "ttl") + } + currentComparable := normalizeRedisComparablePayload(strings.ToLower(strings.TrimSpace(desired.Type)), current.Value) + desiredComparable := normalizeRedisComparablePayload(strings.ToLower(strings.TrimSpace(desired.Type)), desired.Value) + currentJSON, _ := json.Marshal(currentComparable) + desiredJSON, _ := json.Marshal(desiredComparable) + if string(currentJSON) != string(desiredJSON) { + changed = append(changed, "value") + } + return dedupeStrings(changed) +} + +func buildRedisPreviewRow(key string, value *redispkg.RedisValue) map[string]interface{} { + if value == nil { + return map[string]interface{}{"key": key} + } + return map[string]interface{}{ + "key": key, + "type": strings.ToLower(strings.TrimSpace(value.Type)), + "ttl": 
normalizeRedisMigrationTTL(value.TTL), + "value": normalizeRedisComparablePayload(strings.ToLower(strings.TrimSpace(value.Type)), value.Value), + } +} + +func applyMongoRedisDiff(targetClient redisMigrationClient, diff mongoRedisKeyDiff) error { + desired := diff.Document.Desired + if desired == nil { + return fmt.Errorf("空的 Redis 目标值: key=%s", diff.Document.Key) + } + redisType := strings.ToLower(strings.TrimSpace(desired.Type)) + ttl := normalizeRedisMigrationTTL(desired.TTL) + if diff.Exists && diff.Action == "update" && redisType != "string" { + if _, err := targetClient.DeleteKeys([]string{diff.Document.Key}); err != nil { + return err + } + } + + switch redisType { + case "string": + return targetClient.SetString(diff.Document.Key, asRedisMigrationString(desired.Value), ttl) + case "hash": + mapped, err := asRedisMigrationStringMap(desired.Value) + if err != nil { + return err + } + fields := make([]string, 0, len(mapped)) + for field := range mapped { + fields = append(fields, field) + } + sort.Strings(fields) + for _, field := range fields { + if err := targetClient.SetHashField(diff.Document.Key, field, mapped[field]); err != nil { + return err + } + } + return targetClient.SetTTL(diff.Document.Key, ttl) + case "list": + items, err := asRedisMigrationStringSlice(desired.Value) + if err != nil { + return err + } + if len(items) > 0 { + if err := targetClient.ListPush(diff.Document.Key, items...); err != nil { + return err + } + } + return targetClient.SetTTL(diff.Document.Key, ttl) + case "set": + items, err := asRedisMigrationStringSlice(desired.Value) + if err != nil { + return err + } + if len(items) > 0 { + if err := targetClient.SetAdd(diff.Document.Key, items...); err != nil { + return err + } + } + return targetClient.SetTTL(diff.Document.Key, ttl) + case "zset": + members, err := asRedisMigrationZSetMembers(desired.Value) + if err != nil { + return err + } + if len(members) > 0 { + if err := targetClient.ZSetAdd(diff.Document.Key, members...); err != nil { + return err + } + } + return targetClient.SetTTL(diff.Document.Key, ttl) + case "stream": + entries, err := asRedisMigrationStreamEntries(desired.Value) + if err != nil { + return err + } + for _, entry := range entries { + if _, err := targetClient.StreamAdd(diff.Document.Key, entry.Fields, entry.ID); err != nil { + return err + } + } + return targetClient.SetTTL(diff.Document.Key, ttl) + default: + return fmt.Errorf("暂不支持 Redis 类型 %s", redisType) + } +} + +func asRedisMigrationString(value interface{}) string { + switch typed := value.(type) { + case nil: + return "" + case string: + return typed + case []byte: + return string(typed) + default: + return fmt.Sprintf("%v", typed) + } +} + +func asRedisMigrationInt64(value interface{}, defaultValue int64) int64 { + switch typed := value.(type) { + case nil: + return defaultValue + case int: + return int64(typed) + case int8: + return int64(typed) + case int16: + return int64(typed) + case int32: + return int64(typed) + case int64: + return typed + case uint: + return int64(typed) + case uint8: + return int64(typed) + case uint16: + return int64(typed) + case uint32: + return int64(typed) + case uint64: + return int64(typed) + case float32: + return int64(typed) + case float64: + return int64(typed) + case json.Number: + if n, err := typed.Int64(); err == nil { + return n + } + case string: + if n, err := strconv.ParseInt(strings.TrimSpace(typed), 10, 64); err == nil { + return n + } + } + return defaultValue +} + +func asRedisMigrationFloat64(value interface{}) (float64, 
error) { + switch typed := value.(type) { + case float64: + return typed, nil + case float32: + return float64(typed), nil + case int: + return float64(typed), nil + case int8: + return float64(typed), nil + case int16: + return float64(typed), nil + case int32: + return float64(typed), nil + case int64: + return float64(typed), nil + case uint: + return float64(typed), nil + case uint8: + return float64(typed), nil + case uint16: + return float64(typed), nil + case uint32: + return float64(typed), nil + case uint64: + return float64(typed), nil + case json.Number: + return typed.Float64() + case string: + return strconv.ParseFloat(strings.TrimSpace(typed), 64) + default: + return 0, fmt.Errorf("无法转换为 float64: %T", value) + } +} + +func asRedisMigrationStringMap(value interface{}) (map[string]string, error) { + switch typed := value.(type) { + case nil: + return map[string]string{}, nil + case map[string]string: + result := make(map[string]string, len(typed)) + for k, v := range typed { + result[k] = v + } + return result, nil + case map[string]interface{}: + result := make(map[string]string, len(typed)) + for k, v := range typed { + result[k] = asRedisMigrationString(v) + } + return result, nil + default: + return nil, fmt.Errorf("期望对象,实际=%T", value) + } +} + +func asRedisMigrationStringSlice(value interface{}) ([]string, error) { + switch typed := value.(type) { + case nil: + return []string{}, nil + case []string: + result := append([]string(nil), typed...) + return result, nil + case []interface{}: + result := make([]string, 0, len(typed)) + for _, item := range typed { + result = append(result, asRedisMigrationString(item)) + } + return result, nil + default: + return nil, fmt.Errorf("期望数组,实际=%T", value) + } +} + +func asRedisMigrationZSetMembers(value interface{}) ([]redispkg.ZSetMember, error) { + switch typed := value.(type) { + case nil: + return []redispkg.ZSetMember{}, nil + case []redispkg.ZSetMember: + result := append([]redispkg.ZSetMember(nil), typed...) + return result, nil + case []interface{}: + result := make([]redispkg.ZSetMember, 0, len(typed)) + for _, item := range typed { + mapped, ok := item.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("zset 成员格式无效: %T", item) + } + score, err := asRedisMigrationFloat64(mapped["score"]) + if err != nil { + return nil, err + } + result = append(result, redispkg.ZSetMember{Member: asRedisMigrationString(mapped["member"]), Score: score}) + } + return result, nil + default: + return nil, fmt.Errorf("期望 zset 数组,实际=%T", value) + } +} + +func asRedisMigrationStreamEntries(value interface{}) ([]redispkg.StreamEntry, error) { + switch typed := value.(type) { + case nil: + return []redispkg.StreamEntry{}, nil + case []redispkg.StreamEntry: + result := append([]redispkg.StreamEntry(nil), typed...) 
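+		// 返回浅拷贝,避免调用方修改底层数组影响原值。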
+ return result, nil + case []interface{}: + result := make([]redispkg.StreamEntry, 0, len(typed)) + for _, item := range typed { + mapped, ok := item.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("stream 条目格式无效: %T", item) + } + fields, err := asRedisMigrationStringMap(mapped["fields"]) + if err != nil { + return nil, err + } + result = append(result, redispkg.StreamEntry{ID: asRedisMigrationString(mapped["id"]), Fields: fields}) + } + return result, nil + default: + return nil, fmt.Errorf("期望 stream 数组,实际=%T", value) + } +} + +func (s *SyncEngine) runMongoToRedisSync(config SyncConfig, result SyncResult) SyncResult { + collections := dedupeStrings(config.Tables) + sourceDB, err := newSyncDatabase(config.SourceConfig.Type) + if err != nil { + return s.fail(config.JobID, len(collections), result, "初始化源数据库驱动失败: "+err.Error()) + } + if err := sourceDB.Connect(config.SourceConfig); err != nil { + return s.fail(config.JobID, len(collections), result, "源 MongoDB 连接失败: "+err.Error()) + } + defer sourceDB.Close() + if len(collections) == 0 { + collections, err = listMongoRedisCollections(sourceDB, config) + if err != nil { + return s.fail(config.JobID, 0, result, "获取 MongoDB 集合列表失败: "+err.Error()) + } + } + if len(collections) == 0 { + result.Message = "未发现可迁移的 MongoDB 集合" + s.progress(config.JobID, 0, 0, "", "同步完成") + return result + } + + effectiveMode := normalizeSyncMode(config.Mode) + totalCollections := len(collections) + s.progress(config.JobID, 0, totalCollections, "", "开始 MongoDB 键空间迁移") + s.appendLog(config.JobID, &result, "info", fmt.Sprintf("MongoDB -> Redis 键空间迁移;模式:%s;目标:%s", effectiveMode, deriveRedisTargetLabel(config))) + s.appendLog(config.JobID, &result, "warn", "MongoDB -> Redis 第一版仅支持固定文档格式:key/type/ttl/value") + if effectiveMode == "full_overwrite" { + s.appendLog(config.JobID, &result, "warn", "MongoDB -> Redis 第一版暂不执行 Redis DB 级 full_overwrite 删除,已降级为 insert_update") + effectiveMode = "insert_update" + } + + targetClient := newRedisSourceClient() + targetConfig := withResolvedRedisDB(config.TargetConfig) + if err := targetClient.Connect(targetConfig); err != nil { + return s.fail(config.JobID, totalCollections, result, "目标 Redis 连接失败: "+err.Error()) + } + defer targetClient.Close() + + processedKeys := 0 + for idx, collection := range collections { + s.appendLog(config.JobID, &result, "info", fmt.Sprintf("正在同步集合: %s", collection)) + s.progress(config.JobID, idx, totalCollections, collection, fmt.Sprintf("迁移集合(%d/%d)", idx+1, totalCollections)) + diffs, err := buildMongoToRedisDiffs(sourceDB, targetClient, collection, effectiveMode) + if err != nil { + return s.fail(config.JobID, totalCollections, result, fmt.Sprintf("分析集合 %s 失败: %v", collection, err)) + } + for _, diff := range diffs { + processedKeys++ + if diff.Action == "same" { + continue + } + s.appendLog(config.JobID, &result, "info", fmt.Sprintf("正在迁移 Key: %s", diff.Document.Key)) + if err := applyMongoRedisDiff(targetClient, diff); err != nil { + return s.fail(config.JobID, totalCollections, result, fmt.Sprintf("写入 Redis Key %s 失败: %v", diff.Document.Key, err)) + } + switch diff.Action { + case "insert": + result.RowsInserted++ + case "update": + result.RowsUpdated++ + } + } + result.TablesSynced++ + s.progress(config.JobID, idx+1, totalCollections, collection, "集合处理完成") + } + + if processedKeys == 0 { + result.Message = "未发现可迁移的 MongoDB Redis 文档" + return result + } + result.Message = fmt.Sprintf("MongoDB 键空间迁移完成,共处理 %d 个集合、%d 个 Key", result.TablesSynced, processedKeys) + return result +} + +func (s 
*SyncEngine) analyzeMongoToRedis(config SyncConfig) SyncAnalyzeResult { + result := SyncAnalyzeResult{Success: true, Tables: []TableDiffSummary{}} + sourceDB, err := newSyncDatabase(config.SourceConfig.Type) + if err != nil { + return SyncAnalyzeResult{Success: false, Message: "初始化源数据库驱动失败: " + err.Error()} + } + if err := sourceDB.Connect(config.SourceConfig); err != nil { + return SyncAnalyzeResult{Success: false, Message: "源 MongoDB 连接失败: " + err.Error()} + } + defer sourceDB.Close() + + collections, err := listMongoRedisCollections(sourceDB, config) + if err != nil { + return SyncAnalyzeResult{Success: false, Message: "获取 MongoDB 集合列表失败: " + err.Error()} + } + + effectiveMode := normalizeSyncMode(config.Mode) + modeWarning := "" + if effectiveMode == "full_overwrite" { + modeWarning = "MongoDB -> Redis 第一版会将 full_overwrite 降级为 insert_update,避免误删 DB 内其他 Key" + effectiveMode = "insert_update" + } + + targetClient := newRedisSourceClient() + targetConfig := withResolvedRedisDB(config.TargetConfig) + if err := targetClient.Connect(targetConfig); err != nil { + return SyncAnalyzeResult{Success: false, Message: "目标 Redis 连接失败: " + err.Error()} + } + defer targetClient.Close() + + for _, collection := range collections { + summary := TableDiffSummary{ + Table: collection, + PKColumn: "key", + CanSync: true, + TargetTableExists: true, + PlannedAction: fmt.Sprintf("迁移到 %s", deriveRedisTargetLabel(config)), + Warnings: []string{ + "MongoDB 集合中的文档会按 keyspace 语义写入 Redis", + "当前仅支持固定文档格式:key/type/ttl/value", + }, + } + if modeWarning != "" { + summary.Warnings = append(summary.Warnings, modeWarning) + } + diffs, err := buildMongoToRedisDiffs(sourceDB, targetClient, collection, effectiveMode) + if err != nil { + summary.CanSync = false + summary.Message = err.Error() + result.Tables = append(result.Tables, summary) + continue + } + for _, diff := range diffs { + switch diff.Action { + case "insert": + summary.Inserts++ + case "update": + summary.Updates++ + default: + summary.Same++ + } + } + if summary.Inserts == 0 && summary.Updates == 0 { + if summary.Same == 0 { + summary.Message = "集合中未发现可迁移文档" + } else { + summary.Message = "目标 Redis 中对应 Key 已是最新状态" + } + } else { + summary.Message = fmt.Sprintf("执行时将写入 %d 个新 Key、更新 %d 个已有 Key", summary.Inserts, summary.Updates) + } + result.Tables = append(result.Tables, summary) + } + result.Message = fmt.Sprintf("已完成 %d 个 MongoDB 集合的 Redis 迁移分析", len(result.Tables)) + return result +} + +func (s *SyncEngine) previewMongoToRedis(config SyncConfig, collection string, limit int) (TableDiffPreview, error) { + sourceDB, err := newSyncDatabase(config.SourceConfig.Type) + if err != nil { + return TableDiffPreview{}, fmt.Errorf("初始化源数据库驱动失败: %w", err) + } + if err := sourceDB.Connect(config.SourceConfig); err != nil { + return TableDiffPreview{}, fmt.Errorf("源 MongoDB 连接失败: %w", err) + } + defer sourceDB.Close() + + targetClient := newRedisSourceClient() + targetConfig := withResolvedRedisDB(config.TargetConfig) + if err := targetClient.Connect(targetConfig); err != nil { + return TableDiffPreview{}, fmt.Errorf("目标 Redis 连接失败: %w", err) + } + defer targetClient.Close() + + effectiveMode := normalizeSyncMode(config.Mode) + if effectiveMode == "full_overwrite" { + effectiveMode = "insert_update" + } + + diffs, err := buildMongoToRedisDiffs(sourceDB, targetClient, collection, effectiveMode) + if err != nil { + return TableDiffPreview{}, err + } + preview := TableDiffPreview{Table: collection, PKColumn: "key", Inserts: []PreviewRow{}, Updates: []PreviewUpdateRow{}, 
Deletes: []PreviewRow{}}
+ for _, diff := range diffs {
+ switch diff.Action {
+ case "insert":
+ preview.TotalInserts++
+ if len(preview.Inserts) < limit {
+ preview.Inserts = append(preview.Inserts, PreviewRow{PK: diff.Document.Key, Row: diff.Document.SourceRow})
+ }
+ case "update":
+ preview.TotalUpdates++
+ if len(preview.Updates) < limit {
+ preview.Updates = append(preview.Updates, PreviewUpdateRow{PK: diff.Document.Key, ChangedColumns: diff.ChangedColumns, Source: diff.Document.SourceRow, Target: buildRedisPreviewRow(diff.Document.Key, diff.Current)})
+ }
+ }
+ }
+ return preview, nil
+}
diff --git a/internal/sync/migration_runtime_helpers.go b/internal/sync/migration_runtime_helpers.go
new file mode 100644
index 0000000..418080c
--- /dev/null
+++ b/internal/sync/migration_runtime_helpers.go
@@ -0,0 +1,58 @@
+package sync
+
+import (
+ "GoNavi-Wails/internal/connection"
+ "fmt"
+ "strings"
+)
+
+func supportsAutoAddColumnsForPair(sourceType string, targetType string) bool {
+ source := normalizeMigrationDBType(sourceType)
+ target := normalizeMigrationDBType(targetType)
+ if isMySQLLikeWritableTargetType(target) {
+ return isMySQLCoreType(source)
+ }
+ if isPGLikeTarget(target) {
+ return isMySQLLikeSourceType(source)
+ }
+ return false
+}
+
+func buildAddColumnSQLForPair(sourceType string, targetType string, targetQueryTable string, sourceCol connection.ColumnDefinition) (string, error) {
+ source := normalizeMigrationDBType(sourceType)
+ target := normalizeMigrationDBType(targetType)
+ switch {
+ case isMySQLCoreType(source) && isMySQLLikeWritableTargetType(target):
+ colType := sanitizeMySQLColumnType(sourceCol.Type)
+ return fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s %s NULL",
+ quoteQualifiedIdentByType("mysql", targetQueryTable),
+ quoteIdentByType("mysql", sourceCol.Name),
+ colType,
+ ), nil
+ case isMySQLLikeSourceType(source) && isPGLikeTarget(target):
+ colType, _, warnings := mapMySQLColumnToKingbase(sourceCol)
+ // 对已有目标表补字段时保守处理:不补建 identity/自增语义,统一按普通可空列补建,
+ // 因此此处有意忽略映射返回的 warnings。
+ _ = warnings
+ return fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s %s NULL",
+ quoteQualifiedIdentByType(target, targetQueryTable),
+ quoteIdentByType(target, sourceCol.Name),
+ colType,
+ ), nil
+ default:
+ return "", fmt.Errorf("当前不支持 source=%s target=%s 的自动补字段", sourceType, targetType)
+ }
+}
+
+func executeSQLStatements(execFn func(string) (int64, error), statements []string) error {
+ for _, stmt := range statements {
+ trimmed := strings.TrimSpace(stmt)
+ if trimmed == "" {
+ continue
+ }
+ if _, err := execFn(trimmed); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/internal/sync/migration_schema_inference.go b/internal/sync/migration_schema_inference.go
new file mode 100644
index 0000000..178ef4e
--- /dev/null
+++ b/internal/sync/migration_schema_inference.go
@@ -0,0 +1,53 @@
+package sync
+
+import (
+ "fmt"
+ "strings"
+)
+
+type SchemaInferenceStrategy string
+
+const (
+ SchemaInferenceStrategySample SchemaInferenceStrategy = "sample"
+ SchemaInferenceStrategyStrict SchemaInferenceStrategy = "strict"
+)
+
+func shouldUseSchemaInference(sourceType string, targetType string) bool {
+ sourceModel := classifyMigrationDataModel(sourceType)
+ targetModel := classifyMigrationDataModel(targetType)
+ return sourceModel == MigrationDataModelDocument && targetModel == MigrationDataModelRelational
+}
+
+func inferMigrationObjectKind(sourceType string, targetType string) MigrationObjectKind {
+ sourceModel := classifyMigrationDataModel(sourceType)
+ targetModel := classifyMigrationDataModel(targetType) + switch { + case sourceModel == MigrationDataModelDocument || targetModel == MigrationDataModelDocument: + return MigrationObjectKindCollection + case sourceModel == MigrationDataModelKeyValue || targetModel == MigrationDataModelKeyValue: + return MigrationObjectKindKeyspace + default: + return MigrationObjectKindTable + } +} + +func inferSchemaForPair(sourceType string, targetType string, objectName string) (SchemaInferenceResult, error) { + if !shouldUseSchemaInference(sourceType, targetType) { + return SchemaInferenceResult{}, fmt.Errorf("当前迁移对 %s -> %s 不需要 schema 推断", sourceType, targetType) + } + return SchemaInferenceResult{ + Object: CanonicalObjectSpec{ + Name: strings.TrimSpace(objectName), + Kind: MigrationObjectKindCollection, + Fields: []CanonicalFieldSpec{}, + }, + Issues: []SchemaInferenceIssue{ + { + Level: "info", + Message: "MongoDB -> 关系型数据库的 schema 推断能力尚在建设中,当前仅提供内核入口。", + Resolution: "后续将基于样本数据生成列定义与类型降级策略。", + }, + }, + NeedsReview: true, + }, nil +} diff --git a/internal/sync/migration_tdengine.go b/internal/sync/migration_tdengine.go new file mode 100644 index 0000000..7e45e64 --- /dev/null +++ b/internal/sync/migration_tdengine.go @@ -0,0 +1,296 @@ +package sync + +import ( + "GoNavi-Wails/internal/connection" + "GoNavi-Wails/internal/db" + "fmt" + "strconv" + "strings" +) + +func buildTDengineToMySQLPlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + plan := SchemaMigrationPlan{} + sourceType := resolveMigrationDBType(config.SourceConfig) + targetType := resolveMigrationDBType(config.TargetConfig) + plan.SourceSchema, plan.SourceTable = normalizeSchemaAndTable(sourceType, config.SourceConfig.Database, tableName) + plan.TargetSchema, plan.TargetTable = normalizeSchemaAndTable(targetType, config.TargetConfig.Database, tableName) + plan.SourceQueryTable = qualifiedNameForQuery(sourceType, plan.SourceSchema, plan.SourceTable, tableName) + plan.TargetQueryTable = qualifiedNameForQuery(targetType, plan.TargetSchema, plan.TargetTable, tableName) + plan.PlannedAction = "使用已有目标表导入" + + sourceCols, sourceExists, err := inspectTableColumns(sourceDB, plan.SourceSchema, plan.SourceTable) + if err != nil { + return plan, nil, nil, fmt.Errorf("获取源表字段失败: %w", err) + } + if !sourceExists { + return plan, nil, nil, fmt.Errorf("源表不存在或无列定义: %s", tableName) + } + + targetCols, targetExists, err := inspectTableColumns(targetDB, plan.TargetSchema, plan.TargetTable) + if err != nil { + return plan, sourceCols, nil, fmt.Errorf("获取目标表字段失败: %w", err) + } + plan.TargetTableExists = targetExists + plan.Warnings = append(plan.Warnings, tdengineSemanticWarnings(sourceCols)...) 
+ + strategy := normalizeTargetTableStrategy(config.TargetTableStrategy) + if targetExists { + missing := diffMissingColumnNames(sourceCols, targetCols) + if len(missing) > 0 { + plan.Warnings = append(plan.Warnings, fmt.Sprintf("目标表缺失字段 %d 个:%s", len(missing), strings.Join(missing, ", "))) + } + if strategy != "existing_only" { + plan.Warnings = append(plan.Warnings, "TDengine 源端当前不自动补齐已有目标表字段,请先确认目标表结构") + } + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + } + + switch strategy { + case "existing_only": + plan.PlannedAction = "目标表不存在,需先手工创建" + plan.Warnings = append(plan.Warnings, "当前策略要求目标表已存在,执行时不会自动建表") + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + case "smart", "auto_create_if_missing": + plan.AutoCreate = true + plan.PlannedAction = "目标表不存在,将自动建表后导入" + createSQL, warnings, unsupported := buildTDengineToMySQLCreateTableSQL(plan.TargetQueryTable, sourceCols) + plan.CreateTableSQL = createSQL + plan.Warnings = append(plan.Warnings, warnings...) + plan.UnsupportedObjects = append(plan.UnsupportedObjects, unsupported...) + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + default: + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + } +} + +func buildTDengineToPGLikePlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + plan := SchemaMigrationPlan{} + sourceType := resolveMigrationDBType(config.SourceConfig) + targetType := resolveMigrationDBType(config.TargetConfig) + plan.SourceSchema, plan.SourceTable = normalizeSchemaAndTable(sourceType, config.SourceConfig.Database, tableName) + plan.TargetSchema, plan.TargetTable = normalizeSchemaAndTable(targetType, config.TargetConfig.Database, tableName) + plan.SourceQueryTable = qualifiedNameForQuery(sourceType, plan.SourceSchema, plan.SourceTable, tableName) + plan.TargetQueryTable = qualifiedNameForQuery(targetType, plan.TargetSchema, plan.TargetTable, tableName) + plan.PlannedAction = "使用已有目标表导入" + + sourceCols, sourceExists, err := inspectTableColumns(sourceDB, plan.SourceSchema, plan.SourceTable) + if err != nil { + return plan, nil, nil, fmt.Errorf("获取源表字段失败: %w", err) + } + if !sourceExists { + return plan, nil, nil, fmt.Errorf("源表不存在或无列定义: %s", tableName) + } + + targetCols, targetExists, err := inspectTableColumns(targetDB, plan.TargetSchema, plan.TargetTable) + if err != nil { + return plan, sourceCols, nil, fmt.Errorf("获取目标表字段失败: %w", err) + } + plan.TargetTableExists = targetExists + plan.Warnings = append(plan.Warnings, tdengineSemanticWarnings(sourceCols)...) 
+ + strategy := normalizeTargetTableStrategy(config.TargetTableStrategy) + if targetExists { + missing := diffMissingColumnNames(sourceCols, targetCols) + if len(missing) > 0 { + plan.Warnings = append(plan.Warnings, fmt.Sprintf("目标表缺失字段 %d 个:%s", len(missing), strings.Join(missing, ", "))) + } + if strategy != "existing_only" { + plan.Warnings = append(plan.Warnings, "TDengine 源端当前不自动补齐已有目标表字段,请先确认目标表结构") + } + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + } + + switch strategy { + case "existing_only": + plan.PlannedAction = "目标表不存在,需先手工创建" + plan.Warnings = append(plan.Warnings, "当前策略要求目标表已存在,执行时不会自动建表") + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + case "smart", "auto_create_if_missing": + plan.AutoCreate = true + plan.PlannedAction = "目标表不存在,将自动建表后导入" + createSQL, warnings, unsupported := buildTDengineToPGLikeCreateTableSQL(targetType, plan.TargetQueryTable, sourceCols) + plan.CreateTableSQL = createSQL + plan.Warnings = append(plan.Warnings, warnings...) + plan.UnsupportedObjects = append(plan.UnsupportedObjects, unsupported...) + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + default: + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + } +} + +func buildTDengineToMySQLCreateTableSQL(targetQueryTable string, sourceCols []connection.ColumnDefinition) (string, []string, []string) { + columnDefs := make([]string, 0, len(sourceCols)) + warnings := make([]string, 0) + unsupported := []string{"TDengine 的索引/外键/触发器/超级表/TTL 等时序语义当前不会自动迁移"} + for _, col := range sourceCols { + def, colWarnings := buildTDengineToMySQLColumnDefinition(col) + columnDefs = append(columnDefs, fmt.Sprintf("%s %s", quoteIdentByType("mysql", col.Name), def)) + warnings = append(warnings, colWarnings...) + } + createSQL := fmt.Sprintf("CREATE TABLE %s (\n %s\n)", quoteQualifiedIdentByType("mysql", targetQueryTable), strings.Join(columnDefs, ",\n ")) + return createSQL, dedupeStrings(warnings), dedupeStrings(unsupported) +} + +func buildTDengineToPGLikeCreateTableSQL(targetType string, targetQueryTable string, sourceCols []connection.ColumnDefinition) (string, []string, []string) { + columnDefs := make([]string, 0, len(sourceCols)) + warnings := make([]string, 0) + unsupported := []string{"TDengine 的索引/外键/触发器/超级表/TTL 等时序语义当前不会自动迁移"} + for _, col := range sourceCols { + def, colWarnings := buildTDengineToPGLikeColumnDefinition(col) + columnDefs = append(columnDefs, fmt.Sprintf("%s %s", quoteIdentByType(targetType, col.Name), def)) + warnings = append(warnings, colWarnings...) 
+ } + createSQL := fmt.Sprintf("CREATE TABLE %s (\n %s\n)", quoteQualifiedIdentByType(targetType, targetQueryTable), strings.Join(columnDefs, ",\n ")) + return createSQL, dedupeStrings(warnings), dedupeStrings(unsupported) +} + +func buildTDengineToMySQLColumnDefinition(col connection.ColumnDefinition) (string, []string) { + targetType, warnings := mapTDengineColumnToMySQL(col) + parts := []string{targetType} + if strings.EqualFold(strings.TrimSpace(col.Nullable), "NO") { + parts = append(parts, "NOT NULL") + } else { + parts = append(parts, "NULL") + } + return strings.Join(parts, " "), warnings +} + +func buildTDengineToPGLikeColumnDefinition(col connection.ColumnDefinition) (string, []string) { + targetType, warnings := mapTDengineColumnToPGLike(col) + parts := []string{targetType} + if strings.EqualFold(strings.TrimSpace(col.Nullable), "NO") { + parts = append(parts, "NOT NULL") + } else { + parts = append(parts, "NULL") + } + return strings.Join(parts, " "), warnings +} + +func tdengineSemanticWarnings(sourceCols []connection.ColumnDefinition) []string { + warnings := []string{"TDengine 到关系型目标库当前仅迁移列与数据;超级表、TAG 关联、保留策略等时序语义会降级或丢失"} + for _, col := range sourceCols { + if isTDengineTagColumn(col) { + warnings = append(warnings, fmt.Sprintf("字段 %s 为 TDengine TAG 列,迁移到关系型目标后将降级为普通字段", col.Name)) + } + } + return dedupeStrings(warnings) +} + +func isTDengineTagColumn(col connection.ColumnDefinition) bool { + return strings.EqualFold(strings.TrimSpace(col.Key), "TAG") || strings.Contains(strings.ToUpper(strings.TrimSpace(col.Extra)), "TAG") +} + +func parseTDengineType(raw string) (string, int) { + cleaned := strings.TrimSpace(strings.ToUpper(raw)) + if cleaned == "" { + return "", 0 + } + base := cleaned + length := 0 + if idx := strings.Index(base, "("); idx >= 0 { + end := strings.Index(base[idx+1:], ")") + if end >= 0 { + lengthText := strings.TrimSpace(base[idx+1 : idx+1+end]) + if v, err := strconv.Atoi(lengthText); err == nil { + length = v + } + } + base = strings.TrimSpace(base[:idx]) + } + return base, length +} + +func mapTDengineColumnToMySQL(col connection.ColumnDefinition) (string, []string) { + base, length := parseTDengineType(col.Type) + warnings := make([]string, 0) + if isTDengineTagColumn(col) { + warnings = append(warnings, fmt.Sprintf("字段 %s 为 TDengine TAG 列,已按普通列映射", col.Name)) + } + switch base { + case "BOOL", "BOOLEAN": + return "tinyint(1)", warnings + case "TINYINT": + return "tinyint", warnings + case "UTINYINT": + return "tinyint unsigned", warnings + case "SMALLINT": + return "smallint", warnings + case "USMALLINT": + return "smallint unsigned", warnings + case "INT", "INTEGER": + return "int", warnings + case "UINT": + return "int unsigned", warnings + case "BIGINT": + return "bigint", warnings + case "UBIGINT": + return "bigint unsigned", warnings + case "FLOAT": + return "float", warnings + case "DOUBLE": + return "double", warnings + case "DECIMAL", "NUMERIC": + if length > 0 { + return strings.ToLower(strings.TrimSpace(col.Type)), warnings + } + return "decimal(38,10)", warnings + case "TIMESTAMP": + return "datetime", warnings + case "DATE": + return "date", warnings + case "JSON": + return "json", warnings + case "BINARY", "NCHAR", "VARCHAR", "VARBINARY": + if length > 0 && length <= 65535 { + return fmt.Sprintf("varchar(%d)", length), warnings + } + return "text", warnings + default: + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 暂无专门 MySQL 映射,已降级为 text", col.Name, col.Type)) + return "text", warnings + } +} + +func 
mapTDengineColumnToPGLike(col connection.ColumnDefinition) (string, []string) { + base, length := parseTDengineType(col.Type) + warnings := make([]string, 0) + if isTDengineTagColumn(col) { + warnings = append(warnings, fmt.Sprintf("字段 %s 为 TDengine TAG 列,已按普通列映射", col.Name)) + } + switch base { + case "BOOL", "BOOLEAN": + return "boolean", warnings + case "TINYINT", "UTINYINT", "SMALLINT": + return "smallint", warnings + case "USMALLINT", "INT", "INTEGER": + return "integer", warnings + case "UINT", "BIGINT": + return "bigint", warnings + case "UBIGINT": + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 UBIGINT 已映射为 numeric(20,0) 以避免无符号溢出", col.Name)) + return "numeric(20,0)", warnings + case "FLOAT": + return "real", warnings + case "DOUBLE": + return "double precision", warnings + case "DECIMAL", "NUMERIC": + if length > 0 { + return strings.ToLower(strings.TrimSpace(col.Type)), warnings + } + return "numeric(38,10)", warnings + case "TIMESTAMP": + return "timestamp", warnings + case "DATE": + return "date", warnings + case "JSON": + return "jsonb", warnings + case "BINARY", "NCHAR", "VARCHAR", "VARBINARY": + if length > 0 { + return fmt.Sprintf("varchar(%d)", length), warnings + } + return "text", warnings + default: + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 暂无专门 PG-like 映射,已降级为 text", col.Name, col.Type)) + return "text", warnings + } +} diff --git a/internal/sync/migration_tdengine_target.go b/internal/sync/migration_tdengine_target.go new file mode 100644 index 0000000..50a1839 --- /dev/null +++ b/internal/sync/migration_tdengine_target.go @@ -0,0 +1,657 @@ +package sync + +import ( + "GoNavi-Wails/internal/connection" + "GoNavi-Wails/internal/db" + "fmt" + "strconv" + "strings" +) + +type mySQLLikeToTDenginePlanner struct{} + +type pgLikeToTDenginePlanner struct{} + +type clickHouseToTDenginePlanner struct{} + +type tdengineToTDenginePlanner struct{} + +func (mySQLLikeToTDenginePlanner) Name() string { return "mysqllike-tdengine-planner" } + +func (mySQLLikeToTDenginePlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel { + sourceType := resolveMigrationDBType(ctx.Config.SourceConfig) + targetType := resolveMigrationDBType(ctx.Config.TargetConfig) + if isMySQLLikeSourceType(sourceType) && targetType == "tdengine" { + return MigrationSupportLevelFull + } + return MigrationSupportLevelUnsupported +} + +func (mySQLLikeToTDenginePlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + return buildMySQLLikeToTDenginePlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB) +} + +func (pgLikeToTDenginePlanner) Name() string { return "pglike-tdengine-planner" } + +func (pgLikeToTDenginePlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel { + sourceType := resolveMigrationDBType(ctx.Config.SourceConfig) + targetType := resolveMigrationDBType(ctx.Config.TargetConfig) + if isPGLikeSource(sourceType) && targetType == "tdengine" { + return MigrationSupportLevelFull + } + return MigrationSupportLevelUnsupported +} + +func (pgLikeToTDenginePlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + return buildPGLikeToTDenginePlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB) +} + +func buildMySQLLikeToTDenginePlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, 
[]connection.ColumnDefinition, error) { + return buildSourceToTDenginePlan(config, tableName, sourceDB, targetDB, isMySQLLikeTDengineTimestampCandidate, buildMySQLLikeToTDengineCreateTableSQL) +} + +func buildPGLikeToTDenginePlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + return buildSourceToTDenginePlan(config, tableName, sourceDB, targetDB, isPGLikeTDengineTimestampCandidate, buildPGLikeToTDengineCreateTableSQL) +} + +func buildClickHouseToTDenginePlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + return buildSourceToTDenginePlan(config, tableName, sourceDB, targetDB, isClickHouseTDengineTimestampCandidate, buildClickHouseToTDengineCreateTableSQL) +} + +func buildTDengineToTDenginePlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + return buildSourceToTDenginePlan(config, tableName, sourceDB, targetDB, isTDengineTDengineTimestampCandidate, buildTDengineToTDengineCreateTableSQL) +} + +func (clickHouseToTDenginePlanner) Name() string { return "clickhouse-tdengine-planner" } + +func (clickHouseToTDenginePlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel { + sourceType := resolveMigrationDBType(ctx.Config.SourceConfig) + targetType := resolveMigrationDBType(ctx.Config.TargetConfig) + if sourceType == "clickhouse" && targetType == "tdengine" { + return MigrationSupportLevelFull + } + return MigrationSupportLevelUnsupported +} + +func (clickHouseToTDenginePlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + return buildClickHouseToTDenginePlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB) +} + +func (tdengineToTDenginePlanner) Name() string { return "tdengine-tdengine-planner" } + +func (tdengineToTDenginePlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel { + sourceType := resolveMigrationDBType(ctx.Config.SourceConfig) + targetType := resolveMigrationDBType(ctx.Config.TargetConfig) + if sourceType == "tdengine" && targetType == "tdengine" { + return MigrationSupportLevelFull + } + return MigrationSupportLevelUnsupported +} + +func (tdengineToTDenginePlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + return buildTDengineToTDenginePlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB) +} + +type tdengineTimestampCandidate func(connection.ColumnDefinition) bool + +type tdengineCreateTableBuilder func(string, []connection.ColumnDefinition, int) (string, []string, []string) + +func buildSourceToTDenginePlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database, isTimestamp tdengineTimestampCandidate, buildCreateSQL tdengineCreateTableBuilder) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + plan := SchemaMigrationPlan{} + sourceType := resolveMigrationDBType(config.SourceConfig) + targetType := resolveMigrationDBType(config.TargetConfig) + plan.SourceSchema, plan.SourceTable = normalizeSchemaAndTable(sourceType, config.SourceConfig.Database, tableName) + plan.TargetSchema, plan.TargetTable = 
normalizeSchemaAndTable(targetType, config.TargetConfig.Database, tableName) + plan.SourceQueryTable = qualifiedNameForQuery(sourceType, plan.SourceSchema, plan.SourceTable, tableName) + plan.TargetQueryTable = qualifiedNameForQuery(targetType, plan.TargetSchema, plan.TargetTable, tableName) + plan.PlannedAction = "使用已有目标表导入" + + sourceCols, sourceExists, err := inspectTableColumns(sourceDB, plan.SourceSchema, plan.SourceTable) + if err != nil { + return plan, nil, nil, fmt.Errorf("获取源表字段失败: %w", err) + } + if !sourceExists { + return plan, nil, nil, fmt.Errorf("源表不存在或无列定义: %s", tableName) + } + + plan.Warnings = append(plan.Warnings, tdengineTargetBaseWarnings()...) + timestampIndex := findTDengineTimestampColumn(sourceCols, isTimestamp) + if timestampIndex < 0 { + plan.Warnings = append(plan.Warnings, tdengineTargetMissingTimeWarning()) + } + + targetCols, targetExists, err := inspectTableColumns(targetDB, plan.TargetSchema, plan.TargetTable) + if err != nil { + return plan, sourceCols, nil, fmt.Errorf("获取目标表字段失败: %w", err) + } + plan.TargetTableExists = targetExists + + strategy := normalizeTargetTableStrategy(config.TargetTableStrategy) + if targetExists { + missing := diffMissingColumnNames(sourceCols, targetCols) + if len(missing) > 0 { + plan.Warnings = append(plan.Warnings, fmt.Sprintf("目标表缺失字段 %d 个:%s", len(missing), strings.Join(missing, ", "))) + } + if strategy != "existing_only" { + plan.Warnings = append(plan.Warnings, "TDengine 目标端当前不自动补齐已有目标表字段,请先确认目标表结构") + } + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + } + + switch strategy { + case "existing_only": + plan.PlannedAction = "目标表不存在,需先手工创建" + plan.Warnings = append(plan.Warnings, "当前策略要求目标表已存在,执行时不会自动建表") + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + case "smart", "auto_create_if_missing": + if timestampIndex < 0 { + plan.PlannedAction = "源表未识别到可映射为 TDengine 首列的时间列,无法自动建表" + plan.UnsupportedObjects = append(plan.UnsupportedObjects, "TDengine regular table 首列必须为 TIMESTAMP,当前源表缺少可直接映射的时间列") + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + } + plan.AutoCreate = true + plan.PlannedAction = "目标表不存在,将自动建表后导入" + createSQL, warnings, unsupported := buildCreateSQL(plan.TargetQueryTable, sourceCols, timestampIndex) + plan.CreateTableSQL = createSQL + plan.Warnings = append(plan.Warnings, warnings...) + plan.UnsupportedObjects = append(plan.UnsupportedObjects, unsupported...) 
+ return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + default: + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + } +} + +func tdengineTargetBaseWarnings() []string { + return []string{ + "TDengine 目标端当前仅支持 INSERT 写入;若存在差异 update/delete,执行期会被拒绝", + "TDengine 目标端 auto-create 当前仅创建基础表;索引、外键、触发器、supertable/TAGS/TTL 不会自动迁移", + } +} + +func tdengineTargetMissingTimeWarning() string { + return "源表缺少可映射的时间列,自动建表将不可用;如需继续,请先人工准备 TDengine 目标表与时间列" +} + +func findTDengineTimestampColumn(sourceCols []connection.ColumnDefinition, candidate tdengineTimestampCandidate) int { + preferred := []string{"ts", "timestamp", "event_time", "eventtime", "created_at", "create_time", "occurred_at"} + for _, name := range preferred { + for idx, col := range sourceCols { + if !candidate(col) { + continue + } + if strings.EqualFold(strings.TrimSpace(col.Name), name) { + return idx + } + } + } + for idx, col := range sourceCols { + if candidate(col) { + return idx + } + } + return -1 +} + +func reorderTDengineColumns(sourceCols []connection.ColumnDefinition, timestampIndex int) []connection.ColumnDefinition { + if timestampIndex <= 0 || timestampIndex >= len(sourceCols) { + cloned := make([]connection.ColumnDefinition, len(sourceCols)) + copy(cloned, sourceCols) + return cloned + } + ordered := make([]connection.ColumnDefinition, 0, len(sourceCols)) + ordered = append(ordered, sourceCols[timestampIndex]) + for idx, col := range sourceCols { + if idx == timestampIndex { + continue + } + ordered = append(ordered, col) + } + return ordered +} + +func buildMySQLLikeToTDengineCreateTableSQL(targetQueryTable string, sourceCols []connection.ColumnDefinition, timestampIndex int) (string, []string, []string) { + ordered := reorderTDengineColumns(sourceCols, timestampIndex) + columnDefs := make([]string, 0, len(ordered)) + warnings := make([]string, 0) + unsupported := []string{"源表索引/外键/触发器/唯一约束/自增语义当前不会自动迁移到 TDengine"} + if timestampIndex != 0 && timestampIndex >= 0 && timestampIndex < len(sourceCols) { + warnings = append(warnings, fmt.Sprintf("TDengine 基础表要求时间列优先,已将字段 %s 调整为首列", sourceCols[timestampIndex].Name)) + } + for idx, col := range ordered { + def, colWarnings := mapMySQLLikeColumnToTDengine(col, idx == 0) + warnings = append(warnings, colWarnings...) + columnDefs = append(columnDefs, fmt.Sprintf("%s %s", quoteIdentByType("tdengine", col.Name), def)) + } + createSQL := fmt.Sprintf("CREATE TABLE %s (\n %s\n)", quoteQualifiedIdentByType("tdengine", targetQueryTable), strings.Join(columnDefs, ",\n ")) + return createSQL, dedupeStrings(warnings), dedupeStrings(unsupported) +} + +func buildPGLikeToTDengineCreateTableSQL(targetQueryTable string, sourceCols []connection.ColumnDefinition, timestampIndex int) (string, []string, []string) { + ordered := reorderTDengineColumns(sourceCols, timestampIndex) + columnDefs := make([]string, 0, len(ordered)) + warnings := make([]string, 0) + unsupported := []string{"源表索引/外键/触发器/唯一约束/identity/sequence 语义当前不会自动迁移到 TDengine"} + if timestampIndex != 0 && timestampIndex >= 0 && timestampIndex < len(sourceCols) { + warnings = append(warnings, fmt.Sprintf("TDengine 基础表要求时间列优先,已将字段 %s 调整为首列", sourceCols[timestampIndex].Name)) + } + for idx, col := range ordered { + def, colWarnings := mapPGLikeColumnToTDengine(col, idx == 0) + warnings = append(warnings, colWarnings...) 
+ columnDefs = append(columnDefs, fmt.Sprintf("%s %s", quoteIdentByType("tdengine", col.Name), def)) + } + createSQL := fmt.Sprintf("CREATE TABLE %s (\n %s\n)", quoteQualifiedIdentByType("tdengine", targetQueryTable), strings.Join(columnDefs, ",\n ")) + return createSQL, dedupeStrings(warnings), dedupeStrings(unsupported) +} + +func buildClickHouseToTDengineCreateTableSQL(targetQueryTable string, sourceCols []connection.ColumnDefinition, timestampIndex int) (string, []string, []string) { + ordered := reorderTDengineColumns(sourceCols, timestampIndex) + columnDefs := make([]string, 0, len(ordered)) + warnings := make([]string, 0) + unsupported := []string{"源表 ORDER BY/PARTITION/TTL/Projection/物化视图 语义当前不会自动迁移到 TDengine"} + if timestampIndex != 0 && timestampIndex >= 0 && timestampIndex < len(sourceCols) { + warnings = append(warnings, fmt.Sprintf("TDengine 基础表要求时间列优先,已将字段 %s 调整为首列", sourceCols[timestampIndex].Name)) + } + for idx, col := range ordered { + def, colWarnings := mapClickHouseColumnToTDengine(col, idx == 0) + warnings = append(warnings, colWarnings...) + columnDefs = append(columnDefs, fmt.Sprintf("%s %s", quoteIdentByType("tdengine", col.Name), def)) + } + createSQL := fmt.Sprintf("CREATE TABLE %s (\n %s\n)", quoteQualifiedIdentByType("tdengine", targetQueryTable), strings.Join(columnDefs, ",\n ")) + return createSQL, dedupeStrings(warnings), dedupeStrings(unsupported) +} + +func buildTDengineToTDengineCreateTableSQL(targetQueryTable string, sourceCols []connection.ColumnDefinition, timestampIndex int) (string, []string, []string) { + ordered := reorderTDengineColumns(sourceCols, timestampIndex) + columnDefs := make([]string, 0, len(ordered)) + warnings := make([]string, 0) + unsupported := []string{"源表 supertable/TAGS/TTL/保留策略/索引 语义当前不会自动迁移到 TDengine regular table"} + if timestampIndex != 0 && timestampIndex >= 0 && timestampIndex < len(sourceCols) { + warnings = append(warnings, fmt.Sprintf("TDengine 基础表要求时间列优先,已将字段 %s 调整为首列", sourceCols[timestampIndex].Name)) + } + for idx, col := range ordered { + def, colWarnings := mapTDengineColumnToTDengine(col, idx == 0) + warnings = append(warnings, colWarnings...) 
+ columnDefs = append(columnDefs, fmt.Sprintf("%s %s", quoteIdentByType("tdengine", col.Name), def))
+ }
+ createSQL := fmt.Sprintf("CREATE TABLE %s (\n %s\n)", quoteQualifiedIdentByType("tdengine", targetQueryTable), strings.Join(columnDefs, ",\n "))
+ return createSQL, dedupeStrings(warnings), dedupeStrings(unsupported)
+}
+
+func isMySQLLikeTDengineTimestampCandidate(col connection.ColumnDefinition) bool {
+ raw := strings.ToLower(strings.TrimSpace(col.Type))
+ clean := strings.ReplaceAll(raw, " unsigned", "")
+ clean = strings.ReplaceAll(clean, " zerofill", "")
+ return strings.HasPrefix(clean, "timestamp") || strings.HasPrefix(clean, "datetime")
+}
+
+func isPGLikeTDengineTimestampCandidate(col connection.ColumnDefinition) bool {
+ raw := strings.ToLower(strings.TrimSpace(col.Type))
+ return strings.HasPrefix(raw, "timestamp")
+}
+
+func isClickHouseTDengineTimestampCandidate(col connection.ColumnDefinition) bool {
+ lower, _ := unwrapClickHouseTDengineType(col.Type)
+ return strings.HasPrefix(lower, "datetime")
+}
+
+func isTDengineTDengineTimestampCandidate(col connection.ColumnDefinition) bool {
+ base, _ := parseTDengineType(col.Type)
+ return base == "TIMESTAMP"
+}
+
+func mapMySQLLikeColumnToTDengine(col connection.ColumnDefinition, forceTimestamp bool) (string, []string) {
+ warnings := make([]string, 0)
+ if forceTimestamp {
+ if !isMySQLLikeTDengineTimestampCandidate(col) {
+ warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已提升为 TDengine 首列 TIMESTAMP", col.Name, col.Type))
+ }
+ return "TIMESTAMP", warnings
+ }
+
+ raw := strings.ToLower(strings.TrimSpace(col.Type))
+ if raw == "" {
+ return "VARCHAR(1024)", []string{fmt.Sprintf("字段 %s 类型为空,已降级为 VARCHAR(1024)", col.Name)}
+ }
+ unsigned := strings.Contains(raw, "unsigned")
+ clean := strings.ReplaceAll(raw, " unsigned", "")
+ clean = strings.ReplaceAll(clean, " zerofill", "")
+ isAutoIncrement := strings.Contains(strings.ToLower(strings.TrimSpace(col.Extra)), "auto_increment")
+ if isAutoIncrement {
+ warnings = append(warnings, fmt.Sprintf("字段 %s 自增语义不会迁移到 TDengine", col.Name))
+ }
+ if col.Key == "PRI" || col.Key == "PK" {
+ warnings = append(warnings, fmt.Sprintf("字段 %s 主键语义不会按关系型约束迁移到 TDengine", col.Name))
+ }
+
+ switch {
+ case strings.HasPrefix(clean, "tinyint(1)") && !unsigned && !isAutoIncrement:
+ return "BOOL", warnings
+ case strings.HasPrefix(clean, "tinyint"):
+ if unsigned {
+ return "UTINYINT", warnings
+ }
+ return "TINYINT", warnings
+ case strings.HasPrefix(clean, "smallint"):
+ if unsigned {
+ return "USMALLINT", warnings
+ }
+ return "SMALLINT", warnings
+ case strings.HasPrefix(clean, "mediumint"), strings.HasPrefix(clean, "int"), strings.HasPrefix(clean, "integer"):
+ if unsigned {
+ return "UINT", warnings
+ }
+ return "INT", warnings
+ case strings.HasPrefix(clean, "bigint"):
+ if unsigned {
+ return "UBIGINT", warnings
+ }
+ return "BIGINT", warnings
+ case strings.HasPrefix(clean, "decimal"), strings.HasPrefix(clean, "numeric"):
+ return normalizeTDengineDecimalType(clean), warnings
+ case strings.HasPrefix(clean, "float"):
+ return "FLOAT", warnings
+ case strings.HasPrefix(clean, "double"):
+ return "DOUBLE", warnings
+ case clean == "date":
+ warnings = append(warnings, fmt.Sprintf("字段 %s 类型 date 已降级映射为 TIMESTAMP", col.Name))
+ return "TIMESTAMP", warnings
+ case strings.HasPrefix(clean, "timestamp"), strings.HasPrefix(clean, "datetime"):
+ return "TIMESTAMP", warnings
+ case strings.HasPrefix(clean, "time"):
+ warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 暂无稳定 
TDengine 时间-only 映射,已降级为 VARCHAR(64)", col.Name, col.Type)) + return "VARCHAR(64)", warnings + case strings.HasPrefix(clean, "char("), strings.HasPrefix(clean, "varchar("): + return fmt.Sprintf("VARCHAR(%d)", normalizeTDengineVarcharLength(extractFirstTypeLength(clean), 255)), warnings + case strings.HasPrefix(clean, "tinytext"), strings.HasPrefix(clean, "text"), strings.HasPrefix(clean, "mediumtext"), strings.HasPrefix(clean, "longtext"): + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已降级为 VARCHAR(4096)", col.Name, col.Type)) + return "VARCHAR(4096)", warnings + case strings.HasPrefix(clean, "json"): + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 因 TDengine JSON 仅适用于 TAG,已降级为 VARCHAR(4096)", col.Name, col.Type)) + return "VARCHAR(4096)", warnings + case strings.HasPrefix(clean, "enum"), strings.HasPrefix(clean, "set"): + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已降级为 VARCHAR(255)", col.Name, col.Type)) + return "VARCHAR(255)", warnings + case strings.HasPrefix(clean, "binary"), strings.HasPrefix(clean, "varbinary"), strings.HasPrefix(clean, "tinyblob"), strings.HasPrefix(clean, "blob"), strings.HasPrefix(clean, "mediumblob"), strings.HasPrefix(clean, "longblob"): + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已按字符串语义降级为 VARCHAR(4096)", col.Name, col.Type)) + return "VARCHAR(4096)", warnings + default: + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 暂无专门 TDengine 映射,已降级为 VARCHAR(1024)", col.Name, col.Type)) + return "VARCHAR(1024)", warnings + } +} + +func mapPGLikeColumnToTDengine(col connection.ColumnDefinition, forceTimestamp bool) (string, []string) { + warnings := make([]string, 0) + if forceTimestamp { + if raw := strings.ToLower(strings.TrimSpace(col.Type)); !strings.HasPrefix(raw, "timestamp") { + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已提升为 TDengine 首列 TIMESTAMP", col.Name, col.Type)) + } + return "TIMESTAMP", warnings + } + + raw := strings.ToLower(strings.TrimSpace(col.Type)) + if raw == "" { + return "VARCHAR(1024)", []string{fmt.Sprintf("字段 %s 类型为空,已降级为 VARCHAR(1024)", col.Name)} + } + if col.Key == "PRI" || col.Key == "PK" { + warnings = append(warnings, fmt.Sprintf("字段 %s 主键语义不会按关系型约束迁移到 TDengine", col.Name)) + } + if strings.Contains(strings.ToLower(strings.TrimSpace(col.Extra)), "identity") || strings.Contains(strings.ToLower(strings.TrimSpace(col.Extra)), "auto_increment") { + warnings = append(warnings, fmt.Sprintf("字段 %s 自增/identity 语义不会迁移到 TDengine", col.Name)) + } + + switch { + case raw == "boolean" || strings.HasPrefix(raw, "bool"): + return "BOOL", warnings + case raw == "smallint": + return "SMALLINT", warnings + case raw == "integer" || raw == "int4": + return "INT", warnings + case raw == "bigint" || raw == "int8": + return "BIGINT", warnings + case strings.HasPrefix(raw, "numeric"), strings.HasPrefix(raw, "decimal"): + return normalizeTDengineDecimalType(raw), warnings + case raw == "real" || raw == "float4": + return "FLOAT", warnings + case raw == "double precision" || raw == "float8": + return "DOUBLE", warnings + case raw == "date": + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 date 已降级映射为 TIMESTAMP", col.Name)) + return "TIMESTAMP", warnings + case strings.HasPrefix(raw, "timestamp"): + return "TIMESTAMP", warnings + case strings.HasPrefix(raw, "time"): + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 暂无稳定 TDengine 时间-only 映射,已降级为 VARCHAR(64)", col.Name, col.Type)) + return "VARCHAR(64)", warnings + case strings.HasPrefix(raw, "character varying("), strings.HasPrefix(raw, "varchar("), 
strings.HasPrefix(raw, "character("), strings.HasPrefix(raw, "char("): + return fmt.Sprintf("VARCHAR(%d)", normalizeTDengineVarcharLength(extractFirstTypeLength(raw), 255)), warnings + case raw == "text": + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 text 已降级为 VARCHAR(4096)", col.Name)) + return "VARCHAR(4096)", warnings + case raw == "uuid": + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 uuid 已降级为 VARCHAR(36)", col.Name)) + return "VARCHAR(36)", warnings + case raw == "json" || raw == "jsonb": + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 因 TDengine JSON 仅适用于 TAG,已降级为 VARCHAR(4096)", col.Name, col.Type)) + return "VARCHAR(4096)", warnings + case raw == "bytea": + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 bytea 已按字符串语义降级为 VARCHAR(4096)", col.Name)) + return "VARCHAR(4096)", warnings + case strings.HasSuffix(raw, "[]") || strings.HasPrefix(raw, "array"): + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已降级为 VARCHAR(4096)", col.Name, col.Type)) + return "VARCHAR(4096)", warnings + case raw == "user-defined": + warnings = append(warnings, fmt.Sprintf("字段 %s 为用户自定义类型,已降级为 VARCHAR(1024)", col.Name)) + return "VARCHAR(1024)", warnings + default: + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 暂无专门 TDengine 映射,已降级为 VARCHAR(1024)", col.Name, col.Type)) + return "VARCHAR(1024)", warnings + } +} + +func mapClickHouseColumnToTDengine(col connection.ColumnDefinition, forceTimestamp bool) (string, []string) { + warnings := make([]string, 0) + if forceTimestamp { + if !isClickHouseTDengineTimestampCandidate(col) { + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已提升为 TDengine 首列 TIMESTAMP", col.Name, col.Type)) + } + return "TIMESTAMP", warnings + } + + lower, _ := unwrapClickHouseTDengineType(col.Type) + if lower == "" { + return "VARCHAR(1024)", []string{fmt.Sprintf("字段 %s 类型为空,已降级为 VARCHAR(1024)", col.Name)} + } + + switch { + case lower == "bool" || lower == "boolean": + return "BOOL", warnings + case lower == "int8": + return "TINYINT", warnings + case lower == "uint8": + return "UTINYINT", warnings + case lower == "int16": + return "SMALLINT", warnings + case lower == "uint16": + return "USMALLINT", warnings + case lower == "int32": + return "INT", warnings + case lower == "uint32": + return "UINT", warnings + case lower == "int64": + return "BIGINT", warnings + case lower == "uint64": + return "UBIGINT", warnings + case lower == "float32": + return "FLOAT", warnings + case lower == "float64": + return "DOUBLE", warnings + case lower == "date": + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 date 已降级映射为 TIMESTAMP", col.Name)) + return "TIMESTAMP", warnings + case strings.HasPrefix(lower, "datetime"): + return "TIMESTAMP", warnings + case lower == "string": + return "VARCHAR(1024)", warnings + case lower == "uuid": + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 uuid 已降级为 VARCHAR(36)", col.Name)) + return "VARCHAR(36)", warnings + case lower == "json", strings.HasPrefix(lower, "map("), strings.HasPrefix(lower, "array("), strings.HasPrefix(lower, "tuple("), strings.HasPrefix(lower, "nested("): + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已降级为 VARCHAR(4096)", col.Name, col.Type)) + return "VARCHAR(4096)", warnings + case strings.HasPrefix(lower, "enum8("), strings.HasPrefix(lower, "enum16("): + warnings = append(warnings, fmt.Sprintf("字段 %s 枚举类型 %s 已降级为 VARCHAR(255)", col.Name, col.Type)) + return "VARCHAR(255)", warnings + case clickHouseDecimalPattern.MatchString(lower): + parts := clickHouseDecimalPattern.FindStringSubmatch(lower) + 
return fmt.Sprintf("DECIMAL(%s,%s)", parts[2], parts[3]), warnings + case clickHouseStringArgsPattern.MatchString(lower): + parts := clickHouseStringArgsPattern.FindStringSubmatch(lower) + length, err := strconv.Atoi(parts[1]) + if err != nil { + warnings = append(warnings, fmt.Sprintf("字段 %s FixedString 长度解析失败,已降级为 VARCHAR(255)", col.Name)) + return "VARCHAR(255)", warnings + } + return fmt.Sprintf("VARCHAR(%d)", normalizeTDengineVarcharLength(length, 255)), warnings + default: + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 暂无专门 TDengine 映射,已降级为 VARCHAR(1024)", col.Name, col.Type)) + return "VARCHAR(1024)", warnings + } +} + +func mapTDengineColumnToTDengine(col connection.ColumnDefinition, forceTimestamp bool) (string, []string) { + warnings := make([]string, 0) + if forceTimestamp { + if !isTDengineTDengineTimestampCandidate(col) { + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已提升为 TDengine 首列 TIMESTAMP", col.Name, col.Type)) + } + return "TIMESTAMP", warnings + } + + base, length := parseTDengineType(col.Type) + if base == "" { + return "VARCHAR(1024)", []string{fmt.Sprintf("字段 %s 类型为空,已降级为 VARCHAR(1024)", col.Name)} + } + if isTDengineTagColumn(col) { + warnings = append(warnings, fmt.Sprintf("字段 %s 为 TDengine TAG 列,迁移到 regular table 后将降级为普通字段", col.Name)) + } + + switch base { + case "BOOL", "BOOLEAN": + return "BOOL", warnings + case "TINYINT": + return "TINYINT", warnings + case "UTINYINT": + return "UTINYINT", warnings + case "SMALLINT": + return "SMALLINT", warnings + case "USMALLINT": + return "USMALLINT", warnings + case "INT", "INTEGER": + return "INT", warnings + case "UINT": + return "UINT", warnings + case "BIGINT": + return "BIGINT", warnings + case "UBIGINT": + return "UBIGINT", warnings + case "FLOAT": + return "FLOAT", warnings + case "DOUBLE": + return "DOUBLE", warnings + case "DECIMAL", "NUMERIC": + return normalizeTDengineDecimalType(col.Type), warnings + case "TIMESTAMP": + return "TIMESTAMP", warnings + case "DATE": + return "DATE", warnings + case "JSON": + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 JSON 在 TDengine regular table 中不保留 TAG 语义,已降级为 VARCHAR(4096)", col.Name)) + return "VARCHAR(4096)", warnings + case "BINARY", "NCHAR", "VARCHAR", "VARBINARY": + if length > 0 { + return fmt.Sprintf("%s(%d)", base, normalizeTDengineVarcharLength(length, length)), warnings + } + fallback := 255 + if base == "VARCHAR" { + fallback = 1024 + } + return fmt.Sprintf("%s(%d)", base, fallback), warnings + default: + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 暂无专门 TDengine 同库映射,已降级为 VARCHAR(1024)", col.Name, col.Type)) + return "VARCHAR(1024)", warnings + } +} + +func unwrapClickHouseTDengineType(raw string) (string, bool) { + text := strings.TrimSpace(raw) + lower := strings.ToLower(text) + nullable := false + for { + switched := false + if strings.HasPrefix(lower, "nullable(") && strings.HasSuffix(lower, ")") { + text = strings.TrimSpace(text[len("Nullable(") : len(text)-1]) + lower = strings.ToLower(text) + nullable = true + switched = true + } + if strings.HasPrefix(lower, "lowcardinality(") && strings.HasSuffix(lower, ")") { + text = strings.TrimSpace(text[len("LowCardinality(") : len(text)-1]) + lower = strings.ToLower(text) + switched = true + } + if !switched { + break + } + } + return lower, nullable +} + +func normalizeTDengineDecimalType(raw string) string { + text := strings.TrimSpace(raw) + if text == "" { + return "DECIMAL(38,10)" + } + lower := strings.ToLower(text) + if strings.HasPrefix(lower, "numeric") { + return "DECIMAL" + 
text[len("numeric"):] + } + if strings.HasPrefix(lower, "decimal") { + return "DECIMAL" + text[len("decimal"):] + } + return "DECIMAL(38,10)" +} + +func normalizeTDengineVarcharLength(length int, fallback int) int { + if fallback <= 0 { + fallback = 255 + } + if length <= 0 { + return fallback + } + if length > 16384 { + return 16384 + } + return length +} + +func extractFirstTypeLength(raw string) int { + start := strings.Index(raw, "(") + if start < 0 { + return 0 + } + end := strings.Index(raw[start+1:], ")") + if end < 0 { + return 0 + } + inside := strings.TrimSpace(raw[start+1 : start+1+end]) + if inside == "" { + return 0 + } + parts := strings.SplitN(inside, ",", 2) + length, err := strconv.Atoi(strings.TrimSpace(parts[0])) + if err != nil { + return 0 + } + return length +} diff --git a/internal/sync/migration_type_resolver.go b/internal/sync/migration_type_resolver.go new file mode 100644 index 0000000..937e2d7 --- /dev/null +++ b/internal/sync/migration_type_resolver.go @@ -0,0 +1,98 @@ +package sync + +import ( + "GoNavi-Wails/internal/connection" + "strings" +) + +func normalizeMigrationDBType(dbType string) string { + normalized := strings.ToLower(strings.TrimSpace(dbType)) + switch normalized { + case "doris": + return "diros" + case "postgresql": + return "postgres" + case "dm", "dm8": + return "dameng" + case "sqlite3": + return "sqlite" + default: + return normalized + } +} + +func resolveMigrationDBType(config connection.ConnectionConfig) string { + dbType := normalizeMigrationDBType(config.Type) + if dbType != "custom" { + return dbType + } + + driver := strings.ToLower(strings.TrimSpace(config.Driver)) + switch driver { + case "postgresql", "postgres", "pg", "pq", "pgx": + return "postgres" + case "dm", "dameng", "dm8": + return "dameng" + case "sqlite3", "sqlite": + return "sqlite" + case "sphinxql": + return "sphinx" + case "diros", "doris": + return "diros" + case "kingbase", "kingbase8", "kingbasees", "kingbasev8": + return "kingbase" + case "highgo": + return "highgo" + case "vastbase": + return "vastbase" + case "mysql", "mysql2": + return "mysql" + case "mariadb": + return "mariadb" + } + + switch { + case strings.Contains(driver, "postgres"): + return "postgres" + case strings.Contains(driver, "kingbase"): + return "kingbase" + case strings.Contains(driver, "highgo"): + return "highgo" + case strings.Contains(driver, "vastbase"): + return "vastbase" + case strings.Contains(driver, "sqlite"): + return "sqlite" + case strings.Contains(driver, "sphinx"): + return "sphinx" + case strings.Contains(driver, "diros"), strings.Contains(driver, "doris"): + return "diros" + case strings.Contains(driver, "maria"): + return "mariadb" + case strings.Contains(driver, "mysql"): + return "mysql" + case strings.Contains(driver, "dameng"), strings.Contains(driver, "dm"): + return "dameng" + default: + return normalizeMigrationDBType(driver) + } +} + +func isMySQLCoreType(dbType string) bool { + switch normalizeMigrationDBType(dbType) { + case "mysql", "mariadb", "diros": + return true + default: + return false + } +} + +func isMySQLLikeSourceType(dbType string) bool { + if isMySQLCoreType(dbType) { + return true + } + return normalizeMigrationDBType(dbType) == "sphinx" +} + +func isMySQLLikeWritableTargetType(dbType string) bool { + return isMySQLCoreType(dbType) +} diff --git a/internal/sync/preview.go b/internal/sync/preview.go index 7cec537..2ce6434 100644 --- a/internal/sync/preview.go +++ b/internal/sync/preview.go @@ -1,7 +1,7 @@ package sync import ( - 
"GoNavi-Wails/internal/db" + "errors" "fmt" "strings" ) @@ -36,12 +36,18 @@ func (s *SyncEngine) Preview(config SyncConfig, tableName string, limit int) (Ta if limit > 500 { limit = 500 } + if isRedisToMongoKeyspacePair(config) { + return s.previewRedisToMongo(config, tableName, limit) + } + if isMongoToRedisKeyspacePair(config) { + return s.previewMongoToRedis(config, tableName, limit) + } - sourceDB, err := db.NewDatabase(config.SourceConfig.Type) + sourceDB, err := newSyncDatabase(config.SourceConfig.Type) if err != nil { return TableDiffPreview{}, fmt.Errorf("初始化源数据库驱动失败: %w", err) } - targetDB, err := db.NewDatabase(config.TargetConfig.Type) + targetDB, err := newSyncDatabase(config.TargetConfig.Type) if err != nil { return TableDiffPreview{}, fmt.Errorf("初始化目标数据库驱动失败: %w", err) } @@ -56,14 +62,12 @@ func (s *SyncEngine) Preview(config SyncConfig, tableName string, limit int) (Ta } defer targetDB.Close() - sourceSchema, sourceTable := normalizeSchemaAndTable(config.SourceConfig.Type, config.SourceConfig.Database, tableName) - targetSchema, targetTable := normalizeSchemaAndTable(config.TargetConfig.Type, config.TargetConfig.Database, tableName) - sourceQueryTable := qualifiedNameForQuery(config.SourceConfig.Type, sourceSchema, sourceTable, tableName) - targetQueryTable := qualifiedNameForQuery(config.TargetConfig.Type, targetSchema, targetTable, tableName) - - cols, err := sourceDB.GetColumns(sourceSchema, sourceTable) + plan, cols, _, err := buildSchemaMigrationPlan(config, tableName, sourceDB, targetDB) if err != nil { - return TableDiffPreview{}, fmt.Errorf("获取源表字段失败: %w", err) + return TableDiffPreview{}, err + } + if !plan.TargetTableExists && !plan.AutoCreate { + return TableDiffPreview{}, errors.New(firstNonEmpty(plan.PlannedAction, "目标表不存在,无法预览差异")) } pkCols := make([]string, 0, 2) @@ -80,13 +84,17 @@ func (s *SyncEngine) Preview(config SyncConfig, tableName string, limit int) (Ta } pkCol := pkCols[0] - sourceRows, _, err := sourceDB.Query(fmt.Sprintf("SELECT * FROM %s", quoteQualifiedIdentByType(config.SourceConfig.Type, sourceQueryTable))) + sourceRows, _, err := sourceDB.Query(fmt.Sprintf("SELECT * FROM %s", quoteQualifiedIdentByType(resolveMigrationDBType(config.SourceConfig), plan.SourceQueryTable))) if err != nil { return TableDiffPreview{}, fmt.Errorf("读取源表失败: %w", err) } - targetRows, _, err := targetDB.Query(fmt.Sprintf("SELECT * FROM %s", quoteQualifiedIdentByType(config.TargetConfig.Type, targetQueryTable))) - if err != nil { - return TableDiffPreview{}, fmt.Errorf("读取目标表失败: %w", err) + + targetRows := make([]map[string]interface{}, 0) + if plan.TargetTableExists { + targetRows, _, err = targetDB.Query(fmt.Sprintf("SELECT * FROM %s", quoteQualifiedIdentByType(resolveMigrationDBType(config.TargetConfig), plan.TargetQueryTable))) + if err != nil { + return TableDiffPreview{}, fmt.Errorf("读取目标表失败: %w", err) + } } targetMap := make(map[string]map[string]interface{}, len(targetRows)) @@ -133,12 +141,7 @@ func (s *SyncEngine) Preview(config SyncConfig, tableName string, limit int) (Ta if len(changedColumns) > 0 { out.TotalUpdates++ if len(out.Updates) < limit { - out.Updates = append(out.Updates, PreviewUpdateRow{ - PK: pkVal, - ChangedColumns: changedColumns, - Source: sRow, - Target: tRow, - }) + out.Updates = append(out.Updates, PreviewUpdateRow{PK: pkVal, ChangedColumns: changedColumns, Source: sRow, Target: tRow}) } } continue diff --git a/internal/sync/redis_migration_test.go b/internal/sync/redis_migration_test.go new file mode 100644 index 0000000..ac3e7f1 --- 
/dev/null +++ b/internal/sync/redis_migration_test.go @@ -0,0 +1,490 @@ +package sync + +import ( + "GoNavi-Wails/internal/connection" + "GoNavi-Wails/internal/db" + redispkg "GoNavi-Wails/internal/redis" + "fmt" + "sort" + "strings" + "testing" +) + +type fakeRedisMigrationClient struct { + values map[string]*redispkg.RedisValue + scannedKeys []string + connectConfig connection.ConnectionConfig + closed bool +} + +func (f *fakeRedisMigrationClient) Connect(config connection.ConnectionConfig) error { + f.connectConfig = config + return nil +} + +func (f *fakeRedisMigrationClient) Close() error { + f.closed = true + return nil +} + +func (f *fakeRedisMigrationClient) ScanKeys(pattern string, cursor uint64, count int64) (*redispkg.RedisScanResult, error) { + items := make([]redispkg.RedisKeyInfo, 0, len(f.scannedKeys)) + for _, key := range f.scannedKeys { + items = append(items, redispkg.RedisKeyInfo{Key: key, Type: "string", TTL: -1}) + } + return &redispkg.RedisScanResult{Keys: items, Cursor: "0"}, nil +} + +func (f *fakeRedisMigrationClient) GetKeyType(key string) (string, error) { + if value, ok := f.values[key]; ok && value != nil { + return value.Type, nil + } + return "none", nil +} + +func (f *fakeRedisMigrationClient) GetValue(key string) (*redispkg.RedisValue, error) { + if value, ok := f.values[key]; ok { + return value, nil + } + return nil, fmt.Errorf("key not found: %s", key) +} + +func (f *fakeRedisMigrationClient) DeleteKeys(keys []string) (int64, error) { + var deleted int64 + for _, key := range keys { + if _, ok := f.values[key]; ok { + delete(f.values, key) + deleted++ + } + } + return deleted, nil +} + +func (f *fakeRedisMigrationClient) SetTTL(key string, ttl int64) error { + value, ok := f.values[key] + if !ok { + return nil + } + value.TTL = ttl + return nil +} + +func (f *fakeRedisMigrationClient) SetString(key, value string, ttl int64) error { + if f.values == nil { + f.values = map[string]*redispkg.RedisValue{} + } + f.values[key] = &redispkg.RedisValue{Type: "string", TTL: ttl, Value: value, Length: int64(len(value))} + return nil +} + +func (f *fakeRedisMigrationClient) SetHashField(key, field, value string) error { + if f.values == nil { + f.values = map[string]*redispkg.RedisValue{} + } + current, ok := f.values[key] + if !ok || current == nil || current.Type != "hash" { + current = &redispkg.RedisValue{Type: "hash", TTL: -1, Value: map[string]string{}} + f.values[key] = current + } + hash, _ := current.Value.(map[string]string) + if hash == nil { + hash = map[string]string{} + } + hash[field] = value + current.Value = hash + current.Length = int64(len(hash)) + return nil +} + +func (f *fakeRedisMigrationClient) ListPush(key string, values ...string) error { + if f.values == nil { + f.values = map[string]*redispkg.RedisValue{} + } + current, ok := f.values[key] + if !ok || current == nil || current.Type != "list" { + current = &redispkg.RedisValue{Type: "list", TTL: -1, Value: []string{}} + f.values[key] = current + } + list, _ := current.Value.([]string) + list = append(list, values...) 
+ current.Value = list + current.Length = int64(len(list)) + return nil +} + +func (f *fakeRedisMigrationClient) SetAdd(key string, members ...string) error { + if f.values == nil { + f.values = map[string]*redispkg.RedisValue{} + } + current, ok := f.values[key] + if !ok || current == nil || current.Type != "set" { + current = &redispkg.RedisValue{Type: "set", TTL: -1, Value: []string{}} + f.values[key] = current + } + setValues, _ := current.Value.([]string) + seen := make(map[string]struct{}, len(setValues)+len(members)) + for _, item := range setValues { + seen[item] = struct{}{} + } + for _, item := range members { + if _, ok := seen[item]; ok { + continue + } + seen[item] = struct{}{} + setValues = append(setValues, item) + } + sort.Strings(setValues) + current.Value = setValues + current.Length = int64(len(setValues)) + return nil +} + +func (f *fakeRedisMigrationClient) ZSetAdd(key string, members ...redispkg.ZSetMember) error { + if f.values == nil { + f.values = map[string]*redispkg.RedisValue{} + } + copied := append([]redispkg.ZSetMember(nil), members...) + sort.Slice(copied, func(i, j int) bool { + if copied[i].Score == copied[j].Score { + return copied[i].Member < copied[j].Member + } + return copied[i].Score < copied[j].Score + }) + f.values[key] = &redispkg.RedisValue{Type: "zset", TTL: -1, Value: copied, Length: int64(len(copied))} + return nil +} + +func (f *fakeRedisMigrationClient) StreamAdd(key string, fields map[string]string, id string) (string, error) { + if f.values == nil { + f.values = map[string]*redispkg.RedisValue{} + } + current, ok := f.values[key] + if !ok || current == nil || current.Type != "stream" { + current = &redispkg.RedisValue{Type: "stream", TTL: -1, Value: []redispkg.StreamEntry{}} + f.values[key] = current + } + entries, _ := current.Value.([]redispkg.StreamEntry) + entryID := id + if entryID == "" { + entryID = fmt.Sprintf("%d-0", len(entries)+1) + } + entries = append(entries, redispkg.StreamEntry{ID: entryID, Fields: fields}) + current.Value = entries + current.Length = int64(len(entries)) + return entryID, nil +} + +type fakeRedisMongoTargetDB struct { + tables []string + queryTable string + queryRows []map[string]interface{} + execs []string + applyTable string + applySet connection.ChangeSet +} + +func (f *fakeRedisMongoTargetDB) Connect(config connection.ConnectionConfig) error { return nil } +func (f *fakeRedisMongoTargetDB) Close() error { return nil } +func (f *fakeRedisMongoTargetDB) Ping() error { return nil } +func (f *fakeRedisMongoTargetDB) Query(query string) ([]map[string]interface{}, []string, error) { + queryTable := strings.TrimSpace(f.queryTable) + if queryTable == "" { + queryTable = "redis_db_0_keys" + } + if strings.Contains(query, fmt.Sprintf(`"find":"%s"`, queryTable)) { + return f.queryRows, []string{"_id", "key", "value"}, nil + } + return nil, nil, nil +} +func (f *fakeRedisMongoTargetDB) Exec(query string) (int64, error) { + f.execs = append(f.execs, query) + return 1, nil +} +func (f *fakeRedisMongoTargetDB) GetDatabases() ([]string, error) { return []string{"app"}, nil } +func (f *fakeRedisMongoTargetDB) GetTables(dbName string) ([]string, error) { + return f.tables, nil +} +func (f *fakeRedisMongoTargetDB) GetCreateStatement(dbName, tableName string) (string, error) { + return "", nil +} +func (f *fakeRedisMongoTargetDB) GetColumns(dbName, tableName string) ([]connection.ColumnDefinition, error) { + return nil, nil +} +func (f *fakeRedisMongoTargetDB) GetAllColumns(dbName string) 
([]connection.ColumnDefinitionWithTable, error) { + return nil, nil +} +func (f *fakeRedisMongoTargetDB) GetIndexes(dbName, tableName string) ([]connection.IndexDefinition, error) { + return nil, nil +} +func (f *fakeRedisMongoTargetDB) GetForeignKeys(dbName, tableName string) ([]connection.ForeignKeyDefinition, error) { + return nil, nil +} +func (f *fakeRedisMongoTargetDB) GetTriggers(dbName, tableName string) ([]connection.TriggerDefinition, error) { + return nil, nil +} +func (f *fakeRedisMongoTargetDB) ApplyChanges(tableName string, changes connection.ChangeSet) error { + f.applyTable = tableName + f.applySet = changes + return nil +} + +type fakeMongoRedisSourceDB struct { + tables []string + rowsByTable map[string][]map[string]interface{} + connectConfig connection.ConnectionConfig +} + +func (f *fakeMongoRedisSourceDB) Connect(config connection.ConnectionConfig) error { + f.connectConfig = config + return nil +} +func (f *fakeMongoRedisSourceDB) Close() error { return nil } +func (f *fakeMongoRedisSourceDB) Ping() error { return nil } +func (f *fakeMongoRedisSourceDB) Query(query string) ([]map[string]interface{}, []string, error) { + for tableName, rows := range f.rowsByTable { + if strings.Contains(query, fmt.Sprintf(`"find":"%s"`, tableName)) { + return rows, []string{"_id", "key", "type", "ttl", "value"}, nil + } + } + return nil, nil, fmt.Errorf("unexpected query: %s", query) +} +func (f *fakeMongoRedisSourceDB) Exec(query string) (int64, error) { return 0, nil } +func (f *fakeMongoRedisSourceDB) GetDatabases() ([]string, error) { return []string{"app"}, nil } +func (f *fakeMongoRedisSourceDB) GetTables(dbName string) ([]string, error) { + return f.tables, nil +} +func (f *fakeMongoRedisSourceDB) GetCreateStatement(dbName, tableName string) (string, error) { + return "", nil +} +func (f *fakeMongoRedisSourceDB) GetColumns(dbName, tableName string) ([]connection.ColumnDefinition, error) { + return nil, nil +} +func (f *fakeMongoRedisSourceDB) GetAllColumns(dbName string) ([]connection.ColumnDefinitionWithTable, error) { + return nil, nil +} +func (f *fakeMongoRedisSourceDB) GetIndexes(dbName, tableName string) ([]connection.IndexDefinition, error) { + return nil, nil +} +func (f *fakeMongoRedisSourceDB) GetForeignKeys(dbName, tableName string) ([]connection.ForeignKeyDefinition, error) { + return nil, nil +} +func (f *fakeMongoRedisSourceDB) GetTriggers(dbName, tableName string) ([]connection.TriggerDefinition, error) { + return nil, nil +} + +func TestRunSync_RedisToMongoAppliesInsertAndUpdate(t *testing.T) { + fakeRedis := &fakeRedisMigrationClient{ + values: map[string]*redispkg.RedisValue{ + "user:1": {Type: "hash", TTL: 120, Length: 2, Value: map[string]string{"name": "alice"}}, + "user:2": {Type: "string", TTL: -1, Length: 1, Value: "online"}, + }, + } + fakeTarget := &fakeRedisMongoTargetDB{ + tables: []string{"redis_db_0_keys"}, + queryRows: []map[string]interface{}{ + {"_id": "db0:user:1", "redisDb": 0, "key": "user:1", "type": "hash", "ttl": 120, "length": int64(2), "value": map[string]interface{}{"name": "old"}}, + }, + } + + oldNewRedisClient := newRedisSourceClient + oldNewDatabase := newSyncDatabase + defer func() { + newRedisSourceClient = oldNewRedisClient + newSyncDatabase = oldNewDatabase + }() + newRedisSourceClient = func() redisMigrationClient { return fakeRedis } + newSyncDatabase = func(dbType string) (db.Database, error) { return fakeTarget, nil } + + engine := NewSyncEngine(Reporter{}) + result := engine.RunSync(SyncConfig{ + SourceConfig: 
connection.ConnectionConfig{Type: "redis", Database: "0"}, + TargetConfig: connection.ConnectionConfig{Type: "mongodb", Database: "app"}, + Tables: []string{"user:1", "user:2"}, + Content: "data", + Mode: "insert_update", + }) + + if !result.Success { + t.Fatalf("expected success, got: %+v", result) + } + if fakeRedis.connectConfig.RedisDB != 0 { + t.Fatalf("expected redis db 0, got %d", fakeRedis.connectConfig.RedisDB) + } + if fakeTarget.applyTable != "redis_db_0_keys" { + t.Fatalf("unexpected apply table: %s", fakeTarget.applyTable) + } + if len(fakeTarget.applySet.Inserts) != 1 || len(fakeTarget.applySet.Updates) != 1 { + t.Fatalf("unexpected change set: %+v", fakeTarget.applySet) + } +} + +func TestRunSync_RedisToMongoUsesConfiguredCollectionName(t *testing.T) { + fakeRedis := &fakeRedisMigrationClient{ + values: map[string]*redispkg.RedisValue{ + "user:1": {Type: "string", TTL: -1, Length: 1, Value: "online"}, + }, + } + fakeTarget := &fakeRedisMongoTargetDB{ + tables: []string{"custom_keyspace_docs"}, + queryTable: "custom_keyspace_docs", + } + + oldNewRedisClient := newRedisSourceClient + oldNewDatabase := newSyncDatabase + defer func() { + newRedisSourceClient = oldNewRedisClient + newSyncDatabase = oldNewDatabase + }() + newRedisSourceClient = func() redisMigrationClient { return fakeRedis } + newSyncDatabase = func(dbType string) (db.Database, error) { return fakeTarget, nil } + + engine := NewSyncEngine(Reporter{}) + result := engine.RunSync(SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "redis", Database: "0"}, + TargetConfig: connection.ConnectionConfig{Type: "mongodb", Database: "app"}, + Tables: []string{"user:1"}, + Content: "data", + Mode: "insert_update", + MongoCollectionName: "custom_keyspace_docs", + }) + + if !result.Success { + t.Fatalf("expected success, got: %+v", result) + } + if fakeTarget.applyTable != "custom_keyspace_docs" { + t.Fatalf("unexpected apply table: %s", fakeTarget.applyTable) + } +} + +func TestPreview_RedisToMongoReturnsDocumentPreview(t *testing.T) { + fakeRedis := &fakeRedisMigrationClient{ + values: map[string]*redispkg.RedisValue{ + "session:1": {Type: "string", TTL: 60, Length: 1, Value: "token"}, + }, + } + fakeTarget := &fakeRedisMongoTargetDB{} + + oldNewRedisClient := newRedisSourceClient + oldNewDatabase := newSyncDatabase + defer func() { + newRedisSourceClient = oldNewRedisClient + newSyncDatabase = oldNewDatabase + }() + newRedisSourceClient = func() redisMigrationClient { return fakeRedis } + newSyncDatabase = func(dbType string) (db.Database, error) { return fakeTarget, nil } + + engine := NewSyncEngine(Reporter{}) + preview, err := engine.Preview(SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "redis", Database: "0"}, + TargetConfig: connection.ConnectionConfig{Type: "mongodb", Database: "app"}, + Tables: []string{"session:1"}, + Content: "data", + Mode: "insert_update", + }, "session:1", 20) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if preview.PKColumn != "_id" { + t.Fatalf("unexpected pk column: %s", preview.PKColumn) + } + if preview.TotalInserts != 1 || len(preview.Inserts) != 1 { + t.Fatalf("unexpected preview: %+v", preview) + } + if preview.Inserts[0].PK != "db0:session:1" { + t.Fatalf("unexpected preview pk: %+v", preview.Inserts[0]) + } +} + +func TestRunSync_MongoToRedisAppliesStringAndHash(t *testing.T) { + fakeSource := &fakeMongoRedisSourceDB{ + tables: []string{"redis_db_0_keys"}, + rowsByTable: map[string][]map[string]interface{}{ + "redis_db_0_keys": { + {"_id": 
"db0:session:1", "key": "session:1", "type": "string", "ttl": int64(60), "value": "token"}, + {"_id": "db0:user:1", "key": "user:1", "type": "hash", "ttl": int64(120), "value": map[string]interface{}{"name": "alice", "role": "admin"}}, + }, + }, + } + fakeRedis := &fakeRedisMigrationClient{ + values: map[string]*redispkg.RedisValue{ + "user:1": {Type: "hash", TTL: 120, Length: 1, Value: map[string]string{"name": "old"}}, + }, + } + + oldNewRedisClient := newRedisSourceClient + oldNewDatabase := newSyncDatabase + defer func() { + newRedisSourceClient = oldNewRedisClient + newSyncDatabase = oldNewDatabase + }() + newRedisSourceClient = func() redisMigrationClient { return fakeRedis } + newSyncDatabase = func(dbType string) (db.Database, error) { return fakeSource, nil } + + engine := NewSyncEngine(Reporter{}) + result := engine.RunSync(SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "mongodb", Database: "app"}, + TargetConfig: connection.ConnectionConfig{Type: "redis", Database: "0"}, + Tables: []string{"redis_db_0_keys"}, + Content: "data", + Mode: "insert_update", + }) + + if !result.Success { + t.Fatalf("expected success, got: %+v", result) + } + if fakeRedis.connectConfig.RedisDB != 0 { + t.Fatalf("expected redis db 0, got %d", fakeRedis.connectConfig.RedisDB) + } + if got := fakeRedis.values["session:1"]; got == nil || got.Type != "string" || got.Value != "token" || got.TTL != 60 { + t.Fatalf("unexpected string value: %+v", got) + } + gotHash, _ := fakeRedis.values["user:1"].Value.(map[string]string) + if gotHash["name"] != "alice" || gotHash["role"] != "admin" { + t.Fatalf("unexpected hash value: %+v", fakeRedis.values["user:1"]) + } + if result.RowsInserted != 1 || result.RowsUpdated != 1 { + t.Fatalf("unexpected sync result: %+v", result) + } +} + +func TestPreview_MongoToRedisReturnsCollectionPreview(t *testing.T) { + fakeSource := &fakeMongoRedisSourceDB{ + tables: []string{"redis_db_0_keys"}, + rowsByTable: map[string][]map[string]interface{}{ + "redis_db_0_keys": { + {"_id": "db0:session:1", "key": "session:1", "type": "string", "ttl": int64(60), "value": "token"}, + }, + }, + } + fakeRedis := &fakeRedisMigrationClient{values: map[string]*redispkg.RedisValue{}} + + oldNewRedisClient := newRedisSourceClient + oldNewDatabase := newSyncDatabase + defer func() { + newRedisSourceClient = oldNewRedisClient + newSyncDatabase = oldNewDatabase + }() + newRedisSourceClient = func() redisMigrationClient { return fakeRedis } + newSyncDatabase = func(dbType string) (db.Database, error) { return fakeSource, nil } + + engine := NewSyncEngine(Reporter{}) + preview, err := engine.Preview(SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "mongodb", Database: "app"}, + TargetConfig: connection.ConnectionConfig{Type: "redis", Database: "0"}, + Tables: []string{"redis_db_0_keys"}, + Content: "data", + Mode: "insert_update", + }, "redis_db_0_keys", 20) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if preview.Table != "redis_db_0_keys" || preview.PKColumn != "key" { + t.Fatalf("unexpected preview header: %+v", preview) + } + if preview.TotalInserts != 1 || len(preview.Inserts) != 1 { + t.Fatalf("unexpected preview rows: %+v", preview) + } + if preview.Inserts[0].PK != "session:1" { + t.Fatalf("unexpected preview pk: %+v", preview.Inserts[0]) + } +} diff --git a/internal/sync/schema_migration.go b/internal/sync/schema_migration.go new file mode 100644 index 0000000..ad6cdc6 --- /dev/null +++ b/internal/sync/schema_migration.go @@ -0,0 +1,1014 @@ +package sync 
+ +import ( + "GoNavi-Wails/internal/connection" + "GoNavi-Wails/internal/db" + "fmt" + "regexp" + "sort" + "strconv" + "strings" +) + +type SchemaMigrationPlan struct { + SourceSchema string + SourceTable string + SourceQueryTable string + TargetSchema string + TargetTable string + TargetQueryTable string + TargetTableExists bool + AutoCreate bool + PlannedAction string + Warnings []string + UnsupportedObjects []string + IndexesToCreate int + IndexesSkipped int + CreateTableSQL string + PreDataSQL []string + PostDataSQL []string +} + +type groupedIndex struct { + Name string + Columns []string + Unique bool + IndexType string + SubPart int +} + +func normalizeTargetTableStrategy(strategy string) string { + switch strings.ToLower(strings.TrimSpace(strategy)) { + case "smart": + return "smart" + case "auto_create_if_missing": + return "auto_create_if_missing" + case "existing_only", "": + return "existing_only" + default: + return "existing_only" + } +} + +func supportsAutoCreateMigration(sourceType, targetType string) bool { + return normalizeMigrationDBType(sourceType) == "mysql" && normalizeMigrationDBType(targetType) == "kingbase" +} + +func inspectTableColumns(database db.Database, schema, table string) ([]connection.ColumnDefinition, bool, error) { + cols, err := database.GetColumns(schema, table) + if err != nil { + if isLikelyTableNotFound(err) { + return nil, false, nil + } + return nil, false, err + } + if len(cols) == 0 { + return cols, false, nil + } + return cols, true, nil +} + +func isLikelyTableNotFound(err error) bool { + if err == nil { + return false + } + text := strings.ToLower(strings.TrimSpace(err.Error())) + if text == "" { + return false + } + keywords := []string{ + "doesn't exist", + "does not exist", + "not exist", + "unknown table", + "未找到表", + "不存在", + "invalid object", + "relation", + } + for _, keyword := range keywords { + if strings.Contains(text, keyword) { + return true + } + } + return false +} + +func buildSchemaMigrationPlanLegacy(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + plan := SchemaMigrationPlan{} + sourceType := resolveMigrationDBType(config.SourceConfig) + targetType := resolveMigrationDBType(config.TargetConfig) + plan.SourceSchema, plan.SourceTable = normalizeSchemaAndTable(sourceType, config.SourceConfig.Database, tableName) + plan.TargetSchema, plan.TargetTable = normalizeSchemaAndTable(targetType, config.TargetConfig.Database, tableName) + plan.SourceQueryTable = qualifiedNameForQuery(sourceType, plan.SourceSchema, plan.SourceTable, tableName) + plan.TargetQueryTable = qualifiedNameForQuery(targetType, plan.TargetSchema, plan.TargetTable, tableName) + plan.PlannedAction = "使用已有目标表导入" + if targetType == "tdengine" { + plan.Warnings = append(plan.Warnings, "TDengine 目标端当前仅支持 INSERT 写入;若存在差异更新/删除,执行期会被拒绝,请优先使用仅插入或全量覆盖模式") + } + + sourceCols, sourceExists, err := inspectTableColumns(sourceDB, plan.SourceSchema, plan.SourceTable) + if err != nil { + return plan, nil, nil, fmt.Errorf("获取源表字段失败: %w", err) + } + if !sourceExists { + return plan, nil, nil, fmt.Errorf("源表不存在或无列定义: %s", tableName) + } + + targetCols, targetExists, err := inspectTableColumns(targetDB, plan.TargetSchema, plan.TargetTable) + if err != nil { + return plan, sourceCols, nil, fmt.Errorf("获取目标表字段失败: %w", err) + } + plan.TargetTableExists = targetExists + + strategy := normalizeTargetTableStrategy(config.TargetTableStrategy) + if targetExists { + 
missing := diffMissingColumnNames(sourceCols, targetCols) + if len(missing) > 0 { + plan.Warnings = append(plan.Warnings, fmt.Sprintf("目标表缺失字段 %d 个:%s", len(missing), strings.Join(missing, ", "))) + } + if config.AutoAddColumns && isMySQLLikeSourceType(sourceType) && normalizeMigrationDBType(targetType) == "kingbase" { + addSQL, addWarnings := buildMySQLToKingbaseAddColumnSQL(plan.TargetQueryTable, sourceCols, targetCols) + plan.PreDataSQL = append(plan.PreDataSQL, addSQL...) + plan.Warnings = append(plan.Warnings, addWarnings...) + if len(addSQL) > 0 { + plan.PlannedAction = fmt.Sprintf("补齐缺失字段(%d)后导入", len(addSQL)) + } + } + if strategy != "existing_only" { + plan.Warnings = append(plan.Warnings, "目标表已存在,当前仅执行数据导入;不会自动重建已有索引/约束") + } + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + } + + switch strategy { + case "existing_only": + plan.PlannedAction = "目标表不存在,需先手工创建" + plan.Warnings = append(plan.Warnings, "当前策略要求目标表已存在,执行时不会自动建表") + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + case "smart", "auto_create_if_missing": + if !supportsAutoCreateMigration(config.SourceConfig.Type, config.TargetConfig.Type) { + plan.PlannedAction = "当前库对暂不支持自动建表" + plan.Warnings = append(plan.Warnings, fmt.Sprintf("当前仅支持 MySQL -> Kingbase 自动建表,当前组合=%s -> %s", config.SourceConfig.Type, config.TargetConfig.Type)) + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + } + plan.AutoCreate = true + plan.PlannedAction = "目标表不存在,将自动建表后导入" + createSQL, postSQL, warnings, unsupported, idxCreate, idxSkip, err := buildMySQLToKingbaseCreateTablePlan(config, plan.TargetQueryTable, sourceCols, sourceDB, plan.SourceSchema, plan.SourceTable) + if err != nil { + return plan, sourceCols, targetCols, err + } + plan.CreateTableSQL = createSQL + plan.PostDataSQL = append(plan.PostDataSQL, postSQL...) + plan.Warnings = append(plan.Warnings, warnings...) + plan.UnsupportedObjects = append(plan.UnsupportedObjects, unsupported...) 
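+		// Surface the index counters on the plan so callers can report how many
+		// indexes will be created versus skipped as unsupported.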
+ plan.IndexesToCreate = idxCreate + plan.IndexesSkipped = idxSkip + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + default: + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + } +} + +func dedupeSchemaMigrationPlan(plan SchemaMigrationPlan) SchemaMigrationPlan { + plan.Warnings = dedupeStrings(plan.Warnings) + plan.UnsupportedObjects = dedupeStrings(plan.UnsupportedObjects) + return plan +} + +func dedupeStrings(items []string) []string { + if len(items) == 0 { + return items + } + seen := make(map[string]struct{}, len(items)) + out := make([]string, 0, len(items)) + for _, item := range items { + text := strings.TrimSpace(item) + if text == "" { + continue + } + if _, ok := seen[text]; ok { + continue + } + seen[text] = struct{}{} + out = append(out, text) + } + return out +} + +func diffMissingColumnNames(sourceCols, targetCols []connection.ColumnDefinition) []string { + if len(sourceCols) == 0 { + return nil + } + targetSet := make(map[string]struct{}, len(targetCols)) + for _, col := range targetCols { + key := strings.ToLower(strings.TrimSpace(col.Name)) + if key == "" { + continue + } + targetSet[key] = struct{}{} + } + missing := make([]string, 0) + for _, col := range sourceCols { + key := strings.ToLower(strings.TrimSpace(col.Name)) + if key == "" { + continue + } + if _, ok := targetSet[key]; ok { + continue + } + missing = append(missing, col.Name) + } + sort.Strings(missing) + return missing +} + +func buildMySQLToKingbaseAddColumnSQL(targetQueryTable string, sourceCols, targetCols []connection.ColumnDefinition) ([]string, []string) { + targetSet := make(map[string]struct{}, len(targetCols)) + for _, col := range targetCols { + key := strings.ToLower(strings.TrimSpace(col.Name)) + if key == "" { + continue + } + targetSet[key] = struct{}{} + } + + var sqlList []string + var warnings []string + for _, col := range sourceCols { + key := strings.ToLower(strings.TrimSpace(col.Name)) + if key == "" { + continue + } + if _, ok := targetSet[key]; ok { + continue + } + colType, _, mapWarnings := mapMySQLColumnToKingbase(col) + warnings = append(warnings, mapWarnings...) + if col.Extra != "" && strings.Contains(strings.ToLower(col.Extra), "auto_increment") { + warnings = append(warnings, fmt.Sprintf("字段 %s 为自增列,补齐到已有目标表时不会自动补建 identity/sequence", col.Name)) + } + sqlList = append(sqlList, fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s %s NULL", + quoteQualifiedIdentByType("kingbase", targetQueryTable), + quoteIdentByType("kingbase", col.Name), + colType, + )) + } + return sqlList, dedupeStrings(warnings) +} + +func buildMySQLToKingbaseCreateTablePlan(config SyncConfig, targetQueryTable string, sourceCols []connection.ColumnDefinition, sourceDB db.Database, sourceSchema, sourceTable string) (string, []string, []string, []string, int, int, error) { + columnDefs := make([]string, 0, len(sourceCols)+1) + warnings := make([]string, 0) + unsupported := make([]string, 0) + pkCols := make([]string, 0, 2) + + for _, col := range sourceCols { + def, colWarnings := buildMySQLToKingbaseColumnDefinition(col) + warnings = append(warnings, colWarnings...) 
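+		// Quote every identifier for Kingbase and remember primary-key columns so a
+		// trailing PRIMARY KEY (...) clause can be appended after the loop.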
+ columnDefs = append(columnDefs, fmt.Sprintf("%s %s", quoteIdentByType("kingbase", col.Name), def)) + if col.Key == "PRI" || col.Key == "PK" { + pkCols = append(pkCols, quoteIdentByType("kingbase", col.Name)) + } + } + if len(pkCols) > 0 { + columnDefs = append(columnDefs, fmt.Sprintf("PRIMARY KEY (%s)", strings.Join(pkCols, ", "))) + } + createSQL := fmt.Sprintf("CREATE TABLE %s (\n %s\n)", quoteQualifiedIdentByType("kingbase", targetQueryTable), strings.Join(columnDefs, ",\n ")) + + if !config.CreateIndexes { + return createSQL, nil, dedupeStrings(warnings), dedupeStrings(unsupported), 0, 0, nil + } + + indexes, err := sourceDB.GetIndexes(sourceSchema, sourceTable) + if err != nil { + warnings = append(warnings, fmt.Sprintf("读取源表索引失败,已跳过索引迁移:%v", err)) + return createSQL, nil, dedupeStrings(warnings), dedupeStrings(unsupported), 0, 0, nil + } + grouped := groupIndexDefinitions(indexes) + postSQL := make([]string, 0, len(grouped)) + created := 0 + skipped := 0 + for _, idx := range grouped { + name := strings.TrimSpace(idx.Name) + if name == "" || strings.EqualFold(name, "primary") { + continue + } + if len(idx.Columns) == 0 { + skipped++ + unsupported = append(unsupported, fmt.Sprintf("索引 %s 缺少列定义,已跳过", name)) + continue + } + kind := strings.ToLower(strings.TrimSpace(idx.IndexType)) + if idx.SubPart > 0 { + skipped++ + unsupported = append(unsupported, fmt.Sprintf("索引 %s 使用前缀长度,当前暂不支持迁移", name)) + continue + } + if kind != "" && kind != "btree" { + skipped++ + unsupported = append(unsupported, fmt.Sprintf("索引 %s 类型=%s,当前暂不支持自动迁移", name, idx.IndexType)) + continue + } + quotedCols := make([]string, 0, len(idx.Columns)) + for _, col := range idx.Columns { + quotedCols = append(quotedCols, quoteIdentByType("kingbase", col)) + } + prefix := "CREATE INDEX" + if idx.Unique { + prefix = "CREATE UNIQUE INDEX" + } + postSQL = append(postSQL, fmt.Sprintf("%s %s ON %s (%s)", prefix, quoteIdentByType("kingbase", name), quoteQualifiedIdentByType("kingbase", targetQueryTable), strings.Join(quotedCols, ", "))) + created++ + } + return createSQL, postSQL, dedupeStrings(warnings), dedupeStrings(unsupported), created, skipped, nil +} + +func buildMySQLToKingbaseColumnDefinition(col connection.ColumnDefinition) (string, []string) { + targetType, useIdentity, warnings := mapMySQLColumnToKingbase(col) + parts := []string{targetType} + if useIdentity { + parts = append(parts, "GENERATED BY DEFAULT AS IDENTITY") + } + if !useIdentity { + if defaultSQL, ok, warningText := mapMySQLDefaultToKingbase(col, targetType); warningText != "" { + warnings = append(warnings, warningText) + } else if ok { + parts = append(parts, "DEFAULT "+defaultSQL) + } + } + if strings.EqualFold(strings.TrimSpace(col.Nullable), "NO") { + parts = append(parts, "NOT NULL") + } + return strings.Join(parts, " "), dedupeStrings(warnings) +} + +func mapMySQLColumnToKingbase(col connection.ColumnDefinition) (string, bool, []string) { + raw := strings.ToLower(strings.TrimSpace(col.Type)) + warnings := make([]string, 0) + if raw == "" { + return "text", false, []string{fmt.Sprintf("字段 %s 类型为空,已降级为 text", col.Name)} + } + unsigned := strings.Contains(raw, "unsigned") + clean := strings.ReplaceAll(raw, " unsigned", "") + clean = strings.ReplaceAll(clean, " zerofill", "") + isAutoIncrement := strings.Contains(strings.ToLower(strings.TrimSpace(col.Extra)), "auto_increment") + + switch { + case strings.HasPrefix(clean, "tinyint(1)") && !unsigned && !isAutoIncrement: + return "boolean", false, warnings + case strings.HasPrefix(clean, "tinyint"): + 
return "smallint", false, warnings // signed and unsigned tinyint both fit in smallint
+	case strings.HasPrefix(clean, "smallint"):
+		return ternaryString(unsigned, "integer", "smallint"), isAutoIncrement, warnings
+	case strings.HasPrefix(clean, "mediumint"):
+		return ternaryString(unsigned, "bigint", "integer"), isAutoIncrement, warnings
+	case strings.HasPrefix(clean, "int") || strings.HasPrefix(clean, "integer"):
+		return ternaryString(unsigned, "bigint", "integer"), isAutoIncrement, warnings
+	case strings.HasPrefix(clean, "bigint"):
+		if unsigned {
+			if isAutoIncrement {
+				warnings = append(warnings, fmt.Sprintf("字段 %s 为 unsigned bigint auto_increment,已降级为 numeric(20,0) 且不保留自增语义", col.Name))
+			}
+			return "numeric(20,0)", false, warnings
+		}
+		return "bigint", isAutoIncrement, warnings
+	case strings.HasPrefix(clean, "decimal"), strings.HasPrefix(clean, "numeric"):
+		return replaceTypeBase(clean, []string{"decimal", "numeric"}, "numeric"), false, warnings
+	case strings.HasPrefix(clean, "float"):
+		return "real", false, warnings
+	case strings.HasPrefix(clean, "double"):
+		return "double precision", false, warnings
+	case strings.HasPrefix(clean, "bit("):
+		if clean == "bit(1)" {
+			return "boolean", false, warnings
+		}
+		return clean, false, warnings
+	case strings.HasPrefix(clean, "bool"), strings.HasPrefix(clean, "boolean"):
+		return "boolean", false, warnings
+	case strings.HasPrefix(clean, "char("), strings.HasPrefix(clean, "varchar("):
+		return clean, false, warnings
+	case strings.HasPrefix(clean, "tinytext"), strings.HasPrefix(clean, "text"), strings.HasPrefix(clean, "mediumtext"), strings.HasPrefix(clean, "longtext"):
+		return "text", false, warnings
+	case strings.HasPrefix(clean, "json"):
+		return "jsonb", false, warnings
+	// datetime/timestamp must be matched before the shorter date/time prefixes,
+	// otherwise they would be truncated to plain date/time columns.
+	case strings.HasPrefix(clean, "datetime"), strings.HasPrefix(clean, "timestamp"):
+		return "timestamp", false, warnings
+	case strings.HasPrefix(clean, "date"):
+		return "date", false, warnings
+	case strings.HasPrefix(clean, "time"):
+		return "time", false, warnings
+	case strings.HasPrefix(clean, "year"):
+		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 year 已映射为 integer", col.Name))
+		return "integer", false, warnings
+	case strings.HasPrefix(clean, "binary"), strings.HasPrefix(clean, "varbinary"), strings.HasPrefix(clean, "tinyblob"), strings.HasPrefix(clean, "blob"), strings.HasPrefix(clean, "mediumblob"), strings.HasPrefix(clean, "longblob"):
+		return "bytea", false, warnings
+	case strings.HasPrefix(clean, "enum"), strings.HasPrefix(clean, "set"):
+		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已降级为 text", col.Name, col.Type))
+		return "text", false, warnings
+	default:
+		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 暂无专门映射,已降级为 text", col.Name, col.Type))
+		return "text", false, warnings
+	}
+}
+
+func replaceTypeBase(raw string, bases []string, target string) string {
+	for _, base := range bases {
+		if strings.HasPrefix(raw, base) {
+			return target + strings.TrimPrefix(raw, base)
+		}
+	}
+	return target
+}
+
+var numericPattern = regexp.MustCompile(`^[+-]?\d+(\.\d+)?$`)
+
+func mapMySQLDefaultToKingbase(col connection.ColumnDefinition, targetType string) (string, bool, string) {
+	if col.Default == nil {
+		return "", false, ""
+	}
+	raw := strings.TrimSpace(*col.Default)
+	if raw == "" {
+		if isStringLikeTargetType(targetType) {
+			return "''", true, ""
+		}
+		return "", false, fmt.Sprintf("字段 %s 的空字符串默认值未保留", col.Name)
+	}
+	lower := strings.ToLower(raw)
+	if lower == "null" {
+		return "", false, ""
+	}
+	if strings.HasPrefix(lower, 
"current_timestamp") { + return "CURRENT_TIMESTAMP", true, "" + } + if targetType == "boolean" { + switch lower { + case "1", "true": + return "TRUE", true, "" + case "0", "false": + return "FALSE", true, "" + } + } + if numericPattern.MatchString(raw) && !isStringLikeTargetType(targetType) { + return raw, true, "" + } + if strings.ContainsAny(raw, "()") && !strings.HasPrefix(lower, "current_timestamp") { + return "", false, fmt.Sprintf("字段 %s 的默认值 %s 包含表达式,当前未自动迁移", col.Name, raw) + } + return "'" + strings.ReplaceAll(raw, "'", "''") + "'", true, "" +} + +func isStringLikeTargetType(targetType string) bool { + text := strings.ToLower(strings.TrimSpace(targetType)) + return strings.Contains(text, "char") || strings.Contains(text, "text") || strings.Contains(text, "json") || strings.Contains(text, "bytea") +} + +func ternaryString(ok bool, a, b string) string { + if ok { + return a + } + return b +} + +func groupIndexDefinitions(indexes []connection.IndexDefinition) []groupedIndex { + if len(indexes) == 0 { + return nil + } + groupMap := make(map[string][]connection.IndexDefinition) + order := make([]string, 0) + for _, idx := range indexes { + name := strings.TrimSpace(idx.Name) + if name == "" { + continue + } + if _, ok := groupMap[name]; !ok { + order = append(order, name) + } + groupMap[name] = append(groupMap[name], idx) + } + grouped := make([]groupedIndex, 0, len(groupMap)) + for _, name := range order { + rows := groupMap[name] + sort.SliceStable(rows, func(i, j int) bool { + return rows[i].SeqInIndex < rows[j].SeqInIndex + }) + gi := groupedIndex{Name: name, Unique: true, IndexType: "BTREE"} + for _, row := range rows { + if row.NonUnique != 0 { + gi.Unique = false + } + if strings.TrimSpace(row.IndexType) != "" { + gi.IndexType = row.IndexType + } + if row.SubPart > 0 && gi.SubPart == 0 { + gi.SubPart = row.SubPart + } + col := strings.TrimSpace(row.ColumnName) + if col != "" { + gi.Columns = append(gi.Columns, col) + } + } + grouped = append(grouped, gi) + } + return grouped +} + +func intFromAny(v interface{}) int { + switch typed := v.(type) { + case int: + return typed + case int64: + return int(typed) + case float64: + return int(typed) + case string: + i, _ := strconv.Atoi(strings.TrimSpace(typed)) + return i + default: + return 0 + } +} + +func isPGLikeSource(dbType string) bool { + switch normalizeMigrationDBType(dbType) { + case "postgres", "kingbase", "highgo", "vastbase", "duckdb": + return true + default: + return false + } +} + +func buildPGLikeToMySQLPlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + plan := SchemaMigrationPlan{} + sourceType := resolveMigrationDBType(config.SourceConfig) + targetType := resolveMigrationDBType(config.TargetConfig) + plan.SourceSchema, plan.SourceTable = normalizeSchemaAndTable(sourceType, config.SourceConfig.Database, tableName) + plan.TargetSchema, plan.TargetTable = normalizeSchemaAndTable(targetType, config.TargetConfig.Database, tableName) + plan.SourceQueryTable = qualifiedNameForQuery(sourceType, plan.SourceSchema, plan.SourceTable, tableName) + plan.TargetQueryTable = qualifiedNameForQuery(targetType, plan.TargetSchema, plan.TargetTable, tableName) + plan.PlannedAction = "使用已有目标表导入" + + sourceCols, sourceExists, err := inspectTableColumns(sourceDB, plan.SourceSchema, plan.SourceTable) + if err != nil { + return plan, nil, nil, fmt.Errorf("获取源表字段失败: %w", err) + } + if !sourceExists { + return plan, 
nil, nil, fmt.Errorf("源表不存在或无列定义: %s", tableName) + } + + targetCols, targetExists, err := inspectTableColumns(targetDB, plan.TargetSchema, plan.TargetTable) + if err != nil { + return plan, sourceCols, nil, fmt.Errorf("获取目标表字段失败: %w", err) + } + plan.TargetTableExists = targetExists + + strategy := normalizeTargetTableStrategy(config.TargetTableStrategy) + if targetExists { + missing := diffMissingColumnNames(sourceCols, targetCols) + if len(missing) > 0 { + plan.Warnings = append(plan.Warnings, fmt.Sprintf("目标表缺失字段 %d 个:%s", len(missing), strings.Join(missing, ", "))) + } + if config.AutoAddColumns { + addSQL, addWarnings := buildPGLikeToMySQLAddColumnSQL(plan.TargetQueryTable, sourceCols, targetCols) + plan.PreDataSQL = append(plan.PreDataSQL, addSQL...) + plan.Warnings = append(plan.Warnings, addWarnings...) + if len(addSQL) > 0 { + plan.PlannedAction = fmt.Sprintf("补齐缺失字段(%d)后导入", len(addSQL)) + } + } + if strategy != "existing_only" { + plan.Warnings = append(plan.Warnings, "目标表已存在,当前仅执行数据导入;不会自动重建已有索引/约束") + } + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + } + + switch strategy { + case "existing_only": + plan.PlannedAction = "目标表不存在,需先手工创建" + plan.Warnings = append(plan.Warnings, "当前策略要求目标表已存在,执行时不会自动建表") + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + case "smart", "auto_create_if_missing": + plan.AutoCreate = true + plan.PlannedAction = "目标表不存在,将自动建表后导入" + createSQL, postSQL, warnings, unsupported, idxCreate, idxSkip, err := buildPGLikeToMySQLCreateTablePlan(config, plan.TargetQueryTable, sourceCols, sourceDB, plan.SourceSchema, plan.SourceTable) + if err != nil { + return plan, sourceCols, targetCols, err + } + plan.CreateTableSQL = createSQL + plan.PostDataSQL = append(plan.PostDataSQL, postSQL...) + plan.Warnings = append(plan.Warnings, warnings...) + plan.UnsupportedObjects = append(plan.UnsupportedObjects, unsupported...) + plan.IndexesToCreate = idxCreate + plan.IndexesSkipped = idxSkip + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + default: + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + } +} + +func buildPGLikeToMySQLAddColumnSQL(targetQueryTable string, sourceCols, targetCols []connection.ColumnDefinition) ([]string, []string) { + targetSet := make(map[string]struct{}, len(targetCols)) + for _, col := range targetCols { + key := strings.ToLower(strings.TrimSpace(col.Name)) + if key == "" { + continue + } + targetSet[key] = struct{}{} + } + var sqlList []string + var warnings []string + for _, col := range sourceCols { + key := strings.ToLower(strings.TrimSpace(col.Name)) + if key == "" { + continue + } + if _, ok := targetSet[key]; ok { + continue + } + colType, mapWarnings := mapPGLikeColumnToMySQL(col) + warnings = append(warnings, mapWarnings...) 
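+		// Each missing column becomes one additive statement; for a hypothetical
+		// bigint column "score" on app.users this comes out roughly as:
+		//   ALTER TABLE `app`.`users` ADD COLUMN `score` bigint NULL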
+ if col.Extra != "" && strings.Contains(strings.ToLower(col.Extra), "auto_increment") { + warnings = append(warnings, fmt.Sprintf("字段 %s 为自增列,补齐到已有目标表时不会自动补建 AUTO_INCREMENT 属性", col.Name)) + } + sqlList = append(sqlList, fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s %s NULL", + quoteQualifiedIdentByType("mysql", targetQueryTable), + quoteIdentByType("mysql", col.Name), + colType, + )) + } + return sqlList, dedupeStrings(warnings) +} + +func buildPGLikeToMySQLCreateTablePlan(config SyncConfig, targetQueryTable string, sourceCols []connection.ColumnDefinition, sourceDB db.Database, sourceSchema, sourceTable string) (string, []string, []string, []string, int, int, error) { + columnDefs := make([]string, 0, len(sourceCols)+1) + warnings := make([]string, 0) + unsupported := make([]string, 0) + pkCols := make([]string, 0, 2) + for _, col := range sourceCols { + def, colWarnings := buildPGLikeToMySQLColumnDefinition(col) + warnings = append(warnings, colWarnings...) + columnDefs = append(columnDefs, fmt.Sprintf("%s %s", quoteIdentByType("mysql", col.Name), def)) + if col.Key == "PRI" || col.Key == "PK" { + pkCols = append(pkCols, quoteIdentByType("mysql", col.Name)) + } + } + if len(pkCols) > 0 { + columnDefs = append(columnDefs, fmt.Sprintf("PRIMARY KEY (%s)", strings.Join(pkCols, ", "))) + } + createSQL := fmt.Sprintf("CREATE TABLE %s (\n %s\n)", quoteQualifiedIdentByType("mysql", targetQueryTable), strings.Join(columnDefs, ",\n ")) + if !config.CreateIndexes { + return createSQL, nil, dedupeStrings(warnings), dedupeStrings(unsupported), 0, 0, nil + } + indexes, err := sourceDB.GetIndexes(sourceSchema, sourceTable) + if err != nil { + warnings = append(warnings, fmt.Sprintf("读取源表索引失败,已跳过索引迁移:%v", err)) + return createSQL, nil, dedupeStrings(warnings), dedupeStrings(unsupported), 0, 0, nil + } + grouped := groupIndexDefinitions(indexes) + postSQL := make([]string, 0, len(grouped)) + created := 0 + skipped := 0 + for _, idx := range grouped { + name := strings.TrimSpace(idx.Name) + if name == "" || strings.EqualFold(name, "primary") { + continue + } + if len(idx.Columns) == 0 { + skipped++ + unsupported = append(unsupported, fmt.Sprintf("索引 %s 缺少列定义,已跳过", name)) + continue + } + kind := strings.ToLower(strings.TrimSpace(idx.IndexType)) + if idx.SubPart > 0 { + skipped++ + unsupported = append(unsupported, fmt.Sprintf("索引 %s 使用前缀长度,当前暂不支持迁移", name)) + continue + } + if kind != "" && kind != "btree" { + skipped++ + unsupported = append(unsupported, fmt.Sprintf("索引 %s 类型=%s,当前暂不支持自动迁移", name, idx.IndexType)) + continue + } + quotedCols := make([]string, 0, len(idx.Columns)) + for _, col := range idx.Columns { + quotedCols = append(quotedCols, quoteIdentByType("mysql", col)) + } + prefix := "CREATE INDEX" + if idx.Unique { + prefix = "CREATE UNIQUE INDEX" + } + postSQL = append(postSQL, fmt.Sprintf("%s %s ON %s (%s)", prefix, quoteIdentByType("mysql", name), quoteQualifiedIdentByType("mysql", targetQueryTable), strings.Join(quotedCols, ", "))) + created++ + } + return createSQL, postSQL, dedupeStrings(warnings), dedupeStrings(unsupported), created, skipped, nil +} + +func buildPGLikeToMySQLColumnDefinition(col connection.ColumnDefinition) (string, []string) { + targetType, warnings := mapPGLikeColumnToMySQL(col) + parts := []string{targetType} + if strings.Contains(strings.ToLower(strings.TrimSpace(col.Extra)), "auto_increment") && canUseMySQLAutoIncrement(targetType) { + parts = append(parts, "AUTO_INCREMENT") + } + if defaultSQL, ok, warningText := mapPGLikeDefaultToMySQL(col, targetType); 
warningText != "" { + warnings = append(warnings, warningText) + } else if ok { + parts = append(parts, "DEFAULT "+defaultSQL) + } + if strings.EqualFold(strings.TrimSpace(col.Nullable), "NO") { + parts = append(parts, "NOT NULL") + } + return strings.Join(parts, " "), dedupeStrings(warnings) +} + +func mapPGLikeColumnToMySQL(col connection.ColumnDefinition) (string, []string) { + raw := strings.ToLower(strings.TrimSpace(col.Type)) + warnings := make([]string, 0) + if raw == "" { + return "text", []string{fmt.Sprintf("字段 %s 类型为空,已降级为 text", col.Name)} + } + switch { + case raw == "boolean" || strings.HasPrefix(raw, "bool"): + return "tinyint(1)", warnings + case raw == "smallint": + return "smallint", warnings + case raw == "integer" || raw == "int4": + return "int", warnings + case raw == "bigint" || raw == "int8": + return "bigint", warnings + case strings.HasPrefix(raw, "numeric") || strings.HasPrefix(raw, "decimal"): + return replaceTypeBase(raw, []string{"numeric", "decimal"}, "decimal"), warnings + case raw == "real" || raw == "float4": + return "float", warnings + case raw == "double precision" || raw == "float8": + return "double", warnings + case strings.HasPrefix(raw, "character varying"): + return strings.Replace(raw, "character varying", "varchar", 1), warnings + case strings.HasPrefix(raw, "character("): + return strings.Replace(raw, "character", "char", 1), warnings + case raw == "character": + return "char(1)", warnings + case raw == "text": + return "text", warnings + case raw == "json" || raw == "jsonb": + return "json", warnings + case raw == "bytea": + return "longblob", warnings + case raw == "date": + return "date", warnings + case strings.HasPrefix(raw, "time"): + return "time", warnings + case strings.HasPrefix(raw, "timestamp"): + return "datetime", warnings + case strings.HasPrefix(raw, "uuid"): + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 uuid 已映射为 varchar(36)", col.Name)) + return "varchar(36)", warnings + case strings.Contains(raw, "without time zone") || strings.Contains(raw, "with time zone"): + return "datetime", warnings + case strings.HasPrefix(raw, "json"): + return "json", warnings + case strings.HasSuffix(raw, "[]") || strings.HasPrefix(raw, "array"): + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已降级为 json", col.Name, col.Type)) + return "json", warnings + case raw == "user-defined": + warnings = append(warnings, fmt.Sprintf("字段 %s 为用户自定义类型,已降级为 text", col.Name)) + return "text", warnings + default: + warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 暂无专门映射,已降级为 text", col.Name, col.Type)) + return "text", warnings + } +} + +func canUseMySQLAutoIncrement(targetType string) bool { + text := strings.ToLower(strings.TrimSpace(targetType)) + switch { + case strings.HasPrefix(text, "tinyint"), strings.HasPrefix(text, "smallint"), strings.HasPrefix(text, "mediumint"), strings.HasPrefix(text, "int"), strings.HasPrefix(text, "bigint"): + return true + default: + return false + } +} + +func mapPGLikeDefaultToMySQL(col connection.ColumnDefinition, targetType string) (string, bool, string) { + if col.Default == nil { + return "", false, "" + } + raw := strings.TrimSpace(*col.Default) + if raw == "" || strings.EqualFold(raw, "null") { + return "", false, "" + } + lower := strings.ToLower(raw) + if strings.HasPrefix(lower, "nextval(") { + return "", false, "" + } + if strings.Contains(lower, "current_timestamp") || strings.Contains(lower, "now()") { + return "CURRENT_TIMESTAMP", true, "" + } + if targetType == "tinyint(1)" { + switch lower { + case 
"true", "1": + return "1", true, "" + case "false", "0": + return "0", true, "" + } + } + if numericPattern.MatchString(raw) && !isStringLikeTargetType(targetType) { + return raw, true, "" + } + if strings.ContainsAny(raw, "()") && !strings.Contains(lower, "current_timestamp") && !strings.Contains(lower, "now()") { + return "", false, fmt.Sprintf("字段 %s 的默认值 %s 包含表达式,当前未自动迁移", col.Name, raw) + } + return "'" + strings.ReplaceAll(raw, "'", "''") + "'", true, "" +} + +func isPGLikeTarget(dbType string) bool { + switch normalizeMigrationDBType(dbType) { + case "postgres", "kingbase", "highgo", "vastbase", "duckdb": + return true + default: + return false + } +} + +func buildMySQLToPGLikePlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) { + plan := SchemaMigrationPlan{} + sourceType := resolveMigrationDBType(config.SourceConfig) + targetType := resolveMigrationDBType(config.TargetConfig) + plan.SourceSchema, plan.SourceTable = normalizeSchemaAndTable(sourceType, config.SourceConfig.Database, tableName) + plan.TargetSchema, plan.TargetTable = normalizeSchemaAndTable(targetType, config.TargetConfig.Database, tableName) + plan.SourceQueryTable = qualifiedNameForQuery(sourceType, plan.SourceSchema, plan.SourceTable, tableName) + plan.TargetQueryTable = qualifiedNameForQuery(targetType, plan.TargetSchema, plan.TargetTable, tableName) + plan.PlannedAction = "使用已有目标表导入" + + sourceCols, sourceExists, err := inspectTableColumns(sourceDB, plan.SourceSchema, plan.SourceTable) + if err != nil { + return plan, nil, nil, fmt.Errorf("获取源表字段失败: %w", err) + } + if !sourceExists { + return plan, nil, nil, fmt.Errorf("源表不存在或无列定义: %s", tableName) + } + + targetCols, targetExists, err := inspectTableColumns(targetDB, plan.TargetSchema, plan.TargetTable) + if err != nil { + return plan, sourceCols, nil, fmt.Errorf("获取目标表字段失败: %w", err) + } + plan.TargetTableExists = targetExists + + strategy := normalizeTargetTableStrategy(config.TargetTableStrategy) + if targetExists { + missing := diffMissingColumnNames(sourceCols, targetCols) + if len(missing) > 0 { + plan.Warnings = append(plan.Warnings, fmt.Sprintf("目标表缺失字段 %d 个:%s", len(missing), strings.Join(missing, ", "))) + } + if config.AutoAddColumns { + addSQL, addWarnings := buildMySQLToPGLikeAddColumnSQL(targetType, plan.TargetQueryTable, sourceCols, targetCols) + plan.PreDataSQL = append(plan.PreDataSQL, addSQL...) + plan.Warnings = append(plan.Warnings, addWarnings...) + if len(addSQL) > 0 { + plan.PlannedAction = fmt.Sprintf("补齐缺失字段(%d)后导入", len(addSQL)) + } + } + if strategy != "existing_only" { + plan.Warnings = append(plan.Warnings, "目标表已存在,当前仅执行数据导入;不会自动重建已有索引/约束") + } + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + } + + switch strategy { + case "existing_only": + plan.PlannedAction = "目标表不存在,需先手工创建" + plan.Warnings = append(plan.Warnings, "当前策略要求目标表已存在,执行时不会自动建表") + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + case "smart", "auto_create_if_missing": + plan.AutoCreate = true + plan.PlannedAction = "目标表不存在,将自动建表后导入" + createSQL, postSQL, warnings, unsupported, idxCreate, idxSkip, err := buildMySQLToPGLikeCreateTablePlan(targetType, config, plan.TargetQueryTable, sourceCols, sourceDB, plan.SourceSchema, plan.SourceTable) + if err != nil { + return plan, sourceCols, targetCols, err + } + plan.CreateTableSQL = createSQL + plan.PostDataSQL = append(plan.PostDataSQL, postSQL...) 
+ plan.Warnings = append(plan.Warnings, warnings...) + plan.UnsupportedObjects = append(plan.UnsupportedObjects, unsupported...) + plan.IndexesToCreate = idxCreate + plan.IndexesSkipped = idxSkip + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + default: + return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil + } +} + +func buildMySQLToPGLikeAddColumnSQL(targetType string, targetQueryTable string, sourceCols, targetCols []connection.ColumnDefinition) ([]string, []string) { + targetSet := make(map[string]struct{}, len(targetCols)) + for _, col := range targetCols { + key := strings.ToLower(strings.TrimSpace(col.Name)) + if key == "" { + continue + } + targetSet[key] = struct{}{} + } + var sqlList []string + var warnings []string + for _, col := range sourceCols { + key := strings.ToLower(strings.TrimSpace(col.Name)) + if key == "" { + continue + } + if _, ok := targetSet[key]; ok { + continue + } + colType, _, mapWarnings := mapMySQLColumnToKingbase(col) + warnings = append(warnings, mapWarnings...) + if col.Extra != "" && strings.Contains(strings.ToLower(col.Extra), "auto_increment") { + warnings = append(warnings, fmt.Sprintf("字段 %s 为自增列,补齐到已有目标表时不会自动补建 identity/sequence", col.Name)) + } + sqlList = append(sqlList, fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s %s NULL", + quoteQualifiedIdentByType(targetType, targetQueryTable), + quoteIdentByType(targetType, col.Name), + colType, + )) + } + return sqlList, dedupeStrings(warnings) +} + +func buildMySQLToPGLikeCreateTablePlan(targetType string, config SyncConfig, targetQueryTable string, sourceCols []connection.ColumnDefinition, sourceDB db.Database, sourceSchema, sourceTable string) (string, []string, []string, []string, int, int, error) { + columnDefs := make([]string, 0, len(sourceCols)+1) + warnings := make([]string, 0) + unsupported := make([]string, 0) + pkCols := make([]string, 0, 2) + for _, col := range sourceCols { + def, colWarnings := buildMySQLToPGLikeColumnDefinition(col) + warnings = append(warnings, colWarnings...) 
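+		// Rough shape of the DDL this function assembles (illustrative only; assumes
+		// targetType "postgres", an `id bigint auto_increment` primary key and a
+		// nullable `name varchar(64)` source column):
+		//
+		//	CREATE TABLE "public"."users" (
+		//	  "id" bigint GENERATED BY DEFAULT AS IDENTITY NOT NULL,
+		//	  "name" varchar(64),
+		//	  PRIMARY KEY ("id")
+		//	)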
+ columnDefs = append(columnDefs, fmt.Sprintf("%s %s", quoteIdentByType(targetType, col.Name), def)) + if col.Key == "PRI" || col.Key == "PK" { + pkCols = append(pkCols, quoteIdentByType(targetType, col.Name)) + } + } + if len(pkCols) > 0 { + columnDefs = append(columnDefs, fmt.Sprintf("PRIMARY KEY (%s)", strings.Join(pkCols, ", "))) + } + createSQL := fmt.Sprintf("CREATE TABLE %s (\n %s\n)", quoteQualifiedIdentByType(targetType, targetQueryTable), strings.Join(columnDefs, ",\n ")) + if !config.CreateIndexes { + return createSQL, nil, dedupeStrings(warnings), dedupeStrings(unsupported), 0, 0, nil + } + indexes, err := sourceDB.GetIndexes(sourceSchema, sourceTable) + if err != nil { + warnings = append(warnings, fmt.Sprintf("读取源表索引失败,已跳过索引迁移:%v", err)) + return createSQL, nil, dedupeStrings(warnings), dedupeStrings(unsupported), 0, 0, nil + } + grouped := groupIndexDefinitions(indexes) + postSQL := make([]string, 0, len(grouped)) + created := 0 + skipped := 0 + for _, idx := range grouped { + name := strings.TrimSpace(idx.Name) + if name == "" || strings.EqualFold(name, "primary") { + continue + } + if len(idx.Columns) == 0 { + skipped++ + unsupported = append(unsupported, fmt.Sprintf("索引 %s 缺少列定义,已跳过", name)) + continue + } + kind := strings.ToLower(strings.TrimSpace(idx.IndexType)) + if idx.SubPart > 0 { + skipped++ + unsupported = append(unsupported, fmt.Sprintf("索引 %s 使用前缀长度,当前暂不支持迁移", name)) + continue + } + if kind != "" && kind != "btree" { + skipped++ + unsupported = append(unsupported, fmt.Sprintf("索引 %s 类型=%s,当前暂不支持自动迁移", name, idx.IndexType)) + continue + } + quotedCols := make([]string, 0, len(idx.Columns)) + for _, col := range idx.Columns { + quotedCols = append(quotedCols, quoteIdentByType(targetType, col)) + } + prefix := "CREATE INDEX" + if idx.Unique { + prefix = "CREATE UNIQUE INDEX" + } + postSQL = append(postSQL, fmt.Sprintf("%s %s ON %s (%s)", prefix, quoteIdentByType(targetType, name), quoteQualifiedIdentByType(targetType, targetQueryTable), strings.Join(quotedCols, ", "))) + created++ + } + return createSQL, postSQL, dedupeStrings(warnings), dedupeStrings(unsupported), created, skipped, nil +} + +func buildMySQLToPGLikeColumnDefinition(col connection.ColumnDefinition) (string, []string) { + targetType, useIdentity, warnings := mapMySQLColumnToKingbase(col) + parts := []string{targetType} + if useIdentity { + parts = append(parts, "GENERATED BY DEFAULT AS IDENTITY") + } + if !useIdentity { + if defaultSQL, ok, warningText := mapMySQLDefaultToKingbase(col, targetType); warningText != "" { + warnings = append(warnings, warningText) + } else if ok { + parts = append(parts, "DEFAULT "+defaultSQL) + } + } + if strings.EqualFold(strings.TrimSpace(col.Nullable), "NO") { + parts = append(parts, "NOT NULL") + } + return strings.Join(parts, " "), dedupeStrings(warnings) +} diff --git a/internal/sync/schema_migration_test.go b/internal/sync/schema_migration_test.go new file mode 100644 index 0000000..c946fbe --- /dev/null +++ b/internal/sync/schema_migration_test.go @@ -0,0 +1,957 @@ +package sync + +import ( + "GoNavi-Wails/internal/connection" + "context" + "reflect" + "strings" + "testing" +) + +type fakeMigrationDB struct { + columns map[string][]connection.ColumnDefinition + indexes map[string][]connection.IndexDefinition + queryData map[string][]map[string]interface{} + queryCols map[string][]string +} + +func (f *fakeMigrationDB) Connect(config connection.ConnectionConfig) error { return nil } +func (f *fakeMigrationDB) Close() error { return nil } +func (f 
*fakeMigrationDB) Ping() error { return nil } +func (f *fakeMigrationDB) Query(query string) ([]map[string]interface{}, []string, error) { + if rows, ok := f.queryData[query]; ok { + return rows, f.queryCols[query], nil + } + return nil, nil, nil +} +func (f *fakeMigrationDB) Exec(query string) (int64, error) { return 0, nil } +func (f *fakeMigrationDB) GetDatabases() ([]string, error) { return nil, nil } +func (f *fakeMigrationDB) GetTables(dbName string) ([]string, error) { + return nil, nil +} +func (f *fakeMigrationDB) GetCreateStatement(dbName, tableName string) (string, error) { + return "", nil +} +func (f *fakeMigrationDB) GetColumns(dbName, tableName string) ([]connection.ColumnDefinition, error) { + key := dbName + "." + tableName + if rows, ok := f.columns[key]; ok { + return rows, nil + } + return []connection.ColumnDefinition{}, nil +} +func (f *fakeMigrationDB) GetAllColumns(dbName string) ([]connection.ColumnDefinitionWithTable, error) { + return nil, nil +} +func (f *fakeMigrationDB) GetIndexes(dbName, tableName string) ([]connection.IndexDefinition, error) { + key := dbName + "." + tableName + if rows, ok := f.indexes[key]; ok { + return rows, nil + } + return nil, nil +} +func (f *fakeMigrationDB) GetForeignKeys(dbName, tableName string) ([]connection.ForeignKeyDefinition, error) { + return nil, nil +} +func (f *fakeMigrationDB) GetTriggers(dbName, tableName string) ([]connection.TriggerDefinition, error) { + return nil, nil +} +func (f *fakeMigrationDB) QueryContext(ctx context.Context, query string) ([]map[string]interface{}, []string, error) { + return f.Query(query) +} +func (f *fakeMigrationDB) ExecContext(ctx context.Context, query string) (int64, error) { + return 0, nil +} + +func TestBuildMySQLToKingbaseColumnDefinition_AutoIncrementAndBoolean(t *testing.T) { + t.Parallel() + + def, warnings := buildMySQLToKingbaseColumnDefinition(connection.ColumnDefinition{ + Name: "id", + Type: "int unsigned", + Nullable: "NO", + Extra: "auto_increment", + }) + if !strings.Contains(def, "bigint") || !strings.Contains(def, "GENERATED BY DEFAULT AS IDENTITY") || !strings.Contains(def, "NOT NULL") { + t.Fatalf("unexpected definition: %s", def) + } + if len(warnings) != 0 { + t.Fatalf("unexpected warnings: %v", warnings) + } + + def, warnings = buildMySQLToKingbaseColumnDefinition(connection.ColumnDefinition{ + Name: "enabled", + Type: "tinyint(1)", + Nullable: "YES", + Default: stringPtr("1"), + }) + if !strings.Contains(def, "boolean") || !strings.Contains(def, "DEFAULT TRUE") { + t.Fatalf("unexpected boolean definition: %s", def) + } + if len(warnings) != 0 { + t.Fatalf("unexpected warnings for boolean: %v", warnings) + } +} + +func TestBuildMySQLToKingbaseCreateTablePlan_GeneratesAndSkipsIndexes(t *testing.T) { + t.Parallel() + + sourceDB := &fakeMigrationDB{ + indexes: map[string][]connection.IndexDefinition{ + "shop.orders": { + {Name: "PRIMARY", ColumnName: "id", NonUnique: 0, SeqInIndex: 1, IndexType: "BTREE"}, + {Name: "idx_user_status", ColumnName: "user_id", NonUnique: 1, SeqInIndex: 1, IndexType: "BTREE"}, + {Name: "idx_user_status", ColumnName: "status", NonUnique: 1, SeqInIndex: 2, IndexType: "BTREE"}, + {Name: "idx_name_prefix", ColumnName: "name", NonUnique: 1, SeqInIndex: 1, IndexType: "BTREE", SubPart: 12}, + {Name: "idx_fulltext_note", ColumnName: "note", NonUnique: 1, SeqInIndex: 1, IndexType: "FULLTEXT"}, + }, + }, + } + cols := []connection.ColumnDefinition{ + {Name: "id", Type: "bigint", Nullable: "NO", Key: "PRI", Extra: "auto_increment"}, + {Name: 
"user_id", Type: "bigint", Nullable: "NO"}, + {Name: "status", Type: "varchar(32)", Nullable: "YES"}, + {Name: "name", Type: "varchar(128)", Nullable: "YES"}, + {Name: "note", Type: "text", Nullable: "YES"}, + } + cfg := SyncConfig{CreateIndexes: true} + createSQL, postSQL, warnings, unsupported, idxCreate, idxSkip, err := buildMySQLToKingbaseCreateTablePlan(cfg, "public.orders", cols, sourceDB, "shop", "orders") + if err != nil { + t.Fatalf("buildMySQLToKingbaseCreateTablePlan returned error: %v", err) + } + if !strings.Contains(createSQL, `CREATE TABLE "public"."orders"`) { + t.Fatalf("unexpected create SQL: %s", createSQL) + } + if !strings.Contains(createSQL, `PRIMARY KEY ("id")`) { + t.Fatalf("create SQL missing primary key: %s", createSQL) + } + if idxCreate != 1 || idxSkip != 2 { + t.Fatalf("unexpected index summary: create=%d skip=%d", idxCreate, idxSkip) + } + if len(postSQL) != 1 || !strings.Contains(postSQL[0], `CREATE INDEX "idx_user_status"`) { + t.Fatalf("unexpected post SQL: %v", postSQL) + } + if len(warnings) != 0 { + t.Fatalf("unexpected warnings: %v", warnings) + } + wantUnsupported := []string{ + "索引 idx_name_prefix 使用前缀长度,当前暂不支持迁移", + "索引 idx_fulltext_note 类型=FULLTEXT,当前暂不支持自动迁移", + } + if !reflect.DeepEqual(unsupported, wantUnsupported) { + t.Fatalf("unexpected unsupported objects: got=%v want=%v", unsupported, wantUnsupported) + } +} + +func TestBuildSchemaMigrationPlan_AutoCreateWhenTargetMissing(t *testing.T) { + t.Parallel() + + sourceDB := &fakeMigrationDB{ + columns: map[string][]connection.ColumnDefinition{ + "shop.orders": { + {Name: "id", Type: "bigint", Nullable: "NO", Key: "PRI", Extra: "auto_increment"}, + {Name: "name", Type: "varchar(128)", Nullable: "YES"}, + }, + }, + indexes: map[string][]connection.IndexDefinition{}, + } + targetDB := &fakeMigrationDB{columns: map[string][]connection.ColumnDefinition{}} + cfg := SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "mysql", Database: "shop"}, + TargetConfig: connection.ConnectionConfig{Type: "kingbase", Database: "demo"}, + TargetTableStrategy: "smart", + CreateIndexes: true, + } + plan, sourceCols, targetCols, err := buildSchemaMigrationPlan(cfg, "orders", sourceDB, targetDB) + if err != nil { + t.Fatalf("buildSchemaMigrationPlan returned error: %v", err) + } + if len(sourceCols) != 2 || len(targetCols) != 0 { + t.Fatalf("unexpected columns lengths: source=%d target=%d", len(sourceCols), len(targetCols)) + } + if plan.TargetTableExists { + t.Fatalf("expected target table missing") + } + if !plan.AutoCreate { + t.Fatalf("expected auto create enabled") + } + if !strings.Contains(plan.PlannedAction, "自动建表") { + t.Fatalf("unexpected planned action: %s", plan.PlannedAction) + } + if !strings.Contains(plan.CreateTableSQL, `CREATE TABLE "public"."orders"`) { + t.Fatalf("unexpected create table SQL: %s", plan.CreateTableSQL) + } +} + +func stringPtr(v string) *string { return &v } + +func TestBuildPGLikeToMySQLCreateTablePlan_GeneratesMySQLDDL(t *testing.T) { + t.Parallel() + + sourceDB := &fakeMigrationDB{ + indexes: map[string][]connection.IndexDefinition{ + "public.users": { + {Name: "users_email_key", ColumnName: "email", NonUnique: 0, SeqInIndex: 1, IndexType: "BTREE"}, + {Name: "idx_users_name", ColumnName: "name", NonUnique: 1, SeqInIndex: 1, IndexType: "BTREE"}, + }, + }, + } + cols := []connection.ColumnDefinition{ + {Name: "id", Type: "integer", Nullable: "NO", Key: "PRI", Extra: "auto_increment"}, + {Name: "email", Type: "character varying(120)", Nullable: "NO"}, + {Name: "name", Type: 
"text", Nullable: "YES"}, + {Name: "profile", Type: "jsonb", Nullable: "YES"}, + } + cfg := SyncConfig{CreateIndexes: true} + createSQL, postSQL, warnings, unsupported, idxCreate, idxSkip, err := buildPGLikeToMySQLCreateTablePlan(cfg, "app.users", cols, sourceDB, "public", "users") + if err != nil { + t.Fatalf("buildPGLikeToMySQLCreateTablePlan returned error: %v", err) + } + if !strings.Contains(createSQL, "CREATE TABLE `app`.`users`") { + t.Fatalf("unexpected create SQL: %s", createSQL) + } + if !strings.Contains(createSQL, "`id` int AUTO_INCREMENT NOT NULL") { + t.Fatalf("unexpected id definition: %s", createSQL) + } + if !strings.Contains(createSQL, "`profile` json") { + t.Fatalf("unexpected json definition: %s", createSQL) + } + if idxCreate != 2 || idxSkip != 0 { + t.Fatalf("unexpected index summary: create=%d skip=%d", idxCreate, idxSkip) + } + if len(postSQL) != 2 { + t.Fatalf("unexpected post sql length: %v", postSQL) + } + if len(warnings) != 0 { + t.Fatalf("unexpected warnings: %v", warnings) + } + if len(unsupported) != 0 { + t.Fatalf("unexpected unsupported: %v", unsupported) + } +} + +func TestBuildPGLikeToMySQLPlan_AutoCreateWhenTargetMissing(t *testing.T) { + t.Parallel() + + sourceDB := &fakeMigrationDB{ + columns: map[string][]connection.ColumnDefinition{ + "public.orders": { + {Name: "id", Type: "bigint", Nullable: "NO", Key: "PRI", Extra: "auto_increment"}, + {Name: "amount", Type: "numeric(10,2)", Nullable: "NO"}, + }, + }, + indexes: map[string][]connection.IndexDefinition{}, + } + targetDB := &fakeMigrationDB{columns: map[string][]connection.ColumnDefinition{}} + cfg := SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "kingbase", Database: "public"}, + TargetConfig: connection.ConnectionConfig{Type: "mysql", Database: "app"}, + TargetTableStrategy: "smart", + CreateIndexes: true, + } + plan, sourceCols, targetCols, err := buildPGLikeToMySQLPlan(cfg, "orders", sourceDB, targetDB) + if err != nil { + t.Fatalf("buildPGLikeToMySQLPlan returned error: %v", err) + } + if len(sourceCols) != 2 || len(targetCols) != 0 { + t.Fatalf("unexpected columns lengths: source=%d target=%d", len(sourceCols), len(targetCols)) + } + if plan.TargetTableExists { + t.Fatalf("expected target table missing") + } + if !plan.AutoCreate { + t.Fatalf("expected auto create enabled") + } + if !strings.Contains(plan.CreateTableSQL, "CREATE TABLE `app`.`orders`") { + t.Fatalf("unexpected create table SQL: %s", plan.CreateTableSQL) + } +} + +func TestBuildMySQLToPGLikeCreateTablePlan_GeneratesPostgresDDL(t *testing.T) { + t.Parallel() + + sourceDB := &fakeMigrationDB{ + indexes: map[string][]connection.IndexDefinition{ + "shop.orders": { + {Name: "idx_orders_user", ColumnName: "user_id", NonUnique: 1, SeqInIndex: 1, IndexType: "BTREE"}, + {Name: "idx_orders_user", ColumnName: "status", NonUnique: 1, SeqInIndex: 2, IndexType: "BTREE"}, + }, + }, + } + cols := []connection.ColumnDefinition{ + {Name: "id", Type: "bigint", Nullable: "NO", Key: "PRI", Extra: "auto_increment"}, + {Name: "user_id", Type: "bigint", Nullable: "NO"}, + {Name: "status", Type: "varchar(32)", Nullable: "YES"}, + {Name: "payload", Type: "json", Nullable: "YES"}, + } + cfg := SyncConfig{CreateIndexes: true} + createSQL, postSQL, warnings, unsupported, idxCreate, idxSkip, err := buildMySQLToPGLikeCreateTablePlan("postgres", cfg, "public.orders", cols, sourceDB, "shop", "orders") + if err != nil { + t.Fatalf("buildMySQLToPGLikeCreateTablePlan returned error: %v", err) + } + if !strings.Contains(createSQL, `CREATE TABLE 
"public"."orders"`) { + t.Fatalf("unexpected create SQL: %s", createSQL) + } + if !strings.Contains(createSQL, `GENERATED BY DEFAULT AS IDENTITY`) { + t.Fatalf("missing identity mapping: %s", createSQL) + } + if !strings.Contains(createSQL, `jsonb`) { + t.Fatalf("missing jsonb mapping: %s", createSQL) + } + if idxCreate != 1 || idxSkip != 0 { + t.Fatalf("unexpected index summary: create=%d skip=%d", idxCreate, idxSkip) + } + if len(postSQL) != 1 || !strings.Contains(postSQL[0], `CREATE INDEX "idx_orders_user"`) { + t.Fatalf("unexpected post SQL: %v", postSQL) + } + if len(warnings) != 0 || len(unsupported) != 0 { + t.Fatalf("unexpected warnings/unsupported: warnings=%v unsupported=%v", warnings, unsupported) + } +} + +func TestBuildMySQLToClickHouseCreateTableSQL_GeneratesMergeTree(t *testing.T) { + t.Parallel() + + cols := []connection.ColumnDefinition{ + {Name: "id", Type: "bigint unsigned", Nullable: "NO", Key: "PRI"}, + {Name: "name", Type: "varchar(128)", Nullable: "YES"}, + {Name: "payload", Type: "json", Nullable: "YES"}, + } + createSQL, warnings, unsupported := buildMySQLToClickHouseCreateTableSQL("analytics.orders", cols) + if !strings.Contains(createSQL, "ENGINE = MergeTree()") { + t.Fatalf("unexpected create SQL: %s", createSQL) + } + if !strings.Contains(createSQL, "ORDER BY (`id`)") { + t.Fatalf("unexpected order by: %s", createSQL) + } + if !strings.Contains(createSQL, "`payload` Nullable(String)") { + t.Fatalf("unexpected json mapping: %s", createSQL) + } + if len(warnings) == 0 { + t.Fatalf("expected warnings for clickhouse semantics") + } + if len(unsupported) != 0 { + t.Fatalf("unexpected unsupported: %v", unsupported) + } +} + +func TestBuildClickHouseToMySQLCreateTableSQL_GeneratesMySQLDDL(t *testing.T) { + t.Parallel() + + cols := []connection.ColumnDefinition{ + {Name: "id", Type: "UInt64", Nullable: "NO", Key: "PRI"}, + {Name: "event_time", Type: "DateTime", Nullable: "NO"}, + {Name: "payload", Type: "Map(String, String)", Nullable: "YES"}, + } + createSQL, warnings := buildClickHouseToMySQLCreateTableSQL("app.metrics", cols) + if !strings.Contains(createSQL, "CREATE TABLE `app`.`metrics`") { + t.Fatalf("unexpected create SQL: %s", createSQL) + } + if !strings.Contains(createSQL, "`id` bigint unsigned NOT NULL") { + t.Fatalf("unexpected uint64 mapping: %s", createSQL) + } + if !strings.Contains(createSQL, "`payload` json") { + t.Fatalf("unexpected complex type mapping: %s", createSQL) + } + if len(warnings) == 0 { + t.Fatalf("expected warning for limited clickhouse reverse semantics") + } +} + +func TestBuildMySQLToMongoPlan_AutoCreateCollection(t *testing.T) { + t.Parallel() + + sourceDB := &fakeMigrationDB{ + columns: map[string][]connection.ColumnDefinition{ + "shop.users": { + {Name: "id", Type: "bigint", Nullable: "NO", Key: "PRI"}, + {Name: "name", Type: "varchar(64)", Nullable: "YES"}, + }, + }, + indexes: map[string][]connection.IndexDefinition{ + "shop.users": { + {Name: "idx_users_name", ColumnName: "name", NonUnique: 1, SeqInIndex: 1, IndexType: "BTREE"}, + }, + }, + } + targetDB := &fakeMigrationDB{} + cfg := SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "mysql", Database: "shop"}, + TargetConfig: connection.ConnectionConfig{Type: "mongodb", Database: "app"}, + TargetTableStrategy: "smart", + CreateIndexes: true, + } + plan, sourceCols, targetCols, err := buildMySQLToMongoPlan(cfg, "users", sourceDB, targetDB) + if err != nil { + t.Fatalf("buildMySQLToMongoPlan returned error: %v", err) + } + if len(sourceCols) != 2 || targetCols != nil { 
+ t.Fatalf("unexpected source/target columns: %d / %v", len(sourceCols), targetCols) + } + if !plan.AutoCreate || len(plan.PreDataSQL) == 0 { + t.Fatalf("expected auto create collection command: %+v", plan) + } + if !strings.Contains(plan.PreDataSQL[0], `"create":"users"`) { + t.Fatalf("unexpected create collection command: %v", plan.PreDataSQL) + } + if len(plan.PostDataSQL) != 1 || !strings.Contains(plan.PostDataSQL[0], `"createIndexes":"users"`) { + t.Fatalf("unexpected index commands: %v", plan.PostDataSQL) + } +} + +func TestBuildPGLikeToMongoPlan_AutoCreateCollection(t *testing.T) { + t.Parallel() + + sourceDB := &fakeMigrationDB{ + columns: map[string][]connection.ColumnDefinition{ + "public.orders": { + {Name: "id", Type: "bigint", Nullable: "NO", Key: "PRI"}, + {Name: "name", Type: "varchar(64)", Nullable: "YES"}, + }, + }, + indexes: map[string][]connection.IndexDefinition{ + "public.orders": { + {Name: "idx_orders_name", ColumnName: "name", NonUnique: 1, SeqInIndex: 1, IndexType: "BTREE"}, + }, + }, + } + targetDB := &fakeMigrationDB{} + cfg := SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "postgres", Database: "public"}, + TargetConfig: connection.ConnectionConfig{Type: "mongodb", Database: "app"}, + TargetTableStrategy: "smart", + CreateIndexes: true, + } + plan, sourceCols, targetCols, err := buildPGLikeToMongoPlan(cfg, "orders", sourceDB, targetDB) + if err != nil { + t.Fatalf("buildPGLikeToMongoPlan returned error: %v", err) + } + if len(sourceCols) != 2 || targetCols != nil { + t.Fatalf("unexpected source/target columns: %d / %v", len(sourceCols), targetCols) + } + if !plan.AutoCreate || len(plan.PreDataSQL) == 0 { + t.Fatalf("expected auto create collection command: %+v", plan) + } + if !strings.Contains(plan.PreDataSQL[0], `"create":"orders"`) { + t.Fatalf("unexpected create collection command: %v", plan.PreDataSQL) + } + if len(plan.PostDataSQL) != 1 || !strings.Contains(plan.PostDataSQL[0], `"createIndexes":"orders"`) { + t.Fatalf("unexpected index commands: %v", plan.PostDataSQL) + } +} + +func TestBuildClickHouseToMongoPlan_AutoCreateCollection(t *testing.T) { + t.Parallel() + + sourceDB := &fakeMigrationDB{ + columns: map[string][]connection.ColumnDefinition{ + "analytics.metrics": { + {Name: "id", Type: "UInt64", Nullable: "NO", Key: "PRI"}, + {Name: "host", Type: "String", Nullable: "YES"}, + }, + }, + } + targetDB := &fakeMigrationDB{} + cfg := SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "clickhouse", Database: "analytics"}, + TargetConfig: connection.ConnectionConfig{Type: "mongodb", Database: "app"}, + TargetTableStrategy: "smart", + } + plan, sourceCols, targetCols, err := buildClickHouseToMongoPlan(cfg, "metrics", sourceDB, targetDB) + if err != nil { + t.Fatalf("buildClickHouseToMongoPlan returned error: %v", err) + } + if len(sourceCols) != 2 || targetCols != nil { + t.Fatalf("unexpected source/target columns: %d / %v", len(sourceCols), targetCols) + } + if !plan.AutoCreate || len(plan.PreDataSQL) == 0 { + t.Fatalf("expected auto create collection command: %+v", plan) + } + if !strings.Contains(plan.PreDataSQL[0], `"create":"metrics"`) { + t.Fatalf("unexpected create collection command: %v", plan.PreDataSQL) + } +} + +func TestBuildTDengineToMongoPlan_AutoCreateCollection(t *testing.T) { + t.Parallel() + + sourceDB := &fakeMigrationDB{ + columns: map[string][]connection.ColumnDefinition{ + "src.cpu": { + {Name: "ts", Type: "TIMESTAMP", Nullable: "NO"}, + {Name: "host", Type: "NCHAR(64)", Nullable: "YES"}, + }, + }, + } + 
targetDB := &fakeMigrationDB{} + cfg := SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "tdengine", Database: "src"}, + TargetConfig: connection.ConnectionConfig{Type: "mongodb", Database: "app"}, + TargetTableStrategy: "smart", + } + plan, sourceCols, targetCols, err := buildTDengineToMongoPlan(cfg, "cpu", sourceDB, targetDB) + if err != nil { + t.Fatalf("buildTDengineToMongoPlan returned error: %v", err) + } + if len(sourceCols) != 2 || targetCols != nil { + t.Fatalf("unexpected source/target columns: %d / %v", len(sourceCols), targetCols) + } + if !plan.AutoCreate || len(plan.PreDataSQL) == 0 { + t.Fatalf("expected auto create collection command: %+v", plan) + } + if !strings.Contains(plan.PreDataSQL[0], `"create":"cpu"`) { + t.Fatalf("unexpected create collection command: %v", plan.PreDataSQL) + } +} + +func TestBuildMongoToMySQLPlan_InfersColumnsAndCreatesTable(t *testing.T) { + t.Parallel() + + query := `{"find":"users","filter":{},"limit":200}` + sourceDB := &fakeMigrationDB{ + queryData: map[string][]map[string]interface{}{ + query: { + {"_id": "a1", "name": "alice", "age": int64(18), "profile": map[string]interface{}{"city": "shanghai"}}, + {"_id": "b2", "name": "bob", "profile": map[string]interface{}{"city": "beijing"}}, + }, + }, + queryCols: map[string][]string{query: {"_id", "name", "age", "profile"}}, + indexes: map[string][]connection.IndexDefinition{ + "crm.users": {{Name: "email_1", ColumnName: "name", NonUnique: 1, SeqInIndex: 1, IndexType: "BTREE"}}, + }, + } + targetDB := &fakeMigrationDB{} + cfg := SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "mongodb", Database: "crm"}, + TargetConfig: connection.ConnectionConfig{Type: "mysql", Database: "app"}, + TargetTableStrategy: "smart", + CreateIndexes: true, + } + plan, sourceCols, _, err := buildMongoToMySQLPlan(cfg, "users", sourceDB, targetDB) + if err != nil { + t.Fatalf("buildMongoToMySQLPlan returned error: %v", err) + } + if len(sourceCols) == 0 { + t.Fatalf("expected inferred source cols") + } + if !plan.AutoCreate || !strings.Contains(plan.CreateTableSQL, "CREATE TABLE `app`.`users`") { + t.Fatalf("unexpected create table sql: %s", plan.CreateTableSQL) + } + if !strings.Contains(plan.CreateTableSQL, "`_id` text NOT NULL") && !strings.Contains(plan.CreateTableSQL, "`_id` varchar") { + t.Fatalf("missing inferred _id column: %s", plan.CreateTableSQL) + } + if !strings.Contains(plan.CreateTableSQL, "`profile` json") { + t.Fatalf("expected nested field degrade to json: %s", plan.CreateTableSQL) + } + if len(plan.PostDataSQL) != 1 { + t.Fatalf("expected one post index sql, got=%v", plan.PostDataSQL) + } +} + +func TestBuildTDengineToMySQLPlan_AutoCreateWhenTargetMissing(t *testing.T) { + t.Parallel() + + sourceDB := &fakeMigrationDB{ + columns: map[string][]connection.ColumnDefinition{ + "metrics.cpu": { + {Name: "ts", Type: "TIMESTAMP", Nullable: "NO"}, + {Name: "host", Type: "NCHAR(64)", Nullable: "YES", Key: "TAG", Extra: "TAG"}, + {Name: "usage", Type: "DOUBLE", Nullable: "YES"}, + }, + }, + } + targetDB := &fakeMigrationDB{} + cfg := SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "tdengine", Database: "metrics"}, + TargetConfig: connection.ConnectionConfig{Type: "mysql", Database: "app"}, + TargetTableStrategy: "smart", + } + plan, sourceCols, targetCols, err := buildTDengineToMySQLPlan(cfg, "cpu", sourceDB, targetDB) + if err != nil { + t.Fatalf("buildTDengineToMySQLPlan returned error: %v", err) + } + if len(sourceCols) != 3 || len(targetCols) != 0 { + 
t.Fatalf("unexpected columns lengths: source=%d target=%d", len(sourceCols), len(targetCols)) + } + if !plan.AutoCreate { + t.Fatalf("expected auto create enabled") + } + if !strings.Contains(plan.CreateTableSQL, "CREATE TABLE `app`.`cpu`") { + t.Fatalf("unexpected create table sql: %s", plan.CreateTableSQL) + } + if !strings.Contains(plan.CreateTableSQL, "`ts` datetime") { + t.Fatalf("expected timestamp mapping, got: %s", plan.CreateTableSQL) + } + if !strings.Contains(plan.CreateTableSQL, "`host` varchar(64)") { + t.Fatalf("expected nchar mapping, got: %s", plan.CreateTableSQL) + } + if len(plan.Warnings) == 0 || !strings.Contains(strings.Join(plan.Warnings, " "), "TAG") { + t.Fatalf("expected TAG warning, got: %v", plan.Warnings) + } +} + +func TestBuildTDengineToPGLikePlan_AutoCreateWhenTargetMissing(t *testing.T) { + t.Parallel() + + sourceDB := &fakeMigrationDB{ + columns: map[string][]connection.ColumnDefinition{ + "metrics.cpu": { + {Name: "ts", Type: "TIMESTAMP", Nullable: "NO"}, + {Name: "payload", Type: "JSON", Nullable: "YES"}, + {Name: "host", Type: "BINARY(32)", Nullable: "YES", Key: "TAG", Extra: "TAG"}, + }, + }, + } + targetDB := &fakeMigrationDB{} + cfg := SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "tdengine", Database: "metrics"}, + TargetConfig: connection.ConnectionConfig{Type: "kingbase", Database: "ignored"}, + TargetTableStrategy: "smart", + } + plan, sourceCols, targetCols, err := buildTDengineToPGLikePlan(cfg, "cpu", sourceDB, targetDB) + if err != nil { + t.Fatalf("buildTDengineToPGLikePlan returned error: %v", err) + } + if len(sourceCols) != 3 || len(targetCols) != 0 { + t.Fatalf("unexpected columns lengths: source=%d target=%d", len(sourceCols), len(targetCols)) + } + if !plan.AutoCreate { + t.Fatalf("expected auto create enabled") + } + if !strings.Contains(plan.CreateTableSQL, `CREATE TABLE "public"."cpu"`) { + t.Fatalf("unexpected create table sql: %s", plan.CreateTableSQL) + } + if !strings.Contains(plan.CreateTableSQL, `"ts" timestamp`) { + t.Fatalf("expected timestamp mapping, got: %s", plan.CreateTableSQL) + } + if !strings.Contains(plan.CreateTableSQL, `"payload" jsonb`) { + t.Fatalf("expected json mapping, got: %s", plan.CreateTableSQL) + } + if len(plan.Warnings) == 0 || !strings.Contains(strings.Join(plan.Warnings, " "), "TAG") { + t.Fatalf("expected TAG warning, got: %v", plan.Warnings) + } +} + +func TestBuildSchemaMigrationPlan_TDengineTargetWarnsInsertOnlyBoundary(t *testing.T) { + t.Parallel() + + sourceDB := &fakeMigrationDB{ + columns: map[string][]connection.ColumnDefinition{ + "shop.metrics": { + {Name: "id", Type: "bigint", Nullable: "NO", Key: "PRI"}, + {Name: "ts", Type: "datetime", Nullable: "NO"}, + {Name: "value", Type: "double", Nullable: "YES"}, + }, + }, + } + targetDB := &fakeMigrationDB{ + columns: map[string][]connection.ColumnDefinition{ + "taos.metrics": { + {Name: "id", Type: "bigint", Nullable: "NO"}, + {Name: "ts", Type: "timestamp", Nullable: "NO"}, + {Name: "value", Type: "double", Nullable: "YES"}, + }, + }, + } + cfg := SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "mysql", Database: "shop"}, + TargetConfig: connection.ConnectionConfig{Type: "tdengine", Database: "taos"}, + Mode: "insert_update", + } + + plan, _, _, err := buildSchemaMigrationPlan(cfg, "metrics", sourceDB, targetDB) + if err != nil { + t.Fatalf("buildSchemaMigrationPlan returned error: %v", err) + } + warnings := strings.Join(plan.Warnings, " ") + if !strings.Contains(warnings, "仅支持 INSERT 写入") { + t.Fatalf("expected 
TDengine target warning, got: %v", plan.Warnings) + } +} + +func TestBuildMySQLLikeToTDenginePlan_AutoCreateWhenTargetMissing(t *testing.T) { + t.Parallel() + + sourceDB := &fakeMigrationDB{ + columns: map[string][]connection.ColumnDefinition{ + "shop.metrics": { + {Name: "id", Type: "bigint", Nullable: "NO", Key: "PRI", Extra: "auto_increment"}, + {Name: "ts", Type: "datetime", Nullable: "NO"}, + {Name: "payload", Type: "json", Nullable: "YES"}, + }, + }, + } + targetDB := &fakeMigrationDB{} + cfg := SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "mysql", Database: "shop"}, + TargetConfig: connection.ConnectionConfig{Type: "tdengine", Database: "taos"}, + TargetTableStrategy: "smart", + } + plan, sourceCols, targetCols, err := buildMySQLLikeToTDenginePlan(cfg, "metrics", sourceDB, targetDB) + if err != nil { + t.Fatalf("buildMySQLLikeToTDenginePlan returned error: %v", err) + } + if len(sourceCols) != 3 || len(targetCols) != 0 { + t.Fatalf("unexpected columns lengths: source=%d target=%d", len(sourceCols), len(targetCols)) + } + if !plan.AutoCreate { + t.Fatalf("expected auto create enabled") + } + if !strings.Contains(plan.CreateTableSQL, "CREATE TABLE `taos`.`metrics`") { + t.Fatalf("unexpected create sql: %s", plan.CreateTableSQL) + } + if !strings.Contains(plan.CreateTableSQL, "`ts` TIMESTAMP") { + t.Fatalf("expected ts first column mapped to TIMESTAMP, got: %s", plan.CreateTableSQL) + } + if !strings.Contains(plan.CreateTableSQL, "`payload` VARCHAR(") { + t.Fatalf("expected json degrade to VARCHAR, got: %s", plan.CreateTableSQL) + } + if !strings.Contains(strings.Join(plan.Warnings, " "), "insert-only") && !strings.Contains(strings.Join(plan.Warnings, " "), "INSERT") { + t.Fatalf("expected tdengine target warning, got: %v", plan.Warnings) + } +} + +func TestBuildPGLikeToTDenginePlan_AutoCreateWhenTargetMissing(t *testing.T) { + t.Parallel() + + sourceDB := &fakeMigrationDB{ + columns: map[string][]connection.ColumnDefinition{ + "public.metrics": { + {Name: "event_time", Type: "timestamp without time zone", Nullable: "NO"}, + {Name: "name", Type: "character varying(64)", Nullable: "YES"}, + {Name: "meta", Type: "jsonb", Nullable: "YES"}, + }, + }, + } + targetDB := &fakeMigrationDB{} + cfg := SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "postgres", Database: "ignored"}, + TargetConfig: connection.ConnectionConfig{Type: "tdengine", Database: "taos"}, + TargetTableStrategy: "smart", + } + plan, sourceCols, targetCols, err := buildPGLikeToTDenginePlan(cfg, "metrics", sourceDB, targetDB) + if err != nil { + t.Fatalf("buildPGLikeToTDenginePlan returned error: %v", err) + } + if len(sourceCols) != 3 || len(targetCols) != 0 { + t.Fatalf("unexpected columns lengths: source=%d target=%d", len(sourceCols), len(targetCols)) + } + if !plan.AutoCreate { + t.Fatalf("expected auto create enabled") + } + if !strings.Contains(plan.CreateTableSQL, "CREATE TABLE `taos`.`metrics`") { + t.Fatalf("unexpected create sql: %s", plan.CreateTableSQL) + } + if !strings.Contains(plan.CreateTableSQL, "`event_time` TIMESTAMP") { + t.Fatalf("expected timestamp mapping, got: %s", plan.CreateTableSQL) + } + if !strings.Contains(plan.CreateTableSQL, "`meta` VARCHAR(") { + t.Fatalf("expected jsonb degrade to VARCHAR, got: %s", plan.CreateTableSQL) + } +} + +func TestBuildMySQLLikeToTDenginePlan_RejectsAutoCreateWithoutTimestampColumn(t *testing.T) { + t.Parallel() + + sourceDB := &fakeMigrationDB{ + columns: map[string][]connection.ColumnDefinition{ + "shop.metrics": { + {Name: "id", 
Type: "bigint", Nullable: "NO", Key: "PRI"}, + {Name: "name", Type: "varchar(64)", Nullable: "YES"}, + }, + }, + } + targetDB := &fakeMigrationDB{} + cfg := SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "mysql", Database: "shop"}, + TargetConfig: connection.ConnectionConfig{Type: "tdengine", Database: "taos"}, + TargetTableStrategy: "smart", + } + plan, _, _, err := buildMySQLLikeToTDenginePlan(cfg, "metrics", sourceDB, targetDB) + if err != nil { + t.Fatalf("buildMySQLLikeToTDenginePlan returned error: %v", err) + } + if plan.AutoCreate { + t.Fatalf("expected auto create disabled when source has no timestamp column") + } + if !strings.Contains(plan.PlannedAction, "时间列") { + t.Fatalf("unexpected planned action: %s", plan.PlannedAction) + } + if !strings.Contains(strings.Join(plan.Warnings, " "), "时间列") { + t.Fatalf("expected missing timestamp warning, got: %v", plan.Warnings) + } +} + +func TestBuildClickHouseToTDenginePlan_AutoCreateWhenTargetMissing(t *testing.T) { + t.Parallel() + + sourceDB := &fakeMigrationDB{ + columns: map[string][]connection.ColumnDefinition{ + "analytics.metrics": { + {Name: "event_time", Type: "DateTime64(3)", Nullable: "NO"}, + {Name: "host", Type: "FixedString(64)", Nullable: "YES"}, + {Name: "payload", Type: "Map(String,String)", Nullable: "YES"}, + }, + }, + } + targetDB := &fakeMigrationDB{} + cfg := SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "clickhouse", Database: "analytics"}, + TargetConfig: connection.ConnectionConfig{Type: "tdengine", Database: "taos"}, + TargetTableStrategy: "smart", + } + plan, sourceCols, targetCols, err := buildClickHouseToTDenginePlan(cfg, "metrics", sourceDB, targetDB) + if err != nil { + t.Fatalf("buildClickHouseToTDenginePlan returned error: %v", err) + } + if len(sourceCols) != 3 || len(targetCols) != 0 { + t.Fatalf("unexpected columns lengths: source=%d target=%d", len(sourceCols), len(targetCols)) + } + if !plan.AutoCreate { + t.Fatalf("expected auto create enabled") + } + if !strings.Contains(plan.CreateTableSQL, "CREATE TABLE `taos`.`metrics`") { + t.Fatalf("unexpected create sql: %s", plan.CreateTableSQL) + } + if !strings.Contains(plan.CreateTableSQL, "`event_time` TIMESTAMP") { + t.Fatalf("expected datetime64 mapping, got: %s", plan.CreateTableSQL) + } + if !strings.Contains(plan.CreateTableSQL, "`host` VARCHAR(64)") { + t.Fatalf("expected fixedstring mapping, got: %s", plan.CreateTableSQL) + } + if !strings.Contains(plan.CreateTableSQL, "`payload` VARCHAR(") { + t.Fatalf("expected complex type degrade to VARCHAR, got: %s", plan.CreateTableSQL) + } +} + +func TestBuildClickHouseToPGLikePlan_AutoCreateWhenTargetMissing(t *testing.T) { + t.Parallel() + + sourceDB := &fakeMigrationDB{ + columns: map[string][]connection.ColumnDefinition{ + "analytics.metrics": { + {Name: "id", Type: "UInt64", Nullable: "NO", Key: "PRI"}, + {Name: "event_time", Type: "DateTime64(3)", Nullable: "NO"}, + {Name: "host", Type: "FixedString(64)", Nullable: "YES"}, + {Name: "payload", Type: "Map(String,String)", Nullable: "YES"}, + }, + }, + } + targetDB := &fakeMigrationDB{} + cfg := SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "clickhouse", Database: "analytics"}, + TargetConfig: connection.ConnectionConfig{Type: "postgres", Database: "public"}, + TargetTableStrategy: "smart", + } + plan, sourceCols, targetCols, err := buildClickHouseToPGLikePlan(cfg, "metrics", sourceDB, targetDB) + if err != nil { + t.Fatalf("buildClickHouseToPGLikePlan returned error: %v", err) + } + if len(sourceCols) != 4 
|| len(targetCols) != 0 { + t.Fatalf("unexpected columns lengths: source=%d target=%d", len(sourceCols), len(targetCols)) + } + if !plan.AutoCreate { + t.Fatalf("expected auto create enabled") + } + if !strings.Contains(plan.CreateTableSQL, `CREATE TABLE "public"."metrics"`) { + t.Fatalf("unexpected create sql: %s", plan.CreateTableSQL) + } + if !strings.Contains(plan.CreateTableSQL, `"id" numeric(20,0)`) { + t.Fatalf("expected uint64 safeguard mapping, got: %s", plan.CreateTableSQL) + } + if !strings.Contains(plan.CreateTableSQL, `"event_time" timestamp`) { + t.Fatalf("expected datetime64 mapping, got: %s", plan.CreateTableSQL) + } + if !strings.Contains(plan.CreateTableSQL, `"host" varchar(64)`) { + t.Fatalf("expected fixedstring mapping, got: %s", plan.CreateTableSQL) + } + if !strings.Contains(plan.CreateTableSQL, `"payload" jsonb`) { + t.Fatalf("expected complex type degrade to jsonb, got: %s", plan.CreateTableSQL) + } + if !strings.Contains(plan.CreateTableSQL, `PRIMARY KEY ("id")`) { + t.Fatalf("expected primary key preservation, got: %s", plan.CreateTableSQL) + } +} + +func TestBuildPGLikeToClickHousePlan_AutoCreateWhenTargetMissing(t *testing.T) { + t.Parallel() + + sourceDB := &fakeMigrationDB{ + columns: map[string][]connection.ColumnDefinition{ + "public.orders": { + {Name: "id", Type: "bigint", Nullable: "NO", Key: "PRI"}, + {Name: "created_at", Type: "timestamp without time zone", Nullable: "NO"}, + {Name: "profile", Type: "jsonb", Nullable: "YES"}, + }, + }, + } + targetDB := &fakeMigrationDB{} + cfg := SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "postgres", Database: "public"}, + TargetConfig: connection.ConnectionConfig{Type: "clickhouse", Database: "analytics"}, + TargetTableStrategy: "smart", + } + plan, sourceCols, targetCols, err := buildPGLikeToClickHousePlan(cfg, "orders", sourceDB, targetDB) + if err != nil { + t.Fatalf("buildPGLikeToClickHousePlan returned error: %v", err) + } + if len(sourceCols) != 3 || len(targetCols) != 0 { + t.Fatalf("unexpected columns lengths: source=%d target=%d", len(sourceCols), len(targetCols)) + } + if !plan.AutoCreate { + t.Fatalf("expected auto create enabled") + } + if !strings.Contains(plan.CreateTableSQL, "CREATE TABLE `analytics`.`orders`") { + t.Fatalf("unexpected create sql: %s", plan.CreateTableSQL) + } + if !strings.Contains(plan.CreateTableSQL, "`created_at` DateTime") { + t.Fatalf("expected timestamp mapping, got: %s", plan.CreateTableSQL) + } + if !strings.Contains(plan.CreateTableSQL, "`profile` Nullable(String)") { + t.Fatalf("expected jsonb degrade to Nullable(String), got: %s", plan.CreateTableSQL) + } + if !strings.Contains(plan.CreateTableSQL, "ORDER BY (`id`)") { + t.Fatalf("expected primary key order by, got: %s", plan.CreateTableSQL) + } +} + +func TestBuildTDengineToTDenginePlan_AutoCreateWhenTargetMissing(t *testing.T) { + t.Parallel() + + sourceDB := &fakeMigrationDB{ + columns: map[string][]connection.ColumnDefinition{ + "src.cpu": { + {Name: "ts", Type: "TIMESTAMP", Nullable: "NO"}, + {Name: "host", Type: "NCHAR(64)", Nullable: "YES"}, + {Name: "region", Type: "NCHAR(32)", Nullable: "YES", Key: "TAG"}, + }, + }, + } + targetDB := &fakeMigrationDB{} + cfg := SyncConfig{ + SourceConfig: connection.ConnectionConfig{Type: "tdengine", Database: "src"}, + TargetConfig: connection.ConnectionConfig{Type: "tdengine", Database: "dst"}, + TargetTableStrategy: "smart", + } + plan, sourceCols, targetCols, err := buildTDengineToTDenginePlan(cfg, "cpu", sourceDB, targetDB) + if err != nil { + 
t.Fatalf("buildTDengineToTDenginePlan returned error: %v", err) + } + if len(sourceCols) != 3 || len(targetCols) != 0 { + t.Fatalf("unexpected columns lengths: source=%d target=%d", len(sourceCols), len(targetCols)) + } + if !plan.AutoCreate { + t.Fatalf("expected auto create enabled") + } + if !strings.Contains(plan.CreateTableSQL, "CREATE TABLE `dst`.`cpu`") { + t.Fatalf("unexpected create sql: %s", plan.CreateTableSQL) + } + if !strings.Contains(plan.CreateTableSQL, "`ts` TIMESTAMP") { + t.Fatalf("expected timestamp preserved, got: %s", plan.CreateTableSQL) + } + if !strings.Contains(plan.CreateTableSQL, "`region` NCHAR(32)") { + t.Fatalf("expected tag degrade to regular nchar column, got: %s", plan.CreateTableSQL) + } + if !strings.Contains(strings.Join(plan.Warnings, " "), "TAG") { + t.Fatalf("expected TAG degrade warning, got: %v", plan.Warnings) + } +} diff --git a/internal/sync/schema_sync.go b/internal/sync/schema_sync.go index 126b623..f93abad 100644 --- a/internal/sync/schema_sync.go +++ b/internal/sync/schema_sync.go @@ -7,15 +7,16 @@ import ( ) func (s *SyncEngine) syncTableSchema(config SyncConfig, res *SyncResult, sourceDB db.Database, targetDB db.Database, tableName string) error { - targetType := strings.ToLower(strings.TrimSpace(config.TargetConfig.Type)) + targetType := resolveMigrationDBType(config.TargetConfig) if targetType != "mysql" { s.appendLog(config.JobID, res, "warn", fmt.Sprintf("目标数据库类型=%s 暂不支持结构同步,已跳过表 %s", config.TargetConfig.Type, tableName)) return nil } - sourceSchema, sourceTable := normalizeSchemaAndTable(config.SourceConfig.Type, config.SourceConfig.Database, tableName) - targetSchema, targetTable := normalizeSchemaAndTable(config.TargetConfig.Type, config.TargetConfig.Database, tableName) - targetQueryTable := qualifiedNameForQuery(config.TargetConfig.Type, targetSchema, targetTable, tableName) + sourceType := resolveMigrationDBType(config.SourceConfig) + sourceSchema, sourceTable := normalizeSchemaAndTable(sourceType, config.SourceConfig.Database, tableName) + targetSchema, targetTable := normalizeSchemaAndTable(targetType, config.TargetConfig.Database, tableName) + targetQueryTable := qualifiedNameForQuery(targetType, targetSchema, targetTable, tableName) // 1) 获取源表字段 sourceCols, err := sourceDB.GetColumns(sourceSchema, sourceTable) @@ -26,7 +27,6 @@ func (s *SyncEngine) syncTableSchema(config SyncConfig, res *SyncResult, sourceD // 2) 确保目标表存在 targetCols, err := targetDB.GetColumns(targetSchema, targetTable) if err != nil { - sourceType := strings.ToLower(strings.TrimSpace(config.SourceConfig.Type)) if sourceType != "mysql" { return fmt.Errorf("目标表不存在且源类型=%s 暂不支持自动建表: %w", config.SourceConfig.Type, err) } @@ -62,7 +62,6 @@ func (s *SyncEngine) syncTableSchema(config SyncConfig, res *SyncResult, sourceD // 3) 补齐目标缺失字段(安全策略:新增字段统一允许 NULL) missing := make([]string, 0) - sourceType := strings.ToLower(strings.TrimSpace(config.SourceConfig.Type)) for _, c := range sourceCols { colName := strings.TrimSpace(c.Name) if colName == "" { diff --git a/internal/sync/sql_helpers.go b/internal/sync/sql_helpers.go index 44b8a8b..af647b9 100644 --- a/internal/sync/sql_helpers.go +++ b/internal/sync/sql_helpers.go @@ -22,7 +22,7 @@ func quoteIdentByType(dbType string, ident string) string { } switch dbType { - case "mysql", "mariadb", "diros", "sphinx": + case "mysql", "mariadb", "diros", "sphinx", "clickhouse", "tdengine": return "`" + strings.ReplaceAll(ident, "`", "``") + "`" case "sqlserver": escaped := strings.ReplaceAll(ident, "]", "]]") @@ -74,8 +74,10 @@ func 
normalizeSchemaAndTable(dbType string, dbName string, tableName string) (st } switch strings.ToLower(strings.TrimSpace(dbType)) { - case "postgres", "kingbase", "vastbase": + case "postgres", "kingbase", "highgo", "vastbase": return "public", rawTable + case "duckdb": + return "main", rawTable default: return rawDB, rawTable } @@ -91,7 +93,7 @@ func qualifiedNameForQuery(dbType string, schema string, table string, original } switch strings.ToLower(strings.TrimSpace(dbType)) { - case "postgres", "kingbase", "vastbase": + case "postgres", "kingbase", "highgo", "vastbase": s := strings.TrimSpace(schema) if s == "" { s = "public" @@ -100,7 +102,16 @@ func qualifiedNameForQuery(dbType string, schema string, table string, original return raw } return s + "." + table - case "mysql", "mariadb", "diros", "sphinx": + case "duckdb": + s := strings.TrimSpace(schema) + if s == "" { + s = "main" + } + if table == "" { + return raw + } + return s + "." + table + case "mysql", "mariadb", "diros", "sphinx", "clickhouse", "tdengine": s := strings.TrimSpace(schema) if s == "" || table == "" { return table diff --git a/internal/sync/sync_engine.go b/internal/sync/sync_engine.go index d1d897c..15b5aaa 100644 --- a/internal/sync/sync_engine.go +++ b/internal/sync/sync_engine.go @@ -12,14 +12,17 @@ import ( // SyncConfig defines the parameters for a synchronization task type SyncConfig struct { - SourceConfig connection.ConnectionConfig `json:"sourceConfig"` - TargetConfig connection.ConnectionConfig `json:"targetConfig"` - Tables []string `json:"tables"` // Tables to sync - Content string `json:"content,omitempty"` // "data", "schema", "both" - Mode string `json:"mode"` // "insert_update", "insert_only", "full_overwrite" - JobID string `json:"jobId,omitempty"` - AutoAddColumns bool `json:"autoAddColumns,omitempty"` // 自动补齐缺失字段(当前仅 MySQL 目标支持) - TableOptions map[string]TableOptions `json:"tableOptions,omitempty"` + SourceConfig connection.ConnectionConfig `json:"sourceConfig"` + TargetConfig connection.ConnectionConfig `json:"targetConfig"` + Tables []string `json:"tables"` + Content string `json:"content,omitempty"` // "data", "schema", "both" + Mode string `json:"mode"` // "insert_update", "insert_only", "full_overwrite" + JobID string `json:"jobId,omitempty"` + AutoAddColumns bool `json:"autoAddColumns,omitempty"` // 自动补齐缺失字段 + TargetTableStrategy string `json:"targetTableStrategy,omitempty"` + CreateIndexes bool `json:"createIndexes,omitempty"` + MongoCollectionName string `json:"mongoCollectionName,omitempty"` + TableOptions map[string]TableOptions `json:"tableOptions,omitempty"` } // SyncResult holds the result of the sync operation @@ -45,6 +48,13 @@ func NewSyncEngine(reporter Reporter) *SyncEngine { func (s *SyncEngine) RunSync(config SyncConfig) SyncResult { result := SyncResult{Success: true, Logs: []string{}} logger.Infof("开始数据同步:源=%s 目标=%s 表数量=%d", formatConnSummaryForSync(config.SourceConfig), formatConnSummaryForSync(config.TargetConfig), len(config.Tables)) + if isRedisToMongoKeyspacePair(config) { + return s.runRedisToMongoSync(config, result) + } + if isMongoToRedisKeyspacePair(config) { + return s.runMongoToRedisSync(config, result) + } + totalTables := len(config.Tables) s.progress(config.JobID, 0, totalTables, "", "开始同步") @@ -70,6 +80,7 @@ func (s *SyncEngine) RunSync(config SyncConfig) SyncResult { s.appendLog(config.JobID, &result, "warn", fmt.Sprintf("未知同步模式 %q,已自动使用 insert_update", config.Mode)) } defaultMode := normalizeSyncMode(config.Mode) + strategy := 
normalizeTargetTableStrategy(config.TargetTableStrategy) contentLabel := "仅同步数据" if syncSchema && syncData { @@ -77,9 +88,9 @@ func (s *SyncEngine) RunSync(config SyncConfig) SyncResult { } else if syncSchema { contentLabel = "仅同步结构" } - s.appendLog(config.JobID, &result, "info", fmt.Sprintf("同步内容:%s;模式:%s;自动补字段:%v", contentLabel, defaultMode, config.AutoAddColumns)) + s.appendLog(config.JobID, &result, "info", fmt.Sprintf("同步内容:%s;模式:%s;自动补字段:%v;目标表策略:%s;创建索引:%v", contentLabel, defaultMode, config.AutoAddColumns, strategy, config.CreateIndexes)) - sourceDB, err := db.NewDatabase(config.SourceConfig.Type) + sourceDB, err := newSyncDatabase(config.SourceConfig.Type) if err != nil { logger.Error(err, "初始化源数据库驱动失败:类型=%s", config.SourceConfig.Type) return s.fail(config.JobID, totalTables, result, "初始化源数据库驱动失败: "+err.Error()) @@ -88,7 +99,7 @@ func (s *SyncEngine) RunSync(config SyncConfig) SyncResult { // Custom DB setup would go here if needed } - targetDB, err := db.NewDatabase(config.TargetConfig.Type) + targetDB, err := newSyncDatabase(config.TargetConfig.Type) if err != nil { logger.Error(err, "初始化目标数据库驱动失败:类型=%s", config.TargetConfig.Type) return s.fail(config.JobID, totalTables, result, "初始化目标数据库驱动失败: "+err.Error()) @@ -112,7 +123,6 @@ func (s *SyncEngine) RunSync(config SyncConfig) SyncResult { } defer targetDB.Close() - // Iterate Tables for i, tableName := range config.Tables { func() { tableMode := defaultMode @@ -120,30 +130,82 @@ func (s *SyncEngine) RunSync(config SyncConfig) SyncResult { s.progress(config.JobID, i, totalTables, tableName, fmt.Sprintf("同步表(%d/%d)", i+1, totalTables)) defer s.progress(config.JobID, i+1, totalTables, tableName, "表处理完成") - if syncSchema { - s.progress(config.JobID, i, totalTables, tableName, "同步表结构") - if err := s.syncTableSchema(config, &result, sourceDB, targetDB, tableName); err != nil { - s.appendLog(config.JobID, &result, "error", fmt.Sprintf("表结构同步失败:表=%s 错误=%v", tableName, err)) + plan, cols, targetCols, err := buildSchemaMigrationPlan(config, tableName, sourceDB, targetDB) + if err != nil { + s.appendLog(config.JobID, &result, "error", fmt.Sprintf("生成迁移计划失败:表=%s 错误=%v", tableName, err)) + return + } + for _, warning := range plan.Warnings { + s.appendLog(config.JobID, &result, "warn", fmt.Sprintf(" -> %s", warning)) + } + for _, unsupported := range plan.UnsupportedObjects { + s.appendLog(config.JobID, &result, "warn", fmt.Sprintf(" -> %s", unsupported)) + } + if strings.TrimSpace(plan.PlannedAction) != "" { + s.appendLog(config.JobID, &result, "info", fmt.Sprintf(" -> %s", plan.PlannedAction)) + } + + if !plan.TargetTableExists && !plan.AutoCreate { + s.appendLog(config.JobID, &result, "warn", fmt.Sprintf("表 %s 目标表不存在,当前策略不允许自动建表,已跳过", tableName)) + return + } + + if !plan.TargetTableExists && plan.AutoCreate { + s.progress(config.JobID, i, totalTables, tableName, "创建目标表") + if len(plan.PreDataSQL) > 0 { + if err := executeSQLStatements(targetDB.Exec, plan.PreDataSQL); err != nil { + s.appendLog(config.JobID, &result, "error", fmt.Sprintf("预执行建表 SQL 失败:表=%s 错误=%v", tableName, err)) + return + } + } + if strings.TrimSpace(plan.CreateTableSQL) == "" { + s.appendLog(config.JobID, &result, "error", fmt.Sprintf("表 %s 自动建表失败:建表 SQL 为空", tableName)) return } + if _, err := targetDB.Exec(plan.CreateTableSQL); err != nil { + s.appendLog(config.JobID, &result, "error", fmt.Sprintf("创建目标表失败:表=%s 错误=%v", tableName, err)) + return + } + s.appendLog(config.JobID, &result, "info", fmt.Sprintf("目标表创建成功:%s", tableName)) + targetCols, err = 
targetDB.GetColumns(plan.TargetSchema, plan.TargetTable) + if err != nil { + s.appendLog(config.JobID, &result, "error", fmt.Sprintf("创建目标表后获取字段失败:表=%s 错误=%v", tableName, err)) + return + } + } else if len(plan.PreDataSQL) > 0 { + s.progress(config.JobID, i, totalTables, tableName, "同步表结构") + if err := executeSQLStatements(targetDB.Exec, plan.PreDataSQL); err != nil { + s.appendLog(config.JobID, &result, "error", fmt.Sprintf("同步表结构失败:表=%s 错误=%v", tableName, err)) + return + } + targetCols, err = targetDB.GetColumns(plan.TargetSchema, plan.TargetTable) + if err != nil { + s.appendLog(config.JobID, &result, "warn", fmt.Sprintf("补字段后刷新目标字段失败:表=%s 错误=%v", tableName, err)) + } } + if !syncData { + if len(plan.PostDataSQL) > 0 { + s.progress(config.JobID, i, totalTables, tableName, "创建索引") + if err := executeSQLStatements(targetDB.Exec, plan.PostDataSQL); err != nil { + s.appendLog(config.JobID, &result, "error", fmt.Sprintf("创建索引失败:表=%s 错误=%v", tableName, err)) + return + } + } result.TablesSynced++ return } - sourceSchema, sourceTable := normalizeSchemaAndTable(config.SourceConfig.Type, config.SourceConfig.Database, tableName) - targetSchema, targetTable := normalizeSchemaAndTable(config.TargetConfig.Type, config.TargetConfig.Database, tableName) - sourceQueryTable := qualifiedNameForQuery(config.SourceConfig.Type, sourceSchema, sourceTable, tableName) - targetQueryTable := qualifiedNameForQuery(config.TargetConfig.Type, targetSchema, targetTable, tableName) - - // 1. Get Columns & PKs - cols, err := sourceDB.GetColumns(sourceSchema, sourceTable) - if err != nil { - logger.Error(err, "获取源表列信息失败:表=%s", tableName) - s.appendLog(config.JobID, &result, "error", fmt.Sprintf("获取表 %s 的列信息失败: %v", tableName, err)) - return + targetType := resolveMigrationDBType(config.TargetConfig) + sourceType := resolveMigrationDBType(config.SourceConfig) + targetTable := plan.TargetTable + sourceQueryTable, targetQueryTable := plan.SourceQueryTable, plan.TargetQueryTable + applyTableName := targetTable + switch targetType { + case "postgres", "kingbase", "highgo", "vastbase", "sqlserver": + applyTableName = targetQueryTable } + sourceColsByLower := make(map[string]connection.ColumnDefinition, len(cols)) for _, col := range cols { if strings.TrimSpace(col.Name) == "" { @@ -158,25 +220,24 @@ func (s *SyncEngine) RunSync(config SyncConfig) SyncResult { pkCols = append(pkCols, col.Name) } } - - if len(pkCols) == 0 { - s.appendLog(config.JobID, &result, "warn", fmt.Sprintf("表 %s 未找到主键,已跳过数据同步(避免产生重复数据)", tableName)) - return + requirePK := tableMode == "insert_update" && plan.TargetTableExists + pkCol := "" + if requirePK { + if len(pkCols) == 0 { + s.appendLog(config.JobID, &result, "warn", fmt.Sprintf("表 %s 未找到主键,当前模式需要差异对比,已跳过", tableName)) + return + } + if len(pkCols) > 1 { + s.appendLog(config.JobID, &result, "warn", fmt.Sprintf("表 %s 为复合主键(%s),当前暂不支持差异同步", tableName, strings.Join(pkCols, ","))) + return + } + pkCol = pkCols[0] } - if len(pkCols) > 1 { - s.appendLog(config.JobID, &result, "warn", fmt.Sprintf("表 %s 为复合主键(%s),当前暂不支持数据同步", tableName, strings.Join(pkCols, ","))) - return - } - pkCol := pkCols[0] opts := TableOptions{Insert: true, Update: true, Delete: false} if config.TableOptions != nil { if t, ok := config.TableOptions[tableName]; ok { opts = t - // 默认防护:如用户未设置任意一个字段,保持 insert/update 默认 true、delete 默认 false - if !t.Insert && !t.Update && !t.Delete { - opts = t - } } } if !opts.Insert && !opts.Update && !opts.Delete { @@ -184,10 +245,8 @@ func (s *SyncEngine) RunSync(config SyncConfig) SyncResult { 
return } - // 2. Fetch Data (MEMORY INTENSIVE - PROTOTYPE ONLY) - // TODO: Implement paging/streaming s.progress(config.JobID, i, totalTables, tableName, "读取源表数据") - sourceRows, _, err := sourceDB.Query(fmt.Sprintf("SELECT * FROM %s", quoteQualifiedIdentByType(config.SourceConfig.Type, sourceQueryTable))) + sourceRows, _, err := sourceDB.Query(fmt.Sprintf("SELECT * FROM %s", quoteQualifiedIdentByType(sourceType, sourceQueryTable))) if err != nil { logger.Error(err, "读取源表失败:表=%s", tableName) s.appendLog(config.JobID, &result, "error", fmt.Sprintf("读取源表 %s 失败: %v", tableName, err)) @@ -196,19 +255,19 @@ func (s *SyncEngine) RunSync(config SyncConfig) SyncResult { var inserts []map[string]interface{} var updates []connection.UpdateRow + var deletes []map[string]interface{} - if tableMode == "insert_update" { + if tableMode == "insert_update" && plan.TargetTableExists { s.progress(config.JobID, i, totalTables, tableName, "读取目标表数据") - targetRows, _, err := targetDB.Query(fmt.Sprintf("SELECT * FROM %s", quoteQualifiedIdentByType(config.TargetConfig.Type, targetQueryTable))) + targetRows, _, err := targetDB.Query(fmt.Sprintf("SELECT * FROM %s", quoteQualifiedIdentByType(targetType, targetQueryTable))) if err != nil { logger.Error(err, "读取目标表失败:表=%s", tableName) s.appendLog(config.JobID, &result, "error", fmt.Sprintf("读取目标表 %s 失败: %v", tableName, err)) return } - // 3. Compare (In-Memory Hash Map) s.progress(config.JobID, i, totalTables, tableName, "对比差异") - targetMap := make(map[string]map[string]interface{}) + targetMap := make(map[string]map[string]interface{}, len(targetRows)) for _, row := range targetRows { if row[pkCol] == nil { continue @@ -220,7 +279,6 @@ func (s *SyncEngine) RunSync(config SyncConfig) SyncResult { targetMap[pkVal] = row } sourcePKSet := make(map[string]struct{}, len(sourceRows)) - for _, sRow := range sourceRows { if sRow[pkCol] == nil { continue @@ -230,7 +288,6 @@ func (s *SyncEngine) RunSync(config SyncConfig) SyncResult { continue } sourcePKSet[pkVal] = struct{}{} - if tRow, exists := targetMap[pkVal]; exists { changes := make(map[string]interface{}) for k, v := range sRow { @@ -239,17 +296,12 @@ func (s *SyncEngine) RunSync(config SyncConfig) SyncResult { } } if len(changes) > 0 { - updates = append(updates, connection.UpdateRow{ - Keys: map[string]interface{}{pkCol: sRow[pkCol]}, - Values: changes, - }) + updates = append(updates, connection.UpdateRow{Keys: map[string]interface{}{pkCol: sRow[pkCol]}, Values: changes}) } } else { inserts = append(inserts, sRow) } } - - var deletes []map[string]interface{} if opts.Delete { for pkStr, row := range targetMap { if _, ok := sourcePKSet[pkStr]; ok { @@ -258,150 +310,49 @@ func (s *SyncEngine) RunSync(config SyncConfig) SyncResult { deletes = append(deletes, map[string]interface{}{pkCol: row[pkCol]}) } } - - // apply operation selection inserts = filterRowsByPKSelection(pkCol, inserts, opts.Insert, opts.SelectedInsertPKs) updates = filterUpdatesByPKSelection(pkCol, updates, opts.Update, opts.SelectedUpdatePKs) deletes = filterRowsByPKSelection(pkCol, deletes, opts.Delete, opts.SelectedDeletePKs) - - changeSet := connection.ChangeSet{ - Inserts: inserts, - Updates: updates, - Deletes: deletes, + } else { + inserts = sourceRows + if !opts.Insert { + inserts = nil } + if tableMode == "full_overwrite" && plan.TargetTableExists { + s.appendLog(config.JobID, &result, "warn", fmt.Sprintf(" -> 全量覆盖模式:即将清空目标表 %s", tableName)) + s.progress(config.JobID, i, totalTables, tableName, "清空目标表") + clearSQL := "" + if targetType == "mysql" 
{ + clearSQL = fmt.Sprintf("TRUNCATE TABLE %s", quoteQualifiedIdentByType(targetType, targetQueryTable)) + } else { + clearSQL = fmt.Sprintf("DELETE FROM %s", quoteQualifiedIdentByType(targetType, targetQueryTable)) + } + if _, err := targetDB.Exec(clearSQL); err != nil { + s.appendLog(config.JobID, &result, "error", fmt.Sprintf(" -> 清空目标表失败: %v", err)) + return + } + } + } - // 4. Align schema (target missing columns) - s.progress(config.JobID, i, totalTables, tableName, "检查字段一致性") - requiredCols := collectRequiredColumns(changeSet.Inserts, changeSet.Updates) - targetCols, err := targetDB.GetColumns(targetSchema, targetTable) + changeSet := connection.ChangeSet{Inserts: inserts, Updates: updates, Deletes: deletes} + s.progress(config.JobID, i, totalTables, tableName, "检查字段一致性") + targetColsResolved := targetCols + if len(targetColsResolved) == 0 { + targetColsResolved, err = targetDB.GetColumns(plan.TargetSchema, plan.TargetTable) if err != nil { s.appendLog(config.JobID, &result, "warn", fmt.Sprintf(" -> 获取目标表字段失败,已跳过字段一致性检查: %v", err)) - } else { - targetColSet := make(map[string]struct{}, len(targetCols)) - for _, c := range targetCols { - name := strings.ToLower(strings.TrimSpace(c.Name)) - if name == "" { - continue - } - targetColSet[name] = struct{}{} - } - - missing := make([]string, 0) - for lower, original := range requiredCols { - if _, ok := targetColSet[lower]; !ok { - missing = append(missing, original) - } - } - sort.Strings(missing) - - if len(missing) > 0 { - if config.AutoAddColumns && strings.ToLower(strings.TrimSpace(config.TargetConfig.Type)) == "mysql" { - s.appendLog(config.JobID, &result, "warn", fmt.Sprintf(" -> 目标表缺少字段 %d 个,开始自动补齐: %s", len(missing), strings.Join(missing, ", "))) - added := 0 - for _, colName := range missing { - colLower := strings.ToLower(strings.TrimSpace(colName)) - colType := "TEXT" - if strings.ToLower(strings.TrimSpace(config.SourceConfig.Type)) == "mysql" { - if srcCol, ok := sourceColsByLower[colLower]; ok { - colType = sanitizeMySQLColumnType(srcCol.Type) - } - } - - alterSQL := fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s %s NULL", - quoteQualifiedIdentByType("mysql", targetQueryTable), - quoteIdentByType("mysql", colName), - colType, - ) - if _, err := targetDB.Exec(alterSQL); err != nil { - s.appendLog(config.JobID, &result, "error", fmt.Sprintf(" -> 自动补字段失败:字段=%s 错误=%v", colName, err)) - continue - } - added++ - } - s.appendLog(config.JobID, &result, "info", fmt.Sprintf(" -> 自动补字段完成:成功=%d 失败=%d", added, len(missing)-added)) - - // refresh columns - targetCols, err = targetDB.GetColumns(targetSchema, targetTable) - if err == nil { - targetColSet = make(map[string]struct{}, len(targetCols)) - for _, c := range targetCols { - name := strings.ToLower(strings.TrimSpace(c.Name)) - if name == "" { - continue - } - targetColSet[name] = struct{}{} - } - } - } else { - s.appendLog(config.JobID, &result, "warn", fmt.Sprintf(" -> 目标表缺少字段 %d 个(未开启自动补齐),将自动忽略:%s", len(missing), strings.Join(missing, ", "))) - } - - // filter out still-missing columns to avoid apply failure - changeSet.Inserts = filterInsertRows(changeSet.Inserts, targetColSet) - changeSet.Updates = filterUpdateRows(changeSet.Updates, targetColSet) - } - } - - // 5. 
Apply Changes - s.progress(config.JobID, i, totalTables, tableName, "应用变更") - - if len(changeSet.Inserts) > 0 || len(changeSet.Updates) > 0 || len(changeSet.Deletes) > 0 { - s.appendLog(config.JobID, &result, "info", fmt.Sprintf(" -> 需插入: %d 行, 需更新: %d 行, 需删除: %d 行", len(changeSet.Inserts), len(changeSet.Updates), len(changeSet.Deletes))) - - if applier, ok := targetDB.(db.BatchApplier); ok { - if err := applier.ApplyChanges(targetTable, changeSet); err != nil { - s.appendLog(config.JobID, &result, "error", fmt.Sprintf(" -> 应用变更失败: %v", err)) - } else { - result.RowsInserted += len(changeSet.Inserts) - result.RowsUpdated += len(changeSet.Updates) - result.RowsDeleted += len(changeSet.Deletes) - } - } else { - s.appendLog(config.JobID, &result, "warn", " -> 目标驱动不支持应用数据变更 (ApplyChanges).") - } - } else { - s.appendLog(config.JobID, &result, "info", " -> 数据一致,无需变更.") - } - - result.TablesSynced++ - return - } else { - // insert_only / full_overwrite: do not compare target, just insert source rows - inserts = sourceRows - } - - // full_overwrite: clear target table first - if tableMode == "full_overwrite" { - s.appendLog(config.JobID, &result, "warn", fmt.Sprintf(" -> 全量覆盖模式:即将清空目标表 %s", tableName)) - s.progress(config.JobID, i, totalTables, tableName, "清空目标表") - clearSQL := "" - if strings.ToLower(strings.TrimSpace(config.TargetConfig.Type)) == "mysql" { - clearSQL = fmt.Sprintf("TRUNCATE TABLE %s", quoteQualifiedIdentByType(config.TargetConfig.Type, targetQueryTable)) - } else { - clearSQL = fmt.Sprintf("DELETE FROM %s", quoteQualifiedIdentByType(config.TargetConfig.Type, targetQueryTable)) - } - if _, err := targetDB.Exec(clearSQL); err != nil { - s.appendLog(config.JobID, &result, "error", fmt.Sprintf(" -> 清空目标表失败: %v", err)) - return } } - - // 4. 
Align schema (target missing columns) - s.progress(config.JobID, i, totalTables, tableName, "检查字段一致性") - requiredCols := collectRequiredColumns(inserts, updates) - targetCols, err := targetDB.GetColumns(targetSchema, targetTable) - if err != nil { - s.appendLog(config.JobID, &result, "warn", fmt.Sprintf(" -> 获取目标表字段失败,已跳过字段一致性检查: %v", err)) - } else { - targetColSet := make(map[string]struct{}, len(targetCols)) - for _, c := range targetCols { + if len(targetColsResolved) > 0 { + targetColSet := make(map[string]struct{}, len(targetColsResolved)) + for _, c := range targetColsResolved { name := strings.ToLower(strings.TrimSpace(c.Name)) if name == "" { continue } targetColSet[name] = struct{}{} } - + requiredCols := collectRequiredColumns(changeSet.Inserts, changeSet.Updates) missing := make([]string, 0) for lower, original := range requiredCols { if _, ok := targetColSet[lower]; !ok { @@ -409,79 +360,64 @@ func (s *SyncEngine) RunSync(config SyncConfig) SyncResult { } } sort.Strings(missing) - if len(missing) > 0 { - if config.AutoAddColumns && strings.ToLower(strings.TrimSpace(config.TargetConfig.Type)) == "mysql" { + if config.AutoAddColumns && supportsAutoAddColumnsForPair(sourceType, targetType) { s.appendLog(config.JobID, &result, "warn", fmt.Sprintf(" -> 目标表缺少字段 %d 个,开始自动补齐: %s", len(missing), strings.Join(missing, ", "))) added := 0 for _, colName := range missing { colLower := strings.ToLower(strings.TrimSpace(colName)) - colType := "TEXT" - if strings.ToLower(strings.TrimSpace(config.SourceConfig.Type)) == "mysql" { - if srcCol, ok := sourceColsByLower[colLower]; ok { - colType = sanitizeMySQLColumnType(srcCol.Type) - } + srcCol, ok := sourceColsByLower[colLower] + if !ok { + continue + } + alterSQL, err := buildAddColumnSQLForPair(sourceType, targetType, targetQueryTable, srcCol) + if err != nil { + s.appendLog(config.JobID, &result, "error", fmt.Sprintf(" -> 自动补字段失败:字段=%s 错误=%v", colName, err)) + continue } - - alterSQL := fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s %s NULL", - quoteQualifiedIdentByType("mysql", targetQueryTable), - quoteIdentByType("mysql", colName), - colType, - ) if _, err := targetDB.Exec(alterSQL); err != nil { s.appendLog(config.JobID, &result, "error", fmt.Sprintf(" -> 自动补字段失败:字段=%s 错误=%v", colName, err)) continue } added++ + targetColSet[colLower] = struct{}{} } s.appendLog(config.JobID, &result, "info", fmt.Sprintf(" -> 自动补字段完成:成功=%d 失败=%d", added, len(missing)-added)) - - // refresh columns - targetCols, err = targetDB.GetColumns(targetSchema, targetTable) - if err == nil { - targetColSet = make(map[string]struct{}, len(targetCols)) - for _, c := range targetCols { - name := strings.ToLower(strings.TrimSpace(c.Name)) - if name == "" { - continue - } - targetColSet[name] = struct{}{} - } - } } else { s.appendLog(config.JobID, &result, "warn", fmt.Sprintf(" -> 目标表缺少字段 %d 个(未开启自动补齐),将自动忽略:%s", len(missing), strings.Join(missing, ", "))) } - - // filter out still-missing columns to avoid apply failure - inserts = filterInsertRows(inserts, targetColSet) - updates = filterUpdateRows(updates, targetColSet) + changeSet.Inserts = filterInsertRows(changeSet.Inserts, targetColSet) + changeSet.Updates = filterUpdateRows(changeSet.Updates, targetColSet) } } - // 5. 
Apply Changes s.progress(config.JobID, i, totalTables, tableName, "应用变更") - changeSet := connection.ChangeSet{ - Inserts: inserts, - Updates: updates, - } - - if len(changeSet.Inserts) > 0 || len(changeSet.Updates) > 0 { - s.appendLog(config.JobID, &result, "info", fmt.Sprintf(" -> 需插入: %d 行, 需更新: %d 行", len(changeSet.Inserts), len(changeSet.Updates))) - + if len(changeSet.Inserts) > 0 || len(changeSet.Updates) > 0 || len(changeSet.Deletes) > 0 { + s.appendLog(config.JobID, &result, "info", fmt.Sprintf(" -> 需插入: %d 行, 需更新: %d 行, 需删除: %d 行", len(changeSet.Inserts), len(changeSet.Updates), len(changeSet.Deletes))) if applier, ok := targetDB.(db.BatchApplier); ok { - if err := applier.ApplyChanges(targetTable, changeSet); err != nil { + if err := applier.ApplyChanges(applyTableName, changeSet); err != nil { s.appendLog(config.JobID, &result, "error", fmt.Sprintf(" -> 应用变更失败: %v", err)) - } else { - result.RowsInserted += len(changeSet.Inserts) - result.RowsUpdated += len(changeSet.Updates) + return } + result.RowsInserted += len(changeSet.Inserts) + result.RowsUpdated += len(changeSet.Updates) + result.RowsDeleted += len(changeSet.Deletes) } else { s.appendLog(config.JobID, &result, "warn", " -> 目标驱动不支持应用数据变更 (ApplyChanges).") + return } } else { s.appendLog(config.JobID, &result, "info", " -> 数据一致,无需变更.") } + if len(plan.PostDataSQL) > 0 { + s.progress(config.JobID, i, totalTables, tableName, "创建索引") + if err := executeSQLStatements(targetDB.Exec, plan.PostDataSQL); err != nil { + s.appendLog(config.JobID, &result, "error", fmt.Sprintf("创建索引失败:表=%s 错误=%v", tableName, err)) + return + } + } + result.TablesSynced++ }() } @@ -554,3 +490,26 @@ func (s *SyncEngine) fail(jobID string, totalTables int, res SyncResult, msg str s.progress(jobID, res.TablesSynced, totalTables, "", "同步失败") return res } + +func (s *SyncEngine) execDDLStatements(jobID string, res *SyncResult, database db.Database, tableName string, stage string, statements []string) error { + for _, statement := range statements { + sqlText := strings.TrimSpace(statement) + if sqlText == "" { + continue + } + if _, err := database.Exec(sqlText); err != nil { + return fmt.Errorf("%s失败: %w", stage, err) + } + s.appendLog(jobID, res, "info", fmt.Sprintf("表 %s %s成功:%s", tableName, stage, shortenSyncSQL(sqlText))) + } + return nil +} + +func shortenSyncSQL(sqlText string) string { + text := strings.TrimSpace(strings.ReplaceAll(strings.ReplaceAll(sqlText, "\n", " "), "\t", " ")) + text = strings.Join(strings.Fields(text), " ") + if len(text) <= 120 { + return text + } + return text[:117] + "..." 
+}
diff --git a/internal/sync/sync_events.go b/internal/sync/sync_events.go
index 1facae7..a7777e5 100644
--- a/internal/sync/sync_events.go
+++ b/internal/sync/sync_events.go
@@ -27,4 +27,3 @@ type Reporter struct {
 	OnLog      func(event SyncLogEvent)
 	OnProgress func(event SyncProgressEvent)
 }
-

From 3bd02e2e0956fe3681f5720459a01e5268a3fcc6 Mon Sep 17 00:00:00 2001
From: Syngnat
Date: Tue, 10 Mar 2026 10:27:13 +0800
Subject: [PATCH 29/48] =?UTF-8?q?=F0=9F=90=9B=20fix(connection):=20?=
 =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E6=96=B0=E5=BB=BA=E8=BF=9E=E6=8E=A5=E6=97=B6?=
 =?UTF-8?q?=E6=A0=87=E7=AD=BE=E5=88=87=E6=8D=A2=E5=AF=BC=E8=87=B4=E8=A1=A8?=
 =?UTF-8?q?=E5=8D=95=E6=95=B0=E6=8D=AE=E4=B8=A2=E5=A4=B1?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

- 在 SSH 标签页测试连接时,基础信息的 host 回退为默认值 localhost
- 在基础信息标签页保存时,SSH 配置丢失
- 保存结果仅包含当前选中标签页的字段
- refs #208
---
 frontend/src/components/ConnectionModal.tsx | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/frontend/src/components/ConnectionModal.tsx b/frontend/src/components/ConnectionModal.tsx
index c55c0b2..bf7414b 100644
--- a/frontend/src/components/ConnectionModal.tsx
+++ b/frontend/src/components/ConnectionModal.tsx
@@ -1179,7 +1179,8 @@ const ConnectionModal: React.FC<{
 
   const handleOk = async () => {
     try {
-      const values = await form.validateFields();
+      await form.validateFields();
+      const values = form.getFieldsValue(true);
       const unavailableReason = await resolveDriverUnavailableReason(values.type);
       if (unavailableReason) {
         message.warning(unavailableReason);
@@ -1241,7 +1242,8 @@ const ConnectionModal: React.FC<{
     if (testInFlightRef.current) return;
     testInFlightRef.current = true;
     try {
-      const values = await form.validateFields();
+      await form.validateFields();
+      const values = form.getFieldsValue(true);
       const unavailableReason = await resolveDriverUnavailableReason(values.type);
       if (unavailableReason) {
         const failMessage = buildTestFailureMessage(unavailableReason, '驱动未安装启用');
@@ -1311,7 +1313,8 @@ const ConnectionModal: React.FC<{
       return;
     }
     try {
-      const values = await form.validateFields();
+      await form.validateFields();
+      const values = form.getFieldsValue(true);
       setDiscoveringMembers(true);
       const config = await buildConfig(values, false);
       const result = await MongoDiscoverMembers(config as any);
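
The fix above relies on the difference between two antd Form calls: `validateFields()` resolves with only the fields that are currently mounted (the active tab), while `getFieldsValue(true)` also returns values registered by fields that are not rendered at the moment. A minimal TypeScript sketch of the pattern the diff switches to; the `ConnectionFormValues` shape is illustrative, not a type from this repository:

```ts
import type { FormInstance } from 'antd';

// Illustrative shape only, not a type defined in this repository.
interface ConnectionFormValues {
  name?: string;
  host?: string;
  port?: number;
  useSSH?: boolean;
  sshHost?: string;
}

async function collectAllTabValues(form: FormInstance): Promise<ConnectionFormValues> {
  // Validate whatever is currently mounted (the visible tab)...
  await form.validateFields();
  // ...then read back every registered field, including fields on tabs that are
  // not rendered right now, so SSH settings survive a save from the basic tab.
  return form.getFieldsValue(true) as ConnectionFormValues;
}
```
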
From 7644462180b4ae2f4ee1347d4d8dd89922c94f2c Mon Sep 17 00:00:00 2001
From: Syngnat
Date: Tue, 10 Mar 2026 10:32:31 +0800
Subject: [PATCH 30/48] =?UTF-8?q?=F0=9F=90=9B=20fix(mongodb):=20=E4=BF=AE?=
 =?UTF-8?q?=E5=A4=8D=E5=8D=95=E6=9C=BA=E6=A8=A1=E5=BC=8F=E8=BF=9E=E6=8E=A5?=
 =?UTF-8?q?=E5=89=AF=E6=9C=AC=E9=9B=86=E5=AE=9E=E4=BE=8B=E6=97=B6=E5=9C=B0?=
 =?UTF-8?q?=E5=9D=80=E8=A2=AB=E6=9B=BF=E6=8D=A2=E4=B8=BA=E5=86=85=E7=BD=91?=
 =?UTF-8?q?=E5=9C=B0=E5=9D=80?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

- getURI 在 topology=single 时未设置 directConnection=true
- 驱动连接目标地址后自动跟随副本集成员发现,切换到 localhost:27017
- 在 mongodb_impl.go 和 mongodb_impl_v1.go 中添加 directConnection=true
- 仅在 topology 非 replica、无 replicaSet、非 SRV 时生效
- refs #205
---
 internal/db/mongodb_impl.go    | 5 +++++
 internal/db/mongodb_impl_v1.go | 5 +++++
 2 files changed, 10 insertions(+)

diff --git a/internal/db/mongodb_impl.go b/internal/db/mongodb_impl.go
index 5c853f6..27ac0c7 100644
--- a/internal/db/mongodb_impl.go
+++ b/internal/db/mongodb_impl.go
@@ -251,6 +251,11 @@ func (m *MongoDB) getURI(config connection.ConnectionConfig) string {
 		params.Set("authMechanism", authMechanism)
 	}
 
+	// 单机模式且未指定副本集名称时,启用 directConnection 避免驱动自动跟随副本集成员发现
+	if strings.TrimSpace(config.Topology) != "replica" && strings.TrimSpace(config.ReplicaSet) == "" && !config.MongoSRV {
+		params.Set("directConnection", "true")
+	}
+
 	if encoded := params.Encode(); encoded != "" {
 		uri += "?" + encoded
 	}
diff --git a/internal/db/mongodb_impl_v1.go b/internal/db/mongodb_impl_v1.go
index 26e110a..e3aa5b4 100644
--- a/internal/db/mongodb_impl_v1.go
+++ b/internal/db/mongodb_impl_v1.go
@@ -252,6 +252,11 @@ func (m *MongoDBV1) getURI(config connection.ConnectionConfig) string {
 		params.Set("authMechanism", authMechanism)
 	}
 
+	// 单机模式且未指定副本集名称时,启用 directConnection 避免驱动自动跟随副本集成员发现
+	if strings.TrimSpace(config.Topology) != "replica" && strings.TrimSpace(config.ReplicaSet) == "" && !config.MongoSRV {
+		params.Set("directConnection", "true")
+	}
+
 	if encoded := params.Encode(); encoded != "" {
 		uri += "?" + encoded
 	}
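
The condition added to both `getURI` implementations only fires for a standalone target: topology is not `replica`, no `replicaSet` name is configured, and SRV is not in use. Re-expressed as a TypeScript sketch for illustration (the real logic is the Go shown above; the config field names and the sample address are assumptions):

```ts
// Illustrative sketch of the URI decision, not the actual Go implementation.
interface MongoConnOptions {
  host: string;
  port: number;
  topology: string;    // e.g. 'single' | 'replica'
  replicaSet: string;  // empty when unset
  mongoSRV: boolean;
}

function buildMongoURI(cfg: MongoConnOptions): string {
  const params = new URLSearchParams();
  const standalone =
    cfg.topology.trim() !== 'replica' &&
    cfg.replicaSet.trim() === '' &&
    !cfg.mongoSRV;
  if (standalone) {
    // Pin the driver to the address it was given instead of following the
    // replica-set member list it discovers (which may advertise intranet hosts).
    params.set('directConnection', 'true');
  }
  const query = params.toString();
  return `mongodb://${cfg.host}:${cfg.port}/${query ? '?' + query : ''}`;
}

// buildMongoURI({ host: '203.0.113.7', port: 27017, topology: 'single', replicaSet: '', mongoSRV: false })
// => "mongodb://203.0.113.7:27017/?directConnection=true"
```
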
From e964c8ecf89886818b2f772d3217eda4b3c72dd2 Mon Sep 17 00:00:00 2001
From: Syngnat
Date: Tue, 10 Mar 2026 10:42:34 +0800
Subject: [PATCH 31/48] =?UTF-8?q?=F0=9F=90=9B=20fix(DataGrid):=20=E4=BF=AE?=
 =?UTF-8?q?=E5=A4=8D=E8=99=9A=E6=8B=9F=E6=BB=9A=E5=8A=A8=E6=A8=A1=E5=BC=8F?=
 =?UTF-8?q?=E4=B8=8B=E5=8F=B3=E9=94=AE=E8=8F=9C=E5=8D=95=E5=A4=B1=E6=95=88?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

- 行级和单元格级右键菜单的启用条件互斥,虚拟滚动模式下两者同时失效
- enableLargeResultOptimizedEditing 关闭了内联编辑但未回退启用行级菜单
- 修改 useContextMenuRow 和 enableRowContextMenu 条件,虚拟模式下启用行级菜单
- 更新 dataContextValue 的 useMemo 依赖数组
- refs #209
---
 frontend/src/components/DataGrid.tsx | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/frontend/src/components/DataGrid.tsx b/frontend/src/components/DataGrid.tsx
index 0a4d5b7..fc885d7 100644
--- a/frontend/src/components/DataGrid.tsx
+++ b/frontend/src/components/DataGrid.tsx
@@ -2746,9 +2746,9 @@ const DataGrid: React.FC = ({
     handleExportSelected,
     copyToClipboard,
     tableName,
-    enableRowContextMenu: !canModifyData,
+    enableRowContextMenu: !canModifyData || enableLargeResultOptimizedEditing,
     supportsCopyInsert,
-  }), [handleCopyCsv, handleCopyInsert, handleCopyJson, handleExportSelected, copyToClipboard, tableName, canModifyData, supportsCopyInsert]);
+  }), [handleCopyCsv, handleCopyInsert, handleCopyJson, handleExportSelected, copyToClipboard, tableName, canModifyData, enableLargeResultOptimizedEditing, supportsCopyInsert]);
 
   const cellContextMenuValue = useMemo(() => ({
     showMenu: showCellContextMenu,
@@ -2764,7 +2764,7 @@ const DataGrid: React.FC = ({
   const rowPropsFactory = useCallback((record: any) => ({ record } as any), []);
 
   const totalWidth = columns.reduce((sum, col) => sum + (Number(col.width) || 200), 0) + selectionColumnWidth;
-  const useContextMenuRow = !canModifyData;
+  const useContextMenuRow = !canModifyData || enableLargeResultOptimizedEditing;
   const tableScrollX = useMemo(() => {
     const baseWidth = Math.max(totalWidth, 1000);
     if (!isMacLike || tableViewportWidth <= 0) return baseWidth;

From d61d7ec39b8c817678fec64d8f9ec2fdf0818aee Mon Sep 17 00:00:00 2001
From: Syngnat
Date: Tue, 10 Mar 2026 10:50:16 +0800
Subject: [PATCH 32/48] =?UTF-8?q?=F0=9F=90=9B=20fix(sqlserver):=20?=
 =?UTF-8?q?=E4=BF=AE=E5=A4=8D=20SQL=20Server=20=E6=9F=A5=E7=9C=8B=E8=A1=A8?=
 =?UTF-8?q?=E6=95=B0=E6=8D=AE=E6=97=B6=E5=88=86=E9=A1=B5=E8=AF=AD=E6=B3=95?=
 =?UTF-8?q?=E5=92=8C=E6=A0=87=E8=AF=86=E7=AC=A6=E5=BC=95=E7=94=A8=E9=94=99?=
 =?UTF-8?q?=E8=AF=AF?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

- quoteIdentPart 缺少 sqlserver 分支,标识符使用双引号而非 [bracket]
- buildPaginatedSelectSQL 增加 mssql 别名兜底,避免 dbType 变体导致走 default 分支
- 修复后标识符使用 [bracket],分页使用 OFFSET FETCH NEXT 语法
- refs #204
---
 frontend/src/utils/sql.ts | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/frontend/src/utils/sql.ts b/frontend/src/utils/sql.ts
index 14ad50f..4fece31 100644
--- a/frontend/src/utils/sql.ts
+++ b/frontend/src/utils/sql.ts
@@ -50,6 +50,11 @@ export const quoteIdentPart = (dbType: string, ident: string) => {
     return raw;
   }
 
+  // SQL Server 使用 [bracket] 标识符
+  if (dbTypeLower === 'sqlserver' || dbTypeLower === 'mssql') {
+    return `[${raw.replace(/]/g, ']]')}]`;
+  }
+
   // 其他数据库默认加双引号
   return `"${raw.replace(/"/g, '""')}"`;
 };
@@ -160,7 +165,8 @@ export const buildPaginatedSelectSQL = (
       }
       return `SELECT * FROM (SELECT "__gonavi_page__".*, ROWNUM "__gonavi_rn__" FROM (${orderedSql}) "__gonavi_page__" WHERE ROWNUM <= ${upperBound}) WHERE "__gonavi_rn__" > ${safeOffset}`;
     }
-    case 'sqlserver': {
+    case 'sqlserver':
+    case 'mssql': {
       const effectiveOrderBy = orderBy.trim() ? orderBy : ' ORDER BY (SELECT NULL)';
       return `${base}${effectiveOrderBy} OFFSET ${safeOffset} ROWS FETCH NEXT ${safeLimit} ROWS ONLY`;
     }
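
Taken together, the two changes above mean SQL Server identifiers are quoted with brackets (with `]` doubled) and pages are fetched with `OFFSET … FETCH NEXT`. A small sketch of the strings the helpers should now produce; the table name and page numbers are made-up example values:

```ts
// Re-sketch of the bracket-quoting rule from the diff above (not imported from the repo):
const quoteSqlServerIdent = (raw: string): string => `[${raw.replace(/]/g, ']]')}]`;

console.log(quoteSqlServerIdent('order]detail')); // => [order]]detail]

// Shape of the paginated SELECT for sqlserver/mssql per buildPaginatedSelectSQL
// (the `ORDER BY (SELECT NULL)` fallback and the OFFSET/FETCH clause come from
// the diff; [dbo].[orders], offset 200 and limit 100 are hypothetical values):
const pageSQL =
  'SELECT * FROM [dbo].[orders] ORDER BY (SELECT NULL) OFFSET 200 ROWS FETCH NEXT 100 ROWS ONLY';
console.log(pageSQL);
```
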
From c8c0c5f20a86c6167ca8ca02de84b5f61a623b11 Mon Sep 17 00:00:00 2001
From: Syngnat
Date: Tue, 10 Mar 2026 10:58:27 +0800
Subject: [PATCH 33/48] =?UTF-8?q?=E2=9C=A8=20feat(DataGrid):=20=E7=BB=9F?=
 =?UTF-8?q?=E4=B8=80=E8=A1=A8=E6=A0=BC=E5=8F=B3=E9=94=AE=E8=8F=9C=E5=8D=95?=
 =?UTF-8?q?=E4=BA=A4=E4=BA=92=E4=BD=93=E9=AA=8C?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

- 彻底移除功能较少的行级右键菜单 ContextMenuRow,统一使用功能更丰富的单元格右键菜单
- 优化虚拟滚动模式和只读模式下的渲染,支持触发单元格右键菜单
- 菜单展示自适应:在只读或不可修改数据的场景下自动隐藏「设置为 NULL」与「填充到选中行」等编辑项
- refs #209
---
 frontend/src/components/DataGrid.tsx | 31 ++++++++++++++++++++++++----
 1 file changed, 27 insertions(+), 4 deletions(-)

diff --git a/frontend/src/components/DataGrid.tsx b/frontend/src/components/DataGrid.tsx
index fc885d7..b2a34ff 100644
--- a/frontend/src/components/DataGrid.tsx
+++ b/frontend/src/components/DataGrid.tsx
@@ -2175,6 +2175,11 @@ const DataGrid: React.FC = ({
           'data-row-key': rowKey === undefined || rowKey === null ? undefined : String(rowKey),
           'data-col-name': dataIndex,
           onDoubleClick: () => handleVirtualCellActivate(record, dataIndex, dataIndex),
+          onContextMenu: (e: React.MouseEvent) => {
+            e.preventDefault();
+            e.stopPropagation();
+            showCellContextMenu(e, record, dataIndex, dataIndex);
+          },
         };
       }
       return {
@@ -2204,10 +2209,24 @@ const DataGrid: React.FC = ({
           );
         }
+        if (enableVirtual) {
+          return (
+            
              {
+              e.preventDefault();
+              e.stopPropagation();
+              showCellContextMenu(e, record, dataIndex, dataIndex);
+            }}
+          >
+            {originalRenderContent}
+          
    + ); + } return originalRenderContent; } }; - }), [columns, enableInlineEditableCell, enableVirtual, handleCellSave, openCellEditor, handleVirtualCellActivate]); + }), [columns, enableInlineEditableCell, enableVirtual, handleCellSave, openCellEditor, handleVirtualCellActivate, showCellContextMenu]); const handleAddRow = () => { const newKey = `new-${Date.now()}`; @@ -2746,9 +2765,9 @@ const DataGrid: React.FC = ({ handleExportSelected, copyToClipboard, tableName, - enableRowContextMenu: !canModifyData || enableLargeResultOptimizedEditing, + enableRowContextMenu: false, supportsCopyInsert, - }), [handleCopyCsv, handleCopyInsert, handleCopyJson, handleExportSelected, copyToClipboard, tableName, canModifyData, enableLargeResultOptimizedEditing, supportsCopyInsert]); + }), [handleCopyCsv, handleCopyInsert, handleCopyJson, handleExportSelected, copyToClipboard, tableName, canModifyData, supportsCopyInsert]); const cellContextMenuValue = useMemo(() => ({ showMenu: showCellContextMenu, @@ -2764,7 +2783,7 @@ const DataGrid: React.FC = ({ const rowPropsFactory = useCallback((record: any) => ({ record } as any), []); const totalWidth = columns.reduce((sum, col) => sum + (Number(col.width) || 200), 0) + selectionColumnWidth; - const useContextMenuRow = !canModifyData || enableLargeResultOptimizedEditing; + const useContextMenuRow = false; const tableScrollX = useMemo(() => { const baseWidth = Math.max(totalWidth, 1000); if (!isMacLike || tableViewportWidth <= 0) return baseWidth; @@ -3756,6 +3775,8 @@ const DataGrid: React.FC = ({ }} onClick={(e) => e.stopPropagation()} > + {canModifyData && ( + <>
    = ({ 填充到选中行 ({selectedRowKeys.length})
    + + )} {supportsCopyInsert && (
    Date: Tue, 10 Mar 2026 11:17:03 +0800 Subject: [PATCH 34/48] =?UTF-8?q?=F0=9F=94=A7=20fix(DataGrid):=20=E9=BB=98?= =?UTF-8?q?=E8=AE=A4=E5=BC=80=E5=90=AF=E8=99=9A=E6=8B=9F=E6=BB=9A=E5=8A=A8?= =?UTF-8?q?=E5=B9=B6=E4=BF=AE=E5=A4=8D=E5=A4=9A=E9=80=89=E5=8D=95=E5=85=83?= =?UTF-8?q?=E6=A0=BC=E9=AB=98=E4=BA=AE=E5=A4=B1=E6=95=88=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 移除根据数据量和列数动态判断是否开启虚拟滚动的阈值限制,改为在表格视图下默认全量开启,彻底解决卡顿问题 - 修复 `updateCellSelection` 在查找坐标节点时硬编码 `td` 选择器的问题,改为精确匹配 `.ant-table-cell`,兼容虚拟滚动时的 `div` 渲染模式 - 修复因透明窗口特性导致的 `transparent !important` 把高亮样式强行覆盖的问题,拔高了多选状态下背景与边框 CSS 的优先级 - 解决单元格内外多重属性嵌套导致的高亮右侧留白现象,使得高亮框完全贴合表格单元格边缘 - 适配主题色响应(暗黑模式使用黄色深色高亮,白昼模式使用默认蓝色高亮) --- frontend/src/components/DataGrid.tsx | 83 +++++++++++++--------------- 1 file changed, 39 insertions(+), 44 deletions(-) diff --git a/frontend/src/components/DataGrid.tsx b/frontend/src/components/DataGrid.tsx index b2a34ff..56264bd 100644 --- a/frontend/src/components/DataGrid.tsx +++ b/frontend/src/components/DataGrid.tsx @@ -1202,11 +1202,11 @@ const DataGrid: React.FC = ({ // 直接操作 DOM 更新选中效果,避免 React 重渲染 const updateCellSelection = useCallback((newSelection: Set) => { - const tableBody = containerRef.current?.querySelector('.ant-table-body'); - if (!tableBody) return; + const container = containerRef.current; + if (!container) return; - // 只同步可见单元格(兼容 virtual 渲染 + 极大选区) - const visibleCells = tableBody.querySelectorAll('td[data-row-key][data-col-name]'); + // 只同步可见单元格,严格限定 `.ant-table-cell`,避免虚拟列表中内嵌的 EditableCell 被重复获取并打上 selected 样式从而产生白边。 + const visibleCells = container.querySelectorAll('.ant-table-cell[data-row-key][data-col-name]'); visibleCells.forEach((cell) => { const el = cell as HTMLElement; const rowKey = el.getAttribute('data-row-key'); @@ -1334,10 +1334,10 @@ const DataGrid: React.FC = ({ const getCellInfo = (target: HTMLElement | null): { rowKey: string; colName: string } | null => { if (!target) return null; - const td = target.closest('td[data-row-key][data-col-name]') as HTMLElement; - if (!td) return null; - const rowKey = td.getAttribute('data-row-key'); - const colName = td.getAttribute('data-col-name'); + const cell = target.closest('[data-row-key][data-col-name]') as HTMLElement; + if (!cell) return null; + const rowKey = cell.getAttribute('data-row-key'); + const colName = cell.getAttribute('data-col-name'); if (!rowKey || !colName) return null; return { rowKey, colName }; }; @@ -2105,16 +2105,9 @@ const DataGrid: React.FC = ({ closeRowEditor(); }, [rowEditorRowKey, rowEditorForm, addedRows, columnNames, rowKeyStr, closeRowEditor]); - const estimatedVisibleCellCount = mergedDisplayData.length * Math.max(columnNames.length, 1); - const enableLargeResultOptimizedEditing = - viewMode === 'table' && ( - mergedDisplayData.length >= 60 || - estimatedVisibleCellCount >= 1600 || - columnNames.length >= 36 || - (isMacLike && columnNames.length >= 24) - ); - const enableVirtual = enableLargeResultOptimizedEditing; - const enableInlineEditableCell = canModifyData && !enableLargeResultOptimizedEditing; + + const enableVirtual = viewMode === 'table'; + const enableInlineEditableCell = canModifyData; const columns = useMemo(() => { return columnNames.map(key => ({ @@ -2169,27 +2162,28 @@ const DataGrid: React.FC = ({ return { ...col, onCell: (record: Item) => { - if (!enableInlineEditableCell) { - const rowKey = record?.[GONAVI_ROW_KEY]; - return { - 'data-row-key': rowKey === undefined || rowKey === null ? 
undefined : String(rowKey), - 'data-col-name': dataIndex, - onDoubleClick: () => handleVirtualCellActivate(record, dataIndex, dataIndex), - onContextMenu: (e: React.MouseEvent) => { - e.preventDefault(); - e.stopPropagation(); - showCellContextMenu(e, record, dataIndex, dataIndex); - }, - }; - } - return { - record, - editable: col.editable, - dataIndex: col.dataIndex, - title: dataIndex, - handleSave: handleCellSave, - focusCell: openCellEditor, + const rowKey = record?.[GONAVI_ROW_KEY]; + const cellProps: any = { + 'data-row-key': rowKey === undefined || rowKey === null ? undefined : String(rowKey), + 'data-col-name': dataIndex, }; + + if (!enableInlineEditableCell) { + cellProps.onDoubleClick = () => handleVirtualCellActivate(record, dataIndex, dataIndex); + cellProps.onContextMenu = (e: React.MouseEvent) => { + e.preventDefault(); + e.stopPropagation(); + showCellContextMenu(e, record, dataIndex, dataIndex); + }; + } else { + cellProps.record = record; + cellProps.editable = col.editable; + cellProps.dataIndex = col.dataIndex; + cellProps.title = dataIndex; + cellProps.handleSave = handleCellSave; + cellProps.focusCell = openCellEditor; + } + return cellProps; }, render: (text: any, record: Item, index: number) => { const originalRenderContent = col.render ? (col.render as any)(text, record, index) : text; @@ -4044,12 +4038,13 @@ const DataGrid: React.FC = ({ .${gridId} .ant-table-tbody .ant-table-row.row-added:hover > .ant-table-cell { background-color: ${rowAddedHover} !important; } .${gridId} .ant-table-tbody > tr.row-modified:hover > td, .${gridId} .ant-table-tbody .ant-table-row.row-modified:hover > .ant-table-cell { background-color: ${rowModHover} !important; } - .${gridId}.cell-edit-mode .ant-table-tbody > tr > td[data-col-name], - .${gridId}.cell-edit-mode .ant-table-tbody .ant-table-row > .ant-table-cell[data-col-name] { user-select: none; -webkit-user-select: none; cursor: crosshair; } - .${gridId}.cell-edit-mode .ant-table-tbody > tr > td[data-cell-selected="true"], - .${gridId}.cell-edit-mode .ant-table-tbody .ant-table-row > .ant-table-cell[data-cell-selected="true"] { - box-shadow: inset 0 0 0 2px ${selectionAccentHex}; - background-image: linear-gradient(${darkMode ? `rgba(${selectionAccentRgb}, 0.20)` : `rgba(${selectionAccentRgb}, 0.08)`}, ${darkMode ? `rgba(${selectionAccentRgb}, 0.20)` : `rgba(${selectionAccentRgb}, 0.08)`}); + .${gridId} .ant-table-tbody > tr > td[data-col-name], + .${gridId} .ant-table-tbody .ant-table-row > .ant-table-cell[data-col-name] { user-select: none; -webkit-user-select: none; cursor: crosshair; } + .${gridId} .ant-table-tbody > tr > td[data-cell-selected="true"], + .${gridId} .ant-table-tbody .ant-table-row > .ant-table-cell[data-cell-selected="true"], + .${gridId} [data-cell-selected="true"] { + box-shadow: inset 0 0 0 2px ${selectionAccentHex} !important; + background-image: linear-gradient(${darkMode ? `rgba(${selectionAccentRgb}, 0.20)` : `rgba(${selectionAccentRgb}, 0.08)`}, ${darkMode ? 
`rgba(${selectionAccentRgb}, 0.20)` : `rgba(${selectionAccentRgb}, 0.08)`}) !important; } .${gridId} .ant-table-content, .${gridId} .ant-table-body { From 695713c779a66b7c620841874288db99c788560a Mon Sep 17 00:00:00 2001 From: Syngnat Date: Tue, 10 Mar 2026 15:49:22 +0800 Subject: [PATCH 35/48] =?UTF-8?q?=E2=9C=A8=20feat(DataGrid):=20=E5=AE=9E?= =?UTF-8?q?=E7=8E=B0=E6=95=B0=E6=8D=AE=E8=A7=86=E5=9B=BE=E5=88=97=E6=A0=87?= =?UTF-8?q?=E9=A2=98=E6=8B=96=E6=8B=BD=E6=8E=92=E5=BA=8F=E5=8F=8A=E9=A1=BA?= =?UTF-8?q?=E5=BA=8F=E8=AE=B0=E5=BF=86?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 功能集成:接入 @dnd-kit 实现表头水平拖拽排序,支持多列位置灵活调整 - 持久化:Store 新增 tableColumnOrders 状态,支持按“连接-库-表”多维度记忆自定义列序 - 交互优化:重构表头 DOM 结构并消除内边距,实现“悬停手型、按住抓取”的精准指针反馈 - 性能提升:通过 React.memo 减少重渲染,并启用 will-change 硬件加速确保 60FPS 流畅度 - 稳定性:增强 Wails 环境接口调用的异常捕获,并补全前端独立开发环境下的 API Stub --- frontend/index.html | 17 ++ frontend/src/App.tsx | 143 +++++++++----- frontend/src/components/DataGrid.tsx | 284 ++++++++++++++++++++++----- frontend/src/main.tsx | 30 +++ frontend/src/store.ts | 46 ++++- 5 files changed, 417 insertions(+), 103 deletions(-) diff --git a/frontend/index.html b/frontend/index.html index 127af4b..b596c58 100644 --- a/frontend/index.html +++ b/frontend/index.html @@ -5,6 +5,23 @@ GoNavi +
    diff --git a/frontend/src/App.tsx b/frontend/src/App.tsx index c0c5436..ce1832e 100644 --- a/frontend/src/App.tsx +++ b/frontend/src/App.tsx @@ -93,27 +93,39 @@ function App() { // 同步 macOS 窗口透明度:opacity=1.0 且 blur=0 时关闭 NSVisualEffectView, // 避免 GPU 持续计算窗口背后的模糊合成 useEffect(() => { - void SetWindowTranslucency(resolvedAppearance.opacity, resolvedAppearance.blur).catch(() => undefined); + try { + void SetWindowTranslucency(resolvedAppearance.opacity, resolvedAppearance.blur).catch(() => undefined); + } catch(e) { /* ignore */ } }, [resolvedAppearance.blur, resolvedAppearance.opacity]); useEffect(() => { let cancelled = false; - Environment() - .then((env) => { - if (cancelled) return; - const platform = String(env?.platform || '').toLowerCase(); - setRuntimePlatform(platform); - setIsLinuxRuntime(platform === 'linux'); - }) - .catch(() => { - if (cancelled) return; - const platform = detectNavigatorPlatform(); - const normalized = /linux/i.test(platform) - ? 'linux' - : (/mac/i.test(platform) ? 'darwin' : (/win/i.test(platform) ? 'windows' : '')); - setRuntimePlatform(normalized); - setIsLinuxRuntime(normalized === 'linux'); - }); + try { + Environment() + .then((env) => { + if (cancelled) return; + const platform = String(env?.platform || '').toLowerCase(); + setRuntimePlatform(platform); + setIsLinuxRuntime(platform === 'linux'); + }) + .catch(() => { + if (cancelled) return; + const platform = detectNavigatorPlatform(); + const normalized = /linux/i.test(platform) + ? 'linux' + : (/mac/i.test(platform) ? 'darwin' : (/win/i.test(platform) ? 'windows' : '')); + setRuntimePlatform(normalized); + setIsLinuxRuntime(normalized === 'linux'); + }); + } catch(e) { + if (cancelled) return; + const platform = detectNavigatorPlatform(); + const normalized = /linux/i.test(platform) + ? 'linux' + : (/mac/i.test(platform) ? 'darwin' : (/win/i.test(platform) ? 'windows' : '')); + setRuntimePlatform(normalized); + setIsLinuxRuntime(normalized === 'linux'); + } return () => { cancelled = true; }; @@ -156,32 +168,36 @@ function App() { const enabledForBackend = globalProxy.enabled && !invalidWhenEnabled; let cancelled = false; - ConfigureGlobalProxy(enabledForBackend, { - type: globalProxy.type, - host, - port: portValid ? port : (globalProxy.type === 'http' ? 8080 : 1080), - user: String(globalProxy.user || '').trim(), - password: globalProxy.password || '', - }) - .then((res) => { - if (cancelled || res?.success) { - return; - } - void message.error({ - content: '全局代理配置失败: ' + (res?.message || '未知错误'), - key: 'global-proxy-sync-error', - }); + try { + ConfigureGlobalProxy(enabledForBackend, { + type: globalProxy.type, + host, + port: portValid ? port : (globalProxy.type === 'http' ? 8080 : 1080), + user: String(globalProxy.user || '').trim(), + password: globalProxy.password || '', }) - .catch((err) => { - if (cancelled) { - return; - } - const errMsg = err instanceof Error ? err.message : String(err || '未知错误'); - void message.error({ - content: '全局代理配置失败: ' + errMsg, - key: 'global-proxy-sync-error', + .then((res) => { + if (cancelled || res?.success) { + return; + } + void message.error({ + content: '全局代理配置失败: ' + (res?.message || '未知错误'), + key: 'global-proxy-sync-error', + }); + }) + .catch((err) => { + if (cancelled) { + return; + } + const errMsg = err instanceof Error ? 
err.message : String(err || '未知错误'); + void message.error({ + content: '全局代理配置失败: ' + errMsg, + key: 'global-proxy-sync-error', + }); }); - }); + } catch (e) { + console.warn("Wails API: ConfigureGlobalProxy unavailable", e); + } return () => { cancelled = true; @@ -238,13 +254,18 @@ function App() { return; } // 优先尝试全屏,若当前平台/时机不生效,后续走最大化兜底。 - await WindowFullscreen(); - await new Promise((resolve) => window.setTimeout(resolve, settleDelayMs)); - if (await checkStartupPreferenceApplied()) { - return; + try { + await WindowFullscreen(); + await new Promise((resolve) => window.setTimeout(resolve, settleDelayMs)); + if (await checkStartupPreferenceApplied()) { + return; + } + await WindowMaximise(); + await new Promise((resolve) => window.setTimeout(resolve, settleDelayMs)); + } catch (e) { + console.warn("Wails Window APIs unavailable", e); } - await WindowMaximise(); - await new Promise((resolve) => window.setTimeout(resolve, settleDelayMs)); + if (await checkStartupPreferenceApplied()) { return; } @@ -315,11 +336,15 @@ function App() { } const nudgedWidth = width > 480 ? width - 1 : width + 1; - WindowSetSize(nudgedWidth, height); - await wait(28); - WindowSetSize(width, height); + try { + WindowSetSize(nudgedWidth, height); + await wait(28); + WindowSetSize(width, height); + } catch(e) {} window.dispatchEvent(new Event('resize')); lastFixAt = Date.now(); + } catch(e) { + console.warn("Wails Window APIs unavailable in fixWindowScaleIfNeeded", e); } finally { inFlight = false; } @@ -649,7 +674,12 @@ function App() { total: info.assetSize || 0, message: '' }); - const res = await (window as any).go.app.App.DownloadUpdate(); + let res: any = null; + try { + res = await (window as any).go.app.App.DownloadUpdate(); + } catch (e) { + console.warn("Wails API: DownloadUpdate unavailable", e); + } updateDownloadInFlightRef.current = false; if (res?.success) { const resultData = (res?.data || {}) as UpdateDownloadResultData; @@ -1050,7 +1080,7 @@ function App() { if (target?.closest('[data-no-titlebar-toggle="true"]')) { return; } - WindowToggleMaximise(); + try { WindowToggleMaximise(); } catch(e) {} }; // Sidebar Resizing @@ -1158,7 +1188,9 @@ function App() { }, [checkForUpdates]); useEffect(() => { - const offDownloadProgress = EventsOn('update:download-progress', (event: UpdateDownloadProgressEvent) => { + let offDownloadProgress: any = null; + try { + offDownloadProgress = EventsOn('update:download-progress', (event: UpdateDownloadProgressEvent) => { if (!event) return; const status = event.status || 'downloading'; const nextStatus: 'idle' | 'start' | 'downloading' | 'done' | 'error' = @@ -1181,8 +1213,11 @@ function App() { message: String(event.message || '') })); }); + } catch (e) { + console.warn("Wails API: EventsOn unavailable", e); + } return () => { - offDownloadProgress(); + if (offDownloadProgress) offDownloadProgress(); }; }, []); diff --git a/frontend/src/components/DataGrid.tsx b/frontend/src/components/DataGrid.tsx index 56264bd..3af5d3b 100644 --- a/frontend/src/components/DataGrid.tsx +++ b/frontend/src/components/DataGrid.tsx @@ -4,6 +4,23 @@ import { Table, message, Input, Button, Dropdown, MenuProps, Form, Pagination, S import type { SortOrder } from 'antd/es/table/interface'; import { ReloadOutlined, ImportOutlined, ExportOutlined, DownOutlined, PlusOutlined, DeleteOutlined, SaveOutlined, UndoOutlined, FilterOutlined, CloseOutlined, ConsoleSqlOutlined, FileTextOutlined, CopyOutlined, ClearOutlined, EditOutlined, VerticalAlignBottomOutlined, LeftOutlined, RightOutlined } 
from '@ant-design/icons'; import Editor from '@monaco-editor/react'; +import { + DndContext, + DragEndEvent, + PointerSensor, + MouseSensor, + TouchSensor, + useSensor, + useSensors, + closestCenter +} from '@dnd-kit/core'; +import { + SortableContext, + useSortable, + horizontalListSortingStrategy, + arrayMove +} from '@dnd-kit/sortable'; +import { CSS } from '@dnd-kit/utilities'; import { ImportData, ExportTable, ExportData, ExportQuery, ApplyChanges, DBGetColumns } from '../../wailsjs/go/app/App'; import ImportPreviewModal from './ImportPreviewModal'; import { useStore } from '../store'; @@ -323,7 +340,7 @@ const coerceJsonEditorValueForStorage = (currentValue: any, editedValue: any): a }; // --- Resizable Header (Native Implementation) --- -const ResizableTitle = (props: any) => { +const ResizableTitle = React.forwardRef((props, ref) => { const { onResizeStart, width, ...restProps } = props; const nextStyle = { ...(restProps.style || {}) } as React.CSSProperties; @@ -334,11 +351,11 @@ const ResizableTitle = (props: any) => { // 注意:virtual table 模式下,rc-table 会依赖 header cell 的 width 样式来渲染选择列。 // 若这里丢失 width,可能导致左上角“全选”checkbox 不显示。 if (!width || typeof onResizeStart !== 'function') { - return
    ); -}; +}); + +// --- Sortable Header Cell --- +interface SortableHeaderCellProps extends React.HTMLAttributes { + id?: string; +} + +// --- Sortable Header Cell --- +interface SortableHeaderCellProps extends React.HTMLAttributes { + id?: string; +} + +// 静态 CSS 移到组件外,强制去除 th 内边距并确保指针穿透 +const sortableHeaderStaticStyles = ` + .gonavi-sortable-header-cell { + padding: 0 !important; + } + .gonavi-sortable-header-cell[data-cursor-grabbing="true"], + .gonavi-sortable-header-cell[data-cursor-grabbing="true"] *, + .gonavi-sortable-header-cell.is-dragging, + .gonavi-sortable-header-cell.is-dragging * { + cursor: grabbing !important; + } + .sortable-header-cell-drag-handle { + display: flex; + align-items: center; + width: 100%; + height: 100%; + min-height: 44px; + padding: 0 10px; + user-select: none; + cursor: inherit; + } +`; + +const SortableHeaderCell: React.FC = React.memo((props) => { + const { id, children, style: propStyle, className: propClassName, ...restProps } = props; + const [isPressed, setIsPressed] = useState(false); + const { + attributes, + listeners, + setNodeRef, + transform, + transition, + isDragging, + } = useSortable({ id: id || '' }); + + const style: React.CSSProperties = { + ...propStyle, + transform: CSS.Transform.toString(transform), + transition, + ...(isDragging ? { + position: 'relative', + zIndex: 9999, + opacity: 0.6, + backgroundColor: 'rgba(24, 144, 255, 0.15)', + boxShadow: '0 4px 12px rgba(0,0,0,0.15)' + } : {}), + touchAction: 'none', + willChange: 'transform', + // 核心修复:将指针直接绑定到 th 级别,并由 isPressed 控制 + cursor: (isDragging || isPressed) ? 'grabbing' : 'pointer', + }; + + useEffect(() => { + const handleGlobalMouseUp = () => setIsPressed(false); + window.addEventListener('mouseup', handleGlobalMouseUp); + return () => window.removeEventListener('mouseup', handleGlobalMouseUp); + }, []); + + if (!id || id === 'GONAVI_SELECTION_COLUMN') { + return {children}; + } + + return ( + { + setIsPressed(true); + if (listeners?.onPointerDown) listeners.onPointerDown(e); + }} + > + +
    +
    + {children} +
    +
    +
    + ); +}); // --- Contexts --- const EditableContext = React.createContext(null); @@ -640,6 +753,12 @@ const DataGrid: React.FC = ({ const appearance = useStore(state => state.appearance); const queryOptions = useStore(state => state.queryOptions); const setQueryOptions = useStore(state => state.setQueryOptions); + const tableColumnOrders = useStore(state => state.tableColumnOrders); + const enableColumnOrderMemory = useStore(state => state.enableColumnOrderMemory); + const setTableColumnOrder = useStore(state => state.setTableColumnOrder); + const setEnableColumnOrderMemory = useStore(state => state.setEnableColumnOrderMemory); + const clearTableColumnOrder = useStore(state => state.clearTableColumnOrder); + const isMacLike = useMemo(() => isMacLikePlatform(), []); const darkMode = theme === 'dark'; const resolvedAppearance = resolveAppearanceValues(appearance); @@ -647,6 +766,49 @@ const DataGrid: React.FC = ({ const canModifyData = !readOnly && !!tableName; const showColumnComment = queryOptions?.showColumnComment !== false; const showColumnType = queryOptions?.showColumnType !== false; + + // --- Display Columns Order Management --- + const [displayColumnNames, setDisplayColumnNames] = useState([]); + + // Sync display order from incoming prop and store memory + useEffect(() => { + let nextOrder = [...columnNames]; + if (enableColumnOrderMemory && connectionId && dbName && tableName) { + const storedOrder = tableColumnOrders[`${connectionId}-${dbName}-${tableName}`]; + if (Array.isArray(storedOrder) && storedOrder.length > 0) { + // Only layout known columns. Filter out missing or new columns. + const storedSet = new Set(storedOrder); + const incomingSet = new Set(nextOrder); + const validStored = storedOrder.filter(col => incomingSet.has(col)); + const missingNew = nextOrder.filter(col => !storedSet.has(col)); + nextOrder = [...validStored, ...missingNew]; + } + } + setDisplayColumnNames(nextOrder); + }, [columnNames, tableColumnOrders, enableColumnOrderMemory, connectionId, dbName, tableName]); + + // Handle Dragging + const sensors = useSensors( + useSensor(PointerSensor, { activationConstraint: { distance: 8 } }), + useSensor(MouseSensor, { activationConstraint: { distance: 8 } }), + useSensor(TouchSensor, { activationConstraint: { delay: 200, tolerance: 5 } }), + ); + + const handleDragEnd = (event: DragEndEvent) => { + const { active, over } = event; + if (active.id !== over?.id && over) { + setDisplayColumnNames((prev) => { + const oldIndex = prev.indexOf(active.id as string); + const newIndex = prev.indexOf(over.id as string); + const nextOrder = arrayMove(prev, oldIndex, newIndex); + if (enableColumnOrderMemory && connectionId && dbName && tableName) { + setTableColumnOrder(connectionId, dbName, tableName, nextOrder); + } + return nextOrder; + }); + } + }; + const selectionColumnWidth = 46; const currentConnConfig = connections.find(c => c.id === connectionId)?.config; const dataSourceCaps = getDataSourceCapabilities(currentConnConfig); @@ -854,7 +1016,7 @@ const DataGrid: React.FC = ({ try { const cleanRows = rows.map(({ [GONAVI_ROW_KEY]: _rowKey, ...rest }) => rest); // Pass tableName (or 'export') as default filename - const res = await ExportData(cleanRows, columnNames, tableName || 'export', format); + const res = await ExportData(cleanRows, displayColumnNames, tableName || 'export', format); if (res.success) { message.success("导出成功"); } else if (res.message !== "Cancelled") { @@ -1142,13 +1304,13 @@ const DataGrid: React.FC = ({ id: nextId, enabled: cond?.enabled !== 
false, logic: normalizeFilterLogic(cond?.logic), - column: rawColumn || (op === 'CUSTOM' ? '' : String(columnNames[0] || '')), + column: rawColumn || (op === 'CUSTOM' ? '' : String(displayColumnNames[0] || '')), op, value: String(cond?.value ?? ''), value2: String(cond?.value2 ?? ''), }; }); - }, [columnNames, normalizeFilterLogic]); + }, [displayColumnNames, normalizeFilterLogic]); // Filter State const [filterConditions, setFilterConditions] = useState([]); @@ -1196,9 +1358,9 @@ const DataGrid: React.FC = ({ const columnIndexMap = useMemo(() => { const map = new Map(); - columnNames.forEach((name, idx) => map.set(name, idx)); + displayColumnNames.forEach((name: string, idx: number) => map.set(name, idx)); return map; - }, [columnNames]); + }, [displayColumnNames]); // 直接操作 DOM 更新选中效果,避免 React 重渲染 const updateCellSelection = useCallback((newSelection: Set) => { @@ -1377,7 +1539,7 @@ const DataGrid: React.FC = ({ const row = currentData[i]; const rKey = String(row?.[GONAVI_ROW_KEY]); for (let j = minColIndex; j <= maxColIndex; j++) { - newSelectedCells.add(makeCellKey(rKey, columnNames[j])); + newSelectedCells.add(makeCellKey(rKey, displayColumnNames[j])); } } @@ -1548,7 +1710,7 @@ const DataGrid: React.FC = ({ cellSelectionPointerRef.current = null; isDraggingRef.current = false; }; - }, [cellEditMode, columnNames, columnIndexMap, updateCellSelection]); + }, [cellEditMode, displayColumnNames, columnIndexMap, updateCellSelection]); // 批量填充到选中行 const handleBatchFillToSelected = useCallback((sourceRecord: Item, dataIndex: string) => { @@ -1906,7 +2068,7 @@ const DataGrid: React.FC = ({ const formMap: Record = {}; const nullCols = new Set(); - columnNames.forEach((col) => { + displayColumnNames.forEach((col) => { const baseVal = (baseRow as any)?.[col]; const displayVal = (displayRow as any)?.[col]; baseRawMap[col] = baseVal; @@ -1922,7 +2084,7 @@ const DataGrid: React.FC = ({ rowEditorForm.setFieldsValue(formMap); setRowEditorRowKey(keyStr); setRowEditorOpen(true); - }, [canModifyData, mergedDisplayData, data, addedRows, columnNames, rowEditorForm, rowKeyStr]); + }, [canModifyData, mergedDisplayData, data, addedRows, displayColumnNames, rowEditorForm, rowKeyStr]); const openRowEditor = useCallback(() => { if (!canModifyData) return; @@ -2016,7 +2178,7 @@ const DataGrid: React.FC = ({ const keyStr = rowKeyStr(rowKey); const normalizedNext: Record = {}; let hasAnyVisibleChange = false; - columnNames.forEach((col) => { + displayColumnNames.forEach((col) => { const currentVal = (currentRow as any)?.[col]; const editedVal = Object.prototype.hasOwnProperty.call(nextItem, col) ? 
(nextItem as any)[col] : currentVal; if (!isJsonViewValueEqual(currentVal, editedVal)) hasAnyVisibleChange = true; @@ -2035,7 +2197,7 @@ const DataGrid: React.FC = ({ const originalRow = originalMap.get(keyStr); if (!originalRow) continue; const patch: Record = {}; - columnNames.forEach((col) => { + displayColumnNames.forEach((col) => { const prevVal = (originalRow as any)?.[col]; const nextVal = normalizedNext[col]; if (!isCellValueEqualForDiff(prevVal, nextVal)) patch[col] = nextVal; @@ -2062,7 +2224,7 @@ const DataGrid: React.FC = ({ setJsonEditorOpen(false); message.success("JSON 修改已应用到当前结果集,可继续“提交事务”"); - }, [canModifyData, jsonEditorValue, mergedDisplayData, addedRows, rowKeyStr, data, columnNames]); + }, [canModifyData, jsonEditorValue, mergedDisplayData, addedRows, rowKeyStr, data, displayColumnNames]); const openRowEditorFieldEditor = useCallback((dataIndex: string) => { if (!dataIndex) return; @@ -2089,7 +2251,7 @@ const DataGrid: React.FC = ({ const baseRawMap = rowEditorBaseRawRef.current || {}; const patch: Record = {}; - columnNames.forEach((col) => { + displayColumnNames.forEach((col) => { const nextVal = values[col]; const baseVal = baseRawMap[col]; if (!isCellValueEqualForDiff(baseVal, nextVal)) patch[col] = nextVal; @@ -2103,14 +2265,14 @@ const DataGrid: React.FC = ({ }); closeRowEditor(); - }, [rowEditorRowKey, rowEditorForm, addedRows, columnNames, rowKeyStr, closeRowEditor]); + }, [rowEditorRowKey, rowEditorForm, addedRows, displayColumnNames, rowKeyStr, closeRowEditor]); const enableVirtual = viewMode === 'table'; const enableInlineEditableCell = canModifyData; const columns = useMemo(() => { - return columnNames.map(key => ({ + return displayColumnNames.map(key => ({ title: renderColumnTitle(key), dataIndex: key, key: key, @@ -2130,7 +2292,9 @@ const DataGrid: React.FC = ({ return !isCellValueEqualForRender(record?.[key], prevRecord?.[key]); }, onHeaderCell: (column: any) => ({ + id: key, width: column.width, + className: 'gonavi-sortable-header-cell', onResizeStart: handleResizeStart(key), // Only need start onClickCapture: (event: React.MouseEvent) => { if (!onSort) return; @@ -2154,7 +2318,7 @@ const DataGrid: React.FC = ({ }, }), })); - }, [columnNames, columnWidths, sortInfo, handleResizeStart, canModifyData, onSort, renderColumnTitle]); + }, [displayColumnNames, columnWidths, sortInfo, handleResizeStart, canModifyData, onSort, renderColumnTitle]); const mergedColumns = useMemo(() => columns.map(col => { if (!col.editable) return col; @@ -2225,7 +2389,7 @@ const DataGrid: React.FC = ({ const handleAddRow = () => { const newKey = `new-${Date.now()}`; const newRow: any = { [GONAVI_ROW_KEY]: newKey }; - columnNames.forEach(col => newRow[col] = ''); + displayColumnNames.forEach(col => newRow[col] = ''); pendingScrollToBottomRef.current = true; setAddedRows(prev => [...prev, newRow]); }; @@ -2284,7 +2448,7 @@ const DataGrid: React.FC = ({ if (!hasRowKey) { values = { ...(newRow as any) }; } else { - columnNames.forEach((col) => { + displayColumnNames.forEach((col) => { const nextVal = (newRow as any)?.[col]; const prevVal = (originalRow as any)?.[col]; if (!isCellValueEqualForDiff(prevVal, nextVal)) values[col] = nextVal; @@ -2676,7 +2840,7 @@ const DataGrid: React.FC = ({ id: nextFilterId, enabled: true, logic: 'AND', - column: columnNames[0] || '', + column: displayColumnNames[0] || '', op: '=', value: '', value2: '', @@ -2747,6 +2911,26 @@ const DataGrid: React.FC = ({ > 下方显示类型 +
    + setEnableColumnOrderMemory(e.target.checked)} + > + 记忆自定义列序 + +
    ); @@ -2776,7 +2960,7 @@ const DataGrid: React.FC = ({ const rowPropsFactory = useCallback((record: any) => ({ record } as any), []); - const totalWidth = columns.reduce((sum, col) => sum + (Number(col.width) || 200), 0) + selectionColumnWidth; + const totalWidth = columns.reduce((sum: number, col: any) => sum + (Number(col.width) || 200), 0) + selectionColumnWidth; const useContextMenuRow = false; const tableScrollX = useMemo(() => { const baseWidth = Math.max(totalWidth, 1000); @@ -2796,8 +2980,8 @@ const DataGrid: React.FC = ({ body.row = ContextMenuRow; } return Object.keys(body).length > 0 - ? { body, header: { cell: ResizableTitle } } - : { header: { cell: ResizableTitle } }; + ? { body, header: { cell: SortableHeaderCell } } + : { header: { cell: SortableHeaderCell } }; }, [enableInlineEditableCell, useContextMenuRow]); const tableOnRow = useMemo(() => (useContextMenuRow ? rowPropsFactory : undefined), [useContextMenuRow, rowPropsFactory]); @@ -3412,7 +3596,7 @@ const DataGrid: React.FC = ({ style={{ width: 180 }} value={cond.column} onChange={v => updateFilter(cond.id, 'column', v)} - options={columnNames.map(c => ({ value: c, label: c }))} + options={displayColumnNames.map(c => ({ value: c, label: c }))} showSearch optionFilterProp="label" filterOption={(input, option) => @@ -3508,7 +3692,7 @@ const DataGrid: React.FC = ({
    - {columnNames.map((col) => { + {displayColumnNames.map((col: string) => { const sample = rowEditorDisplayRef.current?.[col] ?? ''; const placeholder = rowEditorNullColsRef.current?.has(col) ? '(NULL)' : undefined; const isJson = looksLikeJsonText(sample); @@ -3645,25 +3829,29 @@ const DataGrid: React.FC = ({ -
    ; + return ; } return ( - + {restProps.children} { />
    + + +
    + + @@ -3734,7 +3922,7 @@ const DataGrid: React.FC = ({ )}
    - {currentTextRow ? columnNames.map((col) => ( + {currentTextRow ? displayColumnNames.map((col) => (
    {col} : diff --git a/frontend/src/main.tsx b/frontend/src/main.tsx index 9457771..7ab4fee 100644 --- a/frontend/src/main.tsx +++ b/frontend/src/main.tsx @@ -9,6 +9,36 @@ import { loader } from '@monaco-editor/react' import * as monaco from 'monaco-editor' loader.config({ monaco }) +if (typeof window !== 'undefined' && !(window as any).go) { + (window as any).go = { + app: { + App: { + CheckUpdate: async () => ({ success: false }), + DownloadUpdate: async () => ({ success: false }), + GetSavedConnections: async () => [], + SaveConnection: async () => null, + DeleteConnection: async () => null, + OpenConnection: async () => null, + CloseConnection: async () => null, + GetDatabases: async () => [], + GetTables: async () => [], + GetTableData: async () => ({ columns: [], rows: [], total: 0 }), + GetTableColumns: async () => [], + ExecuteQuery: async () => ({ columns: [], rows: [], time: 0 }), + GetSavedQueries: async () => [], + SaveQuery: async () => null, + DeleteQuery: async () => null, + GetAppInfo: async () => ({}), + CheckForUpdates: async () => ({ success: false }), + OpenDownloadedUpdateDirectory: async () => ({ success: false }), + InstallUpdateAndRestart: async () => ({ success: false }), + ImportConfigFile: async () => ({ success: false }), + ExportData: async () => ({ success: false }), + } + } + }; +} + // 全局注册透明主题,避免每个 Editor 组件 beforeMount 中重复定义 monaco.editor.defineTheme('transparent-dark', { base: 'vs-dark', inherit: true, rules: [], diff --git a/frontend/src/store.ts b/frontend/src/store.ts index 8d67849..bd424a5 100644 --- a/frontend/src/store.ts +++ b/frontend/src/store.ts @@ -416,6 +416,8 @@ interface AppState { sqlLogs: SqlLog[]; tableAccessCount: Record; tableSortPreference: Record; + tableColumnOrders: Record; + enableColumnOrderMemory: boolean; addConnection: (conn: SavedConnection) => void; updateConnection: (conn: SavedConnection) => void; @@ -458,6 +460,9 @@ interface AppState { recordTableAccess: (connectionId: string, dbName: string, tableName: string) => void; setTableSortPreference: (connectionId: string, dbName: string, sortBy: 'name' | 'frequency') => void; + setTableColumnOrder: (connectionId: string, dbName: string, tableName: string, order: string[]) => void; + setEnableColumnOrderMemory: (enabled: boolean) => void; + clearTableColumnOrder: (connectionId: string, dbName: string, tableName: string) => void; } const sanitizeSavedQueries = (value: unknown): SavedQuery[] => { @@ -521,6 +526,17 @@ const sanitizeTableSortPreference = (value: unknown): Record => { + const raw = (value && typeof value === 'object') ? 
value as Record : {}; + const result: Record = {}; + Object.entries(raw).forEach(([key, orderArray]) => { + if (Array.isArray(orderArray)) { + result[key] = orderArray.map(col => String(col)); + } + }); + return result; +}; + const sanitizeAppearance = ( appearance: Partial<{ enabled: boolean; opacity: number; blur: number }> | undefined, version: number @@ -598,6 +614,8 @@ export const useStore = create()( sqlLogs: [], tableAccessCount: {}, tableSortPreference: {}, + tableColumnOrders: {}, + enableColumnOrderMemory: true, addConnection: (conn) => set((state) => ({ connections: [...state.connections, conn] })), updateConnection: (conn) => set((state) => ({ @@ -800,6 +818,25 @@ export const useStore = create()( } }; }), + + setTableColumnOrder: (connectionId, dbName, tableName, order) => set((state) => { + const key = `${connectionId}-${dbName}-${tableName}`; + return { + tableColumnOrders: { + ...state.tableColumnOrders, + [key]: order + } + }; + }), + + clearTableColumnOrder: (connectionId, dbName, tableName) => set((state) => { + const key = `${connectionId}-${dbName}-${tableName}`; + const newOrders = { ...state.tableColumnOrders }; + delete newOrders[key]; + return { tableColumnOrders: newOrders }; + }), + + setEnableColumnOrderMemory: (enabled) => set({ enableColumnOrderMemory: !!enabled }), }), { name: 'lite-db-storage', // name of the item in the storage (must be unique) @@ -825,6 +862,10 @@ export const useStore = create()( nextState.shortcutOptions = sanitizeShortcutOptions(state.shortcutOptions); nextState.tableAccessCount = sanitizeTableAccessCount(state.tableAccessCount); nextState.tableSortPreference = sanitizeTableSortPreference(state.tableSortPreference); + // 新增的列排序记忆状态不需要做版本特殊兼容,直接做基本的类型保护 + const safeOrders = sanitizeTableColumnOrders(state.tableColumnOrders); + nextState.tableColumnOrders = safeOrders; + nextState.enableColumnOrderMemory = state.enableColumnOrderMemory !== false; return nextState as AppState; }, merge: (persistedState, currentState) => { @@ -841,11 +882,14 @@ export const useStore = create()( fontSize: sanitizeFontSize(state.fontSize), startupFullscreen: sanitizeStartupFullscreen(state.startupFullscreen), globalProxy: sanitizeGlobalProxy(state.globalProxy), + tableSortPreference: sanitizeTableSortPreference(state.tableSortPreference), + tableColumnOrders: sanitizeTableColumnOrders(state.tableColumnOrders), + enableColumnOrderMemory: state.enableColumnOrderMemory !== false, + sqlFormatOptions: sanitizeSqlFormatOptions(state.sqlFormatOptions), queryOptions: sanitizeQueryOptions(state.queryOptions), shortcutOptions: sanitizeShortcutOptions(state.shortcutOptions), tableAccessCount: sanitizeTableAccessCount(state.tableAccessCount), - tableSortPreference: sanitizeTableSortPreference(state.tableSortPreference), }; }, partialize: (state) => ({ From c4c7e379d13535bfee3ffb9e68ba05db886b5f16 Mon Sep 17 00:00:00 2001 From: Syngnat Date: Tue, 10 Mar 2026 16:45:35 +0800 Subject: [PATCH 36/48] =?UTF-8?q?=E2=9C=A8=20feat(DataGrid):=20=E5=A2=9E?= =?UTF-8?q?=E5=8A=A0=E8=A1=A8=E6=A0=BC=E5=88=97=E7=9A=84=E5=8A=A8=E6=80=81?= =?UTF-8?q?=E6=98=BE=E7=A4=BA=E4=B8=8E=E9=9A=90=E8=97=8F=E6=8E=A7=E5=88=B6?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 字段面板新增列可见性筛选,支持列表内快速搜索、按需勾选与一键重置 - 新增持久化状态,自动记忆每张数据表的个性化隐藏列配置 - 优化数据提交链路,确保列的隐藏仅影响视图交互,不干扰增删改及复制功能 --- frontend/src/components/DataGrid.tsx | 185 ++++++++++++++++++++++----- frontend/src/store.ts | 49 ++++++- 2 files changed, 202 insertions(+), 32 deletions(-) diff --git 
a/frontend/src/components/DataGrid.tsx b/frontend/src/components/DataGrid.tsx index 3af5d3b..f7b197c 100644 --- a/frontend/src/components/DataGrid.tsx +++ b/frontend/src/components/DataGrid.tsx @@ -759,6 +759,12 @@ const DataGrid: React.FC = ({ const setEnableColumnOrderMemory = useStore(state => state.setEnableColumnOrderMemory); const clearTableColumnOrder = useStore(state => state.clearTableColumnOrder); + const tableHiddenColumns = useStore(state => state.tableHiddenColumns); + const enableHiddenColumnMemory = useStore(state => state.enableHiddenColumnMemory); + const setTableHiddenColumns = useStore(state => state.setTableHiddenColumns); + const setEnableHiddenColumnMemory = useStore(state => state.setEnableHiddenColumnMemory); + const clearTableHiddenColumns = useStore(state => state.clearTableHiddenColumns); + const isMacLike = useMemo(() => isMacLikePlatform(), []); const darkMode = theme === 'dark'; const resolvedAppearance = resolveAppearanceValues(appearance); @@ -767,9 +773,45 @@ const DataGrid: React.FC = ({ const showColumnComment = queryOptions?.showColumnComment !== false; const showColumnType = queryOptions?.showColumnType !== false; - // --- Display Columns Order Management --- + // --- Display Columns Order & Visibility Management --- + const [allOrderedColumnNames, setAllOrderedColumnNames] = useState([]); const [displayColumnNames, setDisplayColumnNames] = useState([]); - + const [localHiddenColumns, setLocalHiddenColumns] = useState([]); + const [columnSearchText, setColumnSearchText] = useState(''); + + // Sync hidden columns from store + useEffect(() => { + if (enableHiddenColumnMemory && connectionId && dbName && tableName) { + const storedHidden = tableHiddenColumns[`${connectionId}-${dbName}-${tableName}`]; + setLocalHiddenColumns(Array.isArray(storedHidden) ? storedHidden : []); + } else { + setLocalHiddenColumns([]); + } + }, [tableHiddenColumns, enableHiddenColumnMemory, connectionId, dbName, tableName]); + + const toggleColumnVisibility = useCallback((col: string, visible: boolean) => { + setLocalHiddenColumns(prev => { + const nextSet = new Set(prev); + if (visible) nextSet.delete(col); + else nextSet.add(col); + const nextArray = Array.from(nextSet); + if (enableHiddenColumnMemory && connectionId && dbName && tableName) { + setTableHiddenColumns(connectionId, dbName, tableName, nextArray); + } + return nextArray; + }); + }, [enableHiddenColumnMemory, connectionId, dbName, tableName, setTableHiddenColumns]); + + const toggleAllColumnsVisibility = useCallback((visible: boolean) => { + setLocalHiddenColumns(() => { + const nextArray = visible ? 
[] : [...allOrderedColumnNames]; + if (enableHiddenColumnMemory && connectionId && dbName && tableName) { + setTableHiddenColumns(connectionId, dbName, tableName, nextArray); + } + return nextArray; + }); + }, [allOrderedColumnNames, enableHiddenColumnMemory, connectionId, dbName, tableName, setTableHiddenColumns]); + // Sync display order from incoming prop and store memory useEffect(() => { let nextOrder = [...columnNames]; @@ -784,9 +826,15 @@ const DataGrid: React.FC = ({ nextOrder = [...validStored, ...missingNew]; } } - setDisplayColumnNames(nextOrder); + setAllOrderedColumnNames(nextOrder); }, [columnNames, tableColumnOrders, enableColumnOrderMemory, connectionId, dbName, tableName]); + // Compute final display columns + useEffect(() => { + const hiddenSet = new Set(localHiddenColumns); + setDisplayColumnNames(allOrderedColumnNames.filter(col => !hiddenSet.has(col))); + }, [allOrderedColumnNames, localHiddenColumns]); + // Handle Dragging const sensors = useSensors( useSensor(PointerSensor, { activationConstraint: { distance: 8 } }), @@ -797,14 +845,36 @@ const DataGrid: React.FC = ({ const handleDragEnd = (event: DragEndEvent) => { const { active, over } = event; if (active.id !== over?.id && over) { - setDisplayColumnNames((prev) => { - const oldIndex = prev.indexOf(active.id as string); - const newIndex = prev.indexOf(over.id as string); - const nextOrder = arrayMove(prev, oldIndex, newIndex); - if (enableColumnOrderMemory && connectionId && dbName && tableName) { - setTableColumnOrder(connectionId, dbName, tableName, nextOrder); - } - return nextOrder; + setAllOrderedColumnNames((prevAllOrder) => { + // Calculate the new order of all columns by applying the movement + // We only move the visible columns relative to each other, but the easiest way + // is to map the visible column movement back to the full array. + const hiddenSet = new Set(localHiddenColumns); + const visibleOrder = prevAllOrder.filter(col => !hiddenSet.has(col)); + + const oldVisibleIndex = visibleOrder.indexOf(active.id as string); + const newVisibleIndex = visibleOrder.indexOf(over.id as string); + + if (oldVisibleIndex === -1 || newVisibleIndex === -1) return prevAllOrder; + + const nextVisibleOrder = arrayMove(visibleOrder, oldVisibleIndex, newVisibleIndex); + + // Reconstruct allOrderedColumnNames by inserting hidden columns back to their original relative positions + // Or simpler: just keep hidden columns at the end, but that ruins user's layout. 
+ // Better approach: build a new array + let vIndex = 0; + const nextOrder = prevAllOrder.map(col => { + if (hiddenSet.has(col)) { + return col; // Hidden columns stay at their absolute index in the master list + } else { + return nextVisibleOrder[vIndex++]; + } + }); + + if (enableColumnOrderMemory && connectionId && dbName && tableName) { + setTableColumnOrder(connectionId, dbName, tableName, nextOrder); + } + return nextOrder; }); } }; @@ -2068,7 +2138,7 @@ const DataGrid: React.FC = ({ const formMap: Record = {}; const nullCols = new Set(); - displayColumnNames.forEach((col) => { + columnNames.forEach((col) => { const baseVal = (baseRow as any)?.[col]; const displayVal = (displayRow as any)?.[col]; baseRawMap[col] = baseVal; @@ -2178,7 +2248,7 @@ const DataGrid: React.FC = ({ const keyStr = rowKeyStr(rowKey); const normalizedNext: Record = {}; let hasAnyVisibleChange = false; - displayColumnNames.forEach((col) => { + columnNames.forEach((col) => { const currentVal = (currentRow as any)?.[col]; const editedVal = Object.prototype.hasOwnProperty.call(nextItem, col) ? (nextItem as any)[col] : currentVal; if (!isJsonViewValueEqual(currentVal, editedVal)) hasAnyVisibleChange = true; @@ -2197,7 +2267,7 @@ const DataGrid: React.FC = ({ const originalRow = originalMap.get(keyStr); if (!originalRow) continue; const patch: Record = {}; - displayColumnNames.forEach((col) => { + columnNames.forEach((col) => { const prevVal = (originalRow as any)?.[col]; const nextVal = normalizedNext[col]; if (!isCellValueEqualForDiff(prevVal, nextVal)) patch[col] = nextVal; @@ -2389,11 +2459,10 @@ const DataGrid: React.FC = ({ const handleAddRow = () => { const newKey = `new-${Date.now()}`; const newRow: any = { [GONAVI_ROW_KEY]: newKey }; - displayColumnNames.forEach(col => newRow[col] = ''); + columnNames.forEach(col => newRow[col] = ''); pendingScrollToBottomRef.current = true; setAddedRows(prev => [...prev, newRow]); }; - const handleDeleteSelected = () => { setDeletedRowKeys(prev => { const newDeleted = new Set(prev); @@ -2898,19 +2967,49 @@ const DataGrid: React.FC = ({ ]; const columnInfoSettingContent = ( -
    +
    +
    显示设置
    setQueryOptions({ showColumnComment: e.target.checked })} > - 下方显示备注 + 表头显示备注 setQueryOptions({ showColumnType: e.target.checked })} > - 下方显示类型 + 表头显示类型 +
    + + + setColumnSearchText(e.target.value)} + allowClear + /> +
    + {allOrderedColumnNames.filter(col => !columnSearchText || col.toLowerCase().includes(columnSearchText.toLowerCase())).map(col => ( + toggleColumnVisibility(col, e.target.checked)} + style={{ marginLeft: 0 }} + > + {col} + + ))} +
    +
    = ({ > 记忆自定义列序 - + 记忆隐藏列配置 + +
    + + +
    ); diff --git a/frontend/src/store.ts b/frontend/src/store.ts index bd424a5..172099d 100644 --- a/frontend/src/store.ts +++ b/frontend/src/store.ts @@ -418,6 +418,8 @@ interface AppState { tableSortPreference: Record; tableColumnOrders: Record; enableColumnOrderMemory: boolean; + tableHiddenColumns: Record; + enableHiddenColumnMemory: boolean; addConnection: (conn: SavedConnection) => void; updateConnection: (conn: SavedConnection) => void; @@ -463,6 +465,10 @@ interface AppState { setTableColumnOrder: (connectionId: string, dbName: string, tableName: string, order: string[]) => void; setEnableColumnOrderMemory: (enabled: boolean) => void; clearTableColumnOrder: (connectionId: string, dbName: string, tableName: string) => void; + + setTableHiddenColumns: (connectionId: string, dbName: string, tableName: string, hiddenColumns: string[]) => void; + setEnableHiddenColumnMemory: (enabled: boolean) => void; + clearTableHiddenColumns: (connectionId: string, dbName: string, tableName: string) => void; } const sanitizeSavedQueries = (value: unknown): SavedQuery[] => { @@ -537,6 +543,17 @@ const sanitizeTableColumnOrders = (value: unknown): Record => return result; }; +const sanitizeTableHiddenColumns = (value: unknown): Record => { + const raw = (value && typeof value === 'object') ? value as Record : {}; + const result: Record = {}; + Object.entries(raw).forEach(([key, hiddenArray]) => { + if (Array.isArray(hiddenArray)) { + result[key] = hiddenArray.map(col => String(col)); + } + }); + return result; +}; + const sanitizeAppearance = ( appearance: Partial<{ enabled: boolean; opacity: number; blur: number }> | undefined, version: number @@ -616,6 +633,8 @@ export const useStore = create()( tableSortPreference: {}, tableColumnOrders: {}, enableColumnOrderMemory: true, + tableHiddenColumns: {}, + enableHiddenColumnMemory: true, addConnection: (conn) => set((state) => ({ connections: [...state.connections, conn] })), updateConnection: (conn) => set((state) => ({ @@ -837,6 +856,25 @@ export const useStore = create()( }), setEnableColumnOrderMemory: (enabled) => set({ enableColumnOrderMemory: !!enabled }), + + setTableHiddenColumns: (connectionId, dbName, tableName, hiddenColumns) => set((state) => { + const key = `${connectionId}-${dbName}-${tableName}`; + return { + tableHiddenColumns: { + ...state.tableHiddenColumns, + [key]: hiddenColumns + } + }; + }), + + clearTableHiddenColumns: (connectionId, dbName, tableName) => set((state) => { + const key = `${connectionId}-${dbName}-${tableName}`; + const newHidden = { ...state.tableHiddenColumns }; + delete newHidden[key]; + return { tableHiddenColumns: newHidden }; + }), + + setEnableHiddenColumnMemory: (enabled) => set({ enableHiddenColumnMemory: !!enabled }), }), { name: 'lite-db-storage', // name of the item in the storage (must be unique) @@ -866,6 +904,9 @@ export const useStore = create()( const safeOrders = sanitizeTableColumnOrders(state.tableColumnOrders); nextState.tableColumnOrders = safeOrders; nextState.enableColumnOrderMemory = state.enableColumnOrderMemory !== false; + const safeHidden = sanitizeTableHiddenColumns(state.tableHiddenColumns); + nextState.tableHiddenColumns = safeHidden; + nextState.enableHiddenColumnMemory = state.enableHiddenColumnMemory !== false; return nextState as AppState; }, merge: (persistedState, currentState) => { @@ -885,6 +926,8 @@ export const useStore = create()( tableSortPreference: sanitizeTableSortPreference(state.tableSortPreference), tableColumnOrders: 
sanitizeTableColumnOrders(state.tableColumnOrders), enableColumnOrderMemory: state.enableColumnOrderMemory !== false, + tableHiddenColumns: sanitizeTableHiddenColumns(state.tableHiddenColumns), + enableHiddenColumnMemory: state.enableHiddenColumnMemory !== false, sqlFormatOptions: sanitizeSqlFormatOptions(state.sqlFormatOptions), queryOptions: sanitizeQueryOptions(state.queryOptions), @@ -906,7 +949,11 @@ export const useStore = create()( queryOptions: state.queryOptions, shortcutOptions: state.shortcutOptions, tableAccessCount: state.tableAccessCount, - tableSortPreference: state.tableSortPreference + tableSortPreference: state.tableSortPreference, + tableColumnOrders: state.tableColumnOrders, + enableColumnOrderMemory: state.enableColumnOrderMemory, + tableHiddenColumns: state.tableHiddenColumns, + enableHiddenColumnMemory: state.enableHiddenColumnMemory }), // Don't persist logs } ) From 92e9381fccc1ddc889c2f326e12e600fd5a3500e Mon Sep 17 00:00:00 2001 From: Syngnat Date: Wed, 11 Mar 2026 09:19:49 +0800 Subject: [PATCH 37/48] =?UTF-8?q?=F0=9F=8E=A8=20style(DataGrid):=20?= =?UTF-8?q?=E6=B8=85=E7=90=86=E5=86=97=E4=BD=99=E4=BB=A3=E7=A0=81=E4=B8=8E?= =?UTF-8?q?=E9=9D=99=E6=80=81=E5=88=86=E6=9E=90=E5=91=8A=E8=AD=A6?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 类型重构:通过修正 React Context 的函数签名解决了 void 类型的链式调用错误 - 代码精简:利用 Nullish Coalescing (??) 优化组件配置项降级逻辑,剥离无意义的隐式 undefined 赋值 - 工具链适配:适配 IDE 拼写检查与 Promise strict rules,确保全文件零警 --- frontend/src/components/DataGrid.tsx | 191 +++++++++++++-------------- 1 file changed, 91 insertions(+), 100 deletions(-) diff --git a/frontend/src/components/DataGrid.tsx b/frontend/src/components/DataGrid.tsx index f7b197c..0a35d9f 100644 --- a/frontend/src/components/DataGrid.tsx +++ b/frontend/src/components/DataGrid.tsx @@ -1,7 +1,8 @@ +// cspell:ignore anticon sqls uuidv uuidv4 hscroll import React, { useState, useEffect, useRef, useContext, useMemo, useCallback } from 'react'; import { createPortal } from 'react-dom'; import { Table, message, Input, Button, Dropdown, MenuProps, Form, Pagination, Select, Modal, Checkbox, Segmented, Tooltip, Popover } from 'antd'; -import type { SortOrder } from 'antd/es/table/interface'; +import type { SortOrder, ColumnType } from 'antd/es/table/interface'; import { ReloadOutlined, ImportOutlined, ExportOutlined, DownOutlined, PlusOutlined, DeleteOutlined, SaveOutlined, UndoOutlined, FilterOutlined, CloseOutlined, ConsoleSqlOutlined, FileTextOutlined, CopyOutlined, ClearOutlined, EditOutlined, VerticalAlignBottomOutlined, LeftOutlined, RightOutlined } from '@ant-design/icons'; import Editor from '@monaco-editor/react'; import { @@ -25,7 +26,7 @@ import { ImportData, ExportTable, ExportData, ExportQuery, ApplyChanges, DBGetCo import ImportPreviewModal from './ImportPreviewModal'; import { useStore } from '../store'; import type { ColumnDefinition } from '../types'; -import { v4 as uuidv4 } from 'uuid'; +import { v4 as generateUuid } from 'uuid'; import 'react-resizable/css/styles.css'; import { buildOrderBySQL, buildPaginatedSelectSQL, buildWhereSQL, escapeLiteral, quoteIdentPart, quoteQualifiedIdent, withSortBufferTuningSQL, type FilterCondition } from '../utils/sql'; import { isMacLikePlatform, normalizeOpacityForPlatform, resolveAppearanceValues } from '../utils/appearance'; @@ -244,13 +245,9 @@ const shouldOpenModalEditor = (val: any): boolean => { if (typeof val === 'string') { if (val.length > INLINE_EDIT_MAX_CHARS || val.includes('\n')) return true; const trimmed = 
val.trimStart(); - if (trimmed.startsWith('{') || trimmed.startsWith('[')) return true; - return false; + return trimmed.startsWith('{') || trimmed.startsWith('['); } - if (typeof val === 'object') { - return true; - } - return false; + return typeof val === 'object'; }; const getCellFieldName = (record: Item, dataIndex: string) => { @@ -488,7 +485,7 @@ const DataContext = React.createContext<{ handleCopyInsert: (r: any) => void; handleCopyJson: (r: any) => void; handleCopyCsv: (r: any) => void; - handleExportSelected: (format: string, r: any) => void; + handleExportSelected: (format: string, r: any) => Promise; copyToClipboard: (t: string) => void; tableName?: string; enableRowContextMenu: boolean; @@ -675,11 +672,11 @@ const ContextMenuRow = React.memo(({ children, record, ...props }: any) => { label: '导出选中数据', icon: , children: [ - { key: 'exp-csv', label: 'CSV', onClick: () => handleExportSelected('csv', record) }, - { key: 'exp-xlsx', label: 'Excel', onClick: () => handleExportSelected('xlsx', record) }, - { key: 'exp-json', label: 'JSON', onClick: () => handleExportSelected('json', record) }, - { key: 'exp-md', label: 'Markdown', onClick: () => handleExportSelected('md', record) }, - { key: 'exp-html', label: 'HTML', onClick: () => handleExportSelected('html', record) }, + { key: 'exp-csv', label: 'CSV', onClick: () => handleExportSelected('csv', record).catch(console.error) }, + { key: 'exp-xlsx', label: 'Excel', onClick: () => handleExportSelected('xlsx', record).catch(console.error) }, + { key: 'exp-json', label: 'JSON', onClick: () => handleExportSelected('json', record).catch(console.error) }, + { key: 'exp-md', label: 'Markdown', onClick: () => handleExportSelected('md', record).catch(console.error) }, + { key: 'exp-html', label: 'HTML', onClick: () => handleExportSelected('html', record).catch(console.error) }, ] } ]; @@ -770,8 +767,8 @@ const DataGrid: React.FC = ({ const resolvedAppearance = resolveAppearanceValues(appearance); const opacity = normalizeOpacityForPlatform(resolvedAppearance.opacity); const canModifyData = !readOnly && !!tableName; - const showColumnComment = queryOptions?.showColumnComment !== false; - const showColumnType = queryOptions?.showColumnType !== false; + const showColumnComment = queryOptions?.showColumnComment ?? true; + const showColumnType = queryOptions?.showColumnType ?? true; // --- Display Columns Order & Visibility Management --- const [allOrderedColumnNames, setAllOrderedColumnNames] = useState([]); @@ -921,14 +918,10 @@ const DataGrid: React.FC = ({ const panelPaddingX = 12; const toolbarBottomPadding = 6; const filterTopPadding = 2; - const panelBorderColor = darkMode ? 'rgba(255, 255, 255, 0.08)' : 'rgba(0, 0, 0, 0.08)'; const panelFrameColor = darkMode ? 'rgba(0, 0, 0, 0.42)' : 'rgba(0, 0, 0, 0.18)'; const floatingScrollbarGap = 6; const floatingScrollbarInset = 10; const floatingScrollbarHeight = 10; - const floatingScrollbarTrackBg = 'transparent'; - const floatingScrollbarBorderColor = 'transparent'; - const floatingScrollbarShadow = 'none'; const floatingScrollbarThumbBg = darkMode ? 'rgba(255,255,255,0.34)' : 'rgba(0,0,0,0.22)'; const floatingScrollbarThumbBorderColor = darkMode ? 'rgba(255,255,255,0.10)' : 'rgba(255,255,255,0.32)'; const floatingScrollbarThumbShadow = darkMode ? 
'0 4px 12px rgba(0,0,0,0.28)' : '0 4px 10px rgba(0,0,0,0.12)'; @@ -972,7 +965,7 @@ const DataGrid: React.FC = ({ const [form] = Form.useForm(); const [modal, contextHolder] = Modal.useModal(); - const gridId = useMemo(() => `grid-${uuidv4()}`, []); + const gridId = useMemo(() => `grid-${generateUuid()}`, []); const [viewMode, setViewMode] = useState('table'); const [textRecordIndex, setTextRecordIndex] = useState(0); const [cellEditorOpen, setCellEditorOpen] = useState(false); @@ -1008,7 +1001,7 @@ const DataGrid: React.FC = ({ const containerRef = useRef(null); const tableContainerRef = useRef(null); const tableScrollTargetsRef = useRef([]); - const externalHScrollRef = useRef(null); + const externalHorizontalScrollRef = useRef(null); const horizontalSyncSourceRef = useRef<'table' | 'external' | ''>(''); const lastTableScrollLeftRef = useRef(0); const lastExternalScrollLeftRef = useRef(0); @@ -1069,7 +1062,7 @@ const DataGrid: React.FC = ({ const showCellContextMenu = useCallback((e: React.MouseEvent, record: Item, dataIndex: string, title: React.ReactNode) => { e.preventDefault(); e.stopPropagation(); - const titleText = typeof title === 'string' ? title : (typeof title === 'number' ? String(title) : String(dataIndex)); + const titleText = typeof (title as any) === 'string' ? (title as string) : (typeof (title as any) === 'number' ? String(title) : String(dataIndex)); setCellContextMenu({ visible: true, x: e.clientX, @@ -1088,12 +1081,12 @@ const DataGrid: React.FC = ({ // Pass tableName (or 'export') as default filename const res = await ExportData(cleanRows, displayColumnNames, tableName || 'export', format); if (res.success) { - message.success("导出成功"); + void message.success("导出成功"); } else if (res.message !== "Cancelled") { - message.error("导出失败: " + res.message); + void message.error("导出失败: " + res.message); } } catch (e: any) { - message.error("导出失败: " + (e?.message || String(e))); + void message.error("导出失败: " + (e?.message || String(e))); } finally { hide(); } @@ -1286,7 +1279,7 @@ const DataGrid: React.FC = ({ const raw = record?.[dataIndex]; const text = toEditableText(raw); const isJson = looksLikeJsonText(text); - const titleText = typeof title === 'string' ? title : (typeof title === 'number' ? String(title) : String(dataIndex)); + const titleText = typeof (title as any) === 'string' ? (title as string) : (typeof (title as any) === 'number' ? 
String(title) : String(dataIndex)); setCellEditorMeta({ record, dataIndex, title: titleText }); setCellEditorValue(text); @@ -1457,7 +1450,7 @@ const DataGrid: React.FC = ({ const handleBatchFillCells = useCallback(() => { const cellsToFill = currentSelectionRef.current; if (cellsToFill.size === 0) { - message.info('请先选择要填充的单元格'); + void message.info('请先选择要填充的单元格'); return; } @@ -1487,7 +1480,7 @@ const DataGrid: React.FC = ({ const existing = modifiedRows[rowKey]; const baseRow = baseRowMap.get(rowKey); - let currentVal: any = undefined; + let currentVal: any; const addedRow = addedRowMap.get(rowKey); if (addedRow) { @@ -1510,7 +1503,7 @@ const DataGrid: React.FC = ({ }); if (updatedCount === 0) { - message.info('选中的单元格无需更新'); + void message.info('选中的单元格无需更新'); return; } @@ -1538,7 +1531,7 @@ const DataGrid: React.FC = ({ return next || prev; }); - message.success(`已填充 ${updatedCount} 个单元格`); + void message.success(`已填充 ${updatedCount} 个单元格`); setBatchEditModalOpen(false); // 清除选中状态 @@ -1788,7 +1781,7 @@ const DataGrid: React.FC = ({ const selKeys = selectedRowKeysRef.current; if (selKeys.length === 0) { - message.info('请先选择要填充的行'); + void message.info('请先选择要填充的行'); return; } @@ -1797,7 +1790,7 @@ const DataGrid: React.FC = ({ const targetKeys = selKeys.filter(k => k !== sourceKey); if (targetKeys.length === 0) { - message.info('没有其他选中的行可以填充'); + void message.info('没有其他选中的行可以填充'); return; } @@ -1836,7 +1829,7 @@ const DataGrid: React.FC = ({ return next || prev; }); - message.success(`已填充 ${updatedCount} 行`); + void message.success(`已填充 ${updatedCount} 行`); setCellContextMenu(prev => ({ ...prev, visible: false })); }, [addedRows, rowKeyStr]); @@ -1871,7 +1864,7 @@ const DataGrid: React.FC = ({ return ''; }, [addedRowKeySet, modifiedRowKeySet, deletedRowKeys, rowKeyStr]); - const handleTableChange = useCallback((pag: any, filtersArg: any, sorter: any) => { + const handleTableChange = useCallback((_pag: any, _filtersArg: any, sorter: any) => { if (isResizingRef.current) return; // Block sort if resizing if (sorter.field) { const field = String(sorter.field); @@ -2041,7 +2034,7 @@ const DataGrid: React.FC = ({ const obj = JSON.parse(cellEditorValue); setCellEditorValue(JSON.stringify(obj, null, 2)); } catch (e: any) { - message.error("JSON 格式无效:" + (e?.message || String(e))); + void message.error("JSON 格式无效:" + (e?.message || String(e))); } }, [cellEditorIsJson, cellEditorValue]); @@ -2119,12 +2112,12 @@ const DataGrid: React.FC = ({ const openRowEditorByKey = useCallback((keyStr?: string) => { if (!canModifyData) return; if (!keyStr) { - message.info('请先定位到要编辑的记录'); + void message.info('请先定位到要编辑的记录'); return; } const displayRow = mergedDisplayData.find(r => rowKeyStr(r?.[GONAVI_ROW_KEY]) === keyStr); if (!displayRow) { - message.error('未找到目标行,请刷新后重试'); + void message.error('未找到目标行,请刷新后重试'); return; } @@ -2159,12 +2152,12 @@ const DataGrid: React.FC = ({ const openRowEditor = useCallback(() => { if (!canModifyData) return; if (selectedRowKeys.length > 1) { - message.info('一次只能编辑一行,请仅选择一行'); + void message.info('一次只能编辑一行,请仅选择一行'); return; } const keyStr = selectedRowKeys.length === 1 ? 
rowKeyStr(selectedRowKeys[0]) : undefined; if (!keyStr) { - message.info('请先选择一行(勾选复选框)'); + void message.info('请先选择一行(勾选复选框)'); return; } openRowEditorByKey(keyStr); @@ -2175,7 +2168,7 @@ const DataGrid: React.FC = ({ const currentRow = mergedDisplayData[textRecordIndex]; const rowKey = currentRow?.[GONAVI_ROW_KEY]; if (rowKey === undefined || rowKey === null) { - message.info('当前记录不可编辑'); + void message.info('当前记录不可编辑'); return; } openRowEditorByKey(rowKeyStr(rowKey)); @@ -2192,7 +2185,7 @@ const DataGrid: React.FC = ({ const parsed = JSON.parse(jsonEditorValue); setJsonEditorValue(JSON.stringify(parsed, null, 2)); } catch (e: any) { - message.error("JSON 格式无效:" + (e?.message || String(e))); + void message.error("JSON 格式无效:" + (e?.message || String(e))); } }, [jsonEditorValue]); @@ -2202,16 +2195,16 @@ const DataGrid: React.FC = ({ try { parsed = JSON.parse(jsonEditorValue); } catch (e: any) { - message.error("JSON 解析失败:" + (e?.message || String(e))); + void message.error("JSON 解析失败:" + (e?.message || String(e))); return; } if (!Array.isArray(parsed)) { - message.error("JSON 视图必须是数组格式(每项对应一条记录)"); + void message.error("JSON 视图必须是数组格式(每项对应一条记录)"); return; } if (parsed.length !== mergedDisplayData.length) { - message.error(`记录条数不一致:当前 ${mergedDisplayData.length} 条,JSON 中 ${parsed.length} 条。请勿在此模式增删记录。`); + void message.error(`记录条数不一致:当前 ${mergedDisplayData.length} 条,JSON 中 ${parsed.length} 条。请勿在此模式增删记录。`); return; } @@ -2235,14 +2228,14 @@ const DataGrid: React.FC = ({ for (let idx = 0; idx < parsed.length; idx += 1) { const nextItem = parsed[idx]; if (!isPlainObject(nextItem)) { - message.error(`第 ${idx + 1} 条记录不是对象,无法应用`); + void message.error(`第 ${idx + 1} 条记录不是对象,无法应用`); return; } const currentRow = mergedDisplayData[idx]; const rowKey = currentRow?.[GONAVI_ROW_KEY]; if (rowKey === undefined || rowKey === null) { - message.error(`第 ${idx + 1} 条记录缺少行标识,无法应用`); + void message.error(`第 ${idx + 1} 条记录缺少行标识,无法应用`); return; } const keyStr = rowKeyStr(rowKey); @@ -2293,7 +2286,7 @@ const DataGrid: React.FC = ({ }); setJsonEditorOpen(false); - message.success("JSON 修改已应用到当前结果集,可继续“提交事务”"); + void message.success("JSON 修改已应用到当前结果集,可继续“提交事务”"); }, [canModifyData, jsonEditorValue, mergedDisplayData, addedRows, rowKeyStr, data, displayColumnNames]); const openRowEditorFieldEditor = useCallback((dataIndex: string) => { @@ -2341,7 +2334,7 @@ const DataGrid: React.FC = ({ const enableVirtual = viewMode === 'table'; const enableInlineEditableCell = canModifyData; - const columns = useMemo(() => { + const columns: (ColumnType & { editable?: boolean })[] = useMemo(() => { return displayColumnNames.map(key => ({ title: renderColumnTitle(key), dataIndex: key, @@ -2390,8 +2383,8 @@ const DataGrid: React.FC = ({ })); }, [displayColumnNames, columnWidths, sortInfo, handleResizeStart, canModifyData, onSort, renderColumnTitle]); - const mergedColumns = useMemo(() => columns.map(col => { - if (!col.editable) return col; + const mergedColumns = useMemo(() => columns.map((col): ColumnType => { + if (!col.editable) return col as ColumnType; const dataIndex = String(col.dataIndex); return { ...col, @@ -2425,7 +2418,7 @@ const DataGrid: React.FC = ({ return ( = ({ }); if (inserts.length === 0 && updates.length === 0 && deletes.length === 0) { - message.info("No changes to commit"); + void message.info("No changes to commit"); return; } @@ -2570,7 +2563,7 @@ const DataGrid: React.FC = ({ message: res.message, dbName }); - message.success("事务提交成功"); + void message.success("事务提交成功"); setAddedRows([]); 
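下面用一个独立的 TypeScript 片段示意 PATCH 37 中两类改写的语义:`void` 前缀用于显式丢弃 Promise、满足 no-floating-promises 规则,`?? true` 只把 null/undefined 视为“未配置”。片段中的 notify、QueryOptions 等名称均为示例假设,并非 DataGrid 的实际代码。

    // 示意:PATCH 37 中两类改写的语义(独立示例,非 DataGrid 实际代码)。

    // 1) `void expr`:显式丢弃 Promise 的返回值,
    //    告诉 @typescript-eslint/no-floating-promises 这是有意为之。
    async function notify(text: string): Promise<void> {
      console.log(text);
    }

    function onSaved(): void {
      // 不加 void 时,未处理的 Promise 会触发 no-floating-promises 告警
      void notify('保存成功');
    }

    // 2) `?? true` 与 `!== false` 的差别:
    //    `??` 只把 null/undefined 当作“未配置”,其余值原样返回。
    interface QueryOptions { showColumnComment?: boolean }

    function resolveShowComment(opts?: QueryOptions): boolean {
      return opts?.showColumnComment ?? true;
    }

    onSaved();
    console.log(resolveShowComment());                            // true(未配置)
    console.log(resolveShowComment({ showColumnComment: false })); // false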
setModifiedRows({}); setDeletedRowKeys(new Set()); @@ -2585,13 +2578,13 @@ const DataGrid: React.FC = ({ message: res.message, dbName }); - message.error("提交失败: " + res.message); + void message.error("提交失败: " + res.message); } }; const copyToClipboard = useCallback((text: string) => { - navigator.clipboard.writeText(text); - message.success("Copied to clipboard"); + navigator.clipboard.writeText(text).catch(console.error); + void message.success("Copied to clipboard"); }, []); const getTargets = useCallback((clickedRecord: any) => { @@ -2606,19 +2599,18 @@ const DataGrid: React.FC = ({ const handleCopyInsert = useCallback((record: any) => { if (!supportsCopyInsert) { - message.warning("当前数据源不支持复制为 INSERT,请使用 JSON/CSV/Markdown 复制。"); + void message.warning("当前数据源不支持复制为 INSERT,请使用 JSON/CSV/Markdown 复制。"); return; } const records = getTargets(record); - const sqls = records.map((r: any) => { + const sqlList = records.map((r: any) => { const { [GONAVI_ROW_KEY]: _rowKey, ...vals } = r; const cols = Object.keys(vals); - const values = Object.values(vals).map(v => v === null ? 'NULL' : `'${v}'`); + const values = Object.values(vals).map(v => v === null ? 'NULL' : `'${v}'`); const targetTable = tableName || 'table'; return `INSERT INTO \`${targetTable}\` (${cols.map(c => `\`${c}\``).join(', ')}) VALUES (${values.join(', ')});`; }); - copyToClipboard(sqls.join('\n')); - }, [supportsCopyInsert, tableName, getTargets, copyToClipboard]); + copyToClipboard(sqlList.join('\n')); }, [supportsCopyInsert, tableName, getTargets, copyToClipboard]); const handleCopyJson = useCallback((record: any) => { const records = getTargets(record); @@ -2660,12 +2652,12 @@ const DataGrid: React.FC = ({ try { const res = await ExportQuery(config as any, dbName || '', sql, defaultName || 'export', format); if (res.success) { - message.success("导出成功"); + void message.success("导出成功"); } else if (res.message !== "Cancelled") { - message.error("导出失败: " + res.message); + void message.error("导出失败: " + res.message); } } catch (e: any) { - message.error("导出失败: " + (e?.message || String(e))); + void message.error("导出失败: " + (e?.message || String(e))); } finally { hide(); } @@ -2722,7 +2714,7 @@ const DataGrid: React.FC = ({ // 有未提交修改时,优先按界面数据导出,避免与数据库不一致。 if (hasChanges) { - message.warning("当前存在未提交修改,导出将按界面数据生成;如需完整长字段建议先提交后再导出。"); + void message.warning("当前存在未提交修改,导出将按界面数据生成;如需完整长字段建议先提交后再导出。"); await exportData(records, format); return; } @@ -2778,12 +2770,12 @@ const DataGrid: React.FC = ({ try { const res = await ExportTable(config as any, dbName || '', tableName, format); if (res.success) { - message.success("导出成功"); + void message.success("导出成功"); } else if (res.message !== "Cancelled") { - message.error("导出失败: " + res.message); + void message.error("导出失败: " + res.message); } } catch (e: any) { - message.error("导出失败: " + (e?.message || String(e))); + void message.error("导出失败: " + (e?.message || String(e))); } finally { hide(); } @@ -2791,7 +2783,7 @@ const DataGrid: React.FC = ({ const handlePage = async () => { instance.destroy(); if (hasChanges) { - message.warning("当前存在未提交修改,导出将按界面数据生成;如需完整长字段建议先提交后再导出。"); + void message.warning("当前存在未提交修改,导出将按界面数据生成;如需完整长字段建议先提交后再导出。"); await exportData(displayData, format); return; } @@ -2832,15 +2824,15 @@ const DataGrid: React.FC = ({ const handleExportFilteredAll = async (format: string) => { if (!connectionId || !tableName) return; if (!filteredExportSql) { - message.warning('当前未应用筛选条件'); + void message.warning('当前未应用筛选条件'); return; } if (!supportsSqlQueryExport) { - 
message.error('当前数据源不支持按筛选结果导出'); + void message.error('当前数据源不支持按筛选结果导出'); return; } if (hasChanges) { - message.warning("当前存在未提交修改,筛选结果导出基于数据库已提交数据。"); + void message.warning("当前存在未提交修改,筛选结果导出基于数据库已提交数据。"); } await exportByQuery(filteredExportSql, format, `${tableName || 'export'}_filtered`); @@ -2856,14 +2848,14 @@ const DataGrid: React.FC = ({ setImportFilePath(res.data.filePath); setImportPreviewVisible(true); } else if (res.message !== "Cancelled") { - message.error("选择文件失败: " + res.message); + void message.error("选择文件失败: " + res.message); } }; const handleImportSuccess = () => { setImportPreviewVisible(false); setImportFilePath(''); - message.success('导入完成'); + void message.success('导入完成'); if (onReload) onReload(); }; @@ -3032,7 +3024,7 @@ const DataGrid: React.FC = ({ onClick={() => { if (connectionId && dbName && tableName) { clearTableColumnOrder(connectionId, dbName, tableName); - message.success('已恢复默认列排序'); + void message.success('已恢复默认列排序'); } }} > @@ -3047,7 +3039,7 @@ const DataGrid: React.FC = ({ if (connectionId && dbName && tableName) { clearTableHiddenColumns(connectionId, dbName, tableName); setLocalHiddenColumns([]); - message.success('已恢复全列显示'); + void message.success('已恢复全列显示'); } }} > @@ -3128,7 +3120,7 @@ const DataGrid: React.FC = ({ }, []); const syncExternalScrollFromTargets = useCallback((targets?: HTMLElement[], source?: HTMLElement | null) => { - const externalScroll = externalHScrollRef.current; + const externalScroll = externalHorizontalScrollRef.current; if (!(externalScroll instanceof HTMLDivElement) || horizontalSyncSourceRef.current === 'external') { return; } @@ -3152,7 +3144,7 @@ const DataGrid: React.FC = ({ }, []); const applyExternalScrollToTableTargets = useCallback(() => { - const externalScroll = externalHScrollRef.current; + const externalScroll = externalHorizontalScrollRef.current; if (!(externalScroll instanceof HTMLDivElement)) { return; } @@ -3185,7 +3177,7 @@ const DataGrid: React.FC = ({ // 非虚拟模式:外部水平滚动条的 wheel 处理(通过原生事件绑定,确保 preventDefault 生效) useEffect(() => { - const externalScroll = externalHScrollRef.current; + const externalScroll = externalHorizontalScrollRef.current; if (!externalScroll || !horizontalScrollVisible) return; const handleExternalWheel = (e: WheelEvent) => { @@ -3199,8 +3191,7 @@ const DataGrid: React.FC = ({ const maxScrollLeft = Math.max(0, externalScroll.scrollWidth - externalScroll.clientWidth); if (maxScrollLeft <= 0) return; - const nextScrollLeft = Math.max(0, Math.min(maxScrollLeft, externalScroll.scrollLeft + dominantDelta)); - externalScroll.scrollLeft = nextScrollLeft; + externalScroll.scrollLeft = Math.max(0, Math.min(maxScrollLeft, externalScroll.scrollLeft + dominantDelta)); }; externalScroll.addEventListener('wheel', handleExternalWheel, { passive: false, capture: true }); @@ -3229,7 +3220,7 @@ const DataGrid: React.FC = ({ const isTableDataAreaTarget = (target: EventTarget | null) => { const element = target instanceof HTMLElement ? 
target : null; if (!element) return false; - if (element.closest('.data-grid-external-hscroll')) return false; + if (element.closest('.data-grid-external-horizontal-scroll')) return false; return !!element.closest('.ant-table-body, .ant-table-content, .ant-table-cell, .ant-table-row, .ant-table-tbody'); }; @@ -3255,7 +3246,7 @@ const DataGrid: React.FC = ({ activeTarget.scrollLeft = nextScrollLeft; lastTableScrollLeftRef.current = nextScrollLeft; - const externalScroll = externalHScrollRef.current; + const externalScroll = externalHorizontalScrollRef.current; if (externalScroll && Math.abs(externalScroll.scrollLeft - nextScrollLeft) > 1) { externalScroll.scrollLeft = nextScrollLeft; lastExternalScrollLeftRef.current = nextScrollLeft; @@ -3283,7 +3274,7 @@ const DataGrid: React.FC = ({ let rafId: number | null = null; let boundVerticalTarget: HTMLElement | null = null; let boundHorizontalTargets: HTMLElement[] = []; - const externalScroll = externalHScrollRef.current; + const externalScroll = externalHorizontalScrollRef.current; const hasStoredScroll = !!scrollSnapshot && (Math.abs(scrollSnapshot.top) > 0.5 || Math.abs(scrollSnapshot.left) > 0.5); const emitSnapshot = () => { @@ -3350,7 +3341,7 @@ const DataGrid: React.FC = ({ target.scrollLeft = nextLeft; } }); - const externalScroll = externalHScrollRef.current; + const externalScroll = externalHorizontalScrollRef.current; if (externalScroll && Math.abs(externalScroll.scrollLeft - nextLeft) > 1) { externalScroll.scrollLeft = nextLeft; } @@ -3433,7 +3424,7 @@ const DataGrid: React.FC = ({ useEffect(() => { if (viewMode !== 'table') return; const tableContainer = tableContainerRef.current; - const externalScroll = externalHScrollRef.current; + const externalScroll = externalHorizontalScrollRef.current; if (!(tableContainer instanceof HTMLElement) || !(externalScroll instanceof HTMLDivElement)) return; let rafId: number | null = null; @@ -3577,7 +3568,7 @@ const DataGrid: React.FC = ({ } updateCellSelection(new Set()); if (!next) setBatchEditModalOpen(false); - message.info(next ? '已进入单元格编辑模式,可拖拽选择多个单元格' : '已退出单元格编辑模式'); + void message.info(next ? '已进入单元格编辑模式,可拖拽选择多个单元格' : '已退出单元格编辑模式').then(); }} > 单元格编辑器 @@ -3980,8 +3971,8 @@ const DataGrid: React.FC = ({
    = ({ }} >
    @@ -4196,7 +4187,7 @@ const DataGrid: React.FC = ({ onMouseEnter={(e) => e.currentTarget.style.background = darkMode ? '#303030' : '#f5f5f5'} onMouseLeave={(e) => e.currentTarget.style.background = 'transparent'} onClick={() => { - if (cellContextMenu.record) handleExportSelected('csv', cellContextMenu.record); + if (cellContextMenu.record) handleExportSelected('csv', cellContextMenu.record).catch(console.error); setCellContextMenu(prev => ({ ...prev, visible: false })); }} > @@ -4211,7 +4202,7 @@ const DataGrid: React.FC = ({ onMouseEnter={(e) => e.currentTarget.style.background = darkMode ? '#303030' : '#f5f5f5'} onMouseLeave={(e) => e.currentTarget.style.background = 'transparent'} onClick={() => { - if (cellContextMenu.record) handleExportSelected('xlsx', cellContextMenu.record); + if (cellContextMenu.record) handleExportSelected('xlsx', cellContextMenu.record).catch(console.error); setCellContextMenu(prev => ({ ...prev, visible: false })); }} > @@ -4226,7 +4217,7 @@ const DataGrid: React.FC = ({ onMouseEnter={(e) => e.currentTarget.style.background = darkMode ? '#303030' : '#f5f5f5'} onMouseLeave={(e) => e.currentTarget.style.background = 'transparent'} onClick={() => { - if (cellContextMenu.record) handleExportSelected('json', cellContextMenu.record); + if (cellContextMenu.record) handleExportSelected('json', cellContextMenu.record).catch(console.error); setCellContextMenu(prev => ({ ...prev, visible: false })); }} > @@ -4241,7 +4232,7 @@ const DataGrid: React.FC = ({ onMouseEnter={(e) => e.currentTarget.style.background = darkMode ? '#303030' : '#f5f5f5'} onMouseLeave={(e) => e.currentTarget.style.background = 'transparent'} onClick={() => { - if (cellContextMenu.record) handleExportSelected('html', cellContextMenu.record); + if (cellContextMenu.record) handleExportSelected('html', cellContextMenu.record).catch(console.error); setCellContextMenu(prev => ({ ...prev, visible: false })); }} > @@ -4435,7 +4426,7 @@ const DataGrid: React.FC = ({ border-radius: 999px; box-shadow: ${floatingScrollbarThumbShadow}; } - .${gridId} .data-grid-external-hscroll { + .${gridId} .data-grid-external-horizontal-scroll { position: absolute; left: ${floatingScrollbarInset}px; right: ${floatingScrollbarInset}px; @@ -4446,22 +4437,22 @@ const DataGrid: React.FC = ({ background: transparent; z-index: 24; } - .${gridId} .data-grid-external-hscroll::-webkit-scrollbar { + .${gridId} .data-grid-external-horizontal-scroll::-webkit-scrollbar { height: ${floatingScrollbarHeight}px; } - .${gridId} .data-grid-external-hscroll::-webkit-scrollbar-track { + .${gridId} .data-grid-external-horizontal-scroll::-webkit-scrollbar-track { background: ${horizontalScrollbarTrackBg}; border: 1px solid ${horizontalScrollbarTrackBorderColor}; border-radius: 999px; box-shadow: ${horizontalScrollbarTrackShadow}; } - .${gridId} .data-grid-external-hscroll::-webkit-scrollbar-thumb { + .${gridId} .data-grid-external-horizontal-scroll::-webkit-scrollbar-thumb { background: ${horizontalScrollbarThumbBg}; border: 1px solid ${horizontalScrollbarThumbBorderColor}; border-radius: 999px; box-shadow: ${horizontalScrollbarThumbShadow}; } - .${gridId} .data-grid-external-hscroll-inner { + .${gridId} .data-grid-external-horizontal-scroll-inner { height: 1px; } .${gridId} .data-grid-pagination-shell { From a73ca36a324d563c820188d75bdd15d53c86cbdd Mon Sep 17 00:00:00 2001 From: Syngnat Date: Wed, 11 Mar 2026 10:23:41 +0800 Subject: [PATCH 38/48] =?UTF-8?q?=F0=9F=94=A7=20fix(db/kingbase=5Fimpl):?= 
=?UTF-8?q?=20=E4=BF=AE=E5=A4=8D=E6=A0=87=E8=AF=86=E7=AC=A6=E6=97=A0?= =?UTF-8?q?=E6=9D=A1=E4=BB=B6=E5=8A=A0=E5=8F=8C=E5=BC=95=E5=8F=B7=E5=AF=BC?= =?UTF-8?q?=E8=87=B4SQL=E8=AF=AD=E6=B3=95=E6=8A=A5=E9=94=99?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - quoteKingbaseIdent 改为条件引用,仅对大写字母、保留字、特殊字符的标识符添加双引号 - 新增 kingbaseIdentNeedsQuote 判断标识符是否需要引用 - 新增 isKingbaseReservedWord 检测常见SQL保留字 - 补充 TestQuoteKingbaseIdent、TestKingbaseIdentNeedsQuote 单测覆盖各场景 - refs #176 --- internal/db/kingbase_impl.go | 47 +++++++++++++++++- internal/db/kingbase_impl_test.go | 46 ++++++++++++++++-- internal/redis/redis_impl.go | 24 +++++++++ internal/redis/redis_impl_test.go | 81 +++++++++++++++++++++++++++++++ 4 files changed, 194 insertions(+), 4 deletions(-) create mode 100644 internal/redis/redis_impl_test.go diff --git a/internal/db/kingbase_impl.go b/internal/db/kingbase_impl.go index 619455d..bb93467 100644 --- a/internal/db/kingbase_impl.go +++ b/internal/db/kingbase_impl.go @@ -7,6 +7,7 @@ import ( "database/sql" "fmt" "net" + "regexp" "strconv" "strings" "time" @@ -805,12 +806,56 @@ func normalizeKingbaseIdentifier(raw string) string { return value } +// kingbaseIdentNeedsQuote 判断标识符是否需要双引号包裹。 +// 与前端 sql.ts 中 needsQuote 逻辑保持一致。 +func kingbaseIdentNeedsQuote(ident string) bool { + if ident == "" { + return false + } + // 不是合法裸标识符格式(必须以字母或下划线开头,仅含字母、数字、下划线) + if matched, _ := regexp.MatchString(`^[a-zA-Z_][a-zA-Z0-9_]*$`, ident); !matched { + return true + } + // 包含大写字母时需要引号保护(KingbaseES/PostgreSQL 默认将未加引号的标识符折叠为小写) + for _, r := range ident { + if r >= 'A' && r <= 'Z' { + return true + } + } + // 是 SQL 保留字 + return isKingbaseReservedWord(ident) +} + +// isKingbaseReservedWord 检查是否为常见 SQL 保留字(简化版,与前端保持一致)。 +func isKingbaseReservedWord(ident string) bool { + switch strings.ToLower(ident) { + case "select", "from", "where", "table", "index", "user", "order", "group", "by", + "limit", "offset", "and", "or", "not", "null", "true", "false", "key", + "primary", "foreign", "references", "default", "constraint", + "create", "drop", "alter", "insert", "update", "delete", "set", "values", "into", + "join", "left", "right", "inner", "outer", "on", "as", "is", "in", "like", + "between", "case", "when", "then", "else", "end", "having", "distinct", + "all", "any", "exists", "union", "except", "intersect", + "column", "check", "unique", "with", "grant", "revoke", "trigger", + "begin", "commit", "rollback", "schema", "database", "view", "function", + "procedure", "sequence", "type", "domain", "role", "session", "current", + "authorization", "cross", "full", "natural", "some", "cast", "fetch", + "for", "to", "do", "if", "return", "returns", "declare", "cursor": + return true + } + return false +} + func quoteKingbaseIdent(name string) string { n := normalizeKingbaseIdentifier(name) - n = strings.ReplaceAll(n, `"`, `""`) if n == "" { return "\"\"" } + // 仅在需要时才加双引号,避免 KingbaseES 兼容性问题 + if !kingbaseIdentNeedsQuote(n) { + return n + } + n = strings.ReplaceAll(n, `"`, `""`) return `"` + n + `"` } diff --git a/internal/db/kingbase_impl_test.go b/internal/db/kingbase_impl_test.go index eca6eaa..afad520 100644 --- a/internal/db/kingbase_impl_test.go +++ b/internal/db/kingbase_impl_test.go @@ -34,10 +34,25 @@ func TestQuoteKingbaseIdent(t *testing.T) { in string want string }{ - {name: "plain", in: "ldf_server", want: `"ldf_server"`}, - {name: "double quoted", in: `""ldf_server""`, want: `"ldf_server"`}, - {name: "escaped quoted", in: `\"ldf_server\"`, want: `"ldf_server"`}, + // 
纯小写+下划线:不加引号 + {name: "plain lowercase", in: "ldf_server", want: "ldf_server"}, + {name: "plain lowercase 2", in: "bcs_barcode", want: "bcs_barcode"}, + {name: "double quoted input", in: `""ldf_server""`, want: "ldf_server"}, + {name: "escaped quoted input", in: `\"ldf_server\"`, want: "ldf_server"}, + // 含大写字母:加引号 + {name: "uppercase", in: "LDF_Server", want: `"LDF_Server"`}, + {name: "mixed case", in: "myTable", want: `"myTable"`}, + // SQL 保留字:加引号 + {name: "reserved word order", in: "order", want: `"order"`}, + {name: "reserved word user", in: "user", want: `"user"`}, + {name: "reserved word table", in: "table", want: `"table"`}, + {name: "reserved word select", in: "select", want: `"select"`}, + // 含特殊字符:加引号 + {name: "with hyphen", in: "my-table", want: `"my-table"`}, + {name: "with space", in: "my table", want: `"my table"`}, {name: "with embedded quote", in: `ab"cd`, want: `"ab""cd"`}, + // 空值 + {name: "empty", in: "", want: `""`}, } for _, tt := range tests { @@ -49,6 +64,31 @@ func TestQuoteKingbaseIdent(t *testing.T) { } } +func TestKingbaseIdentNeedsQuote(t *testing.T) { + tests := []struct { + name string + in string + want bool + }{ + {name: "plain lowercase", in: "ldf_server", want: false}, + {name: "starts with underscore", in: "_col", want: false}, + {name: "with digits", in: "col123", want: false}, + {name: "uppercase", in: "MyTable", want: true}, + {name: "reserved word", in: "order", want: true}, + {name: "with hyphen", in: "my-col", want: true}, + {name: "starts with digit", in: "123col", want: true}, + {name: "empty", in: "", want: false}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := kingbaseIdentNeedsQuote(tt.in); got != tt.want { + t.Fatalf("kingbaseIdentNeedsQuote(%q) = %v, want %v", tt.in, got, tt.want) + } + }) + } +} + func TestSplitKingbaseQualifiedTable(t *testing.T) { tests := []struct { name string diff --git a/internal/redis/redis_impl.go b/internal/redis/redis_impl.go index 8d41a28..93db691 100644 --- a/internal/redis/redis_impl.go +++ b/internal/redis/redis_impl.go @@ -5,6 +5,7 @@ import ( "crypto/tls" "fmt" "net" + "net/url" "strconv" "strings" "sync" @@ -174,8 +175,31 @@ func (r *RedisClientImpl) toDisplayKey(key string) string { return strings.TrimPrefix(key, prefix) } +// sanitizeRedisPassword 对 Redis 密码进行防御性 URL 解码。 +// 当密码中包含 URL 编码序列(如 %40)时,尝试解码还原原始字符。 +// 这可以防止前端 URI 构建中 encodeURIComponent 编码后的密码被误传入。 +func sanitizeRedisPassword(password string) string { + if password == "" { + return password + } + // 仅当密码中包含 '%' 且后跟两位十六进制数字时,才尝试 URL 解码 + if !strings.Contains(password, "%") { + return password + } + decoded, err := url.QueryUnescape(password) + if err != nil { + // 解码失败,使用原始密码 + return password + } + if decoded != password { + logger.Warnf("Redis 密码检测到 URL 编码,已自动解码(原长度=%d 解码后长度=%d)", len(password), len(decoded)) + } + return decoded +} + // Connect establishes a connection to Redis func (r *RedisClientImpl) Connect(config connection.ConnectionConfig) error { + config.Password = sanitizeRedisPassword(config.Password) r.config = config if r.config.RedisDB < 0 || r.config.RedisDB > 15 { r.config.RedisDB = 0 diff --git a/internal/redis/redis_impl_test.go b/internal/redis/redis_impl_test.go new file mode 100644 index 0000000..7014ab8 --- /dev/null +++ b/internal/redis/redis_impl_test.go @@ -0,0 +1,81 @@ +package redis + +import "testing" + +func TestSanitizeRedisPassword(t *testing.T) { + tests := []struct { + name string + input string + expected string + }{ + { + name: "empty password", + input: "", + expected: 
"", + }, + { + name: "plain password without special chars", + input: "mypassword123", + expected: "mypassword123", + }, + { + name: "password with @ not encoded", + input: "p@ssword", + expected: "p@ssword", + }, + { + name: "password with @ URL-encoded as %40", + input: "p%40ssword", + expected: "p@ssword", + }, + { + name: "password with multiple encoded chars", + input: "p%40ss%23word", + expected: "p@ss#word", + }, + { + name: "password with + encoded as %2B", + input: "p%2Bss", + expected: "p+ss", + }, + { + name: "password that is purely encoded", + input: "%40%23%24", + expected: "@#$", + }, + { + name: "password with invalid percent encoding", + input: "p%ZZssword", + expected: "p%ZZssword", + }, + { + name: "password with trailing percent", + input: "password%", + expected: "password%", + }, + { + name: "password with literal percent not encoding anything", + input: "100%safe", + expected: "100%safe", + }, + { + name: "password with space encoded as %20", + input: "my%20pass", + expected: "my pass", + }, + { + name: "complex password with mixed content", + input: "P%40ss%23w0rd!", + expected: "P@ss#w0rd!", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := sanitizeRedisPassword(tt.input) + if result != tt.expected { + t.Errorf("sanitizeRedisPassword(%q) = %q, want %q", tt.input, result, tt.expected) + } + }) + } +} From b0bdddad9b29d03a69edb9fd1b6ee88ea93fd853 Mon Sep 17 00:00:00 2001 From: Syngnat Date: Wed, 11 Mar 2026 13:39:41 +0800 Subject: [PATCH 39/48] =?UTF-8?q?=F0=9F=94=A7=20fix(release,db/kingbase=5F?= =?UTF-8?q?impl):=20=E4=BF=AE=E5=A4=8D=E9=87=91=E4=BB=93=E9=BB=98=E8=AE=A4?= =?UTF-8?q?=20schema=20=E5=B9=B6=E9=9D=99=E9=BB=98=E7=94=9F=E6=88=90=20DMG?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Kingbase:在 current_schema() 为 public 时探测候选 schema,并通过 DSN search_path 重连,兼容未限定 schema 的查询 - 候选优先级:数据库名/用户名同名 schema(存在性校验),否则仅在“唯一用户 schema 有表”场景兜底 - 避免连接污染:每次 Connect 重置探测结果,重连成功后替换连接并关闭旧连接 - 打包脚本:create-dmg 增加 --sandbox-safe,避免构建时自动弹出/打开挂载窗口 - 产物格式:强制 --format UDZO,并将 rw.*.dmg/UDRW 中间产物转换为可分发 DMG - 校验门禁:增加 hdiutil verify,失败时保留 .app 便于排查,同时修正卷图标探测并补 ad-hoc 签名 --- build-release.sh | 141 +++++++++++++++++++++-------- internal/db/kingbase_impl.go | 168 ++++++++++++++++++++++++++++++++++- 2 files changed, 271 insertions(+), 38 deletions(-) diff --git a/build-release.sh b/build-release.sh index 4be9a67..d8b3a72 100755 --- a/build-release.sh +++ b/build-release.sh @@ -20,6 +20,11 @@ RED='\033[0;31m' YELLOW='\033[1;33m' NC='\033[0m' +MAC_VOLICON_PATH="build/darwin/icon.icns" +if [ ! -f "$MAC_VOLICON_PATH" ]; then + MAC_VOLICON_PATH="" +fi + echo -e "${GREEN}🚀 开始构建 $APP_NAME $VERSION...${NC}" # 清理并创建输出目录 @@ -37,15 +42,25 @@ if [ $? -eq 0 ]; then # 移动 .app 到 dist mv "$APP_SRC" "$DIST_DIR/$APP_DEST_NAME" + # Ad-hoc 代码签名(无 Apple Developer 账号时防止 Gatekeeper 报已损坏) + echo " 🔏 正在对 .app 进行 ad-hoc 签名 (arm64)..." + codesign --force --deep --sign - "$DIST_DIR/$APP_DEST_NAME" + # 创建 DMG if command -v create-dmg &> /dev/null; then echo " 📦 正在打包 DMG (arm64)..." 
# 移除已存在的 DMG (以防万一) rm -f "$DIST_DIR/$DMG_NAME" - - create-dmg \ - --volname "${APP_NAME} ${VERSION}" \ - --volicon "build/appicon.icns" \ + + # --sandbox-safe 会跳过 Finder 的 AppleScript 排版,避免打包过程中弹出/打开挂载窗口(CI/本地静默打包更友好)。 + CREATE_DMG_ARGS=(--volname "${APP_NAME} ${VERSION}" --format UDZO --sandbox-safe) + if [ -n "$MAC_VOLICON_PATH" ]; then + CREATE_DMG_ARGS+=(--volicon "$MAC_VOLICON_PATH") + else + echo -e "${YELLOW} ⚠️ 未找到 macOS 卷图标 (build/darwin/icon.icns),跳过 --volicon。${NC}" + fi + + create-dmg "${CREATE_DMG_ARGS[@]}" \ --window-pos 200 120 \ --window-size 800 400 \ --icon-size 100 \ @@ -54,23 +69,47 @@ if [ $? -eq 0 ]; then --app-drop-link 600 185 \ "$DIST_DIR/$DMG_NAME" \ "$DIST_DIR/$APP_DEST_NAME" + + CREATE_DMG_EXIT_CODE=$? - # 检查是否生成了 rw.* 的临时文件并重命名 (create-dmg 有时会有此行为) - if [ ! -f "$DIST_DIR/$DMG_NAME" ]; then - RW_FILE=$(find "$DIST_DIR" -name "rw.*.dmg" -print -quit) - if [ -n "$RW_FILE" ]; then - echo -e "${YELLOW} ⚠️ 检测到临时文件名,正在重命名...${NC}" - mv "$RW_FILE" "$DIST_DIR/$DMG_NAME" - fi + if [ $CREATE_DMG_EXIT_CODE -ne 0 ]; then + echo -e "${RED} ❌ create-dmg 执行失败 (exit=$CREATE_DMG_EXIT_CODE),保留 .app 以便排查。${NC}" + else + # create-dmg 可能会在失败时遗留 rw.*.dmg 中间产物;不要直接当作最终产物使用 + if [ ! -f "$DIST_DIR/$DMG_NAME" ]; then + RW_FILE=$(find "$DIST_DIR" -maxdepth 1 -name "rw.*.dmg" -print -quit) + if [ -n "$RW_FILE" ]; then + echo -e "${YELLOW} ⚠️ 检测到 create-dmg 中间产物: $(basename "$RW_FILE"),正在转换为可分发 DMG...${NC}" + hdiutil convert "$RW_FILE" -format UDZO -o "$DIST_DIR/$DMG_NAME" >/dev/null 2>&1 + rm -f "$RW_FILE" + fi + fi + + # 防御性:即使生成了目标文件,也要确保不是 UDRW(UDRW 在 Finder 下可能表现为“已损坏/无法打开”) + if [ -f "$DIST_DIR/$DMG_NAME" ] && command -v hdiutil &> /dev/null; then + DMG_FORMAT=$(hdiutil imageinfo "$DIST_DIR/$DMG_NAME" 2>/dev/null | awk -F': ' '/^Format:/{print $2; exit}') + if [ "$DMG_FORMAT" = "UDRW" ]; then + echo -e "${YELLOW} ⚠️ 检测到 UDRW(可写原始映像),正在转换为 UDZO...${NC}" + TMP_UDZO="$DIST_DIR/.tmp.$DMG_NAME" + rm -f "$TMP_UDZO" + hdiutil convert "$DIST_DIR/$DMG_NAME" -format UDZO -o "$TMP_UDZO" >/dev/null 2>&1 && mv "$TMP_UDZO" "$DIST_DIR/$DMG_NAME" + fi + fi + + if [ -f "$DIST_DIR/$DMG_NAME" ] && command -v hdiutil &> /dev/null; then + hdiutil verify "$DIST_DIR/$DMG_NAME" >/dev/null 2>&1 + if [ $? -ne 0 ]; then + echo -e "${RED} ❌ DMG 校验失败,保留 .app 以便排查。${NC}" + else + # 删除中间的 .app 文件,保持目录整洁 + rm -rf "$DIST_DIR/$APP_DEST_NAME" + echo " ✅ 已生成 $DMG_NAME" + fi + fi fi - # 删除中间的 .app 文件,保持目录整洁 - rm -rf "$DIST_DIR/$APP_DEST_NAME" - - if [ -f "$DIST_DIR/$DMG_NAME" ]; then - echo " ✅ 已生成 $DMG_NAME" - else - echo -e "${RED} ❌ DMG 生成失败,请检查 create-dmg 输出。${NC}" + if [ ! -f "$DIST_DIR/$DMG_NAME" ]; then + echo -e "${RED} ❌ DMG 生成失败,请检查 create-dmg 输出。${NC}" fi else echo -e "${YELLOW} ⚠️ 未找到 create-dmg 工具,跳过 DMG 打包,仅保留 .app。${NC}" @@ -90,13 +129,23 @@ if [ $? -eq 0 ]; then mv "$APP_SRC" "$DIST_DIR/$APP_DEST_NAME" + # Ad-hoc 代码签名 + echo " 🔏 正在对 .app 进行 ad-hoc 签名 (amd64)..." + codesign --force --deep --sign - "$DIST_DIR/$APP_DEST_NAME" + if command -v create-dmg &> /dev/null; then echo " 📦 正在打包 DMG (amd64)..." 
rm -f "$DIST_DIR/$DMG_NAME" - - create-dmg \ - --volname "${APP_NAME} ${VERSION}" \ - --volicon "build/appicon.icns" \ + + # --sandbox-safe 会跳过 Finder 的 AppleScript 排版,避免打包过程中弹出/打开挂载窗口(CI/本地静默打包更友好)。 + CREATE_DMG_ARGS=(--volname "${APP_NAME} ${VERSION}" --format UDZO --sandbox-safe) + if [ -n "$MAC_VOLICON_PATH" ]; then + CREATE_DMG_ARGS+=(--volicon "$MAC_VOLICON_PATH") + else + echo -e "${YELLOW} ⚠️ 未找到 macOS 卷图标 (build/darwin/icon.icns),跳过 --volicon。${NC}" + fi + + create-dmg "${CREATE_DMG_ARGS[@]}" \ --window-pos 200 120 \ --window-size 800 400 \ --icon-size 100 \ @@ -106,21 +155,43 @@ if [ $? -eq 0 ]; then "$DIST_DIR/$DMG_NAME" \ "$DIST_DIR/$APP_DEST_NAME" - # 检查是否生成了 rw.* 的临时文件并重命名 - if [ ! -f "$DIST_DIR/$DMG_NAME" ]; then - RW_FILE=$(find "$DIST_DIR" -name "rw.*.dmg" -print -quit) - if [ -n "$RW_FILE" ]; then - echo -e "${YELLOW} ⚠️ 检测到临时文件名,正在重命名...${NC}" - mv "$RW_FILE" "$DIST_DIR/$DMG_NAME" - fi + CREATE_DMG_EXIT_CODE=$? + + if [ $CREATE_DMG_EXIT_CODE -ne 0 ]; then + echo -e "${RED} ❌ create-dmg 执行失败 (exit=$CREATE_DMG_EXIT_CODE),保留 .app 以便排查。${NC}" + else + if [ ! -f "$DIST_DIR/$DMG_NAME" ]; then + RW_FILE=$(find "$DIST_DIR" -maxdepth 1 -name "rw.*.dmg" -print -quit) + if [ -n "$RW_FILE" ]; then + echo -e "${YELLOW} ⚠️ 检测到 create-dmg 中间产物: $(basename "$RW_FILE"),正在转换为可分发 DMG...${NC}" + hdiutil convert "$RW_FILE" -format UDZO -o "$DIST_DIR/$DMG_NAME" >/dev/null 2>&1 + rm -f "$RW_FILE" + fi + fi + + if [ -f "$DIST_DIR/$DMG_NAME" ] && command -v hdiutil &> /dev/null; then + DMG_FORMAT=$(hdiutil imageinfo "$DIST_DIR/$DMG_NAME" 2>/dev/null | awk -F': ' '/^Format:/{print $2; exit}') + if [ "$DMG_FORMAT" = "UDRW" ]; then + echo -e "${YELLOW} ⚠️ 检测到 UDRW(可写原始映像),正在转换为 UDZO...${NC}" + TMP_UDZO="$DIST_DIR/.tmp.$DMG_NAME" + rm -f "$TMP_UDZO" + hdiutil convert "$DIST_DIR/$DMG_NAME" -format UDZO -o "$TMP_UDZO" >/dev/null 2>&1 && mv "$TMP_UDZO" "$DIST_DIR/$DMG_NAME" + fi + fi + + if [ -f "$DIST_DIR/$DMG_NAME" ] && command -v hdiutil &> /dev/null; then + hdiutil verify "$DIST_DIR/$DMG_NAME" >/dev/null 2>&1 + if [ $? -ne 0 ]; then + echo -e "${RED} ❌ DMG 校验失败,保留 .app 以便排查。${NC}" + else + rm -rf "$DIST_DIR/$APP_DEST_NAME" + echo " ✅ 已生成 $DMG_NAME" + fi + fi fi - rm -rf "$DIST_DIR/$APP_DEST_NAME" - - if [ -f "$DIST_DIR/$DMG_NAME" ]; then - echo " ✅ 已生成 $DMG_NAME" - else - echo -e "${RED} ❌ DMG 生成失败。${NC}" + if [ ! 
-f "$DIST_DIR/$DMG_NAME" ]; then + echo -e "${RED} ❌ DMG 生成失败。${NC}" fi else echo -e "${YELLOW} ⚠️ 未找到 create-dmg 工具。${NC}" diff --git a/internal/db/kingbase_impl.go b/internal/db/kingbase_impl.go index bb93467..c227506 100644 --- a/internal/db/kingbase_impl.go +++ b/internal/db/kingbase_impl.go @@ -21,9 +21,10 @@ import ( ) type KingbaseDB struct { - conn *sql.DB - pingTimeout time.Duration - forwarder *ssh.LocalForwarder // Store SSH tunnel forwarder + conn *sql.DB + pingTimeout time.Duration + defaultSearchPath string + forwarder *ssh.LocalForwarder // Store SSH tunnel forwarder } func quoteConnValue(v string) string { @@ -75,6 +76,9 @@ func (k *KingbaseDB) getDSN(config connection.ConnectionConfig) string { quoteConnValue(resolvePostgresSSLMode(config)), getConnectTimeoutSeconds(config), ) + if strings.TrimSpace(k.defaultSearchPath) != "" { + dsn += fmt.Sprintf(" search_path=%s", quoteConnValue(k.defaultSearchPath)) + } return dsn } @@ -120,6 +124,9 @@ func (k *KingbaseDB) Connect(config connection.ConnectionConfig) error { var failures []string for idx, attempt := range attempts { + // 避免跨连接缓存 defaultSearchPath 造成的污染:每次 Connect 都重新探测一次。 + k.defaultSearchPath = "" + dsn := k.getDSN(attempt) db, err := sql.Open("kingbase", dsn) if err != nil { @@ -137,11 +144,166 @@ func (k *KingbaseDB) Connect(config connection.ConnectionConfig) error { if idx > 0 { logger.Warnf("人大金仓 SSL 优先连接失败,已回退至明文连接") } + + k.reconnectWithPreferredSearchPathIfNeeded(attempt) return nil } return fmt.Errorf("连接建立后验证失败:%s", strings.Join(failures, ";")) } +func (k *KingbaseDB) reconnectWithPreferredSearchPathIfNeeded(config connection.ConnectionConfig) { + if k.conn == nil { + return + } + + timeout := k.pingTimeout + if timeout <= 0 { + timeout = 5 * time.Second + } + ctx, cancel := utils.ContextWithTimeout(timeout) + defer cancel() + + var currentSchema string + if err := k.conn.QueryRowContext(ctx, "SELECT current_schema()").Scan(¤tSchema); err != nil { + logger.Warnf("人大金仓读取当前 schema 失败:%v", err) + return + } + + if schema := strings.TrimSpace(currentSchema); schema != "" && !strings.EqualFold(schema, "public") { + return + } + + searchPath, chosenSchema := k.detectPreferredSearchPath(ctx, config) + if strings.TrimSpace(searchPath) == "" { + return + } + + oldConn := k.conn + prevSearchPath := k.defaultSearchPath + k.defaultSearchPath = searchPath + + dsn := k.getDSN(config) + newConn, err := sql.Open("kingbase", dsn) + if err != nil { + k.defaultSearchPath = prevSearchPath + logger.Warnf("人大金仓重连以设置 search_path 失败:%v", err) + return + } + if err := newConn.PingContext(ctx); err != nil { + _ = newConn.Close() + k.defaultSearchPath = prevSearchPath + logger.Warnf("人大金仓重连后验证失败:%v", err) + return + } + + k.conn = newConn + _ = oldConn.Close() + logger.Infof("人大金仓已设置默认 schema:%s", chosenSchema) +} + +func (k *KingbaseDB) kingbaseSchemaExists(ctx context.Context, schema string) (bool, error) { + if schema = strings.TrimSpace(schema); schema == "" { + return false, nil + } + + var one int + err := k.conn.QueryRowContext(ctx, "SELECT 1 FROM pg_namespace WHERE nspname = $1", schema).Scan(&one) + if err == sql.ErrNoRows { + return false, nil + } + if err != nil { + return false, err + } + return true, nil +} + +func (k *KingbaseDB) detectPreferredSearchPath(ctx context.Context, config connection.ConnectionConfig) (searchPath string, chosenSchema string) { + // 1) 优先使用与数据库名/用户名同名的 schema(需要存在) + candidates := []string{ + normalizeKingbaseIdentifier(config.Database), + normalizeKingbaseIdentifier(config.User), + } + + seen := 
make(map[string]struct{}, len(candidates)) + for _, candidate := range candidates { + if candidate == "" || strings.EqualFold(candidate, "public") { + continue + } + key := strings.ToLower(candidate) + if _, ok := seen[key]; ok { + continue + } + seen[key] = struct{}{} + + exists, err := k.kingbaseSchemaExists(ctx, candidate) + if err != nil { + logger.Warnf("人大金仓检查 schema 是否存在失败:schema=%s err=%v", candidate, err) + continue + } + if !exists { + continue + } + + return fmt.Sprintf("%s,public", quoteKingbaseIdent(candidate)), candidate + } + + // 2) 如果只有一个“用户 schema”含有表,则将其作为默认 schema(更符合 DB GUI 的直觉) + schema, err := k.detectSingleUserSchemaWithTables(ctx) + if err != nil { + logger.Warnf("人大金仓探测默认 schema 失败:%v", err) + return "", "" + } + if schema == "" || strings.EqualFold(schema, "public") { + return "", "" + } + return fmt.Sprintf("%s,public", quoteKingbaseIdent(schema)), schema +} + +func (k *KingbaseDB) detectSingleUserSchemaWithTables(ctx context.Context) (string, error) { + if k.conn == nil { + return "", nil + } + + // 仅在“唯一用户 schema”场景做兜底,避免多 schema 下误选导致对象解析歧义。 + // 注:information_schema.tables 的视图在 PG/金仓语义稳定且权限要求相对低。 + query := ` +SELECT table_schema, COUNT(*) AS table_count +FROM information_schema.tables +WHERE table_type = 'BASE TABLE' + AND table_schema NOT IN ('pg_catalog', 'information_schema', 'public') + AND table_schema NOT LIKE 'pg_%' +GROUP BY table_schema +ORDER BY table_count DESC, table_schema +LIMIT 2` + + rows, err := k.conn.QueryContext(ctx, query) + if err != nil { + return "", err + } + defer rows.Close() + + type row struct { + schema string + count int64 + } + var results []row + for rows.Next() { + var r row + if scanErr := rows.Scan(&r.schema, &r.count); scanErr != nil { + return "", scanErr + } + results = append(results, r) + } + if err := rows.Err(); err != nil { + return "", err + } + + if len(results) != 1 { + return "", nil + } + return normalizeKingbaseIdentifier(results[0].schema), nil +} + func (k *KingbaseDB) Close() error { // Close SSH forwarder first if exists if k.forwarder != nil { From 4d58cc6e263f1eff6d2a6f2945785b9edcd77242 Mon Sep 17 00:00:00 2001 From: Syngnat Date: Wed, 11 Mar 2026 14:04:37 +0800 Subject: [PATCH 40/48] =?UTF-8?q?=F0=9F=90=9B=20fix(connection/redis):=20?= =?UTF-8?q?=E4=BF=AE=E5=A4=8D=20Redis=20URI=20=E7=94=A8=E6=88=B7=E5=90=8D?= =?UTF-8?q?=E5=A4=84=E7=90=86=E5=AF=BC=E8=87=B4=E8=AE=A4=E8=AF=81=E5=A4=B1?= =?UTF-8?q?=E8=B4=A5?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Redis URI 解析回填 user 字段,兼容 redis://user:pass@... 与 redis://:pass@... - 生成 URI 时按需输出 user/password,避免丢失用户名信息 - Redis 类型默认用户名置空,并在构建配置时清理历史默认 root - 避免 go-redis 触发 ACL AUTH(user, pass) 导致 WRONGPASS - refs #212 --- frontend/src/components/ConnectionModal.tsx | 26 +++++++++++++++++++-- 1 file changed, 24 insertions(+), 2 deletions(-) diff --git a/frontend/src/components/ConnectionModal.tsx b/frontend/src/components/ConnectionModal.tsx index bf7414b..ce874a9 100644 --- a/frontend/src/components/ConnectionModal.tsx +++ b/frontend/src/components/ConnectionModal.tsx @@ -568,6 +568,7 @@ const ConnectionModal: React.FC<{ return { host: primary?.host || 'localhost', port: primary?.port || 6379, + user: parsed.username || '', password: parsed.password || '', useSSL: isRediss, sslMode: isRediss ? (skipVerify ? 
'skip-verify' : 'required') : 'disable', @@ -823,8 +824,15 @@ const ConnectionModal: React.FC<{ if (hosts.length > 1 || values.redisTopology === 'cluster') { params.set('topology', 'cluster'); } + const redisUser = String(values.user || '').trim(); const redisPassword = String(values.password || ''); - const redisAuth = redisPassword ? `:${encodeURIComponent(redisPassword)}@` : ''; + let redisAuth = ''; + if (redisUser || redisPassword) { + const encodedPassword = redisPassword ? encodeURIComponent(redisPassword) : ''; + redisAuth = redisUser + ? `${encodeURIComponent(redisUser)}${redisPassword ? `:${encodedPassword}` : ''}@` + : `:${encodedPassword}@`; + } const redisDB = Number.isFinite(Number(values.redisDB)) ? Math.max(0, Math.min(15, Math.trunc(Number(values.redisDB)))) : 0; @@ -1368,6 +1376,16 @@ const ConnectionModal: React.FC<{ const defaultPort = getDefaultPortByType(type); const isFileDbType = isFileDatabaseType(type); const sslCapableType = supportsSSLForType(type); + + // Redis 默认不展示用户名字段;若 URI 可解析则以 URI 为准覆盖 user, + // 同时清理历史默认值 root,避免 go-redis 发送 ACL AUTH(user, pass) 导致 WRONGPASS。 + if (type === 'redis') { + if (parsedUriValues && Object.prototype.hasOwnProperty.call(parsedUriValues, 'user')) { + mergedValues.user = String((parsedUriValues as any).user || ''); + } else if (String(mergedValues.user || '').trim() === 'root') { + mergedValues.user = ''; + } + } const sslModeRaw = String(mergedValues.sslMode || 'preferred').trim().toLowerCase(); const sslMode: 'preferred' | 'required' | 'skip-verify' | 'disable' = sslModeRaw === 'required' ? 'required' @@ -1618,7 +1636,11 @@ const ConnectionModal: React.FC<{ redisDB: 0, }); } else if (type !== 'custom') { - const defaultUser = type === 'clickhouse' ? 'default' : 'root'; + const defaultUser = type === 'clickhouse' + ? 'default' + : type === 'redis' + ? '' + : 'root'; const sslCapableType = supportsSSLForType(type); setUseSSL(false); setUseHttpTunnel(false); From d8b6b4ef8d444105a405df406bea3affb5c0af74 Mon Sep 17 00:00:00 2001 From: Syngnat Date: Wed, 11 Mar 2026 14:36:36 +0800 Subject: [PATCH 41/48] =?UTF-8?q?=F0=9F=94=A7=20fix(release,ssh):=20?= =?UTF-8?q?=E4=BF=AE=E5=A4=8D=20SSH=20=E8=AF=AF=E5=88=A4=E8=BF=9E=E6=8E=A5?= =?UTF-8?q?=E6=88=90=E5=8A=9F=E5=B9=B6=E7=BA=A0=E6=AD=A3=20DMG=20=E6=89=93?= =?UTF-8?q?=E5=8C=85=E7=BB=93=E6=9E=84?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - SSH 缓存 key 纳入认证指纹(password/keyPath),避免改错凭证仍复用旧连接/端口转发 - MySQL/MariaDB/Doris:SSH 隧道建立失败直接返回错误,不再回退直连导致测试误判成功 - 新增最小单测覆盖 SSH cache key 与 UseSSH 异常路径 - build-release.sh:create-dmg 使用 staging 目录作为 source,避免 DMG 根目录变成 Contents - refs #213 --- build-release.sh | 154 +++++++++++++++++------------ internal/db/diros_impl.go | 23 +++-- internal/db/mariadb_impl.go | 22 +++-- internal/db/mysql_impl.go | 22 +++-- internal/db/mysql_ssh_test.go | 26 +++++ internal/ssh/ssh.go | 94 +++++++++++++----- internal/ssh/ssh_cache_key_test.go | 46 +++++++++ 7 files changed, 269 insertions(+), 118 deletions(-) create mode 100644 internal/db/mysql_ssh_test.go create mode 100644 internal/ssh/ssh_cache_key_test.go diff --git a/build-release.sh b/build-release.sh index d8b3a72..a36f835 100755 --- a/build-release.sh +++ b/build-release.sh @@ -42,39 +42,50 @@ if [ $? -eq 0 ]; then # 移动 .app 到 dist mv "$APP_SRC" "$DIST_DIR/$APP_DEST_NAME" - # Ad-hoc 代码签名(无 Apple Developer 账号时防止 Gatekeeper 报已损坏) - echo " 🔏 正在对 .app 进行 ad-hoc 签名 (arm64)..." 
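本补丁(PATCH 40)处理的是 redis URI 中 userinfo 的两种形态。下面用标准 URL API 做一个独立示意,说明 redis://user:pass@ 与 redis://:pass@ 解析出的用户名差别,以及提交说明中提到的“误带用户名会触发 ACL AUTH(user, pass) 导致 WRONGPASS”问题;describeRedisUri 为示例函数名,并非 ConnectionModal 的实际代码。

    // 示意:redis://user:pass@ 与 redis://:pass@ 在 userinfo 上的差别。
    // 按提交说明,若默认用户名 root 被误写进 URI,go-redis 会发送 AUTH root <pass>,
    // 在未启用 ACL 的服务端上通常得到 WRONGPASS/ERR。
    function describeRedisUri(uri: string): { user: string; password: string } {
      const parsed = new URL(uri);
      return {
        user: decodeURIComponent(parsed.username || ''),
        password: decodeURIComponent(parsed.password || ''),
      };
    }

    console.log(describeRedisUri('redis://:p%40ss@127.0.0.1:6379/0'));
    // => { user: '', password: 'p@ss' }   仅密码,对应传统 AUTH <pass>

    console.log(describeRedisUri('redis://root:p%40ss@127.0.0.1:6379/0'));
    // => { user: 'root', password: 'p@ss' }  带用户名,对应 AUTH root <pass>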
- codesign --force --deep --sign - "$DIST_DIR/$APP_DEST_NAME" + # Ad-hoc 代码签名(无 Apple Developer 账号时防止 Gatekeeper 报已损坏) + echo " 🔏 正在对 .app 进行 ad-hoc 签名 (arm64)..." + codesign --force --deep --sign - "$DIST_DIR/$APP_DEST_NAME" - # 创建 DMG - if command -v create-dmg &> /dev/null; then - echo " 📦 正在打包 DMG (arm64)..." - # 移除已存在的 DMG (以防万一) - rm -f "$DIST_DIR/$DMG_NAME" + # 创建 DMG + if command -v create-dmg &> /dev/null; then + echo " 📦 正在打包 DMG (arm64)..." + # 移除已存在的 DMG (以防万一) + rm -f "$DIST_DIR/$DMG_NAME" + # create-dmg 的 source 需要是“包含 .app 的目录”,不能直接传 .app 路径。 + STAGE_DIR=$(mktemp -d "$DIST_DIR/.dmg-stage-${APP_NAME}-${VERSION}-arm64.XXXXXX") + if [ -z "$STAGE_DIR" ] || [ ! -d "$STAGE_DIR" ]; then + echo -e "${RED} ❌ 创建 DMG 临时目录失败,跳过 DMG 打包。${NC}" + else + if command -v ditto &> /dev/null; then + ditto "$DIST_DIR/$APP_DEST_NAME" "$STAGE_DIR/$APP_DEST_NAME" + else + cp -R "$DIST_DIR/$APP_DEST_NAME" "$STAGE_DIR/$APP_DEST_NAME" + fi - # --sandbox-safe 会跳过 Finder 的 AppleScript 排版,避免打包过程中弹出/打开挂载窗口(CI/本地静默打包更友好)。 - CREATE_DMG_ARGS=(--volname "${APP_NAME} ${VERSION}" --format UDZO --sandbox-safe) - if [ -n "$MAC_VOLICON_PATH" ]; then - CREATE_DMG_ARGS+=(--volicon "$MAC_VOLICON_PATH") + # --sandbox-safe 会跳过 Finder 的 AppleScript 排版,避免打包过程中弹出/打开挂载窗口(CI/本地静默打包更友好)。 + CREATE_DMG_ARGS=(--volname "${APP_NAME} ${VERSION}" --format UDZO --sandbox-safe) + if [ -n "$MAC_VOLICON_PATH" ]; then + CREATE_DMG_ARGS+=(--volicon "$MAC_VOLICON_PATH") else echo -e "${YELLOW} ⚠️ 未找到 macOS 卷图标 (build/darwin/icon.icns),跳过 --volicon。${NC}" fi - create-dmg "${CREATE_DMG_ARGS[@]}" \ - --window-pos 200 120 \ - --window-size 800 400 \ - --icon-size 100 \ - --icon "$APP_DEST_NAME" 200 190 \ - --hide-extension "$APP_DEST_NAME" \ - --app-drop-link 600 185 \ - "$DIST_DIR/$DMG_NAME" \ - "$DIST_DIR/$APP_DEST_NAME" + create-dmg "${CREATE_DMG_ARGS[@]}" \ + --window-pos 200 120 \ + --window-size 800 400 \ + --icon-size 100 \ + --icon "$APP_DEST_NAME" 200 190 \ + --hide-extension "$APP_DEST_NAME" \ + --app-drop-link 600 185 \ + "$DIST_DIR/$DMG_NAME" \ + "$STAGE_DIR" - CREATE_DMG_EXIT_CODE=$? - - if [ $CREATE_DMG_EXIT_CODE -ne 0 ]; then - echo -e "${RED} ❌ create-dmg 执行失败 (exit=$CREATE_DMG_EXIT_CODE),保留 .app 以便排查。${NC}" - else + CREATE_DMG_EXIT_CODE=$? + rm -rf "$STAGE_DIR" + + if [ $CREATE_DMG_EXIT_CODE -ne 0 ]; then + echo -e "${RED} ❌ create-dmg 执行失败 (exit=$CREATE_DMG_EXIT_CODE),保留 .app 以便排查。${NC}" + else # create-dmg 可能会在失败时遗留 rw.*.dmg 中间产物;不要直接当作最终产物使用 if [ ! -f "$DIST_DIR/$DMG_NAME" ]; then RW_FILE=$(find "$DIST_DIR" -maxdepth 1 -name "rw.*.dmg" -print -quit) @@ -108,14 +119,15 @@ if [ $? -eq 0 ]; then fi fi - if [ ! -f "$DIST_DIR/$DMG_NAME" ]; then - echo -e "${RED} ❌ DMG 生成失败,请检查 create-dmg 输出。${NC}" - fi - else - echo -e "${YELLOW} ⚠️ 未找到 create-dmg 工具,跳过 DMG 打包,仅保留 .app。${NC}" - echo " 安装命令: brew install create-dmg" - fi -else + if [ ! -f "$DIST_DIR/$DMG_NAME" ]; then + echo -e "${RED} ❌ DMG 生成失败,请检查 create-dmg 输出。${NC}" + fi + fi + else + echo -e "${YELLOW} ⚠️ 未找到 create-dmg 工具,跳过 DMG 打包,仅保留 .app。${NC}" + echo " 安装命令: brew install create-dmg" + fi + else echo -e "${RED} ❌ macOS arm64 构建失败。${NC}" fi @@ -129,37 +141,48 @@ if [ $? -eq 0 ]; then mv "$APP_SRC" "$DIST_DIR/$APP_DEST_NAME" - # Ad-hoc 代码签名 - echo " 🔏 正在对 .app 进行 ad-hoc 签名 (amd64)..." - codesign --force --deep --sign - "$DIST_DIR/$APP_DEST_NAME" + # Ad-hoc 代码签名 + echo " 🔏 正在对 .app 进行 ad-hoc 签名 (amd64)..." + codesign --force --deep --sign - "$DIST_DIR/$APP_DEST_NAME" - if command -v create-dmg &> /dev/null; then - echo " 📦 正在打包 DMG (amd64)..." 
- rm -f "$DIST_DIR/$DMG_NAME" + if command -v create-dmg &> /dev/null; then + echo " 📦 正在打包 DMG (amd64)..." + rm -f "$DIST_DIR/$DMG_NAME" + # create-dmg 的 source 需要是“包含 .app 的目录”,不能直接传 .app 路径。 + STAGE_DIR=$(mktemp -d "$DIST_DIR/.dmg-stage-${APP_NAME}-${VERSION}-amd64.XXXXXX") + if [ -z "$STAGE_DIR" ] || [ ! -d "$STAGE_DIR" ]; then + echo -e "${RED} ❌ 创建 DMG 临时目录失败,跳过 DMG 打包。${NC}" + else + if command -v ditto &> /dev/null; then + ditto "$DIST_DIR/$APP_DEST_NAME" "$STAGE_DIR/$APP_DEST_NAME" + else + cp -R "$DIST_DIR/$APP_DEST_NAME" "$STAGE_DIR/$APP_DEST_NAME" + fi - # --sandbox-safe 会跳过 Finder 的 AppleScript 排版,避免打包过程中弹出/打开挂载窗口(CI/本地静默打包更友好)。 - CREATE_DMG_ARGS=(--volname "${APP_NAME} ${VERSION}" --format UDZO --sandbox-safe) - if [ -n "$MAC_VOLICON_PATH" ]; then - CREATE_DMG_ARGS+=(--volicon "$MAC_VOLICON_PATH") + # --sandbox-safe 会跳过 Finder 的 AppleScript 排版,避免打包过程中弹出/打开挂载窗口(CI/本地静默打包更友好)。 + CREATE_DMG_ARGS=(--volname "${APP_NAME} ${VERSION}" --format UDZO --sandbox-safe) + if [ -n "$MAC_VOLICON_PATH" ]; then + CREATE_DMG_ARGS+=(--volicon "$MAC_VOLICON_PATH") else echo -e "${YELLOW} ⚠️ 未找到 macOS 卷图标 (build/darwin/icon.icns),跳过 --volicon。${NC}" fi - create-dmg "${CREATE_DMG_ARGS[@]}" \ - --window-pos 200 120 \ - --window-size 800 400 \ - --icon-size 100 \ - --icon "$APP_DEST_NAME" 200 190 \ - --hide-extension "$APP_DEST_NAME" \ - --app-drop-link 600 185 \ - "$DIST_DIR/$DMG_NAME" \ - "$DIST_DIR/$APP_DEST_NAME" + create-dmg "${CREATE_DMG_ARGS[@]}" \ + --window-pos 200 120 \ + --window-size 800 400 \ + --icon-size 100 \ + --icon "$APP_DEST_NAME" 200 190 \ + --hide-extension "$APP_DEST_NAME" \ + --app-drop-link 600 185 \ + "$DIST_DIR/$DMG_NAME" \ + "$STAGE_DIR" - CREATE_DMG_EXIT_CODE=$? + CREATE_DMG_EXIT_CODE=$? + rm -rf "$STAGE_DIR" - if [ $CREATE_DMG_EXIT_CODE -ne 0 ]; then - echo -e "${RED} ❌ create-dmg 执行失败 (exit=$CREATE_DMG_EXIT_CODE),保留 .app 以便排查。${NC}" - else + if [ $CREATE_DMG_EXIT_CODE -ne 0 ]; then + echo -e "${RED} ❌ create-dmg 执行失败 (exit=$CREATE_DMG_EXIT_CODE),保留 .app 以便排查。${NC}" + else if [ ! -f "$DIST_DIR/$DMG_NAME" ]; then RW_FILE=$(find "$DIST_DIR" -maxdepth 1 -name "rw.*.dmg" -print -quit) if [ -n "$RW_FILE" ]; then @@ -190,14 +213,15 @@ if [ $? -eq 0 ]; then fi fi - if [ ! -f "$DIST_DIR/$DMG_NAME" ]; then - echo -e "${RED} ❌ DMG 生成失败。${NC}" - fi - else - echo -e "${YELLOW} ⚠️ 未找到 create-dmg 工具。${NC}" - fi -else - echo -e "${RED} ❌ macOS amd64 构建失败。${NC}" + if [ ! 
-f "$DIST_DIR/$DMG_NAME" ]; then + echo -e "${RED} ❌ DMG 生成失败。${NC}" + fi + fi + else + echo -e "${YELLOW} ⚠️ 未找到 create-dmg 工具。${NC}" + fi + else + echo -e "${RED} ❌ macOS amd64 构建失败。${NC}" fi # --- Windows AMD64 构建 --- diff --git a/internal/db/diros_impl.go b/internal/db/diros_impl.go index 07bed73..773b7fa 100644 --- a/internal/db/diros_impl.go +++ b/internal/db/diros_impl.go @@ -9,7 +9,6 @@ import ( "strings" "GoNavi-Wails/internal/connection" - "GoNavi-Wails/internal/logger" "GoNavi-Wails/internal/ssh" "GoNavi-Wails/internal/utils" @@ -135,26 +134,26 @@ func collectDirosAddresses(config connection.ConnectionConfig) []string { return result } -func (d *DirosDB) getDSN(config connection.ConnectionConfig) string { +func (d *DirosDB) getDSN(config connection.ConnectionConfig) (string, error) { database := config.Database protocol := "tcp" address := normalizeMySQLAddress(config.Host, config.Port) if config.UseSSH { netName, err := ssh.RegisterSSHNetwork(config.SSH) - if err == nil { - protocol = netName - address = normalizeMySQLAddress(config.Host, config.Port) - } else { - logger.Warnf("注册 Doris SSH 网络失败,将尝试直连:地址=%s:%d 用户=%s,原因:%v", config.Host, config.Port, config.User, err) + if err != nil { + return "", fmt.Errorf("创建 SSH 隧道失败:%w", err) } + protocol = netName } timeout := getConnectTimeoutSeconds(config) tlsMode := resolveMySQLTLSMode(config) - return fmt.Sprintf("%s:%s@%s(%s)/%s?charset=utf8mb4&parseTime=True&loc=Local&timeout=%ds&tls=%s", - config.User, config.Password, protocol, address, database, timeout, url.QueryEscape(tlsMode)) + return fmt.Sprintf( + "%s:%s@%s(%s)/%s?charset=utf8mb4&parseTime=True&loc=Local&timeout=%ds&tls=%s", + config.User, config.Password, protocol, address, database, timeout, url.QueryEscape(tlsMode), + ), nil } func resolveDirosCredential(config connection.ConnectionConfig, addressIndex int) (string, string) { @@ -192,7 +191,11 @@ func (d *DirosDB) Connect(config connection.ConnectionConfig) error { candidateConfig.Port = port candidateConfig.User, candidateConfig.Password = resolveDirosCredential(runConfig, index) - dsn := d.getDSN(candidateConfig) + dsn, err := d.getDSN(candidateConfig) + if err != nil { + errorDetails = append(errorDetails, fmt.Sprintf("%s 生成连接串失败: %v", address, err)) + continue + } db, err := sql.Open(dirosDriverName, dsn) if err != nil { errorDetails = append(errorDetails, fmt.Sprintf("%s 打开失败: %v", address, err)) diff --git a/internal/db/mariadb_impl.go b/internal/db/mariadb_impl.go index 6a36400..65b9cc3 100644 --- a/internal/db/mariadb_impl.go +++ b/internal/db/mariadb_impl.go @@ -11,7 +11,6 @@ import ( "time" "GoNavi-Wails/internal/connection" - "GoNavi-Wails/internal/logger" "GoNavi-Wails/internal/ssh" "GoNavi-Wails/internal/utils" @@ -25,30 +24,33 @@ type MariaDB struct { pingTimeout time.Duration } -func (m *MariaDB) getDSN(config connection.ConnectionConfig) string { +func (m *MariaDB) getDSN(config connection.ConnectionConfig) (string, error) { database := config.Database protocol := "tcp" address := fmt.Sprintf("%s:%d", config.Host, config.Port) if config.UseSSH { netName, err := ssh.RegisterSSHNetwork(config.SSH) - if err == nil { - protocol = netName - address = fmt.Sprintf("%s:%d", config.Host, config.Port) - } else { - logger.Warnf("注册 SSH 网络失败,将尝试直连:地址=%s:%d 用户=%s,原因:%v", config.Host, config.Port, config.User, err) + if err != nil { + return "", fmt.Errorf("创建 SSH 隧道失败:%w", err) } + protocol = netName } timeout := getConnectTimeoutSeconds(config) tlsMode := resolveMySQLTLSMode(config) - return 
fmt.Sprintf("%s:%s@%s(%s)/%s?charset=utf8mb4&parseTime=True&loc=Local&timeout=%ds&tls=%s", - config.User, config.Password, protocol, address, database, timeout, url.QueryEscape(tlsMode)) + return fmt.Sprintf( + "%s:%s@%s(%s)/%s?charset=utf8mb4&parseTime=True&loc=Local&timeout=%ds&tls=%s", + config.User, config.Password, protocol, address, database, timeout, url.QueryEscape(tlsMode), + ), nil } func (m *MariaDB) Connect(config connection.ConnectionConfig) error { - dsn := m.getDSN(config) + dsn, err := m.getDSN(config) + if err != nil { + return err + } db, err := sql.Open("mysql", dsn) if err != nil { return fmt.Errorf("打开数据库连接失败:%w", err) diff --git a/internal/db/mysql_impl.go b/internal/db/mysql_impl.go index 5095f1c..32b63cc 100644 --- a/internal/db/mysql_impl.go +++ b/internal/db/mysql_impl.go @@ -169,26 +169,26 @@ func collectMySQLAddresses(config connection.ConnectionConfig) []string { return result } -func (m *MySQLDB) getDSN(config connection.ConnectionConfig) string { +func (m *MySQLDB) getDSN(config connection.ConnectionConfig) (string, error) { database := config.Database protocol := "tcp" address := normalizeMySQLAddress(config.Host, config.Port) if config.UseSSH { netName, err := ssh.RegisterSSHNetwork(config.SSH) - if err == nil { - protocol = netName - address = normalizeMySQLAddress(config.Host, config.Port) - } else { - logger.Warnf("注册 SSH 网络失败,将尝试直连:地址=%s:%d 用户=%s,原因:%v", config.Host, config.Port, config.User, err) + if err != nil { + return "", fmt.Errorf("创建 SSH 隧道失败:%w", err) } + protocol = netName } timeout := getConnectTimeoutSeconds(config) tlsMode := resolveMySQLTLSMode(config) - return fmt.Sprintf("%s:%s@%s(%s)/%s?charset=utf8mb4&parseTime=True&loc=Local&timeout=%ds&tls=%s", - config.User, config.Password, protocol, address, database, timeout, url.QueryEscape(tlsMode)) + return fmt.Sprintf( + "%s:%s@%s(%s)/%s?charset=utf8mb4&parseTime=True&loc=Local&timeout=%ds&tls=%s", + config.User, config.Password, protocol, address, database, timeout, url.QueryEscape(tlsMode), + ), nil } func resolveMySQLCredential(config connection.ConnectionConfig, addressIndex int) (string, string) { @@ -226,7 +226,11 @@ func (m *MySQLDB) Connect(config connection.ConnectionConfig) error { candidateConfig.Port = port candidateConfig.User, candidateConfig.Password = resolveMySQLCredential(runConfig, index) - dsn := m.getDSN(candidateConfig) + dsn, err := m.getDSN(candidateConfig) + if err != nil { + errorDetails = append(errorDetails, fmt.Sprintf("%s 生成连接串失败: %v", address, err)) + continue + } db, err := sql.Open("mysql", dsn) if err != nil { errorDetails = append(errorDetails, fmt.Sprintf("%s 打开失败: %v", address, err)) diff --git a/internal/db/mysql_ssh_test.go b/internal/db/mysql_ssh_test.go new file mode 100644 index 0000000..673639c --- /dev/null +++ b/internal/db/mysql_ssh_test.go @@ -0,0 +1,26 @@ +package db + +import ( + "testing" + + "GoNavi-Wails/internal/connection" +) + +func TestMySQLDSN_UseSSH_ShouldFailWhenSSHInvalid(t *testing.T) { + m := &MySQLDB{} + _, err := m.getDSN(connection.ConnectionConfig{ + Host: "127.0.0.1", + Port: 3306, + User: "root", + UseSSH: true, + SSH: connection.SSHConfig{ + Host: "127.0.0.1", + Port: 0, // invalid port, should fail immediately + User: "bad", + Password: "bad", + }, + }) + if err == nil { + t.Fatalf("expected error when UseSSH=true and SSH config invalid") + } +} diff --git a/internal/ssh/ssh.go b/internal/ssh/ssh.go index 51ad364..15feab3 100644 --- a/internal/ssh/ssh.go +++ b/internal/ssh/ssh.go @@ -2,10 +2,13 @@ package ssh import ( 
"context" + "crypto/sha256" + "encoding/hex" "fmt" "io" "net" "os" + "strconv" "sync" "time" @@ -69,7 +72,7 @@ func connectSSH(config connection.SSHConfig) (*ssh.Client, error) { } } } - + if config.Password != "" { authMethods = append(authMethods, ssh.Password(config.Password)) } @@ -105,7 +108,7 @@ func RegisterSSHNetwork(sshConfig connection.SSHConfig) (string, error) { // Generate unique network name netName := fmt.Sprintf("ssh_%s_%d", sshConfig.Host, time.Now().UnixNano()) logger.Infof("注册 SSH 网络:%s(地址=%s:%d 用户=%s)", netName, sshConfig.Host, sshConfig.Port, sshConfig.User) - + mysql.RegisterDialContext(netName, func(ctx context.Context, addr string) (net.Conn, error) { return dialContext(ctx, client, "tcp", addr) }) @@ -115,12 +118,58 @@ func RegisterSSHNetwork(sshConfig connection.SSHConfig) (string, error) { // sshClientCache stores SSH clients to avoid creating multiple connections var ( - sshClientCache = make(map[string]*ssh.Client) + sshClientCache = make(map[sshClientCacheKey]*ssh.Client) sshClientCacheMu sync.RWMutex - localForwarders = make(map[string]*LocalForwarder) + localForwarders = make(map[forwarderCacheKey]*LocalForwarder) forwarderMu sync.RWMutex ) +type sshClientCacheKey struct { + host string + port int + user string + auth string +} + +type forwarderCacheKey struct { + ssh sshClientCacheKey + remoteHost string + remotePort int +} + +func sshAuthFingerprint(config connection.SSHConfig) string { + hasher := sha256.New() + _, _ = hasher.Write([]byte(config.Password)) + _, _ = hasher.Write([]byte{0}) + _, _ = hasher.Write([]byte(config.KeyPath)) + if config.KeyPath != "" { + if st, err := os.Stat(config.KeyPath); err == nil { + _, _ = hasher.Write([]byte{0}) + _, _ = hasher.Write([]byte(st.ModTime().UTC().Format(time.RFC3339Nano))) + _, _ = hasher.Write([]byte{0}) + _, _ = hasher.Write([]byte(strconv.FormatInt(st.Size(), 10))) + } else { + _, _ = hasher.Write([]byte{0}) + _, _ = hasher.Write([]byte("stat_err")) + } + } + sum := hasher.Sum(nil) + return hex.EncodeToString(sum[:8]) +} + +func newSSHClientCacheKey(config connection.SSHConfig) sshClientCacheKey { + return sshClientCacheKey{ + host: config.Host, + port: config.Port, + user: config.User, + auth: sshAuthFingerprint(config), + } +} + +func formatSSHClientKeyForLog(key sshClientCacheKey) string { + return fmt.Sprintf("%s:%d 用户=%s", key.host, key.port, key.user) +} + // LocalForwarder represents a local port forwarder through SSH type LocalForwarder struct { LocalAddr string @@ -249,9 +298,13 @@ func (f *LocalForwarder) IsClosed() bool { // GetOrCreateLocalForwarder returns a cached forwarder or creates a new one func GetOrCreateLocalForwarder(sshConfig connection.SSHConfig, remoteHost string, remotePort int) (*LocalForwarder, error) { - key := fmt.Sprintf("%s:%d:%s->%s:%d", - sshConfig.Host, sshConfig.Port, sshConfig.User, - remoteHost, remotePort) + key := forwarderCacheKey{ + ssh: newSSHClientCacheKey(sshConfig), + remoteHost: remoteHost, + remotePort: remotePort, + } + logKey := fmt.Sprintf("%s:%d:%s->%s:%d", + sshConfig.Host, sshConfig.Port, sshConfig.User, remoteHost, remotePort) forwarderMu.RLock() forwarder, exists := localForwarders[key] @@ -259,7 +312,7 @@ func GetOrCreateLocalForwarder(sshConfig connection.SSHConfig, remoteHost string // Check if exists and is still valid if exists && forwarder != nil && !forwarder.IsClosed() { - logger.Infof("复用已有端口转发:%s", key) + logger.Infof("复用已有端口转发:%s", logKey) return forwarder, nil } @@ -287,24 +340,18 @@ func CloseAllForwarders() { forwarderMu.Lock() defer 
forwarderMu.Unlock() - for key, forwarder := range localForwarders { + for _, forwarder := range localForwarders { if forwarder != nil { _ = forwarder.Close() - logger.Infof("已关闭端口转发:%s", key) + logger.Infof("已关闭端口转发:本地 %s -> 远程 %s", forwarder.LocalAddr, forwarder.RemoteAddr) } } - localForwarders = make(map[string]*LocalForwarder) -} - - -// getSSHClientCacheKey generates a unique cache key for SSH config -func getSSHClientCacheKey(config connection.SSHConfig) string { - return fmt.Sprintf("%s:%d:%s", config.Host, config.Port, config.User) + localForwarders = make(map[forwarderCacheKey]*LocalForwarder) } // GetOrCreateSSHClient returns a cached SSH client or creates a new one func GetOrCreateSSHClient(config connection.SSHConfig) (*ssh.Client, error) { - key := getSSHClientCacheKey(config) + key := newSSHClientCacheKey(config) sshClientCacheMu.RLock() client, exists := sshClientCache[key] @@ -315,11 +362,11 @@ func GetOrCreateSSHClient(config connection.SSHConfig) (*ssh.Client, error) { session, err := client.NewSession() if err == nil { session.Close() - logger.Infof("复用已有 SSH 连接:%s", key) + logger.Infof("复用已有 SSH 连接:%s", formatSSHClientKeyForLog(key)) return client, nil } // Connection is dead, remove from cache - logger.Warnf("SSH 连接已断开,重新建立:%s (错误: %v)", key, err) + logger.Warnf("SSH 连接已断开,重新建立:%s (错误: %v)", formatSSHClientKeyForLog(key), err) sshClientCacheMu.Lock() delete(sshClientCache, key) sshClientCacheMu.Unlock() @@ -338,7 +385,7 @@ func GetOrCreateSSHClient(config connection.SSHConfig) (*ssh.Client, error) { sshClientCache[key] = client sshClientCacheMu.Unlock() - logger.Infof("已缓存 SSH 连接:%s", key) + logger.Infof("已缓存 SSH 连接:%s", formatSSHClientKeyForLog(key)) return client, nil } @@ -367,9 +414,8 @@ func CloseAllSSHClients() { for key, client := range sshClientCache { if client != nil { _ = client.Close() - logger.Infof("已关闭 SSH 连接:%s", key) + logger.Infof("已关闭 SSH 连接:%s", formatSSHClientKeyForLog(key)) } } - sshClientCache = make(map[string]*ssh.Client) + sshClientCache = make(map[sshClientCacheKey]*ssh.Client) } - diff --git a/internal/ssh/ssh_cache_key_test.go b/internal/ssh/ssh_cache_key_test.go new file mode 100644 index 0000000..16322c2 --- /dev/null +++ b/internal/ssh/ssh_cache_key_test.go @@ -0,0 +1,46 @@ +package ssh + +import ( + "testing" + + "GoNavi-Wails/internal/connection" +) + +func TestNewSSHClientCacheKey_DiffPassword(t *testing.T) { + a := newSSHClientCacheKey(connection.SSHConfig{ + Host: "127.0.0.1", + Port: 22, + User: "root", + Password: "a", + }) + b := newSSHClientCacheKey(connection.SSHConfig{ + Host: "127.0.0.1", + Port: 22, + User: "root", + Password: "b", + }) + if a == b { + t.Fatalf("expected different cache key when password differs") + } + if a.host != b.host || a.port != b.port || a.user != b.user { + t.Fatalf("expected host/port/user to stay identical") + } +} + +func TestNewSSHClientCacheKey_DiffKeyPath(t *testing.T) { + a := newSSHClientCacheKey(connection.SSHConfig{ + Host: "127.0.0.1", + Port: 22, + User: "root", + KeyPath: "/tmp/a.key", + }) + b := newSSHClientCacheKey(connection.SSHConfig{ + Host: "127.0.0.1", + Port: 22, + User: "root", + KeyPath: "/tmp/b.key", + }) + if a == b { + t.Fatalf("expected different cache key when keyPath differs") + } +} From eef973b7fc5625a584706e410e91f1fd71d868d4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=87=8C=E5=B0=81?= <49424247+fengin@users.noreply.github.com> Date: Thu, 12 Mar 2026 10:04:49 +0800 Subject: [PATCH 42/48] =?UTF-8?q?fix:=20KingBase=20=E8=BF=9E=E6=8E=A5?= 
=?UTF-8?q?=E5=90=8E=E8=87=AA=E5=8A=A8=E8=AE=BE=E7=BD=AE=20search=5Fpath?= =?UTF-8?q?=EF=BC=8C=E4=BF=AE=E5=A4=8D=E8=87=AA=E5=AE=9A=E4=B9=89=20schema?= =?UTF-8?q?=20=E4=B8=8B=E8=A1=A8=E6=9F=A5=E8=AF=A2=E6=8A=A5=20relation=20d?= =?UTF-8?q?oes=20not=20exist=20=E7=9A=84=E9=97=AE=E9=A2=98=20(#215)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- internal/db/kingbase_impl.go | 171 +++++++---------------------------- 1 file changed, 33 insertions(+), 138 deletions(-) diff --git a/internal/db/kingbase_impl.go b/internal/db/kingbase_impl.go index c227506..2ef461b 100644 --- a/internal/db/kingbase_impl.go +++ b/internal/db/kingbase_impl.go @@ -144,164 +144,59 @@ func (k *KingbaseDB) Connect(config connection.ConnectionConfig) error { if idx > 0 { logger.Warnf("人大金仓 SSL 优先连接失败,已回退至明文连接") } - - k.reconnectWithPreferredSearchPathIfNeeded(attempt) + // 连接成功后自动设置 search_path,使用户 SQL 不需要手动带 schema 前缀 + k.initSearchPath() return nil } return fmt.Errorf("连接建立后验证失败:%s", strings.Join(failures, ";")) } -func (k *KingbaseDB) reconnectWithPreferredSearchPathIfNeeded(config connection.ConnectionConfig) { +// initSearchPath 查询当前数据库中所有用户 schema,并设置 search_path 以确保 +// 用户在 SQL 编辑器中不带 schema 前缀也能找到表。 +// KingBase 默认 search_path 为 "$user", public,对于自定义 schema 下的表不可见。 +func (k *KingbaseDB) initSearchPath() { if k.conn == nil { return } - timeout := k.pingTimeout - if timeout <= 0 { - timeout = 5 * time.Second - } - ctx, cancel := utils.ContextWithTimeout(timeout) - defer cancel() + query := `SELECT nspname FROM pg_namespace + WHERE nspname NOT IN ('pg_catalog', 'information_schema') + AND nspname NOT LIKE 'pg_%' + ORDER BY nspname` - var currentSchema string - if err := k.conn.QueryRowContext(ctx, "SELECT current_schema()").Scan(¤tSchema); err != nil { - logger.Warnf("人大金仓读取当前 schema 失败:%v", err) - return - } - - if schema := strings.TrimSpace(currentSchema); schema != "" && !strings.EqualFold(schema, "public") { - return - } - - searchPath, chosenSchema := k.detectPreferredSearchPath(ctx, config) - if strings.TrimSpace(searchPath) == "" { - return - } - - oldConn := k.conn - prevSearchPath := k.defaultSearchPath - k.defaultSearchPath = searchPath - - dsn := k.getDSN(config) - newConn, err := sql.Open("kingbase", dsn) + rows, err := k.conn.Query(query) if err != nil { - k.defaultSearchPath = prevSearchPath - logger.Warnf("人大金仓重连以设置 search_path 失败:%v", err) + logger.Warnf("人大金仓查询用户 schema 失败,跳过 search_path 设置:%v", err) return } - if err := newConn.PingContext(ctx); err != nil { - _ = newConn.Close() - k.defaultSearchPath = prevSearchPath - logger.Warnf("人大金仓重连后验证失败:%v", err) - return - } - - k.conn = newConn - _ = oldConn.Close() - logger.Infof("人大金仓已设置默认 schema:%s", chosenSchema) -} - -func (k *KingbaseDB) kingbaseSchemaExists(ctx context.Context, schema string) (bool, error) { - if schema = strings.TrimSpace(schema); schema == "" { - return false, nil - } - - var one int - err := k.conn.QueryRowContext(ctx, "SELECT 1 FROM pg_namespace WHERE nspname = $1", schema).Scan(&one) - if err == sql.ErrNoRows { - return false, nil - } - if err != nil { - return false, err - } - return true, nil -} - -func (k *KingbaseDB) detectPreferredSearchPath(ctx context.Context, config connection.ConnectionConfig) (searchPath string, chosenSchema string) { - // 1) 优先使用与数据库名/用户名同名的 schema(需要存在) - candidates := []string{ - normalizeKingbaseIdentifier(config.Database), - normalizeKingbaseIdentifier(config.User), - } - - seen := make(map[string]struct{}, len(candidates)) - for _, candidate := range 
candidates { - if candidate == "" || strings.EqualFold(candidate, "public") { - continue - } - key := strings.ToLower(candidate) - if _, ok := seen[key]; ok { - continue - } - seen[key] = struct{}{} - - exists, err := k.kingbaseSchemaExists(ctx, candidate) - if err != nil { - logger.Warnf("人大金仓检查 schema 是否存在失败:schema=%s err=%v", candidate, err) - continue - } - if !exists { - continue - } - - return fmt.Sprintf("%s,public", quoteKingbaseIdent(candidate)), candidate - } - - // 2) 如果只有一个“用户 schema”含有表,则将其作为默认 schema(更符合 DB GUI 的直觉) - schema, err := k.detectSingleUserSchemaWithTables(ctx) - if err != nil { - logger.Warnf("人大金仓探测默认 schema 失败:%v", err) - return "", "" - } - if schema == "" || strings.EqualFold(schema, "public") { - return "", "" - } - return fmt.Sprintf("%s,public", quoteKingbaseIdent(schema)), schema -} - -func (k *KingbaseDB) detectSingleUserSchemaWithTables(ctx context.Context) (string, error) { - if k.conn == nil { - return "", nil - } - - // 仅在“唯一用户 schema”场景做兜底,避免多 schema 下误选导致对象解析歧义。 - // 注:information_schema.tables 的视图在 PG/金仓语义稳定且权限要求相对低。 - query := ` -SELECT table_schema, COUNT(*) AS table_count -FROM information_schema.tables -WHERE table_type = 'BASE TABLE' - AND table_schema NOT IN ('pg_catalog', 'information_schema', 'public') - AND table_schema NOT LIKE 'pg_%' -GROUP BY table_schema -ORDER BY table_count DESC, table_schema -LIMIT 2` - - rows, err := k.conn.QueryContext(ctx, query) - if err != nil { - return "", err - } defer rows.Close() - type row struct { - schema string - count int64 - } - var results []row + var schemas []string for rows.Next() { - var r row - if scanErr := rows.Scan(&r.schema, &r.count); scanErr != nil { - return "", scanErr + var name string + if err := rows.Scan(&name); err != nil { + continue + } + name = strings.TrimSpace(name) + if name != "" { + // 使用 SQL 标准的双引号包裹标识符,内部双引号需要转义为 "" + escaped := strings.ReplaceAll(name, `"`, `""`) + schemas = append(schemas, `"`+escaped+`"`) } - results = append(results, r) - } - if err := rows.Err(); err != nil { - return "", err } - if len(results) != 1 { - return "", nil + if len(schemas) == 0 { + return } - return normalizeKingbaseIdentifier(results[0].schema), nil + + // 确保 public 在列表中(如果存在的话),构建 search_path + setSQL := fmt.Sprintf("SET search_path TO %s", strings.Join(schemas, ", ")) + if _, err := k.conn.Exec(setSQL); err != nil { + logger.Warnf("人大金仓设置 search_path 失败:%v SQL=%s", err, setSQL) + return + } + + logger.Infof("人大金仓 search_path 已设置:%s", strings.Join(schemas, ", ")) } func (k *KingbaseDB) Close() error { From e6af5f966b39479f924e84b8d4aab2a6b85223a0 Mon Sep 17 00:00:00 2001 From: Syngnat Date: Thu, 12 Mar 2026 16:45:46 +0800 Subject: [PATCH 43/48] =?UTF-8?q?=F0=9F=94=A7=20fix(driver/kingbase,mongod?= =?UTF-8?q?b):=20=E4=BF=AE=E5=A4=8D=E5=A4=96=E7=BD=AE=E9=A9=B1=E5=8A=A8?= =?UTF-8?q?=E4=BA=8B=E5=8A=A1=E5=BC=95=E7=94=A8=E4=B8=8E=E8=BF=9E=E6=8E=A5?= =?UTF-8?q?=E6=B5=8B=E8=AF=95=E9=93=BE=E8=B7=AF=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 金仓外置驱动链路增加表名与变更字段归一化,修复 ApplyChanges 场景下双引号转义异常导致的 SQL 语法错误 - 新增金仓公共标识符工具并复用到 kingbase_impl 与 optional_driver_agent_impl,统一处理多重转义、schema.table 拆分与引用规范 - 金仓代理连接后自动探测并设置 search_path,降低查询时必须手写 schema 前缀的概率 - MongoDB 连接参数改为显式 host/hosts 优先,避免被 URI 中 localhost 覆盖;代理链路保留目标地址不再改写为本地地址 - 连接测试增加前后端超时收敛与日志增强,避免长时间转圈;连接错误文案在未启用 TLS 时移除误导性的“SSL”前缀 - 统一日志级别为 INFO/WARN/ERROR,默认日志目录收敛到 ~/.GoNavi/Logs,并补充驱动构建脚本 build-driver-agents.sh --- build-driver-agents.sh | 228 +++++++++++++++ 
frontend/src/components/ConnectionModal.tsx | 61 +++- internal/app/app.go | 79 ++++- internal/app/app_connect_error_test.go | 84 ++++++ internal/app/db_proxy.go | 4 +- internal/app/db_proxy_test.go | 64 ++++ internal/app/global_proxy.go | 49 +++- internal/app/methods_db.go | 19 +- internal/app/methods_db_timeout_test.go | 31 ++ internal/app/methods_driver.go | 55 +++- internal/db/kingbase_identifier_utils.go | 164 +++++++++++ internal/db/kingbase_identifier_utils_test.go | 52 ++++ internal/db/kingbase_impl.go | 264 +++++------------ internal/db/kingbase_impl_test.go | 3 + internal/db/mongodb_impl.go | 82 +++++- internal/db/mongodb_impl_uri_test.go | 39 +++ internal/db/mongodb_impl_v1.go | 82 +++++- internal/db/mongodb_impl_v1_uri_test.go | 25 ++ internal/db/optional_driver_agent_impl.go | 276 ++++++++++++++++++ .../db/optional_driver_agent_impl_test.go | 75 +++-- internal/logger/logger.go | 56 ++-- 21 files changed, 1501 insertions(+), 291 deletions(-) create mode 100755 build-driver-agents.sh create mode 100644 internal/app/app_connect_error_test.go create mode 100644 internal/app/db_proxy_test.go create mode 100644 internal/app/methods_db_timeout_test.go create mode 100644 internal/db/kingbase_identifier_utils.go create mode 100644 internal/db/kingbase_identifier_utils_test.go create mode 100644 internal/db/mongodb_impl_uri_test.go create mode 100644 internal/db/mongodb_impl_v1_uri_test.go diff --git a/build-driver-agents.sh b/build-driver-agents.sh new file mode 100755 index 0000000..e3734d2 --- /dev/null +++ b/build-driver-agents.sh @@ -0,0 +1,228 @@ +#!/usr/bin/env bash + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +cd "$SCRIPT_DIR" + +DEFAULT_DRIVERS=(mariadb doris sphinx sqlserver sqlite duckdb dameng kingbase highgo vastbase mongodb tdengine clickhouse) + +usage() { + cat <<'EOF' +用法: + ./build-driver-agents.sh [选项] + +选项: + --drivers <列表> 指定驱动列表(逗号分隔),例如:kingbase,mongodb + --platform + 目标平台,默认使用当前 Go 环境(go env GOOS/GOARCH) + --out-dir <目录> 输出目录根路径,默认:dist/driver-agents + --bundle-name <文件名> 驱动总包 zip 名称,默认:GoNavi-DriverAgents.zip + --strict 任一驱动构建失败即中断(默认失败后继续,最后汇总) + -h, --help 显示帮助 + +示例: + ./build-driver-agents.sh + ./build-driver-agents.sh --drivers kingbase + ./build-driver-agents.sh --platform windows/amd64 --drivers kingbase,mongodb +EOF +} + +normalize_driver() { + local name + name="$(echo "${1:-}" | tr '[:upper:]' '[:lower:]' | xargs)" + case "$name" in + doris|diros) echo "doris" ;; + mariadb|sphinx|sqlserver|sqlite|duckdb|dameng|kingbase|highgo|vastbase|mongodb|tdengine|clickhouse) + echo "$name" + ;; + *) + return 1 + ;; + esac +} + +build_driver_name() { + case "$1" in + doris) echo "diros" ;; + *) echo "$1" ;; + esac +} + +platform_dir_name() { + case "$1" in + windows) echo "Windows" ;; + darwin) echo "MacOS" ;; + linux) echo "Linux" ;; + *) echo "Unknown" ;; + esac +} + +driver_csv="" +target_platform="" +out_root="dist/driver-agents" +bundle_name="GoNavi-DriverAgents.zip" +strict_mode="false" + +while [[ $# -gt 0 ]]; do + case "$1" in + --drivers) + driver_csv="${2:-}" + shift 2 + ;; + --platform) + target_platform="${2:-}" + shift 2 + ;; + --out-dir) + out_root="${2:-}" + shift 2 + ;; + --bundle-name) + bundle_name="${2:-}" + shift 2 + ;; + --strict) + strict_mode="true" + shift + ;; + -h|--help) + usage + exit 0 + ;; + *) + echo "❌ 未知参数:$1" + usage + exit 1 + ;; + esac +done + +if ! 
command -v go >/dev/null 2>&1; then + echo "❌ 未找到 Go,请先安装 Go 并确保 go 在 PATH 中。" + exit 1 +fi + +if [[ -z "$target_platform" ]]; then + target_platform="$(go env GOOS)/$(go env GOARCH)" +fi + +if [[ "$target_platform" != */* ]]; then + echo "❌ --platform 参数格式错误,应为 GOOS/GOARCH,例如 darwin/arm64" + exit 1 +fi + +goos="${target_platform%%/*}" +goarch="${target_platform##*/}" +platform_key="${goos}-${goarch}" +platform_dir="$(platform_dir_name "$goos")" + +declare -a drivers=() +if [[ -n "$driver_csv" ]]; then + IFS=',' read -r -a raw_drivers <<<"$driver_csv" + for item in "${raw_drivers[@]}"; do + normalized="$(normalize_driver "$item")" || { + echo "❌ 不支持的驱动:$item" + exit 1 + } + drivers+=("$normalized") + done +else + drivers=("${DEFAULT_DRIVERS[@]}") +fi + +output_dir="${out_root%/}/${platform_key}" +bundle_stage_dir="$(mktemp -d "${TMPDIR:-/tmp}/gonavi-driver-bundle.XXXXXX")" +bundle_platform_dir="$bundle_stage_dir/$platform_dir" + +cleanup() { + rm -rf "$bundle_stage_dir" +} +trap cleanup EXIT + +mkdir -p "$output_dir" "$bundle_platform_dir" +output_dir_abs="$(cd "$output_dir" && pwd)" +bundle_zip_path="$output_dir_abs/$bundle_name" + +declare -a built_assets=() +declare -a failed_drivers=() +declare -a skipped_drivers=() + +echo "🚀 开始构建 optional-driver-agent" +echo " 平台:$goos/$goarch" +echo " 输出目录:$output_dir_abs" +echo " 驱动列表:${drivers[*]}" + +for driver in "${drivers[@]}"; do + if [[ "$driver" == "duckdb" && "$goos" == "windows" && "$goarch" != "amd64" ]]; then + echo "⚠️ 跳过 duckdb(仅支持 windows/amd64)" + skipped_drivers+=("$driver") + continue + fi + + build_driver="$(build_driver_name "$driver")" + tag="gonavi_${build_driver}_driver" + asset_name="${driver}-driver-agent-${goos}-${goarch}" + if [[ "$goos" == "windows" ]]; then + asset_name="${asset_name}.exe" + fi + output_path="$output_dir_abs/$asset_name" + + cgo_enabled=0 + if [[ "$driver" == "duckdb" ]]; then + cgo_enabled=1 + fi + + echo "🔧 构建 $driver -> $asset_name (tag=$tag, CGO_ENABLED=$cgo_enabled)" + set +e + CGO_ENABLED="$cgo_enabled" GOOS="$goos" GOARCH="$goarch" GOTOOLCHAIN=auto \ + go build -tags "$tag" -trimpath -ldflags "-s -w" -o "$output_path" ./cmd/optional-driver-agent + build_exit=$? 
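+  # Capture the build exit code while errexit is suspended; the check below records failed drivers and, unless --strict is set, continues with the next driver.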
+ set -e + + if [[ $build_exit -ne 0 ]]; then + echo "❌ 构建失败:$driver" + failed_drivers+=("$driver") + if [[ "$strict_mode" == "true" ]]; then + exit $build_exit + fi + continue + fi + + cp "$output_path" "$bundle_platform_dir/$asset_name" + built_assets+=("$asset_name") +done + +if [[ ${#built_assets[@]} -eq 0 ]]; then + echo "❌ 未成功构建任何驱动代理。" + exit 1 +fi + +rm -f "$bundle_zip_path" +if command -v zip >/dev/null 2>&1; then + ( + cd "$bundle_stage_dir" + zip -qry "$bundle_zip_path" "$platform_dir" + ) +elif command -v ditto >/dev/null 2>&1; then + ( + cd "$bundle_stage_dir" + ditto -c -k --sequesterRsrc --keepParent "$platform_dir" "$bundle_zip_path" + ) +else + echo "❌ 未找到 zip/ditto,无法生成驱动总包 zip。" + exit 1 +fi + +echo "" +echo "✅ 构建完成" +echo " 单文件输出目录:$output_dir_abs" +echo " 驱动总包:$bundle_zip_path" +echo " 已构建:${built_assets[*]}" +if [[ ${#skipped_drivers[@]} -gt 0 ]]; then + echo " 已跳过:${skipped_drivers[*]}" +fi +if [[ ${#failed_drivers[@]} -gt 0 ]]; then + echo "⚠️ 构建失败驱动:${failed_drivers[*]}" + exit 2 +fi diff --git a/frontend/src/components/ConnectionModal.tsx b/frontend/src/components/ConnectionModal.tsx index ce874a9..1f9d9b5 100644 --- a/frontend/src/components/ConnectionModal.tsx +++ b/frontend/src/components/ConnectionModal.tsx @@ -1049,6 +1049,12 @@ const ConnectionModal: React.FC<{ useEffect(() => { if (open) { + setLoading(false); + testInFlightRef.current = false; + if (testTimerRef.current !== null) { + window.clearTimeout(testTimerRef.current); + testTimerRef.current = null; + } setTestResult(null); // Reset test result setTestErrorLogOpen(false); setDbList([]); @@ -1240,6 +1246,22 @@ const ConnectionModal: React.FC<{ }, 0); }; + const withClientTimeout = async (promise: Promise, timeoutMs: number, timeoutMessage: string): Promise => { + let timer: number | null = null; + try { + return await Promise.race([ + promise, + new Promise((_, reject) => { + timer = window.setTimeout(() => reject(new Error(timeoutMessage)), timeoutMs); + }), + ]); + } finally { + if (timer !== null) { + window.clearTimeout(timer); + } + } + }; + const buildTestFailureMessage = (reason: unknown, fallback: string) => { const text = String(reason ?? '').trim(); const normalized = text && text !== 'undefined' && text !== 'null' ? text : fallback; @@ -1262,12 +1284,21 @@ const ConnectionModal: React.FC<{ setLoading(true); setTestResult(null); const config = await buildConfig(values, false); + const timeoutSecondsRaw = Number(values.timeout); + const timeoutSeconds = Number.isFinite(timeoutSecondsRaw) && timeoutSecondsRaw > 0 + ? Math.min(timeoutSecondsRaw, MAX_TIMEOUT_SECONDS) + : 30; + const rpcTimeoutMs = (timeoutSeconds + 5) * 1000; // Use different API for Redis const isRedisType = values.type === 'redis'; - const res = isRedisType - ? await RedisConnect(config as any) - : await TestConnection(config as any); + const res = await withClientTimeout( + isRedisType + ? RedisConnect(config as any) + : TestConnection(config as any), + rpcTimeoutMs, + `连接测试超时(>${timeoutSeconds} 秒),请检查网络/代理/SSH配置后重试` + ); if (res.success) { setTestResult({ type: 'success', message: res.message }); @@ -1275,7 +1306,11 @@ const ConnectionModal: React.FC<{ setRedisDbList(Array.from({ length: 16 }, (_, i) => i)); } else { // Other databases: fetch database list - const dbRes = await DBGetDatabases(config as any); + const dbRes = await withClientTimeout( + DBGetDatabases(config as any), + rpcTimeoutMs, + `连接成功但拉取数据库列表超时(>${timeoutSeconds} 秒)` + ); if (dbRes.success) { const dbRows = Array.isArray(dbRes.data) ? 
dbRes.data : []; const dbs = dbRows @@ -1572,12 +1607,13 @@ const ConnectionModal: React.FC<{ }; }; - const handleTypeSelect = async (type: string) => { - const unavailableReason = await resolveDriverUnavailableReason(type); - if (unavailableReason) { - const normalized = normalizeDriverType(type); - const driverName = driverStatusMap[normalized]?.name || type; - setTypeSelectWarning({ driverName, reason: unavailableReason }); + const handleTypeSelect = (type: string) => { + const normalized = normalizeDriverType(type); + const snapshot = driverStatusMap[normalized]; + if (snapshot && !snapshot.connectable) { + const driverName = snapshot.name || type; + const reason = snapshot.message || `${driverName} 驱动未安装启用,请先在驱动管理中安装`; + setTypeSelectWarning({ driverName, reason }); return; } setTypeSelectWarning(null); @@ -1679,6 +1715,10 @@ const ConnectionModal: React.FC<{ setMongoMembers([]); setStep(2); + + if (!driverStatusLoaded || !snapshot) { + void refreshDriverStatus(); + } }; const isFileDb = isFileDatabaseType(dbType); @@ -1851,7 +1891,6 @@ const ConnectionModal: React.FC<{ > {isFileDb ? ( diff --git a/internal/app/app.go b/internal/app/app.go index 0709a27..4a0aff9 100644 --- a/internal/app/app.go +++ b/internal/app/app.go @@ -8,6 +8,8 @@ import ( "errors" "fmt" "net" + "net/url" + "os" "strings" "sync" "time" @@ -218,6 +220,7 @@ func wrapConnectError(config connection.ConnectionConfig, err error) error { if err == nil { return nil } + err = sanitizeMongoConnectErrorLabel(config, err) var netErr net.Error if errors.Is(err, context.DeadlineExceeded) || (errors.As(err, &netErr) && netErr.Timeout()) { @@ -231,6 +234,73 @@ func wrapConnectError(config connection.ConnectionConfig, err error) error { return withLogHint{err: err, logPath: logger.Path()} } +type errorMessageOverride struct { + message string + cause error +} + +func (e errorMessageOverride) Error() string { + return e.message +} + +func (e errorMessageOverride) Unwrap() error { + return e.cause +} + +func sanitizeMongoConnectErrorLabel(config connection.ConnectionConfig, err error) error { + if err == nil { + return nil + } + if strings.ToLower(strings.TrimSpace(config.Type)) != "mongodb" { + return err + } + if mongoConnectUsesTLS(config) { + return err + } + original := err.Error() + rewritten := strings.ReplaceAll(original, "SSL 主库凭据", "主库凭据") + rewritten = strings.ReplaceAll(rewritten, "SSL 从库凭据", "从库凭据") + if rewritten == original { + return err + } + return errorMessageOverride{ + message: rewritten, + cause: err, + } +} + +func mongoConnectUsesTLS(config connection.ConnectionConfig) bool { + if config.UseSSL { + return true + } + uriText := strings.TrimSpace(config.URI) + if uriText == "" { + return false + } + parsed, err := url.Parse(uriText) + if err != nil { + return false + } + for _, key := range []string{"tls", "ssl"} { + if enabled, known := parseMongoBool(parsed.Query().Get(key)); known { + return enabled + } + } + return strings.EqualFold(strings.TrimSpace(parsed.Scheme), "mongodb+srv") +} + +func parseMongoBool(raw string) (enabled bool, known bool) { + value := strings.ToLower(strings.TrimSpace(raw)) + switch value { + case "1", "true", "t", "yes", "y", "on", "required": + return true, true + case "0", "false", "f", "no", "n", "off", "disable", "disabled": + return false, true + default: + return false, false + } +} + type withLogHint struct { err error logPath string @@ -238,10 +308,15 @@ type withLogHint struct { func (e withLogHint) Error() string { message := normalizeErrorMessage(e.err) - if 
strings.TrimSpace(e.logPath) == "" { + path := strings.TrimSpace(e.logPath) + if path == "" { return message } - return fmt.Sprintf("%s(详细日志:%s)", message, e.logPath) + info, statErr := os.Stat(path) + if statErr != nil || info.IsDir() || info.Size() <= 0 { + return message + } + return fmt.Sprintf("%s(详细日志:%s)", message, path) } func (e withLogHint) Unwrap() error { diff --git a/internal/app/app_connect_error_test.go b/internal/app/app_connect_error_test.go new file mode 100644 index 0000000..36bb99e --- /dev/null +++ b/internal/app/app_connect_error_test.go @@ -0,0 +1,84 @@ +package app + +import ( + "errors" + "os" + "path/filepath" + "strings" + "testing" + + "GoNavi-Wails/internal/connection" +) + +func TestWrapConnectError_MongoNoSSL_RemovesMisleadingSSLLabel(t *testing.T) { + config := connection.ConnectionConfig{ + Type: "mongodb", + UseSSL: false, + } + sourceErr := errors.New("MongoDB 连接失败:SSL 主库凭据验证失败: mock error") + + wrapped := wrapConnectError(config, sourceErr) + text := wrapped.Error() + if strings.Contains(text, "SSL 主库凭据") { + t.Fatalf("expected ssl label to be removed when TLS disabled, got: %s", text) + } + if !strings.Contains(text, "主库凭据验证失败") { + t.Fatalf("expected auth label to remain, got: %s", text) + } +} + +func TestWrapConnectError_MongoURIForcesTLS_KeepsSSLLabel(t *testing.T) { + config := connection.ConnectionConfig{ + Type: "mongodb", + UseSSL: false, + URI: "mongodb://user:pass@127.0.0.1:27017/admin?tls=true", + } + sourceErr := errors.New("MongoDB 连接失败:SSL 主库凭据验证失败: mock error") + + wrapped := wrapConnectError(config, sourceErr) + text := wrapped.Error() + if !strings.Contains(text, "SSL 主库凭据") { + t.Fatalf("expected ssl label to remain when URI enables TLS, got: %s", text) + } +} + +func TestWrapConnectError_MongoSRVDefaultTLS_KeepsSSLLabel(t *testing.T) { + config := connection.ConnectionConfig{ + Type: "mongodb", + UseSSL: false, + URI: "mongodb+srv://user:pass@cluster0.example.com/admin", + } + sourceErr := errors.New("MongoDB 连接失败:SSL 主库凭据验证失败: mock error") + + wrapped := wrapConnectError(config, sourceErr) + text := wrapped.Error() + if !strings.Contains(text, "SSL 主库凭据") { + t.Fatalf("expected ssl label to remain for mongodb+srv default TLS, got: %s", text) + } +} + +func TestWithLogHintError_OmitEmptyLogPath(t *testing.T) { + dir := t.TempDir() + logPath := filepath.Join(dir, "gonavi.log") + if err := os.WriteFile(logPath, nil, 0o644); err != nil { + t.Fatalf("write empty log failed: %v", err) + } + err := withLogHint{err: errors.New("连接失败"), logPath: logPath} + text := err.Error() + if strings.Contains(text, "详细日志:") { + t.Fatalf("expected no log hint for empty file, got: %s", text) + } +} + +func TestWithLogHintError_IncludeNonEmptyLogPath(t *testing.T) { + dir := t.TempDir() + logPath := filepath.Join(dir, "gonavi.log") + if err := os.WriteFile(logPath, []byte("log entry\n"), 0o644); err != nil { + t.Fatalf("write log failed: %v", err) + } + err := withLogHint{err: errors.New("连接失败"), logPath: logPath} + text := err.Error() + if !strings.Contains(text, "详细日志:"+logPath) { + t.Fatalf("expected log hint with path, got: %s", text) + } +} diff --git a/internal/app/db_proxy.go b/internal/app/db_proxy.go index e3228b6..14af069 100644 --- a/internal/app/db_proxy.go +++ b/internal/app/db_proxy.go @@ -73,8 +73,8 @@ func resolveDialConfigWithProxy(raw connection.ConnectionConfig) (connection.Con // 文件型/自定义 DSN 类型不走标准 host:port,不在此层改写。 return config, nil } - if normalizedType == "mongodb" && config.MongoSRV { - // Mongo SRV 由驱动侧 Dialer 处理代理,避免破坏 DNS SRV 
拓扑发现。 + if normalizedType == "mongodb" { + // MongoDB 统一由驱动侧 Dialer 处理代理,保留原始目标地址,避免将连接目标改写为本地转发地址。 return config, nil } diff --git a/internal/app/db_proxy_test.go b/internal/app/db_proxy_test.go new file mode 100644 index 0000000..5d44170 --- /dev/null +++ b/internal/app/db_proxy_test.go @@ -0,0 +1,64 @@ +package app + +import ( + "reflect" + "testing" + + "GoNavi-Wails/internal/connection" +) + +func TestResolveDialConfigWithProxy_MongoKeepsTargetAddress(t *testing.T) { + hosts := []string{"10.20.30.40:27017", "10.20.30.41:27017"} + raw := connection.ConnectionConfig{ + Type: "mongodb", + Host: "10.20.30.40", + Port: 27017, + UseProxy: true, + Proxy: connection.ProxyConfig{ + Type: "socks5", + Host: "127.0.0.1", + Port: 1080, + }, + Hosts: hosts, + } + + got, err := resolveDialConfigWithProxy(raw) + if err != nil { + t.Fatalf("resolveDialConfigWithProxy returned error: %v", err) + } + if got.Host != raw.Host || got.Port != raw.Port { + t.Fatalf("mongo target address should be kept, got=%s:%d want=%s:%d", got.Host, got.Port, raw.Host, raw.Port) + } + if !got.UseProxy { + t.Fatalf("mongo should keep UseProxy=true for driver-level dialer") + } + if !reflect.DeepEqual(got.Hosts, hosts) { + t.Fatalf("mongo hosts should be kept, got=%v want=%v", got.Hosts, hosts) + } +} + +func TestResolveDialConfigWithProxy_MongoSRVKeepsTargetAddress(t *testing.T) { + raw := connection.ConnectionConfig{ + Type: "mongodb", + Host: "cluster0.example.com", + Port: 27017, + MongoSRV: true, + UseProxy: true, + Proxy: connection.ProxyConfig{ + Type: "http", + Host: "127.0.0.1", + Port: 7890, + }, + } + + got, err := resolveDialConfigWithProxy(raw) + if err != nil { + t.Fatalf("resolveDialConfigWithProxy returned error: %v", err) + } + if got.Host != raw.Host || got.Port != raw.Port { + t.Fatalf("mongo SRV target address should be kept, got=%s:%d want=%s:%d", got.Host, got.Port, raw.Host, raw.Port) + } + if !got.UseProxy { + t.Fatalf("mongo SRV should keep UseProxy=true for driver-level dialer") + } +} diff --git a/internal/app/global_proxy.go b/internal/app/global_proxy.go index 4361782..016fb26 100644 --- a/internal/app/global_proxy.go +++ b/internal/app/global_proxy.go @@ -72,25 +72,30 @@ func setGlobalProxyConfig(enabled bool, proxyConfig connection.ProxyConfig) (glo } func (a *App) ConfigureGlobalProxy(enabled bool, proxyConfig connection.ProxyConfig) connection.QueryResult { + before := currentGlobalProxyConfig() snapshot, err := setGlobalProxyConfig(enabled, proxyConfig) if err != nil { return connection.QueryResult{Success: false, Message: err.Error()} } - if snapshot.Enabled { - authState := "" - if strings.TrimSpace(snapshot.Proxy.User) != "" { - authState = "(认证:已配置)" + // 前端可能在同一配置下重复触发同步(例如严格模式或状态回放), + // 这里做幂等日志,避免重复刷屏。 + if !globalProxySnapshotEqual(before, snapshot) { + if snapshot.Enabled { + authState := "" + if strings.TrimSpace(snapshot.Proxy.User) != "" { + authState = "(认证:已配置)" + } + logger.Infof( + "全局代理已启用:%s://%s:%d%s", + strings.ToLower(strings.TrimSpace(snapshot.Proxy.Type)), + strings.TrimSpace(snapshot.Proxy.Host), + snapshot.Proxy.Port, + authState, + ) + } else { + logger.Infof("全局代理已关闭") } - logger.Infof( - "全局代理已启用:%s://%s:%d%s", - strings.ToLower(strings.TrimSpace(snapshot.Proxy.Type)), - strings.TrimSpace(snapshot.Proxy.Host), - snapshot.Proxy.Port, - authState, - ) - } else { - logger.Infof("全局代理已关闭") } return connection.QueryResult{ @@ -100,6 +105,24 @@ func (a *App) ConfigureGlobalProxy(enabled bool, proxyConfig connection.ProxyCon } } +func globalProxySnapshotEqual(a, b 
globalProxySnapshot) bool { + if a.Enabled != b.Enabled { + return false + } + if !a.Enabled { + return true + } + return proxyConfigEqual(a.Proxy, b.Proxy) +} + +func proxyConfigEqual(a, b connection.ProxyConfig) bool { + return strings.EqualFold(strings.TrimSpace(a.Type), strings.TrimSpace(b.Type)) && + strings.TrimSpace(a.Host) == strings.TrimSpace(b.Host) && + a.Port == b.Port && + strings.TrimSpace(a.User) == strings.TrimSpace(b.User) && + a.Password == b.Password +} + func (a *App) GetGlobalProxyConfig() connection.QueryResult { return connection.QueryResult{ Success: true, diff --git a/internal/app/methods_db.go b/internal/app/methods_db.go index b28109f..f411653 100644 --- a/internal/app/methods_db.go +++ b/internal/app/methods_db.go @@ -13,6 +13,16 @@ import ( "GoNavi-Wails/internal/utils" ) +const testConnectionTimeoutUpperBoundSeconds = 12 + +func normalizeTestConnectionConfig(config connection.ConnectionConfig) connection.ConnectionConfig { + normalized := config + if normalized.Timeout <= 0 || normalized.Timeout > testConnectionTimeoutUpperBoundSeconds { + normalized.Timeout = testConnectionTimeoutUpperBoundSeconds + } + return normalized +} + // Generic DB Methods func (a *App) DBConnect(config connection.ConnectionConfig) connection.QueryResult { @@ -28,13 +38,16 @@ func (a *App) DBConnect(config connection.ConnectionConfig) connection.QueryResu } func (a *App) TestConnection(config connection.ConnectionConfig) connection.QueryResult { - _, err := a.getDatabaseForcePing(config) + testConfig := normalizeTestConnectionConfig(config) + started := time.Now() + logger.Infof("TestConnection 开始:%s", formatConnSummary(testConfig)) + _, err := a.getDatabaseForcePing(testConfig) if err != nil { - logger.Error(err, "TestConnection 连接测试失败:%s", formatConnSummary(config)) + logger.Error(err, "TestConnection 连接测试失败:耗时=%s %s", time.Since(started).Round(time.Millisecond), formatConnSummary(testConfig)) return connection.QueryResult{Success: false, Message: err.Error()} } - logger.Infof("TestConnection 连接测试成功:%s", formatConnSummary(config)) + logger.Infof("TestConnection 连接测试成功:耗时=%s %s", time.Since(started).Round(time.Millisecond), formatConnSummary(testConfig)) return connection.QueryResult{Success: true, Message: "连接成功"} } diff --git a/internal/app/methods_db_timeout_test.go b/internal/app/methods_db_timeout_test.go new file mode 100644 index 0000000..d6cf867 --- /dev/null +++ b/internal/app/methods_db_timeout_test.go @@ -0,0 +1,31 @@ +package app + +import ( + "testing" + + "GoNavi-Wails/internal/connection" +) + +func TestNormalizeTestConnectionConfig_DefaultToUpperBound(t *testing.T) { + config := connection.ConnectionConfig{Type: "mongodb", Timeout: 0} + got := normalizeTestConnectionConfig(config) + if got.Timeout != testConnectionTimeoutUpperBoundSeconds { + t.Fatalf("expected timeout=%d, got=%d", testConnectionTimeoutUpperBoundSeconds, got.Timeout) + } +} + +func TestNormalizeTestConnectionConfig_KeepSmallerTimeout(t *testing.T) { + config := connection.ConnectionConfig{Type: "mongodb", Timeout: 6} + got := normalizeTestConnectionConfig(config) + if got.Timeout != 6 { + t.Fatalf("expected timeout=6, got=%d", got.Timeout) + } +} + +func TestNormalizeTestConnectionConfig_ClampLargeTimeout(t *testing.T) { + config := connection.ConnectionConfig{Type: "mongodb", Timeout: 60} + got := normalizeTestConnectionConfig(config) + if got.Timeout != testConnectionTimeoutUpperBoundSeconds { + t.Fatalf("expected timeout=%d, got=%d", testConnectionTimeoutUpperBoundSeconds, got.Timeout) + } +} diff 
--git a/internal/app/methods_driver.go b/internal/app/methods_driver.go index 07a13cc..ca7ce8c 100644 --- a/internal/app/methods_driver.go +++ b/internal/app/methods_driver.go @@ -2792,6 +2792,7 @@ func ensureOptionalDriverAgentBinary(a *App, definition driverDefinition, execut driverType := normalizeDriverType(definition.Type) displayName := resolveDriverDisplayName(definition) forceSourceBuild := shouldForceSourceBuildForVersion(driverType, selectedVersion) + preferSourceBuildBeforeDownload := shouldPreferSourceBuildBeforeDownload(driverType, selectedVersion) skipReuseCandidate := shouldSkipReusableAgentCandidate(driverType, selectedVersion) info, err := os.Stat(executablePath) @@ -2799,11 +2800,10 @@ func ensureOptionalDriverAgentBinary(a *App, definition driverDefinition, execut if validateErr := db.ValidateOptionalDriverAgentExecutable(driverType, executablePath); validateErr != nil { _ = os.Remove(executablePath) } else { - hash, hashErr := hashFileSHA256(executablePath) - if hashErr != nil { - return "", "", fmt.Errorf("读取已安装 %s 驱动代理摘要失败:%w", displayName, hashErr) + // 用户点击“安装/重装”时应强制刷新驱动代理,避免沿用旧二进制导致修复不生效。 + if removeErr := os.Remove(executablePath); removeErr != nil { + return "", "", fmt.Errorf("清理已安装 %s 驱动代理失败:%w", displayName, removeErr) } - return fmt.Sprintf("local://existing/%s-driver-agent", driverType), hash, nil } } if err == nil && info.IsDir() { @@ -2834,6 +2834,22 @@ func ensureOptionalDriverAgentBinary(a *App, definition driverDefinition, execut } var downloadErrs []string + var sourceBuildAttempted bool + var sourceBuildErr error + + if !forceSourceBuild && preferSourceBuildBeforeDownload { + sourceBuildAttempted = true + if a != nil { + a.emitDriverDownloadProgress(driverType, "downloading", 16, 100, fmt.Sprintf("优先使用本地源码构建 %s 驱动代理", displayName)) + } + hash, buildErr := buildOptionalDriverAgentFromSource(definition, executablePath, selectedVersion) + if buildErr == nil { + return fmt.Sprintf("local://go-build/%s-driver-agent", driverType), hash, nil + } + sourceBuildErr = buildErr + logger.Warnf("预先本地构建 %s 驱动代理失败,将继续尝试下载预编译包:%v", displayName, buildErr) + } + if !forceSourceBuild { downloadURLs := resolveOptionalDriverAgentDownloadURLs(definition, downloadURL) if len(downloadURLs) > 0 { @@ -2866,9 +2882,15 @@ func ensureOptionalDriverAgentBinary(a *App, definition driverDefinition, execut a.emitDriverDownloadProgress(driverType, "downloading", 92, 100, "未命中预编译包,尝试开发态本地构建") } - hash, buildErr := buildOptionalDriverAgentFromSource(definition, executablePath, selectedVersion) - if buildErr == nil { - return fmt.Sprintf("local://go-build/%s-driver-agent", driverType), hash, nil + var buildErr error + if sourceBuildAttempted { + buildErr = sourceBuildErr + } else { + hash, runErr := buildOptionalDriverAgentFromSource(definition, executablePath, selectedVersion) + buildErr = runErr + if buildErr == nil { + return fmt.Sprintf("local://go-build/%s-driver-agent", driverType), hash, nil + } } var parts []string @@ -3086,12 +3108,25 @@ func shouldForceSourceBuildForVersion(driverType string, selectedVersion string) return resolveMongoDriverMajorFromVersion(selectedVersion) == 1 } -func shouldSkipReusableAgentCandidate(driverType string, selectedVersion string) bool { - if normalizeDriverType(driverType) != "mongodb" { +func shouldPreferSourceBuildBeforeDownload(driverType string, selectedVersion string) bool { + _ = selectedVersion + switch normalizeDriverType(driverType) { + case "kingbase": + // 金仓迭代期优先本地源码构建,避免下载到旧版本预编译代理导致修复不生效。 + return true + default: return false } +} + 
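+// shouldSkipReusableAgentCandidate reports whether a previously installed agent binary must not be reused for this driver type (currently mongodb and kingbase), forcing a fresh install instead.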
+func shouldSkipReusableAgentCandidate(driverType string, selectedVersion string) bool { _ = selectedVersion - return true + switch normalizeDriverType(driverType) { + case "mongodb", "kingbase": + return true + default: + return false + } } func optionalDriverBuildTag(driverType string, selectedVersion string) (string, error) { diff --git a/internal/db/kingbase_identifier_utils.go b/internal/db/kingbase_identifier_utils.go new file mode 100644 index 0000000..f3412ac --- /dev/null +++ b/internal/db/kingbase_identifier_utils.go @@ -0,0 +1,164 @@ +package db + +import "strings" + +func normalizeKingbaseIdentCommon(raw string) string { + value := strings.TrimSpace(raw) + if value == "" { + return "" + } + + // 兼容被多次 JSON 序列化后的转义引号: + // \\\"schema\\\" -> \"schema\" -> "schema" + for i := 0; i < 8; i++ { + next := strings.TrimSpace(value) + next = strings.ReplaceAll(next, `\\\"`, `\"`) + next = strings.ReplaceAll(next, `\"`, `"`) + if next == value { + break + } + value = next + } + value = strings.TrimSpace(value) + + stripWrapperOnce := func(text string) string { + t := strings.TrimSpace(text) + if strings.HasPrefix(t, `\`) && len(t) > 1 { + t = strings.TrimSpace(strings.TrimPrefix(t, `\`)) + } + if strings.HasSuffix(t, `\`) && len(t) > 1 { + t = strings.TrimSpace(strings.TrimSuffix(t, `\`)) + } + if len(t) >= 4 && strings.HasPrefix(t, `\"`) && strings.HasSuffix(t, `\"`) { + return strings.TrimSpace(t[2 : len(t)-2]) + } + if len(t) >= 2 && strings.HasPrefix(t, `"`) && strings.HasSuffix(t, `"`) { + return strings.TrimSpace(t[1 : len(t)-1]) + } + if len(t) >= 2 && strings.HasPrefix(t, "`") && strings.HasSuffix(t, "`") { + return strings.TrimSpace(t[1 : len(t)-1]) + } + if len(t) >= 2 && strings.HasPrefix(t, "[") && strings.HasSuffix(t, "]") { + return strings.TrimSpace(t[1 : len(t)-1]) + } + return t + } + + for i := 0; i < 8; i++ { + next := stripWrapperOnce(value) + if next == value { + break + } + value = next + } + value = strings.TrimSpace(value) + + // 兼容错误的二次引用与残留反斜杠。 + value = strings.ReplaceAll(value, `\"`, `"`) + value = strings.ReplaceAll(value, `""`, "") + value = strings.TrimSpace(value) + + for i := 0; i < 8; i++ { + next := strings.TrimSpace(value) + changed := false + if strings.HasPrefix(next, `\`) && len(next) > 1 { + next = strings.TrimSpace(strings.TrimPrefix(next, `\`)) + changed = true + } + if strings.HasSuffix(next, `\`) && len(next) > 1 { + next = strings.TrimSpace(strings.TrimSuffix(next, `\`)) + changed = true + } + if !changed || next == value { + break + } + value = next + } + + return strings.TrimSpace(value) +} + +func splitKingbaseQualifiedNameCommon(raw string) (schema string, table string) { + text := strings.TrimSpace(raw) + if text == "" { + return "", "" + } + + sep := findKingbaseQualifiedSeparator(text) + if sep < 0 { + return "", normalizeKingbaseIdentCommon(text) + } + + schemaPart := normalizeKingbaseIdentCommon(text[:sep]) + tablePart := normalizeKingbaseIdentCommon(text[sep+1:]) + + if tablePart == "" { + if schemaPart == "" { + return "", normalizeKingbaseIdentCommon(text) + } + return "", schemaPart + } + if schemaPart == "" { + return "", tablePart + } + return schemaPart, tablePart +} + +func findKingbaseQualifiedSeparator(raw string) int { + inDouble := false + inBacktick := false + inBracket := false + escaped := false + + for i := 0; i < len(raw); i++ { + ch := raw[i] + if escaped { + escaped = false + continue + } + + if ch == '\\' { + escaped = true + continue + } + + if inDouble { + if ch == '"' { + // SQL 双引号转义:"" 代表字面量 " + if i+1 < 
len(raw) && raw[i+1] == '"' { + i++ + continue + } + inDouble = false + } + continue + } + + if inBacktick { + if ch == '`' { + inBacktick = false + } + continue + } + + if inBracket { + if ch == ']' { + inBracket = false + } + continue + } + + switch ch { + case '"': + inDouble = true + case '`': + inBacktick = true + case '[': + inBracket = true + case '.': + return i + } + } + + return -1 +} diff --git a/internal/db/kingbase_identifier_utils_test.go b/internal/db/kingbase_identifier_utils_test.go new file mode 100644 index 0000000..69e2b2e --- /dev/null +++ b/internal/db/kingbase_identifier_utils_test.go @@ -0,0 +1,52 @@ +package db + +import "testing" + +func TestNormalizeKingbaseIdentCommon(t *testing.T) { + tests := []struct { + name string + in string + want string + }{ + {name: "plain", in: "ldf_server", want: "ldf_server"}, + {name: "quoted", in: `"ldf_server"`, want: "ldf_server"}, + {name: "escaped quoted", in: `\"ldf_server\"`, want: "ldf_server"}, + {name: "double escaped quoted", in: `\\\"ldf_server\\\"`, want: "ldf_server"}, + {name: "double quoted", in: `""ldf_server""`, want: "ldf_server"}, + {name: "backtick quoted", in: "`ldf_server`", want: "ldf_server"}, + {name: "bracket quoted", in: "[ldf_server]", want: "ldf_server"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := normalizeKingbaseIdentCommon(tt.in); got != tt.want { + t.Fatalf("normalizeKingbaseIdentCommon(%q)=%q,want=%q", tt.in, got, tt.want) + } + }) + } +} + +func TestSplitKingbaseQualifiedNameCommon(t *testing.T) { + tests := []struct { + name string + in string + wantSchema string + wantTable string + }{ + {name: "plain", in: "ldf_server.andon_events", wantSchema: "ldf_server", wantTable: "andon_events"}, + {name: "quoted", in: `"ldf_server"."andon_events"`, wantSchema: "ldf_server", wantTable: "andon_events"}, + {name: "escaped quoted", in: `\"ldf_server\".\"andon_events\"`, wantSchema: "ldf_server", wantTable: "andon_events"}, + {name: "double escaped quoted", in: `\\\"ldf_server\\\".\\\"andon_events\\\"`, wantSchema: "ldf_server", wantTable: "andon_events"}, + {name: "space around dot", in: ` "ldf_server" . 
"andon_events" `, wantSchema: "ldf_server", wantTable: "andon_events"}, + {name: "table only", in: "andon_events", wantSchema: "", wantTable: "andon_events"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotSchema, gotTable := splitKingbaseQualifiedNameCommon(tt.in) + if gotSchema != tt.wantSchema || gotTable != tt.wantTable { + t.Fatalf("splitKingbaseQualifiedNameCommon(%q)=(%q,%q),want=(%q,%q)", tt.in, gotSchema, gotTable, tt.wantSchema, tt.wantTable) + } + }) + } +} diff --git a/internal/db/kingbase_impl.go b/internal/db/kingbase_impl.go index c227506..d4eda20 100644 --- a/internal/db/kingbase_impl.go +++ b/internal/db/kingbase_impl.go @@ -21,10 +21,9 @@ import ( ) type KingbaseDB struct { - conn *sql.DB - pingTimeout time.Duration - defaultSearchPath string - forwarder *ssh.LocalForwarder // Store SSH tunnel forwarder + conn *sql.DB + pingTimeout time.Duration + forwarder *ssh.LocalForwarder // Store SSH tunnel forwarder } func quoteConnValue(v string) string { @@ -76,9 +75,6 @@ func (k *KingbaseDB) getDSN(config connection.ConnectionConfig) string { quoteConnValue(resolvePostgresSSLMode(config)), getConnectTimeoutSeconds(config), ) - if strings.TrimSpace(k.defaultSearchPath) != "" { - dsn += fmt.Sprintf(" search_path=%s", quoteConnValue(k.defaultSearchPath)) - } return dsn } @@ -124,9 +120,6 @@ func (k *KingbaseDB) Connect(config connection.ConnectionConfig) error { var failures []string for idx, attempt := range attempts { - // 避免跨连接缓存 defaultSearchPath 造成的污染:每次 Connect 都重新探测一次。 - k.defaultSearchPath = "" - dsn := k.getDSN(attempt) db, err := sql.Open("kingbase", dsn) if err != nil { @@ -145,163 +138,85 @@ func (k *KingbaseDB) Connect(config connection.ConnectionConfig) error { logger.Warnf("人大金仓 SSL 优先连接失败,已回退至明文连接") } - k.reconnectWithPreferredSearchPathIfNeeded(attempt) + // 获取 schema 列表以重构带有 search_path 的连接池 + searchPathStr := k.getSearchPathStr() + if searchPathStr != "" { + // 将 search_path 参数拼入 DSN + finalDSN := dsn + " search_path=" + quoteConnValue(searchPathStr) + if finalDB, err := sql.Open("kingbase", finalDSN); err == nil { + k.pingTimeout = getConnectTimeout(attempt) + finalDB.SetConnMaxLifetime(5 * time.Minute) + + // 临时将 k.conn 指向 finalDB 来做 ping 测试 + oldConn := k.conn + k.conn = finalDB + if err := k.Ping(); err == nil { + // 成功使用带 search_path 的连接池 + _ = oldConn.Close() + logger.Infof("人大金仓已配置连接级 search_path:%s", searchPathStr) + } else { + _ = finalDB.Close() + k.conn = oldConn + } + } + } + if searchPathStr != "" { + timeout := k.pingTimeout + if timeout <= 0 { + timeout = 5 * time.Second + } + ctx, cancel := utils.ContextWithTimeout(timeout) + defer cancel() + if _, err := k.conn.ExecContext(ctx, fmt.Sprintf("SET search_path TO %s", searchPathStr)); err != nil { + logger.Warnf("人大金仓显式设置 search_path 失败:%v", err) + } else { + logger.Infof("人大金仓已设置默认 search_path:%s", searchPathStr) + } + } + return nil } return fmt.Errorf("连接建立后验证失败:%s", strings.Join(failures, ";")) } -func (k *KingbaseDB) reconnectWithPreferredSearchPathIfNeeded(config connection.ConnectionConfig) { +// getSearchPathStr 查询当前数据库中所有用户 schema,配置 DSN 的 search_path。 +// KingBase 默认 search_path 为 "$user", public,对于自定义 schema 下的表不可见。 +func (k *KingbaseDB) getSearchPathStr() string { if k.conn == nil { - return + return "" } - timeout := k.pingTimeout - if timeout <= 0 { - timeout = 5 * time.Second - } - ctx, cancel := utils.ContextWithTimeout(timeout) - defer cancel() + query := `SELECT nspname FROM pg_namespace + WHERE nspname NOT IN ('pg_catalog', 'information_schema') + AND 
nspname NOT LIKE 'pg_%'
+		ORDER BY nspname`
 
-	var currentSchema string
-	if err := k.conn.QueryRowContext(ctx, "SELECT current_schema()").Scan(&currentSchema); err != nil {
-		logger.Warnf("人大金仓读取当前 schema 失败:%v", err)
-		return
-	}
-
-	if schema := strings.TrimSpace(currentSchema); schema != "" && !strings.EqualFold(schema, "public") {
-		return
-	}
-
-	searchPath, chosenSchema := k.detectPreferredSearchPath(ctx, config)
-	if strings.TrimSpace(searchPath) == "" {
-		return
-	}
-
-	oldConn := k.conn
-	prevSearchPath := k.defaultSearchPath
-	k.defaultSearchPath = searchPath
-
-	dsn := k.getDSN(config)
-	newConn, err := sql.Open("kingbase", dsn)
+	rows, err := k.conn.Query(query)
 	if err != nil {
-		k.defaultSearchPath = prevSearchPath
-		logger.Warnf("人大金仓重连以设置 search_path 失败:%v", err)
-		return
-	}
-	if err := newConn.PingContext(ctx); err != nil {
-		_ = newConn.Close()
-		k.defaultSearchPath = prevSearchPath
-		logger.Warnf("人大金仓重连后验证失败:%v", err)
-		return
-	}
-
-	k.conn = newConn
-	_ = oldConn.Close()
-	logger.Infof("人大金仓已设置默认 schema:%s", chosenSchema)
-}
-
-func (k *KingbaseDB) kingbaseSchemaExists(ctx context.Context, schema string) (bool, error) {
-	if schema = strings.TrimSpace(schema); schema == "" {
-		return false, nil
-	}
-
-	var one int
-	err := k.conn.QueryRowContext(ctx, "SELECT 1 FROM pg_namespace WHERE nspname = $1", schema).Scan(&one)
-	if err == sql.ErrNoRows {
-		return false, nil
-	}
-	if err != nil {
-		return false, err
-	}
-	return true, nil
-}
-
-func (k *KingbaseDB) detectPreferredSearchPath(ctx context.Context, config connection.ConnectionConfig) (searchPath string, chosenSchema string) {
-	// 1) 优先使用与数据库名/用户名同名的 schema(需要存在)
-	candidates := []string{
-		normalizeKingbaseIdentifier(config.Database),
-		normalizeKingbaseIdentifier(config.User),
-	}
-
-	seen := make(map[string]struct{}, len(candidates))
-	for _, candidate := range candidates {
-		if candidate == "" || strings.EqualFold(candidate, "public") {
-			continue
-		}
-		key := strings.ToLower(candidate)
-		if _, ok := seen[key]; ok {
-			continue
-		}
-		seen[key] = struct{}{}
-
-		exists, err := k.kingbaseSchemaExists(ctx, candidate)
-		if err != nil {
-			logger.Warnf("人大金仓检查 schema 是否存在失败:schema=%s err=%v", candidate, err)
-			continue
-		}
-		if !exists {
-			continue
-		}
-
-		return fmt.Sprintf("%s,public", quoteKingbaseIdent(candidate)), candidate
-	}
-
-	// 2) 如果只有一个“用户 schema”含有表,则将其作为默认 schema(更符合 DB GUI 的直觉)
-	schema, err := k.detectSingleUserSchemaWithTables(ctx)
-	if err != nil {
-		logger.Warnf("人大金仓探测默认 schema 失败:%v", err)
-		return "", ""
-	}
-	if schema == "" || strings.EqualFold(schema, "public") {
-		return "", ""
-	}
-	return fmt.Sprintf("%s,public", quoteKingbaseIdent(schema)), schema
-}
-
-func (k *KingbaseDB) detectSingleUserSchemaWithTables(ctx context.Context) (string, error) {
-	if k.conn == nil {
-		return "", nil
-	}
-
-	// 仅在“唯一用户 schema”场景做兜底,避免多 schema 下误选导致对象解析歧义。
-	// 注:information_schema.tables 的视图在 PG/金仓语义稳定且权限要求相对低。
-	query := `
-SELECT table_schema, COUNT(*) AS table_count
-FROM information_schema.tables
-WHERE table_type = 'BASE TABLE'
-  AND table_schema NOT IN ('pg_catalog', 'information_schema', 'public')
-  AND table_schema NOT LIKE 'pg_%'
-GROUP BY table_schema
-ORDER BY table_count DESC, table_schema
-LIMIT 2`
-
-	rows, err := k.conn.QueryContext(ctx, query)
-	if err != nil {
-		return "", err
+		logger.Warnf("人大金仓查询用户 schema 失败,跳过 search_path 设置:%v", err)
+		return ""
 	}
 	defer rows.Close()
 
-	type row struct {
-		schema string
-		count  int64
-	}
-	var results []row
+	var schemas []string
 	for rows.Next() {
-		var r row
-	
if scanErr := rows.Scan(&r.schema, &r.count); scanErr != nil { - return "", scanErr + var name string + if err := rows.Scan(&name); err != nil { + continue + } + name = strings.TrimSpace(name) + if name != "" { + // 使用 SQL 标准的双引号包裹标识符 + escaped := strings.ReplaceAll(name, `"`, `""`) + schemas = append(schemas, `"`+escaped+`"`) } - results = append(results, r) - } - if err := rows.Err(); err != nil { - return "", err } - if len(results) != 1 { - return "", nil + if len(schemas) == 0 { + return "" } - return normalizeKingbaseIdentifier(results[0].schema), nil + + return strings.Join(schemas, ", ") } func (k *KingbaseDB) Close() error { @@ -938,34 +853,7 @@ func (k *KingbaseDB) ApplyChanges(tableName string, changes connection.ChangeSet } func normalizeKingbaseIdentifier(raw string) string { - value := strings.TrimSpace(raw) - if value == "" { - return "" - } - - // 兼容 JSON/字符串转义后传入的标识符:\"schema\" -> "schema" - value = strings.ReplaceAll(value, `\"`, `"`) - value = strings.TrimSpace(value) - - // 兼容异常多重包裹引号(例如 ""schema""、""""schema"""")。 - // strings.Trim 会移除两端连续引号,迭代后可收敛到纯标识符。 - for i := 0; i < 4; i++ { - next := strings.TrimSpace(strings.Trim(value, `"`)) - if next == value { - break - } - value = next - } - - // 兼容其他方言可能残留的引用形式 - if len(value) >= 2 && strings.HasPrefix(value, "`") && strings.HasSuffix(value, "`") { - value = strings.TrimSpace(strings.Trim(value, "`")) - } - if len(value) >= 2 && strings.HasPrefix(value, "[") && strings.HasSuffix(value, "]") { - value = strings.TrimSpace(value[1 : len(value)-1]) - } - - return value + return normalizeKingbaseIdentCommon(raw) } // kingbaseIdentNeedsQuote 判断标识符是否需要双引号包裹。 @@ -1002,7 +890,7 @@ func isKingbaseReservedWord(ident string) bool { "begin", "commit", "rollback", "schema", "database", "view", "function", "procedure", "sequence", "type", "domain", "role", "session", "current", "authorization", "cross", "full", "natural", "some", "cast", "fetch", - "for", "to", "do", "if", "return", "returns", "declare", "cursor": + "for", "to", "do", "if", "return", "returns", "declare", "cursor", "server", "owner": return true } return false @@ -1013,7 +901,6 @@ func quoteKingbaseIdent(name string) string { if n == "" { return "\"\"" } - // 仅在需要时才加双引号,避免 KingbaseES 兼容性问题 if !kingbaseIdentNeedsQuote(n) { return n } @@ -1022,24 +909,7 @@ func quoteKingbaseIdent(name string) string { } func splitKingbaseQualifiedTable(tableName string) (schema string, table string) { - raw := strings.TrimSpace(tableName) - if raw == "" { - return "", "" - } - - if parts := strings.SplitN(raw, ".", 2); len(parts) == 2 { - schema = normalizeKingbaseIdentifier(parts[0]) - table = normalizeKingbaseIdentifier(parts[1]) - if table == "" { - return "", normalizeKingbaseIdentifier(raw) - } - if schema == "" { - return "", table - } - return schema, table - } - - return "", normalizeKingbaseIdentifier(raw) + return splitKingbaseQualifiedNameCommon(tableName) } func (k *KingbaseDB) GetAllColumns(dbName string) ([]connection.ColumnDefinitionWithTable, error) { diff --git a/internal/db/kingbase_impl_test.go b/internal/db/kingbase_impl_test.go index afad520..8b0d6f5 100644 --- a/internal/db/kingbase_impl_test.go +++ b/internal/db/kingbase_impl_test.go @@ -15,8 +15,10 @@ func TestNormalizeKingbaseIdentifier(t *testing.T) { {name: "double quoted", in: `""ldf_server""`, want: "ldf_server"}, {name: "quad quoted", in: `""""ldf_server""""`, want: "ldf_server"}, {name: "escaped quoted", in: `\"ldf_server\"`, want: "ldf_server"}, + {name: "double escaped quoted", in: `\\\"ldf_server\\\"`, 
want: "ldf_server"}, {name: "backtick quoted", in: "`ldf_server`", want: "ldf_server"}, {name: "bracket quoted", in: "[ldf_server]", want: "ldf_server"}, + {name: "embedded double quotes", in: `ldf""server`, want: "ldfserver"}, } for _, tt := range tests { @@ -99,6 +101,7 @@ func TestSplitKingbaseQualifiedTable(t *testing.T) { {name: "plain qualified", in: "ldf_server.t_user", wantSchema: "ldf_server", wantTable: "t_user"}, {name: "double quoted qualified", in: `""ldf_server"".""t_user""`, wantSchema: "ldf_server", wantTable: "t_user"}, {name: "escaped qualified", in: `\"ldf_server\".\"t_user\"`, wantSchema: "ldf_server", wantTable: "t_user"}, + {name: "double escaped qualified", in: `\\\"ldf_server\\\".\\\"t_user\\\"`, wantSchema: "ldf_server", wantTable: "t_user"}, {name: "bracket qualified", in: "[ldf_server].[t_user]", wantSchema: "ldf_server", wantTable: "t_user"}, {name: "table only", in: `""t_user""`, wantSchema: "", wantTable: "t_user"}, } diff --git a/internal/db/mongodb_impl.go b/internal/db/mongodb_impl.go index 27ac0c7..dff4644 100644 --- a/internal/db/mongodb_impl.go +++ b/internal/db/mongodb_impl.go @@ -151,10 +151,14 @@ func applyMongoURI(config connection.ConnectionConfig) connection.ConnectionConf } } - if len(config.Hosts) == 0 && len(hostsFromURI) > 0 { + explicitHost := strings.TrimSpace(config.Host) != "" + explicitHosts := len(config.Hosts) > 0 + + // 显式填写的 host/hosts 优先级高于 URI,避免表单 host 被 URI 中的 localhost 覆盖。 + if !explicitHost && !explicitHosts && len(hostsFromURI) > 0 { config.Hosts = hostsFromURI } - if strings.TrimSpace(config.Host) == "" && len(hostsFromURI) > 0 { + if !explicitHost && !explicitHosts && len(hostsFromURI) > 0 { host, port, ok := parseHostPortWithDefault(hostsFromURI[0], defaultPort) if ok { config.Host = host @@ -281,9 +285,44 @@ func buildMongoAuthAttempts(config connection.ConnectionConfig) []connection.Con return attempts } +func mongoURIForcesTLS(uriText string) bool { + trimmed := strings.TrimSpace(uriText) + if trimmed == "" { + return false + } + parsed, err := url.Parse(trimmed) + if err != nil { + return false + } + query := parsed.Query() + for _, key := range []string{"tls", "ssl"} { + value := strings.ToLower(strings.TrimSpace(query.Get(key))) + switch value { + case "1", "true", "t", "yes", "y", "required": + return true + } + } + return false +} + +func mongoAttemptSSLLabel(config connection.ConnectionConfig, fallbackToPlain bool) string { + if fallbackToPlain { + return "明文回退" + } + if mongoURIForcesTLS(config.URI) { + return "SSL" + } + enabled, _ := resolveMongoTLSSettings(config) + if enabled { + return "SSL" + } + return "明文" +} + func (m *MongoDB) Connect(config connection.ConnectionConfig) error { runConfig := applyMongoURI(config) connectConfig := runConfig + sshRouteHint := "" if runConfig.UseSSH && runConfig.MongoSRV { return fmt.Errorf("MongoDB SRV 记录模式暂不支持 SSH 隧道") @@ -324,6 +363,7 @@ func (m *MongoDB) Connect(config connection.ConnectionConfig) error { localConfig.URI = "" localConfig.Hosts = []string{normalizeMongoAddress(host, port)} connectConfig = localConfig + sshRouteHint = fmt.Sprintf("SSH隧道 %s -> %s:%d", forwarder.LocalAddr, targetHost, targetPort) logger.Infof("MongoDB 通过本地端口转发连接:%s -> %s:%d", forwarder.LocalAddr, targetHost, targetPort) } @@ -337,20 +377,32 @@ func (m *MongoDB) Connect(config connection.ConnectionConfig) error { if shouldTrySSLPreferredFallback(connectConfig) { sslAttempts = append(sslAttempts, withSSLDisabled(connectConfig)) } + totalAttempts := 0 + for _, attemptConfig := range sslAttempts { 
+ totalAttempts += len(buildMongoAuthAttempts(attemptConfig)) + } + attemptNo := 0 var errorDetails []string for sslIndex, sslConfig := range sslAttempts { - sslLabel := "SSL" - if sslIndex > 0 { - sslLabel = "明文回退" - } + sslLabel := mongoAttemptSSLLabel(sslConfig, sslIndex > 0) attemptConfigs := buildMongoAuthAttempts(sslConfig) for index, attemptConfig := range attemptConfigs { + attemptNo++ authLabel := "主库凭据" if index > 0 { authLabel = "从库凭据" } + targets := collectMongoSeeds(attemptConfig) + if len(targets) == 0 { + targets = append(targets, normalizeMongoAddress(attemptConfig.Host, attemptConfig.Port)) + } + attemptStarted := time.Now() + logger.Infof( + "MongoDB 连接尝试:%d/%d 模式=%s 凭据=%s 目标=%s 代理=%t", + attemptNo, totalAttempts, sslLabel, authLabel, strings.Join(targets, ","), attemptConfig.UseProxy, + ) if sslIndex > 0 { attemptConfig.URI = "" @@ -369,7 +421,13 @@ func (m *MongoDB) Connect(config connection.ConnectionConfig) error { } client, err := mongo.Connect(clientOpts) if err != nil { - errorDetails = append(errorDetails, fmt.Sprintf("%s %s连接失败: %v", sslLabel, authLabel, err)) + logger.Warnf("MongoDB 连接尝试失败:%d/%d 模式=%s 凭据=%s 耗时=%s 错误=%v", + attemptNo, totalAttempts, sslLabel, authLabel, time.Since(attemptStarted).Round(time.Millisecond), err) + detail := fmt.Sprintf("%s %s连接失败: %v", sslLabel, authLabel, err) + if sshRouteHint != "" { + detail = fmt.Sprintf("%s(%s)", detail, sshRouteHint) + } + errorDetails = append(errorDetails, detail) continue } @@ -379,9 +437,17 @@ func (m *MongoDB) Connect(config connection.ConnectionConfig) error { _ = client.Disconnect(ctx) cancel() m.client = nil - errorDetails = append(errorDetails, fmt.Sprintf("%s %s验证失败: %v", sslLabel, authLabel, err)) + logger.Warnf("MongoDB 连接尝试验证失败:%d/%d 模式=%s 凭据=%s 耗时=%s 错误=%v", + attemptNo, totalAttempts, sslLabel, authLabel, time.Since(attemptStarted).Round(time.Millisecond), err) + detail := fmt.Sprintf("%s %s验证失败: %v", sslLabel, authLabel, err) + if sshRouteHint != "" { + detail = fmt.Sprintf("%s(%s)", detail, sshRouteHint) + } + errorDetails = append(errorDetails, detail) continue } + logger.Infof("MongoDB 连接尝试成功:%d/%d 模式=%s 凭据=%s 耗时=%s", + attemptNo, totalAttempts, sslLabel, authLabel, time.Since(attemptStarted).Round(time.Millisecond)) if sslIndex > 0 { logger.Warnf("MongoDB SSL 优先连接失败,已回退至明文连接") } diff --git a/internal/db/mongodb_impl_uri_test.go b/internal/db/mongodb_impl_uri_test.go new file mode 100644 index 0000000..020b293 --- /dev/null +++ b/internal/db/mongodb_impl_uri_test.go @@ -0,0 +1,39 @@ +//go:build gonavi_full_drivers || gonavi_mongodb_driver + +package db + +import ( + "testing" + + "GoNavi-Wails/internal/connection" +) + +func TestApplyMongoURI_ExplicitHostDoesNotAdoptURIHosts(t *testing.T) { + config := connection.ConnectionConfig{ + Host: "10.10.10.10", + Port: 27017, + URI: "mongodb://localhost:27017/admin", + } + + got := applyMongoURI(config) + if got.Host != "10.10.10.10" { + t.Fatalf("expected host to remain explicit, got %q", got.Host) + } + if len(got.Hosts) != 0 { + t.Fatalf("expected hosts to remain empty when explicit host exists, got %v", got.Hosts) + } +} + +func TestApplyMongoURI_ExplicitHostsDoesNotAdoptURIHosts(t *testing.T) { + config := connection.ConnectionConfig{ + Host: "10.10.10.10", + Port: 27017, + Hosts: []string{"10.10.10.10:27017", "10.10.10.11:27017"}, + URI: "mongodb://localhost:27017,localhost:27018/admin?replicaSet=rs0", + } + + got := applyMongoURI(config) + if len(got.Hosts) != 2 || got.Hosts[0] != "10.10.10.10:27017" { + t.Fatalf("expected explicit hosts to 
stay untouched, got %v", got.Hosts) + } +} diff --git a/internal/db/mongodb_impl_v1.go b/internal/db/mongodb_impl_v1.go index e3aa5b4..60d4fb2 100644 --- a/internal/db/mongodb_impl_v1.go +++ b/internal/db/mongodb_impl_v1.go @@ -152,10 +152,14 @@ func applyMongoURI(config connection.ConnectionConfig) connection.ConnectionConf } } - if len(config.Hosts) == 0 && len(hostsFromURI) > 0 { + explicitHost := strings.TrimSpace(config.Host) != "" + explicitHosts := len(config.Hosts) > 0 + + // 显式填写的 host/hosts 优先级高于 URI,避免表单 host 被 URI 中的 localhost 覆盖。 + if !explicitHost && !explicitHosts && len(hostsFromURI) > 0 { config.Hosts = hostsFromURI } - if strings.TrimSpace(config.Host) == "" && len(hostsFromURI) > 0 { + if !explicitHost && !explicitHosts && len(hostsFromURI) > 0 { host, port, ok := parseHostPortWithDefault(hostsFromURI[0], defaultPort) if ok { config.Host = host @@ -282,9 +286,44 @@ func buildMongoAuthAttempts(config connection.ConnectionConfig) []connection.Con return attempts } +func mongoURIForcesTLS(uriText string) bool { + trimmed := strings.TrimSpace(uriText) + if trimmed == "" { + return false + } + parsed, err := url.Parse(trimmed) + if err != nil { + return false + } + query := parsed.Query() + for _, key := range []string{"tls", "ssl"} { + value := strings.ToLower(strings.TrimSpace(query.Get(key))) + switch value { + case "1", "true", "t", "yes", "y", "required": + return true + } + } + return false +} + +func mongoAttemptSSLLabel(config connection.ConnectionConfig, fallbackToPlain bool) string { + if fallbackToPlain { + return "明文回退" + } + if mongoURIForcesTLS(config.URI) { + return "SSL" + } + enabled, _ := resolveMongoTLSSettings(config) + if enabled { + return "SSL" + } + return "明文" +} + func (m *MongoDBV1) Connect(config connection.ConnectionConfig) error { runConfig := applyMongoURI(config) connectConfig := runConfig + sshRouteHint := "" if runConfig.UseSSH && runConfig.MongoSRV { return fmt.Errorf("MongoDB SRV 记录模式暂不支持 SSH 隧道") @@ -325,6 +364,7 @@ func (m *MongoDBV1) Connect(config connection.ConnectionConfig) error { localConfig.URI = "" localConfig.Hosts = []string{normalizeMongoAddress(host, port)} connectConfig = localConfig + sshRouteHint = fmt.Sprintf("SSH隧道 %s -> %s:%d", forwarder.LocalAddr, targetHost, targetPort) logger.Infof("MongoDB 通过本地端口转发连接:%s -> %s:%d", forwarder.LocalAddr, targetHost, targetPort) } @@ -338,20 +378,32 @@ func (m *MongoDBV1) Connect(config connection.ConnectionConfig) error { if shouldTrySSLPreferredFallback(connectConfig) { sslAttempts = append(sslAttempts, withSSLDisabled(connectConfig)) } + totalAttempts := 0 + for _, attemptConfig := range sslAttempts { + totalAttempts += len(buildMongoAuthAttempts(attemptConfig)) + } + attemptNo := 0 var errorDetails []string for sslIndex, sslConfig := range sslAttempts { - sslLabel := "SSL" - if sslIndex > 0 { - sslLabel = "明文回退" - } + sslLabel := mongoAttemptSSLLabel(sslConfig, sslIndex > 0) attemptConfigs := buildMongoAuthAttempts(sslConfig) for index, attemptConfig := range attemptConfigs { + attemptNo++ authLabel := "主库凭据" if index > 0 { authLabel = "从库凭据" } + targets := collectMongoSeeds(attemptConfig) + if len(targets) == 0 { + targets = append(targets, normalizeMongoAddress(attemptConfig.Host, attemptConfig.Port)) + } + attemptStarted := time.Now() + logger.Infof( + "MongoDB(v1) 连接尝试:%d/%d 模式=%s 凭据=%s 目标=%s 代理=%t", + attemptNo, totalAttempts, sslLabel, authLabel, strings.Join(targets, ","), attemptConfig.UseProxy, + ) if sslIndex > 0 { attemptConfig.URI = "" @@ -372,7 +424,13 @@ func (m 
*MongoDBV1) Connect(config connection.ConnectionConfig) error { client, err := mongo.Connect(connectCtx, clientOpts) connectCancel() if err != nil { - errorDetails = append(errorDetails, fmt.Sprintf("%s %s连接失败: %v", sslLabel, authLabel, err)) + logger.Warnf("MongoDB(v1) 连接尝试失败:%d/%d 模式=%s 凭据=%s 耗时=%s 错误=%v", + attemptNo, totalAttempts, sslLabel, authLabel, time.Since(attemptStarted).Round(time.Millisecond), err) + detail := fmt.Sprintf("%s %s连接失败: %v", sslLabel, authLabel, err) + if sshRouteHint != "" { + detail = fmt.Sprintf("%s(%s)", detail, sshRouteHint) + } + errorDetails = append(errorDetails, detail) continue } @@ -382,9 +440,17 @@ func (m *MongoDBV1) Connect(config connection.ConnectionConfig) error { _ = client.Disconnect(ctx) cancel() m.client = nil - errorDetails = append(errorDetails, fmt.Sprintf("%s %s验证失败: %v", sslLabel, authLabel, err)) + logger.Warnf("MongoDB(v1) 连接尝试验证失败:%d/%d 模式=%s 凭据=%s 耗时=%s 错误=%v", + attemptNo, totalAttempts, sslLabel, authLabel, time.Since(attemptStarted).Round(time.Millisecond), err) + detail := fmt.Sprintf("%s %s验证失败: %v", sslLabel, authLabel, err) + if sshRouteHint != "" { + detail = fmt.Sprintf("%s(%s)", detail, sshRouteHint) + } + errorDetails = append(errorDetails, detail) continue } + logger.Infof("MongoDB(v1) 连接尝试成功:%d/%d 模式=%s 凭据=%s 耗时=%s", + attemptNo, totalAttempts, sslLabel, authLabel, time.Since(attemptStarted).Round(time.Millisecond)) if sslIndex > 0 { logger.Warnf("MongoDB(v1) SSL 优先连接失败,已回退至明文连接") } diff --git a/internal/db/mongodb_impl_v1_uri_test.go b/internal/db/mongodb_impl_v1_uri_test.go new file mode 100644 index 0000000..8860db2 --- /dev/null +++ b/internal/db/mongodb_impl_v1_uri_test.go @@ -0,0 +1,25 @@ +//go:build gonavi_mongodb_driver_v1 + +package db + +import ( + "testing" + + "GoNavi-Wails/internal/connection" +) + +func TestApplyMongoURIV1_ExplicitHostDoesNotAdoptURIHosts(t *testing.T) { + config := connection.ConnectionConfig{ + Host: "10.10.10.10", + Port: 27017, + URI: "mongodb://localhost:27017/admin", + } + + got := applyMongoURI(config) + if got.Host != "10.10.10.10" { + t.Fatalf("expected host to remain explicit, got %q", got.Host) + } + if len(got.Hosts) != 0 { + t.Fatalf("expected hosts to remain empty when explicit host exists, got %v", got.Hosts) + } +} diff --git a/internal/db/optional_driver_agent_impl.go b/internal/db/optional_driver_agent_impl.go index 2579b7c..07fd7d3 100644 --- a/internal/db/optional_driver_agent_impl.go +++ b/internal/db/optional_driver_agent_impl.go @@ -9,6 +9,7 @@ import ( "io" "os" "os/exec" + "reflect" "runtime" "strings" "sync" @@ -145,6 +146,7 @@ func (c *optionalDriverAgentClient) captureStderr(stderr io.Reader) { if line == "" { continue } + logger.Warnf("%s 驱动代理 stderr: %s", driverDisplayName(c.driver), line) c.stderrMu.Lock() if c.stderr.Len() > 0 { c.stderr.WriteString(" | ") @@ -268,6 +270,7 @@ func (d *OptionalDriverAgentDB) Connect(config connection.ConnectionConfig) erro return err } d.client = client + d.ensureKingbaseSearchPath(config) return nil } @@ -488,6 +491,16 @@ func (d *OptionalDriverAgentDB) ApplyChanges(tableName string, changes connectio if err != nil { return err } + if strings.EqualFold(d.driverType, "kingbase") { + if normalized := normalizeKingbaseAgentTableName(tableName); normalized != "" { + tableName = normalized + } + if normalized, normErr := d.normalizeKingbaseAgentChangeSet(tableName, changes); normErr == nil { + changes = normalized + } else { + logger.Warnf("Kingbase ApplyChanges 字段名规范化失败:%v", normErr) + } + } return 
client.call(optionalAgentRequest{ Method: optionalAgentMethodApplyChanges, TableName: tableName, @@ -502,6 +515,269 @@ func (d *OptionalDriverAgentDB) requireClient() (*optionalDriverAgentClient, err return d.client, nil } +func (d *OptionalDriverAgentDB) ensureKingbaseSearchPath(config connection.ConnectionConfig) { + if !strings.EqualFold(d.driverType, "kingbase") { + return + } + client, err := d.requireClient() + if err != nil || client == nil { + return + } + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + schemas, err := d.listKingbaseSchemas(ctx) + if err != nil || len(schemas) == 0 { + if err != nil { + logger.Warnf("人大金仓驱动代理探测 schema 失败:%v", err) + } + return + } + + searchPath := buildKingbaseSearchPathFromSchemas(schemas) + if strings.TrimSpace(searchPath) == "" { + return + } + + if _, err := d.ExecContext(ctx, fmt.Sprintf("SET search_path TO %s", searchPath)); err != nil { + logger.Warnf("人大金仓驱动代理设置 search_path 失败:%v", err) + return + } + logger.Infof("人大金仓驱动代理已设置默认 search_path:%s", searchPath) +} + +func (d *OptionalDriverAgentDB) listKingbaseSchemas(ctx context.Context) ([]string, error) { + query := `SELECT nspname FROM pg_namespace + WHERE nspname NOT IN ('pg_catalog', 'information_schema') + AND nspname NOT LIKE 'pg_%' + ORDER BY nspname` + rows, _, err := d.QueryContext(ctx, query) + if err != nil { + return nil, err + } + + schemas := make([]string, 0, len(rows)) + for _, row := range rows { + for key, val := range row { + if strings.EqualFold(key, "nspname") || strings.EqualFold(key, "schema") { + name := strings.TrimSpace(fmt.Sprintf("%v", val)) + if name != "" { + schemas = append(schemas, name) + } + break + } + } + if len(row) == 1 { + for _, val := range row { + name := strings.TrimSpace(fmt.Sprintf("%v", val)) + if name != "" { + schemas = append(schemas, name) + } + break + } + } + } + return schemas, nil +} + +func buildKingbaseSearchPathFromSchemas(schemas []string) string { + if len(schemas) == 0 { + return "" + } + seen := make(map[string]struct{}, len(schemas)+1) + parts := make([]string, 0, len(schemas)+1) + for _, name := range schemas { + trimmed := normalizeKingbaseAgentIdent(name) + if trimmed == "" { + continue + } + key := strings.ToLower(trimmed) + if _, ok := seen[key]; ok { + continue + } + seen[key] = struct{}{} + parts = append(parts, quoteKingbaseAgentIdent(trimmed)) + } + if _, ok := seen["public"]; !ok { + parts = append(parts, "public") + } + return strings.Join(parts, ", ") +} + +func quoteKingbaseAgentIdent(name string) string { + n := normalizeKingbaseAgentIdent(name) + if n == "" { + return "\"\"" + } + n = strings.ReplaceAll(n, `"`, `""`) + return `"` + n + `"` +} + +func normalizeKingbaseAgentTableName(raw string) string { + schema, table := splitKingbaseQualifiedNameCommon(raw) + if table == "" { + return "" + } + if schema == "" { + return table + } + return schema + "." 
+ table +} + +func normalizeKingbaseAgentIdent(raw string) string { + return normalizeKingbaseIdentCommon(raw) +} + +type kingbaseAgentColumnIndex struct { + exact map[string]string + compact map[string]string +} + +func buildKingbaseAgentColumnIndex(columns []string) kingbaseAgentColumnIndex { + exact := make(map[string]string, len(columns)) + compact := make(map[string]string, len(columns)) + compactSeen := make(map[string]string, len(columns)) + compactDup := make(map[string]struct{}, len(columns)) + + for _, col := range columns { + name := normalizeKingbaseAgentIdent(col) + if name == "" { + continue + } + lower := strings.ToLower(name) + if _, ok := exact[lower]; !ok { + exact[lower] = name + } + key := normalizeKingbaseAgentCompactKey(name) + if key == "" { + continue + } + if prev, ok := compactSeen[key]; ok && !strings.EqualFold(prev, name) { + compactDup[key] = struct{}{} + continue + } + compactSeen[key] = name + } + + if len(compactDup) > 0 { + for key := range compactDup { + delete(compactSeen, key) + } + } + for key, value := range compactSeen { + compact[key] = value + } + return kingbaseAgentColumnIndex{exact: exact, compact: compact} +} + +func normalizeKingbaseAgentCompactKey(raw string) string { + name := normalizeKingbaseAgentIdent(raw) + if name == "" { + return "" + } + name = strings.ToLower(strings.TrimSpace(name)) + name = strings.Join(strings.Fields(name), "") + name = strings.ReplaceAll(name, "_", "") + return name +} + +func resolveKingbaseAgentColumnName(name string, index kingbaseAgentColumnIndex) string { + cleaned := normalizeKingbaseAgentIdent(name) + if cleaned == "" { + return name + } + lower := strings.ToLower(cleaned) + if actual, ok := index.exact[lower]; ok { + return actual + } + compact := normalizeKingbaseAgentCompactKey(cleaned) + if actual, ok := index.compact[compact]; ok { + return actual + } + return cleaned +} + +func normalizeKingbaseAgentChangeSetByColumns(changes connection.ChangeSet, columns []string) (connection.ChangeSet, error) { + index := buildKingbaseAgentColumnIndex(columns) + if len(index.exact) == 0 && len(index.compact) == 0 { + return changes, nil + } + + mapRow := func(row map[string]interface{}) (map[string]interface{}, error) { + if row == nil { + return row, nil + } + out := make(map[string]interface{}, len(row)) + for key, value := range row { + nextKey := resolveKingbaseAgentColumnName(key, index) + if existing, ok := out[nextKey]; ok && !reflect.DeepEqual(existing, value) { + return nil, fmt.Errorf("duplicate mapped column %q", nextKey) + } + out[nextKey] = value + } + return out, nil + } + + next := connection.ChangeSet{ + Inserts: make([]map[string]interface{}, 0, len(changes.Inserts)), + Updates: make([]connection.UpdateRow, 0, len(changes.Updates)), + Deletes: make([]map[string]interface{}, 0, len(changes.Deletes)), + } + + for _, row := range changes.Inserts { + mapped, err := mapRow(row) + if err != nil { + return changes, err + } + next.Inserts = append(next.Inserts, mapped) + } + + for _, upd := range changes.Updates { + keys, err := mapRow(upd.Keys) + if err != nil { + return changes, err + } + values, err := mapRow(upd.Values) + if err != nil { + return changes, err + } + next.Updates = append(next.Updates, connection.UpdateRow{ + Keys: keys, + Values: values, + }) + } + + for _, row := range changes.Deletes { + mapped, err := mapRow(row) + if err != nil { + return changes, err + } + next.Deletes = append(next.Deletes, mapped) + } + + return next, nil +} + +func (d *OptionalDriverAgentDB) 
normalizeKingbaseAgentChangeSet(tableName string, changes connection.ChangeSet) (connection.ChangeSet, error) { + columns, err := d.GetColumns("", tableName) + if err != nil { + return changes, err + } + if len(columns) == 0 { + return changes, nil + } + names := make([]string, 0, len(columns)) + for _, col := range columns { + name := strings.TrimSpace(col.Name) + if name == "" { + continue + } + names = append(names, name) + } + return normalizeKingbaseAgentChangeSetByColumns(changes, names) +} + func timeoutMsFromContext(ctx context.Context) int64 { deadline, ok := ctx.Deadline() if !ok { diff --git a/internal/db/optional_driver_agent_impl_test.go b/internal/db/optional_driver_agent_impl_test.go index 2273a06..a79b03d 100644 --- a/internal/db/optional_driver_agent_impl_test.go +++ b/internal/db/optional_driver_agent_impl_test.go @@ -1,32 +1,67 @@ package db import ( - "context" "testing" - "time" + + "GoNavi-Wails/internal/connection" ) -func TestTimeoutMsFromContext_NoDeadline(t *testing.T) { - if got := timeoutMsFromContext(context.Background()); got != 0 { - t.Fatalf("无 deadline 时应返回 0,got=%d", got) +func TestNormalizeKingbaseAgentTableName(t *testing.T) { + tests := []struct { + name string + in string + want string + }{ + {name: "plain", in: "ldf_server.andon_events", want: "ldf_server.andon_events"}, + {name: "quoted", in: `"ldf_server"."andon_events"`, want: "ldf_server.andon_events"}, + {name: "double quoted", in: `""ldf_server"".""andon_events""`, want: "ldf_server.andon_events"}, + {name: "escaped", in: `\"ldf_server\".\"andon_events\"`, want: "ldf_server.andon_events"}, + {name: "double escaped", in: `\\\"ldf_server\\\".\\\"andon_events\\\"`, want: "ldf_server.andon_events"}, + {name: "space around dot", in: ` "ldf_server" . "andon_events" `, want: "ldf_server.andon_events"}, + {name: "table only", in: `bcs_barcode`, want: "bcs_barcode"}, + {name: "table only quoted", in: `"bcs_barcode"`, want: "bcs_barcode"}, + {name: "table only double quoted", in: `""bcs_barcode""`, want: "bcs_barcode"}, + {name: "table only double escaped", in: `\\\"bcs_barcode\\\"`, want: "bcs_barcode"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := normalizeKingbaseAgentTableName(tt.in); got != tt.want { + t.Fatalf("normalizeKingbaseAgentTableName(%q) = %q, want %q", tt.in, got, tt.want) + } + }) } } -func TestTimeoutMsFromContext_WithDeadline(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) - defer cancel() +func TestNormalizeKingbaseAgentChangeSetByColumns(t *testing.T) { + columns := []string{"andon_events_id", "event_name", "event_code"} + input := connection.ChangeSet{ + Inserts: []map[string]interface{}{ + {"event name": "物料1", "event_code": "EV-0001", "andon_events_id": 1}, + }, + Updates: []connection.UpdateRow{ + {Keys: map[string]interface{}{"andon_events_id": 1}, Values: map[string]interface{}{"event name": "物料2"}}, + }, + Deletes: []map[string]interface{}{ + {"andon_events_id": 1}, + }, + } - got := timeoutMsFromContext(ctx) - if got <= 0 { - t.Fatalf("有 deadline 时应返回正值,got=%d", got) - } -} - -func TestTimeoutMsFromContext_ExpiredDeadline(t *testing.T) { - ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(-time.Second)) - defer cancel() - - if got := timeoutMsFromContext(ctx); got != 1 { - t.Fatalf("过期 deadline 应返回 1,got=%d", got) + out, err := normalizeKingbaseAgentChangeSetByColumns(input, columns) + if err != nil { + t.Fatalf("normalizeKingbaseAgentChangeSetByColumns error: %v", err) + } 
+ + if _, ok := out.Inserts[0]["event_name"]; !ok { + t.Fatalf("expected insert to map \"event name\" -> \"event_name\"") + } + if _, ok := out.Inserts[0]["event name"]; ok { + t.Fatalf("unexpected insert key \"event name\" after normalization") + } + if _, ok := out.Updates[0].Values["event_name"]; !ok { + t.Fatalf("expected update values to map \"event name\" -> \"event_name\"") + } + if _, ok := out.Updates[0].Values["event name"]; ok { + t.Fatalf("unexpected update value key \"event name\" after normalization") } } diff --git a/internal/logger/logger.go b/internal/logger/logger.go index e224608..56cf583 100644 --- a/internal/logger/logger.go +++ b/internal/logger/logger.go @@ -14,8 +14,9 @@ import ( ) const ( - envLogDir = "GONAVI_LOG_DIR" - appDirName = "GoNavi" + envLogDir = "GONAVI_LOG_DIR" + appHiddenDir = ".GoNavi" + appLogDirName = "Logs" logFileName = "gonavi.log" logRotateMaxBytes = 10 * 1024 * 1024 // 10MB @@ -37,7 +38,7 @@ func Init() { defer logMu.Unlock() logPath = path logInst = log.New(out, "", log.Ldate|log.Ltime|log.Lmicroseconds) - logInst.Printf("[信息] 日志初始化完成,日志文件:%s", logPath) + logInst.Printf("[INFO] 日志初始化完成,日志文件:%s", logPath) }) } @@ -62,15 +63,15 @@ func Close() { } func Infof(format string, args ...any) { - printf("信息", format, args...) + printf("INFO", format, args...) } func Warnf(format string, args ...any) { - printf("警告", format, args...) + printf("WARN", format, args...) } func Errorf(format string, args ...any) { - printf("错误", format, args...) + printf("ERROR", format, args...) } func Error(err error, format string, args ...any) { @@ -115,37 +116,58 @@ func ErrorChain(err error) string { func printf(level string, format string, args ...any) { Init() logMu.Lock() + defer logMu.Unlock() inst := logInst - logMu.Unlock() if inst == nil { return } inst.Printf("[%s] %s", level, fmt.Sprintf(format, args...)) + if logFile != nil { + _ = logFile.Sync() + } } func initOutput() (string, io.Writer) { dir := strings.TrimSpace(os.Getenv(envLogDir)) if dir == "" { - base, err := os.UserConfigDir() - if err != nil || strings.TrimSpace(base) == "" { - base = os.TempDir() - } - dir = filepath.Join(base, appDirName, "logs") + dir = defaultLogDir() } + if path, writer, ok := openLogFile(dir); ok { + return path, writer + } + + fallbackDir := filepath.Join(os.TempDir(), appHiddenDir, appLogDirName) + if path, writer, ok := openLogFile(fallbackDir); ok { + return path, writer + } + + return "", os.Stderr +} + +func defaultLogDir() string { + home, err := os.UserHomeDir() + if err != nil || strings.TrimSpace(home) == "" { + return filepath.Join(os.TempDir(), appHiddenDir, appLogDirName) + } + return filepath.Join(home, appHiddenDir, appLogDirName) +} + +func openLogFile(dir string) (string, io.Writer, bool) { + if strings.TrimSpace(dir) == "" { + return "", nil, false + } if err := os.MkdirAll(dir, 0o755); err != nil { - return filepath.Join(dir, logFileName), os.Stderr + return "", nil, false } - path := filepath.Join(dir, logFileName) rotateIfNeeded(path, dir) - f, err := os.OpenFile(path, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0o644) if err != nil { - return path, os.Stderr + return "", nil, false } logFile = f - return path, f + return path, f, true } func rotateIfNeeded(path, dir string) { From 482a7fce2ecb68f768bb348956aa3aed8e37ed52 Mon Sep 17 00:00:00 2001 From: Syngnat Date: Thu, 12 Mar 2026 17:30:16 +0800 Subject: [PATCH 44/48] =?UTF-8?q?=F0=9F=94=A7=20fix(release/sidebar):=20?= =?UTF-8?q?=E7=BB=9F=E4=B8=80=E8=B7=A8=E5=B9=B3=E5=8F=B0UPX=E5=8E=8B?= 
=?UTF-8?q?=E7=BC=A9=E5=B9=B6=E4=BF=AE=E5=A4=8DPG=E5=87=BD=E6=95=B0?= =?UTF-8?q?=E5=88=97=E8=A1=A8=E6=9F=A5=E8=AF=A2=E5=85=BC=E5=AE=B9=E6=80=A7?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 构建脚本新增通用 UPX 压缩函数,覆盖 macOS、Linux、Windows 产物 - 本地打包改为强制压缩策略:未安装 upx、压缩失败或校验失败直接终止 - macOS 打包在签名前压缩 .app 主程序并执行 upx -t 校验 - Linux 打包在生成 tar.gz 前压缩可执行文件并执行 upx -t 校验 - GitHub Release 与测试构建流程补齐 macOS/Linux/Windows 的 upx 安装与压缩步骤 - PostgreSQL/PG-like 函数元数据查询增加多路兼容 SQL,修复函数列表不显示问题 - refs #221 - refs #222 --- .github/workflows/release.yml | 75 ++++++++++-- .../workflows/test-build-all-platforms.yml | 67 ++++++++++- build-release.sh | 112 ++++++++++++++++-- frontend/src/components/Sidebar.tsx | 15 ++- 4 files changed, 248 insertions(+), 21 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 7dd9b87..84f14a5 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -88,6 +88,24 @@ jobs: with: node-version: '20' + - name: Install UPX (macOS) + if: contains(matrix.platform, 'darwin') + run: | + brew install upx + upx --version + + - name: Install UPX (Windows) + if: contains(matrix.platform, 'windows') + shell: pwsh + run: | + choco install upx --no-progress -y + $upxCmd = Get-Command upx -ErrorAction SilentlyContinue + if ($null -eq $upxCmd) { + Write-Error "❌ 未检测到 upx,无法保证 Windows 产物经过压缩" + exit 1 + } + & upx --version + # Linux Dependencies (GTK3, WebKit2GTK required by Wails) - name: Install Linux Dependencies if: contains(matrix.platform, 'linux') @@ -102,6 +120,9 @@ jobs: sudo apt-get install -y libwebkit2gtk-4.0-dev fi + sudo apt-get install -y upx-ucl || sudo apt-get install -y upx + upx --version + # AppImage 运行/打包可能需要 FUSE2。不同发行版/版本包名不同,做兼容兜底。 sudo apt-get install -y libfuse2 || sudo apt-get install -y libfuse2t64 || true @@ -277,6 +298,23 @@ jobs: exit 1 fi APP_NAME=$(basename "$APP_PATH") + + APP_BIN=$(find "$APP_PATH/Contents/MacOS" -maxdepth 1 -type f | head -n 1) + if [ -z "$APP_BIN" ]; then + echo "❌ 未找到 macOS 应用主程序,无法进行 UPX 压缩!" + exit 1 + fi + BEFORE_BYTES=$(wc -c <"$APP_BIN" | tr -d '[:space:]') + echo "🗜️ 正在使用 UPX 压缩 macOS 可执行文件: $APP_BIN ..." + upx --best --lzma --force "$APP_BIN" + upx -t "$APP_BIN" + AFTER_BYTES=$(wc -c <"$APP_BIN" | tr -d '[:space:]') + if [ "$AFTER_BYTES" -lt "$BEFORE_BYTES" ]; then + SAVED_BYTES=$((BEFORE_BYTES - AFTER_BYTES)) + awk -v b="$BEFORE_BYTES" -v a="$AFTER_BYTES" -v s="$SAVED_BYTES" 'BEGIN { printf "✅ macOS UPX 压缩完成:%.2fMB -> %.2fMB,减少 %.2fMB\n", b/1024/1024, a/1024/1024, s/1024/1024 }' + else + awk -v b="$BEFORE_BYTES" -v a="$AFTER_BYTES" 'BEGIN { printf "ℹ️ macOS UPX 压缩完成:%.2fMB -> %.2fMB\n", b/1024/1024, a/1024/1024 }' + fi echo "🔏 正在进行 Ad-hoc 签名..." # 注意:Ad-hoc + hardened runtime(--options runtime)在未配置 entitlements 时, @@ -301,7 +339,7 @@ jobs: mv "$DMG_NAME" "../../$FINAL_NAME" # Windows Packaging - - name: Package Windows Portable Zip + - name: Package Windows EXE if: contains(matrix.platform, 'windows') shell: pwsh run: | @@ -312,7 +350,6 @@ jobs: } $target = "${{ matrix.build_name }}" $finalExeName = "GoNavi-$version-${{ matrix.os_name }}-${{ matrix.arch_name }}${{ matrix.artifact_suffix }}.exe" - $finalZipName = "GoNavi-$version-${{ matrix.os_name }}-${{ matrix.arch_name }}${{ matrix.artifact_suffix }}.zip" if (Test-Path "$target.exe") { $finalExe = "$target.exe" @@ -324,11 +361,25 @@ jobs: exit 1 } - Write-Host "📦 生成 Windows 可执行文件 $finalExeName..." 
- Copy-Item -LiteralPath $finalExe -Destination "..\\..\\$finalExeName" -Force + $upxCmd = Get-Command upx -ErrorAction SilentlyContinue + if ($null -eq $upxCmd) { + Write-Error "❌ 未找到 upx,无法保证 Windows 产物经过压缩" + exit 1 + } + $beforeBytes = (Get-Item -LiteralPath $finalExe).Length + Write-Host "🗜️ 使用 UPX 压缩 $finalExe ..." + & upx --best --lzma --force $finalExe | Out-Host + & upx -t $finalExe | Out-Host + $afterBytes = (Get-Item -LiteralPath $finalExe).Length + if ($afterBytes -lt $beforeBytes) { + $savedBytes = $beforeBytes - $afterBytes + Write-Host ("✅ UPX 压缩完成:{0:N2}MB -> {1:N2}MB,减少 {2:N2}MB" -f ($beforeBytes / 1MB), ($afterBytes / 1MB), ($savedBytes / 1MB)) + } else { + Write-Host ("ℹ️ UPX 压缩完成:{0:N2}MB -> {1:N2}MB" -f ($beforeBytes / 1MB), ($afterBytes / 1MB)) + } - Write-Host "📦 生成 Windows 压缩包 $finalZipName..." - Compress-Archive -LiteralPath $finalExe -DestinationPath "..\\..\\$finalZipName" -Force + Write-Host "📦 输出 Windows 可执行文件 $finalExeName..." + Copy-Item -LiteralPath $finalExe -Destination "..\\..\\$finalExeName" -Force # Linux Packaging (tar.gz and AppImage) - name: Package Linux @@ -347,6 +398,17 @@ jobs: fi chmod +x "$TARGET" + BEFORE_BYTES=$(wc -c <"$TARGET" | tr -d '[:space:]') + echo "🗜️ 正在使用 UPX 压缩 Linux 可执行文件: $TARGET ..." + upx --best --lzma --force "$TARGET" + upx -t "$TARGET" + AFTER_BYTES=$(wc -c <"$TARGET" | tr -d '[:space:]') + if [ "$AFTER_BYTES" -lt "$BEFORE_BYTES" ]; then + SAVED_BYTES=$((BEFORE_BYTES - AFTER_BYTES)) + awk -v b="$BEFORE_BYTES" -v a="$AFTER_BYTES" -v s="$SAVED_BYTES" 'BEGIN { printf "✅ Linux UPX 压缩完成:%.2fMB -> %.2fMB,减少 %.2fMB\n", b/1024/1024, a/1024/1024, s/1024/1024 }' + else + awk -v b="$BEFORE_BYTES" -v a="$AFTER_BYTES" 'BEGIN { printf "ℹ️ Linux UPX 压缩完成:%.2fMB -> %.2fMB\n", b/1024/1024, a/1024/1024 }' + fi # 1. Create tar.gz echo "📦 正在打包 $TAR_NAME..." @@ -419,7 +481,6 @@ jobs: path: | GoNavi-*.dmg GoNavi-*.exe - GoNavi-*.zip GoNavi-*.tar.gz GoNavi-*.AppImage drivers/** diff --git a/.github/workflows/test-build-all-platforms.yml b/.github/workflows/test-build-all-platforms.yml index 29ffe9d..6646ece 100644 --- a/.github/workflows/test-build-all-platforms.yml +++ b/.github/workflows/test-build-all-platforms.yml @@ -93,6 +93,24 @@ jobs: with: node-version: '20' + - name: Install UPX (macOS) + if: contains(matrix.platform, 'darwin') + run: | + brew install upx + upx --version + + - name: Install UPX (Windows) + if: contains(matrix.platform, 'windows') + shell: pwsh + run: | + choco install upx --no-progress -y + $upxCmd = Get-Command upx -ErrorAction SilentlyContinue + if ($null -eq $upxCmd) { + Write-Error "❌ 未检测到 upx,无法保证 Windows 测试产物经过压缩" + exit 1 + } + & upx --version + - name: Install Linux Dependencies if: contains(matrix.platform, 'linux') run: | @@ -105,6 +123,9 @@ jobs: sudo apt-get install -y libwebkit2gtk-4.0-dev fi + sudo apt-get install -y upx-ucl || sudo apt-get install -y upx + upx --version + sudo apt-get install -y libfuse2 || sudo apt-get install -y libfuse2t64 || true LINUXDEPLOY_URL="https://github.com/linuxdeploy/linuxdeploy/releases/download/continuous/linuxdeploy-x86_64.AppImage" @@ -242,6 +263,22 @@ jobs: exit 1 fi APP_NAME=$(basename "$APP_PATH") + APP_BIN=$(find "$APP_PATH/Contents/MacOS" -maxdepth 1 -type f | head -n 1) + if [ -z "$APP_BIN" ]; then + echo "未找到 macOS 应用主程序,无法进行 UPX 压缩" + exit 1 + fi + BEFORE_BYTES=$(wc -c <"$APP_BIN" | tr -d '[:space:]') + echo "🗜️ 使用 UPX 压缩 macOS 可执行文件: $APP_BIN ..." 
+ upx --best --lzma --force "$APP_BIN" + upx -t "$APP_BIN" + AFTER_BYTES=$(wc -c <"$APP_BIN" | tr -d '[:space:]') + if [ "$AFTER_BYTES" -lt "$BEFORE_BYTES" ]; then + SAVED_BYTES=$((BEFORE_BYTES - AFTER_BYTES)) + awk -v b="$BEFORE_BYTES" -v a="$AFTER_BYTES" -v s="$SAVED_BYTES" 'BEGIN { printf "✅ macOS UPX 压缩完成:%.2fMB -> %.2fMB,减少 %.2fMB\n", b/1024/1024, a/1024/1024, s/1024/1024 }' + else + awk -v b="$BEFORE_BYTES" -v a="$AFTER_BYTES" 'BEGIN { printf "ℹ️ macOS UPX 压缩完成:%.2fMB -> %.2fMB\n", b/1024/1024, a/1024/1024 }' + fi codesign --force --deep --sign - "$APP_NAME" ZIP_NAME="GoNavi-${LABEL}-${{ matrix.os_name }}-${{ matrix.arch_name }}-run${GITHUB_RUN_NUMBER}.zip" DMG_NAME="GoNavi-${LABEL}-${{ matrix.os_name }}-${{ matrix.arch_name }}-run${GITHUB_RUN_NUMBER}.dmg" @@ -270,7 +307,6 @@ jobs: Set-Location build/bin $target = "${{ matrix.build_name }}" $finalExeName = "GoNavi-$label-${{ matrix.os_name }}-${{ matrix.arch_name }}-run$env:GITHUB_RUN_NUMBER.exe" - $finalZipName = "GoNavi-$label-${{ matrix.os_name }}-${{ matrix.arch_name }}-run$env:GITHUB_RUN_NUMBER.zip" if (Test-Path "$target.exe") { $finalExe = "$target.exe" } elseif (Test-Path "$target") { @@ -280,11 +316,25 @@ jobs: Write-Error "未找到构建产物 '$target'" exit 1 } + $upxCmd = Get-Command upx -ErrorAction SilentlyContinue + if ($null -eq $upxCmd) { + Write-Error "❌ 未找到 upx,无法保证 Windows 测试产物经过压缩" + exit 1 + } + $beforeBytes = (Get-Item -LiteralPath $finalExe).Length + Write-Host "🗜️ 使用 UPX 压缩 $finalExe ..." + & upx --best --lzma --force $finalExe | Out-Host + & upx -t $finalExe | Out-Host + $afterBytes = (Get-Item -LiteralPath $finalExe).Length + if ($afterBytes -lt $beforeBytes) { + $savedBytes = $beforeBytes - $afterBytes + Write-Host ("✅ UPX 压缩完成:{0:N2}MB -> {1:N2}MB,减少 {2:N2}MB" -f ($beforeBytes / 1MB), ($afterBytes / 1MB), ($savedBytes / 1MB)) + } else { + Write-Host ("ℹ️ UPX 压缩完成:{0:N2}MB -> {1:N2}MB" -f ($beforeBytes / 1MB), ($afterBytes / 1MB)) + } New-Item -ItemType Directory -Force -Path ..\..\artifacts | Out-Null Copy-Item -LiteralPath $finalExe -Destination "..\..\artifacts\$finalExeName" -Force - Compress-Archive -LiteralPath $finalExe -DestinationPath "..\..\artifacts\$finalZipName" -Force Get-FileHash "..\..\artifacts\$finalExeName" -Algorithm SHA256 | ForEach-Object { "{0} *{1}" -f $_.Hash.ToLower(), (Split-Path $_.Path -Leaf) } | Out-File "..\..\artifacts\$finalExeName.sha256" -Encoding ascii - Get-FileHash "..\..\artifacts\$finalZipName" -Algorithm SHA256 | ForEach-Object { "{0} *{1}" -f $_.Hash.ToLower(), (Split-Path $_.Path -Leaf) } | Out-File "..\..\artifacts\$finalZipName.sha256" -Encoding ascii - name: Package Linux if: contains(matrix.platform, 'linux') @@ -306,6 +356,17 @@ jobs: exit 1 fi chmod +x "$TARGET" + BEFORE_BYTES=$(wc -c <"$TARGET" | tr -d '[:space:]') + echo "🗜️ 使用 UPX 压缩 Linux 可执行文件: $TARGET ..." 
+ upx --best --lzma --force "$TARGET" + upx -t "$TARGET" + AFTER_BYTES=$(wc -c <"$TARGET" | tr -d '[:space:]') + if [ "$AFTER_BYTES" -lt "$BEFORE_BYTES" ]; then + SAVED_BYTES=$((BEFORE_BYTES - AFTER_BYTES)) + awk -v b="$BEFORE_BYTES" -v a="$AFTER_BYTES" -v s="$SAVED_BYTES" 'BEGIN { printf "✅ Linux UPX 压缩完成:%.2fMB -> %.2fMB,减少 %.2fMB\n", b/1024/1024, a/1024/1024, s/1024/1024 }' + else + awk -v b="$BEFORE_BYTES" -v a="$AFTER_BYTES" 'BEGIN { printf "ℹ️ Linux UPX 压缩完成:%.2fMB -> %.2fMB\n", b/1024/1024, a/1024/1024 }' + fi tar -czvf "../../artifacts/$TAR_NAME" "$TARGET" sha256sum "../../artifacts/$TAR_NAME" > "../../artifacts/$TAR_NAME.sha256" diff --git a/build-release.sh b/build-release.sh index a36f835..22fe7c8 100755 --- a/build-release.sh +++ b/build-release.sh @@ -20,6 +20,70 @@ RED='\033[0;31m' YELLOW='\033[1;33m' NC='\033[0m' +get_file_size_bytes() { + local target="$1" + if [ ! -f "$target" ]; then + echo 0 + return + fi + if stat -f%z "$target" >/dev/null 2>&1; then + stat -f%z "$target" + return + fi + if stat -c%s "$target" >/dev/null 2>&1; then + stat -c%s "$target" + return + fi + wc -c <"$target" | tr -d '[:space:]' +} + +format_size_mb() { + local bytes="${1:-0}" + awk -v b="$bytes" 'BEGIN { printf "%.2fMB", b / 1024 / 1024 }' +} + +try_compress_binary_with_upx() { + local exe_path="$1" + local label="$2" + if [ ! -f "$exe_path" ]; then + echo -e "${RED} ❌ 未找到 ${label} 文件:$exe_path${NC}" + exit 1 + fi + + if ! command -v upx >/dev/null 2>&1; then + echo -e "${RED} ❌ 未找到 upx,${label} 必须进行压缩后才能继续打包。${NC}" + case "$(uname -s)" in + Darwin) + echo " 安装命令: brew install upx" + ;; + Linux) + echo " 安装命令: sudo apt-get install -y upx-ucl (或对应发行版包管理器)" + ;; + esac + exit 1 + fi + + local before_bytes after_bytes + before_bytes=$(get_file_size_bytes "$exe_path") + echo " 🗜️ 正在使用 UPX 压缩 ${label}..." + if upx --best --lzma --force "$exe_path" >/dev/null 2>&1; then + if ! upx -t "$exe_path" >/dev/null 2>&1; then + echo -e "${RED} ❌ UPX 校验失败:${label}${NC}" + exit 1 + fi + after_bytes=$(get_file_size_bytes "$exe_path") + if [ "$after_bytes" -lt "$before_bytes" ]; then + local saved_bytes=$((before_bytes - after_bytes)) + echo " ✅ UPX 压缩完成: $(format_size_mb "$before_bytes") -> $(format_size_mb "$after_bytes"),减少 $(format_size_mb "$saved_bytes")" + else + echo " ℹ️ UPX 压缩完成: $(format_size_mb "$before_bytes") -> $(format_size_mb "$after_bytes")" + fi + else + echo -e "${RED} ❌ UPX 压缩失败:${label}${NC}" + exit 1 + fi +} + MAC_VOLICON_PATH="build/darwin/icon.icns" if [ ! -f "$MAC_VOLICON_PATH" ]; then MAC_VOLICON_PATH="" @@ -41,6 +105,14 @@ if [ $? -eq 0 ]; then # 移动 .app 到 dist mv "$APP_SRC" "$DIST_DIR/$APP_DEST_NAME" + + APP_BIN_PATH=$(find "$DIST_DIR/$APP_DEST_NAME/Contents/MacOS" -maxdepth 1 -type f -print -quit) + if [ -n "$APP_BIN_PATH" ] && [ -f "$APP_BIN_PATH" ]; then + try_compress_binary_with_upx "$APP_BIN_PATH" "macOS arm64 应用主程序" + else + echo -e "${RED} ❌ 未找到 macOS arm64 主程序文件,无法执行 UPX 压缩。${NC}" + exit 1 + fi # Ad-hoc 代码签名(无 Apple Developer 账号时防止 Gatekeeper 报已损坏) echo " 🔏 正在对 .app 进行 ad-hoc 签名 (arm64)..." @@ -140,6 +212,14 @@ if [ $? 
-eq 0 ]; then DMG_NAME="${APP_NAME}-${VERSION}-mac-amd64.dmg" mv "$APP_SRC" "$DIST_DIR/$APP_DEST_NAME" + + APP_BIN_PATH=$(find "$DIST_DIR/$APP_DEST_NAME/Contents/MacOS" -maxdepth 1 -type f -print -quit) + if [ -n "$APP_BIN_PATH" ] && [ -f "$APP_BIN_PATH" ]; then + try_compress_binary_with_upx "$APP_BIN_PATH" "macOS amd64 应用主程序" + else + echo -e "${RED} ❌ 未找到 macOS amd64 主程序文件,无法执行 UPX 压缩。${NC}" + exit 1 + fi # Ad-hoc 代码签名 echo " 🔏 正在对 .app 进行 ad-hoc 签名 (amd64)..." @@ -229,7 +309,9 @@ echo -e "${GREEN}🪟 正在构建 Windows (amd64)...${NC}" if command -v x86_64-w64-mingw32-gcc &> /dev/null; then wails build -platform windows/amd64 -clean -ldflags "$LDFLAGS" if [ $? -eq 0 ]; then - mv "$BUILD_BIN_DIR/${DEFAULT_BINARY_NAME}.exe" "$DIST_DIR/${APP_NAME}-${VERSION}-windows-amd64.exe" + TARGET_EXE="$DIST_DIR/${APP_NAME}-${VERSION}-windows-amd64.exe" + mv "$BUILD_BIN_DIR/${DEFAULT_BINARY_NAME}.exe" "$TARGET_EXE" + try_compress_binary_with_upx "$TARGET_EXE" "Windows amd64 可执行文件" echo " ✅ 已生成 ${APP_NAME}-${VERSION}-windows-amd64.exe" else echo -e "${RED} ❌ Windows amd64 构建失败。${NC}" @@ -243,7 +325,9 @@ echo -e "${GREEN}🪟 正在构建 Windows (arm64)...${NC}" if command -v aarch64-w64-mingw32-gcc &> /dev/null; then wails build -platform windows/arm64 -clean -ldflags "$LDFLAGS" if [ $? -eq 0 ]; then - mv "$BUILD_BIN_DIR/${DEFAULT_BINARY_NAME}.exe" "$DIST_DIR/${APP_NAME}-${VERSION}-windows-arm64.exe" + TARGET_EXE="$DIST_DIR/${APP_NAME}-${VERSION}-windows-arm64.exe" + mv "$BUILD_BIN_DIR/${DEFAULT_BINARY_NAME}.exe" "$TARGET_EXE" + try_compress_binary_with_upx "$TARGET_EXE" "Windows arm64 可执行文件" echo " ✅ 已生成 ${APP_NAME}-${VERSION}-windows-arm64.exe" else echo -e "${RED} ❌ Windows arm64 构建失败。${NC}" @@ -263,8 +347,10 @@ if [ "$CURRENT_OS" = "Linux" ] && [ "$CURRENT_ARCH" = "x86_64" ]; then # 本机 Linux amd64,直接构建 wails build -platform linux/amd64 -clean -ldflags "$LDFLAGS" if [ $? -eq 0 ]; then - mv "$BUILD_BIN_DIR/${DEFAULT_BINARY_NAME}" "$DIST_DIR/${APP_NAME}-${VERSION}-linux-amd64" - chmod +x "$DIST_DIR/${APP_NAME}-${VERSION}-linux-amd64" + TARGET_LINUX_BIN="$DIST_DIR/${APP_NAME}-${VERSION}-linux-amd64" + mv "$BUILD_BIN_DIR/${DEFAULT_BINARY_NAME}" "$TARGET_LINUX_BIN" + chmod +x "$TARGET_LINUX_BIN" + try_compress_binary_with_upx "$TARGET_LINUX_BIN" "Linux amd64 可执行文件" # 打包为 tar.gz cd "$DIST_DIR" tar -czvf "${APP_NAME}-${VERSION}-linux-amd64.tar.gz" "${APP_NAME}-${VERSION}-linux-amd64" @@ -281,8 +367,10 @@ elif command -v x86_64-linux-gnu-gcc &> /dev/null; then export CGO_ENABLED=1 wails build -platform linux/amd64 -clean -ldflags "$LDFLAGS" if [ $? -eq 0 ]; then - mv "$BUILD_BIN_DIR/${DEFAULT_BINARY_NAME}" "$DIST_DIR/${APP_NAME}-${VERSION}-linux-amd64" - chmod +x "$DIST_DIR/${APP_NAME}-${VERSION}-linux-amd64" + TARGET_LINUX_BIN="$DIST_DIR/${APP_NAME}-${VERSION}-linux-amd64" + mv "$BUILD_BIN_DIR/${DEFAULT_BINARY_NAME}" "$TARGET_LINUX_BIN" + chmod +x "$TARGET_LINUX_BIN" + try_compress_binary_with_upx "$TARGET_LINUX_BIN" "Linux amd64 可执行文件" cd "$DIST_DIR" tar -czvf "${APP_NAME}-${VERSION}-linux-amd64.tar.gz" "${APP_NAME}-${VERSION}-linux-amd64" rm "${APP_NAME}-${VERSION}-linux-amd64" @@ -303,8 +391,10 @@ if [ "$CURRENT_OS" = "Linux" ] && [ "$CURRENT_ARCH" = "aarch64" ]; then # 本机 Linux arm64,直接构建 wails build -platform linux/arm64 -clean -ldflags "$LDFLAGS" if [ $? 
-eq 0 ]; then - mv "$BUILD_BIN_DIR/${DEFAULT_BINARY_NAME}" "$DIST_DIR/${APP_NAME}-${VERSION}-linux-arm64" - chmod +x "$DIST_DIR/${APP_NAME}-${VERSION}-linux-arm64" + TARGET_LINUX_BIN="$DIST_DIR/${APP_NAME}-${VERSION}-linux-arm64" + mv "$BUILD_BIN_DIR/${DEFAULT_BINARY_NAME}" "$TARGET_LINUX_BIN" + chmod +x "$TARGET_LINUX_BIN" + try_compress_binary_with_upx "$TARGET_LINUX_BIN" "Linux arm64 可执行文件" cd "$DIST_DIR" tar -czvf "${APP_NAME}-${VERSION}-linux-arm64.tar.gz" "${APP_NAME}-${VERSION}-linux-arm64" rm "${APP_NAME}-${VERSION}-linux-arm64" @@ -320,8 +410,10 @@ elif command -v aarch64-linux-gnu-gcc &> /dev/null; then export CGO_ENABLED=1 wails build -platform linux/arm64 -clean -ldflags "$LDFLAGS" if [ $? -eq 0 ]; then - mv "$BUILD_BIN_DIR/${DEFAULT_BINARY_NAME}" "$DIST_DIR/${APP_NAME}-${VERSION}-linux-arm64" - chmod +x "$DIST_DIR/${APP_NAME}-${VERSION}-linux-arm64" + TARGET_LINUX_BIN="$DIST_DIR/${APP_NAME}-${VERSION}-linux-arm64" + mv "$BUILD_BIN_DIR/${DEFAULT_BINARY_NAME}" "$TARGET_LINUX_BIN" + chmod +x "$TARGET_LINUX_BIN" + try_compress_binary_with_upx "$TARGET_LINUX_BIN" "Linux arm64 可执行文件" cd "$DIST_DIR" tar -czvf "${APP_NAME}-${VERSION}-linux-arm64.tar.gz" "${APP_NAME}-${VERSION}-linux-arm64" rm "${APP_NAME}-${VERSION}-linux-arm64" diff --git a/frontend/src/components/Sidebar.tsx b/frontend/src/components/Sidebar.tsx index 3a31be4..9fc732b 100644 --- a/frontend/src/components/Sidebar.tsx +++ b/frontend/src/components/Sidebar.tsx @@ -792,7 +792,20 @@ const Sidebar: React.FC<{ onEditConnection?: (conn: SavedConnection) => void }> case 'kingbase': case 'highgo': case 'vastbase': - return [{ sql: `SELECT n.nspname AS schema_name, p.proname AS routine_name, CASE WHEN p.prokind = 'p' THEN 'PROCEDURE' ELSE 'FUNCTION' END AS routine_type FROM pg_proc p JOIN pg_namespace n ON p.pronamespace = n.oid WHERE n.nspname NOT IN ('pg_catalog', 'information_schema') AND n.nspname NOT LIKE 'pg_%' ORDER BY n.nspname, routine_type, p.proname` }]; + return normalizeMetadataQuerySpecs([ + { + // PostgreSQL 11+ / 部分 PG-like:通过 prokind 区分 FUNCTION/PROCEDURE + sql: `SELECT n.nspname AS schema_name, p.proname AS routine_name, CASE WHEN p.prokind = 'p' THEN 'PROCEDURE' ELSE 'FUNCTION' END AS routine_type FROM pg_proc p JOIN pg_namespace n ON p.pronamespace = n.oid WHERE n.nspname NOT IN ('pg_catalog', 'information_schema') AND n.nspname NOT LIKE 'pg_%' ORDER BY n.nspname, routine_type, p.proname`, + }, + { + // PostgreSQL 10 / 不支持 prokind 的兼容路径 + sql: `SELECT r.routine_schema AS schema_name, r.routine_name AS routine_name, COALESCE(NULLIF(UPPER(r.routine_type), ''), 'FUNCTION') AS routine_type FROM information_schema.routines r WHERE r.routine_schema NOT IN ('pg_catalog', 'information_schema') AND r.routine_schema NOT LIKE 'pg_%' ORDER BY r.routine_schema, routine_type, r.routine_name`, + }, + { + // 最后兜底:仅函数列表,确保 prokind/routines 视图异常时仍可展示 + sql: `SELECT n.nspname AS schema_name, p.proname AS routine_name, 'FUNCTION' AS routine_type FROM pg_proc p JOIN pg_namespace n ON p.pronamespace = n.oid WHERE n.nspname NOT IN ('pg_catalog', 'information_schema') AND n.nspname NOT LIKE 'pg_%' ORDER BY n.nspname, p.proname`, + }, + ]); case 'sqlserver': { const safeDb = quoteSqlServerIdentifier(dbName || 'master'); return [{ sql: `SELECT s.name AS schema_name, o.name AS routine_name, CASE o.type WHEN 'P' THEN 'PROCEDURE' WHEN 'FN' THEN 'FUNCTION' WHEN 'IF' THEN 'FUNCTION' WHEN 'TF' THEN 'FUNCTION' END AS routine_type FROM ${safeDb}.sys.objects o JOIN ${safeDb}.sys.schemas s ON o.schema_id = s.schema_id WHERE o.type IN 
('P','FN','IF','TF') ORDER BY o.type, s.name, o.name` }]; From e26a456eaece355ec0922a7e85292a689581a831 Mon Sep 17 00:00:00 2001 From: Syngnat Date: Thu, 12 Mar 2026 17:54:09 +0800 Subject: [PATCH 45/48] =?UTF-8?q?=F0=9F=94=A7=20fix(release/ci):=20?= =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E8=B7=A8=E5=B9=B3=E5=8F=B0UPX=E5=85=BC?= =?UTF-8?q?=E5=AE=B9=E5=B9=B6=E5=A4=84=E7=90=86Windows=20ARM64=E6=89=93?= =?UTF-8?q?=E5=8C=85=E5=A4=B1=E8=B4=A5?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - CI 工作流统一启用 Node24 JavaScript 运行时,消除 Node20 退役告警干扰 - macOS 打包阶段为 UPX 增加 --force-macos,修复 Mach-O 压缩失败 - Windows 打包按架构分流:arm64 跳过 UPX 并保留原始 EXE,amd64 继续强制压缩 - Windows 压缩流程新增 $LASTEXITCODE 显式校验,避免命令失败被误判为成功 - 本地 build-release.sh 同步 macOS/Windows 的 UPX 兼容策略与错误处理逻辑 --- .github/workflows/release.yml | 47 +++++++++++++------ .../workflows/test-build-all-platforms.yml | 47 +++++++++++++------ .github/workflows/test-macos-build.yml | 3 ++ build-release.sh | 20 ++++++-- 4 files changed, 83 insertions(+), 34 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 84f14a5..62fe17e 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -8,6 +8,9 @@ on: permissions: contents: write +env: + FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true" + jobs: # Phase 1: Build in parallel and output artifacts build: @@ -306,7 +309,7 @@ jobs: fi BEFORE_BYTES=$(wc -c <"$APP_BIN" | tr -d '[:space:]') echo "🗜️ 正在使用 UPX 压缩 macOS 可执行文件: $APP_BIN ..." - upx --best --lzma --force "$APP_BIN" + upx --best --lzma --force --force-macos "$APP_BIN" upx -t "$APP_BIN" AFTER_BYTES=$(wc -c <"$APP_BIN" | tr -d '[:space:]') if [ "$AFTER_BYTES" -lt "$BEFORE_BYTES" ]; then @@ -361,21 +364,35 @@ jobs: exit 1 } - $upxCmd = Get-Command upx -ErrorAction SilentlyContinue - if ($null -eq $upxCmd) { - Write-Error "❌ 未找到 upx,无法保证 Windows 产物经过压缩" - exit 1 - } - $beforeBytes = (Get-Item -LiteralPath $finalExe).Length - Write-Host "🗜️ 使用 UPX 压缩 $finalExe ..." - & upx --best --lzma --force $finalExe | Out-Host - & upx -t $finalExe | Out-Host - $afterBytes = (Get-Item -LiteralPath $finalExe).Length - if ($afterBytes -lt $beforeBytes) { - $savedBytes = $beforeBytes - $afterBytes - Write-Host ("✅ UPX 压缩完成:{0:N2}MB -> {1:N2}MB,减少 {2:N2}MB" -f ($beforeBytes / 1MB), ($afterBytes / 1MB), ($savedBytes / 1MB)) + $isArm64Target = "${{ matrix.arch_name }}".ToLowerInvariant() -eq "arm64" + if ($isArm64Target) { + Write-Warning "⚠️ UPX 当前不支持 win64/arm64,跳过压缩并保留原始 EXE。" + $LASTEXITCODE = 0 } else { - Write-Host ("ℹ️ UPX 压缩完成:{0:N2}MB -> {1:N2}MB" -f ($beforeBytes / 1MB), ($afterBytes / 1MB)) + $upxCmd = Get-Command upx -ErrorAction SilentlyContinue + if ($null -eq $upxCmd) { + Write-Error "❌ 未找到 upx,无法保证 Windows 产物经过压缩" + exit 1 + } + $beforeBytes = (Get-Item -LiteralPath $finalExe).Length + Write-Host "🗜️ 使用 UPX 压缩 $finalExe ..." 
+ & upx --best --lzma --force $finalExe | Out-Host + if ($LASTEXITCODE -ne 0) { + Write-Error "❌ UPX 压缩失败($LASTEXITCODE)" + exit 1 + } + & upx -t $finalExe | Out-Host + if ($LASTEXITCODE -ne 0) { + Write-Error "❌ UPX 校验失败($LASTEXITCODE)" + exit 1 + } + $afterBytes = (Get-Item -LiteralPath $finalExe).Length + if ($afterBytes -lt $beforeBytes) { + $savedBytes = $beforeBytes - $afterBytes + Write-Host ("✅ UPX 压缩完成:{0:N2}MB -> {1:N2}MB,减少 {2:N2}MB" -f ($beforeBytes / 1MB), ($afterBytes / 1MB), ($savedBytes / 1MB)) + } else { + Write-Host ("ℹ️ UPX 压缩完成:{0:N2}MB -> {1:N2}MB" -f ($beforeBytes / 1MB), ($afterBytes / 1MB)) + } } Write-Host "📦 输出 Windows 可执行文件 $finalExeName..." diff --git a/.github/workflows/test-build-all-platforms.yml b/.github/workflows/test-build-all-platforms.yml index 6646ece..d978dfe 100644 --- a/.github/workflows/test-build-all-platforms.yml +++ b/.github/workflows/test-build-all-platforms.yml @@ -11,6 +11,9 @@ on: permissions: contents: read +env: + FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true" + concurrency: group: test-build-${{ github.ref }} cancel-in-progress: false @@ -270,7 +273,7 @@ jobs: fi BEFORE_BYTES=$(wc -c <"$APP_BIN" | tr -d '[:space:]') echo "🗜️ 使用 UPX 压缩 macOS 可执行文件: $APP_BIN ..." - upx --best --lzma --force "$APP_BIN" + upx --best --lzma --force --force-macos "$APP_BIN" upx -t "$APP_BIN" AFTER_BYTES=$(wc -c <"$APP_BIN" | tr -d '[:space:]') if [ "$AFTER_BYTES" -lt "$BEFORE_BYTES" ]; then @@ -316,21 +319,35 @@ jobs: Write-Error "未找到构建产物 '$target'" exit 1 } - $upxCmd = Get-Command upx -ErrorAction SilentlyContinue - if ($null -eq $upxCmd) { - Write-Error "❌ 未找到 upx,无法保证 Windows 测试产物经过压缩" - exit 1 - } - $beforeBytes = (Get-Item -LiteralPath $finalExe).Length - Write-Host "🗜️ 使用 UPX 压缩 $finalExe ..." - & upx --best --lzma --force $finalExe | Out-Host - & upx -t $finalExe | Out-Host - $afterBytes = (Get-Item -LiteralPath $finalExe).Length - if ($afterBytes -lt $beforeBytes) { - $savedBytes = $beforeBytes - $afterBytes - Write-Host ("✅ UPX 压缩完成:{0:N2}MB -> {1:N2}MB,减少 {2:N2}MB" -f ($beforeBytes / 1MB), ($afterBytes / 1MB), ($savedBytes / 1MB)) + $isArm64Target = "${{ matrix.arch_name }}".ToLowerInvariant() -eq "arm64" + if ($isArm64Target) { + Write-Warning "⚠️ UPX 当前不支持 win64/arm64,跳过压缩并保留原始 EXE。" + $LASTEXITCODE = 0 } else { - Write-Host ("ℹ️ UPX 压缩完成:{0:N2}MB -> {1:N2}MB" -f ($beforeBytes / 1MB), ($afterBytes / 1MB)) + $upxCmd = Get-Command upx -ErrorAction SilentlyContinue + if ($null -eq $upxCmd) { + Write-Error "❌ 未找到 upx,无法保证 Windows 测试产物经过压缩" + exit 1 + } + $beforeBytes = (Get-Item -LiteralPath $finalExe).Length + Write-Host "🗜️ 使用 UPX 压缩 $finalExe ..." 
+ & upx --best --lzma --force $finalExe | Out-Host + if ($LASTEXITCODE -ne 0) { + Write-Error "❌ UPX 压缩失败($LASTEXITCODE)" + exit 1 + } + & upx -t $finalExe | Out-Host + if ($LASTEXITCODE -ne 0) { + Write-Error "❌ UPX 校验失败($LASTEXITCODE)" + exit 1 + } + $afterBytes = (Get-Item -LiteralPath $finalExe).Length + if ($afterBytes -lt $beforeBytes) { + $savedBytes = $beforeBytes - $afterBytes + Write-Host ("✅ UPX 压缩完成:{0:N2}MB -> {1:N2}MB,减少 {2:N2}MB" -f ($beforeBytes / 1MB), ($afterBytes / 1MB), ($savedBytes / 1MB)) + } else { + Write-Host ("ℹ️ UPX 压缩完成:{0:N2}MB -> {1:N2}MB" -f ($beforeBytes / 1MB), ($afterBytes / 1MB)) + } } New-Item -ItemType Directory -Force -Path ..\..\artifacts | Out-Null Copy-Item -LiteralPath $finalExe -Destination "..\..\artifacts\$finalExeName" -Force diff --git a/.github/workflows/test-macos-build.yml b/.github/workflows/test-macos-build.yml index 1dd01af..d022e91 100644 --- a/.github/workflows/test-macos-build.yml +++ b/.github/workflows/test-macos-build.yml @@ -16,6 +16,9 @@ on: permissions: contents: read +env: + FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true" + jobs: build-macos: name: Build macOS ${{ matrix.arch }} diff --git a/build-release.sh b/build-release.sh index 22fe7c8..8e4a4a7 100755 --- a/build-release.sh +++ b/build-release.sh @@ -45,6 +45,7 @@ format_size_mb() { try_compress_binary_with_upx() { local exe_path="$1" local label="$2" + local is_macos_binary="${3:-false}" if [ ! -f "$exe_path" ]; then echo -e "${RED} ❌ 未找到 ${label} 文件:$exe_path${NC}" exit 1 @@ -63,10 +64,21 @@ try_compress_binary_with_upx() { exit 1 fi + local upx_cmd=(upx --best --lzma --force) + if [ "$is_macos_binary" = "true" ]; then + if upx --help 2>&1 | grep -q -- "--force-macos"; then + upx_cmd+=(--force-macos) + else + echo -e "${RED} ❌ 当前 upx 不支持 --force-macos,无法压缩 ${label}。${NC}" + echo " 请升级 upx 到支持 macOS 压缩的版本(UPX 5+)。" + exit 1 + fi + fi + local before_bytes after_bytes before_bytes=$(get_file_size_bytes "$exe_path") echo " 🗜️ 正在使用 UPX 压缩 ${label}..." - if upx --best --lzma --force "$exe_path" >/dev/null 2>&1; then + if "${upx_cmd[@]}" "$exe_path" >/dev/null 2>&1; then if ! upx -t "$exe_path" >/dev/null 2>&1; then echo -e "${RED} ❌ UPX 校验失败:${label}${NC}" exit 1 @@ -108,7 +120,7 @@ if [ $? -eq 0 ]; then APP_BIN_PATH=$(find "$DIST_DIR/$APP_DEST_NAME/Contents/MacOS" -maxdepth 1 -type f -print -quit) if [ -n "$APP_BIN_PATH" ] && [ -f "$APP_BIN_PATH" ]; then - try_compress_binary_with_upx "$APP_BIN_PATH" "macOS arm64 应用主程序" + try_compress_binary_with_upx "$APP_BIN_PATH" "macOS arm64 应用主程序" "true" else echo -e "${RED} ❌ 未找到 macOS arm64 主程序文件,无法执行 UPX 压缩。${NC}" exit 1 @@ -215,7 +227,7 @@ if [ $? -eq 0 ]; then APP_BIN_PATH=$(find "$DIST_DIR/$APP_DEST_NAME/Contents/MacOS" -maxdepth 1 -type f -print -quit) if [ -n "$APP_BIN_PATH" ] && [ -f "$APP_BIN_PATH" ]; then - try_compress_binary_with_upx "$APP_BIN_PATH" "macOS amd64 应用主程序" + try_compress_binary_with_upx "$APP_BIN_PATH" "macOS amd64 应用主程序" "true" else echo -e "${RED} ❌ 未找到 macOS amd64 主程序文件,无法执行 UPX 压缩。${NC}" exit 1 @@ -327,7 +339,7 @@ if command -v aarch64-w64-mingw32-gcc &> /dev/null; then if [ $? 
-eq 0 ]; then TARGET_EXE="$DIST_DIR/${APP_NAME}-${VERSION}-windows-arm64.exe" mv "$BUILD_BIN_DIR/${DEFAULT_BINARY_NAME}.exe" "$TARGET_EXE" - try_compress_binary_with_upx "$TARGET_EXE" "Windows arm64 可执行文件" + echo -e "${YELLOW} ⚠️ 当前 UPX 不支持 win64/arm64,跳过 Windows arm64 压缩。${NC}" echo " ✅ 已生成 ${APP_NAME}-${VERSION}-windows-arm64.exe" else echo -e "${RED} ❌ Windows arm64 构建失败。${NC}" From d467322ebef532964a5f3cac070a4c7e445c796b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=A8=E5=9B=BD=E9=94=8B?= Date: Thu, 12 Mar 2026 19:00:21 +0800 Subject: [PATCH 46/48] =?UTF-8?q?=F0=9F=94=A7=20fix(release/macos):=20?= =?UTF-8?q?=E7=A7=BB=E9=99=A4=20macOS=20=E6=89=93=E5=8C=85=E9=93=BE?= =?UTF-8?q?=E8=B7=AF=E7=9A=84=20UPX=20=E5=8E=8B=E7=BC=A9=E9=80=BB=E8=BE=91?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 删除 release 与手动测试工作流中的 macOS UPX 安装与压缩步骤 - build-release.sh 不再对 macOS arm64/amd64 主程序执行 UPX - 保留 Windows 与 Linux 的 UPX 压缩策略 --- .github/workflows/release.yml | 20 ++--------------- .../workflows/test-build-all-platforms.yml | 20 ++--------------- build-release.sh | 22 +++++-------------- 3 files changed, 9 insertions(+), 53 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 62fe17e..08171df 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -91,12 +91,6 @@ jobs: with: node-version: '20' - - name: Install UPX (macOS) - if: contains(matrix.platform, 'darwin') - run: | - brew install upx - upx --version - - name: Install UPX (Windows) if: contains(matrix.platform, 'windows') shell: pwsh @@ -304,20 +298,10 @@ jobs: APP_BIN=$(find "$APP_PATH/Contents/MacOS" -maxdepth 1 -type f | head -n 1) if [ -z "$APP_BIN" ]; then - echo "❌ 未找到 macOS 应用主程序,无法进行 UPX 压缩!" + echo "❌ 未找到 macOS 应用主程序!" exit 1 fi - BEFORE_BYTES=$(wc -c <"$APP_BIN" | tr -d '[:space:]') - echo "🗜️ 正在使用 UPX 压缩 macOS 可执行文件: $APP_BIN ..." - upx --best --lzma --force --force-macos "$APP_BIN" - upx -t "$APP_BIN" - AFTER_BYTES=$(wc -c <"$APP_BIN" | tr -d '[:space:]') - if [ "$AFTER_BYTES" -lt "$BEFORE_BYTES" ]; then - SAVED_BYTES=$((BEFORE_BYTES - AFTER_BYTES)) - awk -v b="$BEFORE_BYTES" -v a="$AFTER_BYTES" -v s="$SAVED_BYTES" 'BEGIN { printf "✅ macOS UPX 压缩完成:%.2fMB -> %.2fMB,减少 %.2fMB\n", b/1024/1024, a/1024/1024, s/1024/1024 }' - else - awk -v b="$BEFORE_BYTES" -v a="$AFTER_BYTES" 'BEGIN { printf "ℹ️ macOS UPX 压缩完成:%.2fMB -> %.2fMB\n", b/1024/1024, a/1024/1024 }' - fi + echo "ℹ️ macOS 产物不执行 UPX 压缩,保留原始主程序。" echo "🔏 正在进行 Ad-hoc 签名..." # 注意:Ad-hoc + hardened runtime(--options runtime)在未配置 entitlements 时, diff --git a/.github/workflows/test-build-all-platforms.yml b/.github/workflows/test-build-all-platforms.yml index d978dfe..17ba77c 100644 --- a/.github/workflows/test-build-all-platforms.yml +++ b/.github/workflows/test-build-all-platforms.yml @@ -96,12 +96,6 @@ jobs: with: node-version: '20' - - name: Install UPX (macOS) - if: contains(matrix.platform, 'darwin') - run: | - brew install upx - upx --version - - name: Install UPX (Windows) if: contains(matrix.platform, 'windows') shell: pwsh @@ -268,20 +262,10 @@ jobs: APP_NAME=$(basename "$APP_PATH") APP_BIN=$(find "$APP_PATH/Contents/MacOS" -maxdepth 1 -type f | head -n 1) if [ -z "$APP_BIN" ]; then - echo "未找到 macOS 应用主程序,无法进行 UPX 压缩" + echo "未找到 macOS 应用主程序" exit 1 fi - BEFORE_BYTES=$(wc -c <"$APP_BIN" | tr -d '[:space:]') - echo "🗜️ 使用 UPX 压缩 macOS 可执行文件: $APP_BIN ..." 
- upx --best --lzma --force --force-macos "$APP_BIN" - upx -t "$APP_BIN" - AFTER_BYTES=$(wc -c <"$APP_BIN" | tr -d '[:space:]') - if [ "$AFTER_BYTES" -lt "$BEFORE_BYTES" ]; then - SAVED_BYTES=$((BEFORE_BYTES - AFTER_BYTES)) - awk -v b="$BEFORE_BYTES" -v a="$AFTER_BYTES" -v s="$SAVED_BYTES" 'BEGIN { printf "✅ macOS UPX 压缩完成:%.2fMB -> %.2fMB,减少 %.2fMB\n", b/1024/1024, a/1024/1024, s/1024/1024 }' - else - awk -v b="$BEFORE_BYTES" -v a="$AFTER_BYTES" 'BEGIN { printf "ℹ️ macOS UPX 压缩完成:%.2fMB -> %.2fMB\n", b/1024/1024, a/1024/1024 }' - fi + echo "ℹ️ macOS 产物不执行 UPX 压缩,保留原始主程序。" codesign --force --deep --sign - "$APP_NAME" ZIP_NAME="GoNavi-${LABEL}-${{ matrix.os_name }}-${{ matrix.arch_name }}-run${GITHUB_RUN_NUMBER}.zip" DMG_NAME="GoNavi-${LABEL}-${{ matrix.os_name }}-${{ matrix.arch_name }}-run${GITHUB_RUN_NUMBER}.dmg" diff --git a/build-release.sh b/build-release.sh index 8e4a4a7..d60cad5 100755 --- a/build-release.sh +++ b/build-release.sh @@ -45,7 +45,6 @@ format_size_mb() { try_compress_binary_with_upx() { local exe_path="$1" local label="$2" - local is_macos_binary="${3:-false}" if [ ! -f "$exe_path" ]; then echo -e "${RED} ❌ 未找到 ${label} 文件:$exe_path${NC}" exit 1 @@ -64,21 +63,10 @@ try_compress_binary_with_upx() { exit 1 fi - local upx_cmd=(upx --best --lzma --force) - if [ "$is_macos_binary" = "true" ]; then - if upx --help 2>&1 | grep -q -- "--force-macos"; then - upx_cmd+=(--force-macos) - else - echo -e "${RED} ❌ 当前 upx 不支持 --force-macos,无法压缩 ${label}。${NC}" - echo " 请升级 upx 到支持 macOS 压缩的版本(UPX 5+)。" - exit 1 - fi - fi - local before_bytes after_bytes before_bytes=$(get_file_size_bytes "$exe_path") echo " 🗜️ 正在使用 UPX 压缩 ${label}..." - if "${upx_cmd[@]}" "$exe_path" >/dev/null 2>&1; then + if upx --best --lzma --force "$exe_path" >/dev/null 2>&1; then if ! upx -t "$exe_path" >/dev/null 2>&1; then echo -e "${RED} ❌ UPX 校验失败:${label}${NC}" exit 1 @@ -120,9 +108,9 @@ if [ $? -eq 0 ]; then APP_BIN_PATH=$(find "$DIST_DIR/$APP_DEST_NAME/Contents/MacOS" -maxdepth 1 -type f -print -quit) if [ -n "$APP_BIN_PATH" ] && [ -f "$APP_BIN_PATH" ]; then - try_compress_binary_with_upx "$APP_BIN_PATH" "macOS arm64 应用主程序" "true" + echo -e "${YELLOW} ⚠️ macOS arm64 不再执行 UPX 压缩,保留原始主程序。${NC}" else - echo -e "${RED} ❌ 未找到 macOS arm64 主程序文件,无法执行 UPX 压缩。${NC}" + echo -e "${RED} ❌ 未找到 macOS arm64 主程序文件。${NC}" exit 1 fi @@ -227,9 +215,9 @@ if [ $? 
-eq 0 ]; then APP_BIN_PATH=$(find "$DIST_DIR/$APP_DEST_NAME/Contents/MacOS" -maxdepth 1 -type f -print -quit) if [ -n "$APP_BIN_PATH" ] && [ -f "$APP_BIN_PATH" ]; then - try_compress_binary_with_upx "$APP_BIN_PATH" "macOS amd64 应用主程序" "true" + echo -e "${YELLOW} ⚠️ macOS amd64 不再执行 UPX 压缩,保留原始主程序。${NC}" else - echo -e "${RED} ❌ 未找到 macOS amd64 主程序文件,无法执行 UPX 压缩。${NC}" + echo -e "${RED} ❌ 未找到 macOS amd64 主程序文件。${NC}" exit 1 fi From e01328896702528d496988335f51e6e991009b75 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=A8=E5=9B=BD=E9=94=8B?= Date: Thu, 12 Mar 2026 19:23:46 +0800 Subject: [PATCH 47/48] =?UTF-8?q?=F0=9F=94=A7=20fix(ci/release-winget):=20?= =?UTF-8?q?=E4=BF=AE=E5=A4=8D=20Node20=20=E5=BC=83=E7=94=A8=E5=91=8A?= =?UTF-8?q?=E8=AD=A6=E5=B9=B6=E5=BC=BA=E5=88=B6=E5=90=AF=E7=94=A8=20Node24?= =?UTF-8?q?=20=E8=BF=90=E8=A1=8C=E6=97=B6?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 在 release-winget workflow 增加 FORCE_JAVASCRIPT_ACTIONS_TO_NODE24=true - 与现有 release/test workflow 的 Node24 配置保持一致 - 避免 actions/checkout、setup-go、setup-node 触发 Node20 弃用告警 --- .github/workflows/release-winget.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/release-winget.yml b/.github/workflows/release-winget.yml index e4604b3..632c11e 100644 --- a/.github/workflows/release-winget.yml +++ b/.github/workflows/release-winget.yml @@ -10,6 +10,9 @@ on: description: 'Tag of release you want to publish' type: string +env: + FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true" + jobs: publish: runs-on: windows-latest From 1dabac1a6504bd1d9584f56c626e24b3ff4121cf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=A8=E5=9B=BD=E9=94=8B?= Date: Thu, 12 Mar 2026 19:38:54 +0800 Subject: [PATCH 48/48] =?UTF-8?q?=F0=9F=94=A7=20fix(window):=20=E4=BF=AE?= =?UTF-8?q?=E5=A4=8DWindows=E5=90=AF=E5=8A=A8=E5=85=A8=E5=B1=8F=E9=94=81?= =?UTF-8?q?=E6=AD=BB=E5=B9=B6=E8=A1=A5=E9=BD=90=E6=A0=87=E9=A2=98=E6=A0=8F?= =?UTF-8?q?=E9=80=80=E5=87=BA=E5=85=A8=E5=B1=8F=E9=80=BB=E8=BE=91?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- frontend/package.json.md5 | 2 +- frontend/src/App.tsx | 45 ++++++++++++++++++++++++++++----------- 2 files changed, 34 insertions(+), 13 deletions(-) diff --git a/frontend/package.json.md5 b/frontend/package.json.md5 index 0f8f4fe..a7661c0 100755 --- a/frontend/package.json.md5 +++ b/frontend/package.json.md5 @@ -1 +1 @@ -5b8157374dae5f9340e31b2d0bd2c00e \ No newline at end of file +d0f9366af59a6367ad3c7e2d4185ead4 \ No newline at end of file diff --git a/frontend/src/App.tsx b/frontend/src/App.tsx index ce1832e..58b3a96 100644 --- a/frontend/src/App.tsx +++ b/frontend/src/App.tsx @@ -2,7 +2,7 @@ import React, { useState, useEffect, useMemo } from 'react'; import { Layout, Button, ConfigProvider, theme, Dropdown, MenuProps, message, Modal, Spin, Slider, Progress, Switch, Input, InputNumber, Select } from 'antd'; import zhCN from 'antd/locale/zh_CN'; import { PlusOutlined, ConsoleSqlOutlined, UploadOutlined, DownloadOutlined, CloudDownloadOutlined, BugOutlined, ToolOutlined, GlobalOutlined, InfoCircleOutlined, GithubOutlined, SkinOutlined, CheckOutlined, MinusOutlined, BorderOutlined, CloseOutlined, SettingOutlined, LinkOutlined, BgColorsOutlined, AppstoreOutlined } from '@ant-design/icons'; -import { BrowserOpenURL, Environment, EventsOn, Quit, WindowFullscreen, WindowGetSize, WindowIsFullscreen, WindowIsMaximised, WindowMaximise, WindowMinimise, WindowSetSize, WindowToggleMaximise } from '../wailsjs/runtime'; 
+import { BrowserOpenURL, Environment, EventsOn, Quit, WindowFullscreen, WindowGetSize, WindowIsFullscreen, WindowIsMaximised, WindowMaximise, WindowMinimise, WindowSetSize, WindowToggleMaximise, WindowUnfullscreen } from '../wailsjs/runtime'; import Sidebar from './components/Sidebar'; import TabManager from './components/TabManager'; import ConnectionModal from './components/ConnectionModal'; @@ -218,6 +218,7 @@ function App() { const maxApplyAttempts = 6; const applyRetryDelayMs = 400; const settleDelayMs = 160; + const useMaximiseForStartup = isWindowsPlatform(); const checkStartupPreferenceApplied = async (): Promise => { try { @@ -253,15 +254,21 @@ function App() { if (await checkStartupPreferenceApplied()) { return; } - // 优先尝试全屏,若当前平台/时机不生效,后续走最大化兜底。 + // Windows 使用最大化,避免进入真正全屏后无法通过标题栏交互退出。 + // 其他平台保持全屏优先、最大化兜底。 try { - await WindowFullscreen(); - await new Promise((resolve) => window.setTimeout(resolve, settleDelayMs)); - if (await checkStartupPreferenceApplied()) { - return; + if (useMaximiseForStartup) { + await WindowMaximise(); + await new Promise((resolve) => window.setTimeout(resolve, settleDelayMs)); + } else { + await WindowFullscreen(); + await new Promise((resolve) => window.setTimeout(resolve, settleDelayMs)); + if (await checkStartupPreferenceApplied()) { + return; + } + await WindowMaximise(); + await new Promise((resolve) => window.setTimeout(resolve, settleDelayMs)); } - await WindowMaximise(); - await new Promise((resolve) => window.setTimeout(resolve, settleDelayMs)); } catch (e) { console.warn("Wails Window APIs unavailable", e); } @@ -640,6 +647,8 @@ function App() { const isMacRuntime = runtimePlatform === 'darwin' || (runtimePlatform === '' && /mac/i.test(detectNavigatorPlatform())); + const isWindowsRuntime = runtimePlatform === 'windows' + || (runtimePlatform === '' && isWindowsPlatform()); const formatBytes = (bytes?: number) => { if (!bytes || bytes <= 0) return '0 B'; @@ -1075,12 +1084,24 @@ function App() { setIsDriverModalOpen(true); }; + const handleTitleBarWindowToggle = async () => { + try { + if (await WindowIsFullscreen()) { + await WindowUnfullscreen(); + return; + } + await WindowToggleMaximise(); + } catch (_) { + // ignore + } + }; + const handleTitleBarDoubleClick = (e: React.MouseEvent) => { const target = e.target as HTMLElement | null; if (target?.closest('[data-no-titlebar-toggle="true"]')) { return; } - try { WindowToggleMaximise(); } catch(e) {} + void handleTitleBarWindowToggle(); }; // Sidebar Resizing @@ -1447,7 +1468,7 @@ function App() { type="text" icon={} style={{ height: '100%', borderRadius: 0, width: titleBarButtonWidth }} - onClick={WindowToggleMaximise} + onClick={() => { void handleTitleBarWindowToggle(); }} />
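For readers who want the window-state policy of PATCH 48/48 in isolation, a condensed TypeScript sketch follows. It is illustrative only: the helper names applyStartupWindowPreference and toggleWindowFromTitleBar are hypothetical, while the Wails v2 runtime calls (WindowMaximise, WindowFullscreen, WindowIsFullscreen, WindowUnfullscreen, WindowToggleMaximise) are the same ones imported in the App.tsx diff above. The shipped component additionally retries and re-checks whether the startup preference actually took effect, which this sketch omits.

    // Illustrative sketch only — mirrors the policy in the App.tsx changes above, not a verbatim extract.
    import {
      WindowFullscreen,
      WindowIsFullscreen,
      WindowMaximise,
      WindowToggleMaximise,
      WindowUnfullscreen,
    } from '../wailsjs/runtime';

    // Apply the "start maximized/fullscreen" preference.
    // On Windows we only maximize: a true fullscreen window cannot be left
    // via the custom title bar and therefore appears locked up.
    export async function applyStartupWindowPreference(isWindows: boolean): Promise<void> {
      if (isWindows) {
        await WindowMaximise();
        return;
      }
      // Other platforms: prefer fullscreen, fall back to maximize if it does not stick.
      await WindowFullscreen();
      if (!(await WindowIsFullscreen())) {
        await WindowMaximise();
      }
    }

    // Title-bar toggle: leave fullscreen first, otherwise toggle maximize/restore.
    export async function toggleWindowFromTitleBar(): Promise<void> {
      if (await WindowIsFullscreen()) {
        await WindowUnfullscreen();
        return;
      }
      await WindowToggleMaximise();
    }

Splitting the behavior this way keeps Windows off true fullscreen at startup, so the custom title bar's maximize button and double-click handler can always restore the window.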