Compare commits

..

24 Commits

Author SHA1 Message Date
tianqijiuyun-latiao
4a830c7a82 chore(ci): 删除旧的 macOS 单平台测试工作流 2026-03-07 13:32:55 +08:00
tianqijiuyun-latiao
adab24085e fix(db-query-value): 清理 query_value 合并冲突并保持零日期处理 2026-03-07 13:07:00 +08:00
tianqijiuyun-latiao
cc201f3df6 Merge branch 'refs/heads/main' into feature/kingbase_opt
# Conflicts:
#	.github/workflows/test-build-all-platforms.yml
#	internal/db/query_value.go
#	internal/db/query_value_test.go
2026-03-07 12:58:02 +08:00
tianqijiuyun-latiao
57d6ace2f8 fix(data-grid-scroll): 修复数据区触摸板横向滚动失效 refs #175 2026-03-06 22:39:36 +08:00
tianqijiuyun-latiao
0f5dc2184d fix(window-scale): 修复任务栏切换后字体异常放大 refs #193 2026-03-06 20:27:45 +08:00
tianqijiuyun-latiao
0b95908ae9 fix(datetime-display): 修复零日期显示被错误转换 refs #189 2026-03-06 19:35:33 +08:00
tianqijiuyun-latiao
4c322818bc fix(data-viewer): 保持切换标签后的表格滚动位置 2026-03-06 19:22:07 +08:00
tianqijiuyun-latiao
3fefa13023 fix(ci): 修复全平台测试包 artifact 命名冲突 2026-03-06 18:24:28 +08:00
tianqijiuyun-latiao
1be003b0a2 chore(ci): 新增全平台测试包手动构建工作流 2026-03-06 16:26:29 +08:00
tianqijiuyun-latiao
0a0609c459 fix(query-execution): 支持带前置注释的读查询结果识别 2026-03-06 16:05:34 +08:00
tianqijiuyun-latiao
0682baa14d fix(kingbase): 补齐主键识别并优化宽表卡顿 refs #176 refs #178 2026-03-06 15:39:51 +08:00
tianqijiuyun-latiao
6c41e15e99 fix(connection,db-list): 统一处理空列表返回并修复达梦连接测试报错 refs #157 2026-03-06 12:37:10 +08:00
tianqijiuyun-latiao
ba51fa658c Merge branch 'refs/heads/main' into feature/kingbase_opt 2026-03-06 09:57:37 +08:00
tianqijiuyun-latiao
18ed4ca50c fix(dameng-databases): 修复显示全部库时数据库列表不完整 refs #154 2026-03-06 08:01:40 +08:00
tianqijiuyun-latiao
37704e2b3b fix(oracle-metadata): 修复视图与函数加载按 schema 过滤异常 refs #155 2026-03-06 07:54:51 +08:00
tianqijiuyun-latiao
6b9104fae8 fix(clickhouse-connect): 自动识别并回退 HTTP/Native 协议连接 refs #181 2026-03-06 07:49:41 +08:00
tianqijiuyun-latiao
647768221e feat(data-sync): 增加差异 SQL 预览能力便于审核 refs #174 2026-03-06 00:53:40 +08:00
tianqijiuyun-latiao
251e1b22d7 fix(query-editor): 修复 SQL 编辑中光标随机跳到末尾 refs #185 2026-03-06 00:42:12 +08:00
tianqijiuyun-latiao
c99d4b1fa6 chore(ci): 允许测试工作流在当前分支自动触发 2026-03-06 00:02:47 +08:00
tianqijiuyun-latiao
ff98ec79a4 chore(ci): 新增手动触发的 macOS 测试构建工作流 2026-03-05 23:58:19 +08:00
tianqijiuyun-latiao
072b4e6e78 fix(driver-agent): 修复老版本 Win10 升级后金仓驱动代理启动失败
refs #177
2026-03-05 23:36:46 +08:00
tianqijiuyun-latiao
2449184ad3 fix(kingbase-transaction): 修复金仓事务提交重复引号导致语法错误
refs #176
2026-03-05 21:10:36 +08:00
tianqijiuyun-latiao
6ae49d4b84 fix(kingbase-data-grid): 修复金仓打开表卡顿并降低对象渲染开销
refs #178
2026-03-05 20:42:33 +08:00
tianqijiuyun-latiao
5c23722ad8 feat(http-tunnel): 支持独立 HTTP 隧道连接并覆盖多数据源
refs #168
2026-03-05 19:23:00 +08:00
81 changed files with 2120 additions and 13807 deletions

View File

@@ -88,24 +88,6 @@ jobs:
with:
node-version: '20'
- name: Install UPX (macOS)
if: contains(matrix.platform, 'darwin')
run: |
brew install upx
upx --version
- name: Install UPX (Windows)
if: contains(matrix.platform, 'windows')
shell: pwsh
run: |
choco install upx --no-progress -y
$upxCmd = Get-Command upx -ErrorAction SilentlyContinue
if ($null -eq $upxCmd) {
Write-Error "❌ 未检测到 upx无法保证 Windows 产物经过压缩"
exit 1
}
& upx --version
# Linux Dependencies (GTK3, WebKit2GTK required by Wails)
- name: Install Linux Dependencies
if: contains(matrix.platform, 'linux')
@@ -120,9 +102,6 @@ jobs:
sudo apt-get install -y libwebkit2gtk-4.0-dev
fi
sudo apt-get install -y upx-ucl || sudo apt-get install -y upx
upx --version
# AppImage 运行/打包可能需要 FUSE2。不同发行版/版本包名不同,做兼容兜底。
sudo apt-get install -y libfuse2 || sudo apt-get install -y libfuse2t64 || true
@@ -298,23 +277,6 @@ jobs:
exit 1
fi
APP_NAME=$(basename "$APP_PATH")
APP_BIN=$(find "$APP_PATH/Contents/MacOS" -maxdepth 1 -type f | head -n 1)
if [ -z "$APP_BIN" ]; then
echo "❌ 未找到 macOS 应用主程序,无法进行 UPX 压缩!"
exit 1
fi
BEFORE_BYTES=$(wc -c <"$APP_BIN" | tr -d '[:space:]')
echo "🗜️ 正在使用 UPX 压缩 macOS 可执行文件: $APP_BIN ..."
upx --best --lzma --force "$APP_BIN"
upx -t "$APP_BIN"
AFTER_BYTES=$(wc -c <"$APP_BIN" | tr -d '[:space:]')
if [ "$AFTER_BYTES" -lt "$BEFORE_BYTES" ]; then
SAVED_BYTES=$((BEFORE_BYTES - AFTER_BYTES))
awk -v b="$BEFORE_BYTES" -v a="$AFTER_BYTES" -v s="$SAVED_BYTES" 'BEGIN { printf "✅ macOS UPX 压缩完成:%.2fMB -> %.2fMB,减少 %.2fMB\n", b/1024/1024, a/1024/1024, s/1024/1024 }'
else
awk -v b="$BEFORE_BYTES" -v a="$AFTER_BYTES" 'BEGIN { printf " macOS UPX 压缩完成:%.2fMB -> %.2fMB\n", b/1024/1024, a/1024/1024 }'
fi
echo "🔏 正在进行 Ad-hoc 签名..."
# 注意Ad-hoc + hardened runtime--options runtime在未配置 entitlements 时,
@@ -339,7 +301,7 @@ jobs:
mv "$DMG_NAME" "../../$FINAL_NAME"
# Windows Packaging
- name: Package Windows EXE
- name: Package Windows Portable Zip
if: contains(matrix.platform, 'windows')
shell: pwsh
run: |
@@ -350,6 +312,7 @@ jobs:
}
$target = "${{ matrix.build_name }}"
$finalExeName = "GoNavi-$version-${{ matrix.os_name }}-${{ matrix.arch_name }}${{ matrix.artifact_suffix }}.exe"
$finalZipName = "GoNavi-$version-${{ matrix.os_name }}-${{ matrix.arch_name }}${{ matrix.artifact_suffix }}.zip"
if (Test-Path "$target.exe") {
$finalExe = "$target.exe"
@@ -361,26 +324,12 @@ jobs:
exit 1
}
$upxCmd = Get-Command upx -ErrorAction SilentlyContinue
if ($null -eq $upxCmd) {
Write-Error "❌ 未找到 upx无法保证 Windows 产物经过压缩"
exit 1
}
$beforeBytes = (Get-Item -LiteralPath $finalExe).Length
Write-Host "🗜️ 使用 UPX 压缩 $finalExe ..."
& upx --best --lzma --force $finalExe | Out-Host
& upx -t $finalExe | Out-Host
$afterBytes = (Get-Item -LiteralPath $finalExe).Length
if ($afterBytes -lt $beforeBytes) {
$savedBytes = $beforeBytes - $afterBytes
Write-Host ("✅ UPX 压缩完成:{0:N2}MB -> {1:N2}MB减少 {2:N2}MB" -f ($beforeBytes / 1MB), ($afterBytes / 1MB), ($savedBytes / 1MB))
} else {
Write-Host (" UPX 压缩完成:{0:N2}MB -> {1:N2}MB" -f ($beforeBytes / 1MB), ($afterBytes / 1MB))
}
Write-Host "📦 输出 Windows 可执行文件 $finalExeName..."
Write-Host "📦 生成 Windows 可执行文件 $finalExeName..."
Copy-Item -LiteralPath $finalExe -Destination "..\\..\\$finalExeName" -Force
Write-Host "📦 生成 Windows 压缩包 $finalZipName..."
Compress-Archive -LiteralPath $finalExe -DestinationPath "..\\..\\$finalZipName" -Force
# Linux Packaging (tar.gz and AppImage)
- name: Package Linux
if: contains(matrix.platform, 'linux')
@@ -398,17 +347,6 @@ jobs:
fi
chmod +x "$TARGET"
BEFORE_BYTES=$(wc -c <"$TARGET" | tr -d '[:space:]')
echo "🗜️ 正在使用 UPX 压缩 Linux 可执行文件: $TARGET ..."
upx --best --lzma --force "$TARGET"
upx -t "$TARGET"
AFTER_BYTES=$(wc -c <"$TARGET" | tr -d '[:space:]')
if [ "$AFTER_BYTES" -lt "$BEFORE_BYTES" ]; then
SAVED_BYTES=$((BEFORE_BYTES - AFTER_BYTES))
awk -v b="$BEFORE_BYTES" -v a="$AFTER_BYTES" -v s="$SAVED_BYTES" 'BEGIN { printf "✅ Linux UPX 压缩完成:%.2fMB -> %.2fMB,减少 %.2fMB\n", b/1024/1024, a/1024/1024, s/1024/1024 }'
else
awk -v b="$BEFORE_BYTES" -v a="$AFTER_BYTES" 'BEGIN { printf " Linux UPX 压缩完成:%.2fMB -> %.2fMB\n", b/1024/1024, a/1024/1024 }'
fi
# 1. Create tar.gz
echo "📦 正在打包 $TAR_NAME..."
@@ -481,6 +419,7 @@ jobs:
path: |
GoNavi-*.dmg
GoNavi-*.exe
GoNavi-*.zip
GoNavi-*.tar.gz
GoNavi-*.AppImage
drivers/**

159
.github/workflows/sync-main-to-dev.yml vendored Normal file
View File

@@ -0,0 +1,159 @@
name: main 回灌 dev
on:
push:
branches:
- main
workflow_dispatch:
permissions:
contents: write
pull-requests: write
concurrency:
group: sync-main-to-dev
cancel-in-progress: true
jobs:
sync-main-to-dev:
name: 执行回灌同步
runs-on: ubuntu-latest
steps:
- name: 检出代码
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: 检查是否需要同步
id: diff_check
shell: bash
run: |
set -euo pipefail
echo "开始检查 main 与 dev 的分支差异..."
git fetch origin main dev
ahead_count="$(git rev-list --count origin/dev..origin/main)"
echo "ahead_count=${ahead_count}" >> "$GITHUB_OUTPUT"
if [ "${ahead_count}" -eq 0 ]; then
echo "无需同步dev 已包含 main 的最新提交。"
echo "has_changes=false" >> "$GITHUB_OUTPUT"
else
echo "检测到 ${ahead_count} 个待同步提交,准备创建或复用同步 PR。"
echo "has_changes=true" >> "$GITHUB_OUTPUT"
fi
- name: 创建或复用同步 PR
id: sync_pr
if: steps.diff_check.outputs.has_changes == 'true'
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
shell: bash
run: |
set -euo pipefail
echo "permission_blocked=false" >> "$GITHUB_OUTPUT"
existing_number="$(gh pr list --base dev --head main --state open --json number --jq '.[0].number // empty')"
if [ -n "${existing_number}" ]; then
pr_number="${existing_number}"
pr_url="$(gh pr view "${pr_number}" --json url --jq '.url')"
echo "复用已有同步 PR#${pr_number}"
echo "created=false" >> "$GITHUB_OUTPUT"
else
body_file="$(mktemp)"
error_file="$(mktemp)"
{
echo "## 自动回灌:\`main -> dev\`"
echo
echo "- 触发条件:\`main\` 分支出现新提交(含贡献者直接合并到 \`main\` 的 PR"
echo "- 目标:让 \`dev\` 持续吸收 \`main\` 的更新,避免发布前集中冲突"
echo
echo "### 合并建议"
echo "- 无冲突:直接合并该 PR建议 \`Merge commit\`"
echo "- 有冲突:在该 PR 内解决冲突后再合并"
} > "${body_file}"
if pr_url="$(gh pr create \
--base dev \
--head main \
--title "🔁 chore(sync): 回灌 main 到 dev" \
--body-file "${body_file}" 2>"${error_file}")"; then
pr_number="${pr_url##*/}"
echo "已创建同步 PR#${pr_number}"
echo "created=true" >> "$GITHUB_OUTPUT"
else
error_message="$(tr '\n' ' ' < "${error_file}")"
if printf '%s' "${error_message}" | grep -Fq "GitHub Actions is not permitted to create or approve pull requests"; then
echo "::warning::仓库未开启“Allow GitHub Actions to create and approve pull requests”已跳过自动创建同步 PR。"
echo "permission_blocked=true" >> "$GITHUB_OUTPUT"
echo "created=false" >> "$GITHUB_OUTPUT"
echo "pr_number=" >> "$GITHUB_OUTPUT"
echo "pr_url=" >> "$GITHUB_OUTPUT"
exit 0
fi
echo "::error::创建同步 PR 失败:${error_message}"
exit 1
fi
fi
echo "pr_number=${pr_number}" >> "$GITHUB_OUTPUT"
echo "pr_url=${pr_url}" >> "$GITHUB_OUTPUT"
- name: 检查合并状态
id: merge_state
if: steps.diff_check.outputs.has_changes == 'true' && steps.sync_pr.outputs.permission_blocked != 'true'
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
shell: bash
run: |
set -euo pipefail
pr_number="${{ steps.sync_pr.outputs.pr_number }}"
mergeable="$(gh pr view "${pr_number}" --json mergeable --jq '.mergeable')"
merge_state_status="$(gh pr view "${pr_number}" --json mergeStateStatus --jq '.mergeStateStatus')"
echo "PR #${pr_number} 合并状态mergeable=${mergeable}, mergeStateStatus=${merge_state_status}"
echo "mergeable=${mergeable}" >> "$GITHUB_OUTPUT"
echo "merge_state_status=${merge_state_status}" >> "$GITHUB_OUTPUT"
- name: 可合并时开启自动合并
id: auto_merge
if: steps.diff_check.outputs.has_changes == 'true' && steps.sync_pr.outputs.permission_blocked != 'true' && steps.merge_state.outputs.mergeable == 'MERGEABLE'
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
shell: bash
run: |
set -euo pipefail
pr_number="${{ steps.sync_pr.outputs.pr_number }}"
if gh pr merge "${pr_number}" --merge --auto; then
echo "已为 PR #${pr_number} 开启自动合并。"
echo "result=enabled" >> "$GITHUB_OUTPUT"
else
echo "::warning::自动合并开启失败,请手动处理并合并该 PR。"
echo "result=failed" >> "$GITHUB_OUTPUT"
fi
- name: 写入执行摘要
if: always()
shell: bash
run: |
{
echo "## main 回灌 dev 执行结果"
if [ "${{ steps.diff_check.outputs.has_changes }}" != "true" ]; then
echo "- 状态无需同步dev 已包含 main 最新提交)"
exit 0
fi
if [ "${{ steps.sync_pr.outputs.permission_blocked }}" = "true" ]; then
echo "- 状态:已跳过自动创建同步 PR"
echo "- 原因:仓库未开启 GitHub Actions 创建与审批 Pull Request 权限"
echo "- 处理:前往 Settings -> Actions -> General -> Workflow permissions开启 Allow GitHub Actions to create and approve pull requests"
echo "- 兜底:由维护者手动执行 main 到 dev 合并,或开启该设置后重新运行 workflow"
exit 0
fi
echo "- PR${{ steps.sync_pr.outputs.pr_url }}"
echo "- 可合并状态:${{ steps.merge_state.outputs.mergeable }}"
echo "- 合并状态详情:${{ steps.merge_state.outputs.merge_state_status }}"
if [ "${{ steps.merge_state.outputs.mergeable }}" = "CONFLICTING" ]; then
echo "- 结论:检测到冲突,需要手动处理后合并"
elif [ "${{ steps.auto_merge.outputs.result }}" = "enabled" ]; then
echo "- 结论:已启用自动合并(满足保护规则后将自动入 dev"
else
echo "- 结论PR 已创建/复用,请按分支策略人工合并"
fi
} >> "$GITHUB_STEP_SUMMARY"

View File

@@ -93,24 +93,6 @@ jobs:
with:
node-version: '20'
- name: Install UPX (macOS)
if: contains(matrix.platform, 'darwin')
run: |
brew install upx
upx --version
- name: Install UPX (Windows)
if: contains(matrix.platform, 'windows')
shell: pwsh
run: |
choco install upx --no-progress -y
$upxCmd = Get-Command upx -ErrorAction SilentlyContinue
if ($null -eq $upxCmd) {
Write-Error "❌ 未检测到 upx无法保证 Windows 测试产物经过压缩"
exit 1
}
& upx --version
- name: Install Linux Dependencies
if: contains(matrix.platform, 'linux')
run: |
@@ -123,9 +105,6 @@ jobs:
sudo apt-get install -y libwebkit2gtk-4.0-dev
fi
sudo apt-get install -y upx-ucl || sudo apt-get install -y upx
upx --version
sudo apt-get install -y libfuse2 || sudo apt-get install -y libfuse2t64 || true
LINUXDEPLOY_URL="https://github.com/linuxdeploy/linuxdeploy/releases/download/continuous/linuxdeploy-x86_64.AppImage"
@@ -263,22 +242,6 @@ jobs:
exit 1
fi
APP_NAME=$(basename "$APP_PATH")
APP_BIN=$(find "$APP_PATH/Contents/MacOS" -maxdepth 1 -type f | head -n 1)
if [ -z "$APP_BIN" ]; then
echo "未找到 macOS 应用主程序,无法进行 UPX 压缩"
exit 1
fi
BEFORE_BYTES=$(wc -c <"$APP_BIN" | tr -d '[:space:]')
echo "🗜️ 使用 UPX 压缩 macOS 可执行文件: $APP_BIN ..."
upx --best --lzma --force "$APP_BIN"
upx -t "$APP_BIN"
AFTER_BYTES=$(wc -c <"$APP_BIN" | tr -d '[:space:]')
if [ "$AFTER_BYTES" -lt "$BEFORE_BYTES" ]; then
SAVED_BYTES=$((BEFORE_BYTES - AFTER_BYTES))
awk -v b="$BEFORE_BYTES" -v a="$AFTER_BYTES" -v s="$SAVED_BYTES" 'BEGIN { printf "✅ macOS UPX 压缩完成:%.2fMB -> %.2fMB,减少 %.2fMB\n", b/1024/1024, a/1024/1024, s/1024/1024 }'
else
awk -v b="$BEFORE_BYTES" -v a="$AFTER_BYTES" 'BEGIN { printf " macOS UPX 压缩完成:%.2fMB -> %.2fMB\n", b/1024/1024, a/1024/1024 }'
fi
codesign --force --deep --sign - "$APP_NAME"
ZIP_NAME="GoNavi-${LABEL}-${{ matrix.os_name }}-${{ matrix.arch_name }}-run${GITHUB_RUN_NUMBER}.zip"
DMG_NAME="GoNavi-${LABEL}-${{ matrix.os_name }}-${{ matrix.arch_name }}-run${GITHUB_RUN_NUMBER}.dmg"
@@ -307,6 +270,7 @@ jobs:
Set-Location build/bin
$target = "${{ matrix.build_name }}"
$finalExeName = "GoNavi-$label-${{ matrix.os_name }}-${{ matrix.arch_name }}-run$env:GITHUB_RUN_NUMBER.exe"
$finalZipName = "GoNavi-$label-${{ matrix.os_name }}-${{ matrix.arch_name }}-run$env:GITHUB_RUN_NUMBER.zip"
if (Test-Path "$target.exe") {
$finalExe = "$target.exe"
} elseif (Test-Path "$target") {
@@ -316,25 +280,11 @@ jobs:
Write-Error "未找到构建产物 '$target'"
exit 1
}
$upxCmd = Get-Command upx -ErrorAction SilentlyContinue
if ($null -eq $upxCmd) {
Write-Error "❌ 未找到 upx无法保证 Windows 测试产物经过压缩"
exit 1
}
$beforeBytes = (Get-Item -LiteralPath $finalExe).Length
Write-Host "🗜️ 使用 UPX 压缩 $finalExe ..."
& upx --best --lzma --force $finalExe | Out-Host
& upx -t $finalExe | Out-Host
$afterBytes = (Get-Item -LiteralPath $finalExe).Length
if ($afterBytes -lt $beforeBytes) {
$savedBytes = $beforeBytes - $afterBytes
Write-Host ("✅ UPX 压缩完成:{0:N2}MB -> {1:N2}MB减少 {2:N2}MB" -f ($beforeBytes / 1MB), ($afterBytes / 1MB), ($savedBytes / 1MB))
} else {
Write-Host (" UPX 压缩完成:{0:N2}MB -> {1:N2}MB" -f ($beforeBytes / 1MB), ($afterBytes / 1MB))
}
New-Item -ItemType Directory -Force -Path ..\..\artifacts | Out-Null
Copy-Item -LiteralPath $finalExe -Destination "..\..\artifacts\$finalExeName" -Force
Compress-Archive -LiteralPath $finalExe -DestinationPath "..\..\artifacts\$finalZipName" -Force
Get-FileHash "..\..\artifacts\$finalExeName" -Algorithm SHA256 | ForEach-Object { "{0} *{1}" -f $_.Hash.ToLower(), (Split-Path $_.Path -Leaf) } | Out-File "..\..\artifacts\$finalExeName.sha256" -Encoding ascii
Get-FileHash "..\..\artifacts\$finalZipName" -Algorithm SHA256 | ForEach-Object { "{0} *{1}" -f $_.Hash.ToLower(), (Split-Path $_.Path -Leaf) } | Out-File "..\..\artifacts\$finalZipName.sha256" -Encoding ascii
- name: Package Linux
if: contains(matrix.platform, 'linux')
@@ -356,17 +306,6 @@ jobs:
exit 1
fi
chmod +x "$TARGET"
BEFORE_BYTES=$(wc -c <"$TARGET" | tr -d '[:space:]')
echo "🗜️ 使用 UPX 压缩 Linux 可执行文件: $TARGET ..."
upx --best --lzma --force "$TARGET"
upx -t "$TARGET"
AFTER_BYTES=$(wc -c <"$TARGET" | tr -d '[:space:]')
if [ "$AFTER_BYTES" -lt "$BEFORE_BYTES" ]; then
SAVED_BYTES=$((BEFORE_BYTES - AFTER_BYTES))
awk -v b="$BEFORE_BYTES" -v a="$AFTER_BYTES" -v s="$SAVED_BYTES" 'BEGIN { printf "✅ Linux UPX 压缩完成:%.2fMB -> %.2fMB,减少 %.2fMB\n", b/1024/1024, a/1024/1024, s/1024/1024 }'
else
awk -v b="$BEFORE_BYTES" -v a="$AFTER_BYTES" 'BEGIN { printf " Linux UPX 压缩完成:%.2fMB -> %.2fMB\n", b/1024/1024, a/1024/1024 }'
fi
tar -czvf "../../artifacts/$TAR_NAME" "$TARGET"
sha256sum "../../artifacts/$TAR_NAME" > "../../artifacts/$TAR_NAME.sha256"

View File

@@ -1,91 +0,0 @@
name: Test Build macOS (Manual)
on:
workflow_dispatch:
inputs:
build_label:
description: "测试包标识(仅用于文件名)"
required: false
default: "test"
push:
branches:
- feature/kingbase_opt
paths:
- ".github/workflows/test-macos-build.yml"
permissions:
contents: read
jobs:
build-macos:
name: Build macOS ${{ matrix.arch }}
runs-on: macos-latest
strategy:
fail-fast: false
matrix:
include:
- platform: darwin/amd64
arch: amd64
- platform: darwin/arm64
arch: arm64
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: "1.24.3"
check-latest: true
- name: Setup Node
uses: actions/setup-node@v4
with:
node-version: "20"
- name: Install Wails
run: go install github.com/wailsapp/wails/v2/cmd/wails@v2.11.0
- name: Build App
run: |
set -euo pipefail
OUTPUT_NAME="gonavi-test-${{ matrix.arch }}"
BUILD_LABEL="${{ inputs.build_label }}"
if [ -z "$BUILD_LABEL" ]; then
BUILD_LABEL="test"
fi
APP_VERSION="${BUILD_LABEL}-${GITHUB_RUN_NUMBER}"
wails build \
-platform "${{ matrix.platform }}" \
-clean \
-o "$OUTPUT_NAME" \
-ldflags "-s -w -X GoNavi-Wails/internal/app.AppVersion=${APP_VERSION}"
- name: Package Zip
run: |
set -euo pipefail
APP_PATH="build/bin/gonavi-test-${{ matrix.arch }}.app"
if [ ! -d "$APP_PATH" ]; then
APP_PATH=$(find build/bin -maxdepth 1 -name "*.app" | head -n 1 || true)
fi
if [ -z "$APP_PATH" ] || [ ! -d "$APP_PATH" ]; then
echo "未找到 .app 产物"
ls -la build/bin || true
exit 1
fi
LABEL="${{ inputs.build_label }}"
if [ -z "$LABEL" ]; then
LABEL="test"
fi
ZIP_NAME="GoNavi-${LABEL}-macos-${{ matrix.arch }}-run${GITHUB_RUN_NUMBER}.zip"
mkdir -p artifacts
ditto -c -k --sequesterRsrc --keepParent "$APP_PATH" "artifacts/$ZIP_NAME"
shasum -a 256 "artifacts/$ZIP_NAME" > "artifacts/$ZIP_NAME.sha256"
- name: Upload Artifact
uses: actions/upload-artifact@v4
with:
name: gonavi-macos-${{ matrix.arch }}-run${{ github.run_number }}
path: artifacts/*
if-no-files-found: error

View File

@@ -79,8 +79,14 @@ Because external pull requests are merged directly into `main`, maintainers must
### 1. Sync `main` -> `dev` (required)
The automatic GitHub Actions sync workflow has been removed.
Maintainers should sync `main` back to `dev` manually when needed:
This repository provides automatic sync via GitHub Actions workflow:
- `.github/workflows/sync-main-to-dev.yml`
- Trigger: every push to `main`
- Behavior: create/reuse a PR from `main` to `dev`; if mergeable, it tries to enable auto-merge
- Prerequisite: in `Settings -> Actions -> General -> Workflow permissions`, enable `Allow GitHub Actions to create and approve pull requests`; otherwise the workflow will skip PR creation and only emit a warning summary
Manual fallback (when conflicts or automation is unavailable):
```bash
git checkout dev

View File

@@ -79,8 +79,14 @@ feature/* / fix/* -> dev -> release/* -> main -> tag(vX.Y.Z)
### 1. main → dev 同步(必做)
仓库已移除 GitHub Actions 自动回灌 workflow。
当前统一采用手动方式将 `main` 同步回 `dev`
仓库已提供 GitHub Actions 自动同步机制:
- `.github/workflows/sync-main-to-dev.yml`
- 触发时机:每次 `main` 分支有新的 push
- 行为:自动创建或复用 `main``dev` 的同步 PR若可合并则尝试开启自动合并
- 前置条件:需在 `Settings -> Actions -> General -> Workflow permissions` 中开启 `Allow GitHub Actions to create and approve pull requests`,否则 workflow 只会输出告警摘要并跳过建 PR
当出现冲突,或自动化暂不可用时,使用以下手动兜底方式:
```bash
git checkout dev

View File

@@ -1,228 +0,0 @@
#!/usr/bin/env bash
# Build the optional driver-agent binaries for GoNavi and bundle them
# into a per-platform zip. Run with -h/--help for usage.
set -euo pipefail
# Resolve the directory this script lives in and work from there, so
# relative paths (./cmd/optional-driver-agent, dist/...) are stable
# regardless of the caller's cwd.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "$SCRIPT_DIR"
# Full driver list built when --drivers is not given.
DEFAULT_DRIVERS=(mariadb doris sphinx sqlserver sqlite duckdb dameng kingbase highgo vastbase mongodb tdengine clickhouse)
# Print CLI usage help to stdout.
# The heredoc delimiter is quoted ('EOF'), so the text is emitted
# literally with no variable or command expansion.
usage() {
cat <<'EOF'
用法:
./build-driver-agents.sh [选项]
选项:
--drivers <列表> 指定驱动列表逗号分隔例如kingbase,mongodb
--platform <GOOS/GOARCH>
目标平台,默认使用当前 Go 环境go env GOOS/GOARCH
--out-dir <目录> 输出目录根路径默认dist/driver-agents
--bundle-name <文件名> 驱动总包 zip 名称默认GoNavi-DriverAgents.zip
--strict 任一驱动构建失败即中断(默认失败后继续,最后汇总)
-h, --help 显示帮助
示例:
./build-driver-agents.sh
./build-driver-agents.sh --drivers kingbase
./build-driver-agents.sh --platform windows/amd64 --drivers kingbase,mongodb
EOF
}
# Map a user-supplied driver name to its canonical lowercase form.
# Trims surrounding whitespace (via xargs), lowercases, and accepts the
# historical "diros" misspelling as an alias for doris.
# Prints the canonical name on stdout; returns 1 for unknown drivers.
normalize_driver() {
  local candidate
  candidate="$(printf '%s' "${1:-}" | tr '[:upper:]' '[:lower:]' | xargs)"
  case "$candidate" in
    doris|diros)
      echo "doris"
      ;;
    mariadb|sphinx|sqlserver|sqlite|duckdb|dameng|kingbase|highgo|vastbase|mongodb|tdengine|clickhouse)
      echo "$candidate"
      ;;
    *)
      return 1
      ;;
  esac
}
# Translate a canonical driver name into the spelling used by the Go
# build tag. The doris agent's tag historically uses "diros", so map it
# back; every other driver keeps its canonical name.
build_driver_name() {
  if [[ "$1" == "doris" ]]; then
    echo "diros"
  else
    echo "$1"
  fi
}
# Map a GOOS value to the directory name used inside the bundle zip.
# Unrecognized values fall back to "Unknown".
platform_dir_name() {
  local label
  case "$1" in
    windows) label="Windows" ;;
    darwin)  label="MacOS" ;;
    linux)   label="Linux" ;;
    *)       label="Unknown" ;;
  esac
  echo "$label"
}
# ---- CLI defaults ----
driver_csv=""
target_platform=""
out_root="dist/driver-agents"
bundle_name="GoNavi-DriverAgents.zip"
strict_mode="false"
# ---- Argument parsing (long options, manual loop) ----
while [[ $# -gt 0 ]]; do
case "$1" in
--drivers)
driver_csv="${2:-}"
shift 2
;;
--platform)
target_platform="${2:-}"
shift 2
;;
--out-dir)
out_root="${2:-}"
shift 2
;;
--bundle-name)
bundle_name="${2:-}"
shift 2
;;
--strict)
strict_mode="true"
shift
;;
-h|--help)
usage
exit 0
;;
*)
echo "❌ 未知参数:$1"
usage
exit 1
;;
esac
done
# Go toolchain is mandatory: everything below shells out to `go build`.
if ! command -v go >/dev/null 2>&1; then
echo "❌ 未找到 Go请先安装 Go 并确保 go 在 PATH 中。"
exit 1
fi
# Default to the host platform reported by the local Go environment.
if [[ -z "$target_platform" ]]; then
target_platform="$(go env GOOS)/$(go env GOARCH)"
fi
# Platform must be "GOOS/GOARCH" (exactly one slash expected).
if [[ "$target_platform" != */* ]]; then
echo "❌ --platform 参数格式错误,应为 GOOS/GOARCH例如 darwin/arm64"
exit 1
fi
goos="${target_platform%%/*}"
goarch="${target_platform##*/}"
platform_key="${goos}-${goarch}"
platform_dir="$(platform_dir_name "$goos")"
# ---- Resolve the driver list: either the --drivers CSV or the defaults ----
declare -a drivers=()
if [[ -n "$driver_csv" ]]; then
IFS=',' read -r -a raw_drivers <<<"$driver_csv"
for item in "${raw_drivers[@]}"; do
# normalize_driver validates each entry; any unknown name aborts.
normalized="$(normalize_driver "$item")" || {
echo "❌ 不支持的驱动:$item"
exit 1
}
drivers+=("$normalized")
done
else
drivers=("${DEFAULT_DRIVERS[@]}")
fi
# ---- Output layout: per-platform dir plus a throwaway staging dir for the zip ----
output_dir="${out_root%/}/${platform_key}"
bundle_stage_dir="$(mktemp -d "${TMPDIR:-/tmp}/gonavi-driver-bundle.XXXXXX")"
bundle_platform_dir="$bundle_stage_dir/$platform_dir"
# Remove the staging dir on every exit path (success, failure, or signal).
cleanup() {
rm -rf "$bundle_stage_dir"
}
trap cleanup EXIT
mkdir -p "$output_dir" "$bundle_platform_dir"
output_dir_abs="$(cd "$output_dir" && pwd)"
bundle_zip_path="$output_dir_abs/$bundle_name"
# Per-run bookkeeping for the final summary.
declare -a built_assets=()
declare -a failed_drivers=()
declare -a skipped_drivers=()
echo "🚀 开始构建 optional-driver-agent"
echo " 平台:$goos/$goarch"
echo " 输出目录:$output_dir_abs"
echo " 驱动列表:${drivers[*]}"
# ---- Build each driver agent ----
for driver in "${drivers[@]}"; do
# duckdb needs CGO and is only supported on windows/amd64 among Windows targets.
if [[ "$driver" == "duckdb" && "$goos" == "windows" && "$goarch" != "amd64" ]]; then
echo "⚠️ 跳过 duckdb仅支持 windows/amd64"
skipped_drivers+=("$driver")
continue
fi
# build_driver_name maps doris -> "diros" to match the historical Go build tag.
build_driver="$(build_driver_name "$driver")"
tag="gonavi_${build_driver}_driver"
asset_name="${driver}-driver-agent-${goos}-${goarch}"
if [[ "$goos" == "windows" ]]; then
asset_name="${asset_name}.exe"
fi
output_path="$output_dir_abs/$asset_name"
# CGO is only enabled for duckdb; everything else builds as pure Go.
cgo_enabled=0
if [[ "$driver" == "duckdb" ]]; then
cgo_enabled=1
fi
echo "🔧 构建 $driver -> $asset_name (tag=$tag, CGO_ENABLED=$cgo_enabled)"
# Temporarily drop -e so a failed build can be recorded instead of
# killing the whole script (unless --strict was requested).
set +e
CGO_ENABLED="$cgo_enabled" GOOS="$goos" GOARCH="$goarch" GOTOOLCHAIN=auto \
go build -tags "$tag" -trimpath -ldflags "-s -w" -o "$output_path" ./cmd/optional-driver-agent
build_exit=$?
set -e
if [[ $build_exit -ne 0 ]]; then
echo "❌ 构建失败:$driver"
failed_drivers+=("$driver")
if [[ "$strict_mode" == "true" ]]; then
exit $build_exit
fi
continue
fi
# Stage a copy for the bundle zip; the single-file output stays in output_dir.
cp "$output_path" "$bundle_platform_dir/$asset_name"
built_assets+=("$asset_name")
done
if [[ ${#built_assets[@]} -eq 0 ]]; then
echo "❌ 未成功构建任何驱动代理。"
exit 1
fi
# ---- Create the bundle zip: prefer zip, fall back to ditto (macOS) ----
rm -f "$bundle_zip_path"
if command -v zip >/dev/null 2>&1; then
(
cd "$bundle_stage_dir"
zip -qry "$bundle_zip_path" "$platform_dir"
)
elif command -v ditto >/dev/null 2>&1; then
(
cd "$bundle_stage_dir"
ditto -c -k --sequesterRsrc --keepParent "$platform_dir" "$bundle_zip_path"
)
else
echo "❌ 未找到 zip/ditto无法生成驱动总包 zip。"
exit 1
fi
# ---- Summary; exit 2 signals partial failure in non-strict mode ----
echo ""
echo "✅ 构建完成"
echo " 单文件输出目录:$output_dir_abs"
echo " 驱动总包:$bundle_zip_path"
echo " 已构建:${built_assets[*]}"
if [[ ${#skipped_drivers[@]} -gt 0 ]]; then
echo " 已跳过:${skipped_drivers[*]}"
fi
if [[ ${#failed_drivers[@]} -gt 0 ]]; then
echo "⚠️ 构建失败驱动:${failed_drivers[*]}"
exit 2
fi

View File

@@ -20,75 +20,6 @@ RED='\033[0;31m'
YELLOW='\033[1;33m'
NC='\033[0m'
get_file_size_bytes() {
local target="$1"
if [ ! -f "$target" ]; then
echo 0
return
fi
if stat -f%z "$target" >/dev/null 2>&1; then
stat -f%z "$target"
return
fi
if stat -c%s "$target" >/dev/null 2>&1; then
stat -c%s "$target"
return
fi
wc -c <"$target" | tr -d '[:space:]'
}
format_size_mb() {
local bytes="${1:-0}"
awk -v b="$bytes" 'BEGIN { printf "%.2fMB", b / 1024 / 1024 }'
}
try_compress_binary_with_upx() {
local exe_path="$1"
local label="$2"
if [ ! -f "$exe_path" ]; then
echo -e "${RED} ❌ 未找到 ${label} 文件:$exe_path${NC}"
exit 1
fi
if ! command -v upx >/dev/null 2>&1; then
echo -e "${RED} ❌ 未找到 upx${label} 必须进行压缩后才能继续打包。${NC}"
case "$(uname -s)" in
Darwin)
echo " 安装命令: brew install upx"
;;
Linux)
echo " 安装命令: sudo apt-get install -y upx-ucl (或对应发行版包管理器)"
;;
esac
exit 1
fi
local before_bytes after_bytes
before_bytes=$(get_file_size_bytes "$exe_path")
echo " 🗜️ 正在使用 UPX 压缩 ${label}..."
if upx --best --lzma --force "$exe_path" >/dev/null 2>&1; then
if ! upx -t "$exe_path" >/dev/null 2>&1; then
echo -e "${RED} ❌ UPX 校验失败:${label}${NC}"
exit 1
fi
after_bytes=$(get_file_size_bytes "$exe_path")
if [ "$after_bytes" -lt "$before_bytes" ]; then
local saved_bytes=$((before_bytes - after_bytes))
echo " ✅ UPX 压缩完成: $(format_size_mb "$before_bytes") -> $(format_size_mb "$after_bytes"),减少 $(format_size_mb "$saved_bytes")"
else
echo " UPX 压缩完成: $(format_size_mb "$before_bytes") -> $(format_size_mb "$after_bytes")"
fi
else
echo -e "${RED} ❌ UPX 压缩失败:${label}${NC}"
exit 1
fi
}
MAC_VOLICON_PATH="build/darwin/icon.icns"
if [ ! -f "$MAC_VOLICON_PATH" ]; then
MAC_VOLICON_PATH=""
fi
echo -e "${GREEN}🚀 开始构建 $APP_NAME $VERSION...${NC}"
# 清理并创建输出目录
@@ -105,101 +36,47 @@ if [ $? -eq 0 ]; then
# 移动 .app 到 dist
mv "$APP_SRC" "$DIST_DIR/$APP_DEST_NAME"
APP_BIN_PATH=$(find "$DIST_DIR/$APP_DEST_NAME/Contents/MacOS" -maxdepth 1 -type f -print -quit)
if [ -n "$APP_BIN_PATH" ] && [ -f "$APP_BIN_PATH" ]; then
try_compress_binary_with_upx "$APP_BIN_PATH" "macOS arm64 应用主程序"
else
echo -e "${RED} ❌ 未找到 macOS arm64 主程序文件,无法执行 UPX 压缩。${NC}"
exit 1
fi
# Ad-hoc 代码签名(无 Apple Developer 账号时防止 Gatekeeper 报已损坏)
echo " 🔏 正在对 .app 进行 ad-hoc 签名 (arm64)..."
codesign --force --deep --sign - "$DIST_DIR/$APP_DEST_NAME"
# 创建 DMG
if command -v create-dmg &> /dev/null; then
echo " 📦 正在打包 DMG (arm64)..."
# 移除已存在的 DMG (以防万一)
rm -f "$DIST_DIR/$DMG_NAME"
create-dmg \
--volname "${APP_NAME} ${VERSION}" \
--volicon "build/appicon.icns" \
--window-pos 200 120 \
--window-size 800 400 \
--icon-size 100 \
--icon "$APP_DEST_NAME" 200 190 \
--hide-extension "$APP_DEST_NAME" \
--app-drop-link 600 185 \
"$DIST_DIR/$DMG_NAME" \
"$DIST_DIR/$APP_DEST_NAME"
# 检查是否生成了 rw.* 的临时文件并重命名 (create-dmg 有时会有此行为)
if [ ! -f "$DIST_DIR/$DMG_NAME" ]; then
RW_FILE=$(find "$DIST_DIR" -name "rw.*.dmg" -print -quit)
if [ -n "$RW_FILE" ]; then
echo -e "${YELLOW} ⚠️ 检测到临时文件名,正在重命名...${NC}"
mv "$RW_FILE" "$DIST_DIR/$DMG_NAME"
fi
fi
# 创建 DMG
if command -v create-dmg &> /dev/null; then
echo " 📦 正在打包 DMG (arm64)..."
# 移除已存在的 DMG (以防万一)
rm -f "$DIST_DIR/$DMG_NAME"
# create-dmg 的 source 需要是“包含 .app 的目录”,不能直接传 .app 路径。
STAGE_DIR=$(mktemp -d "$DIST_DIR/.dmg-stage-${APP_NAME}-${VERSION}-arm64.XXXXXX")
if [ -z "$STAGE_DIR" ] || [ ! -d "$STAGE_DIR" ]; then
echo -e "${RED} ❌ 创建 DMG 临时目录失败,跳过 DMG 打包。${NC}"
else
if command -v ditto &> /dev/null; then
ditto "$DIST_DIR/$APP_DEST_NAME" "$STAGE_DIR/$APP_DEST_NAME"
else
cp -R "$DIST_DIR/$APP_DEST_NAME" "$STAGE_DIR/$APP_DEST_NAME"
fi
# --sandbox-safe 会跳过 Finder 的 AppleScript 排版,避免打包过程中弹出/打开挂载窗口CI/本地静默打包更友好)。
CREATE_DMG_ARGS=(--volname "${APP_NAME} ${VERSION}" --format UDZO --sandbox-safe)
if [ -n "$MAC_VOLICON_PATH" ]; then
CREATE_DMG_ARGS+=(--volicon "$MAC_VOLICON_PATH")
# 删除中间的 .app 文件,保持目录整洁
rm -rf "$DIST_DIR/$APP_DEST_NAME"
if [ -f "$DIST_DIR/$DMG_NAME" ]; then
echo " ✅ 已生成 $DMG_NAME"
else
echo -e "${YELLOW} ⚠️ 未找到 macOS 卷图标 (build/darwin/icon.icns),跳过 --volicon${NC}"
echo -e "${RED} ❌ DMG 生成失败,请检查 create-dmg 输出${NC}"
fi
create-dmg "${CREATE_DMG_ARGS[@]}" \
--window-pos 200 120 \
--window-size 800 400 \
--icon-size 100 \
--icon "$APP_DEST_NAME" 200 190 \
--hide-extension "$APP_DEST_NAME" \
--app-drop-link 600 185 \
"$DIST_DIR/$DMG_NAME" \
"$STAGE_DIR"
CREATE_DMG_EXIT_CODE=$?
rm -rf "$STAGE_DIR"
if [ $CREATE_DMG_EXIT_CODE -ne 0 ]; then
echo -e "${RED} ❌ create-dmg 执行失败 (exit=$CREATE_DMG_EXIT_CODE),保留 .app 以便排查。${NC}"
else
# create-dmg 可能会在失败时遗留 rw.*.dmg 中间产物;不要直接当作最终产物使用
if [ ! -f "$DIST_DIR/$DMG_NAME" ]; then
RW_FILE=$(find "$DIST_DIR" -maxdepth 1 -name "rw.*.dmg" -print -quit)
if [ -n "$RW_FILE" ]; then
echo -e "${YELLOW} ⚠️ 检测到 create-dmg 中间产物: $(basename "$RW_FILE"),正在转换为可分发 DMG...${NC}"
hdiutil convert "$RW_FILE" -format UDZO -o "$DIST_DIR/$DMG_NAME" >/dev/null 2>&1
rm -f "$RW_FILE"
fi
fi
# 防御性:即使生成了目标文件,也要确保不是 UDRWUDRW 在 Finder 下可能表现为“已损坏/无法打开”)
if [ -f "$DIST_DIR/$DMG_NAME" ] && command -v hdiutil &> /dev/null; then
DMG_FORMAT=$(hdiutil imageinfo "$DIST_DIR/$DMG_NAME" 2>/dev/null | awk -F': ' '/^Format:/{print $2; exit}')
if [ "$DMG_FORMAT" = "UDRW" ]; then
echo -e "${YELLOW} ⚠️ 检测到 UDRW可写原始映像正在转换为 UDZO...${NC}"
TMP_UDZO="$DIST_DIR/.tmp.$DMG_NAME"
rm -f "$TMP_UDZO"
hdiutil convert "$DIST_DIR/$DMG_NAME" -format UDZO -o "$TMP_UDZO" >/dev/null 2>&1 && mv "$TMP_UDZO" "$DIST_DIR/$DMG_NAME"
fi
fi
if [ -f "$DIST_DIR/$DMG_NAME" ] && command -v hdiutil &> /dev/null; then
hdiutil verify "$DIST_DIR/$DMG_NAME" >/dev/null 2>&1
if [ $? -ne 0 ]; then
echo -e "${RED} ❌ DMG 校验失败,保留 .app 以便排查。${NC}"
else
# 删除中间的 .app 文件,保持目录整洁
rm -rf "$DIST_DIR/$APP_DEST_NAME"
echo " ✅ 已生成 $DMG_NAME"
fi
fi
fi
if [ ! -f "$DIST_DIR/$DMG_NAME" ]; then
echo -e "${RED} ❌ DMG 生成失败,请检查 create-dmg 输出。${NC}"
fi
fi
else
echo -e "${YELLOW} ⚠️ 未找到 create-dmg 工具,跳过 DMG 打包,仅保留 .app。${NC}"
echo " 安装命令: brew install create-dmg"
fi
else
else
echo -e "${YELLOW} ⚠️ 未找到 create-dmg 工具,跳过 DMG 打包,仅保留 .app。${NC}"
echo " 安装命令: brew install create-dmg"
fi
else
echo -e "${RED} ❌ macOS arm64 构建失败。${NC}"
fi
@@ -212,96 +89,44 @@ if [ $? -eq 0 ]; then
DMG_NAME="${APP_NAME}-${VERSION}-mac-amd64.dmg"
mv "$APP_SRC" "$DIST_DIR/$APP_DEST_NAME"
APP_BIN_PATH=$(find "$DIST_DIR/$APP_DEST_NAME/Contents/MacOS" -maxdepth 1 -type f -print -quit)
if [ -n "$APP_BIN_PATH" ] && [ -f "$APP_BIN_PATH" ]; then
try_compress_binary_with_upx "$APP_BIN_PATH" "macOS amd64 应用主程序"
else
echo -e "${RED} ❌ 未找到 macOS amd64 主程序文件,无法执行 UPX 压缩。${NC}"
exit 1
fi
# Ad-hoc 代码签名
echo " 🔏 正在对 .app 进行 ad-hoc 签名 (amd64)..."
codesign --force --deep --sign - "$DIST_DIR/$APP_DEST_NAME"
if command -v create-dmg &> /dev/null; then
echo " 📦 正在打包 DMG (amd64)..."
rm -f "$DIST_DIR/$DMG_NAME"
create-dmg \
--volname "${APP_NAME} ${VERSION}" \
--volicon "build/appicon.icns" \
--window-pos 200 120 \
--window-size 800 400 \
--icon-size 100 \
--icon "$APP_DEST_NAME" 200 190 \
--hide-extension "$APP_DEST_NAME" \
--app-drop-link 600 185 \
"$DIST_DIR/$DMG_NAME" \
"$DIST_DIR/$APP_DEST_NAME"
if command -v create-dmg &> /dev/null; then
echo " 📦 正在打包 DMG (amd64)..."
rm -f "$DIST_DIR/$DMG_NAME"
# create-dmg 的 source 需要是“包含 .app 的目录”,不能直接传 .app 路径。
STAGE_DIR=$(mktemp -d "$DIST_DIR/.dmg-stage-${APP_NAME}-${VERSION}-amd64.XXXXXX")
if [ -z "$STAGE_DIR" ] || [ ! -d "$STAGE_DIR" ]; then
echo -e "${RED} ❌ 创建 DMG 临时目录失败,跳过 DMG 打包。${NC}"
else
if command -v ditto &> /dev/null; then
ditto "$DIST_DIR/$APP_DEST_NAME" "$STAGE_DIR/$APP_DEST_NAME"
else
cp -R "$DIST_DIR/$APP_DEST_NAME" "$STAGE_DIR/$APP_DEST_NAME"
fi
# --sandbox-safe 会跳过 Finder 的 AppleScript 排版,避免打包过程中弹出/打开挂载窗口CI/本地静默打包更友好)。
CREATE_DMG_ARGS=(--volname "${APP_NAME} ${VERSION}" --format UDZO --sandbox-safe)
if [ -n "$MAC_VOLICON_PATH" ]; then
CREATE_DMG_ARGS+=(--volicon "$MAC_VOLICON_PATH")
else
echo -e "${YELLOW} ⚠️ 未找到 macOS 卷图标 (build/darwin/icon.icns),跳过 --volicon。${NC}"
fi
create-dmg "${CREATE_DMG_ARGS[@]}" \
--window-pos 200 120 \
--window-size 800 400 \
--icon-size 100 \
--icon "$APP_DEST_NAME" 200 190 \
--hide-extension "$APP_DEST_NAME" \
--app-drop-link 600 185 \
"$DIST_DIR/$DMG_NAME" \
"$STAGE_DIR"
CREATE_DMG_EXIT_CODE=$?
rm -rf "$STAGE_DIR"
if [ $CREATE_DMG_EXIT_CODE -ne 0 ]; then
echo -e "${RED} ❌ create-dmg 执行失败 (exit=$CREATE_DMG_EXIT_CODE),保留 .app 以便排查。${NC}"
else
if [ ! -f "$DIST_DIR/$DMG_NAME" ]; then
RW_FILE=$(find "$DIST_DIR" -maxdepth 1 -name "rw.*.dmg" -print -quit)
if [ -n "$RW_FILE" ]; then
echo -e "${YELLOW} ⚠️ 检测到 create-dmg 中间产物: $(basename "$RW_FILE"),正在转换为可分发 DMG...${NC}"
hdiutil convert "$RW_FILE" -format UDZO -o "$DIST_DIR/$DMG_NAME" >/dev/null 2>&1
rm -f "$RW_FILE"
fi
fi
if [ -f "$DIST_DIR/$DMG_NAME" ] && command -v hdiutil &> /dev/null; then
DMG_FORMAT=$(hdiutil imageinfo "$DIST_DIR/$DMG_NAME" 2>/dev/null | awk -F': ' '/^Format:/{print $2; exit}')
if [ "$DMG_FORMAT" = "UDRW" ]; then
echo -e "${YELLOW} ⚠️ 检测到 UDRW可写原始映像正在转换为 UDZO...${NC}"
TMP_UDZO="$DIST_DIR/.tmp.$DMG_NAME"
rm -f "$TMP_UDZO"
hdiutil convert "$DIST_DIR/$DMG_NAME" -format UDZO -o "$TMP_UDZO" >/dev/null 2>&1 && mv "$TMP_UDZO" "$DIST_DIR/$DMG_NAME"
fi
fi
if [ -f "$DIST_DIR/$DMG_NAME" ] && command -v hdiutil &> /dev/null; then
hdiutil verify "$DIST_DIR/$DMG_NAME" >/dev/null 2>&1
if [ $? -ne 0 ]; then
echo -e "${RED} ❌ DMG 校验失败,保留 .app 以便排查。${NC}"
else
rm -rf "$DIST_DIR/$APP_DEST_NAME"
echo " ✅ 已生成 $DMG_NAME"
fi
fi
# 检查是否生成了 rw.* 的临时文件并重命名
if [ ! -f "$DIST_DIR/$DMG_NAME" ]; then
RW_FILE=$(find "$DIST_DIR" -name "rw.*.dmg" -print -quit)
if [ -n "$RW_FILE" ]; then
echo -e "${YELLOW} ⚠️ 检测到临时文件名,正在重命名...${NC}"
mv "$RW_FILE" "$DIST_DIR/$DMG_NAME"
fi
fi
if [ ! -f "$DIST_DIR/$DMG_NAME" ]; then
echo -e "${RED} ❌ DMG 生成失败。${NC}"
fi
fi
else
echo -e "${YELLOW} ⚠️ 未找到 create-dmg 工具${NC}"
fi
else
echo -e "${RED} ❌ macOS amd64 构建失败${NC}"
rm -rf "$DIST_DIR/$APP_DEST_NAME"
if [ -f "$DIST_DIR/$DMG_NAME" ]; then
echo " ✅ 已生成 $DMG_NAME"
else
echo -e "${RED} ❌ DMG 生成失败${NC}"
fi
else
echo -e "${YELLOW} ⚠️ 未找到 create-dmg 工具${NC}"
fi
else
echo -e "${RED} ❌ macOS amd64 构建失败。${NC}"
fi
# --- Windows AMD64 构建 ---
@@ -309,9 +134,7 @@ echo -e "${GREEN}🪟 正在构建 Windows (amd64)...${NC}"
if command -v x86_64-w64-mingw32-gcc &> /dev/null; then
wails build -platform windows/amd64 -clean -ldflags "$LDFLAGS"
if [ $? -eq 0 ]; then
TARGET_EXE="$DIST_DIR/${APP_NAME}-${VERSION}-windows-amd64.exe"
mv "$BUILD_BIN_DIR/${DEFAULT_BINARY_NAME}.exe" "$TARGET_EXE"
try_compress_binary_with_upx "$TARGET_EXE" "Windows amd64 可执行文件"
mv "$BUILD_BIN_DIR/${DEFAULT_BINARY_NAME}.exe" "$DIST_DIR/${APP_NAME}-${VERSION}-windows-amd64.exe"
echo " ✅ 已生成 ${APP_NAME}-${VERSION}-windows-amd64.exe"
else
echo -e "${RED} ❌ Windows amd64 构建失败。${NC}"
@@ -325,9 +148,7 @@ echo -e "${GREEN}🪟 正在构建 Windows (arm64)...${NC}"
if command -v aarch64-w64-mingw32-gcc &> /dev/null; then
wails build -platform windows/arm64 -clean -ldflags "$LDFLAGS"
if [ $? -eq 0 ]; then
TARGET_EXE="$DIST_DIR/${APP_NAME}-${VERSION}-windows-arm64.exe"
mv "$BUILD_BIN_DIR/${DEFAULT_BINARY_NAME}.exe" "$TARGET_EXE"
try_compress_binary_with_upx "$TARGET_EXE" "Windows arm64 可执行文件"
mv "$BUILD_BIN_DIR/${DEFAULT_BINARY_NAME}.exe" "$DIST_DIR/${APP_NAME}-${VERSION}-windows-arm64.exe"
echo " ✅ 已生成 ${APP_NAME}-${VERSION}-windows-arm64.exe"
else
echo -e "${RED} ❌ Windows arm64 构建失败。${NC}"
@@ -347,10 +168,8 @@ if [ "$CURRENT_OS" = "Linux" ] && [ "$CURRENT_ARCH" = "x86_64" ]; then
# 本机 Linux amd64直接构建
wails build -platform linux/amd64 -clean -ldflags "$LDFLAGS"
if [ $? -eq 0 ]; then
TARGET_LINUX_BIN="$DIST_DIR/${APP_NAME}-${VERSION}-linux-amd64"
mv "$BUILD_BIN_DIR/${DEFAULT_BINARY_NAME}" "$TARGET_LINUX_BIN"
chmod +x "$TARGET_LINUX_BIN"
try_compress_binary_with_upx "$TARGET_LINUX_BIN" "Linux amd64 可执行文件"
mv "$BUILD_BIN_DIR/${DEFAULT_BINARY_NAME}" "$DIST_DIR/${APP_NAME}-${VERSION}-linux-amd64"
chmod +x "$DIST_DIR/${APP_NAME}-${VERSION}-linux-amd64"
# 打包为 tar.gz
cd "$DIST_DIR"
tar -czvf "${APP_NAME}-${VERSION}-linux-amd64.tar.gz" "${APP_NAME}-${VERSION}-linux-amd64"
@@ -367,10 +186,8 @@ elif command -v x86_64-linux-gnu-gcc &> /dev/null; then
export CGO_ENABLED=1
wails build -platform linux/amd64 -clean -ldflags "$LDFLAGS"
if [ $? -eq 0 ]; then
TARGET_LINUX_BIN="$DIST_DIR/${APP_NAME}-${VERSION}-linux-amd64"
mv "$BUILD_BIN_DIR/${DEFAULT_BINARY_NAME}" "$TARGET_LINUX_BIN"
chmod +x "$TARGET_LINUX_BIN"
try_compress_binary_with_upx "$TARGET_LINUX_BIN" "Linux amd64 可执行文件"
mv "$BUILD_BIN_DIR/${DEFAULT_BINARY_NAME}" "$DIST_DIR/${APP_NAME}-${VERSION}-linux-amd64"
chmod +x "$DIST_DIR/${APP_NAME}-${VERSION}-linux-amd64"
cd "$DIST_DIR"
tar -czvf "${APP_NAME}-${VERSION}-linux-amd64.tar.gz" "${APP_NAME}-${VERSION}-linux-amd64"
rm "${APP_NAME}-${VERSION}-linux-amd64"
@@ -391,10 +208,8 @@ if [ "$CURRENT_OS" = "Linux" ] && [ "$CURRENT_ARCH" = "aarch64" ]; then
# 本机 Linux arm64直接构建
wails build -platform linux/arm64 -clean -ldflags "$LDFLAGS"
if [ $? -eq 0 ]; then
TARGET_LINUX_BIN="$DIST_DIR/${APP_NAME}-${VERSION}-linux-arm64"
mv "$BUILD_BIN_DIR/${DEFAULT_BINARY_NAME}" "$TARGET_LINUX_BIN"
chmod +x "$TARGET_LINUX_BIN"
try_compress_binary_with_upx "$TARGET_LINUX_BIN" "Linux arm64 可执行文件"
mv "$BUILD_BIN_DIR/${DEFAULT_BINARY_NAME}" "$DIST_DIR/${APP_NAME}-${VERSION}-linux-arm64"
chmod +x "$DIST_DIR/${APP_NAME}-${VERSION}-linux-arm64"
cd "$DIST_DIR"
tar -czvf "${APP_NAME}-${VERSION}-linux-arm64.tar.gz" "${APP_NAME}-${VERSION}-linux-arm64"
rm "${APP_NAME}-${VERSION}-linux-arm64"
@@ -410,10 +225,8 @@ elif command -v aarch64-linux-gnu-gcc &> /dev/null; then
export CGO_ENABLED=1
wails build -platform linux/arm64 -clean -ldflags "$LDFLAGS"
if [ $? -eq 0 ]; then
TARGET_LINUX_BIN="$DIST_DIR/${APP_NAME}-${VERSION}-linux-arm64"
mv "$BUILD_BIN_DIR/${DEFAULT_BINARY_NAME}" "$TARGET_LINUX_BIN"
chmod +x "$TARGET_LINUX_BIN"
try_compress_binary_with_upx "$TARGET_LINUX_BIN" "Linux arm64 可执行文件"
mv "$BUILD_BIN_DIR/${DEFAULT_BINARY_NAME}" "$DIST_DIR/${APP_NAME}-${VERSION}-linux-arm64"
chmod +x "$DIST_DIR/${APP_NAME}-${VERSION}-linux-arm64"
cd "$DIST_DIR"
tar -czvf "${APP_NAME}-${VERSION}-linux-arm64.tar.gz" "${APP_NAME}-${VERSION}-linux-arm64"
rm "${APP_NAME}-${VERSION}-linux-arm64"

View File

@@ -5,23 +5,6 @@
<link rel="icon" type="image/svg+xml" href="/logo.svg" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>GoNavi</title>
<script>
if (typeof window !== 'undefined' && !window.go) {
window.go = {
app: {
App: new Proxy({}, { get: () => async () => ({ success: false }) })
}
};
}
if (typeof window !== 'undefined' && !window.runtime) {
window.runtime = new Proxy({}, {
get: (target, prop) => {
if (prop === 'Environment') return async () => ({ platform: 'darwin' });
return typeof prop === 'string' && prop.startsWith('WindowIs') ? () => false : () => {};
}
});
}
</script>
</head>
<body>
<div id="root"></div>

View File

@@ -1 +1 @@
5b8157374dae5f9340e31b2d0bd2c00e
d0f9366af59a6367ad3c7e2d4185ead4

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -1,11 +1,9 @@
import React, { useState, useEffect, useMemo, useRef } from 'react';
import { Modal, Form, Select, Input, Button, message, Steps, Transfer, Card, Alert, Divider, Typography, Progress, Checkbox, Table, Drawer, Tabs, theme as antdTheme } from 'antd';
import { DatabaseOutlined, RocketOutlined, SwapOutlined, TableOutlined } from '@ant-design/icons';
import { Modal, Form, Select, Button, message, Steps, Transfer, Card, Alert, Divider, Typography, Progress, Checkbox, Table, Drawer, Tabs } from 'antd';
import { useStore } from '../store';
import { DBGetDatabases, DBGetTables, DataSync, DataSyncAnalyze, DataSyncPreview } from '../../wailsjs/go/app/App';
import { SavedConnection } from '../types';
import { EventsOn } from '../../wailsjs/runtime/runtime';
import { normalizeOpacityForPlatform, resolveAppearanceValues } from '../utils/appearance';
const { Title, Text } = Typography;
const { Step } = Steps;
@@ -23,12 +21,6 @@ type TableDiffSummary = {
deletes?: number;
same?: number;
message?: string;
targetTableExists?: boolean;
plannedAction?: string;
warnings?: string[];
unsupportedObjects?: string[];
indexesToCreate?: number;
indexesSkipped?: number;
};
type TableOps = {
insert: boolean;
@@ -39,8 +31,6 @@ type TableOps = {
selectedDeletePks?: string[];
};
type WorkflowType = 'sync' | 'migration';
const quoteSqlIdent = (dbType: string, ident: string): string => {
const raw = String(ident || '').trim();
if (!raw) return raw;
@@ -86,11 +76,6 @@ const toSqlLiteral = (value: any, dbType: string): string => {
return `'${String(value).replace(/'/g, "''")}'`;
};
const resolveRedisDbIndex = (raw?: string): number => {
const value = Number(String(raw || '').trim());
return Number.isInteger(value) && value >= 0 && value <= 15 ? value : 0;
};
const buildSqlPreview = (
previewData: any,
tableName: string,
@@ -160,14 +145,8 @@ const buildSqlPreview = (
const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open, onClose }) => {
const connections = useStore((state) => state.connections);
const themeMode = useStore((state) => state.theme);
const appearance = useStore((state) => state.appearance);
const [currentStep, setCurrentStep] = useState(0);
const [loading, setLoading] = useState(false);
const { token } = antdTheme.useToken();
const darkMode = themeMode === 'dark';
const resolvedAppearance = resolveAppearanceValues(appearance);
const effectiveOpacity = normalizeOpacityForPlatform(resolvedAppearance.opacity);
// Step 1: Config
const [sourceConnId, setSourceConnId] = useState<string>('');
@@ -183,13 +162,9 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open,
const [selectedTables, setSelectedTables] = useState<string[]>([]);
// Options
const [workflowType, setWorkflowType] = useState<WorkflowType>('sync');
const [syncContent, setSyncContent] = useState<'data' | 'schema' | 'both'>('data');
const [syncMode, setSyncMode] = useState<string>('insert_update');
const [autoAddColumns, setAutoAddColumns] = useState<boolean>(true);
const [targetTableStrategy, setTargetTableStrategy] = useState<'existing_only' | 'auto_create_if_missing' | 'smart'>('existing_only');
const [createIndexes, setCreateIndexes] = useState<boolean>(false);
const [mongoCollectionName, setMongoCollectionName] = useState<string>('');
const [showSameTables, setShowSameTables] = useState<boolean>(false);
const [analyzing, setAnalyzing] = useState<boolean>(false);
const [diffTables, setDiffTables] = useState<TableDiffSummary[]>([]);
@@ -265,12 +240,9 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open,
setSourceDb('');
setTargetDb('');
setSelectedTables([]);
setWorkflowType('sync');
setSyncContent('data');
setSyncMode('insert_update');
setAutoAddColumns(true);
setTargetTableStrategy('existing_only');
setCreateIndexes(false);
setShowSameTables(false);
setAnalyzing(false);
setDiffTables([]);
@@ -288,30 +260,6 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open,
}
}, [open]);
useEffect(() => {
if (workflowType === 'migration') {
if (syncMode === 'insert_update') {
setSyncMode('insert_only');
}
if (syncContent === 'schema') {
setSyncContent('both');
}
if (targetTableStrategy === 'existing_only') {
setTargetTableStrategy('smart');
}
if (!createIndexes) {
setCreateIndexes(true);
}
} else {
if (targetTableStrategy !== 'existing_only') {
setTargetTableStrategy('existing_only');
}
if (createIndexes) {
setCreateIndexes(false);
}
}
}, [workflowType]);
const handleSourceConnChange = async (connId: string) => {
setSourceConnId(connId);
setSourceDb('');
@@ -409,9 +357,6 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open,
content: syncContent,
mode: "insert_update",
autoAddColumns,
targetTableStrategy,
createIndexes,
mongoCollectionName: mongoCollectionName.trim(),
jobId,
};
@@ -462,9 +407,6 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open,
content: "data",
mode: "insert_update",
autoAddColumns,
targetTableStrategy,
createIndexes,
mongoCollectionName: mongoCollectionName.trim(),
};
try {
@@ -541,9 +483,6 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open,
content: syncContent,
mode: syncMode,
autoAddColumns,
targetTableStrategy,
createIndexes,
mongoCollectionName: mongoCollectionName.trim(),
tableOptions,
jobId,
};
@@ -591,132 +530,10 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open,
return buildSqlPreview(previewData, previewTable, targetType, ops);
}, [previewData, previewTable, targetConnId, connections, tableOptions]);
const analysisWarnings = useMemo(() => {
const items: string[] = [];
diffTables.forEach((table) => {
(table.warnings || []).forEach((warning) => items.push(`${table.table}: ${warning}`));
(table.unsupportedObjects || []).forEach((warning) => items.push(`${table.table}: ${warning}`));
});
return Array.from(new Set(items));
}, [diffTables]);
const isMigrationWorkflow = workflowType === 'migration';
const sourceConn = useMemo(() => connections.find(c => c.id === sourceConnId), [connections, sourceConnId]);
const targetConn = useMemo(() => connections.find(c => c.id === targetConnId), [connections, targetConnId]);
const sourceType = String(sourceConn?.config?.type || '').toLowerCase();
const targetType = String(targetConn?.config?.type || '').toLowerCase();
const isRedisMongoKeyspaceMigration = isMigrationWorkflow && (
(sourceType === 'redis' && targetType === 'mongodb') ||
(sourceType === 'mongodb' && targetType === 'redis')
);
const defaultMongoCollectionName = useMemo(() => {
if (sourceType === 'redis' && targetType === 'mongodb') {
return `redis_db_${resolveRedisDbIndex(sourceDb || sourceConn?.config?.database)}_keys`;
}
if (sourceType === 'mongodb' && targetType === 'redis') {
return selectedTables[0] || `redis_db_${resolveRedisDbIndex(targetDb || targetConn?.config?.database)}_keys`;
}
return '';
}, [sourceType, targetType, sourceDb, targetDb, sourceConn, targetConn, selectedTables]);
const modalPanelStyle = useMemo(() => ({
background: darkMode
? 'linear-gradient(180deg, rgba(16,22,34,0.96) 0%, rgba(10,14,24,0.98) 100%)'
: 'linear-gradient(180deg, rgba(255,255,255,0.98) 0%, rgba(246,248,252,0.98) 100%)',
border: darkMode ? '1px solid rgba(255,255,255,0.08)' : '1px solid rgba(16,24,40,0.08)',
boxShadow: darkMode ? '0 24px 56px rgba(0,0,0,0.36)' : '0 18px 44px rgba(15,23,42,0.14)',
backdropFilter: darkMode ? 'blur(18px)' : 'none',
}), [darkMode]);
const shellCardStyle = useMemo<React.CSSProperties>(() => ({
borderRadius: 18,
border: darkMode ? '1px solid rgba(255,255,255,0.08)' : '1px solid rgba(15,23,42,0.08)',
background: darkMode ? 'rgba(255,255,255,0.03)' : `rgba(255,255,255,${Math.max(effectiveOpacity, 0.88)})`,
boxShadow: darkMode ? '0 12px 32px rgba(0,0,0,0.22)' : '0 10px 24px rgba(15,23,42,0.08)',
overflow: 'hidden',
}), [darkMode, effectiveOpacity]);
const heroPanelStyle = useMemo<React.CSSProperties>(() => ({
padding: 18,
borderRadius: 18,
border: darkMode ? '1px solid rgba(255,214,102,0.12)' : '1px solid rgba(24,144,255,0.12)',
background: darkMode
? 'linear-gradient(135deg, rgba(255,214,102,0.10) 0%, rgba(255,255,255,0.03) 100%)'
: 'linear-gradient(135deg, rgba(24,144,255,0.10) 0%, rgba(255,255,255,0.95) 100%)',
marginBottom: 18,
}), [darkMode]);
const badgeStyle = useMemo<React.CSSProperties>(() => ({
display: 'inline-flex',
alignItems: 'center',
gap: 6,
padding: '6px 10px',
borderRadius: 999,
border: darkMode ? '1px solid rgba(255,255,255,0.10)' : '1px solid rgba(15,23,42,0.08)',
background: darkMode ? 'rgba(255,255,255,0.04)' : 'rgba(255,255,255,0.86)',
color: darkMode ? 'rgba(255,255,255,0.88)' : '#334155',
fontSize: 12,
fontWeight: 600,
}), [darkMode]);
const quietPanelStyle = useMemo<React.CSSProperties>(() => ({
padding: 14,
borderRadius: 16,
border: darkMode ? '1px solid rgba(255,255,255,0.08)' : '1px solid rgba(15,23,42,0.08)',
background: darkMode ? 'rgba(255,255,255,0.025)' : 'rgba(248,250,252,0.92)',
}), [darkMode]);
const modalWorkspaceStyle = useMemo<React.CSSProperties>(() => ({
display: 'flex',
flexDirection: 'column',
height: '100%',
minHeight: 0,
}), []);
const modalScrollableContentStyle = useMemo<React.CSSProperties>(() => ({
flex: 1,
minHeight: 0,
overflowY: 'auto',
overflowX: 'hidden',
paddingRight: 4,
overscrollBehavior: 'contain',
}), []);
const modalFooterBarStyle = useMemo<React.CSSProperties>(() => ({
marginTop: 18,
display: 'flex',
justifyContent: 'flex-end',
gap: 8,
paddingTop: 12,
borderTop: darkMode ? '1px solid rgba(255,255,255,0.06)' : '1px solid rgba(15,23,42,0.06)',
flex: '0 0 auto',
}), [darkMode]);
const renderModalTitle = (title: string, description: string) => (
<div style={{ display: 'flex', alignItems: 'flex-start', gap: 12 }}>
<div style={{
width: 38,
height: 38,
borderRadius: 14,
display: 'grid',
placeItems: 'center',
background: darkMode ? 'rgba(255,214,102,0.12)' : 'rgba(24,144,255,0.10)',
color: darkMode ? '#ffd666' : token.colorPrimary,
flexShrink: 0,
}}>
{isMigrationWorkflow ? <RocketOutlined /> : <SwapOutlined />}
</div>
<div style={{ minWidth: 0 }}>
<div style={{ fontSize: 16, fontWeight: 700, color: darkMode ? '#f8fafc' : '#0f172a' }}>{title}</div>
<div style={{ marginTop: 4, fontSize: 12, lineHeight: 1.6, color: darkMode ? 'rgba(255,255,255,0.56)' : 'rgba(15,23,42,0.58)' }}>{description}</div>
</div>
</div>
);
return (
<>
<Modal
title={renderModalTitle(isMigrationWorkflow ? '跨库迁移工作台' : '数据同步工作台', isMigrationWorkflow ? '按源库 → 目标库完成建表、导入与风险预检。' : '按已有目标表完成差异对比、同步执行与结果确认。')}
title="数据同步"
open={open}
onCancel={() => {
if (syncing) {
@@ -725,61 +542,23 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open,
}
onClose();
}}
width={920}
width={800}
footer={null}
destroyOnHidden
closable={!syncing}
maskClosable={!syncing}
styles={{
content: modalPanelStyle,
header: { background: 'transparent', borderBottom: 'none', paddingBottom: 10 },
body: {
paddingTop: 8,
height: 760,
maxHeight: 'calc(100vh - 120px)',
overflow: 'hidden',
display: 'flex',
flexDirection: 'column',
},
footer: { background: 'transparent', borderTop: 'none', paddingTop: 12 },
}}
>
<div style={modalWorkspaceStyle}>
<div style={{ flex: '0 0 auto' }}>
<div style={heroPanelStyle}>
<div style={{ display: 'flex', justifyContent: 'space-between', gap: 12, alignItems: 'flex-start', flexWrap: 'wrap' }}>
<div style={{ minWidth: 0 }}>
<div style={{ fontSize: 18, fontWeight: 700, color: darkMode ? '#f8fafc' : '#0f172a' }}>{isMigrationWorkflow ? '跨数据源迁移' : '数据同步'}</div>
<div style={{ marginTop: 6, fontSize: 13, lineHeight: 1.7, color: darkMode ? 'rgba(255,255,255,0.62)' : 'rgba(15,23,42,0.62)' }}>
{isMigrationWorkflow
? '适合把源表迁移到另一套数据库,可按策略自动建表、导入数据并补建可兼容索引。'
: '适合目标表已存在的场景,先做差异分析,再按勾选执行插入、更新或删除。'}
</div>
</div>
<div style={{ display: 'flex', flexWrap: 'wrap', gap: 8 }}>
<span style={badgeStyle}>{isMigrationWorkflow ? <RocketOutlined /> : <SwapOutlined />} {isMigrationWorkflow ? '迁移模式' : '同步模式'}</span>
<span style={badgeStyle}><DatabaseOutlined /> {sourceConnId ? '已选源连接' : '待选源连接'}</span>
<span style={badgeStyle}><TableOutlined /> {selectedTables.length || 0} </span>
</div>
</div>
</div>
<Steps current={currentStep} style={{ marginBottom: 24 }}>
<Step title="配置源与目标" />
<Step title="选择表" />
<Step title="执行结果" />
</Steps>
</div>
<div style={modalScrollableContentStyle}>
{/* STEP 1: CONFIG */}
{currentStep === 0 && (
<div>
<div style={{ display: 'grid', gridTemplateColumns: 'minmax(0, 1fr) 44px minmax(0, 1fr)', gap: 18, alignItems: 'stretch' }}>
<Card
title="源数据库"
style={shellCardStyle}
styles={{ header: { borderBottom: darkMode ? '1px solid rgba(255,255,255,0.08)' : '1px solid rgba(15,23,42,0.06)', fontWeight: 700 }, body: { padding: 18 } }}
>
<div style={{ display: 'flex', gap: 24, justifyContent: 'center' }}>
<Card title="源数据库" style={{ width: 350 }}>
<Form layout="vertical">
<Form.Item label="连接">
<Select value={sourceConnId} onChange={handleSourceConnChange}>
@@ -793,16 +572,8 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open,
</Form.Item>
</Form>
</Card>
<div style={{ display: 'grid', placeItems: 'center' }}>
<div style={{ ...badgeStyle, width: 44, height: 44, borderRadius: 14, justifyContent: 'center', padding: 0 }}>
<SwapOutlined />
</div>
</div>
<Card
title="目标数据库"
style={shellCardStyle}
styles={{ header: { borderBottom: darkMode ? '1px solid rgba(255,255,255,0.08)' : '1px solid rgba(15,23,42,0.06)', fontWeight: 700 }, body: { padding: 18 } }}
>
<div style={{ display: 'flex', alignItems: 'center' }}></div>
<Card title="目标数据库" style={{ width: 350 }}>
<Form layout="vertical">
<Form.Item label="连接">
<Select value={targetConnId} onChange={handleTargetConnChange}>
@@ -818,94 +589,27 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open,
</Card>
</div>
<Card
title={isMigrationWorkflow ? '迁移选项' : '同步选项'}
style={{ ...shellCardStyle, marginTop: 18 }}
styles={{ header: { borderBottom: darkMode ? '1px solid rgba(255,255,255,0.08)' : '1px solid rgba(15,23,42,0.06)', fontWeight: 700 }, body: { padding: 18 } }}
>
<div style={{ ...quietPanelStyle, marginBottom: 14 }}>
<Text style={{ color: darkMode ? 'rgba(255,255,255,0.72)' : 'rgba(15,23,42,0.68)', lineHeight: 1.7 }}>
</Text>
</div>
<Card title="同步选项" style={{ marginTop: 16 }}>
<Form layout="vertical">
<Form.Item label="功能类型">
<Select value={workflowType} onChange={setWorkflowType}>
<Option value="sync"></Option>
<Option value="migration"></Option>
</Select>
</Form.Item>
<Alert
type={isMigrationWorkflow ? 'info' : 'success'}
showIcon
style={{ marginBottom: 12 }}
message={isMigrationWorkflow
? '当前为“跨库迁移”模式:适合将表迁移到另一数据源,可自动建表并导入数据。'
: '当前为“数据同步”模式:适合目标表已存在时做增量同步或覆盖导入。'}
/>
<Form.Item label={isMigrationWorkflow ? '迁移内容' : '同步内容'}>
<Form.Item label="同步内容">
<Select value={syncContent} onChange={setSyncContent}>
<Option value="data"></Option>
<Option value="schema"></Option>
<Option value="both"> + </Option>
</Select>
</Form.Item>
<Form.Item label={isMigrationWorkflow ? '迁移模式' : '同步模式'}>
<Form.Item label="同步模式">
<Select value={syncMode} onChange={setSyncMode} disabled={syncContent === 'schema'}>
<Option value="insert_update">//</Option>
<Option value="insert_only"></Option>
<Option value="full_overwrite"></Option>
</Select>
</Form.Item>
<Form.Item label={isMigrationWorkflow ? '目标表处理策略' : '目标表要求'}>
<Select value={targetTableStrategy} onChange={setTargetTableStrategy} disabled={!isMigrationWorkflow}>
<Option value="existing_only">使</Option>
<Option value="auto_create_if_missing"></Option>
<Option value="smart"></Option>
</Select>
</Form.Item>
{isRedisMongoKeyspaceMigration && (
<Form.Item
label="Mongo 集合名(可选)"
extra={sourceType === 'redis'
? '为空时沿用默认集合名;填写后本次 Redis 键空间会统一写入该 Mongo 集合。'
: 'MongoDB → Redis 场景下通常直接选择源集合;这里留空即可,未显式选集合时才会回退使用该名称。'}
>
<Input
value={mongoCollectionName}
onChange={(e) => setMongoCollectionName(e.target.value)}
placeholder={defaultMongoCollectionName || '请输入 Mongo 集合名'}
allowClear
maxLength={128}
/>
</Form.Item>
)}
<Form.Item>
<Checkbox checked={autoAddColumns} onChange={(e) => setAutoAddColumns(e.target.checked)}>
MySQL MySQL Kingbase
MySQL
</Checkbox>
</Form.Item>
<Form.Item>
<Checkbox checked={createIndexes} onChange={(e) => setCreateIndexes(e.target.checked)} disabled={!isMigrationWorkflow || targetTableStrategy === 'existing_only'}>
/
</Checkbox>
</Form.Item>
{isMigrationWorkflow && targetTableStrategy !== 'existing_only' && (
<Alert
type="info"
showIcon
message="自动建表模式首期仅支持 MySQL → Kingbase将迁移字段、主键、普通/唯一/联合索引,并显式跳过全文、空间、前缀、函数类索引。"
style={{ marginBottom: 12 }}
/>
)}
{!isMigrationWorkflow && (
<Alert
type="info"
showIcon
message="数据同步模式默认基于已有目标表执行;如需跨数据源建表导入,请切换到“跨库迁移”。"
style={{ marginBottom: 12 }}
/>
)}
{syncContent !== 'schema' && syncMode === 'full_overwrite' && (
<Alert
type="warning"
@@ -920,42 +624,26 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open,
{/* STEP 2: TABLES */}
{currentStep === 1 && (
<div style={{ display: 'flex', flexDirection: 'column', gap: 14 }}>
<div style={quietPanelStyle}>
<div style={{ display: 'flex', justifyContent: 'space-between', alignItems: 'center', marginBottom: 10 }}>
<Text type="secondary"></Text>
<div style={{ display: 'flex', flexDirection: 'column', gap: 12 }}>
<div style={{ display: 'flex', justifyContent: 'space-between', alignItems: 'center' }}>
<Text type="secondary">:</Text>
<Checkbox checked={showSameTables} onChange={(e) => setShowSameTables(e.target.checked)}>
</Checkbox>
</div>
<Transfer
</div>
<Transfer
dataSource={allTables.map(t => ({ key: t, title: t }))}
titles={['源表', '已选表']}
targetKeys={selectedTables}
onChange={(keys) => setSelectedTables(keys as string[])}
render={item => item.title}
listStyle={{ width: 390, height: 320, marginTop: 0, borderRadius: 14, overflow: 'hidden' }}
locale={{ itemUnit: '项', itemsUnit: '项', searchPlaceholder: '搜索表', notFoundContent: '暂无数据' }}
listStyle={{ width: 350, height: 280, marginTop: 0 }}
locale={{ itemUnit: '项', itemsUnit: '项', searchPlaceholder: '搜索表', notFoundContent: '暂无数据' }}
/>
</div>
{diffTables.length > 0 && (
<div style={quietPanelStyle}>
<Divider orientation="left" style={{ marginTop: 0 }}></Divider>
{analysisWarnings.length > 0 && (
<Alert
type="warning"
showIcon
message="预检发现风险或降级项,请在执行前确认"
description={
<ul style={{ margin: 0, paddingLeft: 18 }}>
{analysisWarnings.slice(0, 8).map((item) => <li key={item}>{item}</li>)}
{analysisWarnings.length > 8 && <li> {analysisWarnings.length - 8} </li>}
</ul>
}
style={{ marginBottom: 12 }}
/>
)}
<div>
<Divider orientation="left"></Divider>
<Table
size="small"
pagination={false}
@@ -967,29 +655,13 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open,
const same = Number(t.same || 0);
const msg = String(t.message || '').trim();
const can = !!t.canSync;
const warns = Array.isArray(t.warnings) ? t.warnings.length : 0;
const unsupported = Array.isArray(t.unsupportedObjects) ? t.unsupportedObjects.length : 0;
if (showSameTables) return true;
if (!can) return true;
if (msg || warns > 0 || unsupported > 0) return true;
if (msg) return true;
return ins > 0 || upd > 0 || del > 0 || same === 0;
})}
columns={[
{ title: '表名', dataIndex: 'table', key: 'table', ellipsis: true },
{
title: '目标表',
key: 'targetTableExists',
width: 90,
render: (_: any, r: any) => r.targetTableExists ? '已存在' : '不存在'
},
{
title: '计划',
dataIndex: 'plannedAction',
key: 'plannedAction',
width: 220,
ellipsis: true,
render: (v: any) => String(v || '')
},
{
title: '插入',
key: 'inserts',
@@ -998,7 +670,11 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open,
const ops = tableOptions[r.table] || { insert: true, update: true, delete: false };
const disabled = !r.canSync || analyzing || Number(r.inserts || 0) === 0;
return (
<Checkbox checked={!!ops.insert} disabled={disabled} onChange={(e) => updateTableOption(r.table, 'insert', e.target.checked)}>
<Checkbox
checked={!!ops.insert}
disabled={disabled}
onChange={(e) => updateTableOption(r.table, 'insert', e.target.checked)}
>
{Number(r.inserts || 0)}
</Checkbox>
);
@@ -1012,7 +688,11 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open,
const ops = tableOptions[r.table] || { insert: true, update: true, delete: false };
const disabled = !r.canSync || analyzing || Number(r.updates || 0) === 0;
return (
<Checkbox checked={!!ops.update} disabled={disabled} onChange={(e) => updateTableOption(r.table, 'update', e.target.checked)}>
<Checkbox
checked={!!ops.update}
disabled={disabled}
onChange={(e) => updateTableOption(r.table, 'update', e.target.checked)}
>
{Number(r.updates || 0)}
</Checkbox>
);
@@ -1026,28 +706,18 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open,
const ops = tableOptions[r.table] || { insert: true, update: true, delete: false };
const disabled = !r.canSync || analyzing || Number(r.deletes || 0) === 0;
return (
<Checkbox checked={!!ops.delete} disabled={disabled} onChange={(e) => updateTableOption(r.table, 'delete', e.target.checked)}>
<Checkbox
checked={!!ops.delete}
disabled={disabled}
onChange={(e) => updateTableOption(r.table, 'delete', e.target.checked)}
>
{Number(r.deletes || 0)}
</Checkbox>
);
}
},
{ title: '相同', dataIndex: 'same', key: 'same', width: 70, render: (v: any) => Number(v || 0) },
{
title: '风险',
key: 'warnings',
width: 220,
render: (_: any, r: any) => {
const warns = [...(Array.isArray(r.warnings) ? r.warnings : []), ...(Array.isArray(r.unsupportedObjects) ? r.unsupportedObjects : [])];
if (warns.length === 0) return '-';
return (
<div style={{ color: '#d48806', fontSize: 12, lineHeight: 1.5 }}>
{warns.slice(0, 2).map((item: string) => <div key={item}>{item}</div>)}
{warns.length > 2 && <div> {warns.length - 2} </div>}
</div>
);
}
},
{ title: '消息', dataIndex: 'message', key: 'message', ellipsis: true, render: (v: any) => (v ? String(v) : '') },
{
title: '预览',
key: 'preview',
@@ -1071,8 +741,7 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open,
{/* STEP 3: RESULT */}
{currentStep === 2 && (
<div style={{ display: 'flex', flexDirection: 'column', gap: 14 }}>
<div style={quietPanelStyle}>
<div>
<Alert
message={syncing ? "正在同步" : (syncResult?.success ? "同步完成" : "同步失败")}
description={
@@ -1084,7 +753,7 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open,
showIcon
/>
<div style={{ marginTop: 14 }}>
<div style={{ marginTop: 12 }}>
<Progress
percent={syncProgress.percent}
status={syncing ? "active" : (syncResult?.success ? "success" : "exception")}
@@ -1092,9 +761,7 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open,
/>
</div>
</div>
<div style={quietPanelStyle}>
<Divider orientation="left" style={{ marginTop: 0 }}></Divider>
<Divider orientation="left"></Divider>
<div
ref={logBoxRef}
onScroll={() => {
@@ -1103,25 +770,14 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open,
const nearBottom = el.scrollHeight - el.scrollTop - el.clientHeight < 40;
autoScrollRef.current = nearBottom;
}}
style={{
background: darkMode ? 'rgba(255,255,255,0.03)' : 'rgba(248,250,252,0.92)',
border: darkMode ? '1px solid rgba(255,255,255,0.08)' : '1px solid rgba(15,23,42,0.06)',
borderRadius: 14,
padding: 12,
height: 300,
overflowY: 'auto',
fontFamily: 'SFMono-Regular, ui-monospace, Menlo, Consolas, monospace'
}}
style={{ background: '#f5f5f5', padding: 12, height: 300, overflowY: 'auto', fontFamily: 'monospace' }}
>
{syncLogs.map((item, i: number) => <div key={i}>{renderSyncLogItem(item)}</div>)}
</div>
</div>
</div>
)}
</div>
<div style={modalFooterBarStyle}>
<div style={{ marginTop: 24, textAlign: 'right' }}>
{currentStep === 0 && (
<Button type="primary" onClick={nextToTables} loading={loading}></Button>
)}
@@ -1148,16 +804,14 @@ const DataSyncModal: React.FC<{ open: boolean; onClose: () => void }> = ({ open,
</>
)}
</div>
</div>
</Modal>
<Drawer
title={`差异预览:${previewTable}`}
styles={{ body: { background: darkMode ? 'rgba(9,13,20,0.98)' : '#f8fafc' } }}
open={previewOpen}
onClose={() => { setPreviewOpen(false); setPreviewTable(''); setPreviewData(null); }}
width={900}
>
{previewLoading && <Alert type="info" showIcon message="正在加载差异预览" />}
{previewLoading && <Alert type="info" showIcon message="正在加载差异预览..." />}
{!previewLoading && previewData && (
<div>
<Alert

View File

@@ -4,7 +4,7 @@ import { TabData, ColumnDefinition } from '../types';
import { useStore } from '../store';
import { DBQuery, DBGetColumns } from '../../wailsjs/go/app/App';
import DataGrid, { GONAVI_ROW_KEY } from './DataGrid';
import { buildOrderBySQL, buildPaginatedSelectSQL, buildWhereSQL, quoteIdentPart, quoteQualifiedIdent, withSortBufferTuningSQL, type FilterCondition } from '../utils/sql';
import { buildOrderBySQL, buildWhereSQL, quoteIdentPart, quoteQualifiedIdent, withSortBufferTuningSQL, type FilterCondition } from '../utils/sql';
import { buildMongoCountCommand, buildMongoFilter, buildMongoFindCommand, buildMongoSort } from '../utils/mongodb';
import { getDataSourceCapabilities } from '../utils/dataSourceCapabilities';
@@ -455,7 +455,7 @@ const DataViewer: React.FC<{ tab: TabData }> = ({ tab }) => {
if (pageRowCount > 0) {
const tailOffset = Math.max(0, totalRows - (offset + pageRowCount));
if (tailOffset < offset) {
sql = buildPaginatedSelectSQL(dbType, baseSql, reverseOrderSQL, pageRowCount, tailOffset);
sql = `${baseSql}${reverseOrderSQL} LIMIT ${pageRowCount} OFFSET ${tailOffset}`;
useClickHouseReversePagination = true;
clickHouseReverseLimit = pageRowCount;
clickHouseReverseHasMore = currentPage < totalPages;
@@ -464,7 +464,7 @@ const DataViewer: React.FC<{ tab: TabData }> = ({ tab }) => {
}
if (!useClickHouseReversePagination) {
// 大表性能:打开表不阻塞在 COUNT(*),先通过多取 1 条判断是否还有下一页;总数在后台统计并异步回填。
sql = buildPaginatedSelectSQL(dbType, baseSql, orderBySQL, size + 1, offset);
sql += ` LIMIT ${size + 1} OFFSET ${offset}`;
}
}
@@ -534,7 +534,8 @@ const DataViewer: React.FC<{ tab: TabData }> = ({ tab }) => {
if (safeSelect) {
let fallbackSql = `SELECT ${safeSelect} FROM ${quoteQualifiedIdent(dbType, tableName)} ${whereSQL}`;
fallbackSql = buildPaginatedSelectSQL(dbType, fallbackSql, buildOrderBySQL(dbType, sortInfo, pkColumns), size + 1, offset);
fallbackSql += buildOrderBySQL(dbType, sortInfo, pkColumns);
fallbackSql += ` LIMIT ${size + 1} OFFSET ${offset}`;
executedSql = fallbackSql;
resData = await executeDataQuery(fallbackSql, '复杂类型降级重试');
}

View File

@@ -3,7 +3,7 @@ import { Alert, Button, Collapse, Input, Modal, Progress, Select, Space, Switch,
import { DeleteOutlined, DownloadOutlined, FileSearchOutlined, FolderOpenOutlined, InfoCircleFilled, ReloadOutlined } from '@ant-design/icons';
import { EventsOn } from '../../wailsjs/runtime/runtime';
import { useStore } from '../store';
import { normalizeOpacityForPlatform, resolveAppearanceValues } from '../utils/appearance';
import { normalizeOpacityForPlatform } from '../utils/appearance';
import {
CheckDriverNetworkStatus,
DownloadDriverPackage,
@@ -166,8 +166,7 @@ const DriverManagerModal: React.FC<{ open: boolean; onClose: () => void; onOpenG
const theme = useStore((state) => state.theme);
const appearance = useStore((state) => state.appearance);
const darkMode = theme === 'dark';
const resolvedAppearance = resolveAppearanceValues(appearance);
const opacity = normalizeOpacityForPlatform(resolvedAppearance.opacity);
const opacity = normalizeOpacityForPlatform(appearance.opacity);
const modalContentRef = useRef<HTMLDivElement | null>(null);
const tableContainerRef = useRef<HTMLDivElement | null>(null);
const tableScrollTargetsRef = useRef<HTMLElement[]>([]);
@@ -1224,7 +1223,7 @@ const DriverManagerModal: React.FC<{ open: boolean; onClose: () => void; onOpenG
paddingRight: 18,
},
}}
destroyOnHidden
destroyOnClose
footer={(
<div className="driver-manager-footer">
<div

View File

@@ -1,8 +1,8 @@
import React, { useRef, useEffect } from 'react';
import { Table, Tag, Button, Tooltip, Empty } from 'antd';
import { ClearOutlined, CloseOutlined, BugOutlined, ClockCircleOutlined } from '@ant-design/icons';
import { Table, Tag, Button, Tooltip } from 'antd';
import { ClearOutlined, CloseOutlined, CaretRightOutlined, BugOutlined } from '@ant-design/icons';
import { useStore } from '../store';
import { normalizeOpacityForPlatform, resolveAppearanceValues } from '../utils/appearance';
import { normalizeOpacityForPlatform } from '../utils/appearance';
interface LogPanelProps {
height: number;
@@ -16,8 +16,7 @@ const LogPanel: React.FC<LogPanelProps> = ({ height, onClose, onResizeStart }) =
const theme = useStore(state => state.theme);
const appearance = useStore(state => state.appearance);
const darkMode = theme === 'dark';
const resolvedAppearance = resolveAppearanceValues(appearance);
const opacity = normalizeOpacityForPlatform(resolvedAppearance.opacity);
const opacity = normalizeOpacityForPlatform(appearance.opacity);
// Background Helper
const getBg = (darkHex: string) => {
@@ -29,25 +28,10 @@ const LogPanel: React.FC<LogPanelProps> = ({ height, onClose, onResizeStart }) =
return `rgba(${r}, ${g}, ${b}, ${opacity})`;
};
const bgMain = getBg('#1d1d1d');
const shellOpacity = darkMode ? Math.max(0.18, opacity * 0.82) : Math.max(0.28, opacity * 0.92);
const shellOpacityStrong = darkMode ? Math.max(0.22, opacity * 0.9) : Math.max(0.34, opacity * 0.96);
const panelDividerColor = darkMode
? `rgba(255,255,255,${Math.max(0.04, opacity * 0.10)})`
: `rgba(0,0,0,${Math.max(0.04, opacity * 0.08)})`;
const panelDividerColor = darkMode ? 'rgba(255,255,255,0.08)' : 'rgba(0,0,0,0.08)';
const panelMutedTextColor = darkMode ? 'rgba(255,255,255,0.62)' : 'rgba(0,0,0,0.58)';
const panelShellBg = darkMode
? `linear-gradient(180deg, rgba(15,20,30,${shellOpacity}) 0%, rgba(9,13,22,${shellOpacityStrong}) 100%)`
: `linear-gradient(180deg, rgba(255,255,255,${shellOpacityStrong}) 0%, rgba(246,248,252,${shellOpacity}) 100%)`;
const panelAccentColor = darkMode ? '#ffd666' : '#1677ff';
const panelShadow = darkMode
? `0 12px 28px rgba(0,0,0,${Math.max(0.05, opacity * 0.18)})`
: `0 12px 24px rgba(15,23,42,${Math.max(0.02, opacity * 0.08)})`;
const logScrollbarThumb = darkMode
? `rgba(255, 255, 255, ${Math.max(0.18, opacity * 0.34)})`
: `rgba(0, 0, 0, ${Math.max(0.12, opacity * 0.26)})`;
const logScrollbarThumbHover = darkMode
? `rgba(255, 255, 255, ${Math.max(0.28, opacity * 0.48)})`
: `rgba(0, 0, 0, ${Math.max(0.18, opacity * 0.36)})`;
const logScrollbarThumb = darkMode ? 'rgba(255, 255, 255, 0.34)' : 'rgba(0, 0, 0, 0.26)';
const logScrollbarThumbHover = darkMode ? 'rgba(255, 255, 255, 0.5)' : 'rgba(0, 0, 0, 0.36)';
const columns = [
{
@@ -61,7 +45,7 @@ const LogPanel: React.FC<LogPanelProps> = ({ height, onClose, onResizeStart }) =
dataIndex: 'status',
width: 70,
render: (status: string) => (
<Tag color={status === 'success' ? 'success' : 'error'} style={{ marginRight: 0, borderRadius: 999, paddingInline: 8, fontSize: 11, fontWeight: 700 }}>
<Tag color={status === 'success' ? 'success' : 'error'} style={{ marginRight: 0 }}>
{status === 'success' ? 'OK' : 'ERR'}
</Tag>
)
@@ -76,7 +60,7 @@ const LogPanel: React.FC<LogPanelProps> = ({ height, onClose, onResizeStart }) =
title: 'SQL / Message',
dataIndex: 'sql',
render: (text: string, record: any) => (
<div style={{ fontFamily: 'monospace', wordBreak: 'break-all', fontSize: '12px', lineHeight: '1.45' }}>
<div style={{ fontFamily: 'monospace', wordBreak: 'break-all', fontSize: '12px', lineHeight: '1.2' }}>
<div style={{ color: darkMode ? '#a6e22e' : '#005cc5' }}>{text}</div>
{record.message && <div style={{ color: '#ff4d4f', marginTop: 2 }}>{record.message}</div>}
{record.affectedRows !== undefined && <div style={{ color: panelMutedTextColor, marginTop: 1 }}>Affected: {record.affectedRows}</div>}
@@ -88,18 +72,12 @@ const LogPanel: React.FC<LogPanelProps> = ({ height, onClose, onResizeStart }) =
return (
<div style={{
height,
margin: 0,
border: `1px solid ${panelDividerColor}`,
borderRadius: 14,
background: panelShellBg,
WebkitBackdropFilter: opacity < 0.999 ? 'blur(14px)' : 'none',
boxShadow: panelShadow,
backdropFilter: darkMode && opacity < 0.999 ? 'blur(18px)' : 'none',
borderTop: `1px solid ${panelDividerColor}`,
background: bgMain,
display: 'flex',
flexDirection: 'column',
position: 'relative',
overflow: 'hidden',
zIndex: 100
zIndex: 100 // Ensure above other content
}}>
{/* Resize Handle */}
<div
@@ -117,53 +95,38 @@ const LogPanel: React.FC<LogPanelProps> = ({ height, onClose, onResizeStart }) =
{/* Toolbar */}
<div style={{
padding: '10px 14px',
padding: '4px 8px',
borderBottom: `1px solid ${panelDividerColor}`,
display: 'flex',
justifyContent: 'space-between',
alignItems: 'center',
gap: 12,
minHeight: 48
height: 32
}}>
<div style={{ display: 'flex', alignItems: 'center', gap: 10, minWidth: 0 }}>
<div style={{ width: 30, height: 30, borderRadius: 10, display: 'grid', placeItems: 'center', background: darkMode ? `rgba(255,214,102,${Math.max(0.10, Math.min(0.18, opacity * 0.18))})` : `rgba(24,144,255,${Math.max(0.08, Math.min(0.16, opacity * 0.16))})`, color: panelAccentColor, flexShrink: 0 }}>
<BugOutlined />
</div>
<div style={{ minWidth: 0 }}>
<div style={{ fontWeight: 700, fontSize: 13, color: darkMode ? '#f5f7ff' : '#162033' }}>SQL </div>
<div style={{ fontSize: 12, color: panelMutedTextColor }}>便</div>
</div>
<div style={{ display: 'flex', alignItems: 'center', gap: 8, fontWeight: 'bold', fontSize: '12px' }}>
<BugOutlined /> SQL
</div>
<div style={{ display: 'flex', alignItems: 'center', gap: 6 }}>
<div>
<Tooltip title="清空日志">
<Button type="text" size="small" icon={<ClearOutlined />} onClick={clearSqlLogs} style={{ color: panelMutedTextColor }} />
<Button type="text" size="small" icon={<ClearOutlined />} onClick={clearSqlLogs} />
</Tooltip>
<Tooltip title="关闭面板">
<Button type="text" size="small" icon={<CloseOutlined />} onClick={onClose} style={{ color: panelMutedTextColor }} />
<Button type="text" size="small" icon={<CloseOutlined />} onClick={onClose} />
</Tooltip>
</div>
</div>
{/* List */}
<div className="log-panel-scroll" style={{ flex: 1, overflow: 'auto', padding: '8px 10px 10px' }}>
{sqlLogs.length === 0 ? (
<div style={{ height: '100%', minHeight: 160, display: 'grid', placeItems: 'center' }}>
<Empty
image={Empty.PRESENTED_IMAGE_SIMPLE}
description={<span style={{ color: panelMutedTextColor }}> SQL </span>}
/>
</div>
) : (
<Table
className="log-panel-table"
dataSource={sqlLogs}
columns={columns}
size="small"
pagination={false}
rowKey="id"
showHeader={false}
/>
)}
<div className="log-panel-scroll" style={{ flex: 1, overflow: 'auto' }}>
<Table
className="log-panel-table"
dataSource={sqlLogs}
columns={columns}
size="small"
pagination={false}
rowKey="id"
showHeader={false}
// scroll={{ y: height - 32 }} // Let flex handle it
/>
</div>
<style>{`
.log-panel-scroll {
@@ -193,16 +156,6 @@ const LogPanel: React.FC<LogPanelProps> = ({ height, onClose, onResizeStart }) =
.log-panel-table .ant-table-tbody > tr > td {
background: transparent !important;
}
.log-panel-table .ant-table-tbody > tr > td {
padding: 8px 10px !important;
border-bottom: 1px solid ${panelDividerColor} !important;
}
.log-panel-table .ant-table-tbody > tr:last-child > td {
border-bottom: none !important;
}
.log-panel-table .ant-table-row:hover > td {
background: ${darkMode ? 'rgba(255,255,255,0.03)' : 'rgba(16,24,40,0.03)'} !important;
}
`}</style>
</div>
);

View File

@@ -5,7 +5,7 @@ import { useStore } from '../store';
import { RedisKeyInfo, RedisValue, StreamEntry } from '../types';
import Editor from '@monaco-editor/react';
import type { DataNode } from 'antd/es/tree';
import { normalizeOpacityForPlatform, resolveAppearanceValues } from '../utils/appearance';
import { normalizeOpacityForPlatform } from '../utils/appearance';
const { Search } = Input;
@@ -399,8 +399,7 @@ const RedisViewer: React.FC<RedisViewerProps> = ({ connectionId, redisDB }) => {
const theme = useStore(state => state.theme);
const appearance = useStore(state => state.appearance);
const darkMode = theme === 'dark';
const resolvedAppearance = resolveAppearanceValues(appearance);
const opacity = normalizeOpacityForPlatform(resolvedAppearance.opacity);
const opacity = normalizeOpacityForPlatform(appearance.opacity);
const connection = connections.find(c => c.id === connectionId);
const keyAccentColor = darkMode ? '#ffd666' : '#1677ff';
const jsonAccentColor = darkMode ? '#f6c453' : '#1890ff';

View File

@@ -27,15 +27,12 @@ import { Tree, message, Dropdown, MenuProps, Input, Button, Modal, Form, Badge,
DisconnectOutlined,
CloudOutlined,
CheckSquareOutlined,
CodeOutlined,
TagOutlined,
CheckOutlined,
FilterOutlined
CodeOutlined
} from '@ant-design/icons';
import { useStore } from '../store';
import { SavedConnection } from '../types';
import { DBGetDatabases, DBGetTables, DBQuery, DBShowCreateTable, ExportTable, OpenSQLFile, CreateDatabase, RenameDatabase, DropDatabase, RenameTable, DropTable, DropView, DropFunction, RenameView } from '../../wailsjs/go/app/App';
import { normalizeOpacityForPlatform, resolveAppearanceValues } from '../utils/appearance';
import { normalizeOpacityForPlatform } from '../utils/appearance';
const { Search } = Input;
@@ -76,15 +73,6 @@ const SEARCH_SCOPE_LABEL_MAP: Record<SearchScope, string> = SEARCH_SCOPE_OPTIONS
return acc;
}, {} as Record<SearchScope, string>);
const SEARCH_SCOPE_ICON_MAP: Record<SearchScope, React.ReactNode> = {
smart: <ThunderboltOutlined />,
object: <TableOutlined />,
database: <DatabaseOutlined />,
host: <CloudOutlined />,
tag: <TagOutlined />,
};
const Sidebar: React.FC<{ onEditConnection?: (conn: SavedConnection) => void }> = ({ onEditConnection }) => {
const connections = useStore(state => state.connections);
const savedQueries = useStore(state => state.savedQueries);
@@ -107,8 +95,7 @@ const Sidebar: React.FC<{ onEditConnection?: (conn: SavedConnection) => void }>
const recordTableAccess = useStore(state => state.recordTableAccess);
const setTableSortPreference = useStore(state => state.setTableSortPreference);
const darkMode = theme === 'dark';
const resolvedAppearance = resolveAppearanceValues(appearance);
const opacity = normalizeOpacityForPlatform(resolvedAppearance.opacity);
const opacity = normalizeOpacityForPlatform(appearance.opacity);
const [treeData, setTreeData] = useState<TreeNode[]>([]);
// Background Helper (Duplicate logic for now, ideally shared)
@@ -121,44 +108,6 @@ const Sidebar: React.FC<{ onEditConnection?: (conn: SavedConnection) => void }>
return `rgba(${r}, ${g}, ${b}, ${opacity})`;
};
const bgMain = getBg('#141414');
const modalPanelStyle = useMemo(() => ({
background: darkMode
? 'linear-gradient(180deg, rgba(20,26,38,0.96) 0%, rgba(13,17,26,0.98) 100%)'
: 'linear-gradient(180deg, rgba(255,255,255,0.98) 0%, rgba(246,248,252,0.98) 100%)',
border: darkMode ? '1px solid rgba(255,255,255,0.08)' : '1px solid rgba(16,24,40,0.08)',
boxShadow: darkMode ? '0 20px 48px rgba(0,0,0,0.38)' : '0 18px 42px rgba(15,23,42,0.12)',
backdropFilter: darkMode ? 'blur(18px)' : 'none',
}), [darkMode]);
const modalSectionStyle = useMemo(() => ({
padding: 14,
borderRadius: 14,
border: darkMode ? '1px solid rgba(255,255,255,0.08)' : '1px solid rgba(16,24,40,0.08)',
background: darkMode ? 'rgba(255,255,255,0.03)' : 'rgba(255,255,255,0.84)',
}), [darkMode]);
const modalScrollSectionStyle = useMemo(() => ({
maxHeight: 400,
overflow: 'auto' as const,
border: darkMode ? '1px solid rgba(255,255,255,0.08)' : '1px solid rgba(16,24,40,0.08)',
borderRadius: 14,
padding: 12,
background: darkMode ? 'rgba(255,255,255,0.03)' : 'rgba(255,255,255,0.8)',
}), [darkMode]);
const modalHintTextStyle = useMemo(() => ({
color: darkMode ? 'rgba(255,255,255,0.5)' : 'rgba(16,24,40,0.55)',
fontSize: 12,
lineHeight: 1.6,
}), [darkMode]);
const renderSidebarModalTitle = (icon: React.ReactNode, title: string, description: string) => (
<div style={{ display: 'flex', alignItems: 'flex-start', gap: 12 }}>
<div style={{ width: 34, height: 34, borderRadius: 12, display: 'grid', placeItems: 'center', background: darkMode ? 'rgba(255,214,102,0.12)' : 'rgba(24,144,255,0.1)', color: darkMode ? '#ffd666' : '#1677ff', flexShrink: 0 }}>
{icon}
</div>
<div style={{ minWidth: 0 }}>
<div style={{ fontSize: 16, fontWeight: 700, color: darkMode ? '#f5f7ff' : '#162033' }}>{title}</div>
<div style={{ marginTop: 4, color: darkMode ? 'rgba(255,255,255,0.5)' : 'rgba(16,24,40,0.55)', fontSize: 12, lineHeight: 1.6 }}>{description}</div>
</div>
</div>
);
const [searchValue, setSearchValue] = useState('');
const [searchScopes, setSearchScopes] = useState<SearchScope[]>(['smart']);
const [isSearchScopePopoverOpen, setIsSearchScopePopoverOpen] = useState(false);
@@ -792,20 +741,7 @@ const Sidebar: React.FC<{ onEditConnection?: (conn: SavedConnection) => void }>
case 'kingbase':
case 'highgo':
case 'vastbase':
return normalizeMetadataQuerySpecs([
{
// PostgreSQL 11+ / 部分 PG-like通过 prokind 区分 FUNCTION/PROCEDURE
sql: `SELECT n.nspname AS schema_name, p.proname AS routine_name, CASE WHEN p.prokind = 'p' THEN 'PROCEDURE' ELSE 'FUNCTION' END AS routine_type FROM pg_proc p JOIN pg_namespace n ON p.pronamespace = n.oid WHERE n.nspname NOT IN ('pg_catalog', 'information_schema') AND n.nspname NOT LIKE 'pg_%' ORDER BY n.nspname, routine_type, p.proname`,
},
{
// PostgreSQL 10 / 不支持 prokind 的兼容路径
sql: `SELECT r.routine_schema AS schema_name, r.routine_name AS routine_name, COALESCE(NULLIF(UPPER(r.routine_type), ''), 'FUNCTION') AS routine_type FROM information_schema.routines r WHERE r.routine_schema NOT IN ('pg_catalog', 'information_schema') AND r.routine_schema NOT LIKE 'pg_%' ORDER BY r.routine_schema, routine_type, r.routine_name`,
},
{
// 最后兜底:仅函数列表,确保 prokind/routines 视图异常时仍可展示
sql: `SELECT n.nspname AS schema_name, p.proname AS routine_name, 'FUNCTION' AS routine_type FROM pg_proc p JOIN pg_namespace n ON p.pronamespace = n.oid WHERE n.nspname NOT IN ('pg_catalog', 'information_schema') AND n.nspname NOT LIKE 'pg_%' ORDER BY n.nspname, p.proname`,
},
]);
return [{ sql: `SELECT n.nspname AS schema_name, p.proname AS routine_name, CASE WHEN p.prokind = 'p' THEN 'PROCEDURE' ELSE 'FUNCTION' END AS routine_type FROM pg_proc p JOIN pg_namespace n ON p.pronamespace = n.oid WHERE n.nspname NOT IN ('pg_catalog', 'information_schema') AND n.nspname NOT LIKE 'pg_%' ORDER BY n.nspname, routine_type, p.proname` }];
case 'sqlserver': {
const safeDb = quoteSqlServerIdentifier(dbName || 'master');
return [{ sql: `SELECT s.name AS schema_name, o.name AS routine_name, CASE o.type WHEN 'P' THEN 'PROCEDURE' WHEN 'FN' THEN 'FUNCTION' WHEN 'IF' THEN 'FUNCTION' WHEN 'TF' THEN 'FUNCTION' END AS routine_type FROM ${safeDb}.sys.objects o JOIN ${safeDb}.sys.schemas s ON o.schema_id = s.schema_id WHERE o.type IN ('P','FN','IF','TF') ORDER BY o.type, s.name, o.name` }];
@@ -2535,100 +2471,32 @@ const Sidebar: React.FC<{ onEditConnection?: (conn: SavedConnection) => void }>
const searchScopePopoverContent = useMemo(() => {
const smartSelected = searchScopes.includes('smart');
const scopedOptions = SEARCH_SCOPE_OPTIONS.filter((option) => option.value !== 'smart');
const borderColor = darkMode ? 'rgba(255,255,255,0.08)' : 'rgba(16,24,40,0.08)';
const mutedTextColor = darkMode ? 'rgba(255,255,255,0.5)' : 'rgba(16,24,40,0.55)';
const titleColor = darkMode ? 'rgba(255,255,255,0.92)' : '#162033';
const panelBg = darkMode
? 'linear-gradient(180deg, rgba(17,24,39,0.96) 0%, rgba(10,15,26,0.98) 100%)'
: 'linear-gradient(180deg, rgba(255,255,255,0.98) 0%, rgba(246,248,252,0.98) 100%)';
const smartBg = smartSelected
? (darkMode ? 'linear-gradient(135deg, rgba(255,214,102,0.22) 0%, rgba(255,179,71,0.16) 100%)' : 'linear-gradient(135deg, rgba(255,214,102,0.26) 0%, rgba(255,244,204,0.92) 100%)')
: (darkMode ? 'rgba(255,255,255,0.03)' : 'rgba(255,255,255,0.72)');
const smartBorder = smartSelected
? (darkMode ? 'rgba(255,214,102,0.42)' : 'rgba(245,176,65,0.34)')
: borderColor;
const getOptionCardStyle = (checked: boolean) => ({
display: 'flex',
alignItems: 'center' as const,
justifyContent: 'space-between' as const,
gap: 12,
padding: '10px 12px',
borderRadius: 12,
border: `1px solid ${checked ? (darkMode ? 'rgba(118,169,250,0.44)' : 'rgba(24,144,255,0.32)') : borderColor}`,
background: checked
? (darkMode ? 'rgba(64,124,255,0.18)' : 'rgba(24,144,255,0.08)')
: (darkMode ? 'rgba(255,255,255,0.03)' : 'rgba(255,255,255,0.76)'),
transition: 'all 120ms ease',
});
return (
<div style={{ minWidth: 280, display: 'flex', flexDirection: 'column', background: panelBg, padding: 14, gap: 12 }}>
<div style={{ display: 'flex', alignItems: 'flex-start', justifyContent: 'space-between', gap: 12 }}>
<div>
<div style={{ fontSize: 12, fontWeight: 700, letterSpacing: 0.4, color: mutedTextColor, textTransform: 'uppercase' }}></div>
<div style={{ marginTop: 4, fontSize: 13, lineHeight: 1.5, color: mutedTextColor }}></div>
</div>
<div style={{ width: 32, height: 32, borderRadius: 10, display: 'grid', placeItems: 'center', background: darkMode ? 'rgba(255,255,255,0.05)' : 'rgba(17,24,39,0.06)', color: darkMode ? '#ffd666' : '#1677ff', flexShrink: 0 }}>
<FilterOutlined />
</div>
</div>
<label style={{ display: 'block', cursor: 'pointer' }}>
<div style={{ display: 'flex', alignItems: 'center', gap: 12, padding: '12px 14px', borderRadius: 14, border: `1px solid ${smartBorder}`, background: smartBg, boxShadow: smartSelected ? (darkMode ? '0 10px 24px rgba(0,0,0,0.24)' : '0 10px 24px rgba(245,176,65,0.14)') : 'none' }}>
<div style={{ minWidth: 220, display: 'flex', flexDirection: 'column', gap: 8 }}>
<div style={{ fontSize: 12, color: '#8c8c8c' }}></div>
<Checkbox
checked={smartSelected}
onChange={(e) => setSearchScopeChecked('smart', e.target.checked)}
>
</Checkbox>
<div style={{ paddingLeft: 12, display: 'grid', gap: 6 }}>
{scopedOptions.map((option) => (
<Checkbox
checked={smartSelected}
onChange={(e) => setSearchScopeChecked('smart', e.target.checked)}
/>
<div style={{ width: 30, height: 30, borderRadius: 10, display: 'grid', placeItems: 'center', background: darkMode ? 'rgba(255,214,102,0.16)' : 'rgba(255,214,102,0.3)', color: darkMode ? '#ffd666' : '#ad6800', flexShrink: 0 }}>
{SEARCH_SCOPE_ICON_MAP.smart}
</div>
<div style={{ flex: 1, minWidth: 0 }}>
<div style={{ display: 'flex', alignItems: 'center', gap: 8, flexWrap: 'wrap' }}>
<span style={{ fontSize: 14, fontWeight: 700, color: titleColor }}></span>
<span style={{ padding: '2px 8px', borderRadius: 999, fontSize: 11, fontWeight: 700, color: darkMode ? '#ffe58f' : '#ad6800', background: darkMode ? 'rgba(255,214,102,0.16)' : 'rgba(255,214,102,0.35)' }}></span>
</div>
<div style={{ marginTop: 3, fontSize: 12, lineHeight: 1.5, color: mutedTextColor }}>Host </div>
</div>
</div>
</label>
<div style={{ height: 1, background: borderColor, opacity: 0.9 }} />
<div style={{ display: 'flex', alignItems: 'center', justifyContent: 'space-between', gap: 12 }}>
<div style={{ fontSize: 12, fontWeight: 700, letterSpacing: 0.3, color: mutedTextColor, textTransform: 'uppercase' }}></div>
<div style={{ fontSize: 12, color: mutedTextColor }}></div>
key={option.value}
checked={searchScopes.includes(option.value)}
onChange={(e) => setSearchScopeChecked(option.value, e.target.checked)}
>
{option.label}
</Checkbox>
))}
</div>
<div style={{ display: 'grid', gap: 8 }}>
{scopedOptions.map((option) => {
const checked = searchScopes.includes(option.value);
return (
<label key={option.value} style={{ display: 'block', cursor: 'pointer' }}>
<div style={getOptionCardStyle(checked)}>
<div style={{ display: 'flex', alignItems: 'center', gap: 12, minWidth: 0 }}>
<Checkbox
checked={checked}
onChange={(e) => setSearchScopeChecked(option.value, e.target.checked)}
/>
<div style={{ width: 28, height: 28, borderRadius: 9, display: 'grid', placeItems: 'center', background: checked ? (darkMode ? 'rgba(118,169,250,0.2)' : 'rgba(24,144,255,0.12)') : (darkMode ? 'rgba(255,255,255,0.05)' : 'rgba(17,24,39,0.06)'), color: checked ? (darkMode ? '#91caff' : '#1677ff') : mutedTextColor, flexShrink: 0 }}>
{SEARCH_SCOPE_ICON_MAP[option.value]}
</div>
<span style={{ fontSize: 14, fontWeight: 600, color: titleColor, whiteSpace: 'nowrap' }}>{option.label}</span>
</div>
<div style={{ width: 18, display: 'flex', justifyContent: 'center', color: checked ? (darkMode ? '#91caff' : '#1677ff') : 'transparent', flexShrink: 0 }}>
<CheckOutlined />
</div>
</div>
</label>
);
})}
</div>
<div style={{ padding: '10px 12px', borderRadius: 12, background: darkMode ? 'rgba(255,255,255,0.03)' : 'rgba(17,24,39,0.04)', color: mutedTextColor, fontSize: 12, lineHeight: 1.6 }}>
Host
<div style={{ fontSize: 12, color: '#8c8c8c' }}>
</div>
</div>
);
}, [darkMode, searchScopes]);
}, [searchScopes]);
const parseHostOnlyToken = (value: unknown): string[] => {
const raw = String(value || '').trim();
@@ -3433,14 +3301,14 @@ const Sidebar: React.FC<{ onEditConnection?: (conn: SavedConnection) => void }>
return (
<div style={{ display: 'flex', flexDirection: 'column', height: '100%' }}>
<div style={{ padding: '4px 10px' }}>
<div style={{ display: 'flex', alignItems: 'center', gap: 8 }}>
<div style={{ padding: '4px 8px' }}>
<Space.Compact block size="small">
<Search
ref={searchInputRef}
placeholder="搜索..."
onChange={onSearch}
size="small"
style={{ flex: 1, minWidth: 0 }}
style={{ width: '100%' }}
/>
<Popover
content={searchScopePopoverContent}
@@ -3448,66 +3316,18 @@ const Sidebar: React.FC<{ onEditConnection?: (conn: SavedConnection) => void }>
placement="bottomRight"
open={isSearchScopePopoverOpen}
onOpenChange={setIsSearchScopePopoverOpen}
styles={{ body: { padding: 0, borderRadius: 18, overflow: 'hidden' } }}
>
<Tooltip title={`搜索范围:${searchScopeSummary}`}>
<Button
size="small"
style={{
minWidth: 86,
display: 'inline-flex',
alignItems: 'center',
justifyContent: 'center',
gap: 6,
paddingInline: 10,
borderRadius: 10,
borderColor: darkMode ? 'rgba(255,255,255,0.12)' : 'rgba(16,24,40,0.12)',
background: darkMode ? bgMain : 'rgba(255,255,255,0.92)',
color: darkMode ? 'rgba(255,255,255,0.88)' : '#162033',
boxShadow: isSearchScopePopoverOpen
? (darkMode ? '0 0 0 1px rgba(255,214,102,0.22) inset' : '0 0 0 1px rgba(24,144,255,0.24) inset')
: 'none',
backdropFilter: darkMode ? 'blur(10px)' : 'none',
flexShrink: 0,
}}
>
<span style={{ display: 'inline-flex', alignItems: 'center', color: searchScopes.includes('smart') ? '#ffd666' : (darkMode ? 'rgba(255,255,255,0.72)' : 'rgba(22,32,51,0.72)') }}>
<FilterOutlined />
</span>
<span style={{ fontWeight: 700, color: darkMode ? 'rgba(255,255,255,0.88)' : '#162033' }}></span>
<span
style={{
minWidth: 18,
height: 18,
padding: '0 5px',
borderRadius: 999,
display: 'inline-flex',
alignItems: 'center',
justifyContent: 'center',
fontSize: 11,
fontWeight: 700,
lineHeight: 1,
background: searchScopes.includes('smart')
? (darkMode ? 'rgba(255,214,102,0.16)' : 'rgba(24,144,255,0.12)')
: (darkMode ? 'rgba(118,169,250,0.18)' : 'rgba(24,144,255,0.12)'),
color: searchScopes.includes('smart')
? (darkMode ? '#ffd666' : '#1677ff')
: (darkMode ? '#91caff' : '#1677ff'),
}}
>
{searchScopes.includes('smart') ? '智' : searchScopes.length}
</span>
<span style={{ display: 'inline-flex', alignItems: 'center', color: darkMode ? 'rgba(255,255,255,0.48)' : 'rgba(22,32,51,0.4)', fontSize: 12 }}>
<DownOutlined />
</span>
<Button size="small" icon={<DownOutlined />} style={{ width: 86 }}>
{searchScopes.includes('smart') ? '(智)' : `(${searchScopes.length})`}
</Button>
</Tooltip>
</Popover>
</div>
</Space.Compact>
</div>
{/* Toolbar */}
<div style={{ padding: '4px 10px', borderBottom: 'none', display: 'flex', flexWrap: 'wrap', gap: 4 }}>
<div style={{ padding: '4px 8px', borderBottom: 'none', display: 'flex', flexWrap: 'wrap', gap: 4 }}>
<Button
size="small"
icon={<FolderOpenOutlined />}
@@ -3575,14 +3395,8 @@ const Sidebar: React.FC<{ onEditConnection?: (conn: SavedConnection) => void }>
)}
<Modal
title={renderSidebarModalTitle(
<FolderOpenOutlined />,
renameViewTarget?.type === 'tag' ? "编辑标签" : "新建组",
renameViewTarget?.type === 'tag' ? "调整分组名称和包含的连接。" : "为连接树创建一个更清晰的分组视图。"
)}
title={renameViewTarget?.type === 'tag' ? "编辑标签" : "新建组"}
open={isCreateTagModalOpen}
centered
styles={{ content: modalPanelStyle, header: { background: 'transparent', borderBottom: 'none', paddingBottom: 10 }, body: { paddingTop: 8 }, footer: { background: 'transparent', borderTop: 'none', paddingTop: 12 } }}
onOk={() => {
createTagForm.validateFields().then(values => {
if (renameViewTarget?.type === 'tag') {
@@ -3617,24 +3431,20 @@ const Sidebar: React.FC<{ onEditConnection?: (conn: SavedConnection) => void }>
onCancel={() => setIsCreateTagModalOpen(false)}
>
<Form form={createTagForm} layout="vertical">
<div style={modalSectionStyle}>
<Form.Item name="name" label="标签名称" rules={[{ required: true, message: '请输入标签名称' }]}>
<Input placeholder="例如:线上环境 / 核心业务 / 临时调试" />
</Form.Item>
<Form.Item name="connectionIds" label="选择连接" style={{ marginBottom: 0 }}>
<Checkbox.Group style={{ width: '100%' }}>
<div style={modalScrollSectionStyle}>
<Space direction="vertical" style={{ width: '100%' }}>
{connections.map(conn => (
<Checkbox key={conn.id} value={conn.id}>
{conn.name} {conn.config.host ? `(${conn.config.host})` : ''}
</Checkbox>
))}
</Space>
</div>
</Checkbox.Group>
</Form.Item>
</div>
<Form.Item name="name" label="标签名称" rules={[{ required: true, message: '请输入标签名称' }]}>
<Input />
</Form.Item>
<Form.Item name="connectionIds" label="选择连接">
<Checkbox.Group style={{ width: '100%' }}>
<Space direction="vertical" style={{ width: '100%', maxHeight: '400px', overflowY: 'auto' }}>
{connections.map(conn => (
<Checkbox key={conn.id} value={conn.id}>
{conn.name} {conn.config.host ? `(${conn.config.host})` : ''}
</Checkbox>
))}
</Space>
</Checkbox.Group>
</Form.Item>
</Form>
</Modal>
@@ -3704,12 +3514,10 @@ const Sidebar: React.FC<{ onEditConnection?: (conn: SavedConnection) => void }>
</Modal>
<Modal
title={renderSidebarModalTitle(<TableOutlined />, "批量操作表", "按对象批量导出结构、数据或完整备份。")}
title="批量操作表"
open={isBatchModalOpen}
onCancel={() => setIsBatchModalOpen(false)}
width={720}
centered
styles={{ content: modalPanelStyle, header: { background: 'transparent', borderBottom: 'none', paddingBottom: 10 }, body: { paddingTop: 8 }, footer: { background: 'transparent', borderTop: 'none', paddingTop: 12 } }}
width={680}
footer={
<div style={{ display: 'flex', justifyContent: 'space-between', alignItems: 'center', gap: 8, flexWrap: 'wrap' }}>
<Button key="cancel" onClick={() => setIsBatchModalOpen(false)}>
@@ -3745,7 +3553,7 @@ const Sidebar: React.FC<{ onEditConnection?: (conn: SavedConnection) => void }>
</div>
}
>
<div style={{ ...modalSectionStyle, marginBottom: 16 }}>
<div style={{ marginBottom: 16 }}>
<div style={{ marginBottom: 8 }}>
<label style={{ display: 'block', marginBottom: 4, fontWeight: 500 }}></label>
<Select
@@ -3777,11 +3585,10 @@ const Sidebar: React.FC<{ onEditConnection?: (conn: SavedConnection) => void }>
))}
</Select>
</div>
<div style={modalHintTextStyle}></div>
</div>
{batchTables.length > 0 && (
<div style={{ ...modalSectionStyle, marginBottom: 16 }}>
<div style={{ marginBottom: 16 }}>
<Space wrap size={8} style={{ width: '100%' }}>
<Input
allowClear
@@ -3819,7 +3626,7 @@ const Sidebar: React.FC<{ onEditConnection?: (conn: SavedConnection) => void }>
{batchTables.length > 0 && (
<>
<div style={{ ...modalSectionStyle, marginBottom: 16 }}>
<div style={{ marginBottom: 16 }}>
<Space>
<Button
size="small"
@@ -3847,7 +3654,7 @@ const Sidebar: React.FC<{ onEditConnection?: (conn: SavedConnection) => void }>
</span>
</Space>
</div>
<div style={modalScrollSectionStyle}>
<div style={{ maxHeight: 400, overflow: 'auto', border: darkMode ? '1px solid #303030' : '1px solid #f0f0f0', borderRadius: 4, padding: 8 }}>
<Checkbox.Group
value={checkedTableKeys}
onChange={(values) => setCheckedTableKeys(values as string[])}
@@ -3897,12 +3704,10 @@ const Sidebar: React.FC<{ onEditConnection?: (conn: SavedConnection) => void }>
</Modal>
<Modal
title={renderSidebarModalTitle(<DatabaseOutlined />, "批量操作库", "按数据库批量导出结构,或生成结构加数据的备份。")}
title="批量操作库"
open={isBatchDbModalOpen}
onCancel={() => setIsBatchDbModalOpen(false)}
width={640}
centered
styles={{ content: modalPanelStyle, header: { background: 'transparent', borderBottom: 'none', paddingBottom: 10 }, body: { paddingTop: 8 }, footer: { background: 'transparent', borderTop: 'none', paddingTop: 12 } }}
width={600}
footer={[
<Button key="cancel" onClick={() => setIsBatchDbModalOpen(false)}>
@@ -3926,8 +3731,8 @@ const Sidebar: React.FC<{ onEditConnection?: (conn: SavedConnection) => void }>
</Button>
]}
>
<div style={{ ...modalSectionStyle, marginBottom: 16 }}>
<label style={{ display: 'block', marginBottom: 4, fontWeight: 600, color: darkMode ? '#f5f7ff' : '#162033' }}></label>
<div style={{ marginBottom: 16 }}>
<label style={{ display: 'block', marginBottom: 4, fontWeight: 500 }}></label>
<Select
value={selectedDbConnection}
onChange={handleDbConnectionChange}
@@ -3940,12 +3745,11 @@ const Sidebar: React.FC<{ onEditConnection?: (conn: SavedConnection) => void }>
</Select.Option>
))}
</Select>
<div style={{ ...modalHintTextStyle, marginTop: 10 }}></div>
</div>
{batchDatabases.length > 0 && (
<>
<div style={{ ...modalSectionStyle, marginBottom: 16 }}>
<div style={{ marginBottom: 16 }}>
<Space>
<Button
size="small"
@@ -3970,7 +3774,7 @@ const Sidebar: React.FC<{ onEditConnection?: (conn: SavedConnection) => void }>
</span>
</Space>
</div>
<div style={modalScrollSectionStyle}>
<div style={{ maxHeight: 400, overflow: 'auto', border: darkMode ? '1px solid #303030' : '1px solid #f0f0f0', borderRadius: 4, padding: 8 }}>
<Checkbox.Group
value={checkedDbKeys}
onChange={(values) => setCheckedDbKeys(values as string[])}

View File

@@ -2491,7 +2491,7 @@ END;`;
okText="应用"
cancelText="取消"
width={640}
destroyOnHidden
destroyOnClose
>
<Input.TextArea
value={commentEditorValue}

View File

@@ -9,36 +9,6 @@ import { loader } from '@monaco-editor/react'
import * as monaco from 'monaco-editor'
loader.config({ monaco })
if (typeof window !== 'undefined' && !(window as any).go) {
(window as any).go = {
app: {
App: {
CheckUpdate: async () => ({ success: false }),
DownloadUpdate: async () => ({ success: false }),
GetSavedConnections: async () => [],
SaveConnection: async () => null,
DeleteConnection: async () => null,
OpenConnection: async () => null,
CloseConnection: async () => null,
GetDatabases: async () => [],
GetTables: async () => [],
GetTableData: async () => ({ columns: [], rows: [], total: 0 }),
GetTableColumns: async () => [],
ExecuteQuery: async () => ({ columns: [], rows: [], time: 0 }),
GetSavedQueries: async () => [],
SaveQuery: async () => null,
DeleteQuery: async () => null,
GetAppInfo: async () => ({}),
CheckForUpdates: async () => ({ success: false }),
OpenDownloadedUpdateDirectory: async () => ({ success: false }),
InstallUpdateAndRestart: async () => ({ success: false }),
ImportConfigFile: async () => ({ success: false }),
ExportData: async () => ({ success: false }),
}
}
};
}
// 全局注册透明主题,避免每个 Editor 组件 beforeMount 中重复定义
monaco.editor.defineTheme('transparent-dark', {
base: 'vs-dark', inherit: true, rules: [],

View File

@@ -10,7 +10,7 @@ import {
sanitizeShortcutOptions,
} from './utils/shortcuts';
const DEFAULT_APPEARANCE = { enabled: true, opacity: 1.0, blur: 0 };
const DEFAULT_APPEARANCE = { opacity: 1.0, blur: 0 };
const DEFAULT_UI_SCALE = 1.0;
const MIN_UI_SCALE = 0.8;
const MAX_UI_SCALE = 1.25;
@@ -25,7 +25,7 @@ const MAX_HOST_ENTRY_LENGTH = 512;
const MAX_HOST_ENTRIES = 64;
const DEFAULT_TIMEOUT_SECONDS = 30;
const MAX_TIMEOUT_SECONDS = 3600;
const PERSIST_VERSION = 6;
const PERSIST_VERSION = 5;
const DEFAULT_CONNECTION_TYPE = 'mysql';
const DEFAULT_GLOBAL_PROXY: GlobalProxyConfig = {
enabled: false,
@@ -405,7 +405,7 @@ interface AppState {
activeContext: { connectionId: string; dbName: string } | null;
savedQueries: SavedQuery[];
theme: 'light' | 'dark';
appearance: { enabled: boolean; opacity: number; blur: number };
appearance: { opacity: number; blur: number };
uiScale: number;
fontSize: number;
startupFullscreen: boolean;
@@ -416,10 +416,6 @@ interface AppState {
sqlLogs: SqlLog[];
tableAccessCount: Record<string, number>;
tableSortPreference: Record<string, 'name' | 'frequency'>;
tableColumnOrders: Record<string, string[]>;
enableColumnOrderMemory: boolean;
tableHiddenColumns: Record<string, string[]>;
enableHiddenColumnMemory: boolean;
addConnection: (conn: SavedConnection) => void;
updateConnection: (conn: SavedConnection) => void;
@@ -447,7 +443,7 @@ interface AppState {
deleteQuery: (id: string) => void;
setTheme: (theme: 'light' | 'dark') => void;
setAppearance: (appearance: Partial<{ enabled: boolean; opacity: number; blur: number }>) => void;
setAppearance: (appearance: Partial<{ opacity: number; blur: number }>) => void;
setUiScale: (scale: number) => void;
setFontSize: (size: number) => void;
setStartupFullscreen: (enabled: boolean) => void;
@@ -462,13 +458,6 @@ interface AppState {
recordTableAccess: (connectionId: string, dbName: string, tableName: string) => void;
setTableSortPreference: (connectionId: string, dbName: string, sortBy: 'name' | 'frequency') => void;
setTableColumnOrder: (connectionId: string, dbName: string, tableName: string, order: string[]) => void;
setEnableColumnOrderMemory: (enabled: boolean) => void;
clearTableColumnOrder: (connectionId: string, dbName: string, tableName: string) => void;
setTableHiddenColumns: (connectionId: string, dbName: string, tableName: string, hiddenColumns: string[]) => void;
setEnableHiddenColumnMemory: (enabled: boolean) => void;
clearTableHiddenColumns: (connectionId: string, dbName: string, tableName: string) => void;
}
const sanitizeSavedQueries = (value: unknown): SavedQuery[] => {
@@ -532,37 +521,14 @@ const sanitizeTableSortPreference = (value: unknown): Record<string, 'name' | 'f
return result;
};
const sanitizeTableColumnOrders = (value: unknown): Record<string, string[]> => {
const raw = (value && typeof value === 'object') ? value as Record<string, unknown> : {};
const result: Record<string, string[]> = {};
Object.entries(raw).forEach(([key, orderArray]) => {
if (Array.isArray(orderArray)) {
result[key] = orderArray.map(col => String(col));
}
});
return result;
};
const sanitizeTableHiddenColumns = (value: unknown): Record<string, string[]> => {
const raw = (value && typeof value === 'object') ? value as Record<string, unknown> : {};
const result: Record<string, string[]> = {};
Object.entries(raw).forEach(([key, hiddenArray]) => {
if (Array.isArray(hiddenArray)) {
result[key] = hiddenArray.map(col => String(col));
}
});
return result;
};
const sanitizeAppearance = (
appearance: Partial<{ enabled: boolean; opacity: number; blur: number }> | undefined,
appearance: Partial<{ opacity: number; blur: number }> | undefined,
version: number
): { enabled: boolean; opacity: number; blur: number } => {
): { opacity: number; blur: number } => {
if (!appearance || typeof appearance !== 'object') {
return { ...DEFAULT_APPEARANCE };
}
const nextAppearance = {
enabled: typeof appearance.enabled === 'boolean' ? appearance.enabled : DEFAULT_APPEARANCE.enabled,
opacity: typeof appearance.opacity === 'number' ? appearance.opacity : DEFAULT_APPEARANCE.opacity,
blur: typeof appearance.blur === 'number' ? appearance.blur : DEFAULT_APPEARANCE.blur,
};
@@ -631,10 +597,6 @@ export const useStore = create<AppState>()(
sqlLogs: [],
tableAccessCount: {},
tableSortPreference: {},
tableColumnOrders: {},
enableColumnOrderMemory: true,
tableHiddenColumns: {},
enableHiddenColumnMemory: true,
addConnection: (conn) => set((state) => ({ connections: [...state.connections, conn] })),
updateConnection: (conn) => set((state) => ({
@@ -837,44 +799,6 @@ export const useStore = create<AppState>()(
}
};
}),
setTableColumnOrder: (connectionId, dbName, tableName, order) => set((state) => {
const key = `${connectionId}-${dbName}-${tableName}`;
return {
tableColumnOrders: {
...state.tableColumnOrders,
[key]: order
}
};
}),
clearTableColumnOrder: (connectionId, dbName, tableName) => set((state) => {
const key = `${connectionId}-${dbName}-${tableName}`;
const newOrders = { ...state.tableColumnOrders };
delete newOrders[key];
return { tableColumnOrders: newOrders };
}),
setEnableColumnOrderMemory: (enabled) => set({ enableColumnOrderMemory: !!enabled }),
setTableHiddenColumns: (connectionId, dbName, tableName, hiddenColumns) => set((state) => {
const key = `${connectionId}-${dbName}-${tableName}`;
return {
tableHiddenColumns: {
...state.tableHiddenColumns,
[key]: hiddenColumns
}
};
}),
clearTableHiddenColumns: (connectionId, dbName, tableName) => set((state) => {
const key = `${connectionId}-${dbName}-${tableName}`;
const newHidden = { ...state.tableHiddenColumns };
delete newHidden[key];
return { tableHiddenColumns: newHidden };
}),
setEnableHiddenColumnMemory: (enabled) => set({ enableHiddenColumnMemory: !!enabled }),
}),
{
name: 'lite-db-storage', // name of the item in the storage (must be unique)
@@ -900,13 +824,6 @@ export const useStore = create<AppState>()(
nextState.shortcutOptions = sanitizeShortcutOptions(state.shortcutOptions);
nextState.tableAccessCount = sanitizeTableAccessCount(state.tableAccessCount);
nextState.tableSortPreference = sanitizeTableSortPreference(state.tableSortPreference);
// 新增的列排序记忆状态不需要做版本特殊兼容,直接做基本的类型保护
const safeOrders = sanitizeTableColumnOrders(state.tableColumnOrders);
nextState.tableColumnOrders = safeOrders;
nextState.enableColumnOrderMemory = state.enableColumnOrderMemory !== false;
const safeHidden = sanitizeTableHiddenColumns(state.tableHiddenColumns);
nextState.tableHiddenColumns = safeHidden;
nextState.enableHiddenColumnMemory = state.enableHiddenColumnMemory !== false;
return nextState as AppState;
},
merge: (persistedState, currentState) => {
@@ -923,16 +840,11 @@ export const useStore = create<AppState>()(
fontSize: sanitizeFontSize(state.fontSize),
startupFullscreen: sanitizeStartupFullscreen(state.startupFullscreen),
globalProxy: sanitizeGlobalProxy(state.globalProxy),
tableSortPreference: sanitizeTableSortPreference(state.tableSortPreference),
tableColumnOrders: sanitizeTableColumnOrders(state.tableColumnOrders),
enableColumnOrderMemory: state.enableColumnOrderMemory !== false,
tableHiddenColumns: sanitizeTableHiddenColumns(state.tableHiddenColumns),
enableHiddenColumnMemory: state.enableHiddenColumnMemory !== false,
sqlFormatOptions: sanitizeSqlFormatOptions(state.sqlFormatOptions),
queryOptions: sanitizeQueryOptions(state.queryOptions),
shortcutOptions: sanitizeShortcutOptions(state.shortcutOptions),
tableAccessCount: sanitizeTableAccessCount(state.tableAccessCount),
tableSortPreference: sanitizeTableSortPreference(state.tableSortPreference),
};
},
partialize: (state) => ({
@@ -949,11 +861,7 @@ export const useStore = create<AppState>()(
queryOptions: state.queryOptions,
shortcutOptions: state.shortcutOptions,
tableAccessCount: state.tableAccessCount,
tableSortPreference: state.tableSortPreference,
tableColumnOrders: state.tableColumnOrders,
enableColumnOrderMemory: state.enableColumnOrderMemory,
tableHiddenColumns: state.tableHiddenColumns,
enableHiddenColumnMemory: state.enableHiddenColumnMemory
tableSortPreference: state.tableSortPreference
}), // Don't persist logs
}
)

View File

@@ -10,22 +10,6 @@ const WINDOWS_BLUR_FACTOR = 1.00;
const clamp = (value: number, min: number, max: number) => Math.min(max, Math.max(min, value));
export interface AppearanceSettingsLike {
enabled?: boolean;
opacity?: number;
blur?: number;
}
export const resolveAppearanceValues = (appearance: AppearanceSettingsLike | undefined): { opacity: number; blur: number } => {
if (!appearance || appearance.enabled !== false) {
return {
opacity: appearance?.opacity ?? DEFAULT_OPACITY,
blur: appearance?.blur ?? 0,
};
}
return { opacity: DEFAULT_OPACITY, blur: 0 };
};
export const isMacLikePlatform = (): boolean => {
if (typeof navigator === 'undefined') {
return false;

View File

@@ -50,11 +50,6 @@ export const quoteIdentPart = (dbType: string, ident: string) => {
return raw;
}
// SQL Server 使用 [bracket] 标识符
if (dbTypeLower === 'sqlserver' || dbTypeLower === 'mssql') {
return `[${raw.replace(/]/g, ']]')}]`;
}
// 其他数据库默认加双引号
return `"${raw.replace(/"/g, '""')}"`;
};
@@ -139,42 +134,6 @@ export const buildOrderBySQL = (
return '';
};
export const buildPaginatedSelectSQL = (
dbType: string,
baseSql: string,
orderBySQL: string,
limit: number,
offset: number,
) => {
const normalizedType = String(dbType || '').trim().toLowerCase();
const safeLimit = Math.max(0, Math.floor(Number(limit) || 0));
const safeOffset = Math.max(0, Math.floor(Number(offset) || 0));
const base = String(baseSql || '').trim();
const orderBy = String(orderBySQL || '');
if (!base || safeLimit <= 0) {
return `${base}${orderBy}`;
}
switch (normalizedType) {
case 'oracle': {
const orderedSql = `${base}${orderBy}`;
const upperBound = safeOffset + safeLimit;
if (safeOffset <= 0) {
return `SELECT * FROM (${orderedSql}) WHERE ROWNUM <= ${upperBound}`;
}
return `SELECT * FROM (SELECT "__gonavi_page__".*, ROWNUM "__gonavi_rn__" FROM (${orderedSql}) "__gonavi_page__" WHERE ROWNUM <= ${upperBound}) WHERE "__gonavi_rn__" > ${safeOffset}`;
}
case 'sqlserver':
case 'mssql': {
const effectiveOrderBy = orderBy.trim() ? orderBy : ' ORDER BY (SELECT NULL)';
return `${base}${effectiveOrderBy} OFFSET ${safeOffset} ROWS FETCH NEXT ${safeLimit} ROWS ONLY`;
}
default:
return `${base}${orderBy} LIMIT ${safeLimit} OFFSET ${safeOffset}`;
}
};
export const parseListValues = (val: string) => {
const raw = (val || '').trim();
if (!raw) return [];

View File

@@ -277,9 +277,6 @@ export namespace sync {
mode: string;
jobId?: string;
autoAddColumns?: boolean;
targetTableStrategy?: string;
createIndexes?: boolean;
mongoCollectionName?: string;
tableOptions?: Record<string, TableOptions>;
static createFrom(source: any = {}) {
@@ -295,9 +292,6 @@ export namespace sync {
this.mode = source["mode"];
this.jobId = source["jobId"];
this.autoAddColumns = source["autoAddColumns"];
this.targetTableStrategy = source["targetTableStrategy"];
this.createIndexes = source["createIndexes"];
this.mongoCollectionName = source["mongoCollectionName"];
this.tableOptions = this.convertValues(source["tableOptions"], TableOptions, true);
}

View File

@@ -8,8 +8,6 @@ import (
"errors"
"fmt"
"net"
"net/url"
"os"
"strings"
"sync"
"time"
@@ -220,7 +218,6 @@ func wrapConnectError(config connection.ConnectionConfig, err error) error {
if err == nil {
return nil
}
err = sanitizeMongoConnectErrorLabel(config, err)
var netErr net.Error
if errors.Is(err, context.DeadlineExceeded) || (errors.As(err, &netErr) && netErr.Timeout()) {
@@ -234,73 +231,6 @@ func wrapConnectError(config connection.ConnectionConfig, err error) error {
return withLogHint{err: err, logPath: logger.Path()}
}
type errorMessageOverride struct {
message string
cause error
}
func (e errorMessageOverride) Error() string {
return e.message
}
func (e errorMessageOverride) Unwrap() error {
return e.cause
}
func sanitizeMongoConnectErrorLabel(config connection.ConnectionConfig, err error) error {
if err == nil {
return nil
}
if strings.ToLower(strings.TrimSpace(config.Type)) != "mongodb" {
return err
}
if mongoConnectUsesTLS(config) {
return err
}
original := err.Error()
rewritten := strings.ReplaceAll(original, "SSL 主库凭据", "主库凭据")
rewritten = strings.ReplaceAll(rewritten, "SSL 从库凭据", "从库凭据")
if rewritten == original {
return err
}
return errorMessageOverride{
message: rewritten,
cause: err,
}
}
func mongoConnectUsesTLS(config connection.ConnectionConfig) bool {
if config.UseSSL {
return true
}
uriText := strings.TrimSpace(config.URI)
if uriText == "" {
return false
}
parsed, err := url.Parse(uriText)
if err != nil {
return false
}
for _, key := range []string{"tls", "ssl"} {
if enabled, known := parseMongoBool(parsed.Query().Get(key)); known {
return enabled
}
}
return strings.EqualFold(strings.TrimSpace(parsed.Scheme), "mongodb+srv")
}
func parseMongoBool(raw string) (enabled bool, known bool) {
value := strings.ToLower(strings.TrimSpace(raw))
switch value {
case "1", "true", "t", "yes", "y", "on", "required":
return true, true
case "0", "false", "f", "no", "n", "off", "disable", "disabled":
return false, true
default:
return false, false
}
}
type withLogHint struct {
err error
logPath string
@@ -308,15 +238,10 @@ type withLogHint struct {
func (e withLogHint) Error() string {
message := normalizeErrorMessage(e.err)
path := strings.TrimSpace(e.logPath)
if path == "" {
if strings.TrimSpace(e.logPath) == "" {
return message
}
info, statErr := os.Stat(path)
if statErr != nil || info.IsDir() || info.Size() <= 0 {
return message
}
return fmt.Sprintf("%s详细日志%s", message, path)
return fmt.Sprintf("%s详细日志%s", message, e.logPath)
}
func (e withLogHint) Unwrap() error {

View File

@@ -1,84 +0,0 @@
package app
import (
"errors"
"os"
"path/filepath"
"strings"
"testing"
"GoNavi-Wails/internal/connection"
)
func TestWrapConnectError_MongoNoSSL_RemovesMisleadingSSLLabel(t *testing.T) {
config := connection.ConnectionConfig{
Type: "mongodb",
UseSSL: false,
}
sourceErr := errors.New("MongoDB 连接失败SSL 主库凭据验证失败: mock error")
wrapped := wrapConnectError(config, sourceErr)
text := wrapped.Error()
if strings.Contains(text, "SSL 主库凭据") {
t.Fatalf("expected ssl label to be removed when TLS disabled, got: %s", text)
}
if !strings.Contains(text, "主库凭据验证失败") {
t.Fatalf("expected auth label to remain, got: %s", text)
}
}
func TestWrapConnectError_MongoURIForcesTLS_KeepsSSLLabel(t *testing.T) {
config := connection.ConnectionConfig{
Type: "mongodb",
UseSSL: false,
URI: "mongodb://user:pass@127.0.0.1:27017/admin?tls=true",
}
sourceErr := errors.New("MongoDB 连接失败SSL 主库凭据验证失败: mock error")
wrapped := wrapConnectError(config, sourceErr)
text := wrapped.Error()
if !strings.Contains(text, "SSL 主库凭据") {
t.Fatalf("expected ssl label to remain when URI enables TLS, got: %s", text)
}
}
func TestWrapConnectError_MongoSRVDefaultTLS_KeepsSSLLabel(t *testing.T) {
config := connection.ConnectionConfig{
Type: "mongodb",
UseSSL: false,
URI: "mongodb+srv://user:pass@cluster0.example.com/admin",
}
sourceErr := errors.New("MongoDB 连接失败SSL 主库凭据验证失败: mock error")
wrapped := wrapConnectError(config, sourceErr)
text := wrapped.Error()
if !strings.Contains(text, "SSL 主库凭据") {
t.Fatalf("expected ssl label to remain for mongodb+srv default TLS, got: %s", text)
}
}
func TestWithLogHintError_OmitEmptyLogPath(t *testing.T) {
dir := t.TempDir()
logPath := filepath.Join(dir, "gonavi.log")
if err := os.WriteFile(logPath, nil, 0o644); err != nil {
t.Fatalf("write empty log failed: %v", err)
}
err := withLogHint{err: errors.New("连接失败"), logPath: logPath}
text := err.Error()
if strings.Contains(text, "详细日志:") {
t.Fatalf("expected no log hint for empty file, got: %s", text)
}
}
func TestWithLogHintError_IncludeNonEmptyLogPath(t *testing.T) {
dir := t.TempDir()
logPath := filepath.Join(dir, "gonavi.log")
if err := os.WriteFile(logPath, []byte("log entry\n"), 0o644); err != nil {
t.Fatalf("write log failed: %v", err)
}
err := withLogHint{err: errors.New("连接失败"), logPath: logPath}
text := err.Error()
if !strings.Contains(text, "详细日志:"+logPath) {
t.Fatalf("expected log hint with path, got: %s", text)
}
}

View File

@@ -1,7 +1,6 @@
package app
import (
"strconv"
"strings"
"GoNavi-Wails/internal/connection"
@@ -21,11 +20,6 @@ func normalizeRunConfig(config connection.ConnectionConfig, dbName string) conne
case "dameng":
// 达梦使用 schema 参数沿用现有行为dbName 表示 schema。
runConfig.Database = name
case "redis":
runConfig.Database = name
if idx, err := strconv.Atoi(name); err == nil && idx >= 0 && idx <= 15 {
runConfig.RedisDB = idx
}
default:
// oracle: dbName 表示 schema/owner不能覆盖 config.Database服务名
// sqlite: 无需设置 Database

View File

@@ -73,8 +73,8 @@ func resolveDialConfigWithProxy(raw connection.ConnectionConfig) (connection.Con
// 文件型/自定义 DSN 类型不走标准 host:port不在此层改写。
return config, nil
}
if normalizedType == "mongodb" {
// MongoDB 统一由驱动侧 Dialer 处理代理,保留原始目标地址,避免将连接目标改写为本地转发地址
if normalizedType == "mongodb" && config.MongoSRV {
// Mongo SRV 由驱动侧 Dialer 处理代理,避免破坏 DNS SRV 拓扑发现
return config, nil
}

View File

@@ -1,64 +0,0 @@
package app
import (
"reflect"
"testing"
"GoNavi-Wails/internal/connection"
)
func TestResolveDialConfigWithProxy_MongoKeepsTargetAddress(t *testing.T) {
hosts := []string{"10.20.30.40:27017", "10.20.30.41:27017"}
raw := connection.ConnectionConfig{
Type: "mongodb",
Host: "10.20.30.40",
Port: 27017,
UseProxy: true,
Proxy: connection.ProxyConfig{
Type: "socks5",
Host: "127.0.0.1",
Port: 1080,
},
Hosts: hosts,
}
got, err := resolveDialConfigWithProxy(raw)
if err != nil {
t.Fatalf("resolveDialConfigWithProxy returned error: %v", err)
}
if got.Host != raw.Host || got.Port != raw.Port {
t.Fatalf("mongo target address should be kept, got=%s:%d want=%s:%d", got.Host, got.Port, raw.Host, raw.Port)
}
if !got.UseProxy {
t.Fatalf("mongo should keep UseProxy=true for driver-level dialer")
}
if !reflect.DeepEqual(got.Hosts, hosts) {
t.Fatalf("mongo hosts should be kept, got=%v want=%v", got.Hosts, hosts)
}
}
func TestResolveDialConfigWithProxy_MongoSRVKeepsTargetAddress(t *testing.T) {
raw := connection.ConnectionConfig{
Type: "mongodb",
Host: "cluster0.example.com",
Port: 27017,
MongoSRV: true,
UseProxy: true,
Proxy: connection.ProxyConfig{
Type: "http",
Host: "127.0.0.1",
Port: 7890,
},
}
got, err := resolveDialConfigWithProxy(raw)
if err != nil {
t.Fatalf("resolveDialConfigWithProxy returned error: %v", err)
}
if got.Host != raw.Host || got.Port != raw.Port {
t.Fatalf("mongo SRV target address should be kept, got=%s:%d want=%s:%d", got.Host, got.Port, raw.Host, raw.Port)
}
if !got.UseProxy {
t.Fatalf("mongo SRV should keep UseProxy=true for driver-level dialer")
}
}

View File

@@ -72,30 +72,25 @@ func setGlobalProxyConfig(enabled bool, proxyConfig connection.ProxyConfig) (glo
}
func (a *App) ConfigureGlobalProxy(enabled bool, proxyConfig connection.ProxyConfig) connection.QueryResult {
before := currentGlobalProxyConfig()
snapshot, err := setGlobalProxyConfig(enabled, proxyConfig)
if err != nil {
return connection.QueryResult{Success: false, Message: err.Error()}
}
// 前端可能在同一配置下重复触发同步(例如严格模式或状态回放),
// 这里做幂等日志,避免重复刷屏。
if !globalProxySnapshotEqual(before, snapshot) {
if snapshot.Enabled {
authState := ""
if strings.TrimSpace(snapshot.Proxy.User) != "" {
authState = "(认证:已配置)"
}
logger.Infof(
"全局代理已启用:%s://%s:%d%s",
strings.ToLower(strings.TrimSpace(snapshot.Proxy.Type)),
strings.TrimSpace(snapshot.Proxy.Host),
snapshot.Proxy.Port,
authState,
)
} else {
logger.Infof("全局代理已关闭")
if snapshot.Enabled {
authState := ""
if strings.TrimSpace(snapshot.Proxy.User) != "" {
authState = "(认证:已配置)"
}
logger.Infof(
"全局代理已启用:%s://%s:%d%s",
strings.ToLower(strings.TrimSpace(snapshot.Proxy.Type)),
strings.TrimSpace(snapshot.Proxy.Host),
snapshot.Proxy.Port,
authState,
)
} else {
logger.Infof("全局代理已关闭")
}
return connection.QueryResult{
@@ -105,24 +100,6 @@ func (a *App) ConfigureGlobalProxy(enabled bool, proxyConfig connection.ProxyCon
}
}
func globalProxySnapshotEqual(a, b globalProxySnapshot) bool {
if a.Enabled != b.Enabled {
return false
}
if !a.Enabled {
return true
}
return proxyConfigEqual(a.Proxy, b.Proxy)
}
func proxyConfigEqual(a, b connection.ProxyConfig) bool {
return strings.EqualFold(strings.TrimSpace(a.Type), strings.TrimSpace(b.Type)) &&
strings.TrimSpace(a.Host) == strings.TrimSpace(b.Host) &&
a.Port == b.Port &&
strings.TrimSpace(a.User) == strings.TrimSpace(b.User) &&
a.Password == b.Password
}
func (a *App) GetGlobalProxyConfig() connection.QueryResult {
return connection.QueryResult{
Success: true,

View File

@@ -3,7 +3,6 @@ package app
import (
"context"
"fmt"
"strconv"
"strings"
"time"
@@ -13,16 +12,6 @@ import (
"GoNavi-Wails/internal/utils"
)
const testConnectionTimeoutUpperBoundSeconds = 12
func normalizeTestConnectionConfig(config connection.ConnectionConfig) connection.ConnectionConfig {
normalized := config
if normalized.Timeout <= 0 || normalized.Timeout > testConnectionTimeoutUpperBoundSeconds {
normalized.Timeout = testConnectionTimeoutUpperBoundSeconds
}
return normalized
}
// Generic DB Methods
func (a *App) DBConnect(config connection.ConnectionConfig) connection.QueryResult {
@@ -38,16 +27,13 @@ func (a *App) DBConnect(config connection.ConnectionConfig) connection.QueryResu
}
func (a *App) TestConnection(config connection.ConnectionConfig) connection.QueryResult {
testConfig := normalizeTestConnectionConfig(config)
started := time.Now()
logger.Infof("TestConnection 开始:%s", formatConnSummary(testConfig))
_, err := a.getDatabaseForcePing(testConfig)
_, err := a.getDatabaseForcePing(config)
if err != nil {
logger.Error(err, "TestConnection 连接测试失败:耗时=%s %s", time.Since(started).Round(time.Millisecond), formatConnSummary(testConfig))
logger.Error(err, "TestConnection 连接测试失败:%s", formatConnSummary(config))
return connection.QueryResult{Success: false, Message: err.Error()}
}
logger.Infof("TestConnection 连接测试成功:耗时=%s %s", time.Since(started).Round(time.Millisecond), formatConnSummary(testConfig))
logger.Infof("TestConnection 连接测试成功:%s", formatConnSummary(config))
return connection.QueryResult{Success: true, Message: "连接成功"}
}
@@ -561,24 +547,6 @@ func ensureNonNilSlice[T any](items []T) []T {
func (a *App) DBGetDatabases(config connection.ConnectionConfig) connection.QueryResult {
runConfig := normalizeRunConfig(config, "")
if strings.EqualFold(strings.TrimSpace(runConfig.Type), "redis") {
runConfig.Type = "redis"
client, err := a.getRedisClient(runConfig)
if err != nil {
logger.Error(err, "DBGetDatabases 获取 Redis 连接失败:%s", formatConnSummary(runConfig))
return connection.QueryResult{Success: false, Message: err.Error()}
}
dbs, err := client.GetDatabases()
if err != nil {
logger.Error(err, "DBGetDatabases 获取 Redis 库列表失败:%s", formatConnSummary(runConfig))
return connection.QueryResult{Success: false, Message: err.Error()}
}
resData := make([]map[string]string, 0, len(dbs))
for _, item := range dbs {
resData = append(resData, map[string]string{"Database": strconv.Itoa(item.Index)})
}
return connection.QueryResult{Success: true, Data: resData}
}
dbInst, err := a.getDatabase(runConfig)
if err != nil {
logger.Error(err, "DBGetDatabases 获取连接失败:%s", formatConnSummary(runConfig))
@@ -611,48 +579,6 @@ func (a *App) DBGetDatabases(config connection.ConnectionConfig) connection.Quer
func (a *App) DBGetTables(config connection.ConnectionConfig, dbName string) connection.QueryResult {
runConfig := normalizeRunConfig(config, dbName)
if strings.EqualFold(strings.TrimSpace(runConfig.Type), "redis") {
runConfig.Type = "redis"
client, err := a.getRedisClient(runConfig)
if err != nil {
logger.Error(err, "DBGetTables 获取 Redis 连接失败:%s", formatConnSummary(runConfig))
return connection.QueryResult{Success: false, Message: err.Error()}
}
cursor := uint64(0)
tables := make([]string, 0, 128)
seen := make(map[string]struct{}, 128)
for {
result, err := client.ScanKeys("*", cursor, 1000)
if err != nil {
logger.Error(err, "DBGetTables 扫描 Redis Key 失败:%s", formatConnSummary(runConfig))
return connection.QueryResult{Success: false, Message: err.Error()}
}
for _, item := range result.Keys {
key := strings.TrimSpace(item.Key)
if key == "" {
continue
}
if _, ok := seen[key]; ok {
continue
}
seen[key] = struct{}{}
tables = append(tables, key)
}
if strings.TrimSpace(result.Cursor) == "" || strings.TrimSpace(result.Cursor) == "0" {
break
}
next, err := strconv.ParseUint(strings.TrimSpace(result.Cursor), 10, 64)
if err != nil || next == cursor {
break
}
cursor = next
}
resData := make([]map[string]string, 0, len(tables))
for _, name := range tables {
resData = append(resData, map[string]string{"Table": name})
}
return connection.QueryResult{Success: true, Data: resData}
}
dbInst, err := a.getDatabase(runConfig)
if err != nil {

View File

@@ -1,31 +0,0 @@
package app
import (
"testing"
"GoNavi-Wails/internal/connection"
)
func TestNormalizeTestConnectionConfig_DefaultToUpperBound(t *testing.T) {
config := connection.ConnectionConfig{Type: "mongodb", Timeout: 0}
got := normalizeTestConnectionConfig(config)
if got.Timeout != testConnectionTimeoutUpperBoundSeconds {
t.Fatalf("expected timeout=%d, got=%d", testConnectionTimeoutUpperBoundSeconds, got.Timeout)
}
}
func TestNormalizeTestConnectionConfig_KeepSmallerTimeout(t *testing.T) {
config := connection.ConnectionConfig{Type: "mongodb", Timeout: 6}
got := normalizeTestConnectionConfig(config)
if got.Timeout != 6 {
t.Fatalf("expected timeout=6, got=%d", got.Timeout)
}
}
func TestNormalizeTestConnectionConfig_ClampLargeTimeout(t *testing.T) {
config := connection.ConnectionConfig{Type: "mongodb", Timeout: 60}
got := normalizeTestConnectionConfig(config)
if got.Timeout != testConnectionTimeoutUpperBoundSeconds {
t.Fatalf("expected timeout=%d, got=%d", testConnectionTimeoutUpperBoundSeconds, got.Timeout)
}
}

View File

@@ -2792,7 +2792,6 @@ func ensureOptionalDriverAgentBinary(a *App, definition driverDefinition, execut
driverType := normalizeDriverType(definition.Type)
displayName := resolveDriverDisplayName(definition)
forceSourceBuild := shouldForceSourceBuildForVersion(driverType, selectedVersion)
preferSourceBuildBeforeDownload := shouldPreferSourceBuildBeforeDownload(driverType, selectedVersion)
skipReuseCandidate := shouldSkipReusableAgentCandidate(driverType, selectedVersion)
info, err := os.Stat(executablePath)
@@ -2800,10 +2799,11 @@ func ensureOptionalDriverAgentBinary(a *App, definition driverDefinition, execut
if validateErr := db.ValidateOptionalDriverAgentExecutable(driverType, executablePath); validateErr != nil {
_ = os.Remove(executablePath)
} else {
// 用户点击“安装/重装”时应强制刷新驱动代理,避免沿用旧二进制导致修复不生效。
if removeErr := os.Remove(executablePath); removeErr != nil {
return "", "", fmt.Errorf("清理已安装 %s 驱动代理失败:%w", displayName, removeErr)
hash, hashErr := hashFileSHA256(executablePath)
if hashErr != nil {
return "", "", fmt.Errorf("读取已安装 %s 驱动代理摘要失败:%w", displayName, hashErr)
}
return fmt.Sprintf("local://existing/%s-driver-agent", driverType), hash, nil
}
}
if err == nil && info.IsDir() {
@@ -2834,22 +2834,6 @@ func ensureOptionalDriverAgentBinary(a *App, definition driverDefinition, execut
}
var downloadErrs []string
var sourceBuildAttempted bool
var sourceBuildErr error
if !forceSourceBuild && preferSourceBuildBeforeDownload {
sourceBuildAttempted = true
if a != nil {
a.emitDriverDownloadProgress(driverType, "downloading", 16, 100, fmt.Sprintf("优先使用本地源码构建 %s 驱动代理", displayName))
}
hash, buildErr := buildOptionalDriverAgentFromSource(definition, executablePath, selectedVersion)
if buildErr == nil {
return fmt.Sprintf("local://go-build/%s-driver-agent", driverType), hash, nil
}
sourceBuildErr = buildErr
logger.Warnf("预先本地构建 %s 驱动代理失败,将继续尝试下载预编译包:%v", displayName, buildErr)
}
if !forceSourceBuild {
downloadURLs := resolveOptionalDriverAgentDownloadURLs(definition, downloadURL)
if len(downloadURLs) > 0 {
@@ -2882,15 +2866,9 @@ func ensureOptionalDriverAgentBinary(a *App, definition driverDefinition, execut
a.emitDriverDownloadProgress(driverType, "downloading", 92, 100, "未命中预编译包,尝试开发态本地构建")
}
var buildErr error
if sourceBuildAttempted {
buildErr = sourceBuildErr
} else {
hash, runErr := buildOptionalDriverAgentFromSource(definition, executablePath, selectedVersion)
buildErr = runErr
if buildErr == nil {
return fmt.Sprintf("local://go-build/%s-driver-agent", driverType), hash, nil
}
hash, buildErr := buildOptionalDriverAgentFromSource(definition, executablePath, selectedVersion)
if buildErr == nil {
return fmt.Sprintf("local://go-build/%s-driver-agent", driverType), hash, nil
}
var parts []string
@@ -3108,25 +3086,12 @@ func shouldForceSourceBuildForVersion(driverType string, selectedVersion string)
return resolveMongoDriverMajorFromVersion(selectedVersion) == 1
}
func shouldPreferSourceBuildBeforeDownload(driverType string, selectedVersion string) bool {
_ = selectedVersion
switch normalizeDriverType(driverType) {
case "kingbase":
// 金仓迭代期优先本地源码构建,避免下载到旧版本预编译代理导致修复不生效。
return true
default:
return false
}
}
func shouldSkipReusableAgentCandidate(driverType string, selectedVersion string) bool {
_ = selectedVersion
switch normalizeDriverType(driverType) {
case "mongodb", "kingbase":
return true
default:
if normalizeDriverType(driverType) != "mongodb" {
return false
}
_ = selectedVersion
return true
}
func optionalDriverBuildTag(driverType string, selectedVersion string) (string, error) {

View File

@@ -90,7 +90,6 @@ type IndexDefinition struct {
NonUnique int `json:"nonUnique"`
SeqInIndex int `json:"seqInIndex"`
IndexType string `json:"indexType"`
SubPart int `json:"subPart,omitempty"`
}
// ForeignKeyDefinition represents a foreign key

View File

@@ -8,7 +8,6 @@ import (
"fmt"
"net"
"net/url"
"sort"
"strconv"
"strings"
"time"
@@ -679,134 +678,3 @@ func isClickHouseTruthy(value interface{}) bool {
return normalized == "1" || normalized == "true" || normalized == "yes" || normalized == "y"
}
}
func (c *ClickHouseDB) ApplyChanges(tableName string, changes connection.ChangeSet) error {
if c.conn == nil {
return fmt.Errorf("connection not open")
}
database, table, err := c.resolveDatabaseAndTable(c.database, tableName)
if err != nil {
return err
}
qualifiedTable := fmt.Sprintf("%s.%s", quoteClickHouseIdentifier(database), quoteClickHouseIdentifier(table))
for _, pk := range changes.Deletes {
whereExpr := buildClickHouseWhereClause(pk)
if whereExpr == "" {
continue
}
query := fmt.Sprintf("ALTER TABLE %s DELETE WHERE %s", qualifiedTable, whereExpr)
if _, err := c.conn.Exec(query); err != nil {
return fmt.Errorf("delete error: %v; sql=%s", err, query)
}
}
for _, update := range changes.Updates {
setExpr := buildClickHouseAssignments(update.Values)
whereExpr := buildClickHouseWhereClause(update.Keys)
if setExpr == "" || whereExpr == "" {
continue
}
query := fmt.Sprintf("ALTER TABLE %s UPDATE %s WHERE %s", qualifiedTable, setExpr, whereExpr)
if _, err := c.conn.Exec(query); err != nil {
return fmt.Errorf("update error: %v; sql=%s", err, query)
}
}
for _, row := range changes.Inserts {
query, err := buildClickHouseInsertSQL(qualifiedTable, row)
if err != nil {
return err
}
if query == "" {
continue
}
if _, err := c.conn.Exec(query); err != nil {
return fmt.Errorf("insert error: %v; sql=%s", err, query)
}
}
return nil
}
func buildClickHouseInsertSQL(qualifiedTable string, row map[string]interface{}) (string, error) {
if len(row) == 0 {
return "", nil
}
cols := make([]string, 0, len(row))
for k := range row {
if strings.TrimSpace(k) == "" {
continue
}
cols = append(cols, k)
}
if len(cols) == 0 {
return "", nil
}
sort.Strings(cols)
quotedCols := make([]string, 0, len(cols))
values := make([]string, 0, len(cols))
for _, col := range cols {
quotedCols = append(quotedCols, quoteClickHouseIdentifier(col))
values = append(values, clickHouseLiteral(row[col]))
}
return fmt.Sprintf("INSERT INTO %s (%s) VALUES (%s)", qualifiedTable, strings.Join(quotedCols, ", "), strings.Join(values, ", ")), nil
}
func buildClickHouseAssignments(values map[string]interface{}) string {
if len(values) == 0 {
return ""
}
cols := make([]string, 0, len(values))
for k := range values {
if strings.TrimSpace(k) == "" {
continue
}
cols = append(cols, k)
}
sort.Strings(cols)
parts := make([]string, 0, len(cols))
for _, col := range cols {
parts = append(parts, fmt.Sprintf("%s = %s", quoteClickHouseIdentifier(col), clickHouseLiteral(values[col])))
}
return strings.Join(parts, ", ")
}
func buildClickHouseWhereClause(keys map[string]interface{}) string {
if len(keys) == 0 {
return ""
}
cols := make([]string, 0, len(keys))
for k := range keys {
if strings.TrimSpace(k) == "" {
continue
}
cols = append(cols, k)
}
sort.Strings(cols)
parts := make([]string, 0, len(cols))
for _, col := range cols {
parts = append(parts, fmt.Sprintf("%s = %s", quoteClickHouseIdentifier(col), clickHouseLiteral(keys[col])))
}
return strings.Join(parts, " AND ")
}
func clickHouseLiteral(value interface{}) string {
switch val := value.(type) {
case nil:
return "NULL"
case bool:
if val {
return "1"
}
return "0"
case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, float32, float64:
return fmt.Sprintf("%v", val)
case time.Time:
return fmt.Sprintf("'%s'", val.Format("2006-01-02 15:04:05"))
case []byte:
return fmt.Sprintf("'%s'", strings.ReplaceAll(string(val), "'", "''"))
default:
return fmt.Sprintf("'%s'", strings.ReplaceAll(fmt.Sprintf("%v", val), "'", "''"))
}
}

View File

@@ -8,6 +8,7 @@ import (
"fmt"
"net"
"net/url"
"sort"
"strconv"
"strings"
"time"
@@ -204,9 +205,80 @@ func (d *DamengDB) Exec(query string) (int64, error) {
}
func (d *DamengDB) GetDatabases() ([]string, error) {
// 达梦在本项目中将 schema/owner 作为数据库”展示口径
// 先查当前 schema / 当前用户,再聚合可见用户与 owner避免权限受限时返回空列表
return collectDamengDatabaseNames(d.Query)
// 达梦将「用户/模式」作为数据库列表来源,不同权限下可见口径不同
// 这里采用多查询口径聚合,避免仅依赖单一视图导致“少库”
queries := []string{
"SELECT USERNAME AS DATABASE_NAME FROM SYS.DBA_USERS ORDER BY USERNAME",
"SELECT USERNAME AS DATABASE_NAME FROM DBA_USERS ORDER BY USERNAME",
"SELECT USERNAME AS DATABASE_NAME FROM ALL_USERS ORDER BY USERNAME",
"SELECT USERNAME AS DATABASE_NAME FROM USER_USERS",
"SELECT DISTINCT OWNER AS DATABASE_NAME FROM ALL_TABLES ORDER BY OWNER",
}
seen := make(map[string]struct{})
dbs := make([]string, 0, 64)
var lastErr error
success := false
for _, q := range queries {
data, _, err := d.Query(q)
if err != nil {
lastErr = err
continue
}
success = true
for _, row := range data {
name := getDamengRowString(row, "DATABASE_NAME", "USERNAME", "OWNER", "SCHEMA_NAME")
if name == "" {
// 回退到第一列,兼容驱动返回列名差异。
for _, v := range row {
text := strings.TrimSpace(fmt.Sprintf("%v", v))
if text == "" || strings.EqualFold(text, "<nil>") {
continue
}
name = text
break
}
}
if name == "" {
continue
}
key := strings.ToUpper(name)
if _, ok := seen[key]; ok {
continue
}
seen[key] = struct{}{}
dbs = append(dbs, name)
}
}
if !success && lastErr != nil {
return nil, lastErr
}
sort.Slice(dbs, func(i, j int) bool {
return strings.ToUpper(dbs[i]) < strings.ToUpper(dbs[j])
})
return dbs, nil
}
func getDamengRowString(row map[string]interface{}, keys ...string) string {
if len(row) == 0 {
return ""
}
for _, key := range keys {
for k, v := range row {
if !strings.EqualFold(strings.TrimSpace(k), strings.TrimSpace(key)) {
continue
}
text := strings.TrimSpace(fmt.Sprintf("%v", v))
if text == "" || strings.EqualFold(text, "<nil>") {
return ""
}
return text
}
}
return ""
}
func (d *DamengDB) GetTables(dbName string) ([]string, error) {

View File

@@ -1,91 +0,0 @@
package db
import (
"fmt"
"sort"
"strings"
)
var damengDatabaseQueries = []string{
"SELECT SYS_CONTEXT('USERENV', 'CURRENT_SCHEMA') AS DATABASE_NAME FROM DUAL",
"SELECT SYS_CONTEXT('USERENV', 'CURRENT_USER') AS DATABASE_NAME FROM DUAL",
"SELECT USERNAME AS DATABASE_NAME FROM USER_USERS",
"SELECT USERNAME AS DATABASE_NAME FROM ALL_USERS ORDER BY USERNAME",
"SELECT USERNAME AS DATABASE_NAME FROM DBA_USERS ORDER BY USERNAME",
"SELECT USERNAME AS DATABASE_NAME FROM SYS.DBA_USERS ORDER BY USERNAME",
"SELECT DISTINCT OWNER AS DATABASE_NAME FROM ALL_OBJECTS ORDER BY OWNER",
"SELECT DISTINCT OWNER AS DATABASE_NAME FROM ALL_TABLES ORDER BY OWNER",
}
type damengQueryFunc func(query string) ([]map[string]interface{}, []string, error)
func collectDamengDatabaseNames(query damengQueryFunc) ([]string, error) {
seen := make(map[string]struct{})
dbs := make([]string, 0, 64)
var lastErr error
for _, q := range damengDatabaseQueries {
data, _, err := query(q)
if err != nil {
lastErr = err
continue
}
for _, row := range data {
name := getDamengRowString(row,
"DATABASE_NAME",
"USERNAME",
"OWNER",
"SCHEMA_NAME",
"CURRENT_SCHEMA",
"CURRENT_USER",
)
if name == "" {
for _, v := range row {
text := strings.TrimSpace(fmt.Sprintf("%v", v))
if text == "" || strings.EqualFold(text, "<nil>") {
continue
}
name = text
break
}
}
if name == "" {
continue
}
key := strings.ToUpper(name)
if _, ok := seen[key]; ok {
continue
}
seen[key] = struct{}{}
dbs = append(dbs, name)
}
}
if len(dbs) == 0 && lastErr != nil {
return nil, lastErr
}
sort.Slice(dbs, func(i, j int) bool {
return strings.ToUpper(dbs[i]) < strings.ToUpper(dbs[j])
})
return dbs, nil
}
func getDamengRowString(row map[string]interface{}, keys ...string) string {
if len(row) == 0 {
return ""
}
for _, key := range keys {
for k, v := range row {
if !strings.EqualFold(strings.TrimSpace(k), strings.TrimSpace(key)) {
continue
}
text := strings.TrimSpace(fmt.Sprintf("%v", v))
if text == "" || strings.EqualFold(text, "<nil>") {
return ""
}
return text
}
}
return ""
}

View File

@@ -1,73 +0,0 @@
package db
import (
"errors"
"reflect"
"testing"
)
func TestCollectDamengDatabaseNames_UsesCurrentSchemaFallback(t *testing.T) {
t.Parallel()
got, err := collectDamengDatabaseNames(func(query string) ([]map[string]interface{}, []string, error) {
switch query {
case damengDatabaseQueries[0]:
return []map[string]interface{}{{"DATABASE_NAME": "APP_SCHEMA"}}, nil, nil
case damengDatabaseQueries[1]:
return []map[string]interface{}{{"DATABASE_NAME": "app_schema"}}, nil, nil
default:
return nil, nil, errors.New("permission denied")
}
})
if err != nil {
t.Fatalf("collectDamengDatabaseNames 返回错误: %v", err)
}
want := []string{"APP_SCHEMA"}
if !reflect.DeepEqual(got, want) {
t.Fatalf("unexpected database names, got=%v want=%v", got, want)
}
}
func TestCollectDamengDatabaseNames_CollectsOwnersWhenVisible(t *testing.T) {
t.Parallel()
got, err := collectDamengDatabaseNames(func(query string) ([]map[string]interface{}, []string, error) {
switch query {
case damengDatabaseQueries[0], damengDatabaseQueries[1], damengDatabaseQueries[2], damengDatabaseQueries[3], damengDatabaseQueries[4], damengDatabaseQueries[5]:
return []map[string]interface{}{}, nil, nil
case damengDatabaseQueries[6]:
return []map[string]interface{}{{"OWNER": "BIZ"}, {"OWNER": "audit"}}, nil, nil
case damengDatabaseQueries[7]:
return []map[string]interface{}{{"OWNER": "BIZ"}}, nil, nil
default:
return nil, nil, nil
}
})
if err != nil {
t.Fatalf("collectDamengDatabaseNames 返回错误: %v", err)
}
want := []string{"audit", "BIZ"}
if !reflect.DeepEqual(got, want) {
t.Fatalf("unexpected database names, got=%v want=%v", got, want)
}
}
func TestCollectDamengDatabaseNames_ReturnsErrorWhenNoNameResolved(t *testing.T) {
t.Parallel()
expectErr := errors.New("last query failed")
got, err := collectDamengDatabaseNames(func(query string) ([]map[string]interface{}, []string, error) {
if query == damengDatabaseQueries[len(damengDatabaseQueries)-1] {
return nil, nil, expectErr
}
return nil, nil, errors.New("permission denied")
})
if err == nil {
t.Fatalf("期望返回错误,实际 got=%v", got)
}
if !errors.Is(err, expectErr) {
t.Fatalf("错误不符合预期: %v", err)
}
}

View File

@@ -9,6 +9,7 @@ import (
"strings"
"GoNavi-Wails/internal/connection"
"GoNavi-Wails/internal/logger"
"GoNavi-Wails/internal/ssh"
"GoNavi-Wails/internal/utils"
@@ -134,26 +135,26 @@ func collectDirosAddresses(config connection.ConnectionConfig) []string {
return result
}
func (d *DirosDB) getDSN(config connection.ConnectionConfig) (string, error) {
func (d *DirosDB) getDSN(config connection.ConnectionConfig) string {
database := config.Database
protocol := "tcp"
address := normalizeMySQLAddress(config.Host, config.Port)
if config.UseSSH {
netName, err := ssh.RegisterSSHNetwork(config.SSH)
if err != nil {
return "", fmt.Errorf("创建 SSH 隧道失败:%w", err)
if err == nil {
protocol = netName
address = normalizeMySQLAddress(config.Host, config.Port)
} else {
logger.Warnf("注册 Doris SSH 网络失败,将尝试直连:地址=%s:%d 用户=%s原因%v", config.Host, config.Port, config.User, err)
}
protocol = netName
}
timeout := getConnectTimeoutSeconds(config)
tlsMode := resolveMySQLTLSMode(config)
return fmt.Sprintf(
"%s:%s@%s(%s)/%s?charset=utf8mb4&parseTime=True&loc=Local&timeout=%ds&tls=%s",
config.User, config.Password, protocol, address, database, timeout, url.QueryEscape(tlsMode),
), nil
return fmt.Sprintf("%s:%s@%s(%s)/%s?charset=utf8mb4&parseTime=True&loc=Local&timeout=%ds&tls=%s",
config.User, config.Password, protocol, address, database, timeout, url.QueryEscape(tlsMode))
}
func resolveDirosCredential(config connection.ConnectionConfig, addressIndex int) (string, string) {
@@ -191,11 +192,7 @@ func (d *DirosDB) Connect(config connection.ConnectionConfig) error {
candidateConfig.Port = port
candidateConfig.User, candidateConfig.Password = resolveDirosCredential(runConfig, index)
dsn, err := d.getDSN(candidateConfig)
if err != nil {
errorDetails = append(errorDetails, fmt.Sprintf("%s 生成连接串失败: %v", address, err))
continue
}
dsn := d.getDSN(candidateConfig)
db, err := sql.Open(dirosDriverName, dsn)
if err != nil {
errorDetails = append(errorDetails, fmt.Sprintf("%s 打开失败: %v", address, err))

View File

@@ -1,164 +0,0 @@
package db
import "strings"
func normalizeKingbaseIdentCommon(raw string) string {
value := strings.TrimSpace(raw)
if value == "" {
return ""
}
// 兼容被多次 JSON 序列化后的转义引号:
// \\\"schema\\\" -> \"schema\" -> "schema"
for i := 0; i < 8; i++ {
next := strings.TrimSpace(value)
next = strings.ReplaceAll(next, `\\\"`, `\"`)
next = strings.ReplaceAll(next, `\"`, `"`)
if next == value {
break
}
value = next
}
value = strings.TrimSpace(value)
stripWrapperOnce := func(text string) string {
t := strings.TrimSpace(text)
if strings.HasPrefix(t, `\`) && len(t) > 1 {
t = strings.TrimSpace(strings.TrimPrefix(t, `\`))
}
if strings.HasSuffix(t, `\`) && len(t) > 1 {
t = strings.TrimSpace(strings.TrimSuffix(t, `\`))
}
if len(t) >= 4 && strings.HasPrefix(t, `\"`) && strings.HasSuffix(t, `\"`) {
return strings.TrimSpace(t[2 : len(t)-2])
}
if len(t) >= 2 && strings.HasPrefix(t, `"`) && strings.HasSuffix(t, `"`) {
return strings.TrimSpace(t[1 : len(t)-1])
}
if len(t) >= 2 && strings.HasPrefix(t, "`") && strings.HasSuffix(t, "`") {
return strings.TrimSpace(t[1 : len(t)-1])
}
if len(t) >= 2 && strings.HasPrefix(t, "[") && strings.HasSuffix(t, "]") {
return strings.TrimSpace(t[1 : len(t)-1])
}
return t
}
for i := 0; i < 8; i++ {
next := stripWrapperOnce(value)
if next == value {
break
}
value = next
}
value = strings.TrimSpace(value)
// 兼容错误的二次引用与残留反斜杠。
value = strings.ReplaceAll(value, `\"`, `"`)
value = strings.ReplaceAll(value, `""`, "")
value = strings.TrimSpace(value)
for i := 0; i < 8; i++ {
next := strings.TrimSpace(value)
changed := false
if strings.HasPrefix(next, `\`) && len(next) > 1 {
next = strings.TrimSpace(strings.TrimPrefix(next, `\`))
changed = true
}
if strings.HasSuffix(next, `\`) && len(next) > 1 {
next = strings.TrimSpace(strings.TrimSuffix(next, `\`))
changed = true
}
if !changed || next == value {
break
}
value = next
}
return strings.TrimSpace(value)
}
func splitKingbaseQualifiedNameCommon(raw string) (schema string, table string) {
text := strings.TrimSpace(raw)
if text == "" {
return "", ""
}
sep := findKingbaseQualifiedSeparator(text)
if sep < 0 {
return "", normalizeKingbaseIdentCommon(text)
}
schemaPart := normalizeKingbaseIdentCommon(text[:sep])
tablePart := normalizeKingbaseIdentCommon(text[sep+1:])
if tablePart == "" {
if schemaPart == "" {
return "", normalizeKingbaseIdentCommon(text)
}
return "", schemaPart
}
if schemaPart == "" {
return "", tablePart
}
return schemaPart, tablePart
}
func findKingbaseQualifiedSeparator(raw string) int {
inDouble := false
inBacktick := false
inBracket := false
escaped := false
for i := 0; i < len(raw); i++ {
ch := raw[i]
if escaped {
escaped = false
continue
}
if ch == '\\' {
escaped = true
continue
}
if inDouble {
if ch == '"' {
// SQL 双引号转义:"" 代表字面量 "
if i+1 < len(raw) && raw[i+1] == '"' {
i++
continue
}
inDouble = false
}
continue
}
if inBacktick {
if ch == '`' {
inBacktick = false
}
continue
}
if inBracket {
if ch == ']' {
inBracket = false
}
continue
}
switch ch {
case '"':
inDouble = true
case '`':
inBacktick = true
case '[':
inBracket = true
case '.':
return i
}
}
return -1
}

View File

@@ -1,52 +0,0 @@
package db
import "testing"
func TestNormalizeKingbaseIdentCommon(t *testing.T) {
tests := []struct {
name string
in string
want string
}{
{name: "plain", in: "ldf_server", want: "ldf_server"},
{name: "quoted", in: `"ldf_server"`, want: "ldf_server"},
{name: "escaped quoted", in: `\"ldf_server\"`, want: "ldf_server"},
{name: "double escaped quoted", in: `\\\"ldf_server\\\"`, want: "ldf_server"},
{name: "double quoted", in: `""ldf_server""`, want: "ldf_server"},
{name: "backtick quoted", in: "`ldf_server`", want: "ldf_server"},
{name: "bracket quoted", in: "[ldf_server]", want: "ldf_server"},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := normalizeKingbaseIdentCommon(tt.in); got != tt.want {
t.Fatalf("normalizeKingbaseIdentCommon(%q)=%q,want=%q", tt.in, got, tt.want)
}
})
}
}
func TestSplitKingbaseQualifiedNameCommon(t *testing.T) {
tests := []struct {
name string
in string
wantSchema string
wantTable string
}{
{name: "plain", in: "ldf_server.andon_events", wantSchema: "ldf_server", wantTable: "andon_events"},
{name: "quoted", in: `"ldf_server"."andon_events"`, wantSchema: "ldf_server", wantTable: "andon_events"},
{name: "escaped quoted", in: `\"ldf_server\".\"andon_events\"`, wantSchema: "ldf_server", wantTable: "andon_events"},
{name: "double escaped quoted", in: `\\\"ldf_server\\\".\\\"andon_events\\\"`, wantSchema: "ldf_server", wantTable: "andon_events"},
{name: "space around dot", in: ` "ldf_server" . "andon_events" `, wantSchema: "ldf_server", wantTable: "andon_events"},
{name: "table only", in: "andon_events", wantSchema: "", wantTable: "andon_events"},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
gotSchema, gotTable := splitKingbaseQualifiedNameCommon(tt.in)
if gotSchema != tt.wantSchema || gotTable != tt.wantTable {
t.Fatalf("splitKingbaseQualifiedNameCommon(%q)=(%q,%q),want=(%q,%q)", tt.in, gotSchema, gotTable, tt.wantSchema, tt.wantTable)
}
})
}
}

View File

@@ -7,7 +7,6 @@ import (
"database/sql"
"fmt"
"net"
"regexp"
"strconv"
"strings"
"time"
@@ -137,88 +136,11 @@ func (k *KingbaseDB) Connect(config connection.ConnectionConfig) error {
if idx > 0 {
logger.Warnf("人大金仓 SSL 优先连接失败,已回退至明文连接")
}
// 获取 schema 列表以重构带有 search_path 的连接池
searchPathStr := k.getSearchPathStr()
if searchPathStr != "" {
// 将 search_path 参数拼入 DSN
finalDSN := dsn + " search_path=" + quoteConnValue(searchPathStr)
if finalDB, err := sql.Open("kingbase", finalDSN); err == nil {
k.pingTimeout = getConnectTimeout(attempt)
finalDB.SetConnMaxLifetime(5 * time.Minute)
// 临时将 k.conn 指向 finalDB 来做 ping 测试
oldConn := k.conn
k.conn = finalDB
if err := k.Ping(); err == nil {
// 成功使用带 search_path 的连接池
_ = oldConn.Close()
logger.Infof("人大金仓已配置连接级 search_path%s", searchPathStr)
} else {
_ = finalDB.Close()
k.conn = oldConn
}
}
}
if searchPathStr != "" {
timeout := k.pingTimeout
if timeout <= 0 {
timeout = 5 * time.Second
}
ctx, cancel := utils.ContextWithTimeout(timeout)
defer cancel()
if _, err := k.conn.ExecContext(ctx, fmt.Sprintf("SET search_path TO %s", searchPathStr)); err != nil {
logger.Warnf("人大金仓显式设置 search_path 失败:%v", err)
} else {
logger.Infof("人大金仓已设置默认 search_path%s", searchPathStr)
}
}
return nil
}
return fmt.Errorf("连接建立后验证失败:%s", strings.Join(failures, ""))
}
// getSearchPathStr 查询当前数据库中所有用户 schema配置 DSN 的 search_path。
// KingBase 默认 search_path 为 "$user", public对于自定义 schema 下的表不可见。
func (k *KingbaseDB) getSearchPathStr() string {
if k.conn == nil {
return ""
}
query := `SELECT nspname FROM pg_namespace
WHERE nspname NOT IN ('pg_catalog', 'information_schema')
AND nspname NOT LIKE 'pg_%'
ORDER BY nspname`
rows, err := k.conn.Query(query)
if err != nil {
logger.Warnf("人大金仓查询用户 schema 失败,跳过 search_path 设置:%v", err)
return ""
}
defer rows.Close()
var schemas []string
for rows.Next() {
var name string
if err := rows.Scan(&name); err != nil {
continue
}
name = strings.TrimSpace(name)
if name != "" {
// 使用 SQL 标准的双引号包裹标识符
escaped := strings.ReplaceAll(name, `"`, `""`)
schemas = append(schemas, `"`+escaped+`"`)
}
}
if len(schemas) == 0 {
return ""
}
return strings.Join(schemas, ", ")
}
func (k *KingbaseDB) Close() error {
// Close SSH forwarder first if exists
if k.forwarder != nil {
@@ -853,63 +775,64 @@ func (k *KingbaseDB) ApplyChanges(tableName string, changes connection.ChangeSet
}
func normalizeKingbaseIdentifier(raw string) string {
return normalizeKingbaseIdentCommon(raw)
}
value := strings.TrimSpace(raw)
if value == "" {
return ""
}
// kingbaseIdentNeedsQuote 判断标识符是否需要双引号包裹。
// 与前端 sql.ts 中 needsQuote 逻辑保持一致。
func kingbaseIdentNeedsQuote(ident string) bool {
if ident == "" {
return false
}
// 不是合法裸标识符格式(必须以字母或下划线开头,仅含字母、数字、下划线)
if matched, _ := regexp.MatchString(`^[a-zA-Z_][a-zA-Z0-9_]*$`, ident); !matched {
return true
}
// 包含大写字母时需要引号保护KingbaseES/PostgreSQL 默认将未加引号的标识符折叠为小写)
for _, r := range ident {
if r >= 'A' && r <= 'Z' {
return true
// 兼容 JSON/字符串转义后传入的标识符:\"schema\" -> "schema"
value = strings.ReplaceAll(value, `\"`, `"`)
value = strings.TrimSpace(value)
// 兼容异常多重包裹引号(例如 ""schema""、""""schema"""")。
// strings.Trim 会移除两端连续引号,迭代后可收敛到纯标识符。
for i := 0; i < 4; i++ {
next := strings.TrimSpace(strings.Trim(value, `"`))
if next == value {
break
}
value = next
}
// 是 SQL 保留字
return isKingbaseReservedWord(ident)
}
// isKingbaseReservedWord 检查是否为常见 SQL 保留字(简化版,与前端保持一致)。
func isKingbaseReservedWord(ident string) bool {
switch strings.ToLower(ident) {
case "select", "from", "where", "table", "index", "user", "order", "group", "by",
"limit", "offset", "and", "or", "not", "null", "true", "false", "key",
"primary", "foreign", "references", "default", "constraint",
"create", "drop", "alter", "insert", "update", "delete", "set", "values", "into",
"join", "left", "right", "inner", "outer", "on", "as", "is", "in", "like",
"between", "case", "when", "then", "else", "end", "having", "distinct",
"all", "any", "exists", "union", "except", "intersect",
"column", "check", "unique", "with", "grant", "revoke", "trigger",
"begin", "commit", "rollback", "schema", "database", "view", "function",
"procedure", "sequence", "type", "domain", "role", "session", "current",
"authorization", "cross", "full", "natural", "some", "cast", "fetch",
"for", "to", "do", "if", "return", "returns", "declare", "cursor", "server", "owner":
return true
// 兼容其他方言可能残留的引用形式
if len(value) >= 2 && strings.HasPrefix(value, "`") && strings.HasSuffix(value, "`") {
value = strings.TrimSpace(strings.Trim(value, "`"))
}
return false
if len(value) >= 2 && strings.HasPrefix(value, "[") && strings.HasSuffix(value, "]") {
value = strings.TrimSpace(value[1 : len(value)-1])
}
return value
}
func quoteKingbaseIdent(name string) string {
n := normalizeKingbaseIdentifier(name)
n = strings.ReplaceAll(n, `"`, `""`)
if n == "" {
return "\"\""
}
if !kingbaseIdentNeedsQuote(n) {
return n
}
n = strings.ReplaceAll(n, `"`, `""`)
return `"` + n + `"`
}
func splitKingbaseQualifiedTable(tableName string) (schema string, table string) {
return splitKingbaseQualifiedNameCommon(tableName)
raw := strings.TrimSpace(tableName)
if raw == "" {
return "", ""
}
if parts := strings.SplitN(raw, ".", 2); len(parts) == 2 {
schema = normalizeKingbaseIdentifier(parts[0])
table = normalizeKingbaseIdentifier(parts[1])
if table == "" {
return "", normalizeKingbaseIdentifier(raw)
}
if schema == "" {
return "", table
}
return schema, table
}
return "", normalizeKingbaseIdentifier(raw)
}
func (k *KingbaseDB) GetAllColumns(dbName string) ([]connection.ColumnDefinitionWithTable, error) {

View File

@@ -15,10 +15,8 @@ func TestNormalizeKingbaseIdentifier(t *testing.T) {
{name: "double quoted", in: `""ldf_server""`, want: "ldf_server"},
{name: "quad quoted", in: `""""ldf_server""""`, want: "ldf_server"},
{name: "escaped quoted", in: `\"ldf_server\"`, want: "ldf_server"},
{name: "double escaped quoted", in: `\\\"ldf_server\\\"`, want: "ldf_server"},
{name: "backtick quoted", in: "`ldf_server`", want: "ldf_server"},
{name: "bracket quoted", in: "[ldf_server]", want: "ldf_server"},
{name: "embedded double quotes", in: `ldf""server`, want: "ldfserver"},
}
for _, tt := range tests {
@@ -36,25 +34,10 @@ func TestQuoteKingbaseIdent(t *testing.T) {
in string
want string
}{
// 纯小写+下划线:不加引号
{name: "plain lowercase", in: "ldf_server", want: "ldf_server"},
{name: "plain lowercase 2", in: "bcs_barcode", want: "bcs_barcode"},
{name: "double quoted input", in: `""ldf_server""`, want: "ldf_server"},
{name: "escaped quoted input", in: `\"ldf_server\"`, want: "ldf_server"},
// 含大写字母:加引号
{name: "uppercase", in: "LDF_Server", want: `"LDF_Server"`},
{name: "mixed case", in: "myTable", want: `"myTable"`},
// SQL 保留字:加引号
{name: "reserved word order", in: "order", want: `"order"`},
{name: "reserved word user", in: "user", want: `"user"`},
{name: "reserved word table", in: "table", want: `"table"`},
{name: "reserved word select", in: "select", want: `"select"`},
// 含特殊字符:加引号
{name: "with hyphen", in: "my-table", want: `"my-table"`},
{name: "with space", in: "my table", want: `"my table"`},
{name: "plain", in: "ldf_server", want: `"ldf_server"`},
{name: "double quoted", in: `""ldf_server""`, want: `"ldf_server"`},
{name: "escaped quoted", in: `\"ldf_server\"`, want: `"ldf_server"`},
{name: "with embedded quote", in: `ab"cd`, want: `"ab""cd"`},
// 空值
{name: "empty", in: "", want: `""`},
}
for _, tt := range tests {
@@ -66,31 +49,6 @@ func TestQuoteKingbaseIdent(t *testing.T) {
}
}
func TestKingbaseIdentNeedsQuote(t *testing.T) {
tests := []struct {
name string
in string
want bool
}{
{name: "plain lowercase", in: "ldf_server", want: false},
{name: "starts with underscore", in: "_col", want: false},
{name: "with digits", in: "col123", want: false},
{name: "uppercase", in: "MyTable", want: true},
{name: "reserved word", in: "order", want: true},
{name: "with hyphen", in: "my-col", want: true},
{name: "starts with digit", in: "123col", want: true},
{name: "empty", in: "", want: false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := kingbaseIdentNeedsQuote(tt.in); got != tt.want {
t.Fatalf("kingbaseIdentNeedsQuote(%q) = %v, want %v", tt.in, got, tt.want)
}
})
}
}
func TestSplitKingbaseQualifiedTable(t *testing.T) {
tests := []struct {
name string
@@ -101,7 +59,6 @@ func TestSplitKingbaseQualifiedTable(t *testing.T) {
{name: "plain qualified", in: "ldf_server.t_user", wantSchema: "ldf_server", wantTable: "t_user"},
{name: "double quoted qualified", in: `""ldf_server"".""t_user""`, wantSchema: "ldf_server", wantTable: "t_user"},
{name: "escaped qualified", in: `\"ldf_server\".\"t_user\"`, wantSchema: "ldf_server", wantTable: "t_user"},
{name: "double escaped qualified", in: `\\\"ldf_server\\\".\\\"t_user\\\"`, wantSchema: "ldf_server", wantTable: "t_user"},
{name: "bracket qualified", in: "[ldf_server].[t_user]", wantSchema: "ldf_server", wantTable: "t_user"},
{name: "table only", in: `""t_user""`, wantSchema: "", wantTable: "t_user"},
}

View File

@@ -11,6 +11,7 @@ import (
"time"
"GoNavi-Wails/internal/connection"
"GoNavi-Wails/internal/logger"
"GoNavi-Wails/internal/ssh"
"GoNavi-Wails/internal/utils"
@@ -24,33 +25,30 @@ type MariaDB struct {
pingTimeout time.Duration
}
func (m *MariaDB) getDSN(config connection.ConnectionConfig) (string, error) {
func (m *MariaDB) getDSN(config connection.ConnectionConfig) string {
database := config.Database
protocol := "tcp"
address := fmt.Sprintf("%s:%d", config.Host, config.Port)
if config.UseSSH {
netName, err := ssh.RegisterSSHNetwork(config.SSH)
if err != nil {
return "", fmt.Errorf("创建 SSH 隧道失败:%w", err)
if err == nil {
protocol = netName
address = fmt.Sprintf("%s:%d", config.Host, config.Port)
} else {
logger.Warnf("注册 SSH 网络失败,将尝试直连:地址=%s:%d 用户=%s原因%v", config.Host, config.Port, config.User, err)
}
protocol = netName
}
timeout := getConnectTimeoutSeconds(config)
tlsMode := resolveMySQLTLSMode(config)
return fmt.Sprintf(
"%s:%s@%s(%s)/%s?charset=utf8mb4&parseTime=True&loc=Local&timeout=%ds&tls=%s",
config.User, config.Password, protocol, address, database, timeout, url.QueryEscape(tlsMode),
), nil
return fmt.Sprintf("%s:%s@%s(%s)/%s?charset=utf8mb4&parseTime=True&loc=Local&timeout=%ds&tls=%s",
config.User, config.Password, protocol, address, database, timeout, url.QueryEscape(tlsMode))
}
func (m *MariaDB) Connect(config connection.ConnectionConfig) error {
dsn, err := m.getDSN(config)
if err != nil {
return err
}
dsn := m.getDSN(config)
db, err := sql.Open("mysql", dsn)
if err != nil {
return fmt.Errorf("打开数据库连接失败:%w", err)
@@ -252,22 +250,12 @@ func (m *MariaDB) GetIndexes(dbName, tableName string) ([]connection.IndexDefini
}
}
subPart := 0
if val, ok := row["Sub_part"]; ok && val != nil {
if f, ok := val.(float64); ok {
subPart = int(f)
} else if i, ok := val.(int64); ok {
subPart = int(i)
}
}
idx := connection.IndexDefinition{
Name: fmt.Sprintf("%v", row["Key_name"]),
ColumnName: fmt.Sprintf("%v", row["Column_name"]),
NonUnique: nonUnique,
SeqInIndex: seq,
IndexType: fmt.Sprintf("%v", row["Index_type"]),
SubPart: subPart,
}
indexes = append(indexes, idx)
}
@@ -335,7 +323,7 @@ func (m *MariaDB) ApplyChanges(tableName string, changes connection.ChangeSet) e
var args []interface{}
for k, v := range pk {
wheres = append(wheres, fmt.Sprintf("`%s` = ?", k))
args = append(args, normalizeMySQLComplexValue(normalizeMySQLDateTimeValue(v)))
args = append(args, normalizeMySQLDateTimeValue(v))
}
if len(wheres) == 0 {
continue
@@ -353,7 +341,7 @@ func (m *MariaDB) ApplyChanges(tableName string, changes connection.ChangeSet) e
for k, v := range update.Values {
sets = append(sets, fmt.Sprintf("`%s` = ?", k))
args = append(args, normalizeMySQLComplexValue(normalizeMySQLDateTimeValue(v)))
args = append(args, normalizeMySQLDateTimeValue(v))
}
if len(sets) == 0 {
@@ -363,7 +351,7 @@ func (m *MariaDB) ApplyChanges(tableName string, changes connection.ChangeSet) e
var wheres []string
for k, v := range update.Keys {
wheres = append(wheres, fmt.Sprintf("`%s` = ?", k))
args = append(args, normalizeMySQLComplexValue(normalizeMySQLDateTimeValue(v)))
args = append(args, normalizeMySQLDateTimeValue(v))
}
if len(wheres) == 0 {
@@ -385,7 +373,7 @@ func (m *MariaDB) ApplyChanges(tableName string, changes connection.ChangeSet) e
for k, v := range row {
cols = append(cols, fmt.Sprintf("`%s`", k))
placeholders = append(placeholders, "?")
args = append(args, normalizeMySQLComplexValue(normalizeMySQLDateTimeValue(v)))
args = append(args, normalizeMySQLDateTimeValue(v))
}
if len(cols) == 0 {

View File

@@ -151,14 +151,10 @@ func applyMongoURI(config connection.ConnectionConfig) connection.ConnectionConf
}
}
explicitHost := strings.TrimSpace(config.Host) != ""
explicitHosts := len(config.Hosts) > 0
// 显式填写的 host/hosts 优先级高于 URI避免表单 host 被 URI 中的 localhost 覆盖。
if !explicitHost && !explicitHosts && len(hostsFromURI) > 0 {
if len(config.Hosts) == 0 && len(hostsFromURI) > 0 {
config.Hosts = hostsFromURI
}
if !explicitHost && !explicitHosts && len(hostsFromURI) > 0 {
if strings.TrimSpace(config.Host) == "" && len(hostsFromURI) > 0 {
host, port, ok := parseHostPortWithDefault(hostsFromURI[0], defaultPort)
if ok {
config.Host = host
@@ -255,11 +251,6 @@ func (m *MongoDB) getURI(config connection.ConnectionConfig) string {
params.Set("authMechanism", authMechanism)
}
// 单机模式且未指定副本集名称时,启用 directConnection 避免驱动自动跟随副本集成员发现
if strings.TrimSpace(config.Topology) != "replica" && strings.TrimSpace(config.ReplicaSet) == "" && !config.MongoSRV {
params.Set("directConnection", "true")
}
if encoded := params.Encode(); encoded != "" {
uri += "?" + encoded
}
@@ -285,44 +276,9 @@ func buildMongoAuthAttempts(config connection.ConnectionConfig) []connection.Con
return attempts
}
func mongoURIForcesTLS(uriText string) bool {
trimmed := strings.TrimSpace(uriText)
if trimmed == "" {
return false
}
parsed, err := url.Parse(trimmed)
if err != nil {
return false
}
query := parsed.Query()
for _, key := range []string{"tls", "ssl"} {
value := strings.ToLower(strings.TrimSpace(query.Get(key)))
switch value {
case "1", "true", "t", "yes", "y", "required":
return true
}
}
return false
}
func mongoAttemptSSLLabel(config connection.ConnectionConfig, fallbackToPlain bool) string {
if fallbackToPlain {
return "明文回退"
}
if mongoURIForcesTLS(config.URI) {
return "SSL"
}
enabled, _ := resolveMongoTLSSettings(config)
if enabled {
return "SSL"
}
return "明文"
}
func (m *MongoDB) Connect(config connection.ConnectionConfig) error {
runConfig := applyMongoURI(config)
connectConfig := runConfig
sshRouteHint := ""
if runConfig.UseSSH && runConfig.MongoSRV {
return fmt.Errorf("MongoDB SRV 记录模式暂不支持 SSH 隧道")
@@ -363,7 +319,6 @@ func (m *MongoDB) Connect(config connection.ConnectionConfig) error {
localConfig.URI = ""
localConfig.Hosts = []string{normalizeMongoAddress(host, port)}
connectConfig = localConfig
sshRouteHint = fmt.Sprintf("SSH隧道 %s -> %s:%d", forwarder.LocalAddr, targetHost, targetPort)
logger.Infof("MongoDB 通过本地端口转发连接:%s -> %s:%d", forwarder.LocalAddr, targetHost, targetPort)
}
@@ -377,32 +332,20 @@ func (m *MongoDB) Connect(config connection.ConnectionConfig) error {
if shouldTrySSLPreferredFallback(connectConfig) {
sslAttempts = append(sslAttempts, withSSLDisabled(connectConfig))
}
totalAttempts := 0
for _, attemptConfig := range sslAttempts {
totalAttempts += len(buildMongoAuthAttempts(attemptConfig))
}
attemptNo := 0
var errorDetails []string
for sslIndex, sslConfig := range sslAttempts {
sslLabel := mongoAttemptSSLLabel(sslConfig, sslIndex > 0)
sslLabel := "SSL"
if sslIndex > 0 {
sslLabel = "明文回退"
}
attemptConfigs := buildMongoAuthAttempts(sslConfig)
for index, attemptConfig := range attemptConfigs {
attemptNo++
authLabel := "主库凭据"
if index > 0 {
authLabel = "从库凭据"
}
targets := collectMongoSeeds(attemptConfig)
if len(targets) == 0 {
targets = append(targets, normalizeMongoAddress(attemptConfig.Host, attemptConfig.Port))
}
attemptStarted := time.Now()
logger.Infof(
"MongoDB 连接尝试:%d/%d 模式=%s 凭据=%s 目标=%s 代理=%t",
attemptNo, totalAttempts, sslLabel, authLabel, strings.Join(targets, ","), attemptConfig.UseProxy,
)
if sslIndex > 0 {
attemptConfig.URI = ""
@@ -421,13 +364,7 @@ func (m *MongoDB) Connect(config connection.ConnectionConfig) error {
}
client, err := mongo.Connect(clientOpts)
if err != nil {
logger.Warnf("MongoDB 连接尝试失败:%d/%d 模式=%s 凭据=%s 耗时=%s 错误=%v",
attemptNo, totalAttempts, sslLabel, authLabel, time.Since(attemptStarted).Round(time.Millisecond), err)
detail := fmt.Sprintf("%s %s连接失败: %v", sslLabel, authLabel, err)
if sshRouteHint != "" {
detail = fmt.Sprintf("%s%s", detail, sshRouteHint)
}
errorDetails = append(errorDetails, detail)
errorDetails = append(errorDetails, fmt.Sprintf("%s %s连接失败: %v", sslLabel, authLabel, err))
continue
}
@@ -437,17 +374,9 @@ func (m *MongoDB) Connect(config connection.ConnectionConfig) error {
_ = client.Disconnect(ctx)
cancel()
m.client = nil
logger.Warnf("MongoDB 连接尝试验证失败:%d/%d 模式=%s 凭据=%s 耗时=%s 错误=%v",
attemptNo, totalAttempts, sslLabel, authLabel, time.Since(attemptStarted).Round(time.Millisecond), err)
detail := fmt.Sprintf("%s %s验证失败: %v", sslLabel, authLabel, err)
if sshRouteHint != "" {
detail = fmt.Sprintf("%s%s", detail, sshRouteHint)
}
errorDetails = append(errorDetails, detail)
errorDetails = append(errorDetails, fmt.Sprintf("%s %s验证失败: %v", sslLabel, authLabel, err))
continue
}
logger.Infof("MongoDB 连接尝试成功:%d/%d 模式=%s 凭据=%s 耗时=%s",
attemptNo, totalAttempts, sslLabel, authLabel, time.Since(attemptStarted).Round(time.Millisecond))
if sslIndex > 0 {
logger.Warnf("MongoDB SSL 优先连接失败,已回退至明文连接")
}

View File

@@ -1,39 +0,0 @@
//go:build gonavi_full_drivers || gonavi_mongodb_driver
package db
import (
"testing"
"GoNavi-Wails/internal/connection"
)
func TestApplyMongoURI_ExplicitHostDoesNotAdoptURIHosts(t *testing.T) {
config := connection.ConnectionConfig{
Host: "10.10.10.10",
Port: 27017,
URI: "mongodb://localhost:27017/admin",
}
got := applyMongoURI(config)
if got.Host != "10.10.10.10" {
t.Fatalf("expected host to remain explicit, got %q", got.Host)
}
if len(got.Hosts) != 0 {
t.Fatalf("expected hosts to remain empty when explicit host exists, got %v", got.Hosts)
}
}
func TestApplyMongoURI_ExplicitHostsDoesNotAdoptURIHosts(t *testing.T) {
config := connection.ConnectionConfig{
Host: "10.10.10.10",
Port: 27017,
Hosts: []string{"10.10.10.10:27017", "10.10.10.11:27017"},
URI: "mongodb://localhost:27017,localhost:27018/admin?replicaSet=rs0",
}
got := applyMongoURI(config)
if len(got.Hosts) != 2 || got.Hosts[0] != "10.10.10.10:27017" {
t.Fatalf("expected explicit hosts to stay untouched, got %v", got.Hosts)
}
}

View File

@@ -152,14 +152,10 @@ func applyMongoURI(config connection.ConnectionConfig) connection.ConnectionConf
}
}
explicitHost := strings.TrimSpace(config.Host) != ""
explicitHosts := len(config.Hosts) > 0
// 显式填写的 host/hosts 优先级高于 URI避免表单 host 被 URI 中的 localhost 覆盖。
if !explicitHost && !explicitHosts && len(hostsFromURI) > 0 {
if len(config.Hosts) == 0 && len(hostsFromURI) > 0 {
config.Hosts = hostsFromURI
}
if !explicitHost && !explicitHosts && len(hostsFromURI) > 0 {
if strings.TrimSpace(config.Host) == "" && len(hostsFromURI) > 0 {
host, port, ok := parseHostPortWithDefault(hostsFromURI[0], defaultPort)
if ok {
config.Host = host
@@ -256,11 +252,6 @@ func (m *MongoDBV1) getURI(config connection.ConnectionConfig) string {
params.Set("authMechanism", authMechanism)
}
// 单机模式且未指定副本集名称时,启用 directConnection 避免驱动自动跟随副本集成员发现
if strings.TrimSpace(config.Topology) != "replica" && strings.TrimSpace(config.ReplicaSet) == "" && !config.MongoSRV {
params.Set("directConnection", "true")
}
if encoded := params.Encode(); encoded != "" {
uri += "?" + encoded
}
@@ -286,44 +277,9 @@ func buildMongoAuthAttempts(config connection.ConnectionConfig) []connection.Con
return attempts
}
func mongoURIForcesTLS(uriText string) bool {
trimmed := strings.TrimSpace(uriText)
if trimmed == "" {
return false
}
parsed, err := url.Parse(trimmed)
if err != nil {
return false
}
query := parsed.Query()
for _, key := range []string{"tls", "ssl"} {
value := strings.ToLower(strings.TrimSpace(query.Get(key)))
switch value {
case "1", "true", "t", "yes", "y", "required":
return true
}
}
return false
}
func mongoAttemptSSLLabel(config connection.ConnectionConfig, fallbackToPlain bool) string {
if fallbackToPlain {
return "明文回退"
}
if mongoURIForcesTLS(config.URI) {
return "SSL"
}
enabled, _ := resolveMongoTLSSettings(config)
if enabled {
return "SSL"
}
return "明文"
}
func (m *MongoDBV1) Connect(config connection.ConnectionConfig) error {
runConfig := applyMongoURI(config)
connectConfig := runConfig
sshRouteHint := ""
if runConfig.UseSSH && runConfig.MongoSRV {
return fmt.Errorf("MongoDB SRV 记录模式暂不支持 SSH 隧道")
@@ -364,7 +320,6 @@ func (m *MongoDBV1) Connect(config connection.ConnectionConfig) error {
localConfig.URI = ""
localConfig.Hosts = []string{normalizeMongoAddress(host, port)}
connectConfig = localConfig
sshRouteHint = fmt.Sprintf("SSH隧道 %s -> %s:%d", forwarder.LocalAddr, targetHost, targetPort)
logger.Infof("MongoDB 通过本地端口转发连接:%s -> %s:%d", forwarder.LocalAddr, targetHost, targetPort)
}
@@ -378,32 +333,20 @@ func (m *MongoDBV1) Connect(config connection.ConnectionConfig) error {
if shouldTrySSLPreferredFallback(connectConfig) {
sslAttempts = append(sslAttempts, withSSLDisabled(connectConfig))
}
totalAttempts := 0
for _, attemptConfig := range sslAttempts {
totalAttempts += len(buildMongoAuthAttempts(attemptConfig))
}
attemptNo := 0
var errorDetails []string
for sslIndex, sslConfig := range sslAttempts {
sslLabel := mongoAttemptSSLLabel(sslConfig, sslIndex > 0)
sslLabel := "SSL"
if sslIndex > 0 {
sslLabel = "明文回退"
}
attemptConfigs := buildMongoAuthAttempts(sslConfig)
for index, attemptConfig := range attemptConfigs {
attemptNo++
authLabel := "主库凭据"
if index > 0 {
authLabel = "从库凭据"
}
targets := collectMongoSeeds(attemptConfig)
if len(targets) == 0 {
targets = append(targets, normalizeMongoAddress(attemptConfig.Host, attemptConfig.Port))
}
attemptStarted := time.Now()
logger.Infof(
"MongoDB(v1) 连接尝试:%d/%d 模式=%s 凭据=%s 目标=%s 代理=%t",
attemptNo, totalAttempts, sslLabel, authLabel, strings.Join(targets, ","), attemptConfig.UseProxy,
)
if sslIndex > 0 {
attemptConfig.URI = ""
@@ -424,13 +367,7 @@ func (m *MongoDBV1) Connect(config connection.ConnectionConfig) error {
client, err := mongo.Connect(connectCtx, clientOpts)
connectCancel()
if err != nil {
logger.Warnf("MongoDB(v1) 连接尝试失败:%d/%d 模式=%s 凭据=%s 耗时=%s 错误=%v",
attemptNo, totalAttempts, sslLabel, authLabel, time.Since(attemptStarted).Round(time.Millisecond), err)
detail := fmt.Sprintf("%s %s连接失败: %v", sslLabel, authLabel, err)
if sshRouteHint != "" {
detail = fmt.Sprintf("%s%s", detail, sshRouteHint)
}
errorDetails = append(errorDetails, detail)
errorDetails = append(errorDetails, fmt.Sprintf("%s %s连接失败: %v", sslLabel, authLabel, err))
continue
}
@@ -440,17 +377,9 @@ func (m *MongoDBV1) Connect(config connection.ConnectionConfig) error {
_ = client.Disconnect(ctx)
cancel()
m.client = nil
logger.Warnf("MongoDB(v1) 连接尝试验证失败:%d/%d 模式=%s 凭据=%s 耗时=%s 错误=%v",
attemptNo, totalAttempts, sslLabel, authLabel, time.Since(attemptStarted).Round(time.Millisecond), err)
detail := fmt.Sprintf("%s %s验证失败: %v", sslLabel, authLabel, err)
if sshRouteHint != "" {
detail = fmt.Sprintf("%s%s", detail, sshRouteHint)
}
errorDetails = append(errorDetails, detail)
errorDetails = append(errorDetails, fmt.Sprintf("%s %s验证失败: %v", sslLabel, authLabel, err))
continue
}
logger.Infof("MongoDB(v1) 连接尝试成功:%d/%d 模式=%s 凭据=%s 耗时=%s",
attemptNo, totalAttempts, sslLabel, authLabel, time.Since(attemptStarted).Round(time.Millisecond))
if sslIndex > 0 {
logger.Warnf("MongoDB(v1) SSL 优先连接失败,已回退至明文连接")
}

View File

@@ -1,25 +0,0 @@
//go:build gonavi_mongodb_driver_v1
package db
import (
"testing"
"GoNavi-Wails/internal/connection"
)
func TestApplyMongoURIV1_ExplicitHostDoesNotAdoptURIHosts(t *testing.T) {
config := connection.ConnectionConfig{
Host: "10.10.10.10",
Port: 27017,
URI: "mongodb://localhost:27017/admin",
}
got := applyMongoURI(config)
if got.Host != "10.10.10.10" {
t.Fatalf("expected host to remain explicit, got %q", got.Host)
}
if len(got.Hosts) != 0 {
t.Fatalf("expected hosts to remain empty when explicit host exists, got %v", got.Hosts)
}
}

View File

@@ -3,7 +3,6 @@ package db
import (
"context"
"database/sql"
"encoding/json"
"fmt"
"net/url"
"strconv"
@@ -169,26 +168,26 @@ func collectMySQLAddresses(config connection.ConnectionConfig) []string {
return result
}
func (m *MySQLDB) getDSN(config connection.ConnectionConfig) (string, error) {
func (m *MySQLDB) getDSN(config connection.ConnectionConfig) string {
database := config.Database
protocol := "tcp"
address := normalizeMySQLAddress(config.Host, config.Port)
if config.UseSSH {
netName, err := ssh.RegisterSSHNetwork(config.SSH)
if err != nil {
return "", fmt.Errorf("创建 SSH 隧道失败:%w", err)
if err == nil {
protocol = netName
address = normalizeMySQLAddress(config.Host, config.Port)
} else {
logger.Warnf("注册 SSH 网络失败,将尝试直连:地址=%s:%d 用户=%s原因%v", config.Host, config.Port, config.User, err)
}
protocol = netName
}
timeout := getConnectTimeoutSeconds(config)
tlsMode := resolveMySQLTLSMode(config)
return fmt.Sprintf(
"%s:%s@%s(%s)/%s?charset=utf8mb4&parseTime=True&loc=Local&timeout=%ds&tls=%s",
config.User, config.Password, protocol, address, database, timeout, url.QueryEscape(tlsMode),
), nil
return fmt.Sprintf("%s:%s@%s(%s)/%s?charset=utf8mb4&parseTime=True&loc=Local&timeout=%ds&tls=%s",
config.User, config.Password, protocol, address, database, timeout, url.QueryEscape(tlsMode))
}
func resolveMySQLCredential(config connection.ConnectionConfig, addressIndex int) (string, string) {
@@ -226,11 +225,7 @@ func (m *MySQLDB) Connect(config connection.ConnectionConfig) error {
candidateConfig.Port = port
candidateConfig.User, candidateConfig.Password = resolveMySQLCredential(runConfig, index)
dsn, err := m.getDSN(candidateConfig)
if err != nil {
errorDetails = append(errorDetails, fmt.Sprintf("%s 生成连接串失败: %v", address, err))
continue
}
dsn := m.getDSN(candidateConfig)
db, err := sql.Open("mysql", dsn)
if err != nil {
errorDetails = append(errorDetails, fmt.Sprintf("%s 打开失败: %v", address, err))
@@ -446,22 +441,12 @@ func (m *MySQLDB) GetIndexes(dbName, tableName string) ([]connection.IndexDefini
}
}
subPart := 0
if val, ok := row["Sub_part"]; ok && val != nil {
if f, ok := val.(float64); ok {
subPart = int(f)
} else if i, ok := val.(int64); ok {
subPart = int(i)
}
}
idx := connection.IndexDefinition{
Name: fmt.Sprintf("%v", row["Key_name"]),
ColumnName: fmt.Sprintf("%v", row["Column_name"]),
NonUnique: nonUnique,
SeqInIndex: seq,
IndexType: fmt.Sprintf("%v", row["Index_type"]),
SubPart: subPart,
}
indexes = append(indexes, idx)
}
@@ -621,18 +606,6 @@ func (m *MySQLDB) ApplyChanges(tableName string, changes connection.ChangeSet) e
return tx.Commit()
}
func normalizeMySQLComplexValue(value interface{}) interface{} {
switch v := value.(type) {
case map[string]interface{}, []interface{}:
if data, err := json.Marshal(v); err == nil {
return string(data)
}
return fmt.Sprintf("%v", value)
default:
return value
}
}
func normalizeMySQLDateTimeValue(value interface{}) interface{} {
text, ok := value.(string)
if !ok {
@@ -697,7 +670,7 @@ func (m *MySQLDB) loadColumnTypeMap(tableName string) map[string]string {
func normalizeMySQLValueForInsert(columnName string, value interface{}, columnTypeMap map[string]string) (interface{}, bool) {
columnType := strings.ToLower(strings.TrimSpace(columnTypeMap[strings.ToLower(strings.TrimSpace(columnName))]))
if !isMySQLTemporalColumnType(columnType) {
return normalizeMySQLComplexValue(value), false
return value, false
}
text, ok := value.(string)
if ok && strings.TrimSpace(text) == "" {

View File

@@ -1,26 +0,0 @@
package db
import (
"testing"
"GoNavi-Wails/internal/connection"
)
func TestMySQLDSN_UseSSH_ShouldFailWhenSSHInvalid(t *testing.T) {
m := &MySQLDB{}
_, err := m.getDSN(connection.ConnectionConfig{
Host: "127.0.0.1",
Port: 3306,
User: "root",
UseSSH: true,
SSH: connection.SSHConfig{
Host: "127.0.0.1",
Port: 0, // invalid port, should fail immediately
User: "bad",
Password: "bad",
},
})
if err == nil {
t.Fatalf("expected error when UseSSH=true and SSH config invalid")
}
}

View File

@@ -9,7 +9,6 @@ import (
"io"
"os"
"os/exec"
"reflect"
"runtime"
"strings"
"sync"
@@ -146,7 +145,6 @@ func (c *optionalDriverAgentClient) captureStderr(stderr io.Reader) {
if line == "" {
continue
}
logger.Warnf("%s 驱动代理 stderr: %s", driverDisplayName(c.driver), line)
c.stderrMu.Lock()
if c.stderr.Len() > 0 {
c.stderr.WriteString(" | ")
@@ -270,7 +268,6 @@ func (d *OptionalDriverAgentDB) Connect(config connection.ConnectionConfig) erro
return err
}
d.client = client
d.ensureKingbaseSearchPath(config)
return nil
}
@@ -491,16 +488,6 @@ func (d *OptionalDriverAgentDB) ApplyChanges(tableName string, changes connectio
if err != nil {
return err
}
if strings.EqualFold(d.driverType, "kingbase") {
if normalized := normalizeKingbaseAgentTableName(tableName); normalized != "" {
tableName = normalized
}
if normalized, normErr := d.normalizeKingbaseAgentChangeSet(tableName, changes); normErr == nil {
changes = normalized
} else {
logger.Warnf("Kingbase ApplyChanges 字段名规范化失败:%v", normErr)
}
}
return client.call(optionalAgentRequest{
Method: optionalAgentMethodApplyChanges,
TableName: tableName,
@@ -515,269 +502,6 @@ func (d *OptionalDriverAgentDB) requireClient() (*optionalDriverAgentClient, err
return d.client, nil
}
func (d *OptionalDriverAgentDB) ensureKingbaseSearchPath(config connection.ConnectionConfig) {
if !strings.EqualFold(d.driverType, "kingbase") {
return
}
client, err := d.requireClient()
if err != nil || client == nil {
return
}
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
schemas, err := d.listKingbaseSchemas(ctx)
if err != nil || len(schemas) == 0 {
if err != nil {
logger.Warnf("人大金仓驱动代理探测 schema 失败:%v", err)
}
return
}
searchPath := buildKingbaseSearchPathFromSchemas(schemas)
if strings.TrimSpace(searchPath) == "" {
return
}
if _, err := d.ExecContext(ctx, fmt.Sprintf("SET search_path TO %s", searchPath)); err != nil {
logger.Warnf("人大金仓驱动代理设置 search_path 失败:%v", err)
return
}
logger.Infof("人大金仓驱动代理已设置默认 search_path%s", searchPath)
}
func (d *OptionalDriverAgentDB) listKingbaseSchemas(ctx context.Context) ([]string, error) {
query := `SELECT nspname FROM pg_namespace
WHERE nspname NOT IN ('pg_catalog', 'information_schema')
AND nspname NOT LIKE 'pg_%'
ORDER BY nspname`
rows, _, err := d.QueryContext(ctx, query)
if err != nil {
return nil, err
}
schemas := make([]string, 0, len(rows))
for _, row := range rows {
for key, val := range row {
if strings.EqualFold(key, "nspname") || strings.EqualFold(key, "schema") {
name := strings.TrimSpace(fmt.Sprintf("%v", val))
if name != "" {
schemas = append(schemas, name)
}
break
}
}
if len(row) == 1 {
for _, val := range row {
name := strings.TrimSpace(fmt.Sprintf("%v", val))
if name != "" {
schemas = append(schemas, name)
}
break
}
}
}
return schemas, nil
}
func buildKingbaseSearchPathFromSchemas(schemas []string) string {
if len(schemas) == 0 {
return ""
}
seen := make(map[string]struct{}, len(schemas)+1)
parts := make([]string, 0, len(schemas)+1)
for _, name := range schemas {
trimmed := normalizeKingbaseAgentIdent(name)
if trimmed == "" {
continue
}
key := strings.ToLower(trimmed)
if _, ok := seen[key]; ok {
continue
}
seen[key] = struct{}{}
parts = append(parts, quoteKingbaseAgentIdent(trimmed))
}
if _, ok := seen["public"]; !ok {
parts = append(parts, "public")
}
return strings.Join(parts, ", ")
}
func quoteKingbaseAgentIdent(name string) string {
n := normalizeKingbaseAgentIdent(name)
if n == "" {
return "\"\""
}
n = strings.ReplaceAll(n, `"`, `""`)
return `"` + n + `"`
}
func normalizeKingbaseAgentTableName(raw string) string {
schema, table := splitKingbaseQualifiedNameCommon(raw)
if table == "" {
return ""
}
if schema == "" {
return table
}
return schema + "." + table
}
func normalizeKingbaseAgentIdent(raw string) string {
return normalizeKingbaseIdentCommon(raw)
}
type kingbaseAgentColumnIndex struct {
exact map[string]string
compact map[string]string
}
func buildKingbaseAgentColumnIndex(columns []string) kingbaseAgentColumnIndex {
exact := make(map[string]string, len(columns))
compact := make(map[string]string, len(columns))
compactSeen := make(map[string]string, len(columns))
compactDup := make(map[string]struct{}, len(columns))
for _, col := range columns {
name := normalizeKingbaseAgentIdent(col)
if name == "" {
continue
}
lower := strings.ToLower(name)
if _, ok := exact[lower]; !ok {
exact[lower] = name
}
key := normalizeKingbaseAgentCompactKey(name)
if key == "" {
continue
}
if prev, ok := compactSeen[key]; ok && !strings.EqualFold(prev, name) {
compactDup[key] = struct{}{}
continue
}
compactSeen[key] = name
}
if len(compactDup) > 0 {
for key := range compactDup {
delete(compactSeen, key)
}
}
for key, value := range compactSeen {
compact[key] = value
}
return kingbaseAgentColumnIndex{exact: exact, compact: compact}
}
func normalizeKingbaseAgentCompactKey(raw string) string {
name := normalizeKingbaseAgentIdent(raw)
if name == "" {
return ""
}
name = strings.ToLower(strings.TrimSpace(name))
name = strings.Join(strings.Fields(name), "")
name = strings.ReplaceAll(name, "_", "")
return name
}
func resolveKingbaseAgentColumnName(name string, index kingbaseAgentColumnIndex) string {
cleaned := normalizeKingbaseAgentIdent(name)
if cleaned == "" {
return name
}
lower := strings.ToLower(cleaned)
if actual, ok := index.exact[lower]; ok {
return actual
}
compact := normalizeKingbaseAgentCompactKey(cleaned)
if actual, ok := index.compact[compact]; ok {
return actual
}
return cleaned
}
func normalizeKingbaseAgentChangeSetByColumns(changes connection.ChangeSet, columns []string) (connection.ChangeSet, error) {
index := buildKingbaseAgentColumnIndex(columns)
if len(index.exact) == 0 && len(index.compact) == 0 {
return changes, nil
}
mapRow := func(row map[string]interface{}) (map[string]interface{}, error) {
if row == nil {
return row, nil
}
out := make(map[string]interface{}, len(row))
for key, value := range row {
nextKey := resolveKingbaseAgentColumnName(key, index)
if existing, ok := out[nextKey]; ok && !reflect.DeepEqual(existing, value) {
return nil, fmt.Errorf("duplicate mapped column %q", nextKey)
}
out[nextKey] = value
}
return out, nil
}
next := connection.ChangeSet{
Inserts: make([]map[string]interface{}, 0, len(changes.Inserts)),
Updates: make([]connection.UpdateRow, 0, len(changes.Updates)),
Deletes: make([]map[string]interface{}, 0, len(changes.Deletes)),
}
for _, row := range changes.Inserts {
mapped, err := mapRow(row)
if err != nil {
return changes, err
}
next.Inserts = append(next.Inserts, mapped)
}
for _, upd := range changes.Updates {
keys, err := mapRow(upd.Keys)
if err != nil {
return changes, err
}
values, err := mapRow(upd.Values)
if err != nil {
return changes, err
}
next.Updates = append(next.Updates, connection.UpdateRow{
Keys: keys,
Values: values,
})
}
for _, row := range changes.Deletes {
mapped, err := mapRow(row)
if err != nil {
return changes, err
}
next.Deletes = append(next.Deletes, mapped)
}
return next, nil
}
func (d *OptionalDriverAgentDB) normalizeKingbaseAgentChangeSet(tableName string, changes connection.ChangeSet) (connection.ChangeSet, error) {
columns, err := d.GetColumns("", tableName)
if err != nil {
return changes, err
}
if len(columns) == 0 {
return changes, nil
}
names := make([]string, 0, len(columns))
for _, col := range columns {
name := strings.TrimSpace(col.Name)
if name == "" {
continue
}
names = append(names, name)
}
return normalizeKingbaseAgentChangeSetByColumns(changes, names)
}
func timeoutMsFromContext(ctx context.Context) int64 {
deadline, ok := ctx.Deadline()
if !ok {

View File

@@ -1,67 +1,32 @@
package db
import (
"context"
"testing"
"GoNavi-Wails/internal/connection"
"time"
)
func TestNormalizeKingbaseAgentTableName(t *testing.T) {
tests := []struct {
name string
in string
want string
}{
{name: "plain", in: "ldf_server.andon_events", want: "ldf_server.andon_events"},
{name: "quoted", in: `"ldf_server"."andon_events"`, want: "ldf_server.andon_events"},
{name: "double quoted", in: `""ldf_server"".""andon_events""`, want: "ldf_server.andon_events"},
{name: "escaped", in: `\"ldf_server\".\"andon_events\"`, want: "ldf_server.andon_events"},
{name: "double escaped", in: `\\\"ldf_server\\\".\\\"andon_events\\\"`, want: "ldf_server.andon_events"},
{name: "space around dot", in: ` "ldf_server" . "andon_events" `, want: "ldf_server.andon_events"},
{name: "table only", in: `bcs_barcode`, want: "bcs_barcode"},
{name: "table only quoted", in: `"bcs_barcode"`, want: "bcs_barcode"},
{name: "table only double quoted", in: `""bcs_barcode""`, want: "bcs_barcode"},
{name: "table only double escaped", in: `\\\"bcs_barcode\\\"`, want: "bcs_barcode"},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := normalizeKingbaseAgentTableName(tt.in); got != tt.want {
t.Fatalf("normalizeKingbaseAgentTableName(%q) = %q, want %q", tt.in, got, tt.want)
}
})
func TestTimeoutMsFromContext_NoDeadline(t *testing.T) {
if got := timeoutMsFromContext(context.Background()); got != 0 {
t.Fatalf("无 deadline 时应返回 0got=%d", got)
}
}
func TestNormalizeKingbaseAgentChangeSetByColumns(t *testing.T) {
columns := []string{"andon_events_id", "event_name", "event_code"}
input := connection.ChangeSet{
Inserts: []map[string]interface{}{
{"event name": "物料1", "event_code": "EV-0001", "andon_events_id": 1},
},
Updates: []connection.UpdateRow{
{Keys: map[string]interface{}{"andon_events_id": 1}, Values: map[string]interface{}{"event name": "物料2"}},
},
Deletes: []map[string]interface{}{
{"andon_events_id": 1},
},
}
func TestTimeoutMsFromContext_WithDeadline(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
out, err := normalizeKingbaseAgentChangeSetByColumns(input, columns)
if err != nil {
t.Fatalf("normalizeKingbaseAgentChangeSetByColumns error: %v", err)
}
if _, ok := out.Inserts[0]["event_name"]; !ok {
t.Fatalf("expected insert to map \"event name\" -> \"event_name\"")
}
if _, ok := out.Inserts[0]["event name"]; ok {
t.Fatalf("unexpected insert key \"event name\" after normalization")
}
if _, ok := out.Updates[0].Values["event_name"]; !ok {
t.Fatalf("expected update values to map \"event name\" -> \"event_name\"")
}
if _, ok := out.Updates[0].Values["event name"]; ok {
t.Fatalf("unexpected update value key \"event name\" after normalization")
got := timeoutMsFromContext(ctx)
if got <= 0 {
t.Fatalf("有 deadline 时应返回正值got=%d", got)
}
}
func TestTimeoutMsFromContext_ExpiredDeadline(t *testing.T) {
ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(-time.Second))
defer cancel()
if got := timeoutMsFromContext(ctx); got != 1 {
t.Fatalf("过期 deadline 应返回 1got=%d", got)
}
}

View File

@@ -1,4 +1,4 @@
package db
package db
import (
"encoding/hex"

View File

@@ -1,168 +0,0 @@
//go:build gonavi_full_drivers || gonavi_tdengine_driver
package db
import (
"context"
"database/sql"
"database/sql/driver"
"fmt"
"strings"
"sync"
"testing"
"GoNavi-Wails/internal/connection"
)
const tdengineRecordingDriverName = "gonavi_tdengine_recording"
var (
registerTDengineRecordingDriverOnce sync.Once
tdengineRecordingDriverMu sync.Mutex
tdengineRecordingDriverSeq int
tdengineRecordingDriverStates = map[string]*tdengineRecordingState{}
)
type tdengineRecordingState struct {
mu sync.Mutex
queries []string
execErr error
}
func (s *tdengineRecordingState) snapshotQueries() []string {
s.mu.Lock()
defer s.mu.Unlock()
queries := make([]string, len(s.queries))
copy(queries, s.queries)
return queries
}
type tdengineRecordingDriver struct{}
func (tdengineRecordingDriver) Open(name string) (driver.Conn, error) {
tdengineRecordingDriverMu.Lock()
state := tdengineRecordingDriverStates[name]
tdengineRecordingDriverMu.Unlock()
if state == nil {
return nil, fmt.Errorf("recording state not found: %s", name)
}
return &tdengineRecordingConn{state: state}, nil
}
type tdengineRecordingConn struct {
state *tdengineRecordingState
}
func (c *tdengineRecordingConn) Prepare(query string) (driver.Stmt, error) {
return nil, fmt.Errorf("prepare not supported in tdengine recording driver: %s", query)
}
func (c *tdengineRecordingConn) Close() error { return nil }
func (c *tdengineRecordingConn) Begin() (driver.Tx, error) {
return nil, fmt.Errorf("transactions not supported in tdengine recording driver")
}
func (c *tdengineRecordingConn) ExecContext(_ context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
if len(args) > 0 {
return nil, fmt.Errorf("unexpected exec args: %d", len(args))
}
c.state.mu.Lock()
defer c.state.mu.Unlock()
if c.state.execErr != nil {
return nil, c.state.execErr
}
c.state.queries = append(c.state.queries, query)
return driver.RowsAffected(1), nil
}
var _ driver.ExecerContext = (*tdengineRecordingConn)(nil)
func openTDengineRecordingDB(t *testing.T) (*sql.DB, *tdengineRecordingState) {
t.Helper()
registerTDengineRecordingDriverOnce.Do(func() {
sql.Register(tdengineRecordingDriverName, tdengineRecordingDriver{})
})
tdengineRecordingDriverMu.Lock()
tdengineRecordingDriverSeq++
dsn := fmt.Sprintf("tdengine-recording-%d", tdengineRecordingDriverSeq)
state := &tdengineRecordingState{}
tdengineRecordingDriverStates[dsn] = state
tdengineRecordingDriverMu.Unlock()
dbConn, err := sql.Open(tdengineRecordingDriverName, dsn)
if err != nil {
t.Fatalf("打开 recording db 失败: %v", err)
}
t.Cleanup(func() {
_ = dbConn.Close()
tdengineRecordingDriverMu.Lock()
delete(tdengineRecordingDriverStates, dsn)
tdengineRecordingDriverMu.Unlock()
})
return dbConn, state
}
func TestTDengineApplyChanges_InsertsIntoQualifiedTable(t *testing.T) {
t.Parallel()
dbConn, state := openTDengineRecordingDB(t)
td := &TDengineDB{conn: dbConn}
changes := connection.ChangeSet{
Inserts: []map[string]interface{}{
{
"ts": "2026-03-09 10:00:00",
"value": 12.5,
"device": "sensor-a",
"enabled": true,
},
},
}
if err := td.ApplyChanges("analytics.metrics", changes); err != nil {
t.Fatalf("ApplyChanges 返回错误: %v", err)
}
queries := state.snapshotQueries()
if len(queries) != 1 {
t.Fatalf("期望执行 1 条 SQL实际 %d 条: %#v", len(queries), queries)
}
want := "INSERT INTO `analytics`.`metrics` (`device`, `enabled`, `ts`, `value`) VALUES ('sensor-a', 1, '2026-03-09 10:00:00', 12.5)"
if queries[0] != want {
t.Fatalf("插入 SQL 不符合预期\nwant: %s\n got: %s", want, queries[0])
}
}
func TestTDengineApplyChanges_RejectsMixedUpdatesWithoutPartialWrite(t *testing.T) {
t.Parallel()
dbConn, state := openTDengineRecordingDB(t)
td := &TDengineDB{conn: dbConn}
changes := connection.ChangeSet{
Inserts: []map[string]interface{}{{
"ts": "2026-03-09 10:00:00",
"value": 12.5,
}},
Updates: []connection.UpdateRow{{
Keys: map[string]interface{}{"ts": "2026-03-09 10:00:00"},
Values: map[string]interface{}{"value": 18.8},
}},
}
err := td.ApplyChanges("metrics", changes)
if err == nil {
t.Fatalf("期望 mixed changes 被拒绝")
}
if !strings.Contains(err.Error(), "UPDATE/DELETE") {
t.Fatalf("错误信息未说明限制边界: %v", err)
}
if queries := state.snapshotQueries(); len(queries) != 0 {
t.Fatalf("期望拒绝 mixed changes 时不执行任何 SQL实际=%#v", queries)
}
}

View File

@@ -7,7 +7,6 @@ import (
"database/sql"
"fmt"
"net"
"sort"
"strconv"
"strings"
"time"
@@ -363,83 +362,6 @@ func (t *TDengineDB) GetTriggers(dbName, tableName string) ([]connection.Trigger
return []connection.TriggerDefinition{}, nil
}
func (t *TDengineDB) ApplyChanges(tableName string, changes connection.ChangeSet) error {
if t.conn == nil {
return fmt.Errorf("connection not open")
}
if strings.TrimSpace(tableName) == "" {
return fmt.Errorf("table name required")
}
if len(changes.Updates) > 0 || len(changes.Deletes) > 0 {
return fmt.Errorf("TDengine 目标端当前仅支持 INSERT 写入,暂不支持 UPDATE/DELETE 差异同步,请改用仅插入或全量覆盖模式")
}
qualifiedTable := quoteTDengineTable("", tableName)
for _, row := range changes.Inserts {
query, err := buildTDengineInsertSQL(qualifiedTable, row)
if err != nil {
return err
}
if query == "" {
continue
}
if _, err := t.conn.Exec(query); err != nil {
return fmt.Errorf("insert error: %v; sql=%s", err, query)
}
}
return nil
}
func buildTDengineInsertSQL(qualifiedTable string, row map[string]interface{}) (string, error) {
if strings.TrimSpace(qualifiedTable) == "" {
return "", fmt.Errorf("qualified table required")
}
if len(row) == 0 {
return "", nil
}
cols := make([]string, 0, len(row))
for key := range row {
if strings.TrimSpace(key) == "" {
continue
}
cols = append(cols, key)
}
if len(cols) == 0 {
return "", nil
}
sort.Strings(cols)
quotedCols := make([]string, 0, len(cols))
values := make([]string, 0, len(cols))
for _, col := range cols {
quotedCols = append(quotedCols, fmt.Sprintf("`%s`", escapeBacktickIdent(col)))
values = append(values, tdengineLiteral(row[col]))
}
return fmt.Sprintf("INSERT INTO %s (%s) VALUES (%s)", qualifiedTable, strings.Join(quotedCols, ", "), strings.Join(values, ", ")), nil
}
func tdengineLiteral(value interface{}) string {
switch val := value.(type) {
case nil:
return "NULL"
case bool:
if val {
return "1"
}
return "0"
case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, float32, float64:
return fmt.Sprintf("%v", val)
case time.Time:
return fmt.Sprintf("'%s'", val.Format("2006-01-02 15:04:05"))
case []byte:
return fmt.Sprintf("'%s'", strings.ReplaceAll(string(val), "'", "''"))
default:
return fmt.Sprintf("'%s'", strings.ReplaceAll(fmt.Sprintf("%v", val), "'", "''"))
}
}
func getValueFromRow(row map[string]interface{}, keys ...string) (interface{}, bool) {
if len(row) == 0 {
return nil, false

View File

@@ -14,9 +14,8 @@ import (
)
const (
envLogDir = "GONAVI_LOG_DIR"
appHiddenDir = ".GoNavi"
appLogDirName = "Logs"
envLogDir = "GONAVI_LOG_DIR"
appDirName = "GoNavi"
logFileName = "gonavi.log"
logRotateMaxBytes = 10 * 1024 * 1024 // 10MB
@@ -38,7 +37,7 @@ func Init() {
defer logMu.Unlock()
logPath = path
logInst = log.New(out, "", log.Ldate|log.Ltime|log.Lmicroseconds)
logInst.Printf("[INFO] 日志初始化完成,日志文件:%s", logPath)
logInst.Printf("[信息] 日志初始化完成,日志文件:%s", logPath)
})
}
@@ -63,15 +62,15 @@ func Close() {
}
func Infof(format string, args ...any) {
printf("INFO", format, args...)
printf("信息", format, args...)
}
func Warnf(format string, args ...any) {
printf("WARN", format, args...)
printf("警告", format, args...)
}
func Errorf(format string, args ...any) {
printf("ERROR", format, args...)
printf("错误", format, args...)
}
func Error(err error, format string, args ...any) {
@@ -116,58 +115,37 @@ func ErrorChain(err error) string {
func printf(level string, format string, args ...any) {
Init()
logMu.Lock()
defer logMu.Unlock()
inst := logInst
logMu.Unlock()
if inst == nil {
return
}
inst.Printf("[%s] %s", level, fmt.Sprintf(format, args...))
if logFile != nil {
_ = logFile.Sync()
}
}
func initOutput() (string, io.Writer) {
dir := strings.TrimSpace(os.Getenv(envLogDir))
if dir == "" {
dir = defaultLogDir()
base, err := os.UserConfigDir()
if err != nil || strings.TrimSpace(base) == "" {
base = os.TempDir()
}
dir = filepath.Join(base, appDirName, "logs")
}
if path, writer, ok := openLogFile(dir); ok {
return path, writer
}
fallbackDir := filepath.Join(os.TempDir(), appHiddenDir, appLogDirName)
if path, writer, ok := openLogFile(fallbackDir); ok {
return path, writer
}
return "", os.Stderr
}
func defaultLogDir() string {
home, err := os.UserHomeDir()
if err != nil || strings.TrimSpace(home) == "" {
return filepath.Join(os.TempDir(), appHiddenDir, appLogDirName)
}
return filepath.Join(home, appHiddenDir, appLogDirName)
}
func openLogFile(dir string) (string, io.Writer, bool) {
if strings.TrimSpace(dir) == "" {
return "", nil, false
}
if err := os.MkdirAll(dir, 0o755); err != nil {
return "", nil, false
return filepath.Join(dir, logFileName), os.Stderr
}
path := filepath.Join(dir, logFileName)
rotateIfNeeded(path, dir)
f, err := os.OpenFile(path, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0o644)
if err != nil {
return "", nil, false
return path, os.Stderr
}
logFile = f
return path, f, true
return path, f
}
func rotateIfNeeded(path, dir string) {

View File

@@ -5,7 +5,6 @@ import (
"crypto/tls"
"fmt"
"net"
"net/url"
"strconv"
"strings"
"sync"
@@ -175,31 +174,8 @@ func (r *RedisClientImpl) toDisplayKey(key string) string {
return strings.TrimPrefix(key, prefix)
}
// sanitizeRedisPassword 对 Redis 密码进行防御性 URL 解码。
// 当密码中包含 URL 编码序列(如 %40)时,尝试解码还原原始字符。
// 这可以防止前端 URI 构建中 encodeURIComponent 编码后的密码被误传入。
func sanitizeRedisPassword(password string) string {
if password == "" {
return password
}
// 仅当密码中包含 '%' 且后跟两位十六进制数字时,才尝试 URL 解码
if !strings.Contains(password, "%") {
return password
}
decoded, err := url.QueryUnescape(password)
if err != nil {
// 解码失败,使用原始密码
return password
}
if decoded != password {
logger.Warnf("Redis 密码检测到 URL 编码,已自动解码(原长度=%d 解码后长度=%d", len(password), len(decoded))
}
return decoded
}
// Connect establishes a connection to Redis
func (r *RedisClientImpl) Connect(config connection.ConnectionConfig) error {
config.Password = sanitizeRedisPassword(config.Password)
r.config = config
if r.config.RedisDB < 0 || r.config.RedisDB > 15 {
r.config.RedisDB = 0

View File

@@ -1,81 +0,0 @@
package redis
import "testing"
// TestSanitizeRedisPassword covers the defensive URL-decoding of Redis
// passwords: plain passwords pass through unchanged, valid percent-encoded
// sequences (%40, %23, %2B, %20, ...) are decoded, and malformed or
// literal '%' sequences leave the input untouched.
func TestSanitizeRedisPassword(t *testing.T) {
	tests := []struct {
		name     string
		input    string
		expected string
	}{
		{
			name:     "empty password",
			input:    "",
			expected: "",
		},
		{
			name:     "plain password without special chars",
			input:    "mypassword123",
			expected: "mypassword123",
		},
		{
			name:     "password with @ not encoded",
			input:    "p@ssword",
			expected: "p@ssword",
		},
		{
			name:     "password with @ URL-encoded as %40",
			input:    "p%40ssword",
			expected: "p@ssword",
		},
		{
			name:     "password with multiple encoded chars",
			input:    "p%40ss%23word",
			expected: "p@ss#word",
		},
		{
			name:     "password with + encoded as %2B",
			input:    "p%2Bss",
			expected: "p+ss",
		},
		{
			name:     "password that is purely encoded",
			input:    "%40%23%24",
			expected: "@#$",
		},
		{
			// Invalid hex after '%' must not be decoded (QueryUnescape errors).
			name:     "password with invalid percent encoding",
			input:    "p%ZZssword",
			expected: "p%ZZssword",
		},
		{
			name:     "password with trailing percent",
			input:    "password%",
			expected: "password%",
		},
		{
			name:     "password with literal percent not encoding anything",
			input:    "100%safe",
			expected: "100%safe",
		},
		{
			name:     "password with space encoded as %20",
			input:    "my%20pass",
			expected: "my pass",
		},
		{
			name:     "complex password with mixed content",
			input:    "P%40ss%23w0rd!",
			expected: "P@ss#w0rd!",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := sanitizeRedisPassword(tt.input)
			if result != tt.expected {
				t.Errorf("sanitizeRedisPassword(%q) = %q, want %q", tt.input, result, tt.expected)
			}
		})
	}
}

View File

@@ -2,13 +2,10 @@ package ssh
import (
"context"
"crypto/sha256"
"encoding/hex"
"fmt"
"io"
"net"
"os"
"strconv"
"sync"
"time"
@@ -72,7 +69,7 @@ func connectSSH(config connection.SSHConfig) (*ssh.Client, error) {
}
}
}
if config.Password != "" {
authMethods = append(authMethods, ssh.Password(config.Password))
}
@@ -108,7 +105,7 @@ func RegisterSSHNetwork(sshConfig connection.SSHConfig) (string, error) {
// Generate unique network name
netName := fmt.Sprintf("ssh_%s_%d", sshConfig.Host, time.Now().UnixNano())
logger.Infof("注册 SSH 网络:%s地址=%s:%d 用户=%s", netName, sshConfig.Host, sshConfig.Port, sshConfig.User)
mysql.RegisterDialContext(netName, func(ctx context.Context, addr string) (net.Conn, error) {
return dialContext(ctx, client, "tcp", addr)
})
@@ -118,58 +115,12 @@ func RegisterSSHNetwork(sshConfig connection.SSHConfig) (string, error) {
// sshClientCache stores SSH clients to avoid creating multiple connections
var (
sshClientCache = make(map[sshClientCacheKey]*ssh.Client)
sshClientCache = make(map[string]*ssh.Client)
sshClientCacheMu sync.RWMutex
localForwarders = make(map[forwarderCacheKey]*LocalForwarder)
localForwarders = make(map[string]*LocalForwarder)
forwarderMu sync.RWMutex
)
type sshClientCacheKey struct {
host string
port int
user string
auth string
}
type forwarderCacheKey struct {
ssh sshClientCacheKey
remoteHost string
remotePort int
}
func sshAuthFingerprint(config connection.SSHConfig) string {
hasher := sha256.New()
_, _ = hasher.Write([]byte(config.Password))
_, _ = hasher.Write([]byte{0})
_, _ = hasher.Write([]byte(config.KeyPath))
if config.KeyPath != "" {
if st, err := os.Stat(config.KeyPath); err == nil {
_, _ = hasher.Write([]byte{0})
_, _ = hasher.Write([]byte(st.ModTime().UTC().Format(time.RFC3339Nano)))
_, _ = hasher.Write([]byte{0})
_, _ = hasher.Write([]byte(strconv.FormatInt(st.Size(), 10)))
} else {
_, _ = hasher.Write([]byte{0})
_, _ = hasher.Write([]byte("stat_err"))
}
}
sum := hasher.Sum(nil)
return hex.EncodeToString(sum[:8])
}
func newSSHClientCacheKey(config connection.SSHConfig) sshClientCacheKey {
return sshClientCacheKey{
host: config.Host,
port: config.Port,
user: config.User,
auth: sshAuthFingerprint(config),
}
}
func formatSSHClientKeyForLog(key sshClientCacheKey) string {
return fmt.Sprintf("%s:%d 用户=%s", key.host, key.port, key.user)
}
// LocalForwarder represents a local port forwarder through SSH
type LocalForwarder struct {
LocalAddr string
@@ -298,13 +249,9 @@ func (f *LocalForwarder) IsClosed() bool {
// GetOrCreateLocalForwarder returns a cached forwarder or creates a new one
func GetOrCreateLocalForwarder(sshConfig connection.SSHConfig, remoteHost string, remotePort int) (*LocalForwarder, error) {
key := forwarderCacheKey{
ssh: newSSHClientCacheKey(sshConfig),
remoteHost: remoteHost,
remotePort: remotePort,
}
logKey := fmt.Sprintf("%s:%d:%s->%s:%d",
sshConfig.Host, sshConfig.Port, sshConfig.User, remoteHost, remotePort)
key := fmt.Sprintf("%s:%d:%s->%s:%d",
sshConfig.Host, sshConfig.Port, sshConfig.User,
remoteHost, remotePort)
forwarderMu.RLock()
forwarder, exists := localForwarders[key]
@@ -312,7 +259,7 @@ func GetOrCreateLocalForwarder(sshConfig connection.SSHConfig, remoteHost string
// Check if exists and is still valid
if exists && forwarder != nil && !forwarder.IsClosed() {
logger.Infof("复用已有端口转发:%s", logKey)
logger.Infof("复用已有端口转发:%s", key)
return forwarder, nil
}
@@ -340,18 +287,24 @@ func CloseAllForwarders() {
forwarderMu.Lock()
defer forwarderMu.Unlock()
for _, forwarder := range localForwarders {
for key, forwarder := range localForwarders {
if forwarder != nil {
_ = forwarder.Close()
logger.Infof("已关闭端口转发:本地 %s -> 远程 %s", forwarder.LocalAddr, forwarder.RemoteAddr)
logger.Infof("已关闭端口转发:%s", key)
}
}
localForwarders = make(map[forwarderCacheKey]*LocalForwarder)
localForwarders = make(map[string]*LocalForwarder)
}
// getSSHClientCacheKey generates a unique cache key for SSH config
func getSSHClientCacheKey(config connection.SSHConfig) string {
return fmt.Sprintf("%s:%d:%s", config.Host, config.Port, config.User)
}
// GetOrCreateSSHClient returns a cached SSH client or creates a new one
func GetOrCreateSSHClient(config connection.SSHConfig) (*ssh.Client, error) {
key := newSSHClientCacheKey(config)
key := getSSHClientCacheKey(config)
sshClientCacheMu.RLock()
client, exists := sshClientCache[key]
@@ -362,11 +315,11 @@ func GetOrCreateSSHClient(config connection.SSHConfig) (*ssh.Client, error) {
session, err := client.NewSession()
if err == nil {
session.Close()
logger.Infof("复用已有 SSH 连接:%s", formatSSHClientKeyForLog(key))
logger.Infof("复用已有 SSH 连接:%s", key)
return client, nil
}
// Connection is dead, remove from cache
logger.Warnf("SSH 连接已断开,重新建立:%s (错误: %v)", formatSSHClientKeyForLog(key), err)
logger.Warnf("SSH 连接已断开,重新建立:%s (错误: %v)", key, err)
sshClientCacheMu.Lock()
delete(sshClientCache, key)
sshClientCacheMu.Unlock()
@@ -385,7 +338,7 @@ func GetOrCreateSSHClient(config connection.SSHConfig) (*ssh.Client, error) {
sshClientCache[key] = client
sshClientCacheMu.Unlock()
logger.Infof("已缓存 SSH 连接:%s", formatSSHClientKeyForLog(key))
logger.Infof("已缓存 SSH 连接:%s", key)
return client, nil
}
@@ -414,8 +367,9 @@ func CloseAllSSHClients() {
for key, client := range sshClientCache {
if client != nil {
_ = client.Close()
logger.Infof("已关闭 SSH 连接:%s", formatSSHClientKeyForLog(key))
logger.Infof("已关闭 SSH 连接:%s", key)
}
}
sshClientCache = make(map[sshClientCacheKey]*ssh.Client)
sshClientCache = make(map[string]*ssh.Client)
}

View File

@@ -1,46 +0,0 @@
package ssh
import (
"testing"
"GoNavi-Wails/internal/connection"
)
// TestNewSSHClientCacheKey_DiffPassword verifies that two SSH configs which
// differ only in password produce distinct cache keys (via the auth
// fingerprint) while host/port/user fields stay identical.
func TestNewSSHClientCacheKey_DiffPassword(t *testing.T) {
	a := newSSHClientCacheKey(connection.SSHConfig{
		Host:     "127.0.0.1",
		Port:     22,
		User:     "root",
		Password: "a",
	})
	b := newSSHClientCacheKey(connection.SSHConfig{
		Host:     "127.0.0.1",
		Port:     22,
		User:     "root",
		Password: "b",
	})
	if a == b {
		t.Fatalf("expected different cache key when password differs")
	}
	// Only the auth component may differ; the endpoint identity must not.
	if a.host != b.host || a.port != b.port || a.user != b.user {
		t.Fatalf("expected host/port/user to stay identical")
	}
}
// TestNewSSHClientCacheKey_DiffKeyPath verifies that configs differing only
// in the private-key path also produce distinct cache keys.
func TestNewSSHClientCacheKey_DiffKeyPath(t *testing.T) {
	a := newSSHClientCacheKey(connection.SSHConfig{
		Host:    "127.0.0.1",
		Port:    22,
		User:    "root",
		KeyPath: "/tmp/a.key",
	})
	b := newSSHClientCacheKey(connection.SSHConfig{
		Host:    "127.0.0.1",
		Port:    22,
		User:    "root",
		KeyPath: "/tmp/b.key",
	})
	if a == b {
		t.Fatalf("expected different cache key when keyPath differs")
	}
}

View File

@@ -1,27 +1,22 @@
package sync
import (
"GoNavi-Wails/internal/db"
"GoNavi-Wails/internal/logger"
"fmt"
"strings"
)
type TableDiffSummary struct {
Table string `json:"table"`
PKColumn string `json:"pkColumn,omitempty"`
CanSync bool `json:"canSync"`
Inserts int `json:"inserts"`
Updates int `json:"updates"`
Deletes int `json:"deletes"`
Same int `json:"same"`
Message string `json:"message,omitempty"`
HasSchema bool `json:"hasSchema,omitempty"`
TargetTableExists bool `json:"targetTableExists,omitempty"`
PlannedAction string `json:"plannedAction,omitempty"`
Warnings []string `json:"warnings,omitempty"`
UnsupportedObjects []string `json:"unsupportedObjects,omitempty"`
IndexesToCreate int `json:"indexesToCreate,omitempty"`
IndexesSkipped int `json:"indexesSkipped,omitempty"`
Table string `json:"table"`
PKColumn string `json:"pkColumn,omitempty"`
CanSync bool `json:"canSync"`
Inserts int `json:"inserts"`
Updates int `json:"updates"`
Deletes int `json:"deletes"`
Same int `json:"same"`
Message string `json:"message,omitempty"`
HasSchema bool `json:"hasSchema,omitempty"`
}
type SyncAnalyzeResult struct {
@@ -32,12 +27,6 @@ type SyncAnalyzeResult struct {
func (s *SyncEngine) Analyze(config SyncConfig) SyncAnalyzeResult {
result := SyncAnalyzeResult{Success: true, Tables: []TableDiffSummary{}}
if isRedisToMongoKeyspacePair(config) {
return s.analyzeRedisToMongo(config)
}
if isMongoToRedisKeyspacePair(config) {
return s.analyzeMongoToRedis(config)
}
contentRaw := strings.ToLower(strings.TrimSpace(config.Content))
syncSchema := false
@@ -59,23 +48,25 @@ func (s *SyncEngine) Analyze(config SyncConfig) SyncAnalyzeResult {
totalTables := len(config.Tables)
s.progress(config.JobID, 0, totalTables, "", "差异分析开始")
sourceDB, err := newSyncDatabase(config.SourceConfig.Type)
sourceDB, err := db.NewDatabase(config.SourceConfig.Type)
if err != nil {
logger.Error(err, "初始化源数据库驱动失败:类型=%s", config.SourceConfig.Type)
return SyncAnalyzeResult{Success: false, Message: "初始化源数据库驱动失败: " + err.Error()}
}
targetDB, err := newSyncDatabase(config.TargetConfig.Type)
targetDB, err := db.NewDatabase(config.TargetConfig.Type)
if err != nil {
logger.Error(err, "初始化目标数据库驱动失败:类型=%s", config.TargetConfig.Type)
return SyncAnalyzeResult{Success: false, Message: "初始化目标数据库驱动失败: " + err.Error()}
}
// Connect Source
if err := sourceDB.Connect(config.SourceConfig); err != nil {
logger.Error(err, "源数据库连接失败:%s", formatConnSummaryForSync(config.SourceConfig))
return SyncAnalyzeResult{Success: false, Message: "源数据库连接失败: " + err.Error()}
}
defer sourceDB.Close()
// Connect Target
if err := targetDB.Connect(config.TargetConfig); err != nil {
logger.Error(err, "目标数据库连接失败:%s", formatConnSummaryForSync(config.TargetConfig))
return SyncAnalyzeResult{Success: false, Message: "目标数据库连接失败: " + err.Error()}
@@ -97,76 +88,51 @@ func (s *SyncEngine) Analyze(config SyncConfig) SyncAnalyzeResult {
HasSchema: syncSchema,
}
plan, cols, _, err := buildSchemaMigrationPlan(config, tableName, sourceDB, targetDB)
if err != nil {
summary.Message = err.Error()
result.Tables = append(result.Tables, summary)
return
}
summary.TargetTableExists = plan.TargetTableExists
summary.PlannedAction = plan.PlannedAction
summary.Warnings = append(summary.Warnings, plan.Warnings...)
summary.UnsupportedObjects = append(summary.UnsupportedObjects, plan.UnsupportedObjects...)
summary.IndexesToCreate = plan.IndexesToCreate
summary.IndexesSkipped = plan.IndexesSkipped
sourceSchema, sourceTable := normalizeSchemaAndTable(config.SourceConfig.Type, config.SourceConfig.Database, tableName)
targetSchema, targetTable := normalizeSchemaAndTable(config.TargetConfig.Type, config.TargetConfig.Database, tableName)
sourceQueryTable := qualifiedNameForQuery(config.SourceConfig.Type, sourceSchema, sourceTable, tableName)
targetQueryTable := qualifiedNameForQuery(config.TargetConfig.Type, targetSchema, targetTable, tableName)
if !plan.TargetTableExists && !plan.AutoCreate {
summary.Message = firstNonEmpty(plan.PlannedAction, "目标表不存在,无法执行同步")
cols, err := sourceDB.GetColumns(sourceSchema, sourceTable)
if err != nil {
summary.Message = "获取源表字段失败: " + err.Error()
result.Tables = append(result.Tables, summary)
return
}
if !syncData {
summary.CanSync = true
summary.Message = firstNonEmpty(plan.PlannedAction, "仅同步结构,未执行数据差异分析")
summary.Message = "仅同步结构,未执行数据差异分析"
result.Tables = append(result.Tables, summary)
return
}
tableMode := normalizeSyncMode(config.Mode)
pkCols := make([]string, 0, 2)
for _, c := range cols {
if c.Key == "PRI" || c.Key == "PK" {
pkCols = append(pkCols, c.Name)
}
}
sourceRows, _, err := sourceDB.Query(fmt.Sprintf("SELECT * FROM %s", quoteQualifiedIdentByType(config.SourceConfig.Type, plan.SourceQueryTable)))
if err != nil {
summary.Message = "读取源表失败: " + err.Error()
result.Tables = append(result.Tables, summary)
return
}
if !plan.TargetTableExists && plan.AutoCreate {
summary.CanSync = true
summary.Inserts = len(sourceRows)
summary.Message = firstNonEmpty(plan.PlannedAction, "目标表不存在,执行时将自动建表并导入全部源数据")
result.Tables = append(result.Tables, summary)
return
}
if tableMode != "insert_update" {
summary.CanSync = true
summary.Inserts = len(sourceRows)
summary.Message = firstNonEmpty(plan.PlannedAction, "当前模式无需差异对比,将按源表数据执行导入")
result.Tables = append(result.Tables, summary)
return
}
if len(pkCols) == 0 {
summary.Message = "无主键,不支持差异对比同步;如需直接导入请使用仅插入或全量覆盖模式"
summary.Message = "无主键,不支持数据对比/同步"
result.Tables = append(result.Tables, summary)
return
}
if len(pkCols) > 1 {
summary.Message = fmt.Sprintf("复合主键(%s暂不支持差异对比同步", strings.Join(pkCols, ","))
summary.Message = fmt.Sprintf("复合主键(%s暂不支持数据对比/同步", strings.Join(pkCols, ","))
result.Tables = append(result.Tables, summary)
return
}
summary.PKColumn = pkCols[0]
targetRows, _, err := targetDB.Query(fmt.Sprintf("SELECT * FROM %s", quoteQualifiedIdentByType(config.TargetConfig.Type, plan.TargetQueryTable)))
// Query data for diff
sourceRows, _, err := sourceDB.Query(fmt.Sprintf("SELECT * FROM %s", quoteQualifiedIdentByType(config.SourceConfig.Type, sourceQueryTable)))
if err != nil {
summary.Message = "读取源表失败: " + err.Error()
result.Tables = append(result.Tables, summary)
return
}
targetRows, _, err := targetDB.Query(fmt.Sprintf("SELECT * FROM %s", quoteQualifiedIdentByType(config.TargetConfig.Type, targetQueryTable)))
if err != nil {
summary.Message = "读取目标表失败: " + err.Error()
result.Tables = append(result.Tables, summary)
@@ -222,9 +188,6 @@ func (s *SyncEngine) Analyze(config SyncConfig) SyncAnalyzeResult {
}
summary.CanSync = true
if strings.TrimSpace(summary.Message) == "" {
summary.Message = firstNonEmpty(plan.PlannedAction, "差异分析完成")
}
result.Tables = append(result.Tables, summary)
}()
}
@@ -233,12 +196,3 @@ func (s *SyncEngine) Analyze(config SyncConfig) SyncAnalyzeResult {
result.Message = fmt.Sprintf("已完成 %d 张表的差异分析", len(result.Tables))
return result
}
// firstNonEmpty returns the first argument that is non-blank after trimming
// whitespace; the value is returned as-is (untrimmed). Returns "" when every
// argument is blank or no arguments are given.
func firstNonEmpty(values ...string) string {
	for _, candidate := range values {
		if strings.TrimSpace(candidate) == "" {
			continue
		}
		return candidate
	}
	return ""
}

View File

@@ -1,741 +0,0 @@
package sync
import (
"GoNavi-Wails/internal/connection"
"GoNavi-Wails/internal/db"
"fmt"
"regexp"
"strings"
)
func buildMySQLToClickHousePlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
plan := SchemaMigrationPlan{}
plan.SourceSchema, plan.SourceTable = normalizeSchemaAndTable(config.SourceConfig.Type, config.SourceConfig.Database, tableName)
plan.TargetSchema, plan.TargetTable = normalizeSchemaAndTable(config.TargetConfig.Type, config.TargetConfig.Database, tableName)
plan.SourceQueryTable = qualifiedNameForQuery(config.SourceConfig.Type, plan.SourceSchema, plan.SourceTable, tableName)
plan.TargetQueryTable = qualifiedNameForQuery(config.TargetConfig.Type, plan.TargetSchema, plan.TargetTable, tableName)
plan.PlannedAction = "使用已有目标表导入"
sourceCols, sourceExists, err := inspectTableColumns(sourceDB, plan.SourceSchema, plan.SourceTable)
if err != nil {
return plan, nil, nil, fmt.Errorf("获取源表字段失败: %w", err)
}
if !sourceExists {
return plan, nil, nil, fmt.Errorf("源表不存在或无列定义: %s", tableName)
}
targetCols, targetExists, err := inspectTableColumns(targetDB, plan.TargetSchema, plan.TargetTable)
if err != nil {
return plan, sourceCols, nil, fmt.Errorf("获取目标表字段失败: %w", err)
}
plan.TargetTableExists = targetExists
strategy := normalizeTargetTableStrategy(config.TargetTableStrategy)
if targetExists {
missing := diffMissingColumnNames(sourceCols, targetCols)
if len(missing) > 0 {
plan.Warnings = append(plan.Warnings, fmt.Sprintf("目标表缺失字段 %d 个:%s", len(missing), strings.Join(missing, ", ")))
}
if config.AutoAddColumns {
addSQL, addWarnings := buildMySQLToClickHouseAddColumnSQL(plan.TargetQueryTable, sourceCols, targetCols)
plan.PreDataSQL = append(plan.PreDataSQL, addSQL...)
plan.Warnings = append(plan.Warnings, addWarnings...)
if len(addSQL) > 0 {
plan.PlannedAction = fmt.Sprintf("补齐缺失字段(%d)后导入", len(addSQL))
}
}
plan.Warnings = append(plan.Warnings, "ClickHouse 目标端建议优先使用仅插入或全量覆盖;更新/删除语义与传统关系型存在差异")
return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
}
switch strategy {
case "existing_only":
plan.PlannedAction = "目标表不存在,需先手工创建"
plan.Warnings = append(plan.Warnings, "当前策略要求目标表已存在,执行时不会自动建表")
return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
case "smart", "auto_create_if_missing":
plan.AutoCreate = true
plan.PlannedAction = "目标表不存在,将自动建表后导入"
createSQL, warnings, unsupported := buildMySQLToClickHouseCreateTableSQL(plan.TargetQueryTable, sourceCols)
plan.CreateTableSQL = createSQL
plan.Warnings = append(plan.Warnings, warnings...)
plan.UnsupportedObjects = append(plan.UnsupportedObjects, unsupported...)
return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
default:
return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
}
}
func buildPGLikeToClickHousePlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
plan := SchemaMigrationPlan{}
sourceType := resolveMigrationDBType(config.SourceConfig)
targetType := resolveMigrationDBType(config.TargetConfig)
plan.SourceSchema, plan.SourceTable = normalizeSchemaAndTable(sourceType, config.SourceConfig.Database, tableName)
plan.TargetSchema, plan.TargetTable = normalizeSchemaAndTable(targetType, config.TargetConfig.Database, tableName)
plan.SourceQueryTable = qualifiedNameForQuery(sourceType, plan.SourceSchema, plan.SourceTable, tableName)
plan.TargetQueryTable = qualifiedNameForQuery(targetType, plan.TargetSchema, plan.TargetTable, tableName)
plan.PlannedAction = "使用已有目标表导入"
sourceCols, sourceExists, err := inspectTableColumns(sourceDB, plan.SourceSchema, plan.SourceTable)
if err != nil {
return plan, nil, nil, fmt.Errorf("获取源表字段失败: %w", err)
}
if !sourceExists {
return plan, nil, nil, fmt.Errorf("源表不存在或无列定义: %s", tableName)
}
targetCols, targetExists, err := inspectTableColumns(targetDB, plan.TargetSchema, plan.TargetTable)
if err != nil {
return plan, sourceCols, nil, fmt.Errorf("获取目标表字段失败: %w", err)
}
plan.TargetTableExists = targetExists
strategy := normalizeTargetTableStrategy(config.TargetTableStrategy)
if targetExists {
missing := diffMissingColumnNames(sourceCols, targetCols)
if len(missing) > 0 {
plan.Warnings = append(plan.Warnings, fmt.Sprintf("目标表缺失字段 %d 个:%s", len(missing), strings.Join(missing, ", ")))
}
if config.AutoAddColumns {
addSQL, addWarnings := buildPGLikeToClickHouseAddColumnSQL(plan.TargetQueryTable, sourceCols, targetCols)
plan.PreDataSQL = append(plan.PreDataSQL, addSQL...)
plan.Warnings = append(plan.Warnings, addWarnings...)
if len(addSQL) > 0 {
plan.PlannedAction = fmt.Sprintf("补齐缺失字段(%d)后导入", len(addSQL))
}
}
plan.Warnings = append(plan.Warnings, "ClickHouse 目标端建议优先使用仅插入或全量覆盖;更新/删除语义与传统关系型存在差异")
return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
}
switch strategy {
case "existing_only":
plan.PlannedAction = "目标表不存在,需先手工创建"
plan.Warnings = append(plan.Warnings, "当前策略要求目标表已存在,执行时不会自动建表")
return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
case "smart", "auto_create_if_missing":
plan.AutoCreate = true
plan.PlannedAction = "目标表不存在,将自动建表后导入"
createSQL, warnings, unsupported := buildPGLikeToClickHouseCreateTableSQL(plan.TargetQueryTable, sourceCols)
plan.CreateTableSQL = createSQL
plan.Warnings = append(plan.Warnings, warnings...)
plan.UnsupportedObjects = append(plan.UnsupportedObjects, unsupported...)
return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
default:
return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
}
}
func buildClickHouseToMySQLPlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
plan := SchemaMigrationPlan{}
plan.SourceSchema, plan.SourceTable = normalizeSchemaAndTable(config.SourceConfig.Type, config.SourceConfig.Database, tableName)
plan.TargetSchema, plan.TargetTable = normalizeSchemaAndTable(config.TargetConfig.Type, config.TargetConfig.Database, tableName)
plan.SourceQueryTable = qualifiedNameForQuery(config.SourceConfig.Type, plan.SourceSchema, plan.SourceTable, tableName)
plan.TargetQueryTable = qualifiedNameForQuery(config.TargetConfig.Type, plan.TargetSchema, plan.TargetTable, tableName)
plan.PlannedAction = "使用已有目标表导入"
sourceCols, sourceExists, err := inspectTableColumns(sourceDB, plan.SourceSchema, plan.SourceTable)
if err != nil {
return plan, nil, nil, fmt.Errorf("获取源表字段失败: %w", err)
}
if !sourceExists {
return plan, nil, nil, fmt.Errorf("源表不存在或无列定义: %s", tableName)
}
targetCols, targetExists, err := inspectTableColumns(targetDB, plan.TargetSchema, plan.TargetTable)
if err != nil {
return plan, sourceCols, nil, fmt.Errorf("获取目标表字段失败: %w", err)
}
plan.TargetTableExists = targetExists
strategy := normalizeTargetTableStrategy(config.TargetTableStrategy)
if targetExists {
missing := diffMissingColumnNames(sourceCols, targetCols)
if len(missing) > 0 {
plan.Warnings = append(plan.Warnings, fmt.Sprintf("目标表缺失字段 %d 个:%s", len(missing), strings.Join(missing, ", ")))
}
if config.AutoAddColumns {
addSQL, addWarnings := buildClickHouseToMySQLAddColumnSQL(plan.TargetQueryTable, sourceCols, targetCols)
plan.PreDataSQL = append(plan.PreDataSQL, addSQL...)
plan.Warnings = append(plan.Warnings, addWarnings...)
if len(addSQL) > 0 {
plan.PlannedAction = fmt.Sprintf("补齐缺失字段(%d)后导入", len(addSQL))
}
}
plan.Warnings = append(plan.Warnings, "ClickHouse 源端索引/约束元数据有限,反向迁移将以字段和数据为主")
return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
}
switch strategy {
case "existing_only":
plan.PlannedAction = "目标表不存在,需先手工创建"
plan.Warnings = append(plan.Warnings, "当前策略要求目标表已存在,执行时不会自动建表")
return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
case "smart", "auto_create_if_missing":
plan.AutoCreate = true
plan.PlannedAction = "目标表不存在,将自动建表后导入"
createSQL, warnings := buildClickHouseToMySQLCreateTableSQL(plan.TargetQueryTable, sourceCols)
plan.CreateTableSQL = createSQL
plan.Warnings = append(plan.Warnings, warnings...)
return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
default:
return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
}
}
func buildClickHouseToPGLikePlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
plan := SchemaMigrationPlan{}
sourceType := resolveMigrationDBType(config.SourceConfig)
targetType := resolveMigrationDBType(config.TargetConfig)
plan.SourceSchema, plan.SourceTable = normalizeSchemaAndTable(sourceType, config.SourceConfig.Database, tableName)
plan.TargetSchema, plan.TargetTable = normalizeSchemaAndTable(targetType, config.TargetConfig.Database, tableName)
plan.SourceQueryTable = qualifiedNameForQuery(sourceType, plan.SourceSchema, plan.SourceTable, tableName)
plan.TargetQueryTable = qualifiedNameForQuery(targetType, plan.TargetSchema, plan.TargetTable, tableName)
plan.PlannedAction = "使用已有目标表导入"
sourceCols, sourceExists, err := inspectTableColumns(sourceDB, plan.SourceSchema, plan.SourceTable)
if err != nil {
return plan, nil, nil, fmt.Errorf("获取源表字段失败: %w", err)
}
if !sourceExists {
return plan, nil, nil, fmt.Errorf("源表不存在或无列定义: %s", tableName)
}
targetCols, targetExists, err := inspectTableColumns(targetDB, plan.TargetSchema, plan.TargetTable)
if err != nil {
return plan, sourceCols, nil, fmt.Errorf("获取目标表字段失败: %w", err)
}
plan.TargetTableExists = targetExists
strategy := normalizeTargetTableStrategy(config.TargetTableStrategy)
if targetExists {
missing := diffMissingColumnNames(sourceCols, targetCols)
if len(missing) > 0 {
plan.Warnings = append(plan.Warnings, fmt.Sprintf("目标表缺失字段 %d 个:%s", len(missing), strings.Join(missing, ", ")))
}
if config.AutoAddColumns {
addSQL, addWarnings := buildClickHouseToPGLikeAddColumnSQL(targetType, plan.TargetQueryTable, sourceCols, targetCols)
plan.PreDataSQL = append(plan.PreDataSQL, addSQL...)
plan.Warnings = append(plan.Warnings, addWarnings...)
if len(addSQL) > 0 {
plan.PlannedAction = fmt.Sprintf("补齐缺失字段(%d)后导入", len(addSQL))
}
}
plan.Warnings = append(plan.Warnings, "ClickHouse 源端索引/约束元数据有限,反向迁移将以字段和数据为主")
return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
}
switch strategy {
case "existing_only":
plan.PlannedAction = "目标表不存在,需先手工创建"
plan.Warnings = append(plan.Warnings, "当前策略要求目标表已存在,执行时不会自动建表")
return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
case "smart", "auto_create_if_missing":
plan.AutoCreate = true
plan.PlannedAction = "目标表不存在,将自动建表后导入"
createSQL, warnings, unsupported := buildClickHouseToPGLikeCreateTableSQL(targetType, plan.TargetQueryTable, sourceCols)
plan.CreateTableSQL = createSQL
plan.Warnings = append(plan.Warnings, warnings...)
plan.UnsupportedObjects = append(plan.UnsupportedObjects, unsupported...)
return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
default:
return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
}
}
func buildPGLikeToClickHouseAddColumnSQL(targetQueryTable string, sourceCols, targetCols []connection.ColumnDefinition) ([]string, []string) {
targetSet := make(map[string]struct{}, len(targetCols))
for _, col := range targetCols {
key := strings.ToLower(strings.TrimSpace(col.Name))
if key == "" {
continue
}
targetSet[key] = struct{}{}
}
var sqlList []string
var warnings []string
for _, col := range sourceCols {
key := strings.ToLower(strings.TrimSpace(col.Name))
if key == "" {
continue
}
if _, ok := targetSet[key]; ok {
continue
}
colType, mapWarnings := mapPGLikeColumnToClickHouse(col)
warnings = append(warnings, mapWarnings...)
sqlList = append(sqlList, fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s %s",
quoteQualifiedIdentByType("clickhouse", targetQueryTable),
quoteIdentByType("clickhouse", col.Name),
colType,
))
}
return sqlList, dedupeStrings(warnings)
}
func buildMySQLToClickHouseAddColumnSQL(targetQueryTable string, sourceCols, targetCols []connection.ColumnDefinition) ([]string, []string) {
targetSet := make(map[string]struct{}, len(targetCols))
for _, col := range targetCols {
key := strings.ToLower(strings.TrimSpace(col.Name))
if key == "" {
continue
}
targetSet[key] = struct{}{}
}
var sqlList []string
var warnings []string
for _, col := range sourceCols {
key := strings.ToLower(strings.TrimSpace(col.Name))
if key == "" {
continue
}
if _, ok := targetSet[key]; ok {
continue
}
colType, mapWarnings := mapMySQLColumnToClickHouse(col)
warnings = append(warnings, mapWarnings...)
sqlList = append(sqlList, fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s %s",
quoteQualifiedIdentByType("clickhouse", targetQueryTable),
quoteIdentByType("clickhouse", col.Name),
colType,
))
}
return sqlList, dedupeStrings(warnings)
}
func buildClickHouseToPGLikeAddColumnSQL(targetType string, targetQueryTable string, sourceCols, targetCols []connection.ColumnDefinition) ([]string, []string) {
targetSet := make(map[string]struct{}, len(targetCols))
for _, col := range targetCols {
key := strings.ToLower(strings.TrimSpace(col.Name))
if key == "" {
continue
}
targetSet[key] = struct{}{}
}
var sqlList []string
var warnings []string
for _, col := range sourceCols {
key := strings.ToLower(strings.TrimSpace(col.Name))
if key == "" {
continue
}
if _, ok := targetSet[key]; ok {
continue
}
colType, mapWarnings := mapClickHouseColumnToPGLike(col)
warnings = append(warnings, mapWarnings...)
sqlList = append(sqlList, fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s %s NULL",
quoteQualifiedIdentByType(targetType, targetQueryTable),
quoteIdentByType(targetType, col.Name),
colType,
))
}
return sqlList, dedupeStrings(warnings)
}
// buildClickHouseToMySQLAddColumnSQL emits ALTER TABLE ... ADD COLUMN
// statements (explicitly NULL-able) for source columns missing from the
// MySQL target table. Names are compared case-insensitively after trimming;
// blank names are skipped.
func buildClickHouseToMySQLAddColumnSQL(targetQueryTable string, sourceCols, targetCols []connection.ColumnDefinition) ([]string, []string) {
	existing := make(map[string]struct{}, len(targetCols))
	for _, tc := range targetCols {
		if name := strings.ToLower(strings.TrimSpace(tc.Name)); name != "" {
			existing[name] = struct{}{}
		}
	}
	var sqlList, warnings []string
	for _, sc := range sourceCols {
		name := strings.ToLower(strings.TrimSpace(sc.Name))
		if name == "" {
			continue
		}
		if _, present := existing[name]; present {
			continue
		}
		mapped, mapWarnings := mapClickHouseColumnToMySQL(sc)
		warnings = append(warnings, mapWarnings...)
		stmt := fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s %s NULL",
			quoteQualifiedIdentByType("mysql", targetQueryTable),
			quoteIdentByType("mysql", sc.Name),
			mapped,
		)
		sqlList = append(sqlList, stmt)
	}
	return sqlList, dedupeStrings(warnings)
}
// buildPGLikeToClickHouseCreateTableSQL renders a ClickHouse CREATE TABLE
// statement from PG-like source columns. Primary-key columns (Key "PRI" or
// "PK") become the MergeTree ORDER BY; with none, the table falls back to
// ORDER BY tuple() and a warning is recorded. Returns (DDL, warnings,
// unsupported-feature notes), the latter two deduplicated.
func buildPGLikeToClickHouseCreateTableSQL(targetQueryTable string, sourceCols []connection.ColumnDefinition) (string, []string, []string) {
	columnDefs := make([]string, 0, len(sourceCols))
	warnings := make([]string, 0)
	unsupported := make([]string, 0)
	orderByCols := make([]string, 0)
	for _, sc := range sourceCols {
		def, colWarnings := buildPGLikeToClickHouseColumnDefinition(sc)
		warnings = append(warnings, colWarnings...)
		quoted := quoteIdentByType("clickhouse", sc.Name)
		columnDefs = append(columnDefs, fmt.Sprintf("%s %s", quoted, def))
		if sc.Key == "PRI" || sc.Key == "PK" {
			orderByCols = append(orderByCols, quoted)
		}
	}
	orderExpr := "tuple()"
	if len(orderByCols) == 0 {
		warnings = append(warnings, "源表未识别到主键ClickHouse 将使用 ORDER BY tuple() 建表,后续查询性能可能受影响")
	} else {
		orderExpr = "(" + strings.Join(orderByCols, ", ") + ")"
	}
	warnings = append(warnings, "ClickHouse 不保留关系型外键/唯一约束语义,将仅迁移字段与数据")
	createSQL := fmt.Sprintf(
		"CREATE TABLE %s (\n %s\n) ENGINE = MergeTree() ORDER BY %s",
		quoteQualifiedIdentByType("clickhouse", targetQueryTable),
		strings.Join(columnDefs, ",\n "),
		orderExpr,
	)
	return createSQL, dedupeStrings(warnings), dedupeStrings(unsupported)
}
// buildMySQLToClickHouseCreateTableSQL renders a ClickHouse CREATE TABLE
// statement from MySQL source columns. Primary-key columns (Key "PRI" or
// "PK") become the MergeTree ORDER BY; with none, the table falls back to
// ORDER BY tuple() and a warning is recorded. Returns (DDL, warnings,
// unsupported-feature notes), the latter two deduplicated.
func buildMySQLToClickHouseCreateTableSQL(targetQueryTable string, sourceCols []connection.ColumnDefinition) (string, []string, []string) {
	columnDefs := make([]string, 0, len(sourceCols))
	warnings := make([]string, 0)
	unsupported := make([]string, 0)
	orderByCols := make([]string, 0)
	for _, sc := range sourceCols {
		def, colWarnings := buildMySQLToClickHouseColumnDefinition(sc)
		warnings = append(warnings, colWarnings...)
		quoted := quoteIdentByType("clickhouse", sc.Name)
		columnDefs = append(columnDefs, fmt.Sprintf("%s %s", quoted, def))
		if sc.Key == "PRI" || sc.Key == "PK" {
			orderByCols = append(orderByCols, quoted)
		}
	}
	orderExpr := "tuple()"
	if len(orderByCols) == 0 {
		warnings = append(warnings, "源表未识别到主键ClickHouse 将使用 ORDER BY tuple() 建表,后续查询性能可能受影响")
	} else {
		orderExpr = "(" + strings.Join(orderByCols, ", ") + ")"
	}
	warnings = append(warnings, "ClickHouse 不保留关系型外键/唯一约束语义,将仅迁移字段与数据")
	createSQL := fmt.Sprintf(
		"CREATE TABLE %s (\n %s\n) ENGINE = MergeTree() ORDER BY %s",
		quoteQualifiedIdentByType("clickhouse", targetQueryTable),
		strings.Join(columnDefs, ",\n "),
		orderExpr,
	)
	return createSQL, dedupeStrings(warnings), dedupeStrings(unsupported)
}
// buildClickHouseToPGLikeCreateTableSQL renders a PG-like CREATE TABLE
// statement from ClickHouse source columns. Columns flagged as primary key
// (Key "PRI" or "PK") are collected into a PRIMARY KEY clause; without any,
// the target table is created without a primary key and a warning is
// recorded. ClickHouse-specific features are listed as unsupported.
func buildClickHouseToPGLikeCreateTableSQL(targetType string, targetQueryTable string, sourceCols []connection.ColumnDefinition) (string, []string, []string) {
	columnDefs := make([]string, 0, len(sourceCols)+1)
	warnings := make([]string, 0)
	unsupported := []string{"ClickHouse ORDER BY/PARTITION/TTL/Projection/物化视图 语义当前不会自动迁移到 PG-like"}
	pkCols := make([]string, 0)
	for _, sc := range sourceCols {
		def, colWarnings := buildClickHouseToPGLikeColumnDefinition(sc)
		warnings = append(warnings, colWarnings...)
		quoted := quoteIdentByType(targetType, sc.Name)
		columnDefs = append(columnDefs, fmt.Sprintf("%s %s", quoted, def))
		if sc.Key == "PRI" || sc.Key == "PK" {
			pkCols = append(pkCols, quoted)
		}
	}
	if len(pkCols) == 0 {
		warnings = append(warnings, "ClickHouse 源端未返回主键信息,目标 PG-like 表将不自动创建主键")
	} else {
		columnDefs = append(columnDefs, fmt.Sprintf("PRIMARY KEY (%s)", strings.Join(pkCols, ", ")))
	}
	createSQL := fmt.Sprintf(
		"CREATE TABLE %s (\n %s\n)",
		quoteQualifiedIdentByType(targetType, targetQueryTable),
		strings.Join(columnDefs, ",\n "),
	)
	return createSQL, dedupeStrings(warnings), dedupeStrings(unsupported)
}
// buildClickHouseToMySQLCreateTableSQL renders a MySQL CREATE TABLE
// statement from ClickHouse source columns. Columns flagged as primary key
// (Key "PRI" or "PK") are collected into a PRIMARY KEY clause; without any,
// the target table is created without a primary key and a warning is
// recorded.
func buildClickHouseToMySQLCreateTableSQL(targetQueryTable string, sourceCols []connection.ColumnDefinition) (string, []string) {
	columnDefs := make([]string, 0, len(sourceCols)+1)
	warnings := make([]string, 0)
	pkCols := make([]string, 0)
	for _, sc := range sourceCols {
		def, colWarnings := buildClickHouseToMySQLColumnDefinition(sc)
		warnings = append(warnings, colWarnings...)
		quoted := quoteIdentByType("mysql", sc.Name)
		columnDefs = append(columnDefs, fmt.Sprintf("%s %s", quoted, def))
		if sc.Key == "PRI" || sc.Key == "PK" {
			pkCols = append(pkCols, quoted)
		}
	}
	if len(pkCols) == 0 {
		warnings = append(warnings, "ClickHouse 源端未返回主键信息,目标 MySQL 表将不自动创建主键")
	} else {
		columnDefs = append(columnDefs, fmt.Sprintf("PRIMARY KEY (%s)", strings.Join(pkCols, ", ")))
	}
	createSQL := fmt.Sprintf(
		"CREATE TABLE %s (\n %s\n)",
		quoteQualifiedIdentByType("mysql", targetQueryTable),
		strings.Join(columnDefs, ",\n "),
	)
	return createSQL, dedupeStrings(warnings)
}
// buildPGLikeToClickHouseColumnDefinition maps one PG-like column to its
// ClickHouse type text, deduplicating the mapping warnings. (Nullability is
// already encoded by the mapper via Nullable(...), so no suffix is added.)
func buildPGLikeToClickHouseColumnDefinition(col connection.ColumnDefinition) (string, []string) {
	mapped, warnings := mapPGLikeColumnToClickHouse(col)
	// Joining a single-element slice is the identity, so return directly.
	return mapped, dedupeStrings(warnings)
}
// buildMySQLToClickHouseColumnDefinition maps one MySQL column to its
// ClickHouse type text, deduplicating the mapping warnings.
//
// mapMySQLColumnToClickHouse already wraps nullable columns in Nullable(...),
// so no NOT NULL handling is needed here. The previous implementation carried
// a conditional whose two branches returned identical values (dead code);
// it has been removed without any behavior change.
func buildMySQLToClickHouseColumnDefinition(col connection.ColumnDefinition) (string, []string) {
	targetType, warnings := mapMySQLColumnToClickHouse(col)
	return targetType, dedupeStrings(warnings)
}
// buildClickHouseToPGLikeColumnDefinition maps one ClickHouse column to its
// PG-like type text, appending NOT NULL when the source column is declared
// non-nullable. Mapping warnings are deduplicated.
func buildClickHouseToPGLikeColumnDefinition(col connection.ColumnDefinition) (string, []string) {
	mapped, warnings := mapClickHouseColumnToPGLike(col)
	def := mapped
	if strings.EqualFold(strings.TrimSpace(col.Nullable), "NO") {
		def = mapped + " NOT NULL"
	}
	return def, dedupeStrings(warnings)
}
// buildClickHouseToMySQLColumnDefinition maps one ClickHouse column to its
// MySQL type text, appending NOT NULL when the source column is declared
// non-nullable. Mapping warnings are deduplicated.
func buildClickHouseToMySQLColumnDefinition(col connection.ColumnDefinition) (string, []string) {
	mapped, warnings := mapClickHouseColumnToMySQL(col)
	def := mapped
	if strings.EqualFold(strings.TrimSpace(col.Nullable), "NO") {
		def = mapped + " NOT NULL"
	}
	return def, dedupeStrings(warnings)
}
// mapPGLikeColumnToClickHouse maps a PG-like (PostgreSQL-family) column type
// to a ClickHouse type string. Nullable columns are wrapped in Nullable(...);
// types without a clean ClickHouse equivalent degrade to String with a
// warning, and identity/auto-increment semantics are flagged as not
// preserved. Returns (ClickHouse type, deduplicated warnings).
// NOTE(review): case order is load-bearing — the "timestamp"/time-zone case
// must stay before the bare "time" prefix check.
func mapPGLikeColumnToClickHouse(col connection.ColumnDefinition) (string, []string) {
raw := strings.ToLower(strings.TrimSpace(col.Type))
warnings := make([]string, 0)
// Missing type metadata: degrade to String immediately.
if raw == "" {
return "String", []string{fmt.Sprintf("字段 %s 类型为空,已降级为 String", col.Name)}
}
baseType := "String"
switch {
case raw == "boolean" || strings.HasPrefix(raw, "bool"):
// ClickHouse has no native boolean in older versions; UInt8 is the convention.
baseType = "UInt8"
case raw == "smallint":
baseType = "Int16"
case raw == "integer" || raw == "int4":
baseType = "Int32"
case raw == "bigint" || raw == "int8":
baseType = "Int64"
case strings.HasPrefix(raw, "numeric"), strings.HasPrefix(raw, "decimal"):
// Keeps the (precision, scale) arguments, swapping only the base keyword.
baseType = replaceTypeBase(raw, []string{"numeric", "decimal"}, "Decimal")
case raw == "real" || raw == "float4":
baseType = "Float32"
case raw == "double precision" || raw == "float8":
baseType = "Float64"
case raw == "date":
baseType = "Date"
case strings.HasPrefix(raw, "timestamp") || strings.Contains(raw, "without time zone") || strings.Contains(raw, "with time zone"):
baseType = "DateTime"
case strings.HasPrefix(raw, "time"):
// Bare time-of-day has no ClickHouse counterpart; stored as text.
warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已降级为 String", col.Name, col.Type))
baseType = "String"
case strings.HasPrefix(raw, "character varying"), strings.HasPrefix(raw, "varchar("), strings.HasPrefix(raw, "character("), strings.HasPrefix(raw, "char("), raw == "character", raw == "text", raw == "uuid":
baseType = "String"
case raw == "json" || raw == "jsonb" || raw == "bytea":
warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已降级为 String", col.Name, col.Type))
baseType = "String"
case strings.HasSuffix(raw, "[]") || strings.HasPrefix(raw, "array"):
// PG arrays are not translated to ClickHouse Array(...); stored as text.
warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已降级为 String", col.Name, col.Type))
baseType = "String"
case raw == "user-defined":
warnings = append(warnings, fmt.Sprintf("字段 %s 为用户自定义类型,已降级为 String", col.Name))
baseType = "String"
default:
warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 暂无专门映射,已降级为 String", col.Name, col.Type))
baseType = "String"
}
// Nullable source columns are wrapped so NULLs survive the transfer.
if strings.EqualFold(strings.TrimSpace(col.Nullable), "YES") && !strings.HasPrefix(strings.ToLower(baseType), "nullable(") {
baseType = fmt.Sprintf("Nullable(%s)", baseType)
}
// Identity/serial columns lose their auto-generation semantics in ClickHouse.
if strings.Contains(strings.ToLower(strings.TrimSpace(col.Extra)), "identity") || strings.Contains(strings.ToLower(strings.TrimSpace(col.Extra)), "auto_increment") {
warnings = append(warnings, fmt.Sprintf("字段 %s 的 identity/自增语义在 ClickHouse 中不保留", col.Name))
}
return baseType, dedupeStrings(warnings)
}
// mapMySQLColumnToClickHouse maps a MySQL column type to a ClickHouse type
// string. Unsigned integer types map to the UInt* family; nullable columns
// are wrapped in Nullable(...); types without a clean equivalent degrade to
// String with a warning, and AUTO_INCREMENT is flagged as not preserved.
// Returns (ClickHouse type, deduplicated warnings).
func mapMySQLColumnToClickHouse(col connection.ColumnDefinition) (string, []string) {
	raw := strings.ToLower(strings.TrimSpace(col.Type))
	warnings := make([]string, 0)
	if raw == "" {
		return "String", []string{fmt.Sprintf("字段 %s 类型为空,已降级为 String", col.Name)}
	}
	// Strip display modifiers so prefix matching sees the bare type name.
	unsigned := strings.Contains(raw, "unsigned")
	clean := strings.ReplaceAll(raw, " unsigned", "")
	clean = strings.ReplaceAll(clean, " zerofill", "")
	baseType := "String"
	switch {
	case strings.HasPrefix(clean, "tinyint(1)"):
		// MySQL's conventional boolean.
		baseType = "UInt8"
	case strings.HasPrefix(clean, "tinyint"):
		if unsigned {
			baseType = "UInt8"
		} else {
			baseType = "Int8"
		}
	case strings.HasPrefix(clean, "smallint"):
		if unsigned {
			baseType = "UInt16"
		} else {
			baseType = "Int16"
		}
	case strings.HasPrefix(clean, "mediumint"), strings.HasPrefix(clean, "int"), strings.HasPrefix(clean, "integer"):
		if unsigned {
			baseType = "UInt32"
		} else {
			baseType = "Int32"
		}
	case strings.HasPrefix(clean, "bigint"):
		if unsigned {
			baseType = "UInt64"
		} else {
			baseType = "Int64"
		}
	case strings.HasPrefix(clean, "decimal"), strings.HasPrefix(clean, "numeric"):
		baseType = replaceTypeBase(strings.Title(clean), []string{"Decimal", "Numeric"}, "Decimal")
	case strings.HasPrefix(clean, "float"):
		baseType = "Float32"
	case strings.HasPrefix(clean, "double"):
		baseType = "Float64"
	// BUGFIX: datetime/timestamp must be matched before the bare "date"
	// prefix. Previously the "date" case ran first, so MySQL datetime
	// columns were mapped to ClickHouse Date and silently lost their time
	// component.
	case strings.HasPrefix(clean, "datetime"), strings.HasPrefix(clean, "timestamp"):
		baseType = "DateTime"
	case strings.HasPrefix(clean, "date"):
		baseType = "Date"
	case strings.HasPrefix(clean, "time"):
		// Time-of-day has no ClickHouse counterpart; stored as text.
		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 time 已降级为 String", col.Name))
		baseType = "String"
	case strings.HasPrefix(clean, "json"), strings.HasPrefix(clean, "enum"), strings.HasPrefix(clean, "set"), strings.HasPrefix(clean, "char"), strings.HasPrefix(clean, "varchar"), strings.Contains(clean, "text"):
		baseType = "String"
	case strings.Contains(clean, "blob"), strings.Contains(clean, "binary"):
		warnings = append(warnings, fmt.Sprintf("字段 %s 二进制类型已降级为 String", col.Name))
		baseType = "String"
	default:
		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 暂无专门映射,已降级为 String", col.Name, col.Type))
		baseType = "String"
	}
	// Nullable source columns are wrapped so NULLs survive the transfer.
	if strings.EqualFold(strings.TrimSpace(col.Nullable), "YES") && !strings.HasPrefix(strings.ToLower(baseType), "nullable(") {
		baseType = fmt.Sprintf("Nullable(%s)", baseType)
	}
	if strings.Contains(strings.ToLower(strings.TrimSpace(col.Extra)), "auto_increment") {
		warnings = append(warnings, fmt.Sprintf("字段 %s 的 AUTO_INCREMENT 在 ClickHouse 中不保留自增语义", col.Name))
	}
	return baseType, dedupeStrings(warnings)
}
// clickHouseDecimalPattern matches lowercased "decimal(p,s)"/"numeric(p,s)"
// type strings, capturing precision (group 2) and scale (group 3).
var clickHouseDecimalPattern = regexp.MustCompile(`^(decimal|numeric)\((\d+)\s*,\s*(\d+)\)$`)

// clickHouseStringArgsPattern matches lowercased "fixedstring(n)" type
// strings, capturing the byte length (group 1).
var clickHouseStringArgsPattern = regexp.MustCompile(`^fixedstring\((\d+)\)$`)
// mapClickHouseColumnToPGLike maps a ClickHouse column type to a PG-like SQL
// type. Nullable(...) and LowCardinality(...) wrappers are stripped before
// matching (nullability itself is rendered by the caller); unsigned integers
// widen to the next signed size, and types without a clean equivalent
// degrade to jsonb/varchar/text with a warning.
func mapClickHouseColumnToPGLike(col connection.ColumnDefinition) (string, []string) {
	raw := strings.TrimSpace(col.Type)
	lower := strings.ToLower(raw)
	warnings := make([]string, 0)
	// Strip Nullable(...)/LowCardinality(...) wrappers repeatedly so nested
	// forms such as LowCardinality(Nullable(String)) unwrap fully. The
	// previous code stripped Nullable only once, before LowCardinality, so
	// that nesting fell through to the default branch with a spurious
	// "no mapping" warning.
	for {
		stripped := false
		if strings.HasPrefix(lower, "nullable(") && strings.HasSuffix(lower, ")") {
			raw = strings.TrimSpace(raw[len("Nullable(") : len(raw)-1])
			lower = strings.ToLower(raw)
			stripped = true
		}
		if strings.HasPrefix(lower, "lowcardinality(") && strings.HasSuffix(lower, ")") {
			raw = strings.TrimSpace(raw[len("LowCardinality(") : len(raw)-1])
			lower = strings.ToLower(raw)
			stripped = true
		}
		if !stripped {
			break
		}
	}
	switch {
	case lower == "bool" || lower == "boolean":
		return "boolean", warnings
	case lower == "int8":
		return "smallint", warnings
	case lower == "uint8":
		return "smallint", warnings
	case lower == "int16":
		return "smallint", warnings
	case lower == "uint16":
		return "integer", warnings
	case lower == "int32":
		return "integer", warnings
	case lower == "uint32":
		return "bigint", warnings
	case lower == "int64":
		return "bigint", warnings
	case lower == "uint64":
		// bigint is signed 64-bit; numeric(20,0) covers the full UInt64 range.
		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已映射为 numeric(20,0) 以避免无符号溢出", col.Name, col.Type))
		return "numeric(20,0)", warnings
	case lower == "float32":
		return "real", warnings
	case lower == "float64":
		return "double precision", warnings
	case lower == "date":
		return "date", warnings
	case strings.HasPrefix(lower, "datetime"):
		return "timestamp", warnings
	case lower == "string":
		return "text", warnings
	case lower == "uuid":
		return "uuid", warnings
	case lower == "json", strings.HasPrefix(lower, "map("), strings.HasPrefix(lower, "array("), strings.HasPrefix(lower, "tuple("), strings.HasPrefix(lower, "nested("):
		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已降级为 jsonb", col.Name, col.Type))
		return "jsonb", warnings
	case strings.HasPrefix(lower, "enum8("), strings.HasPrefix(lower, "enum16("):
		warnings = append(warnings, fmt.Sprintf("字段 %s 枚举类型 %s 已降级为 varchar(255)", col.Name, col.Type))
		return "varchar(255)", warnings
	case clickHouseDecimalPattern.MatchString(lower):
		parts := clickHouseDecimalPattern.FindStringSubmatch(lower)
		return fmt.Sprintf("numeric(%s,%s)", parts[2], parts[3]), warnings
	case clickHouseStringArgsPattern.MatchString(lower):
		parts := clickHouseStringArgsPattern.FindStringSubmatch(lower)
		return fmt.Sprintf("varchar(%s)", parts[1]), warnings
	default:
		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 暂无专门 PG-like 映射,已降级为 text", col.Name, col.Type))
		return "text", warnings
	}
}
// mapClickHouseColumnToMySQL maps a ClickHouse column type to a MySQL type.
// Nullable(...) and LowCardinality(...) wrappers are stripped before matching
// (nullability itself is rendered by the caller); unsigned integers keep
// their UNSIGNED qualifier, and types without a clean equivalent degrade to
// json/varchar/text with a warning.
func mapClickHouseColumnToMySQL(col connection.ColumnDefinition) (string, []string) {
	raw := strings.TrimSpace(col.Type)
	lower := strings.ToLower(raw)
	warnings := make([]string, 0)
	// Strip Nullable(...)/LowCardinality(...) wrappers repeatedly so nested
	// forms such as LowCardinality(Nullable(String)) unwrap fully. The
	// previous code stripped Nullable only once, before LowCardinality, so
	// that nesting fell through to the default branch with a spurious
	// warning. The old unused `nullable` flag has been removed: the caller
	// derives nullability from col.Nullable.
	for {
		stripped := false
		if strings.HasPrefix(lower, "nullable(") && strings.HasSuffix(lower, ")") {
			raw = strings.TrimSpace(raw[len("Nullable(") : len(raw)-1])
			lower = strings.ToLower(raw)
			stripped = true
		}
		if strings.HasPrefix(lower, "lowcardinality(") && strings.HasSuffix(lower, ")") {
			raw = strings.TrimSpace(raw[len("LowCardinality(") : len(raw)-1])
			lower = strings.ToLower(raw)
			stripped = true
		}
		if !stripped {
			break
		}
	}
	switch {
	case lower == "bool" || lower == "boolean" || lower == "uint8":
		return "tinyint(1)", warnings
	case lower == "int8":
		return "tinyint", warnings
	case lower == "uint16":
		return "smallint unsigned", warnings
	case lower == "int16":
		return "smallint", warnings
	case lower == "uint32":
		return "int unsigned", warnings
	case lower == "int32":
		return "int", warnings
	case lower == "uint64":
		return "bigint unsigned", warnings
	case lower == "int64":
		return "bigint", warnings
	case lower == "float32":
		return "float", warnings
	case lower == "float64":
		return "double", warnings
	case lower == "date":
		return "date", warnings
	case strings.HasPrefix(lower, "datetime"):
		return "datetime", warnings
	case lower == "string":
		return "text", warnings
	case lower == "uuid":
		return "char(36)", warnings
	case lower == "json", strings.HasPrefix(lower, "map("), strings.HasPrefix(lower, "array("), strings.HasPrefix(lower, "tuple("), strings.HasPrefix(lower, "nested("):
		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已降级为 json", col.Name, col.Type))
		return "json", warnings
	case clickHouseDecimalPattern.MatchString(lower):
		parts := clickHouseDecimalPattern.FindStringSubmatch(lower)
		return fmt.Sprintf("decimal(%s,%s)", parts[2], parts[3]), warnings
	case clickHouseStringArgsPattern.MatchString(lower):
		parts := clickHouseStringArgsPattern.FindStringSubmatch(lower)
		return fmt.Sprintf("varchar(%s)", parts[1]), warnings
	default:
		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 暂无专门映射,已降级为 text", col.Name, col.Type))
		return "text", warnings
	}
}

View File

@@ -1,379 +0,0 @@
package sync
import (
"GoNavi-Wails/internal/connection"
"GoNavi-Wails/internal/db"
"fmt"
"strings"
)
// Stateless planner implementations, one per supported source→target pair.
// Each satisfies MigrationPlanner: it reports a MigrationSupportLevel for a
// build context and, when chosen by resolveMigrationPlanner, builds the
// schema migration plan for that pair.
type genericLegacyPlanner struct{} // catch-all fallback via the legacy path
type mysqlToPGLikePlanner struct{}
type mysqlToClickHousePlanner struct{}
type pgLikeToClickHousePlanner struct{}
type clickHouseToMySQLPlanner struct{}
type clickHouseToPGLikePlanner struct{}
type mysqlToMongoPlanner struct{}
type pgLikeToMongoPlanner struct{}
type clickHouseToMongoPlanner struct{}
type tdengineToMongoPlanner struct{}
type mongoToMySQLPlanner struct{}
type mongoToPGLikePlanner struct{}
type pgLikeToMySQLPlanner struct{}
type tdengineToMySQLPlanner struct{}
type tdengineToPGLikePlanner struct{}
type mongoToRelationalPlanner struct{} // planning-only schema-inference fallback
// buildSchemaMigrationPlan picks the best-matching migration planner for the
// configured source/target pair and delegates plan construction to it,
// falling back to the legacy implementation when no planner is resolved.
func buildSchemaMigrationPlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
	buildCtx := MigrationBuildContext{
		Config:    config,
		TableName: tableName,
		SourceDB:  sourceDB,
		TargetDB:  targetDB,
	}
	if planner := resolveMigrationPlanner(buildCtx); planner != nil {
		return planner.BuildPlan(buildCtx)
	}
	return buildSchemaMigrationPlanLegacy(config, tableName, sourceDB, targetDB)
}
// resolveMigrationPlanner scans the registered planners and returns the one
// with the highest support rank for ctx (earlier entries win ties). Returns
// nil only if every planner ranks at Unsupported or below the baseline.
func resolveMigrationPlanner(ctx MigrationBuildContext) MigrationPlanner {
	candidates := []MigrationPlanner{
		mysqlToPGLikePlanner{},
		mySQLLikeToTDenginePlanner{},
		pgLikeToTDenginePlanner{},
		clickHouseToTDenginePlanner{},
		tdengineToTDenginePlanner{},
		tdengineToPGLikePlanner{},
		tdengineToMySQLPlanner{},
		mysqlToClickHousePlanner{},
		pgLikeToClickHousePlanner{},
		clickHouseToMySQLPlanner{},
		clickHouseToPGLikePlanner{},
		mysqlToMongoPlanner{},
		pgLikeToMongoPlanner{},
		clickHouseToMongoPlanner{},
		tdengineToMongoPlanner{},
		mongoToMySQLPlanner{},
		mongoToPGLikePlanner{},
		pgLikeToMySQLPlanner{},
		mongoToRelationalPlanner{},
		genericLegacyPlanner{},
	}
	var best MigrationPlanner
	bestLevel := MigrationSupportLevelUnsupported
	for _, candidate := range candidates {
		if level := candidate.SupportLevel(ctx); migrationSupportRank(level) > migrationSupportRank(bestLevel) {
			bestLevel = level
			best = candidate
		}
	}
	return best
}
// migrationSupportRank orders support levels for planner selection:
// Full (4) > Planned (3) > Partial (2) > everything else (1).
func migrationSupportRank(level MigrationSupportLevel) int {
	ranks := map[MigrationSupportLevel]int{
		MigrationSupportLevelFull:    4,
		MigrationSupportLevelPlanned: 3,
		MigrationSupportLevelPartial: 2,
	}
	if rank, known := ranks[level]; known {
		return rank
	}
	return 1
}
// isMySQLLikeType reports whether dbType speaks the MySQL dialect.
// Currently the same predicate as the writable-target check.
func isMySQLLikeType(dbType string) bool {
	return isMySQLLikeWritableTargetType(dbType)
}
// classifyMigrationDataModel buckets a (normalized) database type into its
// broad data model; unknown types classify as Custom.
func classifyMigrationDataModel(dbType string) MigrationDataModel {
	models := map[string]MigrationDataModel{
		"mysql":      MigrationDataModelRelational,
		"mariadb":    MigrationDataModelRelational,
		"postgres":   MigrationDataModelRelational,
		"kingbase":   MigrationDataModelRelational,
		"highgo":     MigrationDataModelRelational,
		"vastbase":   MigrationDataModelRelational,
		"oracle":     MigrationDataModelRelational,
		"sqlserver":  MigrationDataModelRelational,
		"dameng":     MigrationDataModelRelational,
		"sqlite":     MigrationDataModelRelational,
		"duckdb":     MigrationDataModelRelational,
		"mongodb":    MigrationDataModelDocument,
		"clickhouse": MigrationDataModelColumnar,
		"diros":      MigrationDataModelColumnar,
		"sphinx":     MigrationDataModelColumnar,
		"tdengine":   MigrationDataModelTimeSeries,
		"redis":      MigrationDataModelKeyValue,
	}
	if model, known := models[normalizeMigrationDBType(dbType)]; known {
		return model
	}
	return MigrationDataModelCustom
}
// Name identifies the catch-all legacy planner.
func (genericLegacyPlanner) Name() string {
	return "generic-legacy-planner"
}

// SupportLevel is always Partial: the legacy path can attempt any pair, but
// only as a baseline so dedicated planners outrank it.
func (genericLegacyPlanner) SupportLevel(MigrationBuildContext) MigrationSupportLevel {
	return MigrationSupportLevelPartial
}

// BuildPlan delegates to the pre-planner legacy implementation.
func (genericLegacyPlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
	return buildSchemaMigrationPlanLegacy(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB)
}
// Name identifies the MySQL→PG-like planner.
func (mysqlToPGLikePlanner) Name() string {
	return "mysql-pglike-planner"
}

// SupportLevel is Full for MySQL-like sources targeting PG-like databases.
func (mysqlToPGLikePlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel {
	src := resolveMigrationDBType(ctx.Config.SourceConfig)
	dst := resolveMigrationDBType(ctx.Config.TargetConfig)
	if !isMySQLLikeSourceType(src) || !isPGLikeTarget(dst) {
		return MigrationSupportLevelUnsupported
	}
	return MigrationSupportLevelFull
}

// BuildPlan delegates to the MySQL→PG-like plan builder.
func (mysqlToPGLikePlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
	return buildMySQLToPGLikePlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB)
}
// Name identifies the TDengine→MySQL planner.
func (tdengineToMySQLPlanner) Name() string {
	return "tdengine-mysql-planner"
}

// SupportLevel is Full for TDengine sources targeting MySQL-like writables.
func (tdengineToMySQLPlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel {
	src := resolveMigrationDBType(ctx.Config.SourceConfig)
	dst := resolveMigrationDBType(ctx.Config.TargetConfig)
	if src != "tdengine" || !isMySQLLikeWritableTargetType(dst) {
		return MigrationSupportLevelUnsupported
	}
	return MigrationSupportLevelFull
}

// BuildPlan delegates to the TDengine→MySQL plan builder.
func (tdengineToMySQLPlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
	return buildTDengineToMySQLPlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB)
}
// Name identifies the TDengine→PG-like planner.
func (tdengineToPGLikePlanner) Name() string {
	return "tdengine-pglike-planner"
}

// SupportLevel is Full for TDengine sources targeting PG-like databases.
func (tdengineToPGLikePlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel {
	src := resolveMigrationDBType(ctx.Config.SourceConfig)
	dst := resolveMigrationDBType(ctx.Config.TargetConfig)
	if src != "tdengine" || !isPGLikeTarget(dst) {
		return MigrationSupportLevelUnsupported
	}
	return MigrationSupportLevelFull
}

// BuildPlan delegates to the TDengine→PG-like plan builder.
func (tdengineToPGLikePlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
	return buildTDengineToPGLikePlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB)
}
// Name identifies the MySQL→ClickHouse planner.
func (mysqlToClickHousePlanner) Name() string {
	return "mysql-clickhouse-planner"
}

// SupportLevel is Full for core MySQL sources targeting ClickHouse.
func (mysqlToClickHousePlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel {
	src := resolveMigrationDBType(ctx.Config.SourceConfig)
	dst := resolveMigrationDBType(ctx.Config.TargetConfig)
	if !isMySQLCoreType(src) || dst != "clickhouse" {
		return MigrationSupportLevelUnsupported
	}
	return MigrationSupportLevelFull
}

// BuildPlan delegates to the MySQL→ClickHouse plan builder.
func (mysqlToClickHousePlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
	return buildMySQLToClickHousePlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB)
}
// Name identifies the PG-like→ClickHouse planner.
func (pgLikeToClickHousePlanner) Name() string {
	return "pglike-clickhouse-planner"
}

// SupportLevel is Full for PG-like sources targeting ClickHouse.
func (pgLikeToClickHousePlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel {
	src := resolveMigrationDBType(ctx.Config.SourceConfig)
	dst := resolveMigrationDBType(ctx.Config.TargetConfig)
	if !isPGLikeSource(src) || dst != "clickhouse" {
		return MigrationSupportLevelUnsupported
	}
	return MigrationSupportLevelFull
}

// BuildPlan delegates to the PG-like→ClickHouse plan builder.
func (pgLikeToClickHousePlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
	return buildPGLikeToClickHousePlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB)
}
// Name identifies the ClickHouse→MySQL planner.
func (clickHouseToMySQLPlanner) Name() string {
	return "clickhouse-mysql-planner"
}

// SupportLevel is Full for ClickHouse sources targeting MySQL-like writables.
func (clickHouseToMySQLPlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel {
	src := resolveMigrationDBType(ctx.Config.SourceConfig)
	dst := resolveMigrationDBType(ctx.Config.TargetConfig)
	if src != "clickhouse" || !isMySQLLikeWritableTargetType(dst) {
		return MigrationSupportLevelUnsupported
	}
	return MigrationSupportLevelFull
}

// BuildPlan delegates to the ClickHouse→MySQL plan builder.
func (clickHouseToMySQLPlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
	return buildClickHouseToMySQLPlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB)
}
// Name identifies the ClickHouse→PG-like planner.
func (clickHouseToPGLikePlanner) Name() string {
	return "clickhouse-pglike-planner"
}

// SupportLevel is Full for ClickHouse sources targeting PG-like databases.
func (clickHouseToPGLikePlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel {
	src := resolveMigrationDBType(ctx.Config.SourceConfig)
	dst := resolveMigrationDBType(ctx.Config.TargetConfig)
	if src != "clickhouse" || !isPGLikeTarget(dst) {
		return MigrationSupportLevelUnsupported
	}
	return MigrationSupportLevelFull
}

// BuildPlan delegates to the ClickHouse→PG-like plan builder.
func (clickHouseToPGLikePlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
	return buildClickHouseToPGLikePlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB)
}
// Name identifies the MySQL→MongoDB planner.
func (mysqlToMongoPlanner) Name() string {
	return "mysql-mongo-planner"
}

// SupportLevel is Full for core MySQL sources targeting MongoDB.
func (mysqlToMongoPlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel {
	src := resolveMigrationDBType(ctx.Config.SourceConfig)
	dst := resolveMigrationDBType(ctx.Config.TargetConfig)
	if !isMySQLCoreType(src) || dst != "mongodb" {
		return MigrationSupportLevelUnsupported
	}
	return MigrationSupportLevelFull
}

// BuildPlan delegates to the MySQL→MongoDB plan builder.
func (mysqlToMongoPlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
	return buildMySQLToMongoPlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB)
}
// Name identifies the PG-like→MongoDB planner.
func (pgLikeToMongoPlanner) Name() string {
	return "pglike-mongo-planner"
}

// SupportLevel is Full for PG-like sources targeting MongoDB.
func (pgLikeToMongoPlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel {
	src := resolveMigrationDBType(ctx.Config.SourceConfig)
	dst := resolveMigrationDBType(ctx.Config.TargetConfig)
	if !isPGLikeSource(src) || dst != "mongodb" {
		return MigrationSupportLevelUnsupported
	}
	return MigrationSupportLevelFull
}

// BuildPlan delegates to the PG-like→MongoDB plan builder.
func (pgLikeToMongoPlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
	return buildPGLikeToMongoPlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB)
}
// Name identifies the ClickHouse→MongoDB planner.
func (clickHouseToMongoPlanner) Name() string {
	return "clickhouse-mongo-planner"
}

// SupportLevel is Full for ClickHouse sources targeting MongoDB.
func (clickHouseToMongoPlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel {
	src := resolveMigrationDBType(ctx.Config.SourceConfig)
	dst := resolveMigrationDBType(ctx.Config.TargetConfig)
	if src != "clickhouse" || dst != "mongodb" {
		return MigrationSupportLevelUnsupported
	}
	return MigrationSupportLevelFull
}

// BuildPlan delegates to the ClickHouse→MongoDB plan builder.
func (clickHouseToMongoPlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
	return buildClickHouseToMongoPlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB)
}
// Name identifies the TDengine→MongoDB planner.
func (tdengineToMongoPlanner) Name() string {
	return "tdengine-mongo-planner"
}

// SupportLevel is Full for TDengine sources targeting MongoDB.
func (tdengineToMongoPlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel {
	src := resolveMigrationDBType(ctx.Config.SourceConfig)
	dst := resolveMigrationDBType(ctx.Config.TargetConfig)
	if src != "tdengine" || dst != "mongodb" {
		return MigrationSupportLevelUnsupported
	}
	return MigrationSupportLevelFull
}

// BuildPlan delegates to the TDengine→MongoDB plan builder.
func (tdengineToMongoPlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
	return buildTDengineToMongoPlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB)
}
// Name identifies the MongoDB→MySQL planner.
func (mongoToMySQLPlanner) Name() string {
	return "mongo-mysql-planner"
}

// SupportLevel is Full for MongoDB sources targeting MySQL-like writables.
func (mongoToMySQLPlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel {
	src := resolveMigrationDBType(ctx.Config.SourceConfig)
	dst := resolveMigrationDBType(ctx.Config.TargetConfig)
	if src != "mongodb" || !isMySQLLikeWritableTargetType(dst) {
		return MigrationSupportLevelUnsupported
	}
	return MigrationSupportLevelFull
}

// BuildPlan delegates to the MongoDB→MySQL plan builder.
func (mongoToMySQLPlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
	return buildMongoToMySQLPlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB)
}
// Name identifies the MongoDB→PG-like planner.
func (mongoToPGLikePlanner) Name() string {
	return "mongo-pglike-planner"
}

// SupportLevel is Full for MongoDB sources targeting PG-like databases.
func (mongoToPGLikePlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel {
	src := resolveMigrationDBType(ctx.Config.SourceConfig)
	dst := resolveMigrationDBType(ctx.Config.TargetConfig)
	if src != "mongodb" || !isPGLikeTarget(dst) {
		return MigrationSupportLevelUnsupported
	}
	return MigrationSupportLevelFull
}

// BuildPlan delegates to the MongoDB→PG-like plan builder.
func (mongoToPGLikePlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
	return buildMongoToPGLikePlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB)
}
// Name identifies the PG-like→MySQL planner.
func (pgLikeToMySQLPlanner) Name() string {
	return "pglike-mysql-planner"
}

// SupportLevel is Full for PG-like sources targeting MySQL-like writables.
func (pgLikeToMySQLPlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel {
	src := resolveMigrationDBType(ctx.Config.SourceConfig)
	dst := resolveMigrationDBType(ctx.Config.TargetConfig)
	if !isPGLikeSource(src) || !isMySQLLikeWritableTargetType(dst) {
		return MigrationSupportLevelUnsupported
	}
	return MigrationSupportLevelFull
}

// BuildPlan delegates to the PG-like→MySQL plan builder.
func (pgLikeToMySQLPlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
	return buildPGLikeToMySQLPlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB)
}
// Name identifies the schema-inference fallback for document→relational pairs.
func (mongoToRelationalPlanner) Name() string {
	return "mongo-relational-inference-planner"
}

// SupportLevel is Planned (planning-only) when schema inference applies to
// the pair, Unsupported otherwise.
func (mongoToRelationalPlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel {
	src := resolveMigrationDBType(ctx.Config.SourceConfig)
	dst := resolveMigrationDBType(ctx.Config.TargetConfig)
	if shouldUseSchemaInference(src, dst) {
		return MigrationSupportLevelPlanned
	}
	return MigrationSupportLevelUnsupported
}

// BuildPlan runs schema inference and returns a planning-only plan: table
// names are resolved, no DDL is generated, and inference issues surface as
// plan warnings. Column lists are nil because nothing is migrated yet.
func (mongoToRelationalPlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
	src := resolveMigrationDBType(ctx.Config.SourceConfig)
	dst := resolveMigrationDBType(ctx.Config.TargetConfig)
	inference, err := inferSchemaForPair(src, dst, ctx.TableName)
	if err != nil {
		return SchemaMigrationPlan{}, nil, nil, err
	}
	var plan SchemaMigrationPlan
	plan.SourceSchema, plan.SourceTable = normalizeSchemaAndTable(src, ctx.Config.SourceConfig.Database, ctx.TableName)
	plan.TargetSchema, plan.TargetTable = normalizeSchemaAndTable(dst, ctx.Config.TargetConfig.Database, ctx.TableName)
	plan.SourceQueryTable = qualifiedNameForQuery(src, plan.SourceSchema, plan.SourceTable, ctx.TableName)
	plan.TargetQueryTable = qualifiedNameForQuery(dst, plan.TargetSchema, plan.TargetTable, ctx.TableName)
	plan.PlannedAction = "当前库对已进入迁移内核规划阶段,等待 schema 推断与目标方言生成器落地"
	for _, issue := range inference.Issues {
		if msg := strings.TrimSpace(issue.Message); msg != "" {
			plan.Warnings = append(plan.Warnings, msg)
		}
	}
	plan.Warnings = append(plan.Warnings, fmt.Sprintf("迁移对象=%s目标类型=%s当前仅提供规划入口暂不执行自动建表", inference.Object.Kind, dst))
	return dedupeSchemaMigrationPlan(plan), nil, nil, nil
}

View File

@@ -1,447 +0,0 @@
package sync
import (
"GoNavi-Wails/internal/connection"
"strings"
"testing"
)
// TestClassifyMigrationDataModel verifies the db-type → data-model mapping
// for a representative sample of each model category.
func TestClassifyMigrationDataModel(t *testing.T) {
	t.Parallel()
	expectations := map[string]MigrationDataModel{
		"mysql":      MigrationDataModelRelational,
		"postgres":   MigrationDataModelRelational,
		"kingbase":   MigrationDataModelRelational,
		"mongodb":    MigrationDataModelDocument,
		"clickhouse": MigrationDataModelColumnar,
		"tdengine":   MigrationDataModelTimeSeries,
		"redis":      MigrationDataModelKeyValue,
		"custom":     MigrationDataModelCustom,
	}
	for dbType, expected := range expectations {
		dbType, expected := dbType, expected // capture for parallel subtests
		t.Run(dbType, func(t *testing.T) {
			t.Parallel()
			if actual := classifyMigrationDataModel(dbType); actual != expected {
				t.Fatalf("unexpected data model, input=%s got=%s want=%s", dbType, actual, expected)
			}
		})
	}
}
// Verifies mysql -> kingbase resolves to the "mysql-pglike-planner".
func TestResolveMigrationPlanner_PrefersMySQLKingbasePlanner(t *testing.T) {
	t.Parallel()
	planner := resolveMigrationPlanner(MigrationBuildContext{
		Config: SyncConfig{
			SourceConfig: connection.ConnectionConfig{Type: "mysql"},
			TargetConfig: connection.ConnectionConfig{Type: "kingbase"},
		},
	})
	if planner == nil {
		t.Fatalf("expected planner")
	}
	if planner.Name() != "mysql-pglike-planner" {
		t.Fatalf("unexpected planner: %s", planner.Name())
	}
}
// Verifies mongodb -> mysql resolves to the "mongo-mysql-planner".
func TestResolveMigrationPlanner_UsesSchemaInferencePlannerForMongoToMySQL(t *testing.T) {
	t.Parallel()
	planner := resolveMigrationPlanner(MigrationBuildContext{
		Config: SyncConfig{
			SourceConfig: connection.ConnectionConfig{Type: "mongodb"},
			TargetConfig: connection.ConnectionConfig{Type: "mysql"},
		},
	})
	if planner == nil {
		t.Fatalf("expected planner")
	}
	if planner.Name() != "mongo-mysql-planner" {
		t.Fatalf("unexpected planner: %s", planner.Name())
	}
}
// Verifies that schema inference for mongodb -> mysql flags the result for
// review, keeps the object name, and reports a "schema 推断" issue message.
func TestInferSchemaForPair_MongoToMySQLReturnsPlannedWarning(t *testing.T) {
	t.Parallel()
	result, err := inferSchemaForPair("mongodb", "mysql", "users")
	if err != nil {
		t.Fatalf("inferSchemaForPair returned error: %v", err)
	}
	if !result.NeedsReview {
		t.Fatalf("expected needs review")
	}
	if result.Object.Name != "users" {
		t.Fatalf("unexpected object name: %s", result.Object.Name)
	}
	if len(result.Issues) == 0 || !strings.Contains(result.Issues[0].Message, "schema 推断") {
		t.Fatalf("unexpected issues: %+v", result.Issues)
	}
}
// Verifies kingbase -> mysql resolves to the "pglike-mysql-planner".
func TestResolveMigrationPlanner_UsesPGLikeMySQLPlannerForKingbaseToMySQL(t *testing.T) {
	t.Parallel()
	planner := resolveMigrationPlanner(MigrationBuildContext{
		Config: SyncConfig{
			SourceConfig: connection.ConnectionConfig{Type: "kingbase"},
			TargetConfig: connection.ConnectionConfig{Type: "mysql"},
		},
	})
	if planner == nil {
		t.Fatalf("expected planner")
	}
	if planner.Name() != "pglike-mysql-planner" {
		t.Fatalf("unexpected planner: %s", planner.Name())
	}
}
// Verifies mysql -> postgres resolves to the "mysql-pglike-planner".
func TestResolveMigrationPlanner_UsesMySQLPGLikePlannerForMySQLToPostgres(t *testing.T) {
	t.Parallel()
	planner := resolveMigrationPlanner(MigrationBuildContext{
		Config: SyncConfig{
			SourceConfig: connection.ConnectionConfig{Type: "mysql"},
			TargetConfig: connection.ConnectionConfig{Type: "postgres"},
		},
	})
	if planner == nil {
		t.Fatalf("expected planner")
	}
	if planner.Name() != "mysql-pglike-planner" {
		t.Fatalf("unexpected planner: %s", planner.Name())
	}
}
// Verifies mysql -> clickhouse resolves to the "mysql-clickhouse-planner".
func TestResolveMigrationPlanner_UsesMySQLClickHousePlanner(t *testing.T) {
	t.Parallel()
	planner := resolveMigrationPlanner(MigrationBuildContext{
		Config: SyncConfig{
			SourceConfig: connection.ConnectionConfig{Type: "mysql"},
			TargetConfig: connection.ConnectionConfig{Type: "clickhouse"},
		},
	})
	if planner == nil {
		t.Fatalf("expected planner")
	}
	if planner.Name() != "mysql-clickhouse-planner" {
		t.Fatalf("unexpected planner: %s", planner.Name())
	}
}
// Verifies clickhouse -> mysql resolves to the "clickhouse-mysql-planner".
func TestResolveMigrationPlanner_UsesClickHouseMySQLPlanner(t *testing.T) {
	t.Parallel()
	planner := resolveMigrationPlanner(MigrationBuildContext{
		Config: SyncConfig{
			SourceConfig: connection.ConnectionConfig{Type: "clickhouse"},
			TargetConfig: connection.ConnectionConfig{Type: "mysql"},
		},
	})
	if planner == nil {
		t.Fatalf("expected planner")
	}
	if planner.Name() != "clickhouse-mysql-planner" {
		t.Fatalf("unexpected planner: %s", planner.Name())
	}
}
// Verifies mysql -> mongodb resolves to the "mysql-mongo-planner".
func TestResolveMigrationPlanner_UsesMySQLMongoPlanner(t *testing.T) {
	t.Parallel()
	planner := resolveMigrationPlanner(MigrationBuildContext{
		Config: SyncConfig{
			SourceConfig: connection.ConnectionConfig{Type: "mysql"},
			TargetConfig: connection.ConnectionConfig{Type: "mongodb"},
		},
	})
	if planner == nil || planner.Name() != "mysql-mongo-planner" {
		t.Fatalf("unexpected planner: %v", planner)
	}
}
// Verifies mongodb -> mysql resolves to the "mongo-mysql-planner".
func TestResolveMigrationPlanner_UsesMongoMySQLPlanner(t *testing.T) {
	t.Parallel()
	planner := resolveMigrationPlanner(MigrationBuildContext{
		Config: SyncConfig{
			SourceConfig: connection.ConnectionConfig{Type: "mongodb"},
			TargetConfig: connection.ConnectionConfig{Type: "mysql"},
		},
	})
	if planner == nil || planner.Name() != "mongo-mysql-planner" {
		t.Fatalf("unexpected planner: %v", planner)
	}
}
// Verifies mongodb -> postgres resolves to the "mongo-pglike-planner".
func TestResolveMigrationPlanner_UsesMongoPGLikePlanner(t *testing.T) {
	t.Parallel()
	planner := resolveMigrationPlanner(MigrationBuildContext{
		Config: SyncConfig{
			SourceConfig: connection.ConnectionConfig{Type: "mongodb"},
			TargetConfig: connection.ConnectionConfig{Type: "postgres"},
		},
	})
	if planner == nil || planner.Name() != "mongo-pglike-planner" {
		t.Fatalf("unexpected planner: %v", planner)
	}
}
// Verifies postgres -> mongodb resolves to the "pglike-mongo-planner".
func TestResolveMigrationPlanner_UsesPGLikeMongoPlanner(t *testing.T) {
	t.Parallel()
	planner := resolveMigrationPlanner(MigrationBuildContext{
		Config: SyncConfig{
			SourceConfig: connection.ConnectionConfig{Type: "postgres"},
			TargetConfig: connection.ConnectionConfig{Type: "mongodb"},
		},
	})
	if planner == nil || planner.Name() != "pglike-mongo-planner" {
		t.Fatalf("unexpected planner: %v", planner)
	}
}
// Verifies clickhouse -> mongodb resolves to the "clickhouse-mongo-planner".
func TestResolveMigrationPlanner_UsesClickHouseMongoPlanner(t *testing.T) {
	t.Parallel()
	planner := resolveMigrationPlanner(MigrationBuildContext{
		Config: SyncConfig{
			SourceConfig: connection.ConnectionConfig{Type: "clickhouse"},
			TargetConfig: connection.ConnectionConfig{Type: "mongodb"},
		},
	})
	if planner == nil || planner.Name() != "clickhouse-mongo-planner" {
		t.Fatalf("unexpected planner: %v", planner)
	}
}
// Verifies tdengine -> mongodb resolves to the "tdengine-mongo-planner".
func TestResolveMigrationPlanner_UsesTDengineMongoPlanner(t *testing.T) {
	t.Parallel()
	planner := resolveMigrationPlanner(MigrationBuildContext{
		Config: SyncConfig{
			SourceConfig: connection.ConnectionConfig{Type: "tdengine"},
			TargetConfig: connection.ConnectionConfig{Type: "mongodb"},
		},
	})
	if planner == nil || planner.Name() != "tdengine-mongo-planner" {
		t.Fatalf("unexpected planner: %v", planner)
	}
}
// Verifies diros -> postgres falls back to the "mysql-pglike-planner",
// i.e. diros is treated as a MySQL-like source here.
func TestResolveMigrationPlanner_UsesMySQLPGLikePlannerForDirosToPostgres(t *testing.T) {
	t.Parallel()
	planner := resolveMigrationPlanner(MigrationBuildContext{
		Config: SyncConfig{
			SourceConfig: connection.ConnectionConfig{Type: "diros"},
			TargetConfig: connection.ConnectionConfig{Type: "postgres"},
		},
	})
	if planner == nil || planner.Name() != "mysql-pglike-planner" {
		t.Fatalf("unexpected planner: %v", planner)
	}
}
// Verifies postgres -> diros resolves to the "pglike-mysql-planner",
// i.e. diros is treated as a MySQL-like target here.
func TestResolveMigrationPlanner_UsesPGLikeMySQLPlannerForPostgresToDiros(t *testing.T) {
	t.Parallel()
	planner := resolveMigrationPlanner(MigrationBuildContext{
		Config: SyncConfig{
			SourceConfig: connection.ConnectionConfig{Type: "postgres"},
			TargetConfig: connection.ConnectionConfig{Type: "diros"},
		},
	})
	if planner == nil || planner.Name() != "pglike-mysql-planner" {
		t.Fatalf("unexpected planner: %v", planner)
	}
}
// Verifies mysql -> duckdb resolves to the "mysql-pglike-planner",
// i.e. duckdb is treated as a PG-like target.
func TestResolveMigrationPlanner_UsesMySQLPGLikePlannerForMySQLToDuckDB(t *testing.T) {
	t.Parallel()
	planner := resolveMigrationPlanner(MigrationBuildContext{
		Config: SyncConfig{
			SourceConfig: connection.ConnectionConfig{Type: "mysql"},
			TargetConfig: connection.ConnectionConfig{Type: "duckdb"},
		},
	})
	if planner == nil || planner.Name() != "mysql-pglike-planner" {
		t.Fatalf("unexpected planner: %v", planner)
	}
}
// Verifies postgres -> clickhouse resolves to the "pglike-clickhouse-planner".
func TestResolveMigrationPlanner_UsesPGLikeClickHousePlanner(t *testing.T) {
	t.Parallel()
	planner := resolveMigrationPlanner(MigrationBuildContext{
		Config: SyncConfig{
			SourceConfig: connection.ConnectionConfig{Type: "postgres"},
			TargetConfig: connection.ConnectionConfig{Type: "clickhouse"},
		},
	})
	if planner == nil || planner.Name() != "pglike-clickhouse-planner" {
		t.Fatalf("unexpected planner: %v", planner)
	}
}
// Verifies duckdb -> mysql resolves to the "pglike-mysql-planner",
// i.e. duckdb is treated as a PG-like source.
func TestResolveMigrationPlanner_UsesPGLikeMySQLPlannerForDuckDBToMySQL(t *testing.T) {
	t.Parallel()
	planner := resolveMigrationPlanner(MigrationBuildContext{
		Config: SyncConfig{
			SourceConfig: connection.ConnectionConfig{Type: "duckdb"},
			TargetConfig: connection.ConnectionConfig{Type: "mysql"},
		},
	})
	if planner == nil || planner.Name() != "pglike-mysql-planner" {
		t.Fatalf("unexpected planner: %v", planner)
	}
}
// Verifies sphinx -> postgres resolves to the "mysql-pglike-planner",
// i.e. sphinx is treated as a MySQL-like source.
func TestResolveMigrationPlanner_UsesMySQLPGLikePlannerForSphinxToPostgres(t *testing.T) {
	t.Parallel()
	planner := resolveMigrationPlanner(MigrationBuildContext{
		Config: SyncConfig{
			SourceConfig: connection.ConnectionConfig{Type: "sphinx"},
			TargetConfig: connection.ConnectionConfig{Type: "postgres"},
		},
	})
	if planner == nil || planner.Name() != "mysql-pglike-planner" {
		t.Fatalf("unexpected planner: %v", planner)
	}
}
// Verifies a "custom" source with the kingbase8 driver is recognized as
// PG-like, resolving custom(kingbase8) -> mysql to "pglike-mysql-planner".
func TestResolveMigrationPlanner_UsesPGLikeMySQLPlannerForCustomKingbaseToMySQL(t *testing.T) {
	t.Parallel()
	planner := resolveMigrationPlanner(MigrationBuildContext{
		Config: SyncConfig{
			SourceConfig: connection.ConnectionConfig{Type: "custom", Driver: "kingbase8"},
			TargetConfig: connection.ConnectionConfig{Type: "mysql"},
		},
	})
	if planner == nil || planner.Name() != "pglike-mysql-planner" {
		t.Fatalf("unexpected planner: %v", planner)
	}
}
// Verifies a "custom" target with the postgresql driver is recognized as
// PG-like, resolving mysql -> custom(postgresql) to "mysql-pglike-planner".
func TestResolveMigrationPlanner_UsesMySQLPGLikePlannerForMySQLToCustomPostgres(t *testing.T) {
	t.Parallel()
	planner := resolveMigrationPlanner(MigrationBuildContext{
		Config: SyncConfig{
			SourceConfig: connection.ConnectionConfig{Type: "mysql"},
			TargetConfig: connection.ConnectionConfig{Type: "custom", Driver: "postgresql"},
		},
	})
	if planner == nil || planner.Name() != "mysql-pglike-planner" {
		t.Fatalf("unexpected planner: %v", planner)
	}
}
// Verifies tdengine -> mysql resolves to the "tdengine-mysql-planner".
func TestResolveMigrationPlanner_UsesTDengineMySQLPlanner(t *testing.T) {
	t.Parallel()
	planner := resolveMigrationPlanner(MigrationBuildContext{
		Config: SyncConfig{
			SourceConfig: connection.ConnectionConfig{Type: "tdengine"},
			TargetConfig: connection.ConnectionConfig{Type: "mysql"},
		},
	})
	if planner == nil || planner.Name() != "tdengine-mysql-planner" {
		t.Fatalf("unexpected planner: %v", planner)
	}
}
// Verifies tdengine -> kingbase (a PG-like target) resolves to the
// "tdengine-pglike-planner".
func TestResolveMigrationPlanner_UsesTDenginePGLikePlanner(t *testing.T) {
	t.Parallel()
	planner := resolveMigrationPlanner(MigrationBuildContext{
		Config: SyncConfig{
			SourceConfig: connection.ConnectionConfig{Type: "tdengine"},
			TargetConfig: connection.ConnectionConfig{Type: "kingbase"},
		},
	})
	if planner == nil || planner.Name() != "tdengine-pglike-planner" {
		t.Fatalf("unexpected planner: %v", planner)
	}
}
// Verifies mysql -> tdengine resolves to the "mysqllike-tdengine-planner".
func TestResolveMigrationPlanner_UsesMySQLLikeTDenginePlanner(t *testing.T) {
	t.Parallel()
	planner := resolveMigrationPlanner(MigrationBuildContext{
		Config: SyncConfig{
			SourceConfig: connection.ConnectionConfig{Type: "mysql"},
			TargetConfig: connection.ConnectionConfig{Type: "tdengine"},
		},
	})
	if planner == nil || planner.Name() != "mysqllike-tdengine-planner" {
		t.Fatalf("unexpected planner: %v", planner)
	}
}
// Verifies postgres -> tdengine resolves to the "pglike-tdengine-planner".
func TestResolveMigrationPlanner_UsesPGLikeTDenginePlanner(t *testing.T) {
	t.Parallel()
	planner := resolveMigrationPlanner(MigrationBuildContext{
		Config: SyncConfig{
			SourceConfig: connection.ConnectionConfig{Type: "postgres"},
			TargetConfig: connection.ConnectionConfig{Type: "tdengine"},
		},
	})
	if planner == nil || planner.Name() != "pglike-tdengine-planner" {
		t.Fatalf("unexpected planner: %v", planner)
	}
}
// Verifies clickhouse -> tdengine resolves to the "clickhouse-tdengine-planner".
func TestResolveMigrationPlanner_UsesClickHouseTDenginePlanner(t *testing.T) {
	t.Parallel()
	planner := resolveMigrationPlanner(MigrationBuildContext{
		Config: SyncConfig{
			SourceConfig: connection.ConnectionConfig{Type: "clickhouse"},
			TargetConfig: connection.ConnectionConfig{Type: "tdengine"},
		},
	})
	if planner == nil || planner.Name() != "clickhouse-tdengine-planner" {
		t.Fatalf("unexpected planner: %v", planner)
	}
}
// Verifies clickhouse -> postgres resolves to the "clickhouse-pglike-planner".
func TestResolveMigrationPlanner_UsesClickHousePGLikePlanner(t *testing.T) {
	t.Parallel()
	planner := resolveMigrationPlanner(MigrationBuildContext{
		Config: SyncConfig{
			SourceConfig: connection.ConnectionConfig{Type: "clickhouse"},
			TargetConfig: connection.ConnectionConfig{Type: "postgres"},
		},
	})
	if planner == nil || planner.Name() != "clickhouse-pglike-planner" {
		t.Fatalf("unexpected planner: %v", planner)
	}
}
// Verifies the same-engine pair tdengine -> tdengine resolves to the
// "tdengine-tdengine-planner".
func TestResolveMigrationPlanner_UsesTDengineTDenginePlanner(t *testing.T) {
	t.Parallel()
	planner := resolveMigrationPlanner(MigrationBuildContext{
		Config: SyncConfig{
			SourceConfig: connection.ConnectionConfig{Type: "tdengine"},
			TargetConfig: connection.ConnectionConfig{Type: "tdengine"},
		},
	})
	if planner == nil || planner.Name() != "tdengine-tdengine-planner" {
		t.Fatalf("unexpected planner: %v", planner)
	}
}

View File

@@ -1,104 +0,0 @@
package sync
import (
"GoNavi-Wails/internal/connection"
"GoNavi-Wails/internal/db"
)
// MigrationDataModel classifies a database engine by its data model
// (relational, document, columnar, time-series, key-value, or custom).
type MigrationDataModel string

const (
	MigrationDataModelRelational MigrationDataModel = "relational"
	MigrationDataModelDocument   MigrationDataModel = "document"
	MigrationDataModelColumnar   MigrationDataModel = "columnar"
	MigrationDataModelTimeSeries MigrationDataModel = "timeseries"
	MigrationDataModelKeyValue   MigrationDataModel = "keyvalue"
	MigrationDataModelCustom     MigrationDataModel = "custom"
)
// MigrationObjectKind identifies the kind of object being migrated:
// a relational table, a document-store collection, or a keyspace.
type MigrationObjectKind string

const (
	MigrationObjectKindTable      MigrationObjectKind = "table"
	MigrationObjectKindCollection MigrationObjectKind = "collection"
	MigrationObjectKindKeyspace   MigrationObjectKind = "keyspace"
)
// MigrationSupportLevel expresses how completely a planner supports a given
// source/target pair: full, partial, planned (not yet implemented), or
// unsupported.
type MigrationSupportLevel string

const (
	MigrationSupportLevelFull        MigrationSupportLevel = "full"
	MigrationSupportLevelPartial     MigrationSupportLevel = "partial"
	MigrationSupportLevelPlanned     MigrationSupportLevel = "planned"
	MigrationSupportLevelUnsupported MigrationSupportLevel = "unsupported"
)
// CanonicalFieldSpec is the dialect-neutral description of one column/field
// produced by schema inference.
type CanonicalFieldSpec struct {
	Name          string  // field name
	SourceType    string  // type name as reported by the source engine
	CanonicalType string  // engine-neutral canonical type name
	Nullable      bool    // whether NULL/missing values are allowed
	DefaultValue  *string // nil when no default is defined
	AutoIncrement bool    // whether the source marks this as auto-increment
	Comment       string  // source-side column comment, if any
	NestedPath    string  // path for nested/document fields (e.g. dotted key) — confirm format with producer
	Confidence    float64 // inference confidence for this field's type
}
// CanonicalIndexSpec is the dialect-neutral description of one index.
type CanonicalIndexSpec struct {
	Name            string   // index name
	Kind            string   // index kind/type as reported by the source
	Columns         []string // indexed column names, in order
	Expression      string   // expression for functional indexes, if any
	PrefixLength    int      // prefix length (MySQL-style), 0 when unused
	Supported       bool     // whether the target dialect can represent it
	DegradeStrategy string   // how to degrade when not fully supported
	Unique          bool     // uniqueness constraint flag
}
// CanonicalConstraintSpec is the dialect-neutral description of one
// table-level constraint.
type CanonicalConstraintSpec struct {
	Name    string   // constraint name
	Kind    string   // constraint kind (e.g. primary key, foreign key) — exact vocabulary set by producers
	Columns []string // constrained column names
	RefName string   // referenced object name, for referential constraints
}
// CanonicalObjectSpec is the engine-neutral description of a migratable
// object (table/collection/keyspace) assembled by schema inference.
type CanonicalObjectSpec struct {
	Name        string                    // object name
	Schema      string                    // owning schema/database, if any
	Kind        MigrationObjectKind       // table, collection, or keyspace
	Fields      []CanonicalFieldSpec      // inferred columns/fields
	PrimaryKey  []string                  // primary-key column names
	Indexes     []CanonicalIndexSpec      // secondary indexes
	Constraints []CanonicalConstraintSpec // other constraints
	Comments    []string                  // object-level comments
	SourceHints map[string]string         // free-form engine-specific hints
}
// SchemaInferenceIssue records one problem found while inferring a schema,
// with an optional suggested resolution.
type SchemaInferenceIssue struct {
	Field      string // affected field name, empty for object-level issues
	Level      string // severity label (e.g. warning/error) — vocabulary set by producers
	Message    string // human-readable description
	Resolution string // suggested remediation, if any
}
// SchemaInferenceResult is the outcome of inferring a canonical schema for a
// migration pair.
type SchemaInferenceResult struct {
	Object      CanonicalObjectSpec    // the inferred object description
	Issues      []SchemaInferenceIssue // problems encountered during inference
	SampleSize  int                    // number of samples examined
	Confidence  float64                // overall inference confidence
	NeedsReview bool                   // true when a human should verify the result
}
// MigrationBuildContext bundles everything a MigrationPlanner needs to build
// a plan for one table: the sync configuration, the table name, and live
// handles to the source and target databases.
type MigrationBuildContext struct {
	Config    SyncConfig  // full sync configuration (source/target, strategies)
	TableName string      // object being migrated
	SourceDB  db.Database // connected source database
	TargetDB  db.Database // connected target database
}
// MigrationPlanner builds a schema migration plan for one source/target pair.
// Name returns a stable identifier used by routing tests; SupportLevel
// reports how completely the pair is supported; BuildPlan returns the plan
// plus the source and target column definitions (either may be nil).
type MigrationPlanner interface {
	Name() string
	SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel
	BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error)
}

View File

@@ -1,603 +0,0 @@
package sync
import (
"GoNavi-Wails/internal/connection"
"GoNavi-Wails/internal/db"
"encoding/json"
"fmt"
"sort"
"strings"
"time"
)
// buildMySQLToMongoPlan plans a MySQL -> MongoDB migration; it delegates to
// the shared tabular-to-Mongo builder.
func buildMySQLToMongoPlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
	return buildTabularToMongoPlan(config, tableName, sourceDB, targetDB)
}
// buildPGLikeToMongoPlan plans a PG-like -> MongoDB migration; it delegates
// to the shared tabular-to-Mongo builder.
func buildPGLikeToMongoPlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
	return buildTabularToMongoPlan(config, tableName, sourceDB, targetDB)
}
// buildClickHouseToMongoPlan plans a ClickHouse -> MongoDB migration; it
// delegates to the shared tabular-to-Mongo builder.
func buildClickHouseToMongoPlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
	return buildTabularToMongoPlan(config, tableName, sourceDB, targetDB)
}
// buildTDengineToMongoPlan plans a TDengine -> MongoDB migration; it
// delegates to the shared tabular-to-Mongo builder.
func buildTDengineToMongoPlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
	return buildTabularToMongoPlan(config, tableName, sourceDB, targetDB)
}
// buildTabularToMongoPlan plans a migration from a tabular source into a
// MongoDB collection. It normalizes names for both sides, verifies the
// source table exists with column definitions, checks whether the target
// collection exists, and — per the configured target-table strategy — may
// schedule collection creation and index-migration commands. Returns the
// plan plus the source column list; the target column slice is always nil
// for a Mongo target.
func buildTabularToMongoPlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
	plan := SchemaMigrationPlan{}
	sourceType := resolveMigrationDBType(config.SourceConfig)
	targetType := resolveMigrationDBType(config.TargetConfig)
	plan.SourceSchema, plan.SourceTable = normalizeSchemaAndTable(sourceType, config.SourceConfig.Database, tableName)
	plan.TargetSchema, plan.TargetTable = normalizeSchemaAndTable(targetType, config.TargetConfig.Database, tableName)
	plan.SourceQueryTable = qualifiedNameForQuery(sourceType, plan.SourceSchema, plan.SourceTable, tableName)
	plan.TargetQueryTable = qualifiedNameForQuery(targetType, plan.TargetSchema, plan.TargetTable, tableName)
	plan.PlannedAction = "使用已有目标集合导入"
	// The source table must exist and expose at least one column definition.
	sourceCols, sourceExists, err := inspectTableColumns(sourceDB, plan.SourceSchema, plan.SourceTable)
	if err != nil {
		return plan, nil, nil, fmt.Errorf("获取源表字段失败: %w", err)
	}
	if !sourceExists {
		return plan, nil, nil, fmt.Errorf("源表不存在或无列定义: %s", tableName)
	}
	targetExists, err := inspectMongoCollection(targetDB, plan.TargetSchema, plan.TargetTable)
	if err != nil {
		return plan, sourceCols, nil, fmt.Errorf("检查目标集合失败: %w", err)
	}
	plan.TargetTableExists = targetExists
	strategy := normalizeTargetTableStrategy(config.TargetTableStrategy)
	if targetExists {
		// MongoDB is schemaless: no target-column validation is performed.
		plan.Warnings = append(plan.Warnings, "MongoDB 为弱 schema 目标,字段结构以写入文档为准,不执行目标列校验")
		return dedupeSchemaMigrationPlan(plan), sourceCols, nil, nil
	}
	switch strategy {
	case "existing_only":
		// Strategy forbids auto-creation; record that as plan guidance.
		plan.PlannedAction = "目标集合不存在,需先手工创建"
		plan.Warnings = append(plan.Warnings, "当前策略要求目标集合已存在,执行时不会自动创建")
		return dedupeSchemaMigrationPlan(plan), sourceCols, nil, nil
	case "smart", "auto_create_if_missing":
		plan.AutoCreate = true
		plan.PlannedAction = "目标集合不存在,将自动创建集合后导入"
		createCmd, err := buildMongoCreateCollectionCommand(plan.TargetTable)
		if err != nil {
			return plan, sourceCols, nil, err
		}
		plan.PreDataSQL = append(plan.PreDataSQL, createCmd)
		if config.CreateIndexes {
			// Index migration is best-effort: a read failure only warns.
			indexCmds, warnings, unsupported, created, skipped, err := buildMongoIndexCommands(sourceDB, plan.SourceSchema, plan.SourceTable, plan.TargetTable)
			if err != nil {
				plan.Warnings = append(plan.Warnings, fmt.Sprintf("读取源表索引失败,已跳过索引迁移:%v", err))
			} else {
				plan.PostDataSQL = append(plan.PostDataSQL, indexCmds...)
				plan.Warnings = append(plan.Warnings, warnings...)
				plan.UnsupportedObjects = append(plan.UnsupportedObjects, unsupported...)
				plan.IndexesToCreate = created
				plan.IndexesSkipped = skipped
			}
		}
		return dedupeSchemaMigrationPlan(plan), sourceCols, nil, nil
	default:
		return dedupeSchemaMigrationPlan(plan), sourceCols, nil, nil
	}
}
// buildMongoToMySQLPlan plans a MongoDB -> MySQL migration. Source columns
// are inferred by sampling the collection. If the target table exists, it
// reports missing columns and (when AutoAddColumns is enabled) schedules
// ALTER TABLE statements; otherwise the configured strategy decides between
// requiring manual creation and auto-creating the table (optionally with
// index migration).
func buildMongoToMySQLPlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
	plan := SchemaMigrationPlan{}
	plan.SourceSchema, plan.SourceTable = normalizeSchemaAndTable(config.SourceConfig.Type, config.SourceConfig.Database, tableName)
	plan.TargetSchema, plan.TargetTable = normalizeSchemaAndTable(config.TargetConfig.Type, config.TargetConfig.Database, tableName)
	plan.SourceQueryTable = qualifiedNameForQuery(config.SourceConfig.Type, plan.SourceSchema, plan.SourceTable, tableName)
	plan.TargetQueryTable = qualifiedNameForQuery(config.TargetConfig.Type, plan.TargetSchema, plan.TargetTable, tableName)
	plan.PlannedAction = "使用已有目标表导入"
	// Infer a column set from sampled documents; abort if nothing usable.
	sourceCols, warnings, err := inferMongoCollectionColumns(sourceDB, plan.SourceTable)
	if err != nil {
		return plan, nil, nil, err
	}
	plan.Warnings = append(plan.Warnings, warnings...)
	if len(sourceCols) == 0 {
		return plan, nil, nil, fmt.Errorf("源集合未推断出可迁移字段: %s", tableName)
	}
	targetCols, targetExists, err := inspectTableColumns(targetDB, plan.TargetSchema, plan.TargetTable)
	if err != nil {
		return plan, sourceCols, nil, fmt.Errorf("获取目标表字段失败: %w", err)
	}
	plan.TargetTableExists = targetExists
	strategy := normalizeTargetTableStrategy(config.TargetTableStrategy)
	if targetExists {
		// Existing target: warn about missing columns, optionally add them.
		missing := diffMissingColumnNames(sourceCols, targetCols)
		if len(missing) > 0 {
			plan.Warnings = append(plan.Warnings, fmt.Sprintf("目标表缺失字段 %d 个:%s", len(missing), strings.Join(missing, ", ")))
		}
		if config.AutoAddColumns {
			addSQL, addWarnings := buildMongoToMySQLAddColumnSQL(plan.TargetQueryTable, sourceCols, targetCols)
			plan.PreDataSQL = append(plan.PreDataSQL, addSQL...)
			plan.Warnings = append(plan.Warnings, addWarnings...)
			if len(addSQL) > 0 {
				plan.PlannedAction = fmt.Sprintf("补齐缺失字段(%d)后导入", len(addSQL))
			}
		}
		return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
	}
	switch strategy {
	case "existing_only":
		plan.PlannedAction = "目标表不存在,需先手工创建"
		plan.Warnings = append(plan.Warnings, "当前策略要求目标表已存在,执行时不会自动建表")
		return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
	case "smart", "auto_create_if_missing":
		// Auto-create: build CREATE TABLE plus post-data index statements.
		plan.AutoCreate = true
		plan.PlannedAction = "目标表不存在,将自动建表后导入"
		createSQL, postSQL, moreWarnings, unsupported, idxCreate, idxSkip, err := buildMongoToMySQLCreateTablePlan(config, plan.TargetQueryTable, sourceCols, sourceDB, plan.SourceSchema, plan.SourceTable)
		if err != nil {
			return plan, sourceCols, targetCols, err
		}
		plan.CreateTableSQL = createSQL
		plan.PostDataSQL = append(plan.PostDataSQL, postSQL...)
		plan.Warnings = append(plan.Warnings, moreWarnings...)
		plan.UnsupportedObjects = append(plan.UnsupportedObjects, unsupported...)
		plan.IndexesToCreate = idxCreate
		plan.IndexesSkipped = idxSkip
		return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
	default:
		return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
	}
}
// inspectMongoCollection reports whether the named collection exists in the
// given MongoDB database. Names are compared case-insensitively after
// trimming surrounding whitespace on both sides.
func inspectMongoCollection(database db.Database, dbName, collection string) (bool, error) {
	wanted := strings.TrimSpace(collection)
	names, err := database.GetTables(dbName)
	if err != nil {
		return false, err
	}
	found := false
	for i := 0; i < len(names) && !found; i++ {
		found = strings.EqualFold(strings.TrimSpace(names[i]), wanted)
	}
	return found, nil
}
// buildMongoCreateCollectionCommand renders the MongoDB runCommand document
// that creates the given collection (name trimmed) as a JSON string.
func buildMongoCreateCollectionCommand(collection string) (string, error) {
	payload := map[string]interface{}{
		"create": strings.TrimSpace(collection),
	}
	encoded, err := json.Marshal(payload)
	if err != nil {
		return "", err
	}
	return string(encoded), nil
}
// buildMongoIndexCommands reads the source table's indexes and converts each
// migratable one into a MongoDB "createIndexes" runCommand JSON string for
// the target collection. It skips the primary index, indexes without column
// definitions, and prefix-length (SubPart) indexes; non-btree indexes are
// migrated as plain indexes with a warning. Returns the commands, warnings,
// unsupported-object notes (both de-duplicated), and the created/skipped
// counts.
func buildMongoIndexCommands(sourceDB db.Database, dbName, tableName, targetCollection string) ([]string, []string, []string, int, int, error) {
	indexes, err := sourceDB.GetIndexes(dbName, tableName)
	if err != nil {
		return nil, nil, nil, 0, 0, err
	}
	grouped := groupIndexDefinitions(indexes)
	cmds := make([]string, 0, len(grouped))
	warnings := make([]string, 0)
	unsupported := make([]string, 0)
	created := 0
	skipped := 0
	for _, idx := range grouped {
		name := strings.TrimSpace(idx.Name)
		// The primary key is handled separately; skip it here.
		if name == "" || strings.EqualFold(name, "primary") {
			continue
		}
		if len(idx.Columns) == 0 {
			skipped++
			unsupported = append(unsupported, fmt.Sprintf("索引 %s 缺少列定义,已跳过", name))
			continue
		}
		kind := strings.ToLower(strings.TrimSpace(idx.IndexType))
		// MongoDB has no equivalent of a prefix-length index; skip those.
		if idx.SubPart > 0 {
			skipped++
			unsupported = append(unsupported, fmt.Sprintf("索引 %s 使用前缀长度MongoDB 目标暂不支持等价迁移", name))
			continue
		}
		if kind != "" && kind != "btree" {
			warnings = append(warnings, fmt.Sprintf("索引 %s 类型=%s 将按普通索引迁移到 MongoDB", name, idx.IndexType))
		}
		// Build an ascending (value 1) key spec over the index columns.
		keySpec := make(map[string]int)
		for _, col := range idx.Columns {
			keySpec[col] = 1
		}
		command := map[string]interface{}{
			"createIndexes": strings.TrimSpace(targetCollection),
			"indexes": []map[string]interface{}{{
				"name":   name,
				"key":    keySpec,
				"unique": idx.Unique,
			}},
		}
		data, err := json.Marshal(command)
		if err != nil {
			skipped++
			unsupported = append(unsupported, fmt.Sprintf("索引 %s 生成 MongoDB createIndexes 命令失败:%v", name, err))
			continue
		}
		cmds = append(cmds, string(data))
		created++
	}
	return cmds, dedupeStrings(warnings), dedupeStrings(unsupported), created, skipped, nil
}
// inferMongoCollectionColumns samples up to 200 documents from the source
// collection and derives a relational column list. Field types come from
// inferMongoFieldType; fields are emitted in sorted order with "_id" moved
// to the front and forced to be the NOT NULL primary key. If the collection
// has no samples, a single "_id" varchar(64) PK column is returned with a
// warning. Warnings are de-duplicated.
func inferMongoCollectionColumns(sourceDB db.Database, collection string) ([]connection.ColumnDefinition, []string, error) {
	query := fmt.Sprintf(`{"find":"%s","filter":{},"limit":200}`, strings.TrimSpace(collection))
	rows, _, err := sourceDB.Query(query)
	if err != nil {
		return nil, nil, fmt.Errorf("读取源集合样本失败: %w", err)
	}
	if len(rows) == 0 {
		// Empty collection: fall back to a minimal "_id"-only definition.
		return []connection.ColumnDefinition{{Name: "_id", Type: "varchar(64)", Nullable: "NO", Key: "PRI"}}, []string{"源集合暂无样本数据,仅按 `_id` 生成基础主键列"}, nil
	}
	// Collect the union of field names across all sampled documents.
	fieldNames := make(map[string]struct{})
	for _, row := range rows {
		for key := range row {
			fieldNames[key] = struct{}{}
		}
	}
	orderedFields := make([]string, 0, len(fieldNames))
	for key := range fieldNames {
		orderedFields = append(orderedFields, key)
	}
	sort.Strings(orderedFields)
	if containsString(orderedFields, "_id") {
		orderedFields = moveStringToFront(orderedFields, "_id")
	}
	columns := make([]connection.ColumnDefinition, 0, len(orderedFields))
	warnings := make([]string, 0)
	for _, field := range orderedFields {
		typeName, nullable, fieldWarnings := inferMongoFieldType(rows, field)
		warnings = append(warnings, fieldWarnings...)
		col := connection.ColumnDefinition{
			Name:     field,
			Type:     typeName,
			Nullable: ternaryString(nullable, "YES", "NO"),
			Key:      "",
			Extra:    "",
		}
		// "_id" is always the mandatory primary key.
		if field == "_id" {
			col.Key = "PRI"
			col.Nullable = "NO"
		}
		columns = append(columns, col)
	}
	return columns, dedupeStrings(warnings), nil
}
func inferMongoFieldType(rows []map[string]interface{}, field string) (string, bool, []string) {
nullable := false
hasString, hasBool, hasInt, hasFloat, hasTime, hasComplex := false, false, false, false, false, false
for _, row := range rows {
value, ok := row[field]
if !ok || value == nil {
nullable = true
continue
}
switch value.(type) {
case bool:
hasBool = true
case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64:
hasInt = true
case float32, float64:
hasFloat = true
case time.Time:
hasTime = true
case map[string]interface{}, []interface{}:
hasComplex = true
default:
hasString = true
}
}
kinds := 0
for _, flag := range []bool{hasString, hasBool, hasInt, hasFloat, hasTime, hasComplex} {
if flag {
kinds++
}
}
warnings := make([]string, 0)
if kinds > 1 {
warnings = append(warnings, fmt.Sprintf("字段 %s 存在多种 BSON 值类型,已按兼容类型降级", field))
}
if field == "_id" {
return "varchar(64)", false, warnings
}
switch {
case hasComplex:
return "json", nullable, warnings
case hasTime:
return "datetime", nullable, warnings
case hasFloat:
return "double", nullable, warnings
case hasInt:
return "bigint", nullable, warnings
case hasBool:
return "tinyint(1)", nullable, warnings
default:
return "varchar(255)", nullable, warnings
}
}
// buildMongoToMySQLAddColumnSQL emits one MySQL "ALTER TABLE ... ADD COLUMN"
// statement (nullable) for every source column missing from the target
// table. Column-name comparison is case-insensitive and whitespace-trimmed;
// blank names are ignored. The second return value (warnings) is always nil
// for the MySQL target.
func buildMongoToMySQLAddColumnSQL(targetQueryTable string, sourceCols, targetCols []connection.ColumnDefinition) ([]string, []string) {
	existing := make(map[string]struct{}, len(targetCols))
	for _, tc := range targetCols {
		name := strings.ToLower(strings.TrimSpace(tc.Name))
		if name != "" {
			existing[name] = struct{}{}
		}
	}
	var statements []string
	for _, sc := range sourceCols {
		name := strings.ToLower(strings.TrimSpace(sc.Name))
		if name == "" {
			continue
		}
		if _, present := existing[name]; present {
			continue
		}
		statements = append(statements, fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s %s NULL",
			quoteQualifiedIdentByType("mysql", targetQueryTable),
			quoteIdentByType("mysql", sc.Name),
			strings.TrimSpace(sc.Type),
		))
	}
	return statements, nil
}
// buildMongoToMySQLCreateTablePlan builds the CREATE TABLE statement for an
// inferred Mongo-sourced column set targeting MySQL, plus (when
// config.CreateIndexes is set) CREATE [UNIQUE] INDEX statements derived from
// the source collection's indexes. Mongo's built-in "_id_" index and the
// primary index are skipped. Returns: createSQL, post-data SQL, warnings,
// unsupported-object notes (both de-duplicated), created count, skipped
// count, error.
func buildMongoToMySQLCreateTablePlan(config SyncConfig, targetQueryTable string, sourceCols []connection.ColumnDefinition, sourceDB db.Database, sourceSchema, sourceTable string) (string, []string, []string, []string, int, int, error) {
	columnDefs := make([]string, 0, len(sourceCols)+1)
	warnings := make([]string, 0)
	unsupported := make([]string, 0)
	pkCols := make([]string, 0, 1)
	for _, col := range sourceCols {
		columnDef := fmt.Sprintf("%s %s", quoteIdentByType("mysql", col.Name), strings.TrimSpace(col.Type))
		if strings.EqualFold(strings.TrimSpace(col.Nullable), "NO") {
			columnDef += " NOT NULL"
		}
		columnDefs = append(columnDefs, columnDef)
		if col.Key == "PRI" || col.Key == "PK" {
			pkCols = append(pkCols, quoteIdentByType("mysql", col.Name))
		}
	}
	if len(pkCols) > 0 {
		columnDefs = append(columnDefs, fmt.Sprintf("PRIMARY KEY (%s)", strings.Join(pkCols, ", ")))
	} else {
		// No stable PK could be inferred; warn instead of guessing one.
		warnings = append(warnings, "MongoDB 源集合未推断出稳定主键,目标表将不自动创建主键")
	}
	createSQL := fmt.Sprintf("CREATE TABLE %s (\n  %s\n)", quoteQualifiedIdentByType("mysql", targetQueryTable), strings.Join(columnDefs, ",\n  "))
	if !config.CreateIndexes {
		return createSQL, nil, dedupeStrings(warnings), dedupeStrings(unsupported), 0, 0, nil
	}
	// Index migration is best-effort: a read failure only warns.
	indexes, err := sourceDB.GetIndexes(sourceSchema, sourceTable)
	if err != nil {
		warnings = append(warnings, fmt.Sprintf("读取源集合索引失败,已跳过索引迁移:%v", err))
		return createSQL, nil, dedupeStrings(warnings), dedupeStrings(unsupported), 0, 0, nil
	}
	grouped := groupIndexDefinitions(indexes)
	postSQL := make([]string, 0, len(grouped))
	created := 0
	skipped := 0
	for _, idx := range grouped {
		name := strings.TrimSpace(idx.Name)
		// "_id_" is Mongo's implicit index; primary is handled by the PK.
		if name == "" || strings.EqualFold(name, "_id_") || strings.EqualFold(name, "primary") {
			continue
		}
		if len(idx.Columns) == 0 {
			skipped++
			unsupported = append(unsupported, fmt.Sprintf("索引 %s 缺少列定义,已跳过", name))
			continue
		}
		quotedCols := make([]string, 0, len(idx.Columns))
		for _, col := range idx.Columns {
			quotedCols = append(quotedCols, quoteIdentByType("mysql", col))
		}
		prefix := "CREATE INDEX"
		if idx.Unique {
			prefix = "CREATE UNIQUE INDEX"
		}
		postSQL = append(postSQL, fmt.Sprintf("%s %s ON %s (%s)", prefix, quoteIdentByType("mysql", name), quoteQualifiedIdentByType("mysql", targetQueryTable), strings.Join(quotedCols, ", ")))
		created++
	}
	return createSQL, postSQL, dedupeStrings(warnings), dedupeStrings(unsupported), created, skipped, nil
}
// containsString reports whether target appears (exact match) in items.
func containsString(items []string, target string) bool {
	for i := range items {
		if items[i] == target {
			return true
		}
	}
	return false
}
// moveStringToFront returns a new slice with every occurrence of target
// removed and a single target prepended; the input slice is not modified.
// Note: target is prepended even when absent from items — callers in this
// file only invoke it after containsString confirms membership.
func moveStringToFront(items []string, target string) []string {
	rest := make([]string, 0, len(items))
	for _, candidate := range items {
		if candidate != target {
			rest = append(rest, candidate)
		}
	}
	result := make([]string, 0, len(rest)+1)
	result = append(result, target)
	return append(result, rest...)
}
// buildMongoToPGLikePlan plans a MongoDB -> PG-like (postgres/kingbase/etc.)
// migration. Structure mirrors buildMongoToMySQLPlan: source columns are
// inferred by sampling the collection; an existing target gets missing-column
// warnings and optional ALTER TABLE statements, otherwise the strategy
// decides between manual creation and auto-creating the table (with optional
// index migration). Column types are mapped to the PG-like dialect.
func buildMongoToPGLikePlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
	plan := SchemaMigrationPlan{}
	targetType := strings.ToLower(strings.TrimSpace(config.TargetConfig.Type))
	plan.SourceSchema, plan.SourceTable = normalizeSchemaAndTable(config.SourceConfig.Type, config.SourceConfig.Database, tableName)
	plan.TargetSchema, plan.TargetTable = normalizeSchemaAndTable(config.TargetConfig.Type, config.TargetConfig.Database, tableName)
	plan.SourceQueryTable = qualifiedNameForQuery(config.SourceConfig.Type, plan.SourceSchema, plan.SourceTable, tableName)
	plan.TargetQueryTable = qualifiedNameForQuery(config.TargetConfig.Type, plan.TargetSchema, plan.TargetTable, tableName)
	plan.PlannedAction = "使用已有目标表导入"
	// Infer a column set from sampled documents; abort if nothing usable.
	sourceCols, warnings, err := inferMongoCollectionColumns(sourceDB, plan.SourceTable)
	if err != nil {
		return plan, nil, nil, err
	}
	plan.Warnings = append(plan.Warnings, warnings...)
	if len(sourceCols) == 0 {
		return plan, nil, nil, fmt.Errorf("源集合未推断出可迁移字段: %s", tableName)
	}
	targetCols, targetExists, err := inspectTableColumns(targetDB, plan.TargetSchema, plan.TargetTable)
	if err != nil {
		return plan, sourceCols, nil, fmt.Errorf("获取目标表字段失败: %w", err)
	}
	plan.TargetTableExists = targetExists
	strategy := normalizeTargetTableStrategy(config.TargetTableStrategy)
	if targetExists {
		// Existing target: warn about missing columns, optionally add them.
		missing := diffMissingColumnNames(sourceCols, targetCols)
		if len(missing) > 0 {
			plan.Warnings = append(plan.Warnings, fmt.Sprintf("目标表缺失字段 %d 个:%s", len(missing), strings.Join(missing, ", ")))
		}
		if config.AutoAddColumns {
			addSQL, addWarnings := buildMongoToPGLikeAddColumnSQL(targetType, plan.TargetQueryTable, sourceCols, targetCols)
			plan.PreDataSQL = append(plan.PreDataSQL, addSQL...)
			plan.Warnings = append(plan.Warnings, addWarnings...)
			if len(addSQL) > 0 {
				plan.PlannedAction = fmt.Sprintf("补齐缺失字段(%d)后导入", len(addSQL))
			}
		}
		return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
	}
	switch strategy {
	case "existing_only":
		plan.PlannedAction = "目标表不存在,需先手工创建"
		plan.Warnings = append(plan.Warnings, "当前策略要求目标表已存在,执行时不会自动建表")
		return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
	case "smart", "auto_create_if_missing":
		// Auto-create: build CREATE TABLE plus post-data index statements.
		plan.AutoCreate = true
		plan.PlannedAction = "目标表不存在,将自动建表后导入"
		createSQL, postSQL, moreWarnings, unsupported, idxCreate, idxSkip, err := buildMongoToPGLikeCreateTablePlan(targetType, config, plan.TargetQueryTable, sourceCols, sourceDB, plan.SourceSchema, plan.SourceTable)
		if err != nil {
			return plan, sourceCols, targetCols, err
		}
		plan.CreateTableSQL = createSQL
		plan.PostDataSQL = append(plan.PostDataSQL, postSQL...)
		plan.Warnings = append(plan.Warnings, moreWarnings...)
		plan.UnsupportedObjects = append(plan.UnsupportedObjects, unsupported...)
		plan.IndexesToCreate = idxCreate
		plan.IndexesSkipped = idxSkip
		return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
	default:
		return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
	}
}
// buildMongoToPGLikeAddColumnSQL emits one "ALTER TABLE ... ADD COLUMN ... NULL"
// statement for every inferred source column that is not already present on the
// target table. Names are compared case-insensitively after trimming; blank
// names are skipped on both sides. Returns the generated statements together
// with deduplicated type-mapping warnings.
func buildMongoToPGLikeAddColumnSQL(targetType string, targetQueryTable string, sourceCols, targetCols []connection.ColumnDefinition) ([]string, []string) {
	existing := make(map[string]struct{}, len(targetCols))
	for _, targetCol := range targetCols {
		name := strings.ToLower(strings.TrimSpace(targetCol.Name))
		if name != "" {
			existing[name] = struct{}{}
		}
	}
	var statements []string
	var allWarnings []string
	for _, sourceCol := range sourceCols {
		name := strings.ToLower(strings.TrimSpace(sourceCol.Name))
		if name == "" {
			continue
		}
		if _, present := existing[name]; present {
			continue
		}
		mappedType, mapWarnings := mapMongoInferredColumnToPGLike(sourceCol)
		allWarnings = append(allWarnings, mapWarnings...)
		statements = append(statements, fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s %s NULL",
			quoteQualifiedIdentByType(targetType, targetQueryTable),
			quoteIdentByType(targetType, sourceCol.Name),
			mappedType,
		))
	}
	return statements, dedupeStrings(allWarnings)
}
// buildMongoToPGLikeCreateTablePlan renders CREATE TABLE DDL for a PG-like
// target from columns inferred out of a Mongo collection and, when
// config.CreateIndexes is set, derives post-load CREATE INDEX statements from
// the source collection's indexes.
//
// Returns: createSQL, post-data SQL statements, deduplicated warnings,
// deduplicated unsupported-object notes, number of indexes planned, number of
// indexes skipped, and an error (only from index retrieval plumbing; an index
// read failure is downgraded to a warning, not an error).
func buildMongoToPGLikeCreateTablePlan(targetType string, config SyncConfig, targetQueryTable string, sourceCols []connection.ColumnDefinition, sourceDB db.Database, sourceSchema, sourceTable string) (string, []string, []string, []string, int, int, error) {
	columnDefs := make([]string, 0, len(sourceCols)+1)
	warnings := make([]string, 0)
	unsupported := make([]string, 0)
	pkCols := make([]string, 0, 1)
	for _, col := range sourceCols {
		colType, colWarnings := mapMongoInferredColumnToPGLike(col)
		warnings = append(warnings, colWarnings...)
		parts := []string{colType}
		// Only an explicit "NO" marks the column NOT NULL; anything else stays nullable.
		if strings.EqualFold(strings.TrimSpace(col.Nullable), "NO") {
			parts = append(parts, "NOT NULL")
		}
		columnDefs = append(columnDefs, fmt.Sprintf("%s %s", quoteIdentByType(targetType, col.Name), strings.Join(parts, " ")))
		// Both "PRI" (MySQL-style) and "PK" markers are honored as primary-key flags.
		if col.Key == "PRI" || col.Key == "PK" {
			pkCols = append(pkCols, quoteIdentByType(targetType, col.Name))
		}
	}
	if len(pkCols) > 0 {
		columnDefs = append(columnDefs, fmt.Sprintf("PRIMARY KEY (%s)", strings.Join(pkCols, ", ")))
	}
	createSQL := fmt.Sprintf("CREATE TABLE %s (\n %s\n)", quoteQualifiedIdentByType(targetType, targetQueryTable), strings.Join(columnDefs, ",\n "))
	if !config.CreateIndexes {
		return createSQL, nil, dedupeStrings(warnings), dedupeStrings(unsupported), 0, 0, nil
	}
	indexes, err := sourceDB.GetIndexes(sourceSchema, sourceTable)
	if err != nil {
		// Index migration is best-effort: failure to read indexes is a warning, not a hard error.
		warnings = append(warnings, fmt.Sprintf("读取源集合索引失败,已跳过索引迁移:%v", err))
		return createSQL, nil, dedupeStrings(warnings), dedupeStrings(unsupported), 0, 0, nil
	}
	grouped := groupIndexDefinitions(indexes)
	postSQL := make([]string, 0, len(grouped))
	created := 0
	skipped := 0
	for _, idx := range grouped {
		name := strings.TrimSpace(idx.Name)
		// Skip unnamed indexes plus Mongo's implicit "_id_" index and primary markers.
		if name == "" || strings.EqualFold(name, "_id_") || strings.EqualFold(name, "primary") {
			continue
		}
		if len(idx.Columns) == 0 {
			skipped++
			unsupported = append(unsupported, fmt.Sprintf("索引 %s 缺少列定义,已跳过", name))
			continue
		}
		quotedCols := make([]string, 0, len(idx.Columns))
		for _, col := range idx.Columns {
			quotedCols = append(quotedCols, quoteIdentByType(targetType, col))
		}
		prefix := "CREATE INDEX"
		if idx.Unique {
			prefix = "CREATE UNIQUE INDEX"
		}
		postSQL = append(postSQL, fmt.Sprintf("%s %s ON %s (%s)", prefix, quoteIdentByType(targetType, name), quoteQualifiedIdentByType(targetType, targetQueryTable), strings.Join(quotedCols, ", ")))
		created++
	}
	return createSQL, postSQL, dedupeStrings(warnings), dedupeStrings(unsupported), created, skipped, nil
}
// mapMongoInferredColumnToPGLike translates a column type inferred from Mongo
// sampling (expressed in MySQL-ish vocabulary) into a PG-like column type.
// varchar types and any unrecognized type pass through unchanged; no warnings
// are currently produced.
func mapMongoInferredColumnToPGLike(col connection.ColumnDefinition) (string, []string) {
	warnings := make([]string, 0)
	normalized := strings.ToLower(strings.TrimSpace(col.Type))
	if strings.HasPrefix(normalized, "varchar") {
		return col.Type, warnings
	}
	switch normalized {
	case "json":
		return "jsonb", warnings
	case "datetime":
		return "timestamp", warnings
	case "tinyint(1)":
		return "boolean", warnings
	case "double":
		return "double precision", warnings
	case "bigint":
		return "bigint", warnings
	}
	return col.Type, warnings
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,58 +0,0 @@
package sync
import (
"GoNavi-Wails/internal/connection"
"fmt"
"strings"
)
// supportsAutoAddColumnsForPair reports whether automatic "add missing column"
// DDL generation is implemented for the normalized source/target type pair.
func supportsAutoAddColumnsForPair(sourceType string, targetType string) bool {
	src := normalizeMigrationDBType(sourceType)
	dst := normalizeMigrationDBType(targetType)
	switch {
	case isMySQLLikeWritableTargetType(dst):
		return isMySQLCoreType(src)
	case isPGLikeTarget(dst):
		return isMySQLLikeSourceType(src)
	default:
		return false
	}
}
// buildAddColumnSQLForPair builds the "ALTER TABLE ... ADD COLUMN ... NULL"
// statement used to backfill one missing column on an existing target table
// for a supported source/target pair. It returns an error for unsupported
// pairs (the error message intentionally echoes the raw, un-normalized types).
func buildAddColumnSQLForPair(sourceType string, targetType string, targetQueryTable string, sourceCol connection.ColumnDefinition) (string, error) {
	source := normalizeMigrationDBType(sourceType)
	target := normalizeMigrationDBType(targetType)
	switch {
	case isMySQLCoreType(source) && isMySQLLikeWritableTargetType(target):
		colType := sanitizeMySQLColumnType(sourceCol.Type)
		return fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s %s NULL",
			quoteQualifiedIdentByType("mysql", targetQueryTable),
			quoteIdentByType("mysql", sourceCol.Name),
			colType,
		), nil
	case isMySQLLikeSourceType(source) && isPGLikeTarget(target):
		// Mapper warnings (e.g. identity/auto-increment notes) are deliberately
		// discarded here: when adding a column to an existing table we stay
		// conservative and never recreate auto-increment semantics.
		colType, _, _ := mapMySQLColumnToKingbase(sourceCol)
		return fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s %s NULL",
			quoteQualifiedIdentByType(target, targetQueryTable),
			quoteIdentByType(target, sourceCol.Name),
			colType,
		), nil
	default:
		return "", fmt.Errorf("当前不支持 source=%s target=%s 的自动补字段", sourceType, targetType)
	}
}
func executeSQLStatements(execFn func(string) (int64, error), statements []string) error {
for _, stmt := range statements {
trimmed := strings.TrimSpace(stmt)
if trimmed == "" {
continue
}
if _, err := execFn(trimmed); err != nil {
return err
}
}
return nil
}

View File

@@ -1,53 +0,0 @@
package sync
import (
"fmt"
"strings"
)
// SchemaInferenceStrategy names a schema-inference mode ("sample" / "strict").
// Semantics of each mode are defined by consumers — not shown in this file.
type SchemaInferenceStrategy string

const (
	SchemaInferenceStrategySample SchemaInferenceStrategy = "sample"
	SchemaInferenceStrategyStrict SchemaInferenceStrategy = "strict"
)

// shouldUseSchemaInference reports whether the migration pair needs schema
// inference: only document-model sources going to relational-model targets do.
func shouldUseSchemaInference(sourceType string, targetType string) bool {
	sourceModel := classifyMigrationDataModel(sourceType)
	targetModel := classifyMigrationDataModel(targetType)
	return sourceModel == MigrationDataModelDocument && targetModel == MigrationDataModelRelational
}

// inferMigrationObjectKind picks the object vocabulary for a migration pair:
// "collection" if either side is document-model, "keyspace" if either side is
// key-value, otherwise plain "table". Document-model takes precedence.
func inferMigrationObjectKind(sourceType string, targetType string) MigrationObjectKind {
	sourceModel := classifyMigrationDataModel(sourceType)
	targetModel := classifyMigrationDataModel(targetType)
	switch {
	case sourceModel == MigrationDataModelDocument || targetModel == MigrationDataModelDocument:
		return MigrationObjectKindCollection
	case sourceModel == MigrationDataModelKeyValue || targetModel == MigrationDataModelKeyValue:
		return MigrationObjectKindKeyspace
	default:
		return MigrationObjectKindTable
	}
}

// inferSchemaForPair is the entry point for document->relational schema
// inference. It currently returns a stub result: an empty field list plus an
// info-level issue explaining that real inference is still under construction,
// with NeedsReview forced to true. Pairs that do not require inference get an
// error instead.
func inferSchemaForPair(sourceType string, targetType string, objectName string) (SchemaInferenceResult, error) {
	if !shouldUseSchemaInference(sourceType, targetType) {
		return SchemaInferenceResult{}, fmt.Errorf("当前迁移对 %s -> %s 不需要 schema 推断", sourceType, targetType)
	}
	return SchemaInferenceResult{
		Object: CanonicalObjectSpec{
			Name:   strings.TrimSpace(objectName),
			Kind:   MigrationObjectKindCollection,
			Fields: []CanonicalFieldSpec{},
		},
		Issues: []SchemaInferenceIssue{
			{
				Level:      "info",
				Message:    "MongoDB -> 关系型数据库的 schema 推断能力尚在建设中,当前仅提供内核入口。",
				Resolution: "后续将基于样本数据生成列定义与类型降级策略。",
			},
		},
		NeedsReview: true,
	}, nil
}

View File

@@ -1,296 +0,0 @@
package sync
import (
"GoNavi-Wails/internal/connection"
"GoNavi-Wails/internal/db"
"fmt"
"strconv"
"strings"
)
// buildTDengineToMySQLPlan plans a TDengine -> MySQL table migration: it
// resolves schema/table names, inspects source and target columns, records
// TDengine-specific semantic warnings, and — depending on the target-table
// strategy — either reuses the existing table, requires manual creation, or
// emits CREATE TABLE DDL. Auto-adding columns to an existing target is not
// supported for TDengine sources; only a warning is recorded.
func buildTDengineToMySQLPlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
	plan := SchemaMigrationPlan{}
	sourceType := resolveMigrationDBType(config.SourceConfig)
	targetType := resolveMigrationDBType(config.TargetConfig)
	plan.SourceSchema, plan.SourceTable = normalizeSchemaAndTable(sourceType, config.SourceConfig.Database, tableName)
	plan.TargetSchema, plan.TargetTable = normalizeSchemaAndTable(targetType, config.TargetConfig.Database, tableName)
	plan.SourceQueryTable = qualifiedNameForQuery(sourceType, plan.SourceSchema, plan.SourceTable, tableName)
	plan.TargetQueryTable = qualifiedNameForQuery(targetType, plan.TargetSchema, plan.TargetTable, tableName)
	plan.PlannedAction = "使用已有目标表导入"
	sourceCols, sourceExists, err := inspectTableColumns(sourceDB, plan.SourceSchema, plan.SourceTable)
	if err != nil {
		return plan, nil, nil, fmt.Errorf("获取源表字段失败: %w", err)
	}
	if !sourceExists {
		return plan, nil, nil, fmt.Errorf("源表不存在或无列定义: %s", tableName)
	}
	targetCols, targetExists, err := inspectTableColumns(targetDB, plan.TargetSchema, plan.TargetTable)
	if err != nil {
		return plan, sourceCols, nil, fmt.Errorf("获取目标表字段失败: %w", err)
	}
	plan.TargetTableExists = targetExists
	// Warn about TAG columns and other time-series semantics that degrade on relational targets.
	plan.Warnings = append(plan.Warnings, tdengineSemanticWarnings(sourceCols)...)
	strategy := normalizeTargetTableStrategy(config.TargetTableStrategy)
	if targetExists {
		missing := diffMissingColumnNames(sourceCols, targetCols)
		if len(missing) > 0 {
			plan.Warnings = append(plan.Warnings, fmt.Sprintf("目标表缺失字段 %d 个:%s", len(missing), strings.Join(missing, ", ")))
		}
		if strategy != "existing_only" {
			plan.Warnings = append(plan.Warnings, "TDengine 源端当前不自动补齐已有目标表字段,请先确认目标表结构")
		}
		return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
	}
	switch strategy {
	case "existing_only":
		plan.PlannedAction = "目标表不存在,需先手工创建"
		plan.Warnings = append(plan.Warnings, "当前策略要求目标表已存在,执行时不会自动建表")
		return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
	case "smart", "auto_create_if_missing":
		plan.AutoCreate = true
		plan.PlannedAction = "目标表不存在,将自动建表后导入"
		createSQL, warnings, unsupported := buildTDengineToMySQLCreateTableSQL(plan.TargetQueryTable, sourceCols)
		plan.CreateTableSQL = createSQL
		plan.Warnings = append(plan.Warnings, warnings...)
		plan.UnsupportedObjects = append(plan.UnsupportedObjects, unsupported...)
		return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
	default:
		// Unknown strategies fall back to "use existing target table" semantics.
		return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
	}
}

// buildTDengineToPGLikePlan mirrors buildTDengineToMySQLPlan for PG-like
// targets (PostgreSQL-compatible databases); the only difference is the
// CREATE TABLE builder used under the auto-create strategies.
func buildTDengineToPGLikePlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
	plan := SchemaMigrationPlan{}
	sourceType := resolveMigrationDBType(config.SourceConfig)
	targetType := resolveMigrationDBType(config.TargetConfig)
	plan.SourceSchema, plan.SourceTable = normalizeSchemaAndTable(sourceType, config.SourceConfig.Database, tableName)
	plan.TargetSchema, plan.TargetTable = normalizeSchemaAndTable(targetType, config.TargetConfig.Database, tableName)
	plan.SourceQueryTable = qualifiedNameForQuery(sourceType, plan.SourceSchema, plan.SourceTable, tableName)
	plan.TargetQueryTable = qualifiedNameForQuery(targetType, plan.TargetSchema, plan.TargetTable, tableName)
	plan.PlannedAction = "使用已有目标表导入"
	sourceCols, sourceExists, err := inspectTableColumns(sourceDB, plan.SourceSchema, plan.SourceTable)
	if err != nil {
		return plan, nil, nil, fmt.Errorf("获取源表字段失败: %w", err)
	}
	if !sourceExists {
		return plan, nil, nil, fmt.Errorf("源表不存在或无列定义: %s", tableName)
	}
	targetCols, targetExists, err := inspectTableColumns(targetDB, plan.TargetSchema, plan.TargetTable)
	if err != nil {
		return plan, sourceCols, nil, fmt.Errorf("获取目标表字段失败: %w", err)
	}
	plan.TargetTableExists = targetExists
	plan.Warnings = append(plan.Warnings, tdengineSemanticWarnings(sourceCols)...)
	strategy := normalizeTargetTableStrategy(config.TargetTableStrategy)
	if targetExists {
		missing := diffMissingColumnNames(sourceCols, targetCols)
		if len(missing) > 0 {
			plan.Warnings = append(plan.Warnings, fmt.Sprintf("目标表缺失字段 %d 个:%s", len(missing), strings.Join(missing, ", ")))
		}
		if strategy != "existing_only" {
			plan.Warnings = append(plan.Warnings, "TDengine 源端当前不自动补齐已有目标表字段,请先确认目标表结构")
		}
		return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
	}
	switch strategy {
	case "existing_only":
		plan.PlannedAction = "目标表不存在,需先手工创建"
		plan.Warnings = append(plan.Warnings, "当前策略要求目标表已存在,执行时不会自动建表")
		return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
	case "smart", "auto_create_if_missing":
		plan.AutoCreate = true
		plan.PlannedAction = "目标表不存在,将自动建表后导入"
		createSQL, warnings, unsupported := buildTDengineToPGLikeCreateTableSQL(targetType, plan.TargetQueryTable, sourceCols)
		plan.CreateTableSQL = createSQL
		plan.Warnings = append(plan.Warnings, warnings...)
		plan.UnsupportedObjects = append(plan.UnsupportedObjects, unsupported...)
		return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
	default:
		// Unknown strategies fall back to "use existing target table" semantics.
		return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
	}
}
// buildTDengineToMySQLCreateTableSQL renders CREATE TABLE DDL for a MySQL
// target from TDengine source columns. Returns the DDL, deduplicated
// per-column warnings, and the fixed unsupported-objects note about
// time-series semantics that are not migrated.
func buildTDengineToMySQLCreateTableSQL(targetQueryTable string, sourceCols []connection.ColumnDefinition) (string, []string, []string) {
	defs := make([]string, 0, len(sourceCols))
	warns := make([]string, 0)
	unsupported := []string{"TDengine 的索引/外键/触发器/超级表/TTL 等时序语义当前不会自动迁移"}
	for _, column := range sourceCols {
		definition, defWarns := buildTDengineToMySQLColumnDefinition(column)
		defs = append(defs, fmt.Sprintf("%s %s", quoteIdentByType("mysql", column.Name), definition))
		warns = append(warns, defWarns...)
	}
	ddl := fmt.Sprintf("CREATE TABLE %s (\n %s\n)", quoteQualifiedIdentByType("mysql", targetQueryTable), strings.Join(defs, ",\n "))
	return ddl, dedupeStrings(warns), dedupeStrings(unsupported)
}

// buildTDengineToPGLikeCreateTableSQL is the PG-like counterpart of
// buildTDengineToMySQLCreateTableSQL; targetType selects the identifier
// quoting dialect.
func buildTDengineToPGLikeCreateTableSQL(targetType string, targetQueryTable string, sourceCols []connection.ColumnDefinition) (string, []string, []string) {
	defs := make([]string, 0, len(sourceCols))
	warns := make([]string, 0)
	unsupported := []string{"TDengine 的索引/外键/触发器/超级表/TTL 等时序语义当前不会自动迁移"}
	for _, column := range sourceCols {
		definition, defWarns := buildTDengineToPGLikeColumnDefinition(column)
		defs = append(defs, fmt.Sprintf("%s %s", quoteIdentByType(targetType, column.Name), definition))
		warns = append(warns, defWarns...)
	}
	ddl := fmt.Sprintf("CREATE TABLE %s (\n %s\n)", quoteQualifiedIdentByType(targetType, targetQueryTable), strings.Join(defs, ",\n "))
	return ddl, dedupeStrings(warns), dedupeStrings(unsupported)
}

// buildTDengineToMySQLColumnDefinition maps one TDengine column to its MySQL
// definition fragment: "<type> NOT NULL" when Nullable is "NO", else "<type> NULL".
func buildTDengineToMySQLColumnDefinition(col connection.ColumnDefinition) (string, []string) {
	mapped, warns := mapTDengineColumnToMySQL(col)
	nullability := "NULL"
	if strings.EqualFold(strings.TrimSpace(col.Nullable), "NO") {
		nullability = "NOT NULL"
	}
	return mapped + " " + nullability, warns
}

// buildTDengineToPGLikeColumnDefinition maps one TDengine column to its
// PG-like definition fragment: "<type> NOT NULL" when Nullable is "NO",
// else "<type> NULL".
func buildTDengineToPGLikeColumnDefinition(col connection.ColumnDefinition) (string, []string) {
	mapped, warns := mapTDengineColumnToPGLike(col)
	nullability := "NULL"
	if strings.EqualFold(strings.TrimSpace(col.Nullable), "NO") {
		nullability = "NOT NULL"
	}
	return mapped + " " + nullability, warns
}
// tdengineSemanticWarnings returns the base migration warning plus one warning
// per TDengine TAG column found in the source, deduplicated.
func tdengineSemanticWarnings(sourceCols []connection.ColumnDefinition) []string {
	result := []string{"TDengine 到关系型目标库当前仅迁移列与数据超级表、TAG 关联、保留策略等时序语义会降级或丢失"}
	for _, column := range sourceCols {
		if !isTDengineTagColumn(column) {
			continue
		}
		result = append(result, fmt.Sprintf("字段 %s 为 TDengine TAG 列,迁移到关系型目标后将降级为普通字段", column.Name))
	}
	return dedupeStrings(result)
}

// isTDengineTagColumn reports whether column metadata marks a TDengine TAG
// column: either Key equals "TAG" (case-insensitive) or the uppercased Extra
// text contains "TAG".
func isTDengineTagColumn(col connection.ColumnDefinition) bool {
	key := strings.TrimSpace(col.Key)
	extra := strings.ToUpper(strings.TrimSpace(col.Extra))
	return strings.EqualFold(key, "TAG") || strings.Contains(extra, "TAG")
}
// parseTDengineType splits a raw TDengine type declaration into its uppercased
// base name and an optional length, e.g. "varchar(64)" -> ("VARCHAR", 64).
// A missing, unclosed, or non-numeric length (e.g. "DECIMAL(10,2)") yields 0.
func parseTDengineType(raw string) (string, int) {
	upper := strings.TrimSpace(strings.ToUpper(raw))
	if upper == "" {
		return "", 0
	}
	open := strings.Index(upper, "(")
	if open < 0 {
		return upper, 0
	}
	size := 0
	if end := strings.Index(upper[open+1:], ")"); end >= 0 {
		if parsed, convErr := strconv.Atoi(strings.TrimSpace(upper[open+1 : open+1+end])); convErr == nil {
			size = parsed
		}
	}
	return strings.TrimSpace(upper[:open]), size
}
// mapTDengineColumnToMySQL maps a TDengine column type to a MySQL column type.
// Unsigned TDengine types (UTINYINT etc.) map to MySQL unsigned variants; TAG
// columns are mapped like regular columns with a warning; unknown types fall
// back to text with a warning. DECIMAL/NUMERIC with an explicit precision pass
// through unchanged (lowercased).
func mapTDengineColumnToMySQL(col connection.ColumnDefinition) (string, []string) {
	base, length := parseTDengineType(col.Type)
	warnings := make([]string, 0)
	if isTDengineTagColumn(col) {
		warnings = append(warnings, fmt.Sprintf("字段 %s 为 TDengine TAG 列,已按普通列映射", col.Name))
	}
	switch base {
	case "BOOL", "BOOLEAN":
		return "tinyint(1)", warnings
	case "TINYINT":
		return "tinyint", warnings
	case "UTINYINT":
		return "tinyint unsigned", warnings
	case "SMALLINT":
		return "smallint", warnings
	case "USMALLINT":
		return "smallint unsigned", warnings
	case "INT", "INTEGER":
		return "int", warnings
	case "UINT":
		return "int unsigned", warnings
	case "BIGINT":
		return "bigint", warnings
	case "UBIGINT":
		return "bigint unsigned", warnings
	case "FLOAT":
		return "float", warnings
	case "DOUBLE":
		return "double", warnings
	case "DECIMAL", "NUMERIC":
		// With an explicit precision, keep the original declaration (lowercased);
		// otherwise use a wide default.
		if length > 0 {
			return strings.ToLower(strings.TrimSpace(col.Type)), warnings
		}
		return "decimal(38,10)", warnings
	case "TIMESTAMP":
		return "datetime", warnings
	case "DATE":
		return "date", warnings
	case "JSON":
		return "json", warnings
	case "BINARY", "NCHAR", "VARCHAR", "VARBINARY":
		// varchar only up to MySQL's 65535 limit; anything larger degrades to text.
		if length > 0 && length <= 65535 {
			return fmt.Sprintf("varchar(%d)", length), warnings
		}
		return "text", warnings
	default:
		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 暂无专门 MySQL 映射,已降级为 text", col.Name, col.Type))
		return "text", warnings
	}
}

// mapTDengineColumnToPGLike maps a TDengine column type to a PG-like column
// type. Unsigned TDengine types map to the next-wider signed type (UBIGINT to
// numeric(20,0) to avoid overflow); TAG columns are mapped like regular
// columns with a warning; unknown types fall back to text with a warning.
func mapTDengineColumnToPGLike(col connection.ColumnDefinition) (string, []string) {
	base, length := parseTDengineType(col.Type)
	warnings := make([]string, 0)
	if isTDengineTagColumn(col) {
		warnings = append(warnings, fmt.Sprintf("字段 %s 为 TDengine TAG 列,已按普通列映射", col.Name))
	}
	switch base {
	case "BOOL", "BOOLEAN":
		return "boolean", warnings
	case "TINYINT", "UTINYINT", "SMALLINT":
		return "smallint", warnings
	case "USMALLINT", "INT", "INTEGER":
		return "integer", warnings
	case "UINT", "BIGINT":
		return "bigint", warnings
	case "UBIGINT":
		// PG has no unsigned 64-bit integer; numeric(20,0) holds the full range.
		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 UBIGINT 已映射为 numeric(20,0) 以避免无符号溢出", col.Name))
		return "numeric(20,0)", warnings
	case "FLOAT":
		return "real", warnings
	case "DOUBLE":
		return "double precision", warnings
	case "DECIMAL", "NUMERIC":
		if length > 0 {
			return strings.ToLower(strings.TrimSpace(col.Type)), warnings
		}
		return "numeric(38,10)", warnings
	case "TIMESTAMP":
		return "timestamp", warnings
	case "DATE":
		return "date", warnings
	case "JSON":
		return "jsonb", warnings
	case "BINARY", "NCHAR", "VARCHAR", "VARBINARY":
		if length > 0 {
			return fmt.Sprintf("varchar(%d)", length), warnings
		}
		return "text", warnings
	default:
		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 暂无专门 PG-like 映射,已降级为 text", col.Name, col.Type))
		return "text", warnings
	}
}

View File

@@ -1,657 +0,0 @@
package sync
import (
"GoNavi-Wails/internal/connection"
"GoNavi-Wails/internal/db"
"fmt"
"strconv"
"strings"
)
// Stateless planner implementations for migrations whose target is TDengine,
// keyed by source family. Each implements the planner interface used by the
// migration registry (Name / SupportLevel / BuildPlan).
type mySQLLikeToTDenginePlanner struct{}
type pgLikeToTDenginePlanner struct{}
type clickHouseToTDenginePlanner struct{}
type tdengineToTDenginePlanner struct{}

func (mySQLLikeToTDenginePlanner) Name() string { return "mysqllike-tdengine-planner" }

// SupportLevel reports full support only for MySQL-like source -> TDengine target.
func (mySQLLikeToTDenginePlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel {
	sourceType := resolveMigrationDBType(ctx.Config.SourceConfig)
	targetType := resolveMigrationDBType(ctx.Config.TargetConfig)
	if isMySQLLikeSourceType(sourceType) && targetType == "tdengine" {
		return MigrationSupportLevelFull
	}
	return MigrationSupportLevelUnsupported
}

// BuildPlan delegates to the MySQL-like -> TDengine plan builder.
func (mySQLLikeToTDenginePlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
	return buildMySQLLikeToTDenginePlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB)
}

func (pgLikeToTDenginePlanner) Name() string { return "pglike-tdengine-planner" }

// SupportLevel reports full support only for PG-like source -> TDengine target.
func (pgLikeToTDenginePlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel {
	sourceType := resolveMigrationDBType(ctx.Config.SourceConfig)
	targetType := resolveMigrationDBType(ctx.Config.TargetConfig)
	if isPGLikeSource(sourceType) && targetType == "tdengine" {
		return MigrationSupportLevelFull
	}
	return MigrationSupportLevelUnsupported
}

// BuildPlan delegates to the PG-like -> TDengine plan builder.
func (pgLikeToTDenginePlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
	return buildPGLikeToTDenginePlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB)
}
// The four wrappers below specialize the shared buildSourceToTDenginePlan with
// a source-specific timestamp-candidate predicate and CREATE TABLE builder.

// buildMySQLLikeToTDenginePlan plans MySQL-like -> TDengine migrations.
func buildMySQLLikeToTDenginePlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
	return buildSourceToTDenginePlan(config, tableName, sourceDB, targetDB, isMySQLLikeTDengineTimestampCandidate, buildMySQLLikeToTDengineCreateTableSQL)
}

// buildPGLikeToTDenginePlan plans PG-like -> TDengine migrations.
func buildPGLikeToTDenginePlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
	return buildSourceToTDenginePlan(config, tableName, sourceDB, targetDB, isPGLikeTDengineTimestampCandidate, buildPGLikeToTDengineCreateTableSQL)
}

// buildClickHouseToTDenginePlan plans ClickHouse -> TDengine migrations.
func buildClickHouseToTDenginePlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
	return buildSourceToTDenginePlan(config, tableName, sourceDB, targetDB, isClickHouseTDengineTimestampCandidate, buildClickHouseToTDengineCreateTableSQL)
}

// buildTDengineToTDenginePlan plans TDengine -> TDengine migrations.
func buildTDengineToTDenginePlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
	return buildSourceToTDenginePlan(config, tableName, sourceDB, targetDB, isTDengineTDengineTimestampCandidate, buildTDengineToTDengineCreateTableSQL)
}
func (clickHouseToTDenginePlanner) Name() string { return "clickhouse-tdengine-planner" }

// SupportLevel reports full support only for ClickHouse source -> TDengine target.
func (clickHouseToTDenginePlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel {
	sourceType := resolveMigrationDBType(ctx.Config.SourceConfig)
	targetType := resolveMigrationDBType(ctx.Config.TargetConfig)
	if sourceType == "clickhouse" && targetType == "tdengine" {
		return MigrationSupportLevelFull
	}
	return MigrationSupportLevelUnsupported
}

// BuildPlan delegates to the ClickHouse -> TDengine plan builder.
func (clickHouseToTDenginePlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
	return buildClickHouseToTDenginePlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB)
}

func (tdengineToTDenginePlanner) Name() string { return "tdengine-tdengine-planner" }

// SupportLevel reports full support only for TDengine -> TDengine migrations.
func (tdengineToTDenginePlanner) SupportLevel(ctx MigrationBuildContext) MigrationSupportLevel {
	sourceType := resolveMigrationDBType(ctx.Config.SourceConfig)
	targetType := resolveMigrationDBType(ctx.Config.TargetConfig)
	if sourceType == "tdengine" && targetType == "tdengine" {
		return MigrationSupportLevelFull
	}
	return MigrationSupportLevelUnsupported
}

// BuildPlan delegates to the TDengine -> TDengine plan builder.
func (tdengineToTDenginePlanner) BuildPlan(ctx MigrationBuildContext) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
	return buildTDengineToTDenginePlan(ctx.Config, ctx.TableName, ctx.SourceDB, ctx.TargetDB)
}
// tdengineTimestampCandidate reports whether a source column can serve as
// TDengine's mandatory leading TIMESTAMP column.
type tdengineTimestampCandidate func(connection.ColumnDefinition) bool

// tdengineCreateTableBuilder renders CREATE TABLE DDL for a TDengine target:
// (targetQueryTable, sourceCols, timestampIndex) -> (ddl, warnings, unsupported).
type tdengineCreateTableBuilder func(string, []connection.ColumnDefinition, int) (string, []string, []string)

// buildSourceToTDenginePlan is the shared planner for any source -> TDengine
// migration. It resolves names, inspects source/target columns, locates a
// timestamp column via the supplied predicate (required for auto-create,
// since a TDengine regular table's first column must be TIMESTAMP), and
// applies the target-table strategy: reuse existing, require manual creation,
// or auto-create via buildCreateSQL.
func buildSourceToTDenginePlan(config SyncConfig, tableName string, sourceDB db.Database, targetDB db.Database, isTimestamp tdengineTimestampCandidate, buildCreateSQL tdengineCreateTableBuilder) (SchemaMigrationPlan, []connection.ColumnDefinition, []connection.ColumnDefinition, error) {
	plan := SchemaMigrationPlan{}
	sourceType := resolveMigrationDBType(config.SourceConfig)
	targetType := resolveMigrationDBType(config.TargetConfig)
	plan.SourceSchema, plan.SourceTable = normalizeSchemaAndTable(sourceType, config.SourceConfig.Database, tableName)
	plan.TargetSchema, plan.TargetTable = normalizeSchemaAndTable(targetType, config.TargetConfig.Database, tableName)
	plan.SourceQueryTable = qualifiedNameForQuery(sourceType, plan.SourceSchema, plan.SourceTable, tableName)
	plan.TargetQueryTable = qualifiedNameForQuery(targetType, plan.TargetSchema, plan.TargetTable, tableName)
	plan.PlannedAction = "使用已有目标表导入"
	sourceCols, sourceExists, err := inspectTableColumns(sourceDB, plan.SourceSchema, plan.SourceTable)
	if err != nil {
		return plan, nil, nil, fmt.Errorf("获取源表字段失败: %w", err)
	}
	if !sourceExists {
		return plan, nil, nil, fmt.Errorf("源表不存在或无列定义: %s", tableName)
	}
	// Standing TDengine-target caveats (INSERT-only writes, limited auto-create scope).
	plan.Warnings = append(plan.Warnings, tdengineTargetBaseWarnings()...)
	timestampIndex := findTDengineTimestampColumn(sourceCols, isTimestamp)
	if timestampIndex < 0 {
		plan.Warnings = append(plan.Warnings, tdengineTargetMissingTimeWarning())
	}
	targetCols, targetExists, err := inspectTableColumns(targetDB, plan.TargetSchema, plan.TargetTable)
	if err != nil {
		return plan, sourceCols, nil, fmt.Errorf("获取目标表字段失败: %w", err)
	}
	plan.TargetTableExists = targetExists
	strategy := normalizeTargetTableStrategy(config.TargetTableStrategy)
	if targetExists {
		missing := diffMissingColumnNames(sourceCols, targetCols)
		if len(missing) > 0 {
			plan.Warnings = append(plan.Warnings, fmt.Sprintf("目标表缺失字段 %d 个:%s", len(missing), strings.Join(missing, ", ")))
		}
		if strategy != "existing_only" {
			plan.Warnings = append(plan.Warnings, "TDengine 目标端当前不自动补齐已有目标表字段,请先确认目标表结构")
		}
		return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
	}
	switch strategy {
	case "existing_only":
		plan.PlannedAction = "目标表不存在,需先手工创建"
		plan.Warnings = append(plan.Warnings, "当前策略要求目标表已存在,执行时不会自动建表")
		return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
	case "smart", "auto_create_if_missing":
		// Auto-create is impossible without a column that can become the leading TIMESTAMP.
		if timestampIndex < 0 {
			plan.PlannedAction = "源表未识别到可映射为 TDengine 首列的时间列,无法自动建表"
			plan.UnsupportedObjects = append(plan.UnsupportedObjects, "TDengine regular table 首列必须为 TIMESTAMP当前源表缺少可直接映射的时间列")
			return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
		}
		plan.AutoCreate = true
		plan.PlannedAction = "目标表不存在,将自动建表后导入"
		createSQL, warnings, unsupported := buildCreateSQL(plan.TargetQueryTable, sourceCols, timestampIndex)
		plan.CreateTableSQL = createSQL
		plan.Warnings = append(plan.Warnings, warnings...)
		plan.UnsupportedObjects = append(plan.UnsupportedObjects, unsupported...)
		return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
	default:
		// Unknown strategies fall back to "use existing target table" semantics.
		return dedupeSchemaMigrationPlan(plan), sourceCols, targetCols, nil
	}
}
func tdengineTargetBaseWarnings() []string {
return []string{
"TDengine 目标端当前仅支持 INSERT 写入;若存在差异 update/delete执行期会被拒绝",
"TDengine 目标端 auto-create 当前仅创建基础表索引、外键、触发器、supertable/TAGS/TTL 不会自动迁移",
}
}
func tdengineTargetMissingTimeWarning() string {
return "源表缺少可映射的时间列,自动建表将不可用;如需继续,请先人工准备 TDengine 目标表与时间列"
}
// findTDengineTimestampColumn picks the index of the source column to use as
// TDengine's leading TIMESTAMP column: first a candidate whose (trimmed,
// case-insensitive) name matches one of the preferred time-column names in
// priority order, otherwise the first candidate in column order, otherwise -1.
func findTDengineTimestampColumn(sourceCols []connection.ColumnDefinition, candidate tdengineTimestampCandidate) int {
	preferredNames := []string{"ts", "timestamp", "event_time", "eventtime", "created_at", "create_time", "occurred_at"}
	for _, wanted := range preferredNames {
		for i, column := range sourceCols {
			if candidate(column) && strings.EqualFold(strings.TrimSpace(column.Name), wanted) {
				return i
			}
		}
	}
	for i, column := range sourceCols {
		if candidate(column) {
			return i
		}
	}
	return -1
}

// reorderTDengineColumns returns a copy of sourceCols with the column at
// timestampIndex moved to the front; an out-of-range or already-first index
// yields an unmodified copy.
func reorderTDengineColumns(sourceCols []connection.ColumnDefinition, timestampIndex int) []connection.ColumnDefinition {
	if timestampIndex <= 0 || timestampIndex >= len(sourceCols) {
		cloned := make([]connection.ColumnDefinition, len(sourceCols))
		copy(cloned, sourceCols)
		return cloned
	}
	reordered := make([]connection.ColumnDefinition, 0, len(sourceCols))
	reordered = append(reordered, sourceCols[timestampIndex])
	reordered = append(reordered, sourceCols[:timestampIndex]...)
	reordered = append(reordered, sourceCols[timestampIndex+1:]...)
	return reordered
}
// buildMySQLLikeToTDengineCreateTableSQL renders TDengine CREATE TABLE DDL
// from MySQL-like source columns, moving the chosen timestamp column to the
// front (TDengine regular tables require a leading TIMESTAMP). Returns the
// DDL plus deduplicated warnings and unsupported-object notes.
func buildMySQLLikeToTDengineCreateTableSQL(targetQueryTable string, sourceCols []connection.ColumnDefinition, timestampIndex int) (string, []string, []string) {
	ordered := reorderTDengineColumns(sourceCols, timestampIndex)
	columnDefs := make([]string, 0, len(ordered))
	warnings := make([]string, 0)
	unsupported := []string{"源表索引/外键/触发器/唯一约束/自增语义当前不会自动迁移到 TDengine"}
	// Warn only when a reorder actually happened (timestamp column was not already first).
	if timestampIndex != 0 && timestampIndex >= 0 && timestampIndex < len(sourceCols) {
		warnings = append(warnings, fmt.Sprintf("TDengine 基础表要求时间列优先,已将字段 %s 调整为首列", sourceCols[timestampIndex].Name))
	}
	for idx, col := range ordered {
		def, colWarnings := mapMySQLLikeColumnToTDengine(col, idx == 0)
		warnings = append(warnings, colWarnings...)
		columnDefs = append(columnDefs, fmt.Sprintf("%s %s", quoteIdentByType("tdengine", col.Name), def))
	}
	createSQL := fmt.Sprintf("CREATE TABLE %s (\n %s\n)", quoteQualifiedIdentByType("tdengine", targetQueryTable), strings.Join(columnDefs, ",\n "))
	return createSQL, dedupeStrings(warnings), dedupeStrings(unsupported)
}

// buildPGLikeToTDengineCreateTableSQL is the PG-like variant of the builder
// above; it differs only in the column mapper and the unsupported-object note.
func buildPGLikeToTDengineCreateTableSQL(targetQueryTable string, sourceCols []connection.ColumnDefinition, timestampIndex int) (string, []string, []string) {
	ordered := reorderTDengineColumns(sourceCols, timestampIndex)
	columnDefs := make([]string, 0, len(ordered))
	warnings := make([]string, 0)
	unsupported := []string{"源表索引/外键/触发器/唯一约束/identity/sequence 语义当前不会自动迁移到 TDengine"}
	if timestampIndex != 0 && timestampIndex >= 0 && timestampIndex < len(sourceCols) {
		warnings = append(warnings, fmt.Sprintf("TDengine 基础表要求时间列优先,已将字段 %s 调整为首列", sourceCols[timestampIndex].Name))
	}
	for idx, col := range ordered {
		def, colWarnings := mapPGLikeColumnToTDengine(col, idx == 0)
		warnings = append(warnings, colWarnings...)
		columnDefs = append(columnDefs, fmt.Sprintf("%s %s", quoteIdentByType("tdengine", col.Name), def))
	}
	createSQL := fmt.Sprintf("CREATE TABLE %s (\n %s\n)", quoteQualifiedIdentByType("tdengine", targetQueryTable), strings.Join(columnDefs, ",\n "))
	return createSQL, dedupeStrings(warnings), dedupeStrings(unsupported)
}

// buildClickHouseToTDengineCreateTableSQL is the ClickHouse variant of the
// builder above; ClickHouse engine features (ORDER BY/PARTITION/TTL/...) are
// flagged as not migrated.
func buildClickHouseToTDengineCreateTableSQL(targetQueryTable string, sourceCols []connection.ColumnDefinition, timestampIndex int) (string, []string, []string) {
	ordered := reorderTDengineColumns(sourceCols, timestampIndex)
	columnDefs := make([]string, 0, len(ordered))
	warnings := make([]string, 0)
	unsupported := []string{"源表 ORDER BY/PARTITION/TTL/Projection/物化视图 语义当前不会自动迁移到 TDengine"}
	if timestampIndex != 0 && timestampIndex >= 0 && timestampIndex < len(sourceCols) {
		warnings = append(warnings, fmt.Sprintf("TDengine 基础表要求时间列优先,已将字段 %s 调整为首列", sourceCols[timestampIndex].Name))
	}
	for idx, col := range ordered {
		def, colWarnings := mapClickHouseColumnToTDengine(col, idx == 0)
		warnings = append(warnings, colWarnings...)
		columnDefs = append(columnDefs, fmt.Sprintf("%s %s", quoteIdentByType("tdengine", col.Name), def))
	}
	createSQL := fmt.Sprintf("CREATE TABLE %s (\n %s\n)", quoteQualifiedIdentByType("tdengine", targetQueryTable), strings.Join(columnDefs, ",\n "))
	return createSQL, dedupeStrings(warnings), dedupeStrings(unsupported)
}

// buildTDengineToTDengineCreateTableSQL is the TDengine-to-TDengine variant;
// supertable/TAGS/TTL semantics are flagged as not migrated to the regular
// target table.
func buildTDengineToTDengineCreateTableSQL(targetQueryTable string, sourceCols []connection.ColumnDefinition, timestampIndex int) (string, []string, []string) {
	ordered := reorderTDengineColumns(sourceCols, timestampIndex)
	columnDefs := make([]string, 0, len(ordered))
	warnings := make([]string, 0)
	unsupported := []string{"源表 supertable/TAGS/TTL/保留策略/索引 语义当前不会自动迁移到 TDengine regular table"}
	if timestampIndex != 0 && timestampIndex >= 0 && timestampIndex < len(sourceCols) {
		warnings = append(warnings, fmt.Sprintf("TDengine 基础表要求时间列优先,已将字段 %s 调整为首列", sourceCols[timestampIndex].Name))
	}
	for idx, col := range ordered {
		def, colWarnings := mapTDengineColumnToTDengine(col, idx == 0)
		warnings = append(warnings, colWarnings...)
		columnDefs = append(columnDefs, fmt.Sprintf("%s %s", quoteIdentByType("tdengine", col.Name), def))
	}
	createSQL := fmt.Sprintf("CREATE TABLE %s (\n %s\n)", quoteQualifiedIdentByType("tdengine", targetQueryTable), strings.Join(columnDefs, ",\n "))
	return createSQL, dedupeStrings(warnings), dedupeStrings(unsupported)
}
func isMySQLLikeTDengineTimestampCandidate(col connection.ColumnDefinition) bool {
raw := strings.ToLower(strings.TrimSpace(col.Type))
clean := strings.ReplaceAll(raw, " unsigned", "")
clean = strings.ReplaceAll(clean, " zerofill", "")
return strings.HasPrefix(clean, "timestamp") || strings.HasPrefix(clean, "datetime")
}
// isPGLikeTDengineTimestampCandidate reports whether a PostgreSQL-family
// column type is timestamp-like (covers "timestamp", "timestamptz",
// "timestamp without time zone", ...).
func isPGLikeTDengineTimestampCandidate(col connection.ColumnDefinition) bool {
	return strings.HasPrefix(strings.ToLower(strings.TrimSpace(col.Type)), "timestamp")
}
// isClickHouseTDengineTimestampCandidate reports whether a ClickHouse column
// type unwraps (through Nullable/LowCardinality) to a DateTime/DateTime64 type.
func isClickHouseTDengineTimestampCandidate(col connection.ColumnDefinition) bool {
	unwrapped, _ := unwrapClickHouseTDengineType(col.Type)
	return strings.HasPrefix(unwrapped, "datetime")
}
// isTDengineTDengineTimestampCandidate reports whether a TDengine column's
// parsed base type is TIMESTAMP.
func isTDengineTDengineTimestampCandidate(col connection.ColumnDefinition) bool {
	baseType, _ := parseTDengineType(col.Type)
	return baseType == "TIMESTAMP"
}
// mapMySQLLikeColumnToTDengine maps a MySQL-family (MySQL/MariaDB/Doris)
// column definition onto a TDengine column type.
//
// When forceTimestamp is true the column is promoted to TDengine's mandatory
// first TIMESTAMP column, with a warning when the source type was not already
// timestamp-like.
//
// Returns the TDengine type text and human-readable warnings about lossy or
// semantic-dropping conversions (auto_increment, primary keys, text/blob
// downgrades, ...).
func mapMySQLLikeColumnToTDengine(col connection.ColumnDefinition, forceTimestamp bool) (string, []string) {
	warnings := make([]string, 0)
	if forceTimestamp {
		if !isMySQLLikeTDengineTimestampCandidate(col) {
			warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已提升为 TDengine 首列 TIMESTAMP", col.Name, col.Type))
		}
		return "TIMESTAMP", warnings
	}
	raw := strings.ToLower(strings.TrimSpace(col.Type))
	if raw == "" {
		return "VARCHAR(1024)", []string{fmt.Sprintf("字段 %s 类型为空,已降级为 VARCHAR(1024)", col.Name)}
	}
	unsigned := strings.Contains(raw, "unsigned")
	// Strip modifiers so the prefix checks below see the bare base type.
	clean := strings.ReplaceAll(raw, " unsigned", "")
	clean = strings.ReplaceAll(clean, " zerofill", "")
	isAutoIncrement := strings.Contains(strings.ToLower(strings.TrimSpace(col.Extra)), "auto_increment")
	if isAutoIncrement {
		warnings = append(warnings, fmt.Sprintf("字段 %s 自增语义不会迁移到 TDengine", col.Name))
	}
	if col.Key == "PRI" || col.Key == "PK" {
		warnings = append(warnings, fmt.Sprintf("字段 %s 主键语义不会按关系型约束迁移到 TDengine", col.Name))
	}
	switch {
	case strings.HasPrefix(clean, "tinyint(1)") && !unsigned && !isAutoIncrement:
		// MySQL convention: tinyint(1) represents a boolean.
		return "BOOL", warnings
	case strings.HasPrefix(clean, "tinyint"):
		if unsigned {
			return "UTINYINT", warnings
		}
		return "TINYINT", warnings
	case strings.HasPrefix(clean, "smallint"):
		if unsigned {
			return "USMALLINT", warnings
		}
		return "SMALLINT", warnings
	case strings.HasPrefix(clean, "mediumint"), strings.HasPrefix(clean, "int"), strings.HasPrefix(clean, "integer"):
		if unsigned {
			return "UINT", warnings
		}
		return "INT", warnings
	case strings.HasPrefix(clean, "bigint"):
		if unsigned {
			return "UBIGINT", warnings
		}
		return "BIGINT", warnings
	case strings.HasPrefix(clean, "decimal"), strings.HasPrefix(clean, "numeric"):
		return normalizeTDengineDecimalType(clean), warnings
	case strings.HasPrefix(clean, "float"):
		return "FLOAT", warnings
	case strings.HasPrefix(clean, "double"):
		return "DOUBLE", warnings
	// BUGFIX: timestamp/datetime must be matched BEFORE the bare "date"
	// prefix. Previously "datetime" was captured by HasPrefix(clean, "date")
	// and emitted a spurious "date 已降级" warning.
	case strings.HasPrefix(clean, "timestamp"), strings.HasPrefix(clean, "datetime"):
		return "TIMESTAMP", warnings
	case strings.HasPrefix(clean, "date"):
		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 date 已降级映射为 TIMESTAMP", col.Name))
		return "TIMESTAMP", warnings
	case strings.HasPrefix(clean, "time"):
		// Time-of-day only; TDengine has no time-only type.
		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 暂无稳定 TDengine 时间-only 映射,已降级为 VARCHAR(64)", col.Name, col.Type))
		return "VARCHAR(64)", warnings
	case strings.HasPrefix(clean, "char("), strings.HasPrefix(clean, "varchar("):
		return fmt.Sprintf("VARCHAR(%d)", normalizeTDengineVarcharLength(extractFirstTypeLength(clean), 255)), warnings
	case strings.HasPrefix(clean, "tinytext"), strings.HasPrefix(clean, "text"), strings.HasPrefix(clean, "mediumtext"), strings.HasPrefix(clean, "longtext"):
		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已降级为 VARCHAR(4096)", col.Name, col.Type))
		return "VARCHAR(4096)", warnings
	case strings.HasPrefix(clean, "json"):
		// TDengine JSON is only valid on TAG columns, not regular columns.
		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 因 TDengine JSON 仅适用于 TAG已降级为 VARCHAR(4096)", col.Name, col.Type))
		return "VARCHAR(4096)", warnings
	case strings.HasPrefix(clean, "enum"), strings.HasPrefix(clean, "set"):
		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已降级为 VARCHAR(255)", col.Name, col.Type))
		return "VARCHAR(255)", warnings
	case strings.HasPrefix(clean, "binary"), strings.HasPrefix(clean, "varbinary"), strings.HasPrefix(clean, "tinyblob"), strings.HasPrefix(clean, "blob"), strings.HasPrefix(clean, "mediumblob"), strings.HasPrefix(clean, "longblob"):
		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已按字符串语义降级为 VARCHAR(4096)", col.Name, col.Type))
		return "VARCHAR(4096)", warnings
	default:
		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 暂无专门 TDengine 映射,已降级为 VARCHAR(1024)", col.Name, col.Type))
		return "VARCHAR(1024)", warnings
	}
}
// mapPGLikeColumnToTDengine maps a PostgreSQL-family column definition onto a
// TDengine column type.
//
// When forceTimestamp is true the column is promoted to TDengine's mandatory
// first TIMESTAMP column, with a warning when the source type was not already
// timestamp-like.
//
// Returns the TDengine type text and human-readable warnings about lossy or
// semantic-dropping conversions.
func mapPGLikeColumnToTDengine(col connection.ColumnDefinition, forceTimestamp bool) (string, []string) {
	warnings := make([]string, 0)
	if forceTimestamp {
		if raw := strings.ToLower(strings.TrimSpace(col.Type)); !strings.HasPrefix(raw, "timestamp") {
			warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已提升为 TDengine 首列 TIMESTAMP", col.Name, col.Type))
		}
		return "TIMESTAMP", warnings
	}
	raw := strings.ToLower(strings.TrimSpace(col.Type))
	if raw == "" {
		// Unknown/empty type: fall back to a generous VARCHAR.
		return "VARCHAR(1024)", []string{fmt.Sprintf("字段 %s 类型为空,已降级为 VARCHAR(1024)", col.Name)}
	}
	// Primary-key and identity/auto-increment semantics have no TDengine
	// equivalent; surface them as warnings instead of silently dropping.
	if col.Key == "PRI" || col.Key == "PK" {
		warnings = append(warnings, fmt.Sprintf("字段 %s 主键语义不会按关系型约束迁移到 TDengine", col.Name))
	}
	if strings.Contains(strings.ToLower(strings.TrimSpace(col.Extra)), "identity") || strings.Contains(strings.ToLower(strings.TrimSpace(col.Extra)), "auto_increment") {
		warnings = append(warnings, fmt.Sprintf("字段 %s 自增/identity 语义不会迁移到 TDengine", col.Name))
	}
	// NOTE: case order matters — exact "date" is checked before the
	// "timestamp"/"time" prefixes so each type hits its intended branch.
	switch {
	case raw == "boolean" || strings.HasPrefix(raw, "bool"):
		return "BOOL", warnings
	case raw == "smallint":
		return "SMALLINT", warnings
	case raw == "integer" || raw == "int4":
		return "INT", warnings
	case raw == "bigint" || raw == "int8":
		return "BIGINT", warnings
	case strings.HasPrefix(raw, "numeric"), strings.HasPrefix(raw, "decimal"):
		return normalizeTDengineDecimalType(raw), warnings
	case raw == "real" || raw == "float4":
		return "FLOAT", warnings
	case raw == "double precision" || raw == "float8":
		return "DOUBLE", warnings
	case raw == "date":
		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 date 已降级映射为 TIMESTAMP", col.Name))
		return "TIMESTAMP", warnings
	case strings.HasPrefix(raw, "timestamp"):
		// Covers timestamp, timestamptz, timestamp with/without time zone.
		return "TIMESTAMP", warnings
	case strings.HasPrefix(raw, "time"):
		// Time-of-day only; TDengine has no time-only type.
		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 暂无稳定 TDengine 时间-only 映射,已降级为 VARCHAR(64)", col.Name, col.Type))
		return "VARCHAR(64)", warnings
	case strings.HasPrefix(raw, "character varying("), strings.HasPrefix(raw, "varchar("), strings.HasPrefix(raw, "character("), strings.HasPrefix(raw, "char("):
		// Preserve the declared length, clamped to TDengine's limits.
		return fmt.Sprintf("VARCHAR(%d)", normalizeTDengineVarcharLength(extractFirstTypeLength(raw), 255)), warnings
	case raw == "text":
		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 text 已降级为 VARCHAR(4096)", col.Name))
		return "VARCHAR(4096)", warnings
	case raw == "uuid":
		// 36 chars fits the canonical hyphenated UUID representation.
		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 uuid 已降级为 VARCHAR(36)", col.Name))
		return "VARCHAR(36)", warnings
	case raw == "json" || raw == "jsonb":
		// TDengine JSON is only valid on TAG columns, not regular columns.
		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 因 TDengine JSON 仅适用于 TAG已降级为 VARCHAR(4096)", col.Name, col.Type))
		return "VARCHAR(4096)", warnings
	case raw == "bytea":
		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 bytea 已按字符串语义降级为 VARCHAR(4096)", col.Name))
		return "VARCHAR(4096)", warnings
	case strings.HasSuffix(raw, "[]") || strings.HasPrefix(raw, "array"):
		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已降级为 VARCHAR(4096)", col.Name, col.Type))
		return "VARCHAR(4096)", warnings
	case raw == "user-defined":
		warnings = append(warnings, fmt.Sprintf("字段 %s 为用户自定义类型,已降级为 VARCHAR(1024)", col.Name))
		return "VARCHAR(1024)", warnings
	default:
		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 暂无专门 TDengine 映射,已降级为 VARCHAR(1024)", col.Name, col.Type))
		return "VARCHAR(1024)", warnings
	}
}
// mapClickHouseColumnToTDengine maps a ClickHouse column definition onto a
// TDengine column type. Nullable(...)/LowCardinality(...) wrappers are
// unwrapped first; nullability itself is not propagated here.
//
// When forceTimestamp is true the column is promoted to TDengine's mandatory
// first TIMESTAMP column, with a warning when the source type was not already
// DateTime-like.
//
// Returns the TDengine type text and warnings about lossy conversions.
func mapClickHouseColumnToTDengine(col connection.ColumnDefinition, forceTimestamp bool) (string, []string) {
	warnings := make([]string, 0)
	if forceTimestamp {
		if !isClickHouseTDengineTimestampCandidate(col) {
			warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已提升为 TDengine 首列 TIMESTAMP", col.Name, col.Type))
		}
		return "TIMESTAMP", warnings
	}
	// lower is the unwrapped, lower-cased base type (wrappers stripped).
	lower, _ := unwrapClickHouseTDengineType(col.Type)
	if lower == "" {
		return "VARCHAR(1024)", []string{fmt.Sprintf("字段 %s 类型为空,已降级为 VARCHAR(1024)", col.Name)}
	}
	switch {
	case lower == "bool" || lower == "boolean":
		return "BOOL", warnings
	case lower == "int8":
		return "TINYINT", warnings
	case lower == "uint8":
		return "UTINYINT", warnings
	case lower == "int16":
		return "SMALLINT", warnings
	case lower == "uint16":
		return "USMALLINT", warnings
	case lower == "int32":
		return "INT", warnings
	case lower == "uint32":
		return "UINT", warnings
	case lower == "int64":
		return "BIGINT", warnings
	case lower == "uint64":
		return "UBIGINT", warnings
	case lower == "float32":
		return "FLOAT", warnings
	case lower == "float64":
		return "DOUBLE", warnings
	case lower == "date":
		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 date 已降级映射为 TIMESTAMP", col.Name))
		return "TIMESTAMP", warnings
	case strings.HasPrefix(lower, "datetime"):
		// Covers DateTime and DateTime64(...).
		return "TIMESTAMP", warnings
	case lower == "string":
		return "VARCHAR(1024)", warnings
	case lower == "uuid":
		// 36 chars fits the canonical hyphenated UUID representation.
		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 uuid 已降级为 VARCHAR(36)", col.Name))
		return "VARCHAR(36)", warnings
	case lower == "json", strings.HasPrefix(lower, "map("), strings.HasPrefix(lower, "array("), strings.HasPrefix(lower, "tuple("), strings.HasPrefix(lower, "nested("):
		// Structured/nested types have no TDengine equivalent.
		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已降级为 VARCHAR(4096)", col.Name, col.Type))
		return "VARCHAR(4096)", warnings
	case strings.HasPrefix(lower, "enum8("), strings.HasPrefix(lower, "enum16("):
		warnings = append(warnings, fmt.Sprintf("字段 %s 枚举类型 %s 已降级为 VARCHAR(255)", col.Name, col.Type))
		return "VARCHAR(255)", warnings
	case clickHouseDecimalPattern.MatchString(lower):
		// Decimal(P,S): carry precision and scale straight through.
		parts := clickHouseDecimalPattern.FindStringSubmatch(lower)
		return fmt.Sprintf("DECIMAL(%s,%s)", parts[2], parts[3]), warnings
	case clickHouseStringArgsPattern.MatchString(lower):
		// FixedString(N): keep the declared length, clamped to TDengine limits.
		parts := clickHouseStringArgsPattern.FindStringSubmatch(lower)
		length, err := strconv.Atoi(parts[1])
		if err != nil {
			warnings = append(warnings, fmt.Sprintf("字段 %s FixedString 长度解析失败,已降级为 VARCHAR(255)", col.Name))
			return "VARCHAR(255)", warnings
		}
		return fmt.Sprintf("VARCHAR(%d)", normalizeTDengineVarcharLength(length, 255)), warnings
	default:
		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 暂无专门 TDengine 映射,已降级为 VARCHAR(1024)", col.Name, col.Type))
		return "VARCHAR(1024)", warnings
	}
}
// mapTDengineColumnToTDengine maps a TDengine column definition onto a
// TDengine regular-table column type (same-engine migration). TAG columns
// lose their TAG semantics and become ordinary columns.
//
// When forceTimestamp is true the column is promoted to TDengine's mandatory
// first TIMESTAMP column, with a warning when the source base type was not
// already TIMESTAMP.
//
// Returns the TDengine type text and warnings about semantics that do not
// survive the migration.
func mapTDengineColumnToTDengine(col connection.ColumnDefinition, forceTimestamp bool) (string, []string) {
	warnings := make([]string, 0)
	if forceTimestamp {
		if !isTDengineTDengineTimestampCandidate(col) {
			warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 已提升为 TDengine 首列 TIMESTAMP", col.Name, col.Type))
		}
		return "TIMESTAMP", warnings
	}
	// base is the upper-cased type name, length its declared size (0 if none).
	base, length := parseTDengineType(col.Type)
	if base == "" {
		return "VARCHAR(1024)", []string{fmt.Sprintf("字段 %s 类型为空,已降级为 VARCHAR(1024)", col.Name)}
	}
	if isTDengineTagColumn(col) {
		warnings = append(warnings, fmt.Sprintf("字段 %s 为 TDengine TAG 列,迁移到 regular table 后将降级为普通字段", col.Name))
	}
	switch base {
	case "BOOL", "BOOLEAN":
		return "BOOL", warnings
	case "TINYINT":
		return "TINYINT", warnings
	case "UTINYINT":
		return "UTINYINT", warnings
	case "SMALLINT":
		return "SMALLINT", warnings
	case "USMALLINT":
		return "USMALLINT", warnings
	case "INT", "INTEGER":
		return "INT", warnings
	case "UINT":
		return "UINT", warnings
	case "BIGINT":
		return "BIGINT", warnings
	case "UBIGINT":
		return "UBIGINT", warnings
	case "FLOAT":
		return "FLOAT", warnings
	case "DOUBLE":
		return "DOUBLE", warnings
	case "DECIMAL", "NUMERIC":
		return normalizeTDengineDecimalType(col.Type), warnings
	case "TIMESTAMP":
		return "TIMESTAMP", warnings
	case "DATE":
		// NOTE(review): classic TDengine has no standalone DATE column type —
		// confirm the target server version accepts this before relying on it.
		return "DATE", warnings
	case "JSON":
		// JSON is only valid on TAG columns; regular tables must downgrade.
		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 JSON 在 TDengine regular table 中不保留 TAG 语义,已降级为 VARCHAR(4096)", col.Name))
		return "VARCHAR(4096)", warnings
	case "BINARY", "NCHAR", "VARCHAR", "VARBINARY":
		if length > 0 {
			// Keep the declared length, clamped to TDengine's upper bound.
			return fmt.Sprintf("%s(%d)", base, normalizeTDengineVarcharLength(length, length)), warnings
		}
		// No declared length: VARCHAR gets a roomier default than the rest.
		fallback := 255
		if base == "VARCHAR" {
			fallback = 1024
		}
		return fmt.Sprintf("%s(%d)", base, fallback), warnings
	default:
		warnings = append(warnings, fmt.Sprintf("字段 %s 类型 %s 暂无专门 TDengine 同库映射,已降级为 VARCHAR(1024)", col.Name, col.Type))
		return "VARCHAR(1024)", warnings
	}
}
// unwrapClickHouseTDengineType strips Nullable(...) and LowCardinality(...)
// wrappers (in any nesting order) from a ClickHouse type name. It returns the
// lower-cased inner type and whether a Nullable wrapper was seen.
func unwrapClickHouseTDengineType(raw string) (string, bool) {
	current := strings.TrimSpace(raw)
	lowered := strings.ToLower(current)
	sawNullable := false
	for {
		progressed := false
		if strings.HasPrefix(lowered, "nullable(") && strings.HasSuffix(lowered, ")") {
			current = strings.TrimSpace(current[len("nullable(") : len(current)-1])
			lowered = strings.ToLower(current)
			sawNullable = true
			progressed = true
		}
		if strings.HasPrefix(lowered, "lowcardinality(") && strings.HasSuffix(lowered, ")") {
			current = strings.TrimSpace(current[len("lowcardinality(") : len(current)-1])
			lowered = strings.ToLower(current)
			progressed = true
		}
		if !progressed {
			// No wrapper matched on this pass: fully unwrapped.
			return lowered, sawNullable
		}
	}
}
// normalizeTDengineDecimalType converts a source decimal/numeric type text
// into a TDengine DECIMAL type, preserving any "(precision,scale)" suffix.
// Inputs that are empty, unrecognized, or lack an explicit precision fall
// back to DECIMAL(38,10).
//
// BUGFIX: a bare "decimal"/"numeric" (no precision suffix, legal in e.g.
// PostgreSQL) previously produced the invalid type text "DECIMAL"; it now
// falls back to the same DECIMAL(38,10) default as other unknown inputs.
func normalizeTDengineDecimalType(raw string) string {
	text := strings.TrimSpace(raw)
	if text == "" {
		return "DECIMAL(38,10)"
	}
	lower := strings.ToLower(text)
	var suffix string
	switch {
	case strings.HasPrefix(lower, "numeric"):
		suffix = text[len("numeric"):]
	case strings.HasPrefix(lower, "decimal"):
		suffix = text[len("decimal"):]
	default:
		return "DECIMAL(38,10)"
	}
	// No "(precision,scale)" part: use a safe, wide default.
	if !strings.HasPrefix(strings.TrimSpace(suffix), "(") {
		return "DECIMAL(38,10)"
	}
	return "DECIMAL" + suffix
}
// normalizeTDengineVarcharLength clamps a declared string-column length for
// TDengine. Non-positive lengths use fallback (itself defaulting to 255 when
// non-positive); lengths above TDengine's 16384 limit are capped.
func normalizeTDengineVarcharLength(length int, fallback int) int {
	const maxVarcharLen = 16384
	if fallback <= 0 {
		fallback = 255
	}
	switch {
	case length <= 0:
		return fallback
	case length > maxVarcharLen:
		return maxVarcharLen
	default:
		return length
	}
}
// extractFirstTypeLength parses the first numeric argument out of a type
// text such as "varchar(255)" or "decimal(10,2)". Returns 0 when there is no
// parenthesized argument or it is not an integer.
func extractFirstTypeLength(raw string) int {
	openIdx := strings.Index(raw, "(")
	if openIdx < 0 {
		return 0
	}
	rest := raw[openIdx+1:]
	closeIdx := strings.Index(rest, ")")
	if closeIdx < 0 {
		return 0
	}
	// Only the first comma-separated argument matters (e.g. precision).
	firstArg := rest[:closeIdx]
	if commaIdx := strings.Index(firstArg, ","); commaIdx >= 0 {
		firstArg = firstArg[:commaIdx]
	}
	value, err := strconv.Atoi(strings.TrimSpace(firstArg))
	if err != nil {
		return 0
	}
	return value
}

View File

@@ -1,98 +0,0 @@
package sync
import (
"GoNavi-Wails/internal/connection"
"strings"
)
func normalizeMigrationDBType(dbType string) string {
normalized := strings.ToLower(strings.TrimSpace(dbType))
switch normalized {
case "doris":
return "diros"
case "postgresql":
return "postgres"
case "dm", "dm8":
return "dameng"
case "sqlite3":
return "sqlite"
default:
return normalized
}
}
func resolveMigrationDBType(config connection.ConnectionConfig) string {
dbType := normalizeMigrationDBType(config.Type)
if dbType != "custom" {
return dbType
}
driver := strings.ToLower(strings.TrimSpace(config.Driver))
switch driver {
case "postgresql", "postgres", "pg", "pq", "pgx":
return "postgres"
case "dm", "dameng", "dm8":
return "dameng"
case "sqlite3", "sqlite":
return "sqlite"
case "sphinxql":
return "sphinx"
case "diros", "doris":
return "diros"
case "kingbase", "kingbase8", "kingbasees", "kingbasev8":
return "kingbase"
case "highgo":
return "highgo"
case "vastbase":
return "vastbase"
case "mysql", "mysql2":
return "mysql"
case "mariadb":
return "mariadb"
}
switch {
case strings.Contains(driver, "postgres"):
return "postgres"
case strings.Contains(driver, "kingbase"):
return "kingbase"
case strings.Contains(driver, "highgo"):
return "highgo"
case strings.Contains(driver, "vastbase"):
return "vastbase"
case strings.Contains(driver, "sqlite"):
return "sqlite"
case strings.Contains(driver, "sphinx"):
return "sphinx"
case strings.Contains(driver, "diros"), strings.Contains(driver, "doris"):
return "diros"
case strings.Contains(driver, "maria"):
return "mariadb"
case strings.Contains(driver, "mysql"):
return "mysql"
case strings.Contains(driver, "dameng"), strings.Contains(driver, "dm"):
return "dameng"
default:
return normalizeMigrationDBType(driver)
}
}
func isMySQLCoreType(dbType string) bool {
switch normalizeMigrationDBType(dbType) {
case "mysql", "mariadb", "diros":
return true
default:
return false
}
}
func isMySQLLikeSourceType(dbType string) bool {
if isMySQLCoreType(dbType) {
return true
}
return normalizeMigrationDBType(dbType) == "sphinx"
}
func isMySQLLikeWritableTargetType(dbType string) bool {
return isMySQLCoreType(dbType)
}

View File

@@ -1,7 +1,7 @@
package sync
import (
"errors"
"GoNavi-Wails/internal/db"
"fmt"
"strings"
)
@@ -36,18 +36,12 @@ func (s *SyncEngine) Preview(config SyncConfig, tableName string, limit int) (Ta
if limit > 500 {
limit = 500
}
if isRedisToMongoKeyspacePair(config) {
return s.previewRedisToMongo(config, tableName, limit)
}
if isMongoToRedisKeyspacePair(config) {
return s.previewMongoToRedis(config, tableName, limit)
}
sourceDB, err := newSyncDatabase(config.SourceConfig.Type)
sourceDB, err := db.NewDatabase(config.SourceConfig.Type)
if err != nil {
return TableDiffPreview{}, fmt.Errorf("初始化源数据库驱动失败: %w", err)
}
targetDB, err := newSyncDatabase(config.TargetConfig.Type)
targetDB, err := db.NewDatabase(config.TargetConfig.Type)
if err != nil {
return TableDiffPreview{}, fmt.Errorf("初始化目标数据库驱动失败: %w", err)
}
@@ -62,12 +56,14 @@ func (s *SyncEngine) Preview(config SyncConfig, tableName string, limit int) (Ta
}
defer targetDB.Close()
plan, cols, _, err := buildSchemaMigrationPlan(config, tableName, sourceDB, targetDB)
sourceSchema, sourceTable := normalizeSchemaAndTable(config.SourceConfig.Type, config.SourceConfig.Database, tableName)
targetSchema, targetTable := normalizeSchemaAndTable(config.TargetConfig.Type, config.TargetConfig.Database, tableName)
sourceQueryTable := qualifiedNameForQuery(config.SourceConfig.Type, sourceSchema, sourceTable, tableName)
targetQueryTable := qualifiedNameForQuery(config.TargetConfig.Type, targetSchema, targetTable, tableName)
cols, err := sourceDB.GetColumns(sourceSchema, sourceTable)
if err != nil {
return TableDiffPreview{}, err
}
if !plan.TargetTableExists && !plan.AutoCreate {
return TableDiffPreview{}, errors.New(firstNonEmpty(plan.PlannedAction, "目标表不存在,无法预览差异"))
return TableDiffPreview{}, fmt.Errorf("获取源表字段失败: %w", err)
}
pkCols := make([]string, 0, 2)
@@ -84,17 +80,13 @@ func (s *SyncEngine) Preview(config SyncConfig, tableName string, limit int) (Ta
}
pkCol := pkCols[0]
sourceRows, _, err := sourceDB.Query(fmt.Sprintf("SELECT * FROM %s", quoteQualifiedIdentByType(resolveMigrationDBType(config.SourceConfig), plan.SourceQueryTable)))
sourceRows, _, err := sourceDB.Query(fmt.Sprintf("SELECT * FROM %s", quoteQualifiedIdentByType(config.SourceConfig.Type, sourceQueryTable)))
if err != nil {
return TableDiffPreview{}, fmt.Errorf("读取源表失败: %w", err)
}
targetRows := make([]map[string]interface{}, 0)
if plan.TargetTableExists {
targetRows, _, err = targetDB.Query(fmt.Sprintf("SELECT * FROM %s", quoteQualifiedIdentByType(resolveMigrationDBType(config.TargetConfig), plan.TargetQueryTable)))
if err != nil {
return TableDiffPreview{}, fmt.Errorf("读取目标表失败: %w", err)
}
targetRows, _, err := targetDB.Query(fmt.Sprintf("SELECT * FROM %s", quoteQualifiedIdentByType(config.TargetConfig.Type, targetQueryTable)))
if err != nil {
return TableDiffPreview{}, fmt.Errorf("读取目标表失败: %w", err)
}
targetMap := make(map[string]map[string]interface{}, len(targetRows))
@@ -141,7 +133,12 @@ func (s *SyncEngine) Preview(config SyncConfig, tableName string, limit int) (Ta
if len(changedColumns) > 0 {
out.TotalUpdates++
if len(out.Updates) < limit {
out.Updates = append(out.Updates, PreviewUpdateRow{PK: pkVal, ChangedColumns: changedColumns, Source: sRow, Target: tRow})
out.Updates = append(out.Updates, PreviewUpdateRow{
PK: pkVal,
ChangedColumns: changedColumns,
Source: sRow,
Target: tRow,
})
}
}
continue

View File

@@ -1,490 +0,0 @@
package sync
import (
"GoNavi-Wails/internal/connection"
"GoNavi-Wails/internal/db"
redispkg "GoNavi-Wails/internal/redis"
"fmt"
"sort"
"strings"
"testing"
)
type fakeRedisMigrationClient struct {
values map[string]*redispkg.RedisValue
scannedKeys []string
connectConfig connection.ConnectionConfig
closed bool
}
func (f *fakeRedisMigrationClient) Connect(config connection.ConnectionConfig) error {
f.connectConfig = config
return nil
}
func (f *fakeRedisMigrationClient) Close() error {
f.closed = true
return nil
}
func (f *fakeRedisMigrationClient) ScanKeys(pattern string, cursor uint64, count int64) (*redispkg.RedisScanResult, error) {
items := make([]redispkg.RedisKeyInfo, 0, len(f.scannedKeys))
for _, key := range f.scannedKeys {
items = append(items, redispkg.RedisKeyInfo{Key: key, Type: "string", TTL: -1})
}
return &redispkg.RedisScanResult{Keys: items, Cursor: "0"}, nil
}
func (f *fakeRedisMigrationClient) GetKeyType(key string) (string, error) {
if value, ok := f.values[key]; ok && value != nil {
return value.Type, nil
}
return "none", nil
}
func (f *fakeRedisMigrationClient) GetValue(key string) (*redispkg.RedisValue, error) {
if value, ok := f.values[key]; ok {
return value, nil
}
return nil, fmt.Errorf("key not found: %s", key)
}
func (f *fakeRedisMigrationClient) DeleteKeys(keys []string) (int64, error) {
var deleted int64
for _, key := range keys {
if _, ok := f.values[key]; ok {
delete(f.values, key)
deleted++
}
}
return deleted, nil
}
func (f *fakeRedisMigrationClient) SetTTL(key string, ttl int64) error {
value, ok := f.values[key]
if !ok {
return nil
}
value.TTL = ttl
return nil
}
func (f *fakeRedisMigrationClient) SetString(key, value string, ttl int64) error {
if f.values == nil {
f.values = map[string]*redispkg.RedisValue{}
}
f.values[key] = &redispkg.RedisValue{Type: "string", TTL: ttl, Value: value, Length: int64(len(value))}
return nil
}
func (f *fakeRedisMigrationClient) SetHashField(key, field, value string) error {
if f.values == nil {
f.values = map[string]*redispkg.RedisValue{}
}
current, ok := f.values[key]
if !ok || current == nil || current.Type != "hash" {
current = &redispkg.RedisValue{Type: "hash", TTL: -1, Value: map[string]string{}}
f.values[key] = current
}
hash, _ := current.Value.(map[string]string)
if hash == nil {
hash = map[string]string{}
}
hash[field] = value
current.Value = hash
current.Length = int64(len(hash))
return nil
}
func (f *fakeRedisMigrationClient) ListPush(key string, values ...string) error {
if f.values == nil {
f.values = map[string]*redispkg.RedisValue{}
}
current, ok := f.values[key]
if !ok || current == nil || current.Type != "list" {
current = &redispkg.RedisValue{Type: "list", TTL: -1, Value: []string{}}
f.values[key] = current
}
list, _ := current.Value.([]string)
list = append(list, values...)
current.Value = list
current.Length = int64(len(list))
return nil
}
func (f *fakeRedisMigrationClient) SetAdd(key string, members ...string) error {
if f.values == nil {
f.values = map[string]*redispkg.RedisValue{}
}
current, ok := f.values[key]
if !ok || current == nil || current.Type != "set" {
current = &redispkg.RedisValue{Type: "set", TTL: -1, Value: []string{}}
f.values[key] = current
}
setValues, _ := current.Value.([]string)
seen := make(map[string]struct{}, len(setValues)+len(members))
for _, item := range setValues {
seen[item] = struct{}{}
}
for _, item := range members {
if _, ok := seen[item]; ok {
continue
}
seen[item] = struct{}{}
setValues = append(setValues, item)
}
sort.Strings(setValues)
current.Value = setValues
current.Length = int64(len(setValues))
return nil
}
func (f *fakeRedisMigrationClient) ZSetAdd(key string, members ...redispkg.ZSetMember) error {
if f.values == nil {
f.values = map[string]*redispkg.RedisValue{}
}
copied := append([]redispkg.ZSetMember(nil), members...)
sort.Slice(copied, func(i, j int) bool {
if copied[i].Score == copied[j].Score {
return copied[i].Member < copied[j].Member
}
return copied[i].Score < copied[j].Score
})
f.values[key] = &redispkg.RedisValue{Type: "zset", TTL: -1, Value: copied, Length: int64(len(copied))}
return nil
}
func (f *fakeRedisMigrationClient) StreamAdd(key string, fields map[string]string, id string) (string, error) {
if f.values == nil {
f.values = map[string]*redispkg.RedisValue{}
}
current, ok := f.values[key]
if !ok || current == nil || current.Type != "stream" {
current = &redispkg.RedisValue{Type: "stream", TTL: -1, Value: []redispkg.StreamEntry{}}
f.values[key] = current
}
entries, _ := current.Value.([]redispkg.StreamEntry)
entryID := id
if entryID == "" {
entryID = fmt.Sprintf("%d-0", len(entries)+1)
}
entries = append(entries, redispkg.StreamEntry{ID: entryID, Fields: fields})
current.Value = entries
current.Length = int64(len(entries))
return entryID, nil
}
type fakeRedisMongoTargetDB struct {
tables []string
queryTable string
queryRows []map[string]interface{}
execs []string
applyTable string
applySet connection.ChangeSet
}
func (f *fakeRedisMongoTargetDB) Connect(config connection.ConnectionConfig) error { return nil }
func (f *fakeRedisMongoTargetDB) Close() error { return nil }
func (f *fakeRedisMongoTargetDB) Ping() error { return nil }
func (f *fakeRedisMongoTargetDB) Query(query string) ([]map[string]interface{}, []string, error) {
queryTable := strings.TrimSpace(f.queryTable)
if queryTable == "" {
queryTable = "redis_db_0_keys"
}
if strings.Contains(query, fmt.Sprintf(`"find":"%s"`, queryTable)) {
return f.queryRows, []string{"_id", "key", "value"}, nil
}
return nil, nil, nil
}
func (f *fakeRedisMongoTargetDB) Exec(query string) (int64, error) {
f.execs = append(f.execs, query)
return 1, nil
}
func (f *fakeRedisMongoTargetDB) GetDatabases() ([]string, error) { return []string{"app"}, nil }
func (f *fakeRedisMongoTargetDB) GetTables(dbName string) ([]string, error) {
return f.tables, nil
}
func (f *fakeRedisMongoTargetDB) GetCreateStatement(dbName, tableName string) (string, error) {
return "", nil
}
func (f *fakeRedisMongoTargetDB) GetColumns(dbName, tableName string) ([]connection.ColumnDefinition, error) {
return nil, nil
}
func (f *fakeRedisMongoTargetDB) GetAllColumns(dbName string) ([]connection.ColumnDefinitionWithTable, error) {
return nil, nil
}
func (f *fakeRedisMongoTargetDB) GetIndexes(dbName, tableName string) ([]connection.IndexDefinition, error) {
return nil, nil
}
func (f *fakeRedisMongoTargetDB) GetForeignKeys(dbName, tableName string) ([]connection.ForeignKeyDefinition, error) {
return nil, nil
}
func (f *fakeRedisMongoTargetDB) GetTriggers(dbName, tableName string) ([]connection.TriggerDefinition, error) {
return nil, nil
}
func (f *fakeRedisMongoTargetDB) ApplyChanges(tableName string, changes connection.ChangeSet) error {
f.applyTable = tableName
f.applySet = changes
return nil
}
type fakeMongoRedisSourceDB struct {
tables []string
rowsByTable map[string][]map[string]interface{}
connectConfig connection.ConnectionConfig
}
func (f *fakeMongoRedisSourceDB) Connect(config connection.ConnectionConfig) error {
f.connectConfig = config
return nil
}
func (f *fakeMongoRedisSourceDB) Close() error { return nil }
func (f *fakeMongoRedisSourceDB) Ping() error { return nil }
func (f *fakeMongoRedisSourceDB) Query(query string) ([]map[string]interface{}, []string, error) {
for tableName, rows := range f.rowsByTable {
if strings.Contains(query, fmt.Sprintf(`"find":"%s"`, tableName)) {
return rows, []string{"_id", "key", "type", "ttl", "value"}, nil
}
}
return nil, nil, fmt.Errorf("unexpected query: %s", query)
}
func (f *fakeMongoRedisSourceDB) Exec(query string) (int64, error) { return 0, nil }
func (f *fakeMongoRedisSourceDB) GetDatabases() ([]string, error) { return []string{"app"}, nil }
func (f *fakeMongoRedisSourceDB) GetTables(dbName string) ([]string, error) {
return f.tables, nil
}
func (f *fakeMongoRedisSourceDB) GetCreateStatement(dbName, tableName string) (string, error) {
return "", nil
}
func (f *fakeMongoRedisSourceDB) GetColumns(dbName, tableName string) ([]connection.ColumnDefinition, error) {
return nil, nil
}
func (f *fakeMongoRedisSourceDB) GetAllColumns(dbName string) ([]connection.ColumnDefinitionWithTable, error) {
return nil, nil
}
func (f *fakeMongoRedisSourceDB) GetIndexes(dbName, tableName string) ([]connection.IndexDefinition, error) {
return nil, nil
}
func (f *fakeMongoRedisSourceDB) GetForeignKeys(dbName, tableName string) ([]connection.ForeignKeyDefinition, error) {
return nil, nil
}
func (f *fakeMongoRedisSourceDB) GetTriggers(dbName, tableName string) ([]connection.TriggerDefinition, error) {
return nil, nil
}
func TestRunSync_RedisToMongoAppliesInsertAndUpdate(t *testing.T) {
fakeRedis := &fakeRedisMigrationClient{
values: map[string]*redispkg.RedisValue{
"user:1": {Type: "hash", TTL: 120, Length: 2, Value: map[string]string{"name": "alice"}},
"user:2": {Type: "string", TTL: -1, Length: 1, Value: "online"},
},
}
fakeTarget := &fakeRedisMongoTargetDB{
tables: []string{"redis_db_0_keys"},
queryRows: []map[string]interface{}{
{"_id": "db0:user:1", "redisDb": 0, "key": "user:1", "type": "hash", "ttl": 120, "length": int64(2), "value": map[string]interface{}{"name": "old"}},
},
}
oldNewRedisClient := newRedisSourceClient
oldNewDatabase := newSyncDatabase
defer func() {
newRedisSourceClient = oldNewRedisClient
newSyncDatabase = oldNewDatabase
}()
newRedisSourceClient = func() redisMigrationClient { return fakeRedis }
newSyncDatabase = func(dbType string) (db.Database, error) { return fakeTarget, nil }
engine := NewSyncEngine(Reporter{})
result := engine.RunSync(SyncConfig{
SourceConfig: connection.ConnectionConfig{Type: "redis", Database: "0"},
TargetConfig: connection.ConnectionConfig{Type: "mongodb", Database: "app"},
Tables: []string{"user:1", "user:2"},
Content: "data",
Mode: "insert_update",
})
if !result.Success {
t.Fatalf("expected success, got: %+v", result)
}
if fakeRedis.connectConfig.RedisDB != 0 {
t.Fatalf("expected redis db 0, got %d", fakeRedis.connectConfig.RedisDB)
}
if fakeTarget.applyTable != "redis_db_0_keys" {
t.Fatalf("unexpected apply table: %s", fakeTarget.applyTable)
}
if len(fakeTarget.applySet.Inserts) != 1 || len(fakeTarget.applySet.Updates) != 1 {
t.Fatalf("unexpected change set: %+v", fakeTarget.applySet)
}
}
func TestRunSync_RedisToMongoUsesConfiguredCollectionName(t *testing.T) {
fakeRedis := &fakeRedisMigrationClient{
values: map[string]*redispkg.RedisValue{
"user:1": {Type: "string", TTL: -1, Length: 1, Value: "online"},
},
}
fakeTarget := &fakeRedisMongoTargetDB{
tables: []string{"custom_keyspace_docs"},
queryTable: "custom_keyspace_docs",
}
oldNewRedisClient := newRedisSourceClient
oldNewDatabase := newSyncDatabase
defer func() {
newRedisSourceClient = oldNewRedisClient
newSyncDatabase = oldNewDatabase
}()
newRedisSourceClient = func() redisMigrationClient { return fakeRedis }
newSyncDatabase = func(dbType string) (db.Database, error) { return fakeTarget, nil }
engine := NewSyncEngine(Reporter{})
result := engine.RunSync(SyncConfig{
SourceConfig: connection.ConnectionConfig{Type: "redis", Database: "0"},
TargetConfig: connection.ConnectionConfig{Type: "mongodb", Database: "app"},
Tables: []string{"user:1"},
Content: "data",
Mode: "insert_update",
MongoCollectionName: "custom_keyspace_docs",
})
if !result.Success {
t.Fatalf("expected success, got: %+v", result)
}
if fakeTarget.applyTable != "custom_keyspace_docs" {
t.Fatalf("unexpected apply table: %s", fakeTarget.applyTable)
}
}
func TestPreview_RedisToMongoReturnsDocumentPreview(t *testing.T) {
fakeRedis := &fakeRedisMigrationClient{
values: map[string]*redispkg.RedisValue{
"session:1": {Type: "string", TTL: 60, Length: 1, Value: "token"},
},
}
fakeTarget := &fakeRedisMongoTargetDB{}
oldNewRedisClient := newRedisSourceClient
oldNewDatabase := newSyncDatabase
defer func() {
newRedisSourceClient = oldNewRedisClient
newSyncDatabase = oldNewDatabase
}()
newRedisSourceClient = func() redisMigrationClient { return fakeRedis }
newSyncDatabase = func(dbType string) (db.Database, error) { return fakeTarget, nil }
engine := NewSyncEngine(Reporter{})
preview, err := engine.Preview(SyncConfig{
SourceConfig: connection.ConnectionConfig{Type: "redis", Database: "0"},
TargetConfig: connection.ConnectionConfig{Type: "mongodb", Database: "app"},
Tables: []string{"session:1"},
Content: "data",
Mode: "insert_update",
}, "session:1", 20)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if preview.PKColumn != "_id" {
t.Fatalf("unexpected pk column: %s", preview.PKColumn)
}
if preview.TotalInserts != 1 || len(preview.Inserts) != 1 {
t.Fatalf("unexpected preview: %+v", preview)
}
if preview.Inserts[0].PK != "db0:session:1" {
t.Fatalf("unexpected preview pk: %+v", preview.Inserts[0])
}
}
// TestRunSync_MongoToRedisAppliesStringAndHash verifies that syncing the
// "redis_db_0_keys" collection into Redis connects to DB 0, writes the string
// key with its TTL, replaces the stale hash fields with the source document's
// fields, and reports one insert plus one update.
func TestRunSync_MongoToRedisAppliesStringAndHash(t *testing.T) {
	// Fake Mongo source: one string document and one hash document stored in
	// the "redis_db_0_keys" collection layout (key/type/ttl/value fields).
	fakeSource := &fakeMongoRedisSourceDB{
		tables: []string{"redis_db_0_keys"},
		rowsByTable: map[string][]map[string]interface{}{
			"redis_db_0_keys": {
				{"_id": "db0:session:1", "key": "session:1", "type": "string", "ttl": int64(60), "value": "token"},
				{"_id": "db0:user:1", "key": "user:1", "type": "hash", "ttl": int64(120), "value": map[string]interface{}{"name": "alice", "role": "admin"}},
			},
		},
	}
	// Target Redis already holds an outdated hash for user:1, so that entry
	// should count as an update rather than an insert.
	fakeRedis := &fakeRedisMigrationClient{
		values: map[string]*redispkg.RedisValue{
			"user:1": {Type: "hash", TTL: 120, Length: 1, Value: map[string]string{"name": "old"}},
		},
	}
	// Swap the package-level factory hooks for fakes; restore on exit.
	oldNewRedisClient := newRedisSourceClient
	oldNewDatabase := newSyncDatabase
	defer func() {
		newRedisSourceClient = oldNewRedisClient
		newSyncDatabase = oldNewDatabase
	}()
	newRedisSourceClient = func() redisMigrationClient { return fakeRedis }
	newSyncDatabase = func(dbType string) (db.Database, error) { return fakeSource, nil }
	engine := NewSyncEngine(Reporter{})
	result := engine.RunSync(SyncConfig{
		SourceConfig: connection.ConnectionConfig{Type: "mongodb", Database: "app"},
		TargetConfig: connection.ConnectionConfig{Type: "redis", Database: "0"},
		Tables: []string{"redis_db_0_keys"},
		Content: "data",
		Mode: "insert_update",
	})
	if !result.Success {
		t.Fatalf("expected success, got: %+v", result)
	}
	// The target config's Database "0" must be applied as the Redis DB index.
	if fakeRedis.connectConfig.RedisDB != 0 {
		t.Fatalf("expected redis db 0, got %d", fakeRedis.connectConfig.RedisDB)
	}
	if got := fakeRedis.values["session:1"]; got == nil || got.Type != "string" || got.Value != "token" || got.TTL != 60 {
		t.Fatalf("unexpected string value: %+v", got)
	}
	gotHash, _ := fakeRedis.values["user:1"].Value.(map[string]string)
	if gotHash["name"] != "alice" || gotHash["role"] != "admin" {
		t.Fatalf("unexpected hash value: %+v", fakeRedis.values["user:1"])
	}
	if result.RowsInserted != 1 || result.RowsUpdated != 1 {
		t.Fatalf("unexpected sync result: %+v", result)
	}
}
// TestPreview_MongoToRedisReturnsCollectionPreview verifies that previewing a
// MongoDB -> Redis sync uses the Redis key column ("key") as the PK column and
// lists the pending key ("session:1") as a single insert.
func TestPreview_MongoToRedisReturnsCollectionPreview(t *testing.T) {
	fakeSource := &fakeMongoRedisSourceDB{
		tables: []string{"redis_db_0_keys"},
		rowsByTable: map[string][]map[string]interface{}{
			"redis_db_0_keys": {
				{"_id": "db0:session:1", "key": "session:1", "type": "string", "ttl": int64(60), "value": "token"},
			},
		},
	}
	// Target Redis is empty, so the document should appear as an insert.
	fakeRedis := &fakeRedisMigrationClient{values: map[string]*redispkg.RedisValue{}}
	// Swap the package-level factory hooks for fakes; restore on exit.
	oldNewRedisClient := newRedisSourceClient
	oldNewDatabase := newSyncDatabase
	defer func() {
		newRedisSourceClient = oldNewRedisClient
		newSyncDatabase = oldNewDatabase
	}()
	newRedisSourceClient = func() redisMigrationClient { return fakeRedis }
	newSyncDatabase = func(dbType string) (db.Database, error) { return fakeSource, nil }
	engine := NewSyncEngine(Reporter{})
	preview, err := engine.Preview(SyncConfig{
		SourceConfig: connection.ConnectionConfig{Type: "mongodb", Database: "app"},
		TargetConfig: connection.ConnectionConfig{Type: "redis", Database: "0"},
		Tables: []string{"redis_db_0_keys"},
		Content: "data",
		Mode: "insert_update",
	}, "redis_db_0_keys", 20)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if preview.Table != "redis_db_0_keys" || preview.PKColumn != "key" {
		t.Fatalf("unexpected preview header: %+v", preview)
	}
	if preview.TotalInserts != 1 || len(preview.Inserts) != 1 {
		t.Fatalf("unexpected preview rows: %+v", preview)
	}
	if preview.Inserts[0].PK != "session:1" {
		t.Fatalf("unexpected preview pk: %+v", preview.Inserts[0])
	}
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,957 +0,0 @@
package sync
import (
"GoNavi-Wails/internal/connection"
"context"
"reflect"
"strings"
"testing"
)
// fakeMigrationDB is an in-memory stand-in for the project's db.Database
// interface, used by the schema-migration planner tests. Unset maps simply
// yield empty results from the corresponding getters.
type fakeMigrationDB struct {
	columns map[string][]connection.ColumnDefinition // keyed "<db>.<table>" -> column defs
	indexes map[string][]connection.IndexDefinition // keyed "<db>.<table>" -> index defs
	queryData map[string][]map[string]interface{} // exact query string -> canned rows
	queryCols map[string][]string // exact query string -> column order for those rows
}
// --- fakeMigrationDB: stub methods satisfying the db.Database interface ---

// Connect is a no-op; the fake never opens a real connection.
func (m *fakeMigrationDB) Connect(_ connection.ConnectionConfig) error { return nil }

// Close is a no-op.
func (m *fakeMigrationDB) Close() error { return nil }

// Ping always reports the fake as reachable.
func (m *fakeMigrationDB) Ping() error { return nil }

// Query serves the canned rows and column order registered for the exact
// query string; unknown queries yield (nil, nil, nil).
func (m *fakeMigrationDB) Query(query string) ([]map[string]interface{}, []string, error) {
	rows, ok := m.queryData[query]
	if !ok {
		return nil, nil, nil
	}
	return rows, m.queryCols[query], nil
}

// Exec pretends the statement succeeded without affecting any rows.
func (m *fakeMigrationDB) Exec(_ string) (int64, error) { return 0, nil }

// GetDatabases reports no databases.
func (m *fakeMigrationDB) GetDatabases() ([]string, error) { return nil, nil }

// GetTables reports no tables.
func (m *fakeMigrationDB) GetTables(_ string) ([]string, error) {
	return nil, nil
}

// GetCreateStatement reports an empty DDL statement.
func (m *fakeMigrationDB) GetCreateStatement(_, _ string) (string, error) {
	return "", nil
}

// GetColumns looks up column definitions under "<db>.<table>"; a miss
// returns an empty (non-nil) slice.
func (m *fakeMigrationDB) GetColumns(dbName, tableName string) ([]connection.ColumnDefinition, error) {
	if defs, ok := m.columns[dbName+"."+tableName]; ok {
		return defs, nil
	}
	return []connection.ColumnDefinition{}, nil
}

// GetAllColumns reports no columns.
func (m *fakeMigrationDB) GetAllColumns(_ string) ([]connection.ColumnDefinitionWithTable, error) {
	return nil, nil
}

// GetIndexes looks up index definitions under "<db>.<table>"; a miss
// returns a nil slice.
func (m *fakeMigrationDB) GetIndexes(dbName, tableName string) ([]connection.IndexDefinition, error) {
	if defs, ok := m.indexes[dbName+"."+tableName]; ok {
		return defs, nil
	}
	return nil, nil
}

// GetForeignKeys reports no foreign keys.
func (m *fakeMigrationDB) GetForeignKeys(_, _ string) ([]connection.ForeignKeyDefinition, error) {
	return nil, nil
}

// GetTriggers reports no triggers.
func (m *fakeMigrationDB) GetTriggers(_, _ string) ([]connection.TriggerDefinition, error) {
	return nil, nil
}

// QueryContext ignores the context and delegates to Query.
func (m *fakeMigrationDB) QueryContext(_ context.Context, query string) ([]map[string]interface{}, []string, error) {
	return m.Query(query)
}

// ExecContext pretends the statement succeeded.
func (m *fakeMigrationDB) ExecContext(_ context.Context, _ string) (int64, error) {
	return 0, nil
}
// TestBuildMySQLToKingbaseColumnDefinition_AutoIncrementAndBoolean checks two
// special cases of the MySQL -> Kingbase column mapping: an unsigned
// auto-increment int becomes a bigint identity column, and tinyint(1) with
// default "1" becomes boolean DEFAULT TRUE. Neither case may emit warnings.
func TestBuildMySQLToKingbaseColumnDefinition_AutoIncrementAndBoolean(t *testing.T) {
	t.Parallel()
	def, warnings := buildMySQLToKingbaseColumnDefinition(connection.ColumnDefinition{
		Name: "id",
		Type: "int unsigned",
		Nullable: "NO",
		Extra: "auto_increment",
	})
	if !strings.Contains(def, "bigint") || !strings.Contains(def, "GENERATED BY DEFAULT AS IDENTITY") || !strings.Contains(def, "NOT NULL") {
		t.Fatalf("unexpected definition: %s", def)
	}
	if len(warnings) != 0 {
		t.Fatalf("unexpected warnings: %v", warnings)
	}
	// tinyint(1) is treated as a boolean, with the numeric default translated.
	def, warnings = buildMySQLToKingbaseColumnDefinition(connection.ColumnDefinition{
		Name: "enabled",
		Type: "tinyint(1)",
		Nullable: "YES",
		Default: stringPtr("1"),
	})
	if !strings.Contains(def, "boolean") || !strings.Contains(def, "DEFAULT TRUE") {
		t.Fatalf("unexpected boolean definition: %s", def)
	}
	if len(warnings) != 0 {
		t.Fatalf("unexpected warnings for boolean: %v", warnings)
	}
}
// TestBuildMySQLToKingbaseCreateTablePlan_GeneratesAndSkipsIndexes checks
// that the MySQL -> Kingbase table plan inlines the primary key into the
// CREATE TABLE, emits a post-create statement for the plain composite index,
// and reports prefix-length and FULLTEXT indexes as unsupported (skipped).
func TestBuildMySQLToKingbaseCreateTablePlan_GeneratesAndSkipsIndexes(t *testing.T) {
	t.Parallel()
	// Source indexes: PRIMARY, a two-column BTREE, a prefix index (SubPart),
	// and a FULLTEXT index — only the plain BTREE one should migrate.
	sourceDB := &fakeMigrationDB{
		indexes: map[string][]connection.IndexDefinition{
			"shop.orders": {
				{Name: "PRIMARY", ColumnName: "id", NonUnique: 0, SeqInIndex: 1, IndexType: "BTREE"},
				{Name: "idx_user_status", ColumnName: "user_id", NonUnique: 1, SeqInIndex: 1, IndexType: "BTREE"},
				{Name: "idx_user_status", ColumnName: "status", NonUnique: 1, SeqInIndex: 2, IndexType: "BTREE"},
				{Name: "idx_name_prefix", ColumnName: "name", NonUnique: 1, SeqInIndex: 1, IndexType: "BTREE", SubPart: 12},
				{Name: "idx_fulltext_note", ColumnName: "note", NonUnique: 1, SeqInIndex: 1, IndexType: "FULLTEXT"},
			},
		},
	}
	cols := []connection.ColumnDefinition{
		{Name: "id", Type: "bigint", Nullable: "NO", Key: "PRI", Extra: "auto_increment"},
		{Name: "user_id", Type: "bigint", Nullable: "NO"},
		{Name: "status", Type: "varchar(32)", Nullable: "YES"},
		{Name: "name", Type: "varchar(128)", Nullable: "YES"},
		{Name: "note", Type: "text", Nullable: "YES"},
	}
	cfg := SyncConfig{CreateIndexes: true}
	createSQL, postSQL, warnings, unsupported, idxCreate, idxSkip, err := buildMySQLToKingbaseCreateTablePlan(cfg, "public.orders", cols, sourceDB, "shop", "orders")
	if err != nil {
		t.Fatalf("buildMySQLToKingbaseCreateTablePlan returned error: %v", err)
	}
	if !strings.Contains(createSQL, `CREATE TABLE "public"."orders"`) {
		t.Fatalf("unexpected create SQL: %s", createSQL)
	}
	if !strings.Contains(createSQL, `PRIMARY KEY ("id")`) {
		t.Fatalf("create SQL missing primary key: %s", createSQL)
	}
	if idxCreate != 1 || idxSkip != 2 {
		t.Fatalf("unexpected index summary: create=%d skip=%d", idxCreate, idxSkip)
	}
	if len(postSQL) != 1 || !strings.Contains(postSQL[0], `CREATE INDEX "idx_user_status"`) {
		t.Fatalf("unexpected post SQL: %v", postSQL)
	}
	if len(warnings) != 0 {
		t.Fatalf("unexpected warnings: %v", warnings)
	}
	// Exact (Chinese) messages explaining why the two indexes were skipped.
	wantUnsupported := []string{
		"索引 idx_name_prefix 使用前缀长度,当前暂不支持迁移",
		"索引 idx_fulltext_note 类型=FULLTEXT当前暂不支持自动迁移",
	}
	if !reflect.DeepEqual(unsupported, wantUnsupported) {
		t.Fatalf("unexpected unsupported objects: got=%v want=%v", unsupported, wantUnsupported)
	}
}
// TestBuildSchemaMigrationPlan_AutoCreateWhenTargetMissing checks that the
// generic MySQL -> Kingbase plan builder enables auto-create (with a CREATE
// TABLE statement) when the target table does not exist under the "smart"
// target-table strategy.
func TestBuildSchemaMigrationPlan_AutoCreateWhenTargetMissing(t *testing.T) {
	t.Parallel()
	sourceDB := &fakeMigrationDB{
		columns: map[string][]connection.ColumnDefinition{
			"shop.orders": {
				{Name: "id", Type: "bigint", Nullable: "NO", Key: "PRI", Extra: "auto_increment"},
				{Name: "name", Type: "varchar(128)", Nullable: "YES"},
			},
		},
		indexes: map[string][]connection.IndexDefinition{},
	}
	// Empty column map => the target table is considered missing.
	targetDB := &fakeMigrationDB{columns: map[string][]connection.ColumnDefinition{}}
	cfg := SyncConfig{
		SourceConfig: connection.ConnectionConfig{Type: "mysql", Database: "shop"},
		TargetConfig: connection.ConnectionConfig{Type: "kingbase", Database: "demo"},
		TargetTableStrategy: "smart",
		CreateIndexes: true,
	}
	plan, sourceCols, targetCols, err := buildSchemaMigrationPlan(cfg, "orders", sourceDB, targetDB)
	if err != nil {
		t.Fatalf("buildSchemaMigrationPlan returned error: %v", err)
	}
	if len(sourceCols) != 2 || len(targetCols) != 0 {
		t.Fatalf("unexpected columns lengths: source=%d target=%d", len(sourceCols), len(targetCols))
	}
	if plan.TargetTableExists {
		t.Fatalf("expected target table missing")
	}
	if !plan.AutoCreate {
		t.Fatalf("expected auto create enabled")
	}
	// PlannedAction should mention auto table creation (Chinese: "自动建表").
	if !strings.Contains(plan.PlannedAction, "自动建表") {
		t.Fatalf("unexpected planned action: %s", plan.PlannedAction)
	}
	if !strings.Contains(plan.CreateTableSQL, `CREATE TABLE "public"."orders"`) {
		t.Fatalf("unexpected create table SQL: %s", plan.CreateTableSQL)
	}
}
// stringPtr returns a pointer to a copy of v; handy for populating optional
// *string fields in test fixtures.
func stringPtr(v string) *string {
	s := v
	return &s
}
// TestBuildPGLikeToMySQLCreateTablePlan_GeneratesMySQLDDL checks the
// Postgres-like -> MySQL DDL generation: integer identity maps to
// `int AUTO_INCREMENT NOT NULL`, jsonb maps to MySQL json, and both source
// indexes are emitted as post-create statements without warnings.
func TestBuildPGLikeToMySQLCreateTablePlan_GeneratesMySQLDDL(t *testing.T) {
	t.Parallel()
	sourceDB := &fakeMigrationDB{
		indexes: map[string][]connection.IndexDefinition{
			"public.users": {
				{Name: "users_email_key", ColumnName: "email", NonUnique: 0, SeqInIndex: 1, IndexType: "BTREE"},
				{Name: "idx_users_name", ColumnName: "name", NonUnique: 1, SeqInIndex: 1, IndexType: "BTREE"},
			},
		},
	}
	cols := []connection.ColumnDefinition{
		{Name: "id", Type: "integer", Nullable: "NO", Key: "PRI", Extra: "auto_increment"},
		{Name: "email", Type: "character varying(120)", Nullable: "NO"},
		{Name: "name", Type: "text", Nullable: "YES"},
		{Name: "profile", Type: "jsonb", Nullable: "YES"},
	}
	cfg := SyncConfig{CreateIndexes: true}
	createSQL, postSQL, warnings, unsupported, idxCreate, idxSkip, err := buildPGLikeToMySQLCreateTablePlan(cfg, "app.users", cols, sourceDB, "public", "users")
	if err != nil {
		t.Fatalf("buildPGLikeToMySQLCreateTablePlan returned error: %v", err)
	}
	if !strings.Contains(createSQL, "CREATE TABLE `app`.`users`") {
		t.Fatalf("unexpected create SQL: %s", createSQL)
	}
	if !strings.Contains(createSQL, "`id` int AUTO_INCREMENT NOT NULL") {
		t.Fatalf("unexpected id definition: %s", createSQL)
	}
	if !strings.Contains(createSQL, "`profile` json") {
		t.Fatalf("unexpected json definition: %s", createSQL)
	}
	if idxCreate != 2 || idxSkip != 0 {
		t.Fatalf("unexpected index summary: create=%d skip=%d", idxCreate, idxSkip)
	}
	if len(postSQL) != 2 {
		t.Fatalf("unexpected post sql length: %v", postSQL)
	}
	if len(warnings) != 0 {
		t.Fatalf("unexpected warnings: %v", warnings)
	}
	if len(unsupported) != 0 {
		t.Fatalf("unexpected unsupported: %v", unsupported)
	}
}
// TestBuildPGLikeToMySQLPlan_AutoCreateWhenTargetMissing checks that the
// Postgres-like -> MySQL plan enables auto-create and produces a MySQL
// CREATE TABLE when the target table is absent under the "smart" strategy.
func TestBuildPGLikeToMySQLPlan_AutoCreateWhenTargetMissing(t *testing.T) {
	t.Parallel()
	sourceDB := &fakeMigrationDB{
		columns: map[string][]connection.ColumnDefinition{
			"public.orders": {
				{Name: "id", Type: "bigint", Nullable: "NO", Key: "PRI", Extra: "auto_increment"},
				{Name: "amount", Type: "numeric(10,2)", Nullable: "NO"},
			},
		},
		indexes: map[string][]connection.IndexDefinition{},
	}
	// Empty column map => the target table is considered missing.
	targetDB := &fakeMigrationDB{columns: map[string][]connection.ColumnDefinition{}}
	cfg := SyncConfig{
		SourceConfig: connection.ConnectionConfig{Type: "kingbase", Database: "public"},
		TargetConfig: connection.ConnectionConfig{Type: "mysql", Database: "app"},
		TargetTableStrategy: "smart",
		CreateIndexes: true,
	}
	plan, sourceCols, targetCols, err := buildPGLikeToMySQLPlan(cfg, "orders", sourceDB, targetDB)
	if err != nil {
		t.Fatalf("buildPGLikeToMySQLPlan returned error: %v", err)
	}
	if len(sourceCols) != 2 || len(targetCols) != 0 {
		t.Fatalf("unexpected columns lengths: source=%d target=%d", len(sourceCols), len(targetCols))
	}
	if plan.TargetTableExists {
		t.Fatalf("expected target table missing")
	}
	if !plan.AutoCreate {
		t.Fatalf("expected auto create enabled")
	}
	if !strings.Contains(plan.CreateTableSQL, "CREATE TABLE `app`.`orders`") {
		t.Fatalf("unexpected create table SQL: %s", plan.CreateTableSQL)
	}
}
// TestBuildMySQLToPGLikeCreateTablePlan_GeneratesPostgresDDL checks the
// MySQL -> Postgres-like DDL generation: auto_increment maps to
// GENERATED BY DEFAULT AS IDENTITY, json maps to jsonb, and the composite
// BTREE index is emitted as one post-create statement.
func TestBuildMySQLToPGLikeCreateTablePlan_GeneratesPostgresDDL(t *testing.T) {
	t.Parallel()
	sourceDB := &fakeMigrationDB{
		indexes: map[string][]connection.IndexDefinition{
			"shop.orders": {
				{Name: "idx_orders_user", ColumnName: "user_id", NonUnique: 1, SeqInIndex: 1, IndexType: "BTREE"},
				{Name: "idx_orders_user", ColumnName: "status", NonUnique: 1, SeqInIndex: 2, IndexType: "BTREE"},
			},
		},
	}
	cols := []connection.ColumnDefinition{
		{Name: "id", Type: "bigint", Nullable: "NO", Key: "PRI", Extra: "auto_increment"},
		{Name: "user_id", Type: "bigint", Nullable: "NO"},
		{Name: "status", Type: "varchar(32)", Nullable: "YES"},
		{Name: "payload", Type: "json", Nullable: "YES"},
	}
	cfg := SyncConfig{CreateIndexes: true}
	createSQL, postSQL, warnings, unsupported, idxCreate, idxSkip, err := buildMySQLToPGLikeCreateTablePlan("postgres", cfg, "public.orders", cols, sourceDB, "shop", "orders")
	if err != nil {
		t.Fatalf("buildMySQLToPGLikeCreateTablePlan returned error: %v", err)
	}
	if !strings.Contains(createSQL, `CREATE TABLE "public"."orders"`) {
		t.Fatalf("unexpected create SQL: %s", createSQL)
	}
	if !strings.Contains(createSQL, `GENERATED BY DEFAULT AS IDENTITY`) {
		t.Fatalf("missing identity mapping: %s", createSQL)
	}
	if !strings.Contains(createSQL, `jsonb`) {
		t.Fatalf("missing jsonb mapping: %s", createSQL)
	}
	if idxCreate != 1 || idxSkip != 0 {
		t.Fatalf("unexpected index summary: create=%d skip=%d", idxCreate, idxSkip)
	}
	if len(postSQL) != 1 || !strings.Contains(postSQL[0], `CREATE INDEX "idx_orders_user"`) {
		t.Fatalf("unexpected post SQL: %v", postSQL)
	}
	if len(warnings) != 0 || len(unsupported) != 0 {
		t.Fatalf("unexpected warnings/unsupported: warnings=%v unsupported=%v", warnings, unsupported)
	}
}
// TestBuildMySQLToClickHouseCreateTableSQL_GeneratesMergeTree checks the
// MySQL -> ClickHouse DDL generation: MergeTree engine with ORDER BY the
// primary key, json degraded to Nullable(String), and at least one warning
// about ClickHouse semantics.
func TestBuildMySQLToClickHouseCreateTableSQL_GeneratesMergeTree(t *testing.T) {
	t.Parallel()
	cols := []connection.ColumnDefinition{
		{Name: "id", Type: "bigint unsigned", Nullable: "NO", Key: "PRI"},
		{Name: "name", Type: "varchar(128)", Nullable: "YES"},
		{Name: "payload", Type: "json", Nullable: "YES"},
	}
	createSQL, warnings, unsupported := buildMySQLToClickHouseCreateTableSQL("analytics.orders", cols)
	if !strings.Contains(createSQL, "ENGINE = MergeTree()") {
		t.Fatalf("unexpected create SQL: %s", createSQL)
	}
	if !strings.Contains(createSQL, "ORDER BY (`id`)") {
		t.Fatalf("unexpected order by: %s", createSQL)
	}
	if !strings.Contains(createSQL, "`payload` Nullable(String)") {
		t.Fatalf("unexpected json mapping: %s", createSQL)
	}
	// The conversion is lossy, so warnings are expected here.
	if len(warnings) == 0 {
		t.Fatalf("expected warnings for clickhouse semantics")
	}
	if len(unsupported) != 0 {
		t.Fatalf("unexpected unsupported: %v", unsupported)
	}
}
// TestBuildClickHouseToMySQLCreateTableSQL_GeneratesMySQLDDL checks the
// reverse ClickHouse -> MySQL DDL generation: UInt64 maps to
// `bigint unsigned NOT NULL`, Map(...) degrades to json, and a warning is
// emitted for the lossy reverse mapping.
func TestBuildClickHouseToMySQLCreateTableSQL_GeneratesMySQLDDL(t *testing.T) {
	t.Parallel()
	cols := []connection.ColumnDefinition{
		{Name: "id", Type: "UInt64", Nullable: "NO", Key: "PRI"},
		{Name: "event_time", Type: "DateTime", Nullable: "NO"},
		{Name: "payload", Type: "Map(String, String)", Nullable: "YES"},
	}
	createSQL, warnings := buildClickHouseToMySQLCreateTableSQL("app.metrics", cols)
	if !strings.Contains(createSQL, "CREATE TABLE `app`.`metrics`") {
		t.Fatalf("unexpected create SQL: %s", createSQL)
	}
	if !strings.Contains(createSQL, "`id` bigint unsigned NOT NULL") {
		t.Fatalf("unexpected uint64 mapping: %s", createSQL)
	}
	if !strings.Contains(createSQL, "`payload` json") {
		t.Fatalf("unexpected complex type mapping: %s", createSQL)
	}
	if len(warnings) == 0 {
		t.Fatalf("expected warning for limited clickhouse reverse semantics")
	}
}
// TestBuildMySQLToMongoPlan_AutoCreateCollection checks that the MySQL ->
// MongoDB plan auto-creates the target collection via a pre-data "create"
// command and emits one post-data "createIndexes" command for the source
// index; target columns stay nil (Mongo is schemaless).
func TestBuildMySQLToMongoPlan_AutoCreateCollection(t *testing.T) {
	t.Parallel()
	sourceDB := &fakeMigrationDB{
		columns: map[string][]connection.ColumnDefinition{
			"shop.users": {
				{Name: "id", Type: "bigint", Nullable: "NO", Key: "PRI"},
				{Name: "name", Type: "varchar(64)", Nullable: "YES"},
			},
		},
		indexes: map[string][]connection.IndexDefinition{
			"shop.users": {
				{Name: "idx_users_name", ColumnName: "name", NonUnique: 1, SeqInIndex: 1, IndexType: "BTREE"},
			},
		},
	}
	targetDB := &fakeMigrationDB{}
	cfg := SyncConfig{
		SourceConfig: connection.ConnectionConfig{Type: "mysql", Database: "shop"},
		TargetConfig: connection.ConnectionConfig{Type: "mongodb", Database: "app"},
		TargetTableStrategy: "smart",
		CreateIndexes: true,
	}
	plan, sourceCols, targetCols, err := buildMySQLToMongoPlan(cfg, "users", sourceDB, targetDB)
	if err != nil {
		t.Fatalf("buildMySQLToMongoPlan returned error: %v", err)
	}
	if len(sourceCols) != 2 || targetCols != nil {
		t.Fatalf("unexpected source/target columns: %d / %v", len(sourceCols), targetCols)
	}
	if !plan.AutoCreate || len(plan.PreDataSQL) == 0 {
		t.Fatalf("expected auto create collection command: %+v", plan)
	}
	if !strings.Contains(plan.PreDataSQL[0], `"create":"users"`) {
		t.Fatalf("unexpected create collection command: %v", plan.PreDataSQL)
	}
	if len(plan.PostDataSQL) != 1 || !strings.Contains(plan.PostDataSQL[0], `"createIndexes":"users"`) {
		t.Fatalf("unexpected index commands: %v", plan.PostDataSQL)
	}
}
// TestBuildPGLikeToMongoPlan_AutoCreateCollection mirrors the MySQL -> Mongo
// case for a Postgres-like source: the plan should auto-create the "orders"
// collection and emit one createIndexes command for the source index.
func TestBuildPGLikeToMongoPlan_AutoCreateCollection(t *testing.T) {
	t.Parallel()
	sourceDB := &fakeMigrationDB{
		columns: map[string][]connection.ColumnDefinition{
			"public.orders": {
				{Name: "id", Type: "bigint", Nullable: "NO", Key: "PRI"},
				{Name: "name", Type: "varchar(64)", Nullable: "YES"},
			},
		},
		indexes: map[string][]connection.IndexDefinition{
			"public.orders": {
				{Name: "idx_orders_name", ColumnName: "name", NonUnique: 1, SeqInIndex: 1, IndexType: "BTREE"},
			},
		},
	}
	targetDB := &fakeMigrationDB{}
	cfg := SyncConfig{
		SourceConfig: connection.ConnectionConfig{Type: "postgres", Database: "public"},
		TargetConfig: connection.ConnectionConfig{Type: "mongodb", Database: "app"},
		TargetTableStrategy: "smart",
		CreateIndexes: true,
	}
	plan, sourceCols, targetCols, err := buildPGLikeToMongoPlan(cfg, "orders", sourceDB, targetDB)
	if err != nil {
		t.Fatalf("buildPGLikeToMongoPlan returned error: %v", err)
	}
	if len(sourceCols) != 2 || targetCols != nil {
		t.Fatalf("unexpected source/target columns: %d / %v", len(sourceCols), targetCols)
	}
	if !plan.AutoCreate || len(plan.PreDataSQL) == 0 {
		t.Fatalf("expected auto create collection command: %+v", plan)
	}
	if !strings.Contains(plan.PreDataSQL[0], `"create":"orders"`) {
		t.Fatalf("unexpected create collection command: %v", plan.PreDataSQL)
	}
	if len(plan.PostDataSQL) != 1 || !strings.Contains(plan.PostDataSQL[0], `"createIndexes":"orders"`) {
		t.Fatalf("unexpected index commands: %v", plan.PostDataSQL)
	}
}
// TestBuildClickHouseToMongoPlan_AutoCreateCollection checks that a
// ClickHouse source (no index fixtures, CreateIndexes unset) still yields an
// auto-create "metrics" collection command for a Mongo target.
func TestBuildClickHouseToMongoPlan_AutoCreateCollection(t *testing.T) {
	t.Parallel()
	sourceDB := &fakeMigrationDB{
		columns: map[string][]connection.ColumnDefinition{
			"analytics.metrics": {
				{Name: "id", Type: "UInt64", Nullable: "NO", Key: "PRI"},
				{Name: "host", Type: "String", Nullable: "YES"},
			},
		},
	}
	targetDB := &fakeMigrationDB{}
	cfg := SyncConfig{
		SourceConfig: connection.ConnectionConfig{Type: "clickhouse", Database: "analytics"},
		TargetConfig: connection.ConnectionConfig{Type: "mongodb", Database: "app"},
		TargetTableStrategy: "smart",
	}
	plan, sourceCols, targetCols, err := buildClickHouseToMongoPlan(cfg, "metrics", sourceDB, targetDB)
	if err != nil {
		t.Fatalf("buildClickHouseToMongoPlan returned error: %v", err)
	}
	if len(sourceCols) != 2 || targetCols != nil {
		t.Fatalf("unexpected source/target columns: %d / %v", len(sourceCols), targetCols)
	}
	if !plan.AutoCreate || len(plan.PreDataSQL) == 0 {
		t.Fatalf("expected auto create collection command: %+v", plan)
	}
	if !strings.Contains(plan.PreDataSQL[0], `"create":"metrics"`) {
		t.Fatalf("unexpected create collection command: %v", plan.PreDataSQL)
	}
}
// TestBuildTDengineToMongoPlan_AutoCreateCollection checks that a TDengine
// source table yields an auto-create "cpu" collection command for a Mongo
// target under the "smart" strategy.
func TestBuildTDengineToMongoPlan_AutoCreateCollection(t *testing.T) {
	t.Parallel()
	sourceDB := &fakeMigrationDB{
		columns: map[string][]connection.ColumnDefinition{
			"src.cpu": {
				{Name: "ts", Type: "TIMESTAMP", Nullable: "NO"},
				{Name: "host", Type: "NCHAR(64)", Nullable: "YES"},
			},
		},
	}
	targetDB := &fakeMigrationDB{}
	cfg := SyncConfig{
		SourceConfig: connection.ConnectionConfig{Type: "tdengine", Database: "src"},
		TargetConfig: connection.ConnectionConfig{Type: "mongodb", Database: "app"},
		TargetTableStrategy: "smart",
	}
	plan, sourceCols, targetCols, err := buildTDengineToMongoPlan(cfg, "cpu", sourceDB, targetDB)
	if err != nil {
		t.Fatalf("buildTDengineToMongoPlan returned error: %v", err)
	}
	if len(sourceCols) != 2 || targetCols != nil {
		t.Fatalf("unexpected source/target columns: %d / %v", len(sourceCols), targetCols)
	}
	if !plan.AutoCreate || len(plan.PreDataSQL) == 0 {
		t.Fatalf("expected auto create collection command: %+v", plan)
	}
	if !strings.Contains(plan.PreDataSQL[0], `"create":"cpu"`) {
		t.Fatalf("unexpected create collection command: %v", plan.PreDataSQL)
	}
}
// TestBuildMongoToMySQLPlan_InfersColumnsAndCreatesTable checks that the
// Mongo -> MySQL plan samples documents via a canned "find" query, infers a
// relational schema from them (string _id, nested object degraded to json),
// auto-creates the table, and translates the Mongo index into one post-data
// index statement.
func TestBuildMongoToMySQLPlan_InfersColumnsAndCreatesTable(t *testing.T) {
	t.Parallel()
	// The planner is expected to issue exactly this sampling query; the fake
	// returns two documents with a partially overlapping field set.
	query := `{"find":"users","filter":{},"limit":200}`
	sourceDB := &fakeMigrationDB{
		queryData: map[string][]map[string]interface{}{
			query: {
				{"_id": "a1", "name": "alice", "age": int64(18), "profile": map[string]interface{}{"city": "shanghai"}},
				{"_id": "b2", "name": "bob", "profile": map[string]interface{}{"city": "beijing"}},
			},
		},
		queryCols: map[string][]string{query: {"_id", "name", "age", "profile"}},
		indexes: map[string][]connection.IndexDefinition{
			"crm.users": {{Name: "email_1", ColumnName: "name", NonUnique: 1, SeqInIndex: 1, IndexType: "BTREE"}},
		},
	}
	targetDB := &fakeMigrationDB{}
	cfg := SyncConfig{
		SourceConfig: connection.ConnectionConfig{Type: "mongodb", Database: "crm"},
		TargetConfig: connection.ConnectionConfig{Type: "mysql", Database: "app"},
		TargetTableStrategy: "smart",
		CreateIndexes: true,
	}
	plan, sourceCols, _, err := buildMongoToMySQLPlan(cfg, "users", sourceDB, targetDB)
	if err != nil {
		t.Fatalf("buildMongoToMySQLPlan returned error: %v", err)
	}
	if len(sourceCols) == 0 {
		t.Fatalf("expected inferred source cols")
	}
	if !plan.AutoCreate || !strings.Contains(plan.CreateTableSQL, "CREATE TABLE `app`.`users`") {
		t.Fatalf("unexpected create table sql: %s", plan.CreateTableSQL)
	}
	// _id may be mapped to either text or varchar; both are acceptable.
	if !strings.Contains(plan.CreateTableSQL, "`_id` text NOT NULL") && !strings.Contains(plan.CreateTableSQL, "`_id` varchar") {
		t.Fatalf("missing inferred _id column: %s", plan.CreateTableSQL)
	}
	if !strings.Contains(plan.CreateTableSQL, "`profile` json") {
		t.Fatalf("expected nested field degrade to json: %s", plan.CreateTableSQL)
	}
	if len(plan.PostDataSQL) != 1 {
		t.Fatalf("expected one post index sql, got=%v", plan.PostDataSQL)
	}
}
// TestBuildTDengineToMySQLPlan_AutoCreateWhenTargetMissing checks the
// TDengine -> MySQL mapping: TIMESTAMP becomes datetime, NCHAR(64) becomes
// varchar(64), the table is auto-created, and a warning about the TAG column
// is produced.
func TestBuildTDengineToMySQLPlan_AutoCreateWhenTargetMissing(t *testing.T) {
	t.Parallel()
	sourceDB := &fakeMigrationDB{
		columns: map[string][]connection.ColumnDefinition{
			"metrics.cpu": {
				{Name: "ts", Type: "TIMESTAMP", Nullable: "NO"},
				{Name: "host", Type: "NCHAR(64)", Nullable: "YES", Key: "TAG", Extra: "TAG"},
				{Name: "usage", Type: "DOUBLE", Nullable: "YES"},
			},
		},
	}
	targetDB := &fakeMigrationDB{}
	cfg := SyncConfig{
		SourceConfig: connection.ConnectionConfig{Type: "tdengine", Database: "metrics"},
		TargetConfig: connection.ConnectionConfig{Type: "mysql", Database: "app"},
		TargetTableStrategy: "smart",
	}
	plan, sourceCols, targetCols, err := buildTDengineToMySQLPlan(cfg, "cpu", sourceDB, targetDB)
	if err != nil {
		t.Fatalf("buildTDengineToMySQLPlan returned error: %v", err)
	}
	if len(sourceCols) != 3 || len(targetCols) != 0 {
		t.Fatalf("unexpected columns lengths: source=%d target=%d", len(sourceCols), len(targetCols))
	}
	if !plan.AutoCreate {
		t.Fatalf("expected auto create enabled")
	}
	if !strings.Contains(plan.CreateTableSQL, "CREATE TABLE `app`.`cpu`") {
		t.Fatalf("unexpected create table sql: %s", plan.CreateTableSQL)
	}
	if !strings.Contains(plan.CreateTableSQL, "`ts` datetime") {
		t.Fatalf("expected timestamp mapping, got: %s", plan.CreateTableSQL)
	}
	if !strings.Contains(plan.CreateTableSQL, "`host` varchar(64)") {
		t.Fatalf("expected nchar mapping, got: %s", plan.CreateTableSQL)
	}
	if len(plan.Warnings) == 0 || !strings.Contains(strings.Join(plan.Warnings, " "), "TAG") {
		t.Fatalf("expected TAG warning, got: %v", plan.Warnings)
	}
}
// TestBuildTDengineToPGLikePlan_AutoCreateWhenTargetMissing checks the
// TDengine -> Postgres-like (Kingbase) mapping: TIMESTAMP becomes timestamp,
// JSON becomes jsonb, the table is auto-created in "public", and a TAG
// warning is produced.
func TestBuildTDengineToPGLikePlan_AutoCreateWhenTargetMissing(t *testing.T) {
	t.Parallel()
	sourceDB := &fakeMigrationDB{
		columns: map[string][]connection.ColumnDefinition{
			"metrics.cpu": {
				{Name: "ts", Type: "TIMESTAMP", Nullable: "NO"},
				{Name: "payload", Type: "JSON", Nullable: "YES"},
				{Name: "host", Type: "BINARY(32)", Nullable: "YES", Key: "TAG", Extra: "TAG"},
			},
		},
	}
	targetDB := &fakeMigrationDB{}
	cfg := SyncConfig{
		SourceConfig: connection.ConnectionConfig{Type: "tdengine", Database: "metrics"},
		TargetConfig: connection.ConnectionConfig{Type: "kingbase", Database: "ignored"},
		TargetTableStrategy: "smart",
	}
	plan, sourceCols, targetCols, err := buildTDengineToPGLikePlan(cfg, "cpu", sourceDB, targetDB)
	if err != nil {
		t.Fatalf("buildTDengineToPGLikePlan returned error: %v", err)
	}
	if len(sourceCols) != 3 || len(targetCols) != 0 {
		t.Fatalf("unexpected columns lengths: source=%d target=%d", len(sourceCols), len(targetCols))
	}
	if !plan.AutoCreate {
		t.Fatalf("expected auto create enabled")
	}
	if !strings.Contains(plan.CreateTableSQL, `CREATE TABLE "public"."cpu"`) {
		t.Fatalf("unexpected create table sql: %s", plan.CreateTableSQL)
	}
	if !strings.Contains(plan.CreateTableSQL, `"ts" timestamp`) {
		t.Fatalf("expected timestamp mapping, got: %s", plan.CreateTableSQL)
	}
	if !strings.Contains(plan.CreateTableSQL, `"payload" jsonb`) {
		t.Fatalf("expected json mapping, got: %s", plan.CreateTableSQL)
	}
	if len(plan.Warnings) == 0 || !strings.Contains(strings.Join(plan.Warnings, " "), "TAG") {
		t.Fatalf("expected TAG warning, got: %v", plan.Warnings)
	}
}
// TestBuildSchemaMigrationPlan_TDengineTargetWarnsInsertOnlyBoundary checks
// that when the target is TDengine and the mode is insert_update, the plan
// carries a warning that TDengine only supports INSERT writes (Chinese
// message "仅支持 INSERT 写入").
func TestBuildSchemaMigrationPlan_TDengineTargetWarnsInsertOnlyBoundary(t *testing.T) {
	t.Parallel()
	sourceDB := &fakeMigrationDB{
		columns: map[string][]connection.ColumnDefinition{
			"shop.metrics": {
				{Name: "id", Type: "bigint", Nullable: "NO", Key: "PRI"},
				{Name: "ts", Type: "datetime", Nullable: "NO"},
				{Name: "value", Type: "double", Nullable: "YES"},
			},
		},
	}
	// Target table already exists with a compatible layout, so only the
	// insert-only boundary warning is of interest here.
	targetDB := &fakeMigrationDB{
		columns: map[string][]connection.ColumnDefinition{
			"taos.metrics": {
				{Name: "id", Type: "bigint", Nullable: "NO"},
				{Name: "ts", Type: "timestamp", Nullable: "NO"},
				{Name: "value", Type: "double", Nullable: "YES"},
			},
		},
	}
	cfg := SyncConfig{
		SourceConfig: connection.ConnectionConfig{Type: "mysql", Database: "shop"},
		TargetConfig: connection.ConnectionConfig{Type: "tdengine", Database: "taos"},
		Mode: "insert_update",
	}
	plan, _, _, err := buildSchemaMigrationPlan(cfg, "metrics", sourceDB, targetDB)
	if err != nil {
		t.Fatalf("buildSchemaMigrationPlan returned error: %v", err)
	}
	warnings := strings.Join(plan.Warnings, " ")
	if !strings.Contains(warnings, "仅支持 INSERT 写入") {
		t.Fatalf("expected TDengine target warning, got: %v", plan.Warnings)
	}
}
// TestBuildMySQLLikeToTDenginePlan_AutoCreateWhenTargetMissing checks the
// MySQL -> TDengine mapping: the datetime column becomes the leading
// TIMESTAMP column, json degrades to VARCHAR, and the plan warns about the
// insert-only TDengine target.
func TestBuildMySQLLikeToTDenginePlan_AutoCreateWhenTargetMissing(t *testing.T) {
	t.Parallel()
	sourceDB := &fakeMigrationDB{
		columns: map[string][]connection.ColumnDefinition{
			"shop.metrics": {
				{Name: "id", Type: "bigint", Nullable: "NO", Key: "PRI", Extra: "auto_increment"},
				{Name: "ts", Type: "datetime", Nullable: "NO"},
				{Name: "payload", Type: "json", Nullable: "YES"},
			},
		},
	}
	targetDB := &fakeMigrationDB{}
	cfg := SyncConfig{
		SourceConfig: connection.ConnectionConfig{Type: "mysql", Database: "shop"},
		TargetConfig: connection.ConnectionConfig{Type: "tdengine", Database: "taos"},
		TargetTableStrategy: "smart",
	}
	plan, sourceCols, targetCols, err := buildMySQLLikeToTDenginePlan(cfg, "metrics", sourceDB, targetDB)
	if err != nil {
		t.Fatalf("buildMySQLLikeToTDenginePlan returned error: %v", err)
	}
	if len(sourceCols) != 3 || len(targetCols) != 0 {
		t.Fatalf("unexpected columns lengths: source=%d target=%d", len(sourceCols), len(targetCols))
	}
	if !plan.AutoCreate {
		t.Fatalf("expected auto create enabled")
	}
	if !strings.Contains(plan.CreateTableSQL, "CREATE TABLE `taos`.`metrics`") {
		t.Fatalf("unexpected create sql: %s", plan.CreateTableSQL)
	}
	if !strings.Contains(plan.CreateTableSQL, "`ts` TIMESTAMP") {
		t.Fatalf("expected ts first column mapped to TIMESTAMP, got: %s", plan.CreateTableSQL)
	}
	if !strings.Contains(plan.CreateTableSQL, "`payload` VARCHAR(") {
		t.Fatalf("expected json degrade to VARCHAR, got: %s", plan.CreateTableSQL)
	}
	// Accept either phrasing of the insert-only warning.
	if !strings.Contains(strings.Join(plan.Warnings, " "), "insert-only") && !strings.Contains(strings.Join(plan.Warnings, " "), "INSERT") {
		t.Fatalf("expected tdengine target warning, got: %v", plan.Warnings)
	}
}
// TestBuildPGLikeToTDenginePlan_AutoCreateWhenTargetMissing checks the
// Postgres-like -> TDengine mapping: "timestamp without time zone" becomes
// the leading TIMESTAMP column and jsonb degrades to VARCHAR; the table is
// auto-created in the target database named in the config.
func TestBuildPGLikeToTDenginePlan_AutoCreateWhenTargetMissing(t *testing.T) {
	t.Parallel()
	sourceDB := &fakeMigrationDB{
		columns: map[string][]connection.ColumnDefinition{
			"public.metrics": {
				{Name: "event_time", Type: "timestamp without time zone", Nullable: "NO"},
				{Name: "name", Type: "character varying(64)", Nullable: "YES"},
				{Name: "meta", Type: "jsonb", Nullable: "YES"},
			},
		},
	}
	targetDB := &fakeMigrationDB{}
	cfg := SyncConfig{
		SourceConfig: connection.ConnectionConfig{Type: "postgres", Database: "ignored"},
		TargetConfig: connection.ConnectionConfig{Type: "tdengine", Database: "taos"},
		TargetTableStrategy: "smart",
	}
	plan, sourceCols, targetCols, err := buildPGLikeToTDenginePlan(cfg, "metrics", sourceDB, targetDB)
	if err != nil {
		t.Fatalf("buildPGLikeToTDenginePlan returned error: %v", err)
	}
	if len(sourceCols) != 3 || len(targetCols) != 0 {
		t.Fatalf("unexpected columns lengths: source=%d target=%d", len(sourceCols), len(targetCols))
	}
	if !plan.AutoCreate {
		t.Fatalf("expected auto create enabled")
	}
	if !strings.Contains(plan.CreateTableSQL, "CREATE TABLE `taos`.`metrics`") {
		t.Fatalf("unexpected create sql: %s", plan.CreateTableSQL)
	}
	if !strings.Contains(plan.CreateTableSQL, "`event_time` TIMESTAMP") {
		t.Fatalf("expected timestamp mapping, got: %s", plan.CreateTableSQL)
	}
	if !strings.Contains(plan.CreateTableSQL, "`meta` VARCHAR(") {
		t.Fatalf("expected jsonb degrade to VARCHAR, got: %s", plan.CreateTableSQL)
	}
}
// TestBuildMySQLLikeToTDenginePlan_RejectsAutoCreateWithoutTimestampColumn
// checks that when the source table has no time column, the TDengine plan
// disables auto-create and surfaces the reason (Chinese "时间列", "time
// column") in both the planned action and the warnings.
func TestBuildMySQLLikeToTDenginePlan_RejectsAutoCreateWithoutTimestampColumn(t *testing.T) {
	t.Parallel()
	// No datetime/timestamp column on purpose — TDengine tables require one.
	sourceDB := &fakeMigrationDB{
		columns: map[string][]connection.ColumnDefinition{
			"shop.metrics": {
				{Name: "id", Type: "bigint", Nullable: "NO", Key: "PRI"},
				{Name: "name", Type: "varchar(64)", Nullable: "YES"},
			},
		},
	}
	targetDB := &fakeMigrationDB{}
	cfg := SyncConfig{
		SourceConfig: connection.ConnectionConfig{Type: "mysql", Database: "shop"},
		TargetConfig: connection.ConnectionConfig{Type: "tdengine", Database: "taos"},
		TargetTableStrategy: "smart",
	}
	plan, _, _, err := buildMySQLLikeToTDenginePlan(cfg, "metrics", sourceDB, targetDB)
	if err != nil {
		t.Fatalf("buildMySQLLikeToTDenginePlan returned error: %v", err)
	}
	if plan.AutoCreate {
		t.Fatalf("expected auto create disabled when source has no timestamp column")
	}
	if !strings.Contains(plan.PlannedAction, "时间列") {
		t.Fatalf("unexpected planned action: %s", plan.PlannedAction)
	}
	if !strings.Contains(strings.Join(plan.Warnings, " "), "时间列") {
		t.Fatalf("expected missing timestamp warning, got: %v", plan.Warnings)
	}
}
// TestBuildClickHouseToTDenginePlan_AutoCreateWhenTargetMissing verifies that a
// missing TDengine target table yields an auto-create plan whose CREATE TABLE SQL
// maps DateTime64 to TIMESTAMP, FixedString(64) to VARCHAR(64), and degrades the
// Map(...) column to a VARCHAR column.
func TestBuildClickHouseToTDenginePlan_AutoCreateWhenTargetMissing(t *testing.T) {
	t.Parallel()

	src := &fakeMigrationDB{
		columns: map[string][]connection.ColumnDefinition{
			"analytics.metrics": {
				{Name: "event_time", Type: "DateTime64(3)", Nullable: "NO"},
				{Name: "host", Type: "FixedString(64)", Nullable: "YES"},
				{Name: "payload", Type: "Map(String,String)", Nullable: "YES"},
			},
		},
	}
	dst := &fakeMigrationDB{} // no columns registered -> target table is absent
	syncCfg := SyncConfig{
		SourceConfig:        connection.ConnectionConfig{Type: "clickhouse", Database: "analytics"},
		TargetConfig:        connection.ConnectionConfig{Type: "tdengine", Database: "taos"},
		TargetTableStrategy: "smart",
	}

	migrationPlan, srcColumns, dstColumns, planErr := buildClickHouseToTDenginePlan(syncCfg, "metrics", src, dst)
	if planErr != nil {
		t.Fatalf("buildClickHouseToTDenginePlan returned error: %v", planErr)
	}
	if !(len(srcColumns) == 3 && len(dstColumns) == 0) {
		t.Fatalf("unexpected columns lengths: source=%d target=%d", len(srcColumns), len(dstColumns))
	}
	if !migrationPlan.AutoCreate {
		t.Fatalf("expected auto create enabled")
	}
	ddl := migrationPlan.CreateTableSQL
	if !strings.Contains(ddl, "CREATE TABLE `taos`.`metrics`") {
		t.Fatalf("unexpected create sql: %s", ddl)
	}
	if !strings.Contains(ddl, "`event_time` TIMESTAMP") {
		t.Fatalf("expected datetime64 mapping, got: %s", ddl)
	}
	if !strings.Contains(ddl, "`host` VARCHAR(64)") {
		t.Fatalf("expected fixedstring mapping, got: %s", ddl)
	}
	if !strings.Contains(ddl, "`payload` VARCHAR(") {
		t.Fatalf("expected complex type degrade to VARCHAR, got: %s", ddl)
	}
}
// TestBuildClickHouseToPGLikePlan_AutoCreateWhenTargetMissing verifies that a
// missing Postgres target table yields an auto-create plan: UInt64 is widened to
// numeric(20,0), DateTime64 maps to timestamp, FixedString(64) to varchar(64),
// Map(...) degrades to jsonb, and the source primary key is preserved.
func TestBuildClickHouseToPGLikePlan_AutoCreateWhenTargetMissing(t *testing.T) {
	t.Parallel()

	src := &fakeMigrationDB{
		columns: map[string][]connection.ColumnDefinition{
			"analytics.metrics": {
				{Name: "id", Type: "UInt64", Nullable: "NO", Key: "PRI"},
				{Name: "event_time", Type: "DateTime64(3)", Nullable: "NO"},
				{Name: "host", Type: "FixedString(64)", Nullable: "YES"},
				{Name: "payload", Type: "Map(String,String)", Nullable: "YES"},
			},
		},
	}
	dst := &fakeMigrationDB{} // empty: target table does not exist
	syncCfg := SyncConfig{
		SourceConfig:        connection.ConnectionConfig{Type: "clickhouse", Database: "analytics"},
		TargetConfig:        connection.ConnectionConfig{Type: "postgres", Database: "public"},
		TargetTableStrategy: "smart",
	}

	migrationPlan, srcColumns, dstColumns, planErr := buildClickHouseToPGLikePlan(syncCfg, "metrics", src, dst)
	if planErr != nil {
		t.Fatalf("buildClickHouseToPGLikePlan returned error: %v", planErr)
	}
	if !(len(srcColumns) == 4 && len(dstColumns) == 0) {
		t.Fatalf("unexpected columns lengths: source=%d target=%d", len(srcColumns), len(dstColumns))
	}
	if !migrationPlan.AutoCreate {
		t.Fatalf("expected auto create enabled")
	}
	ddl := migrationPlan.CreateTableSQL
	if !strings.Contains(ddl, `CREATE TABLE "public"."metrics"`) {
		t.Fatalf("unexpected create sql: %s", ddl)
	}
	if !strings.Contains(ddl, `"id" numeric(20,0)`) {
		t.Fatalf("expected uint64 safeguard mapping, got: %s", ddl)
	}
	if !strings.Contains(ddl, `"event_time" timestamp`) {
		t.Fatalf("expected datetime64 mapping, got: %s", ddl)
	}
	if !strings.Contains(ddl, `"host" varchar(64)`) {
		t.Fatalf("expected fixedstring mapping, got: %s", ddl)
	}
	if !strings.Contains(ddl, `"payload" jsonb`) {
		t.Fatalf("expected complex type degrade to jsonb, got: %s", ddl)
	}
	if !strings.Contains(ddl, `PRIMARY KEY ("id")`) {
		t.Fatalf("expected primary key preservation, got: %s", ddl)
	}
}
// TestBuildPGLikeToClickHousePlan_AutoCreateWhenTargetMissing verifies that a
// missing ClickHouse target table yields an auto-create plan mapping timestamp to
// DateTime, degrading jsonb to Nullable(String), and turning the source primary
// key into the ORDER BY clause.
func TestBuildPGLikeToClickHousePlan_AutoCreateWhenTargetMissing(t *testing.T) {
	t.Parallel()

	src := &fakeMigrationDB{
		columns: map[string][]connection.ColumnDefinition{
			"public.orders": {
				{Name: "id", Type: "bigint", Nullable: "NO", Key: "PRI"},
				{Name: "created_at", Type: "timestamp without time zone", Nullable: "NO"},
				{Name: "profile", Type: "jsonb", Nullable: "YES"},
			},
		},
	}
	dst := &fakeMigrationDB{} // empty: target table does not exist
	syncCfg := SyncConfig{
		SourceConfig:        connection.ConnectionConfig{Type: "postgres", Database: "public"},
		TargetConfig:        connection.ConnectionConfig{Type: "clickhouse", Database: "analytics"},
		TargetTableStrategy: "smart",
	}

	migrationPlan, srcColumns, dstColumns, planErr := buildPGLikeToClickHousePlan(syncCfg, "orders", src, dst)
	if planErr != nil {
		t.Fatalf("buildPGLikeToClickHousePlan returned error: %v", planErr)
	}
	if !(len(srcColumns) == 3 && len(dstColumns) == 0) {
		t.Fatalf("unexpected columns lengths: source=%d target=%d", len(srcColumns), len(dstColumns))
	}
	if !migrationPlan.AutoCreate {
		t.Fatalf("expected auto create enabled")
	}
	ddl := migrationPlan.CreateTableSQL
	if !strings.Contains(ddl, "CREATE TABLE `analytics`.`orders`") {
		t.Fatalf("unexpected create sql: %s", ddl)
	}
	if !strings.Contains(ddl, "`created_at` DateTime") {
		t.Fatalf("expected timestamp mapping, got: %s", ddl)
	}
	if !strings.Contains(ddl, "`profile` Nullable(String)") {
		t.Fatalf("expected jsonb degrade to Nullable(String), got: %s", ddl)
	}
	if !strings.Contains(ddl, "ORDER BY (`id`)") {
		t.Fatalf("expected primary key order by, got: %s", ddl)
	}
}
// TestBuildTDengineToTDenginePlan_AutoCreateWhenTargetMissing verifies that a
// missing TDengine target table yields an auto-create plan that keeps the
// TIMESTAMP column, degrades the TAG column to a regular NCHAR column, and
// records a warning about that TAG degradation.
func TestBuildTDengineToTDenginePlan_AutoCreateWhenTargetMissing(t *testing.T) {
	t.Parallel()

	src := &fakeMigrationDB{
		columns: map[string][]connection.ColumnDefinition{
			"src.cpu": {
				{Name: "ts", Type: "TIMESTAMP", Nullable: "NO"},
				{Name: "host", Type: "NCHAR(64)", Nullable: "YES"},
				{Name: "region", Type: "NCHAR(32)", Nullable: "YES", Key: "TAG"},
			},
		},
	}
	dst := &fakeMigrationDB{} // empty: target table does not exist
	syncCfg := SyncConfig{
		SourceConfig:        connection.ConnectionConfig{Type: "tdengine", Database: "src"},
		TargetConfig:        connection.ConnectionConfig{Type: "tdengine", Database: "dst"},
		TargetTableStrategy: "smart",
	}

	migrationPlan, srcColumns, dstColumns, planErr := buildTDengineToTDenginePlan(syncCfg, "cpu", src, dst)
	if planErr != nil {
		t.Fatalf("buildTDengineToTDenginePlan returned error: %v", planErr)
	}
	if !(len(srcColumns) == 3 && len(dstColumns) == 0) {
		t.Fatalf("unexpected columns lengths: source=%d target=%d", len(srcColumns), len(dstColumns))
	}
	if !migrationPlan.AutoCreate {
		t.Fatalf("expected auto create enabled")
	}
	ddl := migrationPlan.CreateTableSQL
	if !strings.Contains(ddl, "CREATE TABLE `dst`.`cpu`") {
		t.Fatalf("unexpected create sql: %s", ddl)
	}
	if !strings.Contains(ddl, "`ts` TIMESTAMP") {
		t.Fatalf("expected timestamp preserved, got: %s", ddl)
	}
	if !strings.Contains(ddl, "`region` NCHAR(32)") {
		t.Fatalf("expected tag degrade to regular nchar column, got: %s", ddl)
	}
	if joined := strings.Join(migrationPlan.Warnings, " "); !strings.Contains(joined, "TAG") {
		t.Fatalf("expected TAG degrade warning, got: %v", migrationPlan.Warnings)
	}
}

View File

@@ -7,16 +7,15 @@ import (
)
func (s *SyncEngine) syncTableSchema(config SyncConfig, res *SyncResult, sourceDB db.Database, targetDB db.Database, tableName string) error {
targetType := resolveMigrationDBType(config.TargetConfig)
targetType := strings.ToLower(strings.TrimSpace(config.TargetConfig.Type))
if targetType != "mysql" {
s.appendLog(config.JobID, res, "warn", fmt.Sprintf("目标数据库类型=%s 暂不支持结构同步,已跳过表 %s", config.TargetConfig.Type, tableName))
return nil
}
sourceType := resolveMigrationDBType(config.SourceConfig)
sourceSchema, sourceTable := normalizeSchemaAndTable(sourceType, config.SourceConfig.Database, tableName)
targetSchema, targetTable := normalizeSchemaAndTable(targetType, config.TargetConfig.Database, tableName)
targetQueryTable := qualifiedNameForQuery(targetType, targetSchema, targetTable, tableName)
sourceSchema, sourceTable := normalizeSchemaAndTable(config.SourceConfig.Type, config.SourceConfig.Database, tableName)
targetSchema, targetTable := normalizeSchemaAndTable(config.TargetConfig.Type, config.TargetConfig.Database, tableName)
targetQueryTable := qualifiedNameForQuery(config.TargetConfig.Type, targetSchema, targetTable, tableName)
// 1) 获取源表字段
sourceCols, err := sourceDB.GetColumns(sourceSchema, sourceTable)
@@ -27,6 +26,7 @@ func (s *SyncEngine) syncTableSchema(config SyncConfig, res *SyncResult, sourceD
// 2) 确保目标表存在
targetCols, err := targetDB.GetColumns(targetSchema, targetTable)
if err != nil {
sourceType := strings.ToLower(strings.TrimSpace(config.SourceConfig.Type))
if sourceType != "mysql" {
return fmt.Errorf("目标表不存在且源类型=%s 暂不支持自动建表: %w", config.SourceConfig.Type, err)
}
@@ -62,6 +62,7 @@ func (s *SyncEngine) syncTableSchema(config SyncConfig, res *SyncResult, sourceD
// 3) 补齐目标缺失字段(安全策略:新增字段统一允许 NULL
missing := make([]string, 0)
sourceType := strings.ToLower(strings.TrimSpace(config.SourceConfig.Type))
for _, c := range sourceCols {
colName := strings.TrimSpace(c.Name)
if colName == "" {

View File

@@ -22,7 +22,7 @@ func quoteIdentByType(dbType string, ident string) string {
}
switch dbType {
case "mysql", "mariadb", "diros", "sphinx", "clickhouse", "tdengine":
case "mysql", "mariadb", "diros", "sphinx":
return "`" + strings.ReplaceAll(ident, "`", "``") + "`"
case "sqlserver":
escaped := strings.ReplaceAll(ident, "]", "]]")
@@ -74,10 +74,8 @@ func normalizeSchemaAndTable(dbType string, dbName string, tableName string) (st
}
switch strings.ToLower(strings.TrimSpace(dbType)) {
case "postgres", "kingbase", "highgo", "vastbase":
case "postgres", "kingbase", "vastbase":
return "public", rawTable
case "duckdb":
return "main", rawTable
default:
return rawDB, rawTable
}
@@ -93,7 +91,7 @@ func qualifiedNameForQuery(dbType string, schema string, table string, original
}
switch strings.ToLower(strings.TrimSpace(dbType)) {
case "postgres", "kingbase", "highgo", "vastbase":
case "postgres", "kingbase", "vastbase":
s := strings.TrimSpace(schema)
if s == "" {
s = "public"
@@ -102,16 +100,7 @@ func qualifiedNameForQuery(dbType string, schema string, table string, original
return raw
}
return s + "." + table
case "duckdb":
s := strings.TrimSpace(schema)
if s == "" {
s = "main"
}
if table == "" {
return raw
}
return s + "." + table
case "mysql", "mariadb", "diros", "sphinx", "clickhouse", "tdengine":
case "mysql", "mariadb", "diros", "sphinx":
s := strings.TrimSpace(schema)
if s == "" || table == "" {
return table

View File

@@ -12,17 +12,14 @@ import (
// SyncConfig defines the parameters for a synchronization task
type SyncConfig struct {
SourceConfig connection.ConnectionConfig `json:"sourceConfig"`
TargetConfig connection.ConnectionConfig `json:"targetConfig"`
Tables []string `json:"tables"`
Content string `json:"content,omitempty"` // "data", "schema", "both"
Mode string `json:"mode"` // "insert_update", "insert_only", "full_overwrite"
JobID string `json:"jobId,omitempty"`
AutoAddColumns bool `json:"autoAddColumns,omitempty"` // 自动补齐缺失字段
TargetTableStrategy string `json:"targetTableStrategy,omitempty"`
CreateIndexes bool `json:"createIndexes,omitempty"`
MongoCollectionName string `json:"mongoCollectionName,omitempty"`
TableOptions map[string]TableOptions `json:"tableOptions,omitempty"`
SourceConfig connection.ConnectionConfig `json:"sourceConfig"`
TargetConfig connection.ConnectionConfig `json:"targetConfig"`
Tables []string `json:"tables"` // Tables to sync
Content string `json:"content,omitempty"` // "data", "schema", "both"
Mode string `json:"mode"` // "insert_update", "insert_only", "full_overwrite"
JobID string `json:"jobId,omitempty"`
AutoAddColumns bool `json:"autoAddColumns,omitempty"` // 自动补齐缺失字段(当前仅 MySQL 目标支持)
TableOptions map[string]TableOptions `json:"tableOptions,omitempty"`
}
// SyncResult holds the result of the sync operation
@@ -48,13 +45,6 @@ func NewSyncEngine(reporter Reporter) *SyncEngine {
func (s *SyncEngine) RunSync(config SyncConfig) SyncResult {
result := SyncResult{Success: true, Logs: []string{}}
logger.Infof("开始数据同步:源=%s 目标=%s 表数量=%d", formatConnSummaryForSync(config.SourceConfig), formatConnSummaryForSync(config.TargetConfig), len(config.Tables))
if isRedisToMongoKeyspacePair(config) {
return s.runRedisToMongoSync(config, result)
}
if isMongoToRedisKeyspacePair(config) {
return s.runMongoToRedisSync(config, result)
}
totalTables := len(config.Tables)
s.progress(config.JobID, 0, totalTables, "", "开始同步")
@@ -80,7 +70,6 @@ func (s *SyncEngine) RunSync(config SyncConfig) SyncResult {
s.appendLog(config.JobID, &result, "warn", fmt.Sprintf("未知同步模式 %q已自动使用 insert_update", config.Mode))
}
defaultMode := normalizeSyncMode(config.Mode)
strategy := normalizeTargetTableStrategy(config.TargetTableStrategy)
contentLabel := "仅同步数据"
if syncSchema && syncData {
@@ -88,9 +77,9 @@ func (s *SyncEngine) RunSync(config SyncConfig) SyncResult {
} else if syncSchema {
contentLabel = "仅同步结构"
}
s.appendLog(config.JobID, &result, "info", fmt.Sprintf("同步内容:%s模式%s自动补字段%v;目标表策略:%s创建索引%v", contentLabel, defaultMode, config.AutoAddColumns, strategy, config.CreateIndexes))
s.appendLog(config.JobID, &result, "info", fmt.Sprintf("同步内容:%s模式%s自动补字段%v", contentLabel, defaultMode, config.AutoAddColumns))
sourceDB, err := newSyncDatabase(config.SourceConfig.Type)
sourceDB, err := db.NewDatabase(config.SourceConfig.Type)
if err != nil {
logger.Error(err, "初始化源数据库驱动失败:类型=%s", config.SourceConfig.Type)
return s.fail(config.JobID, totalTables, result, "初始化源数据库驱动失败: "+err.Error())
@@ -99,7 +88,7 @@ func (s *SyncEngine) RunSync(config SyncConfig) SyncResult {
// Custom DB setup would go here if needed
}
targetDB, err := newSyncDatabase(config.TargetConfig.Type)
targetDB, err := db.NewDatabase(config.TargetConfig.Type)
if err != nil {
logger.Error(err, "初始化目标数据库驱动失败:类型=%s", config.TargetConfig.Type)
return s.fail(config.JobID, totalTables, result, "初始化目标数据库驱动失败: "+err.Error())
@@ -123,6 +112,7 @@ func (s *SyncEngine) RunSync(config SyncConfig) SyncResult {
}
defer targetDB.Close()
// Iterate Tables
for i, tableName := range config.Tables {
func() {
tableMode := defaultMode
@@ -130,82 +120,30 @@ func (s *SyncEngine) RunSync(config SyncConfig) SyncResult {
s.progress(config.JobID, i, totalTables, tableName, fmt.Sprintf("同步表(%d/%d)", i+1, totalTables))
defer s.progress(config.JobID, i+1, totalTables, tableName, "表处理完成")
plan, cols, targetCols, err := buildSchemaMigrationPlan(config, tableName, sourceDB, targetDB)
if err != nil {
s.appendLog(config.JobID, &result, "error", fmt.Sprintf("生成迁移计划失败:表=%s 错误=%v", tableName, err))
return
}
for _, warning := range plan.Warnings {
s.appendLog(config.JobID, &result, "warn", fmt.Sprintf(" -> %s", warning))
}
for _, unsupported := range plan.UnsupportedObjects {
s.appendLog(config.JobID, &result, "warn", fmt.Sprintf(" -> %s", unsupported))
}
if strings.TrimSpace(plan.PlannedAction) != "" {
s.appendLog(config.JobID, &result, "info", fmt.Sprintf(" -> %s", plan.PlannedAction))
}
if !plan.TargetTableExists && !plan.AutoCreate {
s.appendLog(config.JobID, &result, "warn", fmt.Sprintf("表 %s 目标表不存在,当前策略不允许自动建表,已跳过", tableName))
return
}
if !plan.TargetTableExists && plan.AutoCreate {
s.progress(config.JobID, i, totalTables, tableName, "创建目标表")
if len(plan.PreDataSQL) > 0 {
if err := executeSQLStatements(targetDB.Exec, plan.PreDataSQL); err != nil {
s.appendLog(config.JobID, &result, "error", fmt.Sprintf("预执行建表 SQL 失败:表=%s 错误=%v", tableName, err))
return
}
}
if strings.TrimSpace(plan.CreateTableSQL) == "" {
s.appendLog(config.JobID, &result, "error", fmt.Sprintf("表 %s 自动建表失败:建表 SQL 为空", tableName))
return
}
if _, err := targetDB.Exec(plan.CreateTableSQL); err != nil {
s.appendLog(config.JobID, &result, "error", fmt.Sprintf("创建目标表失败:表=%s 错误=%v", tableName, err))
return
}
s.appendLog(config.JobID, &result, "info", fmt.Sprintf("目标表创建成功:%s", tableName))
targetCols, err = targetDB.GetColumns(plan.TargetSchema, plan.TargetTable)
if err != nil {
s.appendLog(config.JobID, &result, "error", fmt.Sprintf("创建目标表后获取字段失败:表=%s 错误=%v", tableName, err))
return
}
} else if len(plan.PreDataSQL) > 0 {
if syncSchema {
s.progress(config.JobID, i, totalTables, tableName, "同步表结构")
if err := executeSQLStatements(targetDB.Exec, plan.PreDataSQL); err != nil {
s.appendLog(config.JobID, &result, "error", fmt.Sprintf("同步表结构失败:表=%s 错误=%v", tableName, err))
if err := s.syncTableSchema(config, &result, sourceDB, targetDB, tableName); err != nil {
s.appendLog(config.JobID, &result, "error", fmt.Sprintf("表结构同步失败:表=%s 错误=%v", tableName, err))
return
}
targetCols, err = targetDB.GetColumns(plan.TargetSchema, plan.TargetTable)
if err != nil {
s.appendLog(config.JobID, &result, "warn", fmt.Sprintf("补字段后刷新目标字段失败:表=%s 错误=%v", tableName, err))
}
}
if !syncData {
if len(plan.PostDataSQL) > 0 {
s.progress(config.JobID, i, totalTables, tableName, "创建索引")
if err := executeSQLStatements(targetDB.Exec, plan.PostDataSQL); err != nil {
s.appendLog(config.JobID, &result, "error", fmt.Sprintf("创建索引失败:表=%s 错误=%v", tableName, err))
return
}
}
result.TablesSynced++
return
}
targetType := resolveMigrationDBType(config.TargetConfig)
sourceType := resolveMigrationDBType(config.SourceConfig)
targetTable := plan.TargetTable
sourceQueryTable, targetQueryTable := plan.SourceQueryTable, plan.TargetQueryTable
applyTableName := targetTable
switch targetType {
case "postgres", "kingbase", "highgo", "vastbase", "sqlserver":
applyTableName = targetQueryTable
}
sourceSchema, sourceTable := normalizeSchemaAndTable(config.SourceConfig.Type, config.SourceConfig.Database, tableName)
targetSchema, targetTable := normalizeSchemaAndTable(config.TargetConfig.Type, config.TargetConfig.Database, tableName)
sourceQueryTable := qualifiedNameForQuery(config.SourceConfig.Type, sourceSchema, sourceTable, tableName)
targetQueryTable := qualifiedNameForQuery(config.TargetConfig.Type, targetSchema, targetTable, tableName)
// 1. Get Columns & PKs
cols, err := sourceDB.GetColumns(sourceSchema, sourceTable)
if err != nil {
logger.Error(err, "获取源表列信息失败:表=%s", tableName)
s.appendLog(config.JobID, &result, "error", fmt.Sprintf("获取表 %s 的列信息失败: %v", tableName, err))
return
}
sourceColsByLower := make(map[string]connection.ColumnDefinition, len(cols))
for _, col := range cols {
if strings.TrimSpace(col.Name) == "" {
@@ -220,24 +158,25 @@ func (s *SyncEngine) RunSync(config SyncConfig) SyncResult {
pkCols = append(pkCols, col.Name)
}
}
requirePK := tableMode == "insert_update" && plan.TargetTableExists
pkCol := ""
if requirePK {
if len(pkCols) == 0 {
s.appendLog(config.JobID, &result, "warn", fmt.Sprintf("表 %s 未找到主键,当前模式需要差异对比,已跳过", tableName))
return
}
if len(pkCols) > 1 {
s.appendLog(config.JobID, &result, "warn", fmt.Sprintf("表 %s 为复合主键(%s当前暂不支持差异同步", tableName, strings.Join(pkCols, ",")))
return
}
pkCol = pkCols[0]
if len(pkCols) == 0 {
s.appendLog(config.JobID, &result, "warn", fmt.Sprintf("表 %s 未找到主键,已跳过数据同步(避免产生重复数据)", tableName))
return
}
if len(pkCols) > 1 {
s.appendLog(config.JobID, &result, "warn", fmt.Sprintf("表 %s 为复合主键(%s当前暂不支持数据同步", tableName, strings.Join(pkCols, ",")))
return
}
pkCol := pkCols[0]
opts := TableOptions{Insert: true, Update: true, Delete: false}
if config.TableOptions != nil {
if t, ok := config.TableOptions[tableName]; ok {
opts = t
// 默认防护:如用户未设置任意一个字段,保持 insert/update 默认 true、delete 默认 false
if !t.Insert && !t.Update && !t.Delete {
opts = t
}
}
}
if !opts.Insert && !opts.Update && !opts.Delete {
@@ -245,8 +184,10 @@ func (s *SyncEngine) RunSync(config SyncConfig) SyncResult {
return
}
// 2. Fetch Data (MEMORY INTENSIVE - PROTOTYPE ONLY)
// TODO: Implement paging/streaming
s.progress(config.JobID, i, totalTables, tableName, "读取源表数据")
sourceRows, _, err := sourceDB.Query(fmt.Sprintf("SELECT * FROM %s", quoteQualifiedIdentByType(sourceType, sourceQueryTable)))
sourceRows, _, err := sourceDB.Query(fmt.Sprintf("SELECT * FROM %s", quoteQualifiedIdentByType(config.SourceConfig.Type, sourceQueryTable)))
if err != nil {
logger.Error(err, "读取源表失败:表=%s", tableName)
s.appendLog(config.JobID, &result, "error", fmt.Sprintf("读取源表 %s 失败: %v", tableName, err))
@@ -255,19 +196,19 @@ func (s *SyncEngine) RunSync(config SyncConfig) SyncResult {
var inserts []map[string]interface{}
var updates []connection.UpdateRow
var deletes []map[string]interface{}
if tableMode == "insert_update" && plan.TargetTableExists {
if tableMode == "insert_update" {
s.progress(config.JobID, i, totalTables, tableName, "读取目标表数据")
targetRows, _, err := targetDB.Query(fmt.Sprintf("SELECT * FROM %s", quoteQualifiedIdentByType(targetType, targetQueryTable)))
targetRows, _, err := targetDB.Query(fmt.Sprintf("SELECT * FROM %s", quoteQualifiedIdentByType(config.TargetConfig.Type, targetQueryTable)))
if err != nil {
logger.Error(err, "读取目标表失败:表=%s", tableName)
s.appendLog(config.JobID, &result, "error", fmt.Sprintf("读取目标表 %s 失败: %v", tableName, err))
return
}
// 3. Compare (In-Memory Hash Map)
s.progress(config.JobID, i, totalTables, tableName, "对比差异")
targetMap := make(map[string]map[string]interface{}, len(targetRows))
targetMap := make(map[string]map[string]interface{})
for _, row := range targetRows {
if row[pkCol] == nil {
continue
@@ -279,6 +220,7 @@ func (s *SyncEngine) RunSync(config SyncConfig) SyncResult {
targetMap[pkVal] = row
}
sourcePKSet := make(map[string]struct{}, len(sourceRows))
for _, sRow := range sourceRows {
if sRow[pkCol] == nil {
continue
@@ -288,6 +230,7 @@ func (s *SyncEngine) RunSync(config SyncConfig) SyncResult {
continue
}
sourcePKSet[pkVal] = struct{}{}
if tRow, exists := targetMap[pkVal]; exists {
changes := make(map[string]interface{})
for k, v := range sRow {
@@ -296,12 +239,17 @@ func (s *SyncEngine) RunSync(config SyncConfig) SyncResult {
}
}
if len(changes) > 0 {
updates = append(updates, connection.UpdateRow{Keys: map[string]interface{}{pkCol: sRow[pkCol]}, Values: changes})
updates = append(updates, connection.UpdateRow{
Keys: map[string]interface{}{pkCol: sRow[pkCol]},
Values: changes,
})
}
} else {
inserts = append(inserts, sRow)
}
}
var deletes []map[string]interface{}
if opts.Delete {
for pkStr, row := range targetMap {
if _, ok := sourcePKSet[pkStr]; ok {
@@ -310,49 +258,150 @@ func (s *SyncEngine) RunSync(config SyncConfig) SyncResult {
deletes = append(deletes, map[string]interface{}{pkCol: row[pkCol]})
}
}
// apply operation selection
inserts = filterRowsByPKSelection(pkCol, inserts, opts.Insert, opts.SelectedInsertPKs)
updates = filterUpdatesByPKSelection(pkCol, updates, opts.Update, opts.SelectedUpdatePKs)
deletes = filterRowsByPKSelection(pkCol, deletes, opts.Delete, opts.SelectedDeletePKs)
} else {
inserts = sourceRows
if !opts.Insert {
inserts = nil
changeSet := connection.ChangeSet{
Inserts: inserts,
Updates: updates,
Deletes: deletes,
}
if tableMode == "full_overwrite" && plan.TargetTableExists {
s.appendLog(config.JobID, &result, "warn", fmt.Sprintf(" -> 全量覆盖模式:即将清空目标表 %s", tableName))
s.progress(config.JobID, i, totalTables, tableName, "清空目标表")
clearSQL := ""
if targetType == "mysql" {
clearSQL = fmt.Sprintf("TRUNCATE TABLE %s", quoteQualifiedIdentByType(targetType, targetQueryTable))
// 4. Align schema (target missing columns)
s.progress(config.JobID, i, totalTables, tableName, "检查字段一致性")
requiredCols := collectRequiredColumns(changeSet.Inserts, changeSet.Updates)
targetCols, err := targetDB.GetColumns(targetSchema, targetTable)
if err != nil {
s.appendLog(config.JobID, &result, "warn", fmt.Sprintf(" -> 获取目标表字段失败,已跳过字段一致性检查: %v", err))
} else {
targetColSet := make(map[string]struct{}, len(targetCols))
for _, c := range targetCols {
name := strings.ToLower(strings.TrimSpace(c.Name))
if name == "" {
continue
}
targetColSet[name] = struct{}{}
}
missing := make([]string, 0)
for lower, original := range requiredCols {
if _, ok := targetColSet[lower]; !ok {
missing = append(missing, original)
}
}
sort.Strings(missing)
if len(missing) > 0 {
if config.AutoAddColumns && strings.ToLower(strings.TrimSpace(config.TargetConfig.Type)) == "mysql" {
s.appendLog(config.JobID, &result, "warn", fmt.Sprintf(" -> 目标表缺少字段 %d 个,开始自动补齐: %s", len(missing), strings.Join(missing, ", ")))
added := 0
for _, colName := range missing {
colLower := strings.ToLower(strings.TrimSpace(colName))
colType := "TEXT"
if strings.ToLower(strings.TrimSpace(config.SourceConfig.Type)) == "mysql" {
if srcCol, ok := sourceColsByLower[colLower]; ok {
colType = sanitizeMySQLColumnType(srcCol.Type)
}
}
alterSQL := fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s %s NULL",
quoteQualifiedIdentByType("mysql", targetQueryTable),
quoteIdentByType("mysql", colName),
colType,
)
if _, err := targetDB.Exec(alterSQL); err != nil {
s.appendLog(config.JobID, &result, "error", fmt.Sprintf(" -> 自动补字段失败:字段=%s 错误=%v", colName, err))
continue
}
added++
}
s.appendLog(config.JobID, &result, "info", fmt.Sprintf(" -> 自动补字段完成:成功=%d 失败=%d", added, len(missing)-added))
// refresh columns
targetCols, err = targetDB.GetColumns(targetSchema, targetTable)
if err == nil {
targetColSet = make(map[string]struct{}, len(targetCols))
for _, c := range targetCols {
name := strings.ToLower(strings.TrimSpace(c.Name))
if name == "" {
continue
}
targetColSet[name] = struct{}{}
}
}
} else {
s.appendLog(config.JobID, &result, "warn", fmt.Sprintf(" -> 目标表缺少字段 %d 个(未开启自动补齐),将自动忽略:%s", len(missing), strings.Join(missing, ", ")))
}
// filter out still-missing columns to avoid apply failure
changeSet.Inserts = filterInsertRows(changeSet.Inserts, targetColSet)
changeSet.Updates = filterUpdateRows(changeSet.Updates, targetColSet)
}
}
// 5. Apply Changes
s.progress(config.JobID, i, totalTables, tableName, "应用变更")
if len(changeSet.Inserts) > 0 || len(changeSet.Updates) > 0 || len(changeSet.Deletes) > 0 {
s.appendLog(config.JobID, &result, "info", fmt.Sprintf(" -> 需插入: %d 行, 需更新: %d 行, 需删除: %d 行", len(changeSet.Inserts), len(changeSet.Updates), len(changeSet.Deletes)))
if applier, ok := targetDB.(db.BatchApplier); ok {
if err := applier.ApplyChanges(targetTable, changeSet); err != nil {
s.appendLog(config.JobID, &result, "error", fmt.Sprintf(" -> 应用变更失败: %v", err))
} else {
result.RowsInserted += len(changeSet.Inserts)
result.RowsUpdated += len(changeSet.Updates)
result.RowsDeleted += len(changeSet.Deletes)
}
} else {
clearSQL = fmt.Sprintf("DELETE FROM %s", quoteQualifiedIdentByType(targetType, targetQueryTable))
}
if _, err := targetDB.Exec(clearSQL); err != nil {
s.appendLog(config.JobID, &result, "error", fmt.Sprintf(" -> 清空目标表失败: %v", err))
return
s.appendLog(config.JobID, &result, "warn", " -> 目标驱动不支持应用数据变更 (ApplyChanges).")
}
} else {
s.appendLog(config.JobID, &result, "info", " -> 数据一致,无需变更.")
}
result.TablesSynced++
return
} else {
// insert_only / full_overwrite: do not compare target, just insert source rows
inserts = sourceRows
}
// full_overwrite: clear target table first
if tableMode == "full_overwrite" {
s.appendLog(config.JobID, &result, "warn", fmt.Sprintf(" -> 全量覆盖模式:即将清空目标表 %s", tableName))
s.progress(config.JobID, i, totalTables, tableName, "清空目标表")
clearSQL := ""
if strings.ToLower(strings.TrimSpace(config.TargetConfig.Type)) == "mysql" {
clearSQL = fmt.Sprintf("TRUNCATE TABLE %s", quoteQualifiedIdentByType(config.TargetConfig.Type, targetQueryTable))
} else {
clearSQL = fmt.Sprintf("DELETE FROM %s", quoteQualifiedIdentByType(config.TargetConfig.Type, targetQueryTable))
}
if _, err := targetDB.Exec(clearSQL); err != nil {
s.appendLog(config.JobID, &result, "error", fmt.Sprintf(" -> 清空目标表失败: %v", err))
return
}
}
changeSet := connection.ChangeSet{Inserts: inserts, Updates: updates, Deletes: deletes}
// 4. Align schema (target missing columns)
s.progress(config.JobID, i, totalTables, tableName, "检查字段一致性")
targetColsResolved := targetCols
if len(targetColsResolved) == 0 {
targetColsResolved, err = targetDB.GetColumns(plan.TargetSchema, plan.TargetTable)
if err != nil {
s.appendLog(config.JobID, &result, "warn", fmt.Sprintf(" -> 获取目标表字段失败,已跳过字段一致性检查: %v", err))
}
}
if len(targetColsResolved) > 0 {
targetColSet := make(map[string]struct{}, len(targetColsResolved))
for _, c := range targetColsResolved {
requiredCols := collectRequiredColumns(inserts, updates)
targetCols, err := targetDB.GetColumns(targetSchema, targetTable)
if err != nil {
s.appendLog(config.JobID, &result, "warn", fmt.Sprintf(" -> 获取目标表字段失败,已跳过字段一致性检查: %v", err))
} else {
targetColSet := make(map[string]struct{}, len(targetCols))
for _, c := range targetCols {
name := strings.ToLower(strings.TrimSpace(c.Name))
if name == "" {
continue
}
targetColSet[name] = struct{}{}
}
requiredCols := collectRequiredColumns(changeSet.Inserts, changeSet.Updates)
missing := make([]string, 0)
for lower, original := range requiredCols {
if _, ok := targetColSet[lower]; !ok {
@@ -360,64 +409,79 @@ func (s *SyncEngine) RunSync(config SyncConfig) SyncResult {
}
}
sort.Strings(missing)
if len(missing) > 0 {
if config.AutoAddColumns && supportsAutoAddColumnsForPair(sourceType, targetType) {
if config.AutoAddColumns && strings.ToLower(strings.TrimSpace(config.TargetConfig.Type)) == "mysql" {
s.appendLog(config.JobID, &result, "warn", fmt.Sprintf(" -> 目标表缺少字段 %d 个,开始自动补齐: %s", len(missing), strings.Join(missing, ", ")))
added := 0
for _, colName := range missing {
colLower := strings.ToLower(strings.TrimSpace(colName))
srcCol, ok := sourceColsByLower[colLower]
if !ok {
continue
}
alterSQL, err := buildAddColumnSQLForPair(sourceType, targetType, targetQueryTable, srcCol)
if err != nil {
s.appendLog(config.JobID, &result, "error", fmt.Sprintf(" -> 自动补字段失败:字段=%s 错误=%v", colName, err))
continue
colType := "TEXT"
if strings.ToLower(strings.TrimSpace(config.SourceConfig.Type)) == "mysql" {
if srcCol, ok := sourceColsByLower[colLower]; ok {
colType = sanitizeMySQLColumnType(srcCol.Type)
}
}
alterSQL := fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s %s NULL",
quoteQualifiedIdentByType("mysql", targetQueryTable),
quoteIdentByType("mysql", colName),
colType,
)
if _, err := targetDB.Exec(alterSQL); err != nil {
s.appendLog(config.JobID, &result, "error", fmt.Sprintf(" -> 自动补字段失败:字段=%s 错误=%v", colName, err))
continue
}
added++
targetColSet[colLower] = struct{}{}
}
s.appendLog(config.JobID, &result, "info", fmt.Sprintf(" -> 自动补字段完成:成功=%d 失败=%d", added, len(missing)-added))
// refresh columns
targetCols, err = targetDB.GetColumns(targetSchema, targetTable)
if err == nil {
targetColSet = make(map[string]struct{}, len(targetCols))
for _, c := range targetCols {
name := strings.ToLower(strings.TrimSpace(c.Name))
if name == "" {
continue
}
targetColSet[name] = struct{}{}
}
}
} else {
s.appendLog(config.JobID, &result, "warn", fmt.Sprintf(" -> 目标表缺少字段 %d 个(未开启自动补齐),将自动忽略:%s", len(missing), strings.Join(missing, ", ")))
}
changeSet.Inserts = filterInsertRows(changeSet.Inserts, targetColSet)
changeSet.Updates = filterUpdateRows(changeSet.Updates, targetColSet)
// filter out still-missing columns to avoid apply failure
inserts = filterInsertRows(inserts, targetColSet)
updates = filterUpdateRows(updates, targetColSet)
}
}
// 5. Apply Changes
s.progress(config.JobID, i, totalTables, tableName, "应用变更")
if len(changeSet.Inserts) > 0 || len(changeSet.Updates) > 0 || len(changeSet.Deletes) > 0 {
s.appendLog(config.JobID, &result, "info", fmt.Sprintf(" -> 需插入: %d 行, 需更新: %d 行, 需删除: %d 行", len(changeSet.Inserts), len(changeSet.Updates), len(changeSet.Deletes)))
changeSet := connection.ChangeSet{
Inserts: inserts,
Updates: updates,
}
if len(changeSet.Inserts) > 0 || len(changeSet.Updates) > 0 {
s.appendLog(config.JobID, &result, "info", fmt.Sprintf(" -> 需插入: %d 行, 需更新: %d 行", len(changeSet.Inserts), len(changeSet.Updates)))
if applier, ok := targetDB.(db.BatchApplier); ok {
if err := applier.ApplyChanges(applyTableName, changeSet); err != nil {
if err := applier.ApplyChanges(targetTable, changeSet); err != nil {
s.appendLog(config.JobID, &result, "error", fmt.Sprintf(" -> 应用变更失败: %v", err))
return
} else {
result.RowsInserted += len(changeSet.Inserts)
result.RowsUpdated += len(changeSet.Updates)
}
result.RowsInserted += len(changeSet.Inserts)
result.RowsUpdated += len(changeSet.Updates)
result.RowsDeleted += len(changeSet.Deletes)
} else {
s.appendLog(config.JobID, &result, "warn", " -> 目标驱动不支持应用数据变更 (ApplyChanges).")
return
}
} else {
s.appendLog(config.JobID, &result, "info", " -> 数据一致,无需变更.")
}
if len(plan.PostDataSQL) > 0 {
s.progress(config.JobID, i, totalTables, tableName, "创建索引")
if err := executeSQLStatements(targetDB.Exec, plan.PostDataSQL); err != nil {
s.appendLog(config.JobID, &result, "error", fmt.Sprintf("创建索引失败:表=%s 错误=%v", tableName, err))
return
}
}
result.TablesSynced++
}()
}
@@ -490,26 +554,3 @@ func (s *SyncEngine) fail(jobID string, totalTables int, res SyncResult, msg str
s.progress(jobID, res.TablesSynced, totalTables, "", "同步失败")
return res
}
func (s *SyncEngine) execDDLStatements(jobID string, res *SyncResult, database db.Database, tableName string, stage string, statements []string) error {
for _, statement := range statements {
sqlText := strings.TrimSpace(statement)
if sqlText == "" {
continue
}
if _, err := database.Exec(sqlText); err != nil {
return fmt.Errorf("%s失败: %w", stage, err)
}
s.appendLog(jobID, res, "info", fmt.Sprintf("表 %s %s成功%s", tableName, stage, shortenSyncSQL(sqlText)))
}
return nil
}
func shortenSyncSQL(sqlText string) string {
text := strings.TrimSpace(strings.ReplaceAll(strings.ReplaceAll(sqlText, "\n", " "), "\t", " "))
text = strings.Join(strings.Fields(text), " ")
if len(text) <= 120 {
return text
}
return text[:117] + "..."
}

View File

@@ -27,3 +27,4 @@ type Reporter struct {
OnLog func(event SyncLogEvent)
OnProgress func(event SyncProgressEvent)
}