From 5e5eb95b55f9d2469e0745e6a129c1ed93be1a9c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E7=AC=A8=E7=AC=A8?=
Date: Tue, 21 Apr 2026 19:20:03 +0800
Subject: [PATCH] feat: add llm test button

---
 src/locales/en-US.ts                       |  16 ++
 src/locales/zh-CN.ts                       |  16 ++
 src/locales/zh-TW.ts                       |  16 ++
 src/views/setting/AccountSettingSystem.vue | 212 ++++++++++++++++++++-
 4 files changed, 257 insertions(+), 3 deletions(-)

diff --git a/src/locales/en-US.ts b/src/locales/en-US.ts
index 5673ce8b..0bc4324e 100644
--- a/src/locales/en-US.ts
+++ b/src/locales/en-US.ts
@@ -1334,6 +1334,22 @@ export default {
     llmApiKeyPlaceholder: 'Please enter API key',
     llmBaseUrl: 'LLM Base URL',
     llmBaseUrlHint: 'Base URL for LLM API, used for custom API endpoints',
+    llmTestAction: 'Test Call',
+    llmTestDisabledAgent: 'Please enable AI Assistant first',
+    llmTestDisabledApiKey: 'Please save the LLM API key before testing',
+    llmTestDisabledModel: 'Please save the LLM model before testing',
+    llmTestDisabledUnsaved: 'Testing only uses saved configuration. Please save before testing',
+    llmTestSaving: 'Basic settings are being saved, please test again in a moment',
+    llmTestLoading: 'Testing LLM call, please wait',
+    llmTestSuccess: 'Test Succeeded',
+    llmTestFailed: 'Test Failed',
+    llmTestProvider: 'Provider',
+    llmTestModel: 'Model',
+    llmTestDuration: 'Duration',
+    llmTestReplyPreview: 'Reply Preview',
+    llmTestErrorMessage: 'Error Message',
+    llmTestSuccessToast: 'LLM test call succeeded',
+    llmTestFailedToast: 'LLM test call failed',
     aiAgentGlobal: 'Global AI Assistant',
     aiAgentGlobalHint: 'Enable global AI assistant functionality, all message conversations will be answered by the AI agent without using the /ai command',
diff --git a/src/locales/zh-CN.ts b/src/locales/zh-CN.ts
index 96fda8fa..5c0cfb02 100644
--- a/src/locales/zh-CN.ts
+++ b/src/locales/zh-CN.ts
@@ -1332,6 +1332,22 @@ export default {
     llmApiKeyPlaceholder: '请输入API密钥',
     llmBaseUrl: 'LLM基础URL',
     llmBaseUrlHint: 'LLM API的基础URL地址,用于自定义API端点',
+    llmTestAction: '测试调用',
+    llmTestDisabledAgent: '请先启用智能助手',
+    llmTestDisabledApiKey: '请先保存 LLM API 密钥后再测试',
+    llmTestDisabledModel: '请先保存 LLM 模型后再测试',
+    llmTestDisabledUnsaved: '测试仅针对已保存配置,请先保存后再测试',
+    llmTestSaving: '基础设置保存中,请稍后再测试',
+    llmTestLoading: '正在测试 LLM 调用,请稍候',
+    llmTestSuccess: '测试成功',
+    llmTestFailed: '测试失败',
+    llmTestProvider: '供应商',
+    llmTestModel: '模型',
+    llmTestDuration: '耗时',
+    llmTestReplyPreview: '结果摘要',
+    llmTestErrorMessage: '错误信息',
+    llmTestSuccessToast: 'LLM 调用测试成功',
+    llmTestFailedToast: 'LLM 调用测试失败',
     aiAgentGlobal: '全局智能助手',
     aiAgentGlobalHint: '启用全局智能助手功能,所有消息对话均使用智能体回答而不用使用/ai命令',
     aiAgentJobInterval: '定时唤醒',
diff --git a/src/locales/zh-TW.ts b/src/locales/zh-TW.ts
index 5c3d2361..5ca828d6 100644
--- a/src/locales/zh-TW.ts
+++ b/src/locales/zh-TW.ts
@@ -1334,6 +1334,22 @@ export default {
     llmApiKeyPlaceholder: '請輸入API密鑰',
     llmBaseUrl: 'LLM基礎URL',
     llmBaseUrlHint: 'LLM API的基礎URL地址,用於自定義API端點',
+    llmTestAction: '測試調用',
+    llmTestDisabledAgent: '請先啟用智能助手',
+    llmTestDisabledApiKey: '請先保存 LLM API 密鑰後再測試',
+    llmTestDisabledModel: '請先保存 LLM 模型後再測試',
+    llmTestDisabledUnsaved: '測試僅針對已保存配置,請先保存後再測試',
+    llmTestSaving: '基礎設置保存中,請稍後再測試',
+    llmTestLoading: '正在測試 LLM 調用,請稍候',
+    llmTestSuccess: '測試成功',
+    llmTestFailed: '測試失敗',
+    llmTestProvider: '供應商',
+    llmTestModel: '模型',
+    llmTestDuration: '耗時',
+    llmTestReplyPreview: '結果摘要',
+    llmTestErrorMessage: '錯誤信息',
+    llmTestSuccessToast: 'LLM 調用測試成功',
+    llmTestFailedToast: 'LLM 調用測試失敗',
     aiAgentGlobal: '全局智能助手',
     aiAgentGlobalHint: '啟用全局智能助手功能,所有消息對話均使用智能體回答而不用使用/ai命令',
     aiAgentJobInterval: '定時喚醒',
diff --git a/src/views/setting/AccountSettingSystem.vue b/src/views/setting/AccountSettingSystem.vue
index 4f758188..168457cf 100644
--- a/src/views/setting/AccountSettingSystem.vue
+++ b/src/views/setting/AccountSettingSystem.vue
@@ -159,6 +159,86 @@ const advancedDialog = ref(false)
 // LLM 模型列表
 const llmModels = ref([])
 const loadingModels = ref(false)
+const savingBasic = ref(false)
+const testingLlm = ref(false)
+
+type LlmSettingsSnapshot = {
+  AI_AGENT_ENABLE: boolean
+  LLM_PROVIDER: string
+  LLM_MODEL: string
+  LLM_API_KEY: string
+  LLM_BASE_URL: string
+}
+
+type LlmTestResult = {
+  success: boolean
+  provider: string
+  model: string
+  duration_ms?: number
+  reply_preview?: string
+  message?: string
+}
+
+const llmTestResult = ref(null)
+let llmTestRequestId = 0
+let llmTestAbortController: AbortController | null = null
+
+function buildLlmSnapshot(): LlmSettingsSnapshot {
+  return {
+    AI_AGENT_ENABLE: Boolean(SystemSettings.value.Basic.AI_AGENT_ENABLE),
+    LLM_PROVIDER: String(SystemSettings.value.Basic.LLM_PROVIDER ?? ''),
+    LLM_MODEL: String(SystemSettings.value.Basic.LLM_MODEL ?? ''),
+    LLM_API_KEY: String(SystemSettings.value.Basic.LLM_API_KEY ?? ''),
+    LLM_BASE_URL: String(SystemSettings.value.Basic.LLM_BASE_URL ?? ''),
+  }
+}
+
+function buildLlmSnapshotKey(snapshot: LlmSettingsSnapshot) {
+  return JSON.stringify(snapshot)
+}
+
+function invalidateLlmTestState() {
+  llmTestRequestId += 1
+  if (llmTestAbortController) {
+    llmTestAbortController.abort()
+    llmTestAbortController = null
+  }
+  testingLlm.value = false
+  llmTestResult.value = null
+}
+
+const savedLlmSnapshot = ref(buildLlmSnapshot())
+
+const currentLlmSnapshot = computed(() => buildLlmSnapshot())
+const currentLlmSnapshotKey = computed(() => buildLlmSnapshotKey(currentLlmSnapshot.value))
+const savedLlmSnapshotKey = computed(() => buildLlmSnapshotKey(savedLlmSnapshot.value))
+
+const hasSavedLlmChanges = computed(
+  () => currentLlmSnapshotKey.value !== savedLlmSnapshotKey.value,
+)
+
+const canTestLlm = computed(() => {
+  const snapshot = currentLlmSnapshot.value
+  return (
+    snapshot.AI_AGENT_ENABLE &&
+    Boolean(snapshot.LLM_API_KEY.trim()) &&
+    Boolean(snapshot.LLM_MODEL.trim()) &&
+    !savingBasic.value &&
+    !testingLlm.value &&
+    !hasSavedLlmChanges.value
+  )
+})
+
+const llmTestDisabledReason = computed(() => {
+  const snapshot = currentLlmSnapshot.value
+  if (!snapshot.AI_AGENT_ENABLE) return t('setting.system.llmTestDisabledAgent')
+  if (!snapshot.LLM_API_KEY.trim()) return t('setting.system.llmTestDisabledApiKey')
+  if (!snapshot.LLM_MODEL.trim()) return t('setting.system.llmTestDisabledModel')
+  if (savingBasic.value) return t('setting.system.llmTestSaving')
+  if (testingLlm.value) return t('setting.system.llmTestLoading')
+  if (hasSavedLlmChanges.value) return t('setting.system.llmTestDisabledUnsaved')
+  return ''
+})
 
 const activeTab = ref('system')
@@ -300,6 +380,7 @@ async function saveMediaServerSetting() {
 
 // 加载系统设置
 async function loadSystemSettings() {
+  invalidateLlmTestState()
   try {
     const result: { [key: string]: any } = await api.get('system/env')
     if (result.success) {
@@ -309,6 +390,7 @@
         if (result.data.hasOwnProperty(key)) (SystemSettings.value[sectionKey] as any)[key] = result.data[key]
       })
     }
+      savedLlmSnapshot.value = buildLlmSnapshot()
   }
  } catch (error) {
    console.log(error)
@@ -333,8 +415,67 @@
 
 // 保存基础设置
 async function saveBasicSettings() {
-  if (await saveSystemSetting(SystemSettings.value.Basic)) {
-    $toast.success(t('setting.system.basicSaveSuccess'))
+  savingBasic.value = true
+  try {
+    if (await saveSystemSetting(SystemSettings.value.Basic)) {
+      savedLlmSnapshot.value = buildLlmSnapshot()
+      $toast.success(t('setting.system.basicSaveSuccess'))
+    } else {
+      llmTestResult.value = null
+    }
+  } finally {
+    savingBasic.value = false
+  }
+}
+
+async function testLlmConnection() {
+  if (!canTestLlm.value) return
+
+  const snapshot = buildLlmSnapshot()
+  const snapshotKey = buildLlmSnapshotKey(snapshot)
+  const requestId = ++llmTestRequestId
+  if (llmTestAbortController) llmTestAbortController.abort()
+  const abortController = new AbortController()
+  llmTestAbortController = abortController
+
+  testingLlm.value = true
+  llmTestResult.value = null
+  try {
+    const result: { [key: string]: any } = await api.post('system/llm-test', null, {
+      signal: abortController.signal,
+    })
+    if (requestId !== llmTestRequestId || abortController.signal.aborted || currentLlmSnapshotKey.value !== snapshotKey) {
+      return
+    }
+
+    const data = result?.data ?? {}
+    llmTestResult.value = {
+      success: Boolean(result?.success),
+      provider: data.provider ?? snapshot.LLM_PROVIDER,
+      model: data.model ?? snapshot.LLM_MODEL,
+      duration_ms: data.duration_ms,
+      reply_preview: data.reply_preview,
+      message: result?.message,
+    }
+    if (result?.success) $toast.success(t('setting.system.llmTestSuccessToast'))
+    else $toast.error(t('setting.system.llmTestFailedToast'))
+  } catch (error) {
+    if (requestId !== llmTestRequestId || abortController.signal.aborted || currentLlmSnapshotKey.value !== snapshotKey) {
+      return
+    }
+    const message = error instanceof Error ? error.message : String(error)
+    llmTestResult.value = {
+      success: false,
+      provider: snapshot.LLM_PROVIDER,
+      model: snapshot.LLM_MODEL,
+      message,
+    }
+    $toast.error(t('setting.system.llmTestFailedToast'))
+    console.log(error)
+  } finally {
+    if (requestId !== llmTestRequestId) return
+    if (llmTestAbortController === abortController) llmTestAbortController = null
+    testingLlm.value = false
   }
 }
@@ -557,6 +698,17 @@ onActivated(async () => {
 
 onDeactivated(() => {
   isRequest.value = false
 })
+
+onBeforeUnmount(() => {
+  invalidateLlmTestState()
+})
+
+watch(
+  currentLlmSnapshotKey,
+  (snapshotKey, previousSnapshotKey) => {
+    if (snapshotKey !== previousSnapshotKey) invalidateLlmTestState()
+  },
+)