feat: add llm test button

This commit is contained in:
笨笨
2026-04-21 19:20:03 +08:00
committed by jxxghp
parent 74e6f8b03e
commit 5e5eb95b55
4 changed files with 257 additions and 3 deletions

View File

@@ -1334,6 +1334,22 @@ export default {
llmApiKeyPlaceholder: 'Please enter API key',
llmBaseUrl: 'LLM Base URL',
llmBaseUrlHint: 'Base URL for LLM API, used for custom API endpoints',
llmTestAction: 'Test Call',
llmTestDisabledAgent: 'Please enable AI Assistant first',
llmTestDisabledApiKey: 'Please save the LLM API key before testing',
llmTestDisabledModel: 'Please save the LLM model before testing',
llmTestDisabledUnsaved: 'Testing only uses saved configuration. Please save before testing',
llmTestSaving: 'Basic settings are being saved, please test again in a moment',
llmTestLoading: 'Testing LLM call, please wait',
llmTestSuccess: 'Test Succeeded',
llmTestFailed: 'Test Failed',
llmTestProvider: 'Provider',
llmTestModel: 'Model',
llmTestDuration: 'Duration',
llmTestReplyPreview: 'Reply Preview',
llmTestErrorMessage: 'Error Message',
llmTestSuccessToast: 'LLM test call succeeded',
llmTestFailedToast: 'LLM test call failed',
aiAgentGlobal: 'Global AI Assistant',
aiAgentGlobalHint:
'Enable global AI assistant functionality, all message conversations will be answered by the AI agent without using the /ai command',

View File

@@ -1332,6 +1332,22 @@ export default {
llmApiKeyPlaceholder: '请输入API密钥',
llmBaseUrl: 'LLM基础URL',
llmBaseUrlHint: 'LLM API的基础URL地址用于自定义API端点',
llmTestAction: '测试调用',
llmTestDisabledAgent: '请先启用智能助手',
llmTestDisabledApiKey: '请先保存 LLM API 密钥后再测试',
llmTestDisabledModel: '请先保存 LLM 模型后再测试',
llmTestDisabledUnsaved: '测试仅针对已保存配置,请先保存后再测试',
llmTestSaving: '基础设置保存中,请稍后再测试',
llmTestLoading: '正在测试 LLM 调用,请稍候',
llmTestSuccess: '测试成功',
llmTestFailed: '测试失败',
llmTestProvider: '供应商',
llmTestModel: '模型',
llmTestDuration: '耗时',
llmTestReplyPreview: '结果摘要',
llmTestErrorMessage: '错误信息',
llmTestSuccessToast: 'LLM 调用测试成功',
llmTestFailedToast: 'LLM 调用测试失败',
aiAgentGlobal: '全局智能助手',
aiAgentGlobalHint: '启用全局智能助手功能,所有消息对话均使用智能体回答而不用使用/ai命令',
aiAgentJobInterval: '定时唤醒',

View File

@@ -1334,6 +1334,22 @@ export default {
llmApiKeyPlaceholder: '請輸入API密鑰',
llmBaseUrl: 'LLM基礎URL',
llmBaseUrlHint: 'LLM API的基礎URL地址用於自定義API端點',
llmTestAction: '測試調用',
llmTestDisabledAgent: '請先啟用智能助手',
llmTestDisabledApiKey: '請先保存 LLM API 密鑰後再測試',
llmTestDisabledModel: '請先保存 LLM 模型後再測試',
llmTestDisabledUnsaved: '測試僅針對已保存配置,請先保存後再測試',
llmTestSaving: '基礎設置保存中,請稍後再測試',
llmTestLoading: '正在測試 LLM 調用,請稍候',
llmTestSuccess: '測試成功',
llmTestFailed: '測試失敗',
llmTestProvider: '供應商',
llmTestModel: '模型',
llmTestDuration: '耗時',
llmTestReplyPreview: '結果摘要',
llmTestErrorMessage: '錯誤信息',
llmTestSuccessToast: 'LLM 調用測試成功',
llmTestFailedToast: 'LLM 調用測試失敗',
aiAgentGlobal: '全局智能助手',
aiAgentGlobalHint: '啟用全局智能助手功能,所有消息對話均使用智能體回答而不用使用/ai命令',
aiAgentJobInterval: '定時喚醒',

View File

@@ -159,6 +159,86 @@ const advancedDialog = ref(false)
// LLM 模型列表
const llmModels = ref<string[]>([])
const loadingModels = ref(false)
// True while the Basic settings section is being persisted (disables testing).
const savingBasic = ref(false)
// True while an LLM test request is in flight.
const testingLlm = ref(false)
// The subset of Basic settings that affects an LLM test call.
type LlmSettingsSnapshot = {
AI_AGENT_ENABLE: boolean
LLM_PROVIDER: string
LLM_MODEL: string
LLM_API_KEY: string
LLM_BASE_URL: string
}
// Shape of the result shown in the test-result alert.
type LlmTestResult = {
success: boolean
provider: string
model: string
duration_ms?: number
reply_preview?: string
message?: string
}
// Last test outcome, or null when no (valid) result should be displayed.
const llmTestResult = ref<LlmTestResult | null>(null)
// Monotonic id used to drop responses from superseded test requests.
let llmTestRequestId = 0
// Controller for the in-flight test request, if any, so it can be aborted.
let llmTestAbortController: AbortController | null = null
/**
 * Capture the current LLM-related Basic settings as a plain, normalized
 * snapshot (booleans coerced, nullish strings mapped to '').
 */
function buildLlmSnapshot(): LlmSettingsSnapshot {
  const basic = SystemSettings.value.Basic
  return {
    AI_AGENT_ENABLE: Boolean(basic.AI_AGENT_ENABLE),
    LLM_PROVIDER: String(basic.LLM_PROVIDER ?? ''),
    LLM_MODEL: String(basic.LLM_MODEL ?? ''),
    LLM_API_KEY: String(basic.LLM_API_KEY ?? ''),
    LLM_BASE_URL: String(basic.LLM_BASE_URL ?? ''),
  }
}
// Canonical string form of a snapshot; equal keys mean identical settings.
function buildLlmSnapshotKey(snapshot: LlmSettingsSnapshot) {
return JSON.stringify(snapshot)
}
/**
 * Discard all in-flight and displayed LLM test state: bump the request id so
 * late responses are ignored, abort any pending request, and clear the
 * loading flag and previous result.
 */
function invalidateLlmTestState() {
  llmTestRequestId += 1
  llmTestAbortController?.abort()
  llmTestAbortController = null
  testingLlm.value = false
  llmTestResult.value = null
}
// Snapshot of the LLM settings as last persisted to the backend.
const savedLlmSnapshot = ref<LlmSettingsSnapshot>(buildLlmSnapshot())
// Live snapshot of what is currently entered in the form.
const currentLlmSnapshot = computed(() => buildLlmSnapshot())
const currentLlmSnapshotKey = computed(() => buildLlmSnapshotKey(currentLlmSnapshot.value))
const savedLlmSnapshotKey = computed(() => buildLlmSnapshotKey(savedLlmSnapshot.value))
// True when the form differs from the saved configuration (testing only
// exercises saved config, so unsaved edits disable the test button).
const hasSavedLlmChanges = computed(
() => currentLlmSnapshotKey.value !== savedLlmSnapshotKey.value,
)
// The test button is enabled only when the assistant is on, key and model
// are filled in, nothing is saving or testing, and there are no unsaved edits.
const canTestLlm = computed(() => {
  const { AI_AGENT_ENABLE, LLM_API_KEY, LLM_MODEL } = currentLlmSnapshot.value
  if (!AI_AGENT_ENABLE) return false
  if (!LLM_API_KEY.trim() || !LLM_MODEL.trim()) return false
  return !savingBasic.value && !testingLlm.value && !hasSavedLlmChanges.value
})
// Localized reason the test button is disabled; '' when testing is allowed.
// Checks mirror canTestLlm and are evaluated in the same priority order.
const llmTestDisabledReason = computed(() => {
  const { AI_AGENT_ENABLE, LLM_API_KEY, LLM_MODEL } = currentLlmSnapshot.value
  if (!AI_AGENT_ENABLE) return t('setting.system.llmTestDisabledAgent')
  if (!LLM_API_KEY.trim()) return t('setting.system.llmTestDisabledApiKey')
  if (!LLM_MODEL.trim()) return t('setting.system.llmTestDisabledModel')
  if (savingBasic.value) return t('setting.system.llmTestSaving')
  if (testingLlm.value) return t('setting.system.llmTestLoading')
  if (hasSavedLlmChanges.value) return t('setting.system.llmTestDisabledUnsaved')
  return ''
})
const activeTab = ref('system')
@@ -300,6 +380,7 @@ async function saveMediaServerSetting() {
// 加载系统设置
async function loadSystemSettings() {
invalidateLlmTestState()
try {
const result: { [key: string]: any } = await api.get('system/env')
if (result.success) {
@@ -309,6 +390,7 @@ async function loadSystemSettings() {
if (result.data.hasOwnProperty(key)) (SystemSettings.value[sectionKey] as any)[key] = result.data[key]
})
}
savedLlmSnapshot.value = buildLlmSnapshot()
}
} catch (error) {
console.log(error)
@@ -333,8 +415,67 @@ async function saveSystemSetting(value: { [key: string]: any }) {
// 保存基础设置
async function saveBasicSettings() {
if (await saveSystemSetting(SystemSettings.value.Basic)) {
$toast.success(t('setting.system.basicSaveSuccess'))
savingBasic.value = true
try {
if (await saveSystemSetting(SystemSettings.value.Basic)) {
savedLlmSnapshot.value = buildLlmSnapshot()
$toast.success(t('setting.system.basicSaveSuccess'))
} else {
llmTestResult.value = null
}
} finally {
savingBasic.value = false
}
}
/**
 * Fire a test call against the saved LLM configuration.
 * Uses a request id plus an AbortController so that a superseded, aborted, or
 * stale response (settings changed while the request was in flight) never
 * overwrites newer UI state.
 */
async function testLlmConnection() {
if (!canTestLlm.value) return
// Remember which configuration this test was started for.
const snapshot = buildLlmSnapshot()
const snapshotKey = buildLlmSnapshotKey(snapshot)
// Claim a new request id; any older in-flight request becomes stale.
const requestId = ++llmTestRequestId
if (llmTestAbortController) llmTestAbortController.abort()
const abortController = new AbortController()
llmTestAbortController = abortController
testingLlm.value = true
llmTestResult.value = null
try {
const result: { [key: string]: any } = await api.post('system/llm-test', null, {
signal: abortController.signal,
})
// Ignore the response if it is stale, aborted, or the settings changed.
if (requestId !== llmTestRequestId || abortController.signal.aborted || currentLlmSnapshotKey.value !== snapshotKey) {
return
}
const data = result?.data ?? {}
// Fall back to the snapshot values when the backend omits provider/model.
llmTestResult.value = {
success: Boolean(result?.success),
provider: data.provider ?? snapshot.LLM_PROVIDER,
model: data.model ?? snapshot.LLM_MODEL,
duration_ms: data.duration_ms,
reply_preview: data.reply_preview,
message: result?.message,
}
if (result?.success) $toast.success(t('setting.system.llmTestSuccessToast'))
else $toast.error(t('setting.system.llmTestFailedToast'))
} catch (error) {
// Same staleness guard for errors (covers abort-triggered rejections).
if (requestId !== llmTestRequestId || abortController.signal.aborted || currentLlmSnapshotKey.value !== snapshotKey) {
return
}
const message = error instanceof Error ? error.message : String(error)
llmTestResult.value = {
success: false,
provider: snapshot.LLM_PROVIDER,
model: snapshot.LLM_MODEL,
message,
}
$toast.error(t('setting.system.llmTestFailedToast'))
console.log(error)
} finally {
// Only the owner of the current request id may reset shared state.
if (requestId !== llmTestRequestId) return
if (llmTestAbortController === abortController) llmTestAbortController = null
testingLlm.value = false
}
}
@@ -557,6 +698,17 @@ onActivated(async () => {
onDeactivated(() => {
isRequest.value = false
})
// Abort and clear any in-flight LLM test when the component is torn down.
onBeforeUnmount(() => {
invalidateLlmTestState()
})
// Any edit to the LLM-related settings invalidates a previous test result.
watch(
currentLlmSnapshotKey,
(snapshotKey, previousSnapshotKey) => {
if (snapshotKey !== previousSnapshotKey) invalidateLlmTestState()
},
)
</script>
<template>
@@ -854,10 +1006,64 @@ onDeactivated(() => {
</VCardText>
<VCardText>
<VForm @submit.prevent="() => {}">
<VAlert
v-if="SystemSettings.Basic.AI_AGENT_ENABLE && llmTestResult"
:type="llmTestResult.success ? 'success' : 'error'"
variant="tonal"
density="comfortable"
class="mb-4"
>
<div class="text-subtitle-2 mb-2">
{{ llmTestResult.success ? t('setting.system.llmTestSuccess') : t('setting.system.llmTestFailed') }}
</div>
<div class="text-body-2">{{ t('setting.system.llmTestProvider') }}{{ llmTestResult.provider }}</div>
<div class="text-body-2">{{ t('setting.system.llmTestModel') }}{{ llmTestResult.model }}</div>
<div v-if="llmTestResult.duration_ms !== undefined" class="text-body-2">
{{ t('setting.system.llmTestDuration') }}{{ llmTestResult.duration_ms }} ms
</div>
<div v-if="llmTestResult.success && llmTestResult.reply_preview" class="text-body-2">
{{ t('setting.system.llmTestReplyPreview') }}{{ llmTestResult.reply_preview }}
</div>
<div v-else-if="llmTestResult.message" class="text-body-2">
{{ t('setting.system.llmTestErrorMessage') }}{{ llmTestResult.message }}
</div>
</VAlert>
<div class="d-flex flex-wrap gap-4 mt-4">
<VBtn type="submit" @click="saveBasicSettings" prepend-icon="mdi-content-save">
<VBtn
type="submit"
@click="saveBasicSettings"
prepend-icon="mdi-content-save"
:loading="savingBasic"
:disabled="testingLlm"
>
{{ t('common.save') }}
</VBtn>
<VTooltip v-if="SystemSettings.Basic.AI_AGENT_ENABLE && llmTestDisabledReason" location="top">
<template #activator="{ props }">
<span v-bind="props" class="d-inline-flex">
<VBtn
color="secondary"
variant="tonal"
prepend-icon="mdi-connection"
:disabled="true"
:loading="testingLlm"
>
{{ t('setting.system.llmTestAction') }}
</VBtn>
</span>
</template>
<span>{{ llmTestDisabledReason }}</span>
</VTooltip>
<VBtn
v-else-if="SystemSettings.Basic.AI_AGENT_ENABLE"
color="secondary"
variant="tonal"
prepend-icon="mdi-connection"
:loading="testingLlm"
@click="testLlmConnection"
>
{{ t('setting.system.llmTestAction') }}
</VBtn>
<VSpacer />
<VBtn
color="error"