Mirror of https://github.com/jxxghp/MoviePilot-Frontend.git, synced 2026-05-06 20:43:03 +08:00
feat: add unified llm thinking level setting
@@ -54,6 +54,7 @@ export interface WizardData {
   verbose: boolean
   provider: string
   model: string
+  thinkingLevel: string
   supportImageInput: boolean
   apiKey: string
   baseUrl: string
@@ -121,6 +122,33 @@ export interface ValidationErrorState {
   }
 }
+
+function normalizeThinkingLevelValue(value?: unknown) {
+  const normalized = String(value ?? '').trim().toLowerCase()
+  if (!normalized) return ''
+
+  const aliasMap: Record<string, string> = {
+    none: 'off',
+    disabled: 'off',
+    disable: 'off',
+    enabled: 'auto',
+    enable: 'auto',
+    default: 'auto',
+    dynamic: 'auto',
+  }
+
+  return aliasMap[normalized] || normalized
+}
+
+function resolveThinkingLevelValue(data?: Record<string, any>) {
+  const explicit = normalizeThinkingLevelValue(data?.LLM_THINKING_LEVEL)
+  if (explicit) return explicit
+
+  const legacyEffort = normalizeThinkingLevelValue(data?.LLM_REASONING_EFFORT)
+  if (data?.LLM_DISABLE_THINKING === true) return 'off'
+  if (data?.LLM_DISABLE_THINKING === false) return legacyEffort || 'auto'
+  return legacyEffort || 'off'
+}

 // Global state, shared by all components
 const currentStep = ref(1)
 const totalSteps = 8
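To make the migration concrete, here is how resolveThinkingLevelValue (as defined above) handles a few legacy setting combinations — the inputs are illustrative, not taken from the commit:

resolveThinkingLevelValue({ LLM_THINKING_LEVEL: 'High' })                                  // => 'high' (explicit level always wins)
resolveThinkingLevelValue({ LLM_DISABLE_THINKING: true, LLM_REASONING_EFFORT: 'medium' })  // => 'off'  (legacy disable flag overrides legacy effort)
resolveThinkingLevelValue({ LLM_DISABLE_THINKING: false })                                 // => 'auto' (thinking enabled, no effort recorded)
resolveThinkingLevelValue({ LLM_REASONING_EFFORT: 'default' })                             // => 'auto' ('default' is alias-mapped to 'auto')
resolveThinkingLevelValue({})                                                              // => 'off'  (safe fallback when nothing is set)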
@@ -196,6 +224,7 @@ const wizardData = ref<WizardData>({
     verbose: false,
     provider: 'deepseek',
     model: 'deepseek-chat',
+    thinkingLevel: 'off',
     supportImageInput: true,
     apiKey: '',
     baseUrl: 'https://api.deepseek.com',
@@ -1332,6 +1361,7 @@ export function useSetupWizard() {
     AI_AGENT_VERBOSE: wizardData.value.agent.enabled ? wizardData.value.agent.verbose : false,
     LLM_PROVIDER: wizardData.value.agent.provider,
     LLM_MODEL: wizardData.value.agent.model,
+    LLM_THINKING_LEVEL: wizardData.value.agent.thinkingLevel,
     LLM_SUPPORT_IMAGE_INPUT: wizardData.value.agent.supportImageInput,
     LLM_API_KEY: wizardData.value.agent.apiKey,
     LLM_BASE_URL: wizardData.value.agent.baseUrl || null,
@@ -1429,6 +1459,7 @@ export function useSetupWizard() {
     wizardData.value.agent.verbose = Boolean(result.data.AI_AGENT_VERBOSE)
     wizardData.value.agent.provider = result.data.LLM_PROVIDER || 'deepseek'
     wizardData.value.agent.model = result.data.LLM_MODEL || ''
+    wizardData.value.agent.thinkingLevel = resolveThinkingLevelValue(result.data)
     wizardData.value.agent.supportImageInput = result.data.LLM_SUPPORT_IMAGE_INPUT ?? true
     wizardData.value.agent.apiKey = result.data.LLM_API_KEY || ''
     wizardData.value.agent.baseUrl = result.data.LLM_BASE_URL || ''

@@ -1326,6 +1326,17 @@ export default {
   llmProviderHint: 'Select the LLM service provider to use',
   llmModel: 'LLM Model Name',
   llmModelHint: 'Specify the LLM model to use, such as gpt-3.5-turbo, deepseek-chat, etc.',
+  llmThinking: 'Thinking Mode / Depth',
+  llmThinkingHint:
+    'Use one setting to control both thinking off and depth: off/auto/minimal/low/medium/high/max/xhigh. Unsupported levels will be mapped to the nearest provider-supported value.',
+  llmThinkingLevelOff: 'Off (off)',
+  llmThinkingLevelAuto: 'Auto (auto)',
+  llmThinkingLevelMinimal: 'Minimal (minimal)',
+  llmThinkingLevelLow: 'Low (low)',
+  llmThinkingLevelMedium: 'Medium (medium)',
+  llmThinkingLevelHigh: 'High (high)',
+  llmThinkingLevelMax: 'Max (max)',
+  llmThinkingLevelXhigh: 'XHigh (xhigh)',
   llmMaxContextTokens: 'LLM Max Context Tokens (K)',
   llmMaxContextTokensHint:
     'Set the maximum number of context tokens (in thousands) for the LLM. Exceeding this limit will trigger context trimming.',
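The "nearest provider-supported value" behavior the hint describes lives in the backend, which this commit does not touch. As a rough sketch only — LEVEL_ORDER, ThinkingLevel, and mapToSupportedLevel below are hypothetical names, not MoviePilot code — the clamping could look like:

// Hypothetical sketch: clamp a requested thinking level to the closest one a provider supports.
const LEVEL_ORDER = ['off', 'auto', 'minimal', 'low', 'medium', 'high', 'max', 'xhigh'] as const
type ThinkingLevel = (typeof LEVEL_ORDER)[number]

function mapToSupportedLevel(requested: ThinkingLevel, supported: ThinkingLevel[]): ThinkingLevel {
  if (supported.includes(requested)) return requested
  const want = LEVEL_ORDER.indexOf(requested)
  // Assumes `supported` is non-empty; pick the level whose rank is closest to the request.
  return supported.reduce((best, level) =>
    Math.abs(LEVEL_ORDER.indexOf(level) - want) < Math.abs(LEVEL_ORDER.indexOf(best) - want) ? level : best,
  )
}

mapToSupportedLevel('xhigh', ['low', 'medium', 'high']) // => 'high'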
@@ -1321,6 +1321,17 @@ export default {
   llmProviderHint: '选择使用的LLM服务提供商',
   llmModel: 'LLM模型名称',
   llmModelHint: '指定使用的LLM模型,如gpt-3.5-turbo、deepseek-chat等',
+  llmThinking: '思考模式 / 深度',
+  llmThinkingHint:
+    '用一个设置统一控制关闭与深度:off/auto/minimal/low/medium/high/max/xhigh;不支持的级别会按 provider 能力自动映射到最近值',
+  llmThinkingLevelOff: '关闭 (off)',
+  llmThinkingLevelAuto: '自动 (auto)',
+  llmThinkingLevelMinimal: '最小 (minimal)',
+  llmThinkingLevelLow: '低 (low)',
+  llmThinkingLevelMedium: '中 (medium)',
+  llmThinkingLevelHigh: '高 (high)',
+  llmThinkingLevelMax: '极高 (max)',
+  llmThinkingLevelXhigh: '超高 (xhigh)',
   llmSupportImageInput: '模型支持图片输入',
   llmSupportImageInputHint:
     '启用后,消息中的图片会按多模态图片发送给 LLM;关闭后图片会作为附件保存到本地,并将文件路径提供给智能助手处理',
@@ -1323,6 +1323,17 @@ export default {
   llmProviderHint: '選擇使用的LLM服務提供商',
   llmModel: 'LLM模型名稱',
   llmModelHint: '指定使用的LLM模型,如gpt-3.5-turbo、deepseek-chat等',
+  llmThinking: '思考模式 / 深度',
+  llmThinkingHint:
+    '用一個設置統一控制關閉與深度:off/auto/minimal/low/medium/high/max/xhigh;不支援的級別會按 provider 能力自動映射到最近值',
+  llmThinkingLevelOff: '關閉 (off)',
+  llmThinkingLevelAuto: '自動 (auto)',
+  llmThinkingLevelMinimal: '最小 (minimal)',
+  llmThinkingLevelLow: '低 (low)',
+  llmThinkingLevelMedium: '中 (medium)',
+  llmThinkingLevelHigh: '高 (high)',
+  llmThinkingLevelMax: '極高 (max)',
+  llmThinkingLevelXhigh: '超高 (xhigh)',
   llmSupportImageInput: '模型支援圖片輸入',
   llmSupportImageInputHint:
     '啟用後,消息中的圖片會按多模態圖片發送給 LLM;關閉後圖片會作為附件保存到本地,並將檔案路徑提供給智能助手處理',
@@ -37,6 +37,7 @@ const SystemSettings = ref<any>({
   AI_AGENT_JOB_INTERVAL: 24,
   LLM_PROVIDER: 'deepseek',
   LLM_MODEL: 'deepseek-chat',
+  LLM_THINKING_LEVEL: 'off',
   LLM_SUPPORT_IMAGE_INPUT: false,
   LLM_API_KEY: null,
   LLM_BASE_URL: 'https://api.deepseek.com',
@@ -166,6 +167,7 @@ type LlmSettingsSnapshot = {
   AI_AGENT_ENABLE: boolean
   LLM_PROVIDER: string
   LLM_MODEL: string
+  LLM_THINKING_LEVEL: string
   LLM_API_KEY: string
   LLM_BASE_URL: string
 }
@@ -178,6 +180,7 @@ function buildLlmSnapshot(): LlmSettingsSnapshot {
     AI_AGENT_ENABLE: Boolean(SystemSettings.value.Basic.AI_AGENT_ENABLE),
     LLM_PROVIDER: String(SystemSettings.value.Basic.LLM_PROVIDER ?? ''),
     LLM_MODEL: String(SystemSettings.value.Basic.LLM_MODEL ?? ''),
+    LLM_THINKING_LEVEL: String(SystemSettings.value.Basic.LLM_THINKING_LEVEL ?? 'off'),
     LLM_API_KEY: String(SystemSettings.value.Basic.LLM_API_KEY ?? ''),
     LLM_BASE_URL: String(SystemSettings.value.Basic.LLM_BASE_URL ?? ''),
   }
@@ -192,11 +195,39 @@ function buildLlmTestPayload(snapshot: LlmSettingsSnapshot) {
     enabled: snapshot.AI_AGENT_ENABLE,
     provider: snapshot.LLM_PROVIDER.trim(),
     model: snapshot.LLM_MODEL.trim(),
+    thinking_level: snapshot.LLM_THINKING_LEVEL.trim(),
     api_key: snapshot.LLM_API_KEY.trim(),
     base_url: snapshot.LLM_BASE_URL.trim(),
   }
 }
+
+function normalizeThinkingLevelValue(value?: unknown) {
+  const normalized = String(value ?? '').trim().toLowerCase()
+  if (!normalized) return ''
+
+  const aliasMap: Record<string, string> = {
+    none: 'off',
+    disabled: 'off',
+    disable: 'off',
+    enabled: 'auto',
+    enable: 'auto',
+    default: 'auto',
+    dynamic: 'auto',
+  }
+
+  return aliasMap[normalized] || normalized
+}
+
+function resolveThinkingLevelValue(data?: Record<string, any>) {
+  const explicit = normalizeThinkingLevelValue(data?.LLM_THINKING_LEVEL)
+  if (explicit) return explicit
+
+  const legacyEffort = normalizeThinkingLevelValue(data?.LLM_REASONING_EFFORT)
+  if (data?.LLM_DISABLE_THINKING === true) return 'off'
+  if (data?.LLM_DISABLE_THINKING === false) return legacyEffort || 'auto'
+  return legacyEffort || 'off'
+}

 function showLlmTestFailedToast(message?: string) {
   const normalizedMessage = String(message ?? '').trim()
   if (normalizedMessage) {
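Assuming buildLlmTestPayload simply returns the object shown in this hunk, the connectivity-test request body would look like this (all values are illustrative placeholders):

const payload = buildLlmTestPayload({
  AI_AGENT_ENABLE: true,
  LLM_PROVIDER: 'deepseek',
  LLM_MODEL: 'deepseek-chat',
  LLM_THINKING_LEVEL: 'auto',
  LLM_API_KEY: 'sk-xxxx', // placeholder, not a real key
  LLM_BASE_URL: 'https://api.deepseek.com',
})
// payload => { enabled: true, provider: 'deepseek', model: 'deepseek-chat',
//              thinking_level: 'auto', api_key: 'sk-xxxx', base_url: 'https://api.deepseek.com' }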
@@ -230,6 +261,17 @@ const canTestLlm = computed(() => {
   )
 })

+const thinkingLevelItems = computed(() => [
+  { title: t('setting.system.llmThinkingLevelOff'), value: 'off' },
+  { title: t('setting.system.llmThinkingLevelAuto'), value: 'auto' },
+  { title: t('setting.system.llmThinkingLevelMinimal'), value: 'minimal' },
+  { title: t('setting.system.llmThinkingLevelLow'), value: 'low' },
+  { title: t('setting.system.llmThinkingLevelMedium'), value: 'medium' },
+  { title: t('setting.system.llmThinkingLevelHigh'), value: 'high' },
+  { title: t('setting.system.llmThinkingLevelMax'), value: 'max' },
+  { title: t('setting.system.llmThinkingLevelXhigh'), value: 'xhigh' },
+])
+
 const activeTab = ref('system')

 // Metadata language
@@ -380,6 +422,7 @@ async function loadSystemSettings() {
         if (result.data.hasOwnProperty(key)) (SystemSettings.value[sectionKey] as any)[key] = result.data[key]
       })
     }
+    SystemSettings.value.Basic.LLM_THINKING_LEVEL = resolveThinkingLevelValue(result.data)
   }
 } catch (error) {
   console.log(error)
@@ -948,6 +991,15 @@ watch(currentLlmSnapshotKey, (snapshotKey, previousSnapshotKey) => {
             persistent-hint
           />
         </VCol>
+        <VCol v-if="SystemSettings.Basic.AI_AGENT_ENABLE" cols="12" md="6">
+          <VSelect
+            v-model="SystemSettings.Basic.LLM_THINKING_LEVEL"
+            :label="t('setting.system.llmThinking')"
+            :hint="t('setting.system.llmThinkingHint')"
+            :items="thinkingLevelItems"
+            persistent-hint
+          />
+        </VCol>
         <VCol v-if="SystemSettings.Basic.AI_AGENT_ENABLE" cols="12" md="6">
           <VSwitch
             v-model="SystemSettings.Basic.AI_AGENT_RETRY_TRANSFER"

@@ -27,6 +27,17 @@ const jobIntervalItems = computed(() => [
   { title: t('setting.system.aiAgentJobInterval1M'), value: 720 },
 ])

+const thinkingLevelItems = computed(() => [
+  { title: t('setting.system.llmThinkingLevelOff'), value: 'off' },
+  { title: t('setting.system.llmThinkingLevelAuto'), value: 'auto' },
+  { title: t('setting.system.llmThinkingLevelMinimal'), value: 'minimal' },
+  { title: t('setting.system.llmThinkingLevelLow'), value: 'low' },
+  { title: t('setting.system.llmThinkingLevelMedium'), value: 'medium' },
+  { title: t('setting.system.llmThinkingLevelHigh'), value: 'high' },
+  { title: t('setting.system.llmThinkingLevelMax'), value: 'max' },
+  { title: t('setting.system.llmThinkingLevelXhigh'), value: 'xhigh' },
+])
+
 async function loadLlmModels() {
   if (!wizardData.value.agent.provider || !wizardData.value.agent.apiKey) {
     return
@@ -89,7 +100,7 @@ onMounted(() => {
       </VCol>

       <template v-if="wizardData.agent.enabled">
-        <VCol cols="12" md="4">
+        <VCol cols="12" md="3">
           <VSwitch
             v-model="wizardData.agent.global"
             :label="t('setting.system.aiAgentGlobal')"

@@ -99,7 +110,7 @@ onMounted(() => {
           />
         </VCol>

-        <VCol cols="12" md="4">
+        <VCol cols="12" md="3">
           <VSwitch
             v-model="wizardData.agent.verbose"
             :label="t('setting.system.aiAgentVerbose')"

@@ -109,7 +120,7 @@ onMounted(() => {
           />
         </VCol>

-        <VCol cols="12" md="4">
+        <VCol cols="12" md="3">
           <VSwitch
             v-model="wizardData.agent.supportImageInput"
             :label="t('setting.system.llmSupportImageInput')"
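(Layout note: the three switches shrink from md="4" to md="3" presumably because the thinking-level select added in the next hunk makes a fourth md="3" control, so all four fit a single 12-column row on medium screens.)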
@@ -119,6 +130,17 @@ onMounted(() => {
           />
         </VCol>

+        <VCol cols="12" md="3">
+          <VSelect
+            v-model="wizardData.agent.thinkingLevel"
+            :label="t('setting.system.llmThinking')"
+            :hint="t('setting.system.llmThinkingHint')"
+            :items="thinkingLevelItems"
+            persistent-hint
+            color="primary"
+          />
+        </VCol>
+
         <VCol cols="12" md="6">
           <VSelect
             v-model="wizardData.agent.provider"