Compare commits

...

18 Commits
v2.10.8 ... v2

Author SHA1 Message Date
jxxghp
a9403c9c34 chore: bump version to 2.10.11 2026-05-07 08:23:20 +08:00
jxxghp
dc4914e3ca style: adjust downloader card API key field to span full width 2026-05-07 08:22:39 +08:00
jxxghp
f3dbc4afad feat: add qBittorrent API key setup support
Expose qBittorrent WebUI API Key fields in settings and setup so 5.2 users can connect without requiring username/password.

Refs jxxghp/MoviePilot#5724
2026-05-07 07:41:05 +08:00
jxxghp
e3e22aebd9 feat: replace log level chips with VSelect dropdown in LoggingView and adjust layout spacing 2026-05-06 13:04:35 +08:00
jxxghp
0ca2f20b24 refactor: update logging record layout to use block-level elements for better alignment and structure 2026-05-06 08:02:50 +08:00
jxxghp
14279c773d fix: update LoggingView layout to support responsive height for mobile devices 2026-05-05 12:37:30 +08:00
jxxghp
8372f63eb6 refactor: dynamic logging view height calculation and remove redundant LLM model refresh on settings save 2026-05-05 12:34:09 +08:00
jxxghp
b7b62d7922 feat: overhaul logging view with advanced filtering, grouped display, and real-time streaming controls 2026-05-05 11:53:21 +08:00
jxxghp
162cce1f50 feat: replace VSelect with VAutocomplete for LLM provider selection in settings 2026-05-04 20:04:14 +08:00
jxxghp
aa49c6ccbc refactor(llm): merge preset selection into base URL field
Use a single editable Base URL combobox for LLM providers so preset endpoints and manual input share one field, with preset types shown as subtitles.
2026-05-03 11:31:06 +08:00
jxxghp
a40e52079f 更新 package.json 2026-05-03 09:44:37 +08:00
jxxghp
c29e329548 feat(llm): add provider URL presets
Expose provider-specific preset endpoints in the setup and system settings flows so users can start from the correct base URL while still editing it manually.
2026-05-03 09:38:28 +08:00
mcgrady.sun
e2d26f6a25 fix(resource): 解决重新搜索按钮 review 问题
- 简化 refreshSearch:移除多余的 switchToOriginalResults 调用,
  直接置 showingAiResults=false,其余状态由 fetchData 内部重置
- 标题栏 v-if 去掉 !progressActive 条件,避免点击重新搜索时
  整个标题栏 unmount 导致按钮 :loading 不可见、页面跳动
2026-05-02 16:33:19 +08:00
mcgrady.sun
1752256868 feat(resource): 资源搜索结果页增加重新搜索按钮
- 在搜索结果页右侧操作区新增"重新搜索"按钮(mdi-refresh 图标)
- 点击后使用相同搜索参数重新触发请求;正在请求或加载中时按钮禁用
- 若当前展示的是 AI 推荐结果,先切回原始结果再重新搜索,避免状态不一致
- 同步补充 zh-CN / zh-TW / en-US 三份本地化文案
2026-05-02 16:33:19 +08:00
jxxghp
23d7f0dcc1 更新 package.json 2026-04-30 11:55:22 +08:00
jxxghp
288aeed178 chore: update LLM model hint examples in localization files 2026-04-30 11:29:17 +08:00
jxxghp
9a9a618136 refactor: extract LLM provider management logic into composable and add OAuth support for system settings 2026-04-30 09:49:05 +08:00
jxxghp
723eb319e1 feat: add batch AI reorganization support to Transfer History view 2026-04-29 20:37:52 +08:00
15 changed files with 2123 additions and 246 deletions

View File

@@ -1,6 +1,6 @@
{
"name": "moviepilot",
"version": "2.10.8",
"version": "2.10.11",
"private": true,
"type": "module",
"bin": "dist/service.js",

View File

@@ -346,11 +346,23 @@ onUnmounted(() => {
prepend-inner-icon="mdi-server"
/>
</VCol>
<VCol cols="12">
<VTextField
v-model="downloaderInfo.config.apikey"
type="password"
:label="t('downloader.apiKey')"
:hint="t('downloader.qbittorrentApiKeyHint')"
persistent-hint
active
prepend-inner-icon="mdi-key-variant"
/>
</VCol>
<VCol cols="12" md="6">
<VTextField
v-model="downloaderInfo.config.username"
:label="t('downloader.username')"
:hint="t('downloader.username')"
:disabled="!!downloaderInfo.config.apikey"
persistent-hint
active
prepend-inner-icon="mdi-account"
@@ -362,6 +374,7 @@ onUnmounted(() => {
type="password"
:label="t('downloader.password')"
:hint="t('downloader.password')"
:disabled="!!downloaderInfo.config.apikey"
persistent-hint
active
prepend-inner-icon="mdi-lock"

View File

@@ -566,13 +566,13 @@ watch(
</VDialog>
<!-- 实时日志弹窗 -->
<VDialog
v-if="loggingDialog"
v-model="loggingDialog"
scrollable
max-width="60rem"
:fullscreen="!display.mdAndUp.value"
>
<VDialog
v-if="loggingDialog"
v-model="loggingDialog"
scrollable
max-width="72rem"
:fullscreen="!display.mdAndUp.value"
>
<VCard>
<VDialogCloseBtn @click="loggingDialog = false" />
<VCardItem>
@@ -588,7 +588,7 @@ watch(
</VCardTitle>
</VCardItem>
<VDivider />
<VCardText>
<VCardText class="pa-0">
<LoggingView :logfile="`plugins/${props.plugin?.id?.toLowerCase()}.log`" />
</VCardText>
</VCard>

View File

@@ -0,0 +1,380 @@
import { computed, onBeforeUnmount, ref, type Ref } from 'vue'
import api from '@/api'
/** One authorization method (e.g. an OAuth or device-code flow) offered by a provider. */
export interface LlmProviderAuthMethod {
  id: string
  type: string
  label: string
  description?: string
}
/** Current authorization state of a provider, as reported by the backend. */
export interface LlmProviderAuthStatus {
  connected: boolean
  type?: string
  label?: string
  // Expiry/update timestamps from the backend — presumably epoch-based; TODO confirm units.
  expires_at?: number | null
  updated_at?: number | null
}
/** A preset base-URL endpoint for a provider (raw backend shape). */
export interface LlmProviderUrlPreset {
  label: string
  value: string
}
/** Base-URL preset mapped into the item shape used by the URL combobox. */
export interface LlmProviderUrlPresetItem {
  title: string
  value: string
  subtitle?: string
}
/** One provider entry from the backend directory (`llm/providers`). */
export interface LlmProvider {
  id: string
  name: string
  runtime: string
  default_base_url: string
  base_url_presets?: LlmProviderUrlPreset[]
  base_url_editable: boolean
  requires_base_url: boolean
  supports_api_key: boolean
  api_key_label: string
  api_key_hint: string
  supports_model_refresh: boolean
  oauth_methods: LlmProviderAuthMethod[]
  description?: string
  auth_status: LlmProviderAuthStatus
}
/** One model from the provider catalog (`llm/models`). */
export interface LlmModel {
  id: string
  name: string
  family?: string
  context_tokens?: number | null
  input_tokens?: number | null
  output_tokens?: number | null
  // Context size expressed in K tokens; used to backfill the max-context setting.
  context_tokens_k?: number | null
  supports_reasoning?: boolean
  supports_tools?: boolean
  supports_image_input?: boolean
  supports_audio_input?: boolean
  transport?: string
  source?: string
  release_date?: string | null
  status?: string | null
}
/** State of an in-flight provider authorization session. */
export interface LlmProviderAuthSession {
  session_id: string
  provider_id: string
  flow_type: string
  // 'pending' keeps the poll loop alive; any other value ends polling.
  status: string
  message?: string
  authorize_url?: string
  verification_url?: string
  user_code?: string
  instructions?: string
  // Suggested delay between poll requests in seconds; a 5s default is applied when absent.
  interval_seconds?: number
  expires_at?: number
}
/** Reactive form refs owned by the caller; the composable both reads and writes them. */
interface UseLlmProviderDirectoryOptions {
  provider: Ref<string>
  apiKey: Ref<string>
  baseUrl: Ref<string>
  model: Ref<string>
  // Max context in K tokens; backfilled from the model catalog when available.
  maxContextTokens?: Ref<number>
  // Mirrors the selected provider's connected auth state back to the caller.
  authConnected?: Ref<boolean>
}
/**
 * Coerce an arbitrary value to a trimmed string; `null` and `undefined`
 * normalize to the empty string.
 */
function normalizeValue(value: unknown): string {
  if (value === null || value === undefined) return ''
  return String(value).trim()
}
/**
 * Composable managing the LLM provider directory: loading providers and
 * models from the backend, keeping the bound form refs (provider, base URL,
 * API key, model, max context) consistent with the selected provider, and
 * driving the provider authorization flow (popup window + session polling).
 *
 * @param options Reactive refs owned by the caller's form; the composable
 *   reads and writes them directly.
 * @returns Reactive state plus the actions used by the settings/setup UIs.
 */
export function useLlmProviderDirectory(options: UseLlmProviderDirectoryOptions) {
  const providers = ref<LlmProvider[]>([])
  const models = ref<LlmModel[]>([])
  const loadingProviders = ref(false)
  const loadingModels = ref(false)
  const authDialogVisible = ref(false)
  const authPolling = ref(false)
  const authPopupBlocked = ref(false)
  const authSession = ref<LlmProviderAuthSession | null>(null)
  // Pending window.setTimeout id for auth-session polling; null when idle.
  let pollTimer: number | null = null

  const selectedProvider = computed(
    () => providers.value.find(item => item.id === normalizeValue(options.provider.value)) || null,
  )
  const selectedModel = computed(
    () => models.value.find(item => item.id === normalizeValue(options.model.value)) || null,
  )
  const providerItems = computed(() => providers.value.map(item => ({ title: item.name, value: item.id })))
  // Preset endpoints rendered as combobox items; the preset label becomes the subtitle.
  const baseUrlPresetItems = computed<LlmProviderUrlPresetItem[]>(() =>
    (selectedProvider.value?.base_url_presets || []).map(item => ({
      title: item.value,
      value: item.value,
      subtitle: item.label,
    })),
  )
  const providerConnected = computed(() => Boolean(selectedProvider.value?.auth_status?.connected))
  // The base URL field is only shown for providers that offer no OAuth methods.
  const showBaseUrlField = computed(
    () => Boolean(selectedProvider.value && (selectedProvider.value.oauth_methods || []).length === 0),
  )
  const showApiKeyField = computed(() => selectedProvider.value?.supports_api_key !== false)
  // Either a completed provider authorization or a non-empty API key counts as a credential.
  const hasUsableCredential = computed(() => {
    if (providerConnected.value) return true
    return Boolean(normalizeValue(options.apiKey.value))
  })
  const canRefreshModels = computed(() => {
    if (!selectedProvider.value?.supports_model_refresh) return false
    if (!hasUsableCredential.value) return false
    if (selectedProvider.value.requires_base_url && !normalizeValue(options.baseUrl.value)) return false
    return true
  })

  // Cancel any scheduled auth-session poll.
  function clearPollTimer() {
    if (pollTimer !== null) {
      window.clearTimeout(pollTimer)
      pollTimer = null
    }
  }

  // Mirror the selected provider's connected state into the caller's ref, if bound.
  function syncAuthConnected() {
    if (options.authConnected) {
      options.authConnected.value = providerConnected.value
    }
  }

  /**
   * Keep the base-URL ref populated: with `reset` the provider default always
   * wins; otherwise the default only fills an empty field.
   */
  function ensureBaseUrl(reset = false) {
    const provider = selectedProvider.value
    if (!provider) return
    const currentBaseUrl = normalizeValue(options.baseUrl.value)
    const defaultBaseUrl = provider.default_base_url || ''
    if (reset) {
      options.baseUrl.value = defaultBaseUrl
      return
    }
    if (!currentBaseUrl && defaultBaseUrl) {
      options.baseUrl.value = defaultBaseUrl
    }
  }

  /**
   * React to the user picking a different provider: reset the dependent form
   * fields (API key, model list, max context) and resync the auth flag.
   */
  function handleProviderSelection(resetBaseUrl = true) {
    ensureBaseUrl(resetBaseUrl)
    options.apiKey.value = ''
    if (options.maxContextTokens) {
      options.maxContextTokens.value = 64
    }
    models.value = []
    options.model.value = ''
    syncAuthConnected()
  }

  /**
   * Backfill model metadata (currently max context) into the form refs.
   * @returns The matched model, or null when the id is unknown.
   */
  function applyModelMetadata(modelId?: string) {
    const targetId = normalizeValue(modelId ?? options.model.value)
    if (!targetId) return null
    const matched = models.value.find(item => item.id === targetId) || null
    if (matched?.context_tokens_k && options.maxContextTokens) {
      // models.dev / the provider report exact tokens; backfill into the existing K-unit setting.
      options.maxContextTokens.value = matched.context_tokens_k
    }
    return matched
  }

  // Replace one provider's auth status in place (immutable update for reactivity).
  function updateProviderAuthStatus(providerId: string, authStatus?: LlmProviderAuthStatus) {
    if (!authStatus) return
    const index = providers.value.findIndex(item => item.id === providerId)
    if (index === -1) return
    providers.value[index] = {
      ...providers.value[index],
      auth_status: authStatus,
    }
    syncAuthConnected()
  }

  /**
   * Load the provider directory from the backend. Selects the first provider
   * when none is selected yet.
   * @param preserveBaseUrl When false, the base URL is reset to the provider default.
   * @throws Error when the backend reports failure.
   */
  async function loadProviders(preserveBaseUrl = true) {
    loadingProviders.value = true
    try {
      const result: { [key: string]: any } = await api.get('llm/providers')
      if (!result.success) {
        throw new Error(result.message || 'Load LLM providers failed')
      }
      providers.value = Array.isArray(result.data) ? result.data : []
      if (!selectedProvider.value && providers.value.length > 0) {
        options.provider.value = providers.value[0].id
      }
      ensureBaseUrl(!preserveBaseUrl)
      syncAuthConnected()
      return providers.value
    } finally {
      loadingProviders.value = false
    }
  }

  /**
   * Load the model catalog for the selected provider and keep the selected
   * model valid (falling back to the first model when the current one is gone).
   * @throws Error when the backend reports failure.
   */
  async function loadModels(forceRefresh = false) {
    if (!selectedProvider.value) return []
    loadingModels.value = true
    try {
      const result: { [key: string]: any } = await api.get('llm/models', {
        params: {
          provider: normalizeValue(options.provider.value),
          api_key: normalizeValue(options.apiKey.value) || undefined,
          base_url: normalizeValue(options.baseUrl.value) || undefined,
          force_refresh: forceRefresh,
        },
      })
      if (!result.success) {
        throw new Error(result.message || 'Load LLM models failed')
      }
      const payload = result.data || {}
      models.value = Array.isArray(payload.models) ? payload.models : []
      // The models endpoint may piggyback a fresh auth status; apply it.
      updateProviderAuthStatus(normalizeValue(options.provider.value), payload.auth_status)
      const currentModelId = normalizeValue(options.model.value)
      const matchedModel = currentModelId
        ? models.value.find(item => item.id === currentModelId)
        : null
      if (matchedModel) {
        applyModelMetadata(matchedModel.id)
      } else if (models.value.length > 0) {
        options.model.value = models.value[0].id
        applyModelMetadata(models.value[0].id)
      }
      return models.value
    } finally {
      loadingModels.value = false
    }
  }

  // Open the authorization page in a popup; flags when the browser blocks it.
  function openAuthPage() {
    const session = authSession.value
    const targetUrl = session?.authorize_url || session?.verification_url
    if (!targetUrl) return
    const popup = window.open(targetUrl, '_blank', 'noopener,noreferrer,width=960,height=780')
    authPopupBlocked.value = !popup
  }

  /**
   * Poll the current auth session once. While the session stays 'pending' the
   * next poll is rescheduled; on completion providers (and, when authorized,
   * models) are reloaded.
   * @throws Error when the backend reports a poll failure.
   */
  async function pollAuthSession() {
    if (!authSession.value) return null
    authPolling.value = true
    clearPollTimer()
    try {
      const result: { [key: string]: any } = await api.post(
        `llm/provider-auth/${authSession.value.session_id}/poll`,
      )
      if (!result.success) {
        throw new Error(result.message || 'Poll LLM auth failed')
      }
      authSession.value = {
        ...authSession.value,
        ...result.data,
      }
      const nextSession = authSession.value
      if (!nextSession) return null
      if (nextSession.status === 'pending') {
        pollTimer = window.setTimeout(
          () => pollAuthSession().catch(() => undefined),
          Math.max(nextSession.interval_seconds || 5, 1) * 1000,
        )
        return nextSession
      }
      await loadProviders()
      if (nextSession.status === 'authorized') {
        await loadModels(true).catch(() => undefined)
      }
      return nextSession
    } finally {
      authPolling.value = false
    }
  }

  /**
   * Start an authorization session for the selected provider, open the auth
   * page, and schedule the first poll.
   * @throws Error when no provider is selected or the backend reports failure.
   */
  async function startAuth(methodId: string) {
    if (!selectedProvider.value) {
      throw new Error('LLM provider is required')
    }
    const result: { [key: string]: any } = await api.post('llm/provider-auth/start', {
      provider: normalizeValue(options.provider.value),
      method: methodId,
    })
    if (!result.success) {
      throw new Error(result.message || 'Start LLM auth failed')
    }
    // Fix: cancel any poll still scheduled for a previous session before
    // scheduling a new one — otherwise the stale timer keeps firing and
    // races the session started below.
    clearPollTimer()
    authSession.value = {
      status: 'pending',
      provider_id: normalizeValue(options.provider.value),
      ...result.data,
    }
    authDialogVisible.value = true
    authPopupBlocked.value = false
    openAuthPage()
    // Give the popup a moment to load before the first poll.
    pollTimer = window.setTimeout(() => pollAuthSession().catch(() => undefined), 1200)
    return authSession.value
  }

  /**
   * Revoke the selected provider's authorization and reload the directory.
   * @throws Error when the backend reports failure.
   */
  async function disconnectAuth() {
    if (!selectedProvider.value) return false
    const result: { [key: string]: any } = await api.delete(
      `llm/provider-auth/${normalizeValue(options.provider.value)}`,
    )
    if (!result.success) {
      throw new Error(result.message || 'Disconnect LLM auth failed')
    }
    await loadProviders()
    return true
  }

  // Close the auth dialog and stop polling.
  function closeAuthDialog() {
    authDialogVisible.value = false
    clearPollTimer()
  }

  // Make sure no timer outlives the component that owns this composable.
  onBeforeUnmount(() => {
    clearPollTimer()
  })

  return {
    providers,
    providerItems,
    baseUrlPresetItems,
    models,
    selectedProvider,
    selectedModel,
    loadingProviders,
    loadingModels,
    providerConnected,
    showBaseUrlField,
    showApiKeyField,
    hasUsableCredential,
    canRefreshModels,
    authDialogVisible,
    authPolling,
    authPopupBlocked,
    authSession,
    handleProviderSelection,
    applyModelMetadata,
    loadProviders,
    loadModels,
    openAuthPage,
    startAuth,
    pollAuthSession,
    disconnectAuth,
    closeAuthDialog,
  }
}

View File

@@ -53,6 +53,7 @@ export interface WizardData {
global: boolean
verbose: boolean
provider: string
authConnected: boolean
model: string
thinkingLevel: string
supportImageInput: boolean
@@ -106,6 +107,7 @@ export interface ValidationErrorState {
downloader: {
name: boolean
host: boolean
apikey: boolean
username: boolean
password: boolean
}
@@ -231,6 +233,7 @@ const wizardData = ref<WizardData>({
global: false,
verbose: false,
provider: 'deepseek',
authConnected: false,
model: 'deepseek-chat',
thinkingLevel: 'off',
supportImageInput: true,
@@ -275,6 +278,7 @@ const validationErrors = ref<ValidationErrorState>({
downloader: {
name: false,
host: false,
apikey: false,
username: false,
password: false,
},
@@ -464,6 +468,7 @@ export function useSetupWizard() {
validationErrors.value.downloader = {
name: false,
host: false,
apikey: false,
username: false,
password: false,
}
@@ -546,9 +551,18 @@ export function useSetupWizard() {
}
// 根据下载器类型验证其他必输项
if (
wizardData.value.downloader.type === 'qbittorrent'
|| wizardData.value.downloader.type === 'transmission'
if (wizardData.value.downloader.type === 'qbittorrent') {
const hasApiKey = !!wizardData.value.downloader.config?.apikey?.trim()
if (!hasApiKey && !wizardData.value.downloader.config?.username?.trim()) {
errors.push(t('downloader.usernameRequired'))
validationErrors.value.downloader.username = true
}
if (!hasApiKey && !wizardData.value.downloader.config?.password?.trim()) {
errors.push(t('downloader.passwordRequired'))
validationErrors.value.downloader.password = true
}
} else if (
wizardData.value.downloader.type === 'transmission'
|| wizardData.value.downloader.type === 'rtorrent'
) {
if (!wizardData.value.downloader.config?.username?.trim()) {
@@ -717,8 +731,8 @@ export function useSetupWizard() {
validationErrors.value.agent.provider = true
}
if (!wizardData.value.agent.apiKey?.trim()) {
errors.push(t('setupWizard.agent.apiKeyRequired'))
if (!wizardData.value.agent.apiKey?.trim() && !wizardData.value.agent.authConnected) {
errors.push(t('setupWizard.agent.authOrApiKeyRequired'))
validationErrors.value.agent.apiKey = true
}
@@ -1482,6 +1496,7 @@ export function useSetupWizard() {
wizardData.value.agent.global = Boolean(result.data.AI_AGENT_GLOBAL)
wizardData.value.agent.verbose = Boolean(result.data.AI_AGENT_VERBOSE)
wizardData.value.agent.provider = result.data.LLM_PROVIDER || 'deepseek'
wizardData.value.agent.authConnected = false
wizardData.value.agent.model = result.data.LLM_MODEL || ''
wizardData.value.agent.thinkingLevel = resolveThinkingLevelValue(result.data)
wizardData.value.agent.supportImageInput = result.data.LLM_SUPPORT_IMAGE_INPUT ?? true

View File

@@ -361,13 +361,13 @@ onMounted(() => {
</VCard>
</VDialog>
<!-- 实时日志弹窗 -->
<VDialog
v-if="loggingDialog"
v-model="loggingDialog"
scrollable
max-width="70rem"
:fullscreen="!display.mdAndUp.value"
>
<VDialog
v-if="loggingDialog"
v-model="loggingDialog"
scrollable
max-width="80rem"
:fullscreen="!display.mdAndUp.value"
>
<VCard>
<VDialogCloseBtn @click="loggingDialog = false" />
<VCardItem>
@@ -383,7 +383,7 @@ onMounted(() => {
</VCardTitle>
</VCardItem>
<VDivider />
<VCardText>
<VCardText class="pa-0">
<LoggingView logfile="moviepilot.log" />
</VCardText>
</VCard>

View File

@@ -460,7 +460,8 @@ export default {
botSecret: 'Bot Secret',
botSecretHint: 'WebSocket secret of the WeChat Work AI bot',
botChatId: 'Default Target',
botChatIdHint: 'Use user userid; for proactive group messages use group:chatid. Leave empty to notify known interacted users',
botChatIdHint:
'Use user userid; for proactive group messages use group:chatid. Leave empty to notify known interacted users',
botChatIdPlaceholder: 'userid or group:chatid',
botWsUrl: 'WebSocket URL',
botWsUrlHint: 'WebSocket endpoint for the WeChat Work AI bot, usually the default value',
@@ -997,6 +998,7 @@ export default {
aiRecommend: 'AI Recommendation',
reRecommend: 'Regenerate Recommendation',
aiRecommendError: 'AI Recommendation Failed',
refreshSearch: 'Re-search',
},
browse: {
actor: 'Actor',
@@ -1232,6 +1234,17 @@ export default {
content: 'Content',
refreshing: 'Refreshing',
initializing: 'Initializing',
searchPlaceholder: 'Search logs',
allLevels: 'All Levels',
followTail: 'Follow latest logs',
wrapLines: 'Wrap lines',
pauseStream: 'Pause stream',
resumeStream: 'Resume stream',
waitingForLogs: 'Waiting for logs...',
paused: 'Paused',
connected: 'Live',
lineCount: 'Showing {visible}/{total} lines',
jumpToLatest: 'Jump to latest ({count})',
},
moduleTest: {
normal: 'Normal',
@@ -1328,7 +1341,8 @@ export default {
llmProvider: 'LLM Provider',
llmProviderHint: 'Select the LLM service provider to use',
llmModel: 'LLM Model Name',
llmModelHint: 'Specify the LLM model to use, such as gpt-3.5-turbo, deepseek-chat, etc.',
llmModelHint: 'Specify the LLM model to use, such as deepseek-v4-flash, gpt-5.4, etc.',
llmModelResolvedHint: 'Max context has been auto-filled to {context}K from the model catalog. Source: {source}',
llmThinking: 'Thinking Mode / Depth',
llmThinkingHint:
'Thinking depth: off/auto/minimal/low/medium/high/max/xhigh. Unsupported levels will be mapped to the nearest provider-supported value.',
@@ -1354,6 +1368,18 @@ export default {
llmApiKeyPlaceholder: 'Please enter API key',
llmBaseUrl: 'LLM Base URL',
llmBaseUrlHint: 'Base URL for LLM API, used for custom API endpoints',
llmProviderAuth: 'Provider Authorization',
llmProviderAuthHint:
'Providers that support account authorization can complete sign-in here and reuse the saved auth state.',
llmProviderConnectedAs: 'Connected as: {label}',
llmProviderDisconnect: 'Disconnect Authorization',
llmProviderDisconnected: 'Provider authorization disconnected',
llmProviderAuthDialogTitle: 'Provider Authorization',
llmProviderPopupBlocked:
'The browser blocked the authorization popup. Use the button below to continue manually.',
llmProviderDeviceCode: 'Device Code',
llmProviderOpenAuthPage: 'Open Authorization Page',
llmProviderCheckAuthStatus: 'Check Authorization Status',
aiVoiceApiKey: 'Audio API Key',
aiVoiceApiKeyHint:
'API key used for audio transcription and speech synthesis. Falls back to the current LLM API key when left blank.',
@@ -1475,8 +1501,9 @@ export default {
fanartEnableHint: 'Use image data from fanart.tv',
fanartLang: 'Fanart Language',
fanartLangHint: 'Set language preference for Fanart images, ordered by priority when multiple selected',
recognizePluginFirst: "Prioritize Plugin Recognition",
recognizePluginFirstHint: "Prioritize calling plugins for media recognition. If a plugin matches, native recognition will be skipped",
recognizePluginFirst: 'Prioritize Plugin Recognition',
recognizePluginFirstHint:
'Prioritize calling plugins for media recognition. If a plugin matches, native recognition will be skipped',
githubProxy: 'Github Acceleration Proxy',
githubProxyPlaceholder: 'Leave empty for no proxy',
githubProxyHint: 'Use proxy to accelerate Github access speed',
@@ -1598,7 +1625,7 @@ export default {
skipDesc: 'Skip scraping, this file will not be generated',
missingOnlyDesc: 'Scrape only if missing, existing file remains unchanged',
overwriteDesc: 'Always scrape, existing file will be overwritten',
}
},
},
site: {
siteSync: 'Site Synchronization',
@@ -2842,6 +2869,7 @@ export default {
actions: {
aiRedo: 'Assistant Organize',
aiRedoPending: 'Assistant Organizing...',
batchAiRedo: 'Assistant Batch Organize',
redo: 'Reorganize',
delete: 'Delete',
batchRedo: 'Batch Reorganize',
@@ -2905,8 +2933,10 @@ export default {
rtorrentHostHint: 'HTTP: http://ip:port/RPC2 or SCGI: scgi://ip:port',
default: 'Default',
host: 'Host',
apiKey: 'API Key',
username: 'Username',
password: 'Password',
qbittorrentApiKeyHint: 'For qBittorrent 5.2+, you can use the WebUI API Key directly. When set, API Key auth is preferred.',
category: 'Auto Category Management',
sequentail: 'Sequential Download',
force_resume: 'Force Resume',
@@ -3265,7 +3295,8 @@ export default {
infoDesc:
'Completing site authentication unlocks site capabilities and some plugin permissions. This step is optional and can also be configured later from the user menu.',
selectSiteHint: 'Choose a supported auth site and fill in the required credentials for that site',
submitHint: 'When you click Next, the wizard will immediately validate against the selected auth site and save the current parameters on success.',
submitHint:
'When you click Next, the wizard will immediately validate against the selected auth site and save the current parameters on success.',
siteConfigNotExist: 'Authentication site configuration does not exist',
fieldRequired: 'Please enter {name}',
},
@@ -3341,6 +3372,7 @@ export default {
'After enabling it, you can use the Agent in message conversations and optionally turn on transfer-failure takeover and AI recommendations.',
providerRequired: 'LLM provider is required',
apiKeyRequired: 'LLM API key is required',
authOrApiKeyRequired: 'Provide an LLM API key or complete provider authorization first',
modelRequired: 'LLM model name is required',
maxContextTokensRequired: 'LLM max context tokens must be greater than 0',
recommendMaxItemsRequired: 'AI recommendation analysis limit must be greater than 0',

View File

@@ -993,6 +993,7 @@ export default {
aiRecommend: '智能推荐',
reRecommend: '重新生成推荐',
aiRecommendError: '智能推荐失败',
refreshSearch: '重新搜索',
},
browse: {
actor: '演员',
@@ -1228,6 +1229,17 @@ export default {
content: '内容',
refreshing: '正在刷新',
initializing: '正在初始化',
searchPlaceholder: '搜索日志内容',
allLevels: '全部级别',
followTail: '跟随最新日志',
wrapLines: '自动换行',
pauseStream: '暂停日志流',
resumeStream: '恢复日志流',
waitingForLogs: '等待日志输出...',
paused: '已暂停',
connected: '实时更新中',
lineCount: '显示 {visible}/{total} 行',
jumpToLatest: '查看最新 ({count})',
},
moduleTest: {
normal: '正常',
@@ -1322,7 +1334,8 @@ export default {
llmProvider: 'LLM提供商',
llmProviderHint: '选择使用的LLM服务提供商',
llmModel: 'LLM模型名称',
llmModelHint: '指定使用的LLM模型gpt-3.5-turbo、deepseek-chat等',
llmModelHint: '指定使用的LLM模型deepseek-v4-flash、gpt-5.4等',
llmModelResolvedHint: '已根据模型目录自动回填最大上下文为 {context}K来源{source}',
llmThinking: '思考模式 / 深度',
llmThinkingHint:
'思考深度off/auto/minimal/low/medium/high/max/xhigh不支持的级别会按 provider 能力自动映射到最近值',
@@ -1348,6 +1361,16 @@ export default {
llmApiKeyPlaceholder: '请输入API密钥',
llmBaseUrl: 'LLM基础URL',
llmBaseUrlHint: 'LLM API的基础URL地址用于自定义API端点',
llmProviderAuth: '提供商授权',
llmProviderAuthHint: '支持账号登录授权的提供商,可以直接在这里完成登录并复用授权状态。',
llmProviderConnectedAs: '当前已连接:{label}',
llmProviderDisconnect: '断开授权',
llmProviderDisconnected: '已断开提供商授权',
llmProviderAuthDialogTitle: '提供商授权',
llmProviderPopupBlocked: '浏览器拦截了授权窗口,请手动点击下方按钮继续。',
llmProviderDeviceCode: '设备码',
llmProviderOpenAuthPage: '打开授权页面',
llmProviderCheckAuthStatus: '检查授权状态',
aiVoiceApiKey: '音频 API密钥',
aiVoiceApiKeyHint: '音频转写与语音合成使用的 API 密钥,留空时回退到当前 LLM API 密钥',
aiVoiceBaseUrl: '音频基础URL',
@@ -2799,6 +2822,7 @@ export default {
actions: {
aiRedo: '智能助手整理',
aiRedoPending: '智能助手整理中...',
batchAiRedo: '智能助手批量整理',
redo: '重新整理',
delete: '删除',
batchRedo: '批量重新整理',
@@ -2862,8 +2886,10 @@ export default {
rtorrentHostHint: 'HTTP: http://ip:port/RPC2 或 SCGI: scgi://ip:port',
default: '默认',
host: '地址',
apiKey: 'API Key',
username: '用户名',
password: '密码',
qbittorrentApiKeyHint: 'qBittorrent 5.2+ 可直接使用 WebUI API Key填写后将优先使用 API Key 登录。',
category: '自动分类管理',
sequentail: '顺序下载',
force_resume: '强制继续',
@@ -3294,6 +3320,7 @@ export default {
infoDesc: '启用后可在消息会话中使用 Agent 能力,也可开启失败整理接管和智能推荐。',
providerRequired: 'LLM 提供商不能为空',
apiKeyRequired: 'LLM API 密钥不能为空',
authOrApiKeyRequired: '请填写 LLM API 密钥或先完成提供商授权',
modelRequired: 'LLM 模型名称不能为空',
maxContextTokensRequired: 'LLM 最大上下文 Token 数量必须大于 0',
recommendMaxItemsRequired: '智能推荐分析条目上限必须大于 0',

View File

@@ -994,6 +994,7 @@ export default {
aiRecommend: '智能推薦',
reRecommend: '重新生成推薦',
aiRecommendError: '智能推薦失敗',
refreshSearch: '重新搜尋',
},
browse: {
actor: '演員',
@@ -1230,6 +1231,17 @@ export default {
content: '內容',
refreshing: '正在刷新',
initializing: '正在初始化',
searchPlaceholder: '搜索日誌內容',
allLevels: '全部級別',
followTail: '跟隨最新日誌',
wrapLines: '自動換行',
pauseStream: '暫停日誌流',
resumeStream: '恢復日誌流',
waitingForLogs: '等待日誌輸出...',
paused: '已暫停',
connected: '實時更新中',
lineCount: '顯示 {visible}/{total} 行',
jumpToLatest: '查看最新 ({count})',
},
moduleTest: {
normal: '正常',
@@ -1324,7 +1336,8 @@ export default {
llmProvider: 'LLM提供商',
llmProviderHint: '選擇使用的LLM服務提供商',
llmModel: 'LLM模型名稱',
llmModelHint: '指定使用的LLM模型gpt-3.5-turbo、deepseek-chat等',
llmModelHint: '指定使用的LLM模型deepseek-v4-flash、gpt-5.4等',
llmModelResolvedHint: '已根據模型目錄自動回填最大上下文為 {context}K來源{source}',
llmThinking: '思考模式 / 深度',
llmThinkingHint:
'思考深度off/auto/minimal/low/medium/high/max/xhigh不支援的級別會按 provider 能力自動映射到最近值',
@@ -1350,6 +1363,16 @@ export default {
llmApiKeyPlaceholder: '請輸入API密鑰',
llmBaseUrl: 'LLM基礎URL',
llmBaseUrlHint: 'LLM API的基礎URL地址用於自定義API端點',
llmProviderAuth: '提供商授權',
llmProviderAuthHint: '支援帳號登入授權的提供商,可以直接在這裡完成登入並重用授權狀態。',
llmProviderConnectedAs: '目前已連接:{label}',
llmProviderDisconnect: '斷開授權',
llmProviderDisconnected: '已斷開提供商授權',
llmProviderAuthDialogTitle: '提供商授權',
llmProviderPopupBlocked: '瀏覽器攔截了授權視窗,請手動點擊下方按鈕繼續。',
llmProviderDeviceCode: '設備碼',
llmProviderOpenAuthPage: '開啟授權頁面',
llmProviderCheckAuthStatus: '檢查授權狀態',
aiVoiceApiKey: '音頻 API密鑰',
aiVoiceApiKeyHint: '音頻轉寫與語音合成使用的 API 密鑰,留空時回退到當前 LLM API 密鑰',
aiVoiceBaseUrl: '音頻基礎URL',
@@ -2801,6 +2824,7 @@ export default {
actions: {
aiRedo: '智能助手整理',
aiRedoPending: '智能助手整理中...',
batchAiRedo: '智能助手批量整理',
redo: '重新整理',
delete: '刪除',
batchRedo: '批量重新整理',
@@ -2864,8 +2888,10 @@ export default {
enabled: '啟用',
default: '預設',
host: '地址',
apiKey: 'API Key',
username: '用戶名',
password: '密碼',
qbittorrentApiKeyHint: 'qBittorrent 5.2+ 可直接使用 WebUI API Key填寫後將優先使用 API Key 登入。',
category: '自動分類管理',
sequentail: '順序下載',
force_resume: '強制繼續',
@@ -3296,6 +3322,7 @@ export default {
infoDesc: '啟用後可在消息對話中使用 Agent 能力,也可開啟失敗整理接管與智能推薦。',
providerRequired: 'LLM 提供商不能為空',
apiKeyRequired: 'LLM API 密鑰不能為空',
authOrApiKeyRequired: '請填寫 LLM API 密鑰或先完成提供商授權',
modelRequired: 'LLM 模型名稱不能為空',
maxContextTokensRequired: 'LLM 最大上下文 Token 數量必須大於 0',
recommendMaxItemsRequired: '智能推薦分析條目上限必須大於 0',

View File

@@ -95,6 +95,9 @@ const cardScroll = useInfiniteScroll(filteredCardDataList)
// 是否刷新过
const isRefreshed = ref(false)
// 是否正在重新搜索
const isRefreshing = ref(false)
// 加载进度文本
const progressText = ref(t('common.pleaseWait'))
@@ -464,6 +467,21 @@ async function fetchData() {
}
}
// Re-run the search with the same parameters (refresh-button handler).
async function refreshSearch() {
  // Guard against re-entry while a refresh or the initial search is still running.
  if (isRefreshing.value || progressActive.value) return
  isRefreshing.value = true
  try {
    // Leave the AI-recommendation view before refreshing; all other state is
    // reset inside fetchData itself.
    showingAiResults.value = false
    await fetchData()
  } catch (error) {
    console.error('重新搜索失败:', error)
  } finally {
    isRefreshing.value = false
  }
}
// 切换到智能推荐结果(自动保存筛选条件)
async function switchToAiResults() {
if (showingAiResults.value) {
@@ -808,8 +826,8 @@ onUnmounted(() => {
</div>
</VFadeTransition>
<!-- 精简标题栏 -->
<VCard v-if="isRefreshed && !progressActive" class="search-header d-flex align-center mb-3">
<!-- 精简标题栏搜索过后保持挂载加载中由按钮 :disabled / :loading 表达状态 -->
<VCard v-if="isRefreshed" class="search-header d-flex align-center mb-3">
<div class="search-info-container">
<div class="search-title text-moviepilot">
<span class="d-none d-sm-inline">{{ t('resource.searchResults') }}</span>
@@ -833,6 +851,22 @@ onUnmounted(() => {
<VSpacer />
<!-- 重新搜索按钮 -->
<VBtn
variant="text"
size="small"
icon
class="me-2 refresh-search-btn"
:loading="isRefreshing"
:disabled="isRefreshing || progressActive"
@click="refreshSearch"
>
<VIcon icon="mdi-refresh" size="20" />
<VTooltip activator="parent" location="top">
{{ t('resource.refreshSearch') }}
</VTooltip>
</VBtn>
<!-- AI操作按钮组 -->
<div v-if="aiRecommendEnabled && originalDataList.length > 0" class="ai-toggle-container me-2">
<div class="ai-toggle-buttons">
@@ -1180,6 +1214,14 @@ onUnmounted(() => {
background-color: rgba(var(--v-theme-primary), 0.05);
}
/* 重新搜索按钮 */
.refresh-search-btn {
block-size: 44px !important;
inline-size: 44px !important;
border-radius: 8px !important;
background-color: rgba(var(--v-theme-surface-variant), 0.1);
}
/* AI按钮组样式 */
.ai-toggle-container {
position: relative;
@@ -1371,6 +1413,11 @@ onUnmounted(() => {
inline-size: 36px;
}
.refresh-search-btn {
block-size: 36px !important;
inline-size: 36px !important;
}
.ai-toggle-buttons {
block-size: 36px;
}

View File

@@ -59,7 +59,7 @@ const aiRedoProgressDialog = ref(false)
const aiRedoProgressActive = ref(false)
const aiRedoProgressText = ref(t('transferHistory.actions.aiRedoPending'))
const aiRedoProgressSSE = ref<any>(null)
const aiRedoProgressHistoryId = ref<number>()
const aiRedoProgressHistoryIds = ref<number[]>([])
// 重新整理IDS
const redoIds = ref<number[]>([])
@@ -374,6 +374,7 @@ async function removeSingle(deleteSrc: boolean, deleteDest: boolean) {
// 批量删除记录
async function removeBatch(deleteSrc: boolean, deleteDest: boolean) {
if (hasRunningAiRedo.value) return
// 关闭弹窗
deleteConfirmDialog.value = false
// 总条数
@@ -409,6 +410,7 @@ async function deleteConfirmHandler(deleteSrc: boolean, deleteDest: boolean) {
// 批量删除历史记录
async function removeHistoryBatch() {
if (hasRunningAiRedo.value) return
if (selected.value.length === 0) return
// 清空当前操作记录
@@ -421,6 +423,7 @@ async function removeHistoryBatch() {
}
// 批量重新整理
async function retransferBatch() {
if (hasRunningAiRedo.value) return
if (selected.value.length === 0) return
// 清空当前操作记录
@@ -462,15 +465,14 @@ function stopAiRedoProgress() {
// AI整理完成
async function finishAiRedo(success: boolean, errorMessage?: string) {
const historyId = aiRedoProgressHistoryId.value
const historyIds = [...aiRedoProgressHistoryIds.value]
const historyIdSet = new Set(historyIds)
stopAiRedoProgress()
aiRedoProgressDialog.value = false
aiRedoProgressHistoryId.value = undefined
if (historyId !== undefined) {
aiRedoIds.value = aiRedoIds.value.filter(id => id !== historyId)
}
aiRedoProgressHistoryIds.value = []
aiRedoIds.value = aiRedoIds.value.filter(id => !historyIdSet.has(id))
selected.value = selected.value.filter(item => !historyIdSet.has(item.id))
await fetchData()
@@ -493,9 +495,14 @@ async function handleAiRedoProgressMessage(event: MessageEvent) {
// Watch AI organize progress for a single history record (delegates to the batch variant).
function startAiRedoProgress(historyId: number, progressKey: string) {
  startAiRedoProgressBatch([historyId], progressKey)
}
// 开始监听批量整理进度
function startAiRedoProgressBatch(historyIds: number[], progressKey: string) {
stopAiRedoProgress()
aiRedoProgressHistoryId.value = historyId
aiRedoProgressHistoryIds.value = historyIds
aiRedoProgressDialog.value = true
aiRedoProgressActive.value = true
aiRedoProgressText.value = t('transferHistory.actions.aiRedoPending')
@@ -543,6 +550,44 @@ async function triggerAiRedo(item: TransferHistory) {
}
}
// Trigger AI re-organize for all currently selected history records in one batch request.
// No-ops when the AI agent is disabled, a batch is already running, or nothing is selected.
async function triggerBatchAiRedo() {
  if (!aiAgentEnabled.value) {
    $toast.error(t('transferHistory.aiRedoDisabled'))
    return
  }
  // Only one AI redo batch may be in flight at a time.
  if (hasRunningAiRedo.value) return
  // Deduplicate the selected ids before submitting.
  const historyIds = [...new Set(selected.value.map(item => item.id))]
  if (historyIds.length === 0) return
  // Optimistically mark the ids as in-flight so per-row actions are disabled.
  aiRedoIds.value = [...new Set([...aiRedoIds.value, ...historyIds])]
  let progressStarted = false
  try {
    const result: { [key: string]: any } = await api.post('history/transfer/ai-redo', {
      history_ids: historyIds,
    })
    const progressKey = result.data?.progress_key
    // The backend may accept only a subset of the submitted ids.
    const acceptedIds = (result.data?.history_ids as number[] | undefined) ?? historyIds
    if (!result.success || !progressKey) {
      $toast.error(result.message || t('transferHistory.aiRedoFailed'))
      return
    }
    startAiRedoProgressBatch(acceptedIds, progressKey)
    // Set-based membership check (O(1) per item), consistent with finishAiRedo.
    const acceptedIdSet = new Set(acceptedIds)
    selected.value = selected.value.filter(item => !acceptedIdSet.has(item.id))
    progressStarted = true
  } catch (error) {
    console.error(error)
    $toast.error(t('transferHistory.aiRedoFailed'))
  } finally {
    // Roll back the optimistic in-flight marks when no progress stream was started.
    if (!progressStarted) {
      const submittedIdSet = new Set(historyIds)
      aiRedoIds.value = aiRedoIds.value.filter(id => !submittedIdSet.has(id))
    }
  }
}
// 计算下拉菜单
function getDropdownItems(item: TransferHistory) {
return [
@@ -645,7 +690,7 @@ const historyDynamicIcon = computed(() => (selected.value.length > 0 ? 'mdi-chev
const historyDynamicMenuItems = computed(() => {
if (selected.value.length === 0) return undefined
return [
const items: Array<{ titleKey: string; icon: string; action: () => void; color?: string }> = [
{
titleKey: 'dialog.transferQueue.title',
icon: 'mdi-timer-sand-paused',
@@ -653,22 +698,36 @@ const historyDynamicMenuItems = computed(() => {
transferQueueDialog.value = true
},
},
{
titleKey: 'transferHistory.actions.batchRedo',
icon: 'mdi-redo-variant',
action: () => {
retransferBatch()
},
},
{
titleKey: 'transferHistory.actions.batchDelete',
icon: 'mdi-trash-can-outline',
color: 'error',
action: () => {
removeHistoryBatch()
},
},
]
if (!hasRunningAiRedo.value) {
items.push(
{
titleKey: 'transferHistory.actions.batchAiRedo',
icon: 'mdi-robot-outline',
action: () => {
triggerBatchAiRedo()
},
},
{
titleKey: 'transferHistory.actions.batchRedo',
icon: 'mdi-redo-variant',
action: () => {
retransferBatch()
},
},
{
titleKey: 'transferHistory.actions.batchDelete',
icon: 'mdi-trash-can-outline',
color: 'error',
action: () => {
removeHistoryBatch()
},
},
)
}
return items
})
useDynamicButton({
@@ -980,7 +1039,7 @@ onUnmounted(() => {
<Teleport to="body" v-if="!appMode && route.path === '/history'">
<div v-if="isRefreshed" class="compact-fab-stack compact-fab-stack--history">
<VFab
v-if="selected.length > 0"
v-if="selected.length > 0 && !hasRunningAiRedo"
icon="mdi-trash-can-outline"
color="warning"
variant="tonal"
@@ -989,7 +1048,7 @@ onUnmounted(() => {
@click="removeHistoryBatch"
/>
<VFab
v-if="selected.length > 0"
v-if="selected.length > 0 && !hasRunningAiRedo"
icon="mdi-redo-variant"
color="success"
variant="tonal"
@@ -997,6 +1056,15 @@ onUnmounted(() => {
class="compact-fab compact-fab--secondary"
@click="retransferBatch"
/>
<VFab
v-if="selected.length > 0 && !hasRunningAiRedo"
icon="mdi-robot-outline"
color="info"
variant="tonal"
appear
class="compact-fab compact-fab--secondary"
@click="triggerBatchAiRedo"
/>
<VFab
icon="mdi-timer-sand-paused"
color="primary"

View File

@@ -12,6 +12,7 @@ import ProgressDialog from '@/components/dialog/ProgressDialog.vue'
import { useI18n } from 'vue-i18n'
import { downloaderOptions, mediaServerOptions } from '@/api/constants'
import { useDisplay, useTheme } from 'vuetify'
import { useLlmProviderDirectory } from '@/composables/useLlmProviderDirectory'
const display = useDisplay()
const theme = useTheme()
@@ -168,9 +169,6 @@ const progressDialog = ref(false)
// 高级设置对话框
const advancedDialog = ref(false)
// LLM 模型列表
const llmModels = ref<string[]>([])
const loadingModels = ref(false)
const savingBasic = ref(false)
const testingLlm = ref(false)
@@ -186,6 +184,74 @@ type LlmSettingsSnapshot = {
let llmTestRequestId = 0
let llmTestAbortController: AbortController | null = null
// Writable computed bridges between the persisted SystemSettings object and the
// LLM provider composable. Each getter normalizes null/undefined to a safe
// default; each setter writes an empty-string (or 0) fallback back into settings.
const llmProviderRef = computed({
get: () => String(SystemSettings.value.Basic.LLM_PROVIDER ?? ''),
set: value => {
SystemSettings.value.Basic.LLM_PROVIDER = value || ''
},
})
const llmApiKeyRef = computed({
get: () => String(SystemSettings.value.Basic.LLM_API_KEY ?? ''),
set: value => {
SystemSettings.value.Basic.LLM_API_KEY = value || ''
},
})
const llmBaseUrlRef = computed({
get: () => String(SystemSettings.value.Basic.LLM_BASE_URL ?? ''),
set: value => {
SystemSettings.value.Basic.LLM_BASE_URL = value || ''
},
})
const llmModelRef = computed({
get: () => String(SystemSettings.value.Basic.LLM_MODEL ?? ''),
set: value => {
SystemSettings.value.Basic.LLM_MODEL = value || ''
},
})
// Max context tokens is numeric; 0 means "unset".
const llmMaxContextRef = computed({
get: () => Number(SystemSettings.value.Basic.LLM_MAX_CONTEXT_TOKENS ?? 0),
set: value => {
SystemSettings.value.Basic.LLM_MAX_CONTEXT_TOKENS = value || 0
},
})
// LLM provider directory composable: supplies the provider/model option lists,
// field-visibility flags, and the OAuth/device-auth flow, bound to the settings
// fields via writable computed refs. Several bindings are renamed with an
// "llm"/"Llm" prefix to avoid clashing with other names in this component.
const {
providerItems: llmProviderItems,
baseUrlPresetItems: llmBaseUrlPresetItems,
models: llmModels,
selectedProvider: selectedLlmProvider,
selectedModel: selectedLlmModel,
loadingProviders: loadingLlmProviders,
loadingModels,
providerConnected,
showBaseUrlField,
showApiKeyField,
canRefreshModels,
authDialogVisible,
authPolling,
authPopupBlocked,
authSession,
handleProviderSelection,
applyModelMetadata,
loadProviders: loadLlmProviders,
loadModels: loadLlmModels,
openAuthPage,
startAuth: startLlmProviderAuth,
pollAuthSession,
disconnectAuth: disconnectLlmProviderAuth,
closeAuthDialog,
} = useLlmProviderDirectory({
provider: llmProviderRef,
apiKey: llmApiKeyRef,
baseUrl: llmBaseUrlRef,
model: llmModelRef,
maxContextTokens: llmMaxContextRef,
})
function buildLlmSnapshot(): LlmSettingsSnapshot {
return {
AI_AGENT_ENABLE: Boolean(SystemSettings.value.Basic.AI_AGENT_ENABLE),
@@ -261,13 +327,22 @@ function invalidateLlmTestState() {
const currentLlmSnapshot = computed(() => buildLlmSnapshot())
const currentLlmSnapshotKey = computed(() => buildLlmSnapshotKey(currentLlmSnapshot.value))
// OAuth methods exposed by the selected provider (empty when none).
const llmProviderAuthMethods = computed(() => selectedLlmProvider.value?.oauth_methods || [])
// Display label for the connected auth session, if any.
const llmProviderAuthLabel = computed(() => selectedLlmProvider.value?.auth_status?.label || '')
// Localized hint describing the selected model's context size and the catalog
// it was resolved from; empty string when no context metadata is available.
const selectedLlmModelInfo = computed(() => {
if (!selectedLlmModel.value?.context_tokens_k) return ''
return t('setting.system.llmModelResolvedHint', {
context: selectedLlmModel.value.context_tokens_k,
source: selectedLlmModel.value.source || 'models.dev',
})
})
const canTestLlm = computed(() => {
const snapshot = currentLlmSnapshot.value
return (
snapshot.AI_AGENT_ENABLE &&
Boolean(snapshot.LLM_PROVIDER.trim()) &&
Boolean(snapshot.LLM_API_KEY.trim()) &&
(Boolean(snapshot.LLM_API_KEY.trim()) || providerConnected.value) &&
Boolean(snapshot.LLM_MODEL.trim()) &&
!savingBasic.value &&
!testingLlm.value
@@ -320,28 +395,42 @@ const logLevelItems = [
// 安全域名添加变量
const newSecurityDomain = ref('')
// 加载LLM模型列表
async function loadLlmModels() {
loadingModels.value = true
// 加载 LLM 模型列表与 provider 目录
async function refreshLlmModels(forceRefresh = true) {
try {
const result: { [key: string]: any } = await api.get('system/llm-models', {
params: {
provider: SystemSettings.value.Basic.LLM_PROVIDER,
api_key: SystemSettings.value.Basic.LLM_API_KEY,
base_url: SystemSettings.value.Basic.LLM_BASE_URL,
},
})
if (result.success) {
llmModels.value = result.data
if (llmModels.value.length > 0) SystemSettings.value.Basic.LLM_MODEL = llmModels.value[0]
} else {
$toast.error(result.message)
}
await loadLlmModels(forceRefresh)
} catch (error) {
$toast.error(error instanceof Error ? error.message : String(error))
console.log(error)
}
loadingModels.value = false
}
// After the user picks a provider, re-resolve its metadata and, when enough
// credentials are available, pre-fetch the model list from cache (non-forced).
async function handleLlmProviderChanged() {
  handleProviderSelection(true)
  if (!canRefreshModels.value) return
  await refreshLlmModels(false)
}
// Sync model-derived metadata (e.g. max context tokens) after the model changes.
function handleLlmModelChanged() {
applyModelMetadata()
}
// Kick off the provider auth flow for the chosen method; failures surface as toasts.
async function startProviderAuth(methodId: string) {
  try {
    await startLlmProviderAuth(methodId)
  } catch (error) {
    const message = error instanceof Error ? error.message : String(error)
    $toast.error(message)
  }
}
// Tear down the provider auth session and report the outcome via toast.
async function disconnectProviderAuth() {
  try {
    await disconnectLlmProviderAuth()
    $toast.success(t('setting.system.llmProviderDisconnected'))
  } catch (error) {
    const message = error instanceof Error ? error.message : String(error)
    $toast.error(message)
  }
}
// 添加安全域名
@@ -436,6 +525,7 @@ async function loadSystemSettings() {
})
}
SystemSettings.value.Basic.LLM_THINKING_LEVEL = resolveThinkingLevelValue(result.data)
await loadLlmProviders()
}
} catch (error) {
console.log(error)
@@ -483,7 +573,7 @@ async function testLlmConnection() {
testingLlm.value = true
try {
const result: { [key: string]: any } = await api.post('system/llm-test', payload, {
const result: { [key: string]: any } = await api.post('llm/test', payload, {
signal: abortController.signal,
})
if (
@@ -911,49 +1001,104 @@ watch(currentLlmSnapshotKey, (snapshotKey, previousSnapshotKey) => {
/>
</VCol>
<VCol v-if="SystemSettings.Basic.AI_AGENT_ENABLE" cols="12" md="6">
<VSelect
<VAutocomplete
v-model="SystemSettings.Basic.LLM_PROVIDER"
:label="t('setting.system.llmProvider')"
:hint="t('setting.system.llmProviderHint')"
persistent-hint
:items="[
{ title: 'OpenAI', value: 'openai' },
{ title: 'Google', value: 'google' },
{ title: 'DeepSeek', value: 'deepseek' },
]"
:items="llmProviderItems"
:loading="loadingLlmProviders"
prepend-inner-icon="mdi-robot"
@update:model-value="handleLlmProviderChanged"
/>
</VCol>
<VCol v-if="SystemSettings.Basic.AI_AGENT_ENABLE" cols="12" md="6">
<VTextField
v-model="SystemSettings.Basic.LLM_BASE_URL"
<VCol v-if="SystemSettings.Basic.AI_AGENT_ENABLE && showBaseUrlField" cols="12" md="6">
<VCombobox
:model-value="SystemSettings.Basic.LLM_BASE_URL"
@update:model-value="(value: any) => {
SystemSettings.Basic.LLM_BASE_URL = typeof value === 'object' && value !== null ? value.value : (value || '');
}"
:label="t('setting.system.llmBaseUrl')"
:hint="t('setting.system.llmBaseUrlHint')"
placeholder="https://api.deepseek.com"
:placeholder="selectedLlmProvider?.default_base_url || 'https://api.deepseek.com'"
:items="llmBaseUrlPresetItems"
item-title="title"
item-value="value"
persistent-hint
prepend-inner-icon="mdi-link"
/>
>
<template #item="{ props, item }">
<VListItem v-bind="props" :subtitle="item.raw.subtitle" />
</template>
</VCombobox>
</VCol>
<VCol v-if="SystemSettings.Basic.AI_AGENT_ENABLE" cols="12" md="6">
<VCol v-if="SystemSettings.Basic.AI_AGENT_ENABLE && showApiKeyField" cols="12" md="6">
<VTextField
v-model="SystemSettings.Basic.LLM_API_KEY"
:label="t('setting.system.llmApiKey')"
:hint="t('setting.system.llmApiKeyHint')"
:label="selectedLlmProvider?.api_key_label || t('setting.system.llmApiKey')"
:hint="selectedLlmProvider?.api_key_hint || t('setting.system.llmApiKeyHint')"
:placeholder="t('setting.system.llmApiKeyPlaceholder')"
persistent-hint
type="password"
prepend-inner-icon="mdi-key-variant"
/>
</VCol>
<VCol
v-if="SystemSettings.Basic.AI_AGENT_ENABLE && llmProviderAuthMethods.length > 0"
cols="12"
>
<VAlert type="info" variant="tonal">
<div class="d-flex flex-column flex-md-row justify-space-between ga-3">
<div>
<div class="text-subtitle-2">{{ t('setting.system.llmProviderAuth') }}</div>
<div class="text-body-2">
{{ selectedLlmProvider?.description || t('setting.system.llmProviderAuthHint') }}
</div>
<div v-if="providerConnected" class="text-body-2 mt-2">
{{ t('setting.system.llmProviderConnectedAs', { label: llmProviderAuthLabel || selectedLlmProvider?.name }) }}
</div>
</div>
<div class="d-flex flex-wrap ga-2">
<VBtn
v-for="method in llmProviderAuthMethods"
:key="method.id"
color="primary"
variant="tonal"
prepend-icon="mdi-account-arrow-right-outline"
@click="startProviderAuth(method.id)"
>
{{ method.label }}
</VBtn>
<VBtn
v-if="providerConnected"
color="error"
variant="text"
prepend-icon="mdi-link-off"
@click="disconnectProviderAuth"
>
{{ t('setting.system.llmProviderDisconnect') }}
</VBtn>
</div>
</div>
</VAlert>
</VCol>
<VCol v-if="SystemSettings.Basic.AI_AGENT_ENABLE" cols="12" md="6">
<div>
<VCombobox
v-model="SystemSettings.Basic.LLM_MODEL"
:model-value="SystemSettings.Basic.LLM_MODEL"
@update:model-value="(val: any) => {
SystemSettings.Basic.LLM_MODEL = typeof val === 'object' && val !== null ? val.id : val;
handleLlmModelChanged();
}"
:label="t('setting.system.llmModel')"
:hint="t('setting.system.llmModelHint')"
:placeholder="t('setting.system.llmModelHint')"
persistent-hint
:items="llmModels"
item-title="name"
item-value="id"
:loading="loadingModels"
prepend-inner-icon="mdi-brain"
>
@@ -962,12 +1107,16 @@ watch(currentLlmSnapshotKey, (snapshotKey, previousSnapshotKey) => {
variant="text"
icon="mdi-refresh"
size="small"
@click="loadLlmModels"
:disabled="!SystemSettings.Basic.LLM_API_KEY"
@click="refreshLlmModels(true)"
:disabled="!canRefreshModels"
/>
</template>
</VCombobox>
<VAlert v-if="selectedLlmModelInfo" type="info" variant="tonal" density="compact" class="mt-2">
{{ selectedLlmModelInfo }}
</VAlert>
<div class="d-flex justify-end mt-2">
<VBtn
color="info"
@@ -1846,6 +1995,50 @@ watch(currentLlmSnapshotKey, (snapshotKey, previousSnapshotKey) => {
</VCardActions>
</VCard>
</VDialog>
<VDialog v-model="authDialogVisible" max-width="560">
<VCard>
<VCardTitle>{{ t('setting.system.llmProviderAuthDialogTitle') }}</VCardTitle>
<VCardText class="d-flex flex-column ga-4">
<VAlert v-if="authSession?.instructions" type="info" variant="tonal">
{{ authSession.instructions }}
</VAlert>
<VAlert v-if="authPopupBlocked" type="warning" variant="tonal">
{{ t('setting.system.llmProviderPopupBlocked') }}
</VAlert>
<div v-if="authSession?.user_code">
<div class="text-caption text-medium-emphasis mb-1">{{ t('setting.system.llmProviderDeviceCode') }}</div>
<div class="text-h5 font-weight-bold">{{ authSession.user_code }}</div>
</div>
<div v-if="authSession?.message" class="text-body-2">
{{ authSession.message }}
</div>
<div class="d-flex flex-wrap ga-2">
<VBtn color="primary" prepend-icon="mdi-open-in-new" @click="openAuthPage">
{{ t('setting.system.llmProviderOpenAuthPage') }}
</VBtn>
<VBtn
variant="tonal"
prepend-icon="mdi-refresh"
:loading="authPolling"
@click="pollAuthSession"
>
{{ t('setting.system.llmProviderCheckAuthStatus') }}
</VBtn>
</div>
</VCardText>
<VCardActions>
<VSpacer />
<VBtn variant="text" @click="closeAuthDialog">
{{ t('common.close') }}
</VBtn>
</VCardActions>
</VCard>
</VDialog>
</template>
<style scoped>

View File

@@ -1,20 +1,89 @@
<script lang="ts" setup>
import { computed, onMounted, ref } from 'vue'
import { computed, onMounted } from 'vue'
import { useToast } from 'vue-toastification'
import { useI18n } from 'vue-i18n'
import api from '@/api'
import { useSetupWizard } from '@/composables/useSetupWizard'
import { useLlmProviderDirectory } from '@/composables/useLlmProviderDirectory'
const { t } = useI18n()
const $toast = useToast()
const { wizardData, validationErrors } = useSetupWizard()
const llmModels = ref<string[]>([])
const loadingModels = ref(false)
// Writable computed bridging the wizard's agent.provider field to the
// provider-directory composable; setter normalizes falsy input to ''.
const providerRef = computed({
get: () => wizardData.value.agent.provider,
set: value => {
wizardData.value.agent.provider = value || ''
},
})
const providerItems = [
{ title: 'OpenAI', value: 'openai' },
{ title: 'Google', value: 'google' },
{ title: 'DeepSeek', value: 'deepseek' },
]
// Writable computed bridges between wizard state and the provider-directory
// composable; setters normalize falsy values to '' / 0 / false.
const apiKeyRef = computed({
get: () => wizardData.value.agent.apiKey,
set: value => {
wizardData.value.agent.apiKey = value || ''
},
})
const baseUrlRef = computed({
get: () => wizardData.value.agent.baseUrl,
set: value => {
wizardData.value.agent.baseUrl = value || ''
},
})
const modelRef = computed({
get: () => wizardData.value.agent.model,
set: value => {
wizardData.value.agent.model = value || ''
},
})
// Numeric; 0 means "unset".
const maxContextTokensRef = computed({
get: () => wizardData.value.agent.maxContextTokens,
set: value => {
wizardData.value.agent.maxContextTokens = value || 0
},
})
// Persisted flag: whether an OAuth-style provider session is connected.
const authConnectedRef = computed({
get: () => wizardData.value.agent.authConnected,
set: value => {
wizardData.value.agent.authConnected = Boolean(value)
},
})
// Provider directory composable: option lists, visibility flags, model loading,
// and the provider auth flow, bound to the wizard fields above.
const {
providerItems,
baseUrlPresetItems,
models: llmModels,
selectedProvider,
selectedModel,
loadingProviders,
loadingModels,
providerConnected,
showBaseUrlField,
showApiKeyField,
canRefreshModels,
authDialogVisible,
authPolling,
authPopupBlocked,
authSession,
handleProviderSelection,
applyModelMetadata,
loadProviders,
loadModels,
openAuthPage,
startAuth,
pollAuthSession,
disconnectAuth,
closeAuthDialog,
} = useLlmProviderDirectory({
provider: providerRef,
apiKey: apiKeyRef,
baseUrl: baseUrlRef,
model: modelRef,
maxContextTokens: maxContextTokensRef,
authConnected: authConnectedRef,
})
const jobIntervalItems = computed(() => [
{ title: t('setting.system.aiAgentJobIntervalDisabled'), value: 0 },
@@ -38,37 +107,61 @@ const thinkingLevelItems = computed(() => [
{ title: t('setting.system.llmThinkingLevelXhigh'), value: 'xhigh' },
])
async function loadLlmModels() {
if (!wizardData.value.agent.provider || !wizardData.value.agent.apiKey) {
return
}
// OAuth methods exposed by the selected provider (empty when none).
const providerAuthMethods = computed(() => selectedProvider.value?.oauth_methods || [])
// Display label for the connected auth session, if any.
const providerAuthLabel = computed(() => selectedProvider.value?.auth_status?.label || '')
// Localized hint with the selected model's context size and resolving catalog;
// empty string when no context metadata is available.
const selectedModelInfo = computed(() => {
if (!selectedModel.value?.context_tokens_k) return ''
return t('setting.system.llmModelResolvedHint', {
context: selectedModel.value.context_tokens_k,
source: selectedModel.value.source || 'models.dev',
})
})
loadingModels.value = true
async function refreshModels(forceRefresh = true) {
try {
const result: { [key: string]: any } = await api.get('system/llm-models', {
params: {
provider: wizardData.value.agent.provider,
api_key: wizardData.value.agent.apiKey,
base_url: wizardData.value.agent.baseUrl,
},
})
if (result.success) {
llmModels.value = result.data || []
if (!wizardData.value.agent.model && llmModels.value.length > 0) {
wizardData.value.agent.model = llmModels.value[0]
}
}
await loadModels(forceRefresh)
} catch (error) {
$toast.error(error instanceof Error ? error.message : String(error))
console.log('Load LLM models failed:', error)
} finally {
loadingModels.value = false
}
}
onMounted(() => {
if (wizardData.value.agent.enabled && wizardData.value.agent.apiKey) {
loadLlmModels()
async function handleProviderChanged() {
handleProviderSelection(true)
if (canRefreshModels.value) {
await refreshModels(false)
}
}
// Apply model-derived metadata (e.g. max context tokens) after a model change.
function handleModelChanged() {
applyModelMetadata()
}
// Launch the selected provider auth method; failures surface as error toasts.
async function startProviderAuth(methodId: string) {
  try {
    await startAuth(methodId)
  } catch (error) {
    const message = error instanceof Error ? error.message : String(error)
    $toast.error(message)
  }
}
// Tear down the provider auth session and report the outcome via toast.
async function disconnectProviderAuth() {
  try {
    await disconnectAuth()
    $toast.success(t('setting.system.llmProviderDisconnected'))
  } catch (error) {
    const message = error instanceof Error ? error.message : String(error)
    $toast.error(message)
  }
}
// On mount, load the provider directory; when the agent is enabled and
// credentials allow it, warm the model list from cache (non-forced refresh).
onMounted(async () => {
  try {
    await loadProviders()
    const shouldPrefetchModels = wizardData.value.agent.enabled && canRefreshModels.value
    if (shouldPrefetchModels) {
      await refreshModels(false)
    }
  } catch (error) {
    console.log('Load LLM providers failed:', error)
  }
})
</script>
@@ -121,49 +214,108 @@ onMounted(() => {
</VCol>
<VCol cols="12" md="6">
<VSelect
<VAutocomplete
v-model="wizardData.agent.provider"
:label="t('setting.system.llmProvider')"
:hint="t('setting.system.llmProviderHint')"
:items="providerItems"
:loading="loadingProviders"
:error="validationErrors.agent.provider"
:error-messages="validationErrors.agent.provider ? [t('setupWizard.agent.providerRequired')] : []"
persistent-hint
prepend-inner-icon="mdi-robot-outline"
@update:model-value="handleProviderChanged"
/>
</VCol>
<VCol cols="12" md="6">
<VTextField
v-model="wizardData.agent.baseUrl"
<VCol v-if="showBaseUrlField" cols="12" md="6">
<VCombobox
:model-value="wizardData.agent.baseUrl"
@update:model-value="(value: any) => {
wizardData.agent.baseUrl = typeof value === 'object' && value !== null ? value.value : (value || '');
}"
:label="t('setting.system.llmBaseUrl')"
:hint="t('setting.system.llmBaseUrlHint')"
placeholder="https://api.deepseek.com"
:placeholder="selectedProvider?.default_base_url || 'https://api.deepseek.com'"
:items="baseUrlPresetItems"
item-title="title"
item-value="value"
persistent-hint
prepend-inner-icon="mdi-link-variant"
/>
>
<template #item="{ props, item }">
<VListItem v-bind="props" :subtitle="item.raw.subtitle" />
</template>
</VCombobox>
</VCol>
<VCol cols="12" md="6">
<VCol v-if="showApiKeyField" cols="12" md="6">
<VTextField
v-model="wizardData.agent.apiKey"
:label="t('setting.system.llmApiKey')"
:hint="t('setting.system.llmApiKeyHint')"
:label="selectedProvider?.api_key_label || t('setting.system.llmApiKey')"
:hint="selectedProvider?.api_key_hint || t('setting.system.llmApiKeyHint')"
:placeholder="t('setting.system.llmApiKeyPlaceholder')"
:error="validationErrors.agent.apiKey"
:error-messages="validationErrors.agent.apiKey ? [t('setupWizard.agent.apiKeyRequired')] : []"
:error-messages="
validationErrors.agent.apiKey ? [t('setupWizard.agent.authOrApiKeyRequired')] : []
"
persistent-hint
prepend-inner-icon="mdi-key-variant"
type="password"
/>
</VCol>
<VCol v-if="providerAuthMethods.length > 0" cols="12">
<VAlert type="info" variant="tonal">
<div class="d-flex flex-column ga-3">
<div>
<div class="text-subtitle-2">{{ t('setting.system.llmProviderAuth') }}</div>
<div class="text-body-2">
{{ selectedProvider?.description || t('setting.system.llmProviderAuthHint') }}
</div>
<div v-if="providerConnected" class="text-body-2 mt-2">
{{ t('setting.system.llmProviderConnectedAs', { label: providerAuthLabel || selectedProvider?.name }) }}
</div>
</div>
<div class="d-flex flex-wrap ga-2">
<VBtn
v-for="method in providerAuthMethods"
:key="method.id"
color="primary"
variant="tonal"
prepend-icon="mdi-account-arrow-right-outline"
@click="startProviderAuth(method.id)"
>
{{ method.label }}
</VBtn>
<VBtn
v-if="providerConnected"
color="error"
variant="text"
prepend-icon="mdi-link-off"
@click="disconnectProviderAuth"
>
{{ t('setting.system.llmProviderDisconnect') }}
</VBtn>
</div>
</div>
</VAlert>
</VCol>
<VCol cols="12" md="6">
<VCombobox
v-model="wizardData.agent.model"
:model-value="wizardData.agent.model"
@update:model-value="(val: any) => {
wizardData.agent.model = typeof val === 'object' && val !== null ? val.id : val;
handleModelChanged();
}"
:label="t('setting.system.llmModel')"
:hint="t('setting.system.llmModelHint')"
:items="llmModels"
item-title="name"
item-value="id"
:loading="loadingModels"
:error="validationErrors.agent.model"
:error-messages="validationErrors.agent.model ? [t('setupWizard.agent.modelRequired')] : []"
@@ -175,11 +327,15 @@ onMounted(() => {
variant="text"
icon="mdi-refresh"
size="small"
:disabled="!wizardData.agent.provider || !wizardData.agent.apiKey"
@click="loadLlmModels"
:disabled="!canRefreshModels"
@click="refreshModels(true)"
/>
</template>
</VCombobox>
<VAlert v-if="selectedModelInfo" type="info" variant="tonal" density="compact" class="mt-2">
{{ selectedModelInfo }}
</VAlert>
</VCol>
<VCol cols="12" md="6">
@@ -364,4 +520,48 @@ onMounted(() => {
</VRow>
</VCardText>
</VCard>
<VDialog v-model="authDialogVisible" max-width="560">
<VCard>
<VCardTitle>{{ t('setting.system.llmProviderAuthDialogTitle') }}</VCardTitle>
<VCardText class="d-flex flex-column ga-4">
<VAlert v-if="authSession?.instructions" type="info" variant="tonal">
{{ authSession.instructions }}
</VAlert>
<VAlert v-if="authPopupBlocked" type="warning" variant="tonal">
{{ t('setting.system.llmProviderPopupBlocked') }}
</VAlert>
<div v-if="authSession?.user_code">
<div class="text-caption text-medium-emphasis mb-1">{{ t('setting.system.llmProviderDeviceCode') }}</div>
<div class="text-h5 font-weight-bold">{{ authSession.user_code }}</div>
</div>
<div v-if="authSession?.message" class="text-body-2">
{{ authSession.message }}
</div>
<div class="d-flex flex-wrap ga-2">
<VBtn color="primary" prepend-icon="mdi-open-in-new" @click="openAuthPage">
{{ t('setting.system.llmProviderOpenAuthPage') }}
</VBtn>
<VBtn
variant="tonal"
prepend-icon="mdi-refresh"
:loading="authPolling"
@click="pollAuthSession"
>
{{ t('setting.system.llmProviderCheckAuthStatus') }}
</VBtn>
</div>
</VCardText>
<VCardActions>
<VSpacer />
<VBtn variant="text" @click="closeAuthDialog">
{{ t('common.close') }}
</VBtn>
</VCardActions>
</VCard>
</VDialog>
</template>

View File

@@ -104,6 +104,17 @@ const { wizardData, selectDownloader, validationErrors } = useSetupWizard()
required
/>
</VCol>
<VCol cols="12" md="6">
<VTextField
v-model="wizardData.downloader.config.apikey"
type="password"
:label="t('downloader.apiKey')"
:hint="t('downloader.qbittorrentApiKeyHint')"
persistent-hint
active
prepend-inner-icon="mdi-key-variant"
/>
</VCol>
<VCol cols="12" md="6">
<VTextField
v-model="wizardData.downloader.config.username"
@@ -111,10 +122,11 @@ const { wizardData, selectDownloader, validationErrors } = useSetupWizard()
:hint="t('downloader.username')"
:error="validationErrors.downloader.username"
:error-messages="validationErrors.downloader.username ? [t('downloader.usernameRequired')] : []"
:disabled="!!wizardData.downloader.config.apikey"
persistent-hint
active
prepend-inner-icon="mdi-account"
required
:required="!wizardData.downloader.config.apikey"
/>
</VCol>
<VCol cols="12" md="6">
@@ -125,10 +137,11 @@ const { wizardData, selectDownloader, validationErrors } = useSetupWizard()
:hint="t('downloader.password')"
:error="validationErrors.downloader.password"
:error-messages="validationErrors.downloader.password ? [t('downloader.passwordRequired')] : []"
:disabled="!!wizardData.downloader.config.apikey"
persistent-hint
active
prepend-inner-icon="mdi-lock"
required
:required="!wizardData.downloader.config.apikey"
/>
</VCol>
<VCol cols="12" md="6">

File diff suppressed because it is too large Load Diff