Compare commits

...

121 Commits

Author SHA1 Message Date
jxxghp
7fc496cf5b 更新 __init__.py 2026-04-29 07:31:52 +08:00
jxxghp
8789f35228 Improve non-verbose agent tool summaries 2026-04-29 07:07:33 +08:00
jxxghp
d4dec90e2f 更新 version.py 2026-04-28 20:49:05 +08:00
jxxghp
5c1487a9a6 Optimize agent tool async blocking paths 2026-04-28 20:36:49 +08:00
jxxghp
c5b716c231 feat: introduce unified agent runtime config and system task prompt framework
- Add structured runtime config files (AGENT_PROFILE.md, AGENT_WORKFLOW.md, AGENT_HOOKS.md, USER_PREFERENCES.md, SYSTEM_TASKS.md, CURRENT_PERSONA.md) for persona, workflow, hooks, and system tasks
- Implement agent_runtime_manager to load, validate, and render runtime config and system task prompts
- Refactor agent initialization to use runtime-managed directories for skills, jobs, memory, and activity logs
- Add AgentHooksMiddleware for structured pre/in/post hooks injection
- Replace hardcoded system task prompts with template-driven rendering from SYSTEM_TASKS.md
- Update tests to cover runtime config loading, migration, and system task prompt rendering
- Update .gitignore to exclude config/agent/
2026-04-28 13:04:28 +08:00
jxxghp
483fe55372 fix: correct Plex notification image lookup
Closes #5700
2026-04-28 09:19:18 +08:00
jxxghp
5d588ee127 fix: correct traditional Chinese subtitle rename detection
Fixes #5703
2026-04-28 09:00:14 +08:00
jxxghp
afcd895f52 fix: backfill transfer download history matching
Fixes #5702
2026-04-28 08:55:40 +08:00
jxxghp
1ded58adbb fix: adapt audiences user data parser 2026-04-27 12:56:45 +08:00
jxxghp
019a077407 Apply suggestions from code review
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2026-04-27 11:35:44 +08:00
PKC278
0f190057d3 fix #5528 2026-04-27 11:35:44 +08:00
jxxghp
840c8f7298 更新 APP_VERSION 至 v2.10.6 2026-04-27 11:32:39 +08:00
jxxghp
6a6bcf59a0 增强 execute_command 工具:支持输出截断、并发限制与进程组清理,新增单元测试 2026-04-27 10:05:25 +08:00
jxxghp
323844b26d revert execute_command streaming changes
Restore the previous subprocess handling for execute_command and drop the new command streaming test so agent startup is unblocked.
2026-04-27 08:12:37 +08:00
jxxghp
140d224a9a fix agent stream blocking during command execution
Offload synchronous message edits from the event loop and stream subprocess output so long-running commands stay responsive.
2026-04-27 07:57:32 +08:00
jxxghp
7bc032d17c Revert Telegram duplicate edit fix 2026-04-27 07:36:13 +08:00
jxxghp
2df476dbff Fix Telegram duplicate message edits 2026-04-27 07:17:58 +08:00
jxxghp
bae086d8b8 更新 __init__.py 2026-04-27 06:57:18 +08:00
jxxghp
221eb21694 refine internal middleware llm usage for streaming agents
Use a non-streaming model for middleware-only calls so internal outputs do not leak into user streams and model-based middleware stays consistent.
2026-04-27 06:55:41 +08:00
jxxghp
4208c79d72 refine tool提示语为更简洁风格,补充last_buffer_char属性及非VERBOSE模式流式输出换行逻辑,新增工具流式分隔符单元测试 2026-04-26 11:15:11 +08:00
jxxghp
90245a13e1 refine non-verbose prompt wording 2026-04-26 08:54:07 +08:00
jxxghp
b5979b9b09 refine agent subscription defaults and silent tool prompts 2026-04-26 08:51:56 +08:00
jxxghp
0277288a41 feat: add agent session usage status reporting
Track per-session model and token usage so users can inspect context pressure and cumulative usage with /session_status.
2026-04-26 08:19:05 +08:00
jxxghp
79bfeaf2af 移除工具调用前的流重置,保留模型思考文本可见 2026-04-25 23:12:34 +08:00
jxxghp
4fe41ba5e9 更新 base.py 2026-04-25 22:16:15 +08:00
jxxghp
14d6e2febc Refine agent prompts for concise professional replies 2026-04-25 22:04:35 +08:00
jxxghp
97c7e71207 更新 Agent Prompt.txt 2026-04-25 21:51:47 +08:00
jxxghp
8f29a218ea chore: bump version to v2.10.5 2026-04-25 12:55:33 +08:00
jxxghp
4fd5aa3eb6 fix: improve DeepSeek reasoning_content payload handling and update langchain dependencies 2026-04-25 12:46:21 +08:00
jxxghp
bfc27d151c 更新 ask_user_choice.py 2026-04-25 11:36:36 +08:00
jxxghp
f2b56b8f40 更新 ask_user_choice.py 2026-04-25 11:35:32 +08:00
jxxghp
a05ffc07d4 refactor: remove legacy LLM_DISABLE_THINKING and LLM_REASONING_EFFORT config, unify thinking_level handling
- Eliminate support for LLM_DISABLE_THINKING and LLM_REASONING_EFFORT in config, code, and tests
- Simplify LLM thinking level logic to rely solely on LLM_THINKING_LEVEL
- Refactor LLMHelper and related endpoints to remove legacy parameter handling
- Update system API and test utilities to match new configuration structure
- Minor code cleanup and formatting improvements
2026-04-25 10:42:03 +08:00
jxxghp
4a81417fb7 fix: preserve deepseek reasoning content in tool loops 2026-04-25 09:37:01 +08:00
jxxghp
c7fa3dc863 feat: unify llm thinking level controls 2026-04-24 19:50:23 +08:00
jxxghp
28f9756dd6 feat: improve skill instructions with highlighted command formatting 2026-04-22 18:12:21 +08:00
jxxghp
4bffe2cff1 chore: bump version to v2.10.4 2026-04-22 18:02:28 +08:00
jxxghp
fca478f1d8 feat: support custom skill sources in /skills 2026-04-22 18:00:57 +08:00
Sebastian
097dff13a3 feat: add ai-compatible API endpoints 2026-04-22 17:21:43 +08:00
jxxghp
460b386004 feat: add searchable skills marketplace 2026-04-22 16:49:42 +08:00
jxxghp
89bf89c02d feat: add clawhub skill registry source 2026-04-22 16:22:10 +08:00
jxxghp
cefb60ba2c refactor: unify message interactions 2026-04-22 15:18:04 +08:00
jxxghp
8c78627647 feat: add skills marketplace management 2026-04-22 14:55:00 +08:00
jxxghp
51189210c2 更新 config.py 2026-04-22 10:39:25 +08:00
jxxghp
38933d5882 feat(agent): support disabling model thinking 2026-04-22 10:36:36 +08:00
jxxghp
4619fc4042 更新 version.py 2026-04-21 22:25:57 +08:00
jxxghp
ee7ba28235 Allow LLM test to use request payload 2026-04-21 22:14:19 +08:00
笨笨
409abb66be test: remove absolute path from llm helper test 2026-04-21 20:39:32 +08:00
笨笨
8aa8b1897b feat: add llm test endpoint 2026-04-21 20:39:32 +08:00
jxxghp
8c256d91bd refine custom identifier skill scope 2026-04-21 17:31:37 +08:00
jxxghp
d1d3fc7f30 更新 media.py 2026-04-21 14:38:16 +08:00
jxxghp
ae15eac0f8 feat: normalize internal system user ID in notification dispatch
- Add SYSTEM_INTERNAL_USER_ID constant and helpers to app.utils.identity
- Ensure internal user ID is normalized to None before dispatching notifications, preventing misrouting to external channels
- Refactor MessageChain to use normalization for all message dispatch methods
- Add tests for internal user ID normalization and notification dispatch behavior
2026-04-21 14:32:14 +08:00
jxxghp
1282ad5004 feat: improve local CLI startup management 2026-04-21 11:26:56 +08:00
笨笨
6f6fcc79f2 fix: serialize rclone folder creation during concurrent transfers 2026-04-20 21:34:35 +08:00
jxxghp
e5c64e73b5 docs: add English README 2026-04-20 19:46:34 +08:00
jxxghp
93a19b467b Add uninstall workflow to local CLI 2026-04-20 13:38:06 +08:00
jxxghp
4ba8d42272 fix #5688 2026-04-19 17:29:07 +08:00
jxxghp
32e247b4d5 更新 version.py 2026-04-19 15:44:22 +08:00
InfinityPacer
1d0d09c909 fix(plugin): merge local repo sources during sync 2026-04-19 07:07:00 +08:00
InfinityPacer
b7ee6ca8c4 fix(plugin): sanitize local repo path telemetry 2026-04-19 07:07:00 +08:00
InfinityPacer
4a4d93e7f9 refactor(plugin): expose plugin list processing helper 2026-04-19 07:07:00 +08:00
InfinityPacer
7b096c0a09 feat(plugin): encode local repo path in source url 2026-04-19 07:07:00 +08:00
InfinityPacer
3a93efb082 refactor(plugin): centralize local install dispatch 2026-04-19 07:07:00 +08:00
InfinityPacer
73cdd297b1 refactor(plugin): align local repo naming 2026-04-19 07:07:00 +08:00
InfinityPacer
83187ea17d refactor(plugin): rename local repo paths setting 2026-04-19 07:07:00 +08:00
InfinityPacer
6d8eed30ce fix(plugin): reload monitor on local path changes 2026-04-19 07:07:00 +08:00
InfinityPacer
6fa48afa34 feat(plugin): support local plugin sources 2026-04-19 07:07:00 +08:00
jxxghp
115fb40772 Allow known nettest redirects 2026-04-18 18:27:03 +08:00
jxxghp
10b0dbb5d3 Add nettest documentation comments 2026-04-18 17:52:01 +08:00
jxxghp
4c32ad902b Harden system nettest SSRF handling 2026-04-18 17:43:38 +08:00
jxxghp
787db8f5ac fix: 修复子进程环境下获取事件循环失败的问题 2026-04-17 13:02:28 +08:00
jxxghp
df1b2067b6 fix: 修正 docker 和 update.sh 中 python_version 的格式以匹配 sites.cpython-* 命名规则 2026-04-17 11:05:26 +08:00
jxxghp
f3d9f25d02 优化资源包下载逻辑,只下载对应操作系统和Python版本的sites文件 2026-04-17 08:37:50 +08:00
jxxghp
eea7e3b55f feat(cli): optimize installation command and support initializing user password 2026-04-16 23:43:20 +08:00
jxxghp
810cb0a203 relax local install python requirement to 3.11 2026-04-16 23:13:45 +08:00
jxxghp
e0e21e39a2 refactor: generalize agent interaction requests 2026-04-16 22:51:51 +08:00
jxxghp
cc31c66b93 feat: add agent button choice workflow 2026-04-16 22:32:59 +08:00
jxxghp
011535fbc3 feat: add retry actions for failed transfers 2026-04-16 22:07:21 +08:00
jxxghp
77b95d11fb bump version to v2.10.1 2026-04-16 19:55:35 +08:00
jxxghp
89f6164eba automate local bootstrap prerequisites 2026-04-16 19:47:56 +08:00
jxxghp
70350aa39f fix local update dirty check 2026-04-16 19:36:55 +08:00
jxxghp
61a0a66c47 support local restart and site auth wizard 2026-04-16 19:21:00 +08:00
jxxghp
6fcc5c84a6 bump version to v2.10.0 2026-04-16 17:14:30 +08:00
jxxghp
5995b3f3e8 extend setup wizard for database and agent 2026-04-16 17:10:25 +08:00
jxxghp
60996be71b fix local db initialization model registration 2026-04-16 17:05:57 +08:00
jxxghp
49b50e5975 run setup config step inside venv 2026-04-16 17:00:49 +08:00
jxxghp
262bd6808b update reused bootstrap repo before setup 2026-04-16 16:51:44 +08:00
jxxghp
e9c8db9950 fix bootstrap script for macos bash 2026-04-16 16:43:21 +08:00
jxxghp
02a98f832f fix local cli install and config workflow 2026-04-16 14:55:31 +08:00
jxxghp
9a2a241a30 add full-stack local cli install flow 2026-04-16 09:52:15 +08:00
jxxghp
04c2a1eb18 Add manual AI redo flow 2026-04-15 17:10:18 +08:00
jxxghp
65a4b7438c 更新 config.py 2026-04-15 09:02:05 +08:00
jxxghp
13c3c082b8 Improve agent image capability routing 2026-04-15 08:55:32 +08:00
jxxghp
bf127d6a70 更新 version.py 2026-04-14 18:10:22 +08:00
jxxghp
117672384c 更新 llm.py 2026-04-14 16:00:44 +08:00
jxxghp
2ae2ea8ef7 feat: expose AI agent flag in user global settings 2026-04-14 15:50:46 +08:00
jxxghp
7a5e513f25 feat(agent): support file attachments and local file replies 2026-04-14 15:22:01 +08:00
InfinityPacer
81828948dd fix(transfer): tighten queue cleanup edge cases 2026-04-14 14:45:18 +08:00
InfinityPacer
eda73e14f7 refactor(transfer): make queue job migration explicit 2026-04-14 14:45:18 +08:00
InfinityPacer
6aec326d05 fix(transfer): fail stale queue tasks on errors 2026-04-14 14:45:18 +08:00
InfinityPacer
d36dd69ec3 fix(transfer): clean migrated queue jobs 2026-04-14 14:45:18 +08:00
ilvsx
1688063450 fix(subtitle): create missing download root before saving subtitles 2026-04-14 12:24:18 +08:00
InfinityPacer
ae5207f0e4 fix(plugin): handle 404 plugin index and None response safely 2026-04-13 18:34:44 +08:00
jxxghp
f1f4743936 fix #5661 插件package文件不存在时不报错 2026-04-13 09:06:45 +08:00
jxxghp
e09f9ad009 feat(agent): add audio message extraction and download support for Slack, QQ, Discord, SynologyChat, and VoceChat 2026-04-13 08:36:57 +08:00
InfinityPacer
8d938c2273 fix(system): expose backend dev flag only in dev mode 2026-04-13 06:54:33 +08:00
jxxghp
e5f97cd299 feat(agent): add voice message support with TTS/STT for Telegram and WeChat
- Integrate voice message handling: detect and extract audio references from Telegram and WeChat messages, route to agent with voice reply preference.
- Add voice provider abstraction and OpenAI-based TTS/STT implementation.
- Implement agent tool `send_voice_message` for generating and sending voice replies, with fallback to text if voice is unavailable.
- Extend agent prompt and context to support voice reply instructions.
- Update notification and message schemas to support audio fields.
- Add Telegram and WeChat voice sending logic, including audio file conversion and temporary media upload for WeChat.
- Add tests for voice helper and agent voice routing.
2026-04-12 12:30:02 +08:00
jxxghp
9dababbcfd 更新 version.py 2026-04-12 10:27:01 +08:00
jxxghp
9d8bd5044b 更新 version.py 2026-04-12 08:46:09 +08:00
InfinityPacer
5d07381111 chore(subscribe): update last_update when refreshing episode totals 2026-04-11 22:58:24 +08:00
InfinityPacer
61c695b77d fix(subscribe): reset tv episode counts in history response 2026-04-11 22:58:24 +08:00
InfinityPacer
1ceb8891b0 fix(subscribe): refresh total episodes before completion check 2026-04-11 22:58:24 +08:00
jxxghp
2f53fd3108 Expand image and edit support across messaging channels 2026-04-11 22:10:54 +08:00
jxxghp
bf2d2cbd03 Fix Telegram agent image download path 2026-04-11 21:11:03 +08:00
jxxghp
cb323653b8 Add tracing logs for agent image message flow 2026-04-11 20:58:20 +08:00
jxxghp
edf3946558 Fix forwarded image payload parsing for agent channels 2026-04-11 20:55:14 +08:00
jxxghp
6c5fae56d9 Add agent image support for Telegram and Slack 2026-04-11 20:40:02 +08:00
DDSRem
a4f2c574b0 fix(telegram): pass disable_web_page_preview through edit_message_text
Interactive plugin flows edit existing messages; the flag was only applied
on send_message, so link previews stayed enabled after edits.

Co-authored-by: Cursor Agent <cursoragent@cursor.com>
2026-04-11 08:31:15 +08:00
InfinityPacer
815d83bfb3 fix(http): close helper responses consistently 2026-04-10 18:21:30 +08:00
InfinityPacer
df3294c9d2 fix(http): require 200 for share reporting requests 2026-04-10 18:21:30 +08:00
InfinityPacer
1af5f02832 fix(http): use explicit success checks in async callers 2026-04-10 18:21:30 +08:00
InfinityPacer
217fcfd1b2 fix(http): close non-success responses safely 2026-04-10 18:21:30 +08:00
182 changed files with 26423 additions and 2514 deletions

6
.gitignore vendored
View File

@@ -1,4 +1,5 @@
.idea/
.DS_Store
*.c
*.so
*.pyd
@@ -15,11 +16,16 @@ app/helper/*.bin
app/plugins/**
!app/plugins/__init__.py
config/cookies/**
config/app.env
config/user.db*
config/sites/**
config/agent/
config/logs/
config/temp/
config/cache/
.runtime/
public/
.moviepilot.env
*.pyc
*.log
.vscode

View File

@@ -1,5 +1,7 @@
# MoviePilot
简体中文 | [English](README_EN.md)
![GitHub Repo stars](https://img.shields.io/github/stars/jxxghp/MoviePilot?style=for-the-badge)
![GitHub forks](https://img.shields.io/github/forks/jxxghp/MoviePilot?style=for-the-badge)
![GitHub contributors](https://img.shields.io/github/contributors/jxxghp/MoviePilot?style=for-the-badge)
@@ -16,17 +18,31 @@
发布频道:https://t.me/moviepilot_channel
## 主要特性
- 前后端分离基于FastApi + Vue3。
- 聚焦核心需求,简化功能和设置,部分设置项可直接使用默认值。
- 重新设计了用户界面,更加美观易用。
## 安装使用
官方Wiki:https://wiki.movie-pilot.org
### 为 AI Agent 添加 Skills
## 本地 CLI
一键安装运行脚本:
```shell
curl -fsSL https://raw.githubusercontent.com/jxxghp/MoviePilot/v2/scripts/bootstrap-local.sh | bash
```
使用 `moviepilot` 命令管理MoviePilot,完整 CLI 文档:[`docs/cli.md`](docs/cli.md)
## 为 AI Agent 添加 Skills
```shell
npx skills add https://github.com/jxxghp/MoviePilot
```
@@ -37,32 +53,9 @@ API文档:https://api.movie-pilot.org
MCP工具API文档详见 [docs/mcp-api.md](docs/mcp-api.md)
本地运行需要 `Python 3.12``Node JS v20.12.1`
开发环境准备与本地源码运行说明:[`docs/development-setup.md`](docs/development-setup.md)
- 克隆主项目 [MoviePilot](https://github.com/jxxghp/MoviePilot)
```shell
git clone https://github.com/jxxghp/MoviePilot
```
- 克隆资源项目 [MoviePilot-Resources](https://github.com/jxxghp/MoviePilot-Resources) ,将 `resources` 目录下对应平台及版本的库 `.so`/`.pyd`/`.bin` 文件复制到 `app/helper` 目录
```shell
git clone https://github.com/jxxghp/MoviePilot-Resources
```
- 安装后端依赖,运行 `main.py` 启动后端服务,默认监听端口:`3001`API文档地址`http://localhost:3001/docs`
```shell
cd MoviePilot
pip install -r requirements.txt
python3 -m app.main
```
- 克隆前端项目 [MoviePilot-Frontend](https://github.com/jxxghp/MoviePilot-Frontend)
```shell
git clone https://github.com/jxxghp/MoviePilot-Frontend
```
- 安装前端依赖,运行前端项目,访问:`http://localhost:5173`
```shell
yarn
yarn dev
```
- 参考 [插件开发指引](https://wiki.movie-pilot.org/zh/plugindev) 在 `app/plugins` 目录下开发插件代码
插件开发说明:<https://wiki.movie-pilot.org/zh/plugindev>
## 相关项目

77
README_EN.md Normal file
View File

@@ -0,0 +1,77 @@
# MoviePilot
[简体中文](README.md) | English
![GitHub Repo stars](https://img.shields.io/github/stars/jxxghp/MoviePilot?style=for-the-badge)
![GitHub forks](https://img.shields.io/github/forks/jxxghp/MoviePilot?style=for-the-badge)
![GitHub contributors](https://img.shields.io/github/contributors/jxxghp/MoviePilot?style=for-the-badge)
![GitHub repo size](https://img.shields.io/github/repo-size/jxxghp/MoviePilot?style=for-the-badge)
![GitHub issues](https://img.shields.io/github/issues/jxxghp/MoviePilot?style=for-the-badge)
![Docker Pulls](https://img.shields.io/docker/pulls/jxxghp/moviepilot?style=for-the-badge)
![Docker Pulls V2](https://img.shields.io/docker/pulls/jxxghp/moviepilot-v2?style=for-the-badge)
![Platform](https://img.shields.io/badge/platform-Windows%20%7C%20Linux%20%7C%20Synology-blue?style=for-the-badge)
Redesigned from parts of [NAStool](https://github.com/NAStool/nas-tools), with a stronger focus on core automation scenarios while reducing issues and making the project easier to extend and maintain.
# For learning and personal communication only. Please do not promote this project on platforms in mainland China.
Release channel: https://t.me/moviepilot_channel
## Key Features
- Frontend/backend separation based on FastApi + Vue3.
- Focuses on core needs, simplifies features and settings, and allows some options to work well with sensible defaults.
- Reworked user interface for a cleaner and more practical experience.
## Installation
Official wiki: https://wiki.movie-pilot.org
## Local CLI
One-command bootstrap script:
```shell
curl -fsSL https://raw.githubusercontent.com/jxxghp/MoviePilot/v2/scripts/bootstrap-local.sh | bash
```
Manage MoviePilot with the `moviepilot` command. Full CLI documentation: [`docs/cli.md`](docs/cli.md)
## Add Skills for AI Agents
```shell
npx skills add https://github.com/jxxghp/MoviePilot
```
## Development
API documentation: https://api.movie-pilot.org
MCP tool API documentation: see [docs/mcp-api.md](docs/mcp-api.md)
Development environment setup and local source-run guide: [`docs/development-setup.md`](docs/development-setup.md)
Plugin development guide: <https://wiki.movie-pilot.org/zh/plugindev>
## Related Projects
- [MoviePilot-Frontend](https://github.com/jxxghp/MoviePilot-Frontend)
- [MoviePilot-Resources](https://github.com/jxxghp/MoviePilot-Resources)
- [MoviePilot-Plugins](https://github.com/jxxghp/MoviePilot-Plugins)
- [MoviePilot-Server](https://github.com/jxxghp/MoviePilot-Server)
- [MoviePilot-Wiki](https://github.com/jxxghp/MoviePilot-Wiki)
## Disclaimer
- This software is for learning and personal communication only. It must not be used for commercial purposes or illegal activities. The software does not know how users choose to use it, and all responsibility rests with the user.
- The source code is open source and derived from other open-source code. If someone removes the relevant restrictions and redistributes or publishes modified versions that lead to liability events, the publisher of those modifications bears full responsibility. Public releases that bypass or alter the user authentication mechanism are not recommended.
- This project does not accept donations and has not published any donation page anywhere. The software itself is free of charge and does not provide paid services. Please verify information carefully to avoid being misled.
## Contributors
<a href="https://github.com/jxxghp/MoviePilot/graphs/contributors">
<img src="https://contrib.rocks/image?repo=jxxghp/MoviePilot" />
</a>

View File

@@ -1,9 +1,11 @@
import asyncio
import json
import re
import traceback
import uuid
from dataclasses import dataclass
from typing import Callable, Dict, List, Optional
from datetime import datetime
from typing import Any, Callable, Dict, List, Optional
from langchain.agents import create_agent
from langchain.agents.middleware import (
@@ -19,25 +21,63 @@ from langgraph.checkpoint.memory import InMemorySaver
from app.agent.callback import StreamingHandler
from app.agent.memory import memory_manager
from app.agent.middleware.activity_log import ActivityLogMiddleware
from app.agent.middleware.hooks import AgentHooksMiddleware
from app.agent.middleware.jobs import JobsMiddleware
from app.agent.middleware.memory import MemoryMiddleware
from app.agent.middleware.patch_tool_calls import PatchToolCallsMiddleware
from app.agent.middleware.skills import SkillsMiddleware
from app.agent.middleware.usage import UsageMiddleware
from app.agent.prompt import prompt_manager
from app.agent.runtime import agent_runtime_manager
from app.agent.tools.factory import MoviePilotToolFactory
from app.chain import ChainBase
from app.core.config import settings
from app.db.transferhistory_oper import TransferHistoryOper
from app.helper.llm import LLMHelper
from app.log import logger
from app.schemas import Notification, NotificationType
from app.schemas.message import ChannelCapabilityManager, ChannelCapability
from app.schemas.types import MessageChannel
from app.utils.identity import SYSTEM_INTERNAL_USER_ID
class AgentChain(ChainBase):
pass
@dataclass
class _SessionUsageSnapshot:
model: Optional[str] = None
context_window_tokens: Optional[int] = None
last_input_tokens: int = 0
last_output_tokens: int = 0
last_total_tokens: int = 0
last_context_usage_ratio: Optional[float] = None
total_input_tokens: int = 0
total_output_tokens: int = 0
total_tokens: int = 0
model_call_count: int = 0
last_updated_at: Optional[datetime] = None
def to_dict(self, session_id: str) -> dict[str, Any]:
return {
"session_id": session_id,
"model": self.model,
"context_window_tokens": self.context_window_tokens,
"last_input_tokens": self.last_input_tokens,
"last_output_tokens": self.last_output_tokens,
"last_total_tokens": self.last_total_tokens,
"last_context_usage_ratio": self.last_context_usage_ratio,
"total_input_tokens": self.total_input_tokens,
"total_output_tokens": self.total_output_tokens,
"total_tokens": self.total_tokens,
"model_call_count": self.model_call_count,
"last_updated_at": self.last_updated_at.strftime("%Y-%m-%d %H:%M:%S")
if self.last_updated_at
else None,
}
class _ThinkTagStripper:
"""
流式剥离 <think>...</think> 标签的辅助类。
@@ -70,7 +110,7 @@ class _ThinkTagStripper:
on_output(self.buffer[:start_idx])
emitted = True
self.in_think_tag = True
self.buffer = self.buffer[start_idx + 7:]
self.buffer = self.buffer[start_idx + 7 :]
else:
# 检查是否以 <think> 的不完整前缀结尾
partial_match = False
@@ -90,7 +130,7 @@ class _ThinkTagStripper:
end_idx = self.buffer.find("</think>")
if end_idx != -1:
self.in_think_tag = False
self.buffer = self.buffer[end_idx + 8:]
self.buffer = self.buffer[end_idx + 8 :]
else:
# 检查是否以 </think> 的不完整前缀结尾
partial_match = False
@@ -129,10 +169,98 @@ class MoviePilotAgent:
self.channel = channel
self.source = source
self.username = username
self.reply_with_voice = False
self._tool_context: Dict[str, object] = {}
self.output_callback: Optional[Callable[[str], None]] = None
self.force_streaming = False
self.suppress_user_reply = False
self._streamed_output = ""
self._session_usage = _SessionUsageSnapshot()
# 流式token管理
self.stream_handler = StreamingHandler()
@staticmethod
def _coerce_int(value: Any) -> Optional[int]:
if value is None:
return None
try:
return int(value)
except (TypeError, ValueError):
return None
@classmethod
def _get_model_name(cls, llm: Any) -> Optional[str]:
return (
getattr(llm, "model", None)
or getattr(llm, "model_name", None)
or getattr(llm, "model_id", None)
)
@classmethod
def _get_context_window_tokens(cls, llm: Any) -> Optional[int]:
profile = getattr(llm, "profile", None)
if not profile:
return None
if isinstance(profile, dict):
return cls._coerce_int(
profile.get("max_input_tokens") or profile.get("input_token_limit")
)
return cls._coerce_int(
getattr(profile, "max_input_tokens", None)
or getattr(profile, "input_token_limit", None)
)
def _sync_model_profile(self, llm: Any) -> None:
model_name = self._get_model_name(llm)
context_window_tokens = self._get_context_window_tokens(llm)
if model_name:
self._session_usage.model = model_name
if context_window_tokens:
self._session_usage.context_window_tokens = context_window_tokens
def _record_usage(self, usage: dict[str, Any]) -> None:
if not usage:
return
model_name = usage.get("model")
context_window_tokens = self._coerce_int(usage.get("context_window_tokens"))
if model_name:
self._session_usage.model = model_name
if context_window_tokens:
self._session_usage.context_window_tokens = context_window_tokens
self._session_usage.model_call_count += 1
self._session_usage.last_updated_at = datetime.now()
if not usage.get("has_usage"):
return
input_tokens = self._coerce_int(usage.get("input_tokens")) or 0
output_tokens = self._coerce_int(usage.get("output_tokens")) or 0
total_tokens = self._coerce_int(usage.get("total_tokens"))
if total_tokens is None:
total_tokens = input_tokens + output_tokens
self._session_usage.last_input_tokens = input_tokens
self._session_usage.last_output_tokens = output_tokens
self._session_usage.last_total_tokens = total_tokens
self._session_usage.last_context_usage_ratio = usage.get("context_usage_ratio")
self._session_usage.total_input_tokens += input_tokens
self._session_usage.total_output_tokens += output_tokens
self._session_usage.total_tokens += total_tokens
def get_session_status(self) -> dict[str, Any]:
if not self._session_usage.model:
self._session_usage.model = settings.LLM_MODEL
if not self._session_usage.context_window_tokens:
self._session_usage.context_window_tokens = (
settings.LLM_MAX_CONTEXT_TOKENS * 1000
if settings.LLM_MAX_CONTEXT_TOKENS
else None
)
return self._session_usage.to_dict(self.session_id)
@property
def is_background(self) -> bool:
"""
@@ -150,7 +278,11 @@ class MoviePilotAgent:
- 其他情况不启用流式输出
"""
if self.is_background:
return self.force_streaming or callable(self.output_callback)
if self.reply_with_voice:
return False
if self.force_streaming or callable(self.output_callback):
return True
# 啰嗦模式下始终需要流式输出来捕获工具调用前的 Agent 文字
if settings.AI_AGENT_VERBOSE:
return True
@@ -203,6 +335,28 @@ class MoviePilotAgent:
return "".join(text_parts)
return str(content)
def _emit_output(self, text: str):
"""
输出当前流式文本到外部回调。
"""
if not text:
return
self._streamed_output += text
if not callable(self.output_callback):
return
try:
self.output_callback(self._streamed_output)
except Exception as e:
logger.debug(f"智能体输出回调失败: {e}")
def _handle_stream_text(self, text: str):
"""
统一处理一段可见流式文本,确保工具统计注入后的内容会同时进入
消息缓冲区和外部流式回调。
"""
emitted_text = self.stream_handler.emit(text)
self._emit_output(emitted_text)
def _initialize_tools(self) -> List:
"""
初始化工具列表
@@ -214,6 +368,7 @@ class MoviePilotAgent:
source=self.source,
username=self.username,
stream_handler=self.stream_handler,
agent_context=self._tool_context,
)
def _create_agent(self, streaming: bool = False):
@@ -223,10 +378,19 @@ class MoviePilotAgent:
"""
try:
# 系统提示词
system_prompt = prompt_manager.get_agent_prompt(channel=self.channel)
system_prompt = prompt_manager.get_agent_prompt(
channel=self.channel,
prefer_voice_reply=self.reply_with_voice,
)
# LLM 模型(用于 agent 执行)
llm = self._initialize_llm(streaming=streaming)
self._sync_model_profile(llm)
# 为中间件内部模型调用准备非流式 LLM避免与用户流式回复复用同一实例。
non_streaming_llm = (
llm if not streaming else self._initialize_llm(streaming=False)
)
# 工具列表
tools = self._initialize_tools()
@@ -235,21 +399,27 @@ class MoviePilotAgent:
middlewares = [
# Skills
SkillsMiddleware(
sources=[str(settings.CONFIG_PATH / "agent" / "skills")],
sources=[str(agent_runtime_manager.skills_dir)],
bundled_skills_dir=str(settings.ROOT_PATH / "skills"),
),
# Jobs 任务管理
JobsMiddleware(
sources=[str(settings.CONFIG_PATH / "agent" / "jobs")],
sources=[str(agent_runtime_manager.jobs_dir)],
),
# 记忆管理(自动扫描 agent 目录下所有 .md 文件)
MemoryMiddleware(memory_dir=str(settings.CONFIG_PATH / "agent")),
# 结构化 hooks
AgentHooksMiddleware(),
# 记忆管理(仅扫描 memory 目录,避免与根层 persona/workflow 配置混写)
MemoryMiddleware(memory_dir=str(agent_runtime_manager.memory_dir)),
# 活动日志
ActivityLogMiddleware(
activity_dir=str(settings.CONFIG_PATH / "agent" / "activity"),
activity_dir=str(agent_runtime_manager.activity_dir),
),
# 用量统计
UsageMiddleware(on_usage=self._record_usage),
# 上下文压缩
SummarizationMiddleware(model=llm, trigger=("fraction", 0.85)),
SummarizationMiddleware(
model=non_streaming_llm, trigger=("fraction", 0.85)
),
# 错误工具调用修复
PatchToolCallsMiddleware(),
]
@@ -258,7 +428,8 @@ class MoviePilotAgent:
if settings.LLM_MAX_TOOLS > 0:
middlewares.append(
LLMToolSelectorMiddleware(
model=llm, max_tools=settings.LLM_MAX_TOOLS
model=non_streaming_llm,
max_tools=settings.LLM_MAX_TOOLS,
)
)
@@ -273,30 +444,50 @@ class MoviePilotAgent:
logger.error(f"创建 Agent 失败: {e}")
raise e
async def process(self, message: str, images: List[str] = None) -> str:
async def process(
self,
message: str,
images: List[str] = None,
files: Optional[List[dict]] = None,
) -> str:
"""
处理用户消息,流式推理并返回 Agent 回复
"""
try:
logger.info(
f"Agent推理: session_id={self.session_id}, input={message}, images={len(images) if images else 0}"
f"Agent推理: session_id={self.session_id}, input={message}, "
f"images={len(images) if images else 0}, files={len(files) if files else 0}"
)
self._tool_context = {
"incoming_voice": self.reply_with_voice,
"user_reply_sent": False,
"reply_mode": None,
}
self._streamed_output = ""
# 获取历史消息
messages = memory_manager.get_agent_messages(
session_id=self.session_id, user_id=self.user_id
)
# 构建用户消息内容
if images:
content = []
if message:
content.append({"type": "text", "text": message})
for img in images:
content.append({"type": "image_url", "image_url": {"url": img}})
messages.append(HumanMessage(content=content))
else:
messages.append(HumanMessage(content=message))
# 构建结构化用户消息内容
request_payload = {
"message": message or "",
"images": [
{"index": index + 1, "type": "image"}
for index, _ in enumerate(images or [])
],
"files": files or [],
}
content = [
{
"type": "text",
"text": json.dumps(request_payload, ensure_ascii=False, indent=2),
}
]
for img in images or []:
content.append({"type": "image_url", "image_url": {"url": img}})
messages.append(HumanMessage(content=content))
# 执行推理
await self._execute_agent(messages)
@@ -304,6 +495,8 @@ class MoviePilotAgent:
except Exception as e:
error_message = f"处理消息时发生错误: {str(e)}"
logger.error(error_message)
if self.suppress_user_reply:
raise
await self.send_agent_message(error_message)
return error_message
@@ -318,10 +511,6 @@ class MoviePilotAgent:
:param on_token: 收到有效 token 时的回调
"""
stripper = _ThinkTagStripper()
# 非VERBOSE模式下跟踪当前langgraph_step以检测中间步骤的模型输出
# 当模型在工具调用之前输出的"计划/思考"文本会在检测到tool_call时被清除
current_model_step = -1
has_emitted_in_step = False
async for chunk in agent.astream(
messages,
@@ -335,25 +524,13 @@ class MoviePilotAgent:
if not token or not hasattr(token, "tool_call_chunks"):
continue
# 获取当前步骤信息
step = metadata.get("langgraph_step", -1) if metadata else -1
if token.tool_call_chunks:
# 检测到工具调用token说明当前步骤是中间步骤
# 非VERBOSE模式下清除该步骤之前输出的"计划/思考"文本
if not settings.AI_AGENT_VERBOSE and has_emitted_in_step:
self.stream_handler.reset()
stripper.reset()
has_emitted_in_step = False
# 清除 stripper 内部缓冲中可能残留的 <think> 标签中间状态
stripper.reset()
continue
# 以下处理纯文本tokentool_call_chunks为空
# 检测步骤变化重置步骤内emit跟踪
if step != current_model_step:
current_model_step = step
has_emitted_in_step = False
# 跳过模型思考/推理内容(如 DeepSeek R1 的 reasoning_content
additional = getattr(token, "additional_kwargs", None)
if additional and additional.get("reasoning_content"):
@@ -363,8 +540,7 @@ class MoviePilotAgent:
# content 可能是字符串或内容块列表,过滤掉思考类型的块
content = self._extract_text_content(token.content)
if content:
if stripper.process(content, on_token):
has_emitted_in_step = True
stripper.process(content, on_token)
stripper.flush(on_token)
@@ -404,9 +580,13 @@ class MoviePilotAgent:
agent=agent,
messages={"messages": messages},
config=agent_config,
on_token=self.stream_handler.emit,
on_token=self._handle_stream_text,
)
trailing_tool_summary = self.stream_handler.flush_pending_tool_summary()
if trailing_tool_summary:
self._emit_output(trailing_tool_summary)
# 停止流式输出,返回是否已通过流式编辑发送了所有内容及最终文本
(
all_sent_via_stream,
@@ -418,6 +598,18 @@ class MoviePilotAgent:
# 通过常规方式发送剩余内容
remaining_text = await self.stream_handler.take()
if remaining_text:
unsent_text = remaining_text
if self._streamed_output and remaining_text.startswith(
self._streamed_output
):
unsent_text = remaining_text[len(self._streamed_output) :]
if unsent_text:
self._emit_output(unsent_text)
if (
remaining_text
and not self.suppress_user_reply
and not self._tool_context.get("user_reply_sent")
):
await self.send_agent_message(remaining_text)
elif streamed_text:
# 流式输出已发送全部内容,但未记录到数据库,补充保存消息记录
@@ -447,7 +639,14 @@ class MoviePilotAgent:
final_text = text.strip()
break
if final_text:
if final_text and not self._streamed_output:
self._emit_output(final_text)
if (
final_text
and not self.suppress_user_reply
and not self._tool_context.get("user_reply_sent")
):
if self.is_background:
# 后台任务仅广播最终回复,带标题
await self.send_agent_message(
@@ -478,16 +677,12 @@ class MoviePilotAgent:
"""
通过原渠道发送消息给用户
"""
user_id = self.user_id
if self.user_id == "system":
user_id = None
await AgentChain().async_post_message(
Notification(
channel=self.channel,
source=self.source,
mtype=NotificationType.Agent,
userid=user_id,
userid=self.user_id,
username=self.username,
title=title,
text=message,
@@ -531,9 +726,11 @@ class _MessageTask:
user_id: str
message: str
images: Optional[List[str]] = None
files: Optional[List[dict]] = None
channel: Optional[str] = None
source: Optional[str] = None
username: Optional[str] = None
reply_with_voice: bool = False
class AgentManager:
@@ -558,6 +755,37 @@ class AgentManager:
# 重试整理缓冲区锁
self._retry_transfer_lock = asyncio.Lock()
def get_session_status(self, session_id: str) -> dict[str, Any]:
"""获取会话当前模型与 token 使用状态。"""
agent = self.active_agents.get(session_id)
if agent:
status = agent.get_session_status()
else:
status = {
"session_id": session_id,
"model": settings.LLM_MODEL,
"context_window_tokens": settings.LLM_MAX_CONTEXT_TOKENS * 1000
if settings.LLM_MAX_CONTEXT_TOKENS
else None,
"last_input_tokens": 0,
"last_output_tokens": 0,
"last_total_tokens": 0,
"last_context_usage_ratio": None,
"total_input_tokens": 0,
"total_output_tokens": 0,
"total_tokens": 0,
"model_call_count": 0,
"last_updated_at": None,
}
queue = self._session_queues.get(session_id)
status["pending_messages"] = queue.qsize() if queue else 0
status["is_processing"] = (
session_id in self._session_workers
and not self._session_workers[session_id].done()
)
return status
@staticmethod
async def initialize():
"""
@@ -596,9 +824,11 @@ class AgentManager:
user_id: str,
message: str,
images: List[str] = None,
files: Optional[List[dict]] = None,
channel: str = None,
source: str = None,
username: str = None,
reply_with_voice: bool = False,
) -> str:
"""
处理用户消息:将消息放入会话队列,按顺序依次处理。
@@ -609,9 +839,11 @@ class AgentManager:
user_id=user_id,
message=message,
images=images,
files=files,
channel=channel,
source=source,
username=username,
reply_with_voice=reply_with_voice,
)
# 获取或创建会话队列
@@ -709,8 +941,9 @@ class AgentManager:
agent.source = task.source
if task.username:
agent.username = task.username
agent.reply_with_voice = task.reply_with_voice
return await agent.process(task.message, images=task.images)
return await agent.process(task.message, images=task.images, files=task.files)
async def stop_current_task(self, session_id: str):
"""
@@ -773,6 +1006,69 @@ class AgentManager:
memory_manager.clear_memory(session_id, user_id)
logger.info(f"会话 {session_id} 的记忆已清空")
@staticmethod
def _build_heartbeat_prompt() -> str:
    """Render the heartbeat system-task prompt from the unified wake template source."""
    return agent_runtime_manager.load_runtime_config().render_system_task_message(
        "heartbeat"
    )
@staticmethod
def _build_retry_transfer_template_context(
history_ids: list[int],
) -> tuple[str, dict[str, int | str]]:
"""仅负责把失败重试任务的动态数据映射成模板变量。"""
is_batch = len(history_ids) > 1
task_type = (
"batch_transfer_failed_retry" if is_batch else "transfer_failed_retry"
)
template_context: dict[str, int | str] = {
"history_ids_csv": ", ".join(str(item) for item in history_ids),
"history_count": len(history_ids),
}
if not is_batch:
template_context["history_id"] = history_ids[0]
return task_type, template_context
@staticmethod
def _build_retry_transfer_prompt(
    history_ids: list[int],
) -> str:
    """Build the unified retry-transfer background prompt for the failed records."""
    task_type, context = AgentManager._build_retry_transfer_template_context(
        history_ids
    )
    return agent_runtime_manager.load_runtime_config().render_system_task_message(
        task_type,
        template_context=context,
    )
@staticmethod
def _build_manual_redo_template_context(history) -> dict[str, int | str]:
"""仅负责把整理历史对象映射成 SYSTEM_TASKS 需要的模板变量。"""
src_fileitem = history.src_fileitem or {}
source_path = src_fileitem.get("path") if isinstance(src_fileitem, dict) else ""
source_path = source_path or history.src or ""
season_episode = f"{history.seasons or ''}{history.episodes or ''}".strip()
# 这里故意只做数据整形,具体行为定义全部交给 SYSTEM_TASKS。
return {
"history_id": history.id,
"current_status": "success" if history.status else "failed",
"recognized_title": history.title or "unknown",
"media_type": history.type or "unknown",
"category": history.category or "unknown",
"year": history.year or "unknown",
"season_episode": season_episode or "unknown",
"source_path": source_path or "unknown",
"source_storage": history.src_storage or "local",
"destination_path": history.dest or "unknown",
"destination_storage": history.dest_storage or "unknown",
"transfer_mode": history.mode or "unknown",
"tmdbid": history.tmdbid or "none",
"doubanid": history.doubanid or "none",
"error_message": history.errmsg or "none",
}
async def heartbeat_check_jobs(self):
"""
心跳唤醒检查并执行待处理的定时任务Jobs
@@ -781,25 +1077,10 @@ class AgentManager:
try:
# 每次使用唯一的 session_id避免共享上下文
session_id = f"__agent_heartbeat_{uuid.uuid4().hex[:12]}__"
user_id = "system"
user_id = SYSTEM_INTERNAL_USER_ID
logger.info("智能体心跳唤醒:开始检查待处理任务...")
# 英文提示词,便于大模型理解
heartbeat_message = (
"[System Heartbeat] Check all jobs in your jobs directory and process pending tasks:\n"
"1. List all jobs with status 'pending' or 'in_progress'\n"
"2. For 'recurring' jobs, check 'last_run' to determine if it's time to run again\n"
"3. For 'once' jobs with status 'pending', execute them now\n"
"4. After executing each job, update its status, 'last_run' time, and execution log in the JOB.md file\n"
"5. If there are no pending jobs, do NOT generate any response\n\n"
"IMPORTANT: This is a background system task, NOT a user conversation. "
"Your final response will be broadcast as a notification. "
"Only output a brief completion summary listing each executed job and its result. "
"Do NOT include greetings, explanations, or conversational text. "
"If no jobs were executed, output nothing. "
"Respond in Chinese (中文)."
)
heartbeat_message = self._build_heartbeat_prompt()
await self.process_message(
session_id=session_id,
@@ -876,66 +1157,15 @@ class AgentManager:
return
session_id = f"__agent_retry_transfer_batch_{uuid.uuid4().hex[:8]}__"
user_id = "system"
user_id = SYSTEM_INTERNAL_USER_ID
ids_str = ", ".join(str(i) for i in history_ids)
logger.info(
f"智能体重试整理:开始批量处理失败记录 IDs=[{ids_str}] (group={group_key})"
)
if len(history_ids) == 1:
# 单条记录,使用原有逻辑
retry_message = (
f"[System Task - Transfer Failed Retry] A file transfer/organization has failed. "
f"Please use the 'transfer-failed-retry' skill to retry the failed transfer.\n\n"
f"Failed transfer history record ID: {history_ids[0]}\n\n"
f"Follow these steps:\n"
f"1. Use `query_transfer_history` with status='failed' to find the record with id={history_ids[0]} "
f"and understand the failure details (source path, error message, media info)\n"
f"2. Analyze the error message to determine the best retry strategy\n"
f"3. If the source file no longer exists, skip this retry and report that the file is missing\n"
f"4. Delete the failed history record using `delete_transfer_history` with history_id={history_ids[0]}\n"
f"5. Re-identify the media using `recognize_media` with the source file path\n"
f"6. If recognition fails, try `search_media` with keywords from the filename\n"
f"7. Re-transfer using `transfer_file` with the source path and any identified media info (tmdbid, media_type)\n"
f"8. Report the final result\n\n"
f"IMPORTANT: This is a background system task, NOT a user conversation. "
f"Your final response will be broadcast as a notification. "
f"Only output a brief result summary. "
f"Do NOT include greetings, explanations, or conversational text. "
f"Respond in Chinese (中文)."
)
else:
# 多条记录,使用批量处理逻辑
retry_message = (
f"[System Task - Batch Transfer Failed Retry] Multiple file transfers from the same source "
f"have failed. These files likely belong to the SAME media (e.g., multiple episodes of the same TV show). "
f"Please use the 'transfer-failed-retry' skill to retry them efficiently.\n\n"
f"Failed transfer history record IDs: {ids_str}\n"
f"Total failed records: {len(history_ids)}\n\n"
f"Follow these steps:\n"
f"1. Use `query_transfer_history` with status='failed' to find ALL records with these IDs "
f"and understand the failure details\n"
f"2. Since these files are likely from the same media, analyze the FIRST record to determine "
f"the media identity and the best retry strategy. The root cause is usually the same for all files.\n"
f"3. If the error is about media recognition (e.g., '未识别到媒体信息'), identify the media ONCE "
f"using `recognize_media` or `search_media`, then reuse that result (tmdbid, media_type) for all files\n"
f"4. For EACH failed record:\n"
f" a. Delete the failed history record using `delete_transfer_history`\n"
f" b. Re-transfer using `transfer_file` with the source path and the identified media info\n"
f"5. Report a summary of results (how many succeeded, how many failed)\n\n"
f"IMPORTANT OPTIMIZATION: These files share the same media identity. "
f"Do NOT call `recognize_media` or `search_media` repeatedly for each file. "
f"Identify the media ONCE, then apply to all files.\n\n"
f"IMPORTANT: This is a background system task, NOT a user conversation. "
f"Your final response will be broadcast as a notification. "
f"Only output a brief result summary. "
f"Do NOT include greetings, explanations, or conversational text. "
f"Respond in Chinese (中文)."
)
retry_message = self._build_retry_transfer_prompt(history_ids)
try:
await self.process_message(
session_id=session_id,
user_id=user_id,
@@ -968,6 +1198,48 @@ class AgentManager:
f"智能体重试整理失败 (IDs=[{ids_str}], group={group_key}): {e}"
)
@staticmethod
def _build_manual_redo_prompt(history) -> str:
    """Render the manual AI re-organize prompt for one transfer-history record."""
    context = AgentManager._build_manual_redo_template_context(history)
    return agent_runtime_manager.load_runtime_config().render_system_task_message(
        "manual_transfer_redo",
        template_context=context,
    )
async def manual_redo_transfer(
    self,
    history_id: int,
    output_callback: Optional[Callable[[str], None]] = None,
) -> None:
    """Run an AI-driven re-organize pass for a single transfer-history record.

    Spins up a throwaway agent session (streamed output only, user reply
    suppressed), feeds it the manual-redo prompt, and always tears the
    session down afterwards, including its short-lived memory.

    :param history_id: primary key of the transfer-history record
    :param output_callback: optional sink for streamed agent output
    :raises ValueError: when the history record does not exist
    """
    session_id = f"__agent_manual_redo_{history_id}_{uuid.uuid4().hex[:8]}__"
    user_id = SYSTEM_INTERNAL_USER_ID
    worker = MoviePilotAgent(
        session_id=session_id,
        user_id=user_id,
        channel=None,
        source=None,
        username=settings.SUPERUSER,
    )
    worker.output_callback = output_callback
    worker.force_streaming = True
    worker.suppress_user_reply = True
    try:
        record = TransferHistoryOper().get(history_id)
        if not record:
            raise ValueError(f"整理记录不存在: {history_id}")
        await worker.process(self._build_manual_redo_prompt(record))
    finally:
        # Tear down even on failure so the one-off session leaves no state behind.
        await worker.cleanup()
        memory_manager.clear_memory(session_id, user_id)
# 全局智能体管理器实例
agent_manager = AgentManager()

View File

@@ -1,6 +1,8 @@
import asyncio
import threading
from typing import Optional, Tuple
from typing import Any, Optional, Tuple
from fastapi.concurrency import run_in_threadpool
from app.chain import ChainBase
from app.log import logger
@@ -60,16 +62,30 @@ class StreamingHandler:
self._user_id: Optional[str] = None
self._username: Optional[str] = None
self._title: str = ""
# 非啰嗦模式下的待输出工具统计,等下一段文本到来时再统一补一句摘要
self._pending_tool_stats: dict[str, dict[str, Any]] = {}
def emit(self, token: str):
def emit(self, token: str) -> str:
"""
接收 LLM 流式 token积累到缓冲区。
如果存在待输出的工具统计,则会先补上一句摘要再追加 token。
"""
with self._lock:
emitted = token or ""
if self._pending_tool_stats:
summary = self._consume_pending_tool_summary_locked()
if summary:
if emitted:
emitted = f"{summary}{emitted.lstrip(chr(10))}"
else:
emitted = summary
# 如果存量消息结束是两个换行,则去掉新消息前面的换行,避免过多空行
if self._buffer.endswith("\n\n") and token.startswith("\n"):
token = token.lstrip("\n")
self._buffer += token
if self._buffer.endswith("\n\n") and emitted.startswith("\n"):
emitted = emitted.lstrip("\n")
self._buffer += emitted
return emitted
async def take(self) -> str:
"""
@@ -80,6 +96,8 @@ class StreamingHandler:
注意:流式渠道不调用此方法,工具消息直接 emit 到 buffer 中。
"""
self.flush_pending_tool_summary()
with self._lock:
if not self._buffer:
return ""
@@ -97,6 +115,7 @@ class StreamingHandler:
self._sent_text = ""
self._message_response = None
self._msg_start_offset = 0
self._pending_tool_stats = {}
def reset(self):
"""
@@ -110,6 +129,7 @@ class StreamingHandler:
self._buffer = ""
self._sent_text = ""
self._msg_start_offset = 0
self._pending_tool_stats = {}
async def start_streaming(
self,
@@ -139,6 +159,7 @@ class StreamingHandler:
self._sent_text = ""
self._message_response = None
self._msg_start_offset = 0
self._pending_tool_stats = {}
# 检查渠道是否支持消息编辑,不支持则仅收集 token 到 buffer不实时推送
if not self._can_stream():
@@ -174,13 +195,16 @@ class StreamingHandler:
# 取消定时任务
await self._cancel_flush_task()
# 将未落地的工具统计补入缓冲区,避免流式结束时丢失这段执行信息
self.flush_pending_tool_summary()
# 执行最后一次刷新
await self._flush()
# 检查是否所有缓冲内容都已发送
with self._lock:
# 当前消息的文本 = buffer 中从 _msg_start_offset 开始的部分
current_msg_text = self._buffer[self._msg_start_offset :]
current_msg_text = self._buffer[self._msg_start_offset:]
all_sent = (
self._message_response is not None
and self._sent_text
@@ -192,11 +216,172 @@ class StreamingHandler:
self._sent_text = ""
self._message_response = None
self._msg_start_offset = 0
self._pending_tool_stats = {}
if all_sent:
# 所有内容已通过流式发送,清空缓冲区
self._buffer = ""
return all_sent, final_text
def record_tool_call(
self,
tool_name: str,
tool_message: Optional[str] = None,
tool_kwargs: Optional[dict[str, Any]] = None,
):
"""
记录一次工具调用,供非啰嗦模式下延迟汇总输出。
"""
category, target = self._classify_tool_call(
tool_name=tool_name,
tool_message=tool_message,
tool_kwargs=tool_kwargs or {},
)
with self._lock:
bucket = self._pending_tool_stats.setdefault(
category,
{
"count": 0,
"targets": set(),
},
)
bucket["count"] += 1
if target:
bucket["targets"].add(str(target))
def flush_pending_tool_summary(self) -> str:
"""
将待输出的工具统计摘要补入缓冲区,并返回本次新增的摘要文本。
"""
with self._lock:
summary = self._consume_pending_tool_summary_locked()
if summary:
self._buffer += summary
return summary
@staticmethod
def _classify_tool_call(
tool_name: str,
tool_message: Optional[str],
tool_kwargs: dict[str, Any],
) -> tuple[str, Optional[str]]:
tool_name = (tool_name or "").strip().lower()
tool_message = (tool_message or "").strip()
tool_message_lower = tool_message.lower()
if tool_name == "read_file":
return "file_read", tool_kwargs.get("file_path")
if tool_name in {"write_file", "edit_file"}:
return "file_write", tool_kwargs.get("file_path")
if tool_name in {"list_directory", "query_directory_settings"}:
return "directory", tool_kwargs.get("path")
if tool_name == "browse_webpage":
return (
"web_browse",
tool_kwargs.get("url")
or tool_kwargs.get("target_url")
or tool_kwargs.get("path"),
)
if tool_name == "execute_command":
return "command", tool_kwargs.get("command")
if tool_name == "ask_user_choice":
return "interaction", tool_kwargs.get("message")
if tool_name.startswith("search_") or tool_name in {"get_search_results"}:
return (
"search",
tool_kwargs.get("query")
or tool_kwargs.get("title")
or tool_kwargs.get("keyword"),
)
if tool_name.startswith("query_") or tool_name.startswith("list_") or tool_name.startswith("get_"):
return "data_query", None
if tool_name.startswith(("add_", "update_", "delete_", "modify_", "run_")):
return "action", None
if tool_name in {
"recognize_media",
"scrape_metadata",
"transfer_file",
"test_site",
"send_message",
"send_local_file",
"send_voice_message",
}:
return "action", None
if "读取文件" in tool_message or "read file" in tool_message_lower:
return "file_read", tool_kwargs.get("file_path")
if (
"写入文件" in tool_message
or "编辑文件" in tool_message
or "write file" in tool_message_lower
or "edit file" in tool_message_lower
):
return "file_write", tool_kwargs.get("file_path")
if "目录" in tool_message or "directory" in tool_message_lower:
return "directory", tool_kwargs.get("path")
if "搜索" in tool_message or "search" in tool_message_lower:
return (
"search",
tool_kwargs.get("query")
or tool_kwargs.get("title")
or tool_kwargs.get("keyword"),
)
if "网页" in tool_message or "browser" in tool_message_lower or "webpage" in tool_message_lower:
return "web_browse", tool_kwargs.get("url")
if "命令" in tool_message or "command" in tool_message_lower:
return "command", tool_kwargs.get("command")
return "tool", None
def _consume_pending_tool_summary_locked(self) -> str:
if not self._pending_tool_stats:
return ""
parts = []
for category, bucket in self._pending_tool_stats.items():
value = bucket["count"]
if category in {"file_read", "file_write", "directory", "web_browse"} and bucket["targets"]:
value = len(bucket["targets"])
part = self._format_tool_stat(category, value)
if part:
parts.append(part)
self._pending_tool_stats = {}
if not parts:
return ""
summary = f"{''.join(parts)}"
visible_buffer = self._buffer.rstrip(" \t")
last_char = visible_buffer[-1:] if visible_buffer.strip() else ""
prefix = ""
if self._buffer and last_char != "\n":
prefix = "\n"
return f"{prefix}{summary}\n\n"
@staticmethod
def _format_tool_stat(category: str, count: int) -> str:
if count <= 0:
return ""
if category == "search":
return f"执行了 {count} 次搜索"
if category == "file_read":
return f"读取了 {count} 个文件"
if category == "file_write":
return f"修改了 {count} 个文件"
if category == "directory":
return f"查看了 {count} 个目录"
if category == "web_browse":
return f"浏览了 {count} 个网页"
if category == "command":
return f"执行了 {count} 条命令"
if category == "data_query":
return f"查询了 {count} 次数据"
if category == "action":
return f"执行了 {count} 次操作"
if category == "interaction":
return f"发起了 {count} 次交互"
return f"调用了 {count} 次工具"
def _can_stream(self) -> bool:
"""
检查当前渠道是否支持流式输出(消息编辑)
@@ -246,7 +431,7 @@ class StreamingHandler:
"""
with self._lock:
# 当前消息的文本 = buffer 中从 _msg_start_offset 开始的部分
current_text = self._buffer[self._msg_start_offset :]
current_text = self._buffer[self._msg_start_offset:]
if not current_text or current_text == self._sent_text:
# 没有新内容需要刷新
return
@@ -256,7 +441,8 @@ class StreamingHandler:
try:
if self._message_response is None:
# 第一次发送:发送新消息并获取 message_id
response = chain.send_direct_message(
response = await run_in_threadpool(
chain.send_direct_message,
Notification(
channel=self._channel,
source=self._source,
@@ -264,7 +450,7 @@ class StreamingHandler:
username=self._username,
title=self._title,
text=current_text,
)
),
)
if response and response.success and response.message_id:
self._message_response = response
@@ -291,13 +477,14 @@ class StreamingHandler:
)
with self._lock:
self._msg_start_offset += len(self._sent_text)
current_text = self._buffer[self._msg_start_offset :]
current_text = self._buffer[self._msg_start_offset:]
self._message_response = None
self._sent_text = ""
# 如果偏移后还有新内容,立即发送为新消息
if current_text:
response = chain.send_direct_message(
response = await run_in_threadpool(
chain.send_direct_message,
Notification(
channel=self._channel,
source=self._source,
@@ -305,7 +492,7 @@ class StreamingHandler:
username=self._username,
title=self._title,
text=current_text,
)
),
)
if response and response.success and response.message_id:
self._message_response = response
@@ -324,7 +511,8 @@ class StreamingHandler:
except (ValueError, KeyError):
return
success = chain.edit_message(
success = await run_in_threadpool(
chain.edit_message,
channel=channel_enum,
source=self._message_response.source,
message_id=self._message_response.message_id,
@@ -360,3 +548,11 @@ class StreamingHandler:
是否已经通过流式输出发送过消息(当前轮次)
"""
return self._message_response is not None
@property
def last_buffer_char(self) -> str:
"""
返回当前缓冲区最后一个字符;缓冲区为空时返回空字符串。
"""
with self._lock:
return self._buffer[-1:] if self._buffer else ""

View File

@@ -0,0 +1,68 @@
"""结构化 Agent hooks 中间件。"""
from collections.abc import Awaitable, Callable
from typing import Annotated, NotRequired, TypedDict
from langchain.agents.middleware.types import (
AgentMiddleware,
AgentState,
ContextT,
ModelRequest,
ModelResponse,
PrivateStateAttr, # noqa
ResponseT,
)
from langchain_core.runnables import RunnableConfig
from langgraph.runtime import Runtime
from app.agent.middleware.utils import append_to_system_message
from app.agent.runtime import agent_runtime_manager
class HooksState(AgentState):
    """Agent state extension carrying the rendered hooks prompt.

    ``hooks_prompt`` is marked as private state and is only set once per
    run (``abefore_agent`` skips rendering when the key is already present).
    """
    # Rendered pre/in/post hooks text; absent until the first abefore_agent call.
    hooks_prompt: NotRequired[Annotated[str, PrivateStateAttr]]
class HooksStateUpdate(TypedDict):
    """State-update payload returned by ``abefore_agent`` to store the hooks prompt."""
    # Rendered hooks text to merge into HooksState.
    hooks_prompt: str
class AgentHooksMiddleware(AgentMiddleware[HooksState, ContextT, ResponseT]):  # noqa
    """Inject structured pre/in/post hooks at fixed lifecycle points.

    The hooks prompt is rendered once per agent run (``abefore_agent``)
    and appended to the system message on every model call.
    """

    state_schema = HooksState

    async def abefore_agent(  # noqa
        self, state: HooksState, runtime: Runtime, config: RunnableConfig
    ) -> HooksStateUpdate | None:
        """Render the hooks prompt once; no-op when it is already in state."""
        if "hooks_prompt" in state:
            return None
        rendered = agent_runtime_manager.load_runtime_config().render_hooks_prompt()
        return HooksStateUpdate(hooks_prompt=rendered)

    def modify_request(self, request: ModelRequest[ContextT]) -> ModelRequest[ContextT]:  # noqa
        """Append the rendered hooks prompt to the request's system message."""
        prompt = request.state.get("hooks_prompt", "")  # noqa
        if not prompt:
            return request
        merged = append_to_system_message(request.system_message, prompt)
        return request.override(system_message=merged)

    async def awrap_model_call(
        self,
        request: ModelRequest[ContextT],
        handler: Callable[
            [ModelRequest[ContextT]], Awaitable[ModelResponse[ResponseT]]
        ],
    ) -> ModelResponse[ResponseT]:
        """Apply hooks injection, then delegate to the wrapped model call."""
        return await handler(self.modify_request(request))
__all__ = ["AgentHooksMiddleware"]

View File

@@ -124,34 +124,29 @@ Default memory file: {memory_file}
</agent_memory>
<memory_onboarding>
**IMPORTANT — First-time user detected!**
First-time user detected.
The memory directory is currently empty. This means this is likely the user's first interaction, or their preferences have been reset.
The memory directory is currently empty. This likely means the user has no saved long-term preferences yet.
**Your MANDATORY first action in this conversation:**
Before doing ANYTHING else (before answering questions, before calling tools, before performing any task), you MUST proactively greet the user warmly and ask them about their preferences so you can provide personalized service going forward. Specifically, ask about:
**Behavior requirements:**
- Do NOT interrupt the current task just to collect preferences.
- Do NOT proactively greet warmly, build rapport, or ask a long onboarding questionnaire.
- Default to a concise, professional style until the user states a preference.
- Only ask for preferences when they are directly useful for the current task, or when a short follow-up question at the end would clearly help future interactions.
1. **How to address the user** — Ask what name or nickname they'd like you to call them (e.g., a real name, a nickname, or a fun title). This is the top priority for building a personal connection.
2. **Communication style preference** — Do they prefer a cute/playful tone (with emojis), a formal/professional tone, a concise/minimalist style, or something else?
3. **Media preferences** — What types of media do they primarily care about? (e.g., movies, TV shows, anime, documentaries, etc.)
4. **Quality preferences** — Do they have preferred video quality (4K, 1080p), codecs (H.265, H.264), or subtitle language preferences?
5. **Any other special requests** — Anything else they'd like you to always keep in mind?
**What to collect when useful:**
- Preferred communication style
- Media interests
- Quality / codec / subtitle preferences
- Any standing rules the user wants you to follow
**After the user replies**, you MUST immediately:
1. Use the `write_file` tool to save ALL their preferences to the memory file at: `{memory_file}`
2. Format the memory file in clean Markdown with clear sections (e.g., `## User Profile`, `## Communication Style`, `## Media Preferences`, etc.)
3. The `## User Profile` section MUST include the user's preferred name/nickname at the top
4. Only AFTER saving the preferences, proceed to help with whatever the user originally asked about (if anything)
5. From this point on, always address the user by their preferred name/nickname in conversations
6. You may also create additional `.md` files in the memory directory (`{memory_dir}`) for different topics as needed.
**When the user provides lasting preferences**, you MUST promptly save them to `{memory_file}` using `write_file` or `edit_file`.
**If the user skips the preference questions** and directly asks you to do something:
- Go ahead and help them with their request first
- But still ask about their preferences naturally at the end of the interaction
- Save whatever you learn about them (implicit or explicit) to the memory file
**Example onboarding flow:**
The greeting should introduce yourself, explain this is the first meeting, and ask the above questions in a numbered list. Adapt the tone to your persona defined in the base system prompt.
**Memory format requirements:**
- Use clean Markdown with short sections.
- Record only durable preferences and working rules.
- Do NOT invent personal details or preferred names.
- Do NOT force use of a nickname or personalized greeting.
</memory_onboarding>
<memory_guidelines>
@@ -193,7 +188,8 @@ class MemoryMiddleware(AgentMiddleware[MemoryState, ContextT, ResponseT]): # no
支持多文件记忆组织:用户可以创建多个 `.md` 文件来按主题组织知识。
参数:
memory_dir: 记忆文件目录路径。
memory_dir: 记忆文件目录路径。建议使用独立的 `config/agent/memory`
目录,避免与 persona/workflow 等根层配置混写。
"""
state_schema = MemoryState
@@ -206,7 +202,7 @@ class MemoryMiddleware(AgentMiddleware[MemoryState, ContextT, ResponseT]): # no
"""初始化记忆中间件。
参数:
memory_dir: 记忆文件目录路径(例如,`"/config/agent"`)。
memory_dir: 记忆文件目录路径(例如,`"/config/agent/memory"`)。
该目录下所有 `.md` 文件都会被自动加载为记忆。
"""
self.memory_dir = memory_dir

View File

@@ -0,0 +1,184 @@
from collections.abc import Awaitable, Callable
from typing import Any
from langchain.agents.middleware.types import (
AgentMiddleware,
ContextT,
ModelRequest,
ModelResponse,
ResponseT,
)
from langchain_core.messages import AIMessage
from app.log import logger
class UsageMiddleware(AgentMiddleware):
    """Capture per-call model usage metadata and report it to the host session.

    Token counts are probed in several locations (``usage_metadata`` first,
    then keys inside ``response_metadata``) because different LLM backends
    expose usage under different names.  Reporting is best-effort and never
    interrupts the agent loop.
    """

    def __init__(
        self,
        *,
        on_usage: Callable[[dict[str, Any]], None] | None = None,
    ) -> None:
        """Initialize the middleware.

        :param on_usage: callback invoked with one usage dict per model call;
            when None or not callable, the middleware is a pass-through.
        """
        self.on_usage = on_usage

    @staticmethod
    def _coerce_int(value: Any) -> int | None:
        # Best-effort int conversion; None for missing or unconvertible values.
        if value is None:
            return None
        try:
            return int(value)
        except (TypeError, ValueError):
            return None

    @classmethod
    def _lookup_int(cls, container: Any, *keys: str) -> int | None:
        """Return the first present key from a mapping-like or attribute container.

        Tries ``container.get(key)`` for each key in order, then falls back
        to attribute access, coercing the first non-None hit to int.
        """
        if not container:
            return None
        getter = getattr(container, "get", None)
        if callable(getter):
            for key in keys:
                value = getter(key)
                if value is not None:
                    return cls._coerce_int(value)
        # Attribute access covers object-style usage containers.
        for key in keys:
            value = getattr(container, key, None)
            if value is not None:
                return cls._coerce_int(value)
        return None

    @classmethod
    def _extract_model_name(cls, model: Any) -> str | None:
        # Providers disagree on the attribute carrying the model identifier.
        return (
            getattr(model, "model", None)
            or getattr(model, "model_name", None)
            or getattr(model, "model_id", None)
        )

    @classmethod
    def _extract_context_window_tokens(cls, model: Any) -> int | None:
        """Read the model's max input-token window from its profile, if declared."""
        profile = getattr(model, "profile", None)
        if not profile:
            return None
        return cls._lookup_int(profile, "max_input_tokens", "input_token_limit")

    @classmethod
    def _extract_usage(cls, ai_message: AIMessage) -> dict[str, Any]:
        """Normalise token usage from an AIMessage into a uniform dict.

        Probes ``usage_metadata`` first, then provider-specific fallbacks in
        ``response_metadata`` (``prompt_tokens``/``completion_tokens`` and
        ``prompt_token_count``/``candidates_token_count`` style keys).
        ``has_usage`` records whether any real figure was found; counts
        default to 0 and the total falls back to input + output.
        """
        usage_metadata = getattr(ai_message, "usage_metadata", None)
        input_tokens = cls._lookup_int(usage_metadata, "input_tokens")
        output_tokens = cls._lookup_int(usage_metadata, "output_tokens")
        total_tokens = cls._lookup_int(usage_metadata, "total_tokens")
        response_metadata = getattr(ai_message, "response_metadata", None) or {}
        token_usage = (
            response_metadata.get("token_usage")
            or response_metadata.get("usage")
            or response_metadata.get("usage_metadata")
            or {}
        )
        if input_tokens is None:
            input_tokens = cls._lookup_int(
                token_usage,
                "prompt_tokens",
                "input_tokens",
            )
        if input_tokens is None:
            input_tokens = cls._lookup_int(
                response_metadata,
                "prompt_token_count",
                "input_tokens",
            )
        if output_tokens is None:
            output_tokens = cls._lookup_int(
                token_usage,
                "completion_tokens",
                "output_tokens",
            )
        if output_tokens is None:
            output_tokens = cls._lookup_int(
                response_metadata,
                "candidates_token_count",
                "output_tokens",
            )
        if total_tokens is None:
            total_tokens = cls._lookup_int(token_usage, "total_tokens")
        if total_tokens is None:
            total_tokens = cls._lookup_int(response_metadata, "total_token_count")
        has_usage = any(
            value is not None for value in (input_tokens, output_tokens, total_tokens)
        )
        resolved_input = input_tokens or 0
        resolved_output = output_tokens or 0
        resolved_total = (
            total_tokens
            if total_tokens is not None
            else resolved_input + resolved_output
        )
        return {
            "has_usage": has_usage,
            "input_tokens": resolved_input,
            "output_tokens": resolved_output,
            "total_tokens": resolved_total,
        }

    async def awrap_model_call(
        self,
        request: ModelRequest[ContextT],
        handler: Callable[
            [ModelRequest[ContextT]], Awaitable[ModelResponse[ResponseT]]
        ],
    ) -> ModelResponse[ResponseT]:
        """Run the model call, then report usage via ``on_usage`` (best effort)."""
        response = await handler(request)
        if not callable(self.on_usage):
            return response
        try:
            # Usage lives on the last AIMessage produced by this call.
            ai_message = next(
                (
                    message
                    for message in reversed(response.result)
                    if isinstance(message, AIMessage)
                ),
                None,
            )
            usage = (
                self._extract_usage(ai_message)
                if ai_message
                else {
                    "has_usage": False,
                    "input_tokens": 0,
                    "output_tokens": 0,
                    "total_tokens": 0,
                }
            )
            context_window_tokens = self._extract_context_window_tokens(request.model)
            context_usage_ratio = None
            if context_window_tokens and usage["has_usage"]:
                context_usage_ratio = usage["input_tokens"] / context_window_tokens
            self.on_usage(
                {
                    "model": self._extract_model_name(request.model),
                    "context_window_tokens": context_window_tokens,
                    "context_usage_ratio": context_usage_ratio,
                    **usage,
                }
            )
        except Exception as e:
            # Usage reporting must never break the agent loop; log and continue.
            logger.debug("记录模型 usage 失败: %s", e)
        return response
__all__ = ["UsageMiddleware"]

View File

@@ -1,62 +0,0 @@
You are an AI media assistant powered by MoviePilot. You specialize in managing home media ecosystems: searching for movies/TV shows, managing subscriptions, overseeing downloads, and organizing media libraries.
All your responses must be in **Chinese (中文)**.
You act as a proactive agent. Your goal is to fully resolve the user's media-related requests autonomously. Do not end your turn until the task is complete or you are blocked and require user feedback.
Core Capabilities:
1. Media Search & Recognition — Identify movies, TV shows, and anime; recognize media from fuzzy filenames or incomplete titles.
2. Subscription Management — Create rules for automated downloading; monitor trending content.
3. Download Control — Search torrents across trackers; filter by quality, codec, and release group.
4. System Status & Organization — Monitor downloads, server health, file transfers, renaming, and library cleanup.
<communication>
{verbose_spec}
- Tone: friendly, concise. Like a knowledgeable friend, not a corporate bot.
- Use emojis sparingly (1-3 per response): greetings, completions, errors.
- Be direct. NO unnecessary preamble, NO repeating user's words, NO explaining your thinking.
- Use Markdown for structured data. Use `inline code` for media titles/paths.
- Include key details (year, rating, resolution) but do NOT over-explain.
- Do not stop for approval on read-only operations. Only confirm before critical actions (starting downloads, deleting subscriptions).
- NOT a coding assistant. Do not offer code snippets.
- If user has set preferred communication style in memory, follow that strictly.
</communication>
<response_format>
- Responses MUST be short and punchy: one sentence for confirmations, brief list for search results.
- NO filler phrases like "Let me help you", "Here are the results", "I found..." — skip all unnecessary preamble.
- NO repeating what user said.
- NO narrating your internal reasoning.
- After task completion: one line summary only.
- When error occurs: brief acknowledgment + suggestion, then move on.
</response_format>
<flow>
1. Media Discovery: Identify exact media metadata (TMDB ID, Season/Episode) using search tools.
2. Context Checking: Verify current status (already in library? already subscribed?).
3. Action Execution: Perform the task with a brief status update only if the operation takes time.
4. Final Confirmation: State the result concisely.
</flow>
<tool_calling_strategy>
- Call independent tools in parallel whenever possible.
- If search results are ambiguous, use `query_media_detail` or `recognize_media` to clarify before proceeding.
- If `search_media` fails, fall back to `search_web` or `recognize_media`. Only ask the user when all automated methods are exhausted.
</tool_calling_strategy>
<media_management_rules>
1. Download Safety: Present found torrents (size, seeds, quality) and get explicit consent before downloading.
2. Subscription Logic: Check for the best matching quality profile based on user history or defaults.
3. Library Awareness: Check if content already exists in the library to avoid duplicates.
4. Error Handling: If a tool or site fails, briefly explain what went wrong and suggest an alternative.
</media_management_rules>
<markdown_spec>
Specific markdown rules:
{markdown_spec}
</markdown_spec>
<system_info>
{moviepilot_info}
</system_info>

View File

@@ -0,0 +1,37 @@
You are the MoviePilot agent runtime. Follow the injected root configuration to determine the active persona, workflow, and operator preferences.
All your responses must be in **Chinese (中文)**.
You act as a proactive agent. Your goal is to fully resolve the user's media-related requests autonomously. Do not end your turn until the task is complete or you are blocked and require user feedback.
<agent_runtime>
{runtime_sections}
</agent_runtime>
<communication_runtime>
{verbose_spec}
- Channel-aware formatting: Follow the capability rules below for Markdown, plain text, buttons, and voice replies.
{button_choice_spec}
- Voice replies: {voice_reply_spec}
- If the current channel supports image sending and an image would materially help, you may use the `send_message` tool with `image_url` to send it.
- If the current channel supports file sending and you need to return a local image or file for the user to download, use `send_local_file`.
</communication_runtime>
<core_capabilities>
1. Media Search and Recognition - Identify movies, TV shows, and anime; recognize media from fuzzy filenames or incomplete titles.
2. Subscription Management - Create rules for automated downloading and monitor trending content.
3. Download Control - Search torrents across trackers and filter by quality, codec, and release group.
4. System Status and Organization - Monitor downloads, server health, file transfers, renaming, and library cleanup.
5. Visual Input Handling - Users may attach images from supported channels; analyze them together with the text when relevant.
6. File Context Handling - User messages may arrive as structured JSON. Treat the `message` field as the user's text. Attachments appear in `files`; when `local_path` is present, use local file tools to inspect the uploaded file directly. When image input is disabled for the current model, user images may also be delivered through `files`.
</core_capabilities>
<markdown_spec>
Specific markdown rules:
{markdown_spec}
</markdown_spec>
<system_info>
{moviepilot_info}
</system_info>

View File

@@ -7,6 +7,7 @@ from typing import Dict
from app.core.config import settings
from app.log import logger
from app.agent.runtime import agent_runtime_manager
from app.schemas import (
ChannelCapability,
ChannelCapabilities,
@@ -50,14 +51,21 @@ class PromptManager:
logger.error(f"加载提示词失败: {prompt_name}, 错误: {e}")
raise
def get_agent_prompt(self, channel: str = None) -> str:
def get_agent_prompt(
self, channel: str = None, prefer_voice_reply: bool = False
) -> str:
"""
获取智能体提示词
:param channel: 消息渠道Telegram、微信、Slack等
:param prefer_voice_reply: 是否优先使用语音回复
:return: 提示词内容
"""
# 基础提示词
base_prompt = self.load_prompt("Agent Prompt.txt")
# 根层运行时配置由独立装配器负责,避免人格/工作流继续硬编码在单文件 prompt 中。
runtime_config = agent_runtime_manager.load_runtime_config()
runtime_sections = runtime_config.render_prompt_sections()
# 基础提示词只保留 MoviePilot 运行时和渠道能力相关约束。
base_prompt = self.load_prompt("System Core Prompt.txt")
# 识别渠道
markdown_spec = ""
@@ -73,26 +81,35 @@ class PromptManager:
caps = ChannelCapabilityManager.get_capabilities(msg_channel)
if caps:
markdown_spec = self._generate_formatting_instructions(caps)
button_choice_spec = self._generate_button_choice_instructions(msg_channel)
# 啰嗦模式
verbose_spec = ""
if not settings.AI_AGENT_VERBOSE:
verbose_spec = (
"\n\n[Important Instruction] STRICTLY ENFORCED: DO NOT output any conversational "
"text, thinking processes, or explanations before or during tool calls. Call tools "
"directly without any transitional phrases. "
"You MUST remain completely silent until the task is completely finished. "
"DO NOT output any content whatsoever until your final summary reply."
"\n\n[Important Instruction] STRICTLY ENFORCED: "
"If tools are needed, DO NOT output any conversational text, explanations, progress updates, "
"or acknowledgements before the first tool call or between tool calls. "
"Call tools directly without any transitional phrases. "
"You MUST remain completely silent until all required tools have finished and you have the final result. "
"Only then may you send one final user-facing reply. "
"DO NOT output any intermediate content whatsoever."
)
# MoviePilot系统信息
moviepilot_info = self._get_moviepilot_info()
voice_reply_spec = self._generate_voice_reply_instructions(
prefer_voice_reply=prefer_voice_reply
)
# 始终替换占位符,避免后续 .format() 时因残留花括号报 KeyError
base_prompt = base_prompt.format(
markdown_spec=markdown_spec,
verbose_spec=verbose_spec,
moviepilot_info=moviepilot_info,
voice_reply_spec=voice_reply_spec,
button_choice_spec=button_choice_spec,
runtime_sections=runtime_sections,
)
return base_prompt
@@ -166,6 +183,37 @@ class PromptManager:
instructions.append("- Links: Paste URLs directly as text.")
return "\n".join(instructions)
@staticmethod
def _generate_voice_reply_instructions(prefer_voice_reply: bool) -> str:
if not prefer_voice_reply:
return (
"- Voice replies: Use normal text replies by default. "
"Only call `send_voice_message` when spoken playback is clearly better than plain text."
)
return (
"- Current message context: The user sent a voice message.\n"
"- Reply preference: Prioritize calling `send_voice_message` for the main user-facing reply.\n"
"- Fallback: If voice is unavailable on the current channel, `send_voice_message` will fall back to text.\n"
"- Do not repeat the same full reply again after calling `send_voice_message`."
)
@staticmethod
def _generate_button_choice_instructions(
channel: MessageChannel = None,
) -> str:
if (
channel
and ChannelCapabilityManager.supports_buttons(channel)
and ChannelCapabilityManager.supports_callbacks(channel)
):
return (
"- User questions: If you need the user to choose from a few clear options, "
"call `ask_user_choice` to send button options. After the user clicks a button, "
"the selected value will come back as the user's next message. After calling this tool, "
"wait for the user's selection instead of repeating the question in plain text."
)
return "- User questions: When you truly need user input, ask briefly in plain text."
def clear_cache(self):
"""
清空缓存

721
app/agent/runtime.py Normal file
View File

@@ -0,0 +1,721 @@
"""Agent 根层运行时配置管理。"""
from __future__ import annotations
import re
import shutil
import threading
from dataclasses import dataclass, field
from pathlib import Path
from string import Formatter
from typing import Any, Iterable, Optional
import yaml
from app.core.config import settings
from app.log import logger
# File names of the root-level runtime configuration documents.
CURRENT_PERSONA_FILE = "CURRENT_PERSONA.md"
USER_PREFERENCES_FILE = "USER_PREFERENCES.md"
SYSTEM_TASKS_FILE = "SYSTEM_TASKS.md"
# Legacy predecessor of SYSTEM_TASKS.md; migrated into the same target path.
LEGACY_WAKE_FORMAT_FILE = "WAKE_FORMAT.md"
# Sub-directory names under the agent root directory (config/agent).
SYSTEM_RUNTIME_DIR = "runtime"
MEMORY_DIR = "memory"
SKILLS_DIR = "skills"
JOBS_DIR = "jobs"
ACTIVITY_DIR = "activity"
# Minimum schema version accepted in SYSTEM_TASKS.md frontmatter.
SYSTEM_TASKS_SCHEMA_VERSION = 2
# Root-level files that belong to the runtime config; anything else found at
# the root is treated as a legacy memory file and migrated to MEMORY_DIR.
ROOT_LEVEL_RUNTIME_FILES = {
    CURRENT_PERSONA_FILE,
    "AGENT_PROFILE.md",
    "AGENT_WORKFLOW.md",
    "AGENT_HOOKS.md",
    USER_PREFERENCES_FILE,
    SYSTEM_TASKS_FILE,
    LEGACY_WAKE_FORMAT_FILE,
}
# Matches a YAML frontmatter header ("--- ... ---") at the start of a document.
FRONTMATTER_PATTERN = re.compile(r"^---\s*\n(.*?)\n---\s*\n?", re.DOTALL)
class AgentRuntimeConfigError(ValueError):
    """Raised when the root-level runtime configuration fails to load or validate."""
@dataclass
class ParsedMarkdownDocument:
    """A Markdown document split into YAML frontmatter metadata and body text."""

    metadata: dict[str, Any]  # parsed frontmatter mapping ({} when absent)
    body: str  # document body with the frontmatter block stripped
@dataclass
class HookDefinition:
    """Structured execution hooks parsed from the hooks document's frontmatter."""

    path: Path  # source file the hooks were loaded from
    pre_task: list[str]  # rules applied before task execution starts
    in_task: list[str]  # rules applied during tool calls / fallback handling
    post_task: list[str]  # validation and wrap-up rules after the task
@dataclass
class SystemTaskTypeDefinition:
    """Definition of a single background system task type."""

    header: str  # leading prompt section; may contain {placeholders}
    objective: str  # task objective text; may contain {placeholders}
    context_title: Optional[str] = None  # title for the context section
    context_lines: list[str] = field(default_factory=list)  # context lines
    steps_title: Optional[str] = None  # title for the steps section
    steps: list[str] = field(default_factory=list)  # ordered step lines
    task_rules: list[str] = field(default_factory=list)  # task-specific rules
    empty_result: Optional[str] = None  # rule describing empty-result handling
@dataclass
class SystemTasksDefinition:
    """Unified source of background system task definitions (SYSTEM_TASKS.md)."""

    path: Path  # source file the definitions were loaded from
    version: int  # schema version declared in frontmatter
    shared_rules: list[str]  # rules appended to every rendered task prompt
    task_types: dict[str, SystemTaskTypeDefinition]  # task type key -> definition
@dataclass
class AgentRuntimeConfig:
    """Immutable snapshot of the root-level runtime configuration after one load."""

    # Directory this snapshot was loaded from (user runtime dir or bundled defaults).
    source_root: Path
    # Active persona name and the resolved source files behind it.
    active_persona: str
    current_persona_path: Path
    profile_path: Path
    workflow_path: Path
    hooks_path: Path
    user_preferences_path: Optional[Path]
    system_tasks_path: Path
    extra_context_paths: list[Path]
    # Markdown bodies (frontmatter already stripped).
    profile_text: str
    workflow_text: str
    user_preferences_text: str
    extra_contexts: list[tuple[Path, str]]
    # Structured documents parsed from their frontmatter.
    hooks: HookDefinition
    system_tasks: SystemTasksDefinition
    # Non-fatal validation findings collected during loading.
    warnings: list[str] = field(default_factory=list)
    # True when bundled defaults replaced a broken user configuration.
    used_fallback: bool = False

    def render_prompt_sections(self) -> str:
        """Render the root-config fragments injected into the system prompt.

        :return: Tagged sections (root config summary, profile, workflow,
            optional preferences, optional extra contexts) joined by newlines.
            Empty profile/workflow bodies are replaced with placeholder text;
            empty preferences/extra contexts are skipped entirely.
        """
        sections: list[str] = [
            "<agent_root_config>",
            f"- Active persona: `{self.active_persona}`",
            f"- Profile source: `{self.profile_path}`",
            f"- Workflow source: `{self.workflow_path}`",
        ]
        if self.user_preferences_path:
            sections.append(f"- Root preferences source: `{self.user_preferences_path}`")
        sections.append(f"- System task source: `{self.system_tasks_path}`")
        sections.append("</agent_root_config>")
        sections.append("")
        sections.append("<agent_profile>")
        sections.append(self.profile_text.strip() or "(No agent profile configured.)")
        sections.append("</agent_profile>")
        sections.append("")
        sections.append("<agent_workflow>")
        sections.append(self.workflow_text.strip() or "(No agent workflow configured.)")
        sections.append("</agent_workflow>")
        if self.user_preferences_text.strip():
            sections.append("")
            sections.append("<agent_user_preferences>")
            sections.append(self.user_preferences_text.strip())
            sections.append("</agent_user_preferences>")
        for path, text in self.extra_contexts:
            # Skip extra context files whose body is empty/whitespace.
            if not text.strip():
                continue
            sections.append("")
            sections.append(f'<agent_extra_context source="{path.name}">')
            sections.append(text.strip())
            sections.append("</agent_extra_context>")
        return "\n".join(sections).strip()

    def render_hooks_prompt(self) -> str:
        """Render the structured execution hooks as a prompt block.

        :return: An ``<agent_execution_hooks>`` block with numbered pre/in/post
            task hook lists (empty stages show a placeholder line).
        """
        blocks = [
            "<agent_execution_hooks>",
            f"- Hook source: `{self.hooks.path}`",
            "- These hooks are loaded structurally by the runtime and must be followed at the matching lifecycle stage.",
            "",
            "Pre-Task Hooks:",
            self._format_hook_list(self.hooks.pre_task),
            "",
            "In-Task Hooks:",
            self._format_hook_list(self.hooks.in_task),
            "",
            "Post-Task Hooks:",
            self._format_hook_list(self.hooks.post_task),
            "</agent_execution_hooks>",
        ]
        return "\n".join(blocks)

    def render_system_task_message(
        self,
        task_type: str,
        *,
        template_context: Optional[dict[str, Any]] = None,
        extra_rules: Optional[list[str]] = None,
    ) -> str:
        """Render the prompt for a background system task from its definition.

        :param task_type: Key into ``system_tasks.task_types``.
        :param template_context: Values substituted into ``{placeholders}``.
        :param extra_rules: Caller-supplied rules appended after shared/task rules.
        :raises AgentRuntimeConfigError: If ``task_type`` is unknown or a
            template references a variable missing from ``template_context``.
        :return: The fully rendered prompt text.
        """
        task_definition = self.system_tasks.task_types.get(task_type)
        if not task_definition:
            raise AgentRuntimeConfigError(f"未定义的后台系统任务类型: {task_type}")
        rendered_context = self._render_template_lines(
            task_definition.context_lines,
            template_context,
            task_type,
            "context_lines",
        )
        rendered_steps = self._render_template_lines(
            task_definition.steps,
            template_context,
            task_type,
            "steps",
        )
        rendered_task_rules = self._render_template_lines(
            task_definition.task_rules,
            template_context,
            task_type,
            "task_rules",
        )
        # Header and objective are mandatory; titled sections are optional.
        sections = [
            self._render_template_text(
                task_definition.header,
                template_context,
                task_type,
                "header",
            ).strip(),
            self._render_template_text(
                task_definition.objective,
                template_context,
                task_type,
                "objective",
            ).strip(),
        ]
        if rendered_context:
            sections.append(
                self._format_titled_lines(
                    task_definition.context_title or "Task context",
                    rendered_context,
                )
            )
        if rendered_steps:
            sections.append(
                self._format_titled_lines(
                    task_definition.steps_title or "Follow these steps",
                    rendered_steps,
                )
            )
        # Rule order: shared rules, empty-result rule, task rules, extra rules.
        rules = list(self.system_tasks.shared_rules)
        if task_definition.empty_result:
            rules.append(task_definition.empty_result)
        rules.extend(rendered_task_rules)
        if extra_rules:
            rules.extend(rule.strip() for rule in extra_rules if rule and rule.strip())
        if rules:
            sections.append(self._format_numbered_rules("IMPORTANT", rules))
        return "\n\n".join(section for section in sections if section).strip()

    @classmethod
    def _render_template_text(
        cls,
        text: str,
        template_context: Optional[dict[str, Any]],
        task_type: str,
        field_name: str,
    ) -> str:
        """Substitute ``{placeholders}`` in one template string.

        :raises AgentRuntimeConfigError: If the template references a variable
            not present in ``template_context``.
        """
        if not text:
            return ""
        formatter = Formatter()
        # Collect the placeholder names the template actually uses.
        required_fields = {
            placeholder_name
            for _, placeholder_name, _, _ in formatter.parse(text)
            if placeholder_name
        }
        # NOTE(review): when the text has no placeholders it is returned
        # verbatim, so escaped "{{" braces are NOT collapsed to "{" — unlike
        # the format_map path below. Confirm this asymmetry is intended.
        if not required_fields:
            return text
        context = cls._normalize_template_context(template_context)
        missing_fields = sorted(field for field in required_fields if field not in context)
        if missing_fields:
            raise AgentRuntimeConfigError(
                f"系统任务定义 `{task_type}` 的 `{field_name}` 缺少变量: "
                + ", ".join(f"`{field}`" for field in missing_fields)
            )
        # Uniform string substitution keeps the template file as the single
        # behavioral source of truth for background task copy.
        return text.format_map(context)

    @classmethod
    def _render_template_lines(
        cls,
        items: list[str],
        template_context: Optional[dict[str, Any]],
        task_type: str,
        field_name: str,
    ) -> list[str]:
        """Render each non-empty line; index in error names is the 1-based
        position in the ORIGINAL list (blank lines still consume an index)."""
        return [
            cls._render_template_text(
                item,
                template_context,
                task_type,
                f"{field_name}[{index}]",
            ).rstrip()
            for index, item in enumerate(items, start=1)
            if item and item.rstrip()
        ]

    @staticmethod
    def _normalize_template_context(
        template_context: Optional[dict[str, Any]],
    ) -> dict[str, str]:
        """Coerce all context keys/values to strings; None becomes ""."""
        if not template_context:
            return {}
        return {
            str(key): "" if value is None else str(value)
            for key, value in template_context.items()
        }

    @staticmethod
    def _format_hook_list(items: list[str]) -> str:
        """Number hook lines 1..n, or return a placeholder when empty."""
        if not items:
            return "(No hooks configured.)"
        return "\n".join(f"{index}. {item}" for index, item in enumerate(items, start=1))

    @staticmethod
    def _format_numbered_rules(title: str, items: list[str]) -> str:
        """Render ``title:`` followed by a 1..n numbered rule list."""
        return "\n".join(
            [f"{title}:"]
            + [f"{index}. {item}" for index, item in enumerate(items, start=1)]
        )

    @staticmethod
    def _format_titled_lines(title: str, items: list[str]) -> str:
        """Render ``title:`` followed by the non-empty lines verbatim."""
        cleaned = [item.rstrip() for item in items if item and item.rstrip()]
        return "\n".join([f"{title}:"] + cleaned)
class AgentRuntimeManager:
    """Manage the agent root config directory: layout, migration, validation,
    loading with fallback, and caching of the parsed runtime configuration."""

    def __init__(
        self,
        *,
        agent_root_dir: Optional[Path] = None,
        bundled_runtime_dir: Optional[Path] = None,
    ) -> None:
        """Set up directory paths and the config cache.

        :param agent_root_dir: Root directory override (defaults to
            ``settings.CONFIG_PATH / "agent"``).
        :param bundled_runtime_dir: Override for the bundled default config
            directory shipped next to this module.
        """
        self.agent_root_dir = agent_root_dir or (settings.CONFIG_PATH / "agent")
        self.runtime_dir = self.agent_root_dir / SYSTEM_RUNTIME_DIR
        self.memory_dir = self.agent_root_dir / MEMORY_DIR
        self.skills_dir = self.agent_root_dir / SKILLS_DIR
        self.jobs_dir = self.agent_root_dir / JOBS_DIR
        self.activity_dir = self.agent_root_dir / ACTIVITY_DIR
        self.bundled_runtime_dir = bundled_runtime_dir or (
            Path(__file__).parent / "runtime_defaults"
        )
        # Cache is guarded by a lock; keyed by a file-metadata signature.
        self._cache_lock = threading.Lock()
        self._cached_signature: Optional[tuple[tuple[str, int, int], ...]] = None
        self._cached_config: Optional[AgentRuntimeConfig] = None

    def ensure_layout(self) -> None:
        """Create directories, sync bundled defaults, and migrate legacy files.

        Order matters: legacy root runtime files are moved into place BEFORE
        defaults are synced (so defaults never clobber migrated user files),
        and memory migration runs last so runtime files are already excluded.
        """
        self.agent_root_dir.mkdir(parents=True, exist_ok=True)
        self.runtime_dir.mkdir(parents=True, exist_ok=True)
        self.memory_dir.mkdir(parents=True, exist_ok=True)
        self.skills_dir.mkdir(parents=True, exist_ok=True)
        self.jobs_dir.mkdir(parents=True, exist_ok=True)
        self.activity_dir.mkdir(parents=True, exist_ok=True)
        self._migrate_root_runtime_files()
        self._sync_bundled_runtime_defaults()
        self._migrate_root_memory_files()

    def load_runtime_config(self) -> AgentRuntimeConfig:
        """Load the runtime config, falling back to bundled defaults on error.

        :raises AgentRuntimeConfigError: Only if the bundled defaults are
            ALSO invalid (the user-directory failure alone is absorbed).
        :return: Cached config when file signatures are unchanged.
        """
        self.ensure_layout()
        signature = self._build_signature()
        with self._cache_lock:
            if self._cached_signature == signature and self._cached_config:
                return self._cached_config
            try:
                config = self._load_from_root(self.runtime_dir)
            except AgentRuntimeConfigError as err:
                logger.warning("Agent 根层配置无效,回退到内置默认配置: %s", err)
                config = self._load_from_root(self.bundled_runtime_dir)
                config.used_fallback = True
                config.warnings.insert(
                    0, f"用户运行时配置加载失败,已回退到内置默认配置: {err}"
                )
            self._cached_signature = signature
            self._cached_config = config
            return config

    def invalidate_cache(self) -> None:
        """Clear the cached config (for tests or manual refresh)."""
        with self._cache_lock:
            self._cached_signature = None
            self._cached_config = None

    def _build_signature(self) -> tuple[tuple[str, int, int], ...]:
        """Build a change-detection signature over runtime + bundled files.

        Each entry is (prefixed relative path, mtime_ns, size); any file
        change therefore invalidates the cached config.
        """
        entries: list[tuple[str, int, int]] = []
        for prefix, root in (("runtime", self.runtime_dir), ("bundled", self.bundled_runtime_dir)):
            if not root.exists():
                continue
            for path in sorted(root.rglob("*")):
                if not path.is_file():
                    continue
                stat = path.stat()
                relative = path.relative_to(root).as_posix()
                entries.append((f"{prefix}:{relative}", stat.st_mtime_ns, stat.st_size))
        return tuple(entries)

    def _sync_bundled_runtime_defaults(self) -> None:
        """Copy only MISSING default config files, never overwriting user edits."""
        if not self.bundled_runtime_dir.exists():
            return
        for path in sorted(self.bundled_runtime_dir.rglob("*")):
            relative = path.relative_to(self.bundled_runtime_dir)
            target = self.runtime_dir / relative
            if path.is_dir():
                target.mkdir(parents=True, exist_ok=True)
                continue
            if target.exists():
                continue
            target.parent.mkdir(parents=True, exist_ok=True)
            shutil.copy2(path, target)
            logger.info("已同步默认 Agent 运行时文件: %s", target)

    def _migrate_root_runtime_files(self) -> None:
        """Move legacy RFC files from `config/agent` root into the runtime tree.

        SYSTEM_TASKS and the legacy WAKE_FORMAT file map to the SAME target;
        dict insertion order makes SYSTEM_TASKS win when both exist (the
        second move is skipped because the target already exists).
        """
        migration_targets = {
            CURRENT_PERSONA_FILE: self.runtime_dir / CURRENT_PERSONA_FILE,
            USER_PREFERENCES_FILE: self.runtime_dir / USER_PREFERENCES_FILE,
            SYSTEM_TASKS_FILE: self.runtime_dir / "system_tasks" / SYSTEM_TASKS_FILE,
            LEGACY_WAKE_FORMAT_FILE: self.runtime_dir / "system_tasks" / SYSTEM_TASKS_FILE,
            "AGENT_PROFILE.md": self.runtime_dir / "personas" / "default" / "AGENT_PROFILE.md",
            "AGENT_WORKFLOW.md": self.runtime_dir / "personas" / "default" / "AGENT_WORKFLOW.md",
            "AGENT_HOOKS.md": self.runtime_dir / "personas" / "default" / "AGENT_HOOKS.md",
        }
        for filename, target in migration_targets.items():
            source = self.agent_root_dir / filename
            # Never overwrite an already-migrated target.
            if not source.exists() or target.exists():
                continue
            target.parent.mkdir(parents=True, exist_ok=True)
            source.rename(target)
            logger.info("已迁移旧版 Agent 根配置文件: %s -> %s", source, target)

    def _migrate_root_memory_files(self) -> None:
        """Move legacy root-level memory *.md files into `config/agent/memory`."""
        for path in sorted(self.agent_root_dir.glob("*.md")):
            # Runtime config files are handled by _migrate_root_runtime_files.
            if path.name in ROOT_LEVEL_RUNTIME_FILES:
                continue
            target = self.memory_dir / path.name
            if target.exists():
                continue
            path.rename(target)
            logger.info("已迁移旧版 Agent memory 文件: %s -> %s", path, target)

    def _load_from_root(self, root: Path) -> AgentRuntimeConfig:
        """Load and validate a full runtime config snapshot from one root dir.

        :raises AgentRuntimeConfigError: On any missing file, bad frontmatter,
            or schema violation.
        """
        current_persona_path = root / CURRENT_PERSONA_FILE
        current_doc = self._read_markdown(current_persona_path)
        current_meta = current_doc.metadata
        active_persona = str(current_meta.get("active_persona") or "default").strip()
        if not active_persona:
            raise AgentRuntimeConfigError("CURRENT_PERSONA.md 缺少 active_persona")
        # Resolve all referenced documents relative to the config root.
        profile_path = self._resolve_required_path(root, current_meta, "profile")
        workflow_path = self._resolve_required_path(root, current_meta, "workflow")
        hooks_path = self._resolve_required_path(root, current_meta, "hooks")
        system_tasks_path = self._resolve_required_path(root, current_meta, "system_tasks")
        user_preferences_path = self._resolve_optional_path(
            root, current_meta.get("user_preferences")
        )
        extra_context_paths = self._resolve_optional_paths(
            root, current_meta.get("extra_context_files", [])
        )
        profile_doc = self._read_markdown(profile_path)
        workflow_doc = self._read_markdown(workflow_path)
        hooks_doc = self._read_markdown(hooks_path)
        system_tasks_doc = self._read_markdown(system_tasks_path)
        # Preferences are optional: a missing file degrades to an empty doc.
        preferences_doc = (
            self._read_markdown(user_preferences_path)
            if user_preferences_path and user_preferences_path.exists()
            else ParsedMarkdownDocument(metadata={}, body="")
        )
        extra_contexts = [
            (path, self._read_markdown(path).body)
            for path in extra_context_paths
        ]
        hooks = self._parse_hooks_document(hooks_path, hooks_doc)
        system_tasks = self._parse_system_tasks_document(
            system_tasks_path,
            system_tasks_doc,
        )
        warnings = self._validate_runtime_config(
            current_meta=current_meta,
            profile_path=profile_path,
            workflow_path=workflow_path,
            hooks_path=hooks_path,
            user_preferences_path=user_preferences_path,
            system_tasks_path=system_tasks_path,
            extra_context_paths=extra_context_paths,
            profile_text=profile_doc.body,
            workflow_text=workflow_doc.body,
            preferences_text=preferences_doc.body,
        )
        return AgentRuntimeConfig(
            source_root=root,
            active_persona=active_persona,
            current_persona_path=current_persona_path,
            profile_path=profile_path,
            workflow_path=workflow_path,
            hooks_path=hooks_path,
            user_preferences_path=user_preferences_path,
            system_tasks_path=system_tasks_path,
            extra_context_paths=extra_context_paths,
            profile_text=profile_doc.body,
            workflow_text=workflow_doc.body,
            user_preferences_text=preferences_doc.body,
            extra_contexts=extra_contexts,
            hooks=hooks,
            system_tasks=system_tasks,
            warnings=warnings,
        )

    @staticmethod
    def _read_markdown(path: Path) -> ParsedMarkdownDocument:
        """Read a Markdown file and split optional YAML frontmatter from its body.

        :raises AgentRuntimeConfigError: On missing file, read failure, YAML
            parse failure, or non-mapping frontmatter.
        """
        if not path.exists():
            raise AgentRuntimeConfigError(f"缺少配置文件: {path}")
        try:
            content = path.read_text(encoding="utf-8")
        except Exception as err:  # noqa: BLE001
            raise AgentRuntimeConfigError(f"读取配置文件失败 {path}: {err}") from err
        metadata: dict[str, Any] = {}
        body = content
        match = FRONTMATTER_PATTERN.match(content)
        if match:
            try:
                metadata = yaml.safe_load(match.group(1)) or {}
            except yaml.YAMLError as err:
                raise AgentRuntimeConfigError(f"YAML frontmatter 解析失败 {path}: {err}") from err
            if not isinstance(metadata, dict):
                raise AgentRuntimeConfigError(f"frontmatter 必须是映射类型: {path}")
            body = content[match.end():]
        return ParsedMarkdownDocument(metadata=metadata, body=body.strip())

    @staticmethod
    def _resolve_required_path(root: Path, metadata: dict[str, Any], field_name: str) -> Path:
        """Resolve a mandatory path field; raise when missing or blank."""
        raw = metadata.get(field_name)
        if not raw or not str(raw).strip():
            raise AgentRuntimeConfigError(f"CURRENT_PERSONA.md 缺少必填字段 `{field_name}`")
        return AgentRuntimeManager._resolve_relative_path(root, str(raw))

    @staticmethod
    def _resolve_optional_path(root: Path, raw: Any) -> Optional[Path]:
        """Resolve an optional path field; None when missing or blank."""
        if not raw or not str(raw).strip():
            return None
        return AgentRuntimeManager._resolve_relative_path(root, str(raw))

    @staticmethod
    def _resolve_optional_paths(root: Path, values: Any) -> list[Path]:
        """Resolve a list-valued path field; must be a list when present."""
        if not values:
            return []
        if not isinstance(values, list):
            raise AgentRuntimeConfigError("extra_context_files 必须是数组")
        return [AgentRuntimeManager._resolve_relative_path(root, str(value)) for value in values]

    @staticmethod
    def _resolve_relative_path(root: Path, value: str) -> Path:
        """Resolve `value` against `root` unless it is already absolute.

        NOTE(review): absolute inputs are returned without .resolve(), while
        relative ones are resolved — confirm symlink handling is intended.
        """
        candidate = Path(value)
        return candidate if candidate.is_absolute() else (root / candidate).resolve()

    @staticmethod
    def _normalize_string_list(values: Any, field_name: str) -> list[str]:
        """Coerce a frontmatter list to stripped non-empty strings.

        :raises AgentRuntimeConfigError: If `values` is present but not a list.
        """
        if values is None:
            return []
        if not isinstance(values, list):
            raise AgentRuntimeConfigError(f"{field_name} 必须是字符串数组")
        normalized: list[str] = []
        for value in values:
            text = str(value).strip()
            if text:
                normalized.append(text)
        return normalized

    def _parse_hooks_document(
        self, path: Path, document: ParsedMarkdownDocument
    ) -> HookDefinition:
        """Parse pre/in/post task hooks; at least one stage must be non-empty."""
        pre_task = self._normalize_string_list(document.metadata.get("pre_task"), "pre_task")
        in_task = self._normalize_string_list(document.metadata.get("in_task"), "in_task")
        post_task = self._normalize_string_list(
            document.metadata.get("post_task"), "post_task"
        )
        if not (pre_task or in_task or post_task):
            raise AgentRuntimeConfigError(f"{path} 未定义任何结构化 hooks")
        return HookDefinition(
            path=path,
            pre_task=pre_task,
            in_task=in_task,
            post_task=post_task,
        )

    def _parse_system_tasks_document(
        self, path: Path, document: ParsedMarkdownDocument
    ) -> SystemTasksDefinition:
        """Parse the background system task definition file.

        :raises AgentRuntimeConfigError: On stale schema version, missing
            shared_rules/task_types, or malformed task entries.
        """
        version = self._normalize_positive_int(
            document.metadata.get("version"),
            "version",
            default=1,
        )
        # Reject documents older than the supported schema version.
        if version < SYSTEM_TASKS_SCHEMA_VERSION:
            raise AgentRuntimeConfigError(
                f"{path} 的 version={version} 过旧,"
                f"当前要求 SYSTEM_TASKS schema v{SYSTEM_TASKS_SCHEMA_VERSION} 或更高版本"
            )
        shared_rules = self._normalize_string_list(
            document.metadata.get("shared_rules"), "shared_rules"
        )
        if not shared_rules:
            raise AgentRuntimeConfigError(f"{path} 缺少 shared_rules")
        raw_task_types = document.metadata.get("task_types")
        if not isinstance(raw_task_types, dict) or not raw_task_types:
            raise AgentRuntimeConfigError(f"{path} 缺少 task_types 映射")
        task_types: dict[str, SystemTaskTypeDefinition] = {}
        for key, raw in raw_task_types.items():
            if not isinstance(raw, dict):
                raise AgentRuntimeConfigError(f"task_types.{key} 必须是映射")
            header = str(raw.get("header") or "").strip()
            objective = str(raw.get("objective") or "").strip()
            if not header or not objective:
                raise AgentRuntimeConfigError(
                    f"task_types.{key} 缺少 header 或 objective"
                )
            context_lines = self._normalize_string_list(
                raw.get("context_lines"),
                f"task_types.{key}.context_lines",
            )
            steps = self._normalize_string_list(
                raw.get("steps"),
                f"task_types.{key}.steps",
            )
            task_rules = self._normalize_string_list(
                raw.get("task_rules"),
                f"task_types.{key}.task_rules",
            )
            # Optional string fields normalize blank to None.
            empty_result = str(raw.get("empty_result") or "").strip() or None
            context_title = str(raw.get("context_title") or "").strip() or None
            steps_title = str(raw.get("steps_title") or "").strip() or None
            task_types[str(key)] = SystemTaskTypeDefinition(
                header=header,
                objective=objective,
                context_title=context_title,
                context_lines=context_lines,
                steps_title=steps_title,
                steps=steps,
                task_rules=task_rules,
                empty_result=empty_result,
            )
        return SystemTasksDefinition(
            path=path,
            version=version,
            shared_rules=shared_rules,
            task_types=task_types,
        )

    @staticmethod
    def _normalize_positive_int(
        value: Any,
        field_name: str,
        *,
        default: int,
    ) -> int:
        """Coerce a frontmatter value to a positive int, or use `default`.

        :raises AgentRuntimeConfigError: If the value is non-numeric or <= 0.
        """
        if value in (None, ""):
            return default
        try:
            normalized = int(value)
        except (TypeError, ValueError) as err:
            raise AgentRuntimeConfigError(f"{field_name} 必须是正整数") from err
        if normalized <= 0:
            raise AgentRuntimeConfigError(f"{field_name} 必须是正整数")
        return normalized

    def _validate_runtime_config(
        self,
        *,
        current_meta: dict[str, Any],
        profile_path: Path,
        workflow_path: Path,
        hooks_path: Path,
        user_preferences_path: Optional[Path],
        system_tasks_path: Path,
        extra_context_paths: list[Path],
        profile_text: str,
        workflow_text: str,
        preferences_text: str,
    ) -> list[str]:
        """Produce non-fatal warnings: duplicate file references and
        occurrences of phrases declared deprecated in CURRENT_PERSONA.md."""
        warnings: list[str] = []
        required_paths = [profile_path, workflow_path, hooks_path, system_tasks_path]
        if user_preferences_path:
            required_paths.append(user_preferences_path)
        duplicates = self._find_duplicate_paths(required_paths + extra_context_paths)
        if duplicates:
            warnings.append(
                "检测到重复引用的根层配置文件: "
                + ", ".join(path.as_posix() for path in duplicates)
            )
        deprecated_phrases = self._normalize_string_list(
            current_meta.get("deprecated_phrases"), "deprecated_phrases"
        )
        if deprecated_phrases:
            scan_targets = {
                "profile": profile_text,
                "workflow": workflow_text,
                "user_preferences": preferences_text,
            }
            for phrase in deprecated_phrases:
                for target_name, text in scan_targets.items():
                    if phrase and phrase in text:
                        warnings.append(
                            f"检测到已废弃短语 `{phrase}` 仍出现在 {target_name}"
                        )
        return warnings

    @staticmethod
    def _find_duplicate_paths(paths: Iterable[Path]) -> list[Path]:
        """Return resolved paths referenced more than once, each listed once."""
        seen: set[Path] = set()
        duplicates: list[Path] = []
        for path in paths:
            resolved = path.resolve()
            if resolved in seen and resolved not in duplicates:
                duplicates.append(resolved)
            seen.add(resolved)
        return duplicates
# Module-level singleton shared across the app (e.g. imported by the prompt manager).
agent_runtime_manager = AgentRuntimeManager()

View File

@@ -0,0 +1,24 @@
---
version: 1
active_persona: default
profile: personas/default/AGENT_PROFILE.md
workflow: personas/default/AGENT_WORKFLOW.md
hooks: personas/default/AGENT_HOOKS.md
user_preferences: USER_PREFERENCES.md
system_tasks: system_tasks/SYSTEM_TASKS.md
extra_context_files: []
deprecated_phrases: []
---
# CURRENT_PERSONA
当前激活人格:`default`
加载顺序固定如下:
1. `AGENT_PROFILE.md`
2. `AGENT_WORKFLOW.md`
3. `AGENT_HOOKS.md`
4. `USER_PREFERENCES.md`
5. `SYSTEM_TASKS.md`
如果需要扩展额外上下文,请使用 `extra_context_files` 显式声明,而不是把额外规则散落到 memory 中。

View File

@@ -0,0 +1,10 @@
---
version: 1
---
# USER_PREFERENCES
这是根层的运维偏好文件,不是用户长期记忆。
- 这里只放稳定的系统级输出规则或部署方偏好。
- 用户在对话中形成的长期习惯,仍应写入 `config/agent/memory/*.md`
- 默认保持精简,避免与 `AGENT_PROFILE.md` 和 `AGENT_WORKFLOW.md` 重复。

View File

@@ -0,0 +1,26 @@
---
version: 1
pre_task:
- Identify whether the request is a normal user conversation or a background system task before choosing a workflow.
- Classify intent before acting, then prefer an existing skill or dedicated workflow over ad-hoc prompting.
- Check read-only context first so the final action is based on current library, subscription, or history state.
- Only stop for confirmation when the next action is destructive, high-impact, or user-facing.
- Keep the final delivery target explicit before calling tools.
in_task:
- Execute in small, outcome-oriented steps and prefer tool calls over long explanations when the task is actionable.
- Reuse known media identity, prior tool results, and shared context instead of repeating expensive recognition or search calls.
- When a tool fails, try one narrower fallback path before escalating to the user.
- Keep intermediate user-facing output minimal; when verbose mode is disabled, stay silent until the final result.
- Treat progress reporting as task-specific glue, not a shared abstraction to leak into every tool.
post_task:
- Perform the minimum validation needed to confirm the result actually landed.
- Summarize only the outcome, key media facts, and the remaining blocker if something still failed.
- If the task established a reusable workflow, prefer encoding it in skills or root config instead of relying on prompt residue.
---
# AGENT_HOOKS
这些 hooks 由运行时结构化加载,不依赖自由文本约定。
- `pre_task` 对应开始执行前的统一检查点。
- `in_task` 对应工具调用和失败降级阶段。
- `post_task` 对应最小验证与收口阶段。

View File

@@ -0,0 +1,27 @@
---
version: 1
---
# AGENT_PROFILE
- Identity: You are an AI media assistant powered by MoviePilot. You specialize in managing home media ecosystems: searching for movies and TV shows, managing subscriptions, overseeing downloads, and organizing media libraries.
- Tone: professional, concise, restrained.
- Be direct. NO unnecessary preamble, NO repeating user's words, NO explaining your thinking.
- Prioritize task progress over conversation. Answer only what is necessary to move the task forward.
- Do NOT flatter the user, praise the question, or use overly eager service phrases.
- Do NOT use emojis, exclamation marks, cute language, or excessive apology.
- Prefer short declarative sentences. Default to one or two short paragraphs; use lists only when they improve scanability.
- Use Markdown for structured data. Use `inline code` for media titles and paths.
- Include key details such as year, rating, and resolution, but do NOT over-explain.
- Do not stop for approval on read-only operations. Only confirm before critical actions such as starting downloads or deleting subscriptions.
- NOT a coding assistant. Do not offer code snippets.
- If user has set preferred communication style in memory, follow that strictly.
# RESPONSE_FORMAT
- Responses MUST be short and punchy: one sentence for confirmations, brief list for search results.
- NO filler phrases like "Let me help you", "Here are the results", "I found..." - skip all unnecessary preamble.
- NO repeating what user said.
- NO narrating your internal reasoning.
- NO praise, emotional cushioning, or unnecessary politeness padding.
- After task completion: one line summary only.
- When error occurs: brief acknowledgment plus suggestion, then move on.

View File

@@ -0,0 +1,25 @@
---
version: 1
---
# AGENT_WORKFLOW
## FLOW
1. Media Discovery: Identify exact media metadata such as TMDB ID and Season or Episode using search tools.
2. Context Checking: Verify current status such as whether the media is already in the library or already subscribed.
3. Action Execution: Perform the task with a brief status update only if the operation takes time.
4. Final Confirmation: State the result concisely.
## TOOL_CALLING_STRATEGY
- Call independent tools in parallel whenever possible.
- If search results are ambiguous, use `query_media_detail` or `recognize_media` to clarify before proceeding.
- If `search_media` fails, fall back to `search_web` or `recognize_media`. Only ask the user when all automated methods are exhausted.
## MEDIA_MANAGEMENT_RULES
1. Download Safety: Present found torrents with size, seeds, and quality, then get explicit consent before downloading.
2. Subscription Logic: Check for the best matching quality profile based on user history or defaults.
3. Library Awareness: Check if content already exists in the library to avoid duplicates.
4. Error Handling: If a tool or site fails, briefly explain what went wrong and suggest an alternative.
5. TV Subscription Rule: When calling `add_subscribe` for a TV show, omitting `season` means subscribe to season 1 only. To subscribe multiple seasons or the full series, call `add_subscribe` separately for each season.

View File

@@ -0,0 +1,108 @@
---
version: 2
shared_rules:
- This is a background system task, NOT a user conversation.
- Your final response will be broadcast as a notification.
- Do NOT include greetings, explanations, or conversational text.
- Respond in Chinese (中文).
task_types:
heartbeat:
header: "[System Heartbeat]"
objective: "Check all jobs in your jobs directory and process pending tasks."
steps_title: "Follow these steps"
steps:
- "List all jobs with status 'pending' or 'in_progress'."
- "For 'recurring' jobs, check 'last_run' to determine if it's time to run again."
- "For 'once' jobs with status 'pending', execute them now."
- "After executing each job, update its status, 'last_run' time, and execution log in the JOB.md file."
empty_result: "If no jobs were executed, output nothing."
health_check:
header: "[System Health Check]"
objective: "Verify that the agent execution pipeline is alive."
steps_title: "Follow these steps"
steps:
- "Verify that runtime config, tools, and jobs can all be accessed normally."
- "If a real issue is detected, report the failing subsystem and the immediate blocking reason."
empty_result: "If there is nothing meaningful to report, output OK only."
transfer_failed_retry:
header: "[System Task - Transfer Failed Retry]"
objective: "A file transfer or organization has failed. Please use the `transfer-failed-retry` skill to retry the failed transfer."
context_title: "Task context"
context_lines:
- "Failed transfer history record IDs: {history_ids_csv}"
- "Total failed records: {history_count}"
steps_title: "Follow these steps"
steps:
- "Use `query_transfer_history` with status='failed' to find the record with id={history_id} and understand the failure details such as source path, error message, and media info."
- "Analyze the error message to determine the best retry strategy."
- "If the source file no longer exists, skip this retry and report that the file is missing."
- "Delete the failed history record using `delete_transfer_history` with history_id={history_id}."
- "Re-identify the media using `recognize_media` with the source file path."
- "If recognition fails, try `search_media` with keywords from the filename."
- "Re-transfer using `transfer_file` with the source path and any identified media info such as tmdbid and media_type."
- "Report the final result."
batch_transfer_failed_retry:
header: "[System Task - Batch Transfer Failed Retry]"
objective: "Multiple file transfers from the same source have failed. These files likely belong to the same media. Please use the `transfer-failed-retry` skill to retry them efficiently."
context_title: "Task context"
context_lines:
- "Failed transfer history record IDs: {history_ids_csv}"
- "Total failed records: {history_count}"
steps_title: "Follow these steps"
steps:
- "Use `query_transfer_history` with status='failed' to find all records with these IDs and understand the failure details."
- "Analyze the first record to determine the shared media identity and the best retry strategy because the root cause is usually the same for all files."
- "If the error is about media recognition, identify the media once using `recognize_media` or `search_media`, then reuse that result for all files."
- "For each failed record, delete the old history entry with `delete_transfer_history` and re-transfer using `transfer_file`."
- "Report how many retries succeeded and how many still failed."
task_rules:
- "These files share the same media identity. Do NOT call `recognize_media` or `search_media` repeatedly for each file."
manual_transfer_redo:
header: "[System Task - Manual Transfer Re-Organize]"
objective: "A user manually triggered an AI re-organize task from the transfer history page."
context_title: "Transfer history record"
context_lines:
- "- History ID: {history_id}"
- "- Current status: {current_status}"
- "- Current recognized title: {recognized_title}"
- "- Media type: {media_type}"
- "- Category: {category}"
- "- Year: {year}"
- "- Season/Episode: {season_episode}"
- "- Source path: {source_path}"
- "- Source storage: {source_storage}"
- "- Destination path: {destination_path}"
- "- Destination storage: {destination_storage}"
- "- Transfer mode: {transfer_mode}"
- "- Current TMDB ID: {tmdbid}"
- "- Current Douban ID: {doubanid}"
- "- Error message: {error_message}"
steps_title: "Required workflow"
steps:
- "Use `query_transfer_history` to locate and inspect the record with id={history_id}, and verify the source path, status, media info, and failure context."
- "Decide whether the current recognition is trustworthy."
- "If the source file no longer exists or cannot be safely processed, stop and report the reason."
- "If the current recognition is wrong or the record should be reorganized, determine the correct media identity first."
- "Prefer `recognize_media` with the source path. If recognition is not reliable, use `search_media` with keywords from filename, title, or year."
- "Only continue when you have high confidence in the target media."
- "Before re-organizing, delete the old transfer history record with `delete_transfer_history` so the system will not skip the source file."
- "Then use `transfer_file` to organize the source path directly."
- "When calling `transfer_file`, reuse known context when appropriate: source storage, target path, target storage, transfer mode, season, tmdbid or doubanid, and media_type."
- "If this record is already correct and no re-organize is needed, do not perform destructive actions; simply report that no change is necessary."
task_rules:
- "Do NOT rely on previous chat context. Work only from the record above."
- "Your goal is to directly fix one transfer history record by using MoviePilot tools to analyze, clean up the old history entry if necessary, and organize the source file again."
- "You should complete the re-organize by directly using tools such as `query_transfer_history`, `recognize_media`, `search_media`, `delete_transfer_history`, and `transfer_file`."
- "Do NOT reorganize blindly when media identity is uncertain."
- "If the previous record was successful but obviously identified as the wrong media, still use the tool-based flow above instead of `/redo`."
- "Keep the final response short and focused on outcome."
---
# SYSTEM_TASKS
这是后台系统任务的唯一定义源。
- `shared_rules` 负责统一口径。
- `task_types.<type>.context_lines` 负责定义上下文字段展示。
- `task_types.<type>.steps` 负责定义任务执行步骤。
- `task_types.<type>.task_rules` 负责定义该任务独有的补充约束。
- 代码侧只负责触发任务并提供模板变量,不再保存具体行为提示词。

View File

@@ -1,6 +1,10 @@
import asyncio
import json
import threading
from abc import ABCMeta, abstractmethod
from typing import Any, Optional
from concurrent.futures import ThreadPoolExecutor
from functools import partial
from typing import Any, Callable, Optional
from langchain_core.tools import BaseTool
from pydantic import PrivateAttr
@@ -19,6 +23,44 @@ class ToolChain(ChainBase):
pass
# Route common blocking calls into per-capability thread pools so slow external
# IO in one domain cannot starve the workers shared with other domains.
_BLOCKING_BUCKET_LIMITS = {
    "default": 4,
    "config": 2,
    "db": 4,
    "downloader": 4,
    "mediaserver": 4,
    "plugin": 2,
    "rule": 2,
    "site": 4,
    "storage": 4,
    "subscribe": 2,
    "workflow": 2,
}
# One concurrency gate per bucket, sized to the bucket's worker limit.
_blocking_semaphores = dict(
    (bucket_name, asyncio.Semaphore(slot_count))
    for bucket_name, slot_count in _BLOCKING_BUCKET_LIMITS.items()
)
_blocking_executors: dict[str, ThreadPoolExecutor] = {}
_blocking_executor_lock = threading.Lock()


def _get_blocking_executor(bucket: str) -> ThreadPoolExecutor:
    """Return the lazily created thread pool for *bucket*.

    Pools are built on first use (under a lock) rather than at import time,
    so importing the module does not spawn workers for buckets never used.
    """
    with _blocking_executor_lock:
        pool = _blocking_executors.get(bucket)
        if pool is None:
            pool = ThreadPoolExecutor(
                max_workers=_BLOCKING_BUCKET_LIMITS[bucket],
                thread_name_prefix=f"agent-tool-{bucket}",
            )
            _blocking_executors[bucket] = pool
        return pool
class MoviePilotTool(BaseTool, metaclass=ABCMeta):
"""
MoviePilot专用工具基类LangChain v1 / langchain_core
@@ -31,6 +73,7 @@ class MoviePilotTool(BaseTool, metaclass=ABCMeta):
_username: Optional[str] = PrivateAttr(default=None)
_stream_handler: Optional[StreamingHandler] = PrivateAttr(default=None)
_require_admin: bool = PrivateAttr(default=False)
_agent_context: dict = PrivateAttr(default_factory=dict)
def __init__(self, session_id: str, user_id: str, **kwargs):
super().__init__(**kwargs)
@@ -81,8 +124,12 @@ class MoviePilotTool(BaseTool, metaclass=ABCMeta):
merged_message = "\n\n".join(messages)
await self.send_tool_message(merged_message)
else:
# 非VERBOSE,重置缓冲区从头更新,保持消息编辑能力
self._stream_handler.reset()
# 非VERBOSE:不逐条回显工具调用,转为在下一段文本前补一句聚合摘要
self._stream_handler.record_tool_call(
tool_name=self.name,
tool_message=tool_message,
tool_kwargs=kwargs,
)
else:
# 未启用流式传输,不发送任何工具消息内容
pass
@@ -128,6 +175,23 @@ class MoviePilotTool(BaseTool, metaclass=ABCMeta):
"""子类实现具体的工具执行逻辑"""
raise NotImplementedError
@staticmethod
async def run_blocking(
    bucket: str, func: Callable[..., Any], *args: Any, **kwargs: Any
) -> Any:
    """Run a blocking callable on a controlled worker thread.

    Keeps slow synchronous work off the FastAPI event loop: the call is
    throttled by the bucket's semaphore and executed on that bucket's
    dedicated executor. Unknown bucket names fall back to ``"default"``.
    """
    if bucket not in _BLOCKING_BUCKET_LIMITS:
        bucket = "default"
    call = partial(func, *args, **kwargs)
    async with _blocking_semaphores[bucket]:
        running_loop = asyncio.get_running_loop()
        return await running_loop.run_in_executor(
            _get_blocking_executor(bucket), call
        )
def set_message_attr(self, channel: str, source: str, username: str):
"""
设置消息属性
@@ -142,6 +206,12 @@ class MoviePilotTool(BaseTool, metaclass=ABCMeta):
"""
self._stream_handler = stream_handler
def set_agent_context(self, agent_context: Optional[dict]):
    """Attach the context dict shared with the current agent session.

    A falsy value (``None`` or an empty dict) is normalized to a fresh
    empty dict so later writes never hit ``None``.
    """
    self._agent_context = {} if not agent_context else agent_context
async def _check_permission(self) -> Optional[str]:
"""
检查用户权限:
@@ -157,6 +227,8 @@ class MoviePilotTool(BaseTool, metaclass=ABCMeta):
if not self._channel or not self._source:
return None
# 渠道配置来自 SystemConfigOper 内存缓存,可以直接读取;
# 只有用户信息需要走异步数据库查询。
user_id_str = str(self._user_id) if self._user_id else None
channel_type_map = {
@@ -212,7 +284,7 @@ class MoviePilotTool(BaseTool, metaclass=ABCMeta):
return None
user = (
UserOper().get_by_name(self._username)
await UserOper().async_get_by_name(self._username)
if self._username
else None
)
@@ -227,7 +299,7 @@ class MoviePilotTool(BaseTool, metaclass=ABCMeta):
)
else:
user = (
UserOper().get_by_name(self._username)
await UserOper().async_get_by_name(self._username)
if self._username
else None
)
@@ -249,7 +321,9 @@ class MoviePilotTool(BaseTool, metaclass=ABCMeta):
return None
async def send_tool_message(self, message: str, title: str = ""):
async def send_tool_message(
self, message: str, title: str = "", image: Optional[str] = None
):
"""
发送工具消息
"""
@@ -261,5 +335,6 @@ class MoviePilotTool(BaseTool, metaclass=ABCMeta):
username=self._username,
title=title,
text=message,
image=image,
)
)

View File

@@ -30,6 +30,9 @@ from app.agent.tools.impl.search_torrents import SearchTorrentsTool
from app.agent.tools.impl.get_search_results import GetSearchResultsTool
from app.agent.tools.impl.search_web import SearchWebTool
from app.agent.tools.impl.send_message import SendMessageTool
from app.agent.tools.impl.ask_user_choice import AskUserChoiceTool
from app.agent.tools.impl.send_local_file import SendLocalFileTool
from app.agent.tools.impl.send_voice_message import SendVoiceMessageTool
from app.agent.tools.impl.query_schedulers import QuerySchedulersTool
from app.agent.tools.impl.run_scheduler import RunSchedulerTool
from app.agent.tools.impl.query_workflows import QueryWorkflowsTool
@@ -56,6 +59,8 @@ from app.agent.tools.impl.query_custom_identifiers import QueryCustomIdentifiers
from app.agent.tools.impl.update_custom_identifiers import UpdateCustomIdentifiersTool
from app.core.plugin import PluginManager
from app.log import logger
from app.schemas.message import ChannelCapabilityManager
from app.schemas.types import MessageChannel
from .base import MoviePilotTool
@@ -64,6 +69,18 @@ class MoviePilotToolFactory:
MoviePilot工具工厂
"""
@staticmethod
def _should_enable_choice_tool(channel: str = None) -> bool:
    """Whether the ask-user-choice tool is usable on the given channel.

    Requires a channel that both renders buttons and reports button
    callbacks; a missing or unknown channel name yields ``False``.
    """
    if not channel:
        return False
    try:
        resolved = MessageChannel(channel)
    except ValueError:
        # Unrecognized channel identifier: treat as unsupported.
        return False
    if not ChannelCapabilityManager.supports_buttons(resolved):
        return False
    return ChannelCapabilityManager.supports_callbacks(resolved)
@staticmethod
def create_tools(
session_id: str,
@@ -72,6 +89,7 @@ class MoviePilotToolFactory:
source: str = None,
username: str = None,
stream_handler: Callable = None,
agent_context: dict = None,
) -> List[MoviePilotTool]:
"""
创建MoviePilot工具列表
@@ -133,11 +151,20 @@ class MoviePilotToolFactory:
QueryCustomIdentifiersTool,
UpdateCustomIdentifiersTool,
]
if MoviePilotToolFactory._should_enable_choice_tool(channel):
tool_definitions.append(AskUserChoiceTool)
tool_definitions.extend(
[
SendLocalFileTool,
SendVoiceMessageTool,
]
)
# 创建内置工具
for ToolClass in tool_definitions:
tool = ToolClass(session_id=session_id, user_id=user_id)
tool.set_message_attr(channel=channel, source=source, username=username)
tool.set_stream_handler(stream_handler=stream_handler)
tool.set_agent_context(agent_context=agent_context)
tools.append(tool)
# 加载插件提供的工具
@@ -161,6 +188,7 @@ class MoviePilotToolFactory:
channel=channel, source=source, username=username
)
tool.set_stream_handler(stream_handler=stream_handler)
tool.set_agent_context(agent_context=agent_context)
tools.append(tool)
plugin_tools_count += 1
logger.debug(

View File

@@ -47,13 +47,13 @@ class AddDownloadTool(MoviePilotTool):
if torrent_urls:
if len(torrent_urls) == 1:
if self._is_torrent_ref(torrent_urls[0]):
message = f"正在添加下载任务: 资源 {torrent_urls[0]}"
message = f"添加下载任务: 资源 {torrent_urls[0]}"
else:
message = "正在添加下载任务: 磁力链接"
message = "添加下载任务: 磁力链接"
else:
message = f"正在批量添加下载任务: 共 {len(torrent_urls)} 个资源"
message = f"批量添加下载任务: 共 {len(torrent_urls)} 个资源"
else:
message = "正在添加下载任务"
message = "添加下载任务"
if downloader:
message += f" [下载器: {downloader}]"
@@ -104,6 +104,29 @@ class AddDownloadTool(MoviePilotTool):
return None
return context
@classmethod
async def _async_resolve_cached_context(cls, torrent_ref: str) -> Optional[Context]:
    """Resolve a ``hash:index`` torrent reference against the last search cache.

    Reads the most recent search results through the async API so the
    coroutine never touches the synchronous file cache directly. Returns
    ``None`` whenever the reference is malformed, the 1-based index is out
    of range, or the hash does not match the cached entry.
    """
    ref = str(torrent_ref).strip()
    head, sep, tail = ref.partition(":")
    if not sep:
        return None
    try:
        position = int(tail)
    except (TypeError, ValueError):
        return None
    if position < 1:
        return None
    cached = await SearchChain().async_last_search_results() or []
    if position > len(cached):
        return None
    candidate = cached[position - 1]
    # The hash must be present and match what this context would produce.
    if head and cls._build_torrent_ref(candidate) == head:
        return candidate
    return None
@staticmethod
def _merge_labels_with_system_tag(labels: Optional[str]) -> Optional[str]:
"""合并用户标签与系统默认标签,确保任务可被系统管理"""
@@ -164,6 +187,43 @@ class AddDownloadTool(MoviePilotTool):
return Path(FileURI(storage=dir_conf.storage or "local", path=dir_conf.download_path).uri)
@staticmethod
def _download_direct_sync(
    torrent_input: str,
    download_dir: Path,
    merged_labels: Optional[str],
    downloader: Optional[str],
) -> tuple[Optional[str], Optional[str]]:
    """Submit a magnet/direct download synchronously (worker-thread only).

    Wraps the blocking downloader call so callers can push it off the event
    loop via ``run_blocking``. Returns ``(download_id, error_message)``;
    the id is ``None`` when the task was not accepted.
    """
    outcome = DownloadChain().download(
        content=torrent_input,
        download_dir=download_dir,
        cookie=None,
        label=merged_labels,
        downloader=downloader,
    )
    if not outcome:
        # No downloader instance was available to take the task.
        return None, "未找到下载器"
    _, download_id, _, failure = outcome
    return download_id, failure
@staticmethod
def _download_single_sync(
    context: Context,
    downloader: Optional[str],
    save_path: Optional[str],
    merged_labels: Optional[str],
) -> tuple[Optional[str], Optional[str]]:
    """Submit a context-backed download synchronously (worker-thread only).

    Thin wrapper around ``DownloadChain.download_single`` so the blocking
    site fetch and downloader submission can run inside ``run_blocking``.
    Returns ``(download_id, error_message)``.
    """
    chain = DownloadChain()
    return chain.download_single(
        context=context,
        downloader=downloader,
        save_path=save_path,
        label=merged_labels,
        return_detail=True,
    )
async def run(self, torrent_url: Optional[List[str]] = None,
downloader: Optional[str] = None, save_path: Optional[str] = None,
labels: Optional[str] = None, **kwargs) -> str:
@@ -175,14 +235,13 @@ class AddDownloadTool(MoviePilotTool):
if not torrent_inputs:
return "错误torrent_url 不能为空。"
download_chain = DownloadChain()
merged_labels = self._merge_labels_with_system_tag(labels)
success_count = 0
failed_messages = []
for torrent_input in torrent_inputs:
if self._is_torrent_ref(torrent_input):
cached_context = self._resolve_cached_context(torrent_input)
cached_context = await self._async_resolve_cached_context(torrent_input)
if not cached_context or not cached_context.torrent_info:
failed_messages.append(f"{torrent_input} 引用无效,请重新使用 get_search_results 查看搜索结果")
continue
@@ -232,33 +291,33 @@ class AddDownloadTool(MoviePilotTool):
f"{torrent_input} 不是有效的下载内容,非 hash:id 时仅支持 magnet: 开头"
)
continue
download_dir = self._resolve_direct_download_dir(save_path)
download_dir = await self.run_blocking(
"storage", self._resolve_direct_download_dir, save_path
)
if not download_dir:
failed_messages.append(f"{torrent_input} 缺少保存路径,且系统未配置可用下载目录")
continue
result = download_chain.download(
content=torrent_input,
download_dir=download_dir,
cookie=None,
label=merged_labels,
downloader=downloader
did, error_msg = await self.run_blocking(
"downloader",
self._download_direct_sync,
torrent_input,
download_dir,
merged_labels,
downloader,
)
if result:
_, did, _, error_msg = result
else:
did, error_msg = None, "未找到下载器"
if did:
success_count += 1
else:
failed_messages.append(self._build_failure_message(torrent_input, error_msg))
continue
did, error_msg = download_chain.download_single(
context=context,
downloader=downloader,
save_path=save_path,
label=merged_labels,
return_detail=True
did, error_msg = await self.run_blocking(
"downloader",
self._download_single_sync,
context,
downloader,
save_path,
merged_labels,
)
if did:
success_count += 1

View File

@@ -12,36 +12,74 @@ from app.schemas.types import MediaType
class AddSubscribeInput(BaseModel):
"""添加订阅工具的输入参数模型"""
explanation: str = Field(..., description="Clear explanation of why this tool is being used in the current context")
title: str = Field(..., description="The title of the media to subscribe to (e.g., 'The Matrix', 'Breaking Bad')")
year: str = Field(..., description="Release year of the media (required for accurate identification)")
media_type: str = Field(...,
description="Allowed values: movie, tv")
season: Optional[int] = Field(None,
description="Season number for TV shows (optional, if not specified will subscribe to all seasons)")
tmdb_id: Optional[int] = Field(None,
description="TMDB database ID for precise media identification (optional, can be obtained from search_media tool)")
douban_id: Optional[str] = Field(None,
description="Douban ID for precise media identification (optional, alternative to tmdb_id)")
start_episode: Optional[int] = Field(None,
description="Starting episode number for TV shows (optional, defaults to 1 if not specified)")
total_episode: Optional[int] = Field(None,
description="Total number of episodes for TV shows (optional, will be auto-detected from TMDB if not specified)")
quality: Optional[str] = Field(None,
description="Quality filter as regular expression (optional, e.g., 'BluRay|WEB-DL|HDTV')")
resolution: Optional[str] = Field(None,
description="Resolution filter as regular expression (optional, e.g., '1080p|720p|2160p')")
effect: Optional[str] = Field(None,
description="Effect filter as regular expression (optional, e.g., 'HDR|DV|SDR')")
filter_groups: Optional[List[str]] = Field(None,
description="List of filter rule group names to apply (optional, can be obtained from query_rule_groups tool)")
sites: Optional[List[int]] = Field(None,
description="List of site IDs to search from (optional, can be obtained from query_sites tool)")
explanation: str = Field(
...,
description="Clear explanation of why this tool is being used in the current context",
)
title: str = Field(
...,
description="The title of the media to subscribe to (e.g., 'The Matrix', 'Breaking Bad')",
)
year: str = Field(
...,
description="Release year of the media (required for accurate identification)",
)
media_type: str = Field(..., description="Allowed values: movie, tv")
season: Optional[int] = Field(
None,
description=(
"Season number for TV shows (optional). If omitted, the subscription defaults to season 1 only. "
"To subscribe multiple seasons or the full series, call this tool separately for each season."
),
)
tmdb_id: Optional[int] = Field(
None,
description="TMDB database ID for precise media identification (optional, can be obtained from search_media tool)",
)
douban_id: Optional[str] = Field(
None,
description="Douban ID for precise media identification (optional, alternative to tmdb_id)",
)
start_episode: Optional[int] = Field(
None,
description="Starting episode number for TV shows (optional, defaults to 1 if not specified)",
)
total_episode: Optional[int] = Field(
None,
description="Total number of episodes for TV shows (optional, will be auto-detected from TMDB if not specified)",
)
quality: Optional[str] = Field(
None,
description="Quality filter as regular expression (optional, e.g., 'BluRay|WEB-DL|HDTV')",
)
resolution: Optional[str] = Field(
None,
description="Resolution filter as regular expression (optional, e.g., '1080p|720p|2160p')",
)
effect: Optional[str] = Field(
None,
description="Effect filter as regular expression (optional, e.g., 'HDR|DV|SDR')",
)
filter_groups: Optional[List[str]] = Field(
None,
description="List of filter rule group names to apply (optional, can be obtained from query_rule_groups tool)",
)
sites: Optional[List[int]] = Field(
None,
description="List of site IDs to search from (optional, can be obtained from query_sites tool)",
)
class AddSubscribeTool(MoviePilotTool):
name: str = "add_subscribe"
description: str = "Add media subscription to create automated download rules for movies and TV shows. The system will automatically search and download new episodes or releases based on the subscription criteria. Supports advanced filtering options like quality, resolution, and effect filters using regular expressions."
description: str = (
"Add media subscription to create automated download rules for movies and TV shows. "
"The system will automatically search and download new episodes or releases based on the subscription criteria. "
"For TV shows, omitting `season` subscribes season 1 only by default; to subscribe multiple seasons or "
"the full series, call this tool once per season. Supports advanced filtering options like quality, "
"resolution, and effect filters using regular expressions."
)
args_schema: Type[BaseModel] = AddSubscribeInput
def get_tool_message(self, **kwargs) -> Optional[str]:
@@ -50,52 +88,72 @@ class AddSubscribeTool(MoviePilotTool):
year = kwargs.get("year", "")
media_type = kwargs.get("media_type", "")
season = kwargs.get("season")
message = f"正在添加订阅: {title}"
message = f"添加订阅: {title}"
if year:
message += f" ({year})"
if media_type:
message += f" [{media_type}]"
if season:
message += f"{season}"
elif media_type == "tv":
message += " 第1季(默认)"
return message
async def run(self, title: str, year: str, media_type: str,
season: Optional[int] = None, tmdb_id: Optional[int] = None,
douban_id: Optional[str] = None,
start_episode: Optional[int] = None, total_episode: Optional[int] = None,
quality: Optional[str] = None, resolution: Optional[str] = None,
effect: Optional[str] = None, filter_groups: Optional[List[str]] = None,
sites: Optional[List[int]] = None, **kwargs) -> str:
async def run(
self,
title: str,
year: str,
media_type: str,
season: Optional[int] = None,
tmdb_id: Optional[int] = None,
douban_id: Optional[str] = None,
start_episode: Optional[int] = None,
total_episode: Optional[int] = None,
quality: Optional[str] = None,
resolution: Optional[str] = None,
effect: Optional[str] = None,
filter_groups: Optional[List[str]] = None,
sites: Optional[List[int]] = None,
**kwargs,
) -> str:
logger.info(
f"执行工具: {self.name}, 参数: title={title}, year={year}, media_type={media_type}, "
f"season={season}, tmdb_id={tmdb_id}, douban_id={douban_id}, start_episode={start_episode}, "
f"total_episode={total_episode}, quality={quality}, resolution={resolution}, "
f"effect={effect}, filter_groups={filter_groups}, sites={sites}")
f"effect={effect}, filter_groups={filter_groups}, sites={sites}"
)
try:
subscribe_chain = SubscribeChain()
media_type_enum = MediaType.from_agent(media_type)
if not media_type_enum:
return f"错误:无效的媒体类型 '{media_type}',支持的类型:'movie', 'tv'"
effective_season = (
season
if season is not None
else 1
if media_type_enum == MediaType.TV
else None
)
# 构建额外的订阅参数
subscribe_kwargs = {}
if start_episode is not None:
subscribe_kwargs['start_episode'] = start_episode
subscribe_kwargs["start_episode"] = start_episode
if total_episode is not None:
subscribe_kwargs['total_episode'] = total_episode
subscribe_kwargs["total_episode"] = total_episode
if quality:
subscribe_kwargs['quality'] = quality
subscribe_kwargs["quality"] = quality
if resolution:
subscribe_kwargs['resolution'] = resolution
subscribe_kwargs["resolution"] = resolution
if effect:
subscribe_kwargs['effect'] = effect
subscribe_kwargs["effect"] = effect
if filter_groups:
subscribe_kwargs['filter_groups'] = filter_groups
subscribe_kwargs["filter_groups"] = filter_groups
if sites:
subscribe_kwargs['sites'] = sites
subscribe_kwargs["sites"] = sites
sid, message = await subscribe_chain.async_add(
mtype=media_type_enum,
@@ -105,13 +163,21 @@ class AddSubscribeTool(MoviePilotTool):
doubanid=douban_id,
season=season,
username=self._user_id,
**subscribe_kwargs
**subscribe_kwargs,
)
if sid:
if message and "已存在" in message:
return f"订阅已存在:{title} ({year})。如需修改参数请先删除旧订阅。"
result_msg = f"订阅已存在:{title} ({year})"
if effective_season is not None:
result_msg += f"{effective_season}"
result_msg += "。如需修改参数请先删除旧订阅。"
return result_msg
result_msg = f"成功添加订阅:{title} ({year})"
if effective_season is not None:
result_msg += f"{effective_season}"
if season is None:
result_msg += "(未指定季号,默认按第一季订阅)"
if subscribe_kwargs:
params = []
if start_episode is not None:

View File

@@ -0,0 +1,173 @@
"""让用户通过按钮进行选择的工具。"""
from typing import List, Optional, Type
from pydantic import BaseModel, Field, model_validator
from app.agent.tools.base import MoviePilotTool, ToolChain
from app.chain.interaction import (
AgentInteractionOption,
agent_interaction_manager,
)
from app.log import logger
from app.schemas import Notification, NotificationType
from app.schemas.message import ChannelCapabilityManager
from app.schemas.types import MessageChannel
class UserChoiceOptionInput(BaseModel):
    """A single button option presented to the user."""

    # Text shown on the button face.
    label: str = Field(..., description="Text shown on the button")
    # Exact payload echoed back to the agent when this button is clicked.
    value: str = Field(
        ...,
        description="The exact content that will be sent back to the agent after the user clicks this button",
    )

    @model_validator(mode="after")
    def validate_option(self):
        # Reject empty or whitespace-only fields; both are required for the
        # button round-trip to be meaningful.
        if not self.label.strip():
            raise ValueError("label 不能为空")
        if not self.value.strip():
            raise ValueError("value 不能为空")
        return self
class AskUserChoiceInput(BaseModel):
    """Input schema for the button-choice tool."""

    # Reason the agent needs a button choice (for traceability, not shown to user).
    explanation: str = Field(
        ...,
        description="Clear explanation of why the agent needs the user to choose from buttons",
    )
    # The question displayed alongside the buttons.
    message: str = Field(
        ...,
        description="Question or prompt shown to the user together with the buttons",
    )
    # Optional heading shown above the question.
    title: Optional[str] = Field(
        None,
        description="Optional short title displayed above the question",
    )
    # The button options; validated non-empty below.
    options: List[UserChoiceOptionInput] = Field(
        ...,
        description="Button options to show to the user",
    )

    @model_validator(mode="after")
    def validate_payload(self):
        # A blank prompt or an empty option list makes the request unusable.
        if not self.message.strip():
            raise ValueError("message 不能为空")
        if not self.options:
            raise ValueError("options 至少需要提供一个")
        return self
class AskUserChoiceTool(MoviePilotTool):
    """Tool that asks the user to pick one of several buttons.

    Only usable on channels that support both interactive buttons and button
    callbacks; the selected option's ``value`` comes back to the agent as the
    user's next message via the interaction manager.
    """

    name: str = "ask_user_choice"
    description: str = (
        "Ask the user to choose from button options on channels that support interactive buttons. "
        "After the user clicks a button, the selected value will come back as the user's next message."
    )
    args_schema: Type[BaseModel] = AskUserChoiceInput
    # Regular users may trigger a choice prompt; no admin privilege needed.
    require_admin: bool = False

    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Progress line shown while the tool runs (prompt truncated to 40 chars)."""
        message = kwargs.get("message", "") or ""
        if len(message) > 40:
            message = message[:40] + "..."
        return f"发送按钮选择: {message}"

    @staticmethod
    def _truncate_button_text(text: str, max_length: int) -> str:
        # Fit a button label into the channel's length budget; append "..."
        # only when there is room for the ellipsis itself.
        if max_length <= 0 or len(text) <= max_length:
            return text
        if max_length <= 3:
            return text[:max_length]
        return text[: max_length - 3] + "..."

    async def run(
        self,
        message: str,
        options: List[UserChoiceOptionInput],
        title: Optional[str] = None,
        **kwargs,
    ) -> str:
        """Post the prompt with one button per option and register the pending choice.

        Returns a human-readable status string; the actual selection arrives
        asynchronously through the interaction manager's callback path.
        """
        # A reply target (channel + source) is required to send buttons back.
        if not self._channel or not self._source:
            return "当前不在可回传消息的会话中,无法发起按钮选择"
        try:
            channel = MessageChannel(self._channel)
        except ValueError:
            return f"不支持的消息渠道: {self._channel}"
        # Both capabilities are needed: rendering buttons AND receiving clicks.
        if not (
            ChannelCapabilityManager.supports_buttons(channel)
            and ChannelCapabilityManager.supports_callbacks(channel)
        ):
            return f"当前渠道 {channel.value} 不支持按钮选择"

        # Layout policy: one button per row; rows and label length are
        # bounded by the channel's advertised limits.
        max_per_row = 1
        max_rows = ChannelCapabilityManager.get_max_button_rows(channel)
        max_text_length = ChannelCapabilityManager.get_max_button_text_length(channel)
        max_options = max_per_row * max_rows
        if len(options) > max_options:
            return f"当前渠道最多支持 {max_options} 个按钮选项"

        choice_options = [
            AgentInteractionOption(
                label=option.label.strip(), value=option.value.strip()
            )
            for option in options
        ]
        # Persist the pending request so a later button callback can be
        # matched back to this session and option list.
        request = agent_interaction_manager.create_request(
            session_id=self._session_id,
            user_id=str(self._user_id),
            channel=channel.value,
            source=self._source,
            username=self._username,
            title=title,
            prompt=message.strip(),
            options=choice_options,
        )

        buttons = []
        current_row = []
        for index, option in enumerate(choice_options, start=1):
            current_row.append(
                {
                    "text": self._truncate_button_text(option.label, max_text_length),
                    # Callback payload encodes the request id and the
                    # 1-based option index.
                    "callback_data": (
                        f"agent_interaction:choice:{request.request_id}:{index}"
                    ),
                }
            )
            if len(current_row) >= max_per_row:
                buttons.append(current_row)
                current_row = []
        if current_row:
            buttons.append(current_row)

        logger.info(
            "执行工具: %s, channel=%s, session_id=%s, options=%s",
            self.name,
            channel.value,
            self._session_id,
            len(choice_options),
        )
        await ToolChain().async_post_message(
            Notification(
                channel=channel,
                source=self._source,
                mtype=NotificationType.Agent,
                userid=self._user_id,
                username=self._username,
                title=title,
                text=message.strip(),
                buttons=buttons,
            )
        )
        # Flag in the shared agent context that the user has already been
        # addressed via buttons, so the agent does not double-respond.
        self._agent_context["user_reply_sent"] = True
        self._agent_context["reply_mode"] = "button_choice"
        return f"已发送 {len(choice_options)} 个按钮选项,等待用户选择"

View File

@@ -108,16 +108,16 @@ class BrowseWebpageTool(MoviePilotTool):
url = kwargs.get("url", "")
selector = kwargs.get("selector", "")
action_messages = {
"goto": f"正在打开网页: {url}",
"get_content": "正在获取页面内容",
"screenshot": "正在截取页面截图",
"click": f"正在点击元素: {selector}",
"fill": f"正在填写表单: {selector}",
"select": f"正在选择选项: {selector}",
"evaluate": "正在执行 JavaScript",
"wait": f"正在等待元素: {selector}",
"goto": f"打开网页: {url}",
"get_content": "获取页面内容",
"screenshot": "截取页面截图",
"click": f"点击元素: {selector}",
"fill": f"填写表单: {selector}",
"select": f"选择选项: {selector}",
"evaluate": "执行 JavaScript",
"wait": f"等待元素: {selector}",
}
return action_messages.get(action, f"正在执行浏览器操作: {action}")
return action_messages.get(action, f"执行浏览器操作: {action}")
async def run(
self,

View File

@@ -41,7 +41,7 @@ class DeleteDownloadTool(MoviePilotTool):
downloader = kwargs.get("downloader")
delete_files = kwargs.get("delete_files", False)
message = f"正在删除下载任务: {hash_value}"
message = f"删除下载任务: {hash_value}"
if downloader:
message += f" [下载器: {downloader}]"
if delete_files:
@@ -49,6 +49,15 @@ class DeleteDownloadTool(MoviePilotTool):
return message
@staticmethod
def _delete_download_sync(
    hash_value: str, downloader: Optional[str] = None, delete_files: bool = False
) -> bool:
    """Synchronously remove a download task via the downloader client.

    Executed off the event loop (the caller dispatches it through
    run_blocking) because the downloader client would otherwise block.

    Args:
        hash_value: Torrent info-hash identifying the task.
        downloader: Optional downloader name; None lets the chain pick.
        delete_files: Also delete the downloaded files when True.

    Returns:
        The chain's boolean result — presumably True on success; confirm
        against DownloadChain.remove_torrents.
    """
    return DownloadChain().remove_torrents(
        hashs=[hash_value], downloader=downloader, delete_file=delete_files
    )
async def run(
self,
hash: str,
@@ -61,16 +70,18 @@ class DeleteDownloadTool(MoviePilotTool):
)
try:
download_chain = DownloadChain()
# 仅支持通过hash删除任务
if len(hash) != 40 or not all(c in "0123456789abcdefABCDEF" for c in hash):
return "参数错误hash 格式无效,请先使用 query_download_tasks 工具获取正确的 hash。"
# 删除下载任务
# remove_torrents 支持 delete_file 参数,可以控制是否删除文件
result = download_chain.remove_torrents(
hashs=[hash], downloader=downloader, delete_file=delete_files
result = await self.run_blocking(
"downloader",
self._delete_download_sync,
hash,
downloader,
bool(delete_files),
)
if result:

View File

@@ -30,7 +30,7 @@ class DeleteDownloadHistoryTool(MoviePilotTool):
def get_tool_message(self, **kwargs) -> Optional[str]:
history_id = kwargs.get("history_id")
return f"正在删除下载历史记录 ID: {history_id}"
return f"删除下载历史记录 ID: {history_id}"
async def run(self, history_id: int, **kwargs) -> str:
logger.info(f"执行工具: {self.name}, 参数: history_id={history_id}")

View File

@@ -34,7 +34,7 @@ class DeleteSubscribeTool(MoviePilotTool):
def get_tool_message(self, **kwargs) -> Optional[str]:
"""根据删除参数生成友好的提示消息"""
subscribe_id = kwargs.get("subscribe_id")
return f"正在删除订阅 (ID: {subscribe_id})"
return f"删除订阅 (ID: {subscribe_id})"
async def run(self, subscribe_id: int, **kwargs) -> str:
logger.info(f"执行工具: {self.name}, 参数: subscribe_id={subscribe_id}")
@@ -49,8 +49,11 @@ class DeleteSubscribeTool(MoviePilotTool):
# 在删除之前获取订阅信息(用于事件)
subscribe_info = subscribe.to_dict()
# 删除订阅
subscribe_oper.delete(subscribe_id)
await subscribe_oper.async_delete(subscribe_id)
# 分享订阅统计刷新本身已异步化,这里只需要在删除后触发即可。
SubscribeHelper().sub_done_async(
{"tmdbid": subscribe.tmdbid, "doubanid": subscribe.doubanid}
)
# 发送事件
await eventmanager.async_send_event(
@@ -58,11 +61,6 @@ class DeleteSubscribeTool(MoviePilotTool):
{"subscribe_id": subscribe_id, "subscribe_info": subscribe_info},
)
# 统计订阅
SubscribeHelper().sub_done_async(
{"tmdbid": subscribe.tmdbid, "doubanid": subscribe.doubanid}
)
return f"成功删除订阅:{subscribe.name} ({subscribe.year})"
except Exception as e:
logger.error(f"删除订阅失败: {e}", exc_info=True)

View File

@@ -30,28 +30,24 @@ class DeleteTransferHistoryTool(MoviePilotTool):
def get_tool_message(self, **kwargs) -> Optional[str]:
"""根据参数生成友好的提示消息"""
history_id = kwargs.get("history_id")
return f"正在删除整理历史记录: ID={history_id}"
return f"删除整理历史记录: ID={history_id}"
async def run(self, history_id: int, **kwargs) -> str:
logger.info(f"执行工具: {self.name}, 参数: history_id={history_id}")
try:
transferhis = TransferHistoryOper()
# 查询历史记录是否存在
history = transferhis.get(history_id)
history = await transferhis.async_get(history_id)
if not history:
return f"错误整理历史记录不存在ID={history_id}"
# 保存信息用于返回
title = history.title or "未知"
src = history.src or "未知"
status = "成功" if history.status else "失败"
# 删除记录
transferhis.delete(history_id)
return f"已删除整理历史记录ID={history_id},标题={title},源路径={src},状态={status}"
await transferhis.async_delete(history_id)
return (
f"已删除整理历史记录ID={history_id},标题={title},源路径={src},状态={status}"
)
except Exception as e:
logger.error(f"删除整理历史记录失败: {e}", exc_info=True)
return f"删除整理历史记录时发生错误: {str(e)}"

View File

@@ -28,7 +28,7 @@ class EditFileTool(MoviePilotTool):
"""根据参数生成友好的提示消息"""
file_path = kwargs.get("file_path", "")
file_name = Path(file_path).name if file_path else "未知文件"
return f"正在编辑文件: {file_name}"
return f"编辑文件: {file_name}"
async def run(self, file_path: str, old_text: str, new_text: str, **kwargs) -> str:
logger.info(f"执行工具: {self.name}, 参数: file_path={file_path}")

View File

@@ -1,6 +1,10 @@
"""执行Shell命令工具"""
import asyncio
import os
import signal
import subprocess
from dataclasses import dataclass, field
from typing import Optional, Type
from pydantic import BaseModel, Field
@@ -9,6 +13,54 @@ from app.agent.tools.base import MoviePilotTool
from app.log import logger
DEFAULT_TIMEOUT_SECONDS = 60
MAX_TIMEOUT_SECONDS = 300
MAX_OUTPUT_CHARS = 6000
READ_CHUNK_SIZE = 4096
KILL_GRACE_SECONDS = 3
COMMAND_CONCURRENCY_LIMIT = 2
_command_semaphore = asyncio.Semaphore(COMMAND_CONCURRENCY_LIMIT)
@dataclass
class _CommandOutput:
"""保存受限命令输出,避免大输出一次性进入内存。"""
limit: int
stdout_chunks: list[str] = field(default_factory=list)
stderr_chunks: list[str] = field(default_factory=list)
captured_chars: int = 0
truncated: bool = False
def append(self, stream_name: str, text: str) -> None:
if not text:
return
remaining = self.limit - self.captured_chars
if remaining <= 0:
self.truncated = True
return
captured = text[:remaining]
if stream_name == "stdout":
self.stdout_chunks.append(captured)
else:
self.stderr_chunks.append(captured)
self.captured_chars += len(captured)
if len(text) > remaining:
self.truncated = True
@property
def stdout(self) -> str:
return "".join(self.stdout_chunks).strip()
@property
def stderr(self) -> str:
return "".join(self.stderr_chunks).strip()
class ExecuteCommandInput(BaseModel):
"""执行Shell命令工具的输入参数模型"""
@@ -23,14 +75,160 @@ class ExecuteCommandInput(BaseModel):
class ExecuteCommandTool(MoviePilotTool):
name: str = "execute_command"
description: str = "Safely execute shell commands on the server. Useful for system maintenance, checking status, or running custom scripts. Includes timeout and output limits."
description: str = (
"Safely execute shell commands on the server. Useful for system "
"maintenance, checking status, or running custom scripts. Includes "
"timeout, concurrency, and hard output limits."
)
args_schema: Type[BaseModel] = ExecuteCommandInput
require_admin: bool = True
def get_tool_message(self, **kwargs) -> Optional[str]:
"""根据命令生成友好的提示消息"""
command = kwargs.get("command", "")
return f"正在执行系统命令: {command}"
return f"执行系统命令: {command}"
@staticmethod
def _normalize_timeout(timeout: Optional[int]) -> tuple[int, Optional[str]]:
    """Clamp the requested timeout so the agent cannot pass absurd values.

    Returns:
        (normalized_timeout, note) where ``note`` is a user-visible warning
        when the input was non-positive or above the cap, else None.
    """
    try:
        normalized = int(timeout or DEFAULT_TIMEOUT_SECONDS)
    except (TypeError, ValueError):
        # NOTE(review): non-numeric input falls back to the default WITHOUT
        # emitting a note (only the <=0 branch below produces one) — confirm
        # this silence is intended.
        normalized = DEFAULT_TIMEOUT_SECONDS
    if normalized <= 0:
        return DEFAULT_TIMEOUT_SECONDS, "timeout 参数无效,已使用默认 60 秒"
    if normalized > MAX_TIMEOUT_SECONDS:
        return (
            MAX_TIMEOUT_SECONDS,
            f"timeout 参数超过上限,已从 {normalized} 秒限制为 {MAX_TIMEOUT_SECONDS}",
        )
    return normalized, None
@staticmethod
def _subprocess_kwargs() -> dict:
    """Build kwargs for create_subprocess_shell.

    The child gets its own process group/session so a timeout or output
    overflow can clean up the whole process tree, not just the shell.
    """
    kwargs = {
        "stdin": subprocess.DEVNULL,
        "stdout": asyncio.subprocess.PIPE,
        "stderr": asyncio.subprocess.PIPE,
    }
    if os.name == "posix":
        # start_new_session=True -> setsid(), which enables os.killpg later.
        kwargs["start_new_session"] = True
    elif os.name == "nt":
        kwargs["creationflags"] = subprocess.CREATE_NEW_PROCESS_GROUP
    return kwargs
@staticmethod
async def _read_stream(
    stream: asyncio.StreamReader,
    stream_name: str,
    output: _CommandOutput,
    limit_reached: asyncio.Event,
) -> None:
    """Drain one pipe in chunks, signalling when the capture limit is hit.

    Once ``output`` reports truncation, the loop keeps reading but stops
    storing, and sets ``limit_reached`` so the caller can terminate the
    command.
    """
    while True:
        chunk = await stream.read(READ_CHUNK_SIZE)
        if not chunk:
            # EOF: the child closed this pipe (normally on exit).
            break
        if output.truncated:
            limit_reached.set()
            continue
        output.append(stream_name, chunk.decode("utf-8", errors="replace"))
        if output.truncated:
            limit_reached.set()
            # Keep draining the pipe without storing further content so the
            # child process is not stalled by pipe back-pressure.
            continue
@staticmethod
def _terminate_process(process: asyncio.subprocess.Process, sig: int):
    """Signal the whole process group; platforms without process groups
    fall back to terminating the single process."""
    try:
        if os.name == "posix":
            # The child was started with start_new_session=True, so its pid
            # doubles as the group id and killpg reaches all descendants.
            os.killpg(process.pid, sig)
        elif sig == getattr(signal, "SIGKILL", None):
            process.kill()
        else:
            process.terminate()
    except ProcessLookupError:
        # Process already exited — nothing left to signal.
        pass
@classmethod
async def _cleanup_process(
    cls,
    process: asyncio.subprocess.Process,
    wait_task: asyncio.Task,
) -> None:
    """Terminate gracefully first, then escalate, so a timed-out shell
    does not leave orphaned children behind.

    ``wait_task`` wraps ``process.wait()``; it is shielded so a timeout
    here does not cancel the underlying wait.
    """
    if wait_task.done():
        # Process already exited on its own — nothing to clean up.
        return
    cls._terminate_process(process, signal.SIGTERM)
    try:
        await asyncio.wait_for(
            asyncio.shield(wait_task), timeout=KILL_GRACE_SECONDS
        )
        return
    except asyncio.TimeoutError:
        pass
    # SIGTERM was ignored: force-kill (SIGTERM again where SIGKILL is absent).
    kill_signal = getattr(signal, "SIGKILL", signal.SIGTERM)
    cls._terminate_process(process, kill_signal)
    try:
        await asyncio.wait_for(
            asyncio.shield(wait_task), timeout=KILL_GRACE_SECONDS
        )
    except asyncio.TimeoutError:
        logger.warning("命令进程强制清理超时: pid=%s", process.pid)
@staticmethod
async def _finish_reader_tasks(reader_tasks: list[asyncio.Task]) -> None:
    """Wait briefly for the output reader tasks to exit; exceptions are
    logged only and never affect the tool's return value."""
    if not reader_tasks:
        return
    done, pending = await asyncio.wait(reader_tasks, timeout=1)
    for task in pending:
        # Readers still running after the 1s grace period get cancelled.
        task.cancel()
    results = await asyncio.gather(*done, *pending, return_exceptions=True)
    for result in results:
        if isinstance(result, Exception) and not isinstance(
            result, asyncio.CancelledError
        ):
            logger.debug("命令输出读取任务异常: %s", result)
@staticmethod
def _format_result(
    *,
    exit_code: Optional[int],
    output: _CommandOutput,
    timeout: int,
    timed_out: bool,
    output_limited: bool,
    timeout_note: Optional[str],
) -> str:
    """Assemble the user-facing result string for execute_command.

    Headline precedence: timeout > output-limit > normal completion; the
    captured stdout/stderr (already capped by _CommandOutput) is appended
    after any advisory note.
    """
    if timed_out:
        result = f"命令执行超时 (限制: {timeout}秒,已终止进程)"
    elif output_limited:
        result = (
            f"命令输出超过限制 (限制: {MAX_OUTPUT_CHARS}字符,"
            f"已截断并终止进程,退出码: {exit_code})"
        )
    else:
        result = f"命令执行完成 (退出码: {exit_code})"
    if timeout_note:
        result += f"\n\n提示:\n{timeout_note}"
    if output.stdout:
        result += f"\n\n标准输出:\n{output.stdout}"
    if output.stderr:
        result += f"\n\n错误输出:\n{output.stderr}"
    if output.truncated:
        result += "\n\n...(输出内容过长,已截断)"
    if not output.stdout and not output.stderr:
        result += "\n\n(无输出内容)"
    return result
async def run(self, command: str, timeout: Optional[int] = 60, **kwargs) -> str:
logger.info(
@@ -50,46 +248,57 @@ class ExecuteCommandTool(MoviePilotTool):
if keyword in command:
return f"错误:命令包含禁止使用的关键字 '{keyword}'"
try:
# 执行命令
process = await asyncio.create_subprocess_shell(
command, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
)
normalized_timeout, timeout_note = self._normalize_timeout(timeout)
try:
# 等待完成,带超时
stdout, stderr = await asyncio.wait_for(
process.communicate(), timeout=timeout
try:
async with _command_semaphore:
# 命令输出可能非常大,必须边读边截断,不能使用 communicate() 一次性收集。
process = await asyncio.create_subprocess_shell(
command, **self._subprocess_kwargs()
)
output = _CommandOutput(limit=MAX_OUTPUT_CHARS)
limit_reached = asyncio.Event()
wait_task = asyncio.create_task(process.wait())
limit_task = asyncio.create_task(limit_reached.wait())
reader_tasks = [
asyncio.create_task(
self._read_stream(
process.stdout, "stdout", output, limit_reached
)
),
asyncio.create_task(
self._read_stream(
process.stderr, "stderr", output, limit_reached
)
),
]
timed_out = False
output_limited = False
done, _ = await asyncio.wait(
{wait_task, limit_task},
timeout=normalized_timeout,
return_when=asyncio.FIRST_COMPLETED,
)
# 处理输出
stdout_str = stdout.decode("utf-8", errors="replace").strip()
stderr_str = stderr.decode("utf-8", errors="replace").strip()
exit_code = process.returncode
if wait_task not in done:
if limit_task in done:
output_limited = True
else:
timed_out = True
await self._cleanup_process(process, wait_task)
result = f"命令执行完成 (退出码: {exit_code})"
if stdout_str:
result += f"\n\n标准输出:\n{stdout_str}"
if stderr_str:
result += f"\n\n错误输出:\n{stderr_str}"
limit_task.cancel()
await self._finish_reader_tasks(reader_tasks)
# 如果没有输出
if not stdout_str and not stderr_str:
result += "\n\n(无输出内容)"
# 限制输出长度,防止上下文过长
if len(result) > 3000:
result = result[:3000] + "\n\n...(输出内容过长,已截断)"
return result
except asyncio.TimeoutError:
# 超时处理
try:
process.kill()
except ProcessLookupError:
pass
return f"命令执行超时 (限制: {timeout}秒)"
return self._format_result(
exit_code=process.returncode,
output=output,
timeout=normalized_timeout,
timed_out=timed_out,
output_limited=output_limited,
timeout_note=timeout_note,
)
except Exception as e:
logger.error(f"执行命令失败: {e}", exc_info=True)

View File

@@ -62,7 +62,7 @@ class GetRecommendationsTool(MoviePilotTool):
"douban_hot": "豆瓣热门",
"douban_movie_hot": "豆瓣热门电影",
"douban_tv_hot": "豆瓣热门电视剧",
"douban_movie_showing": "豆瓣正在热映",
"douban_movie_showing": "豆瓣热映",
"douban_movies": "豆瓣最新电影",
"douban_tvs": "豆瓣最新电视剧",
"douban_movie_top250": "豆瓣电影TOP250",
@@ -73,7 +73,7 @@ class GetRecommendationsTool(MoviePilotTool):
}
source_desc = source_map.get(source, source)
message = f"正在获取推荐: {source_desc}"
message = f"获取推荐: {source_desc}"
if media_type != "all":
message += f" [{media_type}]"
message += f" (第{page}页)"

View File

@@ -53,7 +53,7 @@ class GetSearchResultsTool(MoviePilotTool):
args_schema: Type[BaseModel] = GetSearchResultsInput
def get_tool_message(self, **kwargs) -> Optional[str]:
return "正在获取搜索结果"
return "获取搜索结果"
async def run(
self,

View File

@@ -32,99 +32,87 @@ class ListDirectoryTool(MoviePilotTool):
path = kwargs.get("path", "")
storage = kwargs.get("storage", "local")
message = f"正在查询目录: {path}"
message = f"查询目录: {path}"
if storage != "local":
message += f" [存储: {storage}]"
return message
@staticmethod
def _list_directory_sync(
path: str, storage: Optional[str] = "local", sort_by: Optional[str] = "name"
) -> str:
"""
目录遍历可能触发本地磁盘或远程存储请求,统一放到线程池中执行。
"""
if not path:
return "错误:路径不能为空"
if storage == "local":
if not path.startswith("/") and not (len(path) > 1 and path[1] == ":"):
path = str(Path(path).resolve())
elif not path.startswith("/"):
path = "/" + path
fileitem = FileItem(storage=storage or "local", path=path, type="dir")
file_list = StorageChain().list_files(fileitem, recursion=False)
if file_list is None:
return f"无法访问目录:{path},请检查路径是否正确或存储是否可用"
if not file_list:
return f"目录 {path} 为空"
if sort_by == "time":
file_list.sort(key=lambda x: x.modify_time or 0, reverse=True)
else:
file_list.sort(
key=lambda x: (
0 if x.type == "dir" else 1,
StringUtils.natural_sort_key(x.name or ""),
)
)
total_count = len(file_list)
limited_list = file_list[:20]
simplified_items = []
for item in limited_list:
size_str = StringUtils.str_filesize(item.size) if item.size else None
modify_time_str = None
if item.modify_time:
try:
modify_time_str = datetime.fromtimestamp(item.modify_time).strftime(
"%Y-%m-%d %H:%M:%S"
)
except (ValueError, OSError):
modify_time_str = str(item.modify_time)
simplified = {
"name": item.name,
"type": item.type,
"path": item.path,
"size": size_str,
"modify_time": modify_time_str,
}
if item.type == "file" and item.extension:
simplified["extension"] = item.extension
simplified_items.append(simplified)
result_json = json.dumps(simplified_items, ensure_ascii=False, indent=2)
if total_count > 20:
return (
f"注意:目录中共有 {total_count} 个项目,为节省上下文空间,仅显示前 20 个项目。\n\n"
f"{result_json}"
)
return result_json
async def run(self, path: str, storage: Optional[str] = "local",
sort_by: Optional[str] = "name", **kwargs) -> str:
logger.info(f"执行工具: {self.name}, 参数: path={path}, storage={storage}, sort_by={sort_by}")
try:
# 规范化路径
if not path:
return "错误:路径不能为空"
# 确保路径格式正确
if storage == "local":
# 本地路径处理
if not path.startswith("/") and not (len(path) > 1 and path[1] == ":"):
# 相对路径,尝试转换为绝对路径
path = str(Path(path).resolve())
else:
# 远程存储路径,确保以/开头
if not path.startswith("/"):
path = "/" + path
# 创建FileItem
fileitem = FileItem(
storage=storage or "local",
path=path,
type="dir"
return await self.run_blocking(
"storage", self._list_directory_sync, path, storage, sort_by
)
# 查询目录内容
storage_chain = StorageChain()
file_list = storage_chain.list_files(fileitem, recursion=False)
if file_list is None:
return f"无法访问目录:{path},请检查路径是否正确或存储是否可用"
if not file_list:
return f"目录 {path} 为空"
# 排序
if sort_by == "time":
file_list.sort(key=lambda x: x.modify_time or 0, reverse=True)
else:
# 默认按名称排序(目录优先,然后按名称)
file_list.sort(key=lambda x: (
0 if x.type == "dir" else 1,
StringUtils.natural_sort_key(x.name or "")
))
# 限制返回数量
total_count = len(file_list)
limited_list = file_list[:20]
# 转换为字典格式
simplified_items = []
for item in limited_list:
# 格式化文件大小
size_str = None
if item.size:
size_str = StringUtils.str_filesize(item.size)
# 格式化修改时间
modify_time_str = None
if item.modify_time:
try:
modify_time_str = datetime.fromtimestamp(item.modify_time).strftime("%Y-%m-%d %H:%M:%S")
except (ValueError, OSError):
modify_time_str = str(item.modify_time)
simplified = {
"name": item.name,
"type": item.type,
"path": item.path,
"size": size_str,
"modify_time": modify_time_str
}
# 如果是文件,添加扩展名
if item.type == "file" and item.extension:
simplified["extension"] = item.extension
simplified_items.append(simplified)
result_json = json.dumps(simplified_items, ensure_ascii=False, indent=2)
# 如果结果被裁剪,添加提示信息
if total_count > 100:
return f"注意:目录中共有 {total_count} 个项目,为节省上下文空间,仅显示前 100 个项目。\n\n{result_json}"
else:
return result_json
except Exception as e:
logger.error(f"查询目录内容失败: {e}", exc_info=True)
return f"查询目录内容时发生错误: {str(e)}"

View File

@@ -33,7 +33,7 @@ class ListSlashCommandsTool(MoviePilotTool):
def get_tool_message(self, **kwargs) -> Optional[str]:
"""生成友好的提示消息"""
return "正在查询所有可用命令"
return "查询所有可用命令"
async def run(self, **kwargs) -> str:
logger.info(f"执行工具: {self.name}")

View File

@@ -55,7 +55,7 @@ class ModifyDownloadTool(MoviePilotTool):
tags = kwargs.get("tags")
downloader = kwargs.get("downloader")
parts = [f"正在修改下载任务: {hash_value}"]
parts = [f"修改下载任务: {hash_value}"]
if action == "start":
parts.append("操作: 开始下载")
elif action == "stop":
@@ -66,6 +66,38 @@ class ModifyDownloadTool(MoviePilotTool):
parts.append(f"下载器: {downloader}")
return " | ".join(parts)
@staticmethod
def _modify_download_sync(
hash_value: str,
action: Optional[str] = None,
tags: Optional[List[str]] = None,
downloader: Optional[str] = None,
) -> List[str]:
"""同步修改下载任务状态和标签,避免下载器 SDK 阻塞事件循环。"""
download_chain = DownloadChain()
results = []
if tags:
tag_result = download_chain.set_torrents_tag(
hashs=[hash_value], tags=tags, downloader=downloader
)
if tag_result:
results.append(f"成功设置标签:{', '.join(tags)}")
else:
results.append("设置标签失败,请检查任务是否存在或下载器是否可用")
if action:
action_result = download_chain.set_downloading(
hash_str=hash_value, oper=action, name=downloader
)
action_desc = "开始" if action == "start" else "暂停"
if action_result:
results.append(f"成功{action_desc}下载任务")
else:
results.append(f"{action_desc}下载任务失败,请检查任务是否存在或下载器是否可用")
return results
async def run(
self,
hash: str,
@@ -91,31 +123,14 @@ class ModifyDownloadTool(MoviePilotTool):
if action and action not in ("start", "stop"):
return f"参数错误action 只支持 'start'(开始下载)或 'stop'(暂停下载),收到: '{action}'"
download_chain = DownloadChain()
results = []
# 设置标签
if tags:
tag_result = download_chain.set_torrents_tag(
hashs=[hash], tags=tags, downloader=downloader
)
if tag_result:
results.append(f"成功设置标签:{', '.join(tags)}")
else:
results.append(f"设置标签失败,请检查任务是否存在或下载器是否可用")
# 执行开始/暂停操作
if action:
action_result = download_chain.set_downloading(
hash_str=hash, oper=action, name=downloader
)
action_desc = "开始" if action == "start" else "暂停"
if action_result:
results.append(f"成功{action_desc}下载任务")
else:
results.append(
f"{action_desc}下载任务失败,请检查任务是否存在或下载器是否可用"
)
results = await self.run_blocking(
"downloader",
self._modify_download_sync,
hash,
action,
tags,
downloader,
)
return f"下载任务 {hash}" + "".join(results)

View File

@@ -31,13 +31,17 @@ class QueryCustomIdentifiersTool(MoviePilotTool):
def get_tool_message(self, **kwargs) -> Optional[str]:
"""生成友好的提示消息"""
return "正在查询自定义识别词"
return "查询自定义识别词"
@staticmethod
def _load_custom_identifiers():
    """Read custom identifier words from the in-memory system config cache."""
    return SystemConfigOper().get(SystemConfigKey.CustomIdentifiers)
async def run(self, **kwargs) -> str:
logger.info(f"执行工具: {self.name}")
try:
system_config_oper = SystemConfigOper()
identifiers = system_config_oper.get(SystemConfigKey.CustomIdentifiers)
identifiers = self._load_custom_identifiers()
if identifiers:
return json.dumps(
{

View File

@@ -32,7 +32,7 @@ class QueryDirectorySettingsTool(MoviePilotTool):
storage_type = kwargs.get("storage_type", "all")
name = kwargs.get("name")
parts = ["正在查询目录配置"]
parts = ["查询目录配置"]
if directory_type != "all":
type_map = {"download": "下载目录", "library": "媒体库目录"}
@@ -47,88 +47,93 @@ class QueryDirectorySettingsTool(MoviePilotTool):
return " | ".join(parts) if len(parts) > 1 else parts[0]
@staticmethod
def _query_directory_settings(
directory_type: Optional[str] = "all",
storage_type: Optional[str] = "all",
name: Optional[str] = None,
) -> str:
"""
目录配置完全来自内存配置缓存,这里只做本地过滤和序列化。
"""
directory_helper = DirectoryHelper()
if directory_type == "download":
dirs = directory_helper.get_download_dirs()
elif directory_type == "library":
dirs = directory_helper.get_library_dirs()
else:
dirs = directory_helper.get_dirs()
filtered_dirs = []
for d in dirs:
if storage_type == "local":
if directory_type == "download" and d.storage != "local":
continue
if directory_type == "library" and d.library_storage != "local":
continue
if directory_type == "all":
if d.download_path and d.storage != "local":
continue
if d.library_path and d.library_storage != "local":
continue
elif storage_type == "remote":
if directory_type == "download" and d.storage == "local":
continue
if directory_type == "library" and d.library_storage == "local":
continue
if directory_type == "all":
if d.download_path and d.storage == "local":
continue
if d.library_path and d.library_storage == "local":
continue
if name and d.name and name.lower() not in d.name.lower():
continue
filtered_dirs.append(d)
if not filtered_dirs:
return "未找到相关目录配置"
simplified_dirs = []
for d in filtered_dirs:
simplified_dirs.append(
{
"name": d.name,
"priority": d.priority,
"storage": d.storage,
"download_path": d.download_path,
"library_path": d.library_path,
"library_storage": d.library_storage,
"media_type": d.media_type,
"media_category": d.media_category,
"monitor_type": d.monitor_type,
"monitor_mode": d.monitor_mode,
"transfer_type": d.transfer_type,
"overwrite_mode": d.overwrite_mode,
"renaming": d.renaming,
"scraping": d.scraping,
"notify": d.notify,
"download_type_folder": d.download_type_folder,
"download_category_folder": d.download_category_folder,
"library_type_folder": d.library_type_folder,
"library_category_folder": d.library_category_folder,
}
)
return json.dumps(simplified_dirs, ensure_ascii=False, indent=2)
async def run(self, directory_type: Optional[str] = "all",
storage_type: Optional[str] = "all",
name: Optional[str] = None, **kwargs) -> str:
logger.info(f"执行工具: {self.name}, 参数: directory_type={directory_type}, storage_type={storage_type}, name={name}")
try:
directory_helper = DirectoryHelper()
# 根据目录类型获取目录列表
if directory_type == "download":
dirs = directory_helper.get_download_dirs()
elif directory_type == "library":
dirs = directory_helper.get_library_dirs()
else:
dirs = directory_helper.get_dirs()
# 按存储类型过滤
filtered_dirs = []
for d in dirs:
# 按存储类型过滤
if storage_type == "local":
# 对于下载目录,检查 storage对于媒体库目录检查 library_storage
if directory_type == "download" and d.storage != "local":
continue
elif directory_type == "library" and d.library_storage != "local":
continue
elif directory_type == "all":
# 检查是否有本地存储配置
if d.download_path and d.storage != "local":
continue
if d.library_path and d.library_storage != "local":
continue
elif storage_type == "remote":
# 对于下载目录,检查 storage对于媒体库目录检查 library_storage
if directory_type == "download" and d.storage == "local":
continue
elif directory_type == "library" and d.library_storage == "local":
continue
elif directory_type == "all":
# 检查是否有远程存储配置
if d.download_path and d.storage == "local":
continue
if d.library_path and d.library_storage == "local":
continue
# 按名称过滤(部分匹配)
if name and d.name and name.lower() not in d.name.lower():
continue
filtered_dirs.append(d)
if filtered_dirs:
# 转换为字典格式,只保留关键信息
simplified_dirs = []
for d in filtered_dirs:
simplified = {
"name": d.name,
"priority": d.priority,
"storage": d.storage,
"download_path": d.download_path,
"library_path": d.library_path,
"library_storage": d.library_storage,
"media_type": d.media_type,
"media_category": d.media_category,
"monitor_type": d.monitor_type,
"monitor_mode": d.monitor_mode,
"transfer_type": d.transfer_type,
"overwrite_mode": d.overwrite_mode,
"renaming": d.renaming,
"scraping": d.scraping,
"notify": d.notify,
"download_type_folder": d.download_type_folder,
"download_category_folder": d.download_category_folder,
"library_type_folder": d.library_type_folder,
"library_category_folder": d.library_category_folder
}
simplified_dirs.append(simplified)
result_json = json.dumps(simplified_dirs, ensure_ascii=False, indent=2)
return result_json
return "未找到相关目录配置"
return self._query_directory_settings(
directory_type=directory_type,
storage_type=storage_type,
name=name,
)
except Exception as e:
logger.error(f"查询系统目录设置失败: {e}", exc_info=True)
return f"查询系统目录设置时发生错误: {str(e)}"

View File

@@ -1,7 +1,7 @@
"""查询下载工具"""
import json
from typing import Optional, Type, List, Union
from typing import Any, Dict, List, Optional, Type, Union
from pydantic import BaseModel, Field
@@ -36,7 +36,7 @@ class QueryDownloadTasksTool(MoviePilotTool):
查询所有状态的任务(包括下载中和已完成的任务)
"""
all_torrents = []
# 查询正在下载的任务
# 查询下载的任务
downloading_torrents = download_chain.list_torrents(
downloader=downloader,
status=TorrentStatus.DOWNLOADING
@@ -64,6 +64,126 @@ class QueryDownloadTasksTool(MoviePilotTool):
except (TypeError, ValueError):
return None
@staticmethod
def _apply_download_history(
    torrent: Union[TransferTorrent, DownloadingTorrent], history: Any
) -> None:
    """Backfill media/user info from a download-history row onto a torrent.

    Mutates ``torrent`` in place; no-op when ``history`` is falsy. The
    hasattr guards are presumably because the two torrent types expose
    different attribute sets — confirm against their schemas.
    """
    if not history:
        return
    if hasattr(torrent, "media"):
        torrent.media = {
            "tmdbid": history.tmdbid,
            "type": history.type,
            "title": history.title,
            "season": history.seasons,
            "episode": history.episodes,
            "image": history.image,
        }
    if hasattr(torrent, "username"):
        torrent.username = history.username
        torrent.userid = history.userid
@classmethod
def _load_history_map(
    cls, torrents: List[Union[TransferTorrent, DownloadingTorrent]]
) -> Dict[str, Any]:
    """Batch-load download history keyed by hash, avoiding N+1 queries."""
    # Torrents without a hash are skipped; an empty hash list short-circuits
    # to avoid a pointless database round-trip.
    hashes = [torrent.hash for torrent in torrents if getattr(torrent, "hash", None)]
    if not hashes:
        return {}
    return DownloadHistoryOper().get_by_hashes(hashes)
@classmethod
def _query_downloads_sync(
cls,
downloader: Optional[str] = None,
status: Optional[str] = "all",
hash_value: Optional[str] = None,
title: Optional[str] = None,
tag: Optional[str] = None,
) -> Dict[str, Any]:
"""
同步查询下载器和下载历史,整个链路放在线程池中执行。
"""
download_chain = DownloadChain()
if hash_value:
torrents = (
download_chain.list_torrents(downloader=downloader, hashs=[hash_value])
or []
)
if not torrents:
return {
"message": f"未找到hash为 {hash_value} 的下载任务(该任务可能已完成、已删除或不存在)"
}
history_map = cls._load_history_map(torrents)
for torrent in torrents:
cls._apply_download_history(torrent, history_map.get(torrent.hash))
filtered_downloads = list(torrents)
elif title:
all_torrents = cls._get_all_torrents(download_chain, downloader)
history_map = cls._load_history_map(all_torrents)
filtered_downloads = []
title_lower = title.lower()
for torrent in all_torrents:
history = history_map.get(torrent.hash)
matched = title_lower in (torrent.title or "").lower() or title_lower in (
getattr(torrent, "name", None) or ""
).lower()
if not matched and history and history.title:
matched = title_lower in history.title.lower()
if not matched:
continue
cls._apply_download_history(torrent, history)
filtered_downloads.append(torrent)
if not filtered_downloads:
return {"message": f"未找到标题包含 '{title}' 的下载任务"}
else:
if status == "downloading":
downloads = download_chain.downloading(name=downloader) or []
filtered_downloads = [
dl
for dl in downloads
if not downloader or dl.downloader == downloader
]
else:
all_torrents = cls._get_all_torrents(download_chain, downloader)
filtered_downloads = []
for torrent in all_torrents:
if downloader and torrent.downloader != downloader:
continue
if status == "completed" and torrent.state not in [
"seeding",
"completed",
]:
continue
if status == "paused" and torrent.state != "paused":
continue
filtered_downloads.append(torrent)
history_map = cls._load_history_map(filtered_downloads)
for torrent in filtered_downloads:
cls._apply_download_history(torrent, history_map.get(torrent.hash))
if tag and filtered_downloads:
tag_lower = tag.lower()
filtered_downloads = [
d for d in filtered_downloads if d.tags and tag_lower in d.tags.lower()
]
if not filtered_downloads:
return {"message": f"未找到标签包含 '{tag}' 的下载任务"}
if not filtered_downloads:
return {"message": "未找到相关下载任务"}
return {"downloads": filtered_downloads}
def get_tool_message(self, **kwargs) -> Optional[str]:
"""根据查询参数生成友好的提示消息"""
downloader = kwargs.get("downloader")
@@ -71,7 +191,7 @@ class QueryDownloadTasksTool(MoviePilotTool):
hash_value = kwargs.get("hash")
title = kwargs.get("title")
parts = ["正在查询下载任务"]
parts = ["查询下载任务"]
if downloader:
parts.append(f"下载器: {downloader}")
@@ -98,124 +218,19 @@ class QueryDownloadTasksTool(MoviePilotTool):
tag: Optional[str] = None, **kwargs) -> str:
logger.info(f"执行工具: {self.name}, 参数: downloader={downloader}, status={status}, hash={hash}, title={title}, tag={tag}")
try:
download_chain = DownloadChain()
# 如果提供了hash直接查询该hash的任务不限制状态
if hash:
torrents = download_chain.list_torrents(downloader=downloader, hashs=[hash]) or []
if not torrents:
return f"未找到hash为 {hash} 的下载任务(该任务可能已完成、已删除或不存在)"
# 转换为DownloadingTorrent格式
downloads = []
for torrent in torrents:
# 获取下载历史信息
history = DownloadHistoryOper().get_by_hash(torrent.hash)
if history:
if hasattr(torrent, "media"):
torrent.media = {
"tmdbid": history.tmdbid,
"type": history.type,
"title": history.title,
"season": history.seasons,
"episode": history.episodes,
"image": history.image,
}
if hasattr(torrent, "username"):
torrent.username = history.username
torrent.userid = history.userid
downloads.append(torrent)
filtered_downloads = downloads
elif title:
# 如果提供了title查询所有任务并搜索匹配的标题
# 查询所有状态的任务
all_torrents = self._get_all_torrents(download_chain, downloader)
filtered_downloads = []
title_lower = title.lower()
for torrent in all_torrents:
# 获取下载历史信息
history = DownloadHistoryOper().get_by_hash(torrent.hash)
# 检查标题或名称是否匹配(包括下载历史中的标题)
matched = False
# 检查torrent的title和name字段
if (title_lower in (torrent.title or "").lower()) or \
(title_lower in (getattr(torrent, "name", None) or "").lower()):
matched = True
# 检查下载历史中的标题
if history and history.title:
if title_lower in history.title.lower():
matched = True
if matched:
if history:
if hasattr(torrent, "media"):
torrent.media = {
"tmdbid": history.tmdbid,
"type": history.type,
"title": history.title,
"season": history.seasons,
"episode": history.episodes,
"image": history.image,
}
if hasattr(torrent, "username"):
torrent.username = history.username
torrent.userid = history.userid
filtered_downloads.append(torrent)
if not filtered_downloads:
return f"未找到标题包含 '{title}' 的下载任务"
else:
# 根据status决定查询方式
if status == "downloading":
# 如果status为下载中使用downloading方法
downloads = download_chain.downloading(name=downloader) or []
filtered_downloads = []
for dl in downloads:
if downloader and dl.downloader != downloader:
continue
filtered_downloads.append(dl)
else:
# 其他状态completed、paused、all使用list_torrents查询所有任务
# 查询所有状态的任务
all_torrents = self._get_all_torrents(download_chain, downloader)
filtered_downloads = []
for torrent in all_torrents:
if downloader and torrent.downloader != downloader:
continue
# 根据status过滤
if status == "completed":
# 已完成的任务state为seeding或completed
if torrent.state not in ["seeding", "completed"]:
continue
elif status == "paused":
# 已暂停的任务
if torrent.state != "paused":
continue
# status == "all" 时不过滤
# 获取下载历史信息
history = DownloadHistoryOper().get_by_hash(torrent.hash)
if history:
if hasattr(torrent, "media"):
torrent.media = {
"tmdbid": history.tmdbid,
"type": history.type,
"title": history.title,
"season": history.seasons,
"episode": history.episodes,
"image": history.image,
}
if hasattr(torrent, "username"):
torrent.username = history.username
torrent.userid = history.userid
filtered_downloads.append(torrent)
# 按tag过滤
if tag and filtered_downloads:
tag_lower = tag.lower()
filtered_downloads = [
d for d in filtered_downloads
if d.tags and tag_lower in d.tags.lower()
]
if not filtered_downloads:
return f"未找到标签包含 '{tag}' 的下载任务"
payload = await self.run_blocking(
"downloader",
self._query_downloads_sync,
downloader,
status,
hash,
title,
tag,
)
if payload.get("message"):
return payload["message"]
filtered_downloads = payload.get("downloads") or []
if filtered_downloads:
# 限制最多20条结果
total_count = len(filtered_downloads)

View File

@@ -23,13 +23,17 @@ class QueryDownloadersTool(MoviePilotTool):
def get_tool_message(self, **kwargs) -> Optional[str]:
"""生成友好的提示消息"""
return "正在查询下载器配置"
return "查询下载器配置"
@staticmethod
def _load_downloaders_config():
"""从内存配置缓存中读取下载器配置。"""
return SystemConfigOper().get(SystemConfigKey.Downloaders)
async def run(self, **kwargs) -> str:
logger.info(f"执行工具: {self.name}")
try:
system_config_oper = SystemConfigOper()
downloaders_config = system_config_oper.get(SystemConfigKey.Downloaders)
downloaders_config = self._load_downloaders_config()
if downloaders_config:
return json.dumps(downloaders_config, ensure_ascii=False, indent=2)
return "未配置下载器。"

View File

@@ -29,7 +29,7 @@ class QueryEpisodeScheduleTool(MoviePilotTool):
season = kwargs.get("season")
episode_group = kwargs.get("episode_group")
message = f"正在查询剧集上映时间: TMDB ID {tmdb_id}{season}"
message = f"查询剧集上映时间: TMDB ID {tmdb_id}{season}"
if episode_group:
message += f" (剧集组: {episode_group})"

View File

@@ -31,34 +31,34 @@ class QueryInstalledPluginsTool(MoviePilotTool):
def get_tool_message(self, **kwargs) -> Optional[str]:
"""生成友好的提示消息"""
return "正在查询已安装插件"
return "查询已安装插件"
@staticmethod
def _list_installed_plugins() -> list[dict]:
"""读取已加载插件的内存快照。"""
plugin_manager = PluginManager()
local_plugins = plugin_manager.get_local_plugins()
installed_plugins = [plugin for plugin in local_plugins if plugin.installed]
return [
{
"id": plugin.id,
"plugin_name": plugin.plugin_name,
"plugin_desc": plugin.plugin_desc,
"plugin_version": plugin.plugin_version,
"plugin_author": plugin.plugin_author,
"state": plugin.state,
"has_page": plugin.has_page,
}
for plugin in installed_plugins
]
async def run(self, **kwargs) -> str:
logger.info(f"执行工具: {self.name}")
try:
plugin_manager = PluginManager()
local_plugins = plugin_manager.get_local_plugins()
# 仅返回已安装的插件
installed_plugins = [plugin for plugin in local_plugins if plugin.installed]
installed_plugins = self._list_installed_plugins()
if not installed_plugins:
return "当前没有已安装的插件"
plugins_list = []
for plugin in installed_plugins:
plugins_list.append(
{
"id": plugin.id,
"plugin_name": plugin.plugin_name,
"plugin_desc": plugin.plugin_desc,
"plugin_version": plugin.plugin_version,
"plugin_author": plugin.plugin_author,
"state": plugin.state,
"has_page": plugin.has_page,
}
)
result_json = json.dumps(plugins_list, ensure_ascii=False, indent=2)
result_json = json.dumps(installed_plugins, ensure_ascii=False, indent=2)
return result_json
except Exception as e:
logger.error(f"查询已安装插件失败: {e}", exc_info=True)

View File

@@ -1,5 +1,6 @@
"""查询媒体库工具"""
import asyncio
import json
from collections import OrderedDict
from typing import Optional, Type, Any
@@ -93,15 +94,25 @@ class QueryLibraryExistsTool(MoviePilotTool):
media_type = kwargs.get("media_type")
if tmdb_id:
message = f"正在查询媒体库: TMDB={tmdb_id}"
message = f"查询媒体库: TMDB={tmdb_id}"
elif douban_id:
message = f"正在查询媒体库: 豆瓣={douban_id}"
message = f"查询媒体库: 豆瓣={douban_id}"
else:
message = "正在查询媒体库"
message = "查询媒体库"
if media_type:
message += f" [{media_type}]"
return message
@staticmethod
def _get_media_server_names() -> list[str]:
"""同步读取已加载媒体服务器名称。"""
return sorted(MediaServerHelper().get_services().keys())
@staticmethod
def _query_media_exists(mediainfo, server: Optional[str] = None):
"""同步查询单个媒体服务器的存在性信息。"""
return MediaServerChain().media_exists(mediainfo=mediainfo, server=server)
async def run(self, tmdb_id: Optional[int] = None, douban_id: Optional[str] = None,
media_type: Optional[str] = None, **kwargs) -> str:
logger.info(f"执行工具: {self.name}, 参数: tmdb_id={tmdb_id}, douban_id={douban_id}, media_type={media_type}")
@@ -116,7 +127,7 @@ class QueryLibraryExistsTool(MoviePilotTool):
return f"错误:无效的媒体类型 '{media_type}',支持的类型:'movie', 'tv'"
media_chain = MediaServerChain()
mediainfo = media_chain.recognize_media(
mediainfo = await media_chain.async_recognize_media(
tmdbid=tmdb_id,
doubanid=douban_id,
mtype=media_type_enum,
@@ -127,12 +138,22 @@ class QueryLibraryExistsTool(MoviePilotTool):
# 2. 遍历所有媒体服务器,分别查询存在性信息
server_results = OrderedDict()
media_server_helper = MediaServerHelper()
total_seasons = _filter_regular_seasons(mediainfo.seasons)
global_existsinfo = media_chain.media_exists(mediainfo=mediainfo)
service_names = self._get_media_server_names()
for service_name in sorted(media_server_helper.get_services().keys()):
existsinfo = media_chain.media_exists(mediainfo=mediainfo, server=service_name)
server_checks = await asyncio.gather(
*[
self.run_blocking(
"mediaserver",
self._query_media_exists,
mediainfo,
service_name,
)
for service_name in service_names
]
)
for service_name, existsinfo in zip(service_names, server_checks):
if not existsinfo:
continue
@@ -147,21 +168,23 @@ class QueryLibraryExistsTool(MoviePilotTool):
"exists": True
}
if global_existsinfo:
fallback_server_name = global_existsinfo.server or "local"
if fallback_server_name not in server_results:
if global_existsinfo.type == MediaType.TV:
server_results[fallback_server_name] = _build_tv_server_result(
existing_seasons=_filter_regular_seasons(global_existsinfo.seasons),
total_seasons=total_seasons
)
else:
server_results[fallback_server_name] = {
"exists": True
}
if not server_results:
return "媒体库中未找到相关媒体"
global_existsinfo = await self.run_blocking(
"mediaserver", self._query_media_exists, mediainfo, None
)
if not global_existsinfo:
return "媒体库中未找到相关媒体"
fallback_server_name = global_existsinfo.server or "local"
if global_existsinfo.type == MediaType.TV:
server_results[fallback_server_name] = _build_tv_server_result(
existing_seasons=_filter_regular_seasons(global_existsinfo.seasons),
total_seasons=total_seasons
)
else:
server_results[fallback_server_name] = {
"exists": True
}
# 3. 组装统一的存在性结果,不查询媒体服务器详情
result_dict = {

View File

@@ -1,5 +1,6 @@
"""查询媒体服务器最近入库影片工具"""
import asyncio
import json
from typing import Optional, Type
@@ -39,7 +40,7 @@ class QueryLibraryLatestTool(MoviePilotTool):
server = kwargs.get("server")
page = kwargs.get("page", 1)
parts = ["正在查询媒体服务器最近入库影片"]
parts = ["查询媒体服务器最近入库影片"]
if server:
parts.append(f"服务器: {server}")
@@ -50,6 +51,32 @@ class QueryLibraryLatestTool(MoviePilotTool):
return " | ".join(parts)
@staticmethod
def _get_enabled_servers() -> list[str]:
"""同步读取启用的媒体服务器列表。"""
mediaservers = ServiceConfigHelper.get_mediaserver_configs()
return [ms.name for ms in mediaservers if ms.enabled]
@staticmethod
def _load_latest_items(
server_name: str, count: int, username: Optional[str] = None
) -> list[dict]:
"""
媒体服务器 SDK 和 requests 调用都是同步的,这里在线程池中转换为可序列化结果。
"""
latest_items = MediaServerChain().latest(
server=server_name, count=count, username=username
)
if not latest_items:
return []
return [
{
**item.model_dump(exclude_none=True),
"server": server_name,
}
for item in latest_items
]
async def run(
self, server: Optional[str] = None, page: Optional[int] = 1, **kwargs
) -> str:
@@ -58,37 +85,34 @@ class QueryLibraryLatestTool(MoviePilotTool):
fetch_count = page * PAGE_SIZE
logger.info(f"执行工具: {self.name}, 参数: server={server}, page={page}")
try:
media_chain = MediaServerChain()
results = []
# 如果没有指定服务器,获取所有启用的媒体服务器
if not server:
mediaservers = ServiceConfigHelper.get_mediaserver_configs()
enabled_servers = [ms.name for ms in mediaservers if ms.enabled]
enabled_servers = self._get_enabled_servers()
if not enabled_servers:
return "未找到启用的媒体服务器"
# 遍历所有启用的服务器
for server_name in enabled_servers:
latest_items = media_chain.latest(
server=server_name, count=fetch_count, username=self._username
)
if latest_items:
for item in latest_items:
item_dict = item.model_dump(exclude_none=True)
item_dict["server"] = server_name
results.append(item_dict)
else:
# 查询指定服务器
latest_items = media_chain.latest(
server=server, count=fetch_count, username=self._username
server_results = await asyncio.gather(
*[
self.run_blocking(
"mediaserver",
self._load_latest_items,
server_name,
fetch_count,
self._username,
)
for server_name in enabled_servers
]
)
results = [
item for items in server_results for item in items if items
]
else:
results = await self.run_blocking(
"mediaserver",
self._load_latest_items,
server,
fetch_count,
self._username,
)
if latest_items:
for item in latest_items:
item_dict = item.model_dump(exclude_none=True)
item_dict["server"] = server
results.append(item_dict)
if not results:
server_info = f"服务器 {server}" if server else "所有服务器"

View File

@@ -29,8 +29,8 @@ class QueryMediaDetailTool(MoviePilotTool):
tmdb_id = kwargs.get("tmdb_id")
douban_id = kwargs.get("douban_id")
if tmdb_id:
return f"正在查询媒体详情: TMDB ID {tmdb_id}"
return f"正在查询媒体详情: 豆瓣 ID {douban_id}"
return f"查询媒体详情: TMDB ID {tmdb_id}"
return f"查询媒体详情: 豆瓣 ID {douban_id}"
async def run(self, media_type: str, tmdb_id: Optional[int] = None, douban_id: Optional[str] = None, **kwargs) -> str:
logger.info(f"执行工具: {self.name}, 参数: tmdb_id={tmdb_id}, douban_id={douban_id}, media_type={media_type}")

View File

@@ -40,73 +40,71 @@ class QueryPluginCapabilitiesTool(MoviePilotTool):
"""生成友好的提示消息"""
plugin_id = kwargs.get("plugin_id")
if plugin_id:
return f"正在查询插件 {plugin_id} 的能力"
return "正在查询所有插件的能力"
return f"查询插件 {plugin_id} 的能力"
return "查询所有插件的能力"
async def run(self, plugin_id: Optional[str] = None, **kwargs) -> str:
logger.info(f"执行工具: {self.name}, 参数: plugin_id={plugin_id}")
try:
plugin_manager = PluginManager()
result = {}
@staticmethod
def _load_plugin_capabilities(plugin_id: Optional[str] = None) -> dict:
"""读取运行中插件实例暴露的内存能力信息。"""
plugin_manager = PluginManager()
result = {}
# 获取插件命令
commands = plugin_manager.get_plugin_commands(pid=plugin_id)
if commands:
commands_list = []
for cmd in commands:
cmd_info = {
"cmd": cmd.get("cmd"),
"desc": cmd.get("desc"),
"plugin_id": cmd.get("pid"),
}
# data 字段可能包含额外参数信息
if cmd.get("data"):
cmd_info["data"] = cmd.get("data")
commands_list.append(cmd_info)
result["commands"] = commands_list
commands = plugin_manager.get_plugin_commands(pid=plugin_id)
if commands:
result["commands"] = [
{
"cmd": cmd.get("cmd"),
"desc": cmd.get("desc"),
"plugin_id": cmd.get("pid"),
**({"data": cmd.get("data")} if cmd.get("data") else {}),
}
for cmd in commands
]
# 获取插件动作
actions = plugin_manager.get_plugin_actions(pid=plugin_id)
if actions:
actions_list = []
for action_group in actions:
plugin_actions = {
actions = plugin_manager.get_plugin_actions(pid=plugin_id)
if actions:
actions_list = []
for action_group in actions:
actions_list.append(
{
"plugin_id": action_group.get("plugin_id"),
"plugin_name": action_group.get("plugin_name"),
"actions": [],
}
for action in action_group.get("actions", []):
plugin_actions["actions"].append(
"actions": [
{
"id": action.get("id"),
"name": action.get("name"),
}
)
actions_list.append(plugin_actions)
result["actions"] = actions_list
# 获取插件定时服务
services = plugin_manager.get_plugin_services(pid=plugin_id)
if services:
services_list = []
for svc in services:
svc_info = {
"id": svc.get("id"),
"name": svc.get("name"),
for action in action_group.get("actions", [])
],
}
# 包含触发器信息
trigger = svc.get("trigger")
if trigger:
svc_info["trigger"] = str(trigger)
# 包含定时器参数
svc_kwargs = svc.get("kwargs")
if svc_kwargs:
svc_info["trigger_kwargs"] = {
k: str(v) for k, v in svc_kwargs.items()
}
services_list.append(svc_info)
result["services"] = services_list
)
result["actions"] = actions_list
services = plugin_manager.get_plugin_services(pid=plugin_id)
if services:
services_list = []
for svc in services:
svc_info = {
"id": svc.get("id"),
"name": svc.get("name"),
}
trigger = svc.get("trigger")
if trigger:
svc_info["trigger"] = str(trigger)
svc_kwargs = svc.get("kwargs")
if svc_kwargs:
svc_info["trigger_kwargs"] = {
k: str(v) for k, v in svc_kwargs.items()
}
services_list.append(svc_info)
result["services"] = services_list
return result
async def run(self, plugin_id: Optional[str] = None, **kwargs) -> str:
logger.info(f"执行工具: {self.name}, 参数: plugin_id={plugin_id}")
try:
result = self._load_plugin_capabilities(plugin_id)
if not result:
if plugin_id:
return f"插件 {plugin_id} 没有注册任何命令、动作或定时服务"

View File

@@ -39,7 +39,7 @@ class QueryPopularSubscribesTool(MoviePilotTool):
min_rating = kwargs.get("min_rating")
max_rating = kwargs.get("max_rating")
parts = [f"正在查询热门订阅 [{media_type}]"]
parts = [f"查询热门订阅 [{media_type}]"]
if min_sub:
parts.append(f"最少订阅: {min_sub}")

View File

@@ -22,38 +22,37 @@ class QueryRuleGroupsTool(MoviePilotTool):
def get_tool_message(self, **kwargs) -> Optional[str]:
"""根据查询参数生成友好的提示消息"""
return "正在查询所有规则组"
return "查询所有规则组"
@staticmethod
def _load_rule_groups() -> dict:
"""从内存配置缓存中读取规则组。"""
rule_groups = RuleHelper().get_rule_groups()
if not rule_groups:
return {
"message": "未找到任何规则组",
"rule_groups": [],
}
simplified_groups = [
{
"name": group.name,
"media_type": group.media_type,
"category": group.category,
}
for group in rule_groups
]
return {
"message": f"找到 {len(simplified_groups)} 个规则组",
"rule_groups": simplified_groups,
}
async def run(self, **kwargs) -> str:
logger.info(f"执行工具: {self.name}")
try:
rule_helper = RuleHelper()
rule_groups = rule_helper.get_rule_groups()
if not rule_groups:
return json.dumps({
"message": "未找到任何规则组",
"rule_groups": []
}, ensure_ascii=False, indent=2)
# 精简字段,过滤掉 rule_string 避免结果过大
simplified_groups = []
for group in rule_groups:
simplified = {
"name": group.name,
"media_type": group.media_type,
"category": group.category
}
simplified_groups.append(simplified)
result = {
"message": f"找到 {len(simplified_groups)} 个规则组",
"rule_groups": simplified_groups
}
result = self._load_rule_groups()
return json.dumps(result, ensure_ascii=False, indent=2)
except Exception as e:
error_message = f"查询规则组失败: {str(e)}"
logger.error(f"查询规则组失败: {e}", exc_info=True)

View File

@@ -22,7 +22,7 @@ class QuerySchedulersTool(MoviePilotTool):
def get_tool_message(self, **kwargs) -> Optional[str]:
"""生成友好的提示消息"""
return "正在查询定时服务"
return "查询定时服务"
async def run(self, **kwargs) -> str:
logger.info(f"执行工具: {self.name}")

View File

@@ -40,7 +40,7 @@ class QuerySiteUserdataTool(MoviePilotTool):
site_id = kwargs.get("site_id")
workdate = kwargs.get("workdate")
message = f"正在查询站点 #{site_id} 的用户数据"
message = f"查询站点 #{site_id} 的用户数据"
if workdate:
message += f" (日期: {workdate})"
else:

View File

@@ -37,7 +37,7 @@ class QuerySitesTool(MoviePilotTool):
status = kwargs.get("status", "all")
name = kwargs.get("name")
parts = ["正在查询站点"]
parts = ["查询站点"]
if status != "all":
status_map = {"active": "已启用", "inactive": "已禁用"}

View File

@@ -44,7 +44,7 @@ class QuerySubscribeHistoryTool(MoviePilotTool):
name = kwargs.get("name")
page = kwargs.get("page", 1)
parts = ["正在查询订阅历史"]
parts = ["查询订阅历史"]
if media_type != "all":
parts.append(f"类型: {media_type}")

View File

@@ -34,7 +34,7 @@ class QuerySubscribeSharesTool(MoviePilotTool):
min_rating = kwargs.get("min_rating")
max_rating = kwargs.get("max_rating")
parts = ["正在查询订阅分享"]
parts = ["查询订阅分享"]
if name:
parts.append(f"名称: {name}")

View File

@@ -79,7 +79,7 @@ class QuerySubscribesTool(MoviePilotTool):
media_type = kwargs.get("media_type", "all")
page = kwargs.get("page", 1)
parts = ["正在查询订阅"]
parts = ["查询订阅"]
# 根据状态过滤条件生成提示
if status != "all":

View File

@@ -33,7 +33,7 @@ class QueryTransferHistoryTool(MoviePilotTool):
status = kwargs.get("status", "all")
page = kwargs.get("page", 1)
parts = ["正在查询整理历史"]
parts = ["查询整理历史"]
if title:
parts.append(f"标题: {title}")

View File

@@ -30,7 +30,7 @@ class QueryWorkflowsTool(MoviePilotTool):
name = kwargs.get("name")
trigger_type = kwargs.get("trigger_type", "all")
parts = ["正在查询工作流"]
parts = ["查询工作流"]
if state != "all":
state_map = {"W": "等待", "R": "运行中", "P": "暂停", "S": "成功", "F": "失败"}

View File

@@ -29,7 +29,7 @@ class ReadFileTool(MoviePilotTool):
"""根据参数生成友好的提示消息"""
file_path = kwargs.get("file_path", "")
file_name = Path(file_path).name if file_path else "未知文件"
return f"正在读取文件: {file_name}"
return f"读取文件: {file_name}"
async def run(self, file_path: str, start_line: Optional[int] = None,
end_line: Optional[int] = None, **kwargs) -> str:

View File

@@ -33,13 +33,13 @@ class RecognizeMediaTool(MoviePilotTool):
path = kwargs.get("path")
if path:
message = f"正在识别文件媒体信息: {path}"
message = f"识别文件媒体信息: {path}"
elif title:
message = f"正在识别种子媒体信息: {title}"
message = f"识别种子媒体信息: {title}"
if subtitle:
message += f" ({subtitle})"
else:
message = "正在识别媒体信息"
message = "识别媒体信息"
return message

View File

@@ -31,29 +31,28 @@ class RunSchedulerTool(MoviePilotTool):
def get_tool_message(self, **kwargs) -> Optional[str]:
"""根据运行参数生成友好的提示消息"""
job_id = kwargs.get("job_id", "")
return f"正在运行定时服务 (ID: {job_id})"
return f"运行定时服务 (ID: {job_id})"
@staticmethod
def _run_scheduler_sync(job_id: str) -> tuple[bool, str]:
"""同步触发定时服务,避免调度器扫描阻塞事件循环。"""
scheduler = Scheduler()
for scheduler_item in scheduler.list():
if scheduler_item.id == job_id:
scheduler.start(job_id)
return True, scheduler_item.name
return False, ""
async def run(self, job_id: str, **kwargs) -> str:
logger.info(f"执行工具: {self.name}, 参数: job_id={job_id}")
try:
scheduler = Scheduler()
# 检查定时服务是否存在
schedulers = scheduler.list()
job_exists = False
job_name = None
for s in schedulers:
if s.id == job_id:
job_exists = True
job_name = s.name
break
job_exists, job_name = await self.run_blocking(
"workflow", self._run_scheduler_sync, job_id
)
if not job_exists:
return f"定时服务 ID {job_id} 不存在,请使用 query_schedulers 工具查询可用的定时服务"
# 运行定时服务
scheduler.start(job_id)
return f"成功触发定时服务:{job_name} (ID: {job_id})"
except Exception as e:
logger.error(f"运行定时服务失败: {e}", exc_info=True)

View File

@@ -45,7 +45,7 @@ class RunSlashCommandTool(MoviePilotTool):
def get_tool_message(self, **kwargs) -> Optional[str]:
"""生成友好的提示消息"""
command = kwargs.get("command", "")
return f"正在执行命令: {command}"
return f"执行命令: {command}"
async def run(self, command: str, **kwargs) -> str:
logger.info(f"执行工具: {self.name}, 参数: command={command}")

View File

@@ -38,7 +38,7 @@ class RunWorkflowTool(MoviePilotTool):
workflow_id = kwargs.get("workflow_id")
from_begin = kwargs.get("from_begin", True)
message = f"正在执行工作流: {workflow_id}"
message = f"执行工作流: {workflow_id}"
if not from_begin:
message += " (从上次位置继续)"
else:
@@ -46,6 +46,13 @@ class RunWorkflowTool(MoviePilotTool):
return message
@staticmethod
def _run_workflow_sync(
workflow_id: int, from_begin: Optional[bool] = True
) -> tuple[bool, str]:
"""同步执行工作流,放到专用线程池避免长流程阻塞 API 响应。"""
return WorkflowChain().process(workflow_id, from_begin=from_begin)
async def run(
self, workflow_id: int, from_begin: Optional[bool] = True, **kwargs
) -> str:
@@ -62,10 +69,12 @@ class RunWorkflowTool(MoviePilotTool):
if not workflow:
return f"未找到工作流:{workflow_id},请使用 query_workflows 工具查询可用的工作流"
# 执行工作流
workflow_chain = WorkflowChain()
state, errmsg = workflow_chain.process(
workflow.id, from_begin=from_begin
# 工作流执行链路包含大量同步步骤,统一放到 workflow 线程池。
state, errmsg = await self.run_blocking(
"workflow",
self._run_workflow_sync,
workflow.id,
from_begin,
)
if not state:

View File

@@ -8,7 +8,6 @@ from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool
from app.chain.media import MediaChain
from app.core.config import global_vars
from app.core.metainfo import MetaInfoPath
from app.log import logger
from app.schemas import FileItem
@@ -47,7 +46,7 @@ class ScrapeMetadataTool(MoviePilotTool):
storage = kwargs.get("storage", "local")
overwrite = kwargs.get("overwrite", False)
message = f"正在刮削媒体元数据: {path}"
message = f"刮削媒体元数据: {path}"
if storage != "local":
message += f" [存储: {storage}]"
if overwrite:
@@ -104,15 +103,14 @@ class ScrapeMetadataTool(MoviePilotTool):
ensure_ascii=False,
)
# 在线程池中执行同步的刮削操作
await global_vars.loop.run_in_executor(
None,
lambda: media_chain.scrape_metadata(
fileitem=fileitem,
meta=meta,
mediainfo=mediainfo,
overwrite=overwrite,
),
# 刮削会包含磁盘写入和外部图片/元数据访问,统一放到 storage 线程池。
await self.run_blocking(
"storage",
media_chain.scrape_metadata,
fileitem=fileitem,
meta=meta,
mediainfo=mediainfo,
overwrite=overwrite,
)
return json.dumps(

View File

@@ -34,7 +34,7 @@ class SearchMediaTool(MoviePilotTool):
media_type = kwargs.get("media_type")
season = kwargs.get("season")
message = f"正在搜索媒体: {title}"
message = f"搜索媒体: {title}"
if year:
message += f" ({year})"
if media_type:

View File

@@ -24,7 +24,7 @@ class SearchPersonTool(MoviePilotTool):
def get_tool_message(self, **kwargs) -> Optional[str]:
"""根据搜索参数生成友好的提示消息"""
name = kwargs.get("name", "")
return f"正在搜索人物: {name}"
return f"搜索人物: {name}"
async def run(self, name: str, **kwargs) -> str:
logger.info(f"执行工具: {self.name}, 参数: name={name}")

View File

@@ -29,7 +29,7 @@ class SearchPersonCreditsTool(MoviePilotTool):
"""根据搜索参数生成友好的提示消息"""
person_id = kwargs.get("person_id", "")
source = kwargs.get("source", "")
return f"正在搜索人物参演作品: {source} ID {person_id}"
return f"搜索人物参演作品: {source} ID {person_id}"
async def run(self, person_id: int, source: str, page: Optional[int] = 1, **kwargs) -> str:
logger.info(f"执行工具: {self.name}, 参数: person_id={person_id}, source={source}, page={page}")

View File

@@ -7,7 +7,6 @@ from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool
from app.chain.subscribe import SubscribeChain
from app.core.config import global_vars
from app.db.subscribe_oper import SubscribeOper
from app.log import logger
from app.schemas.types import media_type_to_agent
@@ -32,7 +31,7 @@ class SearchSubscribeTool(MoviePilotTool):
subscribe_id = kwargs.get("subscribe_id")
manual = kwargs.get("manual", False)
message = f"正在搜索订阅 #{subscribe_id} 的缺失剧集"
message = f"搜索订阅 #{subscribe_id} 的缺失剧集"
if manual:
message += "(手动搜索)"
@@ -81,19 +80,13 @@ class SearchSubscribeTool(MoviePilotTool):
subscribe_oper.update(subscribe_id, {"filter_groups": filter_groups})
logger.info(f"更新订阅 #{subscribe_id} 的规则组为: {filter_groups}")
# 调用 SubscribeChain 的 search 方法
# search 方法是同步的,需要在异步环境中运行
subscribe_chain = SubscribeChain()
# 在线程池中执行同步的搜索操作
# 当 sid 有值时state 参数会被忽略,直接处理该订阅
await global_vars.loop.run_in_executor(
None,
lambda: subscribe_chain.search(
sid=subscribe_id,
state='R', # 默认状态,当 sid 有值时此参数会被忽略
manual=manual
)
# 订阅搜索会触发大量同步站点访问,统一走 subscribe 线程池。
await self.run_blocking(
"subscribe",
SubscribeChain().search,
sid=subscribe_id,
state="R", # 当 sid 有值时此参数会被忽略
manual=manual,
)
# 重新获取订阅信息以获取更新后的状态

View File

@@ -41,15 +41,20 @@ class SearchTorrentsTool(MoviePilotTool):
media_type = kwargs.get("media_type")
if tmdb_id:
message = f"正在搜索种子: TMDB={tmdb_id}"
message = f"搜索种子: TMDB={tmdb_id}"
elif douban_id:
message = f"正在搜索种子: 豆瓣={douban_id}"
message = f"搜索种子: 豆瓣={douban_id}"
else:
message = "正在搜索种子"
message = "搜索种子"
if media_type:
message += f" [{media_type}]"
return message
@staticmethod
def _load_configured_sites() -> List[int]:
"""同步读取默认搜索站点列表。"""
return SystemConfigOper().get(SystemConfigKey.IndexerSites) or []
async def run(self, tmdb_id: Optional[int] = None, douban_id: Optional[str] = None,
media_type: Optional[str] = None, area: Optional[str] = None,
sites: Optional[List[int]] = None, **kwargs) -> str:
@@ -83,8 +88,7 @@ class SearchTorrentsTool(MoviePilotTool):
if sites:
search_site_ids = sites
else:
configured_sites = SystemConfigOper().get(SystemConfigKey.IndexerSites)
search_site_ids = configured_sites if configured_sites else []
search_site_ids = self._load_configured_sites()
if filtered_torrents:
await search_chain.async_save_cache(filtered_torrents, SEARCH_RESULT_CACHE_FILE)

View File

@@ -41,7 +41,7 @@ class SearchWebTool(MoviePilotTool):
"""根据搜索参数生成友好的提示消息"""
query = kwargs.get("query", "")
max_results = kwargs.get("max_results", 20)
return f"正在搜索网络内容: {query} (最多返回 {max_results} 条结果)"
return f"搜索网络内容: {query} (最多返回 {max_results} 条结果)"
async def run(self, query: str, max_results: Optional[int] = 20, **kwargs) -> str:
"""

View File

@@ -0,0 +1,107 @@
"""发送本地附件工具。"""
from pathlib import Path
from typing import Optional, Type
from pydantic import BaseModel, Field, model_validator
from app.agent.tools.base import MoviePilotTool, ToolChain
from app.log import logger
from app.schemas import Notification, NotificationType
from app.schemas.message import ChannelCapabilityManager, ChannelCapability
from app.schemas.types import MessageChannel
class SendLocalFileInput(BaseModel):
"""发送本地附件工具输入。"""
explanation: str = Field(
...,
description="Clear explanation of why sending this local file helps the user",
)
file_path: str = Field(
...,
description="Absolute path to the local image or file to send to the user",
)
message: Optional[str] = Field(
None,
description="Optional message or caption to send with the attachment",
)
title: Optional[str] = Field(
None,
description="Optional short title shown together with the attachment",
)
file_name: Optional[str] = Field(
None,
description="Optional override filename presented to the user when downloading",
)
@model_validator(mode="after")
def validate_file_path(self):
if not self.file_path:
raise ValueError("file_path 不能为空")
return self
class SendLocalFileTool(MoviePilotTool):
name: str = "send_local_file"
description: str = (
"Send a local image or file from the server filesystem to the current user. "
"Use this when you have generated or identified a local file the user should download."
)
args_schema: Type[BaseModel] = SendLocalFileInput
require_admin: bool = False
def get_tool_message(self, **kwargs) -> Optional[str]:
file_path = kwargs.get("file_path", "")
file_name = Path(file_path).name if file_path else "未知文件"
return f"发送本地附件: {file_name}"
async def run(
self,
file_path: str,
message: Optional[str] = None,
title: Optional[str] = None,
file_name: Optional[str] = None,
**kwargs,
) -> str:
if not self._channel or not self._source:
return "当前不在可回传消息的会话中,无法发送附件"
try:
channel = MessageChannel(self._channel)
except ValueError:
return f"不支持的消息渠道: {self._channel}"
if not ChannelCapabilityManager.supports_capability(
channel, ChannelCapability.FILE_SENDING
):
return f"当前渠道 {channel.value} 暂不支持发送本地文件"
resolved_path = Path(file_path).expanduser()
if not resolved_path.is_absolute():
resolved_path = resolved_path.resolve()
if not resolved_path.exists() or not resolved_path.is_file():
return f"文件不存在: {resolved_path}"
logger.info(
"执行工具: %s, channel=%s, file=%s",
self.name,
channel.value,
resolved_path,
)
await ToolChain().async_post_message(
Notification(
channel=channel,
source=self._source,
mtype=NotificationType.Agent,
userid=self._user_id,
username=self._username,
title=title,
text=message,
file_path=str(resolved_path),
file_name=file_name or resolved_path.name,
)
)
return "本地附件已发送"

View File

@@ -2,7 +2,7 @@
from typing import Optional, Type
from pydantic import BaseModel, Field
from pydantic import BaseModel, Field, model_validator
from app.agent.tools.base import MoviePilotTool
from app.log import logger
@@ -15,42 +15,64 @@ class SendMessageInput(BaseModel):
...,
description="Clear explanation of why this tool is being used in the current context",
)
message: str = Field(
...,
message: Optional[str] = Field(
None,
description="The message content to send to the user (should be clear and informative)",
)
message_type: Optional[str] = Field(
title: Optional[str] = Field(
None,
description="Title of the message, a short summary of the message content",
)
image_url: Optional[str] = Field(
None,
description="Optional image URL to send together with the message on channels that support images (such as Telegram and Slack)",
)
@model_validator(mode="after")
def validate_payload(self):
if not self.message and not self.title and not self.image_url:
raise ValueError("message、title、image_url 至少需要提供一个")
return self
class SendMessageTool(MoviePilotTool):
name: str = "send_message"
description: str = "Send notification message to the user through configured notification channels (Telegram, Slack, WeChat, etc.). Used to inform users about operation results, errors, or important updates."
description: str = "Send notification message to the user through configured notification channels (Telegram, Slack, WeChat, etc.). Supports optional image_url on channels that can send images. Used to inform users about operation results, errors, important updates, or proactively send a relevant image."
args_schema: Type[BaseModel] = SendMessageInput
require_admin: bool = True
def get_tool_message(self, **kwargs) -> Optional[str]:
"""根据消息参数生成友好的提示消息"""
message = kwargs.get("message", "")
title = kwargs.get("message_type") or ""
message = kwargs.get("message", "") or ""
title = kwargs.get("title") or ""
image_url = kwargs.get("image_url")
# 截断过长的消息
if len(message) > 50:
message = message[:50] + "..."
if title and image_url:
return f"发送图文消息: [{title}] {message}"
if title:
return f"正在发送消息: [{title}] {message}"
return f"正在发送消息: {message}"
return f"发送消息: [{title}] {message}"
if image_url:
return f"发送图片消息: {message}"
return f"发送消息: {message}"
async def run(
self, message: str, message_type: Optional[str] = None, **kwargs
self,
message: Optional[str] = None,
title: Optional[str] = None,
image_url: Optional[str] = None,
**kwargs,
) -> str:
title = message_type or ""
logger.info(f"执行工具: {self.name}, 参数: title={title}, message={message}")
title = title or ("图片" if image_url and not message else "")
text = message or ""
logger.info(
f"执行工具: {self.name}, 参数: title={title}, message={text}, image_url={image_url}"
)
try:
await self.send_tool_message(message, title=title)
await self.send_tool_message(text, title=title, image=image_url)
return "消息已发送"
except Exception as e:
logger.error(f"发送消息失败: {e}")

View File

@@ -0,0 +1,96 @@
"""发送语音消息工具。"""
import asyncio
from typing import Optional, Type
from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool, ToolChain
from app.core.config import settings
from app.helper.voice import VoiceHelper
from app.helper.service import ServiceConfigHelper
from app.log import logger
from app.schemas import Notification, NotificationType
from app.schemas.types import MessageChannel
class SendVoiceMessageInput(BaseModel):
    """Input schema for the send-voice-message tool."""
    # Agent's stated rationale for choosing a voice reply (kept for transparency/auditing).
    explanation: str = Field(
        ...,
        description="Clear explanation of why a voice reply is the best fit in the current context",
    )
    # Text to be synthesized to speech (or sent as plain text on fallback).
    message: str = Field(
        ...,
        description="The spoken content to send back to the user",
    )
class SendVoiceMessageTool(MoviePilotTool):
    """
    Send a spoken reply to the current user.

    Synthesizes the text to speech when the channel supports voice and TTS is
    available; otherwise falls back to sending the same content as plain text.
    """

    name: str = "send_voice_message"
    description: str = (
        "Send a voice reply to the current user. Prefer this when the user sent a voice message "
        "or when spoken playback is more natural. On channels without voice support or when TTS "
        "is unavailable, it automatically falls back to sending the same content as plain text."
    )
    args_schema: Type[BaseModel] = SendVoiceMessageInput
    require_admin: bool = False

    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Build a short progress note for the tool call; truncates long texts."""
        message = kwargs.get("message") or ""
        if len(message) > 40:
            message = message[:40] + "..."
        return f"发送语音回复: {message}"

    def _supports_real_voice_reply(self) -> bool:
        """
        Return True when the current channel can deliver an actual voice file.

        Telegram always can; WeChat only when the matching notification config
        is not in "bot" mode; every other channel cannot.
        """
        channel = self._channel or ""
        if channel == MessageChannel.Telegram.value:
            return True
        if channel != MessageChannel.Wechat.value:
            return False
        # Find the WeChat notification config that matches this message source.
        for config in ServiceConfigHelper.get_notification_configs():
            if config.name != self._source:
                continue
            return (config.config or {}).get("WECHAT_MODE", "app") != "bot"
        return False

    async def run(self, message: str, **kwargs) -> str:
        """Synthesize and send the voice reply, falling back to plain text when needed."""
        if not message:
            return "语音回复内容不能为空"
        voice_path = None
        used_voice = False
        channel = self._channel or ""
        if self._supports_real_voice_reply() and VoiceHelper.is_available("tts"):
            # TTS synthesis is blocking; run it off the event loop.
            voice_file = await asyncio.to_thread(VoiceHelper.synthesize_speech, message)
            if voice_file:
                voice_path = str(voice_file)
                used_voice = True
        logger.info(
            "执行工具: %s, channel=%s, use_voice=%s, text_len=%s",
            self.name,
            channel,
            used_voice,
            len(message),
        )
        await ToolChain().async_post_message(
            Notification(
                channel=self._channel,
                source=self._source,
                mtype=NotificationType.Agent,
                userid=self._user_id,
                username=self._username,
                text=message,
                voice_path=voice_path,
                # Optionally attach the transcript as a caption next to the voice clip.
                voice_caption=message if settings.AI_VOICE_REPLY_WITH_TEXT else None,
            )
        )
        # Mark the conversation as answered so the agent does not double-reply.
        self._agent_context["user_reply_sent"] = True
        self._agent_context["reply_mode"] = "voice" if used_voice else "text_fallback"
        if used_voice:
            return "语音回复已发送"
        return "当前未使用语音通道,已自动回退为文字回复"

View File

@@ -24,26 +24,31 @@ class TestSiteTool(MoviePilotTool):
def get_tool_message(self, **kwargs) -> Optional[str]:
"""根据测试参数生成友好的提示消息"""
site_identifier = kwargs.get("site_identifier")
return f"正在测试站点连通性: {site_identifier}"
return f"测试站点连通性: {site_identifier}"
    @staticmethod
    def _test_site_sync(site_identifier: int) -> tuple[Optional[str], Optional[str], bool, str]:
        """
        Run the site connectivity test synchronously.

        Executed in a worker thread so the network request does not block the
        event loop. Returns (site_name, site_domain, status, message);
        site_name is None when the site id is unknown.
        """
        site = SiteOper().get(site_identifier)
        if not site:
            return None, None, False, f"未找到站点:{site_identifier},请使用 query_sites 工具查询可用的站点"
        status, message = SiteChain().test(site.domain)
        return site.name, site.domain, status, message
async def run(self, site_identifier: int, **kwargs) -> str:
logger.info(f"执行工具: {self.name}, 参数: site_identifier={site_identifier}")
try:
site_oper = SiteOper()
site_chain = SiteChain()
site = await site_oper.async_get(site_identifier)
if not site:
return f"未找到站点:{site_identifier},请使用 query_sites 工具查询可用的站点"
# 测试站点连通性
status, message = site_chain.test(site.domain)
site_name, site_domain, status, message = await self.run_blocking(
"site", self._test_site_sync, site_identifier
)
if not site_name:
return message
if status:
return f"站点连通性测试成功:{site.name} ({site.domain})\n{message}"
return f"站点连通性测试成功:{site_name} ({site_domain})\n{message}"
else:
return f"站点连通性测试失败:{site.name} ({site.domain})\n{message}"
return f"站点连通性测试失败:{site_name} ({site_domain})\n{message}"
except Exception as e:
logger.error(f"测试站点连通性失败: {e}", exc_info=True)
return f"测试站点连通性时发生错误: {str(e)}"

View File

@@ -68,7 +68,7 @@ class TransferFileTool(MoviePilotTool):
transfer_type = kwargs.get("transfer_type")
background = kwargs.get("background", False)
message = f"正在整理文件: {file_path}"
message = f"整理文件: {file_path}"
if media_type:
message += f" [{media_type}]"
if transfer_type:
@@ -84,6 +84,73 @@ class TransferFileTool(MoviePilotTool):
return message
    @staticmethod
    def _transfer_file_sync(
        file_path: str,
        storage: Optional[str] = "local",
        target_path: Optional[str] = None,
        target_storage: Optional[str] = None,
        media_type: Optional[str] = None,
        tmdbid: Optional[int] = None,
        doubanid: Optional[str] = None,
        season: Optional[int] = None,
        transfer_type: Optional[str] = None,
        background: Optional[bool] = False,
    ) -> str:
        """
        Run the manual file-transfer chain synchronously.

        The transfer chain performs heavy synchronous disk and external-service
        calls, so this is intended to run in a worker thread rather than on the
        event loop. Returns a human-readable result string.
        """
        if not file_path:
            return "错误:必须提供文件或目录路径"
        if storage == "local":
            # Resolve relative local paths to absolute; the second clause
            # matches Windows drive paths such as "C:...".
            if not file_path.startswith("/") and not (
                len(file_path) > 1 and file_path[1] == ":"
            ):
                file_path = str(Path(file_path).resolve())
        elif not file_path.startswith("/"):
            # Remote-storage paths are normalized to be absolute.
            file_path = "/" + file_path
        # NOTE(review): a trailing "/" marks a directory below, but
        # Path.resolve() strips trailing slashes, so a relative local
        # directory like "foo/" would be typed as "file" here — confirm intended.
        fileitem = FileItem(
            storage=storage or "local",
            path=file_path,
            type="dir" if file_path.endswith("/") else "file",
        )
        target_path_obj = Path(target_path) if target_path else None
        media_type_enum = None
        if media_type:
            media_type_enum = MediaType.from_agent(media_type)
            if not media_type_enum:
                return f"错误:无效的媒体类型 '{media_type}',支持的类型:'movie', 'tv'"
        state, errormsg = TransferChain().manual_transfer(
            fileitem=fileitem,
            target_storage=target_storage,
            target_path=target_path_obj,
            tmdbid=tmdbid,
            doubanid=doubanid,
            mtype=media_type_enum,
            season=season,
            transfer_type=transfer_type,
            background=background,
        )
        if state:
            if background:
                return f"整理任务已提交到后台运行:{file_path}"
            return f"整理成功:{file_path}"
        # Failure: errormsg is either a list of per-file errors or one message.
        if isinstance(errormsg, list):
            error_text = f"整理完成,{len(errormsg)} 个文件转移失败"
            if errormsg:
                # Show at most the first 5 errors to keep the reply short.
                error_text += "\n" + "\n".join(str(e) for e in errormsg[:5])
                if len(errormsg) > 5:
                    error_text += f"\n... 还有 {len(errormsg) - 5} 个错误"
        else:
            error_text = str(errormsg)
        return f"整理失败:{error_text}"
async def run(
self,
file_path: str,
@@ -105,73 +172,20 @@ class TransferFileTool(MoviePilotTool):
)
try:
if not file_path:
return "错误:必须提供文件或目录路径"
# 规范化路径
if storage == "local":
# 本地路径处理
if not file_path.startswith("/") and not (
len(file_path) > 1 and file_path[1] == ":"
):
# 相对路径,尝试转换为绝对路径
file_path = str(Path(file_path).resolve())
else:
# 远程存储路径,确保以/开头
if not file_path.startswith("/"):
file_path = "/" + file_path
# 创建FileItem
fileitem = FileItem(
storage=storage or "local",
path=file_path,
type="dir" if file_path.endswith("/") else "file",
return await self.run_blocking(
"storage",
self._transfer_file_sync,
file_path,
storage,
target_path,
target_storage,
media_type,
tmdbid,
doubanid,
season,
transfer_type,
background,
)
# 处理目标路径
target_path_obj = None
if target_path:
target_path_obj = Path(target_path)
# 处理媒体类型
media_type_enum = None
if media_type:
media_type_enum = MediaType.from_agent(media_type)
if not media_type_enum:
return f"错误:无效的媒体类型 '{media_type}',支持的类型:'movie', 'tv'"
# 调用整理方法
transfer_chain = TransferChain()
state, errormsg = transfer_chain.manual_transfer(
fileitem=fileitem,
target_storage=target_storage,
target_path=target_path_obj,
tmdbid=tmdbid,
doubanid=doubanid,
mtype=media_type_enum,
season=season,
transfer_type=transfer_type,
background=background,
)
if not state:
# 处理错误信息
if isinstance(errormsg, list):
error_text = f"整理完成,{len(errormsg)} 个文件转移失败"
if errormsg:
error_text += f"\n" + "\n".join(
str(e) for e in errormsg[:5]
) # 只显示前5个错误
if len(errormsg) > 5:
error_text += f"\n... 还有 {len(errormsg) - 5} 个错误"
else:
error_text = str(errormsg)
return f"整理失败:{error_text}"
else:
if background:
return f"整理任务已提交到后台运行:{file_path}"
else:
return f"整理成功:{file_path}"
except Exception as e:
logger.error(f"整理文件失败: {e}", exc_info=True)
return f"整理文件时发生错误: {str(e)}"

View File

@@ -23,7 +23,12 @@ class UpdateCustomIdentifiersInput(BaseModel):
description=(
"The complete list of custom identifier rules to save. "
"This REPLACES the entire existing list. "
"Always query existing identifiers first, merge new rules, then pass the full list."
"Always query existing identifiers first, merge new rules, then pass the full list. "
"These rules are global and affect future recognition for all torrents/files. "
"When adding a rule for a user-provided sample, prefer narrow regex patterns that include "
"sample-specific anchors such as the title alias, year, season/episode marker, group tag, "
"resolution, or other distinctive fragments. Avoid overly broad patterns like bare generic "
"tags, pure episode numbers, or common release words unless the user explicitly wants a global rule."
),
)
@@ -35,6 +40,10 @@ class UpdateCustomIdentifiersTool(MoviePilotTool):
"This tool REPLACES all existing identifier rules with the provided list. "
"IMPORTANT: Always use 'query_custom_identifiers' first to get existing rules, "
"then merge new rules into the list before calling this tool to avoid accidentally deleting existing rules. "
"IMPORTANT: New identifier rules are global. When the rule is created from a specific torrent/file name, "
"make the regex as narrow as possible and include distinctive elements from that sample so unrelated titles "
"are not affected. Prefer contextual replacements with capture groups/backreferences over bare block words "
"when a generic word like REPACK, WEB-DL, 1080p, 字幕, or a simple episode marker would otherwise match too broadly. "
"Supported rule formats (spaces around operators are required): "
"1) Block word: just the word/regex to remove; "
"2) Replacement: '被替换词 => 替换词'; "
@@ -48,7 +57,7 @@ class UpdateCustomIdentifiersTool(MoviePilotTool):
def get_tool_message(self, **kwargs) -> Optional[str]:
"""生成友好的提示消息"""
identifiers = kwargs.get("identifiers", [])
return f"正在更新自定义识别词(共 {len(identifiers)} 条规则)"
return f"更新自定义识别词(共 {len(identifiers)} 条规则)"
async def run(self, identifiers: List[str] = None, **kwargs) -> str:
logger.info(

View File

@@ -95,8 +95,8 @@ class UpdateSiteTool(MoviePilotTool):
fields_updated.append("下载器")
if fields_updated:
return f"正在更新站点 #{site_id}: {', '.join(fields_updated)}"
return f"正在更新站点 #{site_id}"
return f"更新站点 #{site_id}: {', '.join(fields_updated)}"
return f"更新站点 #{site_id}"
async def run(
self,

View File

@@ -41,12 +41,34 @@ class UpdateSiteCookieTool(MoviePilotTool):
username = kwargs.get("username", "")
two_step_code = kwargs.get("two_step_code")
message = f"正在更新站点Cookie: {site_identifier} (用户: {username})"
message = f"更新站点Cookie: {site_identifier} (用户: {username})"
if two_step_code:
message += " [需要两步验证]"
return message
    @staticmethod
    def _update_site_cookie_sync(
        site_identifier: int,
        username: str,
        password: str,
        two_step_code: Optional[str] = None,
    ) -> tuple[Optional[str], bool, str]:
        """
        Log in to the site and refresh its cookie synchronously.

        Runs in a worker thread so the network login does not block the event
        loop. Returns (site_name, status, message); site_name is None when the
        site id is unknown.
        """
        site = SiteOper().get(site_identifier)
        if not site:
            return None, False, f"未找到站点:{site_identifier},请使用 query_sites 工具查询可用的站点"
        status, message = SiteChain().update_cookie(
            site_info=site,
            username=username,
            password=password,
            two_step_code=two_step_code,
        )
        return site.name, status, message
async def run(
self,
site_identifier: int,
@@ -60,25 +82,21 @@ class UpdateSiteCookieTool(MoviePilotTool):
)
try:
site_oper = SiteOper()
site_chain = SiteChain()
site = await site_oper.async_get(site_identifier)
if not site:
return f"未找到站点:{site_identifier},请使用 query_sites 工具查询可用的站点"
# 更新站点Cookie和UA
status, message = site_chain.update_cookie(
site_info=site,
username=username,
password=password,
two_step_code=two_step_code,
site_name, status, message = await self.run_blocking(
"site",
self._update_site_cookie_sync,
site_identifier,
username,
password,
two_step_code,
)
if not site_name:
return message
if status:
return f"站点【{site.name}】Cookie和UA更新成功\n{message}"
return f"站点【{site_name}】Cookie和UA更新成功\n{message}"
else:
return f"站点【{site.name}】Cookie和UA更新失败\n错误原因:{message}"
return f"站点【{site_name}】Cookie和UA更新失败\n错误原因:{message}"
except Exception as e:
logger.error(f"更新站点Cookie和UA失败: {e}", exc_info=True)
return f"更新站点Cookie和UA时发生错误: {str(e)}"

View File

@@ -117,8 +117,8 @@ class UpdateSubscribeTool(MoviePilotTool):
fields_updated.append("下载器")
if fields_updated:
return f"正在更新订阅 #{subscribe_id}: {', '.join(fields_updated)}"
return f"正在更新订阅 #{subscribe_id}"
return f"更新订阅 #{subscribe_id}: {', '.join(fields_updated)}"
return f"更新订阅 #{subscribe_id}"
async def run(
self,

View File

@@ -27,7 +27,7 @@ class WriteFileTool(MoviePilotTool):
"""根据参数生成友好的提示消息"""
file_path = kwargs.get("file_path", "")
file_name = Path(file_path).name if file_path else "未知文件"
return f"正在写入文件: {file_name}"
return f"写入文件: {file_name}"
async def run(self, file_path: str, content: str, **kwargs) -> str:
logger.info(f"执行工具: {self.name}, 参数: file_path={file_path}")

View File

@@ -2,7 +2,7 @@ from fastapi import APIRouter
from app.api.endpoints import login, user, webhook, message, site, subscribe, \
media, douban, search, plugin, tmdb, history, system, download, dashboard, \
transfer, mediaserver, bangumi, storage, discover, recommend, workflow, torrent, mcp, mfa
transfer, mediaserver, bangumi, storage, discover, recommend, workflow, torrent, mcp, mfa, openai, anthropic
api_router = APIRouter()
api_router.include_router(login.router, prefix="/login", tags=["login"])
@@ -30,3 +30,5 @@ api_router.include_router(recommend.router, prefix="/recommend", tags=["recommen
api_router.include_router(workflow.router, prefix="/workflow", tags=["workflow"])
api_router.include_router(torrent.router, prefix="/torrent", tags=["torrent"])
api_router.include_router(mcp.router, prefix="/mcp", tags=["mcp"])
api_router.include_router(openai.router, prefix="/openai/v1", tags=["openai"])
api_router.include_router(anthropic.router, prefix="/anthropic/v1", tags=["anthropic"])

View File

@@ -0,0 +1,158 @@
import asyncio
import json
import time
import uuid
from typing import AsyncIterator, List, Optional
from fastapi import APIRouter, Header, Security
from fastapi.responses import JSONResponse, StreamingResponse
from app import schemas
from app.api.endpoints.openai import (
MODEL_ID,
_CollectingMoviePilotAgent,
_error_response as _openai_error_response,
)
from app.api.openai_utils import build_anthropic_messages, build_prompt, build_session_id
from app.core.config import settings
from app.core.security import anthropic_api_key_header
from app.schemas.types import MessageChannel
router = APIRouter()
SESSION_PREFIX = "anthropic:"
def _anthropic_error_response(
    message: str,
    status_code: int,
    error_type: str = "invalid_request_error",
) -> JSONResponse:
    """Build an Anthropic-format JSON error response with the given status."""
    detail = schemas.AnthropicErrorDetail(type=error_type, message=message)
    body = schemas.AnthropicErrorResponse(error=detail)
    return JSONResponse(status_code=status_code, content=body.model_dump())
def _check_auth(api_key: Optional[str]) -> Optional[JSONResponse]:
    """
    Validate the Anthropic-style ``x-api-key`` header against the configured token.

    Returns an Anthropic-format 401 response when the key is missing or wrong,
    or ``None`` when authentication succeeds.
    """
    # Local import keeps the module's import surface unchanged.
    import secrets

    # Compare in constant time so the token cannot be guessed via timing;
    # encode first because compare_digest rejects non-ASCII str inputs.
    if not api_key or not secrets.compare_digest(
        api_key.encode("utf-8"), str(settings.API_TOKEN or "").encode("utf-8")
    ):
        return _anthropic_error_response(
            "invalid x-api-key",
            401,
            error_type="authentication_error",
        )
    return None
async def _stream_anthropic_response(
    agent: _CollectingMoviePilotAgent,
    prompt: str,
    images: List[str],
) -> AsyncIterator[str]:
    """
    Run the agent and yield its output as Anthropic-style SSE events.

    Emits the message_start / content_block_start / content_block_delta /
    content_block_stop / message_delta / message_stop sequence expected by
    Anthropic streaming clients. Token usage is reported as 0 because the
    agent does not expose token counts here.
    """
    # Queue bridges the agent task (producer) and this generator (consumer).
    event_queue: asyncio.Queue = asyncio.Queue()
    # Only handlers that support queue binding can stream deltas; otherwise
    # the stream contains just the framing events.
    if hasattr(agent.stream_handler, "bind_queue"):
        agent.stream_handler.bind_queue(event_queue)
    message_id = f"msg_{uuid.uuid4().hex}"

    async def _run_agent():
        try:
            await agent.process(prompt, images=images, files=None)
        except Exception as exc:
            # Surface agent failures to the consumer loop below.
            await event_queue.put({"error": str(exc)})
        finally:
            # None is the sentinel that terminates the consumer loop.
            await event_queue.put(None)

    task = asyncio.create_task(_run_agent())
    try:
        yield f"event: message_start\ndata: {json.dumps({'type': 'message_start', 'message': {'id': message_id, 'type': 'message', 'role': 'assistant', 'content': [], 'model': MODEL_ID, 'stop_reason': None, 'stop_sequence': None, 'usage': {'input_tokens': 0, 'output_tokens': 0}}}, ensure_ascii=False)}\n\n"
        yield f"event: content_block_start\ndata: {json.dumps({'type': 'content_block_start', 'index': 0, 'content_block': {'type': 'text', 'text': ''}}, ensure_ascii=False)}\n\n"
        while True:
            item = await event_queue.get()
            if item is None:
                break
            if isinstance(item, dict) and item.get("error"):
                raise RuntimeError(str(item["error"]))
            text = str(item or "")
            if not text:
                continue
            yield f"event: content_block_delta\ndata: {json.dumps({'type': 'content_block_delta', 'index': 0, 'delta': {'type': 'text_delta', 'text': text}}, ensure_ascii=False)}\n\n"
        yield f"event: content_block_stop\ndata: {json.dumps({'type': 'content_block_stop', 'index': 0}, ensure_ascii=False)}\n\n"
        yield f"event: message_delta\ndata: {json.dumps({'type': 'message_delta', 'delta': {'stop_reason': 'end_turn', 'stop_sequence': None}, 'usage': {'output_tokens': 0}}, ensure_ascii=False)}\n\n"
        yield f"event: message_stop\ndata: {json.dumps({'type': 'message_stop'}, ensure_ascii=False)}\n\n"
    finally:
        # Client disconnects raise GeneratorExit here; make sure the agent
        # task does not linger.
        if not task.done():
            task.cancel()
            try:
                await task
            except asyncio.CancelledError:
                pass
@router.post("/messages", summary="Anthropic compatible messages", response_model=schemas.AnthropicMessagesResponse)
async def messages(
payload: schemas.AnthropicMessagesRequest,
x_api_key: Optional[str] = Security(anthropic_api_key_header),
anthropic_version: Optional[str] = Header(default=None, alias="anthropic-version"),
):
auth_error = _check_auth(x_api_key)
if auth_error:
return auth_error
if not settings.AI_AGENT_ENABLE:
return _anthropic_error_response(
"MoviePilot AI agent is disabled.",
503,
error_type="api_error",
)
normalized_messages = build_anthropic_messages(payload.system, payload.messages)
try:
prompt, images = build_prompt(normalized_messages, use_server_session=False)
except ValueError as exc:
return _anthropic_error_response(str(exc), 400)
session_seed = anthropic_version or "anthropic"
session_id = build_session_id(f"{session_seed}:{uuid.uuid4().hex}", SESSION_PREFIX)
agent = _CollectingMoviePilotAgent(
session_id=session_id,
user_id=session_id,
channel=MessageChannel.Web.value,
source="anthropic",
username="anthropic-client",
stream_mode=payload.stream,
)
if payload.stream:
return StreamingResponse(
_stream_anthropic_response(agent=agent, prompt=prompt, images=images),
media_type="text/event-stream",
headers={
"Cache-Control": "no-cache",
"Connection": "keep-alive",
"X-Accel-Buffering": "no",
},
)
try:
result = await agent.process(prompt, images=images, files=None)
except Exception as exc:
return _anthropic_error_response(str(exc), 500, error_type="api_error")
content = "\n\n".join(
message.strip()
for message in agent.collected_messages
if message and message.strip()
).strip()
if not content and result:
content = str(result).strip()
if not content:
content = "未获得有效回复。"
return schemas.AnthropicMessagesResponse(
id=f"msg_{uuid.uuid4().hex}",
content=[schemas.AnthropicTextBlock(text=content)],
model=MODEL_ID,
)

View File

@@ -1,3 +1,5 @@
import asyncio
import time
from typing import List, Any, Optional
import jieba
@@ -8,6 +10,7 @@ from pathlib import Path
from app import schemas
from app.chain.storage import StorageChain
from app.core.config import settings, global_vars
from app.core.event import eventmanager
from app.core.security import verify_token
from app.db import get_async_db, get_db
@@ -15,11 +18,51 @@ from app.db.models import User
from app.db.models.downloadhistory import DownloadHistory, DownloadFiles
from app.db.models.transferhistory import TransferHistory
from app.db.user_oper import get_current_active_superuser_async, get_current_active_superuser
from app.helper.progress import ProgressHelper
from app.schemas.types import EventType
router = APIRouter()
def _start_ai_redo_task(history_id: int, progress_key: str):
    """
    Kick off an AI-driven redo of one transfer-history record in the background.

    Progress is reported through ProgressHelper under ``progress_key`` so the
    frontend can poll it; the coroutine is scheduled onto the application's
    main event loop because this is called from a sync endpoint.
    """
    # Imported lazily to avoid a circular import at module load time.
    from app.agent import agent_manager

    progress = ProgressHelper(progress_key)
    progress.start()
    progress.update(
        # Fixed typo: "智能助正在" -> "智能助手正在", consistent with the
        # completion/failure messages below.
        text=f"智能助手正在准备整理记录 #{history_id} ...",
        data={"history_id": history_id, "success": True},
    )

    def update_output(text: str):
        # Relay the agent's streaming output into the shared progress record.
        progress.update(text=text, data={"history_id": history_id})

    async def runner():
        try:
            await agent_manager.manual_redo_transfer(
                history_id=history_id,
                output_callback=update_output,
            )
            progress.update(
                text="智能助手整理完成",
                data={"history_id": history_id, "success": True, "completed": True},
            )
        except Exception as e:
            progress.update(
                text=f"智能助手整理失败:{str(e)}",
                data={
                    "history_id": history_id,
                    "success": False,
                    "completed": True,
                    "error": str(e),
                },
            )
        finally:
            # Always close the progress record so pollers terminate.
            progress.end()

    # Schedule on the app's main loop; this function may run off-loop.
    asyncio.run_coroutine_threadsafe(runner(), global_vars.loop)
@router.get("/download", summary="查询下载历史记录", response_model=List[schemas.DownloadHistory])
async def download_history(page: Optional[int] = 1,
count: Optional[int] = 30,
@@ -114,6 +157,28 @@ def delete_transfer_history(history_in: schemas.TransferHistory,
return schemas.Response(success=True)
@router.post("/transfer/{history_id}/ai-redo", summary="智能助手重新整理", response_model=schemas.Response)
def ai_redo_transfer_history(
history_id: int,
db: Session = Depends(get_db),
_: User = Depends(get_current_active_superuser),
) -> Any:
"""
手动触发单条历史记录的 AI 重新整理,并返回进度键。
"""
if not settings.AI_AGENT_ENABLE:
return schemas.Response(success=False, message="MoviePilot智能助手未启用")
history = TransferHistory.get(db, history_id)
if not history:
return schemas.Response(success=False, message="整理记录不存在")
progress_key = f"ai_redo_transfer_{history_id}_{int(time.time() * 1000)}"
_start_ai_redo_task(history_id=history_id, progress_key=progress_key)
return schemas.Response(success=True, data={"progress_key": progress_key})
@router.get("/empty/transfer", summary="清空整理记录", response_model=schemas.Response)
async def empty_transfer_history(db: AsyncSession = Depends(get_async_db),
_: User = Depends(get_current_active_superuser_async)) -> Any:

View File

@@ -38,21 +38,69 @@ async def user_message(background_tasks: BackgroundTasks, request: Request,
body = await request.body()
form = await request.form()
args = request.query_params
source = args.get("source")
content_type = request.headers.get("content-type", "")
body_text = body.decode("utf-8", errors="ignore")
image_markers = [
marker
for marker in (
'"photo"',
'"document"',
'"files"',
'"attachments"',
'"url_private"',
'"image/"',
'"image_url"',
)
if marker in body_text
]
logger.info(
"消息入口收到请求: source=%s, content_type=%s, body_bytes=%s, form_keys=%s, image_markers=%s",
source,
content_type,
len(body),
list(form.keys()) if form else [],
image_markers,
)
background_tasks.add_task(start_message_chain, body, form, args)
return schemas.Response(success=True)
@router.post("/web", summary="接收WEB消息", response_model=schemas.Response)
def web_message(text: str, current_user: User = Depends(get_current_active_superuser)):
async def web_message(
request: Request,
text: Optional[str] = None,
current_user: User = Depends(get_current_active_superuser),
):
"""
WEB消息响应
"""
images = None
content_type = request.headers.get("content-type", "")
if "application/json" in content_type:
try:
payload = await request.json()
except Exception:
payload = None
if isinstance(payload, dict):
text = payload.get("text", text)
image = payload.get("image")
images = payload.get("images")
if image:
if isinstance(images, list):
images = [*images, image]
else:
images = [image]
elif isinstance(images, str):
images = [images]
MessageChain().handle_message(
channel=MessageChannel.Web,
source=current_user.name,
userid=current_user.name,
username=current_user.name,
text=text
text=text or "",
images=images,
)
return schemas.Response(success=True)

432
app/api/endpoints/openai.py Normal file
View File

@@ -0,0 +1,432 @@
import asyncio
import json
import time
import uuid
from typing import AsyncIterator, List, Optional, Tuple
from fastapi import APIRouter, Request, Security
from fastapi.responses import JSONResponse, StreamingResponse
from fastapi.security import HTTPAuthorizationCredentials
from app import schemas
from app.api.openai_utils import (
build_completion_payload,
build_prompt,
build_responses_input,
build_session_id,
)
from app.agent import MoviePilotAgent, StreamingHandler
from app.core.config import settings
from app.core.security import openai_bearer_scheme
from app.schemas.types import MessageChannel
router = APIRouter()
MODEL_ID = "moviepilot-agent"
SESSION_PREFIX = "openai:"
class _CollectingMoviePilotAgent(MoviePilotAgent):
    """
    Agent subclass that captures the final output in memory instead of
    re-sending it through the notification channels.
    """

    def __init__(self, *args, stream_mode: bool = False, **kwargs):
        super().__init__(*args, **kwargs)
        # Every text the agent attempted to send to the user, in order.
        self.collected_messages: List[str] = []
        self.stream_mode = stream_mode
        if stream_mode:
            # Swap in a handler that forwards tokens to an SSE queue.
            self.stream_handler = _OpenAIStreamingHandler()

    def _should_stream(self) -> bool:
        # Streaming is decided by the API request, not by channel config.
        return self.stream_mode

    async def send_agent_message(self, message: str, title: str = ""):
        # Collect "title\nbody" locally; also push to the SSE queue when streaming.
        text = (message or "").strip()
        if title and text:
            text = f"{title}\n{text}"
        elif title:
            text = title.strip()
        if text:
            self.collected_messages.append(text)
            if self.stream_mode:
                self.stream_handler.emit(text)

    async def _save_agent_message_to_db(self, message: str, title: str = ""):
        # API conversations are stateless server-side; skip DB persistence.
        return None
class _OpenAIStreamingHandler(StreamingHandler):
    """
    Streaming handler that forwards agent tokens to an OpenAI SSE queue
    instead of posting incremental messages to the in-app message system.
    """

    def __init__(self):
        super().__init__()
        # Bound lazily per request by the SSE response generator.
        self._event_queue: Optional[asyncio.Queue] = None

    def bind_queue(self, queue: asyncio.Queue):
        # Attach the queue that the SSE generator consumes.
        self._event_queue = queue

    def emit(self, token: str):
        emitted = super().emit(token)
        if emitted and self._event_queue is not None:
            # put_nowait never blocks here: the queue is unbounded.
            self._event_queue.put_nowait(emitted)

    def flush_pending_tool_summary(self) -> str:
        emitted = super().flush_pending_tool_summary()
        if emitted and self._event_queue is not None:
            self._event_queue.put_nowait(emitted)
        return emitted

    async def start_streaming(
        self,
        channel: Optional[str] = None,
        source: Optional[str] = None,
        user_id: Optional[str] = None,
        username: Optional[str] = None,
        title: str = "",
    ):
        # Reset per-conversation state without touching channel messaging.
        # NOTE(review): these assignments mirror the parent's private fields —
        # confirm against StreamingHandler when its definition changes.
        self._channel = channel
        self._source = source
        self._user_id = user_id
        self._username = username
        self._title = title
        self._streaming_enabled = True
        self._sent_text = ""
        self._message_response = None
        self._msg_start_offset = 0
        self._max_message_length = 0

    async def stop_streaming(self) -> Tuple[bool, str]:
        # Returns (was_streaming, accumulated_text) and clears buffers.
        if not self._streaming_enabled:
            return False, ""
        self._streaming_enabled = False
        with self._lock:
            final_text = self._buffer
            self._buffer = ""
            self._sent_text = ""
            self._message_response = None
            self._msg_start_offset = 0
        return True, final_text
def _sse_payload(data: dict) -> str:
return f"data: {json.dumps(data, ensure_ascii=False)}\n\n"
async def _stream_response(
    agent: _CollectingMoviePilotAgent,
    prompt: str,
    images: List[str],
) -> AsyncIterator[str]:
    """
    Run the agent and yield its output as OpenAI chat-completion SSE chunks.

    Emits an initial role chunk, one content chunk per queued text fragment,
    a final chunk with ``finish_reason="stop"``, then the literal [DONE] frame.
    """
    # Queue bridges the agent task (producer) and this generator (consumer).
    event_queue: asyncio.Queue = asyncio.Queue()
    if isinstance(agent.stream_handler, _OpenAIStreamingHandler):
        agent.stream_handler.bind_queue(event_queue)
    created = int(time.time())
    completion_id = f"chatcmpl-{uuid.uuid4().hex}"
    # True once the stop chunk has been emitted (agent completed normally).
    finished = False

    async def _run_agent():
        try:
            await agent.process(prompt, images=images, files=None)
        except Exception as exc:
            # Surface agent failures to the consumer loop below.
            await event_queue.put({"error": str(exc)})
        finally:
            # None is the sentinel that terminates the consumer loop.
            await event_queue.put(None)

    task = asyncio.create_task(_run_agent())
    try:
        yield _sse_payload(
            {
                "id": completion_id,
                "object": "chat.completion.chunk",
                "created": created,
                "model": MODEL_ID,
                "choices": [
                    {
                        "index": 0,
                        "delta": {"role": "assistant"},
                        "finish_reason": None,
                    }
                ],
            }
        )
        while True:
            item = await event_queue.get()
            if item is None:
                break
            if isinstance(item, dict) and item.get("error"):
                raise RuntimeError(str(item["error"]))
            text = str(item or "")
            if not text:
                continue
            yield _sse_payload(
                {
                    "id": completion_id,
                    "object": "chat.completion.chunk",
                    "created": created,
                    "model": MODEL_ID,
                    "choices": [
                        {
                            "index": 0,
                            "delta": {"content": text},
                            "finish_reason": None,
                        }
                    ],
                }
            )
        finished = True
        yield _sse_payload(
            {
                "id": completion_id,
                "object": "chat.completion.chunk",
                "created": created,
                "model": MODEL_ID,
                "choices": [
                    {
                        "index": 0,
                        "delta": {},
                        "finish_reason": "stop",
                    }
                ],
            }
        )
        yield "data: [DONE]\n\n"
    finally:
        # Client disconnects raise GeneratorExit here; clean up the agent task.
        if not task.done():
            task.cancel()
            try:
                await task
            except asyncio.CancelledError:
                pass
        elif finished:
            # Task completed; awaiting re-raises any stray exception.
            await task
def _error_response(
    message: str,
    status_code: int,
    error_type: str = "invalid_request_error",
    code: Optional[str] = None,
) -> JSONResponse:
    """Build an OpenAI-format JSON error response with a WWW-Authenticate header."""
    detail = schemas.OpenAIErrorDetail(
        message=message,
        type=error_type,
        code=code,
    )
    payload = schemas.OpenAIErrorResponse(error=detail)
    return JSONResponse(
        status_code=status_code,
        content=payload.model_dump(),
        headers={"WWW-Authenticate": "Bearer"},
    )
def _check_auth(
    credentials: Optional[HTTPAuthorizationCredentials],
) -> Optional[JSONResponse]:
    """
    Validate the OpenAI-style bearer token against the configured API token.

    Returns an OpenAI-format 401 response when the credentials are missing,
    use a non-bearer scheme, or the token is wrong; ``None`` on success.
    """
    # Local import keeps the module's import surface unchanged.
    import secrets

    if not credentials or credentials.scheme.lower() != "bearer":
        return _error_response(
            "Invalid bearer token.",
            401,
            error_type="authentication_error",
            code="invalid_api_key",
        )
    # Compare in constant time so the token cannot be guessed via timing;
    # encode first because compare_digest rejects non-ASCII str inputs.
    if not secrets.compare_digest(
        credentials.credentials.encode("utf-8"),
        str(settings.API_TOKEN or "").encode("utf-8"),
    ):
        return _error_response(
            "Invalid bearer token.",
            401,
            error_type="authentication_error",
            code="invalid_api_key",
        )
    return None
@router.get("/models", summary="OpenAI compatible models", response_model=schemas.OpenAIModelListResponse)
async def list_models(
credentials: Optional[HTTPAuthorizationCredentials] = Security(openai_bearer_scheme),
):
auth_error = _check_auth(credentials)
if auth_error:
return auth_error
now = int(time.time())
return schemas.OpenAIModelListResponse(
data=[schemas.OpenAIModelInfo(id=MODEL_ID, created=now)]
)
@router.post(
    "/chat/completions",
    summary="OpenAI compatible chat completions",
    response_model=schemas.OpenAIChatCompletionResponse,
)
async def chat_completions(
    payload: schemas.OpenAIChatCompletionsRequest,
    request: Request,
    credentials: Optional[HTTPAuthorizationCredentials] = Security(openai_bearer_scheme),
):
    """
    OpenAI-compatible ``/chat/completions`` endpoint backed by the MoviePilot agent.

    Authenticates the bearer token, builds an agent prompt from the chat
    messages, and returns either an SSE stream or a single completion payload
    depending on ``payload.stream``.
    """
    auth_error = _check_auth(credentials)
    if auth_error:
        return auth_error
    if not settings.AI_AGENT_ENABLE:
        return _error_response(
            "MoviePilot AI agent is disabled.",
            503,
            error_type="server_error",
            code="ai_agent_disabled",
        )
    if not payload.messages:
        return _error_response(
            "`messages` must be a non-empty array.",
            400,
            code="invalid_messages",
        )
    # Session identity: explicit `user` field, then the X-Session-Id header,
    # then a random one-off id.
    session_key = (
        str(payload.user or "").strip()
        or str(request.headers.get("x-session-id") or "").strip()
        or str(uuid.uuid4())
    )
    # Server-side session history is reused only when the caller supplied a
    # stable identity (user or header), never for random session keys.
    use_server_session = bool(
        str(payload.user or "").strip()
        or str(request.headers.get("x-session-id") or "").strip()
    )
    try:
        prompt, images = build_prompt(payload.messages, use_server_session=use_server_session)
    except ValueError as exc:
        return _error_response(str(exc), 400, code="invalid_messages")
    session_id = build_session_id(session_key, SESSION_PREFIX)
    username = str(payload.user or "openai-client")
    agent = _CollectingMoviePilotAgent(
        session_id=session_id,
        user_id=session_key,
        channel=MessageChannel.Web.value,
        source="openai",
        username=username,
        stream_mode=payload.stream,
    )
    if payload.stream:
        return StreamingResponse(
            _stream_response(agent=agent, prompt=prompt, images=images),
            media_type="text/event-stream",
            headers={
                "Cache-Control": "no-cache",
                "Connection": "keep-alive",
                # Disable proxy buffering so SSE chunks flush immediately.
                "X-Accel-Buffering": "no",
            },
        )
    try:
        result = await agent.process(prompt, images=images, files=None)
    except Exception as exc:
        return _error_response(
            str(exc),
            500,
            error_type="server_error",
            code="agent_execution_failed",
        )
    # Prefer the messages the agent tried to send; fall back to the raw result.
    content = "\n\n".join(
        message.strip()
        for message in agent.collected_messages
        if message and message.strip()
    ).strip()
    if not content and result:
        content = str(result).strip()
    if not content:
        content = "未获得有效回复。"
    return JSONResponse(content=build_completion_payload(content, MODEL_ID))
@router.post("/responses", summary="OpenAI compatible responses", response_model=schemas.OpenAIResponsesResponse)
async def responses(
    payload: schemas.OpenAIResponsesRequest,
    credentials: Optional[HTTPAuthorizationCredentials] = Security(openai_bearer_scheme),
):
    """
    OpenAI-compatible ``/responses`` endpoint backed by the MoviePilot agent.

    Validates auth and payload, runs the agent once (non-streaming only),
    then wraps the collected reply in an ``OpenAIResponsesResponse``.
    """
    # Bail out early on auth failure or when the agent feature is off.
    auth_failure = _check_auth(credentials)
    if auth_failure:
        return auth_failure
    if not settings.AI_AGENT_ENABLE:
        return _error_response(
            "MoviePilot AI agent is disabled.",
            503,
            error_type="server_error",
            code="ai_agent_disabled",
        )
    # Streaming is deliberately unsupported on this route for now.
    if payload.stream:
        return _error_response(
            "Streaming is not supported for /responses yet.",
            400,
            code="unsupported_stream",
        )
    normalized_messages = build_responses_input(payload.input, instructions=payload.instructions)
    if not normalized_messages:
        return _error_response(
            "`input` must include at least one usable message.",
            400,
            code="invalid_input",
        )
    try:
        prompt, images = build_prompt(normalized_messages, use_server_session=bool(payload.user))
    except ValueError as exc:
        return _error_response(str(exc), 400, code="invalid_input")
    # A caller-supplied user id keeps the session stable across requests;
    # otherwise each request gets a throwaway session key.
    user_key = str(payload.user or uuid.uuid4())
    agent = _CollectingMoviePilotAgent(
        session_id=build_session_id(user_key, SESSION_PREFIX),
        user_id=user_key,
        channel=MessageChannel.Web.value,
        source="openai.responses",
        username=str(payload.user or "openai-client"),
        stream_mode=False,
    )
    try:
        result = await agent.process(prompt, images=images, files=None)
    except Exception as exc:
        return _error_response(
            str(exc),
            500,
            error_type="server_error",
            code="agent_execution_failed",
        )
    # Prefer messages collected during the run; fall back to the raw result.
    fragments = [msg.strip() for msg in agent.collected_messages if msg and msg.strip()]
    reply_text = "\n\n".join(fragments).strip()
    if not reply_text and result:
        reply_text = str(result).strip()
    reply_text = reply_text or "未获得有效回复。"
    return schemas.OpenAIResponsesResponse(
        id=f"resp_{uuid.uuid4().hex}",
        created_at=int(time.time()),
        model=MODEL_ID,
        output=[
            schemas.OpenAIResponsesOutputMessage(
                id=f"msg_{uuid.uuid4().hex}",
                content=[schemas.OpenAIResponsesOutputText(text=reply_text)],
            )
        ],
        usage=schemas.OpenAIUsage(),
    )

View File

@@ -155,9 +155,13 @@ async def all_plugins(_: User = Depends(get_current_active_superuser_async),
# 未安装的本地插件
not_installed_plugins = [plugin for plugin in local_plugins if not plugin.installed]
# 本地插件仓库目录中的插件
local_repo_plugins = plugin_manager.get_local_repo_plugins()
# 在线插件
online_plugins = await plugin_manager.async_get_online_plugins(force)
if not online_plugins:
candidate_plugins = plugin_manager.process_plugins_list(online_plugins + local_repo_plugins, []) \
if online_plugins or local_repo_plugins else []
if not candidate_plugins:
# 没有获取在线插件
if state == "market":
# 返回未安装的本地插件
@@ -169,7 +173,7 @@ async def all_plugins(_: User = Depends(get_current_active_superuser_async),
# 已安装插件IDS
_installed_ids = [plugin.id for plugin in installed_plugins]
# 未安装的线上插件或者有更新的插件
for plugin in online_plugins:
for plugin in candidate_plugins:
if plugin.id not in _installed_ids:
market_plugins.append(plugin)
elif plugin.has_update:
@@ -229,11 +233,15 @@ async def install(plugin_id: str,
# 首先检查插件是否已经存在,并且是否强制安装,否则只进行安装统计
plugin_helper = PluginHelper()
if not force and plugin_id in PluginManager().get_plugin_ids():
await plugin_helper.async_install_reg(pid=plugin_id)
await plugin_helper.async_install_reg(pid=plugin_id, repo_url=repo_url)
else:
# 插件不存在或需要强制安装,下载安装并注册插件
if repo_url:
state, msg = await plugin_helper.async_install(pid=plugin_id, repo_url=repo_url)
state, msg = await plugin_helper.async_install(
pid=plugin_id,
repo_url=repo_url,
force_install=force
)
# 安装失败则直接响应
if not state:
return schemas.Response(success=False, message=msg)

View File

@@ -399,7 +399,15 @@ async def subscribe_history(
"""
查询电影/电视剧订阅历史
"""
return await SubscribeHistory.async_list_by_type(db, mtype=mtype, page=page, count=count)
histories = await SubscribeHistory.async_list_by_type(db, mtype=mtype, page=page, count=count)
result = []
for history in histories:
history_item = schemas.Subscribe.model_validate(history, from_attributes=True)
if history_item.type == MediaType.TV.value:
history_item.total_episode = 0
history_item.lack_episode = 0
result.append(history_item)
return result
@router.delete("/history/{history_id}", summary="删除订阅历史", response_model=schemas.Response)

View File

@@ -3,7 +3,8 @@ import json
import re
from collections import deque
from datetime import datetime
from typing import Optional, Union, Annotated
from typing import Any, Optional, Union, Annotated
from urllib.parse import urljoin, urlparse
import aiofiles
import pillow_avif # noqa 用于自动注册AVIF支持
@@ -11,6 +12,7 @@ from anyio import Path as AsyncPath
from app.helper.sites import SitesHelper # noqa # noqa
from fastapi import APIRouter, Body, Depends, HTTPException, Header, Request, Response
from fastapi.responses import StreamingResponse
from pydantic import BaseModel
from app import schemas
from app.chain.mediaserver import MediaServerChain
@@ -28,14 +30,14 @@ from app.db.user_oper import (
get_current_active_superuser_async,
get_current_active_user_async,
)
from app.helper.llm import LLMHelper
from app.helper.image import ImageHelper
from app.helper.llm import LLMHelper, LLMTestTimeout
from app.helper.mediaserver import MediaServerHelper
from app.helper.message import MessageHelper
from app.helper.progress import ProgressHelper
from app.helper.rule import RuleHelper
from app.helper.subscribe import SubscribeHelper
from app.helper.system import SystemHelper
from app.helper.image import ImageHelper
from app.log import logger
from app.scheduler import Scheduler
from app.schemas import ConfigChangeEventData
@@ -48,14 +50,322 @@ from version import APP_VERSION
router = APIRouter()
_NETTEST_REDIRECT_STATUS_CODES = {301, 302, 303, 307, 308}
class LlmTestRequest(BaseModel):
    """
    Request body for the LLM connectivity test endpoint (``/llm-test``).

    All fields are optional so the frontend can submit a partial draft
    config; the endpoint validates the combination and reports what is
    missing instead of rejecting the request outright.
    """

    # Whether the AI assistant feature is switched on in the draft config.
    enabled: Optional[bool] = None
    # LLM provider identifier (e.g. the vendor key used by LLMHelper).
    provider: Optional[str] = None
    # Model name to test against.
    model: Optional[str] = None
    # Optional reasoning/thinking level passed through to the provider.
    thinking_level: Optional[str] = None
    # API key to authenticate with; never echoed back in error messages.
    api_key: Optional[str] = None
    # Optional custom API base URL (for proxies / self-hosted gateways).
    base_url: Optional[str] = None
def _match_nettest_prefix(url: str, prefix: str) -> bool:
"""
判断目标URL是否仍然落在允许的协议、主机、端口和路径前缀内。
nettest 会在服务端手动处理重定向,因此这里需要一个比简单 startswith
更严格的匹配,避免不同端口或同名路径被误判为白名单内跳转。
"""
parsed_url = urlparse(url)
parsed_prefix = urlparse(prefix)
if parsed_url.scheme.lower() != parsed_prefix.scheme.lower():
return False
if (parsed_url.hostname or "").lower() != (parsed_prefix.hostname or "").lower():
return False
url_port = parsed_url.port or (443 if parsed_url.scheme.lower() == "https" else 80)
prefix_port = parsed_prefix.port or (443 if parsed_prefix.scheme.lower() == "https" else 80)
if url_port != prefix_port:
return False
return parsed_url.path.startswith(parsed_prefix.path or "/")
def _build_nettest_rules() -> list[dict[str, Any]]:
    """
    Build the system's built-in network-test targets.

    This is the single place that defines both which test entries the
    frontend may display and which remote addresses the backend may access.
    The frontend only receives the display fields (id/name/icon); the real
    URLs, proxy policy, content-validation rules and redirect whitelists
    stay server-side so user-controlled SSRF cannot reappear.
    """
    # Optional acceleration proxies from settings; empty string when unset.
    github_proxy = UrlUtils.standardize_base_url(settings.GITHUB_PROXY or "")
    pip_proxy = UrlUtils.standardize_base_url(
        settings.PIP_PROXY or "https://pypi.org/simple/"
    )
    tmdb_key = settings.TMDB_API_KEY
    tmdb_domain = settings.TMDB_API_DOMAIN or "api.themoviedb.org"
    github_readme_url = "https://github.com/jxxghp/MoviePilot/blob/v2/README.md"
    raw_readme_url = "https://raw.githubusercontent.com/jxxghp/MoviePilot/v2/README.md"
    rules = [
        {
            "id": "tmdb_api",
            "name": "api.themoviedb.org",
            "icon": "tmdb",
            "url": f"https://api.themoviedb.org/3/movie/550?api_key={tmdb_key}",
            "proxy": True,
            "allowed_redirect_prefixes": [
                "https://api.themoviedb.org/3/",
            ],
        },
        {
            "id": "tmdb_api_alt",
            "name": "api.tmdb.org",
            "icon": "tmdb",
            "url": f"https://api.tmdb.org/3/movie/550?api_key={tmdb_key}",
            "proxy": True,
            "allowed_redirect_prefixes": [
                "https://api.tmdb.org/3/",
            ],
        },
        {
            "id": "tmdb_web",
            "name": "www.themoviedb.org",
            "icon": "tmdb",
            "url": "https://www.themoviedb.org",
            "proxy": True,
            "allowed_redirect_prefixes": ["https://www.themoviedb.org/"],
        },
        {
            "id": "tvdb_api",
            "name": "api.thetvdb.com",
            "icon": "tvdb",
            "url": "https://api.thetvdb.com/series/81189",
            "proxy": True,
            "allowed_redirect_prefixes": ["https://api.thetvdb.com/"],
        },
        {
            "id": "fanart_api",
            "name": "webservice.fanart.tv",
            "icon": "fanart",
            "url": "https://webservice.fanart.tv",
            "proxy": True,
            "allowed_redirect_prefixes": ["https://webservice.fanart.tv/"],
        },
        {
            "id": "telegram_api",
            "name": "api.telegram.org",
            "icon": "telegram",
            "url": "https://api.telegram.org",
            "proxy": True,
            "allowed_redirect_prefixes": [
                "https://api.telegram.org/",
                "https://core.telegram.org/",
            ],
        },
        {
            "id": "wechat_api",
            "name": "qyapi.weixin.qq.com",
            "icon": "wechat",
            "url": "https://qyapi.weixin.qq.com/cgi-bin/gettoken",
            "proxy": False,
            "allowed_redirect_prefixes": ["https://qyapi.weixin.qq.com/"],
        },
        {
            "id": "douban_api",
            "name": "frodo.douban.com",
            "icon": "douban",
            "url": "https://frodo.douban.com",
            "proxy": False,
            "allowed_redirect_prefixes": [
                "https://frodo.douban.com/",
                "https://www.douban.com/doubanapp/frodo",
            ],
        },
        {
            "id": "slack_api",
            "name": "slack.com",
            "icon": "slack",
            "url": "https://slack.com",
            "proxy": False,
            "allowed_redirect_prefixes": [
                "https://slack.com/",
                "https://www.slack.com/",
            ],
        },
        {
            "id": "pip_proxy",
            "name": "pypi.org",
            "icon": "python",
            "url": f"{pip_proxy}rsa/",
            "proxy": True,
            "allowed_redirect_prefixes": [
                pip_proxy,
                "https://pypi.org/simple/",
            ],
            "expected_text": "pypi:repository-version",
            "invalid_message": "PIP加速代理已失效请检查配置",
            "proxy_name": "PIP加速代理",
        },
        {
            "id": "github_proxy_web",
            "name": "github.com",
            "icon": "github",
            "url": f"{github_proxy}{github_readme_url}" if github_proxy else github_readme_url,
            "proxy": True,
            "allowed_redirect_prefixes": [
                "https://github.com/",
                *((f"{github_proxy}https://github.com/",) if github_proxy else ()),
            ],
            "expected_text": "MoviePilot",
            "invalid_message": "Github加速代理已失效请检查配置" if github_proxy else "无效响应",
            "proxy_name": "Github加速代理" if github_proxy else "",
            "headers": settings.GITHUB_HEADERS,
        },
        {
            "id": "github_api",
            "name": "api.github.com",
            "icon": "github",
            "url": "https://api.github.com",
            "proxy": True,
            "allowed_redirect_prefixes": ["https://api.github.com/"],
            "headers": settings.GITHUB_HEADERS,
        },
        {
            "id": "github_codeload",
            "name": "codeload.github.com",
            "icon": "github",
            "url": "https://codeload.github.com",
            "proxy": True,
            "allowed_redirect_prefixes": [
                "https://codeload.github.com/",
                "https://github.com/",
            ],
            "headers": settings.GITHUB_HEADERS,
        },
        {
            "id": "github_proxy_raw",
            "name": "raw.githubusercontent.com",
            "icon": "github",
            "url": f"{github_proxy}{raw_readme_url}" if github_proxy else raw_readme_url,
            "proxy": True,
            "allowed_redirect_prefixes": [
                "https://raw.githubusercontent.com/",
                *((f"{github_proxy}https://raw.githubusercontent.com/",) if github_proxy else ()),
            ],
            "expected_text": "MoviePilot",
            "invalid_message": "Github加速代理已失效请检查配置" if github_proxy else "无效响应",
            "proxy_name": "Github加速代理" if github_proxy else "",
            "headers": settings.GITHUB_HEADERS,
        },
    ]
    # A custom TMDB API domain gets its own entry right after the defaults.
    if tmdb_domain not in {"api.themoviedb.org", "api.tmdb.org"}:
        rules.insert(
            2,
            {
                "id": "tmdb_api_configured",
                "name": tmdb_domain,
                "icon": "tmdb",
                "url": f"https://{tmdb_domain}/3/movie/550?api_key={tmdb_key}",
                "proxy": True,
                "allowed_redirect_prefixes": [
                    f"https://{tmdb_domain}/3/",
                ],
            },
        )
    return rules
def _sanitize_llm_test_error(message: str, api_key: Optional[str] = None) -> str:
"""
清理错误信息中的敏感字段,避免回显密钥。
"""
if not message:
return "LLM 调用失败"
sanitized = message
if api_key:
sanitized = sanitized.replace(api_key, "***")
sanitized = re.sub(
r"(?i)(api[_-]?key\s*[:=]\s*)([^\s,;]+)",
r"\1***",
sanitized,
)
sanitized = re.sub(
r"(?i)authorization\s*:\s*bearer\s+[^\s,;]+",
"Authorization: ***",
sanitized,
)
return sanitized
def _validate_nettest_url(url: str) -> Optional[str]:
    """
    Baseline safety validation of the address that will actually be fetched.

    Even though requests originate from server-side built-in rules, this is
    kept as a last line of defence against config values being assembled
    into a non-HTTPS, credential-carrying, or non-whitelisted address.

    Returns an error message, or None when the URL is acceptable.
    """
    parts = urlparse(url)
    if parts.scheme.lower() != "https":
        return "测试地址仅支持 HTTPS"
    if not parts.netloc:
        return "测试地址无效"
    if parts.username or parts.password:
        return "测试地址不支持携带账号信息"
    # The URL must map back to one of the server-defined test targets.
    return None if _get_nettest_rule(url) else "测试地址不在允许的测试目标列表中"
def _get_nettest_rule(url: Optional[str] = None, target_id: Optional[str] = None) -> Optional[dict[str, Any]]:
    """
    Look up a network-test rule by *target_id* or (legacy) by *url*.

    ``target_id`` is the primary path; the ``url`` parameter only exists for
    old frontends and un-upgraded callers. Either way, a match can only land
    on a server-side predefined rule.
    """
    matches = (
        rule
        for rule in _build_nettest_rules()
        if (target_id and rule.get("id") == target_id)
        or (url and rule.get("url") == url)
    )
    return next(matches, None)
def _is_allowed_nettest_redirect(url: str, rule: dict[str, Any]) -> bool:
    """
    Decide whether a redirect target is still within the test item's scope.

    nettest no longer trusts client-side redirect following; jumps are only
    allowed inside the rule's own whitelist so legitimate 30x responses keep
    working without reopening the security boundary.
    """
    parsed = urlparse(url)
    if parsed.scheme.lower() != "https" or not parsed.netloc:
        return False
    if parsed.username or parsed.password:
        return False
    prefixes = rule.get("allowed_redirect_prefixes", [])
    return any(_match_nettest_prefix(url, prefix) for prefix in prefixes)
async def _close_nettest_response(response: Any) -> None:
"""
安静地关闭 httpx 响应对象。
nettest 在手动处理重定向时会提前结束部分响应读取,这里统一做资源回收,
避免连接泄漏干扰后续测试。
"""
if response is None or not hasattr(response, "aclose"):
return
try:
await response.aclose()
except Exception as err:
logger.debug(f"关闭网络测试响应失败: {err}")
async def fetch_image(
url: str,
proxy: Optional[bool] = None,
use_cache: bool = False,
if_none_match: Optional[str] = None,
cookies: Optional[str | dict] = None,
allowed_domains: Optional[set[str]] = None,
url: str,
proxy: Optional[bool] = None,
use_cache: bool = False,
if_none_match: Optional[str] = None,
cookies: Optional[str | dict] = None,
allowed_domains: Optional[set[str]] = None,
) -> Optional[Response]:
"""
处理图片缓存逻辑支持HTTP缓存和磁盘缓存
@@ -77,6 +387,7 @@ async def fetch_image(
use_cache=use_cache,
cookies=cookies,
)
if content:
# 检查 If-None-Match
etag = HashUtils.md5(content)
@@ -89,16 +400,17 @@ async def fetch_image(
media_type=UrlUtils.get_mime_type(url, "image/jpeg"),
headers=headers,
)
return None
@router.get("/img/{proxy}", summary="图片代理")
async def proxy_img(
imgurl: str,
proxy: bool = False,
cache: bool = False,
use_cookies: bool = False,
if_none_match: Annotated[str | None, Header()] = None,
_: schemas.TokenPayload = Depends(verify_resource_token),
imgurl: str,
proxy: bool = False,
cache: bool = False,
use_cookies: bool = False,
if_none_match: Annotated[str | None, Header()] = None,
_: schemas.TokenPayload = Depends(verify_resource_token),
) -> Response:
"""
图片代理,可选是否使用代理服务器,支持 HTTP 缓存
@@ -127,9 +439,9 @@ async def proxy_img(
@router.get("/cache/image", summary="图片缓存")
async def cache_img(
url: str,
if_none_match: Annotated[str | None, Header()] = None,
_: schemas.TokenPayload = Depends(verify_resource_token),
url: str,
if_none_match: Annotated[str | None, Header()] = None,
_: schemas.TokenPayload = Depends(verify_resource_token),
) -> Response:
"""
本地缓存图片文件,支持 HTTP 缓存,如果启用全局图片缓存,则使用磁盘缓存
@@ -164,6 +476,9 @@ def get_global_setting(token: str):
"BACKEND_VERSION": APP_VERSION,
}
)
# 仅在后端开发模式下返回该标记,避免生产环境暴露无意义运行态信息
if settings.DEV:
info.update({"BACKEND_DEV": True})
return schemas.Response(success=True, data=info)
@@ -178,6 +493,7 @@ async def get_user_global_setting(_: User = Depends(get_current_active_user_asyn
# 业务功能相关的配置字段
info = settings.model_dump(
include={
"AI_AGENT_ENABLE",
"RECOGNIZE_SOURCE",
"SEARCH_SOURCE",
"AI_RECOMMEND_ENABLED",
@@ -219,7 +535,7 @@ async def get_env_setting(_: User = Depends(get_current_active_user_async)):
@router.post("/env", summary="更新系统配置", response_model=schemas.Response)
async def set_env_setting(
env: dict, _: User = Depends(get_current_active_superuser_async)
env: dict, _: User = Depends(get_current_active_superuser_async)
):
"""
更新系统环境变量(仅管理员)
@@ -254,9 +570,9 @@ async def set_env_setting(
@router.get("/progress/{process_type}", summary="实时进度")
async def get_progress(
request: Request,
process_type: str,
_: schemas.TokenPayload = Depends(verify_resource_token),
request: Request,
process_type: str,
_: schemas.TokenPayload = Depends(verify_resource_token),
):
"""
实时获取处理进度返回格式为SSE
@@ -291,9 +607,9 @@ async def get_setting(key: str, _: User = Depends(get_current_active_user_async)
@router.post("/setting/{key}", summary="更新系统设置", response_model=schemas.Response)
async def set_setting(
key: str,
value: Annotated[Union[list, dict, bool, int, str] | None, Body()] = None,
_: User = Depends(get_current_active_superuser_async),
key: str,
value: Annotated[Union[list, dict, bool, int, str] | None, Body()] = None,
_: User = Depends(get_current_active_superuser_async),
):
"""
更新系统设置(仅管理员)
@@ -327,10 +643,10 @@ async def set_setting(
@router.get("/llm-models", summary="获取LLM模型列表", response_model=schemas.Response)
async def get_llm_models(
provider: str,
api_key: str,
base_url: Optional[str] = None,
_: User = Depends(get_current_active_user_async),
provider: str,
api_key: str,
base_url: Optional[str] = None,
_: User = Depends(get_current_active_user_async),
):
"""
获取LLM模型列表
@@ -344,11 +660,73 @@ async def get_llm_models(
return schemas.Response(success=False, message=str(e))
@router.post("/llm-test", summary="测试LLM调用", response_model=schemas.Response)
async def llm_test(
    payload: Annotated[Optional[LlmTestRequest], Body()] = None,
    _: User = Depends(get_current_active_superuser_async),
):
    """
    Run one minimal LLM call using the submitted draft configuration.

    Validates the draft config step by step (presence, enabled flag, API
    key, model), then asks LLMHelper for a test call. Error messages are
    sanitized so the API key is never echoed back to the client.
    """
    if not payload:
        return schemas.Response(success=False, message="请配置智能助手LLM相关参数后再进行测试")
    if not payload.provider or not payload.model:
        return schemas.Response(success=False, message="请配置LLM提供商和模型")
    # Echoed back in failure responses so the frontend can show which
    # provider/model combination was tested.
    data = {
        "provider": payload.provider,
        "model": payload.model,
    }
    if not payload.enabled:
        return schemas.Response(success=False, message="请先启用智能助手", data=data)
    if not payload.api_key or not payload.api_key.strip():
        return schemas.Response(
            success=False,
            message="请先配置 LLM API Key",
            data=data,
        )
    # NOTE(review): payload.model is already known to be truthy here; this
    # extra check only catches whitespace-only model names.
    if not payload.model or not payload.model.strip():
        return schemas.Response(
            success=False,
            message="请先配置 LLM 模型",
            data=data,
        )
    try:
        result = await LLMHelper.test_current_settings(
            provider=payload.provider,
            model=payload.model,
            thinking_level=payload.thinking_level,
            api_key=payload.api_key,
            base_url=payload.base_url,
        )
        # An empty reply preview counts as a failed test even if no
        # exception was raised by the provider.
        if not result.get("reply_preview"):
            return schemas.Response(
                success=False,
                message="模型响应为空"
            )
        return schemas.Response(success=True, data=result)
    except (LLMTestTimeout, TimeoutError) as err:
        logger.warning(err)
        return schemas.Response(
            success=False,
            message="LLM 调用超时"
        )
    except Exception as err:
        # Strip the API key / bearer tokens before returning the error.
        return schemas.Response(
            success=False,
            message=_sanitize_llm_test_error(str(err), payload.api_key)
        )
@router.get("/message", summary="实时消息")
async def get_message(
request: Request,
role: Optional[str] = "system",
_: schemas.TokenPayload = Depends(verify_resource_token),
request: Request,
role: Optional[str] = "system",
_: schemas.TokenPayload = Depends(verify_resource_token),
):
"""
实时获取系统消息返回格式为SSE
@@ -371,10 +749,10 @@ async def get_message(
@router.get("/logging", summary="实时日志")
async def get_logging(
request: Request,
length: Optional[int] = 50,
logfile: Optional[str] = "moviepilot.log",
_: schemas.TokenPayload = Depends(verify_resource_token),
request: Request,
length: Optional[int] = 50,
logfile: Optional[str] = "moviepilot.log",
_: schemas.TokenPayload = Depends(verify_resource_token),
):
"""
实时获取系统日志
@@ -385,7 +763,7 @@ async def get_logging(
log_path = base_path / logfile
if not await SecurityUtils.async_is_safe_path(
base_path=base_path, user_path=log_path, allowed_suffixes={".log"}
base_path=base_path, user_path=log_path, allowed_suffixes={".log"}
):
raise HTTPException(status_code=404, detail="Not Found")
@@ -402,7 +780,7 @@ async def get_logging(
# 读取历史日志
async with aiofiles.open(
log_path, mode="r", encoding="utf-8", errors="ignore"
log_path, mode="r", encoding="utf-8", errors="ignore"
) as f:
# 优化大文件读取策略
if file_size > 100 * 1024:
@@ -414,7 +792,7 @@ async def get_logging(
# 找到第一个完整的行
first_newline = content.find("\n")
if first_newline != -1:
content = content[first_newline + 1 :]
content = content[first_newline + 1:]
else:
# 小文件直接读取全部内容
content = await f.read()
@@ -422,7 +800,7 @@ async def get_logging(
# 按行分割并添加到队列,只保留非空行
lines = [line.strip() for line in content.splitlines() if line.strip()]
# 只取最后N行
for line in lines[-max(length, 50) :]:
for line in lines[-max(length, 50):]:
lines_queue.append(line)
# 输出历史日志
@@ -431,7 +809,7 @@ async def get_logging(
# 实时监听新日志
async with aiofiles.open(
log_path, mode="r", encoding="utf-8", errors="ignore"
log_path, mode="r", encoding="utf-8", errors="ignore"
) as f:
# 移动文件指针到文件末尾,继续监听新增内容
await f.seek(0, 2)
@@ -470,7 +848,7 @@ async def get_logging(
try:
# 使用 aiofiles 异步读取文件
async with aiofiles.open(
log_path, mode="r", encoding="utf-8", errors="ignore"
log_path, mode="r", encoding="utf-8", errors="ignore"
) as file:
text = await file.read()
# 倒序输出
@@ -493,7 +871,7 @@ async def latest_version(_: schemas.TokenPayload = Depends(verify_token)):
version_res = await AsyncRequestUtils(
proxies=settings.PROXY, headers=settings.GITHUB_HEADERS
).get_res(f"https://api.github.com/repos/jxxghp/MoviePilot/releases")
if version_res:
if version_res is not None and version_res.status_code == 200:
ver_json = version_res.json()
if ver_json:
return schemas.Response(success=True, data=ver_json)
@@ -502,10 +880,10 @@ async def latest_version(_: schemas.TokenPayload = Depends(verify_token)):
@router.get("/ruletest", summary="过滤规则测试", response_model=schemas.Response)
def ruletest(
title: str,
rulegroup_name: str,
subtitle: Optional[str] = None,
_: schemas.TokenPayload = Depends(verify_token),
title: str,
rulegroup_name: str,
subtitle: Optional[str] = None,
_: schemas.TokenPayload = Depends(verify_token),
):
"""
过滤规则测试,规则类型 1-订阅2-洗版3-搜索
@@ -537,72 +915,106 @@ def ruletest(
)
@router.get("/nettest/targets", summary="获取网络测试目标", response_model=schemas.Response)
async def nettest_targets(_: schemas.TokenPayload = Depends(verify_token)):
    """
    Return the list of network-test targets.

    Only the minimal rendering fields are exposed; the fetchable URLs,
    content-validation rules and redirect whitelists stay server-side.
    """
    visible = [
        {"id": rule["id"], "name": rule["name"], "icon": rule["icon"]}
        for rule in _build_nettest_rules()
    ]
    return schemas.Response(success=True, data=visible)
@router.get("/nettest", summary="测试网络连通性")
async def nettest(
url: str,
proxy: bool,
include: Optional[str] = None,
_: schemas.TokenPayload = Depends(verify_token),
target_id: Optional[str] = None,
url: Optional[str] = None,
include: Optional[str] = None,
_: schemas.TokenPayload = Depends(verify_token),
):
"""
测试网络连通性
测试内置目标的网络连通性
`target_id` 是当前前端使用的正式入口。`url/proxy/include` 仅作兼容保留,
其中 `include` 不再参与客户端可控的内容匹配,具体校验由服务端规则决定。
"""
target = _get_nettest_rule(url=url, target_id=target_id)
if not target:
return schemas.Response(success=False, message="测试目标不存在")
# 记录开始的毫秒数
start_time = datetime.now()
headers = None
# 当前使用的加速代理
proxy_name = ""
if "github" in url:
# 这是github的连通性测试
headers = settings.GITHUB_HEADERS
if "{GITHUB_PROXY}" in url:
url = url.replace(
"{GITHUB_PROXY}", UrlUtils.standardize_base_url(settings.GITHUB_PROXY or "")
)
if settings.GITHUB_PROXY:
proxy_name = "Github加速代理"
if "{PIP_PROXY}" in url:
url = url.replace(
"{PIP_PROXY}",
UrlUtils.standardize_base_url(
settings.PIP_PROXY or "https://pypi.org/simple/"
),
)
if settings.PIP_PROXY:
proxy_name = "PIP加速代理"
url = url.replace("{TMDBAPIKEY}", settings.TMDB_API_KEY)
result = await AsyncRequestUtils(
proxies=settings.PROXY if proxy else None,
headers=headers,
url = target["url"]
invalid_message = _validate_nettest_url(url)
if invalid_message:
logger.warning(f"拦截不安全的网络测试地址: {url}")
return schemas.Response(success=False, message=invalid_message)
if include:
logger.debug("nettest include 参数已忽略,改为服务端固定校验")
request_utils = AsyncRequestUtils(
proxies=settings.PROXY if target.get("proxy") else None,
headers=target.get("headers"),
timeout=10,
ua=settings.NORMAL_USER_AGENT,
).get_res(url)
verify=True,
follow_redirects=False,
)
result = None
current_url = url
redirect_count = 0
while redirect_count <= 3:
result = await request_utils.get_res(current_url, allow_redirects=False)
if result is None:
break
if result.status_code not in _NETTEST_REDIRECT_STATUS_CODES:
break
location = result.headers.get("location")
if not location:
break
next_url = urljoin(current_url, location)
if not _is_allowed_nettest_redirect(next_url, target):
await _close_nettest_response(result)
logger.warning(f"拦截网络测试重定向: {current_url} -> {next_url}")
return schemas.Response(success=False, message="测试目标发生了未授权跳转")
await _close_nettest_response(result)
current_url = next_url
redirect_count += 1
if redirect_count > 3:
return schemas.Response(success=False, message="测试目标重定向次数过多")
# 计时结束的毫秒数
end_time = datetime.now()
time = round((end_time - start_time).total_seconds() * 1000)
# 计算相关秒数
if result is None:
return schemas.Response(
success=False, message=f"{proxy_name}无法连接", data={"time": time}
success=False,
message=f"{target.get('proxy_name') or target.get('name')}无法连接",
data={"time": time},
)
elif result.status_code == 200:
if include and not re.search(r"%s" % include, result.text, re.IGNORECASE):
# 通常是被加速代理跳转到其它页面了
logger.error(f"{url} 的响应内容不匹配包含规则 {include}")
if proxy_name:
message = f"{proxy_name}已失效,请检查配置"
else:
message = f"无效响应,不匹配 {include}"
expected_text = target.get("expected_text")
if expected_text and expected_text.lower() not in (result.text or "").lower():
return schemas.Response(
success=False,
message=message,
message=target.get("invalid_message") or "无效响应",
data={"time": time},
)
return schemas.Response(success=True, data={"time": time})
else:
if proxy_name:
if target.get("proxy_name"):
# 加速代理失败
message = f"{proxy_name}已失效,错误码:{result.status_code}"
message = f"{target['proxy_name']}已失效,错误码:{result.status_code}"
else:
message = f"错误码:{result.status_code}"
if "github" in url:

177
app/api/openai_utils.py Normal file
View File

@@ -0,0 +1,177 @@
import hashlib
import time
import uuid
from typing import Any, Dict, List, Tuple
def _get_message_field(message: Any, field: str, default: Any = None) -> Any:
if isinstance(message, dict):
return message.get(field, default)
return getattr(message, field, default)
def extract_text_and_images(content: Any) -> Tuple[str, List[str]]:
    """
    Flatten an OpenAI-style message content into ``(text, image_urls)``.

    Accepts a plain string, a list of content parts, or None. Parts of type
    "text"/"input_text" are concatenated with newlines; image parts of type
    "image_url"/"input_image"/"image" (base64 source) become image URLs.
    Unknown part shapes are silently skipped.
    """
    if content is None:
        return "", []
    if isinstance(content, str):
        return content.strip(), []
    texts: List[str] = []
    images: List[str] = []
    for part in (content if isinstance(content, list) else []):
        if isinstance(part, str):
            stripped = part.strip()
            if stripped:
                texts.append(stripped)
            continue
        if not isinstance(part, dict):
            continue
        kind = (part.get("type") or "").lower()
        if kind in ("text", "input_text"):
            value = part.get("text")
            if value and str(value).strip():
                texts.append(str(value).strip())
        elif kind == "image_url":
            # Chat-completions style: image_url may be a dict or a bare URL.
            ref = part.get("image_url")
            url = ref.get("url") if isinstance(ref, dict) else ref
            if url and str(url).strip():
                images.append(str(url).strip())
        elif kind == "input_image":
            url = part.get("image_url")
            if url and str(url).strip():
                images.append(str(url).strip())
        elif kind == "image":
            # Anthropic style: inline base64 payload turned into a data URL.
            source = part.get("source") or {}
            if isinstance(source, dict) and source.get("type") == "base64":
                data = source.get("data")
                media_type = source.get("media_type") or "image/png"
                if data and str(data).strip():
                    images.append(f"data:{media_type};base64,{str(data).strip()}")
    return "\n".join(texts).strip(), images
def build_prompt(messages: List[Any], use_server_session: bool) -> Tuple[str, List[str]]:
    """
    Collapse an OpenAI message list into a single agent prompt.

    Returns ``(prompt, image_urls)``. System/developer texts are grouped
    into a leading section; when the server does not keep its own session,
    up to 10 prior transcript lines are replayed as conversation context.
    Raises ValueError when no usable user message is present.
    """
    system_sections: List[str] = []
    history_lines: List[str] = []
    current_text = ""
    current_images: List[str] = []
    for entry in messages:
        role = str(_get_message_field(entry, "role", "user") or "user").lower()
        # "developer" is OpenAI's newer alias for "system".
        if role == "developer":
            role = "system"
        text, images = extract_text_and_images(_get_message_field(entry, "content"))
        if role == "system":
            if text:
                system_sections.append(text)
        elif role == "user":
            if text or images:
                # The last non-empty user message wins as the current turn.
                current_text, current_images = text, images
            if text:
                history_lines.append(f"user: {text}")
        elif text:
            history_lines.append(f"{role}: {text}")
    if not current_text and not current_images:
        raise ValueError("No usable user message found in messages.")
    sections: List[str] = []
    if system_sections:
        sections.append("系统要求:\n" + "\n\n".join(system_sections))
    if not use_server_session and history_lines:
        # Drop the trailing user line: it becomes the "current" message below.
        prior = history_lines[:-1] if history_lines[-1].startswith("user: ") else history_lines
        if prior:
            sections.append("对话上下文:\n" + "\n".join(prior[-10:]))
    if current_text:
        sections.append("当前用户消息:\n" + current_text)
    else:
        sections.append("当前用户消息:\n请结合图片内容回复。")
    return "\n\n".join(part for part in sections if part).strip(), current_images
def build_session_id(session_key: str, prefix: str) -> str:
    """Derive a stable, non-reversible session id from *session_key*."""
    sha = hashlib.sha256(session_key.encode("utf-8"))
    return prefix + sha.hexdigest()[:32]
def build_completion_payload(content: str, model_id: str) -> Dict[str, Any]:
    """
    Wrap *content* in an OpenAI ``chat.completion`` response body.

    Token counts are reported as zero because the agent does not track
    usage; a fresh random id and the current timestamp are generated.
    """
    assistant_message = {
        "role": "assistant",
        "content": content,
    }
    choice = {
        "index": 0,
        "message": assistant_message,
        "finish_reason": "stop",
    }
    return {
        "id": f"chatcmpl-{uuid.uuid4().hex}",
        "object": "chat.completion",
        "created": int(time.time()),
        "model": model_id,
        "choices": [choice],
        "usage": {
            "prompt_tokens": 0,
            "completion_tokens": 0,
            "total_tokens": 0,
        },
    }
def build_responses_input(
input_data: Any, instructions: str | None = None
) -> List[Dict[str, Any]]:
messages: List[Dict[str, Any]] = []
if instructions and str(instructions).strip():
messages.append({"role": "system", "content": str(instructions).strip()})
if isinstance(input_data, str):
normalized = input_data.strip()
if normalized:
messages.append({"role": "user", "content": normalized})
return messages
if isinstance(input_data, list):
for item in input_data:
if not isinstance(item, dict):
continue
item_type = (item.get("type") or "").lower()
if item_type == "message":
role = item.get("role") or "user"
content = item.get("content")
messages.append({"role": role, "content": content})
elif item.get("role") and "content" in item:
messages.append({"role": item.get("role"), "content": item.get("content")})
return messages
if isinstance(input_data, dict) and input_data.get("role") and "content" in input_data:
messages.append({"role": input_data.get("role"), "content": input_data.get("content")})
return messages
def build_anthropic_messages(
    system: Any, messages: List[Any]
) -> List[Dict[str, Any]]:
    """
    Convert an Anthropic-style ``(system, messages)`` pair into the unified
    chat message list: the extracted system text first (if any), followed by
    the messages with their roles and content passed through unchanged.
    """
    result: List[Dict[str, Any]] = []
    system_text, _ = extract_text_and_images(system)
    if system_text:
        result.append({"role": "system", "content": system_text})
    result.extend(
        {
            "role": _get_message_field(msg, "role", "user"),
            "content": _get_message_field(msg, "content"),
        }
        for msg in messages
    )
    return result

View File

@@ -38,6 +38,7 @@ from app.schemas import (
TransferDirectoryConf,
MessageResponse,
)
from app.utils.identity import normalize_internal_user_id
from app.schemas.category import CategoryConfig
from app.schemas.types import (
TorrentStatus,
@@ -119,6 +120,21 @@ class ChainBase(metaclass=ABCMeta):
"""
self.filecache.delete(filename)
@staticmethod
def _normalize_notification_for_dispatch(
message: Notification
) -> Notification:
"""
规范化待发送的通知消息。
后台任务会复用内部占位用户ID作为会话身份这里在真正发送前清空
让消息重新走默认通知路由或基于 targets 的目标解析。
"""
dispatch_message = copy.deepcopy(message)
dispatch_message.userid = normalize_internal_user_id(
dispatch_message.userid
)
return dispatch_message
async def async_remove_cache(self, filename: str) -> None:
"""
异步删除缓存同时删除Redis和本地缓存
@@ -317,19 +333,20 @@ class ChainBase(metaclass=ABCMeta):
if inspect.iscoroutinefunction(func):
result = await func(*args, **kwargs)
else:
result = func(*args, **kwargs)
# 系统同步模块在异步路径里也必须切到线程池,避免阻塞共享事件循环。
result = await run_in_threadpool(func, *args, **kwargs)
elif ObjectUtils.check_signature(func, result):
# 返回结果与方法签名一致,将结果传入
if inspect.iscoroutinefunction(func):
result = await func(result)
else:
result = func(result)
result = await run_in_threadpool(func, result)
elif isinstance(result, list):
# 返回为列表,有多个模块运行结果时进行合并
if inspect.iscoroutinefunction(func):
temp = await func(*args, **kwargs)
else:
temp = func(*args, **kwargs)
temp = await run_in_threadpool(func, *args, **kwargs)
if isinstance(temp, list):
result.extend(temp)
else:
@@ -1119,10 +1136,13 @@ class ChainBase(metaclass=ABCMeta):
# 保存消息
self.messagehelper.put(message, role="user", title=message.title)
self.messageoper.add(**message.model_dump())
dispatch_message = self._normalize_notification_for_dispatch(message)
# 发送消息按设置隔离
if not message.userid and message.mtype:
if not dispatch_message.userid and dispatch_message.mtype:
# 消息隔离设置
notify_action = ServiceConfigHelper.get_notification_switch(message.mtype)
notify_action = ServiceConfigHelper.get_notification_switch(
dispatch_message.mtype
)
if notify_action:
# 'admin' 'user,admin' 'user' 'all'
actions = notify_action.split(",")
@@ -1131,7 +1151,7 @@ class ChainBase(metaclass=ABCMeta):
send_orignal = False
useroper = UserOper()
for action in actions:
send_message = copy.deepcopy(message)
send_message = copy.deepcopy(dispatch_message)
if action == "admin" and not admin_sended:
# 仅发送管理员
logger.info(f"{send_message.mtype} 的消息已设置发送给管理员")
@@ -1186,13 +1206,13 @@ class ChainBase(metaclass=ABCMeta):
# 发送消息事件
self.eventmanager.send_event(
etype=EventType.NoticeMessage,
data={**message.model_dump(), "type": message.mtype},
data={**dispatch_message.model_dump(), "type": dispatch_message.mtype},
)
# 按原消息发送
self.messagequeue.send_message(
"post_message",
message=message,
immediately=True if message.userid else False,
message=dispatch_message,
immediately=True if dispatch_message.userid else False,
**kwargs,
)
@@ -1233,10 +1253,13 @@ class ChainBase(metaclass=ABCMeta):
# 保存消息
self.messagehelper.put(message, role="user", title=message.title)
await self.messageoper.async_add(**message.model_dump())
dispatch_message = self._normalize_notification_for_dispatch(message)
# 发送消息按设置隔离
if not message.userid and message.mtype:
if not dispatch_message.userid and dispatch_message.mtype:
# 消息隔离设置
notify_action = ServiceConfigHelper.get_notification_switch(message.mtype)
notify_action = ServiceConfigHelper.get_notification_switch(
dispatch_message.mtype
)
if notify_action:
# 'admin' 'user,admin' 'user' 'all'
actions = notify_action.split(",")
@@ -1245,7 +1268,7 @@ class ChainBase(metaclass=ABCMeta):
send_orignal = False
useroper = UserOper()
for action in actions:
send_message = copy.deepcopy(message)
send_message = copy.deepcopy(dispatch_message)
if action == "admin" and not admin_sended:
# 仅发送管理员
logger.info(f"{send_message.mtype} 的消息已设置发送给管理员")
@@ -1300,13 +1323,13 @@ class ChainBase(metaclass=ABCMeta):
# 发送消息事件
await self.eventmanager.async_send_event(
etype=EventType.NoticeMessage,
data={**message.model_dump(), "type": message.mtype},
data={**dispatch_message.model_dump(), "type": dispatch_message.mtype},
)
# 按原消息发送
await self.messagequeue.async_send_message(
"post_message",
message=message,
immediately=True if message.userid else False,
message=dispatch_message,
immediately=True if dispatch_message.userid else False,
**kwargs,
)
@@ -1324,11 +1347,12 @@ class ChainBase(metaclass=ABCMeta):
message, role="user", note=note_list, title=message.title
)
self.messageoper.add(**message.model_dump(), note=note_list)
dispatch_message = self._normalize_notification_for_dispatch(message)
return self.messagequeue.send_message(
"post_medias_message",
message=message,
message=dispatch_message,
medias=medias,
immediately=True if message.userid else False,
immediately=True if dispatch_message.userid else False,
)
def post_torrents_message(
@@ -1345,11 +1369,12 @@ class ChainBase(metaclass=ABCMeta):
message, role="user", note=note_list, title=message.title
)
self.messageoper.add(**message.model_dump(), note=note_list)
dispatch_message = self._normalize_notification_for_dispatch(message)
return self.messagequeue.send_message(
"post_torrents_message",
message=message,
message=dispatch_message,
torrents=torrents,
immediately=True if message.userid else False,
immediately=True if dispatch_message.userid else False,
)
def delete_message(
@@ -1383,6 +1408,7 @@ class ChainBase(metaclass=ABCMeta):
chat_id: Union[str, int],
text: str,
title: Optional[str] = None,
buttons: Optional[List[List[dict]]] = None,
) -> bool:
"""
编辑已发送的消息
@@ -1392,6 +1418,7 @@ class ChainBase(metaclass=ABCMeta):
:param chat_id: 聊天ID
:param text: 新的消息内容
:param title: 消息标题
:param buttons: 更新后的按钮列表
:return: 编辑是否成功
"""
return self.run_module(
@@ -1402,6 +1429,7 @@ class ChainBase(metaclass=ABCMeta):
chat_id=chat_id,
text=text,
title=title,
buttons=buttons,
)
def send_direct_message(self, message: Notification) -> Optional[MessageResponse]:
@@ -1411,7 +1439,10 @@ class ChainBase(metaclass=ABCMeta):
:param message: 消息体
:return: 消息响应包含message_id, chat_id等
"""
return self.run_module("send_direct_message", message=message)
return self.run_module(
"send_direct_message",
message=self._normalize_notification_for_dispatch(message),
)
def metadata_img(
self,

View File

@@ -950,9 +950,13 @@ class DownloadChain(ChainBase):
torrents = self.list_torrents(downloader=name, status=TorrentStatus.DOWNLOADING)
if not torrents:
return []
history_map = DownloadHistoryOper().get_by_hashes(
[torrent.hash for torrent in torrents if torrent.hash]
)
ret_torrents = []
for torrent in torrents:
history = DownloadHistoryOper().get_by_hash(torrent.hash)
history = history_map.get(torrent.hash)
if history:
# 媒体信息
torrent.media = {

1363
app/chain/interaction.py Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -1320,7 +1320,7 @@ class MediaChain(ChainBase, ConfigReloadMixin, metaclass=Singleton):
mediainfo = await native_fn()
else:
# 原生优先
logger.info(f"插件优先模式未开启。尝试原生识别标题:{log_name} ...")
logger.info(f"识别标题:{log_name} ...")
mediainfo = await native_fn()
if not mediainfo and plugin_available:
logger.info(

File diff suppressed because it is too large Load Diff

View File

@@ -566,8 +566,8 @@ class SearchChain(ChainBase):
) or []
)
search_count += 1
# 有结果则停止
if torrents:
# 未开启多名称搜索时,有结果则停止
if not settings.SEARCH_MULTIPLE_NAME and torrents:
logger.info(f"共搜索到 {len(torrents)} 个资源,停止搜索")
break
@@ -654,7 +654,7 @@ class SearchChain(ChainBase):
}
search_count += 1
if torrents:
if not settings.SEARCH_MULTIPLE_NAME and torrents:
logger.info(f"共搜索到 {len(torrents)} 个资源,停止搜索")
break

1241
app/chain/skills.py Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -61,6 +61,12 @@ class StorageChain(ChainBase):
"""
return self.run_module("create_folder", fileitem=fileitem, name=name)
def get_folder(self, storage: str, path: Path) -> Optional[schemas.FileItem]:
    """
    Fetch a directory on the given storage, creating it recursively when it
    does not exist yet (delegated to the ``get_folder`` module hook).

    :param storage: storage backend identifier
    :param path: directory path to fetch or create
    :return: the resolved directory item, or None on failure
    """
    folder_item = self.run_module("get_folder", storage=storage, path=path)
    return folder_item
def download_file(self, fileitem: schemas.FileItem, path: Path = None) -> Optional[Path]:
"""
下载文件

View File

@@ -1766,6 +1766,8 @@ class SubscribeChain(ChainBase):
- exist_flag (bool): 布尔值,表示媒体是否已经完全下载或已存在
- no_exists (dict): 缺失的媒体信息,包含缺失的集数或其他相关信息
"""
self.__refresh_total_episode_before_completion(subscribe=subscribe, mediainfo=mediainfo)
# 非洗版
if not subscribe.best_version:
# 每季总集数
@@ -1834,6 +1836,38 @@ class SubscribeChain(ChainBase):
# 返回结果,表示媒体未完全下载或存在
return False, no_exists
@staticmethod
def __refresh_total_episode_before_completion(subscribe: Subscribe, mediainfo: MediaInfo):
    """
    Before the completion check, correct the subscription's total episode
    count from the latest recognized media info, so a stale total does not
    cause the subscription to be marked finished prematurely.
    """
    # Only TV subscriptions track per-season episode totals.
    if subscribe.type != MediaType.TV.value:
        return
    # A manually pinned total must never be overwritten automatically.
    if subscribe.manual_total_episode:
        return
    if subscribe.season is None:
        return
    # NOTE(review): assumes mediainfo.seasons maps season number -> episode list — confirm upstream.
    new_total_episode = len((mediainfo.seasons or {}).get(subscribe.season) or [])
    old_total_episode = subscribe.total_episode or 0
    # Only ever grow the total; unknown or shrinking data is ignored.
    if not new_total_episode or new_total_episode <= old_total_episode:
        return
    old_lack_episode = subscribe.lack_episode or 0
    # Newly announced episodes are all still missing, so lack grows by the delta.
    new_lack_episode = old_lack_episode + (new_total_episode - old_total_episode)
    now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    # Persist first, then mirror the new values onto the in-memory object.
    SubscribeOper().update(subscribe.id, {
        "total_episode": new_total_episode,
        "lack_episode": new_lack_episode,
        "last_update": now
    })
    subscribe.total_episode = new_total_episode
    subscribe.lack_episode = new_lack_episode
    subscribe.last_update = now
    logger.info(
        f"订阅 {subscribe.name}{subscribe.season}季 总集数更新为 {new_total_episode},缺失集数更新为 {new_lack_episode}"
    )
@staticmethod
def _is_episode_range_covered(meta: MetaBase, subscribe: Subscribe) -> bool:
"""

View File

@@ -74,10 +74,13 @@ class JobManager:
_job_view: Dict[Tuple, TransferJob] = {}
# 汇总季集清单
_season_episodes: Dict[Tuple, List[int]] = {}
# 记录从 meta 作业迁移到 media 作业的关系,用于清理提前失败后残留的 media 作业
_meta_to_media_ids: Dict[Tuple, set[Tuple]] = {}
def __init__(self):
self._job_view = {}
self._season_episodes = {}
self._meta_to_media_ids = {}
@staticmethod
def __get_meta_id(meta: MetaBase = None, season: Optional[int] = None) -> Tuple:
@@ -185,6 +188,43 @@ class JobManager:
self._season_episodes[__mediaid__] = task.meta.episode_list
return True
def migrate_task(self, task: TransferTask) -> bool:
    """
    Move a task from its meta job to its media job.

    The task is first removed from whatever job currently holds it, then
    re-added (preserving its previous state). When the task came out of a
    meta job and now lands in a distinct media job, the meta→media relation
    is recorded so leftover media jobs can be cleaned up if the meta job
    fails early.

    :param task: the transfer task being migrated
    :return: True when the task was re-added successfully
    """
    curr_task, source_job_id = self.__remove_task_with_job_id(task.fileitem)
    # Re-insert under the (possibly new) job id, keeping the old state if any.
    if not self.add_task(task, state=curr_task.state if curr_task else "waiting"):
        return False
    if curr_task and task.mediainfo:
        metaid = self.__get_meta_id(
            meta=task.meta, season=task.meta.begin_season
        )
        mediaid = self.__get_id(task)
        # Record the migration only when the source really was the meta job
        # and the destination is a different (media) job.
        if source_job_id == metaid and mediaid != metaid:
            with job_lock:
                self._meta_to_media_ids.setdefault(metaid, set()).add(mediaid)
    return True
def __is_job_done(self, job_id: Tuple) -> bool:
    """
    Report whether the given job has finished.

    A job that is no longer tracked counts as done; otherwise every one of
    its tasks must have reached a terminal state ("completed" or "failed").
    """
    job = self._job_view.get(job_id)
    if job is None:
        return True
    return all(t.state in ["completed", "failed"] for t in job.tasks)
def __pop_job(self, job_id: Tuple):
    """
    Drop the given job together with its cached season/episode list.

    Missing entries are ignored, so the call is safe to repeat.
    """
    self._season_episodes.pop(job_id, None)
    self._job_view.pop(job_id, None)
def running_task(self, task: TransferTask):
"""
设置任务为运行中
@@ -233,10 +273,39 @@ class JobManager:
- set(task.meta.episode_list)
)
def fail_unfinished_task(self, task: TransferTask):
    """
    Mark the non-terminal task matching *task*'s fileitem as failed.

    Scans every tracked job for a task with the same fileitem; the first
    match is forced into the "failed" state (unless already terminal) and
    its episodes are removed from that job's season/episode cache.
    """
    if not task or not task.fileitem:
        return
    with job_lock:
        for mediaid, job in self._job_view.items():
            for job_task in job.tasks:
                # Only the task for the same source file is affected.
                if job_task.fileitem != task.fileitem:
                    continue
                if job_task.state not in ["completed", "failed"]:
                    job_task.state = "failed"
                # Drop this task's episodes from the pending-episode cache.
                if mediaid in self._season_episodes:
                    self._season_episodes[mediaid] = list(
                        set(self._season_episodes[mediaid])
                        - set(task.meta.episode_list)
                    )
                # NOTE(review): returns after the first matching task — a
                # fileitem is assumed to appear in at most one job; confirm.
                return
def remove_task(self, fileitem: FileItem) -> Optional[TransferJobTask]:
    """
    Remove the task associated with *fileitem* from the job view.

    :param fileitem: the file item whose task should be dropped
    :return: the removed task, or None when nothing matched
    """
    removed, _job_id = self.__remove_task_with_job_id(fileitem)
    return removed
def __remove_task_with_job_id(
self, fileitem: FileItem
) -> Tuple[Optional[TransferJobTask], Optional[Tuple]]:
"""
根据文件项移除任务并返回任务所在的作业ID
"""
with job_lock:
for mediaid in list(self._job_view):
job = self._job_view[mediaid]
@@ -252,8 +321,8 @@ class JobManager:
set(self._season_episodes[mediaid])
- set(task.meta.episode_list)
)
return task
return None
return task, mediaid
return None, None
def remove_job(self, task: TransferTask) -> Optional[TransferJob]:
"""
@@ -280,27 +349,20 @@ class JobManager:
media=task.mediainfo, season=task.meta.begin_season
)
meta_done = True
if __metaid__ in self._job_view:
meta_done = all(
t.state in ["completed", "failed"]
for t in self._job_view[__metaid__].tasks
)
related_media_ids = set(self._meta_to_media_ids.get(__metaid__, set()))
if task.mediainfo:
related_media_ids.add(__mediaid__)
media_done = True
if __mediaid__ in self._job_view:
media_done = all(
t.state in ["completed", "failed"]
for t in self._job_view[__mediaid__].tasks
)
meta_done = self.__is_job_done(__metaid__)
media_done = all(
self.__is_job_done(mediaid) for mediaid in related_media_ids
)
if meta_done and media_done:
__id__ = self.__get_id(task)
if __id__ in self._job_view:
# 移除季集信息
if __id__ in self._season_episodes:
self._season_episodes.pop(__id__)
self._job_view.pop(__id__)
remove_ids = {__metaid__, self.__get_id(task), *related_media_ids}
for job_id in remove_ids:
self.__pop_job(job_id)
self._meta_to_media_ids.pop(__metaid__, None)
def is_done(self, task: TransferTask) -> bool:
"""
@@ -780,10 +842,22 @@ class TransferChain(ChainBase, ConfigReloadMixin, metaclass=Singleton):
Notification(
mtype=NotificationType.Manual,
title=f"{task.mediainfo.title_year} {task.meta.season_episode} 入库失败!",
text=f"原因:{transferinfo.message or '未知'}",
text="\n".join(
[
f"原因:{transferinfo.message or '未知'}",
(
f"如果按钮不可用,可回复:\n```\n/redo {history.id}\n```"
if history
else ""
),
]
).strip(),
image=task.mediainfo.get_message_image(),
username=task.username,
link=settings.MP_DOMAIN("#/history"),
buttons=self.build_failed_transfer_buttons(
history.id if history else None
),
)
)
@@ -967,6 +1041,17 @@ class TransferChain(ChainBase, ConfigReloadMixin, metaclass=Singleton):
return
self.jobview.remove_task(fileitem)
def __fail_transfer_task(self, task: TransferTask):
    """
    Mark a crashed transfer task as failed and tidy up the job view.

    The task is forced into a terminal state first; if that leaves the whole
    torrent finished, the downloader-side completion hook runs, and finally
    the (now possibly empty) job entry is removed.
    """
    self.jobview.fail_unfinished_task(task)
    # When every task of the torrent is terminal, notify the downloader side.
    if task.download_hash and self.jobview.is_torrent_done(task.download_hash):
        self.transfer_completed(
            hashs=task.download_hash, downloader=task.downloader
        )
    self.jobview.try_remove_job(task)
def __start_transfer(self):
"""
处理队列
@@ -1043,6 +1128,7 @@ class TransferChain(ChainBase, ConfigReloadMixin, metaclass=Singleton):
logger.error(
f"{fileitem.name} 整理任务处理出现错误:{e} - {traceback.format_exc()}"
)
self.__fail_transfer_task(task)
with task_lock:
self._processed_num += 1
self._fail_num += 1
@@ -1119,9 +1205,17 @@ class TransferChain(ChainBase, ConfigReloadMixin, metaclass=Singleton):
Notification(
mtype=NotificationType.Manual,
title=f"{task.fileitem.name} 未识别到媒体信息,无法入库!",
text=f"回复:\n```\n/redo {his.id} [tmdbid]|[类型]\n```\n手动识别整理。",
text=(
"原因:未识别到媒体信息\n"
"如果按钮不可用,可回复:\n"
f"```\n/redo {his.id}\n/redo {his.id} [tmdbid]|[类型]\n```\n"
"自动重试或手动识别整理。"
),
username=task.username,
link=settings.MP_DOMAIN("#/history"),
buttons=self.build_failed_transfer_buttons(
his.id if his else None
),
)
)
# 任务失败直接移除task
@@ -1170,10 +1264,7 @@ class TransferChain(ChainBase, ConfigReloadMixin, metaclass=Singleton):
# 更新任务信息
task.mediainfo = mediainfo
# 更新队列任务
curr_task = self.jobview.remove_task(task.fileitem)
self.jobview.add_task(
task, state=curr_task.state if curr_task else "waiting"
)
self.jobview.migrate_task(task)
# 获取集数据
if task.mediainfo.type == MediaType.TV and not task.episodes_info:
@@ -1493,6 +1584,45 @@ class TransferChain(ChainBase, ConfigReloadMixin, metaclass=Singleton):
)
]
@staticmethod
def _resolve_download_history(
    downloadhis: DownloadHistoryOper,
    file_path: Path,
    bluray_dir: bool = False,
    download_hash: Optional[str] = None,
) -> Optional[DownloadHistory]:
    """
    Look up the download history for a file via explicit hash, file path,
    or the torrent's directories — in that order of preference.

    :param downloadhis: download-history accessor
    :param file_path: full path of the media file being transferred
    :param bluray_dir: True when the path is a Blu-ray disc folder
    :param download_hash: explicit torrent hash, used directly when given
    :return: the matching history record, or None when nothing matched
    """
    # 1) An explicit hash wins outright.
    if download_hash:
        return downloadhis.get_by_hash(download_hash)
    posix_path = file_path.as_posix()
    # 2) Blu-ray folders are recorded by directory path, not per file.
    if bluray_dir:
        return downloadhis.get_by_path(posix_path)
    # 3) Regular files: resolve through the per-file record's hash.
    file_record = downloadhis.get_file_by_fullpath(posix_path)
    if file_record:
        return downloadhis.get_by_hash(file_record.download_hash)
    # 4) Subtitles / extras inside multi-file torrents may lack a stable
    #    fullpath record; walk up the parent directories and fall back to
    #    the save path so they still attach to the same torrent.
    for parent in file_path.parents:
        parent_key = parent.as_posix()
        by_path = downloadhis.get_by_path(parent_key)
        if by_path:
            return by_path
        records = downloadhis.get_files_by_savepath(parent_key) or []
        hashes = {r.download_hash for r in records if r.download_hash}
        # Only trust a save-path match when it maps to a single torrent.
        if len(hashes) == 1:
            return downloadhis.get_by_hash(hashes.pop())
    return None
def do_transfer(
self,
fileitem: FileItem,
@@ -1634,23 +1764,13 @@ class TransferChain(ChainBase, ConfigReloadMixin, metaclass=Singleton):
continue
# 提前获取下载历史,以便获取自定义识别词
download_history = None
downloadhis = DownloadHistoryOper()
if download_hash:
# 先按hash查询
download_history = downloadhis.get_by_hash(download_hash)
elif bluray_dir:
# 蓝光原盘,按目录名查询
download_history = downloadhis.get_by_path(file_path.as_posix())
else:
# 按文件全路径查询
download_file = downloadhis.get_file_by_fullpath(
file_path.as_posix()
)
if download_file:
download_history = downloadhis.get_by_hash(
download_file.download_hash
)
download_history = self._resolve_download_history(
downloadhis=downloadhis,
file_path=file_path,
bluray_dir=bluray_dir,
download_hash=download_hash,
)
if not meta:
subscribe_custom_words = None
@@ -1771,9 +1891,17 @@ class TransferChain(ChainBase, ConfigReloadMixin, metaclass=Singleton):
"finished": finished_files,
},
)
state, err_msg = self.__handle_transfer(
task=transfer_task, callback=self.__default_callback
)
try:
state, err_msg = self.__handle_transfer(
task=transfer_task, callback=self.__default_callback
)
except Exception as e:
logger.error(
f"{transfer_task.fileitem.name} 整理任务处理出现错误:"
f"{e} - {traceback.format_exc()}"
)
self.__fail_transfer_task(transfer_task)
state, err_msg = False, str(e)
if not state:
all_success = False
logger.warn(f"{transfer_task.fileitem.name} {err_msg}")
@@ -1816,8 +1944,8 @@ class TransferChain(ChainBase, ConfigReloadMixin, metaclass=Singleton):
Notification(
channel=channel,
source=source,
title="请输入正确的命令格式:/redo [id] [tmdbid/豆瓣id]|[类型]"
"[id]整理记录编号",
title="请输入正确的命令格式:/redo [id] 或 /redo [id] [tmdbid/豆瓣id]|[类型]"
"[id]整理记录编号",
userid=userid,
)
)
@@ -1826,7 +1954,7 @@ class TransferChain(ChainBase, ConfigReloadMixin, metaclass=Singleton):
args_error()
return
arg_strs = str(arg_str).split()
if len(arg_strs) != 2:
if len(arg_strs) not in (1, 2):
args_error()
return
# 历史记录ID
@@ -1834,6 +1962,20 @@ class TransferChain(ChainBase, ConfigReloadMixin, metaclass=Singleton):
if not logid.isdigit():
args_error()
return
if len(arg_strs) == 1:
state, errmsg = self.redo_transfer_history(int(logid))
if not state:
self.post_message(
Notification(
channel=channel,
title="手动整理失败",
source=source,
text=errmsg,
userid=userid,
link=settings.MP_DOMAIN("#/history"),
)
)
return
# TMDBID/豆瓣ID
id_strs = arg_strs[1].split("|")
media_id = id_strs[0]
@@ -1861,6 +2003,31 @@ class TransferChain(ChainBase, ConfigReloadMixin, metaclass=Singleton):
)
return
@staticmethod
def build_failed_transfer_buttons(
history_id: Optional[int],
) -> Optional[List[List[dict]]]:
"""
构建整理失败通知的操作按钮。
"""
if not history_id:
return None
return [
[
{"text": "重试", "callback_data": f"transfer_retry_{history_id}"},
{
"text": "智能助手接管",
"callback_data": f"transfer_ai_retry_{history_id}",
},
]
]
def redo_transfer_history(self, history_id: int) -> Tuple[bool, str]:
    """
    Re-run a transfer straight from its history record, letting the media
    be re-identified automatically (no explicit tmdbid/type required).

    :param history_id: id of the transfer history entry to redo
    :return: (success flag, message)
    """
    return self.__re_transfer(logid=history_id)
def __re_transfer(
self, logid: int, mtype: MediaType = None, mediaid: Optional[str] = None
) -> Tuple[bool, str]:

1184
app/cli.py Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -7,6 +7,7 @@ from app.chain import ChainBase
from app.chain.download import DownloadChain
from app.chain.message import MessageChain
from app.chain.site import SiteChain
from app.chain.skills import SkillsChain
from app.chain.subscribe import SubscribeChain
from app.chain.system import SystemChain
from app.chain.transfer import TransferChain
@@ -154,6 +155,18 @@ class Command(metaclass=Singleton):
"category": "管理",
"data": {},
},
"/session_status": {
"func": MessageChain().remote_session_status,
"description": "会话状态",
"category": "智能体",
"data": {},
},
"/skills": {
"func": SkillsChain().remote_manage,
"description": "管理技能",
"category": "智能体",
"data": {},
},
}
# 插件命令集合
self._plugin_commands = {}

View File

@@ -417,6 +417,17 @@ class ConfigModel(BaseModel):
PLUGIN_STATISTIC_SHARE: bool = True
# 是否开启插件热加载
PLUGIN_AUTO_RELOAD: bool = False
# 本地插件仓库目录,多个地址使用,分隔
PLUGIN_LOCAL_REPO_PATHS: Optional[str] = None
# ==================== 技能配置 ====================
# 技能市场仓库地址,多个地址使用,分隔
SKILL_MARKET: str = (
"https://clawhub.ai,"
"https://github.com/openai/skills,"
"https://github.com/anthropics/skills,"
"https://github.com/vercel-labs/agent-skills"
)
# ==================== Github & PIP ====================
# Github token提高请求api限流阈值 ghp_****
@@ -494,6 +505,10 @@ class ConfigModel(BaseModel):
LLM_PROVIDER: str = "deepseek"
# LLM模型名称
LLM_MODEL: str = "deepseek-chat"
# 思考模式/深度配置off/auto/minimal/low/medium/high/max/xhigh
LLM_THINKING_LEVEL: Optional[str] = 'off'
# LLM是否支持图片输入开启后消息图片会按多模态输入发送给模型
LLM_SUPPORT_IMAGE_INPUT: bool = True
# LLM API密钥
LLM_API_KEY: Optional[str] = None
# LLM基础URL用于自定义API端点
@@ -538,6 +553,35 @@ class ConfigModel(BaseModel):
# AI智能体自动重试整理失败记录开关
AI_AGENT_RETRY_TRANSFER: bool = False
# 语音能力提供商(当前仅支持 openai
AI_VOICE_PROVIDER: str = "openai"
# 语音识别提供商,未设置时回退到 AI_VOICE_PROVIDER
AI_VOICE_STT_PROVIDER: Optional[str] = None
# 语音合成提供商,未设置时回退到 AI_VOICE_PROVIDER
AI_VOICE_TTS_PROVIDER: Optional[str] = None
# 语音能力 API 密钥,未设置且 LLM_PROVIDER=openai 时回退使用 LLM_API_KEY
AI_VOICE_API_KEY: Optional[str] = None
# 语音识别 API 密钥,未设置时回退到 AI_VOICE_API_KEY
AI_VOICE_STT_API_KEY: Optional[str] = None
# 语音合成 API 密钥,未设置时回退到 AI_VOICE_API_KEY
AI_VOICE_TTS_API_KEY: Optional[str] = None
# 语音能力基础URL未设置且 LLM_PROVIDER=openai 时回退使用 LLM_BASE_URL
AI_VOICE_BASE_URL: Optional[str] = None
# 语音识别基础URL未设置时回退到 AI_VOICE_BASE_URL
AI_VOICE_STT_BASE_URL: Optional[str] = None
# 语音合成基础URL未设置时回退到 AI_VOICE_BASE_URL
AI_VOICE_TTS_BASE_URL: Optional[str] = None
# 语音转文字模型
AI_VOICE_STT_MODEL: str = "gpt-4o-mini-transcribe"
# 文字转语音模型
AI_VOICE_TTS_MODEL: str = "gpt-4o-mini-tts"
# TTS 发音人
AI_VOICE_TTS_VOICE: str = "alloy"
# 语音识别语言
AI_VOICE_LANGUAGE: str = "zh"
# 回复语音时是否同时附带文字说明
AI_VOICE_REPLY_WITH_TEXT: bool = False
class Settings(BaseSettings, ConfigModel, LogConfigModel):
"""
@@ -1015,7 +1059,16 @@ class GlobalVar(object):
# 需应急停止文件整理
EMERGENCY_STOP_TRANSFER: List[str] = []
# 当前事件循环
CURRENT_EVENT_LOOP: AbstractEventLoop = asyncio.get_event_loop()
CURRENT_EVENT_LOOP: AbstractEventLoop = None
@classmethod
def _get_event_loop(cls) -> AbstractEventLoop:
    """
    Return the current thread's event loop, creating one if necessary.

    ``asyncio.get_event_loop()`` raises RuntimeError when there is no
    current loop (e.g. in a non-main thread); in that case a fresh loop is
    created and installed as the thread's current loop.

    NOTE(review): ``get_event_loop`` outside a running loop is deprecated in
    newer Python versions — confirm the target runtime before relying on it.
    """
    try:
        return asyncio.get_event_loop()
    except RuntimeError:
        # No loop set for this thread — create and register one lazily.
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        return loop
def stop_system(self):
"""
@@ -1085,6 +1138,8 @@ class GlobalVar(object):
"""
当前循环
"""
if self.CURRENT_EVENT_LOOP is None:
self.CURRENT_EVENT_LOOP = self._get_event_loop()
return self.CURRENT_EVENT_LOOP
def set_loop(self, loop: AbstractEventLoop):

View File

@@ -6,6 +6,7 @@ import importlib.util
import inspect
import os
import posixpath
import shutil
import sys
import threading
import time
@@ -38,7 +39,7 @@ from app.utils.system import SystemUtils
class PluginManager(ConfigReloadMixin, metaclass=Singleton):
"""插件管理器"""
CONFIG_WATCH = {"DEV", "PLUGIN_AUTO_RELOAD"}
CONFIG_WATCH = {"DEV", "PLUGIN_AUTO_RELOAD", "PLUGIN_LOCAL_REPO_PATHS"}
def __init__(self):
# 插件列表
@@ -51,6 +52,8 @@ class PluginManager(ConfigReloadMixin, metaclass=Singleton):
self._monitor_thread: Optional[threading.Thread] = None
# 监控停止事件
self._stop_monitor_event = threading.Event()
# 本地插件同步写入运行目录后的短时忽略窗口
self._recent_local_sync: Dict[str, float] = {}
# 开发者模式监测插件修改
if settings.DEV or settings.PLUGIN_AUTO_RELOAD:
self.__start_monitor()
@@ -308,11 +311,14 @@ class PluginManager(ConfigReloadMixin, metaclass=Singleton):
运行 watchfiles 监视器的主循环。
"""
# 监视插件目录
plugins_path = str(settings.ROOT_PATH / "app" / "plugins")
plugin_paths = [str(settings.ROOT_PATH / "app" / "plugins")]
for local_repo_path in PluginHelper.get_local_repo_paths():
if local_repo_path.exists() and local_repo_path.is_dir():
plugin_paths.append(str(local_repo_path))
logger.info(">>> 监控线程已启动准备进入watch循环...")
# 使用 watchfiles 监视目录变化,并响应变化事件
# Todo: yield_on_timeout = True 时,每秒检查停止事件,会返回空集合;后续可以考虑用来做心跳之类的功能?
for changes in watch(plugins_path, stop_event=self._stop_monitor_event, rust_timeout=1000,
for changes in watch(*plugin_paths, stop_event=self._stop_monitor_event, rust_timeout=1000,
yield_on_timeout=True):
# 如果收到停止事件,退出循环
if not changes:
@@ -320,18 +326,56 @@ class PluginManager(ConfigReloadMixin, metaclass=Singleton):
# 处理变化事件
plugins_to_reload = set()
local_plugins_to_sync = {}
for _change_type, path_str in changes:
event_path = Path(path_str)
# 跳过非 .py 文件以及 pycache 目录中的文件
if not event_path.name.endswith(".py") or "__pycache__" in event_path.parts:
# 跳过 pycache 目录中的文件
if "__pycache__" in event_path.parts:
continue
if event_path.name == "requirements.txt":
candidate = self._get_local_plugin_candidate_from_path(event_path)
if candidate:
if candidate.get("compatible") is False:
logger.info(
f"检测到本地插件 {candidate.get('id')} 依赖文件变化,"
f"但跳过处理:{candidate.get('skip_reason')}"
)
continue
logger.warn(f"检测到本地插件 {candidate.get('id')} 依赖文件变化,请重新安装本地插件以安装依赖")
continue
# 跳过非 .py 文件
if not event_path.name.endswith(".py"):
continue
# 解析插件ID
pid = self._get_plugin_id_from_path(event_path)
# 跳过无效插件文件
if pid:
# 收集需要重载的插件ID自动去重避免重复重载
runtime_pid = self._get_plugin_id_from_path(event_path)
local_candidate = self._get_local_plugin_candidate_from_path(event_path) if not runtime_pid else None
if runtime_pid:
last_sync_time = self._recent_local_sync.get(runtime_pid)
if last_sync_time and time.time() - last_sync_time < 2:
logger.debug(f"忽略本地插件同步产生的运行目录变化:{runtime_pid}")
continue
# 运行目录变化只重载,不能反向触发本地同步。
plugins_to_reload.add(runtime_pid)
elif local_candidate:
if local_candidate.get("compatible") is False:
package_version = local_candidate.get("package_version")
source_root = f"plugins.{package_version}" if package_version else "plugins"
logger.info(
f"检测到本地插件 {local_candidate.get('id')} 文件变化,来源:{source_root}"
f"文件:{event_path},但跳过同步:{local_candidate.get('skip_reason')}"
)
continue
local_plugins_to_sync[local_candidate.get("id")] = (local_candidate, event_path)
for pid, (candidate, event_path) in local_plugins_to_sync.items():
package_version = candidate.get("package_version")
source_root = f"plugins.{package_version}" if package_version else "plugins"
logger.info(f"检测到本地插件 {pid} 文件变化,来源:{source_root},文件:{event_path}")
if self._sync_local_plugin_if_installed(pid, candidate):
plugins_to_reload.add(pid)
# 触发重载
@@ -351,6 +395,7 @@ class PluginManager(ConfigReloadMixin, metaclass=Singleton):
:return: 插件ID字符串如果不是有效插件文件则返回 None。
"""
try:
event_path = event_path.resolve()
plugins_root = settings.ROOT_PATH / "app" / "plugins"
# 确保修改的文件在 plugins 目录下
if not event_path.is_relative_to(plugins_root):
@@ -389,6 +434,78 @@ class PluginManager(ConfigReloadMixin, metaclass=Singleton):
logger.error(f"从路径解析插件ID时出错: {e}")
return None
@staticmethod
def _get_local_plugin_candidate_from_path(event_path: Path) -> Optional[dict]:
    """
    Resolve a concrete plugin candidate from a local plugin-repository path,
    preserving the plugins / plugins.v2 source distinction.

    :param event_path: filesystem path of the changed file
    :return: candidate info dict from PluginHelper, or None when the path
             does not belong to any configured local repository
    """
    try:
        event_path = event_path.resolve()
        for local_repo_path in PluginHelper.get_local_repo_paths():
            # Skip repositories that are missing or not directories.
            if not local_repo_path.exists() or not local_repo_path.is_dir():
                continue
            if not event_path.is_relative_to(local_repo_path):
                continue
            try:
                relative_parts = event_path.relative_to(local_repo_path).parts
            except (ValueError, IndexError):
                continue
            # Expect at least <package-dir>/<plugin-dir>/... below the repo root.
            if len(relative_parts) < 2:
                continue
            if relative_parts[0] == "plugins":
                package_version = ""
            elif relative_parts[0].startswith("plugins."):
                # e.g. "plugins.v2" -> package version "v2"
                package_version = relative_parts[0].split(".", 1)[1]
            else:
                continue
            plugin_dir_name = relative_parts[1]
            candidate = PluginHelper().get_local_plugin_candidate(
                pid=plugin_dir_name,
                package_version=package_version,
                repo_path=local_repo_path,
                strict_compat=False
            )
            if candidate:
                return candidate
        return None
    except Exception as e:
        logger.error(f"从本地插件仓库路径解析插件候选时出错: {e}")
        return None
@staticmethod
def _sync_local_plugin_if_installed(pid: str, candidate: Optional[dict] = None) -> bool:
    """
    When an installed local plugin's sources change, sync them into the
    runtime plugins directory.

    :param pid: plugin id
    :param candidate: pre-resolved local candidate info; looked up when None
    :return: True when the runtime copy is up to date after the call
    """
    installed_plugins = SystemConfigOper().get(SystemConfigKey.UserInstalledPlugins) or []
    # Uninstalled plugins are never synced or hot-reloaded automatically.
    if pid not in installed_plugins:
        logger.info(f"本地插件 {pid} 尚未安装,跳过自动同步和热重载")
        return False
    candidate = candidate or PluginHelper().get_local_plugin_candidate(pid)
    if not candidate:
        return False
    source_dir = Path(candidate.get("path"))
    dest_dir = settings.ROOT_PATH / "app" / "plugins" / pid.lower()
    try:
        # Source and destination may already be the same directory.
        if source_dir.resolve() == dest_dir.resolve():
            return True
        # Replace the runtime copy wholesale so deleted files disappear too.
        if dest_dir.exists():
            shutil.rmtree(dest_dir, ignore_errors=True)
        shutil.copytree(
            source_dir,
            dest_dir,
            dirs_exist_ok=True,
            ignore=shutil.ignore_patterns("__pycache__", "*.pyc", ".DS_Store")
        )
        # Record the sync time so the file watcher can ignore the resulting
        # runtime-directory change events for a short window.
        PluginManager()._recent_local_sync[pid] = time.time()
        logger.info(f"已同步本地插件 {pid}{source_dir} -> {dest_dir}")
        return True
    except Exception as e:
        logger.error(f"同步本地插件 {pid} 失败:{e}")
        return False
@staticmethod
def __stop_plugin(plugin: Any):
"""
@@ -484,11 +601,14 @@ class PluginManager(ConfigReloadMixin, metaclass=Singleton):
# 获取已安装插件列表
install_plugins = SystemConfigOper().get(SystemConfigKey.UserInstalledPlugins) or []
# 获取在线插件列表
# 获取远程和本地仓库来源插件列表
online_plugins = self.get_online_plugins()
local_repo_plugins = self.get_local_repo_plugins()
candidate_plugins = self.process_plugins_list(online_plugins + local_repo_plugins, []) \
if online_plugins or local_repo_plugins else []
# 确定需要安装的插件
plugins_to_install = [
plugin for plugin in online_plugins
plugin for plugin in candidate_plugins
if plugin.id in install_plugins and not self.is_plugin_exists(plugin.id, plugin.plugin_version)
]
@@ -1041,7 +1161,9 @@ class PluginManager(ConfigReloadMixin, metaclass=Singleton):
else:
base_version_plugins.extend(plugins) # 收集 v1 版本插件
return self._process_plugins_list(higher_version_plugins, base_version_plugins)
result = self.process_plugins_list(higher_version_plugins, base_version_plugins)
logger.info(f"获取到 {len(result)} 个线上插件")
return result
def get_local_plugins(self) -> List[schemas.Plugin]:
"""
@@ -1116,6 +1238,38 @@ class PluginManager(ConfigReloadMixin, metaclass=Singleton):
plugins.sort(key=lambda x: x.plugin_order if hasattr(x, "plugin_order") else 0)
return plugins
def get_local_repo_plugins(self) -> List[schemas.Plugin]:
    """
    Collect plugin metadata from the configured local plugin-repository
    directories.

    :return: plugins sorted by plugin_order, each flagged with is_local
    """
    plugins = []
    installed_apps = SystemConfigOper().get(SystemConfigKey.UserInstalledPlugins) or []
    local_candidates = PluginHelper().get_local_plugin_candidates()
    if not local_candidates:
        return []
    for pid, plugin_info in local_candidates.items():
        package_version = plugin_info.get("package_version")
        plugin = self._process_plugin_info(
            pid=pid,
            plugin_info=plugin_info,
            # Synthetic market URL encodes the local repo path + package version.
            market=PluginHelper.make_local_repo_url(
                pid,
                plugin_info.get("repo_path"),
                package_version
            ),
            installed_apps=installed_apps,
            add_time=0,
            package_version=package_version
        )
        if not plugin:
            continue
        # Mark so UI / dedup logic can distinguish local-repo plugins.
        plugin.is_local = True
        plugins.append(plugin)
    plugins.sort(key=lambda x: x.plugin_order if hasattr(x, "plugin_order") else 0)
    logger.info(f"获取到 {len(plugins)} 个本地插件")
    return plugins
@staticmethod
def is_plugin_exists(pid: str, version: str = None) -> bool:
"""
@@ -1180,8 +1334,8 @@ class PluginManager(ConfigReloadMixin, metaclass=Singleton):
return ret_plugins
@staticmethod
def _process_plugins_list(higher_version_plugins: List[schemas.Plugin],
base_version_plugins: List[schemas.Plugin]) -> List[schemas.Plugin]:
def process_plugins_list(higher_version_plugins: List[schemas.Plugin],
base_version_plugins: List[schemas.Plugin]) -> List[schemas.Plugin]:
"""
处理插件列表:合并、去重、排序、保留最高版本
:param higher_version_plugins: 高版本插件列表
@@ -1194,20 +1348,41 @@ class PluginManager(ConfigReloadMixin, metaclass=Singleton):
# 将未出现在高版本插件列表中的 v1 插件加入 all_plugins
higher_plugin_ids = {f"{p.id}{p.plugin_version}" for p in higher_version_plugins}
all_plugins.extend([p for p in base_version_plugins if f"{p.id}{p.plugin_version}" not in higher_plugin_ids])
# 去重
all_plugins = list({f"{p.id}{p.plugin_version}": p for p in all_plugins}.values())
# 所有插件按 repo 在设置中的顺序排序
all_plugins.sort(
key=lambda x: settings.PLUGIN_MARKET.split(",").index(x.repo_url) if x.repo_url else 0
)
# 相同 ID 的插件保留版本号最大的版本
max_versions = {}
for p in all_plugins:
if p.id not in max_versions or StringUtils.compare_version(p.plugin_version, ">", max_versions[p.id]):
max_versions[p.id] = p.plugin_version
result = [p for p in all_plugins if p.plugin_version == max_versions[p.id]]
logger.info(f"共获取到 {len(result)} 个线上插件")
return result
markets = [item for item in settings.PLUGIN_MARKET.split(",") if item]
def repo_order(plugin: schemas.Plugin) -> int:
if PluginHelper.is_local_repo_url(plugin.repo_url):
return len(markets) + 1
if plugin.repo_url in markets:
return markets.index(plugin.repo_url)
return len(markets)
# 去重:同 ID + 版本优先保留市场来源,其次按来源顺序稳定保留。
dedup_plugins = {}
for plugin in sorted(all_plugins, key=repo_order):
key = f"{plugin.id}{plugin.plugin_version}"
exists = dedup_plugins.get(key)
if not exists:
dedup_plugins[key] = plugin
continue
if PluginHelper.is_local_repo_url(exists.repo_url) and not PluginHelper.is_local_repo_url(plugin.repo_url):
dedup_plugins[key] = plugin
# 相同 ID 的插件保留版本号最大的版本;同版本市场来源优先。
result_by_id = {}
for plugin in sorted(dedup_plugins.values(), key=repo_order):
exists = result_by_id.get(plugin.id)
if not exists:
result_by_id[plugin.id] = plugin
continue
if StringUtils.compare_version(plugin.plugin_version, ">", exists.plugin_version):
result_by_id[plugin.id] = plugin
elif plugin.plugin_version == exists.plugin_version \
and PluginHelper.is_local_repo_url(exists.repo_url) \
and not PluginHelper.is_local_repo_url(plugin.repo_url):
result_by_id[plugin.id] = plugin
return list(result_by_id.values())
def _process_plugin_info(self, pid: str, plugin_info: dict, market: str,
installed_apps: List[str], add_time: int,
@@ -1354,7 +1529,9 @@ class PluginManager(ConfigReloadMixin, metaclass=Singleton):
else:
base_version_plugins.extend(plugins) # 收集 v1 版本插件
return self._process_plugins_list(higher_version_plugins, base_version_plugins)
result = self.process_plugins_list(higher_version_plugins, base_version_plugins)
logger.info(f"获取到 {len(result)} 个线上插件")
return result
async def async_get_plugins_from_market(self, market: str,
package_version: Optional[str] = None,

Some files were not shown because too many files have changed in this diff Show More