Compare commits

...

754 Commits
v2.9.2 ... v2

Author SHA1 Message Date
jxxghp
06197144c0 refactor(qbittorrent): convert static methods to instance methods for better encapsulation 2026-05-07 08:25:34 +08:00
jxxghp
62541ffe43 fix(qbittorrent): restore qBittorrent 5.2 compatibility
Support WebUI API Key auth, newer add responses, and cookie sync so qBittorrent 5.2 can connect reliably while keeping legacy fallback behavior.

Fixes #5724
2026-05-07 07:41:05 +08:00
jxxghp
c762628217 fix(agent): preserve full command output in temp logs
Return only a 10KB preview to the agent so large command results do not overwhelm conversations while keeping the full output available for follow-up reads. Add pytest to the project dependencies to make the regression tests runnable in the project venv.
2026-05-06 20:04:17 +08:00
jxxghp
caf615f3bd feat(system): implement one-shot upgrade mode and enhance upgrade handling 2026-05-05 15:22:33 +08:00
jxxghp
27436757a0 Update version.py 2026-05-05 12:43:09 +08:00
jxxghp
924d54dfd3 perf(search): filter search results in parallel per site 2026-05-05 09:01:18 +08:00
jxxghp
39f9550f86 fix(agent): fix username mapping when adding subscriptions 2026-05-04 21:27:48 +08:00
Attente
367ecafbbb fix(subscribe): fix the season detection logic for TV show subscriptions 2026-05-04 11:34:13 +08:00
jxxghp
10467244e0 align llm provider registry with opencode endpoints 2026-05-03 09:36:39 +08:00
Yifan
cb6dcc6a2e refactor jellyfin module load logic in unittest 2026-05-02 16:32:18 +08:00
奕凡
43c421b0bb Import call from unittest.mock for additional testing 2026-05-02 16:32:18 +08:00
奕凡
45d0891502 Potential fix for pull request finding
Co-authored-by: Copilot Autofix powered by AI <175728472+Copilot@users.noreply.github.com>
2026-05-02 16:32:18 +08:00
奕凡
76c5f54465 Apply suggestions from code review
Co-authored-by: Copilot Autofix powered by AI <175728472+Copilot@users.noreply.github.com>
2026-05-02 16:32:18 +08:00
奕凡
bcf8116172 handle best_admin_id is None
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
2026-05-02 16:32:18 +08:00
奕凡
1f889596b7 fix f-string usage
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
2026-05-02 16:32:18 +08:00
Yifan
04443fcfba fix(jellyfin): resolve URL string interpolation failure and enhance RBAC fallback resilience
Co-authored-by: Copilot <copilot@github.com>
2026-05-02 16:32:18 +08:00
jxxghp
5d7a7fd301 Update message.py 2026-05-01 10:02:23 +08:00
jxxghp
4d0a722b09 refactor: reorganize interaction chain 2026-05-01 09:53:04 +08:00
jxxghp
db6dc926cf feat: unify slash command interactions 2026-05-01 08:53:52 +08:00
jxxghp
4bb4f5aeb5 Update version.py 2026-04-30 21:08:31 +08:00
jxxghp
58e25fe900 Delete test_openai_stream_patch.py 2026-04-30 21:02:58 +08:00
jxxghp
03f6b9bc96 Delete test_openai_responses_patch3.py 2026-04-30 21:02:25 +08:00
jxxghp
6fdda3a570 Delete test_openai_copilot_patch.py 2026-04-30 21:01:49 +08:00
jxxghp
100eaec38f feat: improve tool selection prompt with clearer instructions 2026-04-30 20:33:46 +08:00
jxxghp
b129508304 chore: disable tool selection middleware by setting LLM_MAX_TOOLS to 0 2026-04-30 19:07:07 +08:00
jxxghp
53bf81aede refactor: rename MoviePilotToolSelectorMiddleware to ToolSelectorMiddleware and enhance tool selection logic 2026-04-30 19:05:49 +08:00
jxxghp
afcc071d07 feat: optimize tool selection middleware to cache and reuse tool selection per agent run
- Refactor MoviePilotToolSelectorMiddleware to perform tool selection once per agent execution and cache the result in state, avoiding redundant LLM calls for each model round.
- Add abefore_agent to select tools at the start of agent execution and store selected tool names in state.
- Update awrap_model_call to reuse cached tool selection from state for subsequent model calls.
- Enhance test coverage for tool selection caching and reuse logic.
- Improve error logging in skill version extraction.
2026-04-30 18:29:54 +08:00
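The commit above caches the LLM tool selection once per agent run instead of re-selecting on every model round. A minimal sketch of that caching pattern, with a hypothetical select_tools() helper and state key (illustrative names, not the project's actual middleware API):

```python
# Illustrative sketch of per-run tool-selection caching (hypothetical names).
from typing import Awaitable, Callable, Dict, List

SELECTED_TOOLS_KEY = "selected_tool_names"  # assumed state key


class ToolSelectionCache:
    def __init__(self, select_tools: Callable[[str, List[str]], Awaitable[List[str]]]):
        # select_tools(query, all_tool_names) -> chosen tool names (one LLM call)
        self._select_tools = select_tools

    async def before_agent(self, state: Dict, query: str, all_tools: List[str]) -> None:
        # Run tool selection once at the start of the agent run and cache the result.
        if SELECTED_TOOLS_KEY not in state:
            state[SELECTED_TOOLS_KEY] = await self._select_tools(query, all_tools)

    def tools_for_model_call(self, state: Dict, all_tools: List[str]) -> List[str]:
        # Reuse the cached selection for every subsequent model round.
        return state.get(SELECTED_TOOLS_KEY, all_tools)
```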
jxxghp
2ea617655c refactor: streamline agent initialization and parameter handling for improved clarity and consistency 2026-04-30 18:03:04 +08:00
jxxghp
0583495548 refactor: remove legacy disable_thinking and reasoning_effort parameters from LLM helper and related tests 2026-04-30 17:10:14 +08:00
jxxghp
516aea6312 refactor: rename llm variables for clarity and consistency in agent initialization 2026-04-30 16:41:46 +08:00
jxxghp
2d412cae1c style: improve log formatting for torrent publish time checks in FilterModule 2026-04-30 16:28:28 +08:00
jxxghp
45f5326fb4 fix tool selection middleware 2026-04-30 13:47:43 +08:00
jxxghp
2ccea2da39 chore: update langchain-anthropic, openai, and google-genai dependencies 2026-04-30 13:14:03 +08:00
jxxghp
53f6897d62 feat: ensure essential tools are always included in LLM tool selection and update tests
- Add mechanism to always include core tools (e.g., file operations, command execution) in LLMToolSelectorMiddleware
- Update MoviePilotToolFactory to provide filtered always-include tool names based on loaded tools
- Set default LLM_MAX_TOOLS to 30 in config
- Refactor agent initialization to support always_include parameter
- Enhance tests to cover always_include logic and async agent creation
2026-04-30 13:04:52 +08:00
jxxghp
28a2386f2f feat: add agent tools for querying and managing filter rules and rule groups
- Add tools for querying built-in and custom filter rules, and for adding, updating, and deleting custom rules and rule groups
- Refactor filter module to use shared builtin rule definitions
- Enhance rule group querying to include syntax guidance and usage references
- Add unittests for agent filter rule tools registration and parsing logic
2026-04-30 12:56:38 +08:00
jxxghp
abda9d3212 feat: improve context_tokens_k calculation and update Tencent provider name 2026-04-30 11:41:00 +08:00
jxxghp
34e7c4ac14 feat: enhance openai-compatible provider support and patch responses API instructions handling
- Add compatibility patch for langchain-openai responses API to ensure system messages are extracted as top-level instructions, addressing Codex endpoint requirements.
- Update provider list: add Alibaba, Volcengine, and Tencent TokenHub; adjust SiliconFlow and MiniMax endpoints; refine provider ordering and model list strategies.
- Extend models.dev-only listing logic for providers lacking stable models.list endpoints.
- Increase models.dev cache TTL for improved efficiency.
- Add tests for openai responses API and streaming compatibility patches.
2026-04-30 11:32:55 +08:00
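The first bullet of the commit above (extracting system messages as top-level instructions for the responses API) boils down to splitting the message list before the request is built. A rough sketch of that transformation using generic dict-shaped messages rather than the real langchain-openai types:

```python
from typing import Dict, List, Optional, Tuple

def split_system_instructions(messages: List[Dict[str, str]]) -> Tuple[Optional[str], List[Dict[str, str]]]:
    """Pull system messages out of a chat history and merge them into one
    top-level instructions string, returning (instructions, remaining_messages)."""
    system_parts = [m["content"] for m in messages if m.get("role") == "system"]
    remaining = [m for m in messages if m.get("role") != "system"]
    instructions = "\n\n".join(system_parts) if system_parts else None
    return instructions, remaining

# The instructions string would be sent as the request-level field,
# while `remaining` becomes the input message list.
```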
jxxghp
b228107a25 refactor: migrate LLM helper to agent module and add unified LLM API endpoints
- Move LLMHelper and related logic from app.helper.llm to app.agent.llm.helper
- Update all imports to reference new LLMHelper location
- Introduce app/agent/llm/__init__.py for internal LLM adapter exports
- Add llm.py API router with endpoints for model listing, provider auth, and test calls
- Remove legacy LLM endpoints from system.py
- Update requirements for langchain-anthropic and anthropic
- Refactor test_llm_helper_testcall.py for async LLMHelper usage and new import paths
2026-04-30 09:48:50 +08:00
jxxghp
2375508616 Restore background dispatch without channel context 2026-04-30 07:04:59 +08:00
jxxghp
baebd0ed1a Fix background prompt message leakage 2026-04-30 06:58:43 +08:00
jxxghp
6532c60a3c Refine agent background reply handling 2026-04-30 00:25:23 +08:00
jxxghp
11478faff3 separate reply sending from output persistence 2026-04-29 23:56:16 +08:00
jxxghp
e9291cec6a respect output persistence in background agent replies 2026-04-29 23:51:03 +08:00
jxxghp
7586a2cd42 disable agent message tools for ui background tasks 2026-04-29 23:30:59 +08:00
jxxghp
ef5bd29759 move ui background message suppression into agent context 2026-04-29 23:22:37 +08:00
jxxghp
7ab643d34a suppress channel notifications for ui background tasks 2026-04-29 23:13:57 +08:00
jxxghp
0b7505a604 refactor search AI recommendation flow 2026-04-29 22:55:27 +08:00
jxxghp
460d716512 feat: add batch AI re-organize for transfer history and search result recommendation
- Implement batch AI re-organize endpoint for transfer history with progress tracking
- Add batch_manual_transfer_redo system task template and prompt generation
- Refactor agent_manager to support generic background prompt execution
- Add AIRecommendChain for search result recommendation using agent background prompt
- Update search endpoints to use new AIRecommendChain and remove legacy code
- Enhance test cases for batch manual transfer redo
- Minor code cleanup and style fixes
2026-04-29 22:16:04 +08:00
jxxghp
b6f0ef99ab 更新 version.py 2026-04-29 19:02:35 +08:00
jxxghp
af35101774 fix: default to text replies for voice input 2026-04-29 18:54:58 +08:00
jxxghp
9ed5018cc2 refactor: clarify attachment data url handling 2026-04-29 18:51:39 +08:00
jxxghp
7299733960 Adjust the voice file size limit; reject transcription when a file exceeds 10MB 2026-04-29 18:41:54 +08:00
jxxghp
bd5c3d848c Fix the recursive call in _resolve_provider_name; make it a static method and standardize provider name resolution 2026-04-29 18:41:24 +08:00
jxxghp
38c48fa4ce Streamline OpenAIVoiceProvider logic, simplify credential and provider resolution, and adjust the maximum transcription file size limit 2026-04-29 18:32:12 +08:00
jxxghp
b7749c44fd Refactor voice capability configuration and logic, unify the audio input/output switches, and improve the voice-reply decision 2026-04-29 18:15:34 +08:00
jxxghp
e4a7333b79 Change the default value of the LLM_TEMPERATURE config option to 0.3 2026-04-29 16:54:14 +08:00
jxxghp
4b27b7bc42 Move the UgreenCrypto module to app.modules.ugreen and update related references 2026-04-29 16:29:17 +08:00
jxxghp
c91e87115a Adjust System Core Prompt.txt: move the core capability descriptions to a more prominent position and improve the structure 2026-04-29 16:25:16 +08:00
jxxghp
4a3cc5ee18 Add several persona description documents and improve test cases 2026-04-29 16:20:44 +08:00
jxxghp
54d6c2ad4a Update __init__.py 2026-04-29 15:27:37 +08:00
jxxghp
090dcacd30 Update README.md 2026-04-29 14:38:35 +08:00
jxxghp
344280cd61 Refactor agent persona runtime layering 2026-04-29 14:12:47 +08:00
jxxghp
2c7fb5786c Update _plugin_tool_utils.py 2026-04-29 09:16:02 +08:00
jxxghp
6b9790026c refine plugin agent tool responsibilities 2026-04-29 08:50:48 +08:00
jxxghp
6c70531967 Refactor _query_plugin_data to static async method 2026-04-29 08:31:38 +08:00
jxxghp
bcc321eb70 add plugin agent management tools 2026-04-29 08:29:04 +08:00
MseeP.ai
2ff1cd1045 Add MseeP.ai badge to README.md 2026-04-29 08:02:47 +08:00
jxxghp
7fc496cf5b Update __init__.py 2026-04-29 07:31:52 +08:00
jxxghp
8789f35228 Improve non-verbose agent tool summaries 2026-04-29 07:07:33 +08:00
jxxghp
d4dec90e2f Update version.py 2026-04-28 20:49:05 +08:00
jxxghp
5c1487a9a6 Optimize agent tool async blocking paths 2026-04-28 20:36:49 +08:00
jxxghp
c5b716c231 feat: introduce unified agent runtime config and system task prompt framework
- Add structured runtime config files (AGENT_PROFILE.md, AGENT_WORKFLOW.md, AGENT_HOOKS.md, USER_PREFERENCES.md, SYSTEM_TASKS.md, CURRENT_PERSONA.md) for persona, workflow, hooks, and system tasks
- Implement agent_runtime_manager to load, validate, and render runtime config and system task prompts
- Refactor agent initialization to use runtime-managed directories for skills, jobs, memory, and activity logs
- Add AgentHooksMiddleware for structured pre/in/post hooks injection
- Replace hardcoded system task prompts with template-driven rendering from SYSTEM_TASKS.md
- Update tests to cover runtime config loading, migration, and system task prompt rendering
- Update .gitignore to exclude config/agent/
2026-04-28 13:04:28 +08:00
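The commit above replaces hardcoded system task prompts with templates rendered from SYSTEM_TASKS.md. A minimal sketch of template-driven rendering using string.Template; the section layout and placeholder syntax here are assumptions, not the project's actual file format:

```python
from pathlib import Path
from string import Template

def render_system_task(tasks_file: Path, task_name: str, **params: str) -> str:
    """Render one named task section from a markdown file of '## <task>' sections,
    substituting $placeholders with the supplied parameters."""
    text = tasks_file.read_text(encoding="utf-8")
    sections = {}
    current, lines = None, []
    for line in text.splitlines():
        if line.startswith("## "):
            if current is not None:
                sections[current] = "\n".join(lines)
            current, lines = line[3:].strip(), []
        elif current is not None:
            lines.append(line)
    if current is not None:
        sections[current] = "\n".join(lines)
    return Template(sections[task_name]).safe_substitute(**params)
```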
jxxghp
483fe55372 fix: correct Plex notification image lookup
Closes #5700
2026-04-28 09:19:18 +08:00
jxxghp
5d588ee127 fix: correct traditional Chinese subtitle rename detection
Fixes #5703
2026-04-28 09:00:14 +08:00
jxxghp
afcd895f52 fix: backfill transfer download history matching
Fixes #5702
2026-04-28 08:55:40 +08:00
jxxghp
1ded58adbb fix: adapt audiences user data parser 2026-04-27 12:56:45 +08:00
jxxghp
019a077407 Apply suggestions from code review
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2026-04-27 11:35:44 +08:00
PKC278
0f190057d3 fix #5528 2026-04-27 11:35:44 +08:00
jxxghp
840c8f7298 Bump APP_VERSION to v2.10.6 2026-04-27 11:32:39 +08:00
jxxghp
6a6bcf59a0 Enhance the execute_command tool: support output truncation, concurrency limits, and process group cleanup; add unit tests 2026-04-27 10:05:25 +08:00
jxxghp
323844b26d revert execute_command streaming changes
Restore the previous subprocess handling for execute_command and drop the new command streaming test so agent startup is unblocked.
2026-04-27 08:12:37 +08:00
jxxghp
140d224a9a fix agent stream blocking during command execution
Offload synchronous message edits from the event loop and stream subprocess output so long-running commands stay responsive.
2026-04-27 07:57:32 +08:00
jxxghp
7bc032d17c Revert Telegram duplicate edit fix 2026-04-27 07:36:13 +08:00
jxxghp
2df476dbff Fix Telegram duplicate message edits 2026-04-27 07:17:58 +08:00
jxxghp
bae086d8b8 Update __init__.py 2026-04-27 06:57:18 +08:00
jxxghp
221eb21694 refine internal middleware llm usage for streaming agents
Use a non-streaming model for middleware-only calls so internal outputs do not leak into user streams and model-based middleware stays consistent.
2026-04-27 06:55:41 +08:00
jxxghp
4208c79d72 Refine tool prompt wording to a more concise style, add the last_buffer_char property and newline handling for non-VERBOSE streaming output, and add unit tests for the tool streaming separator 2026-04-26 11:15:11 +08:00
jxxghp
90245a13e1 refine non-verbose prompt wording 2026-04-26 08:54:07 +08:00
jxxghp
b5979b9b09 refine agent subscription defaults and silent tool prompts 2026-04-26 08:51:56 +08:00
jxxghp
0277288a41 feat: add agent session usage status reporting
Track per-session model and token usage so users can inspect context pressure and cumulative usage with /session_status.
2026-04-26 08:19:05 +08:00
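Per-session usage reporting like the /session_status command above only needs a small accumulator keyed by session. A hedged sketch; the field names and report format are illustrative:

```python
from dataclasses import dataclass
from typing import Dict

@dataclass
class SessionUsage:
    model: str = ""
    prompt_tokens: int = 0
    completion_tokens: int = 0
    requests: int = 0

    def add(self, model: str, prompt_tokens: int, completion_tokens: int) -> None:
        self.model = model
        self.prompt_tokens += prompt_tokens
        self.completion_tokens += completion_tokens
        self.requests += 1

_usage: Dict[str, SessionUsage] = {}

def record_usage(session_id: str, model: str, prompt: int, completion: int) -> None:
    # Called after each model round to accumulate per-session totals.
    _usage.setdefault(session_id, SessionUsage()).add(model, prompt, completion)

def session_status(session_id: str) -> str:
    u = _usage.get(session_id, SessionUsage())
    return (f"model={u.model or 'n/a'} requests={u.requests} "
            f"prompt_tokens={u.prompt_tokens} completion_tokens={u.completion_tokens}")
```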
jxxghp
79bfeaf2af Remove the stream reset before tool calls so the model's thinking text stays visible 2026-04-25 23:12:34 +08:00
jxxghp
4fe41ba5e9 Update base.py 2026-04-25 22:16:15 +08:00
jxxghp
14d6e2febc Refine agent prompts for concise professional replies 2026-04-25 22:04:35 +08:00
jxxghp
97c7e71207 Update Agent Prompt.txt 2026-04-25 21:51:47 +08:00
jxxghp
8f29a218ea chore: bump version to v2.10.5 2026-04-25 12:55:33 +08:00
jxxghp
4fd5aa3eb6 fix: improve DeepSeek reasoning_content payload handling and update langchain dependencies 2026-04-25 12:46:21 +08:00
jxxghp
bfc27d151c Update ask_user_choice.py 2026-04-25 11:36:36 +08:00
jxxghp
f2b56b8f40 Update ask_user_choice.py 2026-04-25 11:35:32 +08:00
jxxghp
a05ffc07d4 refactor: remove legacy LLM_DISABLE_THINKING and LLM_REASONING_EFFORT config, unify thinking_level handling
- Eliminate support for LLM_DISABLE_THINKING and LLM_REASONING_EFFORT in config, code, and tests
- Simplify LLM thinking level logic to rely solely on LLM_THINKING_LEVEL
- Refactor LLMHelper and related endpoints to remove legacy parameter handling
- Update system API and test utilities to match new configuration structure
- Minor code cleanup and formatting improvements
2026-04-25 10:42:03 +08:00
jxxghp
4a81417fb7 fix: preserve deepseek reasoning content in tool loops 2026-04-25 09:37:01 +08:00
jxxghp
c7fa3dc863 feat: unify llm thinking level controls 2026-04-24 19:50:23 +08:00
jxxghp
28f9756dd6 feat: improve skill instructions with highlighted command formatting 2026-04-22 18:12:21 +08:00
jxxghp
4bffe2cff1 chore: bump version to v2.10.4 2026-04-22 18:02:28 +08:00
jxxghp
fca478f1d8 feat: support custom skill sources in /skills 2026-04-22 18:00:57 +08:00
Sebastian
097dff13a3 feat: add ai-compatible API endpoints 2026-04-22 17:21:43 +08:00
jxxghp
460b386004 feat: add searchable skills marketplace 2026-04-22 16:49:42 +08:00
jxxghp
89bf89c02d feat: add clawhub skill registry source 2026-04-22 16:22:10 +08:00
jxxghp
cefb60ba2c refactor: unify message interactions 2026-04-22 15:18:04 +08:00
jxxghp
8c78627647 feat: add skills marketplace management 2026-04-22 14:55:00 +08:00
jxxghp
51189210c2 Update config.py 2026-04-22 10:39:25 +08:00
jxxghp
38933d5882 feat(agent): support disabling model thinking 2026-04-22 10:36:36 +08:00
jxxghp
4619fc4042 Update version.py 2026-04-21 22:25:57 +08:00
jxxghp
ee7ba28235 Allow LLM test to use request payload 2026-04-21 22:14:19 +08:00
笨笨
409abb66be test: remove absolute path from llm helper test 2026-04-21 20:39:32 +08:00
笨笨
8aa8b1897b feat: add llm test endpoint 2026-04-21 20:39:32 +08:00
jxxghp
8c256d91bd refine custom identifier skill scope 2026-04-21 17:31:37 +08:00
jxxghp
d1d3fc7f30 Update media.py 2026-04-21 14:38:16 +08:00
jxxghp
ae15eac0f8 feat: normalize internal system user ID in notification dispatch
- Add SYSTEM_INTERNAL_USER_ID constant and helpers to app.utils.identity
- Ensure internal user ID is normalized to None before dispatching notifications, preventing misrouting to external channels
- Refactor MessageChain to use normalization for all message dispatch methods
- Add tests for internal user ID normalization and notification dispatch behavior
2026-04-21 14:32:14 +08:00
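The normalization described above is essentially "treat the internal sentinel user ID as no user at all" before dispatch. A minimal sketch; the sentinel value itself is an assumption:

```python
from typing import Optional

SYSTEM_INTERNAL_USER_ID = "__system__"  # assumed sentinel value

def normalize_userid(userid: Optional[str]) -> Optional[str]:
    """Map the internal system user ID to None so notifications are broadcast
    instead of being misrouted to an external channel user."""
    if userid is None or str(userid) == SYSTEM_INTERNAL_USER_ID:
        return None
    return userid
```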
jxxghp
1282ad5004 feat: improve local CLI startup management 2026-04-21 11:26:56 +08:00
笨笨
6f6fcc79f2 fix: serialize rclone folder creation during concurrent transfers 2026-04-20 21:34:35 +08:00
jxxghp
e5c64e73b5 docs: add English README 2026-04-20 19:46:34 +08:00
jxxghp
93a19b467b Add uninstall workflow to local CLI 2026-04-20 13:38:06 +08:00
jxxghp
4ba8d42272 fix #5688 2026-04-19 17:29:07 +08:00
jxxghp
32e247b4d5 Update version.py 2026-04-19 15:44:22 +08:00
InfinityPacer
1d0d09c909 fix(plugin): merge local repo sources during sync 2026-04-19 07:07:00 +08:00
InfinityPacer
b7ee6ca8c4 fix(plugin): sanitize local repo path telemetry 2026-04-19 07:07:00 +08:00
InfinityPacer
4a4d93e7f9 refactor(plugin): expose plugin list processing helper 2026-04-19 07:07:00 +08:00
InfinityPacer
7b096c0a09 feat(plugin): encode local repo path in source url 2026-04-19 07:07:00 +08:00
InfinityPacer
3a93efb082 refactor(plugin): centralize local install dispatch 2026-04-19 07:07:00 +08:00
InfinityPacer
73cdd297b1 refactor(plugin): align local repo naming 2026-04-19 07:07:00 +08:00
InfinityPacer
83187ea17d refactor(plugin): rename local repo paths setting 2026-04-19 07:07:00 +08:00
InfinityPacer
6d8eed30ce fix(plugin): reload monitor on local path changes 2026-04-19 07:07:00 +08:00
InfinityPacer
6fa48afa34 feat(plugin): support local plugin sources 2026-04-19 07:07:00 +08:00
jxxghp
115fb40772 Allow known nettest redirects 2026-04-18 18:27:03 +08:00
jxxghp
10b0dbb5d3 Add nettest documentation comments 2026-04-18 17:52:01 +08:00
jxxghp
4c32ad902b Harden system nettest SSRF handling 2026-04-18 17:43:38 +08:00
jxxghp
787db8f5ac fix: fix event loop acquisition failure in subprocess environments 2026-04-17 13:02:28 +08:00
jxxghp
df1b2067b6 fix: correct the python_version format in docker and update.sh to match the sites.cpython-* naming convention 2026-04-17 11:05:26 +08:00
jxxghp
f3d9f25d02 Optimize resource package downloads to fetch only the sites files matching the current OS and Python version 2026-04-17 08:37:50 +08:00
jxxghp
eea7e3b55f feat(cli): optimize installation command and support initializing user password 2026-04-16 23:43:20 +08:00
jxxghp
810cb0a203 relax local install python requirement to 3.11 2026-04-16 23:13:45 +08:00
jxxghp
e0e21e39a2 refactor: generalize agent interaction requests 2026-04-16 22:51:51 +08:00
jxxghp
cc31c66b93 feat: add agent button choice workflow 2026-04-16 22:32:59 +08:00
jxxghp
011535fbc3 feat: add retry actions for failed transfers 2026-04-16 22:07:21 +08:00
jxxghp
77b95d11fb bump version to v2.10.1 2026-04-16 19:55:35 +08:00
jxxghp
89f6164eba automate local bootstrap prerequisites 2026-04-16 19:47:56 +08:00
jxxghp
70350aa39f fix local update dirty check 2026-04-16 19:36:55 +08:00
jxxghp
61a0a66c47 support local restart and site auth wizard 2026-04-16 19:21:00 +08:00
jxxghp
6fcc5c84a6 bump version to v2.10.0 2026-04-16 17:14:30 +08:00
jxxghp
5995b3f3e8 extend setup wizard for database and agent 2026-04-16 17:10:25 +08:00
jxxghp
60996be71b fix local db initialization model registration 2026-04-16 17:05:57 +08:00
jxxghp
49b50e5975 run setup config step inside venv 2026-04-16 17:00:49 +08:00
jxxghp
262bd6808b update reused bootstrap repo before setup 2026-04-16 16:51:44 +08:00
jxxghp
e9c8db9950 fix bootstrap script for macos bash 2026-04-16 16:43:21 +08:00
jxxghp
02a98f832f fix local cli install and config workflow 2026-04-16 14:55:31 +08:00
jxxghp
9a2a241a30 add full-stack local cli install flow 2026-04-16 09:52:15 +08:00
jxxghp
04c2a1eb18 Add manual AI redo flow 2026-04-15 17:10:18 +08:00
jxxghp
65a4b7438c Update config.py 2026-04-15 09:02:05 +08:00
jxxghp
13c3c082b8 Improve agent image capability routing 2026-04-15 08:55:32 +08:00
jxxghp
bf127d6a70 Update version.py 2026-04-14 18:10:22 +08:00
jxxghp
117672384c Update llm.py 2026-04-14 16:00:44 +08:00
jxxghp
2ae2ea8ef7 feat: expose AI agent flag in user global settings 2026-04-14 15:50:46 +08:00
jxxghp
7a5e513f25 feat(agent): support file attachments and local file replies 2026-04-14 15:22:01 +08:00
InfinityPacer
81828948dd fix(transfer): tighten queue cleanup edge cases 2026-04-14 14:45:18 +08:00
InfinityPacer
eda73e14f7 refactor(transfer): make queue job migration explicit 2026-04-14 14:45:18 +08:00
InfinityPacer
6aec326d05 fix(transfer): fail stale queue tasks on errors 2026-04-14 14:45:18 +08:00
InfinityPacer
d36dd69ec3 fix(transfer): clean migrated queue jobs 2026-04-14 14:45:18 +08:00
ilvsx
1688063450 fix(subtitle): create missing download root before saving subtitles 2026-04-14 12:24:18 +08:00
InfinityPacer
ae5207f0e4 fix(plugin): handle 404 plugin index and None response safely 2026-04-13 18:34:44 +08:00
jxxghp
f1f4743936 fix #5661: do not raise an error when a plugin's package file does not exist 2026-04-13 09:06:45 +08:00
jxxghp
e09f9ad009 feat(agent): add audio message extraction and download support for Slack, QQ, Discord, SynologyChat, and VoceChat 2026-04-13 08:36:57 +08:00
InfinityPacer
8d938c2273 fix(system): expose backend dev flag only in dev mode 2026-04-13 06:54:33 +08:00
jxxghp
e5f97cd299 feat(agent): add voice message support with TTS/STT for Telegram and WeChat
- Integrate voice message handling: detect and extract audio references from Telegram and WeChat messages, route to agent with voice reply preference.
- Add voice provider abstraction and OpenAI-based TTS/STT implementation.
- Implement agent tool `send_voice_message` for generating and sending voice replies, with fallback to text if voice is unavailable.
- Extend agent prompt and context to support voice reply instructions.
- Update notification and message schemas to support audio fields.
- Add Telegram and WeChat voice sending logic, including audio file conversion and temporary media upload for WeChat.
- Add tests for voice helper and agent voice routing.
2026-04-12 12:30:02 +08:00
jxxghp
9dababbcfd Update version.py 2026-04-12 10:27:01 +08:00
jxxghp
9d8bd5044b Update version.py 2026-04-12 08:46:09 +08:00
InfinityPacer
5d07381111 chore(subscribe): update last_update when refreshing episode totals 2026-04-11 22:58:24 +08:00
InfinityPacer
61c695b77d fix(subscribe): reset tv episode counts in history response 2026-04-11 22:58:24 +08:00
InfinityPacer
1ceb8891b0 fix(subscribe): refresh total episodes before completion check 2026-04-11 22:58:24 +08:00
jxxghp
2f53fd3108 Expand image and edit support across messaging channels 2026-04-11 22:10:54 +08:00
jxxghp
bf2d2cbd03 Fix Telegram agent image download path 2026-04-11 21:11:03 +08:00
jxxghp
cb323653b8 Add tracing logs for agent image message flow 2026-04-11 20:58:20 +08:00
jxxghp
edf3946558 Fix forwarded image payload parsing for agent channels 2026-04-11 20:55:14 +08:00
jxxghp
6c5fae56d9 Add agent image support for Telegram and Slack 2026-04-11 20:40:02 +08:00
DDSRem
a4f2c574b0 fix(telegram): pass disable_web_page_preview through edit_message_text
Interactive plugin flows edit existing messages; the flag was only applied
on send_message, so link previews stayed enabled after edits.

Co-authored-by: Cursor Agent <cursoragent@cursor.com>
2026-04-11 08:31:15 +08:00
InfinityPacer
815d83bfb3 fix(http): close helper responses consistently 2026-04-10 18:21:30 +08:00
InfinityPacer
df3294c9d2 fix(http): require 200 for share reporting requests 2026-04-10 18:21:30 +08:00
InfinityPacer
1af5f02832 fix(http): use explicit success checks in async callers 2026-04-10 18:21:30 +08:00
InfinityPacer
217fcfd1b2 fix(http): close non-success responses safely 2026-04-10 18:21:30 +08:00
jxxghp
80825584ac Update version.py 2026-04-10 17:02:45 +08:00
jxxghp
10543eedd0 feat(search): support progressive (SSE) resource search with real-time progress and results
- Add /media/{mediaid}/stream and /title/stream endpoints for SSE-based progressive search
- Add async_search_by_title_stream, async_search_by_id_stream, async_process_stream, and __async_search_all_sites_stream methods to SearchChain
- Push results in real time as each site completes, with progress, candidate, filter, and completion stage events
- Improve parameter parsing and exception handling for a better large-scale search experience
2026-04-10 16:50:23 +08:00
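The SSE endpoints above push per-site results as stage events. A rough sketch of the event shape as a plain async generator yielding SSE-formatted frames; the stage names mirror the commit description but the helper itself is hypothetical:

```python
import asyncio
import json
from typing import Any, AsyncIterator, Awaitable, Callable, Dict, List

def _sse(event: str, data: Dict[str, Any]) -> str:
    # One SSE frame: an event name plus a JSON data payload.
    return f"event: {event}\ndata: {json.dumps(data, ensure_ascii=False)}\n\n"

async def search_stream(sites: List[str],
                        search_site: Callable[[str], Awaitable[List[dict]]]) -> AsyncIterator[str]:
    """Yield SSE frames as each site finishes: progress, then results, then a finish event."""
    tasks = [asyncio.create_task(search_site(site)) for site in sites]
    done = 0
    for finished in asyncio.as_completed(tasks):
        results = await finished
        done += 1
        yield _sse("progress", {"done": done, "total": len(sites)})
        yield _sse("result", {"torrents": results})
    yield _sse("finish", {"total": len(sites)})
```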
jxxghp
bf12a8679d refactor: remove the redundant try block in the agent batch retry logic and fix indentation 2026-04-10 15:03:49 +08:00
InfinityPacer
8cd12ab584 fix(plugin): avoid caching failed plugin index responses 2026-04-10 14:34:00 +08:00
InfinityPacer
351de8b4da feat(plugin): reuse plugin wheels in batch dependency install 2026-04-10 13:32:30 +08:00
jxxghp
75fca971d4 refactor(agent): rename can_edit_message to is_auto_flushing for clearer semantics 2026-04-09 23:29:29 +08:00
jxxghp
22f3244bf5 fix(agent): in streaming + verbose mode, send tool messages immediately when the channel does not support editing
When the channel does not support editing there is no periodic flush task, so content emitted into the buffer is never pushed.
Add a can_edit_message property to distinguish the two modes: channels that support editing keep emitting into the buffer;
channels that do not take the agent text out of the buffer and send it together with the tool message as a standalone message.
2026-04-09 23:26:39 +08:00
jxxghp
aafc4b3a39 fix(agent): start_streaming always marks the streaming state so buffer collection works
Even when the channel does not support message editing, streaming_enabled must still be set to True
so that verbose-mode tool calls can enter the streaming branch via is_streaming and send intermediate
agent text and tool messages. The only difference is that no periodic flush task is started.
2026-04-09 23:19:34 +08:00
jxxghp
18906e5ab2 Update __init__.py 2026-04-09 22:51:37 +08:00
jxxghp
9675d199f9 fix(agent): do not send any intermediate tool messages in non-streaming mode 2026-04-09 22:48:03 +08:00
jxxghp
78e8faa203 fix(agent): verbose mode still needs to send intermediate tool-call messages in non-streaming mode
With verbose mode on and a channel that does not support editing, is_streaming is False,
but astream still writes tokens into the buffer, so on tool calls the agent text
has to be taken out and sent together with the tool message
2026-04-09 22:23:00 +08:00
jxxghp
d5ed9bc654 fix(agent): simplify message handling for tool calls in non-streaming mode
Non-streaming mode runs via ainvoke and produces no streamed tokens,
so there is no need to touch stream_handler or send intermediate messages
2026-04-09 22:20:09 +08:00
jxxghp
770065d9ed feat(agent): improve agent streaming output and tool message dispatch
- Add a _should_stream() method that decides whether to enable streaming based on the runtime environment:
  disabled in background mode; enabled when the channel supports editing; also enabled when verbose mode is on
- Use a non-streaming LLM with ainvoke in non-streaming mode to avoid unnecessary streaming overhead
- In non-verbose mode, send no intermediate messages on tool calls (neither agent text nor tool hints); simply clear the buffer
2026-04-09 22:12:20 +08:00
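The decision in the first bullet of the commit above fits in a few lines. A sketch of that logic under the stated conditions (parameter names are illustrative):

```python
def should_stream(is_background: bool, channel_supports_edit: bool, verbose: bool) -> bool:
    """Decide whether to enable streaming output for an agent run:
    never in background mode; otherwise when the channel supports message
    editing, or when verbose mode is on."""
    if is_background:
        return False
    return channel_supports_edit or verbose
```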
jxxghp
abc4154e2c Update version.py 2026-04-09 13:46:34 +08:00
DDSRem
fd6c9d5d34 feat(plugin): aggregate plugin sidebar navigation
- PluginManager.get_plugin_sidebar_nav: collects enabled Vue plugins that implement get_sidebar_nav
- Add schemas.PluginSidebarNavItem and a verify_token-protected API endpoint
2026-04-09 08:03:30 +08:00
jxxghp
dc428e7de0 feat(skills): built-in skills support version management and overwrite old copies on update
Add a version field to the SKILL.md frontmatter; versions are compared during sync,
and when the built-in version is newer it directly overwrites the older copy in the user directory.
2026-04-09 07:17:04 +08:00
jxxghp
0c51d79be7 feat(agent): merge agent retry calls for transfer failures in the same batch to avoid wasting tokens
Failed records sharing the same download_hash or the same source directory are merged into a single agent call within a 5-minute buffer window;
during batch processing the media info is recognized once and reused for all files.
2026-04-09 07:16:56 +08:00
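Merging retries within a buffer window, as above, can be modeled as a small pending-batch map keyed by download_hash or source directory. A hedged sketch with illustrative names:

```python
import time
from collections import defaultdict
from typing import Dict, List

BUFFER_SECONDS = 5 * 60  # 5-minute merge window, per the commit message

class RetryBatcher:
    """Collect failed transfer records per key and flush them as one agent call."""

    def __init__(self) -> None:
        self._pending: Dict[str, List[dict]] = defaultdict(list)
        self._first_seen: Dict[str, float] = {}

    def add(self, key: str, record: dict) -> None:
        # key would be the download_hash or the source directory.
        self._pending[key].append(record)
        self._first_seen.setdefault(key, time.monotonic())

    def due(self) -> Dict[str, List[dict]]:
        """Return and clear batches whose buffer window has elapsed."""
        now = time.monotonic()
        ready = {k: v for k, v in self._pending.items()
                 if now - self._first_seen[k] >= BUFFER_SECONDS}
        for key in ready:
            del self._pending[key]
            del self._first_seen[key]
        return ready
```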
DDSRem
1b489ba581 feat(transfer): TransferOverwriteCheck lets plugins supply the real source file size
In strm → strm transfer scenarios the source .strm's fileitem.size is also inaccurate,
so size-mode comparison still breaks; add a source_size output field so plugins can
override the real media sizes of both source and target.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-04-08 17:28:24 +08:00
DDSRem
4d9f17b083 feat(transfer): add a TransferOverwriteCheck event so plugins can take part in overwrite decisions
Allow plugins, before the overwrite-mode check, to supply the target file's real size or directly return an overwrite decision,
fixing size-mode failures for .strm and other files whose local size is inaccurate.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-04-08 17:28:24 +08:00
jxxghp
3c7cd2186f When the subscribe-history query tool has a name filter, skip pagination and return all matching results 2026-04-08 07:58:54 +08:00
jxxghp
5acfd683b9 Agent tools support pagination and drop the result-count limit 2026-04-08 07:41:34 +08:00
jxxghp
6b01901a4a Update search_web.py 2026-04-08 07:29:30 +08:00
jxxghp
1ca54afd6c Update search_person.py 2026-04-08 07:27:29 +08:00
jxxghp
9c75c2d22e Update search_media.py 2026-04-08 07:26:54 +08:00
jxxghp
79ec3ed2c3 Update list_directory.py 2026-04-08 07:21:37 +08:00
jxxghp
7072d2cfe8 Update query_installed_plugins.py 2026-04-08 07:15:13 +08:00
jxxghp
c0c08b0b84 Update query_subscribe_history.py 2026-04-08 07:12:39 +08:00
jxxghp
01329195ee Update query_subscribes.py 2026-04-08 07:11:45 +08:00
jxxghp
ad40b99313 Update version.py 2026-04-07 13:24:13 +08:00
jxxghp
1e338e48ab fix(agent): filter intermediate-step thinking text based on langgraph_step and extract a ThinkTagStripper class
- Use langgraph_step from the metadata to detect intermediate steps before tool calls; in non-VERBOSE mode
  automatically reset and drop the model's planning/reasoning output (e.g. NEXT STEPS, tool-call descriptions)
- Extract the streaming <think> tag stripping logic into a standalone _ThinkTagStripper class to simplify the main flow
2026-04-07 12:42:46 +08:00
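Stripping <think> tags from a token stream, as extracted into _ThinkTagStripper above, has to tolerate tags split across chunks. A simplified stateful sketch (not the project's actual class):

```python
class ThinkTagStripper:
    """Remove <think>...</think> spans from streamed text, even when the tags
    are split across chunk boundaries."""

    OPEN, CLOSE = "<think>", "</think>"

    def __init__(self) -> None:
        self._buf = ""
        self._in_think = False

    def feed(self, chunk: str) -> str:
        self._buf += chunk
        out = []
        while True:
            tag = self.CLOSE if self._in_think else self.OPEN
            idx = self._buf.find(tag)
            if idx >= 0:
                if not self._in_think:
                    out.append(self._buf[:idx])
                self._buf = self._buf[idx + len(tag):]
                self._in_think = not self._in_think
                continue
            # No complete tag found: keep a small tail that might be a partial tag.
            keep = 0
            for size in range(min(len(tag) - 1, len(self._buf)), 0, -1):
                if tag.startswith(self._buf[-size:]):
                    keep = size
                    break
            if not self._in_think:
                out.append(self._buf[:len(self._buf) - keep] if keep else self._buf)
            self._buf = self._buf[len(self._buf) - keep:] if keep else ""
            return "".join(out)
```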
jxxghp
ac9c9598f4 feat(agent): add tools for querying and updating custom identifiers 2026-04-07 09:00:15 +08:00
jxxghp
02cb5dfc31 refactor(agent): optimize database-operation skill to read DB info from system prompt <system_info> 2026-04-07 07:37:39 +08:00
jxxghp
8109ffb445 feat(agent): add /stop_agent command for emergency stop of agent reasoning
Add /stop_agent command that cancels the currently running agent reasoning
task without clearing the session or memory. Unlike /clear_session which
destroys the entire session, this allows users to stop a long-running or
stuck agent process and continue the conversation afterward.
2026-04-07 07:32:35 +08:00
jxxghp
0ecbcb89fa Update SKILL.md 2026-04-07 07:16:18 +08:00
jxxghp
8f38c06424 feat(agent): add database-operation skill for SQL access with auto SQLite/PostgreSQL detection 2026-04-07 00:43:28 +08:00
jxxghp
902394f86e fix(agent): resolve circular import by lazy-importing Command in run_slash_command and list_slash_commands 2026-04-07 00:16:09 +08:00
jxxghp
9fefd807f9 refactor(agent): rename list_all_commands to list_slash_commands and skill to command-dispatch 2026-04-07 00:00:10 +08:00
jxxghp
a8fb4a6d84 refactor(agent): rename run_plugin_command to run_slash_command to avoid confusion with execute_command (shell) 2026-04-06 23:53:49 +08:00
jxxghp
7806267e92 feat(agent): add command-execute skill for intelligent command dispatch
- Enhance run_plugin_command tool to support all registered commands (system preset + plugin + other), not just plugin commands
- Add list_all_commands tool to discover all available commands with descriptions and categories
- Add command-execute skill that guides the agent to recognize user intent from natural language and match it to available system/plugin commands
2026-04-06 23:45:48 +08:00
Attente
eb5e17a115 test: add tests for media scraping paths, images, and event flow 2026-04-06 11:28:00 +08:00
Attente
2ae98d628d feat(subscribe): improve collection recognition for quality-upgrade subscriptions 2026-04-05 21:39:44 +08:00
EkkoG
8b9dc0e77f Fix the QQ bot channel still sending duplicate messages 2026-04-05 15:42:46 +08:00
Attente
2f151cea64 fix(helper): unify redis cache keys 2026-04-05 13:55:54 +08:00
jxxghp
b777e8cab1 Delete .DS_Store 2026-04-04 14:06:05 +08:00
jxxghp
663e37bd03 refactor: change SendMessageTool message_type to the message title 2026-04-04 07:42:36 +08:00
jxxghp
8960620883 Update __init__.py 2026-04-04 07:29:41 +08:00
jxxghp
5b892b3a63 fix: fix 400 errors from a missing thought_signature on Gemini 2.5 thinking-model tool calls
- The Google provider now always uses the native ChatGoogleGenerativeAI interface instead of the OpenAI-compatible endpoint
  (the OpenAI protocol has no thought_signature field, so thinking-model tool calls were guaranteed to fail)
- Pass proxy configuration via client_args, replacing the previous OpenAI-compatible endpoint + openai_proxy approach
- Patch langchain-google-genai's _is_gemini_3_or_later() to also cover Gemini 2.5 models
- Auto-adapt the httpx proxy argument name (proxies/proxy), fixing proxy settings being silently dropped
2026-04-04 07:24:47 +08:00
jxxghp
974d5f2f49 fix: fix Google model list fetching blocking the event loop and missing proxy configuration 2026-04-04 06:58:39 +08:00
DDSRem
f70881bb4f feat: add source_item source file info to the TransferRename event 2026-04-03 17:51:05 +08:00
jxxghp
376c65335f Update version.py 2026-04-03 13:49:38 +08:00
jxxghp
d7a5c32b08 feat: AI agent automatically retries failed transfers
- Add a delete_transfer_history tool so the agent can delete failed history records
- Add a transfer-failed-retry skill that guides the agent through the retry flow
- Add an AI_AGENT_RETRY_TRANSFER config option to control whether the feature is enabled
- Add AgentManager.retry_failed_transfer(), which creates an independent session to run the retry
- Automatically trigger an agent retry on transfer failure and unrecognized media
2026-04-03 13:33:27 +08:00
jxxghp
4cda182ccd fix: change logger warning to debug for empty Discord configs 2026-04-03 12:50:14 +08:00
DDSRem
60ac901c6c feat: add a source_path source file path parameter to the TransferRename event
Pass the source file path in the smart-rename event so plugins can access the original path of the file being organized while renaming.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-04-03 06:55:06 +08:00
DDSRem
388afa8d3c fix(meta): fix title recognition errors caused by wrongly dropping the leading bracket
When the leading bracket contains the full release name (e.g. [Movie.Name.2023.1080p.BluRay-GROUP]),
keep its content and strip only the brackets instead of removing it entirely; also fix the
_name_movie_words and _name_se_words lists being mistakenly used as regular expressions

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-04-03 06:54:11 +08:00
jxxghp
ec0915e488 fix: message sending issue after agent wake-up 2026-04-02 19:26:17 +08:00
jxxghp
244112be5c fix: message sending issue after agent wake-up 2026-04-02 19:23:40 +08:00
jxxghp
1f526adbe7 feat: add NotificationType for Agent messages 2026-04-02 19:13:05 +08:00
jxxghp
c4cfd70f7c Update version.py 2026-04-02 16:54:50 +08:00
DDSRem
c9149d1761 fix(system): add fuse mount keywords
Fixes https://github.com/jxxghp/MoviePilot/issues/5624
2026-04-02 08:53:28 +08:00
DDSRem
c68450fc7f refactor(telegram): pass disable_web_page_preview explicitly to avoid mutating kwargs under @retry
Pass disable_web_page_preview as an explicit argument to send_message instead of modifying the kwargs dict,
avoiding potential issues caused by the shared kwargs dict when @retry retries.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-04-02 08:53:10 +08:00
DDSRem
d9eb3295b0 fix(telegram): fix disable_web_page_preview being passed to unsupported methods, plus a UTF-16 offset issue
1. Pass disable_web_page_preview only on send_message, so send_photo/send_document no longer raise TypeError
2. In _embed_entity_links, convert Telegram's UTF-16 code-unit offsets into Python character offsets, fixing slicing errors when messages contain emoji or other non-BMP characters

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-04-02 08:53:10 +08:00
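Point 2 above is the classic Telegram entity-offset pitfall: Bot API offsets count UTF-16 code units, while Python slices count characters. A small conversion helper illustrating the fix (not the project's actual code):

```python
def utf16_offset_to_index(text: str, utf16_offset: int) -> int:
    """Convert a Telegram UTF-16 code-unit offset into a Python string index.
    Characters outside the BMP (e.g. most emoji) occupy two UTF-16 code units."""
    units = 0
    for index, ch in enumerate(text):
        if units >= utf16_offset:
            return index
        units += 2 if ord(ch) > 0xFFFF else 1
    return len(text)

# Example: for text = "🎬 Movie", the UTF-16 entity offset 3 maps to Python index 2,
# because the emoji alone occupies two code units.
```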
DDSRem
5440dbae51 feat(telegram): support disable_web_page_preview to turn off link previews
Add a disable_web_page_preview field to the Notification schema and pass it through to Telegram send_message;
plugins can disable link previews via post_message(disable_web_page_preview=True).
When the field is not set, behavior matches previous versions and is fully backward compatible.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-04-02 08:53:10 +08:00
DDSRem
321bf94de8 fix(telegram): forwarded channel messages not being received and losing content
message_handler only handled the text type by default, so forwarded media messages (videos, photos, etc.) were ignored;
the parser did not read the caption field, so the text of media messages was lost;
now also extract text_link entity URLs and reply_markup URL button info into the text.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-04-02 08:53:10 +08:00
jxxghp
84b938c0d2 fix: do not send tool-call messages in background mode 2026-03-31 18:25:55 +08:00
jxxghp
fc47382938 docs: add SKILL.md with instructions for restarting and upgrading MoviePilot 2026-03-30 18:14:49 +08:00
jxxghp
2e034f7990 Update version.py 2026-03-30 17:51:19 +08:00
jxxghp
e61299f748 refactor: remove redundant blank lines, tidy type imports, and convert methods to static methods 2026-03-30 17:07:45 +08:00
jxxghp
cbff2fed17 Add admin permission checks to agent tools: query sites, query installed plugins, query plugin capabilities, query site user data, and scrape metadata 2026-03-30 11:54:48 +08:00
jxxghp
9c51f73a72 feat(telegram): improve Telegram file download and base64 conversion logic, refactor message sending code
- Add TelegramModule.download_file_to_base64 to unify file download and base64 encoding
- Add a download_file method to the Telegram client to simplify the download flow
- Route message image downloads through the module method and remove redundant static methods
- Fix some parameter formatting and whitespace style for consistency
- Improve exception handling and code structure for long-message sending
2026-03-30 10:28:40 +08:00
jxxghp
70109635c7 feat(agent): integrate the Exa API for web search 2026-03-30 07:11:29 +08:00
jxxghp
8999c3a855 Remove the -thumb suffix from episode images so image names match the video file names 2026-03-29 12:06:42 +08:00
jxxghp
7bd775130e fix: fix the QQ channel key mapping 2026-03-29 10:48:23 +08:00
jxxghp
4bba7dbe76 fix: correct the QQ channel name to qq 2026-03-29 10:47:16 +08:00
jxxghp
0cab21b83c feat(agent): add a require_admin field to tools that need admin privileges
- ExecuteCommandTool: execute shell commands
- DeleteDownloadHistoryTool: delete download history
- EditFileTool: edit files
- WriteFileTool: write files
- TransferFileTool: transfer files
- UpdateSiteTool: update sites
- UpdateSiteCookieTool: update site cookies
- UpdateSubscribeTool: update subscriptions
- DeleteSubscribeTool: delete subscriptions
- DeleteDownloadTool: delete downloads
- ModifyDownloadTool: modify downloads
- RunSchedulerTool: run scheduled tasks
- RunWorkflowTool: run workflows
- RunPluginCommandTool: run plugin commands
- SendMessageTool: send messages
2026-03-29 10:46:35 +08:00
jxxghp
ca9cbc1160 fix(agent): fix the error caused by the non-existent MessageChannel.QQBot 2026-03-29 10:38:52 +08:00
jxxghp
02439f55a9 feat(agent): add permission control for tool execution
- Check user permissions before executing a tool
- Support validation against the channel admin list
- Support system administrator validation
- Support validation against channel-configured user IDs
2026-03-29 10:30:09 +08:00
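The permission checks listed above amount to a short gate evaluated before each tool runs, combined with the require_admin flag introduced in the commit just before it. A hedged sketch of that check order; all names are illustrative:

```python
from typing import Iterable, Optional

def can_execute(userid: Optional[str],
                require_admin: bool,
                channel_admins: Iterable[str],
                system_admins: Iterable[str],
                channel_userids: Iterable[str]) -> bool:
    """Return True if the user may run the tool: non-admin tools are open to
    any configured channel user, admin tools require an admin identity."""
    if userid is None:
        return False
    admins = set(channel_admins) | set(system_admins)
    if require_admin:
        return userid in admins
    return userid in admins or userid in set(channel_userids)
```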
jxxghp
2d358e376c refactor: remove redundant local imports 2026-03-29 09:59:22 +08:00
jxxghp
b349aa2693 feat(agent): support image message handling 2026-03-29 09:56:53 +08:00
jxxghp
e3fee39043 Inject the PostgreSQL database password into the agent prompt 2026-03-29 09:07:46 +08:00
jxxghp
a1a72df6c6 feat(telegram): keep the typing indicator active until the message is fully sent 2026-03-29 09:04:42 +08:00
jxxghp
cdf40a7046 feat(agent): add the PostgreSQL username to the database info 2026-03-29 08:05:40 +08:00
jxxghp
b9b19c9acc feat(agent): add database info to the system prompt 2026-03-29 08:05:01 +08:00
jxxghp
8c603baa43 Update version.py 2026-03-29 07:40:24 +08:00
jxxghp
a977948f2b Improve the agent prompt: use the current time instead of the date, inject the system installation directory, and emphasize concise replies 2026-03-29 07:21:11 +08:00
jxxghp
f70eaf9363 feat(agent): add a reset method to support in-place updates of streaming messages 2026-03-28 23:08:06 +08:00
jxxghp
bfea0174dd refactor: improve tool message sending logic 2026-03-28 22:38:20 +08:00
jxxghp
296d815e3e refactor: remove the async pop operation 2026-03-28 12:42:26 +08:00
jxxghp
c3b7a50642 refactor(prompt): improve MoviePilot system info injection and unify date and environment info display 2026-03-28 12:28:14 +08:00
jxxghp
8e0a9f94f6 feat(agent): inject MoviePilot configuration info into the system prompt 2026-03-28 11:03:02 +08:00
jxxghp
6806900436 refactor: Update agent package initialization and imports. 2026-03-28 07:58:47 +08:00
jxxghp
a8ecdc8206 refactor: Invert AI agent verbose mode condition and strengthen silence instructions for tool calls. 2026-03-27 22:01:08 +08:00
jxxghp
60e1e3c173 Merge remote-tracking branch 'origin/v2' into v2 2026-03-27 21:55:19 +08:00
jxxghp
f859d99d91 fix current_date 2026-03-27 21:55:09 +08:00
jxxghp
31640b780c Update __init__.py 2026-03-27 21:50:19 +08:00
jxxghp
aaeb4d2634 fix verbose_spec 2026-03-27 21:45:50 +08:00
jxxghp
75d4c0153c v2.9.21 2026-03-27 21:05:04 +08:00
jxxghp
8d7ff2bd1d feat(agent): add an AI_AGENT_VERBOSE switch to control tool-call progress replies and prompt output 2026-03-27 20:12:01 +08:00
jxxghp
c3e96ae73f Update version.py 2026-03-27 18:06:54 +08:00
developer-wlj
d8c86069f2 fix(agent): resolve the memory file reading encoding issue
- Explicitly specify UTF-8 encoding for file read operations
- Prevent character decoding errors caused by the platform default encoding
- Ensure consistent file content across platforms
2026-03-27 11:52:07 +08:00
jxxghp
a25c709927 Add an agent tool for deleting download history records 2026-03-27 11:50:46 +08:00
jxxghp
d7c62fb55a feat(agent): support streaming output for the Slack and Discord channels
- Add the MESSAGE_EDITING capability to Slack
- Add edit_message and send_direct_message methods to Slack
- Add edit_message and send_direct_message methods to Discord
- Change Discord send_msg to return a (bool, message_id) tuple to support streaming output
2026-03-27 07:02:50 +08:00
jxxghp
27cc559c86 Update memory.py 2026-03-26 22:33:03 +08:00
jxxghp
e7d14691df Improve the memory structure 2026-03-26 22:29:09 +08:00
jxxghp
20387a0085 Update version.py 2026-03-26 17:31:30 +08:00
jxxghp
740b0a1396 fix 2026-03-26 12:42:54 +08:00
jxxghp
7d0c790185 fix: agent filters model thinking/reasoning content and never outputs thinking to the user 2026-03-26 12:37:45 +08:00
jxxghp
a12147d0f5 style: adjust the default reply style to be concise and businesslike while keeping a light touch of playfulness and emoji 2026-03-26 07:45:08 +08:00
jxxghp
213a298813 feat: automatically guide the user to set preferences when memory is empty; make the default reply style simpler and more direct 2026-03-26 07:30:18 +08:00
DDSRem
1acf78342c feat: prefer tmdbid recognition; disambiguate movies and TV shows sharing an ID via metadata
When the name contains {tmdbid=xxx}, query TMDB directly by tmdbid first and no longer fall back to title search.
When the same tmdbid matches both a movie and a TV show, disambiguate automatically using title, year, type, and other metadata.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-26 06:45:17 +08:00
jxxghp
c85d3adb34 refactor: summarize activity logs with an LLM instead of truncating text 2026-03-26 03:48:16 +08:00
jxxghp
83bf59dd4d feat: add ActivityLogMiddleware to automatically record an activity log for every interaction and inject it into the system prompt 2026-03-26 03:32:20 +08:00
jxxghp
d5d6442e1d feat: add a moviepilot-api skill covering the full REST API; the skills middleware automatically syncs built-in skills to the user directory 2026-03-26 03:10:30 +08:00
jxxghp
a1fa469026 feat: add plugin-related agent tools (query plugins, query plugin capabilities, run plugin commands) 2026-03-26 02:45:03 +08:00
jxxghp
4b4b808b76 feat: automatically split overly long streaming messages into segments; manage message length limits as channel capabilities 2026-03-26 01:56:11 +08:00
jxxghp
a6f16dcf8f feat: queue messages within the same session for in-order processing; different sessions do not affect each other 2026-03-25 22:01:35 +08:00
jxxghp
c822782910 Update version.py 2026-03-25 18:35:09 +08:00
jxxghp
e598d5edc4 fix: AI_AGENT_JOB_INTERVAL defaults to 0 2026-03-25 18:28:44 +08:00
jxxghp
d38b6dfc0a fix: improve the heartbeat prompt so background tasks only generate a summary of execution results 2026-03-25 18:18:34 +08:00
jxxghp
0a4091d93c fix: background tasks run non-streaming and send only the model's final reply 2026-03-25 18:15:19 +08:00
jxxghp
0399ab73cf feat: background tasks (scheduled wake-ups) skip streaming output and broadcast only the final result 2026-03-25 17:10:48 +08:00
jxxghp
940cececf4 fix: fix the KeyError caused by the unreplaced markdown_spec placeholder in the system prompt when channel is empty 2026-03-25 13:17:41 +08:00
jxxghp
94c75eb1c7 feat: add scheduled task (Jobs) management and a heartbeat wake-up mechanism to the agent
- Add a JobsMiddleware that manages long-term and recurring tasks via a JOB.md file
- The agent can create one-off (once) and recurring tasks and automatically track their execution state
- Add a heartbeat wake-up mechanism: the scheduler periodically wakes the agent to check for and run pending tasks
- Add an AI_AGENT_JOB_INTERVAL config option to control the check interval, defaulting to 24 hours
- Each heartbeat uses an independent session and cleans up resources when finished
2026-03-25 13:02:20 +08:00
DDSRem
de4dbf283b feat: use the parent directory title for recognition when the file name is only an auxiliary Chinese label
When the file name (stem) is a pure-Chinese encode/subtitle auxiliary label (such as "简英双语特效") and the parent
directory contains a Latin title, clear the title in the file metadata and fill it from the parent directory title instead, avoiding recognition failures.

Add an infopath module to centralize the auxiliary-label detection logic and keyword regexes.
2026-03-25 09:27:52 +08:00
jxxghp
10807a6fb7 fix: build actions 2026-03-25 08:43:04 +08:00
jxxghp
04b8475761 ci: improve the release script to automatically generate a categorized changelog 2026-03-25 07:12:17 +08:00
jxxghp
e6e50d7f0a fix: fix agent streaming replies not being recorded in the database 2026-03-25 07:01:17 +08:00
jxxghp
94ed065344 fix: fix Path objects in agent skill metadata failing msgpack serialization 2026-03-24 23:48:45 +08:00
jxxghp
d94b5962b4 fix: fix the IsADirectoryError caused by mistakenly reading a directory during skill loading 2026-03-24 23:12:52 +08:00
jxxghp
dcca318733 feat: QuerySitesTool also returns a cookie field 2026-03-24 22:53:30 +08:00
jxxghp
4a789297fe Update version.py 2026-03-24 21:22:49 +08:00
jxxghp
1249929b6a feat: add an agent browser tool (browse_webpage) that drives a browser via Playwright for web page interaction 2026-03-24 21:06:41 +08:00
jxxghp
864af45f85 Merge pull request #5616 from DDSRem-Dev/package-fix 2026-03-24 20:44:55 +08:00
DDSRem
bd68bcfd27 fix: await async config reload handlers
Restore awaiting of async on_config_changed callbacks in ConfigReloadMixin to ensure config reload logic executes correctly.

Made-with: Cursor
2026-03-24 20:44:26 +08:00
jxxghp
17373bc0fe fix: improve agent message layout 2026-03-24 20:21:58 +08:00
DDSRem
4612d3cdde fix: unpin pip tooling in Docker build
Allow Docker builds to install the latest pip and pip-tools versions instead of constraining them, reducing maintenance overhead from version pin drift.

Made-with: Cursor
2026-03-24 19:59:32 +08:00
DDSRem
517300afe9 fix: clean typing issues and refresh runtime dependencies
Align endpoint/module type hints and config reload handling while updating base Python image and package pins to improve build/runtime compatibility.

Made-with: Cursor
2026-03-24 19:21:04 +08:00
jxxghp
3c7fdfec3c Update base.py 2026-03-24 19:14:34 +08:00
jxxghp
cfc8d26558 fix: fix the download-task query tool accessing a field that does not exist on TransferTorrent 2026-03-24 18:53:08 +08:00
jxxghp
1c16b8bfec feat: the download-task query tool supports filtering by tag 2026-03-24 18:45:47 +08:00
jxxghp
aae50004b1 feat: add a modify-download agent tool; the download-task query supports returning tags
- Add a modify_download agent tool that can change a download task's tags and start or pause it by hash
- Add a set_torrents_tag method to ChainBase and the three downloader modules
- Add a tags field to the DownloadingTorrent schema
- Populate tags when each downloader module builds DownloadingTorrent
- Add a tags field to the query_download_tasks tool output
2026-03-24 18:33:06 +08:00
jxxghp
4fbd2a7612 Merge pull request #5615 from Adraca/fix-v2-4939 2026-03-24 18:10:42 +08:00
Abhishek Khaparde
cede1a1100 fix: reset Telegram API URL to default when cleared
Co-authored-by: aider (deepseek/deepseek-chat) <aider@aider.chat>
2026-03-24 14:33:17 +05:30
jxxghp
5d3511cbc2 Update skills.py 2026-03-24 12:25:06 +08:00
jxxghp
a66e082a8c fix: change U115_APP_ID 2026-03-24 11:23:06 +08:00
jxxghp
2406438d1b docs: Add guidelines for creating new skills, including directory structure and SKILL.md format. 2026-03-24 09:20:02 +08:00
jxxghp
be42c78aca fix bug 2026-03-24 09:11:37 +08:00
jxxghp
78b8b30351 rollback aiopathlib 2026-03-24 09:06:44 +08:00
jxxghp
80e35fa938 feat(agent): support skills 2026-03-24 08:51:17 +08:00
jxxghp
e82494c444 feat(agent): support skills 2026-03-24 08:48:27 +08:00
jxxghp
309b7b8a77 feat: add an LLM_MAX_TOOLS config option to enable LLMToolSelectorMiddleware on demand 2026-03-23 23:45:32 +08:00
jxxghp
f2daa633b6 Update version.py 2026-03-23 23:11:19 +08:00
jxxghp
630d13ac52 fix: fix episode thumbnail file names; use <video file name>-thumb.xx instead of episode-thumb-xx 2026-03-23 23:05:25 +08:00
jxxghp
40c79b249b Update __init__.py 2026-03-23 22:35:08 +08:00
jxxghp
6f4df912d8 Update __init__.py 2026-03-23 22:33:18 +08:00
jxxghp
5744228a9d Update base.py 2026-03-23 22:31:27 +08:00
jxxghp
8c46ece44a Merge pull request #5612 from PKC278/v2 2026-03-23 22:21:32 +08:00
PKC278
4cbf1a886e fix: remove an incorrect await in AI agent initialization 2026-03-23 22:16:34 +08:00
jxxghp
17519d5a96 add TAVILY_API_KEYS 2026-03-23 22:13:44 +08:00
jxxghp
faa046eea4 Update __init__.py 2026-03-23 21:31:46 +08:00
jxxghp
873e3832b6 Update __init__.py 2026-03-23 20:18:05 +08:00
jxxghp
d4a15d3b53 Update version.py 2026-03-23 20:02:31 +08:00
jxxghp
6ca6a94631 Update base.py 2026-03-23 20:01:38 +08:00
jxxghp
61fced0df3 Merge pull request #5611 from wikrin/fix 2026-03-23 19:57:51 +08:00
jxxghp
b2f6ffddee Merge remote-tracking branch 'origin/v2' into v2 2026-03-23 19:52:34 +08:00
jxxghp
c85805b15d feat(agent): support streaming output when Telegram interacts with the agent 2026-03-23 19:52:26 +08:00
Attente
a0838ed9cd fix(media): fix single-episode image scraping 2026-03-23 19:41:48 +08:00
jxxghp
63bbec5db4 Update __init__.py 2026-03-23 19:18:25 +08:00
jxxghp
4bc67dc816 feat(agent): support streaming output when Telegram interacts with the agent 2026-03-23 19:13:51 +08:00
jxxghp
9620a06552 Merge branch 'v2' of https://github.com/jxxghp/MoviePilot into v2 2026-03-23 17:23:53 +08:00
jxxghp
9b00a5f3f1 refactor: Update agent stream processing to support 'v2' chunk format and prevent emitting empty content. 2026-03-23 17:23:44 +08:00
jxxghp
faa77be843 Merge pull request #5609 from PKC278/v2 2026-03-23 17:18:51 +08:00
PKC278
28f158c479 feat: refactor MediaChain initialization and adjust the loading order of the storage chain and scraping policy 2026-03-23 17:11:33 +08:00
PKC278
90c3afcfa4 feat: improve SKILL.md 2026-03-23 15:23:34 +08:00
jxxghp
565e10b6a5 add LLMToolSelectorMiddleware 2026-03-23 08:16:19 +08:00
jxxghp
773ed5e6f7 Merge pull request #5604 from PKC278/v2 2026-03-23 06:57:48 +08:00
PKC278
8351312b2b feat: improve SKILL.md
feat: update the MCP_HIDDEN_TOOLS list to hide file-related tools from MCP clients
2026-03-23 05:13:45 +08:00
Aqr-K
41f53d39a0 Fix agent memory (#5607) 2026-03-23 04:30:46 +08:00
Aqr-K
4873ffda84 fix: bug (#5605) 2026-03-23 03:34:55 +08:00
jxxghp
b79609bb8b Merge pull request #5603 from jxxghp/copilot/add-read-file-tool 2026-03-23 00:34:44 +08:00
copilot-swe-agent[bot]
bdcbb5cce6 feat: add read_file tool for agent with line range and 50KB size limit support
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
Agent-Logs-Url: https://github.com/jxxghp/MoviePilot/sessions/615dcf93-c017-4d3f-a96f-5cdad426b9a4
2026-03-22 16:32:19 +00:00
copilot-swe-agent[bot]
d1503f9df3 Initial plan 2026-03-22 16:29:07 +00:00
jxxghp
210c3234d2 Update version.py 2026-03-22 23:45:37 +08:00
jxxghp
c13abfdd0d fix: set the agent session lifetime to 1 day and control context size via automatic compaction 2026-03-22 23:41:47 +08:00
jxxghp
30b332ac7e feat: Introduce MemoryMiddleware and PatchToolCallsMiddleware to the agent, and add EditFileTool and WriteFileTool for file manipulation. 2026-03-22 23:35:34 +08:00
jxxghp
7e9c489aeb feat: Enhance agent persona to be cute, playful, and anthropomorphic with increased emoji usage and updated Chinese examples. 2026-03-22 22:07:05 +08:00
jxxghp
5739ca7f97 fix agent bug 2026-03-22 22:02:54 +08:00
jxxghp
e4451c7e6a fix: circular dependency between the qqbot and wechatbot modules 2026-03-22 21:49:57 +08:00
jxxghp
5cded77387 feat(agent): upgrade langchain to v1.0+ 2026-03-22 21:41:12 +08:00
jxxghp
ea4e0dd764 feat(agent): upgrade langchain to v1.0+ 2026-03-22 21:07:45 +08:00
jxxghp
f105357f96 Merge pull request #5602 from PKC278/v2 2026-03-22 07:08:41 +08:00
PKC278
bc2302baeb feat: improve query_library_exists and query_subscribes tool output; improve SKILL.md
fix(add_download): update the torrent_url and description field descriptions and remove the incorrect direct-link add feature
2026-03-22 01:47:38 +08:00
jxxghp
afcdefbbf3 Merge pull request #5597 from DDSRem-Dev/dev 2026-03-20 21:30:03 +08:00
jxxghp
3ad8557065 Merge pull request #5596 from Aqr-K/fix-restart 2026-03-20 21:29:45 +08:00
Aqr-K
e68d607c9b fix(docker): improve container startup, graceful shutdown, and the Nginx configuration 2026-03-20 20:28:16 +08:00
DDSRem
8e9cf67190 fix(workflows): operations per run 2026-03-20 19:13:05 +08:00
jxxghp
0cb6cd8761 Merge pull request #5594 from PKC278/v2 2026-03-20 12:02:45 +08:00
PKC278
17aa795b3e feat: the add_download tool supports multiple torrent_url values; improve download task handling and feedback 2026-03-20 11:47:27 +08:00
jxxghp
7d47096e6e Merge pull request #5592 from wikrin/refactor/scraping-switch-to-policy 2026-03-19 22:25:24 +08:00
Attente
48b59df11b refactor(media): introduce configurable scraping policies 2026-03-19 21:25:57 +08:00
jxxghp
a90a3b2445 Merge pull request #5589 from PKC278/v2 2026-03-18 21:33:33 +08:00
PKC278
d18b68d24a feat: add Douban ID support to some tools 2026-03-18 19:36:32 +08:00
PKC278
78c4ec8bfe feat: move part of the logic to the backend and simplify the script 2026-03-18 17:48:28 +08:00
PKC278
b50a3b9aae feat: unify tool inputs and outputs to movie or tv 2026-03-18 17:07:03 +08:00
PKC278
4f3eaa12d5 feat: update the download and search-result tool descriptions, add an optional parameter for displaying filter options, and improve SKILL.md 2026-03-18 16:30:31 +08:00
PKC278
cedb0f565c feat: improve tools and SKILL.md 2026-03-18 14:27:07 +08:00
PKC278
226432ec7f feat(mcp): add torrent filter workflow and moviepilot cli skill 2026-03-17 17:22:33 +08:00
jxxghp
d93ab0143c Merge pull request #5583 from lclrc/fix 2026-03-16 19:30:46 +08:00
lclrc
3d32d66ab1 Fixes: FetchRssParams in workflow 2026-03-16 15:32:04 +08:00
jxxghp
e814eed047 Update version.py 2026-03-15 10:09:17 +08:00
jxxghp
96395c1469 feat: harden plugin static file API security 2026-03-14 21:12:54 +08:00
jxxghp
6065c29891 Revert "Merge pull request #5573 from wikrin/refactor-static-methods-conversion"
This reverts commit b8fc20b981, reversing
changes made to e09cfc6704.
2026-03-14 18:21:31 +08:00
jxxghp
f38cb274e4 Revert "refactor(helper): convert LLMHelper and StorageHelper methods to static methods, removing instance calls"
This reverts commit 9f381b3c73.
2026-03-14 18:19:47 +08:00
jxxghp
7bfee87cbf Merge pull request #5577 from EkkoG/wechat_bot 2026-03-14 18:02:33 +08:00
jxxghp
2ce2a3754c Merge pull request #5576 from wikrin/refactor-static-methods 2026-03-14 17:59:48 +08:00
EkkoG
510476c214 feat(wechat): add WeChatBot class for intelligent bot integration and enhance WechatModule to support bot mode
- Introduced WeChatBot class for handling intelligent bot functionalities.
- Updated WechatModule to differentiate between traditional and bot modes using WECHAT_MODE configuration.
- Enhanced stop method in WechatModule to gracefully stop client instances.
- Added logic to skip traditional menu initialization for bot mode.
- Updated .gitignore to include .venv directory.
2026-03-14 16:17:39 +08:00
jxxghp
6cd071c84b Merge pull request #5575 from DDSRem-Dev/dev 2026-03-14 14:54:51 +08:00
DDSRem
406e17b3fa fix(docker): locale-gen zh_CN, set LD_PRELOAD in final stage only
fix 858da38680
2026-03-14 14:36:56 +08:00
jxxghp
dd184255ad Merge pull request #5574 from tejasae-afk/fix/set-a-timeout-on-alipan-http-calls 2026-03-14 13:25:09 +08:00
jxxghp
77a0b38081 Update alipan.py
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
2026-03-14 13:25:00 +08:00
Tejas Attarde
14c3d66ce6 perf(modules): set a timeout on alipan HTTP calls 2026-03-14 00:23:45 -04:00
jxxghp
858da38680 feat: Integrate jemalloc for improved memory allocation. 2026-03-14 12:00:12 +08:00
Attente
9f381b3c73 refactor(helper): convert LLMHelper and StorageHelper methods to static methods, removing instance calls 2026-03-14 10:29:37 +08:00
jxxghp
b8fc20b981 Merge pull request #5573 from wikrin/refactor-static-methods-conversion 2026-03-13 22:53:54 +08:00
Attente
b89825525a refactor(helper): convert DirectoryHelper, RuleHelper, and TorrentHelper methods to static methods
- Remove unnecessary instantiation and call the class methods directly
2026-03-13 19:50:10 +08:00
jxxghp
e09cfc6704 Merge pull request #5570 from Seed680/v2 2026-03-12 21:54:06 +08:00
jxxghp
0c9c303c60 Merge pull request #5569 from YuF-9468/fix-5567-match-season-episodes-call 2026-03-12 21:53:46 +08:00
noone
3156b43739 bugfix(meta): add video frame rate parsing support
- Add fps-related content to the custom format
2026-03-12 14:51:23 +08:00
YuF-9468
591aa990a6 fix(search): call match_season_episodes via class to avoid bound-arg conflict 2026-03-12 09:27:31 +08:00
jxxghp
3be29f36a7 Merge pull request #5564 from DDSRem-Dev/dev 2026-03-11 15:24:46 +08:00
DDSRem
7638db4c3b fix(plugin): return remoteEntry path without API prefix to avoid double prefix 404
- get_plugin_remote_entry returns /plugin/file/... (relative to API root)
- Frontend already prepends API base; adding API_V1_STR caused /api/v1/api/v1/...

Made-with: Cursor
2026-03-11 15:12:40 +08:00
DDSRem
0312a500a6 refactor(plugin): replace deprecated pkg_resources with importlib.metadata
- Use distributions() in __get_installed_packages for installed packages
- Use packaging.requirements.Requirement, drop pkg_resources dependency
- __standardize_pkg_name: normalize dots to underscores (PEP-style)
- Keep max version when multiple distributions exist for same package

Made-with: Cursor
2026-03-11 14:53:15 +08:00
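The pkg_resources replacement above maps onto the standard library fairly directly. A sketch of the two pieces mentioned in the commit, installed-package enumeration via importlib.metadata and requirement parsing via packaging (the dot-to-underscore normalization follows the commit's rule, not PEP 503):

```python
from importlib.metadata import distributions
from typing import Dict

from packaging.requirements import Requirement
from packaging.version import Version

def standardize_pkg_name(name: str) -> str:
    # Per the commit: normalize dots to underscores (lower-cased for matching).
    return name.lower().replace(".", "_").replace("-", "_")

def installed_packages() -> Dict[str, str]:
    """Map normalized package name -> version, keeping the highest version when
    several distributions of the same package are present."""
    found: Dict[str, str] = {}
    for dist in distributions():
        name = standardize_pkg_name(dist.metadata["Name"] or "")
        version = dist.version or "0"
        if name not in found or Version(version) > Version(found[name]):
            found[name] = version
    return found

def is_satisfied(requirement: str) -> bool:
    """Check one requirement line (e.g. 'requests>=2.31') against installed packages."""
    req = Requirement(requirement)
    version = installed_packages().get(standardize_pkg_name(req.name))
    return version is not None and (not req.specifier or req.specifier.contains(version))
```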
jxxghp
1a88b5355a Update requirements.in 2026-03-11 12:23:09 +08:00
jxxghp
3374773de5 Update version.py 2026-03-11 07:22:09 +08:00
jxxghp
872b5fe3da Merge pull request #5559 from xiaoQQya/develop 2026-03-10 21:01:57 +08:00
xiaoQQya
be15e9871c perf: improve compatibility when fetching user level and join date for the hhanclub site 2026-03-10 19:42:04 +08:00
jxxghp
024a6a253b Merge pull request #5531 from WongWang/feat-plugin-priority 2026-03-10 12:54:39 +08:00
jxxghp
1af662df7b Merge pull request #5558 from YuF-9468/fix/5483-history-reorganize-event 2026-03-09 22:36:17 +08:00
YuF-9468
b4f64eb593 fix: preserve download context when re-organizing from history 2026-03-09 19:33:49 +08:00
jxxghp
86aa86208c Merge pull request #5557 from eNkru/feature/panda-group 2026-03-09 15:24:07 +08:00
Howard Ju
018e814615 feat(panda): add release group for PandaPT 2026-03-09 20:21:18 +13:00
jxxghp
e4d6e5cfc7 Merge pull request #5556 from YuF-9468/fix/5554-plugin-remote-entry-prefix 2026-03-09 12:02:20 +08:00
YuF-9468
770cd77632 refactor(plugin): build remoteEntry path with posixpath.join 2026-03-09 11:53:28 +08:00
YuF-9468
9f1692b33d fix(plugin): prepend API prefix for plugin remoteEntry URL 2026-03-09 11:41:42 +08:00
jxxghp
6f63e0a5d7 feat: enhance Telegram module with new functionality and improvements. 2026-03-08 09:48:42 +08:00
jxxghp
6a90e2c796 fix ide warnings 2026-03-08 08:32:29 +08:00
jxxghp
23b90ff0f9 remove app.env 2026-03-08 08:25:07 +08:00
jxxghp
dc86af2fa4 Merge pull request #5552 from EkkoG/qqbot 2026-03-08 08:23:53 +08:00
EkkoG
425b822046 feat(qqbot): enhance message sending with Markdown support and image size detection
- Added `use_markdown` parameter to `send_proactive_c2c_message` and `send_proactive_group_message` for Markdown formatting.
- Implemented methods to escape Markdown characters and format messages accordingly.
- Introduced image size detection for Markdown image rendering.
- Updated message sending logic to fallback to plain text if Markdown is unsupported.
2026-03-07 23:51:30 +08:00
EkkoG
65c18b1d52 feat(qqbot): implement QQ Bot notification module with API and WebSocket support
- Added QQ Bot notification module to facilitate proactive message sending and message reception via Gateway.
- Implemented API functions for sending C2C and group messages.
- Established WebSocket client for real-time message handling.
- Updated requirements to include websocket-client dependency.
- Enhanced schemas to support QQ channel capabilities and notification configurations.
2026-03-07 23:21:07 +08:00
jxxghp
1bddf3daa7 Merge pull request #5550 from wumode/fix_openlist 2026-03-07 08:21:11 +08:00
wumode
600b6af876 fix(openlist): transfer queue blocking 2026-03-06 23:21:43 +08:00
jxxghp
4bdf16331d Merge pull request #5546 from ziwiwiz/fix-docker-proxy-unauthorized-access 2026-03-06 13:19:34 +08:00
ziwiwiz
87cbda0528 fix(docker): optimize docker proxy listener config for better network isolation 2026-03-06 01:33:18 +08:00
jxxghp
9897941bf9 Merge pull request #5544 from YuF-9468/fix-issue-5495-tnode-json-guard 2026-03-05 18:08:24 +08:00
YuF-9468
31938812d0 chore: add warning logs for invalid tnode seeding payload 2026-03-05 09:35:25 +08:00
YuF-9468
19d879d3f6 fix(parser): guard invalid tnode seeding json response 2026-03-05 09:21:16 +08:00
jxxghp
cc41036c63 Merge pull request #5537 from jxxghp/copilot/optimize-message-logic 2026-03-03 20:45:00 +08:00
copilot-swe-agent[bot]
a9f2b40529 test: extend media-title detection coverage and cleanup
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-03-03 12:20:54 +00:00
copilot-swe-agent[bot]
86000ea19a feat: improve user message media-title detection
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-03-03 12:14:25 +00:00
copilot-swe-agent[bot]
0422c3b9e7 Initial plan 2026-03-03 12:08:33 +00:00
jxxghp
64c8bd5b5a Merge pull request #5535 from Seed680/v2 2026-03-03 20:00:31 +08:00
jxxghp
a7eba2c5fc Merge pull request #5534 from YuF-9468/fix-workflow-rating-float 2026-03-03 19:59:04 +08:00
YuF-9468
2b7753e43e workflow: handle zero vote threshold explicitly 2026-03-03 15:41:27 +08:00
noone
47c1e5b5b8 Merge remote-tracking branch 'origin/v2' into v2 2026-03-03 14:31:24 +08:00
noone
14ee97def0 feat(meta): add video frame rate parsing support
- Add an fps attribute to the MetaBase base class for storing frame rate info
- Implement frame rate recognition and parsing in MetaVideo
- Add frame rate extraction to MetaAnime, consistent with MetaVideo
- Update test cases to verify correct frame rate parsing
- Add expected fps values to the metadata test data
2026-03-03 14:31:12 +08:00
Seed680
92e262f732 Merge branch 'jxxghp:v2' into v2 2026-03-03 14:13:07 +08:00
noone
c46880b701 feat(meta): add video frame rate parsing support
- Add an fps attribute to the MetaBase base class for storing frame rate info
- Implement frame rate recognition and parsing in MetaVideo
- Add frame rate extraction to MetaAnime, consistent with MetaVideo
- Update test cases to verify correct frame rate parsing
- Add expected fps values to the metadata test data
2026-03-03 14:12:06 +08:00
YuF-9468
473e9b9300 workflow: allow decimal rating in filter medias 2026-03-03 13:56:24 +08:00
Castell
28945ef153 refactor: wrap the duplicated media-recognition mode selection logic in download.py into a selector function 2026-03-03 01:58:49 +08:00
Castell
b6b5d9f9c4 refactor: wrap the duplicated media-recognition mode selection logic into a selector function 2026-03-03 01:33:44 +08:00
Castell
ba5de1ab31 fix: fix a missing await on an async function call 2026-03-03 00:37:55 +08:00
Castell
002ebeaade refactor: simplify the if/else structure in the media-recognition mode selection logic 2026-03-03 00:21:55 +08:00
Castell
894756000c feat: add the option to prefer plugin-based recognition 2026-03-02 20:58:10 +08:00
jxxghp
cdb178c503 Merge pull request #5530 from cddjr/bugfix/season-regex-capture-group 2026-03-02 12:07:37 +08:00
大虾
7c48cafc71 Update app/core/meta/metavideo.py
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
2026-03-02 11:47:47 +08:00
景大侠
74d4592238 fix(meta): fix the regex to correctly match the Sxx season format 2026-03-02 11:35:41 +08:00
jxxghp
0044dd104e Update version.py 2026-03-02 07:04:49 +08:00
jxxghp
05041e2eae Merge pull request #5526 from baozaodetudou/orv2 2026-03-01 12:02:49 +08:00
doumao
78908f216d Merge branch 'v2' of github.com:jxxghp/MoviePilot into orv2 2026-02-28 22:58:09 +08:00
doumao
efc68ae701 fix: the Ugreen API supports configurable SSL certificate verification 2026-02-28 22:55:47 +08:00
jxxghp
e9340a8b4b Merge pull request #5525 from baozaodetudou/orv2 2026-02-28 22:50:10 +08:00
逗猫
66e199d516 Merge pull request #1 from baozaodetudou/v2
perf: use a deque to improve Ugreen media library traversal queue performance
2026-02-28 22:15:39 +08:00
doumao
6151d8a787 perf: use a deque to improve Ugreen media library traversal queue performance 2026-02-28 22:13:54 +08:00
doumao
296261da8a feat: complete Ugreen media server integration and add scan modes and statistics display 2026-02-28 21:58:35 +08:00
doumao
383371dd6f Merge branch 'v2' of github.com:jxxghp/MoviePilot into orv2 2026-02-28 21:57:45 +08:00
jxxghp
bb8c026bda Merge pull request #5523 from YuF-9468/fix-issue-5508-manual-transfer-auto-type 2026-02-28 17:18:23 +08:00
doumao
344993dd6f Add Ugreen API encryption/decryption utilities and unit tests 2026-02-28 15:35:27 +08:00
YuF-bot
ffb048c314 refactor(transfer): narrow manual type parse exception to ValueError 2026-02-28 13:44:06 +08:00
jxxghp
3eef9b8faa Merge pull request #5522 from YuF-9468/fix-issue-5461-filemanager-test-optional-library 2026-02-28 13:31:09 +08:00
YuF-9468
5704bb646b fix(transfer): treat auto type as unspecified in manual transfer 2026-02-28 13:29:08 +08:00
YuF-9468
fbc684b3a7 fix(filemanager): skip library path check when transfer is disabled 2026-02-28 12:58:53 +08:00
jxxghp
6529b2a9c3 Merge pull request #5521 from YuF-9468/fix-issue-5463-agent-sites-list-parse 2026-02-28 12:31:54 +08:00
YuF-9468
a1701e2edf fix(agent): accept string-form sites list in search_torrents input 2026-02-28 12:30:12 +08:00
jxxghp
eba6391de7 Merge pull request #5520 from YuF-9468/fix-issue-5211-telegram-username-fallback 2026-02-28 12:17:33 +08:00
jxxghp
9f2c3c9688 Merge pull request #5517 from wumode/fix-progress-displaying 2026-02-28 12:16:13 +08:00
YuF-bot
57f5a19d0c fix(message): fallback Telegram username to string userid when absent 2026-02-28 11:10:15 +08:00
wumode
c8d53c6964 fix(ProgressHelper): progress displaying 2026-02-27 16:13:34 +08:00
jxxghp
643cda1abe Merge pull request #5516 from shawnlu96/fix/alipan-snapshot-monitoring 2026-02-27 07:07:31 +08:00
Shawn Lu
03d118a73a fix: 修复阿里云盘目录监控快照无法检测文件的问题
1. 为阿里云盘添加 ALIPAN_SNAPSHOT_CHECK_FOLDER_MODTIME 配置(默认 False)
   - 阿里云盘目录的 updated_at 不会随子文件变更而更新,导致增量快照
     始终跳过目录,快照结果为空
   - 与 Rclone/Alist 保持一致的配置模式

2. 移除 snapshot() 中文件级 modify_time 过滤
   - 原逻辑:仅包含 modify_time > last_snapshot_time 的文件
   - 问题:首次快照建立基准后,save_snapshot 将 timestamp 设为
     max(modify_times),后续快照中未变更的文件因 modify_time 不大于
     timestamp 而被排除,导致 compare_snapshots 无法检测到任何变化
   - 此外当 last_snapshot_time 为 None 时,比较会触发 TypeError
     并被静默捕获
   - 修复:始终包含所有遍历到的文件,由 compare_snapshots 负责变化检测
     目录级优化仍由 snapshot_check_folder_modtime 控制

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-26 23:43:21 +08:00
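A minimal sketch of the snapshot behaviour this commit describes: include every traversed file and leave change detection to compare_snapshots, with the folder-modtime optimisation behind a flag. The `storage.list` API and item attribute names here are hypothetical:

```python
from typing import Dict, Optional

def snapshot(storage, root: str, check_folder_modtime: bool = False,
             last_snapshot_time: Optional[float] = None) -> Dict[str, float]:
    """Walk the storage tree and record every file's modify time.

    Files are no longer filtered by modify_time here; compare_snapshots()
    is responsible for detecting additions and changes between snapshots.
    """
    result: Dict[str, float] = {}
    stack = [root]
    while stack:
        folder = stack.pop()
        for item in storage.list(folder):  # hypothetical storage API
            if item.is_dir:
                # Optional folder-level optimisation; off by default for Alipan,
                # whose folder updated_at does not change with child files.
                if check_folder_modtime and last_snapshot_time \
                        and item.modify_time <= last_snapshot_time:
                    continue
                stack.append(item.path)
            else:
                result[item.path] = item.modify_time or 0
    return result
```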
jxxghp
51dd7f5c17 Merge pull request #5512 from cddjr/bugfix/issue-5501 2026-02-25 21:13:35 +08:00
jxxghp
af7e1e7a3c Merge pull request #5509 from xiaoQQya/develop 2026-02-25 21:13:00 +08:00
大虾
ea5d855bc3 Update app/helper/directory.py
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
2026-02-25 20:21:37 +08:00
景大侠
5f74367cd6 fix: 修复电视剧刮削问题 2026-02-25 20:18:05 +08:00
jxxghp
26e41e1c14 更新 version.py 2026-02-24 19:25:20 +08:00
xiaoQQya
1bb2b50043 fix: 修复站点 hhanclub 用户等级与加入时间不显示的问题 2026-02-23 21:44:23 +08:00
jxxghp
7bdb629f03 Merge pull request #5505 from DDSRem-Dev/rtorrent 2026-02-22 16:10:39 +08:00
jxxghp
fd92f986da Merge pull request #5504 from DDSRem-Dev/fix_smb_alipan 2026-02-22 16:10:08 +08:00
DDSRem
69a1207102 chore(rtorrent): formatting code 2026-02-22 13:42:27 +08:00
DDSRem
def652c768 fix(rtorrent): address code review feedback
- Replace direct _proxy access in transfer_completed with set_torrents_tag(overwrite=True) for proper encapsulation and error logging
- Optimize episode collection by using set accumulation instead of repeated list-set conversions in loop
- Fix type hint for hashs parameter in transfer_completed (str -> Union[str, list])
- Add overwrite parameter to set_torrents_tag to support tag replacement

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-22 13:40:15 +08:00
DDSRem
c35faf5356 feat(downloader): add rTorrent downloader support
Implement rTorrent downloader module via XML-RPC protocol, supporting both HTTP (nginx/ruTorrent proxy) and SCGI connection modes. Add RtorrentModule implementing _ModuleBase and _DownloaderBase interfaces with no extra dependencies.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-22 13:12:22 +08:00
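A minimal sketch of talking to rTorrent over HTTP XML-RPC with only the standard library, assuming a ruTorrent/nginx endpoint at `RPC_URL` and the newer `d.*` method naming; the SCGI mode mentioned in the commit would need a custom transport and is omitted here:

```python
import xmlrpc.client

# Assumed nginx/ruTorrent endpoint exposing rTorrent's XML-RPC interface over HTTP.
RPC_URL = "http://localhost:8080/RPC2"

def list_torrents(url: str = RPC_URL):
    """Return (hash, name, completed) tuples for torrents in the 'main' view."""
    client = xmlrpc.client.ServerProxy(url)
    # Newer rTorrent XML-RPC calls take a target argument first (empty string here).
    hashes = client.download_list("", "main")
    return [
        (h, client.d.name(h), bool(client.d.complete(h)))
        for h in hashes
    ]
```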
jxxghp
0615a33206 Merge pull request #5503 from DDSRem-Dev/fix_u115 2026-02-22 13:00:16 +08:00
DDSRem
e77530bdc5 fix(storages): download directory concatenation error 2026-02-22 12:35:27 +08:00
DDSRem
8c62df63cc fix(u115): download directory concatenation error
fix: https://github.com/jxxghp/MoviePilot/issues/5429
2026-02-22 12:22:58 +08:00
jxxghp
bd36eade77 Merge pull request #5502 from DDSRem-Dev/dev 2026-02-22 12:17:33 +08:00
DDSRem
d2c023081a fix(openList): openList file upload and retrieval errors
fix https://github.com/jxxghp/MoviePilot/issues/5369
fix https://github.com/jxxghp/MoviePilot/issues/5038
2026-02-22 12:05:14 +08:00
jxxghp
63d0850b38 Merge pull request #5498 from cddjr/feat/recommend_manual_force_refresh 2026-02-13 18:39:21 +08:00
景大侠
c86659428f feat(recommend): 手动执行推荐缓存服务时强刷数据 2026-02-13 18:17:42 +08:00
jxxghp
bf7cc6caf0 Merge pull request #5497 from cddjr/bugfix/glitchtip_9684 2026-02-13 17:09:04 +08:00
jxxghp
26b8be6041 Merge pull request #5496 from cddjr/bugfix/issue_5456 2026-02-13 17:08:21 +08:00
景大侠
f978f9196f fix(transfer): 修复移动模式下过早删除种子的问题
- 撤回提交 4502a9c 的部分改动
2026-02-13 13:28:05 +08:00
景大侠
75cb8d2a3c fix(torrents): 修复刷新站点资源时因缺失种子链接导致的 'Failed to exists key: None' 错误 2026-02-12 17:45:15 +08:00
jxxghp
17a21ed707 更新 version.py 2026-02-12 07:09:45 +08:00
jxxghp
f390647139 fix(site): 更新站点信息时同步更新domain域名 2026-02-12 06:59:13 +08:00
jxxghp
aacd91e196 Merge pull request #5487 from cddjr/bugfix/issue_5242 2026-02-11 16:02:54 +08:00
景大侠
258171c9c4 fix(telegram): 修复通知标题含特殊符号时异常显示**符号 2026-02-11 09:20:50 +08:00
jxxghp
812c5873aa Merge pull request #5486 from cddjr/feat/shared-sync-async-cache 2026-02-10 22:11:42 +08:00
景大侠
4c3d47f1f0 feat(cache): 同步/异步函数可共享缓存
- 缓存键支持自定义命名,使异步与同步函数可共享缓存结果
- 内存缓存改为类变量,实现多个cache装饰器共享同一缓存空间
- 重构AsyncMemoryBackend,减少重复代码
- 补齐部分模块的缓存清理功能
2026-02-10 18:46:49 +08:00
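A minimal sketch of the sync/async shared-cache idea described above: an explicit cache key plus a class-level store lets a sync function and its async counterpart reuse the same result. The `MemoryCache`/`cached` names are illustrative, not the project's actual decorator:

```python
import asyncio
import functools
import time

class MemoryCache:
    # Class-level store so every decorator instance shares one cache space.
    _store: dict = {}

    @classmethod
    def get(cls, key):
        value = cls._store.get(key)
        return value[0] if value and value[1] > time.time() else None

    @classmethod
    def set(cls, key, value, ttl=300):
        cls._store[key] = (value, time.time() + ttl)

def cached(key: str, ttl: int = 300):
    """Cache by an explicit key so sync and async variants can share results."""
    def decorator(func):
        if asyncio.iscoroutinefunction(func):
            @functools.wraps(func)
            async def async_wrapper(*args, **kwargs):
                hit = MemoryCache.get(key)
                if hit is not None:
                    return hit
                result = await func(*args, **kwargs)
                MemoryCache.set(key, result, ttl)
                return result
            return async_wrapper

        @functools.wraps(func)
        def sync_wrapper(*args, **kwargs):
            hit = MemoryCache.get(key)
            if hit is not None:
                return hit
            result = func(*args, **kwargs)
            MemoryCache.set(key, result, ttl)
            return result
        return sync_wrapper
    return decorator
```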
jxxghp
ba7b6ba869 Merge pull request #5485 from yubanmeiqin9048/patch-2 2026-02-10 17:41:51 +08:00
yubanmeiqin9048
d0471ae512 fix: 修复目标目录无视频文件时转移字幕和音频触发目录删除 2026-02-10 14:10:42 +08:00
jxxghp
636c4be9fb 更新 version.py 2026-02-07 08:13:43 +08:00
jxxghp
6bec765a9d Merge pull request #5474 from jxxghp/copilot/optimize-file-move-implementation 2026-02-06 22:20:11 +08:00
copilot-swe-agent[bot]
d61d16ccc4 Restore the optimization - accidentally reverted in previous commit
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-02-06 14:15:29 +00:00
copilot-swe-agent[bot]
f2a5715b24 Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com> 2026-02-06 14:11:15 +00:00
copilot-swe-agent[bot]
c064c3781f Optimize SystemUtils.move to avoid triggering directory monitoring
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-02-06 14:03:03 +00:00
copilot-swe-agent[bot]
bb4dffe2a4 Initial plan 2026-02-06 13:59:59 +00:00
jxxghp
37cf3eeef3 Merge pull request #5473 from cddjr/feat_transfer_files_filter 2026-02-06 21:04:52 +08:00
景大侠
40395b2999 feat: 在构造待整理文件列表时引入过滤逻辑以简化后续处理 2026-02-06 20:56:26 +08:00
景大侠
32afe6445f fix: 整理成功事件缺少历史记录ID 2026-02-06 20:33:13 +08:00
jxxghp
793a991913 Merge remote-tracking branch 'origin/v2' into v2 2026-02-05 14:16:55 +08:00
jxxghp
d278224ff1 fix:优化第三方插件存储类型的检测提示 2026-02-05 14:16:50 +08:00
jxxghp
9b4d0ce6a8 Merge pull request #5466 from DDSRem-Dev/dev 2026-02-05 06:56:25 +08:00
DDSRem
a1829fe590 feat: u115 global rate limiting strategy 2026-02-04 23:24:14 +08:00
jxxghp
2b2b39365c Merge pull request #5464 from ChanningHe/enhance/discord 2026-02-04 18:08:38 +08:00
ChanningHe
1147930f3f fix: [slack&discord&telegram] handle special characters in config names 2026-02-04 14:09:40 +09:00
ChanningHe
636f338ed7 enhance: [discord] add _user_chat_mapping to chat in channel 2026-02-04 13:42:33 +09:00
ChanningHe
72365d00b4 enhance: discord debug information 2026-02-04 12:54:17 +09:00
jxxghp
19d8086732 Merge pull request #5460 from cddjr/fix_download_hash_overridden 2026-02-03 21:23:04 +08:00
大虾
30488418e5 修复 整理时download_hash参数被覆盖
导致后续文件均识别成同一个媒体信息
2026-02-03 18:59:32 +08:00
jxxghp
2f0badd74a Merge pull request #5457 from cddjr/fix_5449 2026-02-02 23:45:07 +08:00
jxxghp
6045b0579b Merge pull request #5455 from cddjr/fix_transfer_result_incorrect 2026-02-02 23:44:32 +08:00
景大侠
498f1fec74 修复 整理视频可能导致误删字幕及音轨 2026-02-02 23:18:46 +08:00
景大侠
f6a541f2b9 修复 覆盖整理失败时误报成功 2026-02-02 21:50:35 +08:00
jxxghp
8ce78eabca 更新 version.py 2026-02-02 18:44:30 +08:00
jxxghp
2c34c5309f Merge pull request #5454 from CHANTXU64/v2 2026-02-02 18:02:45 +08:00
jxxghp
77e680168a Merge pull request #5452 from 0honus0/v2 2026-02-02 17:22:00 +08:00
jxxghp
8a7e59742f Merge pull request #5451 from cddjr/fix_specials_season 2026-02-02 17:21:29 +08:00
jxxghp
42bac14770 Merge pull request #5450 from CHANTXU64/v2 2026-02-02 17:20:40 +08:00
CHANTXU64
8323834483 feat: 优化RSS订阅和网页抓取中发布日期(PubDate)的获取兼容性
- app/helper/rss.py: 优化RSS解析,支持带命名空间的日期标签(如 pubDate/published/updated)。
- app/modules/indexer/spider/__init__.py: 优化网页抓取,增加日期格式校验并对非标准格式进行自动归一化。
2026-02-02 16:52:04 +08:00
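A minimal sketch of namespace-tolerant publish-date parsing as described above; `parse_pubdate` and `_DATE_TAGS` are illustrative names, not the rss.py implementation:

```python
from datetime import datetime
from email.utils import parsedate_to_datetime
from xml.etree import ElementTree as ET

# Tags that may carry a publish date, possibly namespaced (e.g. Atom's <published>/<updated>).
_DATE_TAGS = ("pubDate", "published", "updated")

def parse_pubdate(item: ET.Element):
    """Return the first parseable publish date found on an RSS/Atom item."""
    for el in item.iter():
        tag = el.tag.rsplit("}", 1)[-1]  # strip any XML namespace prefix
        if tag in _DATE_TAGS and el.text:
            text = el.text.strip()
            try:
                return parsedate_to_datetime(text)  # RFC 822, typical RSS
            except (TypeError, ValueError):
                try:
                    # ISO 8601, typical Atom
                    return datetime.fromisoformat(text.replace("Z", "+00:00"))
                except ValueError:
                    continue
    return None
```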
景大侠
1751caef62 fix: 补充几处season的判空 2026-02-02 15:01:12 +08:00
0honus0
d622d1474d 根据意见增加尾部逗号 2026-02-02 07:00:57 +00:00
0honus0
f28be2e7de 增加登录按钮xpath支持nicept网站 2026-02-02 06:52:48 +00:00
jxxghp
17773913ae fix: 统一了数据库查询中 season 参数的非空判断逻辑,以正确处理 season=0 的情况。 2026-02-02 14:23:51 +08:00
jxxghp
d469c2d3f9 refactor: 统一将布尔判断 `if var:`/`if not var:` 更改为显式的 `if var is not None:`/`if var is None:`,以正确处理 None 值。 2026-02-02 13:49:32 +08:00
CHANTXU64
4e74d32882 Fix: TMDB 剧集详情页不显示第 0 季(特别篇) #5444 2026-02-02 10:28:22 +08:00
jxxghp
7b8cd37a9b feat(transfer): enhance job removal methods for thread safety and strict checks 2026-02-01 16:58:32 +08:00
jxxghp
eda306d726 Merge pull request #5448 from cddjr/feat_japanese_subtitles 2026-02-01 16:25:56 +08:00
景大侠
94f3b1fe84 feat: 支持整理日语字幕 2026-02-01 16:04:22 +08:00
jxxghp
c50e3ba293 Merge pull request #5445 from jxxghp/copilot/analyze-task-loss-reason 2026-02-01 08:42:17 +08:00
copilot-swe-agent[bot]
eff7818912 Improve documentation and fix validation bug in add_task
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-01-31 16:44:01 +00:00
copilot-swe-agent[bot]
270bcff8f3 Fix task loss issue in do_transfer multi-threading batch adding
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-01-31 16:38:55 +00:00
copilot-swe-agent[bot]
e04963c2dc Initial plan 2026-01-31 16:33:59 +00:00
jxxghp
f369967c91 更新 version.py 2026-01-29 22:32:03 +08:00
jxxghp
cd982c5526 Merge pull request #5439 from DDSRem-Dev/dev 2026-01-29 22:30:28 +08:00
jxxghp
16e03c9d37 Merge pull request #5438 from cddjr/fix_scrape_follow_tmdb 2026-01-29 22:29:06 +08:00
DDSRem
d38b1f5364 feat: u115 support oauth 2026-01-29 22:14:10 +08:00
景大侠
f57ba4d05e 修复 整理时可能误跟随TMDB变化的问题 2026-01-29 15:04:42 +08:00
jxxghp
172eeaafcf 更新 version.py 2026-01-27 18:07:55 +08:00
jxxghp
3115ed28b2 fix: 历史记录删除源文件后,不在订阅的文件列表中显示 2026-01-26 21:47:26 +08:00
jxxghp
d8dc53805c feat(transfer): 整理事件增加历史记录ID 2026-01-26 21:29:05 +08:00
jxxghp
7218d10e1b feat(transfer): 拆分字幕和音频整理事件 2026-01-26 19:33:50 +08:00
jxxghp
89bf85f501 Merge pull request #5425 from xiaoQQya/develop 2026-01-26 18:41:42 +08:00
jxxghp
8334a468d0 feat(category): Add API endpoints for retrieving and saving category configuration 2026-01-26 12:53:26 +08:00
jxxghp
3da80ed077 Merge pull request #5423 from jxxghp/copilot/update-category-helper-integration 2026-01-26 12:35:05 +08:00
copilot-swe-agent[bot]
2883ccbe87 Move category methods to ChainBase and use consistent naming
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-01-26 04:32:11 +00:00
copilot-swe-agent[bot]
5d3443fee4 Use ruamel.yaml consistently in CategoryHelper
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-01-26 04:10:15 +00:00
copilot-swe-agent[bot]
27756a53db Implement proper architecture: module->chain->API with single CategoryHelper
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-01-26 04:07:56 +00:00
copilot-swe-agent[bot]
71cde6661d Improve comments for clarity
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-01-25 10:08:13 +00:00
copilot-swe-agent[bot]
a857337b31 Fix architecture - restore helper layer and use ModuleManager for reload trigger
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-01-25 10:06:01 +00:00
copilot-swe-agent[bot]
4ee21ffae4 Address code review feedback - use ruamel.yaml consistently and fix typo
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-01-25 09:58:28 +00:00
copilot-swe-agent[bot]
d8399f7e85 Consolidate CategoryHelper classes and add reload trigger
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-01-25 09:56:11 +00:00
copilot-swe-agent[bot]
574ac8d32f Initial plan 2026-01-25 09:52:31 +00:00
jxxghp
a2611bfa7d feat: Add search_imdbid to subscriptions and improve error message propagation and handling for existing subscriptions. 2026-01-25 14:57:46 +08:00
xiaoQQya
853badb76f fix: 更新站点 Rousi Pro 获取未读消息接口 2026-01-25 14:36:22 +08:00
jxxghp
5d69e1d2a5 Merge pull request #5419 from wikrin/subscribe-source-query-enhancement 2026-01-25 14:04:42 +08:00
jxxghp
6494f28bdb Fix: Remove isolated ToolMessage instances after message trimming to prevent OpenAI errors. 2026-01-25 13:42:29 +08:00
Attente
f55916bda2 feat(transfer): 支持按条件查询订阅获取自定义识别词用于文件转移 2026-01-25 11:34:03 +08:00
jxxghp
04691ee197 Merge remote-tracking branch 'origin/v2' into v2 2026-01-25 09:39:59 +08:00
jxxghp
2ac0e564e1 feat(category):新增二级分类维护API 2026-01-25 09:39:48 +08:00
jxxghp
6072a29a20 Merge pull request #5418 from wikrin/CNSUB-filter-rules-update 2026-01-25 08:17:20 +08:00
Attente
8658942385 feat(filter): 添加配置监听和改进中字过滤规则 2026-01-25 01:06:50 +08:00
jxxghp
cc4859950c Merge remote-tracking branch 'origin/v2' into v2 2026-01-24 19:24:22 +08:00
jxxghp
23b81ad6f1 feat(config):完善默认插件库 2026-01-24 19:24:15 +08:00
jxxghp
e3b9dca5c0 Merge pull request #5417 from cddjr/fix_u115_create_folder
fix(u115): 创建目录误报失败
2026-01-24 19:14:40 +08:00
景大侠
a2359a1ad2 fix(u115): 创建目录误报失败
- 解析响应时忽略20004错误码
- 根目录创建目录会报错ValueError
2026-01-24 17:48:53 +08:00
jxxghp
cb875b1b34 更新 version.py 2026-01-24 12:04:54 +08:00
jxxghp
b92a85b4bc Merge pull request #5415 from cddjr/fix_bluray_scrape 2026-01-24 11:43:44 +08:00
景大侠
8c7dd6bab2 修复 原盘目录不刮削 2026-01-24 11:42:00 +08:00
景大侠
aad7df64d7 简化原盘大小计算代码 2026-01-24 11:29:30 +08:00
jxxghp
8474342007 feat(agent):上下文超长时自动摘要 2026-01-24 11:24:59 +08:00
jxxghp
61ccb4be65 feat(agent): 新增命令行工具 2026-01-24 11:10:15 +08:00
jxxghp
1c6f69707c fix 增加模块异常traceback打印 2026-01-24 11:00:24 +08:00
jxxghp
e08e8c482a Merge pull request #5414 from jxxghp/copilot/fix-file-organization-error 2026-01-24 10:49:19 +08:00
copilot-swe-agent[bot]
548c1d2cab Add null check for schema access in IndexerModule
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-01-24 02:26:55 +00:00
copilot-swe-agent[bot]
5a071bf3d1 Add null check for schema.value access in FileManagerModule
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-01-24 02:25:55 +00:00
copilot-swe-agent[bot]
1bffcbd947 Initial plan 2026-01-24 02:22:25 +00:00
jxxghp
274a36a83a 更新 config.py 2026-01-24 10:04:37 +08:00
jxxghp
ec40f36114 fix(agent):修复智能体工具调用,优化媒体库查询工具 2026-01-24 09:46:19 +08:00
jxxghp
af19f274a7 Merge pull request #5413 from jxxghp/copilot/fix-runnable-lambda-error 2026-01-24 08:38:24 +08:00
copilot-swe-agent[bot]
2316004194 Fix 'RunnableLambda' object is not callable error by wrapping validated_trimmer
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-01-24 00:35:59 +00:00
copilot-swe-agent[bot]
98762198ef Initial plan 2026-01-24 00:33:35 +00:00
jxxghp
1469de22a4 Merge pull request #5412 from jxxghp/copilot/translate-comments-to-chinese 2026-01-24 08:27:11 +08:00
copilot-swe-agent[bot]
1e687f960a Translate English comments to Chinese in agent/__init__.py
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-01-24 00:25:21 +00:00
copilot-swe-agent[bot]
7f01b835fd Initial plan 2026-01-24 00:22:19 +00:00
jxxghp
e46b6c5c01 Merge pull request #5411 from jxxghp/copilot/fix-tool-call-exception-handling 2026-01-24 08:20:51 +08:00
copilot-swe-agent[bot]
74226ad8df Improve error message to include exception type for better debugging
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-01-24 00:18:43 +00:00
copilot-swe-agent[bot]
f8ae7be539 Fix: Ensure tool exceptions are stored in memory to maintain message chain integrity
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-01-24 00:18:06 +00:00
copilot-swe-agent[bot]
37b16e380d Initial plan 2026-01-24 00:14:13 +00:00
jxxghp
9ea3e9f652 Merge pull request #5409 from jxxghp/copilot/fix-agent-execution-error 2026-01-24 08:12:39 +08:00
copilot-swe-agent[bot]
54422b5181 Final refinements: fix falsy value handling and add warning for extra ToolMessages
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-01-24 00:10:00 +00:00
copilot-swe-agent[bot]
712995dcf3 Address code review feedback: fix ToolCall handling and add orphaned message filtering
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-01-24 00:08:25 +00:00
jxxghp
c2767b0fd6 Merge pull request #5410 from jxxghp/copilot/fix-media-exists-error 2026-01-24 08:08:03 +08:00
copilot-swe-agent[bot]
179cc61f65 Fix tool call integrity validation to skip orphaned ToolMessages
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-01-24 00:05:21 +00:00
copilot-swe-agent[bot]
f3b910d55a Fix AttributeError when mediainfo.type is None
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-01-24 00:04:02 +00:00
copilot-swe-agent[bot]
f4157b52ea Fix agent tool_calls integrity validation
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-01-24 00:02:47 +00:00
copilot-swe-agent[bot]
79710310ce Initial plan 2026-01-24 00:00:31 +00:00
copilot-swe-agent[bot]
3412498438 Initial plan 2026-01-23 23:57:27 +00:00
jxxghp
b896b07a08 fix search_web tool 2026-01-24 07:39:07 +08:00
jxxghp
379bff0622 Merge pull request #5407 from cddjr/fix_db 2026-01-24 06:45:54 +08:00
jxxghp
474f47aa9f Merge pull request #5406 from cddjr/fix_transfer 2026-01-24 06:45:10 +08:00
jxxghp
f1e26a4133 Merge pull request #5405 from cddjr/fix_modify_time_comparison 2026-01-24 06:44:05 +08:00
jxxghp
e37f881207 Merge pull request #5404 from jxxghp/copilot/reimplement-network-search-tool 2026-01-24 06:39:56 +08:00
大虾
306c0b707b Update database/versions/41ef1dd7467c_2_2_2.py
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
2026-01-24 02:53:14 +08:00
景大侠
08c448ee30 修复 迁移PG后可能卡启动的问题 2026-01-24 02:49:54 +08:00
景大侠
1532014067 修复 多下载器返回相同种子造成的重复整理 2026-01-24 01:41:48 +08:00
景大侠
fa9f604af9 修复 入库通知不显示集数
因过早清理作业导致
2026-01-24 01:17:23 +08:00
景大侠
3b3d0d6539 修复 文件列表接口中空值时间戳的比较逻辑 2026-01-23 23:52:43 +08:00
copilot-swe-agent[bot]
9641d33040 Fix generator handling and update error message to reference requirements.in
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-01-23 15:23:52 +00:00
copilot-swe-agent[bot]
eca339d107 Address code review comments: improve code organization and use modern asyncio
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-01-23 15:22:45 +00:00
copilot-swe-agent[bot]
ca18705d88 Reimplemented SearchWebTool using duckduckgo-search library
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-01-23 15:20:06 +00:00
copilot-swe-agent[bot]
8f17b52466 Initial plan 2026-01-23 15:16:09 +00:00
jxxghp
8cf84e722b fix agent error message 2026-01-23 22:50:59 +08:00
jxxghp
7c4d736b54 feat:Agent上下文裁剪 2026-01-23 22:47:18 +08:00
jxxghp
1b3ae6ab25 fix 下载器整理标签设置 2026-01-23 18:10:59 +08:00
jxxghp
a4ad08136e 更新 version.py 2026-01-23 14:33:41 +08:00
jxxghp
df5e7997c5 Merge pull request #5401 from jxxghp/copilot/check-jobview-logic 2026-01-23 07:21:46 +08:00
copilot-swe-agent[bot]
b2cb3768c1 Fix remove_job to use __get_id for consistent job removal
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-01-22 14:38:33 +00:00
copilot-swe-agent[bot]
fa169c5cd3 Initial plan 2026-01-22 14:34:18 +00:00
jxxghp
bbb3975b67 更新 transfer.py 2026-01-22 22:31:52 +08:00
jxxghp
4502a9c4fa fix:优化移动模式删除逻辑 2026-01-22 22:15:40 +08:00
jxxghp
86905a2670 Merge pull request #5399 from cddjr/fix_downloader_monitor 2026-01-22 21:41:25 +08:00
景大侠
b1e60a4867 修复 下载器监控 2026-01-22 21:34:50 +08:00
jxxghp
1efe3324fb fix:优化设置种子状态标签的时机 2026-01-22 08:24:23 +08:00
jxxghp
55c1e37d39 更新 query_subscribes.py 2026-01-22 08:05:41 +08:00
jxxghp
7fa700317c 更新 update_subscribe.py 2026-01-22 08:03:48 +08:00
jxxghp
bbe831a57c 优化 transfer.py 中任务处理逻辑,增强错误信息反馈 2026-01-21 23:55:20 +08:00
jxxghp
90c86c056c fix all_tasks 2026-01-21 23:30:39 +08:00
jxxghp
36f22a28df fix 完成状态计算 2026-01-21 23:23:37 +08:00
jxxghp
ac03c51e2c 更新 transfer.py 2026-01-21 23:06:29 +08:00
jxxghp
bd9e92f705 更新 transfer.py 2026-01-21 22:59:30 +08:00
jxxghp
281eff5eb2 更新 version.py 2026-01-21 22:54:31 +08:00
jxxghp
abbd2253ad fix deadlock 2026-01-21 22:46:04 +08:00
jxxghp
46466624ae fix:优化下载器整理控制逻辑 2026-01-21 22:21:17 +08:00
jxxghp
0ba8d51b2a fix:优化下载器整理 2026-01-21 21:31:55 +08:00
jxxghp
a1408ee18f feat:TRANSFER_THREADS 变更监听 2026-01-21 20:46:34 +08:00
jxxghp
58030bbcff fix #5392 2026-01-21 20:12:05 +08:00
jxxghp
e1b3e6ef01 fix:只有媒体文件整理完成才触发事件,以保持与历史一致 2026-01-21 20:07:18 +08:00
jxxghp
298a6ba8ab 更新 update_subscribe.py 2026-01-21 19:36:12 +08:00
jxxghp
e5bf47629f 更新 config.py 2026-01-21 19:13:36 +08:00
jxxghp
ea29ee9f66 Merge pull request #5390 from xiaoQQya/develop 2026-01-21 18:39:06 +08:00
jxxghp
868c2254de v2.9.5 2026-01-21 17:59:52 +08:00
jxxghp
567522c87a fix:统一调整文件类型支持 2026-01-21 17:59:18 +08:00
jxxghp
25fd47f57b Merge pull request #5389 from hyuan280/v2 2026-01-21 17:22:27 +08:00
hyuan280
f89d6342d1 fix: 修复Cookie解码二进制数据导致请求发送时UnicodeEncodeError 2026-01-21 16:36:28 +08:00
jxxghp
b02affdea3 Merge pull request #5388 from cddjr/fix_tmdb_img_url 2026-01-21 13:24:39 +08:00
景大侠
6e5ade943b 修复 订阅无法查看文件列表的问题
TMDB图片路径参数增加空值检查
2026-01-21 12:47:39 +08:00
jxxghp
a6ed0c0d00 fix:优化transhandler线程安全 2026-01-21 08:42:57 +08:00
jxxghp
68402aadd7 fix:去除文件操作全局锁 2026-01-21 08:31:51 +08:00
jxxghp
85cacd447b feat: 为文件整理服务引入多线程处理并优化进度管理。 2026-01-21 08:16:02 +08:00
xiaoQQya
11262b321a fix(rousi pro): 修复 Rousi Pro 站点未读消息未推送通知的问题 2026-01-20 22:12:31 +08:00
jxxghp
bf290f063d Merge pull request #5386 from PKC278/v2 2026-01-20 22:09:02 +08:00
PKC278
7ac0fbaf76 fix(otp): 修正 OTP 关闭逻辑 2026-01-20 19:53:59 +08:00
PKC278
7489c76722 feat(passkey): 允许在未开启 OTP 时注册通行密钥 2026-01-20 19:35:36 +08:00
jxxghp
bcdf1b6efe 更新 transhandler.py 2026-01-20 15:29:28 +08:00
jxxghp
8a9dbe212c Merge pull request #5385 from cddjr/feature_optimize_transfer 2026-01-20 15:25:38 +08:00
景大侠
16bd71a6cb 优化整理代码效率、减少额外递归 2026-01-20 14:38:41 +08:00
jxxghp
71caad0655 feat:优化蓝光目录判断,减少目录遍历 2026-01-20 13:38:52 +08:00
jxxghp
2c62ffe34a feat:优化字幕和音频文件整理方式 2026-01-20 13:24:35 +08:00
jxxghp
3450a89880 Merge pull request #5383 from jxxghp/copilot/merge-agent-and-execution-messages 2026-01-20 00:03:23 +08:00
copilot-swe-agent[bot]
a081a69bbe Simplify message merging logic using list join
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-01-19 16:00:36 +00:00
copilot-swe-agent[bot]
271d1d23d5 Merge agent and tool execution messages into a single message
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-01-19 15:59:21 +00:00
copilot-swe-agent[bot]
605aba1a3c Initial plan 2026-01-19 15:55:13 +00:00
jxxghp
be3c2b4c7c Merge pull request #5382 from jxxghp/copilot/fix-tool-call-id-error 2026-01-19 21:36:09 +08:00
copilot-swe-agent[bot]
08eb32d7bd Fix isinstance syntax error for int/float type checking
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-01-19 13:33:01 +00:00
copilot-swe-agent[bot]
2b9cda15e4 Fix tool_call_id error by adding metadata to tool_result and using it in ToolMessage
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-01-19 13:31:43 +00:00
copilot-swe-agent[bot]
f6055b290a Initial plan 2026-01-19 13:28:07 +00:00
jxxghp
ec665e05e4 Merge pull request #5379 from Pollo3470/v2 2026-01-19 21:17:53 +08:00
jxxghp
2b6d7205ec Merge pull request #5378 from cddjr/fix_tv_dir_scrape 2026-01-19 17:47:11 +08:00
Pollo
41381a920c fix: 修复订阅自定义识别词在整理时不生效的问题
问题:订阅中添加的自定义识别词(特别是集数偏移)在下载时正常生效,
但在下载完成整理时没有生效。

根因:下载历史中未保存识别词,整理时 MetaInfoPath 未接收
custom_words 参数。

修复:
- 在 DownloadHistory 模型中添加 custom_words 字段
- 下载时从 meta.apply_words 获取并保存识别词到下载历史
- MetaInfoPath 函数添加 custom_words 参数支持
- 整理时从下载历史获取 custom_words 并传递给 MetaInfoPath
- 添加 Alembic 迁移脚本 (2.2.3)
- 添加相关单元测试
2026-01-19 15:46:00 +08:00
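A minimal sketch of what the Alembic migration mentioned above might look like; the revision ids are placeholders and the `downloadhistory` table name is an assumption, not the actual 2.2.3 migration script:

```python
"""add custom_words to download history (illustrative sketch)"""
from alembic import op
import sqlalchemy as sa

# Placeholder revision identifiers; the real migration defines its own.
revision = "xxxxxxxxxxxx"
down_revision = "yyyyyyyyyyyy"

def upgrade():
    # Assumed table name; stores the custom recognition words applied at download time.
    op.add_column("downloadhistory", sa.Column("custom_words", sa.String, nullable=True))

def downgrade():
    op.drop_column("downloadhistory", "custom_words")
```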
大虾
f1b3fc2254 更新注释 2026-01-19 10:11:54 +08:00
景大侠
a677ed307d 修复 剧集nfo文件刮削了错误的tmdb id
应使用剧集id而非剧id
2026-01-18 16:23:05 +08:00
景大侠
0ab23ee972 修复 刮削电视剧目录会误判剧集根目录为季目录
因辅助识别词指定了季号
2026-01-18 15:17:22 +08:00
景大侠
43f56d39be 修复 手动刮削电视剧目录可能会遗漏特别季 2026-01-18 01:51:35 +08:00
jxxghp
a39caee5f5 Merge pull request #5371 from cddjr/remove_unused_finished_files 2026-01-17 07:50:54 +08:00
景大侠
2edfdf47c8 移除整理进度数据中无用的文件列表 2026-01-17 00:20:09 +08:00
jxxghp
3819461db5 更新 version.py 2026-01-16 19:27:57 +08:00
jxxghp
85654dd7dd Merge pull request #5367 from PKC278/v2 2026-01-15 22:58:10 +08:00
PKC278
619a70416b fix: 修正智能推荐功能未检查智能助手总开关的问题 2026-01-15 22:57:49 +08:00
jxxghp
16d996fe70 Merge pull request #5366 from xiaoQQya/develop 2026-01-15 21:11:10 +08:00
xiaoQQya
1baeb6da19 feat(rousi pro): 支持解析 Rousi Pro 站点未读消息 2026-01-15 21:08:08 +08:00
jxxghp
1641d432dd feat: 为工具管理器添加参数类型规范化处理,并基于渠道能力动态生成提示词中的格式要求 2026-01-15 20:55:35 +08:00
jxxghp
1bf9862e47 feat: 更新代理提示词,增加详细的沟通、状态更新、总结、操作流程、工具使用和媒体管理规则。 2026-01-15 19:50:37 +08:00
jxxghp
602a394043 Merge pull request #5362 from cddjr/feat_extended_api_token_support 2026-01-15 13:33:56 +08:00
景大侠
22a2415ca5 缓存api鉴权结果 2026-01-15 12:38:47 +08:00
景大侠
feb034352d 让现有基于JWT令牌鉴权的接口也能支持API令牌鉴权 2026-01-15 12:30:03 +08:00
jxxghp
a7c8942c78 Merge pull request #5358 from PKC278/v2 2026-01-15 07:04:21 +08:00
PKC278
95f2ac3811 feat(search): 添加AI推荐功能并优化相关逻辑 2026-01-15 02:49:29 +08:00
jxxghp
91354295f2 Merge pull request #5356 from cddjr/fix_manual_transfer 2026-01-14 22:48:24 +08:00
景大侠
c9c4ab5911 修复 手动重新整理没有更新源文件大小的问题
- V1迁移过来的记录,重整理后文件大小显示为0
- 部分源文件大小有变动,重整理后大小显示没变化
2026-01-14 22:42:36 +08:00
jxxghp
a26c5e40dd Merge pull request #5354 from cddjr/fix_media_root_path 2026-01-14 19:04:53 +08:00
景大侠
80f5c7bc44 修复 整理文件或目录时没有正确应用多层标题的重命名格式
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
2026-01-14 19:02:17 +08:00
jxxghp
4833b39c52 Merge pull request #5352 from cddjr/fix_concurrency_systemconfig 2026-01-13 16:58:21 +08:00
景大侠
f478958943 修复 SystemConfig潜在的资源竞争问题
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
2026-01-13 14:33:53 +08:00
jxxghp
0469ad46d6 Merge pull request #5351 from winter0245/v2 2026-01-13 11:48:28 +08:00
winter0245
5fe5deb9df Merge branch 'jxxghp:v2' into v2 2026-01-13 09:30:16 +08:00
xjy
ce83bc24bd fix: 修复站点Cookie处理的两个关键问题
本次提交修复了PT站点搜索功能失败的两个根本原因:

1. **Cookie URL解码问题**
   - 问题:数据库中存储的Cookie值包含URL编码(如%3D、%2B、%2F),
     但cookie_parse()函数未进行解码
   - 影响:所有使用URL编码Cookie的站点可能无法正常登录
   - 修复:在app/utils/http.py的cookie_parse()中添加unquote()解码

2. **httpx Cookie jar覆盖问题**(关键)
 - 问题:httpx.AsyncClient的Cookie jar机制会自动保存服务器返回的
 Set-Cookie,并在后续请求中覆盖我们传入的Cookie
 - 表现:传入正确的c_secure_uid/c_secure_pass,实际发送的却是
 PHPSESSID等错误Cookie
 - 修复:在创建AsyncClient时传入Cookie,而不是在request()时传入

修改文件:
- app/utils/http.py: cookie_parse()添加URL解码 + AsyncClient传入cookies
- app/modules/indexer/spider/__init__.py: 清理调试代码

测试验证:
-  pterclub 搜索功能恢复正常
-  春天站点搜索功能正常(验证通用性)
2026-01-13 09:29:05 +08:00
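A minimal sketch of the two fixes described above: URL-decode values in `cookie_parse()` and pass cookies when constructing `httpx.AsyncClient` so its cookie jar cannot override them on later requests. The `fetch` helper is illustrative:

```python
from urllib.parse import unquote

import httpx

def cookie_parse(cookie_str: str) -> dict:
    """Parse 'k1=v1; k2=v2' cookie strings, URL-decoding each value (e.g. %3D -> =)."""
    cookies = {}
    for item in (cookie_str or "").split(";"):
        if "=" not in item:
            continue
        name, _, value = item.strip().partition("=")
        cookies[name] = unquote(value)
    return cookies

async def fetch(url: str, cookie_str: str) -> str:
    # Pass cookies at client construction so Set-Cookie responses from the server
    # do not replace the credentials we supplied when making follow-up requests.
    async with httpx.AsyncClient(cookies=cookie_parse(cookie_str)) as client:
        resp = await client.get(url)
        return resp.text
```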
jxxghp
dce729c8cb Merge pull request #5350 from cddjr/fix_tmdb_cache 2026-01-13 07:04:58 +08:00
jxxghp
a9d17cd96f Merge pull request #5349 from cddjr/fix_bluray 2026-01-13 07:03:16 +08:00
景大侠
294bb3d4a1 修复 目录监控无法触发蓝光原盘整理 2026-01-13 00:09:34 +08:00
景大侠
b31b9261f2 Update app/core/config.py
接受AI的建议
2026-01-12 23:31:44 +08:00
大虾
2211f8d9e4 Update tests/test_bluray.py
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
2026-01-12 23:17:39 +08:00
景大侠
b9b7b00a7f 修复 下载器监控构造的原盘路径需以/结尾 2026-01-12 22:54:19 +08:00
景大侠
843faf6103 修复 整理记录无法显示原盘大小 2026-01-12 22:14:43 +08:00
景大侠
4af5dad9a8 修复 原盘自动刮削缺少nfo 2026-01-12 21:36:17 +08:00
景大侠
52437c9d18 按语言缓存tmdb元数据 2026-01-12 09:41:34 +08:00
景大侠
c6cb4c8479 统一构造tmdb图片网址 2026-01-12 09:41:25 +08:00
jxxghp
c3714ec251 Merge pull request #5346 from HankunYu/v2 2026-01-12 09:04:35 +08:00
HankunYu
dbe2f94af1 修改embed解析以支持emoji字符 2026-01-12 00:46:26 +00:00
jxxghp
07fd5f8a9e Merge pull request #5344 from PKC278/v2 2026-01-11 20:30:28 +08:00
PKC278
9e64b4cd7f refactor: 优化登录安全性并重构 PassKey 逻辑
- 统一登录失败返回信息,防止信息泄露
- 提取 PassKeyHelper 公共函数,简化 Base64 和凭证处理
- 重构 mfa.py 端点代码,提升可读性和维护性
- 移除冗余的 origin 验证逻辑
2026-01-11 19:20:53 +08:00
jxxghp
f08a7b9eb3 Merge pull request #5343 from Lyzd1/v2 2026-01-10 19:10:04 +08:00
The falling leaves know
a6fa764e2a Change media_type to required field in QueryMediaDetailInput 2026-01-10 18:49:02 +08:00
jxxghp
01676668f1 Merge pull request #5342 from Lyzd1/v2 2026-01-10 17:58:13 +08:00
The falling leaves know
8e5e4f460d Enhance media detail query with media type handling 2026-01-10 17:44:11 +08:00
jxxghp
f907b8a84d v2.9.3
- 优化通行密钥登录体验
- 优化智能体
- 支持Rousi Pro全新架构站点
2026-01-10 10:32:56 +08:00
jxxghp
a3a4285f90 Merge pull request #5339 from PKC278/v2 2026-01-10 07:42:38 +08:00
PKC278
0979163b79 fix(rousi): 修正分类参数为单一值以符合API要求 2026-01-10 02:12:33 +08:00
PKC278
248a25eaee fix(rousi): 移除单例模式 2026-01-10 01:39:40 +08:00
PKC278
f95b1fa68a fix(rousi): 修正分类映射 2026-01-10 01:31:12 +08:00
PKC278
d2b5d69051 feat(rousi): 重构响应处理逻辑以提高代码可读性和维护性 2026-01-10 00:54:43 +08:00
PKC278
3ca419b735 fix(rousi): 精简并修正分类映射 2026-01-10 00:27:45 +08:00
PKC278
50e275a2f9 feat(config): 增加最大搜索名称数量限制至3 确保包含 en_title 2026-01-09 23:53:09 +08:00
PKC278
aeccf78957 feat(rousi): 新增分类参数支持以优化搜索功能 2026-01-09 23:05:02 +08:00
PKC278
cb3cef70e5 feat: 新增 RousiPro 站点支持 2026-01-09 22:08:24 +08:00
jxxghp
b9bd303bf8 fix:优化Agent参数校验,避免中止推理 2026-01-09 20:26:49 +08:00
jxxghp
57d4786a7f Merge pull request #5332 from PKC278/v2 2026-01-08 07:47:01 +08:00
PKC278
df031455b2 feat(agent): 新增媒体详情查询工具 2026-01-07 23:31:08 +08:00
371 changed files with 65107 additions and 8108 deletions


@@ -14,6 +14,9 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
fetch-tags: true
- name: Release version
id: release_version
@@ -66,6 +69,98 @@ jobs:
cache-from: type=gha, scope=${{ github.workflow }}-docker
cache-to: type=gha, scope=${{ github.workflow }}-docker
- name: Generate Changelog
id: changelog
run: |
# 获取上一个 tag,排除当前版本的 tag
PREVIOUS_TAG=$(git tag -l 'v*' --sort=-v:refname | grep -v "^v${{ env.app_version }}$" | head -n 1)
echo "Previous tag: $PREVIOUS_TAG"
# 使用 || 作为分隔符,同时获取 commit 消息和作者 GitHub 用户名
if [ -z "$PREVIOUS_TAG" ]; then
COMMITS=$(git log --pretty=format:"%s||%an" HEAD)
else
COMMITS=$(git log --pretty=format:"%s||%an" ${PREVIOUS_TAG}..HEAD)
fi
# 分类收集 commit 消息(使用关联数组去重)
declare -A SEEN
FEATURES=""
FIXES=""
OTHERS=""
while IFS= read -r line; do
# 跳过空行
if [ -z "$line" ]; then
continue
fi
# 分离 commit 消息和作者
msg=$(echo "$line" | sed 's/||[^|]*$//')
author=$(echo "$line" | sed 's/.*||//')
# 跳过 Merge commit 和版本更新 commit
if echo "$msg" | grep -qE "^Merge pull request|^Merge branch|^更新 version"; then
continue
fi
# 按 Conventional Commits 前缀分类
if echo "$msg" | grep -qiE "^feat(\(.+\))?:"; then
desc=$(echo "$msg" | sed -E 's/^feat(\([^)]*\))?:\s*//')
category="FEATURES"
elif echo "$msg" | grep -qiE "^fix(\(.+\))?:"; then
desc=$(echo "$msg" | sed -E 's/^fix(\([^)]*\))?:\s*//')
category="FIXES"
elif echo "$msg" | grep -qiE "^(docs|style|refactor|perf|test|build|ci|chore|revert)(\(.+\))?:"; then
desc=$(echo "$msg" | sed -E 's/^(docs|style|refactor|perf|test|build|ci|chore|revert)(\([^)]*\))?:\s*//')
category="OTHERS"
else
desc="$msg"
category="OTHERS"
fi
# 使用 "分类+描述" 作为去重的 key跳过重复内容
dedup_key="${category}::${desc}"
if [ -n "${SEEN[$dedup_key]+x}" ]; then
continue
fi
SEEN[$dedup_key]=1
# 添加 by @author 引用
entry="- ${desc} by @${author}"
case "$category" in
FEATURES) FEATURES="${FEATURES}${entry}\n" ;;
FIXES) FIXES="${FIXES}${entry}\n" ;;
OTHERS) OTHERS="${OTHERS}${entry}\n" ;;
esac
done <<< "$COMMITS"
# 组装 changelog
CHANGELOG=""
if [ -n "$FEATURES" ]; then
CHANGELOG="${CHANGELOG}### ✨ 新功能\n\n${FEATURES}\n"
fi
if [ -n "$FIXES" ]; then
CHANGELOG="${CHANGELOG}### 🐛 修复\n\n${FIXES}\n"
fi
if [ -n "$OTHERS" ]; then
CHANGELOG="${CHANGELOG}### 🔧 其他\n\n${OTHERS}\n"
fi
# 添加版本对比链接
if [ -n "$PREVIOUS_TAG" ]; then
CHANGELOG="${CHANGELOG}**完整更新记录**: https://github.com/${{ github.repository }}/compare/${PREVIOUS_TAG}...v${{ env.app_version }}"
fi
# 写入环境变量
echo "CHANGELOG<<EOF" >> $GITHUB_ENV
echo -e "$CHANGELOG" >> $GITHUB_ENV
echo "EOF" >> $GITHUB_ENV
- name: Get existing release body
id: get_release_body
continue-on-error: true
@@ -73,9 +168,17 @@ jobs:
release_body=$(curl -s -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
"https://api.github.com/repos/${{ github.repository }}/releases/tags/v${{ env.app_version }}" | \
jq -r '.body // ""')
echo "RELEASE_BODY<<EOF" >> $GITHUB_ENV
echo "$release_body" >> $GITHUB_ENV
echo "EOF" >> $GITHUB_ENV
# 如果已有手动编写的 release body,则保留;否则使用自动生成的 changelog
if [ -n "$release_body" ] && [ "$release_body" != "null" ] && [ "$release_body" != "" ]; then
echo "RELEASE_BODY<<EOF" >> $GITHUB_ENV
echo "$release_body" >> $GITHUB_ENV
echo "EOF" >> $GITHUB_ENV
else
echo "RELEASE_BODY<<EOF" >> $GITHUB_ENV
echo "${{ env.CHANGELOG }}" >> $GITHUB_ENV
echo "EOF" >> $GITHUB_ENV
fi
- name: Delete Release
uses: dev-drprasad/delete-tag-and-release@v1.1


@@ -29,4 +29,5 @@ jobs:
days-before-pr-close: -1
# 排除带有RFC标签的issue
exempt-issue-labels: "RFC"
operations-per-run: 500
repo-token: ${{ secrets.GITHUB_TOKEN }}

11 .gitignore vendored

@@ -1,4 +1,5 @@
.idea/
.DS_Store
*.c
*.so
*.pyd
@@ -15,11 +16,16 @@ app/helper/*.bin
app/plugins/**
!app/plugins/__init__.py
config/cookies/**
config/app.env
config/user.db*
config/sites/**
config/agent/
config/logs/
config/temp/
config/cache/
.runtime/
public/
.moviepilot.env
*.pyc
*.log
.vscode
@@ -27,4 +33,7 @@ venv
# Pylint
pylint-report.json
.pylint.d/
.pylint.d/
# AI
.claude/


@@ -1,5 +1,8 @@
# MoviePilot
简体中文 | [English](README_EN.md)
![GitHub Repo stars](https://img.shields.io/github/stars/jxxghp/MoviePilot?style=for-the-badge)
![GitHub forks](https://img.shields.io/github/forks/jxxghp/MoviePilot?style=for-the-badge)
![GitHub contributors](https://img.shields.io/github/contributors/jxxghp/MoviePilot?style=for-the-badge)
@@ -16,48 +19,44 @@
发布频道:https://t.me/moviepilot_channel
## 主要特性
- 前后端分离基于FastApi + Vue3。
- 聚焦核心需求,简化功能和设置,部分设置项可直接使用默认值。
- 重新设计了用户界面,更加美观易用。
## 安装使用
官方Wiki:https://wiki.movie-pilot.org
## 本地 CLI
一键安装运行脚本:
```shell
curl -fsSL https://raw.githubusercontent.com/jxxghp/MoviePilot/v2/scripts/bootstrap-local.sh | bash
```
使用 `moviepilot` 命令管理MoviePilot,完整 CLI 文档:[`docs/cli.md`](docs/cli.md)
## 为 AI Agent 添加 Skills
```shell
npx skills add https://github.com/jxxghp/MoviePilot
```
## 参与开发
API文档:https://api.movie-pilot.org
MCP工具API文档详见 [docs/mcp-api.md](docs/mcp-api.md)
本地运行需要 `Python 3.12`、`Node JS v20.12.1`
开发环境准备与本地源码运行说明:[`docs/development-setup.md`](docs/development-setup.md)
- 克隆主项目 [MoviePilot](https://github.com/jxxghp/MoviePilot)
```shell
git clone https://github.com/jxxghp/MoviePilot
```
- 克隆资源项目 [MoviePilot-Resources](https://github.com/jxxghp/MoviePilot-Resources) ,将 `resources` 目录下对应平台及版本的库 `.so`/`.pyd`/`.bin` 文件复制到 `app/helper` 目录
```shell
git clone https://github.com/jxxghp/MoviePilot-Resources
```
- 安装后端依赖,运行 `main.py` 启动后端服务,默认监听端口:`3001`,API文档地址:`http://localhost:3001/docs`
```shell
cd MoviePilot
pip install -r requirements.txt
python3 -m app.main
```
- 克隆前端项目 [MoviePilot-Frontend](https://github.com/jxxghp/MoviePilot-Frontend)
```shell
git clone https://github.com/jxxghp/MoviePilot-Frontend
```
- 安装前端依赖,运行前端项目,访问:`http://localhost:5173`
```shell
yarn
yarn dev
```
- 参考 [插件开发指引](https://wiki.movie-pilot.org/zh/plugindev) 在 `app/plugins` 目录下开发插件代码
插件开发说明:<https://wiki.movie-pilot.org/zh/plugindev>
## 相关项目

77 README_EN.md Normal file

@@ -0,0 +1,77 @@
# MoviePilot
[简体中文](README.md) | English
![GitHub Repo stars](https://img.shields.io/github/stars/jxxghp/MoviePilot?style=for-the-badge)
![GitHub forks](https://img.shields.io/github/forks/jxxghp/MoviePilot?style=for-the-badge)
![GitHub contributors](https://img.shields.io/github/contributors/jxxghp/MoviePilot?style=for-the-badge)
![GitHub repo size](https://img.shields.io/github/repo-size/jxxghp/MoviePilot?style=for-the-badge)
![GitHub issues](https://img.shields.io/github/issues/jxxghp/MoviePilot?style=for-the-badge)
![Docker Pulls](https://img.shields.io/docker/pulls/jxxghp/moviepilot?style=for-the-badge)
![Docker Pulls V2](https://img.shields.io/docker/pulls/jxxghp/moviepilot-v2?style=for-the-badge)
![Platform](https://img.shields.io/badge/platform-Windows%20%7C%20Linux%20%7C%20Synology-blue?style=for-the-badge)
Redesigned from parts of [NAStool](https://github.com/NAStool/nas-tools), with a stronger focus on core automation scenarios while reducing issues and making the project easier to extend and maintain.
# For learning and personal communication only. Please do not promote this project on platforms in mainland China.
Release channel: https://t.me/moviepilot_channel
## Key Features
- Frontend/backend separation based on FastApi + Vue3.
- Focuses on core needs, simplifies features and settings, and allows some options to work well with sensible defaults.
- Reworked user interface for a cleaner and more practical experience.
## Installation
Official wiki: https://wiki.movie-pilot.org
## Local CLI
One-command bootstrap script:
```shell
curl -fsSL https://raw.githubusercontent.com/jxxghp/MoviePilot/v2/scripts/bootstrap-local.sh | bash
```
Manage MoviePilot with the `moviepilot` command. Full CLI documentation: [`docs/cli.md`](docs/cli.md)
## Add Skills for AI Agents
```shell
npx skills add https://github.com/jxxghp/MoviePilot
```
## Development
API documentation: https://api.movie-pilot.org
MCP tool API documentation: see [docs/mcp-api.md](docs/mcp-api.md)
Development environment setup and local source-run guide: [`docs/development-setup.md`](docs/development-setup.md)
Plugin development guide: <https://wiki.movie-pilot.org/zh/plugindev>
## Related Projects
- [MoviePilot-Frontend](https://github.com/jxxghp/MoviePilot-Frontend)
- [MoviePilot-Resources](https://github.com/jxxghp/MoviePilot-Resources)
- [MoviePilot-Plugins](https://github.com/jxxghp/MoviePilot-Plugins)
- [MoviePilot-Server](https://github.com/jxxghp/MoviePilot-Server)
- [MoviePilot-Wiki](https://github.com/jxxghp/MoviePilot-Wiki)
## Disclaimer
- This software is for learning and personal communication only. It must not be used for commercial purposes or illegal activities. The software does not know how users choose to use it, and all responsibility rests with the user.
- The source code is open source and derived from other open-source code. If someone removes the relevant restrictions and redistributes or publishes modified versions that lead to liability events, the publisher of those modifications bears full responsibility. Public releases that bypass or alter the user authentication mechanism are not recommended.
- This project does not accept donations and has not published any donation page anywhere. The software itself is free of charge and does not provide paid services. Please verify information carefully to avoid being misled.
## Contributors
<a href="https://github.com/jxxghp/MoviePilot/graphs/contributors">
<img src="https://contrib.rocks/image?repo=jxxghp/MoviePilot" />
</a>

File diff suppressed because it is too large.


@@ -1,33 +1,574 @@
import asyncio
import threading
from typing import Any, Optional, Tuple
from langchain_core.callbacks import AsyncCallbackHandler
from fastapi.concurrency import run_in_threadpool
from app.chain import ChainBase
from app.log import logger
from app.schemas import Notification
from app.schemas.message import (
MessageResponse,
ChannelCapabilityManager,
ChannelCapability,
)
from app.schemas.types import MessageChannel
class StreamingCallbackHandler(AsyncCallbackHandler):
"""流式输出回调处理器"""
class _StreamChain(ChainBase):
pass
def __init__(self, session_id: str):
class StreamingHandler:
"""
流式Token缓冲管理器
负责从 LLM 流式 token 中积累文本,并在支持消息编辑的渠道上实时推送给用户。
工作流程:
1. Agent开始处理时调用 start_streaming(),检查渠道能力并启动定时刷新
2. LLM 产生 token 时调用 emit() 积累到缓冲区
3. 定时器周期性调用 _flush()
- 第一次有内容时发送新消息(通过 send_direct_message 获取 message_id)
- 后续有新内容时编辑同一条消息(通过 edit_message)
- 当消息长度接近渠道限制时,冻结当前消息并发送新消息继续输出
4. 工具调用时:
- 流式渠道:工具消息直接 emit() 追加到 buffer,与 Agent 文字合并为同一条流式消息
- 非流式渠道:调用 take() 取出已积累的文字,与工具消息合并独立发送
5. Agent最终完成时调用 stop_streaming():执行最后一次刷新,
返回是否已通过流式发送完所有内容(调用方据此决定是否还需额外发送)
"""
# 流式输出的刷新间隔(秒)
FLUSH_INTERVAL = 0.3
def __init__(self):
self._lock = threading.Lock()
self.session_id = session_id
self.current_message = ""
self._buffer = ""
# 流式输出相关状态
self._streaming_enabled = False
self._flush_task: Optional[asyncio.Task] = None
# 当前消息的发送信息(用于编辑消息)
self._message_response: Optional[MessageResponse] = None
# 已发送给用户的文本(用于追踪增量)
self._sent_text = ""
# 当前消息的起始偏移量(buffer 中属于当前消息的起始位置)
self._msg_start_offset = 0
# 当前渠道的单条消息最大长度(0 表示不限制)
self._max_message_length = 0
# 消息发送所需的上下文信息
self._channel: Optional[str] = None
self._source: Optional[str] = None
self._user_id: Optional[str] = None
self._username: Optional[str] = None
self._title: str = ""
self._allow_dispatch_without_context = False
# 非啰嗦模式下的待输出工具统计,等下一段文本到来时再统一补一句摘要
self._pending_tool_stats: dict[str, dict[str, Any]] = {}
async def get_message(self):
"""获取当前消息内容,获取后清空"""
def set_dispatch_policy(
self, allow_dispatch_without_context: bool = False
) -> None:
"""
设置在缺少渠道上下文时是否仍允许向默认通知渠道分发消息。
后台 DISPATCH 任务允许,CAPTURE_ONLY 必须禁止。
"""
self._allow_dispatch_without_context = allow_dispatch_without_context
def emit(self, token: str) -> str:
"""
接收 LLM 流式 token,积累到缓冲区。
如果存在待输出的工具统计,则会先补上一句摘要再追加 token。
"""
with self._lock:
if not self.current_message:
emitted = token or ""
if self._pending_tool_stats:
summary = self._consume_pending_tool_summary_locked()
if summary:
if emitted:
emitted = f"{summary}{emitted.lstrip(chr(10))}"
else:
emitted = summary
# 如果存量消息结束是两个换行,则去掉新消息前面的换行,避免过多空行
if self._buffer.endswith("\n\n") and emitted.startswith("\n"):
emitted = emitted.lstrip("\n")
self._buffer += emitted
return emitted
async def take(self) -> str:
"""
获取当前已积累的消息内容,获取后清空缓冲区。
用于非流式渠道:工具调用前取出 Agent 已产出的文字,
与工具提示合并后独立发送。
注意:流式渠道不调用此方法,工具消息直接 emit 到 buffer 中。
"""
self.flush_pending_tool_summary()
with self._lock:
if not self._buffer:
return ""
msg = self.current_message
logger.info(f"Agent消息: {msg}")
self.current_message = ""
return msg
message = self._buffer
logger.info(f"Agent消息: {message}")
self._buffer = ""
return message
async def on_llm_new_token(self, token: str, **kwargs):
"""处理新的token"""
if not token:
return
def clear(self):
"""
清空缓冲区(不返回内容)
"""
with self._lock:
# 缓存当前消息
self.current_message += token
self._buffer = ""
self._sent_text = ""
self._message_response = None
self._msg_start_offset = 0
self._pending_tool_stats = {}
def reset(self):
"""
重置缓冲区,清空已发送的文本从头更新,但保持消息编辑能力。
与 clear 的区别:
- clear:完全重置所有状态,后续会开新消息
- reset:只清空 buffer,保留消息编辑状态,后续继续编辑同一条消息
"""
with self._lock:
self._buffer = ""
self._sent_text = ""
self._msg_start_offset = 0
self._pending_tool_stats = {}
async def start_streaming(
self,
channel: Optional[str] = None,
source: Optional[str] = None,
user_id: Optional[str] = None,
username: Optional[str] = None,
title: str = "",
):
"""
启动流式输出。
始终标记为流式状态(用于 buffer 收集 token),
但只有渠道支持消息编辑时才启动定时刷新任务(实时推送给用户)。
:param channel: 消息渠道
:param source: 消息来源
:param user_id: 用户ID
:param username: 用户名
:param title: 消息标题
"""
self._channel = channel
self._source = source
self._user_id = user_id
self._username = username
self._title = title
self._streaming_enabled = True
self._sent_text = ""
self._message_response = None
self._msg_start_offset = 0
self._pending_tool_stats = {}
# 检查渠道是否支持消息编辑,不支持则仅收集 token 到 buffer,不实时推送
if not self._can_stream():
logger.debug(f"渠道 {channel} 不支持消息编辑,仅启用 buffer 收集模式")
return
# 从渠道能力中获取单条消息最大长度
try:
channel_enum = MessageChannel(self._channel)
self._max_message_length = ChannelCapabilityManager.get_max_message_length(
channel_enum
)
except (ValueError, KeyError):
self._max_message_length = 0
# 启动异步定时刷新任务
self._flush_task = asyncio.create_task(self._flush_loop())
logger.debug("流式输出已启动")
async def stop_streaming(self) -> Tuple[bool, str]:
"""
停止流式输出。执行最后一次刷新确保所有内容都已发送。
:return: (all_sent, final_text)
all_sent: 是否已经通过流式编辑将最终完整内容发送给了用户
(True 表示调用方无需再额外发送消息)
final_text: 流式发送的完整文本内容(用于调用方保存消息记录)
"""
if not self._streaming_enabled:
return False, ""
self._streaming_enabled = False
# 取消定时任务
await self._cancel_flush_task()
# 将未落地的工具统计补入缓冲区,避免流式结束时丢失这段执行信息
self.flush_pending_tool_summary()
# 执行最后一次刷新
await self._flush()
# 检查是否所有缓冲内容都已发送
with self._lock:
# 当前消息的文本 = buffer 中从 _msg_start_offset 开始的部分
current_msg_text = self._buffer[self._msg_start_offset:]
all_sent = (
self._message_response is not None
and self._sent_text
and current_msg_text == self._sent_text
)
# 保留最终文本用于返回(返回完整 buffer 内容,包含所有分段消息)
final_text = self._buffer if all_sent else ""
# 重置状态
self._sent_text = ""
self._message_response = None
self._msg_start_offset = 0
self._pending_tool_stats = {}
if all_sent:
# 所有内容已通过流式发送,清空缓冲区
self._buffer = ""
return all_sent, final_text
def record_tool_call(
self,
tool_name: str,
tool_message: Optional[str] = None,
tool_kwargs: Optional[dict[str, Any]] = None,
):
"""
记录一次工具调用,供非啰嗦模式下延迟汇总输出。
"""
category, target = self._classify_tool_call(
tool_name=tool_name,
tool_message=tool_message,
tool_kwargs=tool_kwargs or {},
)
with self._lock:
bucket = self._pending_tool_stats.setdefault(
category,
{
"count": 0,
"targets": set(),
},
)
bucket["count"] += 1
if target:
bucket["targets"].add(str(target))
def flush_pending_tool_summary(self) -> str:
"""
将待输出的工具统计摘要补入缓冲区,并返回本次新增的摘要文本。
"""
with self._lock:
summary = self._consume_pending_tool_summary_locked()
if summary:
self._buffer += summary
return summary
@staticmethod
def _classify_tool_call(
tool_name: str,
tool_message: Optional[str],
tool_kwargs: dict[str, Any],
) -> tuple[str, Optional[str]]:
tool_name = (tool_name or "").strip().lower()
tool_message = (tool_message or "").strip()
tool_message_lower = tool_message.lower()
if tool_name == "read_file":
return "file_read", tool_kwargs.get("file_path")
if tool_name in {"write_file", "edit_file"}:
return "file_write", tool_kwargs.get("file_path")
if tool_name in {"list_directory", "query_directory_settings"}:
return "directory", tool_kwargs.get("path")
if tool_name == "browse_webpage":
return (
"web_browse",
tool_kwargs.get("url")
or tool_kwargs.get("target_url")
or tool_kwargs.get("path"),
)
if tool_name == "execute_command":
return "command", tool_kwargs.get("command")
if tool_name == "ask_user_choice":
return "interaction", tool_kwargs.get("message")
if tool_name.startswith("search_") or tool_name in {"get_search_results"}:
return (
"search",
tool_kwargs.get("query")
or tool_kwargs.get("title")
or tool_kwargs.get("keyword"),
)
if tool_name.startswith("query_") or tool_name.startswith("list_") or tool_name.startswith("get_"):
return "data_query", None
if tool_name.startswith(("add_", "update_", "delete_", "modify_", "run_")):
return "action", None
if tool_name in {
"recognize_media",
"scrape_metadata",
"transfer_file",
"test_site",
"send_message",
"send_local_file",
"send_voice_message",
}:
return "action", None
if "读取文件" in tool_message or "read file" in tool_message_lower:
return "file_read", tool_kwargs.get("file_path")
if (
"写入文件" in tool_message
or "编辑文件" in tool_message
or "write file" in tool_message_lower
or "edit file" in tool_message_lower
):
return "file_write", tool_kwargs.get("file_path")
if "目录" in tool_message or "directory" in tool_message_lower:
return "directory", tool_kwargs.get("path")
if "搜索" in tool_message or "search" in tool_message_lower:
return (
"search",
tool_kwargs.get("query")
or tool_kwargs.get("title")
or tool_kwargs.get("keyword"),
)
if "网页" in tool_message or "browser" in tool_message_lower or "webpage" in tool_message_lower:
return "web_browse", tool_kwargs.get("url")
if "命令" in tool_message or "command" in tool_message_lower:
return "command", tool_kwargs.get("command")
return "tool", None
def _consume_pending_tool_summary_locked(self) -> str:
if not self._pending_tool_stats:
return ""
parts = []
for category, bucket in self._pending_tool_stats.items():
value = bucket["count"]
if category in {"file_read", "file_write", "directory", "web_browse"} and bucket["targets"]:
value = len(bucket["targets"])
part = self._format_tool_stat(category, value)
if part:
parts.append(part)
self._pending_tool_stats = {}
if not parts:
return ""
summary = f"{'、'.join(parts)}"
visible_buffer = self._buffer.rstrip(" \t")
last_char = visible_buffer[-1:] if visible_buffer.strip() else ""
prefix = ""
if self._buffer and last_char != "\n":
prefix = "\n\n"
return f"{prefix}{summary}\n\n"
@staticmethod
def _format_tool_stat(category: str, count: int) -> str:
if count <= 0:
return ""
if category == "search":
return f"执行了 {count} 次搜索"
if category == "file_read":
return f"读取了 {count} 个文件"
if category == "file_write":
return f"修改了 {count} 个文件"
if category == "directory":
return f"查看了 {count} 个目录"
if category == "web_browse":
return f"浏览了 {count} 个网页"
if category == "command":
return f"执行了 {count} 条命令"
if category == "data_query":
return f"查询了 {count} 次数据"
if category == "action":
return f"执行了 {count} 次操作"
if category == "interaction":
return f"发起了 {count} 次交互"
return f"调用了 {count} 次工具"
def _can_stream(self) -> bool:
"""
检查当前渠道是否支持流式输出(消息编辑)
"""
if not self._channel:
return False
try:
channel_enum = MessageChannel(self._channel)
return ChannelCapabilityManager.supports_capability(
channel_enum, ChannelCapability.MESSAGE_EDITING
)
except (ValueError, KeyError):
return False
async def _flush_loop(self):
"""
定时刷新循环,定期将缓冲区内容发送/编辑到用户
"""
try:
while self._streaming_enabled:
await asyncio.sleep(self.FLUSH_INTERVAL)
if self._streaming_enabled:
await self._flush()
except asyncio.CancelledError:
pass
except Exception as e:
logger.error(f"流式刷新异常: {e}")
async def _cancel_flush_task(self):
"""
取消当前的定时刷新任务
"""
if self._flush_task and not self._flush_task.done():
self._flush_task.cancel()
try:
await self._flush_task
except asyncio.CancelledError:
pass
self._flush_task = None
async def _flush(self):
"""
将当前缓冲区内容刷新到用户消息
- 如果还没有发送过消息:先发送一条新消息并记录 message_id
- 如果已经发送过消息,编辑该消息为最新的完整内容
- 如果当前消息内容超过长度限制,冻结当前消息并发送新消息继续输出
"""
with self._lock:
# 当前消息的文本 = buffer 中从 _msg_start_offset 开始的部分
current_text = self._buffer[self._msg_start_offset:]
if not current_text or current_text == self._sent_text:
# 没有新内容需要刷新
return
if (
(not self._channel or not self._source)
and not self._allow_dispatch_without_context
):
logger.debug("流式输出缺少渠道上下文,当前模式禁止外发消息")
return
chain = _StreamChain()
try:
if self._message_response is None:
# 第一次发送:发送新消息并获取 message_id
response = await run_in_threadpool(
chain.send_direct_message,
Notification(
channel=self._channel,
source=self._source,
userid=self._user_id,
username=self._username,
title=self._title,
text=current_text,
),
)
if response and response.success and response.message_id:
self._message_response = response
with self._lock:
self._sent_text = current_text
logger.debug(
f"流式输出初始消息已发送: message_id={response.message_id}"
)
else:
logger.debug(
"流式输出初始消息发送失败或未返回message_id降级为非流式输出"
)
self._streaming_enabled = False
else:
# 检查当前消息内容是否超过长度限制
if (
self._max_message_length
and len(current_text) > self._max_message_length
):
# 消息过长,冻结当前消息(保持最后一次成功编辑的内容)
# 将 offset 移动到已发送文本之后,开启新消息
logger.debug(
f"流式消息长度 {len(current_text)} 超过限制 {self._max_message_length},启用新消息"
)
with self._lock:
self._msg_start_offset += len(self._sent_text)
current_text = self._buffer[self._msg_start_offset:]
self._message_response = None
self._sent_text = ""
# 如果偏移后还有新内容,立即发送为新消息
if current_text:
response = await run_in_threadpool(
chain.send_direct_message,
Notification(
channel=self._channel,
source=self._source,
userid=self._user_id,
username=self._username,
title=self._title,
text=current_text,
),
)
if response and response.success and response.message_id:
self._message_response = response
with self._lock:
self._sent_text = current_text
logger.debug(
f"流式输出新消息已发送: message_id={response.message_id}"
)
else:
logger.debug("流式输出新消息发送失败,降级为非流式输出")
self._streaming_enabled = False
else:
# 后续更新:编辑已有消息
try:
channel_enum = MessageChannel(self._channel)
except (ValueError, KeyError):
return
success = await run_in_threadpool(
chain.edit_message,
channel=channel_enum,
source=self._message_response.source,
message_id=self._message_response.message_id,
chat_id=self._message_response.chat_id,
text=current_text,
title=self._title,
)
if success:
with self._lock:
self._sent_text = current_text
else:
logger.debug("流式输出消息编辑失败")
except Exception as e:
logger.error(f"流式输出刷新失败: {e}")
@property
def is_streaming(self) -> bool:
"""
是否正在流式输出
"""
return self._streaming_enabled
@property
def is_auto_flushing(self) -> bool:
"""
是否正在定时刷新(渠道支持消息编辑时自动推送 buffer 内容)
"""
return self._flush_task is not None
@property
def has_sent_message(self) -> bool:
"""
是否已经通过流式输出发送过消息(当前轮次)
"""
return self._message_response is not None
@property
def last_buffer_char(self) -> str:
"""
返回当前缓冲区最后一个字符;缓冲区为空时返回空字符串。
"""
with self._lock:
return self._buffer[-1:] if self._buffer else ""


@@ -0,0 +1,19 @@
---
version: 3
active_persona: default
extra_context_files: []
deprecated_phrases: []
---
# CURRENT_PERSONA
当前激活人格:`default`
运行时加载顺序固定如下:
1. 核心系统提示词(程序内置,不可运行时覆盖)
2. `personas/<active_persona>/PERSONA.md`
3. `extra_context_files`
4. `memory/*.md`
5. `activity/*.md`
`memory` 中的长期偏好可以细化回复方式,但不应覆盖系统核心身份、目标和安全边界。


@@ -0,0 +1,22 @@
---
version: 1
persona_id: aloof
label: 高冷
description: 冷静、克制、低温度,话少但不失礼。
aliases:
- 冷淡
- 冷感
- 冷艳
---
# PERSONA
- Tone: cool, distant, and composed.
- Keep emotional temperature low and transitions short.
- Be brief and efficient, but do not become rude or contemptuous.
- Prefer understatement over enthusiasm.
## RESPONSE_FORMAT
- Lead with the answer or the action result.
- Keep explanations minimal unless the user explicitly asks for detail.
- Avoid extra reassurance, hype, or emotional softening.


@@ -0,0 +1,22 @@
---
version: 1
persona_id: anime
label: 二次元
description: 带一点 ACG 语感和戏剧化表达,但仍然以任务完成和清晰沟通为主。
aliases:
- 动漫风
- ACG
- 宅系
---
# PERSONA
- Tone: lively, stylized, and lightly dramatic, with a small amount of anime-flavored wording.
- Keep the actual task handling grounded and practical; the style should stay mostly in phrasing.
- You may occasionally use short ACG-like interjections, but do not flood the reply with memes, kaomoji, or niche jargon.
- Stay readable first. If the task is serious, reduce the stylistic flavor automatically.
## RESPONSE_FORMAT
- Prefer short paragraphs or compact lists.
- A light playful closing line is acceptable after the real result is already clear.
- Do not let the style make operational instructions vague.


@@ -0,0 +1,22 @@
---
version: 1
persona_id: catgirl
label: 猫娘
description: 带一点猫系拟人风格,轻松可爱,但不过度角色扮演。
aliases:
- 猫猫
- 喵系
- 猫耳
---
# PERSONA
- Tone: playful, cat-like, and cute, with occasional feline wording.
- You may occasionally use a light "喵" style suffix or cat metaphor, but only sparingly.
- Do not turn the reply into full roleplay; task clarity remains the primary goal.
- If the content is operational, keep the answer direct first and add only a thin layer of style.
## RESPONSE_FORMAT
- Keep answers compact and readable.
- Use only a very small amount of repeated verbal tic.
- The result or action status should always appear before any playful flourish.


@@ -0,0 +1,23 @@
---
version: 1
persona_id: concise
label: 极简
description: 更短、更硬朗,优先结论和动作,不主动展开背景解释。
aliases:
- 简洁
- 干脆
- 极简人格
---
# PERSONA
- Tone: terse, decisive, and highly compressed.
- Prefer the shortest complete answer that still moves the task forward.
- Default to one sentence when possible. Only use lists when they materially improve readability.
- Avoid extra context, caveats, or teaching unless the user explicitly asks for explanation.
- Keep transitions minimal and skip conversational softening.
## RESPONSE_FORMAT
- Lead with the conclusion or result.
- For option lists, keep each item very short.
- Do not repeat already-known context back to the user unless it is needed to disambiguate the action.


@@ -0,0 +1,22 @@
---
version: 1
persona_id: cute
label: 可爱
description: 语气更亲和、更柔软、更讨喜,但不做重度角色扮演。
aliases:
- 软萌
- 甜系
- 亲和
---
# PERSONA
- Tone: warm, cheerful, and gently cute.
- Sound approachable and pleasant, but keep the answer concise and useful.
- Avoid baby talk, excessive repetition, or exaggerated emotive punctuation.
- If the user asks for directness, keep the cute flavor minimal.
## RESPONSE_FORMAT
- Prefer friendly short paragraphs.
- For lists, keep each item short and easy to read.
- When something fails, explain it gently but clearly.


@@ -0,0 +1,24 @@
---
version: 1
persona_id: default
label: 默认
description: 专业、克制、简洁,适合大多数日常媒体管理场景。
aliases:
- 专业
- 默认人格
---
# PERSONA
- Tone: professional, concise, restrained.
- Be direct. No unnecessary preamble, no repeating the user's words, no narrating internal reasoning.
- Do not flatter the user, praise the question, or add emotional cushioning.
- Do not use emojis, exclamation marks, cute language, or excessive apology.
- Prefer short declarative sentences. Default to one or two short paragraphs; use lists only when they improve scanability.
- Use Markdown for structured data. Use `inline code` for media titles and paths.
## RESPONSE_FORMAT
- Keep confirmations short.
- For search or comparison results, prefer a brief list over a long paragraph.
- Skip filler phrases like "Let me help you", "Here are the results", or "I found...".
- When an error occurs, briefly state the blocker and the next best action.

View File

@@ -0,0 +1,22 @@
---
version: 1
persona_id: disdain
label: 不屑
description: 带一点嫌弃感和轻微毒舌,但必须保持可控和不越界。
aliases:
- 嫌弃
- 毒舌
- 鄙视链
---
# PERSONA
- Tone: dry, skeptical, and faintly dismissive.
- Mild sarcasm is acceptable, but it must stay controlled and should never turn into direct insult or humiliation.
- Prioritize sharp phrasing and low patience, while still giving the user the actual answer.
- If the task is sensitive or the user is clearly frustrated, reduce the bite automatically.
## RESPONSE_FORMAT
- Keep answers crisp and pointed.
- Use short, cutting observations only when they improve the style without harming clarity.
- Always include the concrete result, instruction, or blocker.

View File

@@ -0,0 +1,22 @@
---
version: 1
persona_id: guide
label: 说明型
description: 在复杂问题上更愿意解释原因和步骤,但仍保持克制,不会无节制展开。
aliases:
- 讲解
- 解释型
- 教学
---
# PERSONA
- Tone: clear, structured, and mildly explanatory.
- When the task is simple, stay concise. When the task is complex or the user asks why/how, provide a short explanation with visible structure.
- Keep explanations practical and tied to the current decision, not generic theory.
- Remain restrained: do not become chatty, cute, or overly warm.
## RESPONSE_FORMAT
- For non-trivial tasks, prefer short sections or a compact numbered list.
- When describing tradeoffs, keep them concrete and action-oriented.
- End with the actual outcome or next step, not a generic summary.

View File

@@ -0,0 +1,23 @@
---
version: 1
persona_id: moe
label: 萌系
description: 更轻小说感、更元气、更可爱,但仍然保持边界和专业度。
aliases:
- 萝莉风
- 轻小说风
- 元气少女
- 萌萌
---
# PERSONA
- Tone: soft, upbeat, cute, and lightly playful.
- Keep the personality in wording only; do not imitate a child, emphasize age, or use any sexualized framing.
- Use cute particles or soft wording sparingly so the answer still feels useful instead of noisy.
- When the task is urgent or technical, reduce the fluff and keep the result clear.
## RESPONSE_FORMAT
- Prefer short, bright sentences.
- A small amount of cute phrasing is acceptable, but the final answer must still be easy to scan.
- Do not bury the actual conclusion under roleplay language.

19
app/agent/llm/__init__.py Normal file
View File

@@ -0,0 +1,19 @@
"""Agent 内部使用的 LLM 适配层。"""
from app.agent.llm.helper import LLMHelper, LLMTestError, LLMTestTimeout
from app.agent.llm.provider import (
LLMProviderAuthError,
LLMProviderError,
LLMProviderManager,
render_auth_result_html,
)
__all__ = [
"LLMHelper",
"LLMProviderAuthError",
"LLMProviderError",
"LLMProviderManager",
"LLMTestError",
"LLMTestTimeout",
"render_auth_result_html",
]

839
app/agent/llm/helper.py Normal file
View File

@@ -0,0 +1,839 @@
"""LLM模型相关辅助功能"""
import asyncio
import inspect
import json
import time
from functools import wraps
from typing import Any, List
from langchain_core.messages import AIMessage
from app.core.config import settings
from app.log import logger
class LLMTestError(RuntimeError):
"""LLM 测试调用异常,附带请求耗时。"""
def __init__(self, message: str, duration_ms: int | None = None):
super().__init__(message)
self.duration_ms = duration_ms
class LLMTestTimeout(TimeoutError):
"""LLM 测试调用超时,附带请求耗时。"""
def __init__(self, message: str, duration_ms: int | None = None):
super().__init__(message)
self.duration_ms = duration_ms
def _patch_gemini_thought_signature():
"""
修复 langchain-google-genai 中 Gemini 2.5 思考模型的 thought_signature 兼容问题。
langchain-google-genai 的 _is_gemini_3_or_later() 仅检查 "gemini-3",
导致 Gemini 2.5 思考模型(如 gemini-2.5-flash、gemini-2.5-pro)在工具调用时
缺少 thought_signature 而报错 400。
此补丁将检查范围扩展到 Gemini 2.5 模型。
"""
try:
import langchain_google_genai.chat_models as _cm
# 仅在未修补时执行
if getattr(_cm, "_thought_signature_patched", False):
return
def _patched_is_gemini_3_or_later(model_name: str) -> bool:
if not model_name:
return False
name = model_name.lower().replace("models/", "")
# Gemini 2.5 思考模型也需要 thought_signature 支持
return "gemini-3" in name or "gemini-2.5" in name
_cm._is_gemini_3_or_later = _patched_is_gemini_3_or_later
_cm._thought_signature_patched = True
logger.debug(
"已修补 langchain-google-genai thought_signature 兼容性(覆盖 Gemini 2.5 模型)"
)
except Exception as e:
logger.warning(f"修补 langchain-google-genai thought_signature 失败: {e}")
def _get_httpx_proxy_key() -> str:
"""
获取当前 httpx 版本支持的代理参数名。
httpx < 0.28 使用 "proxies"(复数),>= 0.28 使用 "proxy"(单数)。
google-genai SDK 会静默过滤掉不在 httpx.Client.__init__ 签名中的参数,
因此必须使用与当前 httpx 版本匹配的参数名。
"""
try:
import httpx
params = inspect.signature(httpx.Client.__init__).parameters
if "proxy" in params:
return "proxy"
return "proxies"
except Exception as e:
logger.warning(f"检测 httpx 代理参数失败,默认使用 'proxies'{e}")
return "proxies"
def _deepseek_thinking_toggle(extra_body: Any) -> bool | None:
"""
解析 DeepSeek extra_body 中显式传入的 thinking 开关。
"""
if not isinstance(extra_body, dict):
return None
thinking = extra_body.get("thinking")
if not isinstance(thinking, dict):
return None
thinking_type = str(thinking.get("type") or "").strip().lower()
if thinking_type == "enabled":
return True
if thinking_type == "disabled":
return False
return None
def _is_deepseek_thinking_enabled(model_name: str | None, extra_body: Any) -> bool:
"""
判断本次 DeepSeek 调用是否处于 thinking mode。
"""
explicit_toggle = _deepseek_thinking_toggle(extra_body)
if explicit_toggle is not None:
return explicit_toggle
normalized_model_name = str(model_name or "").strip().lower()
if normalized_model_name == "deepseek-reasoner":
return True
if normalized_model_name.startswith("deepseek-v4-"):
# DeepSeek V4 默认启用 thinking mode,除非显式关闭。
return True
return False
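A few illustrative calls showing the intended precedence: the explicit `extra_body` toggle wins, otherwise the model name decides (these snippets are not tests from the repository, and `deepseek-v4-chat` is a hypothetical model name):

```python
# Explicit toggle in extra_body always wins.
_is_deepseek_thinking_enabled("deepseek-chat", {"thinking": {"type": "enabled"}})       # True
_is_deepseek_thinking_enabled("deepseek-reasoner", {"thinking": {"type": "disabled"}})  # False

# Without a toggle, the model name decides.
_is_deepseek_thinking_enabled("deepseek-reasoner", None)  # True
_is_deepseek_thinking_enabled("deepseek-v4-chat", None)   # True (hypothetical V4 name; V4 defaults to thinking)
_is_deepseek_thinking_enabled("deepseek-chat", None)      # False
```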
def _patch_deepseek_reasoning_content_support():
"""
修补 langchain-deepseek 在 tool-call 场景下遗漏 reasoning_content 回传的问题。
DeepSeek thinking mode 要求:若 assistant 历史消息包含 tool_calls,
后续请求中必须带回该条消息的顶层 reasoning_content。
某些 langchain-deepseek 版本虽然能从响应中拿到 reasoning_content,
但不会在重放消息历史时写回请求载荷,导致 400。
"""
try:
from langchain_deepseek import ChatDeepSeek
except Exception as err:
logger.debug(f"跳过 langchain-deepseek reasoning_content 修补:{err}")
return
if getattr(ChatDeepSeek, "_moviepilot_reasoning_content_patched", False):
return
original_get_request_payload = getattr(ChatDeepSeek, "_get_request_payload", None)
if not callable(original_get_request_payload):
logger.warning("langchain-deepseek 缺少 _get_request_payload无法修补 reasoning_content")
return
@wraps(original_get_request_payload)
def _patched_get_request_payload(self, input_, *, stop=None, **kwargs):
payload = original_get_request_payload(self, input_, stop=stop, **kwargs)
# Resolve original messages so we can extract reasoning_content from
# additional_kwargs. The parent's payload builder does not propagate
# this DeepSeek-specific field.
messages = self._convert_input(input_).to_messages()
for i, message in enumerate(payload["messages"]):
if message["role"] == "tool" and isinstance(message["content"], list):
message["content"] = json.dumps(message["content"])
elif message["role"] == "assistant":
if isinstance(message["content"], list):
# DeepSeek API expects assistant content to be a string,
# not a list. Extract text blocks and join them, or use
# empty string if none exist.
text_parts = [
block.get("text", "")
for block in message["content"]
if isinstance(block, dict) and block.get("type") == "text"
]
message["content"] = "".join(text_parts) if text_parts else ""
# DeepSeek reasoning models require every assistant message to
# carry a reasoning_content field (even when empty). The value
# is stored in AIMessage.additional_kwargs by
# _create_chat_result(); re-inject it into the API payload.
if (
"reasoning_content" not in message
and i < len(messages)
and isinstance(messages[i], AIMessage)
):
message["reasoning_content"] = messages[i].additional_kwargs.get(
"reasoning_content", ""
)
return payload
ChatDeepSeek._get_request_payload = _patched_get_request_payload
ChatDeepSeek._moviepilot_reasoning_content_patched = True
logger.debug("已修补 langchain-deepseek thinking tool-call 的 reasoning_content 回传兼容性")
def _patch_openai_responses_instructions_support():
"""
修补 langchain-openai 在使用 use_responses_api=True 时,
提取 system 消息为顶层 instructions 字段。
由于 Codex 等模型 (Responses API) 强依赖 instructions 字段,
如果没有该字段会报 400 "Instructions are required"
"""
try:
from langchain_openai import ChatOpenAI
except Exception as err:
logger.debug(f"跳过 langchain-openai instructions 修补:{err}")
return
if getattr(ChatOpenAI, "_moviepilot_responses_instructions_patched", False):
return
original_get_request_payload = getattr(ChatOpenAI, "_get_request_payload", None)
if not callable(original_get_request_payload):
logger.warning("langchain-openai 缺少 _get_request_payload无法修补 instructions")
return
@wraps(original_get_request_payload)
def _patched_get_request_payload(self, input_, *, stop=None, **kwargs):
payload = original_get_request_payload(self, input_, stop=stop, **kwargs)
base_url = str(getattr(self, "openai_api_base", "") or "").lower()
# 处理 GitHub Copilot 端点兼容性
if "githubcopilot.com" in base_url:
payload.pop("stream_options", None)
payload.pop("metadata", None)
# 处理 ChatGPT 官方 Responses API (Codex) 端点兼容性
is_codex = "chatgpt.com/backend-api/codex" in base_url
if is_codex and (getattr(self, "use_responses_api", False) or "input" in payload):
instructions = payload.get("instructions", "")
inputs = payload.get("input", [])
new_inputs = []
for msg in inputs:
if isinstance(msg, dict) and msg.get("role") == "system":
content = msg.get("content")
if isinstance(content, str) and content.strip():
if instructions:
instructions += "\n\n" + content
else:
instructions = content
else:
new_inputs.append(msg)
payload["input"] = new_inputs
payload["instructions"] = instructions or "You are a helpful assistant."
payload["store"] = False
# Codex 端点不支持的部分常见补全参数,统一清理避免 400 报错
unsupported_keys = [
"presence_penalty", "frequency_penalty", "top_p", "n", "user",
"stop", "metadata", "logit_bias", "logprobs", "top_logprobs",
"stream_options", "temperature"
]
for key in unsupported_keys:
payload.pop(key, None)
return payload
ChatOpenAI._get_request_payload = _patched_get_request_payload
ChatOpenAI._moviepilot_responses_instructions_patched = True
logger.debug("已修补 langchain-openai responses API 的 instructions 兼容性")
class LLMHelper:
"""LLM模型相关辅助功能"""
_SUPPORTED_THINKING_LEVELS = frozenset(
{"off", "auto", "minimal", "low", "medium", "high", "max", "xhigh"}
)
@staticmethod
def _normalize_model_name(model_name: str | None) -> str:
"""
统一清理模型名称,便于按模型族做能力映射。
"""
return (model_name or "").strip().lower()
@classmethod
def _normalize_deepseek_reasoning_effort(
cls, thinking_level: str | None = None
) -> str | None:
"""
DeepSeek 文档当前建议使用 high/max,兼容常见 effort 别名。
"""
if not thinking_level or thinking_level in {"off", "auto"}:
return None
if thinking_level in {"minimal", "low", "medium", "high"}:
return "high"
if thinking_level in {"max", "xhigh"}:
return "max"
logger.warning(f"忽略不支持的 DeepSeek reasoning_effort 配置: {thinking_level}")
return None
@classmethod
def _normalize_openai_reasoning_effort(
cls, thinking_level: str | None = None
) -> str | None:
"""
OpenAI reasoning_effort 支持更细粒度的 effort,统一做最近似映射。
"""
if not thinking_level or thinking_level == "auto":
return None
if thinking_level == "off":
return "none"
if thinking_level == "max":
return "xhigh"
return thinking_level
@classmethod
def _build_google_thinking_kwargs(
cls, model_name: str, thinking_level: str
) -> dict[str, Any]:
"""
Gemini 3 使用 thinking_level,Gemini 2.5 使用 thinking_budget。
"""
if not model_name or thinking_level == "auto":
return {}
if "gemini-2.5" in model_name:
if thinking_level == "off":
if "pro" in model_name:
# Gemini 2.5 Pro 官方不支持完全关闭思考,回退到最小预算。
return {
"thinking_budget": 128,
"include_thoughts": False,
}
return {
"thinking_budget": 0,
"include_thoughts": False,
}
budget_map = {
"minimal": 512,
"low": 1024,
"medium": 4096,
"high": 8192,
"max": 24576,
"xhigh": 24576,
}
budget = budget_map.get(thinking_level)
return (
{
"thinking_budget": budget,
"include_thoughts": False,
}
if budget is not None
else {}
)
if "gemini-3" in model_name:
level_map = {
"off": "minimal",
"minimal": "minimal",
"low": "low",
"medium": "medium",
"high": "high",
"max": "high",
"xhigh": "high",
}
google_level = level_map.get(thinking_level)
return (
{
"thinking_level": google_level,
"include_thoughts": False,
}
if google_level
else {}
)
return {}
@classmethod
def _build_kimi_thinking_kwargs(
cls, model_name: str, thinking_level: str
) -> dict[str, Any]:
"""
Kimi 当前公开文档仅支持思考开关,不支持显式深度调节。
"""
if model_name.startswith("kimi-k2-thinking"):
return {}
if thinking_level == "off":
return {"extra_body": {"thinking": {"type": "disabled"}}}
return {}
@classmethod
def _build_thinking_kwargs(
cls,
provider: str,
model: str | None,
thinking_level: str | None = None
) -> dict[str, Any]:
"""
按 provider/model 生成思考模式相关参数。
优先使用 LangChain/OpenAI SDK 已支持的原生字段;仅在 provider
明确要求自定义请求体时,才回退到 extra_body。
"""
provider_name = (provider or "").strip().lower()
model_name = cls._normalize_model_name(model)
if provider_name == "deepseek":
if thinking_level == "off":
return {"extra_body": {"thinking": {"type": "disabled"}}}
if thinking_level == "auto":
return {}
kwargs: dict[str, Any] = {"extra_body": {"thinking": {"type": "enabled"}}}
deepseek_effort = cls._normalize_deepseek_reasoning_effort(
thinking_level
)
if deepseek_effort:
kwargs["reasoning_effort"] = deepseek_effort
return kwargs
if model_name.startswith(("kimi-k2.5", "kimi-k2.6", "kimi-k2-thinking")):
return cls._build_kimi_thinking_kwargs(model_name, thinking_level)
if not model_name:
return {}
# OpenAI 原生推理模型优先走 LangChain 内置 reasoning_effort。
if provider_name in {"openai", "chatgpt"} and model_name.startswith(
("gpt-5", "o1", "o3", "o4")
):
openai_effort = cls._normalize_openai_reasoning_effort(
thinking_level
)
return {"reasoning_effort": openai_effort} if openai_effort else {}
# Gemini 使用 google-genai / langchain-google-genai 内置思考控制参数。
if provider_name == "google":
return cls._build_google_thinking_kwargs(
model_name, thinking_level
)
return {}
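Expected return shapes for a few provider/model combinations, following the mapping above (model names are placeholders chosen to hit each branch, not an exhaustive matrix):

```python
LLMHelper._build_thinking_kwargs("deepseek", "deepseek-chat", "high")
# -> {"extra_body": {"thinking": {"type": "enabled"}}, "reasoning_effort": "high"}

LLMHelper._build_thinking_kwargs("openai", "gpt-5-mini", "off")
# -> {"reasoning_effort": "none"}

LLMHelper._build_thinking_kwargs("google", "gemini-2.5-flash", "medium")
# -> {"thinking_budget": 4096, "include_thoughts": False}

LLMHelper._build_thinking_kwargs("google", "gemini-3-pro", "max")
# -> {"thinking_level": "high", "include_thoughts": False}
```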
@staticmethod
def supports_image_input() -> bool:
"""
判断当前模型是否启用了图片输入能力。
"""
return bool(settings.LLM_SUPPORT_IMAGE_INPUT)
@staticmethod
def _build_legacy_runtime(
provider_name: str,
model_name: str | None,
api_key: str | None = None,
base_url: str | None = None,
) -> dict[str, Any]:
"""
在 provider 目录不可用时回退到旧的直接构造逻辑。
这主要用于单测 stub 环境以及极端的最小运行环境,正常生产路径仍优先
走 `LLMProviderManager.resolve_runtime()`。
"""
api_key_value = api_key if api_key is not None else settings.LLM_API_KEY
base_url_value = base_url if base_url is not None else settings.LLM_BASE_URL
if not api_key_value:
raise ValueError("未配置LLM API Key")
runtime_name = provider_name if provider_name in {"google", "deepseek"} else "openai_compatible"
return {
"provider_id": provider_name,
"runtime": runtime_name,
"model_id": model_name,
"api_key": api_key_value,
"base_url": base_url_value,
"default_headers": None,
"use_responses_api": None,
"model_record": None,
"model_metadata": None,
}
@classmethod
def _resolve_thinking_level(
cls,
thinking_level: str | None = None,
) -> str | None:
"""
统一兼容新旧 thinking 参数。
"""
def _normalize(value: str | None) -> str | None:
normalized = str(value or "").strip().lower()
if not normalized:
return None
alias_map = {
"none": "off",
"disabled": "off",
"disable": "off",
"enabled": "auto",
"enable": "auto",
"default": "auto",
"dynamic": "auto",
}
normalized = alias_map.get(normalized, normalized)
if normalized in cls._SUPPORTED_THINKING_LEVELS:
return normalized
logger.warning(f"忽略不支持的思考级别: {value}")
return None
normalized_thinking_level = _normalize(thinking_level)
if normalized_thinking_level:
return normalized_thinking_level
return "off"
@classmethod
async def get_llm(
cls,
streaming: bool = False,
provider: str | None = None,
model: str | None = None,
thinking_level: str | None = None,
api_key: str | None = settings.LLM_API_KEY,
base_url: str | None = settings.LLM_BASE_URL,
):
"""
获取LLM实例
:param streaming: 是否启用流式输出
:param provider: LLM提供商,默认为配置项LLM_PROVIDER
:param model: 模型名称,默认为配置项LLM_MODEL
:param thinking_level: 思考模式级别,默认为 None(即自动判断
是否启用思考模式)。支持的级别包括 "off"(关闭)、"auto"(自动)、"minimal"、"low"、"medium"、"high"、"max"/"xhigh"(最大)。
不同模型对思考模式的支持和表现不同,具体映射关系请
参考代码实现。对于不支持思考模式的模型,该参数将被忽略。
:param api_key: API Key,默认为配置项LLM_API_KEY。对于某些提供商(如 DeepSeek),可能需要同时提供 base_url。
:param base_url: API Base URL,默认为配置项LLM_BASE_URL。
:return: LLM实例
"""
provider_name = str(provider if provider is not None else settings.LLM_PROVIDER).lower()
model_name = model if model is not None else settings.LLM_MODEL
normalized_thinking_level = cls._resolve_thinking_level(
thinking_level=thinking_level,
)
try:
# 延迟导入,避免单测在最小 stub 环境下 import `llm.py` 时被 provider
# 目录依赖链拖住。
from app.agent.llm.provider import LLMProviderManager
runtime = await LLMProviderManager().resolve_runtime(
provider_id=provider_name,
model=model_name,
api_key=api_key,
base_url=base_url,
)
except Exception as err:
logger.debug(f"LLM provider 目录不可用,回退到旧运行时逻辑: {err}")
runtime = cls._build_legacy_runtime(
provider_name=provider_name,
model_name=model_name,
api_key=api_key,
base_url=base_url,
)
model_name = runtime.get("model_id") or model_name
thinking_kwargs = cls._build_thinking_kwargs(
provider=provider_name,
model=model_name,
thinking_level=normalized_thinking_level,
)
if runtime["runtime"] == "google":
# 修补 Gemini 2.5 思考模型的 thought_signature 兼容性
_patch_gemini_thought_signature()
# 统一使用 langchain-google-genai 原生接口
# 不使用 OpenAI 兼容端点,因其不支持 Gemini 思考模型的 thought_signature
# 会导致工具调用时报错 400
from langchain_google_genai import ChatGoogleGenerativeAI
client_args = None
if settings.PROXY_HOST:
proxy_key = _get_httpx_proxy_key()
client_args = {proxy_key: settings.PROXY_HOST}
model = ChatGoogleGenerativeAI(
model=model_name,
api_key=runtime["api_key"],
retries=3,
temperature=settings.LLM_TEMPERATURE,
streaming=streaming,
client_args=client_args,
**thinking_kwargs,
)
elif runtime["runtime"] == "deepseek":
from langchain_deepseek import ChatDeepSeek
_patch_deepseek_reasoning_content_support()
model = ChatDeepSeek(
model=model_name,
api_key=runtime["api_key"],
api_base=runtime["base_url"],
max_retries=3,
temperature=settings.LLM_TEMPERATURE,
streaming=streaming,
stream_usage=True,
**thinking_kwargs,
)
elif runtime["runtime"] in {"anthropic_compatible", "copilot_anthropic"}:
from langchain_anthropic import ChatAnthropic
model = ChatAnthropic(
model=model_name,
api_key=runtime["api_key"],
base_url=runtime["base_url"],
max_retries=3,
temperature=settings.LLM_TEMPERATURE,
streaming=streaming,
stream_usage=True,
anthropic_proxy=settings.PROXY_HOST,
default_headers=runtime.get("default_headers"),
**thinking_kwargs,
)
else:
from langchain_openai import ChatOpenAI
_patch_openai_responses_instructions_support()
# ChatGPT Codex 端点强制要求 stream: True
if runtime.get("use_responses_api") and "chatgpt.com/backend-api/codex" in str(runtime.get("base_url") or ""):
streaming = True
model = ChatOpenAI(
model=model_name,
api_key=runtime["api_key"],
max_retries=3,
base_url=runtime.get("base_url"),
temperature=settings.LLM_TEMPERATURE,
streaming=streaming,
stream_usage=True,
openai_proxy=settings.PROXY_HOST,
default_headers=runtime.get("default_headers"),
use_responses_api=runtime.get("use_responses_api"),
**thinking_kwargs,
)
# 优先使用 provider / models.dev 目录中的上下文上限,减少用户手填成本。
model_profile = getattr(model, "profile", None)
if model_profile:
logger.debug(f"使用LLM模型: {model.model}Profile: {model.profile}")
else:
model_record = runtime.get("model_record") or {}
model_metadata = runtime.get("model_metadata") or {}
metadata_limit = model_metadata.get("limit") or {}
max_input_tokens = (
model_record.get("input_tokens")
or model_record.get("context_tokens")
or metadata_limit.get("input")
or metadata_limit.get("context")
or settings.LLM_MAX_CONTEXT_TOKENS * 1000
)
model.profile = {
"max_input_tokens": int(max_input_tokens),
}
return model
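A minimal usage sketch; the provider, model, and thinking level are placeholders, and in normal operation they fall back to the corresponding settings values:

```python
from app.agent.llm import LLMHelper

async def ask_once(question: str) -> str:
    llm = await LLMHelper.get_llm(
        streaming=False,
        provider="deepseek",        # placeholder; defaults to settings.LLM_PROVIDER
        model="deepseek-reasoner",  # placeholder; defaults to settings.LLM_MODEL
        thinking_level="high",
    )
    response = await llm.ainvoke(question)
    return LLMHelper._extract_text_content(response.content)
```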
@staticmethod
def _extract_text_content(content) -> str:
"""
从响应内容中提取纯文本,仅保留真实文本块。
"""
if content is None:
return ""
if isinstance(content, str):
return content
if isinstance(content, list):
text_parts = []
for block in content:
if isinstance(block, str):
text_parts.append(block)
continue
if isinstance(block, dict) or hasattr(block, "get"):
block_type = block.get("type")
if block.get("thought") or block_type in (
"thinking",
"reasoning_content",
"reasoning",
"thought",
):
continue
if block_type == "text":
text_parts.append(block.get("text", ""))
continue
if not block_type and isinstance(block.get("text"), str):
text_parts.append(block.get("text", ""))
return "".join(text_parts)
if isinstance(content, dict) or hasattr(content, "get"):
if content.get("thought"):
return ""
if content.get("type") == "text":
return content.get("text", "")
if not content.get("type") and isinstance(content.get("text"), str):
return content.get("text", "")
return ""
@staticmethod
async def test_current_settings(
prompt: str = "请只回复 OK",
timeout: int = 20,
provider: str | None = None,
model: str | None = None,
thinking_level: str | None = None,
api_key: str | None = None,
base_url: str | None = None,
) -> dict:
"""
使用当前已保存配置执行一次最小 LLM 调用。
"""
provider_name = provider if provider is not None else settings.LLM_PROVIDER
model_name = model if model is not None else settings.LLM_MODEL
start = time.perf_counter()
llm = await LLMHelper.get_llm(
streaming=False,
provider=provider_name,
model=model_name,
thinking_level=thinking_level,
api_key=api_key,
base_url=base_url,
)
try:
response = await asyncio.wait_for(llm.ainvoke(prompt), timeout=timeout)
except TimeoutError as err:
duration_ms = round((time.perf_counter() - start) * 1000)
raise LLMTestTimeout("LLM 调用超时", duration_ms=duration_ms) from err
except Exception as err:
duration_ms = round((time.perf_counter() - start) * 1000)
raise LLMTestError(str(err), duration_ms=duration_ms) from err
reply_text = LLMHelper._extract_text_content(
getattr(response, "content", response)
).strip()
duration_ms = round((time.perf_counter() - start) * 1000)
data = {
"provider": provider_name,
"model": model_name,
"duration_ms": duration_ms,
}
if reply_text:
data["reply_preview"] = reply_text[:120]
return data
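A hedged example of driving the connectivity test and handling its two failure modes:

```python
from app.agent.llm import LLMHelper, LLMTestError, LLMTestTimeout

async def check_llm() -> None:
    try:
        result = await LLMHelper.test_current_settings(timeout=20)
        print(f"OK in {result['duration_ms']} ms: {result.get('reply_preview', '')}")
    except LLMTestTimeout as err:
        print(f"timed out after {err.duration_ms} ms")
    except LLMTestError as err:
        print(f"failed after {err.duration_ms} ms: {err}")
```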
async def get_models(
self,
provider: str,
api_key: str | None = None,
base_url: str | None = None,
force_refresh: bool = False,
) -> List[dict[str, Any]]:
"""
获取模型列表。
返回值会带上 context/supports_reasoning 等元数据,供前端直接渲染并自动
回填上下文大小。
"""
logger.info(f"获取 {provider} 模型列表...")
try:
from app.agent.llm.provider import LLMProviderManager
return await LLMProviderManager().list_models(
provider_id=provider,
api_key=api_key,
base_url=base_url,
force_refresh=force_refresh,
)
except Exception as err:
logger.debug(f"LLM provider 目录不可用,回退旧模型列表逻辑: {err}")
if provider == "google":
return [
{"id": model_id, "name": model_id}
for model_id in await self._get_google_models(api_key or "")
]
model_list_base_url = base_url
try:
from app.agent.llm.provider import LLMProviderManager
model_list_base_url = (
LLMProviderManager().resolve_model_list_base_url(
provider_id=provider,
base_url=base_url,
)
or base_url
)
except Exception:
model_list_base_url = base_url
return [
{"id": model_id, "name": model_id}
for model_id in await self._get_openai_compatible_models(
provider,
api_key or "",
model_list_base_url,
)
]
@staticmethod
async def _get_google_models(api_key: str) -> List[str]:
"""获取Google模型列表使用 google-genai SDK v1"""
try:
from google import genai
from google.genai.types import HttpOptions
http_options = None
if settings.PROXY_HOST:
proxy_key = _get_httpx_proxy_key()
proxy_args = {proxy_key: settings.PROXY_HOST}
http_options = HttpOptions(
client_args=proxy_args,
async_client_args=proxy_args,
)
client = genai.Client(api_key=api_key, http_options=http_options)
models = await client.aio.models.list()
result = [
m.name
for m in models.page
if m.supported_actions and "generateContent" in m.supported_actions
]
await client.aio.aclose()
return result
except Exception as e:
logger.error(f"获取Google模型列表失败{e}")
raise e
@staticmethod
async def _get_openai_compatible_models(
provider: str, api_key: str, base_url: str = None
) -> List[str]:
"""获取OpenAI兼容模型列表"""
try:
from openai import AsyncOpenAI
if provider == "deepseek":
base_url = base_url or "https://api.deepseek.com"
client = AsyncOpenAI(api_key=api_key, base_url=base_url)
models = await client.models.list()
await client.close()
return [model.id for model in models.data]
except Exception as e:
logger.error(f"获取 {provider} 模型列表失败:{e}")
raise e
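Model listing goes through the provider catalogue first and falls back to the raw provider APIs above; a quick sketch (the API key is a placeholder, and the `context` field is only present when catalogue metadata is available):

```python
async def list_deepseek_models() -> None:
    helper = LLMHelper()
    models = await helper.get_models("deepseek", api_key="sk-***", force_refresh=True)
    for item in models:
        print(item["id"], item.get("context"))
```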

2048
app/agent/llm/provider.py Normal file

File diff suppressed because it is too large

View File

@@ -1,39 +1,45 @@
"""对话记忆管理器"""
import asyncio
import json
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Any
from datetime import datetime
from typing import Dict, List, Optional
from langchain_core.messages import BaseMessage
from app.core.config import settings
from app.helper.redis import AsyncRedisHelper
from app.log import logger
from app.schemas.agent import ConversationMemory
class ConversationMemoryManager:
"""对话记忆管理器"""
class MemoryManager:
"""
对话记忆管理器
"""
def __init__(self):
# 内存中的会话记忆缓存
self.memory_cache: Dict[str, ConversationMemory] = {}
# 使用现有的Redis助手
self.redis_helper = AsyncRedisHelper()
# 内存缓存清理任务Redis通过TTL自动过期
# 内存缓存清理任务
self.cleanup_task: Optional[asyncio.Task] = None
async def initialize(self):
"""初始化记忆管理器"""
def initialize(self):
"""
初始化记忆管理器
"""
try:
# 启动内存缓存清理任务Redis通过TTL自动过期
self.cleanup_task = asyncio.create_task(self._cleanup_expired_memories())
self.cleanup_task = asyncio.create_task(
self._cleanup_expired_memories()
)
logger.info("对话记忆管理器初始化完成")
except Exception as e:
logger.warning(f"Redis连接失败将使用内存存储: {e}")
async def close(self):
"""关闭记忆管理器"""
"""
关闭记忆管理器
"""
if self.cleanup_task:
self.cleanup_task.cancel()
try:
@@ -41,222 +47,77 @@ class ConversationMemoryManager:
except asyncio.CancelledError:
pass
await self.redis_helper.close()
logger.info("对话记忆管理器已关闭")
@staticmethod
def get_memory_key(session_id: str, user_id: str):
"""计算内存Key"""
def _get_memory_key(session_id: str, user_id: str):
"""
计算内存Key
"""
return f"{user_id}:{session_id}" if user_id else session_id
@staticmethod
def get_redis_key(session_id: str, user_id: str):
"""计算Redis Key"""
return f"agent_memory:{user_id}:{session_id}" if user_id else f"agent_memory:{session_id}"
async def get_memory(self, session_id: str, user_id: str) -> ConversationMemory:
"""获取会话记忆"""
# 首先检查缓存
cache_key = self.get_memory_key(session_id, user_id)
if cache_key in self.memory_cache:
return self.memory_cache[cache_key]
# 尝试从Redis加载
if settings.CACHE_BACKEND_TYPE == "redis":
try:
redis_key = self.get_redis_key(session_id, user_id)
memory_data = await self.redis_helper.get(redis_key, region="AI_AGENT")
if memory_data:
memory_dict = json.loads(memory_data) if isinstance(memory_data, str) else memory_data
memory = ConversationMemory(**memory_dict)
self.memory_cache[cache_key] = memory
return memory
except Exception as e:
logger.warning(f"从Redis加载记忆失败: {e}")
# 创建新的记忆
memory = ConversationMemory(session_id=session_id, user_id=user_id)
self.memory_cache[cache_key] = memory
await self._save_memory(memory)
return memory
async def set_title(self, session_id: str, user_id: str, title: str):
"""设置会话标题"""
memory = await self.get_memory(session_id=session_id, user_id=user_id)
memory.title = title
memory.updated_at = datetime.now()
await self._save_memory(memory)
async def get_title(self, session_id: str, user_id: str) -> Optional[str]:
"""获取会话标题"""
memory = await self.get_memory(session_id=session_id, user_id=user_id)
return memory.title
async def list_sessions(self, user_id: str, limit: int = 100) -> List[Dict[str, Any]]:
"""列出历史会话摘要(按更新时间倒序)
- 当启用Redis时,遍历 `agent_memory:*` 键并读取摘要
- 当未启用Redis时,基于内存缓存返回
def get_memory(self, session_id: str, user_id: str) -> Optional[ConversationMemory]:
"""
sessions: List[ConversationMemory] = []
# 从Redis遍历
if settings.CACHE_BACKEND_TYPE == "redis":
try:
# 使用Redis助手的items方法遍历所有键
async for key, value in self.redis_helper.items(region="AI_AGENT"):
if key.startswith("agent_memory:"):
try:
# 解析键名获取user_id和session_id
key_parts = key.split(":")
if len(key_parts) >= 3:
key_user_id = key_parts[2] if len(key_parts) > 3 else None
if not user_id or key_user_id == user_id:
data = value if isinstance(value, dict) else json.loads(value)
memory = ConversationMemory(**data)
sessions.append(memory)
except Exception as err:
logger.warning(f"解析Redis记忆数据失败: {err}")
continue
except Exception as e:
logger.warning(f"遍历Redis会话失败: {e}")
获取内存中的记忆
"""
cache_key = self._get_memory_key(session_id, user_id)
return self.memory_cache.get(cache_key)
# 合并内存缓存(确保包含近期的会话)
for cache_key, memory in self.memory_cache.items():
# 如果指定了user_id只返回该用户的会话
if not user_id or memory.user_id == user_id:
sessions.append(memory)
# 去重(以 session_id 为键,取最近 updated)
uniq: Dict[str, ConversationMemory] = {}
for mem in sessions:
existed = uniq.get(mem.session_id)
if (not existed) or (mem.updated_at > existed.updated_at):
uniq[mem.session_id] = mem
# 排序并裁剪
sorted_list = sorted(uniq.values(), key=lambda m: m.updated_at, reverse=True)[:limit]
return [
{
"session_id": m.session_id,
"title": m.title or "新会话",
"message_count": len(m.messages),
"created_at": m.created_at.isoformat(),
"updated_at": m.updated_at.isoformat(),
}
for m in sorted_list
]
async def add_memory(
self,
session_id: str,
user_id: str,
role: str,
content: str,
metadata: Optional[Dict[str, Any]] = None
):
"""添加消息到记忆"""
memory = await self.get_memory(session_id=session_id, user_id=user_id)
message = {
"role": role,
"content": content,
"timestamp": datetime.now().isoformat(),
"metadata": metadata or {}
}
memory.messages.append(message)
memory.updated_at = datetime.now()
# 限制消息数量,避免记忆过大
max_messages = settings.LLM_MAX_MEMORY_MESSAGES
if len(memory.messages) > max_messages:
# 保留最近的消息,但保留第一条系统消息
system_messages = [msg for msg in memory.messages if msg["role"] == "system"]
recent_messages = memory.messages[-(max_messages - len(system_messages)):]
memory.messages = system_messages + recent_messages
await self._save_memory(memory)
logger.debug(f"消息已添加到记忆: session_id={session_id}, user_id={user_id}, role={role}")
def get_recent_messages_for_agent(
self,
session_id: str,
user_id: str
) -> List[Dict[str, Any]]:
"""为Agent获取最近的消息仅内存缓存
def get_agent_messages(
self, session_id: str, user_id: str
) -> List[BaseMessage]:
"""
为Agent获取最近的消息(仅内存缓存)
如果消息Token数量超过模型最大上下文长度的阈值,会自动进行摘要裁剪
"""
cache_key = self.get_memory_key(session_id, user_id)
memory = self.memory_cache.get(cache_key)
memory = self.get_memory(session_id, user_id)
if not memory:
return []
# 获取所有消息
return memory.messages
async def get_recent_messages(
self,
session_id: str,
user_id: str,
limit: int = 10,
role_filter: Optional[list] = None
) -> List[Dict[str, Any]]:
"""获取最近的消息"""
memory = await self.get_memory(session_id=session_id, user_id=user_id)
def save_agent_messages(
self, session_id: str, user_id: str, messages: List[BaseMessage]
):
"""
保存Agent消息(仅内存缓存)
messages = memory.messages
if role_filter:
messages = [msg for msg in messages if msg["role"] in role_filter]
注意:Redis中的记忆通过TTL机制自动过期,这里只更新内存缓存,Redis会在下次访问时自动过期
"""
memory = self.get_memory(session_id, user_id)
if not memory:
memory = ConversationMemory(session_id=session_id, user_id=user_id)
return messages[-limit:] if messages else []
memory.messages = messages
memory.updated_at = datetime.now()
async def get_context(self, session_id: str, user_id: str) -> Dict[str, Any]:
"""获取会话上下文"""
memory = await self.get_memory(session_id=session_id, user_id=user_id)
return memory.context
# 更新内存缓存
self.save_memory(memory)
async def clear_memory(self, session_id: str, user_id: str):
"""清空会话记忆"""
cache_key = f"{user_id}:{session_id}" if user_id else session_id
def save_memory(self, memory: ConversationMemory):
"""
保存记忆到内存缓存
注意:Redis中的记忆通过TTL机制自动过期,这里只更新内存缓存,Redis会在下次访问时自动过期
"""
cache_key = self._get_memory_key(memory.session_id, memory.user_id)
self.memory_cache[cache_key] = memory
def clear_memory(self, session_id: str, user_id: str):
"""
清空会话记忆
"""
cache_key = self._get_memory_key(session_id, user_id)
if cache_key in self.memory_cache:
del self.memory_cache[cache_key]
if settings.CACHE_BACKEND_TYPE == "redis":
redis_key = self.get_redis_key(session_id, user_id)
await self.redis_helper.delete(redis_key, region="AI_AGENT")
logger.info(f"会话记忆已清空: session_id={session_id}, user_id={user_id}")
async def _save_memory(self, memory: ConversationMemory):
"""保存记忆到存储
Redis中的记忆会自动通过TTL机制过期,无需手动清理
"""
# 更新内存缓存
cache_key = self.get_memory_key(memory.session_id, memory.user_id)
self.memory_cache[cache_key] = memory
# 保存到Redis设置TTL自动过期
if settings.CACHE_BACKEND_TYPE == "redis":
try:
memory_dict = memory.model_dump()
redis_key = self.get_redis_key(memory.session_id, memory.user_id)
ttl = int(timedelta(days=settings.LLM_REDIS_MEMORY_RETENTION_DAYS).total_seconds())
await self.redis_helper.set(
redis_key,
memory_dict,
ttl=ttl,
region="AI_AGENT"
)
except Exception as e:
logger.warning(f"保存记忆到Redis失败: {e}")
async def _cleanup_expired_memories(self):
"""清理内存中过期记忆的后台任务
"""
清理内存中过期记忆的后台任务
注意:Redis中的记忆通过TTL机制自动过期,这里只清理内存缓存
"""
@@ -271,7 +132,9 @@ class ConversationMemoryManager:
# 只检查内存缓存中的过期记忆
# Redis中的记忆会通过TTL自动过期,无需手动处理
for cache_key, memory in self.memory_cache.items():
if (current_time - memory.updated_at).days > settings.LLM_MEMORY_RETENTION_DAYS:
if (
current_time - memory.updated_at
).days > settings.LLM_MEMORY_RETENTION_DAYS:
expired_sessions.append(cache_key)
# 只清理内存缓存,不删除Redis中的键(Redis会自动过期)
@@ -286,3 +149,6 @@ class ConversationMemoryManager:
break
except Exception as e:
logger.error(f"清理记忆时发生错误: {e}")
memory_manager = MemoryManager()
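A rough sketch of the in-memory round trip implied by the new synchronous API; session identifiers and message contents are placeholders:

```python
from langchain_core.messages import AIMessage, HumanMessage

memory_manager.save_agent_messages(
    session_id="session-1",
    user_id="user-1",
    messages=[HumanMessage(content="帮我订阅一部剧"), AIMessage(content="已添加订阅")],
)
history = memory_manager.get_agent_messages(session_id="session-1", user_id="user-1")
memory_manager.clear_memory(session_id="session-1", user_id="user-1")
```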

View File

@@ -0,0 +1,406 @@
"""
活动日志中间件 - 自动记录 Agent 每次交互的操作摘要。
按日期存储在 CONFIG_PATH/agent/activity/YYYY-MM-DD.md 中,
每次 Agent 执行完毕后自动调用 LLM 对本轮对话生成简洁的活动摘要,
并在每次 Agent 启动时加载近几天的活动日志,注入系统提示词。
"""
import re
from collections.abc import Awaitable, Callable
from datetime import datetime, timedelta
from typing import Annotated, Any, NotRequired, TypedDict
from anyio import Path as AsyncPath
from langchain.agents.middleware.types import (
AgentMiddleware,
AgentState,
ContextT,
ModelRequest,
ModelResponse,
PrivateStateAttr, # noqa
ResponseT,
)
from langchain_core.messages import AIMessage, HumanMessage, ToolMessage
from langgraph.runtime import Runtime
from app.agent.middleware.utils import append_to_system_message
from app.log import logger
# 活动日志保留天数
DEFAULT_RETENTION_DAYS = 7
# 注入系统提示词时加载的天数
PROMPT_LOAD_DAYS = 3
# 每日日志文件最大大小 (256KB)
MAX_LOG_FILE_SIZE = 256 * 1024
# 提取本轮对话上下文的最大字符数(避免过长的对话消耗太多 token)
MAX_CONTEXT_FOR_SUMMARY = 4000
# LLM 总结的提示词
SUMMARY_PROMPT = """请根据以下 AI 助手与用户的对话记录生成一条简洁的活动摘要中文一句话不超过80字
摘要应包含:用户的需求是什么、助手做了什么、结果如何。
只输出摘要内容,不要加任何前缀、标点序号或解释。
对话记录:
{conversation}"""
class ActivityLogState(AgentState):
"""ActivityLogMiddleware 的状态模型。"""
activity_log_contents: NotRequired[Annotated[dict[str, str], PrivateStateAttr]]
"""将日期字符串映射到日志内容的字典。标记为私有,不包含在最终代理状态中。"""
class ActivityLogStateUpdate(TypedDict):
"""ActivityLogMiddleware 的状态更新。"""
activity_log_contents: dict[str, str]
def _extract_last_round(messages: list) -> list | None:
"""从完整消息列表中提取最后一轮交互。
从最后一条 HumanMessage 到消息末尾即为本轮交互。
参数:
messages: Agent 执行后的完整消息列表。
返回:
本轮交互的消息子列表,如果无有效交互则返回 None。
"""
if not messages:
return None
# 找到最后一条用户消息的索引
last_human_idx = None
for i in range(len(messages) - 1, -1, -1):
if isinstance(messages[i], HumanMessage) and messages[i].content:
last_human_idx = i
break
if last_human_idx is None:
return None
round_messages = messages[last_human_idx:]
# 检查是否为系统心跳消息
user_msg = round_messages[0]
user_content = (
user_msg.content if isinstance(user_msg.content, str) else str(user_msg.content)
)
if user_content.strip().startswith("[System Heartbeat]"):
return None
return round_messages
def _format_conversation_for_summary(round_messages: list) -> str:
"""将本轮对话消息格式化为文本,供 LLM 总结。
参数:
round_messages: 本轮交互的消息列表。
返回:
格式化后的对话文本。
"""
lines = []
total_len = 0
for msg in round_messages:
if isinstance(msg, HumanMessage):
content = msg.content if isinstance(msg.content, str) else str(msg.content)
line = f"用户: {content}"
elif isinstance(msg, AIMessage):
if hasattr(msg, "tool_calls") and msg.tool_calls:
tool_names = [
tc["name"]
for tc in msg.tool_calls
if isinstance(tc, dict) and "name" in tc
]
line = f"助手调用工具: {', '.join(tool_names)}"
elif msg.content:
content = (
msg.content if isinstance(msg.content, str) else str(msg.content)
)
line = f"助手: {content}"
else:
continue
elif isinstance(msg, ToolMessage):
content = msg.content if isinstance(msg.content, str) else str(msg.content)
# 工具返回可能很长,截断
if len(content) > 200:
content = content[:200] + "..."
line = f"工具返回: {content}"
else:
continue
# 控制总长度
if total_len + len(line) > MAX_CONTEXT_FOR_SUMMARY:
lines.append("...(后续对话省略)")
break
lines.append(line)
total_len += len(line)
return "\n".join(lines)
async def _summarize_with_llm(conversation_text: str) -> str | None:
"""调用 LLM 对对话文本生成活动摘要。
参数:
conversation_text: 格式化后的对话文本。
返回:
LLM 生成的摘要字符串,失败时返回 None。
"""
try:
from app.agent.llm import LLMHelper
llm = await LLMHelper.get_llm(streaming=False)
prompt = SUMMARY_PROMPT.format(conversation=conversation_text)
response = await llm.ainvoke(prompt)
summary = response.content.strip()
# 清理模型可能输出的前缀(如 "摘要:"、"总结:")
summary = re.sub(r"^(摘要|总结|活动记录)[:]\s*", "", summary)
return summary if summary else None
except Exception as e:
logger.debug("LLM summarization failed: %s", e)
return None
ACTIVITY_LOG_SYSTEM_PROMPT = """<activity_log>
{activity_log}
</activity_log>
<activity_log_guidelines>
The above <activity_log> contains a record of your recent interactions with the user, automatically maintained by the system.
**How to use this information:**
- Reference past activities when relevant to provide continuity (e.g., "之前帮你订阅了《XXX》,现在有更新了")
- Use activity history to understand ongoing tasks and user patterns
- When the user asks "你之前帮我做了什么" or similar questions, refer to this log
- Activity logs are automatically recorded after each interaction - you do NOT need to manually update them
**What is automatically logged:**
- Each user interaction: what was asked, which tools were used, and the outcome
- Timestamps for all activities
- The log is organized by date for easy reference
**Important:**
- Activity logs are READ-ONLY from your perspective - the system manages them automatically
- Do not attempt to edit or write to activity log files
- For long-term preferences and knowledge, continue to use MEMORY.md
- Activity logs are retained for {retention_days} days and then automatically cleaned up
</activity_log_guidelines>
"""
class ActivityLogMiddleware(AgentMiddleware[ActivityLogState, ContextT, ResponseT]): # noqa
"""自动记录和加载 Agent 活动日志的中间件。
- abefore_agent: 加载近几天的活动日志
- awrap_model_call: 将活动日志注入系统提示词
- aafter_agent: 从本次对话中提取摘要并追加到当日日志文件
参数:
activity_dir: 活动日志存储目录路径。
retention_days: 日志保留天数(默认 7 天)。
prompt_load_days: 注入系统提示词时加载的天数(默认 3 天)。
"""
state_schema = ActivityLogState
def __init__(
self,
*,
activity_dir: str,
retention_days: int = DEFAULT_RETENTION_DAYS,
prompt_load_days: int = PROMPT_LOAD_DAYS,
) -> None:
self.activity_dir = activity_dir
self.retention_days = retention_days
self.prompt_load_days = prompt_load_days
def _get_log_path(self, date_str: str) -> AsyncPath:
"""获取指定日期的日志文件路径。"""
return AsyncPath(self.activity_dir) / f"{date_str}.md"
def _format_activity_log(self, contents: dict[str, str]) -> str:
"""格式化活动日志用于系统提示词注入。"""
if not contents:
return ACTIVITY_LOG_SYSTEM_PROMPT.format(
activity_log="(暂无活动记录)",
retention_days=self.retention_days,
)
# 按日期排序(最近的在前)
sorted_dates = sorted(contents.keys(), reverse=True)
sections = []
for date_str in sorted_dates:
content = contents[date_str].strip()
if content:
sections.append(f"### {date_str}\n{content}")
if not sections:
return ACTIVITY_LOG_SYSTEM_PROMPT.format(
activity_log="(暂无活动记录)",
retention_days=self.retention_days,
)
log_body = "\n\n".join(sections)
return ACTIVITY_LOG_SYSTEM_PROMPT.format(
activity_log=log_body,
retention_days=self.retention_days,
)
async def _load_recent_logs(self) -> dict[str, str]:
"""加载近几天的活动日志。"""
contents: dict[str, str] = {}
today = datetime.now().date()
for i in range(self.prompt_load_days):
date = today - timedelta(days=i)
date_str = date.strftime("%Y-%m-%d")
log_path = self._get_log_path(date_str)
if await log_path.exists():
try:
content = await log_path.read_text(encoding="utf-8")
contents[date_str] = content
logger.debug("Loaded activity log for %s", date_str)
except Exception as e:
logger.warning("Failed to load activity log %s: %s", date_str, e)
return contents
async def _append_activity(self, summary: str) -> None:
"""将一条活动记录追加到当日日志文件。"""
today_str = datetime.now().strftime("%Y-%m-%d")
now_str = datetime.now().strftime("%H:%M")
log_path = self._get_log_path(today_str)
# 确保目录存在
dir_path = AsyncPath(self.activity_dir)
if not await dir_path.exists():
await dir_path.mkdir(parents=True, exist_ok=True)
# 检查文件大小
if await log_path.exists():
stat = await log_path.stat()
if stat.st_size >= MAX_LOG_FILE_SIZE:
logger.warning(
"Activity log %s exceeds size limit (%d bytes), skipping append",
today_str,
stat.st_size,
)
return
# 追加记录
entry = f"- **{now_str}** {summary}\n"
try:
if await log_path.exists():
existing = await log_path.read_text(encoding="utf-8")
await log_path.write_text(existing + entry, encoding="utf-8")
else:
header = f"# {today_str} 活动日志\n\n"
await log_path.write_text(header + entry, encoding="utf-8")
logger.debug("Activity logged: %s", summary[:80])
except Exception as e:
logger.warning("Failed to append activity log: %s", e)
async def _cleanup_old_logs(self) -> None:
"""清理超过保留天数的旧日志文件。"""
dir_path = AsyncPath(self.activity_dir)
if not await dir_path.exists():
return
cutoff_date = datetime.now().date() - timedelta(days=self.retention_days)
date_pattern = re.compile(r"^(\d{4}-\d{2}-\d{2})\.md$")
try:
async for path in dir_path.iterdir():
if not await path.is_file():
continue
match = date_pattern.match(path.name)
if not match:
continue
try:
file_date = datetime.strptime(match.group(1), "%Y-%m-%d").date()
if file_date < cutoff_date:
await path.unlink()
logger.debug("Cleaned up old activity log: %s", path.name)
except ValueError:
continue
except Exception as e:
logger.warning("Failed to cleanup old activity logs: %s", e)
async def abefore_agent(
self, state: ActivityLogState, runtime: Runtime
) -> ActivityLogStateUpdate | None:
"""在 Agent 执行前加载近期活动日志。"""
# 如果已经加载则跳过
if "activity_log_contents" in state:
return None
contents = await self._load_recent_logs()
# 趁机清理旧日志(低频操作,不影响性能)
await self._cleanup_old_logs()
return ActivityLogStateUpdate(activity_log_contents=contents)
def modify_request(self, request: ModelRequest[ContextT]) -> ModelRequest[ContextT]:
"""将活动日志注入系统消息。"""
contents = request.state.get("activity_log_contents", {}) # noqa
activity_log_prompt = self._format_activity_log(contents)
new_system_message = append_to_system_message(
request.system_message, activity_log_prompt
)
return request.override(system_message=new_system_message)
async def awrap_model_call(
self,
request: ModelRequest[ContextT],
handler: Callable[
[ModelRequest[ContextT]], Awaitable[ModelResponse[ResponseT]]
],
) -> ModelResponse[ResponseT]:
"""异步包装模型调用,注入活动日志到系统提示词。"""
modified_request = self.modify_request(request)
return await handler(modified_request)
async def aafter_agent(
self, state: ActivityLogState, runtime: Runtime
) -> dict[str, Any] | None:
"""Agent 执行完毕后,调用 LLM 对本轮对话生成摘要并追加到当日活动日志。"""
try:
messages = state.get("messages", [])
if not messages:
return None
# 提取本轮交互
round_messages = _extract_last_round(messages)
if not round_messages:
return None
# 格式化对话文本
conversation_text = _format_conversation_for_summary(round_messages)
if not conversation_text:
return None
# 调用 LLM 生成摘要
summary = await _summarize_with_llm(conversation_text)
if summary:
await self._append_activity(summary)
except Exception as e:
logger.warning("Failed to record activity: %s", e)
return None
__all__ = ["ActivityLogMiddleware"]

View File

@@ -0,0 +1,350 @@
import re
from collections.abc import Awaitable, Callable
from typing import Annotated, NotRequired, TypedDict
import yaml # noqa
from anyio import Path as AsyncPath
from langchain.agents.middleware.types import (
AgentMiddleware,
AgentState,
ContextT,
ModelRequest,
ModelResponse,
PrivateStateAttr, # noqa
ResponseT,
)
from langchain_core.runnables import RunnableConfig
from langgraph.runtime import Runtime
from app.agent.middleware.utils import append_to_system_message
from app.log import logger
# JOB.md 文件最大限制为 1MB
MAX_JOB_FILE_SIZE = 1 * 1024 * 1024
class JobMetadata(TypedDict):
"""Job 元数据。"""
path: str
"""JOB.md 文件路径。"""
id: str
"""Job 标识符(目录名)。"""
name: str
"""Job 名称。"""
description: str
"""Job 描述。"""
schedule: str
"""调度类型: once一次性/ recurring重复性"""
status: str
"""当前状态: pending / in_progress / completed / cancelled。"""
last_run: str | None
"""上次执行时间。"""
class JobsState(AgentState):
"""jobs 中间件状态。"""
jobs_metadata: NotRequired[Annotated[list[JobMetadata], PrivateStateAttr]]
"""已加载的 job 元数据列表,不传播给父 agent。"""
class JobsStateUpdate(TypedDict):
"""jobs 中间件状态更新项。"""
jobs_metadata: list[JobMetadata]
"""待合并的 job 元数据列表。"""
def _parse_job_metadata(
content: str,
job_path: str,
job_id: str,
) -> JobMetadata | None:
"""从 JOB.md 内容中解析 YAML 前言并验证元数据。"""
if len(content) > MAX_JOB_FILE_SIZE:
logger.warning(
"Skipping %s: content too large (%d bytes)", job_path, len(content)
)
return None
# 匹配 --- 分隔的 YAML 前言
frontmatter_pattern = r"^---\s*\n(.*?)\n---\s*\n"
match = re.match(frontmatter_pattern, content, re.DOTALL)
if not match:
logger.warning("Skipping %s: no valid YAML frontmatter found", job_path)
return None
frontmatter_str = match.group(1)
# 解析 YAML
try:
frontmatter_data = yaml.safe_load(frontmatter_str)
except yaml.YAMLError as e:
logger.warning("Invalid YAML in %s: %s", job_path, e)
return None
if not isinstance(frontmatter_data, dict):
logger.warning("Skipping %s: frontmatter is not a mapping", job_path)
return None
# Job 名称和描述
name = str(frontmatter_data.get("name", "")).strip()
description = str(frontmatter_data.get("description", "")).strip()
if not name:
logger.warning("Skipping %s: missing required 'name'", job_path)
return None
# 调度类型
schedule = str(frontmatter_data.get("schedule", "once")).strip().lower()
if schedule not in ("once", "recurring"):
schedule = "once"
# 状态
status = str(frontmatter_data.get("status", "pending")).strip().lower()
if status not in ("pending", "in_progress", "completed", "cancelled"):
status = "pending"
# 上次执行时间
last_run = str(frontmatter_data.get("last_run", "")).strip() or None
return JobMetadata(
id=job_id,
name=name,
description=description,
path=job_path,
schedule=schedule,
status=status,
last_run=last_run,
)
async def _alist_jobs(source_path: AsyncPath) -> list[JobMetadata]:
"""异步列出指定路径下的所有任务。
扫描包含 JOB.md 的目录并解析其元数据。
"""
jobs: list[JobMetadata] = []
if not await source_path.exists():
return []
# 查找所有任务目录(包含 JOB.md 的目录)
job_dirs: list[AsyncPath] = []
async for path in source_path.iterdir():
if await path.is_dir() and await (path / "JOB.md").is_file():
job_dirs.append(path)
if not job_dirs:
return []
# 解析 JOB.md
for job_path in job_dirs:
job_md_path = job_path / "JOB.md"
job_content = await job_md_path.read_text(encoding="utf-8")
# 解析元数据
job_metadata = _parse_job_metadata(
content=job_content,
job_path=str(job_md_path),
job_id=job_path.name,
)
if job_metadata:
jobs.append(job_metadata)
return jobs
JOBS_SYSTEM_PROMPT = """
<jobs_system>
You have a **scheduled jobs** system that allows you to track and execute long-running or recurring tasks.
**Jobs Location:** `{jobs_location}`
**Current Jobs:**
{jobs_list}
**Job File Format:**
Each job is a directory containing a `JOB.md` file with YAML frontmatter followed by task details:
```markdown
---
name: 任务名称(简短中文描述)
description: 任务的详细描述,说明要做什么
schedule: once 或 recurring
status: pending / in_progress / completed / cancelled
last_run: "YYYY-MM-DD HH:MM"(上次执行时间,可选)
---
# 任务详情
## 目标
详细描述这个任务要完成的目标。
## 执行日志
记录每次执行的情况和结果。
- **2024-01-15 10:00** - 执行了XXX操作,结果:成功/失败
- **2024-01-16 10:00** - 继续执行XXX...
```
**Job Lifecycle Rules:**
1. **Creating a Job**: When a user asks you to do something periodically or at a later time:
- Create a new directory under the jobs location, directory name is the `job-id` (lowercase, hyphens, 1-64 chars)
- Write a `JOB.md` file with proper frontmatter and detailed task description
- Set `schedule: once` for one-time tasks, `schedule: recurring` for repeating tasks (e.g., daily sign-in, weekly checks)
- Set initial `status: pending`
2. **Executing a Job**: When you work on a job:
- Update `status: in_progress` in the frontmatter
- Execute the required actions using your tools
- Log the execution result in the "执行日志" section with timestamp
- Update `last_run` in frontmatter to current time
3. **Completing a Job**:
- For `schedule: once` tasks: set `status: completed` after successful execution
- For `schedule: recurring` tasks: keep `status: pending` after execution, only update `last_run` time. The job stays active for the next scheduled run.
- Set `status: cancelled` if the user explicitly asks to cancel/stop a task
4. **Heartbeat Check**: You will be periodically woken up to check pending jobs. When woken up:
- Read the jobs directory to find all active jobs (status: pending or in_progress)
- Skip jobs with `status: completed` or `status: cancelled`
- For `schedule: recurring` jobs, check `last_run` to determine if it's time to run again
- Execute pending jobs and update their status/logs accordingly
**Important Notes:**
- Each job MUST have its own separate directory and JOB.md file to avoid conflicts
- Always update the frontmatter fields (status, last_run) when executing a job
- Keep execution logs concise but informative
- For recurring jobs, maintain a rolling log (keep recent entries, you can summarize/remove old entries to keep the file manageable)
- When creating jobs, make the description detailed enough that you can understand and execute the task in future sessions without additional context
**When to Create Jobs:**
- User says "每天帮我..." / "定期..." / "定时..." / "提醒我..." / "以后每次..."
- User requests a task that should be done repeatedly
- User asks for monitoring or periodic checking of something
**When NOT to Create Jobs:**
- User asks for an immediate one-time action (just do it now)
- Simple questions or conversations
- Tasks that are already handled by MoviePilot's built-in scheduler services
</jobs_system>
"""
class JobsMiddleware(AgentMiddleware[JobsState, ContextT, ResponseT]): # noqa
"""加载并向系统提示词注入 Agent Jobs 的中间件。
扫描 jobs 目录下的 JOB.md 文件,解析元数据并注入到系统提示词中,
使智能体了解当前的长期任务及其状态。
"""
state_schema = JobsState
def __init__(self, *, sources: list[str]) -> None:
"""初始化 Jobs 中间件。"""
self.sources = sources
self.system_prompt_template = JOBS_SYSTEM_PROMPT
@staticmethod
def _format_jobs_list(jobs: list[JobMetadata]) -> str:
"""格式化任务元数据列表用于系统提示词。"""
if not jobs:
return "(No active jobs. You can create jobs when users request periodic or scheduled tasks.)"
lines = []
for job in jobs:
status_emoji = {
"pending": "",
"in_progress": "🔄",
"completed": "",
"cancelled": "",
}.get(job["status"], "")
schedule_label = (
"recurring (重复)"
if job["schedule"] == "recurring"
else "once (一次性)"
)
desc_line = (
f"- {status_emoji} **{job['id']}**: {job['name']}"
f" [{schedule_label}] - {job['description']}"
)
if job.get("last_run"):
desc_line += f" (上次执行: {job['last_run']})"
lines.append(desc_line)
lines.append(f" -> Read `{job['path']}` for full details")
return "\n".join(lines)
def modify_request(self, request: ModelRequest[ContextT]) -> ModelRequest[ContextT]:
"""将任务文档注入模型请求的系统消息中。"""
jobs_metadata = request.state.get("jobs_metadata", []) # noqa
# 过滤只展示活跃任务pending / in_progress / recurring
active_jobs = [
j
for j in jobs_metadata
if j["status"] in ("pending", "in_progress")
or (j["schedule"] == "recurring" and j["status"] not in ("cancelled",))
]
jobs_list = self._format_jobs_list(active_jobs)
jobs_location = self.sources[0] if self.sources else ""
jobs_section = self.system_prompt_template.format(
jobs_location=jobs_location,
jobs_list=jobs_list,
)
new_system_message = append_to_system_message(
request.system_message, jobs_section
)
return request.override(system_message=new_system_message)
async def abefore_agent( # noqa
self, state: JobsState, runtime: Runtime, config: RunnableConfig
) -> JobsStateUpdate | None:
"""在 Agent 执行前异步加载任务元数据。
每个会话仅加载一次。若 state 中已有则跳过。
"""
# 如果 state 中已存在元数据则跳过
if "jobs_metadata" in state:
return None
all_jobs: list[JobMetadata] = []
# 遍历源加载任务
for source_path_str in self.sources:
source_path = AsyncPath(source_path_str)
if not await source_path.exists():
await source_path.mkdir(parents=True, exist_ok=True)
continue
source_jobs = await _alist_jobs(source_path)
all_jobs.extend(source_jobs)
return JobsStateUpdate(jobs_metadata=all_jobs)
async def awrap_model_call(
self,
request: ModelRequest[ContextT],
handler: Callable[
[ModelRequest[ContextT]], Awaitable[ModelResponse[ResponseT]]
],
) -> ModelResponse[ResponseT]:
"""在模型调用时注入任务文档。"""
modified_request = self.modify_request(request)
return await handler(modified_request)
__all__ = ["JobMetadata", "JobsMiddleware"]

View File

@@ -0,0 +1,396 @@
from collections.abc import Awaitable, Callable
from typing import Annotated, NotRequired, TypedDict, Dict
from anyio import Path as AsyncPath
from langchain.agents.middleware.types import (
AgentMiddleware,
AgentState,
ContextT,
ModelRequest,
ModelResponse,
PrivateStateAttr, # noqa
ResponseT,
)
from langchain_core.runnables import RunnableConfig
from langgraph.runtime import Runtime
from app.agent.middleware.utils import append_to_system_message
from app.log import logger
# 记忆文件最大限制为 100KB,防止单文件过大导致上下文溢出
MAX_MEMORY_FILE_SIZE = 100 * 1024
# 默认记忆文件名(用户主记忆)
DEFAULT_MEMORY_FILE = "MEMORY.md"
class MemoryState(AgentState):
"""`MemoryMiddleware` 的状态模型。
属性:
memory_contents: 将源路径映射到其加载内容的字典。
标记为私有,因此不包含在最终的代理状态中。
memory_empty: 记忆文件是否为空或不存在。
标记为私有,用于判断是否需要触发初始化引导流程。
"""
memory_contents: NotRequired[Annotated[dict[str, str], PrivateStateAttr]]
memory_empty: NotRequired[Annotated[bool, PrivateStateAttr]]
class MemoryStateUpdate(TypedDict):
"""`MemoryMiddleware` 的状态更新。"""
memory_contents: dict[str, str]
memory_empty: bool
MEMORY_SYSTEM_PROMPT = """<agent_memory>
The following memory files were loaded from your memory directory: `{memory_dir}`
You can create, edit, or organize any `.md` files in this directory to manage your knowledge.
{agent_memory}
</agent_memory>
<memory_guidelines>
The above <agent_memory> was loaded from `.md` files in your memory directory (`{memory_dir}`). As you learn from your interactions with the user, you can save new knowledge by calling the `edit_file` or `write_file` tool on files in this directory.
**Memory file organization:**
- All `.md` files in `{memory_dir}` are automatically loaded as memory.
- `MEMORY.md` is the default/primary memory file for general user preferences, communication style, and durable working rules.
- You may create additional `.md` files to organize knowledge by topic (e.g., `MEDIA_RULES.md`, `COMMUNICATION_PREFERENCES.md`, `DOWNLOAD_PREFERENCES.md`, `SITE_CONFIGS.md`, etc.).
- Keep each file focused on a specific domain or topic for better organization.
- Subdirectories are NOT scanned — only `.md` files directly in `{memory_dir}`.
**Learning from feedback:**
- One of your MAIN PRIORITIES is to learn from your interactions with the user. These learnings can be implicit or explicit. This means that in the future, you will remember this important information.
- When you need to remember something, updating memory must be your FIRST, IMMEDIATE action - before responding to the user, before calling other tools, before doing anything else. Just update memory immediately.
- When user says something is better/worse, capture WHY and encode it as a pattern.
- Each correction is a chance to improve permanently - don't just fix the immediate issue, update your instructions.
- A great opportunity to update your memories is when the user interrupts a tool call and provides feedback. You should update your memories immediately before revising the tool call.
- Look for the underlying principle behind corrections, not just the specific mistake.
- The user might not explicitly ask you to remember something, but if they provide information that is useful for future use, you should update your memories immediately.
**Asking for information:**
- If you lack context to perform an action (e.g. sending a Slack DM requires a user ID/email), you should explicitly ask the user for this information.
- Prefer asking for information; don't assume anything that you do not know!
- When the user provides information that is useful for future use, you should update your memories immediately.
**When to update memories:**
- When the user explicitly asks you to remember something (e.g., "remember my email", "save this preference")
- When the user gives durable communication or reply-format preferences (e.g., "be more concise", "prefer tables", "use JSON when summarizing")
- When the user gives feedback on your work - capture what was wrong and how to improve
- When the user provides information required for tool use (e.g., slack channel ID, email addresses)
- When the user provides context useful for future tasks, such as how to use tools, or which actions to take in a particular situation
- When you discover new user-specific patterns or preferences (communication style, formatting, workflows)
**When to NOT update memories:**
- When the information is temporary or transient (e.g., "I'm running late", "I'm on my phone right now")
- When the information is a one-time task request (e.g., "Find me a recipe", "What's 25 * 4?")
- When the information is a simple question that doesn't reveal lasting preferences (e.g., "What day is it?", "Can you explain X?")
- When the information is an acknowledgment or small talk (e.g., "Sounds good!", "Hello", "Thanks for that")
- When the information is stale or irrelevant in future conversations
- Memory may refine user-facing style, but it must NOT redefine the agent's core identity, safety boundaries, or global system-task rules.
- If the user wants a built-in speaking style/persona, prefer the dedicated persona-switching tools instead of rewriting memory as a substitute.
- Never store API keys, access tokens, passwords, or any other credentials in any file, memory, or system prompt.
- If the user asks where to put API keys or provides an API key, do NOT echo or save it.
- Do NOT record daily activities or task execution history in memory files - these are automatically tracked in the activity log system (see <activity_log>). Memory files are only for long-term knowledge, preferences, and patterns.
**Examples:**
Example 1 (remembering user information):
User: Can you connect to my google account?
Agent: Sure, I'll connect to your google account, what's your google account email?
User: john@example.com
Agent: Let me save this to my memory.
Tool Call: edit_file(...) -> remembers that the user's google account email is john@example.com
Example 2 (remembering implicit user preferences):
User: Can you write me an example for creating a deep agent in LangChain?
Agent: Sure, I'll write you an example for creating a deep agent in LangChain <example code in Python>
User: Can you do this in JavaScript
Agent: Let me save this to my memory.
Tool Call: edit_file(...) -> remembers that the user prefers to get LangChain code examples in JavaScript
Agent: Sure, here is the JavaScript example: <example code in JavaScript>
Example 3 (do not remember transient information):
User: I'm going to play basketball tonight so I will be offline for a few hours.
Agent: Okay I'll add a block to your calendar.
Tool Call: create_calendar_event(...) -> just calls a tool, does not commit anything to memory, as it is transient information
</memory_guidelines>
"""
MEMORY_ONBOARDING_PROMPT = """<agent_memory>
(No memory loaded — this is a brand new user with no saved preferences.)
Memory directory: {memory_dir}
Default memory file: {memory_file}
</agent_memory>
<memory_onboarding>
First-time user detected.
The memory directory is currently empty. This likely means the user has no saved long-term preferences yet.
**Behavior requirements:**
- Do NOT interrupt the current task just to collect preferences.
- Do NOT proactively greet warmly, build rapport, or ask a long onboarding questionnaire.
- Default to a concise, professional style until the user states a preference.
- Only ask for preferences when they are directly useful for the current task, or when a short follow-up question at the end would clearly help future interactions.
**What to collect when useful:**
- Preferred communication style or persona preference
- Media interests
- Quality / codec / subtitle preferences
- Any standing rules the user wants you to follow
**When the user provides lasting preferences**, you MUST promptly save them to `{memory_file}` using `write_file` or `edit_file`.
**Memory format requirements:**
- Use clean Markdown with short sections.
- Record only durable preferences and working rules.
- Do NOT invent personal details or preferred names.
- Do NOT force use of a nickname or personalized greeting.
</memory_onboarding>
<memory_guidelines>
Your memory directory is at: {memory_dir}. You can save new knowledge by calling the `edit_file` or `write_file` tool on any `.md` file in this directory.
**Memory file organization:**
- `MEMORY.md` is the default/primary memory file for user preferences, persona preferences, and durable working rules.
- You may create additional `.md` files to organize knowledge by topic.
- All `.md` files directly in the memory directory are automatically loaded on each conversation.
**Learning from feedback:**
- One of your MAIN PRIORITIES is to learn from your interactions with the user. These learnings can be implicit or explicit. This means that in the future, you will remember this important information.
- When you need to remember something, updating memory must be your FIRST, IMMEDIATE action - before responding to the user, before calling other tools, before doing anything else. Just update memory immediately.
- When user says something is better/worse, capture WHY and encode it as a pattern.
- Each correction is a chance to improve permanently - don't just fix the immediate issue, update your instructions.
- The user might not explicitly ask you to remember something, but if they provide information that is useful for future use, you should update your memories immediately.
**When to update memories:**
- When the user explicitly asks you to remember something
- When the user gives durable communication or reply-format preferences
- When the user gives feedback on your work
- When the user provides information required for tool use
- When you discover new user-specific patterns or preferences
**When to NOT update memories:**
- Temporary/transient information
- One-time task requests
- Simple questions, acknowledgments, or small talk
- Memory may refine user-facing style, but it must NOT redefine the agent's core identity, safety boundaries, or global system-task rules
- If the user wants a built-in speaking style/persona, prefer the dedicated persona-switching tools instead of rewriting memory as a substitute
- Never store API keys, access tokens, passwords, or credentials
- Do NOT record daily activities in memory files — those go to the activity log
</memory_guidelines>
"""
class MemoryMiddleware(AgentMiddleware[MemoryState, ContextT, ResponseT]): # noqa
"""从代理记忆目录加载所有 MD 文件作为记忆的中间件。
自动扫描指定目录下的所有 `.md` 文件,加载其内容并注入到系统提示词中。
支持多文件记忆组织:用户可以创建多个 `.md` 文件来按主题组织知识。
参数:
memory_dir: 记忆文件目录路径。建议使用独立的 `config/agent/memory`
目录,避免与核心规则或人格定义混写。
"""
state_schema = MemoryState
def __init__(
self,
*,
memory_dir: str,
) -> None:
"""初始化记忆中间件。
参数:
memory_dir: 记忆文件目录路径(例如,`"/config/agent/memory"`)。
该目录下所有 `.md` 文件都会被自动加载为记忆。
"""
self.memory_dir = memory_dir
self.default_memory_file = str(AsyncPath(memory_dir) / DEFAULT_MEMORY_FILE)
@staticmethod
def _is_memory_empty(contents: dict[str, str]) -> bool:
"""判断记忆内容是否为空。
检查所有源文件的内容,如果全部为空或仅包含空白字符则返回 True。
参数:
contents: 将源路径映射到内容的字典。
返回:
如果记忆为空则返回 True,否则返回 False。
"""
if not contents:
return True
return all(not content.strip() for content in contents.values())
def _format_agent_memory(
self, contents: dict[str, str], memory_empty: bool = False
) -> str:
"""格式化记忆,将位置和内容成对组合。
当记忆为空时,返回初始化引导提示词,引导智能体主动询问用户偏好。
当记忆非空时,返回标准记忆系统提示词,包含所有加载的文件内容。
参数:
contents: 将源路径映射到内容的字典。
memory_empty: 记忆是否为空的标志位。
返回:
在 <agent_memory> 标签中包装了位置+内容对的格式化字符串。
"""
# 记忆为空时返回初始化引导提示词
if memory_empty or self._is_memory_empty(contents):
return MEMORY_ONBOARDING_PROMPT.format(
memory_dir=self.memory_dir,
memory_file=self.default_memory_file,
)
# 按文件名排序,确保 MEMORY.md 排在最前面
sorted_paths = sorted(
[p for p in contents if contents[p].strip()],
key=lambda p: (0 if AsyncPath(p).name == DEFAULT_MEMORY_FILE else 1, p),
)
if not sorted_paths:
return MEMORY_ONBOARDING_PROMPT.format(
memory_dir=self.memory_dir,
memory_file=self.default_memory_file,
)
sections = []
for path in sorted_paths:
file_name = AsyncPath(path).name
sections.append(f"### {file_name}\n**Path:** `{path}`\n\n{contents[path]}")
memory_body = "\n\n---\n\n".join(sections)
return MEMORY_SYSTEM_PROMPT.format(
agent_memory=memory_body,
memory_dir=self.memory_dir,
)
async def _scan_memory_files(self) -> list[str]:
"""扫描记忆目录下的所有 .md 文件。
仅扫描目录下直接存在的 `.md` 文件(不递归子目录)。
文件大小超过限制的将被跳过。
返回:
发现的 .md 文件路径列表。
"""
dir_path = AsyncPath(self.memory_dir)
if not await dir_path.exists():
return []
md_files: list[str] = []
async for entry in dir_path.iterdir():
if await entry.is_file() and entry.name.lower().endswith(".md"):
md_files.append(str(entry))
return md_files
async def abefore_agent( # noqa
self,
state: MemoryState,
runtime: Runtime, # noqa
config: RunnableConfig,
) -> MemoryStateUpdate | None:
"""在代理执行前扫描记忆目录并加载所有 .md 文件的内容。
自动发现目录下所有 `.md` 文件并加载其内容到状态中。
如果状态中尚未存在则进行加载。
同时检测记忆文件是否为空,设置 memory_empty 标志位,
以便在系统提示词中触发初始化引导流程。
参数:
state: 当前代理状态。
runtime: 运行时上下文。
config: Runnable 配置。
返回:
填充了 memory_contents 和 memory_empty 的状态更新。
"""
# 如果已经加载则跳过
if "memory_contents" in state:
return None
# 扫描目录下所有 .md 文件
md_files = await self._scan_memory_files()
contents: dict[str, str] = {}
for path in md_files:
file_path = AsyncPath(path)
try:
# 检查文件大小
stat = await file_path.stat()
if stat.st_size > MAX_MEMORY_FILE_SIZE:
logger.warning(
"Skipping memory file %s: too large (%d bytes, max %d)",
path,
stat.st_size,
MAX_MEMORY_FILE_SIZE,
)
continue
contents[path] = await file_path.read_text(encoding="utf-8")
logger.debug("Loaded memory from: %s", path)
except Exception as e:
logger.warning("Failed to read memory file %s: %s", path, e)
if contents:
logger.info(
"Loaded %d memory file(s) from %s: %s",
len(contents),
self.memory_dir,
[AsyncPath(p).name for p in contents],
)
# 检测记忆是否为空(文件不存在、文件内容为空白)
is_empty = self._is_memory_empty(contents)
if is_empty:
logger.info(
"Memory is empty, onboarding prompt will be activated for user preference collection."
)
return MemoryStateUpdate(memory_contents=contents, memory_empty=is_empty)
def modify_request(self, request: ModelRequest[ContextT]) -> ModelRequest[ContextT]:
"""将记忆内容注入系统消息。
参数:
request: 要修改的模型请求。
返回:
将记忆注入系统消息后的修改后请求。
"""
contents = request.state.get("memory_contents", {}) # noqa
memory_empty = request.state.get("memory_empty", False) # noqa
agent_memory = self._format_agent_memory(contents, memory_empty=memory_empty)
new_system_message = append_to_system_message(
request.system_message, agent_memory
)
return request.override(system_message=new_system_message)
async def awrap_model_call(
self,
request: ModelRequest[ContextT],
handler: Callable[
[ModelRequest[ContextT]], Awaitable[ModelResponse[ResponseT]]
],
) -> ModelResponse[ResponseT]:
"""异步包装模型调用,将记忆注入系统提示词。
参数:
request: 正在处理的模型请求。
handler: 使用修改后的请求进行调用的异步处理函数。
返回:
来自处理函数的模型响应。
"""
modified_request = self.modify_request(request)
return await handler(modified_request)
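A minimal usage sketch (the directory path and file contents below are assumed examples, not real deployment values): with empty memory, `_format_agent_memory` falls back to the onboarding prompt; otherwise it renders the loaded files into the standard memory prompt with `MEMORY.md` first.
```python
# Sketch only: illustrative paths and contents, not a real configuration.
middleware = MemoryMiddleware(memory_dir="/config/agent/memory")

# Empty memory -> onboarding branch (MEMORY_ONBOARDING_PROMPT)
onboarding = middleware._format_agent_memory({}, memory_empty=True)

# Non-empty memory -> standard branch; MEMORY.md is sorted to the front
contents = {
    "/config/agent/memory/MEDIA_RULES.md": "- Prefer 1080p releases",
    "/config/agent/memory/MEMORY.md": "- Reply concisely",
}
standard = middleware._format_agent_memory(contents)
```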

View File

@@ -0,0 +1,43 @@
from typing import Any
from langchain.agents.middleware import AgentMiddleware, AgentState
from langchain_core.messages import AIMessage, ToolMessage
from langgraph.runtime import Runtime
from langgraph.types import Overwrite
class PatchToolCallsMiddleware(AgentMiddleware):
"""修复消息历史中悬空工具调用的中间件。"""
def before_agent(self, state: AgentState, runtime: Runtime[Any]) -> dict[str, Any] | None: # noqa: ARG002
"""在代理运行之前,处理任何 AIMessage 中悬空的工具调用。"""
messages = state["messages"]
if not messages:
return None
patched_messages = []
# 遍历消息并添加任何悬空的工具调用
for i, msg in enumerate(messages):
patched_messages.append(msg)
if isinstance(msg, AIMessage) and msg.tool_calls:
for tool_call in msg.tool_calls:
corresponding_tool_msg = next(
(m for m in messages[i:] if m.type == "tool" and m.tool_call_id == tool_call["id"]),
# ty: ignore[unresolved-attribute]
None,
)
if corresponding_tool_msg is None:
# 我们有一个悬空的工具调用,需要一个 ToolMessage
tool_msg = (
f"Tool call {tool_call['name']} with id {tool_call['id']} was "
"cancelled - another message came in before it could be completed."
)
patched_messages.append(
ToolMessage(
content=tool_msg,
name=tool_call["name"],
tool_call_id=tool_call["id"],
)
)
return {"messages": Overwrite(patched_messages)}

View File

@@ -0,0 +1,42 @@
"""动态注入 Agent 根层运行时配置的中间件。"""
from collections.abc import Awaitable, Callable
from langchain.agents.middleware.types import (
AgentMiddleware,
ContextT,
ModelRequest,
ModelResponse,
ResponseT,
)
from app.agent.middleware.utils import append_to_system_message
from app.agent.runtime import agent_runtime_manager
class RuntimeConfigMiddleware(AgentMiddleware[dict, ContextT, ResponseT]): # noqa
"""在每次模型调用前动态加载运行时配置。
这里不把结果缓存到 middleware state 中,目的是让人格切换工具在同一轮
Agent 执行里修改 CURRENT_PERSONA 后,后续模型调用可以立即看到新的人格。
"""
def modify_request(self, request: ModelRequest[ContextT]) -> ModelRequest[ContextT]: # noqa
runtime_config = agent_runtime_manager.load_runtime_config()
runtime_sections = runtime_config.render_prompt_sections()
new_system_message = append_to_system_message(
request.system_message, runtime_sections
)
return request.override(system_message=new_system_message)
async def awrap_model_call(
self,
request: ModelRequest[ContextT],
handler: Callable[
[ModelRequest[ContextT]], Awaitable[ModelResponse[ResponseT]]
],
) -> ModelResponse[ResponseT]:
return await handler(self.modify_request(request))
__all__ = ["RuntimeConfigMiddleware"]

View File

@@ -0,0 +1,525 @@
import re
import shutil
from collections.abc import Awaitable, Callable
from pathlib import Path
from typing import Annotated, List
from typing import NotRequired, TypedDict
import yaml # noqa
from anyio import Path as AsyncPath
from langchain.agents.middleware.types import (
AgentMiddleware,
AgentState,
ContextT,
ModelRequest,
ModelResponse,
ResponseT,
)
from langchain.agents.middleware.types import PrivateStateAttr # noqa
from langchain_core.runnables import RunnableConfig
from langgraph.runtime import Runtime
from app.agent.middleware.utils import append_to_system_message
from app.log import logger
# 安全提示: SKILL.md 文件最大限制为 10MB,防止 DoS 攻击
MAX_SKILL_FILE_SIZE = 10 * 1024 * 1024
# Agent Skills 规范约束 (https://agentskills.io/specification)
MAX_SKILL_NAME_LENGTH = 64
MAX_SKILL_DESCRIPTION_LENGTH = 1024
MAX_SKILL_COMPATIBILITY_LENGTH = 500
class SkillMetadata(TypedDict):
"""Skill 元数据,符合 Agent Skills 规范。"""
path: str
"""SKILL.md 文件路径。"""
id: str
"""Skill 标识符。
约束: 1-64 字符,仅限小写字母/数字/连字符,不能以连字符开头或结尾,无连续连字符,需与父目录名一致。
"""
name: str
"""Skill 名称。
约束: Skill 的中文名称(简短描述)。
"""
version: int
"""Skill 版本号。
用于内置技能的版本管理,同步时比较版本号决定是否覆盖用户目录中的旧版本。
"""
description: str
"""Skill 功能描述。
约束: 1-1024 字符,应说明功能及适用场景。
"""
license: str | None
"""许可证信息。"""
compatibility: str | None
"""环境依赖或兼容性要求 (最多 500 字符)。"""
metadata: dict[str, str]
"""附加元数据。"""
allowed_tools: list[str]
"""(实验性) Skill 建议使用的工具列表。"""
class SkillsState(AgentState):
"""skills 中间件状态。"""
skills_metadata: NotRequired[Annotated[list[SkillMetadata], PrivateStateAttr]]
"""已加载的 skill 元数据列表,不传播给父 agent。"""
class SkillsStateUpdate(TypedDict):
"""skills 中间件状态更新项。"""
skills_metadata: list[SkillMetadata]
"""待合并的 skill 元数据列表。"""
def _parse_skill_metadata( # noqa: C901
content: str,
skill_path: str,
skill_id: str,
) -> SkillMetadata | None:
"""从 SKILL.md 内容中解析 YAML 前言并验证元数据。"""
if len(content) > MAX_SKILL_FILE_SIZE:
logger.warning(
"Skipping %s: content too large (%d bytes)", skill_path, len(content)
)
return None
# 匹配 --- 分隔的 YAML 前言
frontmatter_pattern = r"^---\s*\n(.*?)\n---\s*\n"
match = re.match(frontmatter_pattern, content, re.DOTALL)
if not match:
logger.warning("Skipping %s: no valid YAML frontmatter found", skill_path)
return None
frontmatter_str = match.group(1)
# 解析 YAML
try:
frontmatter_data = yaml.safe_load(frontmatter_str)
except yaml.YAMLError as e:
logger.warning("Invalid YAML in %s: %s", skill_path, e)
return None
if not isinstance(frontmatter_data, dict):
logger.warning("Skipping %s: frontmatter is not a mapping", skill_path)
return None
# Skill 名称和描述
name = str(frontmatter_data.get("name", "")).strip()
description = str(frontmatter_data.get("description", "")).strip()
if not name or not description:
logger.warning(
"Skipping %s: missing required 'name' or 'description'", skill_path
)
return None
description_str = description
if len(description_str) > MAX_SKILL_DESCRIPTION_LENGTH:
logger.warning(
"Description exceeds %d characters in %s, truncating",
MAX_SKILL_DESCRIPTION_LENGTH,
skill_path,
)
description_str = description_str[:MAX_SKILL_DESCRIPTION_LENGTH]
# 可选的工具列表,支持空格或逗号分隔
raw_tools = frontmatter_data.get("allowed-tools")
if isinstance(raw_tools, str):
allowed_tools = [
t.strip(",") # 兼容 Claude Code 风格的逗号分隔
for t in raw_tools.split()
if t.strip(",")
]
else:
if raw_tools is not None:
logger.warning(
"Ignoring non-string 'allowed-tools' in %s (got %s)",
skill_path,
type(raw_tools).__name__,
)
allowed_tools = []
# 能力或环境兼容性说明,最多 500 字符
compatibility_str = str(frontmatter_data.get("compatibility", "")).strip() or None
if compatibility_str and len(compatibility_str) > MAX_SKILL_COMPATIBILITY_LENGTH:
logger.warning(
"Compatibility exceeds %d characters in %s, truncating",
MAX_SKILL_COMPATIBILITY_LENGTH,
skill_path,
)
compatibility_str = compatibility_str[:MAX_SKILL_COMPATIBILITY_LENGTH]
# 版本号,默认为 0,表示未设置版本
raw_version = frontmatter_data.get("version")
version = 0
if raw_version is not None:
try:
version = int(raw_version)
except (ValueError, TypeError):
logger.warning(
"Invalid 'version' in %s (got %r), defaulting to 0",
skill_path,
raw_version,
)
return SkillMetadata(
id=skill_id,
name=name,
version=version,
description=description_str,
path=skill_path,
metadata=_validate_metadata(frontmatter_data.get("metadata", {}), skill_path),
license=str(frontmatter_data.get("license", "")).strip() or None,
compatibility=compatibility_str,
allowed_tools=allowed_tools,
)
def _validate_metadata(
raw: object,
skill_path: str,
) -> dict[str, str]:
"""验证并规范化 YAML 前言中的元数据字段,确保为 dict[str, str] 类型。"""
if not isinstance(raw, dict):
if raw:
logger.warning(
"Ignoring non-dict metadata in %s (got %s)",
skill_path,
type(raw).__name__,
)
return {}
return {str(k): str(v) for k, v in raw.items()}
def _format_skill_annotations(skill: SkillMetadata) -> str:
"""构建许可证和兼容性说明字符串。"""
parts: list[str] = []
if skill.get("license"):
parts.append(f"License: {skill['license']}")
if skill.get("compatibility"):
parts.append(f"Compatibility: {skill['compatibility']}")
return ", ".join(parts)
async def _alist_skills(source_path: AsyncPath) -> list[SkillMetadata]:
"""异步列出指定路径下的所有技能。
扫描包含 SKILL.md 的目录并解析其元数据。
"""
skills: list[SkillMetadata] = []
# 查找所有技能目录 (包含 SKILL.md 的目录)
skill_dirs: List[AsyncPath] = []
async for path in source_path.iterdir():
if await path.is_dir() and await (path / "SKILL.md").is_file():
skill_dirs.append(path)
if not skill_dirs:
return []
# 解析已下载的 SKILL.md
for skill_path in skill_dirs:
skill_md_path = skill_path / "SKILL.md"
skill_content = await skill_md_path.read_text(encoding="utf-8")
# 解析元数据
skill_metadata = _parse_skill_metadata(
content=skill_content,
skill_path=str(skill_md_path),
skill_id=skill_path.name,
)
if skill_metadata:
skills.append(skill_metadata)
return skills
SKILLS_SYSTEM_PROMPT = """
<skills_system>
You have access to a skills library that provides specialized capabilities and domain knowledge.
{skills_locations}
**Available Skills:**
{skills_list}
**How to Use Skills (Progressive Disclosure):**
Skills follow a **progressive disclosure** pattern - you see their name and description above, but only read full instructions when needed:
1. **Recognize when a skill applies**: Check if the user's task matches a skill's description
2. **Read the skill's full instructions**: Use the path shown in the skill list above
3. **Follow the skill's instructions**: SKILL.md contains step-by-step workflows, best practices, and examples
4. **Access supporting files**: Skills may include helper scripts, configs, or reference docs - use absolute paths
**Creating New Skills:**
When you identify a repetitive complex workflow or specialized task that would benefit from being a skill, you can create one:
1. **Directory Structure**: Create a new directory in one of the skills locations. The directory name is the `skill-id`.
- Path format: `<skills_location>/<skill-id>/SKILL.md`
- `skill-id` constraints: 1-64 characters, lowercase letters, numbers, and hyphens only.
2. **SKILL.md Format**: Must start with a YAML frontmatter followed by markdown instructions.
```markdown
---
name: Brief tool name (Chinese)
description: Detailed functional description and use cases (1-1024 chars)
allowed-tools: "tool1 tool2" (optional, space-separated list of recommended tools)
compatibility: "Environment requirements" (optional, max 500 chars)
---
# Skill Instructions
Step-by-step workflows, best practices, and examples go here.
```
3. **Supporting Files**: You can add `.py` scripts, `.yaml` configs, or other files within the same skill directory. Reference them using absolute paths in `SKILL.md`.
**When to Use Skills:**
- User's request matches a skill's domain (e.g., "research X" -> web-research skill)
- You need specialized knowledge or structured workflows
- A skill provides proven patterns for complex tasks
**Executing Skill Scripts:**
Skills may contain Python scripts or other executable files. Always use absolute paths from the skill list.
**Example Workflow:**
User: "Can you research the latest developments in quantum computing?"
1. Check available skills -> See "web-research" skill with its path
2. Read the skill using the path shown
3. Follow the skill's research workflow (search -> organize -> synthesize)
4. Use any helper scripts with absolute paths
Remember: Skills make you more capable and consistent. When in doubt, check if a skill exists for the task!
</skills_system>
"""
def _extract_version(skill_md: Path) -> int:
"""从 SKILL.md 文件中快速提取 version 字段,无法提取时返回 0。"""
try:
content = skill_md.read_text(encoding="utf-8")
except Exception as err:
logger.debug("读取 SKILL.md 失败 %s: %s", skill_md, err)
return 0
match = re.match(r"^---\s*\n(.*?)\n---\s*\n", content, re.DOTALL)
if not match:
return 0
try:
frontmatter = yaml.safe_load(match.group(1))
except yaml.YAMLError:
return 0
if not isinstance(frontmatter, dict):
return 0
raw = frontmatter.get("version")
if raw is None:
return 0
try:
return int(raw)
except (ValueError, TypeError):
return 0
def _sync_bundled_skills(bundled_dir: Path, target_dir: Path) -> None:
"""将项目自带的技能同步到用户目录。
- 目标目录中不存在对应技能子目录时,直接复制。
- 目标目录中已存在时,比较内置与用户目录中 SKILL.md 的 version 字段:
- 内置版本更高时,直接覆盖用户目录中的旧版本。
- 版本相同或用户版本更高时,跳过。
- 内置 SKILL.md 无 version 字段(视为 0)时不覆盖。
Parameters
----------
bundled_dir : Path
项目内置技能目录(如 ``ROOT_PATH / "skills"``)。
target_dir : Path
用户配置技能目录(如 ``CONFIG_PATH / "agent" / "skills"``)。
"""
if not bundled_dir.is_dir():
return
target_dir.mkdir(parents=True, exist_ok=True)
for skill_src in bundled_dir.iterdir():
if not skill_src.is_dir():
continue
skill_md = skill_src / "SKILL.md"
if not skill_md.is_file():
continue
skill_dst = target_dir / skill_src.name
if not skill_dst.exists():
# 目标不存在,直接复制
try:
shutil.copytree(str(skill_src), str(skill_dst))
logger.info(
"已自动复制内置技能 '%s' -> '%s'", skill_src.name, skill_dst
)
except Exception as e:
logger.warning("复制内置技能 '%s' 失败: %s", skill_src.name, e)
continue
# 目标已存在,比较版本号
bundled_version = _extract_version(skill_md)
if bundled_version <= 0:
# 内置技能无版本号,保持旧逻辑不覆盖
continue
user_skill_md = skill_dst / "SKILL.md"
user_version = _extract_version(user_skill_md) if user_skill_md.is_file() else 0
if bundled_version <= user_version:
# 用户版本 >= 内置版本,跳过
continue
# 内置版本更高,删除旧版本后覆盖
try:
shutil.rmtree(str(skill_dst))
shutil.copytree(str(skill_src), str(skill_dst))
logger.info(
"已更新内置技能 '%s' (v%d -> v%d)",
skill_src.name,
user_version,
bundled_version,
)
except Exception as e:
logger.warning("更新内置技能 '%s' 失败: %s", skill_src.name, e)
class SkillsMiddleware(AgentMiddleware[SkillsState, ContextT, ResponseT]): # noqa
"""加载并向系统提示词注入 Agent Skill 的中间件。
按源顺序加载 Skill,后加载的会覆盖重名的。
启动时自动将项目内置技能(bundled_skills_dir)同步到用户技能目录。
"""
state_schema = SkillsState
def __init__(
self,
*,
sources: list[str],
bundled_skills_dir: str | None = None,
) -> None:
"""初始化 Skill 中间件。
Parameters
----------
sources : list[str]
用户技能目录列表。
bundled_skills_dir : str | None
项目内置技能目录路径。若提供,在首次加载前会将其同步到 sources
首个目录:目标中不存在的技能直接复制,内置版本更高的技能覆盖旧版本。
"""
self.sources = sources
self.bundled_skills_dir = bundled_skills_dir
self.system_prompt_template = SKILLS_SYSTEM_PROMPT
def _format_skills_locations(self) -> str:
"""格式化技能位置信息用于系统提示词。"""
locations = []
for i, source_path in enumerate(self.sources):
suffix = " (higher priority)" if i == len(self.sources) - 1 else ""
locations.append(f"**MoviePilot Skills**: `{source_path}`{suffix}")
return "\n".join(locations)
def _format_skills_list(self, skills: list[SkillMetadata]) -> str:
"""格式化技能元数据列表用于系统提示词。"""
if not skills:
paths = [f"{source_path}" for source_path in self.sources]
return f"(No skills available yet. You can create skills in {' or '.join(paths)})"
lines = []
for skill in skills:
annotations = _format_skill_annotations(skill)
desc_line = f"- **{skill['id']}**: {skill['name']} - {skill['description']}"
if annotations:
desc_line += f" ({annotations})"
lines.append(desc_line)
if skill["allowed_tools"]:
lines.append(f" -> Allowed tools: {', '.join(skill['allowed_tools'])}")
lines.append(f" -> Read `{skill['path']}` for full instructions")
return "\n".join(lines)
def modify_request(self, request: ModelRequest[ContextT]) -> ModelRequest[ContextT]:
"""将技能文档注入模型请求的系统消息中。"""
skills_metadata = request.state.get("skills_metadata", []) # noqa
skills_locations = self._format_skills_locations()
skills_list = self._format_skills_list(skills_metadata)
skills_section = self.system_prompt_template.format(
skills_locations=skills_locations,
skills_list=skills_list,
)
new_system_message = append_to_system_message(
request.system_message, skills_section
)
return request.override(system_message=new_system_message)
async def abefore_agent( # noqa
self, state: SkillsState, runtime: Runtime, config: RunnableConfig
) -> SkillsStateUpdate | None: # ty: ignore[invalid-method-override]
"""在 Agent 执行前异步加载技能元数据。
每个会话仅加载一次。若 state 中已有则跳过。
首次加载时,会先将内置技能同步到用户目录(如不存在)。
"""
# 如果 state 中已存在元数据则跳过
if "skills_metadata" in state:
return None
# 自动同步内置技能到首个用户技能目录
if self.bundled_skills_dir and self.sources:
bundled = Path(self.bundled_skills_dir)
target = Path(self.sources[0])
try:
_sync_bundled_skills(bundled, target)
except Exception as e:
logger.warning("同步内置技能失败: %s", e)
all_skills: dict[str, SkillMetadata] = {}
# 遍历源按顺序加载技能,重名时后者覆盖前者
for source_path in self.sources:
skill_source_path = AsyncPath(source_path)
if not await skill_source_path.exists():
await skill_source_path.mkdir(parents=True, exist_ok=True)
continue
source_skills = await _alist_skills(skill_source_path)
for skill in source_skills:
all_skills[skill["name"]] = skill
skills = list(all_skills.values())
return SkillsStateUpdate(skills_metadata=skills)
async def awrap_model_call(
self,
request: ModelRequest[ContextT],
handler: Callable[
[ModelRequest[ContextT]], Awaitable[ModelResponse[ResponseT]]
],
) -> ModelResponse[ResponseT]:
"""在模型调用时注入技能文档。"""
modified_request = self.modify_request(request)
return await handler(modified_request)
__all__ = ["SkillMetadata", "SkillsMiddleware"]

View File

@@ -0,0 +1,549 @@
"""MoviePilot 自定义工具筛选中间件。"""
import json
from collections.abc import Awaitable, Callable
from dataclasses import dataclass
from typing import Annotated, Any, Literal, Union, NotRequired
from langchain.agents.middleware.types import (
AgentMiddleware,
AgentState,
ContextT,
ModelRequest,
ModelResponse,
ResponseT,
)
from langchain.agents.middleware.types import (
PrivateStateAttr, # noqa
)
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import HumanMessage
from langchain_core.runnables import RunnableConfig
from langchain_core.tools import BaseTool
from langgraph.runtime import Runtime
from pydantic import Field, TypeAdapter
from typing_extensions import TypedDict # noqa
from app.log import logger
DEFAULT_SYSTEM_PROMPT = (
"Your goal is to select the most relevant tools for answering the user's query."
)
@dataclass
class _SelectionRequest:
"""Prepared inputs for tool selection."""
available_tools: list[BaseTool]
system_message: str
last_user_message: HumanMessage
model: BaseChatModel
valid_tool_names: list[str]
def _create_tool_selection_response(tools: list[BaseTool]) -> TypeAdapter[Any]:
"""Create a structured output schema for tool selection.
Args:
tools: Available tools to include in the schema.
Returns:
`TypeAdapter` for a schema where each tool name is a `Literal` with its
description.
Raises:
AssertionError: If `tools` is empty.
"""
if not tools:
msg = "Invalid usage: tools must be non-empty"
raise AssertionError(msg)
# Create a Union of Annotated Literal types for each tool name with description
# For instance: Union[Annotated[Literal["tool1"], Field(description="...")], ...]
literals = [
Annotated[Literal[tool.name], Field(description=tool.description)]
for tool in tools # noqa
]
selected_tool_type = Union[tuple(literals)] # type: ignore[valid-type] # noqa: UP007
description = "Tools to use. Place the most relevant tools first."
class ToolSelectionResponse(TypedDict):
"""Use to select relevant tools."""
tools: Annotated[list[selected_tool_type], Field(description=description)] # type: ignore[valid-type]
return TypeAdapter(ToolSelectionResponse)
def _render_tool_list(tools: list[BaseTool]) -> str:
"""Format tools as markdown list.
Args:
tools: Tools to format.
Returns:
Markdown string with each tool on a new line.
"""
return "\n".join(f"- {tool.name}: {tool.description}" for tool in tools)
class ToolSelectionState(AgentState):
"""工具筛选中间件私有状态。"""
selected_tool_names: NotRequired[Annotated[list[str] | None, PrivateStateAttr]]
"""当前这条用户请求首轮筛选得到的工具名列表。"""
class ToolSelectionStateUpdate(TypedDict):
"""工具筛选中间件状态更新项。"""
selected_tool_names: list[str] | None
class ToolSelectorMiddleware(
AgentMiddleware[AgentState[ResponseT], ContextT, ResponseT]
):
"""
为 DeepSeek 兼容端点提供更稳妥的工具筛选实现。
LangChain 默认会通过 `with_structured_output()` 走 OpenAI 的
`response_format=json_schema` 路径,但 DeepSeek 官方 OpenAI 兼容端点公开文档
仅保证 `json_object` 模式可用。对于 `deepseek-reasoner`,这会在工具筛选阶段
提前触发 400,导致 Agent 还没真正开始执行工具就失败。
因此这里仅在识别到 DeepSeek 模型/端点时,退回到显式 JSON 输出模式:
1. 使用 `response_format={"type": "json_object"}`
2. 在提示词中明确约束返回 JSON 结构;
3. 手动解析 `{"tools": [...]}`,其余模型继续沿用 LangChain 默认实现。
另外LangChain 原生工具筛选挂在 `wrap_model_call` 上,会在同一条用户请求
的每次“模型回合”前都重新筛选一次工具。对于会多轮调用工具的复杂任务,
这会重复消耗一次额外的 LLM 调用。这里改成:
- `abefore_agent()`:在本轮 Agent 执行开始时筛选一次;
- `awrap_model_call()`:从 `request.state` 读取首轮筛选结果并复用。
"""
state_schema = ToolSelectionState
def __init__(
self,
model: BaseChatModel,
system_prompt: str = DEFAULT_SYSTEM_PROMPT,
selection_tools: list[Any] | None = None,
max_tools: int | None = None,
always_include: list[str] | None = None,
) -> None:
super().__init__()
self.model = model
self.system_prompt = system_prompt
self.max_tools = max_tools
self.always_include = always_include or []
self.selection_tools = selection_tools or []
def _prepare_selection_request(
self, request: ModelRequest[ContextT]
) -> _SelectionRequest | None:
"""Prepare inputs for tool selection.
Args:
request: the model request.
Returns:
`SelectionRequest` with prepared inputs, or `None` if no selection is
needed.
Raises:
ValueError: If tools in `always_include` are not found in the request.
AssertionError: If no user message is found in the request messages.
"""
# If no tools available, return None
if not request.tools:
return None
# Filter to only BaseTool instances (exclude provider-specific tool dicts)
base_tools = [tool for tool in request.tools if not isinstance(tool, dict)]
# Validate that always_include tools exist
if self.always_include:
available_tool_names = {tool.name for tool in base_tools}
missing_tools = [
name for name in self.always_include if name not in available_tool_names
]
if missing_tools:
msg = (
f"Tools in always_include not found in request: {missing_tools}. "
f"Available tools: {sorted(available_tool_names)}"
)
raise ValueError(msg)
# Separate tools that are always included from those available for selection
available_tools = [
tool for tool in base_tools if tool.name not in self.always_include
]
# If no tools available for selection, return None
if not available_tools:
return None
system_message = self.system_prompt
# If there's a max_tools limit, append instructions to the system prompt
if self.max_tools is not None:
system_message += (
f"\nIMPORTANT: List the tool names in order of relevance, "
f"with the most relevant first. "
f"If you exceed the maximum number of tools, "
f"only the first {self.max_tools} will be used."
)
# Get the last user message from the conversation history
last_user_message: HumanMessage
for message in reversed(request.messages):
if isinstance(message, HumanMessage):
last_user_message = message
break
else:
msg = "No user message found in request messages"
raise AssertionError(msg)
model = self.model or request.model
valid_tool_names = [tool.name for tool in available_tools]
return _SelectionRequest(
available_tools=available_tools,
system_message=system_message,
last_user_message=last_user_message,
model=model,
valid_tool_names=valid_tool_names,
)
def _process_selection_response(
self,
response: dict[str, Any],
available_tools: list[BaseTool],
valid_tool_names: list[str],
request: ModelRequest[ContextT],
) -> ModelRequest[ContextT]:
"""Process the selection response and return filtered `ModelRequest`."""
selected_tool_names: list[str] = []
invalid_tool_selections = []
for tool_name in response["tools"]:
if tool_name not in valid_tool_names:
invalid_tool_selections.append(tool_name)
continue
# Only add if not already selected and within max_tools limit
if tool_name not in selected_tool_names and (
self.max_tools is None or len(selected_tool_names) < self.max_tools
):
selected_tool_names.append(tool_name)
if invalid_tool_selections:
msg = f"Model selected invalid tools: {invalid_tool_selections}"
raise ValueError(msg)
# Filter tools based on selection and append always-included tools
if selected_tool_names:
selected_tools: list[BaseTool] = [
tool for tool in available_tools if tool.name in selected_tool_names
]
else:
# 如果模型筛选结果为空,则不对工具进行裁剪,使用所有可用工具
logger.warning("工具筛选结果为空,将恢复使用所有工具。")
selected_tools = available_tools
always_included_tools: list[BaseTool] = [
tool
for tool in request.tools
if not isinstance(tool, dict) and tool.name in self.always_include
]
selected_tools.extend(always_included_tools)
# Also preserve any provider-specific tool dicts from the original request
provider_tools = [tool for tool in request.tools if isinstance(tool, dict)]
return request.override(tools=[*selected_tools, *provider_tools])
@staticmethod
def _is_deepseek_compatible_model(model: BaseChatModel) -> bool:
"""
判断当前模型是否应当走 DeepSeek JSON 兼容分支。
除了官方 `langchain_deepseek`,用户也可能通过 OpenAI-compatible
配置把 DeepSeek 端点接到 `ChatOpenAI`。因此这里同时检查模块名、模型名
和 Base URL避免只靠单一条件漏判。
"""
module_name = type(model).__module__.lower()
model_name = (
str(getattr(model, "model_name", "") or getattr(model, "model", ""))
.strip()
.lower()
)
base_url = (
str(getattr(model, "openai_api_base", "") or getattr(model, "api_base", ""))
.strip()
.lower()
)
return (
"deepseek" in module_name
or model_name.startswith("deepseek-")
or "api.deepseek.com" in base_url
)
@staticmethod
def _extract_text_content(content: Any) -> str:
"""
从模型响应中提取纯文本。
这里不依赖上层 LLMHelper避免中间件与 LLM 构造逻辑互相耦合。
"""
if content is None:
return ""
if isinstance(content, str):
return content
if isinstance(content, list):
text_parts: list[str] = []
for block in content:
if isinstance(block, str):
text_parts.append(block)
continue
if isinstance(block, dict):
if block.get("type") == "text" and isinstance(
block.get("text"), str
):
text_parts.append(block["text"])
continue
if not block.get("type") and isinstance(block.get("text"), str):
text_parts.append(block["text"])
return "".join(text_parts)
if isinstance(content, dict):
if content.get("type") == "text" and isinstance(content.get("text"), str):
return content["text"]
if not content.get("type") and isinstance(content.get("text"), str):
return content["text"]
return ""
@staticmethod
def _parse_json_object(text: str) -> dict[str, Any]:
"""
解析模型返回的 JSON。
DeepSeek 在 JSON 模式下通常会返回纯 JSON但这里仍做一层兜底
兼容模型偶发输出围栏或前后说明文本的情况。
"""
stripped_text = text.strip()
if not stripped_text:
raise ValueError("工具筛选返回了空响应")
try:
payload = json.loads(stripped_text)
if isinstance(payload, dict):
return payload
except json.JSONDecodeError:
pass
start = stripped_text.find("{")
end = stripped_text.rfind("}")
if start == -1 or end == -1 or end <= start:
raise ValueError(f"工具筛选返回的内容不是合法 JSON: {stripped_text}")
payload = json.loads(stripped_text[start: end + 1])
if not isinstance(payload, dict):
raise ValueError("工具筛选 JSON 顶层必须是对象")
return payload
@staticmethod
def _render_tool_list(available_tools: list[Any]) -> str:
"""把工具名和描述渲染成稳定的文本列表。"""
return "\n".join(
f"- {tool.name}: {tool.description}" for tool in available_tools
)
def _build_deepseek_selection_prompt(self, selection_request: Any) -> str:
"""
为 DeepSeek 生成显式 JSON 输出提示。
DeepSeek 官方文档要求在 JSON 输出模式下,提示词中必须明确包含 JSON
约束,否则兼容端点可能返回空内容或无意义输出。
"""
limit_instruction = ""
if self.max_tools:
limit_instruction = f"- Select up to {self.max_tools} tools. IF NO TOOLS ARE RELEVANT, DO NOT RETURN AN EMPTY ARRAY. SELECT THE MOST APPLICABLE ONES TO ENSURE THE REQUEST IS HANDLED."
return (
f"{selection_request.system_message}\n\n"
"Return the answer in JSON only.\n"
'Use exactly this shape: {"tools": ["tool_name_1", "tool_name_2"]}\n'
"Rules:\n"
"- The `tools` field must be a JSON array of strings.\n"
"- Only use tool names from the allowed list below.\n"
"- Order tools by relevance, with the most relevant first.\n"
f"{limit_instruction}\n"
"- Do not add explanations, markdown, or extra keys.\n\n"
"Allowed tools:\n"
f"{self._render_tool_list(selection_request.available_tools)}"
)
def _normalize_selection_response(self, response: Any) -> dict[str, list[str]]:
"""
解析并标准化 DeepSeek JSON 模式的工具筛选结果。
"""
content = getattr(response, "content", response)
text = self._extract_text_content(content)
logger.debug(f"工具筛选原始响应: {text}")
payload = self._parse_json_object(text)
tools = payload.get("tools")
if not isinstance(tools, list):
raise ValueError(f"工具筛选 JSON 缺少 `tools` 数组: {payload}")
normalized_tools = [
tool_name for tool_name in tools if isinstance(tool_name, str)
]
logger.debug(f"工具筛选标准化结果: {normalized_tools}")
return {"tools": normalized_tools}
async def _aselect_tools_with_deepseek(
self, selection_request: Any
) -> dict[str, list[str]]:
"""
使用 DeepSeek 兼容的 JSON 输出模式执行异步工具筛选。
"""
logger.debug("工具筛选走 DeepSeek JSON 兼容分支")
structured_model = selection_request.model.bind(
response_format={"type": "json_object"}
)
response = await structured_model.ainvoke(
[
{
"role": "system",
"content": self._build_deepseek_selection_prompt(selection_request),
},
selection_request.last_user_message,
]
)
return self._normalize_selection_response(response)
@staticmethod
def _extract_selected_tool_names(request: ModelRequest) -> list[str]:
"""从已筛选后的请求中提取最终工具名,保留原有顺序。"""
return [tool.name for tool in request.tools if not isinstance(tool, dict)]
@staticmethod
def _apply_selected_tools(
request: ModelRequest[ContextT],
selected_tool_names: list[str],
) -> ModelRequest[ContextT]:
"""
将已筛选出的工具集应用到当前模型请求。
这里只复用首次筛选出的客户端工具名provider-specific 的 dict 工具仍然
原样保留,避免破坏 LangChain/provider 自身的工具绑定约定。
"""
if not selected_tool_names:
return request
current_tools_by_name = {
tool.name: tool for tool in request.tools if not isinstance(tool, dict)
}
selected_tools = [
current_tools_by_name[tool_name]
for tool_name in selected_tool_names
if tool_name in current_tools_by_name
]
provider_tools = [tool for tool in request.tools if isinstance(tool, dict)]
return request.override(tools=[*selected_tools, *provider_tools])
async def _aselect_request_once(
self, request: ModelRequest[ContextT]
) -> ModelRequest[ContextT]:
"""
执行一次真实工具筛选,并返回筛选后的请求对象。
这里单独抽成 helper便于首次筛选后缓存结果也便于测试覆盖
“首轮筛选,后续复用”的行为。
"""
selection_request = self._prepare_selection_request(request)
if selection_request is None:
return request
if not self._is_deepseek_compatible_model(selection_request.model):
captured_request: ModelRequest[ContextT] = request
async def _capture_handler(
updated_request: ModelRequest[ContextT],
) -> ModelRequest[ContextT]:
nonlocal captured_request
captured_request = updated_request
return updated_request
await super().awrap_model_call(request, _capture_handler)
return captured_request
response = await self._aselect_tools_with_deepseek(selection_request)
return self._process_selection_response(
response,
selection_request.available_tools,
selection_request.valid_tool_names,
request,
)
async def abefore_agent( # noqa
self,
state: ToolSelectionState,
runtime: Runtime, # noqa
config: RunnableConfig,
) -> ToolSelectionStateUpdate | None: # ty: ignore[invalid-method-override]
"""
在本轮 Agent 执行开始前完成一次真实工具筛选。
这样后续多轮 `model -> tools -> model` 循环都只复用这一次结果,
不会为每次模型回合重复追加一笔 selector LLM 开销。
"""
if "selected_tool_names" in state:
return None
if not self.selection_tools or self.model is None:
return ToolSelectionStateUpdate(selected_tool_names=None)
selection_request = ModelRequest(
model=self.model,
tools=list(self.selection_tools),
messages=state["messages"],
state=state,
runtime=runtime,
)
modified_request = await self._aselect_request_once(selection_request)
selected_tool_names = self._extract_selected_tool_names(modified_request)
return ToolSelectionStateUpdate(selected_tool_names=selected_tool_names or None)
async def awrap_model_call(
self,
request: ModelRequest[ContextT],
handler: Callable[
[ModelRequest[ContextT]], Awaitable[ModelResponse[ResponseT]]
],
) -> ModelResponse[ResponseT]:
"""
从 state 中读取首次筛选结果,并应用到每次模型回合。
"""
selected_tool_names = request.state.get("selected_tool_names") # noqa
# 正常路径下,`abefore_agent()` 已经提前写入状态;这里只保留一层兜底,
# 兼容直接单测或未来某些绕过 before_agent 的调用场景。
if (
selected_tool_names is None
and self.selection_tools
and self.model is not None
):
request = await self._aselect_request_once(request)
selected_tool_names = self._extract_selected_tool_names(request) or None
request.state["selected_tool_names"] = selected_tool_names # noqa
if selected_tool_names:
request = self._apply_selected_tools(request, selected_tool_names)
return await handler(request)
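A sketch of the JSON shape the DeepSeek branch expects back from the selector model, and of how the brace-scanning fallback tolerates noisy output (tool names are examples drawn from elsewhere in this changeset):
```python
# Sketch only: example selector responses.
clean = '{"tools": ["search_media", "add_subscribe"]}'
assert ToolSelectorMiddleware._parse_json_object(clean) == {
    "tools": ["search_media", "add_subscribe"]
}

# Prefixed or suffixed chatter still parses via the { ... } fallback scan.
noisy = 'Sure, the selection is: {"tools": ["recognize_media"]} Hope this helps.'
assert ToolSelectorMiddleware._parse_json_object(noisy) == {
    "tools": ["recognize_media"]
}
```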

View File

@@ -0,0 +1,184 @@
from collections.abc import Awaitable, Callable
from typing import Any
from langchain.agents.middleware.types import (
AgentMiddleware,
ContextT,
ModelRequest,
ModelResponse,
ResponseT,
)
from langchain_core.messages import AIMessage
from app.log import logger
class UsageMiddleware(AgentMiddleware):
"""记录模型调用 usage 信息并回传给外部会话。"""
def __init__(
self,
*,
on_usage: Callable[[dict[str, Any]], None] | None = None,
) -> None:
self.on_usage = on_usage
@staticmethod
def _coerce_int(value: Any) -> int | None:
if value is None:
return None
try:
return int(value)
except (TypeError, ValueError):
return None
@classmethod
def _lookup_int(cls, container: Any, *keys: str) -> int | None:
if not container:
return None
getter = getattr(container, "get", None)
if callable(getter):
for key in keys:
value = getter(key)
if value is not None:
return cls._coerce_int(value)
for key in keys:
value = getattr(container, key, None)
if value is not None:
return cls._coerce_int(value)
return None
@classmethod
def _extract_model_name(cls, model: Any) -> str | None:
return (
getattr(model, "model", None)
or getattr(model, "model_name", None)
or getattr(model, "model_id", None)
)
@classmethod
def _extract_context_window_tokens(cls, model: Any) -> int | None:
profile = getattr(model, "profile", None)
if not profile:
return None
return cls._lookup_int(profile, "max_input_tokens", "input_token_limit")
@classmethod
def _extract_usage(cls, ai_message: AIMessage) -> dict[str, Any]:
usage_metadata = getattr(ai_message, "usage_metadata", None)
input_tokens = cls._lookup_int(usage_metadata, "input_tokens")
output_tokens = cls._lookup_int(usage_metadata, "output_tokens")
total_tokens = cls._lookup_int(usage_metadata, "total_tokens")
response_metadata = getattr(ai_message, "response_metadata", None) or {}
token_usage = (
response_metadata.get("token_usage")
or response_metadata.get("usage")
or response_metadata.get("usage_metadata")
or {}
)
if input_tokens is None:
input_tokens = cls._lookup_int(
token_usage,
"prompt_tokens",
"input_tokens",
)
if input_tokens is None:
input_tokens = cls._lookup_int(
response_metadata,
"prompt_token_count",
"input_tokens",
)
if output_tokens is None:
output_tokens = cls._lookup_int(
token_usage,
"completion_tokens",
"output_tokens",
)
if output_tokens is None:
output_tokens = cls._lookup_int(
response_metadata,
"candidates_token_count",
"output_tokens",
)
if total_tokens is None:
total_tokens = cls._lookup_int(token_usage, "total_tokens")
if total_tokens is None:
total_tokens = cls._lookup_int(response_metadata, "total_token_count")
has_usage = any(
value is not None for value in (input_tokens, output_tokens, total_tokens)
)
resolved_input = input_tokens or 0
resolved_output = output_tokens or 0
resolved_total = (
total_tokens
if total_tokens is not None
else resolved_input + resolved_output
)
return {
"has_usage": has_usage,
"input_tokens": resolved_input,
"output_tokens": resolved_output,
"total_tokens": resolved_total,
}
async def awrap_model_call(
self,
request: ModelRequest[ContextT],
handler: Callable[
[ModelRequest[ContextT]], Awaitable[ModelResponse[ResponseT]]
],
) -> ModelResponse[ResponseT]:
response = await handler(request)
if not callable(self.on_usage):
return response
try:
ai_message = next(
(
message
for message in reversed(response.result)
if isinstance(message, AIMessage)
),
None,
)
usage = (
self._extract_usage(ai_message)
if ai_message
else {
"has_usage": False,
"input_tokens": 0,
"output_tokens": 0,
"total_tokens": 0,
}
)
context_window_tokens = self._extract_context_window_tokens(request.model)
context_usage_ratio = None
if context_window_tokens and usage["has_usage"]:
context_usage_ratio = usage["input_tokens"] / context_window_tokens
self.on_usage(
{
"model": self._extract_model_name(request.model),
"context_window_tokens": context_window_tokens,
"context_usage_ratio": context_usage_ratio,
**usage,
}
)
except Exception as e:
logger.debug("记录模型 usage 失败: %s", e)
return response
__all__ = ["UsageMiddleware"]

View File

@@ -0,0 +1,21 @@
from langchain_core.messages import SystemMessage, ContentBlock
def append_to_system_message(
system_message: SystemMessage | None,
text: str,
) -> SystemMessage:
"""将文本追加到系统消息。
参数:
system_message: 现有的系统消息或 None。
text: 要添加到系统消息的文本。
返回:
追加了文本的新 SystemMessage。
"""
new_content: list[ContentBlock] = list(system_message.content_blocks) if system_message else [] # noqa
if new_content:
text = f"\n\n{text}"
new_content.append({"type": "text", "text": text})
return SystemMessage(content_blocks=new_content)
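A sketch of how successive middlewares stack their sections onto one system message through this helper (section contents are placeholders):
```python
# Sketch only: placeholder sections.
msg = append_to_system_message(None, "<agent_memory>...</agent_memory>")
msg = append_to_system_message(msg, "<skills_system>...</skills_system>")
# msg.content_blocks now holds two text blocks; every section after the first
# is prefixed with a blank line ("\n\n") before being appended.
```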

View File

@@ -1,70 +0,0 @@
You are MoviePilot's AI assistant, specialized in helping users manage media resources including subscriptions, searching, downloading, and organization.
## Your Identity and Capabilities
You are an AI agent for the MoviePilot media management system with the following core capabilities:
### Media Management Capabilities
- **Search Media Resources**: Search for movies, TV shows, anime, and other media content based on user requirements
- **Add Subscriptions**: Create subscription rules for media content that users are interested in
- **Manage Downloads**: Search and add torrent resources to downloaders
- **Query Status**: Check subscription status, download progress, and media library status
### Intelligent Interaction Capabilities
- **Natural Language Understanding**: Understand user requests in natural language (Chinese/English)
- **Context Memory**: Remember conversation history and user preferences
- **Smart Recommendations**: Recommend related media content based on user preferences
- **Task Execution**: Automatically execute complex media management tasks
## Working Principles
1. **Always respond in Chinese**: All responses must be in Chinese
2. **Proactive Task Completion**: Understand user needs and proactively use tools to complete related operations
3. **Provide Detailed Information**: Explain what you're doing when executing operations
4. **Safety First**: Confirm user intent before performing download operations
5. **Continuous Learning**: Remember user preferences and habits to provide personalized service
## Common Operation Workflows
### Add Subscription Workflow
1. Understand the media content the user wants to subscribe to
2. Search for related media information
3. Create subscription rules
4. Confirm successful subscription
### Search and Download Workflow
1. Understand user requirements (movie names, TV show names, etc.)
2. Search for related media information
3. Search for related torrent resources by media info
4. Filter suitable resources
5. Add to downloader
### Query Status Workflow
1. Understand what information the user wants to know
2. Query related data
3. Organize and present results
## Tool Usage Guidelines
### Tool Usage Principles
- Use tools proactively to complete user requests
- Always explain what you're doing when using tools
- Provide detailed results and explanations
- Handle errors gracefully and suggest alternatives
- Confirm user intent before performing download operations
### Response Format
- Always respond in Chinese
- Use clear and friendly language
- Provide structured information when appropriate
- Include relevant details about media content (title, year, type, etc.)
- Explain the results of tool operations clearly
## Important Notes
- Always confirm user intent before performing download operations
- If search results are not ideal, proactively adjust search strategies
- Maintain a friendly and professional tone
- Seek solutions proactively when encountering problems
- Remember user preferences and provide personalized recommendations
- Handle errors gracefully and provide helpful suggestions

View File

@@ -0,0 +1,72 @@
You are the MoviePilot agent runtime. Follow the injected runtime configuration to determine the active persona and any extra user-specific context.
All your responses must be in **Chinese (中文)**.
You act as a proactive agent. Your goal is to fully resolve the user's media-related requests autonomously. Do not end your turn until the task is complete or you are blocked and require user feedback.
<agent_core>
Identity and Goal:
- You are an AI media assistant powered by MoviePilot.
- Your primary goal is to fully resolve the user's MoviePilot-related media tasks with the available tools whenever the request is actionable.
- Focus on MoviePilot's home media domain: search, recognition, subscriptions, downloads, library organization, file transfer, and system status.
- Stay within the MoviePilot product domain unless the user explicitly asks for adjacent help that can be handled with your existing tools.
Behavior Model:
- Prioritize task progress over conversation.
- Check current state before making changes, then do the smallest correct action.
- Do not stop for approval on read-only operations. Only confirm before destructive or high-impact actions such as starting downloads, deleting subscriptions, or removing history.
- When a request can be completed by tools, prefer doing the work over explaining what you might do.
- After an action, perform the minimum validation needed to confirm the result actually landed.
- If the user explicitly asks to change the speaking style or persona, use the dedicated persona tools instead of editing runtime files manually.
- If the user explicitly asks to rewrite or create a persona definition, prefer `update_persona_definition` rather than generic file-editing tools.
- Do not let user memory or persona style override this core identity, safety boundaries, or built-in background task rules.
- You are not a general-purpose coding assistant in normal media conversations. Only cross into implementation details when the user explicitly asks about MoviePilot internals or debugging.
Core Capabilities:
1. Media Search and Recognition - Identify movies, TV shows, and anime; recognize media from fuzzy filenames or incomplete titles.
2. Subscription Management - Create rules for automated downloading and monitor trending content.
3. Download Control - Search torrents across trackers and filter by quality, codec, and release group.
4. System Status and Organization - Monitor downloads, server health, file transfers, renaming, and library cleanup.
5. Visual Input Handling - Users may attach images from supported channels; analyze them together with the text when relevant.
6. File Context Handling - User messages may arrive as structured JSON. Treat the `message` field as the user's text. Attachments appear in `files`; when `local_path` is present, use local file tools to inspect the uploaded file directly. When image input is disabled for the current model, user images may also be delivered through `files`.
7. Persona Management - If the user explicitly asks to change the speaking style or persona, prefer `query_personas` and `switch_persona`; if the user asks to rewrite or create a persona definition, prefer `update_persona_definition` instead of editing runtime files manually.
Core Workflow:
1. Media Discovery: Identify exact media metadata such as TMDB ID and Season or Episode using search tools when needed.
2. Context Checking: Verify whether the media already exists in the library, has already been subscribed, or has relevant history that affects the next step.
3. Action Execution: Perform the requested task with concise user-facing output unless the operation is destructive or blocked.
4. Final Confirmation: State the outcome briefly, including the key media facts or blocker.
Tool Calling Strategy:
- Call independent tools in parallel whenever possible.
- If search results are ambiguous, use `query_media_detail` or `recognize_media` to clarify before proceeding.
- If `search_media` fails, fall back to `search_web` or `recognize_media`. Only ask the user when automated paths are exhausted.
- Reuse known media identity, prior tool results, and current system context instead of repeating expensive recognition or search calls.
- When a tool fails, try one narrower fallback path before escalating to the user.
Media Management Rules:
1. Download Safety: Present found torrents with size, seeds, and quality, then get explicit consent before downloading.
2. Subscription Logic: Check for the best matching quality profile based on user history or defaults.
3. Library Awareness: Check if content already exists in the library to avoid duplicates.
4. Error Handling: If a tool or site fails, briefly explain what went wrong and suggest an alternative.
5. TV Subscription Rule: When calling `add_subscribe` for a TV show, omitting `season` means subscribe to season 1 only. To subscribe multiple seasons or the full series, call `add_subscribe` separately for each season.
</agent_core>
<communication_runtime>
{verbose_spec}
- Channel-aware formatting: Follow the capability rules below for Markdown, plain text, buttons, and voice replies.
{button_choice_spec}
- Voice replies: {voice_reply_spec}
- If the current channel supports image sending and an image would materially help, you may use the `send_message` tool with `image_url` to send it.
- If the current channel supports file sending and you need to return a local image or file for the user to download, use `send_local_file`.
</communication_runtime>
<markdown_spec>
Specific markdown rules:
{markdown_spec}
</markdown_spec>
<system_info>
{moviepilot_info}
</system_info>

View File

@@ -0,0 +1,139 @@
version: 2
shared_rules:
- This is a background system task, NOT a user conversation.
- Your final response will be consumed by the system. Keep it concise and task-focused.
- Do NOT include greetings, explanations, or conversational text.
- Respond in Chinese (中文).
task_types:
heartbeat:
header: "[System Heartbeat]"
objective: "Check all jobs in your jobs directory and process pending tasks."
steps_title: "Follow these steps"
steps:
- "List all jobs with status 'pending' or 'in_progress'."
- "For 'recurring' jobs, check 'last_run' to determine if it's time to run again."
- "For 'once' jobs with status 'pending', execute them now."
- "After executing each job, update its status, 'last_run' time, and execution log in the JOB.md file."
empty_result: "If no jobs were executed, output nothing."
health_check:
header: "[System Health Check]"
objective: "Verify that the agent execution pipeline is alive."
steps_title: "Follow these steps"
steps:
- "Verify that runtime config, tools, and jobs can all be accessed normally."
- "If a real issue is detected, report the failing subsystem and the immediate blocking reason."
empty_result: "If there is nothing meaningful to report, output OK only."
transfer_failed_retry:
header: "[System Task - Transfer Failed Retry]"
objective: "A file transfer or organization has failed. Please use the `transfer-failed-retry` skill to retry the failed transfer."
context_title: "Task context"
context_lines:
- "Failed transfer history record IDs: {history_ids_csv}"
- "Total failed records: {history_count}"
steps_title: "Follow these steps"
steps:
- "Use `query_transfer_history` with status='failed' to find the record with id={history_id} and understand the failure details such as source path, error message, and media info."
- "Analyze the error message to determine the best retry strategy."
- "If the source file no longer exists, skip this retry and report that the file is missing."
- "Delete the failed history record using `delete_transfer_history` with history_id={history_id}."
- "Re-identify the media using `recognize_media` with the source file path."
- "If recognition fails, try `search_media` with keywords from the filename."
- "Re-transfer using `transfer_file` with the source path and any identified media info such as tmdbid and media_type."
- "Report the final result."
batch_transfer_failed_retry:
header: "[System Task - Batch Transfer Failed Retry]"
objective: "Multiple file transfers from the same source have failed. These files likely belong to the same media. Please use the `transfer-failed-retry` skill to retry them efficiently."
context_title: "Task context"
context_lines:
- "Failed transfer history record IDs: {history_ids_csv}"
- "Total failed records: {history_count}"
steps_title: "Follow these steps"
steps:
- "Use `query_transfer_history` with status='failed' to find all records with these IDs and understand the failure details."
- "Analyze the first record to determine the shared media identity and the best retry strategy because the root cause is usually the same for all files."
- "If the error is about media recognition, identify the media once using `recognize_media` or `search_media`, then reuse that result for all files."
- "For each failed record, delete the old history entry with `delete_transfer_history` and re-transfer using `transfer_file`."
- "Report how many retries succeeded and how many still failed."
task_rules:
- "These files share the same media identity. Do NOT call `recognize_media` or `search_media` repeatedly for each file."
manual_transfer_redo:
header: "[System Task - Manual Transfer Re-Organize]"
objective: "A user manually triggered an AI re-organize task from the transfer history page."
context_title: "Transfer history record"
context_lines:
- "- History ID: {history_id}"
- "- Current status: {current_status}"
- "- Current recognized title: {recognized_title}"
- "- Media type: {media_type}"
- "- Category: {category}"
- "- Year: {year}"
- "- Season/Episode: {season_episode}"
- "- Source path: {source_path}"
- "- Source storage: {source_storage}"
- "- Destination path: {destination_path}"
- "- Destination storage: {destination_storage}"
- "- Transfer mode: {transfer_mode}"
- "- Current TMDB ID: {tmdbid}"
- "- Current Douban ID: {doubanid}"
- "- Error message: {error_message}"
steps_title: "Required workflow"
steps:
- "Use `query_transfer_history` to locate and inspect the record with id={history_id}, and verify the source path, status, media info, and failure context."
- "Decide whether the current recognition is trustworthy."
- "If the source file no longer exists or cannot be safely processed, stop and report the reason."
- "If the current recognition is wrong or the record should be reorganized, determine the correct media identity first."
- "Prefer `recognize_media` with the source path. If recognition is not reliable, use `search_media` with keywords from filename, title, or year."
- "Only continue when you have high confidence in the target media."
- "Before re-organizing, delete the old transfer history record with `delete_transfer_history` so the system will not skip the source file."
- "Then use `transfer_file` to organize the source path directly."
- "When calling `transfer_file`, reuse known context when appropriate: source storage, target path, target storage, transfer mode, season, tmdbid or doubanid, and media_type."
- "If this record is already correct and no re-organize is needed, do not perform destructive actions; simply report that no change is necessary."
task_rules:
- "Do NOT rely on previous chat context. Work only from the record above."
- "Your goal is to directly fix one transfer history record by using MoviePilot tools to analyze, clean up the old history entry if necessary, and organize the source file again."
- "You should complete the re-organize by directly using tools such as `query_transfer_history`, `recognize_media`, `search_media`, `delete_transfer_history`, and `transfer_file`."
- "Do NOT reorganize blindly when media identity is uncertain."
- "If the previous record was successful but obviously identified as the wrong media, still use the tool-based flow above instead of `/redo`."
- "Keep the final response short and focused on outcome."
batch_manual_transfer_redo:
header: "[System Task - Batch Manual Transfer Re-Organize]"
objective: "A user manually triggered a batch AI re-organize task from the transfer history page."
context_title: "Selected transfer history records"
context_lines:
- "- History IDs: {history_ids_csv}"
- "- Total records: {history_count}"
- "{records_context}"
steps_title: "Required workflow"
steps:
- "Review the selected records below first and group them by likely shared media identity, source directory, or retry strategy when possible."
- "Use the provided record context as the primary source of truth. Call `query_transfer_history` only when you need extra confirmation."
- "For each group, decide whether the current recognition is trustworthy."
- "If multiple records clearly belong to the same movie or series, identify the media once with `recognize_media` or `search_media`, then reuse that result for the related records."
- "If a source file no longer exists or cannot be safely processed, skip that record and note the reason."
- "Before re-organizing a record, delete the old transfer history record with `delete_transfer_history` so the system will not skip the source file."
- "Then use `transfer_file` to organize the source path directly."
- "When calling `transfer_file`, reuse known context when appropriate: source storage, target path, target storage, transfer mode, season, tmdbid or doubanid, and media_type."
- "If a record is already correct and no re-organize is needed, do not perform destructive actions; simply mark it as skipped."
- "Report only the aggregate outcome, including how many records succeeded, skipped, and failed."
task_rules:
- "Do NOT assume every selected record belongs to the same media."
- "When several records obviously share the same media identity, avoid repeated `recognize_media` or `search_media` calls."
- "Process every selected record exactly once."
- "Keep the final response short and focused on the aggregate outcome."
search_recommend:
header: "[System Task - Search Results Recommendation]"
objective: "Analyze the provided search results and select the best matching items based on user preferences."
context_title: "Task context"
context_lines:
- "{search_results}"
steps_title: "Follow these steps"
steps:
- "Review all search result items carefully."
- "Evaluate each item based on the user preference criteria."
- "Select the top items that best match the preferences."
- "Return ONLY a JSON array of item indices."
task_rules:
- "Return ONLY a JSON array of index numbers, e.g., [0, 3, 1]."
- "Do NOT include any explanations, markdown formatting, conversational text, or other content."
- "Do NOT call any tools. Simply analyze and return the JSON result directly."
- "Respond in JSON format only."

View File

@@ -1,13 +1,60 @@
"""提示词管理器"""
import socket
from dataclasses import dataclass, field
from pathlib import Path
from typing import Dict
from string import Formatter
from time import strftime
from typing import Any, Dict, Optional
import yaml
from app.core.config import settings
from app.log import logger
from app.schemas import (
ChannelCapability,
ChannelCapabilities,
MessageChannel,
ChannelCapabilityManager,
)
from app.utils.system import SystemUtils
SYSTEM_TASKS_FILE = "System Tasks.yaml"
SYSTEM_TASKS_SCHEMA_VERSION = 2
class PromptConfigError(ValueError):
"""程序内置提示词定义加载异常。"""
@dataclass
class SystemTaskTypeDefinition:
"""单个后台系统任务定义。"""
header: str
objective: str
context_title: Optional[str] = None
context_lines: list[str] = field(default_factory=list)
steps_title: Optional[str] = None
steps: list[str] = field(default_factory=list)
task_rules: list[str] = field(default_factory=list)
empty_result: Optional[str] = None
@dataclass
class SystemTasksDefinition:
"""程序内置后台系统任务定义。"""
path: Path
version: int
shared_rules: list[str]
task_types: dict[str, SystemTaskTypeDefinition]
class PromptManager:
"""提示词管理器"""
"""
提示词管理器
"""
def __init__(self, prompts_dir: str = None):
if prompts_dir is None:
@@ -15,24 +62,24 @@ class PromptManager:
else:
self.prompts_dir = Path(prompts_dir)
self.prompts_cache: Dict[str, str] = {}
self._system_tasks_cache: Optional[SystemTasksDefinition] = None
self._system_tasks_signature: Optional[tuple[int, int]] = None
def load_prompt(self, prompt_name: str) -> str:
"""加载指定的提示词"""
"""
加载指定的提示词
"""
if prompt_name in self.prompts_cache:
return self.prompts_cache[prompt_name]
prompt_file = self.prompts_dir / prompt_name
try:
with open(prompt_file, 'r', encoding='utf-8') as f:
with open(prompt_file, "r", encoding="utf-8") as f:
content = f.read().strip()
# 缓存提示词
self.prompts_cache[prompt_name] = content
logger.info(f"提示词加载成功: {prompt_name},长度:{len(content)} 字符")
return content
except FileNotFoundError:
logger.error(f"提示词文件不存在: {prompt_file}")
raise
@@ -46,73 +93,425 @@ class PromptManager:
:param channel: 消息渠道(Telegram、微信、Slack等)
:return: 提示词内容
"""
base_prompt = self.load_prompt("Agent Prompt.txt")
# 根据渠道添加特定的格式说明
if channel:
channel_format_info = self._get_channel_format_info(channel)
if channel_format_info:
base_prompt += f"\n\n## Current Message Channel Format Requirements\n\n{channel_format_info}"
# 基础提示词只保留 MoviePilot 运行时和渠道能力相关约束。
# 根层运行时配置由 RuntimeConfigMiddleware 在每次模型调用前动态注入,
# 这样人格切换可以在同一轮 Agent 执行里立即生效。
base_prompt = self.load_prompt("System Core Prompt.txt")
# 识别渠道
markdown_spec = ""
msg_channel = (
next(
(c for c in MessageChannel if c.value.lower() == channel.lower()), None
)
if channel
else None
)
# 获取渠道能力说明
if msg_channel:
caps = ChannelCapabilityManager.get_capabilities(msg_channel)
if caps:
markdown_spec = self._generate_formatting_instructions(caps)
button_choice_spec = self._generate_button_choice_instructions(msg_channel)
# 啰嗦模式
verbose_spec = ""
if not settings.AI_AGENT_VERBOSE:
verbose_spec = (
"\n\n[Important Instruction] STRICTLY ENFORCED: "
"If tools are needed, DO NOT output any conversational text, explanations, progress updates, "
"or acknowledgements before the first tool call or between tool calls. "
"Call tools directly without any transitional phrases. "
"You MUST remain completely silent until all required tools have finished and you have the final result. "
"Only then may you send one final user-facing reply. "
"DO NOT output any intermediate content whatsoever."
)
# MoviePilot系统信息
moviepilot_info = self._get_moviepilot_info()
voice_reply_spec = self._generate_voice_reply_instructions()
# 始终替换占位符,避免后续 .format() 时因残留花括号报 KeyError
base_prompt = base_prompt.format(
markdown_spec=markdown_spec,
verbose_spec=verbose_spec,
moviepilot_info=moviepilot_info,
voice_reply_spec=voice_reply_spec,
button_choice_spec=button_choice_spec,
)
return base_prompt
def load_system_tasks_definition(self) -> SystemTasksDefinition:
"""加载程序内置的后台系统任务定义。"""
system_tasks_path = self.prompts_dir / SYSTEM_TASKS_FILE
try:
stat = system_tasks_path.stat()
except FileNotFoundError as err:
logger.error(f"系统任务定义文件不存在: {system_tasks_path}")
raise PromptConfigError(f"系统任务定义文件不存在: {system_tasks_path}") from err
signature = (stat.st_mtime_ns, stat.st_size)
if (
self._system_tasks_signature == signature
and self._system_tasks_cache is not None
):
return self._system_tasks_cache
try:
content = system_tasks_path.read_text(encoding="utf-8")
except Exception as err: # noqa: BLE001
logger.error(f"读取系统任务定义失败: {system_tasks_path}, 错误: {err}")
raise PromptConfigError(
f"读取系统任务定义失败 {system_tasks_path}: {err}"
) from err
try:
data = yaml.safe_load(content) or {}
except yaml.YAMLError as err:
raise PromptConfigError(f"YAML 解析失败 {system_tasks_path}: {err}") from err
if not isinstance(data, dict):
raise PromptConfigError(
f"YAML 根节点必须是映射类型: {system_tasks_path}"
)
definition = self._parse_system_tasks_definition(system_tasks_path, data)
self._system_tasks_signature = signature
self._system_tasks_cache = definition
return definition
def render_system_task_message(
self,
task_type: str,
*,
template_context: Optional[dict[str, Any]] = None,
extra_rules: Optional[list[str]] = None,
) -> str:
"""根据程序内置 YAML 渲染后台系统任务提示词。"""
system_tasks = self.load_system_tasks_definition()
task_definition = system_tasks.task_types.get(task_type)
if not task_definition:
raise PromptConfigError(f"未定义的后台系统任务类型: {task_type}")
rendered_context = self._render_template_lines(
task_definition.context_lines,
template_context,
task_type,
"context_lines",
)
rendered_steps = self._render_template_lines(
task_definition.steps,
template_context,
task_type,
"steps",
)
rendered_task_rules = self._render_template_lines(
task_definition.task_rules,
template_context,
task_type,
"task_rules",
)
sections = [
self._render_template_text(
task_definition.header,
template_context,
task_type,
"header",
).strip(),
self._render_template_text(
task_definition.objective,
template_context,
task_type,
"objective",
).strip(),
]
if rendered_context:
sections.append(
self._format_titled_lines(
task_definition.context_title or "Task context",
rendered_context,
)
)
if rendered_steps:
sections.append(
self._format_titled_lines(
task_definition.steps_title or "Follow these steps",
rendered_steps,
)
)
rules = list(system_tasks.shared_rules)
if task_definition.empty_result:
rules.append(task_definition.empty_result)
rules.extend(rendered_task_rules)
if extra_rules:
rules.extend(rule.strip() for rule in extra_rules if rule and rule.strip())
if rules:
sections.append(self._format_numbered_rules("IMPORTANT", rules))
return "\n\n".join(section for section in sections if section).strip()
@staticmethod
def _get_channel_format_info(channel: str) -> str:
def _get_moviepilot_info() -> str:
"""
获取渠道特定的格式说明
:param channel: 消息渠道
:return: 格式说明文本
获取MoviePilot系统信息用于注入到系统提示词中
"""
channel_lower = channel.lower() if channel else ""
if "telegram" in channel_lower:
return """Messages are being sent through the **Telegram** channel. You must follow these format requirements:
# 获取主机名和IP地址
try:
hostname = socket.gethostname()
ip_address = socket.gethostbyname(hostname)
except Exception: # noqa
hostname = "localhost"
ip_address = "127.0.0.1"
**Supported Formatting:**
- **Bold text**: Use `*text*` (single asterisk, not double asterisks)
- **Italic text**: Use `_text_` (underscore)
- **Code**: Use `` `text` `` (backtick)
- **Links**: Use `[text](url)` format
- **Strikethrough**: Use `~text~` (tilde)
# 配置文件和日志文件目录
config_path = str(settings.CONFIG_PATH)
log_path = str(settings.LOG_PATH)
**IMPORTANT - Headings and Lists:**
- **DO NOT use heading syntax** (`#`, `##`, `###`) - Telegram MarkdownV2 does NOT support it
- **Instead, use bold text for headings**: `*Heading Text*` followed by a blank line
- **DO NOT use list syntax** (`-`, `*`, `+` at line start) - these will be escaped and won't display as lists
- **For lists**, use plain text with line breaks, or use bold for list item labels: `*Item 1:* description`
# API地址构建
api_port = settings.PORT
api_path = settings.API_V1_STR
**Examples:**
- ❌ Wrong heading: `# Main Title` or `## Subtitle`
- ✅ Correct heading: `*Main Title*` (followed by blank line) or `*Subtitle*` (followed by blank line)
- ❌ Wrong list: `- Item 1` or `* Item 2`
- ✅ Correct list format: `*Item 1:* description` or use plain text with line breaks
# API令牌
api_token = settings.API_TOKEN or "未设置"
**Special Characters:**
- Avoid using special characters that need escaping in MarkdownV2: `_*[]()~`>#+-=|{}.!` unless they are part of the formatting syntax
- Keep formatting simple, avoid nested formatting to ensure proper rendering in Telegram"""
elif "wechat" in channel_lower or "微信" in channel:
return """Messages are being sent through the **WeChat** channel. Please follow these format requirements:
# 数据库信息
db_type = settings.DB_TYPE
if db_type == "sqlite":
db_info = f"SQLite ({settings.CONFIG_PATH / 'db' / 'moviepilot.db'})"
else:
db_password = settings.DB_POSTGRESQL_PASSWORD or ""
db_info = f"PostgreSQL ({settings.DB_POSTGRESQL_USERNAME}:{db_password}@{settings.DB_POSTGRESQL_HOST}:{settings.DB_POSTGRESQL_PORT}/{settings.DB_POSTGRESQL_DATABASE})"
- WeChat does NOT support Markdown formatting. Use plain text format only.
- Do NOT use any Markdown syntax (such as `**bold**`, `*italic*`, `` `code` `` etc.)
- Use plain text descriptions. You can organize content using line breaks and punctuation
- Links can be provided directly as URLs, no Markdown link format needed
- Keep messages concise and clear, use natural Chinese expressions"""
elif "slack" in channel_lower:
return """Messages are being sent through the **Slack** channel. Please follow these format requirements:
info_lines = [
f"- 当前时间: {strftime('%Y-%m-%d %H:%M:%S')}",
f"- 运行环境: {SystemUtils.platform} {'docker' if SystemUtils.is_docker() else ''}",
f"- 主机名: {hostname}",
f"- IP地址: {ip_address}",
f"- API端口: {api_port}",
f"- API路径: {api_path}",
f"- API令牌: {api_token}",
f"- 外网域名: {settings.APP_DOMAIN or '未设置'}",
f"- 数据库类型: {db_type}",
f"- 数据库: {db_info}",
f"- 配置文件目录: {config_path}",
f"- 日志文件目录: {log_path}",
f"- 系统安装目录: {settings.ROOT_PATH}",
]
- Slack supports Markdown formatting
- Use `*text*` for bold
- Use `_text_` for italic
- Use `` `text` `` for code
- Link format: `<url|text>` or `[text](url)`"""
# 其他渠道使用标准Markdown
return None
return "\n".join(info_lines)
@staticmethod
def _generate_formatting_instructions(caps: ChannelCapabilities) -> str:
"""
根据渠道能力动态生成格式指令
"""
instructions = []
if ChannelCapability.RICH_TEXT not in caps.capabilities:
instructions.append(
"- Formatting: Use **Plain Text ONLY**. The channel does NOT support Markdown."
)
instructions.append(
"- No Markdown Symbols: NEVER use `**`, `*`, `__`, or `[` blocks. Use natural text to emphasize (e.g., using ALL CAPS or separators)."
)
instructions.append(
"- Lists: Use plain text symbols like `>` or `*` at the start of lines, followed by manual line breaks."
)
instructions.append("- Links: Paste URLs directly as text.")
return "\n".join(instructions)
@staticmethod
def _generate_voice_reply_instructions() -> str:
return (
"- Voice replies: Use normal text replies by default. "
"Only call `send_voice_message` when the user explicitly asks for a voice reply "
"or spoken playback is clearly better than plain text."
)
@staticmethod
def _generate_button_choice_instructions(
channel: MessageChannel = None,
) -> str:
if (
channel
and ChannelCapabilityManager.supports_buttons(channel)
and ChannelCapabilityManager.supports_callbacks(channel)
):
return (
"- User questions: If you need the user to choose from a few clear options, "
"call `ask_user_choice` to send button options. After the user clicks a button, "
"the selected value will come back as the user's next message. After calling this tool, "
"wait for the user's selection instead of repeating the question in plain text."
)
return "- User questions: When you truly need user input, ask briefly in plain text."
def _parse_system_tasks_definition(
self,
path: Path,
data: dict[str, Any],
) -> SystemTasksDefinition:
"""把 YAML 结构转换成系统任务定义对象。"""
version = self._normalize_positive_int(data.get("version"), "version", default=1)
if version < SYSTEM_TASKS_SCHEMA_VERSION:
raise PromptConfigError(
f"{path} 的 version={version} 过旧,"
f"当前要求 System Tasks schema v{SYSTEM_TASKS_SCHEMA_VERSION} 或更高版本"
)
shared_rules = self._normalize_string_list(data.get("shared_rules"), "shared_rules")
if not shared_rules:
raise PromptConfigError(f"{path} 缺少 shared_rules")
raw_task_types = data.get("task_types")
if not isinstance(raw_task_types, dict) or not raw_task_types:
raise PromptConfigError(f"{path} 缺少 task_types 映射")
task_types: dict[str, SystemTaskTypeDefinition] = {}
for key, raw in raw_task_types.items():
if not isinstance(raw, dict):
raise PromptConfigError(f"task_types.{key} 必须是映射")
header = str(raw.get("header") or "").strip()
objective = str(raw.get("objective") or "").strip()
if not header or not objective:
raise PromptConfigError(f"task_types.{key} 缺少 header 或 objective")
task_types[str(key)] = SystemTaskTypeDefinition(
header=header,
objective=objective,
context_title=str(raw.get("context_title") or "").strip() or None,
context_lines=self._normalize_string_list(
raw.get("context_lines"),
f"task_types.{key}.context_lines",
),
steps_title=str(raw.get("steps_title") or "").strip() or None,
steps=self._normalize_string_list(
raw.get("steps"),
f"task_types.{key}.steps",
),
task_rules=self._normalize_string_list(
raw.get("task_rules"),
f"task_types.{key}.task_rules",
),
empty_result=str(raw.get("empty_result") or "").strip() or None,
)
return SystemTasksDefinition(
path=path,
version=version,
shared_rules=shared_rules,
task_types=task_types,
)
@classmethod
def _render_template_text(
cls,
text: str,
template_context: Optional[dict[str, Any]],
task_type: str,
field_name: str,
) -> str:
if not text:
return ""
formatter = Formatter()
required_fields = {
placeholder_name
for _, placeholder_name, _, _ in formatter.parse(text)
if placeholder_name
}
if not required_fields:
return text
context = cls._normalize_template_context(template_context)
missing_fields = sorted(field for field in required_fields if field not in context)
if missing_fields:
raise PromptConfigError(
f"系统任务定义 `{task_type}` 的 `{field_name}` 缺少变量: "
+ ", ".join(f"`{field}`" for field in missing_fields)
)
# 这里统一做字符串替换,让 YAML 成为后台任务文案的唯一行为来源。
return text.format_map(context)
@classmethod
def _render_template_lines(
cls,
items: list[str],
template_context: Optional[dict[str, Any]],
task_type: str,
field_name: str,
) -> list[str]:
return [
cls._render_template_text(
item,
template_context,
task_type,
f"{field_name}[{index}]",
).rstrip()
for index, item in enumerate(items, start=1)
if item and item.rstrip()
]
@staticmethod
def _normalize_template_context(
template_context: Optional[dict[str, Any]],
) -> dict[str, str]:
if not template_context:
return {}
return {
str(key): "" if value is None else str(value)
for key, value in template_context.items()
}
@staticmethod
def _format_numbered_rules(title: str, items: list[str]) -> str:
return "\n".join(
[f"{title}:"] + [f"{index}. {item}" for index, item in enumerate(items, start=1)]
)
@staticmethod
def _format_titled_lines(title: str, items: list[str]) -> str:
cleaned = [item.rstrip() for item in items if item and item.rstrip()]
return "\n".join([f"{title}:"] + cleaned)
@staticmethod
def _normalize_positive_int(
value: Any,
field_name: str,
*,
default: int,
) -> int:
if value in (None, ""):
return default
try:
normalized = int(value)
except (TypeError, ValueError) as err:
raise PromptConfigError(f"{field_name} 必须是正整数") from err
if normalized <= 0:
raise PromptConfigError(f"{field_name} 必须是正整数")
return normalized
@staticmethod
def _normalize_string_list(values: Any, field_name: str) -> list[str]:
if values is None:
return []
if not isinstance(values, list):
raise PromptConfigError(f"{field_name} 必须是字符串数组")
normalized: list[str] = []
for value in values:
text = str(value).strip()
if text:
normalized.append(text)
return normalized
def clear_cache(self):
"""清空缓存"""
"""
清空缓存
"""
self.prompts_cache.clear()
self._system_tasks_cache = None
self._system_tasks_signature = None
logger.info("提示词缓存已清空")
prompt_manager = PromptManager()
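
A minimal usage sketch (not part of the diff) for the background-task rendering path above. The import path is an assumption because the module path is not visible in this hunk, and the history values are illustrative.

```python
# Sketch: render the transfer_failed_retry task defined in System Tasks.yaml.
# `prompt_manager` is the module-level instance created above; the import path
# is an assumption and the history values are placeholders.
from app.agent.prompts import prompt_manager  # hypothetical module path

text = prompt_manager.render_system_task_message(
    "transfer_failed_retry",
    template_context={
        "history_id": 42,
        "history_ids_csv": "42",
        "history_count": 1,
    },
    extra_rules=["Do not retry more than once."],
)
# Raises PromptConfigError when the task type is unknown or a placeholder
# such as {history_id} is missing from template_context.
```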

755
app/agent/runtime.py Normal file
View File

@@ -0,0 +1,755 @@
"""Agent 根层运行时配置管理。"""
from __future__ import annotations
import re
import shutil
import threading
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Iterable, Optional
import yaml
from app.core.config import settings
from app.log import logger
CURRENT_PERSONA_FILE = "CURRENT_PERSONA.md"
SYSTEM_RUNTIME_DIR = "runtime"
MEMORY_DIR = "memory"
SKILLS_DIR = "skills"
JOBS_DIR = "jobs"
ACTIVITY_DIR = "activity"
PERSONAS_DIR = "personas"
PERSONA_FILE = "PERSONA.md"
CURRENT_PERSONA_SCHEMA_VERSION = 3
PERSONA_SCHEMA_VERSION = 1
DEFAULT_PERSONA_ID = "default"
PERSONA_ID_PATTERN = re.compile(r"^[a-z0-9][a-z0-9_-]{0,63}$")
ROOT_LEVEL_RUNTIME_FILES = {
CURRENT_PERSONA_FILE,
}
OBSOLETE_AGENT_ROOT_FILES = {
"AGENT_CORE.md",
"AGENT_PROFILE.md",
"AGENT_WORKFLOW.md",
"AGENT_HOOKS.md",
"USER_PREFERENCES.md",
"SYSTEM_TASKS.md",
"WAKE_FORMAT.md",
}
OBSOLETE_RUNTIME_FILES = {
Path("AGENT_CORE.md"),
Path("AGENT_PROFILE.md"),
Path("AGENT_WORKFLOW.md"),
Path("AGENT_HOOKS.md"),
Path("USER_PREFERENCES.md"),
Path("SYSTEM_TASKS.md"),
Path("WAKE_FORMAT.md"),
Path("personas") / DEFAULT_PERSONA_ID / "AGENT_PROFILE.md",
Path("personas") / DEFAULT_PERSONA_ID / "AGENT_WORKFLOW.md",
Path("personas") / DEFAULT_PERSONA_ID / "AGENT_HOOKS.md",
Path("system_tasks") / "SYSTEM_TASKS.md",
Path("templates") / "WAKE_FORMAT.md",
}
FRONTMATTER_PATTERN = re.compile(r"^---\s*\n(.*?)\n---\s*\n?", re.DOTALL)
class AgentRuntimeConfigError(ValueError):
"""根层配置加载异常。"""
@dataclass
class ParsedMarkdownDocument:
"""解析后的 Markdown 文档。"""
metadata: dict[str, Any]
body: str
@dataclass
class PersonaDefinition:
"""单个人格定义。"""
persona_id: str
path: Path
label: str
description: str
text: str
aliases: list[str] = field(default_factory=list)
def matches(self, query: str) -> bool:
"""判断 query 是否命中当前人格。"""
normalized = query.strip().casefold()
if not normalized:
return False
candidates = [self.persona_id, self.label, *self.aliases]
return any(candidate.strip().casefold() == normalized for candidate in candidates)
def summary_line(self) -> str:
"""渲染可读的一行人格摘要。"""
parts = [f"`{self.persona_id}`"]
if self.label and self.label != self.persona_id:
parts.append(self.label)
if self.description:
parts.append(self.description)
return " - ".join(parts)
def to_dict(self, *, is_active: bool) -> dict[str, Any]:
"""输出给查询工具的结构化信息。"""
return {
"persona_id": self.persona_id,
"label": self.label,
"description": self.description,
"aliases": self.aliases,
"is_active": is_active,
"path": str(self.path),
}
@dataclass
class AgentRuntimeConfig:
"""一次加载后的根层配置快照。"""
source_root: Path
active_persona: str
current_persona_path: Path
persona: PersonaDefinition
available_personas: list[PersonaDefinition]
extra_context_paths: list[Path]
extra_contexts: list[tuple[Path, str]]
warnings: list[str] = field(default_factory=list)
used_fallback: bool = False
def render_prompt_sections(self) -> str:
"""渲染进入系统提示词的运行时片段。"""
sections: list[str] = [
"<agent_runtime_config>",
f"- Active persona: `{self.active_persona}`",
f"- Active persona source: `{self.persona.path}`",
]
if self.available_personas:
sections.append("- Available personas:")
sections.extend(f" - {persona.summary_line()}" for persona in self.available_personas)
sections.append("</agent_runtime_config>")
if self.warnings:
sections.extend(
[
"",
"<agent_runtime_warnings>",
*[f"- {warning}" for warning in self.warnings],
"</agent_runtime_warnings>",
]
)
sections.extend(
[
"",
"<agent_persona>",
f"- Persona ID: `{self.persona.persona_id}`",
]
)
if self.persona.label and self.persona.label != self.persona.persona_id:
sections.append(f"- Persona Label: {self.persona.label}")
if self.persona.description:
sections.append(f"- Persona Description: {self.persona.description}")
sections.extend(
[
"",
self.persona.text.strip() or "(No persona instructions configured.)",
"</agent_persona>",
]
)
for path, text in self.extra_contexts:
if not text.strip():
continue
sections.extend(
[
"",
f'<agent_extra_context source="{path.name}">',
text.strip(),
"</agent_extra_context>",
]
)
return "\n".join(sections).strip()
def list_personas(self) -> list[dict[str, Any]]:
"""返回全部人格摘要。"""
return [
persona.to_dict(is_active=persona.persona_id == self.active_persona)
for persona in self.available_personas
]
class AgentRuntimeManager:
"""统一管理 agent 根层运行时配置目录、校验与人格切换。"""
def __init__(
self,
*,
agent_root_dir: Optional[Path] = None,
bundled_defaults_dir: Optional[Path] = None,
) -> None:
self.agent_root_dir = agent_root_dir or (settings.CONFIG_PATH / "agent")
self.runtime_dir = self.agent_root_dir / SYSTEM_RUNTIME_DIR
self.memory_dir = self.agent_root_dir / MEMORY_DIR
self.skills_dir = self.agent_root_dir / SKILLS_DIR
self.jobs_dir = self.agent_root_dir / JOBS_DIR
self.activity_dir = self.agent_root_dir / ACTIVITY_DIR
self.bundled_defaults_dir = bundled_defaults_dir or (
Path(__file__).parent / "defaults"
)
self._cache_lock = threading.Lock()
self._cached_signature: Optional[tuple[tuple[str, int, int], ...]] = None
self._cached_config: Optional[AgentRuntimeConfig] = None
def ensure_layout(self) -> None:
"""创建目录、同步默认文件,并清理废弃的旧版 runtime 文件。"""
self.agent_root_dir.mkdir(parents=True, exist_ok=True)
self.runtime_dir.mkdir(parents=True, exist_ok=True)
self.memory_dir.mkdir(parents=True, exist_ok=True)
self.skills_dir.mkdir(parents=True, exist_ok=True)
self.jobs_dir.mkdir(parents=True, exist_ok=True)
self.activity_dir.mkdir(parents=True, exist_ok=True)
self._migrate_root_runtime_files()
self._remove_obsolete_runtime_files()
self._sync_bundled_defaults()
self._migrate_root_memory_files()
def load_runtime_config(self) -> AgentRuntimeConfig:
"""加载配置。用户目录损坏时自动回退到内置默认配置。"""
self.ensure_layout()
signature = self._build_signature()
with self._cache_lock:
if self._cached_signature == signature and self._cached_config:
return self._cached_config
try:
config = self._load_from_root(self.runtime_dir)
except AgentRuntimeConfigError as err:
logger.warning("Agent 根层配置无效,回退到内置默认配置: %s", err)
config = self._load_from_root(self.bundled_defaults_dir)
config.used_fallback = True
config.warnings.insert(
0, f"用户运行时配置加载失败,已回退到内置默认配置: {err}"
)
self._cached_signature = signature
self._cached_config = config
return config
def invalidate_cache(self) -> None:
"""供测试或手动刷新时清理缓存。"""
with self._cache_lock:
self._cached_signature = None
self._cached_config = None
def set_active_persona(self, persona_query: str) -> AgentRuntimeConfig:
"""切换当前激活人格,并立即刷新缓存。"""
self.ensure_layout()
runtime_root = self.runtime_dir
current_path = runtime_root / CURRENT_PERSONA_FILE
current_doc = self._read_markdown(current_path)
current_meta = current_doc.metadata
available_personas = self._load_personas(runtime_root)
persona = self._resolve_persona_definition(persona_query, available_personas)
document = self._render_current_persona_document(
active_persona=persona.persona_id,
extra_context_files=self._coerce_string_list(
current_meta.get("extra_context_files")
),
deprecated_phrases=self._coerce_string_list(
current_meta.get("deprecated_phrases")
),
)
current_path.write_text(document, encoding="utf-8")
self.invalidate_cache()
logger.info("已切换 Agent 人格: %s", persona.persona_id)
return self.load_runtime_config()
def list_personas(self) -> list[PersonaDefinition]:
"""列出当前可用人格。"""
return self.load_runtime_config().available_personas
def update_persona_definition(
self,
persona_query: str,
*,
label: Optional[str] = None,
description: Optional[str] = None,
aliases: Optional[list[str]] = None,
instructions: Optional[str] = None,
append_instructions: Optional[list[str]] = None,
create_if_missing: bool = False,
) -> tuple[PersonaDefinition, bool]:
"""更新或创建运行时人格定义。"""
self.ensure_layout()
runtime_root = self.runtime_dir
available_personas = self._load_personas(runtime_root)
created = False
try:
persona = self._resolve_persona_definition(persona_query, available_personas)
target_persona_id = persona.persona_id
target_path = persona.path
existing_body = persona.text
existing_label = persona.label
existing_description = persona.description
existing_aliases = list(persona.aliases)
except AgentRuntimeConfigError:
if not create_if_missing:
raise
target_persona_id = self._validate_new_persona_id(persona_query)
target_path = runtime_root / PERSONAS_DIR / target_persona_id / PERSONA_FILE
existing_body = ""
existing_label = target_persona_id
existing_description = ""
existing_aliases = []
created = True
final_label = (
label.strip()
if isinstance(label, str) and label.strip()
else existing_label or target_persona_id
)
final_description = (
description.strip()
if isinstance(description, str) and description.strip()
else existing_description
)
final_aliases = (
self._normalize_persona_aliases(aliases, "aliases")
if aliases is not None
else existing_aliases
)
final_body = (
self._normalize_persona_body(instructions)
if isinstance(instructions, str) and instructions.strip()
else self._normalize_persona_body(existing_body)
)
final_body = self._merge_persona_instructions(
final_body,
append_instructions,
)
if not final_body.strip():
raise AgentRuntimeConfigError("人格定义正文不能为空")
document = self._render_persona_document(
persona_id=target_persona_id,
label=final_label,
description=final_description,
aliases=final_aliases,
body=final_body,
)
target_path.parent.mkdir(parents=True, exist_ok=True)
target_path.write_text(document, encoding="utf-8")
self.invalidate_cache()
runtime_config = self.load_runtime_config()
updated_persona = self._resolve_persona_definition(
target_persona_id,
runtime_config.available_personas,
)
logger.info(
"%s Agent 人格定义: %s",
"创建" if created else "更新",
updated_persona.persona_id,
)
return updated_persona, created
def _build_signature(self) -> tuple[tuple[str, int, int], ...]:
"""基于运行时配置和内置人格生成文件签名。"""
entries: list[tuple[str, int, int]] = []
for prefix, root in (
("runtime", self.runtime_dir),
("bundled", self.bundled_defaults_dir),
):
if not root.exists():
continue
for path in sorted(root.rglob("*")):
if not path.is_file():
continue
stat = path.stat()
relative = path.relative_to(root).as_posix()
entries.append((f"{prefix}:{relative}", stat.st_mtime_ns, stat.st_size))
return tuple(entries)
def _sync_bundled_defaults(self) -> None:
"""仅复制缺失的默认运行时文件,避免覆盖用户自定义。"""
if not self.bundled_defaults_dir.exists():
return
for path in sorted(self.bundled_defaults_dir.rglob("*")):
relative = path.relative_to(self.bundled_defaults_dir)
target = self.runtime_dir / relative
if path.is_dir():
target.mkdir(parents=True, exist_ok=True)
continue
if target.exists():
continue
target.parent.mkdir(parents=True, exist_ok=True)
shutil.copy2(path, target)
logger.info("已同步默认 Agent 运行时文件: %s", target)
def _migrate_root_runtime_files(self) -> None:
"""兼容早期直接放在 `config/agent` 根目录的 CURRENT_PERSONA。"""
source = self.agent_root_dir / CURRENT_PERSONA_FILE
target = self.runtime_dir / CURRENT_PERSONA_FILE
if not source.exists() or target.exists():
return
target.parent.mkdir(parents=True, exist_ok=True)
source.rename(target)
logger.info("已迁移旧版 Agent 根配置文件: %s -> %s", source, target)
def _remove_obsolete_runtime_files(self) -> None:
"""删除不再支持的旧版 Agent 配置文件,避免被误迁移到 memory。"""
for filename in sorted(OBSOLETE_AGENT_ROOT_FILES):
path = self.agent_root_dir / filename
if not path.exists() or not path.is_file():
continue
path.unlink()
logger.info("已删除废弃的 Agent 根配置文件: %s", path)
for relative_path in sorted(OBSOLETE_RUNTIME_FILES):
path = self.runtime_dir / relative_path
if not path.exists() or not path.is_file():
continue
path.unlink()
logger.info("已删除废弃的 Agent 运行时文件: %s", path)
def _migrate_root_memory_files(self) -> None:
"""将旧版根目录 memory 文件移入 `config/agent/memory`。"""
for path in sorted(self.agent_root_dir.glob("*.md")):
if path.name in ROOT_LEVEL_RUNTIME_FILES:
continue
target = self.memory_dir / path.name
if target.exists():
continue
path.rename(target)
logger.info("已迁移旧版 Agent memory 文件: %s -> %s", path, target)
def _load_from_root(self, root: Path) -> AgentRuntimeConfig:
current_persona_path = root / CURRENT_PERSONA_FILE
current_doc = self._read_markdown(current_persona_path)
current_meta = current_doc.metadata
active_persona = str(
current_meta.get("active_persona") or DEFAULT_PERSONA_ID
).strip()
if not active_persona:
raise AgentRuntimeConfigError("CURRENT_PERSONA.md 缺少 active_persona")
extra_context_paths = self._resolve_optional_paths(
root, current_meta.get("extra_context_files", [])
)
available_personas = self._load_personas(root)
persona = self._resolve_persona_definition(active_persona, available_personas)
extra_contexts = [
(path, self._read_markdown(path).body)
for path in extra_context_paths
]
warnings = self._validate_runtime_config(
current_meta=current_meta,
persona_path=persona.path,
extra_context_paths=extra_context_paths,
persona_text=persona.text,
)
return AgentRuntimeConfig(
source_root=root,
active_persona=active_persona,
current_persona_path=current_persona_path,
persona=persona,
available_personas=available_personas,
extra_context_paths=extra_context_paths,
extra_contexts=extra_contexts,
warnings=warnings,
)
def _load_personas(self, root: Path) -> list[PersonaDefinition]:
"""扫描并解析所有可用人格。"""
personas_root = root / PERSONAS_DIR
if not personas_root.exists():
raise AgentRuntimeConfigError(f"缺少 personas 目录: {personas_root}")
personas: list[PersonaDefinition] = []
seen_ids: set[str] = set()
for persona_dir in sorted(personas_root.iterdir()):
if not persona_dir.is_dir():
continue
persona_path = persona_dir / PERSONA_FILE
if not persona_path.exists():
continue
document = self._read_markdown(persona_path)
persona_id = str(document.metadata.get("persona_id") or persona_dir.name).strip()
if not persona_id:
raise AgentRuntimeConfigError(f"{persona_path} 缺少 persona_id")
if persona_id in seen_ids:
raise AgentRuntimeConfigError(f"检测到重复的人格 ID: {persona_id}")
seen_ids.add(persona_id)
aliases = self._normalize_string_list(
document.metadata.get("aliases"),
f"{persona_path}.aliases",
)
personas.append(
PersonaDefinition(
persona_id=persona_id,
path=persona_path,
label=str(document.metadata.get("label") or persona_id).strip(),
description=str(document.metadata.get("description") or "").strip(),
text=document.body,
aliases=aliases,
)
)
if not personas:
raise AgentRuntimeConfigError(f"{personas_root} 中未找到任何人格定义")
return personas
@staticmethod
def _resolve_persona_definition(
persona_query: str,
personas: list[PersonaDefinition],
) -> PersonaDefinition:
"""按 persona_id、label 或 aliases 解析人格。"""
normalized = (persona_query or "").strip()
if not normalized:
raise AgentRuntimeConfigError("人格 ID 不能为空")
for persona in personas:
if persona.persona_id == normalized:
return persona
for persona in personas:
if persona.matches(normalized):
return persona
available = ", ".join(persona.persona_id for persona in personas)
raise AgentRuntimeConfigError(
f"未找到人格 `{persona_query}`,可用人格: {available}"
)
@staticmethod
def _validate_new_persona_id(persona_id: str) -> str:
"""校验新建人格的 ID避免写入非法路径。"""
normalized = (persona_id or "").strip()
if not normalized:
raise AgentRuntimeConfigError("新建人格时 persona_id 不能为空")
if not PERSONA_ID_PATTERN.fullmatch(normalized):
raise AgentRuntimeConfigError(
"新建人格时 persona_id 只能使用小写字母、数字、下划线和中划线,且必须以字母或数字开头"
)
return normalized
@staticmethod
def _read_markdown(path: Path) -> ParsedMarkdownDocument:
if not path.exists():
raise AgentRuntimeConfigError(f"缺少配置文件: {path}")
try:
content = path.read_text(encoding="utf-8")
except Exception as err: # noqa: BLE001
raise AgentRuntimeConfigError(f"读取配置文件失败 {path}: {err}") from err
metadata: dict[str, Any] = {}
body = content
match = FRONTMATTER_PATTERN.match(content)
if match:
try:
metadata = yaml.safe_load(match.group(1)) or {}
except yaml.YAMLError as err:
raise AgentRuntimeConfigError(
f"YAML frontmatter 解析失败 {path}: {err}"
) from err
if not isinstance(metadata, dict):
raise AgentRuntimeConfigError(f"frontmatter 必须是映射类型: {path}")
body = content[match.end():]
return ParsedMarkdownDocument(metadata=metadata, body=body.strip())
@staticmethod
def _resolve_optional_paths(root: Path, values: Any) -> list[Path]:
if not values:
return []
if not isinstance(values, list):
raise AgentRuntimeConfigError("extra_context_files 必须是数组")
return [AgentRuntimeManager._resolve_relative_path(root, str(value)) for value in values]
@staticmethod
def _resolve_relative_path(root: Path, value: str) -> Path:
candidate = Path(value)
return candidate if candidate.is_absolute() else (root / candidate).resolve()
@staticmethod
def _normalize_string_list(values: Any, field_name: str) -> list[str]:
if values is None:
return []
if not isinstance(values, list):
raise AgentRuntimeConfigError(f"{field_name} 必须是字符串数组")
normalized: list[str] = []
for value in values:
text = str(value).strip()
if text:
normalized.append(text)
return normalized
@staticmethod
def _coerce_string_list(values: Any) -> list[str]:
if not isinstance(values, list):
return []
return [str(value).strip() for value in values if str(value).strip()]
@staticmethod
def _normalize_persona_aliases(values: Any, field_name: str) -> list[str]:
"""规范化人格别名,保持顺序并去重。"""
normalized = AgentRuntimeManager._normalize_string_list(values, field_name)
deduped: list[str] = []
seen: set[str] = set()
for alias in normalized:
folded = alias.casefold()
if folded in seen:
continue
seen.add(folded)
deduped.append(alias)
return deduped
@staticmethod
def _merge_persona_instructions(
base_body: str,
append_instructions: Optional[list[str]],
) -> str:
"""把增量规则安全追加到人格正文末尾。"""
merged = (base_body or "").strip()
if not append_instructions:
return merged
extras: list[str] = []
for item in append_instructions:
text = str(item).strip()
if not text:
continue
if not re.match(r"^([-*]|\d+\.)\s", text):
text = f"- {text}"
extras.append(text)
if not extras:
return merged
if not merged:
return "\n".join(extras)
return merged.rstrip() + "\n\n" + "\n".join(extras)
@staticmethod
def _normalize_persona_body(body: Optional[str]) -> str:
"""去掉重复的 PERSONA 标题,保持正文可安全回写。"""
normalized = (body or "").strip()
if not normalized:
return ""
if normalized.startswith("# PERSONA"):
_, _, remainder = normalized.partition("\n")
return remainder.strip()
return normalized
def _validate_runtime_config(
self,
*,
current_meta: dict[str, Any],
persona_path: Path,
extra_context_paths: list[Path],
persona_text: str,
) -> list[str]:
warnings: list[str] = []
required_paths = [persona_path]
duplicates = self._find_duplicate_paths(required_paths + extra_context_paths)
if duplicates:
warnings.append(
"检测到重复引用的根层配置文件: "
+ ", ".join(path.as_posix() for path in duplicates)
)
deprecated_phrases = self._normalize_string_list(
current_meta.get("deprecated_phrases"), "deprecated_phrases"
)
if deprecated_phrases:
for phrase in deprecated_phrases:
if phrase and phrase in persona_text:
warnings.append(f"检测到已废弃短语 `{phrase}` 仍出现在 persona 中")
return warnings
@staticmethod
def _find_duplicate_paths(paths: Iterable[Path]) -> list[Path]:
seen: set[Path] = set()
duplicates: list[Path] = []
for path in paths:
resolved = path.resolve()
if resolved in seen and resolved not in duplicates:
duplicates.append(resolved)
seen.add(resolved)
return duplicates
@staticmethod
def _render_current_persona_document(
*,
active_persona: str,
extra_context_files: list[str],
deprecated_phrases: list[str],
) -> str:
"""统一生成 CURRENT_PERSONA.md避免手写时结构漂移。"""
metadata = {
"version": CURRENT_PERSONA_SCHEMA_VERSION,
"active_persona": active_persona,
"extra_context_files": extra_context_files,
"deprecated_phrases": deprecated_phrases,
}
body_lines = [
"# CURRENT_PERSONA",
"",
f"当前激活人格:`{active_persona}`",
"",
"运行时加载顺序固定如下:",
"",
"1. 核心系统提示词(程序内置,不可运行时覆盖)",
"2. `personas/<active_persona>/PERSONA.md`",
"3. `extra_context_files`",
"4. `memory/*.md`",
"5. `activity/*.md`",
"",
"`memory` 中的长期偏好可以细化回复方式,但不应覆盖系统核心身份、目标和安全边界。",
]
frontmatter = yaml.safe_dump(
metadata,
sort_keys=False,
allow_unicode=True,
).strip()
return f"---\n{frontmatter}\n---\n" + "\n".join(body_lines) + "\n"
@staticmethod
def _render_persona_document(
*,
persona_id: str,
label: str,
description: str,
aliases: list[str],
body: str,
) -> str:
"""统一生成人格定义文件,避免手写 frontmatter 漂移。"""
metadata = {
"version": PERSONA_SCHEMA_VERSION,
"persona_id": persona_id,
"label": label,
"description": description,
"aliases": aliases,
}
frontmatter = yaml.safe_dump(
metadata,
sort_keys=False,
allow_unicode=True,
).strip()
normalized_body = AgentRuntimeManager._normalize_persona_body(body)
return f"---\n{frontmatter}\n---\n# PERSONA\n\n{normalized_body}\n"
agent_runtime_manager = AgentRuntimeManager()
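
A minimal sketch (not part of the diff) of how the runtime manager above might be driven. The persona id, label, and instructions are illustrative only.

```python
# Sketch: load the root-level runtime config, then create and activate a persona.
# "assistant" and all text below are hypothetical values.
config = agent_runtime_manager.load_runtime_config()
print(config.active_persona)                              # e.g. "default"
print([p["persona_id"] for p in config.list_personas()])

persona, created = agent_runtime_manager.update_persona_definition(
    "assistant",
    label="Assistant",
    description="A neutral, concise helper persona",
    instructions="Answer briefly and avoid small talk.",
    create_if_missing=True,
)
agent_runtime_manager.set_active_persona(persona.persona_id)
```

Behind the scenes, `set_active_persona` rewrites `runtime/CURRENT_PERSONA.md` with regenerated YAML frontmatter and invalidates the signature cache, so the new persona takes effect on the next `load_runtime_config` call.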

View File

@@ -1,101 +1,191 @@
"""MoviePilot工具基类"""
import asyncio
import json
import threading
from abc import ABCMeta, abstractmethod
from typing import Any, Optional
from concurrent.futures import ThreadPoolExecutor
from functools import partial
from typing import Any, Callable, Optional
from langchain.tools import BaseTool
from langchain_core.tools import BaseTool
from pydantic import PrivateAttr
from app.agent import StreamingCallbackHandler, ConversationMemoryManager
from app.agent import StreamingHandler
from app.chain import ChainBase
from app.core.config import settings
from app.db.user_oper import UserOper
from app.helper.service import ServiceConfigHelper
from app.log import logger
from app.schemas import Notification
from app.schemas.types import MessageChannel
class ToolChain(ChainBase):
pass
# 将常见的阻塞调用按能力域拆分到独立线程池,避免外部慢 IO 抢占同一批 worker。
_BLOCKING_BUCKET_LIMITS = {
"default": 4,
"config": 2,
"db": 4,
"downloader": 4,
"mediaserver": 4,
"plugin": 2,
"rule": 2,
"site": 4,
"storage": 4,
"subscribe": 2,
"workflow": 2,
}
_blocking_semaphores = {
bucket: asyncio.Semaphore(limit)
for bucket, limit in _BLOCKING_BUCKET_LIMITS.items()
}
_blocking_executors: dict[str, ThreadPoolExecutor] = {}
_blocking_executor_lock = threading.Lock()
def _get_blocking_executor(bucket: str) -> ThreadPoolExecutor:
"""按桶懒加载线程池,避免在导入阶段创建过多 worker。"""
with _blocking_executor_lock:
executor = _blocking_executors.get(bucket)
if executor:
return executor
limit = _BLOCKING_BUCKET_LIMITS[bucket]
executor = ThreadPoolExecutor(
max_workers=limit,
thread_name_prefix=f"agent-tool-{bucket}",
)
_blocking_executors[bucket] = executor
return executor
class MoviePilotTool(BaseTool, metaclass=ABCMeta):
"""MoviePilot专用工具基类"""
"""
MoviePilot专用工具基类(LangChain v1 / langchain_core)
"""
_session_id: str = PrivateAttr()
_user_id: str = PrivateAttr()
_channel: str = PrivateAttr(default=None)
_source: str = PrivateAttr(default=None)
_username: str = PrivateAttr(default=None)
_callback_handler: StreamingCallbackHandler = PrivateAttr(default=None)
_memory_manager: ConversationMemoryManager = PrivateAttr(default=None)
_channel: Optional[str] = PrivateAttr(default=None)
_source: Optional[str] = PrivateAttr(default=None)
_username: Optional[str] = PrivateAttr(default=None)
_stream_handler: Optional[StreamingHandler] = PrivateAttr(default=None)
_require_admin: bool = PrivateAttr(default=False)
_agent_context: dict = PrivateAttr(default_factory=dict)
def __init__(self, session_id: str, user_id: str, **kwargs):
super().__init__(**kwargs)
self._session_id = session_id
self._user_id = user_id
self._require_admin = getattr(self.__class__, "require_admin", False)
def _run(self, *args: Any, **kwargs: Any) -> Any:
pass
raise NotImplementedError("MoviePilotTool 只支持异步调用,请使用 _arun")
async def _arun(self, **kwargs) -> str:
"""异步运行工具"""
# 发送和记忆工具调用前的信息
agent_message = await self._callback_handler.get_message()
if agent_message:
# 发送消息
await self.send_tool_message(agent_message, title="MoviePilot助手")
async def _arun(self, *args: Any, **kwargs: Any) -> str:
"""
异步运行工具,负责:
1. 在工具调用前将流式消息推送给用户
2. 持久化工具调用记录到会话记忆
3. 调用具体工具逻辑(子类实现的 execute 方法)
4. 持久化工具结果到会话记忆
5. 权限检查
"""
# 记忆工具调用
await self._memory_manager.add_memory(
session_id=self._session_id,
user_id=self._user_id,
role="tool_call",
content=agent_message,
metadata={
"call_id": self.__class__.__name__,
"tool_name": self.__class__.__name__,
"parameters": kwargs
}
)
permission_result = await self._check_permission()
if permission_result:
return permission_result
# 发送执行工具说明,优先使用工具自定义的提示消息,如果没有则使用 explanation
# 获取工具执行提示消息
tool_message = self.get_tool_message(**kwargs)
if not tool_message:
explanation = kwargs.get("explanation")
if explanation:
tool_message = explanation
if tool_message:
formatted_message = f"⚙️ => {tool_message}"
await self.send_tool_message(formatted_message)
logger.debug(f'Executing tool {self.name} with args: {kwargs}')
result = await self.run(**kwargs)
logger.debug(f'Tool {self.name} executed with result: {result}')
# 记忆工具调用结果
if isinstance(result, str):
formated_result = result
elif isinstance(result, (int, float)):
formated_result = str(result)
# 发送工具执行过程消息
if self._stream_handler and self._stream_handler.is_streaming:
if settings.AI_AGENT_VERBOSE:
if self._stream_handler.is_auto_flushing:
# 渠道支持编辑:工具消息追加到 buffer由定时刷新推送
if tool_message:
self._stream_handler.emit(f"\n\n⚙️ => {tool_message}\n\n")
else:
allow_dispatch_without_context = self._agent_context.get(
"should_dispatch_reply", False
)
if self._channel and self._source:
# 渠道不支持编辑:取出 Agent 文字 + 工具消息合并独立发送
agent_message = await self._stream_handler.take()
messages = []
if agent_message:
messages.append(agent_message)
if tool_message:
messages.append(f"⚙️ => {tool_message}")
if messages:
merged_message = "\n\n".join(messages)
await self.send_tool_message(merged_message)
elif allow_dispatch_without_context:
agent_message = await self._stream_handler.take()
messages = []
if agent_message:
messages.append(agent_message)
if tool_message:
messages.append(f"⚙️ => {tool_message}")
if messages:
merged_message = "\n\n".join(messages)
await self.send_tool_message(merged_message)
else:
# 后台 capture 流程没有渠道上下文,不能把工具提示回灌到默认通知渠道。
self._stream_handler.record_tool_call(
tool_name=self.name,
tool_message=tool_message,
tool_kwargs=kwargs,
)
else:
# 非VERBOSE不逐条回显工具调用转为在下一段文本前补一句聚合摘要
self._stream_handler.record_tool_call(
tool_name=self.name,
tool_message=tool_message,
tool_kwargs=kwargs,
)
else:
formated_result = json.dumps(result, ensure_ascii=False, indent=2)
await self._memory_manager.add_memory(
session_id=self._session_id,
user_id=self._user_id,
role="tool_result",
content=formated_result
)
# 未启用流式传输,不发送任何工具消息内容
pass
return result
logger.debug(f"Executing tool {self.name} with args: {kwargs}")
# 执行具体工具逻辑
try:
result = await self.run(**kwargs)
logger.debug(f"Tool {self.name} executed with result: {result}")
except Exception as e:
error_message = f"工具执行异常 ({type(e).__name__}): {str(e)}"
logger.error(f"Tool {self.name} execution failed: {e}", exc_info=True)
result = error_message
# 格式化结果
if isinstance(result, str):
formatted_result = result
elif isinstance(result, (int, float)):
formatted_result = str(result)
else:
formatted_result = json.dumps(result, ensure_ascii=False, indent=2)
return formatted_result
def get_tool_message(self, **kwargs) -> Optional[str]:
"""
获取工具执行时的友好提示消息
获取工具执行时的友好提示消息
子类可以重写此方法,根据实际参数生成个性化的提示消息。
如果返回 None 或空字符串,将回退使用 explanation 参数。
Args:
**kwargs: 工具的所有参数(包括 explanation
Returns:
str: 友好的提示消息,如果返回 None 或空字符串则使用 explanation
"""
@@ -103,24 +193,161 @@ class MoviePilotTool(BaseTool, metaclass=ABCMeta):
@abstractmethod
async def run(self, **kwargs) -> str:
"""子类实现具体的工具执行逻辑"""
raise NotImplementedError
@staticmethod
async def run_blocking(
bucket: str, func: Callable[..., Any], *args: Any, **kwargs: Any
) -> Any:
"""
在受控线程池中运行阻塞型同步代码,避免拖住 FastAPI 主事件循环。
"""
bucket_name = bucket if bucket in _BLOCKING_BUCKET_LIMITS else "default"
semaphore = _blocking_semaphores[bucket_name]
bound_call = partial(func, *args, **kwargs)
async with semaphore:
loop = asyncio.get_running_loop()
return await loop.run_in_executor(
_get_blocking_executor(bucket_name), bound_call
)
def set_message_attr(self, channel: str, source: str, username: str):
"""设置消息属性"""
"""
设置消息属性
"""
self._channel = channel
self._source = source
self._username = username
def set_callback_handler(self, callback_handler: StreamingCallbackHandler):
"""设置回调处理器"""
self._callback_handler = callback_handler
def set_stream_handler(self, stream_handler: StreamingHandler):
"""
设置回调处理器
"""
self._stream_handler = stream_handler
def set_memory_manager(self, memory_manager: ConversationMemoryManager):
"""设置记忆客理器"""
self._memory_manager = memory_manager
def set_agent_context(self, agent_context: Optional[dict]):
"""
设置与当前 Agent 共享的上下文。
"""
self._agent_context = agent_context or {}
async def send_tool_message(self, message: str, title: str = ""):
"""发送工具消息"""
async def _check_permission(self) -> Optional[str]:
"""
检查用户权限:
1. 首先检查工具是否需要管理员权限
2. 如果需要管理员权限,则检查用户是否是渠道管理员
3. 如果渠道没有设置管理员名单,则检查用户是否是系统管理员
4. 如果也不是系统管理员,检查用户ID是否等于渠道配置的用户ID
5. 如果都不是,返回权限拒绝消息
"""
if not self._require_admin:
return None
if not self._channel or not self._source:
return None
# 渠道配置来自 SystemConfigOper 内存缓存,可以直接读取;
# 只有用户信息需要走异步数据库查询。
user_id_str = str(self._user_id) if self._user_id else None
channel_type_map = {
MessageChannel.Telegram: "telegram",
MessageChannel.Discord: "discord",
MessageChannel.Wechat: "wechat",
MessageChannel.Slack: "slack",
MessageChannel.VoceChat: "vocechat",
MessageChannel.SynologyChat: "synologychat",
MessageChannel.QQ: "qqbot",
}
channel_type = None
for key, value in channel_type_map.items():
if self._channel == key.value:
channel_type = value
break
if not channel_type:
return None
admin_key_map = {
"telegram": "TELEGRAM_ADMINS",
"discord": "DISCORD_ADMINS",
"wechat": "WECHAT_ADMINS",
"slack": "SLACK_ADMINS",
"vocechat": "VOCECHAT_ADMINS",
"synologychat": "SYNOLOGYCHAT_ADMINS",
"qqbot": "QQBOT_ADMINS",
}
user_id_key_map = {
"telegram": "TELEGRAM_CHAT_ID",
"vocechat": "VOCECHAT_CHANNEL_ID",
"wechat": "WECHAT_BOT_CHAT_ID",
}
admin_key = admin_key_map.get(channel_type)
user_id_key = user_id_key_map.get(channel_type)
try:
configs = ServiceConfigHelper.get_notification_configs()
for config in configs:
if config.name == self._source and config.config:
channel_admins = config.config.get(admin_key) if admin_key else None
if channel_admins:
admin_list = [
aid.strip()
for aid in str(channel_admins).split(",")
if aid.strip()
]
if user_id_str and user_id_str in admin_list:
return None
user = (
await UserOper().async_get_by_name(self._username)
if self._username
else None
)
if user and user.is_superuser:
return None
return (
"抱歉,您没有执行此工具的权限。"
"只有渠道管理员或系统管理员才能执行工具操作。"
"如需执行工具请联系渠道管理员将您的用户ID添加到渠道管理员列表中"
"或联系系统管理员为您设置权限。"
)
else:
user = (
await UserOper().async_get_by_name(self._username)
if self._username
else None
)
if user and user.is_superuser:
return None
if user_id_key:
config_user_id = config.config.get(user_id_key)
if config_user_id and str(config_user_id) == user_id_str:
return None
return (
"抱歉,您没有执行此工具的权限。"
"只有系统管理员才能执行工具操作。"
"如需执行工具,请联系系统管理员为您设置权限。"
)
except Exception as e:
logger.error(f"检查权限失败: {e}")
return None
async def send_tool_message(
self, message: str, title: str = "", image: Optional[str] = None
):
"""
发送工具消息
"""
await ToolChain().async_post_message(
Notification(
channel=self._channel,
@@ -128,6 +355,7 @@ class MoviePilotTool(BaseTool, metaclass=ABCMeta):
userid=self._user_id,
username=self._username,
title=title,
text=message
text=message,
image=image,
)
)
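
A minimal sketch (not part of the diff) of a concrete tool built on this base class. The tool name, description, and the query body are hypothetical; they only illustrate the `run()` / `get_tool_message()` contract and the `run_blocking` bucket mechanism.

```python
# Hypothetical example tool: the name, description, and result text are
# illustrative and not part of the MoviePilot codebase shown in this diff.
from typing import Optional

class QueryDownloadSpeedTool(MoviePilotTool):
    name: str = "query_download_speed"
    description: str = "查询下载器当前的实时速度"

    def get_tool_message(self, **kwargs) -> Optional[str]:
        return "正在查询下载器速度..."

    async def run(self, **kwargs) -> str:
        # Run the blocking query in the "downloader" bucket so slow IO does not
        # stall the event loop (bucket names come from _BLOCKING_BUCKET_LIMITS).
        def _query() -> str:
            return "下载: 12.3 MB/s, 上传: 1.1 MB/s"  # placeholder result

        return await self.run_blocking("downloader", _query)
```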

View File

@@ -1,5 +1,3 @@
"""MoviePilot工具工厂"""
from typing import List, Callable
from app.agent.tools.impl.add_download import AddDownloadTool
@@ -18,6 +16,14 @@ from app.agent.tools.impl.test_site import TestSiteTool
from app.agent.tools.impl.query_subscribes import QuerySubscribesTool
from app.agent.tools.impl.query_subscribe_shares import QuerySubscribeSharesTool
from app.agent.tools.impl.query_rule_groups import QueryRuleGroupsTool
from app.agent.tools.impl.query_builtin_filter_rules import QueryBuiltinFilterRulesTool
from app.agent.tools.impl.query_custom_filter_rules import QueryCustomFilterRulesTool
from app.agent.tools.impl.add_custom_filter_rule import AddCustomFilterRuleTool
from app.agent.tools.impl.update_custom_filter_rule import UpdateCustomFilterRuleTool
from app.agent.tools.impl.delete_custom_filter_rule import DeleteCustomFilterRuleTool
from app.agent.tools.impl.add_rule_group import AddRuleGroupTool
from app.agent.tools.impl.update_rule_group import UpdateRuleGroupTool
from app.agent.tools.impl.delete_rule_group import DeleteRuleGroupTool
from app.agent.tools.impl.query_popular_subscribes import QueryPopularSubscribesTool
from app.agent.tools.impl.query_subscribe_history import QuerySubscribeHistoryTool
from app.agent.tools.impl.delete_subscribe import DeleteSubscribeTool
@@ -27,32 +33,117 @@ from app.agent.tools.impl.search_person_credits import SearchPersonCreditsTool
from app.agent.tools.impl.recognize_media import RecognizeMediaTool
from app.agent.tools.impl.scrape_metadata import ScrapeMetadataTool
from app.agent.tools.impl.query_episode_schedule import QueryEpisodeScheduleTool
from app.agent.tools.impl.query_media_detail import QueryMediaDetailTool
from app.agent.tools.impl.search_torrents import SearchTorrentsTool
from app.agent.tools.impl.get_search_results import GetSearchResultsTool
from app.agent.tools.impl.search_web import SearchWebTool
from app.agent.tools.impl.send_message import SendMessageTool
from app.agent.tools.impl.ask_user_choice import AskUserChoiceTool
from app.agent.tools.impl.send_local_file import SendLocalFileTool
from app.agent.tools.impl.send_voice_message import SendVoiceMessageTool
from app.agent.tools.impl.query_schedulers import QuerySchedulersTool
from app.agent.tools.impl.run_scheduler import RunSchedulerTool
from app.agent.tools.impl.query_workflows import QueryWorkflowsTool
from app.agent.tools.impl.run_workflow import RunWorkflowTool
from app.agent.tools.impl.query_personas import QueryPersonasTool
from app.agent.tools.impl.switch_persona import SwitchPersonaTool
from app.agent.tools.impl.update_persona_definition import UpdatePersonaDefinitionTool
from app.agent.tools.impl.update_site_cookie import UpdateSiteCookieTool
from app.agent.tools.impl.delete_download import DeleteDownloadTool
from app.agent.tools.impl.delete_download_history import DeleteDownloadHistoryTool
from app.agent.tools.impl.delete_transfer_history import DeleteTransferHistoryTool
from app.agent.tools.impl.modify_download import ModifyDownloadTool
from app.agent.tools.impl.query_directory_settings import QueryDirectorySettingsTool
from app.agent.tools.impl.list_directory import ListDirectoryTool
from app.agent.tools.impl.query_transfer_history import QueryTransferHistoryTool
from app.agent.tools.impl.transfer_file import TransferFileTool
from app.agent.tools.impl.execute_command import ExecuteCommandTool
from app.agent.tools.impl.edit_file import EditFileTool
from app.agent.tools.impl.write_file import WriteFileTool
from app.agent.tools.impl.read_file import ReadFileTool
from app.agent.tools.impl.browse_webpage import BrowseWebpageTool
from app.agent.tools.impl.query_installed_plugins import QueryInstalledPluginsTool
from app.agent.tools.impl.query_market_plugins import QueryMarketPluginsTool
from app.agent.tools.impl.query_plugin_capabilities import QueryPluginCapabilitiesTool
from app.agent.tools.impl.query_plugin_config import QueryPluginConfigTool
from app.agent.tools.impl.update_plugin_config import UpdatePluginConfigTool
from app.agent.tools.impl.reload_plugin import ReloadPluginTool
from app.agent.tools.impl.query_plugin_data import QueryPluginDataTool
from app.agent.tools.impl.install_plugin import InstallPluginTool
from app.agent.tools.impl.uninstall_plugin import UninstallPluginTool
from app.agent.tools.impl.run_slash_command import RunSlashCommandTool
from app.agent.tools.impl.list_slash_commands import ListSlashCommandsTool
from app.agent.tools.impl.query_custom_identifiers import QueryCustomIdentifiersTool
from app.agent.tools.impl.update_custom_identifiers import UpdateCustomIdentifiersTool
from app.core.plugin import PluginManager
from app.log import logger
from app.schemas.message import ChannelCapabilityManager
from app.schemas.types import MessageChannel
from .base import MoviePilotTool
class MoviePilotToolFactory:
"""MoviePilot工具工厂"""
"""
MoviePilot工具工厂
"""
# 这些通用工具需要始终保留,避免大工具集裁剪后让 Agent 丢失基础的
# 文件系统、命令执行或交互确认能力。AskUserChoiceTool 仅在支持按钮
# 的渠道中才会实际注入,因此后续会再按已加载工具做一次求交集。
TOOL_SELECTOR_ALWAYS_INCLUDE_NAMES = (
"list_directory",
"write_file",
"read_file",
"edit_file",
"execute_command",
"ask_user_choice",
)
@staticmethod
def create_tools(session_id: str, user_id: str,
channel: str = None, source: str = None, username: str = None,
callback_handler: Callable = None, memory_mananger: Callable = None) -> List[MoviePilotTool]:
"""创建MoviePilot工具列表"""
def _should_enable_choice_tool(channel: str = None) -> bool:
if not channel:
return False
try:
message_channel = MessageChannel(channel)
except ValueError:
return False
return ChannelCapabilityManager.supports_buttons(
message_channel
) and ChannelCapabilityManager.supports_callbacks(message_channel)
@classmethod
def get_tool_selector_always_include_names(
cls, tools: List[MoviePilotTool]
) -> List[str]:
"""
返回当前实际已加载且需要绕过工具筛选的工具名。
`LLMToolSelectorMiddleware` 会校验 `always_include` 中的工具名是否
存在于当前请求里,因此这里必须根据运行时工具列表做交集过滤。
"""
available_tool_names = {
tool.name for tool in tools if getattr(tool, "name", None)
}
return [
tool_name
for tool_name in cls.TOOL_SELECTOR_ALWAYS_INCLUDE_NAMES
if tool_name in available_tool_names
]
@staticmethod
def create_tools(
session_id: str,
user_id: str,
channel: str = None,
source: str = None,
username: str = None,
stream_handler: Callable = None,
agent_context: dict = None,
allow_message_tools: bool = True,
) -> List[MoviePilotTool]:
"""
创建MoviePilot工具列表
"""
tools = []
tool_definitions = [
SearchMediaTool,
@@ -61,20 +152,33 @@ class MoviePilotToolFactory:
RecognizeMediaTool,
ScrapeMetadataTool,
QueryEpisodeScheduleTool,
QueryMediaDetailTool,
AddSubscribeTool,
UpdateSubscribeTool,
SearchSubscribeTool,
SearchTorrentsTool,
GetSearchResultsTool,
SearchWebTool,
AddDownloadTool,
QuerySubscribesTool,
QuerySubscribeSharesTool,
QueryPopularSubscribesTool,
QueryBuiltinFilterRulesTool,
QueryCustomFilterRulesTool,
QueryRuleGroupsTool,
AddCustomFilterRuleTool,
UpdateCustomFilterRuleTool,
DeleteCustomFilterRuleTool,
AddRuleGroupTool,
UpdateRuleGroupTool,
DeleteRuleGroupTool,
QuerySubscribeHistoryTool,
DeleteSubscribeTool,
QueryDownloadTasksTool,
DeleteDownloadTool,
DeleteDownloadHistoryTool,
DeleteTransferHistoryTool,
ModifyDownloadTool,
QueryDownloadersTool,
QuerySitesTool,
UpdateSiteTool,
@@ -92,19 +196,47 @@ class MoviePilotToolFactory:
QuerySchedulersTool,
RunSchedulerTool,
QueryWorkflowsTool,
RunWorkflowTool
RunWorkflowTool,
QueryPersonasTool,
SwitchPersonaTool,
UpdatePersonaDefinitionTool,
ExecuteCommandTool,
EditFileTool,
WriteFileTool,
ReadFileTool,
BrowseWebpageTool,
QueryInstalledPluginsTool,
QueryMarketPluginsTool,
QueryPluginCapabilitiesTool,
QueryPluginConfigTool,
UpdatePluginConfigTool,
ReloadPluginTool,
QueryPluginDataTool,
InstallPluginTool,
UninstallPluginTool,
RunSlashCommandTool,
ListSlashCommandsTool,
QueryCustomIdentifiersTool,
UpdateCustomIdentifiersTool,
]
if MoviePilotToolFactory._should_enable_choice_tool(channel):
tool_definitions.append(AskUserChoiceTool)
tool_definitions.extend(
[
SendLocalFileTool,
SendVoiceMessageTool,
]
)
# 创建内置工具
for ToolClass in tool_definitions:
tool = ToolClass(
session_id=session_id,
user_id=user_id
)
tool = ToolClass(session_id=session_id, user_id=user_id)
if not allow_message_tools and getattr(tool, "sends_message", False):
continue
tool.set_message_attr(channel=channel, source=source, username=username)
tool.set_callback_handler(callback_handler=callback_handler)
tool.set_memory_manager(memory_manager=memory_mananger)
tool.set_stream_handler(stream_handler=stream_handler)
tool.set_agent_context(agent_context=agent_context)
tools.append(tool)
# 加载插件提供的工具
plugin_tools_count = 0
plugin_tools_info = PluginManager().get_plugin_agent_tools()
@@ -116,25 +248,34 @@ class MoviePilotToolFactory:
try:
# 验证工具类是否继承自 MoviePilotTool
if not issubclass(ToolClass, MoviePilotTool):
logger.warning(f"插件 {plugin_name}({plugin_id}) 提供的工具类 {ToolClass.__name__} 未继承自 MoviePilotTool已跳过")
logger.warning(
f"插件 {plugin_name}({plugin_id}) 提供的工具类 {ToolClass.__name__} 未继承自 MoviePilotTool已跳过"
)
continue
# 创建工具实例
tool = ToolClass(
session_id=session_id,
user_id=user_id
tool = ToolClass(session_id=session_id, user_id=user_id)
if not allow_message_tools and getattr(tool, "sends_message", False):
continue
tool.set_message_attr(
channel=channel, source=source, username=username
)
tool.set_message_attr(channel=channel, source=source, username=username)
tool.set_callback_handler(callback_handler=callback_handler)
tool.set_memory_manager(memory_manager=memory_mananger)
tool.set_stream_handler(stream_handler=stream_handler)
tool.set_agent_context(agent_context=agent_context)
tools.append(tool)
plugin_tools_count += 1
logger.debug(f"成功加载插件 {plugin_name}({plugin_id}) 的工具: {ToolClass.__name__}")
logger.debug(
f"成功加载插件 {plugin_name}({plugin_id}) 的工具: {ToolClass.__name__}"
)
except Exception as e:
logger.error(f"加载插件 {plugin_name}({plugin_id}) 的工具 {ToolClass.__name__} 失败: {str(e)}")
logger.error(
f"加载插件 {plugin_name}({plugin_id}) 的工具 {ToolClass.__name__} 失败: {str(e)}"
)
builtin_tools_count = len(tool_definitions)
if plugin_tools_count > 0:
logger.info(f"成功创建 {len(tools)} 个MoviePilot工具内置工具: {builtin_tools_count} 个,插件工具: {plugin_tools_count} 个)")
logger.info(
f"成功创建 {len(tools)} 个MoviePilot工具内置工具: {builtin_tools_count} 个,插件工具: {plugin_tools_count} 个)"
)
else:
logger.info(f"成功创建 {len(tools)} 个MoviePilot工具")
return tools
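
A minimal sketch of how a caller could combine the two entry points above: build the tool list first, then intersect the always-include names against it before handing them to the tool-selector middleware. The factory's module path and the exact MessageChannel value are assumptions, since neither is shown in this diff.

# Hedged sketch; module path and channel value are assumptions, see note above.
# from app.agent.tools.factory import MoviePilotToolFactory  # import path assumed

tools = MoviePilotToolFactory.create_tools(
    session_id="session-1",
    user_id="42",
    channel="Telegram",   # must be a valid MessageChannel value for AskUserChoiceTool to be added
    allow_message_tools=True,
)
always_include = MoviePilotToolFactory.get_tool_selector_always_include_names(tools)
# always_include now only contains names that actually exist in `tools`, which is what
# the tool-selector middleware's always_include validation requires.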


@@ -0,0 +1,540 @@
"""过滤规则 Agent 工具共用的校验、查询和引用处理逻辑。"""
import copy
import re
from typing import Any, Dict, Iterable, Optional
from app.core.event import eventmanager
from app.db import AsyncSessionFactory
from app.db.models.subscribe import Subscribe
from app.db.systemconfig_oper import SystemConfigOper
from app.helper.rule import RuleHelper
from app.modules.filter.RuleParser import RuleParser
from app.modules.filter.builtin_rules import BUILTIN_RULE_SET
from app.schemas import CustomRule, FilterRuleGroup
from app.schemas.event import ConfigChangeEventData
from app.schemas.types import EventType, SystemConfigKey
RULE_ID_PATTERN = re.compile(r"^[A-Za-z0-9]+$")
RULE_TOKEN_PATTERN = re.compile(r"[A-Za-z][A-Za-z0-9]*|[0-9][A-Za-z0-9]+")
NUMERIC_RANGE_PATTERN = re.compile(
r"^\d+(?:\.\d+)?(?:\s*-\s*\d+(?:\.\d+)?)?$"
)
MEDIA_TYPE_ALIASES = {
"movie": "电影",
"film": "电影",
"tv": "电视剧",
"series": "电视剧",
"show": "电视剧",
"电影": "电影",
"电视剧": "电视剧",
}
RULE_STRING_SYNTAX = {
"level_separator": ">",
"and_operator": "&",
"not_operator": "!",
"supported_grouping": "Parentheses are supported inside a single level.",
"spacing_note": "Prefer spaces around '&', and '>' for readability; use '!RULE' for negation.",
"match_order": "Levels are evaluated from left to right. The first matched level wins and stops further matching.",
"match_result": "If no level matches, the torrent is filtered out. If a level matches, the torrent is kept.",
"writing_workflow": [
"First query built-in rules and custom rules to learn valid rule IDs.",
"Compose one priority level with '&', '!' and optional parentheses.",
"Join multiple priority levels with '>' from highest priority to lowest priority.",
"Use spaces around '&', and '>' for readability.",
],
"examples": [
{
"description": "Prefer torrents with special subtitles and Chinese dubbing at 4K, otherwise fall back to Chinese subtitles and Chinese dubbing at 4K.",
"rule_string": "SPECSUB & CNVOI & 4K & !BLU & !REMUX & !WEBDL > CNSUB & CNVOI & 4K & !BLU & !REMUX & !WEBDL",
},
{
"description": "Inside one level, require 4K and reject Blu-ray source.",
"rule_string": "4K & !BLU",
},
{
"description": "Inside one level, accept either special subtitles or Chinese subtitles, then also require 1080P.",
"rule_string": "(SPECSUB | CNSUB) & 1080P",
},
],
}
def normalize_optional_text(value: Optional[str]) -> Optional[str]:
"""把空白字符串折叠为 None避免保存无意义的空值。"""
if value is None:
return None
value = str(value).strip()
return value or None
def normalize_media_type(value: Optional[str]) -> Optional[str]:
"""兼容英中文媒体类型输入,最终统一为后端实际使用的中文值。"""
value = normalize_optional_text(value)
if not value:
return None
normalized = MEDIA_TYPE_ALIASES.get(value.lower(), value)
if normalized not in {"电影", "电视剧"}:
raise ValueError(
"media_type 仅支持 '电影''电视剧''movie''tv'"
)
return normalized
def validate_numeric_range(
field_name: str, value: Optional[str]
) -> Optional[str]:
"""校验 size_range / publish_time 这类单值或区间值。"""
value = normalize_optional_text(value)
if not value:
return None
if not NUMERIC_RANGE_PATTERN.match(value):
raise ValueError(
f"{field_name} 格式无效,支持 '1000''1000-5000' 这类数字区间格式"
)
parts = [float(item.strip()) for item in value.split("-")]
if len(parts) == 2 and parts[0] > parts[1]:
raise ValueError(f"{field_name} 区间起始值不能大于结束值")
return value
def validate_seeders(value: Optional[str]) -> Optional[str]:
"""做种人数最终会被 int() 解析,这里提前拦住非法值。"""
value = normalize_optional_text(value)
if not value:
return None
if not value.isdigit():
raise ValueError("seeders 必须是非负整数")
return value
def get_builtin_rules() -> Dict[str, dict]:
"""返回内置规则的深拷贝,避免调用方误改共享常量。"""
return copy.deepcopy(BUILTIN_RULE_SET)
def get_custom_rules() -> list[CustomRule]:
return RuleHelper().get_custom_rules()
def get_rule_groups() -> list[FilterRuleGroup]:
return RuleHelper().get_rule_groups()
def build_custom_rule_map(rules: Optional[Iterable[CustomRule]] = None) -> Dict[str, CustomRule]:
return {
rule.id: rule
for rule in (rules or get_custom_rules())
if rule.id
}
def build_rule_group_map(
groups: Optional[Iterable[FilterRuleGroup]] = None,
) -> Dict[str, FilterRuleGroup]:
return {
group.name: group
for group in (groups or get_rule_groups())
if group.name
}
def extract_rule_tokens(rule_string: Optional[str]) -> list[str]:
"""从规则串里提取规则 ID用于引用分析和未知规则校验。"""
if not rule_string:
return []
# dict.fromkeys 用来在保留顺序的同时去重,便于展示和报错。
return list(dict.fromkeys(RULE_TOKEN_PATTERN.findall(rule_string)))
def parse_rule_string(rule_string: str) -> dict:
"""使用后端同款 RuleParser 解析规则串,并拆出每一层的元数据。"""
normalized = normalize_optional_text(rule_string)
if not normalized:
raise ValueError("rule_string 不能为空")
parser = RuleParser()
levels = [level.strip() for level in normalized.split(">")]
if any(not level for level in levels):
raise ValueError("rule_string 不能包含空层级,请检查 '>' 两侧内容")
parsed_levels = []
for index, level in enumerate(levels, start=1):
try:
parser.parse(level)
except Exception as exc: # pragma: no cover - 依赖 pyparsing 的具体异常
raise ValueError(f"规则串第 {index} 层语法错误: {exc}") from exc
parsed_levels.append(
{
"priority": index,
"expression": level,
"referenced_rules": extract_rule_tokens(level),
}
)
return {
"rule_string": " > ".join(levels),
"levels": parsed_levels,
"referenced_rules": extract_rule_tokens(normalized),
}
def validate_rule_string(rule_string: str, available_rule_ids: Iterable[str]) -> dict:
"""校验规则串语法和引用规则是否都存在。"""
parsed = parse_rule_string(rule_string)
available_ids = set(available_rule_ids)
unknown_rules = sorted(
{
rule_id
for rule_id in parsed["referenced_rules"]
if rule_id not in available_ids
}
)
if unknown_rules:
raise ValueError(
f"rule_string 引用了不存在的规则: {', '.join(unknown_rules)}"
)
return parsed
def serialize_builtin_rule(rule_id: str, payload: dict) -> dict:
"""把内置规则整理成适合 Agent 阅读的结构。"""
data = copy.deepcopy(payload)
data["id"] = rule_id
data["source"] = "builtin"
return data
def serialize_custom_rule(rule: CustomRule, group_refs: Optional[list[str]] = None) -> dict:
data = rule.model_dump(exclude_none=True)
data["source"] = "custom"
data["referenced_by_rule_groups"] = group_refs or []
return data
def serialize_rule_group(group: FilterRuleGroup, usage: Optional[dict] = None) -> dict:
"""查询时尽量附带解析结果,便于 Agent 理解优先级层级。"""
data = group.model_dump(exclude_none=True)
if group.rule_string:
try:
parsed = parse_rule_string(group.rule_string)
data["levels"] = parsed["levels"]
data["referenced_rules"] = parsed["referenced_rules"]
data["syntax_valid"] = True
except ValueError as exc:
data["syntax_valid"] = False
data["syntax_error"] = str(exc)
data["referenced_rules"] = extract_rule_tokens(group.rule_string)
else:
data["syntax_valid"] = False
data["syntax_error"] = "rule_string 为空"
data["referenced_rules"] = []
data["usage"] = usage or default_rule_group_usage()
return data
def default_rule_group_usage() -> dict:
return {
"used_in_global_search": False,
"used_in_global_subscribe": False,
"used_in_global_best_version": False,
"subscribes": [],
}
async def collect_rule_group_usages(
group_names: Optional[Iterable[str]] = None,
) -> Dict[str, dict]:
"""收集规则组在全局配置和订阅上的引用情况。"""
target_names = set(group_names or [])
search_groups = set(
SystemConfigOper().get(SystemConfigKey.SearchFilterRuleGroups) or []
)
subscribe_groups = set(
SystemConfigOper().get(SystemConfigKey.SubscribeFilterRuleGroups) or []
)
best_version_groups = set(
SystemConfigOper().get(SystemConfigKey.BestVersionFilterRuleGroups) or []
)
usage_map = {
name: default_rule_group_usage()
for name in target_names
}
def ensure_usage(name: str) -> dict:
if name not in usage_map:
usage_map[name] = default_rule_group_usage()
return usage_map[name]
for name in search_groups:
if target_names and name not in target_names:
continue
ensure_usage(name)["used_in_global_search"] = True
for name in subscribe_groups:
if target_names and name not in target_names:
continue
ensure_usage(name)["used_in_global_subscribe"] = True
for name in best_version_groups:
if target_names and name not in target_names:
continue
ensure_usage(name)["used_in_global_best_version"] = True
async with AsyncSessionFactory() as db:
subscribes = await Subscribe.async_list(db)
for subscribe in subscribes:
filter_groups = subscribe.filter_groups or []
for name in filter_groups:
if target_names and name not in target_names:
continue
ensure_usage(name)["subscribes"].append(
{
"subscribe_id": subscribe.id,
"name": subscribe.name,
"season": subscribe.season,
"type": subscribe.type,
"username": subscribe.username,
"best_version": bool(subscribe.best_version),
}
)
return usage_map
def collect_custom_rule_group_refs(
rule_groups: Iterable[FilterRuleGroup],
rule_ids: Optional[Iterable[str]] = None,
) -> Dict[str, list[str]]:
"""收集自定义规则被哪些规则组引用。"""
target_rule_ids = set(rule_ids or [])
refs: Dict[str, list[str]] = {
rule_id: []
for rule_id in target_rule_ids
}
for group in rule_groups:
if not group.name or not group.rule_string:
continue
referenced = set(extract_rule_tokens(group.rule_string))
for rule_id in referenced:
if target_rule_ids and rule_id not in target_rule_ids:
continue
refs.setdefault(rule_id, []).append(group.name)
for names in refs.values():
names.sort()
return refs
def normalize_custom_rule(
rule_id: str,
name: str,
include: Optional[str],
exclude: Optional[str],
size_range: Optional[str],
seeders: Optional[str],
publish_time: Optional[str],
existing_rules: Iterable[CustomRule],
original_rule_id: Optional[str] = None,
) -> CustomRule:
"""新增/更新自定义规则时统一走这里,避免多处散落校验逻辑。"""
normalized_rule_id = normalize_optional_text(rule_id)
normalized_name = normalize_optional_text(name)
if not normalized_rule_id:
raise ValueError("rule_id 不能为空")
if not normalized_name:
raise ValueError("name 不能为空")
if not RULE_ID_PATTERN.match(normalized_rule_id):
raise ValueError("rule_id 仅支持英文字母和数字")
if (
normalized_rule_id in BUILTIN_RULE_SET
and normalized_rule_id != original_rule_id
):
raise ValueError(
f"rule_id '{normalized_rule_id}' 与内置规则冲突,不能覆盖内置规则"
)
for existing_rule in existing_rules:
if (
existing_rule.id == normalized_rule_id
and existing_rule.id != original_rule_id
):
raise ValueError(f"rule_id '{normalized_rule_id}' 已存在")
if (
existing_rule.name == normalized_name
and existing_rule.id != original_rule_id
):
raise ValueError(f"规则名称 '{normalized_name}' 已存在")
return CustomRule(
id=normalized_rule_id,
name=normalized_name,
include=normalize_optional_text(include),
exclude=normalize_optional_text(exclude),
size_range=validate_numeric_range("size_range", size_range),
seeders=validate_seeders(seeders),
publish_time=validate_numeric_range("publish_time", publish_time),
)
def normalize_rule_group(
name: str,
rule_string: str,
media_type: Optional[str],
category: Optional[str],
existing_groups: Iterable[FilterRuleGroup],
available_rule_ids: Iterable[str],
original_name: Optional[str] = None,
) -> tuple[FilterRuleGroup, dict]:
"""新增/更新规则组时统一校验名字、适用范围和规则串。"""
normalized_name = normalize_optional_text(name)
if not normalized_name:
raise ValueError("规则组名称不能为空")
for group in existing_groups:
if group.name == normalized_name and group.name != original_name:
raise ValueError(f"规则组名称 '{normalized_name}' 已存在")
normalized_media_type = normalize_media_type(media_type)
normalized_category = normalize_optional_text(category)
if normalized_category and not normalized_media_type:
raise ValueError("设置 category 时必须同时设置 media_type")
parsed = validate_rule_string(rule_string, available_rule_ids)
return (
FilterRuleGroup(
name=normalized_name,
rule_string=parsed["rule_string"],
media_type=normalized_media_type,
category=normalized_category,
),
parsed,
)
async def save_system_config(
key: SystemConfigKey, value: Any
) -> Optional[bool]:
"""通过统一入口保存配置并补发 ConfigChanged 事件。"""
normalized_value = value
if isinstance(normalized_value, list):
normalized_value = [
item
for item in normalized_value
if item is not None and item != ""
]
normalized_value = normalized_value or None
success = await SystemConfigOper().async_set(key, normalized_value)
if success:
await eventmanager.async_send_event(
etype=EventType.ConfigChanged,
data=ConfigChangeEventData(
key=key,
value=normalized_value,
change_type="update",
),
)
return success
def replace_rule_id_in_rule_string(
rule_string: str, old_rule_id: str, new_rule_id: str
) -> str:
"""只替换完整 token避免误伤其他规则名。"""
pattern = re.compile(
rf"(?<![A-Za-z0-9]){re.escape(old_rule_id)}(?![A-Za-z0-9])"
)
return pattern.sub(new_rule_id, rule_string)
def replace_group_name_in_list(
values: Optional[Iterable[str]], old_name: str, new_name: str
) -> list[str]:
"""更新配置里的规则组名引用,并顺手去重。"""
result = []
for value in values or []:
mapped = new_name if value == old_name else value
if mapped not in result:
result.append(mapped)
return result
async def rename_rule_group_references(old_name: str, new_name: str) -> dict:
"""规则组改名后,联动更新全局设置和订阅引用。"""
changed = {
"global_settings": {},
"subscribes": [],
}
for config_key in (
SystemConfigKey.SearchFilterRuleGroups,
SystemConfigKey.SubscribeFilterRuleGroups,
SystemConfigKey.BestVersionFilterRuleGroups,
):
original = SystemConfigOper().get(config_key) or []
updated = replace_group_name_in_list(original, old_name, new_name)
if updated != original:
await save_system_config(config_key, updated)
changed["global_settings"][config_key.value] = updated
async with AsyncSessionFactory() as db:
subscribes = await Subscribe.async_list(db)
for subscribe in subscribes:
original = subscribe.filter_groups or []
updated = replace_group_name_in_list(original, old_name, new_name)
if updated == original:
continue
await subscribe.async_update(db, {"filter_groups": updated})
changed["subscribes"].append(
{
"subscribe_id": subscribe.id,
"name": subscribe.name,
"season": subscribe.season,
"filter_groups": updated,
}
)
return changed
async def remove_rule_group_references(group_name: str) -> dict:
"""删除规则组后,清理全局设置和订阅里的悬空引用。"""
changed = {
"global_settings": {},
"subscribes": [],
}
for config_key in (
SystemConfigKey.SearchFilterRuleGroups,
SystemConfigKey.SubscribeFilterRuleGroups,
SystemConfigKey.BestVersionFilterRuleGroups,
):
original = SystemConfigOper().get(config_key) or []
updated = [value for value in original if value != group_name]
if updated != original:
await save_system_config(config_key, updated)
changed["global_settings"][config_key.value] = updated
async with AsyncSessionFactory() as db:
subscribes = await Subscribe.async_list(db)
for subscribe in subscribes:
original = subscribe.filter_groups or []
updated = [value for value in original if value != group_name]
if updated == original:
continue
await subscribe.async_update(db, {"filter_groups": updated})
changed["subscribes"].append(
{
"subscribe_id": subscribe.id,
"name": subscribe.name,
"season": subscribe.season,
"filter_groups": updated,
}
)
return changed
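
A minimal sketch of the rule-string helpers above, assuming the module is importable at the path used by the tool files later in this compare view and that the project's RuleParser accepts the syntax documented in RULE_STRING_SYNTAX; the rule IDs are illustrative.

from app.agent.tools.impl._filter_rule_utils import (
    extract_rule_tokens,
    replace_rule_id_in_rule_string,
    validate_rule_string,
)

rule_string = "SPECSUB & CNVOI & 4K & !BLU > CNSUB & 4K"
tokens = extract_rule_tokens(rule_string)             # ['SPECSUB', 'CNVOI', '4K', 'BLU', 'CNSUB']
parsed = validate_rule_string(rule_string, available_rule_ids=set(tokens))
assert parsed["levels"][0]["priority"] == 1           # levels are numbered from the highest priority

# Only whole tokens are replaced, so a hypothetical '4KPLUS' rule would stay untouched.
renamed = replace_rule_id_in_rule_string(rule_string, "4K", "2160P")
assert renamed == "SPECSUB & CNVOI & 2160P & !BLU > CNSUB & 2160P"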


@@ -0,0 +1,290 @@
"""插件 Agent 工具共享辅助方法"""
import json
import shutil
from typing import Any, Optional
from app.core.config import settings
from app.core.plugin import PluginManager
from app.db.systemconfig_oper import SystemConfigOper
from app.helper.plugin import PluginHelper
from app.schemas.types import SystemConfigKey
# 默认只向智能体返回一个可读预览,避免超大插件数据挤爆上下文窗口。
DEFAULT_PLUGIN_DATA_PREVIEW_CHARS = 12_000
MAX_PLUGIN_DATA_PREVIEW_CHARS = 50_000
PLUGIN_DATA_KEY_PREVIEW_LIMIT = 50
PLUGIN_DATA_TRUNCATION_SUFFIX = "\n...(插件数据内容过长,已截断)"
DEFAULT_PLUGIN_CANDIDATE_LIMIT = 500
def get_plugin_snapshot(plugin_id: str) -> Optional[dict[str, Any]]:
"""
获取已安装插件的基础信息快照。
"""
plugin_manager = PluginManager()
for plugin in plugin_manager.get_local_plugins():
if plugin.id == plugin_id:
return {
"plugin_id": plugin.id,
"plugin_name": plugin.plugin_name,
"plugin_version": plugin.plugin_version,
"state": plugin.state,
}
return None
def clamp_preview_chars(max_chars: Optional[int]) -> int:
"""
约束插件数据预览长度,避免工具结果无限膨胀。
"""
if max_chars is None:
return DEFAULT_PLUGIN_DATA_PREVIEW_CHARS
return max(512, min(int(max_chars), MAX_PLUGIN_DATA_PREVIEW_CHARS))
def serialize_for_agent(value: Any) -> str:
"""
将结果稳定序列化为 JSON 字符串,无法原生序列化的对象退化为字符串。
"""
return json.dumps(value, ensure_ascii=False, indent=2, default=str)
def build_preview_payload(value: Any, max_chars: Optional[int]) -> tuple[bool, int, int, str]:
"""
为可能很大的插件数据生成预览结果。
"""
serialized = serialize_for_agent(value)
if len(serialized) <= clamp_preview_chars(max_chars):
return False, len(serialized), len(serialized), serialized
preview_limit = clamp_preview_chars(max_chars)
preview = serialized[:preview_limit] + PLUGIN_DATA_TRUNCATION_SUFFIX
return True, len(serialized), len(preview), preview
def reload_plugin_runtime(plugin_id: str) -> None:
"""
重载插件并重新注册其命令、定时任务和 API。
"""
# 这些依赖只在真正执行重载时才导入,避免普通查询工具引入不必要的初始化开销。
from app.api.endpoints.plugin import register_plugin_api
from app.command import Command
from app.scheduler import Scheduler
plugin_manager = PluginManager()
plugin_manager.reload_plugin(plugin_id)
Scheduler().update_plugin_job(plugin_id)
Command().init_commands(plugin_id)
register_plugin_api(plugin_id)
def summarize_plugin(plugin: Any) -> dict[str, Any]:
"""
提取插件对象中对 Agent 有价值的摘要字段。
"""
repo_url = getattr(plugin, "repo_url", None)
return {
"id": getattr(plugin, "id", None),
"plugin_name": getattr(plugin, "plugin_name", None),
"plugin_desc": getattr(plugin, "plugin_desc", None),
"plugin_version": getattr(plugin, "plugin_version", None),
"plugin_author": getattr(plugin, "plugin_author", None),
"installed": bool(getattr(plugin, "installed", False)),
"has_update": bool(getattr(plugin, "has_update", False)),
"state": bool(getattr(plugin, "state", False)),
"repo_url": repo_url,
"source": "local_repo" if PluginHelper.is_local_repo_url(repo_url) else "market",
}
async def load_market_plugins(force_refresh: bool = False) -> list[Any]:
"""
聚合插件市场与本地插件仓库中的候选插件。
"""
plugin_manager = PluginManager()
online_plugins = await plugin_manager.async_get_online_plugins(force=force_refresh)
local_repo_plugins = plugin_manager.get_local_repo_plugins()
if not online_plugins and not local_repo_plugins:
return []
return plugin_manager.process_plugins_list(online_plugins + local_repo_plugins, [])
def list_installed_plugins() -> list[Any]:
"""
返回当前已安装插件列表。
"""
plugin_manager = PluginManager()
return [plugin for plugin in plugin_manager.get_local_plugins() if plugin.installed]
def _normalize_text(value: Optional[str]) -> str:
return (value or "").strip().lower()
def is_exact_plugin_match(plugin: Any, query: str) -> bool:
"""
精确匹配插件 ID 或插件名称,用于安全地自动选择候选。
"""
normalized_query = _normalize_text(query)
return normalized_query in {
_normalize_text(getattr(plugin, "id", None)),
_normalize_text(getattr(plugin, "plugin_name", None)),
}
def search_plugin_candidates(query: str, plugins: list[Any]) -> list[dict[str, Any]]:
"""
按插件 ID、名称、描述和作者搜索候选并返回打分结果。
"""
normalized_query = _normalize_text(query)
if not normalized_query:
return []
tokens = [token for token in normalized_query.replace("-", " ").split() if token]
matches: list[dict[str, Any]] = []
for plugin in plugins:
plugin_id = _normalize_text(getattr(plugin, "id", None))
plugin_name = _normalize_text(getattr(plugin, "plugin_name", None))
plugin_desc = _normalize_text(getattr(plugin, "plugin_desc", None))
plugin_author = _normalize_text(getattr(plugin, "plugin_author", None))
haystack = "\n".join([plugin_id, plugin_name, plugin_desc, plugin_author])
score = 0
if normalized_query == plugin_id:
score = 100
elif normalized_query == plugin_name:
score = 95
elif plugin_id.startswith(normalized_query):
score = 85
elif plugin_name.startswith(normalized_query):
score = 80
elif normalized_query in plugin_id:
score = 75
elif normalized_query in plugin_name:
score = 70
elif tokens and all(token in plugin_name for token in tokens):
score = 68
elif tokens and all(token in plugin_id for token in tokens):
score = 66
elif normalized_query in plugin_desc:
score = 45
elif normalized_query in plugin_author:
score = 40
elif tokens and all(token in haystack for token in tokens):
score = 35
if score <= 0:
continue
matches.append(
{
"plugin": plugin,
"score": score,
"exact": is_exact_plugin_match(plugin, normalized_query),
}
)
return sorted(
matches,
key=lambda item: (
-item["score"],
not item["exact"],
-int(bool(getattr(item["plugin"], "has_update", False))),
-int(bool(getattr(item["plugin"], "installed", False))),
-int(getattr(item["plugin"], "add_time", 0) or 0),
),
)
def summarize_candidates(matches: list[dict[str, Any]], limit: int = DEFAULT_PLUGIN_CANDIDATE_LIMIT) -> list[dict[str, Any]]:
"""
压缩候选列表,避免一次性把完整市场数据返回给 Agent。
"""
return [
{
**summarize_plugin(item["plugin"]),
"score": item["score"],
"exact": item["exact"],
}
for item in matches[:limit]
]
async def install_plugin_runtime(
plugin_id: str, repo_url: Optional[str], force: bool = False
) -> tuple[bool, str, bool]:
"""
按现有插件接口的行为安装插件,并刷新运行态注册信息。
"""
install_plugins = SystemConfigOper().get(SystemConfigKey.UserInstalledPlugins) or []
plugin_manager = PluginManager()
plugin_helper = PluginHelper()
refreshed_only = False
if not force and plugin_id in plugin_manager.get_plugin_ids():
refreshed_only = True
await plugin_helper.async_install_reg(pid=plugin_id, repo_url=repo_url)
message = "插件已存在,已刷新加载"
else:
if not repo_url:
return False, "没有传入仓库地址,无法正确安装插件,请检查配置", False
state, message = await plugin_helper.async_install(
pid=plugin_id,
repo_url=repo_url,
force_install=force,
)
if not state:
return False, message, False
if plugin_id not in install_plugins:
install_plugins.append(plugin_id)
await SystemConfigOper().async_set(
SystemConfigKey.UserInstalledPlugins, install_plugins
)
reload_plugin_runtime(plugin_id)
return True, message or "插件安装成功", refreshed_only
async def uninstall_plugin_runtime(plugin_id: str) -> dict[str, Any]:
"""
按现有卸载逻辑移除插件,并清理运行态注册与分组信息。
"""
from app.api.endpoints.plugin import _remove_plugin_from_folders, remove_plugin_api
from app.scheduler import Scheduler
config_oper = SystemConfigOper()
install_plugins = config_oper.get(SystemConfigKey.UserInstalledPlugins) or []
if plugin_id in install_plugins:
install_plugins = [plugin for plugin in install_plugins if plugin != plugin_id]
await config_oper.async_set(SystemConfigKey.UserInstalledPlugins, install_plugins)
remove_plugin_api(plugin_id)
Scheduler().remove_plugin_job(plugin_id)
plugin_manager = PluginManager()
plugin_class = plugin_manager.plugins.get(plugin_id)
was_clone = bool(getattr(plugin_class, "is_clone", False))
clone_files_removed = False
if was_clone:
plugin_manager.delete_plugin_config(plugin_id)
plugin_manager.delete_plugin_data(plugin_id)
plugin_base_dir = settings.ROOT_PATH / "app" / "plugins" / plugin_id.lower()
if plugin_base_dir.exists():
try:
shutil.rmtree(plugin_base_dir)
plugin_manager.plugins.pop(plugin_id, None)
clone_files_removed = True
except Exception:
clone_files_removed = False
_remove_plugin_from_folders(plugin_id)
plugin_manager.remove_plugin(plugin_id)
return {
"was_clone": was_clone,
"clone_files_removed": clone_files_removed,
}
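
A small sketch of the candidate scoring and preview truncation above, using stand-in plugin objects; the helper module's import path is not shown in this diff, so the functions are referenced by name only, under the assumption that they are importable alongside the other impl helpers.

# Hedged sketch with stand-in plugin objects (real ones come from PluginManager).
from types import SimpleNamespace

plugins = [
    SimpleNamespace(id="CloudDisk", plugin_name="云盘助手", plugin_desc="同步云盘文件",
                    plugin_author="demo", installed=True, has_update=False, add_time=0),
    SimpleNamespace(id="CloudDiskPro", plugin_name="云盘助手Pro", plugin_desc="",
                    plugin_author="demo", installed=False, has_update=False, add_time=0),
]
matches = search_plugin_candidates("clouddisk", plugins)
assert matches[0]["plugin"].id == "CloudDisk" and matches[0]["exact"] is True

truncated, total_chars, preview_chars, preview = build_preview_payload({"items": list(range(10))}, max_chars=None)
assert truncated is False and total_chars == preview_chars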


@@ -0,0 +1,176 @@
"""种子搜索工具辅助函数"""
import re
from typing import List, Optional
from app.core.context import Context
from app.utils.crypto import HashUtils
from app.utils.string import StringUtils
SEARCH_RESULT_CACHE_FILE = "__search_result__"
TORRENT_RESULT_LIMIT = 50
def build_torrent_ref(context: Optional[Context]) -> str:
"""生成用于下载校验的短引用"""
if not context or not context.torrent_info:
return ""
return HashUtils.sha1(context.torrent_info.enclosure or "")[:7]
def sort_season_options(options: List[str]) -> List[str]:
"""按前端逻辑排序季集选项"""
if len(options) <= 1:
return options
parsed_options = []
for index, option in enumerate(options):
match = re.match(r"^S(\d+)(?:-S(\d+))?\s*(?:E(\d+)(?:-E(\d+))?)?$", option or "")
if not match:
parsed_options.append({
"original": option,
"season_num": 0,
"episode_num": 0,
"max_episode_num": 0,
"is_whole_season": False,
"index": index,
})
continue
episode_num = int(match.group(3)) if match.group(3) else 0
max_episode_num = int(match.group(4)) if match.group(4) else episode_num
parsed_options.append({
"original": option,
"season_num": int(match.group(1)),
"episode_num": episode_num,
"max_episode_num": max_episode_num,
"is_whole_season": not match.group(3),
"index": index,
})
whole_seasons = [item for item in parsed_options if item["is_whole_season"]]
episodes = [item for item in parsed_options if not item["is_whole_season"]]
whole_seasons.sort(key=lambda item: (-item["season_num"], item["index"]))
episodes.sort(
key=lambda item: (
-item["season_num"],
-(item["max_episode_num"] or item["episode_num"]),
-item["episode_num"],
item["index"],
)
)
return [item["original"] for item in whole_seasons + episodes]
def append_option(options: List[str], value: Optional[str]) -> None:
"""按前端逻辑收集去重后的筛选项"""
if value and value not in options:
options.append(value)
def build_filter_options(items: List[Context]) -> dict:
"""从搜索结果中构建筛选项汇总"""
filter_options = {
"site": [],
"season": [],
"freeState": [],
"edition": [],
"resolution": [],
"videoCode": [],
"releaseGroup": [],
}
for item in items:
torrent_info = item.torrent_info
meta_info = item.meta_info
append_option(filter_options["site"], getattr(torrent_info, "site_name", None))
append_option(filter_options["season"], getattr(meta_info, "season_episode", None))
append_option(filter_options["freeState"], getattr(torrent_info, "volume_factor", None))
append_option(filter_options["edition"], getattr(meta_info, "edition", None))
append_option(filter_options["resolution"], getattr(meta_info, "resource_pix", None))
append_option(filter_options["videoCode"], getattr(meta_info, "video_encode", None))
append_option(filter_options["releaseGroup"], getattr(meta_info, "resource_team", None))
filter_options["season"] = sort_season_options(filter_options["season"])
return filter_options
def match_filter(filter_values: Optional[List[str]], value: Optional[str]) -> bool:
"""匹配前端同款多选筛选规则"""
return not filter_values or bool(value and value in filter_values)
def filter_contexts(items: List[Context],
site: Optional[List[str]] = None,
season: Optional[List[str]] = None,
free_state: Optional[List[str]] = None,
video_code: Optional[List[str]] = None,
edition: Optional[List[str]] = None,
resolution: Optional[List[str]] = None,
release_group: Optional[List[str]] = None) -> List[Context]:
"""按前端同款维度筛选结果"""
filtered_items = []
for item in items:
torrent_info = item.torrent_info
meta_info = item.meta_info
if (
match_filter(site, getattr(torrent_info, "site_name", None))
and match_filter(free_state, getattr(torrent_info, "volume_factor", None))
and match_filter(season, getattr(meta_info, "season_episode", None))
and match_filter(release_group, getattr(meta_info, "resource_team", None))
and match_filter(video_code, getattr(meta_info, "video_encode", None))
and match_filter(resolution, getattr(meta_info, "resource_pix", None))
and match_filter(edition, getattr(meta_info, "edition", None))
):
filtered_items.append(item)
return filtered_items
def simplify_search_result(context: Context, index: int) -> dict:
"""精简单条搜索结果"""
simplified = {}
torrent_info = context.torrent_info
meta_info = context.meta_info
media_info = context.media_info
if torrent_info:
simplified["torrent_info"] = {
"title": torrent_info.title,
"size": StringUtils.format_size(torrent_info.size),
"seeders": torrent_info.seeders,
"peers": torrent_info.peers,
"site_name": torrent_info.site_name,
"torrent_url": f"{build_torrent_ref(context)}:{index}",
"page_url": torrent_info.page_url,
"volume_factor": torrent_info.volume_factor,
"freedate_diff": torrent_info.freedate_diff,
"pubdate": torrent_info.pubdate,
}
if media_info:
simplified["media_info"] = {
"title": media_info.title,
"en_title": media_info.en_title,
"year": media_info.year,
"type": media_info.type.value if media_info.type else None,
"season": media_info.season,
"tmdb_id": media_info.tmdb_id,
}
if meta_info:
simplified["meta_info"] = {
"name": meta_info.name,
"cn_name": meta_info.cn_name,
"en_name": meta_info.en_name,
"year": meta_info.year,
"type": meta_info.type.value if meta_info.type else None,
"begin_season": meta_info.begin_season,
"season_episode": meta_info.season_episode,
"resource_team": meta_info.resource_team,
"video_encode": meta_info.video_encode,
"edition": meta_info.edition,
"resource_pix": meta_info.resource_pix,
}
return simplified
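
The `torrent_url` ref emitted above is `sha1(enclosure)[:7]` plus the 1-based index of the cached result, which is exactly what add_download later validates. A minimal sketch of the two pure helpers, assuming this helper module is importable (its path is not shown in the diff); the option strings are illustrative.

# Hedged sketch of the pure helpers above; no Context objects are required.
options = ["S01 E02", "S02", "S01", "S01 E05-E08"]
assert sort_season_options(options) == ["S02", "S01", "S01 E05-E08", "S01 E02"]

# Multi-select filter semantics: an empty selection matches everything.
assert match_filter(None, "1080p") is True
assert match_filter(["1080p", "2160p"], "1080p") is True
assert match_filter(["2160p"], "1080p") is False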


@@ -0,0 +1,111 @@
"""新增自定义过滤规则工具。"""
import json
from typing import Optional, Type
from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool
from app.agent.tools.impl._filter_rule_utils import (
get_custom_rules,
normalize_custom_rule,
save_system_config,
serialize_custom_rule,
)
from app.log import logger
from app.schemas.types import SystemConfigKey
class AddCustomFilterRuleInput(BaseModel):
"""新增自定义过滤规则工具的输入参数模型"""
explanation: str = Field(
...,
description="Clear explanation of why this tool is being used in the current context",
)
rule_id: str = Field(
...,
description="Unique custom rule ID. Only letters and numbers are allowed.",
)
name: str = Field(..., description="Display name of the custom rule.")
include: Optional[str] = Field(
None, description="Optional include regex for the rule."
)
exclude: Optional[str] = Field(
None, description="Optional exclude regex for the rule."
)
size_range: Optional[str] = Field(
None, description="Optional size range in MB, for example '1000-5000'."
)
seeders: Optional[str] = Field(
None, description="Optional minimum seeder count as a non-negative integer."
)
publish_time: Optional[str] = Field(
None,
description="Optional publish-time filter in minutes, for example '60' or '60-1440'.",
)
class AddCustomFilterRuleTool(MoviePilotTool):
name: str = "add_custom_filter_rule"
description: str = (
"Add a custom filter rule to CustomFilterRules. "
"The new rule can then be referenced by rule ID inside filter rule groups."
)
args_schema: Type[BaseModel] = AddCustomFilterRuleInput
require_admin: bool = True
def get_tool_message(self, **kwargs) -> Optional[str]:
return f"新增自定义过滤规则 {kwargs.get('rule_id', '')}"
async def run(
self,
rule_id: str,
name: str,
include: Optional[str] = None,
exclude: Optional[str] = None,
size_range: Optional[str] = None,
seeders: Optional[str] = None,
publish_time: Optional[str] = None,
**kwargs,
) -> str:
logger.info(f"执行工具: {self.name}, rule_id={rule_id}")
try:
custom_rules = get_custom_rules()
new_rule = normalize_custom_rule(
rule_id=rule_id,
name=name,
include=include,
exclude=exclude,
size_range=size_range,
seeders=seeders,
publish_time=publish_time,
existing_rules=custom_rules,
)
custom_rules.append(new_rule)
await save_system_config(
SystemConfigKey.CustomFilterRules,
[rule.model_dump(exclude_none=True) for rule in custom_rules],
)
return json.dumps(
{
"success": True,
"message": f"已新增自定义过滤规则 {new_rule.id}",
"custom_rule": serialize_custom_rule(new_rule),
"count": len(custom_rules),
},
ensure_ascii=False,
indent=2,
)
except Exception as exc:
logger.error(f"新增自定义过滤规则失败: {exc}", exc_info=True)
return json.dumps(
{
"success": False,
"message": f"新增自定义过滤规则失败: {exc}",
},
ensure_ascii=False,
)
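
The tool above delegates its validation to normalize_custom_rule from _filter_rule_utils, so the success and failure paths can be exercised without touching system config. A minimal sketch with illustrative rule values.

from app.agent.tools.impl._filter_rule_utils import normalize_custom_rule

rule = normalize_custom_rule(
    rule_id="CNDUB", name="国语配音",
    include=r"国语|粤语", exclude=None,
    size_range="1000-5000", seeders="5", publish_time=None,
    existing_rules=[],
)
assert rule.size_range == "1000-5000"

try:
    # A second rule with the same rule_id is rejected before anything is saved.
    normalize_custom_rule(
        rule_id="CNDUB", name="另一条规则", include=None, exclude=None,
        size_range=None, seeders=None, publish_time=None, existing_rules=[rule],
    )
except ValueError as exc:
    print(exc)  # rule_id 'CNDUB' 已存在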


@@ -1,27 +1,31 @@
"""添加下载工具"""
from typing import Optional, Type
import re
from pathlib import Path
from typing import List, Optional, Type
from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool, ToolChain
from app.chain.search import SearchChain
from app.chain.download import DownloadChain
from app.core.config import settings
from app.core.context import Context
from app.core.metainfo import MetaInfo
from app.db.site_oper import SiteOper
from app.helper.directory import DirectoryHelper
from app.log import logger
from app.schemas import TorrentInfo
from app.schemas import TorrentInfo, FileURI
from app.utils.crypto import HashUtils
class AddDownloadInput(BaseModel):
"""添加下载工具的输入参数模型"""
explanation: str = Field(..., description="Clear explanation of why this tool is being used in the current context")
site_name: str = Field(..., description="Name of the torrent site/source (e.g., 'The Pirate Bay')")
torrent_title: str = Field(...,
description="The display name/title of the torrent (e.g., 'The.Matrix.1999.1080p.BluRay.x264')")
torrent_url: str = Field(..., description="Direct URL to the torrent file (.torrent) or magnet link")
torrent_description: Optional[str] = Field(None,
description="Brief description of the torrent content (optional)")
torrent_url: List[str] = Field(
...,
description="One or more torrent_url values. Supports refs from get_search_results (`hash:id`) and magnet links."
)
downloader: Optional[str] = Field(None,
description="Name of the downloader to use (optional, uses default if not specified)")
save_path: Optional[str] = Field(None,
@@ -32,75 +36,301 @@ class AddDownloadInput(BaseModel):
class AddDownloadTool(MoviePilotTool):
name: str = "add_download"
description: str = "Add torrent download task to the configured downloader (qBittorrent, Transmission, etc.). Downloads the torrent file and starts the download process with specified settings."
description: str = "Add torrent download tasks using refs from get_search_results or magnet links."
args_schema: Type[BaseModel] = AddDownloadInput
def get_tool_message(self, **kwargs) -> Optional[str]:
"""根据下载参数生成友好的提示消息"""
torrent_title = kwargs.get("torrent_title", "")
site_name = kwargs.get("site_name", "")
torrent_urls = self._normalize_torrent_urls(kwargs.get("torrent_url"))
downloader = kwargs.get("downloader")
message = f"正在添加下载任务: {torrent_title}"
if site_name:
message += f" (来源: {site_name})"
if torrent_urls:
if len(torrent_urls) == 1:
if self._is_torrent_ref(torrent_urls[0]):
message = f"添加下载任务: 资源 {torrent_urls[0]}"
else:
message = "添加下载任务: 磁力链接"
else:
message = f"批量添加下载任务: 共 {len(torrent_urls)} 个资源"
else:
message = "添加下载任务"
if downloader:
message += f" [下载器: {downloader}]"
return message
async def run(self, site_name: str, torrent_title: str, torrent_url: str, torrent_description: Optional[str] = None,
@staticmethod
def _build_torrent_ref(context: Context) -> str:
"""生成用于校验缓存项的短引用"""
if not context or not context.torrent_info:
return ""
return HashUtils.sha1(context.torrent_info.enclosure or "")[:7]
@staticmethod
def _is_torrent_ref(torrent_ref: Optional[str]) -> bool:
"""判断是否为内部搜索结果引用"""
if not torrent_ref:
return False
return bool(re.fullmatch(r"[0-9a-f]{7}:\d+", str(torrent_ref).strip()))
@staticmethod
def _is_magnet_link_input(torrent_url: Optional[str]) -> bool:
"""判断输入是否为允许直接添加的磁力链接"""
if not torrent_url:
return False
value = str(torrent_url).strip()
return value.startswith("magnet:")
@classmethod
def _resolve_cached_context(cls, torrent_ref: str) -> Optional[Context]:
"""从最近一次搜索缓存中解析种子上下文,仅支持 hash:id 格式"""
ref = str(torrent_ref).strip()
if ":" not in ref:
return None
try:
ref_hash, ref_index = ref.split(":", 1)
index = int(ref_index)
except (TypeError, ValueError):
return None
if index < 1:
return None
results = SearchChain().last_search_results() or []
if index > len(results):
return None
context = results[index - 1]
if not ref_hash or cls._build_torrent_ref(context) != ref_hash:
return None
return context
@classmethod
async def _async_resolve_cached_context(cls, torrent_ref: str) -> Optional[Context]:
"""异步读取最近搜索缓存,避免在协程里直接访问同步文件缓存。"""
ref = str(torrent_ref).strip()
if ":" not in ref:
return None
try:
ref_hash, ref_index = ref.split(":", 1)
index = int(ref_index)
except (TypeError, ValueError):
return None
if index < 1:
return None
results = await SearchChain().async_last_search_results() or []
if index > len(results):
return None
context = results[index - 1]
if not ref_hash or cls._build_torrent_ref(context) != ref_hash:
return None
return context
@staticmethod
def _merge_labels_with_system_tag(labels: Optional[str]) -> Optional[str]:
"""合并用户标签与系统默认标签,确保任务可被系统管理"""
system_tag = (settings.TORRENT_TAG or "").strip()
user_labels = [item.strip() for item in (labels or "").split(",") if item.strip()]
if system_tag and system_tag not in user_labels:
user_labels.append(system_tag)
return ",".join(user_labels) if user_labels else None
@staticmethod
def _format_failed_result(failed_messages: List[str]) -> str:
"""统一格式化失败结果"""
return ", ".join([message for message in failed_messages if message])
@staticmethod
def _build_failure_message(torrent_ref: str, error_msg: Optional[str] = None) -> str:
"""构造失败提示"""
normalized_error = (error_msg or "").strip()
prefix = "添加种子任务失败:"
if normalized_error.startswith(prefix):
normalized_error = normalized_error[len(prefix):].lstrip()
if AddDownloadTool._is_magnet_link_input(normalized_error):
normalized_error = ""
if normalized_error:
return f"{torrent_ref} {normalized_error}"
if AddDownloadTool._is_torrent_ref(torrent_ref):
return torrent_ref
return ""
@classmethod
def _normalize_torrent_urls(cls, torrent_url: Optional[List[str] | str]) -> List[str]:
"""统一规范 torrent_url 输入,保留所有非空值"""
if torrent_url is None:
return []
if isinstance(torrent_url, str):
candidates = torrent_url.split(",")
else:
candidates = torrent_url
return [str(item).strip() for item in candidates if item and str(item).strip()]
@staticmethod
def _resolve_direct_download_dir(save_path: Optional[str]) -> Optional[Path]:
"""解析直接下载使用的目录,优先使用 save_path其次使用默认下载目录"""
if save_path:
return Path(save_path)
download_dirs = DirectoryHelper().get_download_dirs()
if not download_dirs:
return None
dir_conf = download_dirs[0]
if not dir_conf.download_path:
return None
return Path(FileURI(storage=dir_conf.storage or "local", path=dir_conf.download_path).uri)
@staticmethod
def _download_direct_sync(
torrent_input: str,
download_dir: Path,
merged_labels: Optional[str],
downloader: Optional[str],
) -> tuple[Optional[str], Optional[str]]:
"""同步添加磁力下载任务,避免下载器调用阻塞事件循环。"""
result = DownloadChain().download(
content=torrent_input,
download_dir=download_dir,
cookie=None,
label=merged_labels,
downloader=downloader,
)
if result:
_, did, _, error_msg = result
else:
did, error_msg = None, "未找到下载器"
return did, error_msg
@staticmethod
def _download_single_sync(
context: Context,
downloader: Optional[str],
save_path: Optional[str],
merged_labels: Optional[str],
) -> tuple[Optional[str], Optional[str]]:
"""同步提交带上下文的下载任务,避免站点下载与下载器调用阻塞事件循环。"""
return DownloadChain().download_single(
context=context,
downloader=downloader,
save_path=save_path,
label=merged_labels,
return_detail=True,
)
async def run(self, torrent_url: Optional[List[str]] = None,
downloader: Optional[str] = None, save_path: Optional[str] = None,
labels: Optional[str] = None, **kwargs) -> str:
logger.info(
f"执行工具: {self.name}, 参数: site_name={site_name}, torrent_title={torrent_title}, torrent_url={torrent_url}, downloader={downloader}, save_path={save_path}, labels={labels}")
f"执行工具: {self.name}, 参数: torrent_url={torrent_url}, downloader={downloader}, save_path={save_path}, labels={labels}")
try:
if not torrent_title or not torrent_url:
return "错误:必须提供种子标题和下载链接"
torrent_inputs = self._normalize_torrent_urls(torrent_url)
if not torrent_inputs:
return "错误torrent_url 不能为空。"
# 使用DownloadChain添加下载
download_chain = DownloadChain()
merged_labels = self._merge_labels_with_system_tag(labels)
success_count = 0
failed_messages = []
# 根据站点名称查询站点cookie
if not site_name:
return "错误:必须提供站点名称,请从搜索资源结果信息中获取"
siteinfo = await SiteOper().async_get_by_name(site_name)
if not siteinfo:
return f"错误:未找到站点信息:{site_name}"
for torrent_input in torrent_inputs:
if self._is_torrent_ref(torrent_input):
cached_context = await self._async_resolve_cached_context(torrent_input)
if not cached_context or not cached_context.torrent_info:
failed_messages.append(f"{torrent_input} 引用无效,请重新使用 get_search_results 查看搜索结果")
continue
# 创建下载上下文
torrent_info = TorrentInfo(
title=torrent_title,
description=torrent_description,
enclosure=torrent_url,
site_name=site_name,
site_ua=siteinfo.ua,
site_cookie=siteinfo.cookie,
site_proxy=siteinfo.proxy,
site_order=siteinfo.pri,
site_downloader=siteinfo.downloader
)
meta_info = MetaInfo(title=torrent_title, subtitle=torrent_description)
media_info = await ToolChain().async_recognize_media(meta=meta_info)
if not media_info:
return "错误:无法识别媒体信息,无法添加下载任务"
context = Context(
torrent_info=torrent_info,
meta_info=meta_info,
media_info=media_info
)
cached_torrent = cached_context.torrent_info
site_name = cached_torrent.site_name
torrent_title = cached_torrent.title or torrent_input
torrent_description = cached_torrent.description
enclosure = cached_torrent.enclosure
did = download_chain.download_single(
context=context,
downloader=downloader,
save_path=save_path,
label=labels
)
if did:
return f"成功添加下载任务:{torrent_title}"
else:
return "添加下载任务失败"
if not site_name:
failed_messages.append(f"{torrent_input} 缺少站点名称")
continue
siteinfo = await SiteOper().async_get_by_name(site_name)
if not siteinfo:
failed_messages.append(f"{torrent_input} 未找到站点信息 {site_name}")
continue
torrent_info = TorrentInfo(
title=torrent_title,
description=torrent_description,
enclosure=enclosure,
site_name=site_name,
site_ua=siteinfo.ua,
site_cookie=siteinfo.cookie,
site_proxy=siteinfo.proxy,
site_order=siteinfo.pri,
site_downloader=siteinfo.downloader
)
meta_info = MetaInfo(title=torrent_title, subtitle=torrent_description)
media_info = cached_context.media_info if cached_context.media_info else None
if not media_info:
media_info = await ToolChain().async_recognize_media(meta=meta_info)
if not media_info:
failed_messages.append(f"{torrent_input} 无法识别媒体信息")
continue
context = Context(
torrent_info=torrent_info,
meta_info=meta_info,
media_info=media_info
)
else:
if not self._is_magnet_link_input(torrent_input):
failed_messages.append(
f"{torrent_input} 不是有效的下载内容,非 hash:id 时仅支持 magnet: 开头"
)
continue
download_dir = await self.run_blocking(
"storage", self._resolve_direct_download_dir, save_path
)
if not download_dir:
failed_messages.append(f"{torrent_input} 缺少保存路径,且系统未配置可用下载目录")
continue
did, error_msg = await self.run_blocking(
"downloader",
self._download_direct_sync,
torrent_input,
download_dir,
merged_labels,
downloader,
)
if did:
success_count += 1
else:
failed_messages.append(self._build_failure_message(torrent_input, error_msg))
continue
did, error_msg = await self.run_blocking(
"downloader",
self._download_single_sync,
context,
downloader,
save_path,
merged_labels,
)
if did:
success_count += 1
else:
failed_messages.append(self._build_failure_message(torrent_input, error_msg))
if success_count and not failed_messages:
return "任务添加成功"
if success_count:
return f"部分任务添加失败:{self._format_failed_result(failed_messages)}"
return f"任务添加失败:{self._format_failed_result(failed_messages)}"
except Exception as e:
logger.error(f"添加下载任务失败: {e}", exc_info=True)
return f"添加下载任务时发生错误: {str(e)}"


@@ -0,0 +1,115 @@
"""新增过滤规则组工具。"""
import json
from typing import Optional, Type
from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool
from app.agent.tools.impl._filter_rule_utils import (
build_custom_rule_map,
collect_rule_group_usages,
get_builtin_rules,
get_custom_rules,
get_rule_groups,
normalize_rule_group,
save_system_config,
serialize_rule_group,
)
from app.log import logger
from app.schemas.types import SystemConfigKey
class AddRuleGroupInput(BaseModel):
"""新增过滤规则组工具的输入参数模型"""
explanation: str = Field(
...,
description="Clear explanation of why this tool is being used in the current context",
)
name: str = Field(..., description="New rule group name.")
rule_string: str = Field(
...,
description=(
"Rule expression using built-in/custom rule IDs. "
"Use '&', '!' inside one level, and use '>' between priority levels. "
"Example: 'SPECSUB & CNVOI & 4K & !BLU > CNSUB & CNVOI & 4K & !BLU'."
),
)
media_type: Optional[str] = Field(
None,
description="Optional media type scope: '电影', '电视剧', 'movie', or 'tv'.",
)
category: Optional[str] = Field(
None,
description="Optional media category. Only valid when media_type is set.",
)
class AddRuleGroupTool(MoviePilotTool):
name: str = "add_rule_group"
description: str = (
"Add a new filter rule group to UserFilterRuleGroups. "
"Rule groups are matched level by level from left to right and can be linked to search/subscription flows. "
"Before calling this tool, first use query_builtin_filter_rules and query_custom_filter_rules to confirm valid rule IDs, "
"and optionally use query_rule_groups to imitate existing rule_string patterns."
)
args_schema: Type[BaseModel] = AddRuleGroupInput
require_admin: bool = True
def get_tool_message(self, **kwargs) -> Optional[str]:
return f"新增规则组 {kwargs.get('name', '')}"
async def run(
self,
name: str,
rule_string: str,
media_type: Optional[str] = None,
category: Optional[str] = None,
**kwargs,
) -> str:
logger.info(f"执行工具: {self.name}, name={name}")
try:
custom_rules = get_custom_rules()
available_rule_ids = set(get_builtin_rules().keys()) | set(
build_custom_rule_map(custom_rules).keys()
)
rule_groups = get_rule_groups()
new_group, _ = normalize_rule_group(
name=name,
rule_string=rule_string,
media_type=media_type,
category=category,
existing_groups=rule_groups,
available_rule_ids=available_rule_ids,
)
rule_groups.append(new_group)
await save_system_config(
SystemConfigKey.UserFilterRuleGroups,
[group.model_dump(exclude_none=True) for group in rule_groups],
)
usage = await collect_rule_group_usages([new_group.name])
return json.dumps(
{
"success": True,
"message": f"已新增规则组 {new_group.name}",
"rule_group": serialize_rule_group(
new_group, usage.get(new_group.name)
),
"count": len(rule_groups),
},
ensure_ascii=False,
indent=2,
)
except Exception as exc:
logger.error(f"新增规则组失败: {exc}", exc_info=True)
return json.dumps(
{
"success": False,
"message": f"新增规则组失败: {exc}",
},
ensure_ascii=False,
)
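
A minimal sketch of the query-then-add workflow this tool's description prescribes, assuming the constructor arguments follow the factory code shown earlier; the rule IDs come from the examples in RULE_STRING_SYNTAX and would normally be confirmed first via query_builtin_filter_rules / query_custom_filter_rules.

import asyncio

from app.agent.tools.impl.add_rule_group import AddRuleGroupTool

async def demo() -> None:
    tool = AddRuleGroupTool(session_id="session-1", user_id="42")  # constructor args assumed from the factory code
    result_json = await tool.run(
        name="4K中字优先",
        rule_string="SPECSUB & 4K & !BLU > CNSUB & 4K & !BLU",
        media_type="tv",  # normalized to '电视剧' by normalize_media_type
    )
    print(result_json)

# asyncio.run(demo())  # requires an initialized MoviePilot environment (system config, event manager)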


@@ -1,45 +1,86 @@
"""添加订阅工具"""
from typing import Optional, Type, List
from typing import List, Optional, Type
from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool
from app.chain.subscribe import SubscribeChain
from app.db.user_oper import UserOper
from app.log import logger
from app.schemas.types import MediaType
from app.schemas.types import MediaType, MessageChannel
class AddSubscribeInput(BaseModel):
"""添加订阅工具的输入参数模型"""
explanation: str = Field(..., description="Clear explanation of why this tool is being used in the current context")
title: str = Field(..., description="The title of the media to subscribe to (e.g., 'The Matrix', 'Breaking Bad')")
year: str = Field(..., description="Release year of the media (required for accurate identification)")
media_type: str = Field(...,
description="Type of media content: '电影' for films, '电视剧' for television series or anime series")
season: Optional[int] = Field(None,
description="Season number for TV shows (optional, if not specified will subscribe to all seasons)")
tmdb_id: Optional[str] = Field(None,
description="TMDB database ID for precise media identification (optional but recommended for accuracy)")
start_episode: Optional[int] = Field(None,
description="Starting episode number for TV shows (optional, defaults to 1 if not specified)")
total_episode: Optional[int] = Field(None,
description="Total number of episodes for TV shows (optional, will be auto-detected from TMDB if not specified)")
quality: Optional[str] = Field(None,
description="Quality filter as regular expression (optional, e.g., 'BluRay|WEB-DL|HDTV')")
resolution: Optional[str] = Field(None,
description="Resolution filter as regular expression (optional, e.g., '1080p|720p|2160p')")
effect: Optional[str] = Field(None,
description="Effect filter as regular expression (optional, e.g., 'HDR|DV|SDR')")
filter_groups: Optional[List[str]] = Field(None,
description="List of filter rule group names to apply (optional, use query_rule_groups tool to get available rule groups)")
sites: Optional[List[int]] = Field(None,
description="List of site IDs to search from (optional, use query_sites tool to get available site IDs)")
explanation: str = Field(
...,
description="Clear explanation of why this tool is being used in the current context",
)
title: str = Field(
...,
description="The title of the media to subscribe to (e.g., 'The Matrix', 'Breaking Bad')",
)
year: str = Field(
...,
description="Release year of the media (required for accurate identification)",
)
media_type: str = Field(..., description="Allowed values: movie, tv")
season: Optional[int] = Field(
None,
description=(
"Season number for TV shows (optional). If omitted, the subscription defaults to season 1 only. "
"To subscribe multiple seasons or the full series, call this tool separately for each season."
),
)
tmdb_id: Optional[int] = Field(
None,
description="TMDB database ID for precise media identification (optional, can be obtained from search_media tool)",
)
douban_id: Optional[str] = Field(
None,
description="Douban ID for precise media identification (optional, alternative to tmdb_id)",
)
start_episode: Optional[int] = Field(
None,
description="Starting episode number for TV shows (optional, defaults to 1 if not specified)",
)
total_episode: Optional[int] = Field(
None,
description="Total number of episodes for TV shows (optional, will be auto-detected from TMDB if not specified)",
)
quality: Optional[str] = Field(
None,
description="Quality filter as regular expression (optional, e.g., 'BluRay|WEB-DL|HDTV')",
)
resolution: Optional[str] = Field(
None,
description="Resolution filter as regular expression (optional, e.g., '1080p|720p|2160p')",
)
effect: Optional[str] = Field(
None,
description="Effect filter as regular expression (optional, e.g., 'HDR|DV|SDR')",
)
filter_groups: Optional[List[str]] = Field(
None,
description="List of filter rule group names to apply (optional, can be obtained from query_rule_groups tool)",
)
sites: Optional[List[int]] = Field(
None,
description="List of site IDs to search from (optional, can be obtained from query_sites tool)",
)
class AddSubscribeTool(MoviePilotTool):
name: str = "add_subscribe"
description: str = "Add media subscription to create automated download rules for movies and TV shows. The system will automatically search and download new episodes or releases based on the subscription criteria. Supports advanced filtering options like quality, resolution, and effect filters using regular expressions."
description: str = (
"Add media subscription to create automated download rules for movies and TV shows. "
"The system will automatically search and download new episodes or releases based on the subscription criteria. "
"For TV shows, omitting `season` subscribes season 1 only by default; to subscribe multiple seasons or "
"the full series, call this tool once per season. Supports advanced filtering options like quality, "
"resolution, and effect filters using regular expressions."
)
args_schema: Type[BaseModel] = AddSubscribeInput
def get_tool_message(self, **kwargs) -> Optional[str]:
@@ -48,67 +89,127 @@ class AddSubscribeTool(MoviePilotTool):
year = kwargs.get("year", "")
media_type = kwargs.get("media_type", "")
season = kwargs.get("season")
message = f"正在添加订阅: {title}"
message = f"添加订阅: {title}"
if year:
message += f" ({year})"
if media_type:
message += f" [{media_type}]"
if season:
message += f"{season}"
elif media_type == "tv":
message += " 第1季(默认)"
return message
async def run(self, title: str, year: str, media_type: str,
season: Optional[int] = None, tmdb_id: Optional[str] = None,
start_episode: Optional[int] = None, total_episode: Optional[int] = None,
quality: Optional[str] = None, resolution: Optional[str] = None,
effect: Optional[str] = None, filter_groups: Optional[List[str]] = None,
sites: Optional[List[int]] = None, **kwargs) -> str:
async def _resolve_subscribe_username(self) -> Optional[str]:
"""优先映射为系统用户名,未绑定时回退当前渠道用户名。"""
resolved_username = self._username
if not self._channel or not self._user_id:
return resolved_username
try:
channel = MessageChannel(self._channel)
except ValueError:
return resolved_username
binding_keys = {
MessageChannel.Telegram: ("telegram_userid",),
MessageChannel.Discord: ("discord_userid",),
MessageChannel.Wechat: ("wechat_userid",),
MessageChannel.Slack: ("slack_userid",),
MessageChannel.VoceChat: ("vocechat_userid",),
MessageChannel.SynologyChat: ("synologychat_userid",),
MessageChannel.QQ: ("qq_userid", "qq_openid"),
}.get(channel)
if not binding_keys:
return resolved_username
mapped_username = await self.run_blocking(
"db",
UserOper().get_name,
**{key: self._user_id for key in binding_keys},
)
return mapped_username or resolved_username
async def run(
self,
title: str,
year: str,
media_type: str,
season: Optional[int] = None,
tmdb_id: Optional[int] = None,
douban_id: Optional[str] = None,
start_episode: Optional[int] = None,
total_episode: Optional[int] = None,
quality: Optional[str] = None,
resolution: Optional[str] = None,
effect: Optional[str] = None,
filter_groups: Optional[List[str]] = None,
sites: Optional[List[int]] = None,
**kwargs,
) -> str:
logger.info(
f"执行工具: {self.name}, 参数: title={title}, year={year}, media_type={media_type}, "
f"season={season}, tmdb_id={tmdb_id}, start_episode={start_episode}, "
f"season={season}, tmdb_id={tmdb_id}, douban_id={douban_id}, start_episode={start_episode}, "
f"total_episode={total_episode}, quality={quality}, resolution={resolution}, "
f"effect={effect}, filter_groups={filter_groups}, sites={sites}")
f"effect={effect}, filter_groups={filter_groups}, sites={sites}"
)
try:
subscribe_chain = SubscribeChain()
# 转换 tmdb_id 为整数
tmdbid_int = None
if tmdb_id:
try:
tmdbid_int = int(tmdb_id)
except (ValueError, TypeError):
logger.warning(f"无效的 tmdb_id: {tmdb_id},将忽略")
media_type_enum = MediaType.from_agent(media_type)
if not media_type_enum:
return f"错误:无效的媒体类型 '{media_type}',支持的类型:'movie', 'tv'"
effective_season = (
season
if season is not None
else 1
if media_type_enum == MediaType.TV
else None
)
subscribe_username = await self._resolve_subscribe_username()
# 构建额外的订阅参数
subscribe_kwargs = {}
if start_episode is not None:
subscribe_kwargs['start_episode'] = start_episode
subscribe_kwargs["start_episode"] = start_episode
if total_episode is not None:
subscribe_kwargs['total_episode'] = total_episode
subscribe_kwargs["total_episode"] = total_episode
if quality:
subscribe_kwargs['quality'] = quality
subscribe_kwargs["quality"] = quality
if resolution:
subscribe_kwargs['resolution'] = resolution
subscribe_kwargs["resolution"] = resolution
if effect:
subscribe_kwargs['effect'] = effect
subscribe_kwargs["effect"] = effect
if filter_groups:
subscribe_kwargs['filter_groups'] = filter_groups
subscribe_kwargs["filter_groups"] = filter_groups
if sites:
subscribe_kwargs['sites'] = sites
subscribe_kwargs["sites"] = sites
sid, message = await subscribe_chain.async_add(
mtype=MediaType(media_type),
mtype=media_type_enum,
title=title,
year=year,
tmdbid=tmdbid_int,
tmdbid=tmdb_id,
doubanid=douban_id,
season=season,
username=self._user_id,
**subscribe_kwargs
username=subscribe_username,
**subscribe_kwargs,
)
if sid:
if message and "已存在" in message:
result_msg = f"订阅已存在:{title} ({year})"
if effective_season is not None:
result_msg += f"{effective_season}"
result_msg += "。如需修改参数请先删除旧订阅。"
return result_msg
result_msg = f"成功添加订阅:{title} ({year})"
if effective_season is not None:
result_msg += f"{effective_season}"
if season is None:
result_msg += "(未指定季号,默认按第一季订阅)"
if subscribe_kwargs:
params = []
if start_episode is not None:

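A minimal, hedged illustration of the per-season behaviour described in the field and tool descriptions above; all values are hypothetical, and argument names follow AddSubscribeInput:

# Hedged sketch: per the description, each season needs its own call.
# Values are illustrative only; tmdb_id can be obtained via a media search tool.
season1_args = {
    "explanation": "User asked to subscribe to Breaking Bad S01-S02",
    "title": "Breaking Bad",
    "year": "2008",
    "media_type": "tv",
    "season": 1,
    "tmdb_id": 1396,
}
season2_args = {**season1_args, "season": 2}
# Omitting "season" for a TV show subscribes season 1 only by default.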
View File

@@ -0,0 +1,174 @@
"""让用户通过按钮进行选择的工具。"""
from typing import List, Optional, Type
from pydantic import BaseModel, Field, model_validator
from app.agent.tools.base import MoviePilotTool, ToolChain
from app.helper.interaction import (
AgentInteractionOption,
agent_interaction_manager,
)
from app.log import logger
from app.schemas import Notification, NotificationType
from app.schemas.message import ChannelCapabilityManager
from app.schemas.types import MessageChannel
class UserChoiceOptionInput(BaseModel):
"""单个按钮选项。"""
label: str = Field(..., description="Text shown on the button")
value: str = Field(
...,
description="The exact content that will be sent back to the agent after the user clicks this button",
)
@model_validator(mode="after")
def validate_option(self):
if not self.label.strip():
raise ValueError("label 不能为空")
if not self.value.strip():
raise ValueError("value 不能为空")
return self
class AskUserChoiceInput(BaseModel):
"""按钮选择工具输入。"""
explanation: str = Field(
...,
description="Clear explanation of why the agent needs the user to choose from buttons",
)
message: str = Field(
...,
description="Question or prompt shown to the user together with the buttons",
)
title: Optional[str] = Field(
None,
description="Optional short title displayed above the question",
)
options: List[UserChoiceOptionInput] = Field(
...,
description="Button options to show to the user",
)
@model_validator(mode="after")
def validate_payload(self):
if not self.message.strip():
raise ValueError("message 不能为空")
if not self.options:
raise ValueError("options 至少需要提供一个")
return self
class AskUserChoiceTool(MoviePilotTool):
name: str = "ask_user_choice"
sends_message: bool = True
description: str = (
"Ask the user to choose from button options on channels that support interactive buttons. "
"After the user clicks a button, the selected value will come back as the user's next message."
)
args_schema: Type[BaseModel] = AskUserChoiceInput
require_admin: bool = False
def get_tool_message(self, **kwargs) -> Optional[str]:
message = kwargs.get("message", "") or ""
if len(message) > 40:
message = message[:40] + "..."
return f"发送按钮选择: {message}"
@staticmethod
def _truncate_button_text(text: str, max_length: int) -> str:
if max_length <= 0 or len(text) <= max_length:
return text
if max_length <= 3:
return text[:max_length]
return text[: max_length - 3] + "..."
async def run(
self,
message: str,
options: List[UserChoiceOptionInput],
title: Optional[str] = None,
**kwargs,
) -> str:
if not self._channel or not self._source:
return "当前不在可回传消息的会话中,无法发起按钮选择"
try:
channel = MessageChannel(self._channel)
except ValueError:
return f"不支持的消息渠道: {self._channel}"
if not (
ChannelCapabilityManager.supports_buttons(channel)
and ChannelCapabilityManager.supports_callbacks(channel)
):
return f"当前渠道 {channel.value} 不支持按钮选择"
max_per_row = 1
max_rows = ChannelCapabilityManager.get_max_button_rows(channel)
max_text_length = ChannelCapabilityManager.get_max_button_text_length(channel)
max_options = max_per_row * max_rows
if len(options) > max_options:
return f"当前渠道最多支持 {max_options} 个按钮选项"
choice_options = [
AgentInteractionOption(
label=option.label.strip(), value=option.value.strip()
)
for option in options
]
request = agent_interaction_manager.create_request(
session_id=self._session_id,
user_id=str(self._user_id),
channel=channel.value,
source=self._source,
username=self._username,
title=title,
prompt=message.strip(),
options=choice_options,
)
buttons = []
current_row = []
for index, option in enumerate(choice_options, start=1):
current_row.append(
{
"text": self._truncate_button_text(option.label, max_text_length),
"callback_data": (
f"agent_interaction:choice:{request.request_id}:{index}"
),
}
)
if len(current_row) >= max_per_row:
buttons.append(current_row)
current_row = []
if current_row:
buttons.append(current_row)
logger.info(
"执行工具: %s, channel=%s, session_id=%s, options=%s",
self.name,
channel.value,
self._session_id,
len(choice_options),
)
await ToolChain().async_post_message(
Notification(
channel=channel,
source=self._source,
mtype=NotificationType.Agent,
userid=self._user_id,
username=self._username,
title=title,
text=message.strip(),
buttons=buttons,
)
)
self._agent_context["user_reply_sent"] = True
self._agent_context["reply_mode"] = "button_choice"
return f"已发送 {len(choice_options)} 个按钮选项,等待用户选择"

View File

@@ -0,0 +1,539 @@
"""浏览器操作工具 - 让Agent能够通过Playwright控制浏览器进行网页交互"""
import asyncio
import base64
import json
from enum import Enum
from typing import Optional, Type
from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool
from app.core.config import settings
from app.log import logger
# 页面内容最大长度
MAX_CONTENT_LENGTH = 8000
# 默认超时时间(秒)
DEFAULT_TIMEOUT = 30
# 截图最大宽度
SCREENSHOT_MAX_WIDTH = 1280
# 截图最大高度
SCREENSHOT_MAX_HEIGHT = 720
class BrowserAction(str, Enum):
"""浏览器操作类型"""
GOTO = "goto"
GET_CONTENT = "get_content"
SCREENSHOT = "screenshot"
CLICK = "click"
FILL = "fill"
SELECT = "select"
EVALUATE = "evaluate"
WAIT = "wait"
class BrowseWebpageInput(BaseModel):
"""浏览器操作工具的输入参数模型"""
explanation: str = Field(
...,
description="Clear explanation of why this browser action is being performed",
)
action: str = Field(
...,
description=(
"The browser action to perform. Available actions:\n"
"- 'goto': Navigate to a URL, returns page title and text summary\n"
"- 'get_content': Get current page content (text or HTML)\n"
"- 'screenshot': Take a screenshot of the current page, returns base64 image\n"
"- 'click': Click on an element specified by selector\n"
"- 'fill': Fill text into an input element specified by selector\n"
"- 'select': Select an option from a dropdown element\n"
"- 'evaluate': Execute JavaScript code on the page and return the result\n"
"- 'wait': Wait for an element to appear on the page"
),
)
url: Optional[str] = Field(
None, description="URL to navigate to (required for 'goto' action)"
)
selector: Optional[str] = Field(
None,
description="CSS selector or text selector for the target element (for 'click', 'fill', 'select', 'wait' actions). "
"Supports CSS selectors like '#id', '.class', 'tag', and Playwright text selectors like 'text=Click me'",
)
value: Optional[str] = Field(
None,
description="Value to fill into input or option value to select (for 'fill' and 'select' actions)",
)
script: Optional[str] = Field(
None,
description="JavaScript code to execute on the page (for 'evaluate' action). "
"The script should return a value that can be serialized to JSON.",
)
content_type: Optional[str] = Field(
"text",
description="Content type for 'get_content' action: 'text' for readable text, 'html' for raw HTML",
)
timeout: Optional[int] = Field(
DEFAULT_TIMEOUT, description="Timeout in seconds for the action (default: 30)"
)
cookies: Optional[str] = Field(
None,
description="Cookies to set for the browser context, format: 'name1=value1; name2=value2'",
)
user_agent: Optional[str] = Field(
None, description="Custom User-Agent string for the browser context"
)
class BrowseWebpageTool(MoviePilotTool):
name: str = "browse_webpage"
description: str = (
"Control a real browser (Playwright) to interact with web pages. "
"Supports navigating to URLs, reading page content, taking screenshots, "
"clicking elements, filling forms, selecting dropdown options, executing JavaScript, and waiting for elements. "
"Use this tool when you need to interact with dynamic web pages, "
"fill in forms, click buttons, or extract content from JavaScript-rendered pages. "
"The browser session persists across multiple calls within the same conversation - "
"first call 'goto' to open a page, then use other actions to interact with it."
)
args_schema: Type[BaseModel] = BrowseWebpageInput
def get_tool_message(self, **kwargs) -> Optional[str]:
"""根据操作类型生成友好的提示消息"""
action = kwargs.get("action", "")
url = kwargs.get("url", "")
selector = kwargs.get("selector", "")
action_messages = {
"goto": f"打开网页: {url}",
"get_content": "获取页面内容",
"screenshot": "截取页面截图",
"click": f"点击元素: {selector}",
"fill": f"填写表单: {selector}",
"select": f"选择选项: {selector}",
"evaluate": "执行 JavaScript",
"wait": f"等待元素: {selector}",
}
return action_messages.get(action, f"执行浏览器操作: {action}")
async def run(
self,
action: str,
url: Optional[str] = None,
selector: Optional[str] = None,
value: Optional[str] = None,
script: Optional[str] = None,
content_type: Optional[str] = "text",
timeout: Optional[int] = DEFAULT_TIMEOUT,
cookies: Optional[str] = None,
user_agent: Optional[str] = None,
**kwargs,
) -> str:
"""执行浏览器操作"""
logger.info(
f"执行工具: {self.name}, 动作: {action}, URL: {url}, 选择器: {selector}"
)
try:
# 验证操作类型
try:
browser_action = BrowserAction(action)
except ValueError:
valid_actions = ", ".join([a.value for a in BrowserAction])
return f"错误: 不支持的操作类型 '{action}',支持的操作: {valid_actions}"
# 参数校验
if browser_action == BrowserAction.GOTO and not url:
return "错误: 'goto' 操作需要提供 url 参数"
if (
browser_action
in (
BrowserAction.CLICK,
BrowserAction.FILL,
BrowserAction.SELECT,
BrowserAction.WAIT,
)
and not selector
):
return f"错误: '{action}' 操作需要提供 selector 参数"
if browser_action == BrowserAction.FILL and value is None:
return "错误: 'fill' 操作需要提供 value 参数"
if browser_action == BrowserAction.EVALUATE and not script:
return "错误: 'evaluate' 操作需要提供 script 参数"
# 在线程池中运行同步的 Playwright 操作
loop = asyncio.get_running_loop()
result = await loop.run_in_executor(
None,
lambda: self._execute_browser_action(
browser_action=browser_action,
url=url,
selector=selector,
value=value,
script=script,
content_type=content_type,
timeout=timeout,
cookies=cookies,
user_agent=user_agent,
),
)
return result
except Exception as e:
logger.error(f"浏览器操作失败: {e}", exc_info=True)
return f"浏览器操作失败: {str(e)}"
def _execute_browser_action(
self,
browser_action: BrowserAction,
url: Optional[str],
selector: Optional[str],
value: Optional[str],
script: Optional[str],
content_type: Optional[str],
timeout: int,
cookies: Optional[str],
user_agent: Optional[str],
) -> str:
"""在同步上下文中执行 Playwright 浏览器操作"""
from playwright.sync_api import sync_playwright
try:
with sync_playwright() as playwright:
browser = None
context = None
page = None
try:
# 启动浏览器
browser_type = settings.PLAYWRIGHT_BROWSER_TYPE or "chromium"
browser = playwright[browser_type].launch(headless=True)
# 创建上下文
context_kwargs = {}
if user_agent:
context_kwargs["user_agent"] = user_agent
# 设置视口大小
context_kwargs["viewport"] = {
"width": SCREENSHOT_MAX_WIDTH,
"height": SCREENSHOT_MAX_HEIGHT,
}
context = browser.new_context(**context_kwargs)
page = context.new_page()
page.set_default_timeout(timeout * 1000)
# 设置 cookies
if cookies:
page.set_extra_http_headers({"cookie": cookies})
# 对于非 goto 操作,如果提供了 url 先导航
if url and browser_action != BrowserAction.GOTO:
page.goto(
url, wait_until="domcontentloaded", timeout=timeout * 1000
)
page.wait_for_load_state("networkidle", timeout=timeout * 1000)
# 执行具体操作
result = self._do_action(
page,
browser_action,
url,
selector,
value,
script,
content_type,
timeout,
)
return result
finally:
if page:
page.close()
if context:
context.close()
if browser:
browser.close()
except Exception as e:
logger.error(f"Playwright 执行失败: {e}", exc_info=True)
return f"Playwright 执行失败: {str(e)}"
def _do_action(
self,
page,
browser_action: BrowserAction,
url: Optional[str],
selector: Optional[str],
value: Optional[str],
script: Optional[str],
content_type: Optional[str],
timeout: int,
) -> str:
"""执行具体的浏览器操作"""
if browser_action == BrowserAction.GOTO:
return self._action_goto(page, url, timeout)
elif browser_action == BrowserAction.GET_CONTENT:
return self._action_get_content(page, content_type)
elif browser_action == BrowserAction.SCREENSHOT:
return self._action_screenshot(page)
elif browser_action == BrowserAction.CLICK:
return self._action_click(page, selector, timeout)
elif browser_action == BrowserAction.FILL:
return self._action_fill(page, selector, value, timeout)
elif browser_action == BrowserAction.SELECT:
return self._action_select(page, selector, value, timeout)
elif browser_action == BrowserAction.EVALUATE:
return self._action_evaluate(page, script)
elif browser_action == BrowserAction.WAIT:
return self._action_wait(page, selector, timeout)
return f"未知操作: {browser_action}"
@staticmethod
def _action_goto(page, url: str, timeout: int) -> str:
"""导航到URL"""
response = page.goto(url, wait_until="domcontentloaded", timeout=timeout * 1000)
try:
page.wait_for_load_state("networkidle", timeout=min(timeout, 15) * 1000)
except Exception:
# networkidle 超时不是致命错误,页面可能已经可用
pass
status = response.status if response else "unknown"
title = page.title()
page_url = page.url
# 提取页面可读文本摘要
text_content = page.inner_text("body")
if text_content and len(text_content) > MAX_CONTENT_LENGTH:
text_content = text_content[:MAX_CONTENT_LENGTH] + "\n\n...(内容已截断)"
# 提取页面链接
links = page.evaluate("""
() => {
const links = [];
document.querySelectorAll('a[href]').forEach(a => {
const text = a.innerText.trim();
const href = a.href;
if (text && href && !href.startsWith('javascript:')) {
links.push({text: text.substring(0, 80), href: href});
}
});
return links.slice(0, 30);
}
""")
# 提取表单信息
forms = page.evaluate("""
() => {
const forms = [];
document.querySelectorAll('input, textarea, select, button').forEach(el => {
const info = {
tag: el.tagName.toLowerCase(),
type: el.type || '',
name: el.name || '',
id: el.id || '',
placeholder: el.placeholder || '',
value: el.tagName.toLowerCase() === 'select' ? '' : (el.value || '').substring(0, 50),
text: el.innerText ? el.innerText.trim().substring(0, 50) : ''
};
// 只保留有标识信息的元素
if (info.name || info.id || info.placeholder || info.text) {
forms.push(info);
}
});
return forms.slice(0, 30);
}
""")
result = {
"status": status,
"url": page_url,
"title": title,
"text_content": text_content,
}
if links:
result["links"] = links
if forms:
result["form_elements"] = forms
return json.dumps(result, ensure_ascii=False, indent=2)
@staticmethod
def _action_get_content(page, content_type: Optional[str]) -> str:
"""获取页面内容"""
title = page.title()
page_url = page.url
if content_type == "html":
content = page.content()
else:
content = page.inner_text("body")
if content and len(content) > MAX_CONTENT_LENGTH:
content = content[:MAX_CONTENT_LENGTH] + "\n\n...(内容已截断)"
result = {
"url": page_url,
"title": title,
"content_type": content_type,
"content": content,
}
return json.dumps(result, ensure_ascii=False, indent=2)
@staticmethod
def _action_screenshot(page) -> str:
"""截取页面截图"""
screenshot_bytes = page.screenshot(
full_page=False,
type="jpeg",
quality=60,
)
screenshot_b64 = base64.b64encode(screenshot_bytes).decode("utf-8")
# 限制截图大小(base64编码后大约增大33%)
max_b64_size = 200 * 1024 # ~150KB 原始图片
if len(screenshot_b64) > max_b64_size:
# 降低质量重新截图
screenshot_bytes = page.screenshot(
full_page=False,
type="jpeg",
quality=30,
)
screenshot_b64 = base64.b64encode(screenshot_bytes).decode("utf-8")
title = page.title()
page_url = page.url
result = {
"url": page_url,
"title": title,
"screenshot_base64": screenshot_b64,
"format": "jpeg",
"note": "截图已以 base64 编码返回",
}
return json.dumps(result, ensure_ascii=False, indent=2)
@staticmethod
def _action_click(page, selector: str, timeout: int) -> str:
"""点击元素"""
page.click(selector, timeout=timeout * 1000)
# 等待可能的页面变化
try:
page.wait_for_load_state("networkidle", timeout=5000)
except Exception:
pass
title = page.title()
page_url = page.url
return json.dumps(
{
"success": True,
"message": f"成功点击元素: {selector}",
"current_url": page_url,
"current_title": title,
},
ensure_ascii=False,
indent=2,
)
@staticmethod
def _action_fill(page, selector: str, value: str, timeout: int) -> str:
"""填写表单"""
page.fill(selector, value, timeout=timeout * 1000)
return json.dumps(
{
"success": True,
"message": f"成功填写元素 '{selector}' 的值为 '{value}'",
},
ensure_ascii=False,
indent=2,
)
@staticmethod
def _action_select(page, selector: str, value: Optional[str], timeout: int) -> str:
"""选择下拉选项"""
if value:
page.select_option(selector, value=value, timeout=timeout * 1000)
else:
return "错误: 'select' 操作需要提供 value 参数"
return json.dumps(
{
"success": True,
"message": f"成功选择元素 '{selector}' 的选项 '{value}'",
},
ensure_ascii=False,
indent=2,
)
@staticmethod
def _action_evaluate(page, script: str) -> str:
"""执行 JavaScript"""
result = page.evaluate(script)
# 格式化结果
if result is None:
formatted = "null"
elif isinstance(result, (dict, list)):
formatted = json.dumps(result, ensure_ascii=False, indent=2)
else:
formatted = str(result)
# 限制结果长度
if len(formatted) > MAX_CONTENT_LENGTH:
formatted = formatted[:MAX_CONTENT_LENGTH] + "\n\n...(结果已截断)"
return json.dumps(
{
"success": True,
"result": formatted,
},
ensure_ascii=False,
indent=2,
)
@staticmethod
def _action_wait(page, selector: str, timeout: int) -> str:
"""等待元素出现"""
element = page.wait_for_selector(selector, timeout=timeout * 1000)
if element:
visible = element.is_visible()
text = element.inner_text()
if text and len(text) > 200:
text = text[:200] + "..."
return json.dumps(
{
"success": True,
"message": f"元素 '{selector}' 已出现",
"visible": visible,
"text": text,
},
ensure_ascii=False,
indent=2,
)
else:
return json.dumps(
{
"success": False,
"message": f"等待元素 '{selector}' 超时",
},
ensure_ascii=False,
indent=2,
)
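A hedged sketch of typical action payloads for this tool, matching the parameter checks in run() above; the URL and selector are placeholders:

# Hedged examples of action payloads (placeholder url/selector)
goto_args = {
    "explanation": "Open the target page",
    "action": "goto",
    "url": "https://example.com",   # required for 'goto'
}
fill_args = {
    "explanation": "Fill the search box",
    "action": "fill",
    "url": "https://example.com",   # non-goto actions may pass url to navigate first
    "selector": "#search",          # required for click/fill/select/wait
    "value": "MoviePilot",          # required for 'fill'
}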

View File

@@ -0,0 +1,97 @@
"""删除自定义过滤规则工具。"""
import json
from typing import Optional, Type
from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool
from app.agent.tools.impl._filter_rule_utils import (
collect_custom_rule_group_refs,
get_custom_rules,
get_rule_groups,
save_system_config,
)
from app.log import logger
from app.schemas.types import SystemConfigKey
class DeleteCustomFilterRuleInput(BaseModel):
"""删除自定义过滤规则工具的输入参数模型"""
explanation: str = Field(
...,
description="Clear explanation of why this tool is being used in the current context",
)
rule_id: str = Field(..., description="Custom rule ID to delete.")
class DeleteCustomFilterRuleTool(MoviePilotTool):
name: str = "delete_custom_filter_rule"
description: str = (
"Delete a custom filter rule from CustomFilterRules. "
"If the rule is still referenced by rule groups, the deletion is blocked to avoid breaking rule_string expressions."
)
args_schema: Type[BaseModel] = DeleteCustomFilterRuleInput
require_admin: bool = True
def get_tool_message(self, **kwargs) -> Optional[str]:
return f"删除自定义过滤规则 {kwargs.get('rule_id', '')}"
async def run(self, rule_id: str, **kwargs) -> str:
logger.info(f"执行工具: {self.name}, rule_id={rule_id}")
try:
custom_rules = get_custom_rules()
target_rule = next((rule for rule in custom_rules if rule.id == rule_id), None)
if not target_rule:
return json.dumps(
{
"success": False,
"message": f"自定义过滤规则 '{rule_id}' 不存在",
},
ensure_ascii=False,
)
refs = collect_custom_rule_group_refs(get_rule_groups(), [rule_id]).get(
rule_id, []
)
if refs:
return json.dumps(
{
"success": False,
"message": (
f"自定义过滤规则 '{rule_id}' 仍被规则组引用,无法删除。"
),
"referenced_by_rule_groups": refs,
},
ensure_ascii=False,
indent=2,
)
remaining_rules = [
rule for rule in custom_rules if rule.id != rule_id
]
await save_system_config(
SystemConfigKey.CustomFilterRules,
[rule.model_dump(exclude_none=True) for rule in remaining_rules],
)
return json.dumps(
{
"success": True,
"message": f"已删除自定义过滤规则 {rule_id}",
"count": len(remaining_rules),
},
ensure_ascii=False,
indent=2,
)
except Exception as exc:
logger.error(f"删除自定义过滤规则失败: {exc}", exc_info=True)
return json.dumps(
{
"success": False,
"message": f"删除自定义过滤规则失败: {exc}",
},
ensure_ascii=False,
)

View File

@@ -11,66 +11,84 @@ from app.log import logger
class DeleteDownloadInput(BaseModel):
"""删除下载任务工具的输入参数模型"""
explanation: str = Field(..., description="Clear explanation of why this tool is being used in the current context")
task_identifier: str = Field(..., description="Task identifier: can be task hash (unique identifier) or task title/name")
downloader: Optional[str] = Field(None, description="Name of specific downloader (optional, if not provided will search all downloaders)")
delete_files: Optional[bool] = Field(False, description="Whether to delete downloaded files along with the task (default: False, only removes the task from downloader)")
explanation: str = Field(
...,
description="Clear explanation of why this tool is being used in the current context",
)
hash: str = Field(
..., description="Task hash (can be obtained from query_download_tasks tool)"
)
downloader: Optional[str] = Field(
None,
description="Name of specific downloader (optional, if not provided will search all downloaders)",
)
delete_files: Optional[bool] = Field(
False,
description="Whether to delete downloaded files along with the task (default: False, only removes the task from downloader)",
)
class DeleteDownloadTool(MoviePilotTool):
name: str = "delete_download"
description: str = "Delete a download task from the downloader. Can delete by task hash (unique identifier) or task title/name. Optionally specify the downloader name and whether to delete downloaded files."
description: str = "Delete a download task from the downloader by task hash only. Optionally specify the downloader name and whether to delete downloaded files."
args_schema: Type[BaseModel] = DeleteDownloadInput
require_admin: bool = True
def get_tool_message(self, **kwargs) -> Optional[str]:
"""根据删除参数生成友好的提示消息"""
task_identifier = kwargs.get("task_identifier", "")
hash_value = kwargs.get("hash", "")
downloader = kwargs.get("downloader")
delete_files = kwargs.get("delete_files", False)
message = f"正在删除下载任务: {task_identifier}"
message = f"删除下载任务: {hash_value}"
if downloader:
message += f" [下载器: {downloader}]"
if delete_files:
message += " (包含文件)"
return message
async def run(self, task_identifier: str, downloader: Optional[str] = None,
delete_files: Optional[bool] = False, **kwargs) -> str:
logger.info(f"执行工具: {self.name}, 参数: task_identifier={task_identifier}, downloader={downloader}, delete_files={delete_files}")
@staticmethod
def _delete_download_sync(
hash_value: str, downloader: Optional[str] = None, delete_files: bool = False
) -> bool:
"""同步删除下载任务,避免下载器客户端阻塞事件循环。"""
return DownloadChain().remove_torrents(
hashs=[hash_value], downloader=downloader, delete_file=delete_files
)
async def run(
self,
hash: str,
downloader: Optional[str] = None,
delete_files: Optional[bool] = False,
**kwargs,
) -> str:
logger.info(
f"执行工具: {self.name}, 参数: hash={hash}, downloader={downloader}, delete_files={delete_files}"
)
try:
download_chain = DownloadChain()
# 如果task_identifier看起来像hash(通常是40个字符的十六进制字符串)
task_hash = None
if len(task_identifier) == 40 and all(c in '0123456789abcdefABCDEF' for c in task_identifier):
# 直接使用hash
task_hash = task_identifier
else:
# 通过标题查找任务
downloads = download_chain.downloading(name=downloader)
for dl in downloads:
# 检查标题或名称是否匹配
if (task_identifier.lower() in (dl.title or "").lower()) or \
(task_identifier.lower() in (dl.name or "").lower()):
task_hash = dl.hash
break
if not task_hash:
return f"未找到匹配的下载任务:{task_identifier},请使用 query_downloads 工具查询可用的下载任务"
# 仅支持通过hash删除任务
if len(hash) != 40 or not all(c in "0123456789abcdefABCDEF" for c in hash):
return "参数错误hash 格式无效,请先使用 query_download_tasks 工具获取正确的 hash。"
# 删除下载任务
# remove_torrents 支持 delete_file 参数,可以控制是否删除文件
result = download_chain.remove_torrents(hashs=[task_hash], downloader=downloader, delete_file=delete_files)
result = await self.run_blocking(
"downloader",
self._delete_download_sync,
hash,
downloader,
bool(delete_files),
)
if result:
files_info = "(包含文件)" if delete_files else "(不包含文件)"
return f"成功删除下载任务:{task_identifier} {files_info}"
return f"成功删除下载任务:{hash} {files_info}"
else:
return f"删除下载任务失败:{task_identifier},请检查任务是否存在或下载器是否可用"
return f"删除下载任务失败:{hash},请检查任务是否存在或下载器是否可用"
except Exception as e:
logger.error(f"删除下载任务失败: {e}", exc_info=True)
return f"删除下载任务时发生错误: {str(e)}"

View File

@@ -0,0 +1,44 @@
"""删除下载历史记录工具"""
from typing import Optional, Type
from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool
from app.db import AsyncSessionFactory
from app.db.models.downloadhistory import DownloadHistory
from app.log import logger
class DeleteDownloadHistoryInput(BaseModel):
"""删除下载历史记录工具的输入参数模型"""
explanation: str = Field(
...,
description="Clear explanation of why this tool is being used in the current context",
)
history_id: int = Field(
..., description="The ID of the download history record to delete"
)
class DeleteDownloadHistoryTool(MoviePilotTool):
name: str = "delete_download_history"
description: str = "Delete a download history record by ID. This only removes the record from the database, does not delete any actual files."
args_schema: Type[BaseModel] = DeleteDownloadHistoryInput
require_admin: bool = True
def get_tool_message(self, **kwargs) -> Optional[str]:
history_id = kwargs.get("history_id")
return f"删除下载历史记录 ID: {history_id}"
async def run(self, history_id: int, **kwargs) -> str:
logger.info(f"执行工具: {self.name}, 参数: history_id={history_id}")
try:
async with AsyncSessionFactory() as db:
await DownloadHistory.async_delete(db, history_id)
return f"下载历史记录 ID: {history_id} 已成功删除"
except Exception as e:
logger.error(f"删除下载历史记录失败: {e}", exc_info=True)
return f"删除下载历史记录时发生错误: {str(e)}"

View File

@@ -0,0 +1,81 @@
"""删除过滤规则组工具。"""
import json
from typing import Optional, Type
from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool
from app.agent.tools.impl._filter_rule_utils import (
get_rule_groups,
remove_rule_group_references,
save_system_config,
)
from app.log import logger
from app.schemas.types import SystemConfigKey
class DeleteRuleGroupInput(BaseModel):
"""删除过滤规则组工具的输入参数模型"""
explanation: str = Field(
...,
description="Clear explanation of why this tool is being used in the current context",
)
name: str = Field(..., description="Rule group name to delete.")
class DeleteRuleGroupTool(MoviePilotTool):
name: str = "delete_rule_group"
description: str = (
"Delete a filter rule group from UserFilterRuleGroups. "
"The tool also removes dangling references from global settings and subscriptions."
)
args_schema: Type[BaseModel] = DeleteRuleGroupInput
require_admin: bool = True
def get_tool_message(self, **kwargs) -> Optional[str]:
return f"删除规则组 {kwargs.get('name', '')}"
async def run(self, name: str, **kwargs) -> str:
logger.info(f"执行工具: {self.name}, name={name}")
try:
rule_groups = get_rule_groups()
if not any(group.name == name for group in rule_groups):
return json.dumps(
{
"success": False,
"message": f"规则组 '{name}' 不存在",
},
ensure_ascii=False,
)
remaining_groups = [
group for group in rule_groups if group.name != name
]
await save_system_config(
SystemConfigKey.UserFilterRuleGroups,
[group.model_dump(exclude_none=True) for group in remaining_groups],
)
reference_changes = await remove_rule_group_references(name)
return json.dumps(
{
"success": True,
"message": f"已删除规则组 {name}",
"count": len(remaining_groups),
"reference_updates": reference_changes,
},
ensure_ascii=False,
indent=2,
)
except Exception as exc:
logger.error(f"删除规则组失败: {exc}", exc_info=True)
return json.dumps(
{
"success": False,
"message": f"删除规则组失败: {exc}",
},
ensure_ascii=False,
)

View File

@@ -14,19 +14,27 @@ from app.schemas.types import EventType
class DeleteSubscribeInput(BaseModel):
"""删除订阅工具的输入参数模型"""
explanation: str = Field(..., description="Clear explanation of why this tool is being used in the current context")
subscribe_id: int = Field(..., description="The ID of the subscription to delete (can be obtained from query_subscribes tool)")
explanation: str = Field(
...,
description="Clear explanation of why this tool is being used in the current context",
)
subscribe_id: int = Field(
...,
description="The ID of the subscription to delete (can be obtained from query_subscribes tool)",
)
class DeleteSubscribeTool(MoviePilotTool):
name: str = "delete_subscribe"
description: str = "Delete a media subscription by its ID. This will remove the subscription and stop automatic downloads for that media."
args_schema: Type[BaseModel] = DeleteSubscribeInput
require_admin: bool = True
def get_tool_message(self, **kwargs) -> Optional[str]:
"""根据删除参数生成友好的提示消息"""
subscribe_id = kwargs.get("subscribe_id")
return f"正在删除订阅 (ID: {subscribe_id})"
return f"删除订阅 (ID: {subscribe_id})"
async def run(self, subscribe_id: int, **kwargs) -> str:
logger.info(f"执行工具: {self.name}, 参数: subscribe_id={subscribe_id}")
@@ -37,27 +45,23 @@ class DeleteSubscribeTool(MoviePilotTool):
subscribe = await subscribe_oper.async_get(subscribe_id)
if not subscribe:
return f"订阅 ID {subscribe_id} 不存在"
# 在删除之前获取订阅信息(用于事件)
subscribe_info = subscribe.to_dict()
# 删除订阅
subscribe_oper.delete(subscribe_id)
await subscribe_oper.async_delete(subscribe_id)
# 分享订阅统计刷新本身已异步化,这里只需要在删除后触发即可。
SubscribeHelper().sub_done_async(
{"tmdbid": subscribe.tmdbid, "doubanid": subscribe.doubanid}
)
# 发送事件
await eventmanager.async_send_event(EventType.SubscribeDeleted, {
"subscribe_id": subscribe_id,
"subscribe_info": subscribe_info
})
# 统计订阅
SubscribeHelper().sub_done_async({
"tmdbid": subscribe.tmdbid,
"doubanid": subscribe.doubanid
})
await eventmanager.async_send_event(
EventType.SubscribeDeleted,
{"subscribe_id": subscribe_id, "subscribe_info": subscribe_info},
)
return f"成功删除订阅:{subscribe.name} ({subscribe.year})"
except Exception as e:
logger.error(f"删除订阅失败: {e}", exc_info=True)
return f"删除订阅时发生错误: {str(e)}"

View File

@@ -0,0 +1,53 @@
"""删除整理历史记录工具"""
from typing import Optional, Type
from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool
from app.db.transferhistory_oper import TransferHistoryOper
from app.log import logger
class DeleteTransferHistoryInput(BaseModel):
"""删除整理历史记录工具的输入参数模型"""
explanation: str = Field(
...,
description="Clear explanation of why this tool is being used in the current context",
)
history_id: int = Field(
..., description="The ID of the transfer history record to delete"
)
class DeleteTransferHistoryTool(MoviePilotTool):
name: str = "delete_transfer_history"
description: str = "Delete a specific transfer history record by its ID. This is useful when you need to remove a failed transfer record before retrying the transfer, as the system skips files that already have transfer history."
args_schema: Type[BaseModel] = DeleteTransferHistoryInput
require_admin: bool = True
def get_tool_message(self, **kwargs) -> Optional[str]:
"""根据参数生成友好的提示消息"""
history_id = kwargs.get("history_id")
return f"删除整理历史记录: ID={history_id}"
async def run(self, history_id: int, **kwargs) -> str:
logger.info(f"执行工具: {self.name}, 参数: history_id={history_id}")
try:
transferhis = TransferHistoryOper()
history = await transferhis.async_get(history_id)
if not history:
return f"错误整理历史记录不存在ID={history_id}"
title = history.title or "未知"
src = history.src or "未知"
status = "成功" if history.status else "失败"
await transferhis.async_delete(history_id)
return (
f"已删除整理历史记录ID={history_id},标题={title},源路径={src},状态={status}"
)
except Exception as e:
logger.error(f"删除整理历史记录失败: {e}", exc_info=True)
return f"删除整理历史记录时发生错误: {str(e)}"

View File

@@ -0,0 +1,74 @@
"""文件编辑工具"""
from pathlib import Path
from typing import Optional, Type
from anyio import Path as AsyncPath
from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool
from app.log import logger
class EditFileInput(BaseModel):
"""Input parameters for edit file tool"""
file_path: str = Field(..., description="The absolute path of the file to edit")
old_text: str = Field(..., description="The exact old text to be replaced")
new_text: str = Field(..., description="The new text to replace with")
class EditFileTool(MoviePilotTool):
name: str = "edit_file"
description: str = "Edit a file by replacing specific old text with new text. Useful for modifying configuration files, code, or scripts."
args_schema: Type[BaseModel] = EditFileInput
require_admin: bool = True
def get_tool_message(self, **kwargs) -> Optional[str]:
"""根据参数生成友好的提示消息"""
file_path = kwargs.get("file_path", "")
file_name = Path(file_path).name if file_path else "未知文件"
return f"编辑文件: {file_name}"
async def run(self, file_path: str, old_text: str, new_text: str, **kwargs) -> str:
logger.info(f"执行工具: {self.name}, 参数: file_path={file_path}")
try:
path = AsyncPath(file_path)
# 校验逻辑:如果要替换特定文本,文件必须存在且包含该文本
if not await path.exists():
# 如果 old_text 为空,可能用户想直接创建文件,但通常 edit_file 需要匹配旧内容
if old_text:
return f"错误:文件 {file_path} 不存在,无法进行内容替换。"
if await path.exists() and not await path.is_file():
return f"错误:{file_path} 不是一个文件"
if await path.exists():
content = await path.read_text(encoding="utf-8")
if old_text not in content:
logger.warning(f"编辑文件 {file_path} 失败:未找到指定的旧文本块")
return f"错误:在文件 {file_path} 中未找到指定的旧文本。请确保包含所有的空格、缩进 and 换行符。"
occurrences = content.count(old_text)
new_content = content.replace(old_text, new_text)
else:
# 文件不存在且 old_text 为空的情形(初始化新文件)
new_content = new_text
occurrences = 1
# 自动创建父目录
await path.parent.mkdir(parents=True, exist_ok=True)
# 写入文件
await path.write_text(new_content, encoding="utf-8")
logger.info(f"成功编辑文件 {file_path},替换了 {occurrences} 处内容")
return f"成功编辑文件 {file_path} (替换了 {occurrences} 处匹配内容)"
except PermissionError:
return f"错误:没有访问/修改 {file_path} 的权限"
except UnicodeDecodeError:
return f"错误:{file_path} 不是文本文件,无法编辑"
except Exception as e:
logger.error(f"编辑文件 {file_path} 时发生错误: {str(e)}", exc_info=True)
return f"操作失败: {str(e)}"

View File

@@ -0,0 +1,345 @@
"""执行Shell命令工具"""
import asyncio
import os
import signal
import subprocess
from dataclasses import dataclass, field
from tempfile import NamedTemporaryFile
from typing import Optional, TextIO, Type
from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool
from app.log import logger
DEFAULT_TIMEOUT_SECONDS = 60
MAX_TIMEOUT_SECONDS = 300
MAX_OUTPUT_PREVIEW_BYTES = 10 * 1024
READ_CHUNK_SIZE = 4096
KILL_GRACE_SECONDS = 3
COMMAND_CONCURRENCY_LIMIT = 2
_command_semaphore = asyncio.Semaphore(COMMAND_CONCURRENCY_LIMIT)
@dataclass
class _CommandOutput:
"""保存前 10KB 预览,并在超限时将完整输出写入临时文件。"""
preview_limit_bytes: int
preview_entries: list[tuple[str, str]] = field(default_factory=list)
captured_bytes: int = 0
preview_truncated: bool = False
temp_file_path: Optional[str] = None
temp_file_handle: Optional[TextIO] = None
last_written_stream: Optional[str] = None
@staticmethod
def _clip_text_to_bytes(text: str, byte_limit: int) -> str:
if byte_limit <= 0:
return ""
return text.encode("utf-8")[:byte_limit].decode("utf-8", errors="ignore")
def _write_chunk(self, stream_name: str, text: str) -> None:
if not self.temp_file_handle or not text:
return
if self.last_written_stream != stream_name:
if self.temp_file_handle.tell() > 0:
self.temp_file_handle.write("\n")
title = "标准输出" if stream_name == "stdout" else "错误输出"
self.temp_file_handle.write(f"[{title}]\n")
self.last_written_stream = stream_name
self.temp_file_handle.write(text)
def _ensure_temp_file(self) -> None:
if self.temp_file_handle:
return
temp_file = NamedTemporaryFile(
mode="w",
encoding="utf-8",
suffix=".log",
prefix="moviepilot-command-",
delete=False,
)
self.temp_file_path = temp_file.name
self.temp_file_handle = temp_file
for stream_name, chunk in self.preview_entries:
self._write_chunk(stream_name, chunk)
def close(self) -> None:
if not self.temp_file_handle:
return
self.temp_file_handle.flush()
self.temp_file_handle.close()
self.temp_file_handle = None
def append(self, stream_name: str, text: str) -> None:
if not text:
return
if self.temp_file_handle:
self._write_chunk(stream_name, text)
return
chunk_bytes = len(text.encode("utf-8"))
remaining = self.preview_limit_bytes - self.captured_bytes
if chunk_bytes <= remaining:
self.preview_entries.append((stream_name, text))
self.captured_bytes += chunk_bytes
return
self.preview_truncated = True
self._ensure_temp_file()
self._write_chunk(stream_name, text)
preview = self._clip_text_to_bytes(text, remaining)
if preview:
self.preview_entries.append((stream_name, preview))
self.captured_bytes += len(preview.encode("utf-8"))
@property
def stdout(self) -> str:
return "".join(
text for stream_name, text in self.preview_entries if stream_name == "stdout"
).strip()
@property
def stderr(self) -> str:
return "".join(
text for stream_name, text in self.preview_entries if stream_name == "stderr"
).strip()
class ExecuteCommandInput(BaseModel):
"""执行Shell命令工具的输入参数模型"""
explanation: str = Field(
..., description="Clear explanation of why this command is being executed"
)
command: str = Field(..., description="The shell command to execute")
timeout: Optional[int] = Field(
60, description="Max execution time in seconds (default: 60)"
)
class ExecuteCommandTool(MoviePilotTool):
name: str = "execute_command"
description: str = (
"Safely execute shell commands on the server. Useful for system "
"maintenance, checking status, or running custom scripts. Includes "
"timeout, concurrency, and output preview limits."
)
args_schema: Type[BaseModel] = ExecuteCommandInput
require_admin: bool = True
def get_tool_message(self, **kwargs) -> Optional[str]:
"""根据命令生成友好的提示消息"""
command = kwargs.get("command", "")
return f"执行系统命令: {command}"
@staticmethod
def _normalize_timeout(timeout: Optional[int]) -> tuple[int, Optional[str]]:
"""限制命令最长运行时间,避免 Agent 传入过大的 timeout。"""
try:
normalized = int(timeout or DEFAULT_TIMEOUT_SECONDS)
except (TypeError, ValueError):
normalized = DEFAULT_TIMEOUT_SECONDS
if normalized <= 0:
return DEFAULT_TIMEOUT_SECONDS, "timeout 参数无效,已使用默认 60 秒"
if normalized > MAX_TIMEOUT_SECONDS:
return (
MAX_TIMEOUT_SECONDS,
f"timeout 参数超过上限,已从 {normalized} 秒限制为 {MAX_TIMEOUT_SECONDS}",
)
return normalized, None
@staticmethod
def _subprocess_kwargs() -> dict:
"""为子进程创建独立进程组,便于超时场景清理整棵子进程。"""
kwargs = {
"stdin": subprocess.DEVNULL,
"stdout": asyncio.subprocess.PIPE,
"stderr": asyncio.subprocess.PIPE,
}
if os.name == "posix":
kwargs["start_new_session"] = True
elif os.name == "nt":
kwargs["creationflags"] = subprocess.CREATE_NEW_PROCESS_GROUP
return kwargs
@staticmethod
async def _read_stream(
stream: asyncio.StreamReader,
stream_name: str,
output: _CommandOutput,
) -> None:
"""按块读取输出,始终只把前 10KB 保留在返回结果中。"""
while True:
chunk = await stream.read(READ_CHUNK_SIZE)
if not chunk:
break
output.append(stream_name, chunk.decode("utf-8", errors="replace"))
@staticmethod
def _terminate_process(process: asyncio.subprocess.Process, sig: int):
"""向进程组发送终止信号;不支持进程组的平台回退为单进程终止。"""
try:
if os.name == "posix":
os.killpg(process.pid, sig)
elif sig == getattr(signal, "SIGKILL", None):
process.kill()
else:
process.terminate()
except ProcessLookupError:
pass
@classmethod
async def _cleanup_process(
cls,
process: asyncio.subprocess.Process,
wait_task: asyncio.Task,
) -> None:
"""先温和终止,失败后强杀,避免超时 shell 遗留子进程。"""
if wait_task.done():
return
cls._terminate_process(process, signal.SIGTERM)
try:
await asyncio.wait_for(
asyncio.shield(wait_task), timeout=KILL_GRACE_SECONDS
)
return
except asyncio.TimeoutError:
pass
kill_signal = getattr(signal, "SIGKILL", signal.SIGTERM)
cls._terminate_process(process, kill_signal)
try:
await asyncio.wait_for(
asyncio.shield(wait_task), timeout=KILL_GRACE_SECONDS
)
except asyncio.TimeoutError:
logger.warning("命令进程强制清理超时: pid=%s", process.pid)
@staticmethod
async def _finish_reader_tasks(reader_tasks: list[asyncio.Task]) -> None:
"""等待输出读取任务退出,异常只记录不影响工具返回。"""
if not reader_tasks:
return
done, pending = await asyncio.wait(reader_tasks, timeout=1)
for task in pending:
task.cancel()
results = await asyncio.gather(*done, *pending, return_exceptions=True)
for result in results:
if isinstance(result, Exception) and not isinstance(
result, asyncio.CancelledError
):
logger.debug("命令输出读取任务异常: %s", result)
@staticmethod
def _format_result(
*,
exit_code: Optional[int],
output: _CommandOutput,
timeout: int,
timed_out: bool,
timeout_note: Optional[str],
) -> str:
if timed_out:
result = f"命令执行超时 (限制: {timeout}秒,已终止进程)"
else:
result = f"命令执行完成 (退出码: {exit_code})"
if timeout_note:
result += f"\n\n提示:\n{timeout_note}"
if output.temp_file_path:
file_note = (
"截至命令终止前的完整输出"
if timed_out
else "完整输出"
)
result += (
"\n\n提示:\n"
f"命令输出超过 10KB仅返回前 {MAX_OUTPUT_PREVIEW_BYTES} 字节内容。\n"
f"{file_note}已写入临时文件: {output.temp_file_path}\n"
"如需完整内容,请继续读取该文件。"
)
if output.stdout:
result += f"\n\n标准输出:\n{output.stdout}"
if output.stderr:
result += f"\n\n错误输出:\n{output.stderr}"
if output.preview_truncated:
result += "\n\n...(仅展示前 10KB 内容)"
if not output.stdout and not output.stderr:
result += "\n\n(无输出内容)"
return result
async def run(self, command: str, timeout: Optional[int] = 60, **kwargs) -> str:
logger.info(
f"执行工具: {self.name}, 参数: command={command}, timeout={timeout}"
)
# 简单安全过滤
forbidden_keywords = [
"rm -rf /",
":(){ :|:& };:",
"dd if=/dev/zero",
"mkfs",
"reboot",
"shutdown",
]
for keyword in forbidden_keywords:
if keyword in command:
return f"错误:命令包含禁止使用的关键字 '{keyword}'"
normalized_timeout, timeout_note = self._normalize_timeout(timeout)
try:
async with _command_semaphore:
# 命令输出可能非常大,必须边读边落盘,不能使用 communicate() 一次性收集。
process = await asyncio.create_subprocess_shell(
command, **self._subprocess_kwargs()
)
output = _CommandOutput(preview_limit_bytes=MAX_OUTPUT_PREVIEW_BYTES)
wait_task = asyncio.create_task(process.wait())
reader_tasks = [
asyncio.create_task(
self._read_stream(process.stdout, "stdout", output)
),
asyncio.create_task(
self._read_stream(process.stderr, "stderr", output)
),
]
timed_out = False
try:
await asyncio.wait_for(
asyncio.shield(wait_task), timeout=normalized_timeout
)
except asyncio.TimeoutError:
timed_out = True
await self._cleanup_process(process, wait_task)
try:
await self._finish_reader_tasks(reader_tasks)
finally:
output.close()
return self._format_result(
exit_code=process.returncode,
output=output,
timeout=normalized_timeout,
timed_out=timed_out,
timeout_note=timeout_note,
)
except Exception as e:
logger.error(f"执行命令失败: {e}", exc_info=True)
return f"执行命令时发生错误: {str(e)}"

View File

@@ -8,44 +8,53 @@ from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool
from app.chain.recommend import RecommendChain
from app.log import logger
from app.schemas.types import MediaType, media_type_to_agent
class GetRecommendationsInput(BaseModel):
"""获取推荐工具的输入参数模型"""
explanation: str = Field(..., description="Clear explanation of why this tool is being used in the current context")
source: Optional[str] = Field("tmdb_trending",
description="Recommendation source: "
"'tmdb_trending' for TMDB trending content, "
"'tmdb_movies' for TMDB popular movies, "
"'tmdb_tvs' for TMDB popular TV shows, "
"'douban_hot' for Douban popular content, "
"'douban_movie_hot' for Douban hot movies, "
"'douban_tv_hot' for Douban hot TV shows, "
"'douban_movie_showing' for Douban movies currently showing, "
"'douban_movies' for Douban latest movies, "
"'douban_tvs' for Douban latest TV shows, "
"'douban_movie_top250' for Douban movie TOP250, "
"'douban_tv_weekly_chinese' for Douban Chinese TV weekly chart, "
"'douban_tv_weekly_global' for Douban global TV weekly chart, "
"'douban_tv_animation' for Douban popular animation, "
"'bangumi_calendar' for Bangumi anime calendar")
media_type: Optional[str] = Field("all",
description="Type of media content: '电影' for films, '电视剧' for television series or anime series, 'all' for all types")
limit: Optional[int] = Field(20,
description="Maximum number of recommendations to return (default: 20, maximum: 100)")
explanation: str = Field(
...,
description="Clear explanation of why this tool is being used in the current context",
)
source: Optional[str] = Field(
"tmdb_trending",
description="Recommendation source: "
"'tmdb_trending' for TMDB trending content, "
"'tmdb_movies' for TMDB popular movies, "
"'tmdb_tvs' for TMDB popular TV shows, "
"'douban_hot' for Douban popular content, "
"'douban_movie_hot' for Douban hot movies, "
"'douban_tv_hot' for Douban hot TV shows, "
"'douban_movie_showing' for Douban movies currently showing, "
"'douban_movies' for Douban latest movies, "
"'douban_tvs' for Douban latest TV shows, "
"'douban_movie_top250' for Douban movie TOP250, "
"'douban_tv_weekly_chinese' for Douban Chinese TV weekly chart, "
"'douban_tv_weekly_global' for Douban global TV weekly chart, "
"'douban_tv_animation' for Douban popular animation, "
"'bangumi_calendar' for Bangumi anime calendar",
)
media_type: Optional[str] = Field(
"all", description="Allowed values: movie, tv, all"
)
page: Optional[int] = Field(
1, description="Page number for pagination (default: 1, 20 items per page)"
)
class GetRecommendationsTool(MoviePilotTool):
name: str = "get_recommendations"
description: str = "Get trending and popular media recommendations from various sources. Returns curated lists of popular movies, TV shows, and anime based on different criteria like trending, ratings, or calendar schedules."
description: str = "Get trending and popular media recommendations from various sources. Returns curated lists of popular movies, TV shows, and anime based on different criteria like trending, ratings, or calendar schedules. Supports pagination with 20 items per page."
args_schema: Type[BaseModel] = GetRecommendationsInput
def get_tool_message(self, **kwargs) -> Optional[str]:
"""根据推荐参数生成友好的提示消息"""
source = kwargs.get("source", "tmdb_trending")
media_type = kwargs.get("media_type", "all")
limit = kwargs.get("limit", 20)
page = kwargs.get("page", 1)
source_map = {
"tmdb_trending": "TMDB流行趋势",
"tmdb_movies": "TMDB热门电影",
@@ -53,117 +62,167 @@ class GetRecommendationsTool(MoviePilotTool):
"douban_hot": "豆瓣热门",
"douban_movie_hot": "豆瓣热门电影",
"douban_tv_hot": "豆瓣热门电视剧",
"douban_movie_showing": "豆瓣正在热映",
"douban_movie_showing": "豆瓣热映",
"douban_movies": "豆瓣最新电影",
"douban_tvs": "豆瓣最新电视剧",
"douban_movie_top250": "豆瓣电影TOP250",
"douban_tv_weekly_chinese": "豆瓣国产剧集榜",
"douban_tv_weekly_global": "豆瓣全球剧集榜",
"douban_tv_animation": "豆瓣热门动漫",
"bangumi_calendar": "番组计划"
"bangumi_calendar": "番组计划",
}
source_desc = source_map.get(source, source)
message = f"正在获取推荐: {source_desc}"
message = f"获取推荐: {source_desc}"
if media_type != "all":
message += f" [{media_type}]"
message += f" (限制: {limit})"
message += f" ({page})"
return message
async def run(self, source: Optional[str] = "tmdb_trending",
media_type: Optional[str] = "all", limit: Optional[int] = 20, **kwargs) -> str:
logger.info(f"执行工具: {self.name}, 参数: source={source}, media_type={media_type}, limit={limit}")
async def run(
self,
source: Optional[str] = "tmdb_trending",
media_type: Optional[str] = "all",
page: Optional[int] = 1,
**kwargs,
) -> str:
page = max(1, page or 1)
page_size = 20
logger.info(
f"执行工具: {self.name}, 参数: source={source}, media_type={media_type}, page={page}"
)
try:
if media_type != "all":
media_type_enum = MediaType.from_agent(media_type)
if not media_type_enum:
return f"错误:无效的媒体类型 '{media_type}',支持的类型:'movie', 'tv', 'all'"
media_type = media_type_enum.to_agent() # 归一化为 "movie"/"tv"
recommend_chain = RecommendChain()
results = []
if source == "tmdb_trending":
# async_tmdb_trending 只接受 page 参数,返回固定数量的结果
# 如果需要限制数量,需要在返回后截取
results = await recommend_chain.async_tmdb_trending(page=1)
if limit and limit > 0:
results = results[:limit]
results = await recommend_chain.async_tmdb_trending(page=page)
elif source == "tmdb_movies":
# async_tmdb_movies 接受 page 参数,返回固定数量的结果
results = await recommend_chain.async_tmdb_movies(page=1)
if limit and limit > 0:
results = results[:limit]
results = await recommend_chain.async_tmdb_movies(page=page)
elif source == "tmdb_tvs":
# async_tmdb_tvs 接受 page 参数,返回固定数量的结果
results = await recommend_chain.async_tmdb_tvs(page=1)
if limit and limit > 0:
results = results[:limit]
results = await recommend_chain.async_tmdb_tvs(page=page)
elif source == "douban_hot":
if media_type == "movie":
results = await recommend_chain.async_douban_movie_hot(page=1, count=limit)
results = await recommend_chain.async_douban_movie_hot(
page=page, count=page_size
)
elif media_type == "tv":
results = await recommend_chain.async_douban_tv_hot(page=1, count=limit)
results = await recommend_chain.async_douban_tv_hot(
page=page, count=page_size
)
else: # all
results.extend(await recommend_chain.async_douban_movie_hot(page=1, count=limit))
results.extend(await recommend_chain.async_douban_tv_hot(page=1, count=limit))
results.extend(
await recommend_chain.async_douban_movie_hot(
page=page, count=page_size
)
)
results.extend(
await recommend_chain.async_douban_tv_hot(
page=page, count=page_size
)
)
elif source == "douban_movie_hot":
results = await recommend_chain.async_douban_movie_hot(page=1, count=limit)
results = await recommend_chain.async_douban_movie_hot(
page=page, count=page_size
)
elif source == "douban_tv_hot":
results = await recommend_chain.async_douban_tv_hot(page=1, count=limit)
results = await recommend_chain.async_douban_tv_hot(
page=page, count=page_size
)
elif source == "douban_movie_showing":
results = await recommend_chain.async_douban_movie_showing(page=1, count=limit)
results = await recommend_chain.async_douban_movie_showing(
page=page, count=page_size
)
elif source == "douban_movies":
results = await recommend_chain.async_douban_movies(page=1, count=limit)
results = await recommend_chain.async_douban_movies(
page=page, count=page_size
)
elif source == "douban_tvs":
results = await recommend_chain.async_douban_tvs(page=1, count=limit)
results = await recommend_chain.async_douban_tvs(
page=page, count=page_size
)
elif source == "douban_movie_top250":
results = await recommend_chain.async_douban_movie_top250(page=1, count=limit)
results = await recommend_chain.async_douban_movie_top250(
page=page, count=page_size
)
elif source == "douban_tv_weekly_chinese":
results = await recommend_chain.async_douban_tv_weekly_chinese(page=1, count=limit)
results = await recommend_chain.async_douban_tv_weekly_chinese(
page=page, count=page_size
)
elif source == "douban_tv_weekly_global":
results = await recommend_chain.async_douban_tv_weekly_global(page=1, count=limit)
results = await recommend_chain.async_douban_tv_weekly_global(
page=page, count=page_size
)
elif source == "douban_tv_animation":
results = await recommend_chain.async_douban_tv_animation(page=1, count=limit)
results = await recommend_chain.async_douban_tv_animation(
page=page, count=page_size
)
elif source == "bangumi_calendar":
results = await recommend_chain.async_bangumi_calendar(page=1, count=limit)
results = await recommend_chain.async_bangumi_calendar(
page=page, count=page_size
)
else:
# 不支持的推荐来源
supported_sources = [
"tmdb_trending", "tmdb_movies", "tmdb_tvs",
"douban_hot", "douban_movie_hot", "douban_tv_hot",
"douban_movie_showing", "douban_movies", "douban_tvs",
"douban_movie_top250", "douban_tv_weekly_chinese",
"douban_tv_weekly_global", "douban_tv_animation",
"bangumi_calendar"
"tmdb_trending",
"tmdb_movies",
"tmdb_tvs",
"douban_hot",
"douban_movie_hot",
"douban_tv_hot",
"douban_movie_showing",
"douban_movies",
"douban_tvs",
"douban_movie_top250",
"douban_tv_weekly_chinese",
"douban_tv_weekly_global",
"douban_tv_animation",
"bangumi_calendar",
]
return f"不支持的推荐来源: {source}。支持的来源包括: {', '.join(supported_sources)}"
if results:
# 限制最多20条结果
# 对于TMDB来源,API自身按页返回,取前page_size条
total_count = len(results)
limited_results = results[:20]
page_results = results[:page_size]
# 精简字段,只保留关键信息
simplified_results = []
for r in limited_results:
for r in page_results:
# r 应该是字典格式(to_dict的结果),但为了安全起见进行检查
if not isinstance(r, dict):
logger.warning(f"推荐结果格式异常,跳过: {type(r)}")
continue
simplified = {
"title": r.get("title"),
"en_title": r.get("en_title"),
"year": r.get("year"),
"type": r.get("type"),
"type": media_type_to_agent(r.get("type")),
"season": r.get("season"),
"tmdb_id": r.get("tmdb_id"),
"imdb_id": r.get("imdb_id"),
"douban_id": r.get("douban_id"),
"vote_average": r.get("vote_average"),
"poster_path": r.get("poster_path"),
"detail_link": r.get("detail_link")
"detail_link": r.get("detail_link"),
}
simplified_results.append(simplified)
result_json = json.dumps(simplified_results, ensure_ascii=False, indent=2)
# 如果结果被裁剪,添加提示信息
if total_count > 20:
return f"注意:推荐结果共找到 {total_count} 条,为节省上下文空间,仅显示前 20 条结果。\n\n{result_json}"
return result_json
result_json = json.dumps(
simplified_results, ensure_ascii=False, indent=2
)
has_more = total_count > page_size
payload_msg = f"{page} 页,当前页 {len(simplified_results)} 条结果。"
if has_more:
payload_msg += (
f" 可能有更多数据,可使用 page={page + 1} 获取下一页。"
)
return f"{payload_msg}\n\n{result_json}"
return "未找到推荐内容。"
except Exception as e:
logger.error(f"获取推荐失败: {e}", exc_info=True)
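The new pagination path above replaces the fixed 20-item cap with per-page output plus a next-page hint. Below is a minimal standalone sketch of that message-building step; build_page_payload is a hypothetical helper written only for illustration, not part of this change.

import json
from typing import List

def build_page_payload(results: List[dict], page: int, page_size: int) -> str:
    # Trim to one page and hint at the next page when more rows came back than one page holds.
    total_count = len(results)
    page_results = results[:page_size]
    body = json.dumps(page_results, ensure_ascii=False, indent=2)
    message = f"第 {page} 页,当前页 {len(page_results)} 条结果。"
    if total_count > page_size:
        message += f" 可能有更多数据,可使用 page={page + 1} 获取下一页。"
    return f"{message}\n\n{body}"

# Example: three cached rows shown two per page.
print(build_page_payload([{"title": "A"}, {"title": "B"}, {"title": "C"}], page=1, page_size=2))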

View File

@@ -0,0 +1,155 @@
"""获取搜索结果工具"""
import json
import re
from typing import List, Optional, Type
from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool
from app.chain.search import SearchChain
from app.log import logger
from ._torrent_search_utils import (
TORRENT_RESULT_LIMIT,
build_filter_options,
filter_contexts,
simplify_search_result,
)
class GetSearchResultsInput(BaseModel):
"""获取搜索结果工具的输入参数模型"""
explanation: str = Field(
...,
description="Clear explanation of why this tool is being used in the current context",
)
site: Optional[List[str]] = Field(None, description="Site name filters")
season: Optional[List[str]] = Field(None, description="Season or episode filters")
free_state: Optional[List[str]] = Field(None, description="Promotion state filters")
video_code: Optional[List[str]] = Field(None, description="Video codec filters")
edition: Optional[List[str]] = Field(None, description="Edition filters")
resolution: Optional[List[str]] = Field(None, description="Resolution filters")
release_group: Optional[List[str]] = Field(
None, description="Release group filters"
)
title_pattern: Optional[str] = Field(
None,
description="Regular expression pattern to filter torrent titles (e.g., '4K|2160p|UHD', '1080p.*BluRay')",
)
show_filter_options: Optional[bool] = Field(
False,
description="Whether to return only optional filter options for re-checking available conditions",
)
page: Optional[int] = Field(
1,
description="Page number for pagination (default: 1, each page returns up to 50 results)",
)
class GetSearchResultsTool(MoviePilotTool):
name: str = "get_search_results"
description: str = "Get cached torrent search results from search_torrents with optional filters. Supports pagination with up to 50 results per page."
args_schema: Type[BaseModel] = GetSearchResultsInput
def get_tool_message(self, **kwargs) -> Optional[str]:
return "获取搜索结果"
async def run(
self,
site: Optional[List[str]] = None,
season: Optional[List[str]] = None,
free_state: Optional[List[str]] = None,
video_code: Optional[List[str]] = None,
edition: Optional[List[str]] = None,
resolution: Optional[List[str]] = None,
release_group: Optional[List[str]] = None,
title_pattern: Optional[str] = None,
show_filter_options: bool = False,
page: Optional[int] = 1,
**kwargs,
) -> str:
page = max(1, page or 1)
logger.info(
f"执行工具: {self.name}, 参数: site={site}, season={season}, free_state={free_state}, video_code={video_code}, edition={edition}, resolution={resolution}, release_group={release_group}, title_pattern={title_pattern}, show_filter_options={show_filter_options}, page={page}"
)
try:
items = await SearchChain().async_last_search_results() or []
if not items:
return "没有可用的搜索结果,请先使用 search_torrents 搜索"
if show_filter_options:
payload = {
"total_count": len(items),
"filter_options": build_filter_options(items),
}
return json.dumps(payload, ensure_ascii=False, indent=2)
regex_pattern = None
if title_pattern:
try:
regex_pattern = re.compile(title_pattern, re.IGNORECASE)
except re.error as e:
logger.warning(f"正则表达式编译失败: {title_pattern}, 错误: {e}")
return f"正则表达式格式错误: {str(e)}"
filtered_items = filter_contexts(
items=items,
site=site,
season=season,
free_state=free_state,
video_code=video_code,
edition=edition,
resolution=resolution,
release_group=release_group,
)
if regex_pattern:
filtered_items = [
item
for item in filtered_items
if item.torrent_info
and item.torrent_info.title
and regex_pattern.search(item.torrent_info.title)
]
if not filtered_items:
return "没有符合筛选条件的搜索结果,请调整筛选条件"
total_count = len(filtered_items)
filtered_ids = {id(item) for item in filtered_items}
matched_indices = [
index
for index, item in enumerate(items, start=1)
if id(item) in filtered_ids
]
# 分页
page_size = TORRENT_RESULT_LIMIT
start = (page - 1) * page_size
end = start + page_size
page_items = filtered_items[start:end]
page_indices = matched_indices[start:end]
if not page_items:
return f"{page} 页没有数据,共 {total_count} 条结果,共 {(total_count + page_size - 1) // page_size} 页。"
results = [
simplify_search_result(item, index)
for item, index in zip(page_items, page_indices)
]
total_pages = (total_count + page_size - 1) // page_size
payload = {
"total_count": total_count,
"page": page,
"total_pages": total_pages,
"results": results,
}
if page < total_pages:
payload["message"] = (
f"搜索结果共 {total_count} 条,当前第 {page}/{total_pages} 页,可使用 page={page + 1} 获取下一页。"
)
return json.dumps(payload, ensure_ascii=False, indent=2)
except Exception as e:
error_message = f"获取搜索结果失败: {str(e)}"
logger.error(f"获取搜索结果失败: {e}", exc_info=True)
return error_message
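The tool pages the cached results 50 at a time after regex filtering. A self-contained sketch of the same ceiling-division page math, using plain strings instead of torrent contexts (sample data only):

import re

titles = [f"Example.Torrent.{i}.1080p" for i in range(1, 121)]  # fake cached titles
pattern = re.compile("1080p", re.IGNORECASE)

filtered = [t for t in titles if pattern.search(t)]
page, page_size = 2, 50
total_count = len(filtered)
total_pages = (total_count + page_size - 1) // page_size  # ceiling division
start, end = (page - 1) * page_size, page * page_size
page_items = filtered[start:end]
print(total_count, total_pages, len(page_items))  # 120 3 50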

View File

@@ -0,0 +1,118 @@
"""安装插件工具"""
import json
from typing import Optional, Type
from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool
from app.agent.tools.impl._plugin_tool_utils import (
get_plugin_snapshot,
install_plugin_runtime,
load_market_plugins,
summarize_plugin,
)
from app.log import logger
class InstallPluginInput(BaseModel):
"""安装插件工具的输入参数模型"""
explanation: str = Field(
...,
description="Clear explanation of why this tool is being used in the current context",
)
plugin_id: str = Field(
...,
description="Exact plugin ID to install. Use query_market_plugins first to find the correct plugin_id.",
)
force: bool = Field(
False,
description="Whether to force reinstall or upgrade the specified plugin.",
)
force_refresh_market: bool = Field(
False,
description="Whether to refresh plugin market caches before reading the market list.",
)
class InstallPluginTool(MoviePilotTool):
name: str = "install_plugin"
description: str = (
"Install a plugin by exact plugin_id from the plugin market or local plugin repositories. "
"Use query_market_plugins first when you need filtering or discovery."
)
require_admin: bool = True
args_schema: Type[BaseModel] = InstallPluginInput
def get_tool_message(self, **kwargs) -> Optional[str]:
plugin_id = kwargs.get("plugin_id")
return f"安装插件: {plugin_id or '未知插件'}"
async def run(
self,
plugin_id: str,
force: bool = False,
force_refresh_market: bool = False,
**kwargs,
) -> str:
logger.info(
f"执行工具: {self.name}, 参数: plugin_id={plugin_id}, force={force}"
)
try:
plugins = await load_market_plugins(force_refresh=force_refresh_market)
if not plugins:
return json.dumps(
{"success": False, "message": "当前插件市场没有可用插件"},
ensure_ascii=False,
)
candidate = next((plugin for plugin in plugins if plugin.id == plugin_id), None)
if not candidate:
return json.dumps(
{
"success": False,
"message": f"未在插件市场中找到插件: {plugin_id}。请先调用 query_market_plugins 确认 plugin_id。",
},
ensure_ascii=False,
)
success, message, refreshed_only = await install_plugin_runtime(
candidate.id,
getattr(candidate, "repo_url", None),
force=force,
)
if not success:
return json.dumps(
{
"success": False,
"plugin": summarize_plugin(candidate),
"message": message,
},
ensure_ascii=False,
indent=2,
)
plugin_snapshot = get_plugin_snapshot(candidate.id)
if refreshed_only and getattr(candidate, "has_update", False) and not force:
message = "插件已安装,当前仅刷新加载;如需升级到市场新版本,请设置 force=true"
return json.dumps(
{
"success": True,
"message": message,
"force": force,
"refreshed_only": refreshed_only,
"plugin": summarize_plugin(candidate),
"runtime": plugin_snapshot,
},
ensure_ascii=False,
indent=2,
)
except Exception as e:
logger.error(f"安装插件失败: {e}", exc_info=True)
return json.dumps(
{"success": False, "message": f"安装插件时发生错误: {str(e)}"},
ensure_ascii=False,
)
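The install flow looks the market entry up by exact id and, when the plugin was only reloaded, tells the caller to retry with force=true. A reduced sketch of that decision, with a hypothetical Plugin dataclass standing in for the real market objects:

from dataclasses import dataclass

@dataclass
class Plugin:
    id: str
    has_update: bool = False

plugins = [Plugin("PluginA"), Plugin("PluginB", has_update=True)]
plugin_id, force, refreshed_only = "PluginB", False, True

candidate = next((p for p in plugins if p.id == plugin_id), None)  # exact-id match only
message = "安装成功"
if candidate and refreshed_only and candidate.has_update and not force:
    message = "插件已安装,当前仅刷新加载;如需升级到市场新版本,请设置 force=true"
print(message)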

View File

@@ -24,7 +24,7 @@ class ListDirectoryInput(BaseModel):
class ListDirectoryTool(MoviePilotTool):
name: str = "list_directory"
description: str = "List actual files and folders in a file system directory (NOT configuration). Shows files and subdirectories with their names, types, sizes, and modification times. Returns up to 20 items and the total count if there are more items. Use 'query_directories' to query directory configuration settings."
description: str = "List actual files and folders in a file system directory (NOT configuration). Shows files and subdirectories with their names, types, sizes, and modification times. Returns up to 20 items and the total count if there are more items. Use 'query_directory_settings' to query directory configuration settings."
args_schema: Type[BaseModel] = ListDirectoryInput
def get_tool_message(self, **kwargs) -> Optional[str]:
@@ -32,99 +32,87 @@ class ListDirectoryTool(MoviePilotTool):
path = kwargs.get("path", "")
storage = kwargs.get("storage", "local")
message = f"正在查询目录: {path}"
message = f"查询目录: {path}"
if storage != "local":
message += f" [存储: {storage}]"
return message
@staticmethod
def _list_directory_sync(
path: str, storage: Optional[str] = "local", sort_by: Optional[str] = "name"
) -> str:
"""
目录遍历可能触发本地磁盘或远程存储请求,统一放到线程池中执行。
"""
if not path:
return "错误:路径不能为空"
if storage == "local":
if not path.startswith("/") and not (len(path) > 1 and path[1] == ":"):
path = str(Path(path).resolve())
elif not path.startswith("/"):
path = "/" + path
fileitem = FileItem(storage=storage or "local", path=path, type="dir")
file_list = StorageChain().list_files(fileitem, recursion=False)
if file_list is None:
return f"无法访问目录:{path},请检查路径是否正确或存储是否可用"
if not file_list:
return f"目录 {path} 为空"
if sort_by == "time":
file_list.sort(key=lambda x: x.modify_time or 0, reverse=True)
else:
file_list.sort(
key=lambda x: (
0 if x.type == "dir" else 1,
StringUtils.natural_sort_key(x.name or ""),
)
)
total_count = len(file_list)
limited_list = file_list[:20]
simplified_items = []
for item in limited_list:
size_str = StringUtils.str_filesize(item.size) if item.size else None
modify_time_str = None
if item.modify_time:
try:
modify_time_str = datetime.fromtimestamp(item.modify_time).strftime(
"%Y-%m-%d %H:%M:%S"
)
except (ValueError, OSError):
modify_time_str = str(item.modify_time)
simplified = {
"name": item.name,
"type": item.type,
"path": item.path,
"size": size_str,
"modify_time": modify_time_str,
}
if item.type == "file" and item.extension:
simplified["extension"] = item.extension
simplified_items.append(simplified)
result_json = json.dumps(simplified_items, ensure_ascii=False, indent=2)
if total_count > 20:
return (
f"注意:目录中共有 {total_count} 个项目,为节省上下文空间,仅显示前 20 个项目。\n\n"
f"{result_json}"
)
return result_json
async def run(self, path: str, storage: Optional[str] = "local",
sort_by: Optional[str] = "name", **kwargs) -> str:
logger.info(f"执行工具: {self.name}, 参数: path={path}, storage={storage}, sort_by={sort_by}")
try:
# 规范化路径
if not path:
return "错误:路径不能为空"
# 确保路径格式正确
if storage == "local":
# 本地路径处理
if not path.startswith("/") and not (len(path) > 1 and path[1] == ":"):
# 相对路径,尝试转换为绝对路径
path = str(Path(path).resolve())
else:
# 远程存储路径,确保以/开头
if not path.startswith("/"):
path = "/" + path
# 创建FileItem
fileitem = FileItem(
storage=storage or "local",
path=path,
type="dir"
return await self.run_blocking(
"storage", self._list_directory_sync, path, storage, sort_by
)
# 查询目录内容
storage_chain = StorageChain()
file_list = storage_chain.list_files(fileitem, recursion=False)
if file_list is None:
return f"无法访问目录:{path},请检查路径是否正确或存储是否可用"
if not file_list:
return f"目录 {path} 为空"
# 排序
if sort_by == "time":
file_list.sort(key=lambda x: x.modify_time or 0, reverse=True)
else:
# 默认按名称排序(目录优先,然后按名称)
file_list.sort(key=lambda x: (
0 if x.type == "dir" else 1,
StringUtils.natural_sort_key(x.name or "")
))
# 限制返回数量
total_count = len(file_list)
limited_list = file_list[:20]
# 转换为字典格式
simplified_items = []
for item in limited_list:
# 格式化文件大小
size_str = None
if item.size:
size_str = StringUtils.str_filesize(item.size)
# 格式化修改时间
modify_time_str = None
if item.modify_time:
try:
modify_time_str = datetime.fromtimestamp(item.modify_time).strftime("%Y-%m-%d %H:%M:%S")
except (ValueError, OSError):
modify_time_str = str(item.modify_time)
simplified = {
"name": item.name,
"type": item.type,
"path": item.path,
"size": size_str,
"modify_time": modify_time_str
}
# 如果是文件,添加扩展名
if item.type == "file" and item.extension:
simplified["extension"] = item.extension
simplified_items.append(simplified)
result_json = json.dumps(simplified_items, ensure_ascii=False, indent=2)
# 如果结果被裁剪,添加提示信息
if total_count > 20:
return f"注意:目录中共有 {total_count} 个项目,为节省上下文空间,仅显示前 20 个项目。\n\n{result_json}"
else:
return result_json
except Exception as e:
logger.error(f"查询目录内容失败: {e}", exc_info=True)
return f"查询目录内容时发生错误: {str(e)}"
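The refactor moves the blocking listing into _list_directory_sync and dispatches it through run_blocking, whose implementation is not shown in this diff. A standard-library analogue of the same offloading pattern, assuming only asyncio.to_thread:

import asyncio
import os

def list_directory_sync(path: str) -> list:
    # Plain blocking call; safe to run in a worker thread.
    return sorted(os.listdir(path))

async def main() -> None:
    # Offload the blocking listing so the event loop stays responsive.
    entries = await asyncio.to_thread(list_directory_sync, ".")
    print(entries[:5])

asyncio.run(main())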

View File

@@ -0,0 +1,79 @@
"""查询所有可用斜杠命令工具(系统命令 + 插件命令)"""
import json
from typing import Optional, Type
from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool
from app.log import logger
class ListSlashCommandsInput(BaseModel):
"""查询所有可用斜杠命令工具的输入参数模型"""
explanation: str = Field(
...,
description="Clear explanation of why this tool is being used in the current context",
)
class ListSlashCommandsTool(MoviePilotTool):
name: str = "list_slash_commands"
description: str = (
"List all available slash commands in the system, including system preset commands "
"(e.g. /cookiecloud, /sites, /subscribes, /downloading, /transfer, /restart, etc.) "
"and plugin-registered commands. "
"Use this tool to discover what slash commands are available before executing them with run_slash_command. "
"This is especially useful when the user describes an action in natural language and you need to "
"find the matching command to fulfill their request."
)
args_schema: Type[BaseModel] = ListSlashCommandsInput
require_admin: bool = True
def get_tool_message(self, **kwargs) -> Optional[str]:
"""生成友好的提示消息"""
return "查询所有可用命令"
async def run(self, **kwargs) -> str:
logger.info(f"执行工具: {self.name}")
try:
from app.command import Command
command_obj = Command()
all_commands = command_obj.get_commands()
if not all_commands:
return "当前没有可用的命令"
commands_list = []
for cmd, info in all_commands.items():
cmd_info = {
"command": cmd,
"description": info.get("description", ""),
}
if info.get("category"):
cmd_info["category"] = info["category"]
# 标识命令类型
if info.get("type") == "scheduler":
cmd_info["type"] = "scheduler"
elif info.get("pid"):
cmd_info["type"] = "plugin"
cmd_info["plugin_id"] = info["pid"]
else:
cmd_info["type"] = "system"
commands_list.append(cmd_info)
result = {
"total": len(commands_list),
"commands": commands_list,
}
return json.dumps(result, ensure_ascii=False, indent=2)
except Exception as e:
logger.error(f"查询可用命令失败: {e}", exc_info=True)
return json.dumps(
{"success": False, "message": f"查询可用命令时发生错误: {str(e)}"},
ensure_ascii=False,
)
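The command list is classified into system, plugin, and scheduler entries by inspecting the registration payload. A sketch of that rule with made-up registrations (the commands and descriptions below are samples, not real registrations):

commands = {
    "/restart": {"description": "重启系统"},
    "/backup": {"description": "备份配置", "pid": "BackupPlugin"},
    "/cleanup": {"description": "定时清理", "type": "scheduler"},
}

for cmd, info in commands.items():
    if info.get("type") == "scheduler":
        kind = "scheduler"
    elif info.get("pid"):
        kind = "plugin"  # registered by a plugin, keeps its plugin_id
    else:
        kind = "system"  # built-in preset command
    print(cmd, kind)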

View File

@@ -0,0 +1,139 @@
"""修改下载任务工具"""
from typing import Optional, Type, List
from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool
from app.chain.download import DownloadChain
from app.log import logger
class ModifyDownloadInput(BaseModel):
"""修改下载任务工具的输入参数模型"""
explanation: str = Field(
...,
description="Clear explanation of why this tool is being used in the current context",
)
hash: str = Field(
..., description="Task hash (can be obtained from query_download_tasks tool)"
)
action: Optional[str] = Field(
None,
description="Action to perform on the task: 'start' to resume downloading, 'stop' to pause downloading. "
"If not provided, no start/stop action will be performed.",
)
tags: Optional[List[str]] = Field(
None,
description="List of tags to set on the download task. If provided, these tags will be added to the task. "
"Example: ['movie', 'hd']",
)
downloader: Optional[str] = Field(
None,
description="Name of specific downloader (optional, if not provided will search all downloaders)",
)
class ModifyDownloadTool(MoviePilotTool):
"""修改下载任务工具"""
name: str = "modify_download"
description: str = (
"Modify a download task in the downloader by task hash. "
"Supports: 1) Setting tags on a download task, "
"2) Starting (resuming) a paused download task, "
"3) Stopping (pausing) a downloading task. "
"Multiple operations can be performed in a single call."
)
args_schema: Type[BaseModel] = ModifyDownloadInput
require_admin: bool = True
def get_tool_message(self, **kwargs) -> Optional[str]:
hash_value = kwargs.get("hash", "")
action = kwargs.get("action")
tags = kwargs.get("tags")
downloader = kwargs.get("downloader")
parts = [f"修改下载任务: {hash_value}"]
if action == "start":
parts.append("操作: 开始下载")
elif action == "stop":
parts.append("操作: 暂停下载")
if tags:
parts.append(f"标签: {', '.join(tags)}")
if downloader:
parts.append(f"下载器: {downloader}")
return " | ".join(parts)
@staticmethod
def _modify_download_sync(
hash_value: str,
action: Optional[str] = None,
tags: Optional[List[str]] = None,
downloader: Optional[str] = None,
) -> List[str]:
"""同步修改下载任务状态和标签,避免下载器 SDK 阻塞事件循环。"""
download_chain = DownloadChain()
results = []
if tags:
tag_result = download_chain.set_torrents_tag(
hashs=[hash_value], tags=tags, downloader=downloader
)
if tag_result:
results.append(f"成功设置标签:{', '.join(tags)}")
else:
results.append("设置标签失败,请检查任务是否存在或下载器是否可用")
if action:
action_result = download_chain.set_downloading(
hash_str=hash_value, oper=action, name=downloader
)
action_desc = "开始" if action == "start" else "暂停"
if action_result:
results.append(f"成功{action_desc}下载任务")
else:
results.append(f"{action_desc}下载任务失败,请检查任务是否存在或下载器是否可用")
return results
async def run(
self,
hash: str,
action: Optional[str] = None,
tags: Optional[List[str]] = None,
downloader: Optional[str] = None,
**kwargs,
) -> str:
logger.info(
f"执行工具: {self.name}, 参数: hash={hash}, action={action}, tags={tags}, downloader={downloader}"
)
try:
# 校验 hash 格式
if len(hash) != 40 or not all(c in "0123456789abcdefABCDEF" for c in hash):
return "参数错误:hash 格式无效,请先使用 query_download_tasks 工具获取正确的 hash。"
# 校验参数:至少需要一个操作
if not action and not tags:
return "参数错误:至少需要指定 action(start/stop)或 tags 中的一个。"
# 校验 action 参数
if action and action not in ("start", "stop"):
return f"参数错误:action 只支持 'start'(开始下载)或 'stop'(暂停下载),收到: '{action}'"
results = await self.run_blocking(
"downloader",
self._modify_download_sync,
hash,
action,
tags,
downloader,
)
return f"下载任务 {hash}:" + ";".join(results)
except Exception as e:
logger.error(f"修改下载任务失败: {e}", exc_info=True)
return f"修改下载任务时发生错误: {str(e)}"
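The hash check above accepts exactly 40 hexadecimal characters. The same validation can be expressed as a regex; a small equivalent sketch:

import re

HASH_RE = re.compile(r"[0-9a-fA-F]{40}")

def is_valid_hash(value: str) -> bool:
    # fullmatch requires the whole string to be a 40-character hex digest.
    return bool(HASH_RE.fullmatch(value))

print(is_valid_hash("a" * 40))      # True
print(is_valid_hash("not-a-hash"))  # False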

View File

@@ -0,0 +1,85 @@
"""查询内置过滤规则工具。"""
import json
from typing import Optional, Type, List
from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool
from app.agent.tools.impl._filter_rule_utils import (
get_builtin_rules,
serialize_builtin_rule,
RULE_STRING_SYNTAX,
)
from app.log import logger
class QueryBuiltinFilterRulesInput(BaseModel):
"""查询内置过滤规则工具的输入参数模型"""
explanation: str = Field(
...,
description="Clear explanation of why this tool is being used in the current context",
)
rule_ids: Optional[List[str]] = Field(
None,
description="Optional list of built-in rule IDs to query. If omitted, return all built-in rules.",
)
class QueryBuiltinFilterRulesTool(MoviePilotTool):
name: str = "query_builtin_filter_rules"
description: str = (
"Query built-in filter rules defined by the backend filter module. "
"These rule IDs can be used directly inside rule_string expressions for filter rule groups. "
"Use this tool before add_rule_group or update_rule_group to learn valid built-in rule IDs."
)
args_schema: Type[BaseModel] = QueryBuiltinFilterRulesInput
def get_tool_message(self, **kwargs) -> Optional[str]:
rule_ids = kwargs.get("rule_ids") or []
if rule_ids:
return f"查询内置过滤规则: {', '.join(rule_ids)}"
return "查询所有内置过滤规则"
async def run(
self,
rule_ids: Optional[List[str]] = None,
**kwargs,
) -> str:
logger.info(f"执行工具: {self.name}")
try:
builtin_rules = get_builtin_rules()
if rule_ids:
target_ids = set(rule_ids)
builtin_rules = {
rule_id: payload
for rule_id, payload in builtin_rules.items()
if rule_id in target_ids
}
serialized = [
serialize_builtin_rule(rule_id, payload)
for rule_id, payload in builtin_rules.items()
]
return json.dumps(
{
"success": True,
"count": len(serialized),
"rule_string_syntax": RULE_STRING_SYNTAX,
"rules": serialized,
},
ensure_ascii=False,
indent=2,
)
except Exception as exc:
logger.error(f"查询内置过滤规则失败: {exc}", exc_info=True)
return json.dumps(
{
"success": False,
"message": f"查询内置过滤规则失败: {exc}",
"rules": [],
},
ensure_ascii=False,
)
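When rule_ids is given, the tool narrows the built-in registry to the requested ids before serializing. A toy version of that filtering step (the rule ids and payloads below are sample data, not the real registry):

builtin_rules = {
    "BLURAY": {"name": "蓝光原盘"},
    "H265": {"name": "H265编码"},
    "FREE": {"name": "促销免费"},
}
rule_ids = ["H265", "FREE"]

target_ids = set(rule_ids)
selected = {rid: payload for rid, payload in builtin_rules.items() if rid in target_ids}
print(sorted(selected))  # ['FREE', 'H265']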

View File

@@ -0,0 +1,95 @@
"""查询自定义过滤规则工具。"""
import json
from typing import Optional, Type, List
from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool
from app.agent.tools.impl._filter_rule_utils import (
collect_custom_rule_group_refs,
get_custom_rules,
get_rule_groups,
serialize_custom_rule,
)
from app.log import logger
class QueryCustomFilterRulesInput(BaseModel):
"""查询自定义过滤规则工具的输入参数模型"""
explanation: str = Field(
...,
description="Clear explanation of why this tool is being used in the current context",
)
rule_ids: Optional[List[str]] = Field(
None,
description="Optional list of custom rule IDs to query. If omitted, return all custom rules.",
)
include_group_refs: bool = Field(
True,
description="Whether to include which rule groups reference each custom rule.",
)
class QueryCustomFilterRulesTool(MoviePilotTool):
name: str = "query_custom_filter_rules"
description: str = (
"Query custom filter rules stored in CustomFilterRules. "
"Custom rules can be referenced from rule_string expressions in filter rule groups. "
"Use this tool before add_rule_group or update_rule_group to learn valid custom rule IDs."
)
args_schema: Type[BaseModel] = QueryCustomFilterRulesInput
def get_tool_message(self, **kwargs) -> Optional[str]:
rule_ids = kwargs.get("rule_ids") or []
if rule_ids:
return f"查询自定义过滤规则: {', '.join(rule_ids)}"
return "查询所有自定义过滤规则"
async def run(
self,
rule_ids: Optional[List[str]] = None,
include_group_refs: bool = True,
**kwargs,
) -> str:
logger.info(f"执行工具: {self.name}")
try:
custom_rules = get_custom_rules()
if rule_ids:
target_ids = set(rule_ids)
custom_rules = [
rule for rule in custom_rules if rule.id in target_ids
]
refs = {}
if include_group_refs:
refs = collect_custom_rule_group_refs(
get_rule_groups(),
[rule.id for rule in custom_rules if rule.id],
)
serialized = [
serialize_custom_rule(rule, refs.get(rule.id))
for rule in custom_rules
]
return json.dumps(
{
"success": True,
"count": len(serialized),
"rules": serialized,
},
ensure_ascii=False,
indent=2,
)
except Exception as exc:
logger.error(f"查询自定义过滤规则失败: {exc}", exc_info=True)
return json.dumps(
{
"success": False,
"message": f"查询自定义过滤规则失败: {exc}",
"rules": [],
},
ensure_ascii=False,
)

View File

@@ -0,0 +1,70 @@
"""查询自定义识别词工具"""
import json
from typing import Optional, Type
from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool
from app.db.systemconfig_oper import SystemConfigOper
from app.log import logger
from app.schemas.types import SystemConfigKey
class QueryCustomIdentifiersInput(BaseModel):
"""查询自定义识别词工具的输入参数模型"""
explanation: str = Field(
...,
description="Clear explanation of why this tool is being used in the current context",
)
class QueryCustomIdentifiersTool(MoviePilotTool):
name: str = "query_custom_identifiers"
description: str = (
"Query all currently configured custom identifiers (自定义识别词). "
"Returns the list of identifier rules used for preprocessing torrent/file names before media recognition. "
"Use this tool to check existing rules before adding new ones to avoid duplicates."
)
args_schema: Type[BaseModel] = QueryCustomIdentifiersInput
def get_tool_message(self, **kwargs) -> Optional[str]:
"""生成友好的提示消息"""
return "查询自定义识别词"
@staticmethod
def _load_custom_identifiers():
"""从内存配置缓存中读取自定义识别词。"""
return SystemConfigOper().get(SystemConfigKey.CustomIdentifiers)
async def run(self, **kwargs) -> str:
logger.info(f"执行工具: {self.name}")
try:
identifiers = self._load_custom_identifiers()
if identifiers:
return json.dumps(
{
"success": True,
"count": len(identifiers),
"identifiers": identifiers,
},
ensure_ascii=False,
indent=2,
)
return json.dumps(
{
"success": True,
"count": 0,
"identifiers": [],
"message": "当前没有配置自定义识别词",
},
ensure_ascii=False,
indent=2,
)
except Exception as e:
logger.error(f"查询自定义识别词失败: {e}")
return json.dumps(
{"success": False, "message": f"查询自定义识别词时发生错误: {str(e)}"},
ensure_ascii=False,
)

View File

@@ -32,7 +32,7 @@ class QueryDirectorySettingsTool(MoviePilotTool):
storage_type = kwargs.get("storage_type", "all")
name = kwargs.get("name")
parts = ["正在查询目录配置"]
parts = ["查询目录配置"]
if directory_type != "all":
type_map = {"download": "下载目录", "library": "媒体库目录"}
@@ -47,88 +47,93 @@ class QueryDirectorySettingsTool(MoviePilotTool):
return " | ".join(parts) if len(parts) > 1 else parts[0]
@staticmethod
def _query_directory_settings(
directory_type: Optional[str] = "all",
storage_type: Optional[str] = "all",
name: Optional[str] = None,
) -> str:
"""
目录配置完全来自内存配置缓存,这里只做本地过滤和序列化。
"""
directory_helper = DirectoryHelper()
if directory_type == "download":
dirs = directory_helper.get_download_dirs()
elif directory_type == "library":
dirs = directory_helper.get_library_dirs()
else:
dirs = directory_helper.get_dirs()
filtered_dirs = []
for d in dirs:
if storage_type == "local":
if directory_type == "download" and d.storage != "local":
continue
if directory_type == "library" and d.library_storage != "local":
continue
if directory_type == "all":
if d.download_path and d.storage != "local":
continue
if d.library_path and d.library_storage != "local":
continue
elif storage_type == "remote":
if directory_type == "download" and d.storage == "local":
continue
if directory_type == "library" and d.library_storage == "local":
continue
if directory_type == "all":
if d.download_path and d.storage == "local":
continue
if d.library_path and d.library_storage == "local":
continue
if name and d.name and name.lower() not in d.name.lower():
continue
filtered_dirs.append(d)
if not filtered_dirs:
return "未找到相关目录配置"
simplified_dirs = []
for d in filtered_dirs:
simplified_dirs.append(
{
"name": d.name,
"priority": d.priority,
"storage": d.storage,
"download_path": d.download_path,
"library_path": d.library_path,
"library_storage": d.library_storage,
"media_type": d.media_type,
"media_category": d.media_category,
"monitor_type": d.monitor_type,
"monitor_mode": d.monitor_mode,
"transfer_type": d.transfer_type,
"overwrite_mode": d.overwrite_mode,
"renaming": d.renaming,
"scraping": d.scraping,
"notify": d.notify,
"download_type_folder": d.download_type_folder,
"download_category_folder": d.download_category_folder,
"library_type_folder": d.library_type_folder,
"library_category_folder": d.library_category_folder,
}
)
return json.dumps(simplified_dirs, ensure_ascii=False, indent=2)
async def run(self, directory_type: Optional[str] = "all",
storage_type: Optional[str] = "all",
name: Optional[str] = None, **kwargs) -> str:
logger.info(f"执行工具: {self.name}, 参数: directory_type={directory_type}, storage_type={storage_type}, name={name}")
try:
directory_helper = DirectoryHelper()
# 根据目录类型获取目录列表
if directory_type == "download":
dirs = directory_helper.get_download_dirs()
elif directory_type == "library":
dirs = directory_helper.get_library_dirs()
else:
dirs = directory_helper.get_dirs()
# 按存储类型过滤
filtered_dirs = []
for d in dirs:
# 按存储类型过滤
if storage_type == "local":
# 对于下载目录,检查 storage;对于媒体库目录,检查 library_storage
if directory_type == "download" and d.storage != "local":
continue
elif directory_type == "library" and d.library_storage != "local":
continue
elif directory_type == "all":
# 检查是否有本地存储配置
if d.download_path and d.storage != "local":
continue
if d.library_path and d.library_storage != "local":
continue
elif storage_type == "remote":
# 对于下载目录,检查 storage;对于媒体库目录,检查 library_storage
if directory_type == "download" and d.storage == "local":
continue
elif directory_type == "library" and d.library_storage == "local":
continue
elif directory_type == "all":
# 检查是否有远程存储配置
if d.download_path and d.storage == "local":
continue
if d.library_path and d.library_storage == "local":
continue
# 按名称过滤(部分匹配)
if name and d.name and name.lower() not in d.name.lower():
continue
filtered_dirs.append(d)
if filtered_dirs:
# 转换为字典格式,只保留关键信息
simplified_dirs = []
for d in filtered_dirs:
simplified = {
"name": d.name,
"priority": d.priority,
"storage": d.storage,
"download_path": d.download_path,
"library_path": d.library_path,
"library_storage": d.library_storage,
"media_type": d.media_type,
"media_category": d.media_category,
"monitor_type": d.monitor_type,
"monitor_mode": d.monitor_mode,
"transfer_type": d.transfer_type,
"overwrite_mode": d.overwrite_mode,
"renaming": d.renaming,
"scraping": d.scraping,
"notify": d.notify,
"download_type_folder": d.download_type_folder,
"download_category_folder": d.download_category_folder,
"library_type_folder": d.library_type_folder,
"library_category_folder": d.library_category_folder
}
simplified_dirs.append(simplified)
result_json = json.dumps(simplified_dirs, ensure_ascii=False, indent=2)
return result_json
return "未找到相关目录配置"
return self._query_directory_settings(
directory_type=directory_type,
storage_type=storage_type,
name=name,
)
except Exception as e:
logger.error(f"查询系统目录设置失败: {e}", exc_info=True)
return f"查询系统目录设置时发生错误: {str(e)}"
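The storage_type filter keeps "local" entries whose storage is local and "remote" entries whose storage is anything else. A condensed sketch of that predicate for download directories, using hypothetical config rows ("u115" here is a made-up remote storage name):

from dataclasses import dataclass

@dataclass
class Dir:
    name: str
    storage: str

dirs = [Dir("本地下载", "local"), Dir("云盘下载", "u115")]

def keep(d: Dir, storage_type: str) -> bool:
    if storage_type == "local":
        return d.storage == "local"
    if storage_type == "remote":
        return d.storage != "local"
    return True  # "all" keeps everything

print([d.name for d in dirs if keep(d, "remote")])  # ['云盘下载']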

View File

@@ -1,7 +1,7 @@
"""查询下载工具"""
import json
from typing import Optional, Type, List, Union
from typing import Any, Dict, List, Optional, Type, Union
from pydantic import BaseModel, Field
@@ -10,7 +10,7 @@ from app.chain.download import DownloadChain
from app.db.downloadhistory_oper import DownloadHistoryOper
from app.log import logger
from app.schemas import TransferTorrent, DownloadingTorrent
from app.schemas.types import TorrentStatus
from app.schemas.types import TorrentStatus, media_type_to_agent
class QueryDownloadTasksInput(BaseModel):
@@ -22,19 +22,21 @@ class QueryDownloadTasksInput(BaseModel):
description="Filter downloads by status: 'downloading' for active downloads, 'completed' for finished downloads, 'paused' for paused downloads, 'all' for all downloads")
hash: Optional[str] = Field(None, description="Query specific download task by hash (optional, if provided will search for this specific task regardless of status)")
title: Optional[str] = Field(None, description="Query download tasks by title/name (optional, supports partial match, searches all tasks if provided)")
tag: Optional[str] = Field(None, description="Filter download tasks by tag (optional, supports partial match, e.g. 'movie' will match tasks with tag 'movie' or 'movie_2024')")
class QueryDownloadTasksTool(MoviePilotTool):
name: str = "query_download_tasks"
description: str = "Query download status and list download tasks. Can query all active downloads, or search for specific tasks by hash or title. Shows download progress, completion status, and task details from configured downloaders."
description: str = "Query download status and list download tasks. Can query all active downloads, or search for specific tasks by hash, title, or tag. Shows download progress, completion status, tags, and task details from configured downloaders."
args_schema: Type[BaseModel] = QueryDownloadTasksInput
def _get_all_torrents(self, download_chain: DownloadChain, downloader: Optional[str] = None) -> List[Union[TransferTorrent, DownloadingTorrent]]:
@staticmethod
def _get_all_torrents(download_chain: DownloadChain, downloader: Optional[str] = None) -> List[Union[TransferTorrent, DownloadingTorrent]]:
"""
查询所有状态的任务(包括下载中和已完成的任务)
"""
all_torrents = []
# 查询正在下载的任务
# 查询下载的任务
downloading_torrents = download_chain.list_torrents(
downloader=downloader,
status=TorrentStatus.DOWNLOADING
@@ -50,6 +52,138 @@ class QueryDownloadTasksTool(MoviePilotTool):
return all_torrents
@staticmethod
def _format_progress(progress: Optional[float]) -> Optional[str]:
"""
将下载进度格式化为保留一位小数的百分比字符串
"""
try:
if progress is None:
return None
return f"{float(progress):.1f}%"
except (TypeError, ValueError):
return None
@staticmethod
def _apply_download_history(
torrent: Union[TransferTorrent, DownloadingTorrent], history: Any
) -> None:
"""将下载历史中的补充信息回填到下载任务结果中。"""
if not history:
return
if hasattr(torrent, "media"):
torrent.media = {
"tmdbid": history.tmdbid,
"type": history.type,
"title": history.title,
"season": history.seasons,
"episode": history.episodes,
"image": history.image,
}
if hasattr(torrent, "username"):
torrent.username = history.username
torrent.userid = history.userid
@classmethod
def _load_history_map(
cls, torrents: List[Union[TransferTorrent, DownloadingTorrent]]
) -> Dict[str, Any]:
"""批量加载下载历史,避免逐条查询形成 N+1。"""
hashes = [torrent.hash for torrent in torrents if getattr(torrent, "hash", None)]
if not hashes:
return {}
return DownloadHistoryOper().get_by_hashes(hashes)
@classmethod
def _query_downloads_sync(
cls,
downloader: Optional[str] = None,
status: Optional[str] = "all",
hash_value: Optional[str] = None,
title: Optional[str] = None,
tag: Optional[str] = None,
) -> Dict[str, Any]:
"""
同步查询下载器和下载历史,整个链路放在线程池中执行。
"""
download_chain = DownloadChain()
if hash_value:
torrents = (
download_chain.list_torrents(downloader=downloader, hashs=[hash_value])
or []
)
if not torrents:
return {
"message": f"未找到hash为 {hash_value} 的下载任务(该任务可能已完成、已删除或不存在)"
}
history_map = cls._load_history_map(torrents)
for torrent in torrents:
cls._apply_download_history(torrent, history_map.get(torrent.hash))
filtered_downloads = list(torrents)
elif title:
all_torrents = cls._get_all_torrents(download_chain, downloader)
history_map = cls._load_history_map(all_torrents)
filtered_downloads = []
title_lower = title.lower()
for torrent in all_torrents:
history = history_map.get(torrent.hash)
matched = title_lower in (torrent.title or "").lower() or title_lower in (
getattr(torrent, "name", None) or ""
).lower()
if not matched and history and history.title:
matched = title_lower in history.title.lower()
if not matched:
continue
cls._apply_download_history(torrent, history)
filtered_downloads.append(torrent)
if not filtered_downloads:
return {"message": f"未找到标题包含 '{title}' 的下载任务"}
else:
if status == "downloading":
downloads = download_chain.downloading(name=downloader) or []
filtered_downloads = [
dl
for dl in downloads
if not downloader or dl.downloader == downloader
]
else:
all_torrents = cls._get_all_torrents(download_chain, downloader)
filtered_downloads = []
for torrent in all_torrents:
if downloader and torrent.downloader != downloader:
continue
if status == "completed" and torrent.state not in [
"seeding",
"completed",
]:
continue
if status == "paused" and torrent.state != "paused":
continue
filtered_downloads.append(torrent)
history_map = cls._load_history_map(filtered_downloads)
for torrent in filtered_downloads:
cls._apply_download_history(torrent, history_map.get(torrent.hash))
if tag and filtered_downloads:
tag_lower = tag.lower()
filtered_downloads = [
d for d in filtered_downloads if d.tags and tag_lower in d.tags.lower()
]
if not filtered_downloads:
return {"message": f"未找到标签包含 '{tag}' 的下载任务"}
if not filtered_downloads:
return {"message": "未找到相关下载任务"}
return {"downloads": filtered_downloads}
def get_tool_message(self, **kwargs) -> Optional[str]:
"""根据查询参数生成友好的提示消息"""
downloader = kwargs.get("downloader")
@@ -57,7 +191,7 @@ class QueryDownloadTasksTool(MoviePilotTool):
hash_value = kwargs.get("hash")
title = kwargs.get("title")
parts = ["正在查询下载任务"]
parts = ["查询下载任务"]
if downloader:
parts.append(f"下载器: {downloader}")
@@ -70,118 +204,33 @@ class QueryDownloadTasksTool(MoviePilotTool):
parts.append(f"Hash: {hash_value[:8]}...")
elif title:
parts.append(f"标题: {title}")
tag = kwargs.get("tag")
if tag:
parts.append(f"标签: {tag}")
return " | ".join(parts) if len(parts) > 1 else parts[0]
async def run(self, downloader: Optional[str] = None,
status: Optional[str] = "all",
hash: Optional[str] = None,
title: Optional[str] = None, **kwargs) -> str:
logger.info(f"执行工具: {self.name}, 参数: downloader={downloader}, status={status}, hash={hash}, title={title}")
title: Optional[str] = None,
tag: Optional[str] = None, **kwargs) -> str:
logger.info(f"执行工具: {self.name}, 参数: downloader={downloader}, status={status}, hash={hash}, title={title}, tag={tag}")
try:
download_chain = DownloadChain()
# 如果提供了hash,直接查询该hash的任务,不限制状态
if hash:
torrents = download_chain.list_torrents(downloader=downloader, hashs=[hash]) or []
if not torrents:
return f"未找到hash为 {hash} 的下载任务(该任务可能已完成、已删除或不存在)"
# 转换为DownloadingTorrent格式
downloads = []
for torrent in torrents:
# 获取下载历史信息
history = DownloadHistoryOper().get_by_hash(torrent.hash)
if history:
torrent.media = {
"tmdbid": history.tmdbid,
"type": history.type,
"title": history.title,
"season": history.seasons,
"episode": history.episodes,
"image": history.image,
}
torrent.userid = history.userid
torrent.username = history.username
downloads.append(torrent)
filtered_downloads = downloads
elif title:
# 如果提供了title,查询所有任务并搜索匹配的标题
# 查询所有状态的任务
all_torrents = self._get_all_torrents(download_chain, downloader)
filtered_downloads = []
title_lower = title.lower()
for torrent in all_torrents:
# 获取下载历史信息
history = DownloadHistoryOper().get_by_hash(torrent.hash)
# 检查标题或名称是否匹配(包括下载历史中的标题)
matched = False
# 检查torrent的title和name字段
if (title_lower in (torrent.title or "").lower()) or \
(title_lower in (torrent.name or "").lower()):
matched = True
# 检查下载历史中的标题
if history and history.title:
if title_lower in history.title.lower():
matched = True
if matched:
if history:
torrent.media = {
"tmdbid": history.tmdbid,
"type": history.type,
"title": history.title,
"season": history.seasons,
"episode": history.episodes,
"image": history.image,
}
torrent.userid = history.userid
torrent.username = history.username
filtered_downloads.append(torrent)
if not filtered_downloads:
return f"未找到标题包含 '{title}' 的下载任务"
else:
# 根据status决定查询方式
if status == "downloading":
# 如果status为下载中,使用downloading方法
downloads = download_chain.downloading(name=downloader) or []
filtered_downloads = []
for dl in downloads:
if downloader and dl.downloader != downloader:
continue
filtered_downloads.append(dl)
else:
# 其他状态(completed、paused、all)使用list_torrents查询所有任务
# 查询所有状态的任务
all_torrents = self._get_all_torrents(download_chain, downloader)
filtered_downloads = []
for torrent in all_torrents:
if downloader and torrent.downloader != downloader:
continue
# 根据status过滤
if status == "completed":
# 已完成的任务,state为seeding或completed
if torrent.state not in ["seeding", "completed"]:
continue
elif status == "paused":
# 已暂停的任务
if torrent.state != "paused":
continue
# status == "all" 时不过滤
# 获取下载历史信息
history = DownloadHistoryOper().get_by_hash(torrent.hash)
if history:
torrent.media = {
"tmdbid": history.tmdbid,
"type": history.type,
"title": history.title,
"season": history.seasons,
"episode": history.episodes,
"image": history.image,
}
torrent.userid = history.userid
torrent.username = history.username
filtered_downloads.append(torrent)
payload = await self.run_blocking(
"downloader",
self._query_downloads_sync,
downloader,
status,
hash,
title,
tag,
)
if payload.get("message"):
return payload["message"]
filtered_downloads = payload.get("downloads") or []
if filtered_downloads:
# 限制最多20条结果
total_count = len(filtered_downloads)
@@ -193,24 +242,26 @@ class QueryDownloadTasksTool(MoviePilotTool):
"downloader": d.downloader,
"hash": d.hash,
"title": d.title,
"name": d.name,
"year": d.year,
"season_episode": d.season_episode,
"name": getattr(d, "name", None),
"year": getattr(d, "year", None),
"season_episode": getattr(d, "season_episode", None),
"size": d.size,
"progress": d.progress,
"progress": self._format_progress(d.progress),
"state": d.state,
"upspeed": d.upspeed,
"dlspeed": d.dlspeed,
"left_time": d.left_time
"upspeed": getattr(d, "upspeed", None),
"dlspeed": getattr(d, "dlspeed", None),
"tags": d.tags,
"left_time": getattr(d, "left_time", None)
}
# 精简 media 字段
if d.media:
media = getattr(d, "media", None)
if media:
simplified["media"] = {
"tmdbid": d.media.get("tmdbid"),
"type": d.media.get("type"),
"title": d.media.get("title"),
"season": d.media.get("season"),
"episode": d.media.get("episode")
"tmdbid": media.get("tmdbid"),
"type": media_type_to_agent(media.get("type")),
"title": media.get("title"),
"season": media.get("season"),
"episode": media.get("episode")
}
simplified_downloads.append(simplified)
result_json = json.dumps(simplified_downloads, ensure_ascii=False, indent=2)
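_load_history_map replaces one history query per torrent with a single get_by_hashes batch, and results are then joined back by hash. A sketch of that lookup pattern with a fake in-memory history table (the table and titles are invented for illustration):

HISTORY_TABLE = {
    "hash-1": {"title": "Movie A"},
    "hash-2": {"title": "Show B"},
}

def get_by_hashes(hashes):
    # One batched lookup instead of one query per torrent (avoids the N+1 pattern).
    return {h: HISTORY_TABLE[h] for h in hashes if h in HISTORY_TABLE}

torrent_hashes = ["hash-1", "hash-2", "hash-3"]
history_map = get_by_hashes(torrent_hashes)
for h in torrent_hashes:
    print(h, history_map.get(h))  # hash-3 has no history and maps to None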

View File

@@ -23,13 +23,17 @@ class QueryDownloadersTool(MoviePilotTool):
def get_tool_message(self, **kwargs) -> Optional[str]:
"""生成友好的提示消息"""
return "正在查询下载器配置"
return "查询下载器配置"
@staticmethod
def _load_downloaders_config():
"""从内存配置缓存中读取下载器配置。"""
return SystemConfigOper().get(SystemConfigKey.Downloaders)
async def run(self, **kwargs) -> str:
logger.info(f"执行工具: {self.name}")
try:
system_config_oper = SystemConfigOper()
downloaders_config = system_config_oper.get(SystemConfigKey.Downloaders)
downloaders_config = self._load_downloaders_config()
if downloaders_config:
return json.dumps(downloaders_config, ensure_ascii=False, indent=2)
return "未配置下载器。"

View File

@@ -6,23 +6,21 @@ from typing import Optional, Type
from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool
from app.chain.media import MediaChain
from app.chain.tmdb import TmdbChain
from app.log import logger
from app.schemas import MediaType
class QueryEpisodeScheduleInput(BaseModel):
"""查询剧集上映时间工具的输入参数模型"""
explanation: str = Field(..., description="Clear explanation of why this tool is being used in the current context")
tmdb_id: int = Field(..., description="TMDB ID of the TV series")
tmdb_id: int = Field(..., description="TMDB ID of the TV series (can be obtained from search_media tool)")
season: int = Field(..., description="Season number to query")
episode_group: Optional[str] = Field(None, description="Episode group ID (optional)")
class QueryEpisodeScheduleTool(MoviePilotTool):
name: str = "query_episode_schedule"
description: str = "Query TV series episode air dates and schedule. Returns detailed information for each episode including air date, episode number, title, overview, and other metadata. Filters out episodes without air dates."
description: str = "Query TV series episode air dates and schedule. Returns non-duplicated schedule fields, including episode list, air-date statistics, and per-episode metadata. Filters out episodes without air dates."
args_schema: Type[BaseModel] = QueryEpisodeScheduleInput
def get_tool_message(self, **kwargs) -> Optional[str]:
@@ -31,7 +29,7 @@ class QueryEpisodeScheduleTool(MoviePilotTool):
season = kwargs.get("season")
episode_group = kwargs.get("episode_group")
message = f"正在查询剧集上映时间: TMDB ID {tmdb_id}{season}"
message = f"查询剧集上映时间: TMDB ID {tmdb_id}{season}"
if episode_group:
message += f" (剧集组: {episode_group})"
@@ -41,12 +39,6 @@ class QueryEpisodeScheduleTool(MoviePilotTool):
logger.info(f"执行工具: {self.name}, 参数: tmdb_id={tmdb_id}, season={season}, episode_group={episode_group}")
try:
# 获取媒体信息(用于获取标题和海报)
media_chain = MediaChain()
mediainfo = await media_chain.async_recognize_media(tmdbid=tmdb_id, mtype=MediaType.TV)
if not mediainfo:
return f"未找到 TMDB ID {tmdb_id} 的媒体信息"
# 获取集列表
tmdb_chain = TmdbChain()
episodes = await tmdb_chain.async_tmdb_episodes(
@@ -92,12 +84,7 @@ class QueryEpisodeScheduleTool(MoviePilotTool):
episode_list.sort(key=lambda x: (x["air_date"] or "", x["episode_number"] or 0))
result = {
"success": True,
"tmdb_id": tmdb_id,
"season": season,
"episode_group": episode_group,
"series_title": mediainfo.title if mediainfo else None,
"series_poster": mediainfo.poster_path if mediainfo else None,
"total_episodes": len(episodes),
"episodes_with_air_date": len(episode_list),
"episodes": episode_list

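Episodes without an air date are dropped and the rest are ordered by air date, then episode number. A worked example of that filter-and-sort step on sample rows:

episodes = [
    {"air_date": "2024-02-01", "episode_number": 2},
    {"air_date": "2024-01-01", "episode_number": 1},
    {"air_date": None, "episode_number": 3},
]
episode_list = [e for e in episodes if e.get("air_date")]  # drop undated episodes
episode_list.sort(key=lambda x: (x["air_date"] or "", x["episode_number"] or 0))
print([e["episode_number"] for e in episode_list])  # [1, 2]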
View File

@@ -0,0 +1,108 @@
"""查询已安装插件工具"""
import json
from typing import Optional, Type
from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool
from app.agent.tools.impl._plugin_tool_utils import (
DEFAULT_PLUGIN_CANDIDATE_LIMIT,
list_installed_plugins,
search_plugin_candidates,
summarize_candidates,
summarize_plugin,
)
from app.log import logger
class QueryInstalledPluginsInput(BaseModel):
"""查询已安装插件工具的输入参数模型"""
explanation: str = Field(
...,
description="Clear explanation of why this tool is being used in the current context",
)
query: Optional[str] = Field(
None,
description="Optional keyword to filter installed plugins by plugin ID, name, description, or author.",
)
max_results: Optional[int] = Field(
DEFAULT_PLUGIN_CANDIDATE_LIMIT,
description="Maximum number of plugins to return. Defaults to 10.",
)
class QueryInstalledPluginsTool(MoviePilotTool):
name: str = "query_installed_plugins"
description: str = (
"Query installed plugins in MoviePilot. Returns all installed plugins or filters them by keywords. "
"Use this tool to find the exact plugin_id before uninstall_plugin or other plugin management tools are used."
)
require_admin: bool = True
args_schema: Type[BaseModel] = QueryInstalledPluginsInput
def get_tool_message(self, **kwargs) -> Optional[str]:
"""生成友好的提示消息"""
query = kwargs.get("query")
if query:
return f"查询已安装插件: {query}"
return "查询已安装插件"
@staticmethod
def _clamp_results(max_results: Optional[int]) -> int:
if max_results is None:
return DEFAULT_PLUGIN_CANDIDATE_LIMIT
return max(1, min(int(max_results), 200))
async def run(
self,
query: Optional[str] = None,
max_results: Optional[int] = DEFAULT_PLUGIN_CANDIDATE_LIMIT,
**kwargs,
) -> str:
logger.info(f"执行工具: {self.name}, 参数: query={query}")
try:
installed_plugins = list_installed_plugins()
if not installed_plugins:
return json.dumps(
{"success": False, "message": "当前没有已安装的插件"},
ensure_ascii=False,
)
limit = self._clamp_results(max_results)
if query:
matches = search_plugin_candidates(query, installed_plugins)
return json.dumps(
{
"success": True,
"query": query,
"total_installed": len(installed_plugins),
"match_count": len(matches),
"truncated": len(matches) > limit,
"plugins": summarize_candidates(matches, limit=limit),
},
ensure_ascii=False,
indent=2,
)
plugin_summaries = [
summarize_plugin(plugin) for plugin in installed_plugins[:limit]
]
return json.dumps(
{
"success": True,
"total_installed": len(installed_plugins),
"returned_count": len(plugin_summaries),
"truncated": len(installed_plugins) > limit,
"plugins": plugin_summaries,
},
ensure_ascii=False,
indent=2,
)
except Exception as e:
logger.error(f"查询已安装插件失败: {e}", exc_info=True)
return json.dumps(
{"success": False, "message": f"查询已安装插件时发生错误: {str(e)}"},
ensure_ascii=False,
)

View File

@@ -1,96 +1,199 @@
"""查询媒体库工具"""
import asyncio
import json
from typing import Optional, Type
from collections import OrderedDict
from typing import Optional, Type, Any
from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool
from app.chain.mediaserver import MediaServerChain
from app.core.context import MediaInfo
from app.helper.mediaserver import MediaServerHelper
from app.log import logger
from app.schemas.types import MediaType
from app.schemas.types import MediaType, media_type_to_agent
def _sort_seasons(seasons: Optional[dict]) -> dict:
"""按季号、集号升序整理季集信息,保证输出稳定。"""
if not seasons:
return {}
def _sort_key(value):
try:
return int(value)
except (TypeError, ValueError):
return str(value)
return OrderedDict(
(season, sorted(episodes, key=_sort_key))
for season, episodes in sorted(seasons.items(), key=lambda item: _sort_key(item[0]))
)
def _filter_regular_seasons(seasons: Optional[dict]) -> OrderedDict:
"""仅保留正片季,忽略 season 0 等特殊季。"""
sorted_seasons = _sort_seasons(seasons)
regular_seasons = OrderedDict()
for season, episodes in sorted_seasons.items():
try:
season_number = int(season)
except (TypeError, ValueError):
continue
if season_number > 0:
regular_seasons[season_number] = episodes
return regular_seasons
def _build_tv_server_result(existing_seasons: OrderedDict, total_seasons: OrderedDict) -> dict[str, Any]:
"""构建单个服务器的电视剧存在性结果。"""
seasons_result = OrderedDict()
missing_seasons = []
all_seasons = sorted(set(total_seasons.keys()) | set(existing_seasons.keys()))
for season in all_seasons:
existing_episodes = existing_seasons.get(season, [])
total_episodes = total_seasons.get(season)
if total_episodes is not None:
missing_episodes = [episode for episode in total_episodes if episode not in existing_episodes]
total_episode_count = len(total_episodes)
else:
missing_episodes = None
total_episode_count = None
seasons_result[str(season)] = {
"existing_episodes": existing_episodes,
"total_episodes": total_episode_count,
"missing_episodes": missing_episodes
}
if total_episodes is not None and not existing_episodes:
missing_seasons.append(season)
return {
"seasons": seasons_result,
"missing_seasons": missing_seasons
}
class QueryLibraryExistsInput(BaseModel):
"""查询媒体库工具的输入参数模型"""
explanation: str = Field(..., description="Clear explanation of why this tool is being used in the current context")
media_type: Optional[str] = Field("all",
description="Type of media content: '电影' for films, '电视剧' for television series or anime series, 'all' for all types")
title: Optional[str] = Field(None,
description="Specific media title to check if it exists in the media library (optional, if provided checks for that specific media)")
year: Optional[str] = Field(None,
description="Release year of the media (optional, helps narrow down search results)")
tmdb_id: Optional[int] = Field(None, description="TMDB ID (can be obtained from search_media tool). Either tmdb_id or douban_id must be provided.")
douban_id: Optional[str] = Field(None, description="Douban ID (can be obtained from search_media tool). Either tmdb_id or douban_id must be provided.")
media_type: Optional[str] = Field(None, description="Allowed values: movie, tv")
class QueryLibraryExistsTool(MoviePilotTool):
name: str = "query_library_exists"
description: str = "Check if a specific media resource already exists in the media library (Plex, Emby, Jellyfin). Use this tool to verify whether a movie or TV series has been successfully processed and added to the media server before performing operations like downloading or subscribing."
description: str = "Check whether media already exists in Plex, Emby, or Jellyfin by media ID. Results are grouped by media server; TV results include existing episodes, total episodes, and missing episodes/seasons. Requires tmdb_id or douban_id from search_media."
args_schema: Type[BaseModel] = QueryLibraryExistsInput
def get_tool_message(self, **kwargs) -> Optional[str]:
"""根据查询参数生成友好的提示消息"""
media_type = kwargs.get("media_type", "all")
title = kwargs.get("title")
year = kwargs.get("year")
parts = ["正在查询媒体库"]
if title:
parts.append(f"标题: {title}")
if year:
parts.append(f"年份: {year}")
if media_type != "all":
parts.append(f"类型: {media_type}")
return " | ".join(parts) if len(parts) > 1 else parts[0]
tmdb_id = kwargs.get("tmdb_id")
douban_id = kwargs.get("douban_id")
media_type = kwargs.get("media_type")
async def run(self, media_type: Optional[str] = "all",
title: Optional[str] = None, year: Optional[str] = None, **kwargs) -> str:
logger.info(f"执行工具: {self.name}, 参数: media_type={media_type}, title={title}")
if tmdb_id:
message = f"查询媒体库: TMDB={tmdb_id}"
elif douban_id:
message = f"查询媒体库: 豆瓣={douban_id}"
else:
message = "查询媒体库"
if media_type:
message += f" [{media_type}]"
return message
@staticmethod
def _get_media_server_names() -> list[str]:
"""同步读取已加载媒体服务器名称。"""
return sorted(MediaServerHelper().get_services().keys())
@staticmethod
def _query_media_exists(mediainfo, server: Optional[str] = None):
"""同步查询单个媒体服务器的存在性信息。"""
return MediaServerChain().media_exists(mediainfo=mediainfo, server=server)
async def run(self, tmdb_id: Optional[int] = None, douban_id: Optional[str] = None,
media_type: Optional[str] = None, **kwargs) -> str:
logger.info(f"执行工具: {self.name}, 参数: tmdb_id={tmdb_id}, douban_id={douban_id}, media_type={media_type}")
try:
if not title:
return "请提供媒体标题进行查询"
# 创建 MediaInfo 对象
mediainfo = MediaInfo()
mediainfo.title = title
mediainfo.year = year
# 转换媒体类型
if media_type == "电影":
mediainfo.type = MediaType.MOVIE
elif media_type == "电视剧":
mediainfo.type = MediaType.TV
# media_type == "all" 时不设置类型,让媒体服务器自动判断
# 调用媒体服务器接口实时查询
if not tmdb_id and not douban_id:
return "参数错误:tmdb_id 和 douban_id 至少需要提供一个,请先使用 search_media 工具获取媒体 ID。"
media_type_enum = None
if media_type:
media_type_enum = MediaType.from_agent(media_type)
if not media_type_enum:
return f"错误:无效的媒体类型 '{media_type}',支持的类型:'movie', 'tv'"
media_chain = MediaServerChain()
existsinfo = media_chain.media_exists(mediainfo=mediainfo)
if not existsinfo:
return "媒体库中未找到相关媒体"
# 如果找到了,获取详细信息
result_items = []
if existsinfo.itemid and existsinfo.server:
iteminfo = media_chain.iteminfo(server=existsinfo.server, item_id=existsinfo.itemid)
if iteminfo:
# 使用 model_dump() 转换为字典格式
item_dict = iteminfo.model_dump(exclude_none=True)
result_items.append(item_dict)
if result_items:
return json.dumps(result_items, ensure_ascii=False)
# 如果找到了但没有详细信息,返回基本信息
mediainfo = await media_chain.async_recognize_media(
tmdbid=tmdb_id,
doubanid=douban_id,
mtype=media_type_enum,
)
if not mediainfo:
media_id = f"TMDB={tmdb_id}" if tmdb_id else f"豆瓣={douban_id}"
return f"未识别到媒体信息: {media_id}"
# 2. 遍历所有媒体服务器,分别查询存在性信息
server_results = OrderedDict()
total_seasons = _filter_regular_seasons(mediainfo.seasons)
service_names = self._get_media_server_names()
server_checks = await asyncio.gather(
*[
self.run_blocking(
"mediaserver",
self._query_media_exists,
mediainfo,
service_name,
)
for service_name in service_names
]
)
for service_name, existsinfo in zip(service_names, server_checks):
if not existsinfo:
continue
if existsinfo.type == MediaType.TV:
existing_seasons = _filter_regular_seasons(existsinfo.seasons)
server_results[service_name] = _build_tv_server_result(
existing_seasons=existing_seasons,
total_seasons=total_seasons
)
else:
server_results[service_name] = {
"exists": True
}
if not server_results:
global_existsinfo = await self.run_blocking(
"mediaserver", self._query_media_exists, mediainfo, None
)
if not global_existsinfo:
return "媒体库中未找到相关媒体"
fallback_server_name = global_existsinfo.server or "local"
if global_existsinfo.type == MediaType.TV:
server_results[fallback_server_name] = _build_tv_server_result(
existing_seasons=_filter_regular_seasons(global_existsinfo.seasons),
total_seasons=total_seasons
)
else:
server_results[fallback_server_name] = {
"exists": True
}
# 3. 组装统一的存在性结果,不查询媒体服务器详情
result_dict = {
"type": existsinfo.type.value if existsinfo.type else None,
"server": existsinfo.server,
"server_type": existsinfo.server_type,
"itemid": existsinfo.itemid,
"seasons": existsinfo.seasons if existsinfo.seasons else {}
"title": mediainfo.title,
"year": mediainfo.year,
"type": media_type_to_agent(mediainfo.type),
"servers": server_results
}
return json.dumps([result_dict], ensure_ascii=False)
except Exception as e:
logger.error(f"查询媒体库失败: {e}", exc_info=True)
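_build_tv_server_result derives missing episodes per season by subtracting what the server holds from the full TMDB season map. A worked example of that arithmetic with sample season maps (the numbers are invented):

total_seasons = {1: [1, 2, 3, 4], 2: [1, 2]}  # what the metadata source says exists
existing_seasons = {1: [1, 2]}                # what one media server holds

for season in sorted(set(total_seasons) | set(existing_seasons)):
    existing = existing_seasons.get(season, [])
    total = total_seasons.get(season, [])
    missing = [ep for ep in total if ep not in existing]
    print(season, {"existing_episodes": existing, "missing_episodes": missing})
# 1 {'existing_episodes': [1, 2], 'missing_episodes': [3, 4]}
# 2 {'existing_episodes': [], 'missing_episodes': [1, 2]}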

View File

@@ -1,5 +1,6 @@
"""查询媒体服务器最近入库影片工具"""
import asyncio
import json
from typing import Optional, Type
@@ -10,77 +11,131 @@ from app.chain.mediaserver import MediaServerChain
from app.helper.service import ServiceConfigHelper
from app.log import logger
PAGE_SIZE = 20
class QueryLibraryLatestInput(BaseModel):
"""查询媒体服务器最近入库影片工具的输入参数模型"""
explanation: str = Field(..., description="Clear explanation of why this tool is being used in the current context")
server: Optional[str] = Field(None, description="Media server name (optional, if not specified queries all enabled media servers)")
count: Optional[int] = Field(20, description="Number of items to return (default: 20)")
explanation: str = Field(
...,
description="Clear explanation of why this tool is being used in the current context",
)
server: Optional[str] = Field(
None,
description="Media server name (optional, if not specified queries all enabled media servers)",
)
page: Optional[int] = Field(
1, description="Page number for pagination (default: 1, 20 items per page)"
)
class QueryLibraryLatestTool(MoviePilotTool):
name: str = "query_library_latest"
description: str = "Query the latest media items added to the media server (Plex, Emby, Jellyfin). Returns recently added movies and TV series with their titles, images, links, and other metadata."
description: str = "Query the latest media items added to the media server (Plex, Emby, Jellyfin). Returns recently added movies and TV series with their titles, images, links, and other metadata. Supports pagination with 20 items per page."
args_schema: Type[BaseModel] = QueryLibraryLatestInput
def get_tool_message(self, **kwargs) -> Optional[str]:
"""根据查询参数生成友好的提示消息"""
server = kwargs.get("server")
count = kwargs.get("count", 20)
parts = ["正在查询媒体服务器最近入库影片"]
page = kwargs.get("page", 1)
parts = ["查询媒体服务器最近入库影片"]
if server:
parts.append(f"服务器: {server}")
else:
parts.append("所有服务器")
parts.append(f"数量: {count}")
parts.append(f"{page}")
return " | ".join(parts)
async def run(self, server: Optional[str] = None, count: Optional[int] = 20, **kwargs) -> str:
logger.info(f"执行工具: {self.name}, 参数: server={server}, count={count}")
@staticmethod
def _get_enabled_servers() -> list[str]:
"""同步读取启用的媒体服务器列表。"""
mediaservers = ServiceConfigHelper.get_mediaserver_configs()
return [ms.name for ms in mediaservers if ms.enabled]
@staticmethod
def _load_latest_items(
server_name: str, count: int, username: Optional[str] = None
) -> list[dict]:
"""
媒体服务器 SDK 和 requests 调用都是同步的,这里在线程池中转换为可序列化结果。
"""
latest_items = MediaServerChain().latest(
server=server_name, count=count, username=username
)
if not latest_items:
return []
return [
{
**item.model_dump(exclude_none=True),
"server": server_name,
}
for item in latest_items
]
async def run(
self, server: Optional[str] = None, page: Optional[int] = 1, **kwargs
) -> str:
page = max(1, page or 1)
# 为了支持分页,需要获取足够多的数据再切片
fetch_count = page * PAGE_SIZE
logger.info(f"执行工具: {self.name}, 参数: server={server}, page={page}")
try:
media_chain = MediaServerChain()
results = []
# 如果没有指定服务器,获取所有启用的媒体服务器
if not server:
mediaservers = ServiceConfigHelper.get_mediaserver_configs()
enabled_servers = [ms.name for ms in mediaservers if ms.enabled]
enabled_servers = self._get_enabled_servers()
if not enabled_servers:
return "未找到启用的媒体服务器"
server_results = await asyncio.gather(
*[
self.run_blocking(
"mediaserver",
self._load_latest_items,
server_name,
fetch_count,
self._username,
)
for server_name in enabled_servers
]
)
results = [
item for items in server_results for item in items if items
]
else:
# 查询指定服务器
results = await self.run_blocking(
"mediaserver",
self._load_latest_items,
server,
fetch_count,
self._username,
)
if not results:
server_info = f"服务器 {server}" if server else "所有服务器"
return f"未找到 {server_info} 的最近入库影片"
# 分页
total_count = len(results)
start = (page - 1) * PAGE_SIZE
end = start + PAGE_SIZE
page_results = results[start:end]
if not page_results:
total_pages = (total_count + PAGE_SIZE - 1) // PAGE_SIZE
return f"{page} 页没有数据,共 {total_count} 条结果,共 {total_pages} 页。"
total_pages = (total_count + PAGE_SIZE - 1) // PAGE_SIZE
payload_msg = f"{page}/{total_pages} 页,当前页 {len(page_results)} 条结果,共 {total_count} 条。"
if page < total_pages:
payload_msg += f" 可使用 page={page + 1} 获取下一页。"
result_json = json.dumps(page_results, ensure_ascii=False, indent=2)
return f"{payload_msg}\n\n{result_json}"
except Exception as e:
logger.error(f"查询媒体服务器最近入库影片失败: {e}", exc_info=True)
return f"查询媒体服务器最近入库影片时发生错误: {str(e)}"

View File

@@ -0,0 +1,113 @@
"""查询插件市场工具"""
import json
from typing import Optional, Type
from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool
from app.agent.tools.impl._plugin_tool_utils import (
DEFAULT_PLUGIN_CANDIDATE_LIMIT,
load_market_plugins,
search_plugin_candidates,
summarize_candidates,
summarize_plugin,
)
from app.log import logger
class QueryMarketPluginsInput(BaseModel):
"""查询插件市场工具的输入参数模型"""
explanation: str = Field(
...,
description="Clear explanation of why this tool is being used in the current context",
)
query: Optional[str] = Field(
None,
description="Optional keyword to filter plugin market results by plugin ID, name, description, or author.",
)
max_results: Optional[int] = Field(
DEFAULT_PLUGIN_CANDIDATE_LIMIT,
description="Maximum number of plugins to return. Defaults to 10.",
)
force_refresh: Optional[bool] = Field(
False,
description="Whether to refresh plugin market caches before querying.",
)
class QueryMarketPluginsTool(MoviePilotTool):
name: str = "query_market_plugins"
description: str = (
"Query available plugins from the plugin market and local plugin repositories. "
"Can return the full plugin list or filter by keywords before install_plugin is used."
)
require_admin: bool = True
args_schema: Type[BaseModel] = QueryMarketPluginsInput
def get_tool_message(self, **kwargs) -> Optional[str]:
query = kwargs.get("query")
if query:
return f"查询插件市场: {query}"
return "查询插件市场全部插件"
@staticmethod
def _clamp_results(max_results: Optional[int]) -> int:
if max_results is None:
return DEFAULT_PLUGIN_CANDIDATE_LIMIT
return max(1, min(int(max_results), 200))
async def run(
self,
query: Optional[str] = None,
max_results: Optional[int] = DEFAULT_PLUGIN_CANDIDATE_LIMIT,
force_refresh: bool = False,
**kwargs,
) -> str:
logger.info(
f"执行工具: {self.name}, 参数: query={query}, force_refresh={force_refresh}"
)
try:
plugins = await load_market_plugins(force_refresh=force_refresh)
if not plugins:
return json.dumps(
{"success": False, "message": "当前插件市场没有可用插件"},
ensure_ascii=False,
)
limit = self._clamp_results(max_results)
if query:
matches = search_plugin_candidates(query, plugins)
return json.dumps(
{
"success": True,
"query": query,
"total_available": len(plugins),
"match_count": len(matches),
"truncated": len(matches) > limit,
"plugins": summarize_candidates(matches, limit=limit),
},
ensure_ascii=False,
indent=2,
)
plugin_summaries = [summarize_plugin(plugin) for plugin in plugins[:limit]]
return json.dumps(
{
"success": True,
"total_available": len(plugins),
"returned_count": len(plugin_summaries),
"truncated": len(plugins) > limit,
"plugins": plugin_summaries,
},
ensure_ascii=False,
indent=2,
)
except Exception as e:
logger.error(f"查询插件市场失败: {e}", exc_info=True)
return json.dumps(
{"success": False, "message": f"查询插件市场时发生错误: {str(e)}"},
ensure_ascii=False,
)

View File

@@ -0,0 +1,126 @@
"""查询媒体详情工具"""
import json
from typing import Optional, Type
from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool
from app.chain.media import MediaChain
from app.log import logger
from app.schemas.types import MediaType
class QueryMediaDetailInput(BaseModel):
"""查询媒体详情工具的输入参数模型"""
explanation: str = Field(..., description="Clear explanation of why this tool is being used in the current context")
tmdb_id: Optional[int] = Field(None, description="TMDB ID of the media (movie or TV series, can be obtained from search_media tool)")
douban_id: Optional[str] = Field(None, description="Douban ID of the media (alternative to tmdb_id)")
media_type: str = Field(..., description="Allowed values: movie, tv")
class QueryMediaDetailTool(MoviePilotTool):
name: str = "query_media_detail"
description: str = "Query supplementary media details from TMDB by ID and media_type. Accepts tmdb_id or douban_id (at least one required). media_type accepts 'movie' or 'tv'. Returns non-duplicated detail fields such as status, genres, directors, actors, and season info for TV series."
args_schema: Type[BaseModel] = QueryMediaDetailInput
def get_tool_message(self, **kwargs) -> Optional[str]:
"""根据查询参数生成友好的提示消息"""
tmdb_id = kwargs.get("tmdb_id")
douban_id = kwargs.get("douban_id")
if tmdb_id:
return f"查询媒体详情: TMDB ID {tmdb_id}"
return f"查询媒体详情: 豆瓣 ID {douban_id}"
async def run(self, media_type: str, tmdb_id: Optional[int] = None, douban_id: Optional[str] = None, **kwargs) -> str:
logger.info(f"执行工具: {self.name}, 参数: tmdb_id={tmdb_id}, douban_id={douban_id}, media_type={media_type}")
if tmdb_id is None and douban_id is None:
return json.dumps({
"success": False,
"message": "必须提供 tmdb_id 或 douban_id 之一"
}, ensure_ascii=False)
try:
media_chain = MediaChain()
media_type_enum = MediaType.from_agent(media_type)
if not media_type_enum:
return json.dumps({
"success": False,
"message": f"无效的媒体类型 '{media_type}',支持的类型:'movie', 'tv'"
}, ensure_ascii=False)
mediainfo = await media_chain.async_recognize_media(tmdbid=tmdb_id, doubanid=douban_id, mtype=media_type_enum)
if not mediainfo:
id_info = f"TMDB ID {tmdb_id}" if tmdb_id else f"豆瓣 ID {douban_id}"
return json.dumps({
"success": False,
"message": f"未找到 {id_info} 的媒体信息"
}, ensure_ascii=False)
# 精简 genres - 只保留名称
genres = [g.get("name") for g in (mediainfo.genres or []) if g.get("name")]
# 精简 directors - 只保留姓名和职位
directors = [
{
"name": d.get("name"),
"job": d.get("job")
}
for d in (mediainfo.directors or [])
if d.get("name")
]
# 精简 actors - 只保留姓名和角色
actors = [
{
"name": a.get("name"),
"character": a.get("character")
}
for a in (mediainfo.actors or [])
if a.get("name")
]
# 构建基础媒体详情信息
result = {
"status": mediainfo.status,
"genres": genres,
"directors": directors,
"actors": actors
}
# 如果是电视剧,添加电视剧特有信息
if mediainfo.type == MediaType.TV:
# 精简 season_info - 只保留基础摘要
season_info = [
{
"season_number": s.get("season_number"),
"name": s.get("name"),
"episode_count": s.get("episode_count"),
"air_date": s.get("air_date")
}
for s in (mediainfo.season_info or [])
if s.get("season_number") is not None
]
result.update({
"number_of_seasons": mediainfo.number_of_seasons,
"number_of_episodes": mediainfo.number_of_episodes,
"first_air_date": mediainfo.first_air_date,
"last_air_date": mediainfo.last_air_date,
"season_info": season_info
})
return json.dumps(result, ensure_ascii=False, indent=2)
except Exception as e:
error_message = f"查询媒体详情失败: {str(e)}"
logger.error(f"查询媒体详情失败: {e}", exc_info=True)
return json.dumps({
"success": False,
"message": error_message,
"tmdb_id": tmdb_id,
"douban_id": douban_id
}, ensure_ascii=False)

View File

@@ -0,0 +1,75 @@
"""查询可用人格工具。"""
import json
from typing import Optional, Type
from pydantic import BaseModel, Field
from app.agent.runtime import agent_runtime_manager
from app.agent.tools.base import MoviePilotTool
from app.log import logger
class QueryPersonasInput(BaseModel):
"""查询人格工具的输入参数模型。"""
explanation: str = Field(
...,
description="Clear explanation of why this tool is being used in the current context",
)
query: Optional[str] = Field(
None,
description=(
"Optional search keyword for persona_id, label, description, or aliases. "
"Use this when the user asks for a certain speaking style but the exact persona name is unknown."
),
)
class QueryPersonasTool(MoviePilotTool):
name: str = "query_personas"
description: str = (
"List all available personas (人格) and show which one is currently active. "
"Use this before switching persona when the user asks for a different speaking style but does not name "
"an exact persona_id. The result includes persona_id, label, description, aliases, and whether it is active."
)
args_schema: Type[BaseModel] = QueryPersonasInput
def get_tool_message(self, **kwargs) -> Optional[str]:
query = kwargs.get("query")
if query:
return f"查询人格列表: {query}"
return "查询人格列表"
async def run(self, query: Optional[str] = None, **kwargs) -> str:
logger.info("执行工具: %s, 参数: query=%s", self.name, query)
try:
runtime_config = agent_runtime_manager.load_runtime_config()
personas = runtime_config.list_personas()
if query:
normalized = query.strip().casefold()
personas = [
persona
for persona in personas
if normalized in persona["persona_id"].casefold()
or normalized in persona["label"].casefold()
or normalized in persona["description"].casefold()
or any(normalized in alias.casefold() for alias in persona["aliases"])
]
payload = {
"active_persona": runtime_config.active_persona,
"count": len(personas),
"personas": personas,
}
return json.dumps(payload, ensure_ascii=False, indent=2)
except Exception as e: # noqa: BLE001
logger.error("查询人格列表失败: %s", e, exc_info=True)
return json.dumps(
{
"success": False,
"message": f"查询人格列表时发生错误: {str(e)}",
},
ensure_ascii=False,
)

View File

@@ -0,0 +1,116 @@
"""查询插件能力工具"""
import json
from typing import Optional, Type
from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool
from app.core.plugin import PluginManager
from app.log import logger
class QueryPluginCapabilitiesInput(BaseModel):
"""查询插件能力工具的输入参数模型"""
explanation: str = Field(
...,
description="Clear explanation of why this tool is being used in the current context",
)
plugin_id: Optional[str] = Field(
None,
description="Optional plugin ID to query capabilities for a specific plugin. "
"If not provided, returns capabilities of all running plugins. "
"Use query_installed_plugins tool to get the plugin IDs first.",
)
class QueryPluginCapabilitiesTool(MoviePilotTool):
name: str = "query_plugin_capabilities"
description: str = (
"Query the capabilities of installed plugins, including supported commands and scheduled services. "
"Commands are slash-commands (e.g. /xxx) that can be executed via the run_slash_command tool. "
"Scheduled services are periodic tasks that can be triggered via the run_scheduler tool. "
"Optionally specify a plugin_id to query a specific plugin, or omit to query all running plugins."
)
require_admin: bool = True
args_schema: Type[BaseModel] = QueryPluginCapabilitiesInput
def get_tool_message(self, **kwargs) -> Optional[str]:
"""生成友好的提示消息"""
plugin_id = kwargs.get("plugin_id")
if plugin_id:
return f"查询插件 {plugin_id} 的能力"
return "查询所有插件的能力"
@staticmethod
def _load_plugin_capabilities(plugin_id: Optional[str] = None) -> dict:
"""读取运行中插件实例暴露的内存能力信息。"""
plugin_manager = PluginManager()
result = {}
commands = plugin_manager.get_plugin_commands(pid=plugin_id)
if commands:
result["commands"] = [
{
"cmd": cmd.get("cmd"),
"desc": cmd.get("desc"),
"plugin_id": cmd.get("pid"),
**({"data": cmd.get("data")} if cmd.get("data") else {}),
}
for cmd in commands
]
actions = plugin_manager.get_plugin_actions(pid=plugin_id)
if actions:
actions_list = []
for action_group in actions:
actions_list.append(
{
"plugin_id": action_group.get("plugin_id"),
"plugin_name": action_group.get("plugin_name"),
"actions": [
{
"id": action.get("id"),
"name": action.get("name"),
}
for action in action_group.get("actions", [])
],
}
)
result["actions"] = actions_list
services = plugin_manager.get_plugin_services(pid=plugin_id)
if services:
services_list = []
for svc in services:
svc_info = {
"id": svc.get("id"),
"name": svc.get("name"),
}
trigger = svc.get("trigger")
if trigger:
svc_info["trigger"] = str(trigger)
svc_kwargs = svc.get("kwargs")
if svc_kwargs:
svc_info["trigger_kwargs"] = {
k: str(v) for k, v in svc_kwargs.items()
}
services_list.append(svc_info)
result["services"] = services_list
return result
async def run(self, plugin_id: Optional[str] = None, **kwargs) -> str:
logger.info(f"执行工具: {self.name}, 参数: plugin_id={plugin_id}")
try:
result = self._load_plugin_capabilities(plugin_id)
if not result:
if plugin_id:
return f"插件 {plugin_id} 没有注册任何命令、动作或定时服务"
return "当前没有运行中的插件注册了命令、动作或定时服务"
return json.dumps(result, ensure_ascii=False, indent=2)
except Exception as e:
logger.error(f"查询插件能力失败: {e}", exc_info=True)
return f"查询插件能力时发生错误: {str(e)}"

View File

@@ -0,0 +1,88 @@
"""查询插件配置工具"""
import json
from typing import Optional, Type
from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool
from app.agent.tools.impl._plugin_tool_utils import get_plugin_snapshot
from app.core.plugin import PluginManager
from app.log import logger
class QueryPluginConfigInput(BaseModel):
"""查询插件配置工具的输入参数模型"""
explanation: str = Field(
...,
description="Clear explanation of why this tool is being used in the current context",
)
plugin_id: str = Field(
...,
description="The plugin ID to query. Use query_installed_plugins first to discover valid plugin IDs.",
)
class QueryPluginConfigTool(MoviePilotTool):
name: str = "query_plugin_config"
description: str = (
"Query the saved configuration of an installed plugin. "
"Returns the current saved config and, when available, the plugin's default config model. "
"Use this before update_plugin_config so you only change the intended keys."
)
require_admin: bool = True
args_schema: Type[BaseModel] = QueryPluginConfigInput
def get_tool_message(self, **kwargs) -> Optional[str]:
"""生成友好的提示消息"""
plugin_id = kwargs.get("plugin_id", "")
return f"查询插件配置: {plugin_id}"
@staticmethod
def _query_plugin_config(plugin_id: str) -> str:
"""
读取插件已保存配置,并尽量补充默认配置模型方便后续精确修改。
"""
plugin_info = get_plugin_snapshot(plugin_id)
if not plugin_info:
return json.dumps(
{
"success": False,
"message": f"插件 {plugin_id} 不存在,请先使用 query_installed_plugins 查询有效插件 ID",
},
ensure_ascii=False,
)
plugin_manager = PluginManager()
saved_config = plugin_manager.get_plugin_config(plugin_id) or {}
result = {
"success": True,
**plugin_info,
"config": saved_config,
}
# get_form 的 model 通常就是插件期望的配置结构,适合作为修改前的键参考。
plugin_instance = plugin_manager.running_plugins.get(plugin_id)
if plugin_instance and hasattr(plugin_instance, "get_form"):
try:
_form_schema, default_model = plugin_instance.get_form()
if default_model is not None:
result["default_model"] = default_model
except Exception as err:
logger.warning(f"读取插件 {plugin_id} 默认配置模型失败: {err}")
return json.dumps(result, ensure_ascii=False, indent=2, default=str)
async def run(self, plugin_id: str, **kwargs) -> str:
logger.info(f"执行工具: {self.name}, 参数: plugin_id={plugin_id}")
try:
# 插件配置来自内存配置缓存和运行态插件实例,直接读取即可。
return self._query_plugin_config(plugin_id)
except Exception as e:
logger.error(f"查询插件配置失败: {e}", exc_info=True)
return json.dumps(
{"success": False, "message": f"查询插件配置时发生错误: {str(e)}"},
ensure_ascii=False,
)

View File

@@ -0,0 +1,158 @@
"""查询插件数据工具"""
import json
from typing import Optional, Type
from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool
from app.agent.tools.impl._plugin_tool_utils import (
PLUGIN_DATA_KEY_PREVIEW_LIMIT,
build_preview_payload,
get_plugin_snapshot,
)
from app.db.plugindata_oper import PluginDataOper
from app.log import logger
class QueryPluginDataInput(BaseModel):
"""查询插件数据工具的输入参数模型"""
explanation: str = Field(
...,
description="Clear explanation of why this tool is being used in the current context",
)
plugin_id: str = Field(
...,
description="The plugin ID to query. Use query_installed_plugins first to discover valid plugin IDs.",
)
key: Optional[str] = Field(
None,
description="Optional plugin data key. If omitted, returns all plugin data entries for the plugin.",
)
max_chars: Optional[int] = Field(
None,
description="Maximum number of preview characters to return when plugin data is too large. Default 12000, capped at 50000.",
)
class QueryPluginDataTool(MoviePilotTool):
name: str = "query_plugin_data"
description: str = (
"Query persisted data of an installed plugin. "
"Optionally specify a key to read a single data item; otherwise all plugin data entries are returned. "
"When the result is too large, the tool automatically truncates it and returns a preview instead."
)
require_admin: bool = True
args_schema: Type[BaseModel] = QueryPluginDataInput
def get_tool_message(self, **kwargs) -> Optional[str]:
"""生成友好的提示消息"""
plugin_id = kwargs.get("plugin_id", "")
key = kwargs.get("key")
if key:
return f"查询插件数据: {plugin_id}.{key}"
return f"查询插件全部数据: {plugin_id}"
@staticmethod
async def _query_plugin_data(
plugin_id: str, key: Optional[str] = None, max_chars: Optional[int] = None
) -> str:
"""
插件数据改走异步 ORM 查询,避免再套一层线程池。
"""
plugin_info = get_plugin_snapshot(plugin_id)
if not plugin_info:
return json.dumps(
{
"success": False,
"message": f"插件 {plugin_id} 不存在,请先使用 query_installed_plugins 查询有效插件 ID",
},
ensure_ascii=False,
)
plugin_data_oper = PluginDataOper()
if key:
value = await plugin_data_oper.async_get_data(plugin_id, key)
if value is None:
return json.dumps(
{
"success": True,
**plugin_info,
"key": key,
"found": False,
"message": f"插件 {plugin_id} 没有数据项 {key}",
},
ensure_ascii=False,
indent=2,
)
truncated, total_chars, returned_chars, preview = build_preview_payload(
value, max_chars
)
result = {
"success": True,
**plugin_info,
"key": key,
"found": True,
"truncated": truncated,
"total_chars": total_chars,
"returned_chars": returned_chars,
}
if truncated:
result["value_preview"] = preview
result["message"] = "插件数据内容过大,已截断预览"
else:
result["value"] = value
return json.dumps(result, ensure_ascii=False, indent=2, default=str)
rows = await plugin_data_oper.async_get_data_all(plugin_id) or []
data_map = {row.key: row.value for row in rows}
keys = list(data_map.keys())
key_preview = keys[:PLUGIN_DATA_KEY_PREVIEW_LIMIT]
result = {
"success": True,
**plugin_info,
"count": len(data_map),
"keys": key_preview,
"keys_truncated": len(keys) > PLUGIN_DATA_KEY_PREVIEW_LIMIT,
}
if not data_map:
result["data"] = {}
result["truncated"] = False
return json.dumps(result, ensure_ascii=False, indent=2, default=str)
truncated, total_chars, returned_chars, preview = build_preview_payload(
data_map, max_chars
)
result["truncated"] = truncated
result["total_chars"] = total_chars
result["returned_chars"] = returned_chars
if truncated:
result["data_preview"] = preview
result["message"] = "插件数据内容过大,已截断。请传入 key 精确查询单个数据项。"
else:
result["data"] = data_map
return json.dumps(result, ensure_ascii=False, indent=2, default=str)
async def run(
self,
plugin_id: str,
key: Optional[str] = None,
max_chars: Optional[int] = None,
**kwargs,
) -> str:
logger.info(
f"执行工具: {self.name}, 参数: plugin_id={plugin_id}, key={key}"
)
try:
return await self._query_plugin_data(plugin_id, key, max_chars)
except Exception as e:
logger.error(f"查询插件数据失败: {e}", exc_info=True)
return json.dumps(
{"success": False, "message": f"查询插件数据时发生错误: {str(e)}"},
ensure_ascii=False,
)
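build_preview_payload is imported from _plugin_tool_utils and is not shown in this diff; from the way its four return values are consumed above, a plausible sketch (an assumption, not the actual implementation) is:

import json

DEFAULT_PREVIEW_CHARS = 12000   # default hinted by the max_chars field description
MAX_PREVIEW_CHARS = 50000       # cap hinted by the same description

def build_preview_payload(value, max_chars=None):
    """Serialize a value, truncate it to a character budget, and report sizes."""
    limit = min(max_chars or DEFAULT_PREVIEW_CHARS, MAX_PREVIEW_CHARS)
    text = value if isinstance(value, str) else json.dumps(value, ensure_ascii=False, default=str)
    total_chars = len(text)
    truncated = total_chars > limit
    preview = text[:limit] if truncated else text
    return truncated, total_chars, len(preview), preview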

View File

@@ -10,13 +10,13 @@ from app.agent.tools.base import MoviePilotTool
from app.core.context import MediaInfo
from app.helper.subscribe import SubscribeHelper
from app.log import logger
from app.schemas.types import MediaType
from app.schemas.types import MediaType, media_type_to_agent
class QueryPopularSubscribesInput(BaseModel):
"""查询热门订阅工具的输入参数模型"""
explanation: str = Field(..., description="Clear explanation of why this tool is being used in the current context")
stype: str = Field(..., description="Media type: '电影' for films, '电视剧' for television series")
media_type: str = Field(..., description="Allowed values: movie, tv")
page: Optional[int] = Field(1, description="Page number for pagination (default: 1)")
count: Optional[int] = Field(30, description="Number of items per page (default: 30)")
min_sub: Optional[int] = Field(None, description="Minimum number of subscribers filter (optional, e.g., 5)")
@@ -33,13 +33,13 @@ class QueryPopularSubscribesTool(MoviePilotTool):
def get_tool_message(self, **kwargs) -> Optional[str]:
"""根据查询参数生成友好的提示消息"""
stype = kwargs.get("stype", "")
media_type = kwargs.get("media_type", "")
page = kwargs.get("page", 1)
min_sub = kwargs.get("min_sub")
min_rating = kwargs.get("min_rating")
max_rating = kwargs.get("max_rating")
parts = [f"正在查询热门订阅 [{stype}]"]
parts = [f"查询热门订阅 [{media_type}]"]
if min_sub:
parts.append(f"最少订阅: {min_sub}")
@@ -52,7 +52,7 @@ class QueryPopularSubscribesTool(MoviePilotTool):
return " | ".join(parts) if len(parts) > 1 else parts[0]
async def run(self, media_type: str,
page: Optional[int] = 1,
count: Optional[int] = 30,
min_sub: Optional[int] = None,
@@ -61,7 +61,7 @@ class QueryPopularSubscribesTool(MoviePilotTool):
max_rating: Optional[float] = None,
sort_type: Optional[str] = None, **kwargs) -> str:
logger.info(
f"执行工具: {self.name}, 参数: stype={stype}, page={page}, count={count}, min_sub={min_sub}, "
f"执行工具: {self.name}, 参数: media_type={media_type}, page={page}, count={count}, min_sub={min_sub}, "
f"genre_id={genre_id}, min_rating={min_rating}, max_rating={max_rating}, sort_type={sort_type}")
try:
@@ -69,10 +69,13 @@ class QueryPopularSubscribesTool(MoviePilotTool):
page = 1
if count is None or count < 1:
count = 30
media_type_enum = MediaType.from_agent(media_type)
if not media_type_enum:
return f"错误:无效的媒体类型 '{media_type}',支持的类型:'movie', 'tv'"
subscribe_helper = SubscribeHelper()
subscribes = await subscribe_helper.async_get_statistic(
stype=media_type_enum.to_agent(),
page=page,
count=count,
genre_id=genre_id,
@@ -94,7 +97,15 @@ class QueryPopularSubscribesTool(MoviePilotTool):
continue
media = MediaInfo()
media.type = MediaType(sub.get("type"))
raw_type = str(sub.get("type") or "").strip().lower()
if raw_type in ["movie", "电影"]:
media.type = MediaType.MOVIE
elif raw_type in ["tv", "电视剧"]:
media.type = MediaType.TV
else:
# 跳过无法识别类型的数据,避免单条脏数据导致整批失败
logger.warning(f"跳过未知媒体类型: {sub.get('type')}")
continue
media.tmdb_id = sub.get("tmdbid")
# 处理标题
title = sub.get("name")
@@ -124,7 +135,7 @@ class QueryPopularSubscribesTool(MoviePilotTool):
for media in ret_medias:
media_dict = media.to_dict()
simplified = {
"type": media_dict.get("type"),
"type": media_type_to_agent(media_dict.get("type")),
"title": media_dict.get("title"),
"year": media_dict.get("year"),
"tmdb_id": media_dict.get("tmdb_id"),
@@ -149,4 +160,3 @@ class QueryPopularSubscribesTool(MoviePilotTool):
except Exception as e:
logger.error(f"查询热门订阅失败: {e}", exc_info=True)
return f"查询热门订阅时发生错误: {str(e)}"

View File

@@ -1,65 +1,104 @@
"""查询规则组工具"""
"""查询过滤规则组工具"""
import json
from typing import Optional, Type
from typing import Optional, Type, List
from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool
from app.helper.rule import RuleHelper
from app.agent.tools.impl._filter_rule_utils import (
collect_rule_group_usages,
get_rule_groups,
serialize_rule_group,
RULE_STRING_SYNTAX,
)
from app.log import logger
class QueryRuleGroupsInput(BaseModel):
"""查询规则组工具的输入参数模型"""
explanation: str = Field(..., description="Clear explanation of why this tool is being used in the current context")
explanation: str = Field(
...,
description="Clear explanation of why this tool is being used in the current context",
)
group_names: Optional[List[str]] = Field(
None,
description="Optional list of rule group names to query. If omitted, return all rule groups.",
)
include_usage: bool = Field(
True,
description="Whether to include where each rule group is referenced by global settings or subscriptions.",
)
class QueryRuleGroupsTool(MoviePilotTool):
name: str = "query_rule_groups"
description: str = "Query all filter rule groups available in the system. Rule groups are used to filter torrents when searching or subscribing. Returns rule group names, media types, and categories, but excludes rule_string to keep results concise."
description: str = (
"Query filter rule groups (过滤规则组 / 优先级规则组). "
"Each rule group contains a rule_string made of built-in rules and/or custom rules. "
"Inside one level use '&', '|', '!' and optional parentheses; use '>' between levels. "
"Levels are evaluated from left to right, and the first matched level wins. "
"The result includes parsed levels and syntax guidance so the agent can learn existing patterns before writing a new rule group."
)
args_schema: Type[BaseModel] = QueryRuleGroupsInput
def get_tool_message(self, **kwargs) -> Optional[str]:
"""根据查询参数生成友好的提示消息"""
return "正在查询所有规则组"
group_names = kwargs.get("group_names") or []
if group_names:
return f"查询规则组: {', '.join(group_names)}"
return "查询所有规则组"
async def run(
self,
group_names: Optional[List[str]] = None,
include_usage: bool = True,
**kwargs,
) -> str:
logger.info(f"执行工具: {self.name}")
try:
rule_groups = get_rule_groups()
if group_names:
target_names = set(group_names)
rule_groups = [
group for group in rule_groups if group.name in target_names
]
usage_map = {}
if include_usage:
usage_map = await collect_rule_group_usages(
[group.name for group in rule_groups if group.name]
)
serialized = [
serialize_rule_group(group, usage_map.get(group.name))
for group in rule_groups
]
message = (
f"找到 {len(serialized)} 个规则组"
if serialized
else "未找到任何规则组"
)
return json.dumps(
{
"success": True,
"message": message,
"count": len(serialized),
"rule_string_syntax": RULE_STRING_SYNTAX,
"rule_groups": serialized,
},
ensure_ascii=False,
indent=2,
)
except Exception as exc:
logger.error(f"查询规则组失败: {exc}", exc_info=True)
return json.dumps(
{
"success": False,
"message": f"查询规则组失败: {exc}",
"rule_groups": [],
},
ensure_ascii=False,
)
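The new description documents the rule_string mini-language: '&', '|', '!' and parentheses combine rules inside one priority level, and '>' separates levels that are tried left to right until one matches. A hedged illustration — the rule names are placeholders, not necessarily rules that exist in a given installation:

# Hypothetical rule_string: prefer BluRay REMUX, then any BluRay, then WEB-DL.
rule_string = "BLURAY & REMUX > BLURAY > WEBDL"

levels = [level.strip() for level in rule_string.split(">")]
# -> ["BLURAY & REMUX", "BLURAY", "WEBDL"]
# Each level is evaluated in order; the first level that matches a torrent wins.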

View File

@@ -7,7 +7,6 @@ from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool
from app.log import logger
from app.scheduler import Scheduler
class QuerySchedulersInput(BaseModel):
@@ -22,11 +21,13 @@ class QuerySchedulersTool(MoviePilotTool):
def get_tool_message(self, **kwargs) -> Optional[str]:
"""生成友好的提示消息"""
return "正在查询定时服务"
return "查询定时服务"
async def run(self, **kwargs) -> str:
logger.info(f"执行工具: {self.name}")
try:
from app.scheduler import Scheduler
scheduler = Scheduler()
schedulers = scheduler.list()
if schedulers:
@@ -52,4 +53,3 @@ class QuerySchedulersTool(MoviePilotTool):
except Exception as e:
logger.error(f"查询定时服务失败: {e}", exc_info=True)
return f"查询定时服务时发生错误: {str(e)}"

View File

@@ -14,60 +14,74 @@ from app.log import logger
class QuerySiteUserdataInput(BaseModel):
"""查询站点用户数据工具的输入参数模型"""
explanation: str = Field(..., description="Clear explanation of why this tool is being used in the current context")
site_id: int = Field(..., description="The ID of the site to query user data for")
workdate: Optional[str] = Field(None, description="Work date to query (optional, format: 'YYYY-MM-DD', if not specified returns latest data)")
explanation: str = Field(
...,
description="Clear explanation of why this tool is being used in the current context",
)
site_id: int = Field(
...,
description="The ID of the site to query user data for (can be obtained from query_sites tool)",
)
workdate: Optional[str] = Field(
None,
description="Work date to query (optional, format: 'YYYY-MM-DD', if not specified returns latest data)",
)
class QuerySiteUserdataTool(MoviePilotTool):
name: str = "query_site_userdata"
description: str = "Query user data for a specific site including username, user level, upload/download statistics, seeding information, bonus points, and other account details. Supports querying data for a specific date or latest data."
require_admin: bool = True
args_schema: Type[BaseModel] = QuerySiteUserdataInput
def get_tool_message(self, **kwargs) -> Optional[str]:
"""根据查询参数生成友好的提示消息"""
site_id = kwargs.get("site_id")
workdate = kwargs.get("workdate")
message = f"正在查询站点 #{site_id} 的用户数据"
message = f"查询站点 #{site_id} 的用户数据"
if workdate:
message += f" (日期: {workdate})"
else:
message += " (最新数据)"
return message
async def run(self, site_id: int, workdate: Optional[str] = None, **kwargs) -> str:
logger.info(f"执行工具: {self.name}, 参数: site_id={site_id}, workdate={workdate}")
logger.info(
f"执行工具: {self.name}, 参数: site_id={site_id}, workdate={workdate}"
)
try:
# 获取数据库会话
async with AsyncSessionFactory() as db:
# 获取站点
site = await Site.async_get(db, site_id)
if not site:
return json.dumps({
"success": False,
"message": f"站点不存在: {site_id}"
}, ensure_ascii=False)
return json.dumps(
{"success": False, "message": f"站点不存在: {site_id}"},
ensure_ascii=False,
)
# 获取站点用户数据
user_data_list = await SiteUserData.async_get_by_domain(
db, domain=site.domain, workdate=workdate
)
if not user_data_list:
return json.dumps({
"success": False,
"message": f"站点 {site.name} ({site.domain}) 暂无用户数据",
"site_id": site_id,
"site_name": site.name,
"site_domain": site.domain,
"workdate": workdate
}, ensure_ascii=False)
return json.dumps(
{
"success": False,
"message": f"站点 {site.name} ({site.domain}) 暂无用户数据",
"site_id": site_id,
"site_name": site.name,
"site_domain": site.domain,
"workdate": workdate,
},
ensure_ascii=False,
)
# 格式化用户数据
result = {
"success": True,
@@ -76,16 +90,26 @@ class QuerySiteUserdataTool(MoviePilotTool):
"site_domain": site.domain,
"workdate": workdate,
"data_count": len(user_data_list),
"user_data": []
"user_data": [],
}
for user_data in user_data_list:
# 格式化上传/下载量(转换为可读格式)
upload_gb = user_data.upload / (1024 ** 3) if user_data.upload else 0
download_gb = user_data.download / (1024 ** 3) if user_data.download else 0
seeding_size_gb = user_data.seeding_size / (1024 ** 3) if user_data.seeding_size else 0
leeching_size_gb = user_data.leeching_size / (1024 ** 3) if user_data.leeching_size else 0
upload_gb = user_data.upload / (1024**3) if user_data.upload else 0
download_gb = (
user_data.download / (1024**3) if user_data.download else 0
)
seeding_size_gb = (
user_data.seeding_size / (1024**3)
if user_data.seeding_size
else 0
)
leeching_size_gb = (
user_data.leeching_size / (1024**3)
if user_data.leeching_size
else 0
)
user_data_dict = {
"domain": user_data.domain,
"name": user_data.name,
@@ -100,37 +124,46 @@ class QuerySiteUserdataTool(MoviePilotTool):
"download_gb": round(download_gb, 2),
"ratio": round(user_data.ratio, 2) if user_data.ratio else 0,
"seeding": int(user_data.seeding) if user_data.seeding else 0,
"leeching": int(user_data.leeching) if user_data.leeching else 0,
"leeching": int(user_data.leeching)
if user_data.leeching
else 0,
"seeding_size": user_data.seeding_size,
"seeding_size_gb": round(seeding_size_gb, 2),
"leeching_size": user_data.leeching_size,
"leeching_size_gb": round(leeching_size_gb, 2),
"seeding_info": user_data.seeding_info if user_data.seeding_info else [],
"seeding_info": user_data.seeding_info
if user_data.seeding_info
else [],
"message_unread": user_data.message_unread,
"message_unread_contents": user_data.message_unread_contents if user_data.message_unread_contents else [],
"message_unread_contents": user_data.message_unread_contents
if user_data.message_unread_contents
else [],
"err_msg": user_data.err_msg,
"updated_day": user_data.updated_day,
"updated_time": user_data.updated_time
"updated_time": user_data.updated_time,
}
result["user_data"].append(user_data_dict)
# 如果有多条数据,只返回最新的(按更新时间排序)
if len(result["user_data"]) > 1:
result["user_data"].sort(
key=lambda x: (x.get("updated_day", ""), x.get("updated_time", "")),
reverse=True
key=lambda x: (
x.get("updated_day", ""),
x.get("updated_time", ""),
),
reverse=True,
)
result["message"] = (
f"找到 {len(result['user_data'])} 条数据,显示最新的一条"
)
result["message"] = f"找到 {len(result['user_data'])} 条数据,显示最新的一条"
result["user_data"] = [result["user_data"][0]]
return json.dumps(result, ensure_ascii=False, indent=2)
except Exception as e:
error_message = f"查询站点用户数据失败: {str(e)}"
logger.error(f"查询站点用户数据失败: {e}", exc_info=True)
return json.dumps({
"success": False,
"message": error_message,
"site_id": site_id
}, ensure_ascii=False)
return json.dumps(
{"success": False, "message": error_message, "site_id": site_id},
ensure_ascii=False,
)

View File

@@ -12,35 +12,45 @@ from app.log import logger
class QuerySitesInput(BaseModel):
"""查询站点工具的输入参数模型"""
explanation: str = Field(..., description="Clear explanation of why this tool is being used in the current context")
status: Optional[str] = Field("all",
description="Filter sites by status: 'active' for enabled sites, 'inactive' for disabled sites, 'all' for all sites")
name: Optional[str] = Field(None,
description="Filter sites by name (partial match, optional)")
explanation: str = Field(
...,
description="Clear explanation of why this tool is being used in the current context",
)
status: Optional[str] = Field(
"all",
description="Filter sites by status: 'active' for enabled sites, 'inactive' for disabled sites, 'all' for all sites",
)
name: Optional[str] = Field(
None, description="Filter sites by name (partial match, optional)"
)
class QuerySitesTool(MoviePilotTool):
name: str = "query_sites"
description: str = "Query site status and list all configured sites. Shows site name, domain, status, priority, and basic configuration. Site priority (pri): smaller values have higher priority (e.g., pri=1 has higher priority than pri=10)."
require_admin: bool = True
args_schema: Type[BaseModel] = QuerySitesInput
def get_tool_message(self, **kwargs) -> Optional[str]:
"""根据查询参数生成友好的提示消息"""
status = kwargs.get("status", "all")
name = kwargs.get("name")
parts = ["正在查询站点"]
parts = ["查询站点"]
if status != "all":
status_map = {"active": "已启用", "inactive": "已禁用"}
parts.append(f"状态: {status_map.get(status, status)}")
if name:
parts.append(f"名称: {name}")
return " | ".join(parts) if len(parts) > 1 else parts[0]
async def run(self, status: Optional[str] = "all", name: Optional[str] = None, **kwargs) -> str:
async def run(
self, status: Optional[str] = "all", name: Optional[str] = None, **kwargs
) -> str:
logger.info(f"执行工具: {self.name}, 参数: status={status}, name={name}")
try:
site_oper = SiteOper()
@@ -68,9 +78,10 @@ class QuerySitesTool(MoviePilotTool):
"url": s.url,
"pri": s.pri,
"is_active": s.is_active,
"cookie": s.cookie,
"downloader": s.downloader,
"proxy": s.proxy,
"timeout": s.timeout
"timeout": s.timeout,
}
simplified_sites.append(simplified)
result_json = json.dumps(simplified_sites, ensure_ascii=False, indent=2)
@@ -79,4 +90,3 @@ class QuerySitesTool(MoviePilotTool):
except Exception as e:
logger.error(f"查询站点失败: {e}", exc_info=True)
return f"查询站点时发生错误: {str(e)}"

View File

@@ -9,105 +9,179 @@ from app.agent.tools.base import MoviePilotTool
from app.db import AsyncSessionFactory
from app.db.models.subscribehistory import SubscribeHistory
from app.log import logger
from app.schemas.types import media_type_to_agent
PAGE_SIZE = 20
class QuerySubscribeHistoryInput(BaseModel):
"""查询订阅历史工具的输入参数模型"""
explanation: str = Field(..., description="Clear explanation of why this tool is being used in the current context")
media_type: Optional[str] = Field("all", description="Filter by media type: '电影' for films, '电视剧' for television series, 'all' for all types (default: 'all')")
name: Optional[str] = Field(None, description="Filter by media name (partial match, optional)")
explanation: str = Field(
...,
description="Clear explanation of why this tool is being used in the current context",
)
media_type: Optional[str] = Field(
"all", description="Allowed values: movie, tv, all"
)
name: Optional[str] = Field(
None, description="Filter by media name (partial match, optional)"
)
page: Optional[int] = Field(
1,
description="Page number for pagination (default: 1, 20 items per page). Ignored when name filter is provided.",
)
class QuerySubscribeHistoryTool(MoviePilotTool):
name: str = "query_subscribe_history"
description: str = "Query subscription history records. Shows completed subscriptions with their details including name, type, rating, completion date, and other subscription information. Supports filtering by media type and name. Returns up to 30 records."
description: str = "Query subscription history records. Shows completed subscriptions with their details including name, type, rating, completion date, and other subscription information. Supports filtering by media type and name. Supports pagination with 20 records per page."
args_schema: Type[BaseModel] = QuerySubscribeHistoryInput
def get_tool_message(self, **kwargs) -> Optional[str]:
"""根据查询参数生成友好的提示消息"""
media_type = kwargs.get("media_type", "all")
name = kwargs.get("name")
parts = ["正在查询订阅历史"]
page = kwargs.get("page", 1)
parts = ["查询订阅历史"]
if media_type != "all":
parts.append(f"类型: {media_type}")
if name:
parts.append(f"名称: {name}")
return " | ".join(parts) if len(parts) > 1 else parts[0]
else:
parts.append(f"{page}")
async def run(self, media_type: Optional[str] = "all",
name: Optional[str] = None, **kwargs) -> str:
logger.info(f"执行工具: {self.name}, 参数: media_type={media_type}, name={name}")
return " | ".join(parts)
async def run(
self,
media_type: Optional[str] = "all",
name: Optional[str] = None,
page: Optional[int] = 1,
**kwargs,
) -> str:
page = max(1, page or 1)
logger.info(
f"执行工具: {self.name}, 参数: media_type={media_type}, name={name}, page={page}"
)
try:
if media_type not in ["all", "movie", "tv"]:
return f"错误:无效的媒体类型 '{media_type}',支持的类型:'movie', 'tv', 'all'"
# 获取数据库会话
async with AsyncSessionFactory() as db:
# 根据类型查询
if media_type == "all":
# 查询所有类型,需要分别查询电影和电视剧
movie_history = await SubscribeHistory.async_list_by_type(db, mtype="movie", page=1, count=100)
tv_history = await SubscribeHistory.async_list_by_type(db, mtype="tv", page=1, count=100)
all_history = list(movie_history) + list(tv_history)
# 按日期排序
all_history.sort(key=lambda x: x.date or "", reverse=True)
else:
# 查询指定类型
all_history = await SubscribeHistory.async_list_by_type(db, mtype=media_type, page=1, count=100)
# 按名称过滤
filtered_history = []
if name:
# 有名称过滤时,获取足够多的记录在内存中过滤,不分页
fetch_count = 500
if media_type == "all":
movie_history = await SubscribeHistory.async_list_by_type(
db, mtype="movie", page=1, count=fetch_count
)
tv_history = await SubscribeHistory.async_list_by_type(
db, mtype="tv", page=1, count=fetch_count
)
all_history = list(movie_history) + list(tv_history)
all_history.sort(key=lambda x: x.date or "", reverse=True)
else:
all_history = list(
await SubscribeHistory.async_list_by_type(
db, mtype=media_type, page=1, count=fetch_count
)
)
# 按名称过滤
name_lower = name.lower()
for record in all_history:
if record.name and name_lower in record.name.lower():
filtered_history.append(record)
filtered_history = [
record
for record in all_history
if record.name and name_lower in record.name.lower()
]
if not filtered_history:
return "未找到相关订阅历史记录"
# 名称过滤时直接返回所有匹配结果,不分页
simplified_records = self._simplify_records(filtered_history)
result_json = json.dumps(
simplified_records, ensure_ascii=False, indent=2
)
return result_json
else:
filtered_history = all_history
# 无名称过滤时,直接利用数据库分页
if media_type == "all":
movie_history = await SubscribeHistory.async_list_by_type(
db, mtype="movie", page=1, count=page * PAGE_SIZE
)
tv_history = await SubscribeHistory.async_list_by_type(
db, mtype="tv", page=1, count=page * PAGE_SIZE
)
all_history = list(movie_history) + list(tv_history)
all_history.sort(key=lambda x: x.date or "", reverse=True)
filtered_history = all_history
else:
filtered_history = list(
await SubscribeHistory.async_list_by_type(
db, mtype=media_type, page=1, count=page * PAGE_SIZE
)
)
if not filtered_history:
return "未找到相关订阅历史记录"
# 分页切片
total_count = len(filtered_history)
start = (page - 1) * PAGE_SIZE
end = start + PAGE_SIZE
page_records = filtered_history[start:end]
if not page_records:
return f"{page} 页没有数据。"
simplified_records = self._simplify_records(page_records)
result_json = json.dumps(
simplified_records, ensure_ascii=False, indent=2
)
has_more = total_count > end
payload_msg = f"{page} 页,当前页 {len(simplified_records)} 条结果。"
if has_more:
payload_msg += (
f" 可能有更多数据,可使用 page={page + 1} 获取下一页。"
)
return f"{payload_msg}\n\n{result_json}"
except Exception as e:
logger.error(f"查询订阅历史失败: {e}", exc_info=True)
return f"查询订阅历史时发生错误: {str(e)}"
@staticmethod
def _simplify_records(records) -> list:
"""转换为字典格式,只保留关键信息"""
simplified_records = []
for record in records:
simplified = {
"id": record.id,
"name": record.name,
"year": record.year,
"type": media_type_to_agent(record.type),
"season": record.season,
"tmdbid": record.tmdbid,
"doubanid": record.doubanid,
"bangumiid": record.bangumiid,
"poster": record.poster,
"vote": record.vote,
"total_episode": record.total_episode,
"date": record.date,
"username": record.username,
}
if record.filter:
simplified["filter"] = record.filter
if record.quality:
simplified["quality"] = record.quality
if record.resolution:
simplified["resolution"] = record.resolution
simplified_records.append(simplified)
return simplified_records

View File

@@ -34,7 +34,7 @@ class QuerySubscribeSharesTool(MoviePilotTool):
min_rating = kwargs.get("min_rating")
max_rating = kwargs.get("max_rating")
parts = ["正在查询订阅分享"]
parts = ["查询订阅分享"]
if name:
parts.append(f"名称: {name}")
@@ -110,4 +110,3 @@ class QuerySubscribeSharesTool(MoviePilotTool):
except Exception as e:
logger.error(f"查询订阅分享失败: {e}", exc_info=True)
return f"查询订阅分享时发生错误: {str(e)}"

View File

@@ -8,82 +8,150 @@ from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool
from app.db.subscribe_oper import SubscribeOper
from app.log import logger
from app.schemas.subscribe import Subscribe as SubscribeSchema
from app.schemas.types import MediaType
PAGE_SIZE = 100
QUERY_SUBSCRIBE_OUTPUT_FIELDS = [
"id",
"name",
"year",
"type",
"season",
"total_episode",
"start_episode",
"lack_episode",
"filter",
"include",
"exclude",
"quality",
"resolution",
"effect",
"state",
"last_update",
"sites",
"downloader",
"best_version",
"save_path",
"custom_words",
"media_category",
"filter_groups",
"episode_group",
]
class QuerySubscribesInput(BaseModel):
"""查询订阅工具的输入参数模型"""
explanation: str = Field(..., description="Clear explanation of why this tool is being used in the current context")
status: Optional[str] = Field("all",
description="Filter subscriptions by status: 'R' for enabled subscriptions, 'P' for disabled ones, 'all' for all subscriptions")
media_type: Optional[str] = Field("all",
description="Filter by media type: '电影' for films, '电视剧' for television series, 'all' for all types")
explanation: str = Field(
...,
description="Clear explanation of why this tool is being used in the current context",
)
status: Optional[str] = Field(
"all",
description="Filter subscriptions by status: 'R' for enabled subscriptions, 'S' for paused ones, 'all' for all subscriptions",
)
media_type: Optional[str] = Field(
"all", description="Allowed values: movie, tv, all"
)
tmdb_id: Optional[int] = Field(
None,
description="Filter by TMDB ID to check if a specific media is already subscribed",
)
douban_id: Optional[str] = Field(
None,
description="Filter by Douban ID to check if a specific media is already subscribed",
)
page: Optional[int] = Field(
1, description="Page number for pagination (default: 1, 100 items per page)"
)
class QuerySubscribesTool(MoviePilotTool):
name: str = "query_subscribes"
description: str = "Query subscription status and list all user subscriptions. Shows active subscriptions, their download status, and configuration details."
description: str = "Query subscription status and list user subscriptions. Returns full subscription parameters for each matched subscription. Supports pagination with 100 items per page."
args_schema: Type[BaseModel] = QuerySubscribesInput
def get_tool_message(self, **kwargs) -> Optional[str]:
"""根据查询参数生成友好的提示消息"""
status = kwargs.get("status", "all")
media_type = kwargs.get("media_type", "all")
parts = ["正在查询订阅"]
page = kwargs.get("page", 1)
parts = ["查询订阅"]
# 根据状态过滤条件生成提示
if status != "all":
status_map = {"R": "已启用", "P": "禁用"}
status_map = {"R": "已启用", "S": "暂停"}
parts.append(f"状态: {status_map.get(status, status)}")
# 根据媒体类型过滤条件生成提示
if media_type != "all":
parts.append(f"类型: {media_type}")
return " | ".join(parts) if len(parts) > 1 else parts[0]
async def run(self, status: Optional[str] = "all", media_type: Optional[str] = "all", **kwargs) -> str:
logger.info(f"执行工具: {self.name}, 参数: status={status}, media_type={media_type}")
parts.append(f"{page}")
return " | ".join(parts)
async def run(
self,
status: Optional[str] = "all",
media_type: Optional[str] = "all",
tmdb_id: Optional[int] = None,
douban_id: Optional[str] = None,
page: Optional[int] = 1,
**kwargs,
) -> str:
page = max(1, page or 1)
logger.info(
f"执行工具: {self.name}, 参数: status={status}, media_type={media_type}, tmdb_id={tmdb_id}, douban_id={douban_id}, page={page}"
)
try:
if media_type != "all" and not MediaType.from_agent(media_type):
return f"错误:无效的媒体类型 '{media_type}',支持的类型:'movie', 'tv', 'all'"
subscribe_oper = SubscribeOper()
subscribes = await subscribe_oper.async_list()
filtered_subscribes = []
for sub in subscribes:
if status != "all" and sub.state != status:
continue
if media_type != "all" and sub.type != media_type:
if (
media_type != "all"
and sub.type != MediaType.from_agent(media_type).value
):
continue
if tmdb_id is not None and sub.tmdbid != tmdb_id:
continue
if douban_id is not None and sub.doubanid != douban_id:
continue
filtered_subscribes.append(sub)
if filtered_subscribes:
total_count = len(filtered_subscribes)
# 分页
start = (page - 1) * PAGE_SIZE
end = start + PAGE_SIZE
page_subscribes = filtered_subscribes[start:end]
if not page_subscribes:
total_pages = (total_count + PAGE_SIZE - 1) // PAGE_SIZE
return f"{page} 页没有数据,共 {total_count} 条结果,共 {total_pages} 页。"
full_subscribes = [
SubscribeSchema.model_validate(s, from_attributes=True).model_dump(
include=set(QUERY_SUBSCRIBE_OUTPUT_FIELDS), exclude_none=True
)
for s in page_subscribes
]
result_json = json.dumps(full_subscribes, ensure_ascii=False, indent=2)
total_pages = (total_count + PAGE_SIZE - 1) // PAGE_SIZE
payload_msg = f"{page}/{total_pages} 页,当前页 {len(page_subscribes)} 条结果,共 {total_count} 条。"
if page < total_pages:
payload_msg += f" 可使用 page={page + 1} 获取下一页。"
return f"{payload_msg}\n\n{result_json}"
return "未找到相关订阅"
except Exception as e:
logger.error(f"查询订阅失败: {e}", exc_info=True)

View File

@@ -10,6 +10,7 @@ from app.agent.tools.base import MoviePilotTool
from app.db import AsyncSessionFactory
from app.db.models.transferhistory import TransferHistory
from app.log import logger
from app.schemas.types import media_type_to_agent
class QueryTransferHistoryInput(BaseModel):
@@ -32,7 +33,7 @@ class QueryTransferHistoryTool(MoviePilotTool):
status = kwargs.get("status", "all")
page = kwargs.get("page", 1)
parts = ["正在查询整理历史"]
parts = ["查询整理历史"]
if title:
parts.append(f"标题: {title}")
@@ -95,7 +96,7 @@ class QueryTransferHistoryTool(MoviePilotTool):
"id": record.id,
"title": record.title,
"year": record.year,
"type": record.type,
"type": media_type_to_agent(record.type),
"category": record.category,
"seasons": record.seasons,
"episodes": record.episodes,

View File

@@ -30,7 +30,7 @@ class QueryWorkflowsTool(MoviePilotTool):
name = kwargs.get("name")
trigger_type = kwargs.get("trigger_type", "all")
parts = ["正在查询工作流"]
parts = ["查询工作流"]
if state != "all":
state_map = {"W": "等待", "R": "运行中", "P": "暂停", "S": "成功", "F": "失败"}
@@ -125,4 +125,3 @@ class QueryWorkflowsTool(MoviePilotTool):
except Exception as e:
logger.error(f"查询工作流失败: {e}", exc_info=True)
return f"查询工作流时发生错误: {str(e)}"

View File

@@ -0,0 +1,81 @@
"""文件读取工具"""
from pathlib import Path
from typing import Optional, Type
from anyio import Path as AsyncPath
from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool
from app.log import logger
# 最大读取大小 50KB
MAX_READ_SIZE = 50 * 1024
class ReadFileInput(BaseModel):
"""Input parameters for read file tool"""
file_path: str = Field(..., description="The absolute path of the file to read")
start_line: Optional[int] = Field(None, description="The starting line number (1-based, inclusive). If not provided, reading starts from the beginning of the file.")
end_line: Optional[int] = Field(None, description="The ending line number (1-based, inclusive). If not provided, reading goes until the end of the file.")
class ReadFileTool(MoviePilotTool):
name: str = "read_file"
description: str = "Read the content of a text file. Supports reading by line range. Each read is limited to 50KB; content exceeding this limit will be truncated."
args_schema: Type[BaseModel] = ReadFileInput
def get_tool_message(self, **kwargs) -> Optional[str]:
"""根据参数生成友好的提示消息"""
file_path = kwargs.get("file_path", "")
file_name = Path(file_path).name if file_path else "未知文件"
return f"读取文件: {file_name}"
async def run(self, file_path: str, start_line: Optional[int] = None,
end_line: Optional[int] = None, **kwargs) -> str:
logger.info(f"执行工具: {self.name}, 参数: file_path={file_path}, start_line={start_line}, end_line={end_line}")
try:
path = AsyncPath(file_path)
if not await path.exists():
return f"错误:文件 {file_path} 不存在"
if not await path.is_file():
return f"错误:{file_path} 不是一个文件"
content = await path.read_text(encoding="utf-8")
truncated = False
if start_line is not None or end_line is not None:
lines = content.splitlines(keepends=True)
total_lines = len(lines)
# 将行号转换为索引(1-based -> 0-based)
s = (start_line - 1) if start_line and start_line >= 1 else 0
e = end_line if end_line and end_line >= 1 else total_lines
# 确保范围有效
s = max(0, min(s, total_lines))
e = max(s, min(e, total_lines))
content = "".join(lines[s:e])
# 检查大小限制
content_bytes = content.encode("utf-8")
if len(content_bytes) > MAX_READ_SIZE:
content = content_bytes[:MAX_READ_SIZE].decode("utf-8", errors="ignore")
truncated = True
if truncated:
return f"{content}\n\n[警告文件内容已超过50KB限制以上内容已被截断。请使用 start_line/end_line 参数分段读取。]"
return content
except PermissionError:
return f"错误:没有权限读取 {file_path}"
except UnicodeDecodeError:
return f"错误:{file_path} 不是文本文件,无法读取"
except Exception as e:
logger.error(f"读取文件 {file_path} 时发生错误: {str(e)}", exc_info=True)
return f"操作失败: {str(e)}"

View File

@@ -10,6 +10,7 @@ from app.chain.media import MediaChain
from app.core.context import Context
from app.core.metainfo import MetaInfo
from app.log import logger
from app.schemas.types import media_type_to_agent
class RecognizeMediaInput(BaseModel):
@@ -32,13 +33,13 @@ class RecognizeMediaTool(MoviePilotTool):
path = kwargs.get("path")
if path:
message = f"正在识别文件媒体信息: {path}"
message = f"识别文件媒体信息: {path}"
elif title:
message = f"正在识别种子媒体信息: {title}"
message = f"识别种子媒体信息: {title}"
if subtitle:
message += f" ({subtitle})"
else:
message = "正在识别媒体信息"
message = "识别媒体信息"
return message
@@ -98,7 +99,8 @@ class RecognizeMediaTool(MoviePilotTool):
"message": error_message
}, ensure_ascii=False)
@staticmethod
def _format_context_result(context: Context, source_type: str) -> str:
"""格式化识别结果为JSON字符串"""
if not context:
return json.dumps({
@@ -124,7 +126,7 @@ class RecognizeMediaTool(MoviePilotTool):
"title": media_info.get("title"),
"en_title": media_info.get("en_title"),
"year": media_info.get("year"),
"type": media_info.get("type"),
"type": media_type_to_agent(media_info.get("type")),
"season": media_info.get("season"),
"tmdb_id": media_info.get("tmdb_id"),
"imdb_id": media_info.get("imdb_id"),
@@ -145,7 +147,7 @@ class RecognizeMediaTool(MoviePilotTool):
"name": meta_info.get("name"),
"title": meta_info.get("title"),
"year": meta_info.get("year"),
"type": meta_info.get("type"),
"type": media_type_to_agent(meta_info.get("type")),
"begin_season": meta_info.get("begin_season"),
"end_season": meta_info.get("end_season"),
"begin_episode": meta_info.get("begin_episode"),
@@ -159,4 +161,3 @@ class RecognizeMediaTool(MoviePilotTool):
}
return json.dumps(result, ensure_ascii=False, indent=2)

View File

@@ -0,0 +1,84 @@
"""重载插件工具"""
import json
from typing import Optional, Type
from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool
from app.agent.tools.impl._plugin_tool_utils import (
get_plugin_snapshot,
reload_plugin_runtime,
)
from app.log import logger
class ReloadPluginInput(BaseModel):
"""重载插件工具的输入参数模型"""
explanation: str = Field(
...,
description="Clear explanation of why this tool is being used in the current context",
)
plugin_id: str = Field(
...,
description="The plugin ID to reload so the latest saved config takes effect.",
)
class ReloadPluginTool(MoviePilotTool):
name: str = "reload_plugin"
description: str = (
"Reload an installed plugin so its latest saved configuration takes effect. "
"This also refreshes the plugin's registered commands, scheduled services, and API routes."
)
require_admin: bool = True
args_schema: Type[BaseModel] = ReloadPluginInput
def get_tool_message(self, **kwargs) -> Optional[str]:
"""生成友好的提示消息"""
plugin_id = kwargs.get("plugin_id", "")
return f"重载插件: {plugin_id}"
@staticmethod
def _reload_plugin_sync(plugin_id: str) -> str:
"""
按后台接口同样的流程重载插件,确保最新配置和注册信息一起刷新。
"""
plugin_info = get_plugin_snapshot(plugin_id)
if not plugin_info:
return json.dumps(
{
"success": False,
"message": f"插件 {plugin_id} 不存在,请先使用 query_installed_plugins 查询有效插件 ID",
},
ensure_ascii=False,
)
reload_plugin_runtime(plugin_id)
refreshed_plugin = get_plugin_snapshot(plugin_id) or plugin_info
return json.dumps(
{
"success": True,
**refreshed_plugin,
"message": "插件已重载,最新配置已生效",
},
ensure_ascii=False,
indent=2,
default=str,
)
async def run(self, plugin_id: str, **kwargs) -> str:
logger.info(f"执行工具: {self.name}, 参数: plugin_id={plugin_id}")
try:
return await self.run_blocking(
"plugin", self._reload_plugin_sync, plugin_id
)
except Exception as e:
logger.error(f"重载插件失败: {e}", exc_info=True)
return json.dumps(
{"success": False, "message": f"重载插件时发生错误: {str(e)}"},
ensure_ascii=False,
)

View File

@@ -6,48 +6,55 @@ from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool
from app.log import logger
from app.scheduler import Scheduler
class RunSchedulerInput(BaseModel):
"""运行定时服务工具的输入参数模型"""
explanation: str = Field(..., description="Clear explanation of why this tool is being used in the current context")
job_id: str = Field(..., description="The ID of the scheduled job to run (can be obtained from query_schedulers tool)")
explanation: str = Field(
...,
description="Clear explanation of why this tool is being used in the current context",
)
job_id: str = Field(
...,
description="The ID of the scheduled job to run (can be obtained from query_schedulers tool)",
)
class RunSchedulerTool(MoviePilotTool):
name: str = "run_scheduler"
description: str = "Manually trigger a scheduled task to run immediately. This will execute the specified scheduler job by its ID."
args_schema: Type[BaseModel] = RunSchedulerInput
require_admin: bool = True
def get_tool_message(self, **kwargs) -> Optional[str]:
"""根据运行参数生成友好的提示消息"""
job_id = kwargs.get("job_id", "")
return f"正在运行定时服务 (ID: {job_id})"
return f"运行定时服务 (ID: {job_id})"
@staticmethod
def _run_scheduler_sync(job_id: str) -> tuple[bool, str]:
"""同步触发定时服务,避免调度器扫描阻塞事件循环。"""
from app.scheduler import Scheduler
scheduler = Scheduler()
for scheduler_item in scheduler.list():
if scheduler_item.id == job_id:
scheduler.start(job_id)
return True, scheduler_item.name
return False, ""
async def run(self, job_id: str, **kwargs) -> str:
logger.info(f"执行工具: {self.name}, 参数: job_id={job_id}")
try:
scheduler = Scheduler()
# 检查定时服务是否存在
schedulers = scheduler.list()
job_exists = False
job_name = None
for s in schedulers:
if s.id == job_id:
job_exists = True
job_name = s.name
break
job_exists, job_name = await self.run_blocking(
"workflow", self._run_scheduler_sync, job_id
)
if not job_exists:
return f"定时服务 ID {job_id} 不存在,请使用 query_schedulers 工具查询可用的定时服务"
# 运行定时服务
scheduler.start(job_id)
return f"成功触发定时服务:{job_name} (ID: {job_id})"
except Exception as e:
logger.error(f"运行定时服务失败: {e}", exc_info=True)
return f"运行定时服务时发生错误: {str(e)}"

View File

@@ -0,0 +1,115 @@
"""运行斜杠命令工具(系统命令 + 插件命令)"""
import json
from typing import Optional, Type
from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool
from app.core.event import eventmanager
from app.log import logger
from app.schemas.types import EventType, MessageChannel
class RunSlashCommandInput(BaseModel):
"""运行斜杠命令工具的输入参数模型"""
explanation: str = Field(
...,
description="Clear explanation of why this tool is being used in the current context",
)
command: str = Field(
...,
description="The slash command to execute, e.g. '/cookiecloud'. "
"Must start with '/'. Can include arguments after the command, e.g. '/command arg1 arg2'. "
"Use query_plugin_capabilities tool to discover available plugin commands, "
"or list_slash_commands tool to discover all available commands (including system commands).",
)
class RunSlashCommandTool(MoviePilotTool):
name: str = "run_slash_command"
description: str = (
"Execute a slash command (system or plugin) by sending a CommandExcute event. "
"This tool supports ALL registered slash commands, including: "
"1) System preset commands (e.g. /cookiecloud, /sites, /subscribes, /downloading, /transfer, /restart, etc.) "
"2) Plugin commands registered by installed plugins. "
"Use the query_plugin_capabilities tool to discover plugin commands, "
"or the list_slash_commands tool to discover all available commands. "
"The command will be executed asynchronously. "
"Note: This tool triggers the command execution but the actual processing happens in the background."
)
args_schema: Type[BaseModel] = RunSlashCommandInput
require_admin: bool = True
def get_tool_message(self, **kwargs) -> Optional[str]:
"""生成友好的提示消息"""
command = kwargs.get("command", "")
return f"执行命令: {command}"
async def run(self, command: str, **kwargs) -> str:
logger.info(f"执行工具: {self.name}, 参数: command={command}")
try:
# 确保命令以 / 开头
if not command.startswith("/"):
command = f"/{command}"
# 从全局 Command 单例中验证命令是否存在(包含系统预设命令 + 插件命令 + 其他命令)
from app.command import Command
cmd_name = command.split()[0]
command_obj = Command()
matched_command = command_obj.get(cmd_name)
if not matched_command:
# 列出所有可用命令帮助用户
all_commands = command_obj.get_commands()
available_cmds = [
f"{cmd} - {info.get('description', '无描述')}"
for cmd, info in all_commands.items()
]
result = {
"success": False,
"message": f"命令 {cmd_name} 不存在",
}
if available_cmds:
result["available_commands"] = available_cmds
return json.dumps(result, ensure_ascii=False, indent=2)
# 构建消息渠道,优先使用当前会话的渠道信息
channel = None
if self._channel:
try:
channel = MessageChannel(self._channel)
except (ValueError, KeyError):
channel = None
# 发送命令执行事件,与 message.py 中的方式一致
eventmanager.send_event(
EventType.CommandExcute,
{
"cmd": command,
"user": self._user_id,
"channel": channel,
"source": self._source,
},
)
result = {
"success": True,
"message": f"命令 {cmd_name} 已触发执行",
"command": command,
"command_desc": matched_command.get("description", ""),
}
# 如果是插件命令,附加插件ID
if matched_command.get("pid"):
result["plugin_id"] = matched_command["pid"]
return json.dumps(result, ensure_ascii=False, indent=2)
except Exception as e:
logger.error(f"执行命令失败: {e}", exc_info=True)
return json.dumps(
{"success": False, "message": f"执行命令时发生错误: {str(e)}"},
ensure_ascii=False,
)

View File

@@ -13,55 +13,70 @@ from app.log import logger
class RunWorkflowInput(BaseModel):
"""执行工作流工具的输入参数模型"""
explanation: str = Field(..., description="Clear explanation of why this tool is being used in the current context")
workflow_identifier: str = Field(..., description="Workflow identifier: can be workflow ID (integer as string) or workflow name")
from_begin: Optional[bool] = Field(True, description="Whether to run workflow from the beginning (default: True, if False will continue from last executed action)")
explanation: str = Field(
...,
description="Clear explanation of why this tool is being used in the current context",
)
workflow_id: int = Field(
..., description="Workflow ID (can be obtained from query_workflows tool)"
)
from_begin: Optional[bool] = Field(
True,
description="Whether to run workflow from the beginning (default: True, if False will continue from last executed action)",
)
class RunWorkflowTool(MoviePilotTool):
name: str = "run_workflow"
description: str = "Execute a specific workflow manually. Can run workflow by ID or name. Supports running from the beginning or continuing from the last executed action."
description: str = "Execute a specific workflow manually by workflow ID. Supports running from the beginning or continuing from the last executed action."
args_schema: Type[BaseModel] = RunWorkflowInput
require_admin: bool = True
def get_tool_message(self, **kwargs) -> Optional[str]:
"""根据工作流参数生成友好的提示消息"""
workflow_identifier = kwargs.get("workflow_identifier", "")
workflow_id = kwargs.get("workflow_id")
from_begin = kwargs.get("from_begin", True)
message = f"正在执行工作流: {workflow_identifier}"
message = f"执行工作流: {workflow_id}"
if not from_begin:
message += " (从上次位置继续)"
else:
message += " (从头开始)"
return message
async def run(self, workflow_identifier: str,
from_begin: Optional[bool] = True, **kwargs) -> str:
logger.info(f"执行工具: {self.name}, 参数: workflow_identifier={workflow_identifier}, from_begin={from_begin}")
@staticmethod
def _run_workflow_sync(
workflow_id: int, from_begin: Optional[bool] = True
) -> tuple[bool, str]:
"""同步执行工作流,放到专用线程池避免长流程阻塞 API 响应。"""
return WorkflowChain().process(workflow_id, from_begin=from_begin)
async def run(
self, workflow_id: int, from_begin: Optional[bool] = True, **kwargs
) -> str:
logger.info(
f"执行工具: {self.name}, 参数: workflow_id={workflow_id}, from_begin={from_begin}"
)
try:
# 获取数据库会话
async with AsyncSessionFactory() as db:
workflow_oper = WorkflowOper(db)
# 尝试解析为工作流ID
workflow = None
if workflow_identifier.isdigit():
# 如果是数字尝试作为工作流ID查询
workflow = await workflow_oper.async_get(int(workflow_identifier))
# 如果不是ID或ID查询失败尝试按名称查询
workflow = await workflow_oper.async_get(workflow_id)
if not workflow:
workflow = await workflow_oper.async_get_by_name(workflow_identifier)
if not workflow:
return f"未找到工作流:{workflow_identifier},请使用 query_workflows 工具查询可用的工作流"
# 执行工作流
workflow_chain = WorkflowChain()
state, errmsg = workflow_chain.process(workflow.id, from_begin=from_begin)
return f"未找到工作流:{workflow_id},请使用 query_workflows 工具查询可用的工作流"
# 工作流执行链路包含大量同步步骤,统一放到 workflow 线程池。
state, errmsg = await self.run_blocking(
"workflow",
self._run_workflow_sync,
workflow.id,
from_begin,
)
if not state:
return f"执行工作流失败:{workflow.name} (ID: {workflow.id})\n错误原因:{errmsg}"
else:
@@ -69,4 +84,3 @@ class RunWorkflowTool(MoviePilotTool):
except Exception as e:
logger.error(f"执行工作流失败: {e}", exc_info=True)
return f"执行工作流时发生错误: {str(e)}"

View File

@@ -8,7 +8,6 @@ from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool
from app.chain.media import MediaChain
from app.core.config import global_vars
from app.core.metainfo import MetaInfoPath
from app.log import logger
from app.schemas import FileItem
@@ -16,18 +15,29 @@ from app.schemas import FileItem
class ScrapeMetadataInput(BaseModel):
"""刮削媒体元数据工具的输入参数模型"""
explanation: str = Field(..., description="Clear explanation of why this tool is being used in the current context")
path: str = Field(...,
description="Path to the file or directory to scrape metadata for (e.g., '/path/to/file.mkv' or '/path/to/directory')")
storage: Optional[str] = Field("local",
description="Storage type: 'local' for local storage, 'smb', 'alist', etc. for remote storage (default: 'local')")
overwrite: Optional[bool] = Field(False,
description="Whether to overwrite existing metadata files (default: False)")
explanation: str = Field(
...,
description="Clear explanation of why this tool is being used in the current context",
)
path: str = Field(
...,
description="Path to the file or directory to scrape metadata for (e.g., '/path/to/file.mkv' or '/path/to/directory')",
)
storage: Optional[str] = Field(
"local",
description="Storage type: 'local' for local storage, 'smb', 'alist', etc. for remote storage (default: 'local')",
)
overwrite: Optional[bool] = Field(
False,
description="Whether to overwrite existing metadata files (default: False)",
)
class ScrapeMetadataTool(MoviePilotTool):
name: str = "scrape_metadata"
description: str = "Generate metadata files (NFO files, posters, backgrounds, etc.) for existing media files or directories. Automatically recognizes media information from the file path and creates metadata files. Supports both local and remote storage. Use 'search_media' to search TMDB database, or 'recognize_media' to extract info from torrent titles/file paths without generating files."
require_admin: bool = True
args_schema: Type[BaseModel] = ScrapeMetadataInput
def get_tool_message(self, **kwargs) -> Optional[str]:
@@ -36,7 +46,7 @@ class ScrapeMetadataTool(MoviePilotTool):
storage = kwargs.get("storage", "local")
overwrite = kwargs.get("overwrite", False)
message = f"正在刮削媒体元数据: {path}"
message = f"刮削媒体元数据: {path}"
if storage != "local":
message += f" [存储: {storage}]"
if overwrite:
@@ -44,33 +54,38 @@ class ScrapeMetadataTool(MoviePilotTool):
return message
async def run(self, path: str, storage: Optional[str] = "local",
overwrite: Optional[bool] = False, **kwargs) -> str:
logger.info(f"执行工具: {self.name}, 参数: path={path}, storage={storage}, overwrite={overwrite}")
async def run(
self,
path: str,
storage: Optional[str] = "local",
overwrite: Optional[bool] = False,
**kwargs,
) -> str:
logger.info(
f"执行工具: {self.name}, 参数: path={path}, storage={storage}, overwrite={overwrite}"
)
try:
# 验证路径
if not path:
return json.dumps({
"success": False,
"message": "刮削路径不能为空"
}, ensure_ascii=False)
return json.dumps(
{"success": False, "message": "刮削路径不能为空"},
ensure_ascii=False,
)
# 创建 FileItem
fileitem = FileItem(
storage=storage,
path=path,
type="file" if Path(path).suffix else "dir"
storage=storage, path=path, type="file" if Path(path).suffix else "dir"
)
# 检查本地存储路径是否存在
if storage == "local":
scrape_path = Path(path)
if not scrape_path.exists():
return json.dumps({
"success": False,
"message": f"刮削路径不存在: {path}"
}, ensure_ascii=False)
return json.dumps(
{"success": False, "message": f"刮削路径不存在: {path}"},
ensure_ascii=False,
)
# 识别媒体信息
media_chain = MediaChain()
@@ -79,41 +94,46 @@ class ScrapeMetadataTool(MoviePilotTool):
mediainfo = await media_chain.async_recognize_by_meta(meta)
if not mediainfo:
return json.dumps({
"success": False,
"message": f"刮削失败,无法识别媒体信息: {path}",
"path": path
}, ensure_ascii=False)
# 在线程池中执行同步的刮削操作
await global_vars.loop.run_in_executor(
None,
lambda: media_chain.scrape_metadata(
fileitem=fileitem,
meta=meta,
mediainfo=mediainfo,
overwrite=overwrite
return json.dumps(
{
"success": False,
"message": f"刮削失败,无法识别媒体信息: {path}",
"path": path,
},
ensure_ascii=False,
)
# 刮削会包含磁盘写入和外部图片/元数据访问,统一放到 storage 线程池。
await self.run_blocking(
"storage",
media_chain.scrape_metadata,
fileitem=fileitem,
meta=meta,
mediainfo=mediainfo,
overwrite=overwrite,
)
return json.dumps({
"success": True,
"message": f"{path} 刮削完成",
"path": path,
"media_info": {
"title": mediainfo.title,
"year": mediainfo.year,
"type": mediainfo.type.value if mediainfo.type else None,
"tmdb_id": mediainfo.tmdb_id,
"season": mediainfo.season
}
}, ensure_ascii=False, indent=2)
return json.dumps(
{
"success": True,
"message": f"{path} 刮削完成",
"path": path,
"media_info": {
"title": mediainfo.title,
"year": mediainfo.year,
"type": mediainfo.type.value if mediainfo.type else None,
"tmdb_id": mediainfo.tmdb_id,
"season": mediainfo.season,
},
},
ensure_ascii=False,
indent=2,
)
except Exception as e:
error_message = f"刮削媒体元数据失败: {str(e)}"
logger.error(f"刮削媒体元数据失败: {e}", exc_info=True)
return json.dumps({
"success": False,
"message": error_message,
"path": path
}, ensure_ascii=False)
return json.dumps(
{"success": False, "message": error_message, "path": path},
ensure_ascii=False,
)

View File

@@ -8,7 +8,7 @@ from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool
from app.chain.media import MediaChain
from app.log import logger
from app.schemas.types import MediaType
from app.schemas.types import MediaType, media_type_to_agent
class SearchMediaInput(BaseModel):
@@ -17,7 +17,7 @@ class SearchMediaInput(BaseModel):
title: str = Field(..., description="The title of the media to search for (e.g., 'The Matrix', 'Breaking Bad')")
year: Optional[str] = Field(None, description="Release year of the media (optional, helps narrow down results)")
media_type: Optional[str] = Field(None,
description="Type of media content: '电影' for films, '电视剧' for television series or anime series")
description="Allowed values: movie, tv")
season: Optional[int] = Field(None,
description="Season number for TV shows and anime (optional, only applicable for series)")
@@ -34,7 +34,7 @@ class SearchMediaTool(MoviePilotTool):
media_type = kwargs.get("media_type")
season = kwargs.get("season")
message = f"正在搜索媒体: {title}"
message = f"搜索媒体: {title}"
if year:
message += f" ({year})"
if media_type:
@@ -56,14 +56,19 @@ class SearchMediaTool(MoviePilotTool):
# 过滤结果
if results:
media_type_enum = None
if media_type:
media_type_enum = MediaType.from_agent(media_type)
if not media_type_enum:
return f"错误:无效的媒体类型 '{media_type}',支持的类型:'movie', 'tv'"
filtered_results = []
for result in results:
if year and result.year != year:
continue
if media_type:
if result.type != MediaType(media_type):
continue
if season and result.season != season:
if media_type_enum and result.type != media_type_enum:
continue
if season is not None and result.season != season:
continue
filtered_results.append(result)
@@ -78,7 +83,7 @@ class SearchMediaTool(MoviePilotTool):
"title": r.title,
"en_title": r.en_title,
"year": r.year,
"type": r.type.value if r.type else None,
"type": media_type_to_agent(r.type),
"season": r.season,
"tmdb_id": r.tmdb_id,
"imdb_id": r.imdb_id,
@@ -91,8 +96,8 @@ class SearchMediaTool(MoviePilotTool):
simplified_results.append(simplified)
result_json = json.dumps(simplified_results, ensure_ascii=False, indent=2)
# 如果结果被裁剪,添加提示信息
if total_count > 30:
return f"注意:搜索结果共找到 {total_count} 条,为节省上下文空间,仅显示前 30 条结果。\n\n{result_json}"
if total_count > 100:
return f"注意:搜索结果共找到 {total_count} 条,为节省上下文空间,仅显示前 100 条结果。\n\n{result_json}"
return result_json
else:
return f"未找到符合条件的媒体资源: {title}"

View File

@@ -24,7 +24,7 @@ class SearchPersonTool(MoviePilotTool):
def get_tool_message(self, **kwargs) -> Optional[str]:
"""根据搜索参数生成友好的提示消息"""
name = kwargs.get("name", "")
return f"正在搜索人物: {name}"
return f"搜索人物: {name}"
async def run(self, name: str, **kwargs) -> str:
logger.info(f"执行工具: {self.name}, 参数: name={name}")
@@ -72,8 +72,8 @@ class SearchPersonTool(MoviePilotTool):
result_json = json.dumps(simplified_results, ensure_ascii=False, indent=2)
# 如果结果被裁剪,添加提示信息
if total_count > 30:
return f"注意:搜索结果共找到 {total_count} 条,为节省上下文空间,仅显示前 30 条结果。\n\n{result_json}"
if total_count > 50:
return f"注意:搜索结果共找到 {total_count} 条,为节省上下文空间,仅显示前 50 条结果。\n\n{result_json}"
return result_json
else:
return f"未找到相关人物信息: {name}"

View File

@@ -29,7 +29,7 @@ class SearchPersonCreditsTool(MoviePilotTool):
"""根据搜索参数生成友好的提示消息"""
person_id = kwargs.get("person_id", "")
source = kwargs.get("source", "")
return f"正在搜索人物参演作品: {source} ID {person_id}"
return f"搜索人物参演作品: {source} ID {person_id}"
async def run(self, person_id: int, source: str, page: Optional[int] = 1, **kwargs) -> str:
logger.info(f"执行工具: {self.name}, 参数: person_id={person_id}, source={source}, page={page}")

View File

@@ -7,18 +7,18 @@ from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool
from app.chain.subscribe import SubscribeChain
from app.core.config import global_vars
from app.db.subscribe_oper import SubscribeOper
from app.log import logger
from app.schemas.types import media_type_to_agent
class SearchSubscribeInput(BaseModel):
"""搜索订阅缺失剧集工具的输入参数模型"""
explanation: str = Field(..., description="Clear explanation of why this tool is being used in the current context")
subscribe_id: int = Field(..., description="The ID of the subscription to search for missing episodes")
subscribe_id: int = Field(..., description="The ID of the subscription to search for missing episodes (can be obtained from query_subscribes tool)")
manual: Optional[bool] = Field(False, description="Whether this is a manual search (default: False)")
filter_groups: Optional[List[str]] = Field(None,
description="List of filter rule group names to apply for this search (optional, use query_rule_groups tool to get available rule groups. If provided, will temporarily update the subscription's filter groups before searching)")
description="List of filter rule group names to apply for this search (optional, can be obtained from query_rule_groups tool. If provided, will temporarily update the subscription's filter groups before searching)")
class SearchSubscribeTool(MoviePilotTool):
@@ -31,7 +31,7 @@ class SearchSubscribeTool(MoviePilotTool):
subscribe_id = kwargs.get("subscribe_id")
manual = kwargs.get("manual", False)
message = f"正在搜索订阅 #{subscribe_id} 的缺失剧集"
message = f"搜索订阅 #{subscribe_id} 的缺失剧集"
if manual:
message += "(手动搜索)"
@@ -58,7 +58,7 @@ class SearchSubscribeTool(MoviePilotTool):
"id": subscribe.id,
"name": subscribe.name,
"year": subscribe.year,
"type": subscribe.type,
"type": media_type_to_agent(subscribe.type),
"season": subscribe.season,
"state": subscribe.state,
"total_episode": subscribe.total_episode,
@@ -80,19 +80,13 @@ class SearchSubscribeTool(MoviePilotTool):
subscribe_oper.update(subscribe_id, {"filter_groups": filter_groups})
logger.info(f"更新订阅 #{subscribe_id} 的规则组为: {filter_groups}")
# 调用 SubscribeChain 的 search 方法
# search 方法是同步的,需要在异步环境中运行
subscribe_chain = SubscribeChain()
# 在线程池中执行同步的搜索操作
# 当 sid 有值时state 参数会被忽略,直接处理该订阅
await global_vars.loop.run_in_executor(
None,
lambda: subscribe_chain.search(
sid=subscribe_id,
state='R', # 默认状态,当 sid 有值时此参数会被忽略
manual=manual
)
# 订阅搜索会触发大量同步站点访问,统一走 subscribe 线程池。
await self.run_blocking(
"subscribe",
SubscribeChain().search,
sid=subscribe_id,
state="R", # 当 sid 有值时此参数会被忽略
manual=manual,
)
# 重新获取订阅信息以获取更新后的状态

View File

@@ -1,141 +1,113 @@
"""搜索种子工具"""
import json
import re
from typing import List, Optional, Type
from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool
from app.chain.search import SearchChain
from app.db.systemconfig_oper import SystemConfigOper
from app.helper.sites import SitesHelper
from app.log import logger
from app.schemas.types import MediaType
from app.schemas.types import MediaType, SystemConfigKey
from ._torrent_search_utils import (
SEARCH_RESULT_CACHE_FILE,
build_filter_options,
)
class SearchTorrentsInput(BaseModel):
"""搜索种子工具的输入参数模型"""
explanation: str = Field(..., description="Clear explanation of why this tool is being used in the current context")
title: str = Field(...,
description="The title of the media resource to search for (e.g., 'The Matrix 1999', 'Breaking Bad S01E01')")
year: Optional[str] = Field(None,
description="Release year of the media (optional, helps narrow down search results)")
media_type: Optional[str] = Field(None,
description="Type of media content: '电影' for films, '电视剧' for television series or anime series")
season: Optional[int] = Field(None, description="Season number for TV shows (optional, only applicable for series)")
tmdb_id: Optional[int] = Field(None, description="TMDB ID (can be obtained from search_media tool). Either tmdb_id or douban_id must be provided.")
douban_id: Optional[str] = Field(None, description="Douban ID (can be obtained from search_media tool). Either tmdb_id or douban_id must be provided.")
media_type: Optional[str] = Field(None, description="Allowed values: movie, tv")
area: Optional[str] = Field(None, description="Search scope: 'title' (default) or 'imdbid'")
sites: Optional[List[int]] = Field(None,
description="Array of specific site IDs to search on (optional, if not provided searches all configured sites)")
filter_pattern: Optional[str] = Field(None,
description="Regular expression pattern to filter torrent titles by resolution, quality, or other keywords (e.g., '4K|2160p|UHD' for 4K content, '1080p|BluRay' for 1080p BluRay)")
class SearchTorrentsTool(MoviePilotTool):
name: str = "search_torrents"
description: str = "Search for torrent files across configured indexer sites based on media information. Returns available torrent downloads with details like file size, quality, and download links."
description: str = ("Search for torrent files by media ID across configured indexer sites, cache the matched results, "
"and return available filter options for follow-up selection. "
"Requires tmdb_id or douban_id (can be obtained from search_media tool) for accurate matching.")
args_schema: Type[BaseModel] = SearchTorrentsInput
def get_tool_message(self, **kwargs) -> Optional[str]:
"""根据搜索参数生成友好的提示消息"""
title = kwargs.get("title", "")
year = kwargs.get("year")
tmdb_id = kwargs.get("tmdb_id")
douban_id = kwargs.get("douban_id")
media_type = kwargs.get("media_type")
season = kwargs.get("season")
filter_pattern = kwargs.get("filter_pattern")
message = f"正在搜索种子: {title}"
if year:
message += f" ({year})"
if tmdb_id:
message = f"搜索种子: TMDB={tmdb_id}"
elif douban_id:
message = f"搜索种子: 豆瓣={douban_id}"
else:
message = "搜索种子"
if media_type:
message += f" [{media_type}]"
if season:
message += f"{season}"
if filter_pattern:
message += f" 过滤: {filter_pattern}"
return message
async def run(self, title: str, year: Optional[str] = None,
media_type: Optional[str] = None, season: Optional[int] = None,
sites: Optional[List[int]] = None, filter_pattern: Optional[str] = None, **kwargs) -> str:
@staticmethod
def _load_configured_sites() -> List[int]:
"""同步读取默认搜索站点列表。"""
return SystemConfigOper().get(SystemConfigKey.IndexerSites) or []
async def run(self, tmdb_id: Optional[int] = None, douban_id: Optional[str] = None,
media_type: Optional[str] = None, area: Optional[str] = None,
sites: Optional[List[int]] = None, **kwargs) -> str:
logger.info(
f"执行工具: {self.name}, 参数: title={title}, year={year}, media_type={media_type}, season={season}, sites={sites}, filter_pattern={filter_pattern}")
f"执行工具: {self.name}, 参数: tmdb_id={tmdb_id}, douban_id={douban_id}, media_type={media_type}, area={area}, sites={sites}")
if not tmdb_id and not douban_id:
return "参数错误tmdb_id 和 douban_id 至少需要提供一个,请先使用 search_media 工具获取媒体 ID。"
try:
search_chain = SearchChain()
torrents = await search_chain.async_search_by_title(title=title, sites=sites)
filtered_torrents = []
# 编译正则表达式(如果提供)
regex_pattern = None
if filter_pattern:
try:
regex_pattern = re.compile(filter_pattern, re.IGNORECASE)
except re.error as e:
logger.warning(f"正则表达式编译失败: {filter_pattern}, 错误: {e}")
return f"正则表达式格式错误: {str(e)}"
for torrent in torrents:
# torrent 是 Context 对象,需要通过 meta_info 和 media_info 访问属性
if year and torrent.meta_info and torrent.meta_info.year != year:
continue
if media_type and torrent.media_info:
if torrent.media_info.type != MediaType(media_type):
continue
if season and torrent.meta_info and torrent.meta_info.begin_season != season:
continue
# 使用正则表达式过滤标题(分辨率、质量等关键字)
if regex_pattern and torrent.torrent_info and torrent.torrent_info.title:
if not regex_pattern.search(torrent.torrent_info.title):
continue
filtered_torrents.append(torrent)
media_type_enum = None
if media_type:
media_type_enum = MediaType.from_agent(media_type)
if not media_type_enum:
return f"错误:无效的媒体类型 '{media_type}',支持的类型:'movie', 'tv'"
filtered_torrents = await search_chain.async_search_by_id(
tmdbid=tmdb_id,
doubanid=douban_id,
mtype=media_type_enum,
area=area or "title",
sites=sites,
cache_local=False,
)
# 获取站点信息
all_indexers = await SitesHelper().async_get_indexers()
all_sites = [{"id": indexer.get("id"), "name": indexer.get("name")} for indexer in (all_indexers or [])]
if sites:
search_site_ids = sites
else:
search_site_ids = self._load_configured_sites()
if filtered_torrents:
# 限制最多50条结果
total_count = len(filtered_torrents)
limited_torrents = filtered_torrents[:50]
# 精简字段,只保留关键信息
simplified_torrents = []
for t in limited_torrents:
simplified = {}
# 精简 torrent_info
if t.torrent_info:
simplified["torrent_info"] = {
"title": t.torrent_info.title,
"size": t.torrent_info.size,
"seeders": t.torrent_info.seeders,
"peers": t.torrent_info.peers,
"site_name": t.torrent_info.site_name,
"enclosure": t.torrent_info.enclosure,
"page_url": t.torrent_info.page_url,
"volume_factor": t.torrent_info.volume_factor,
"pubdate": t.torrent_info.pubdate
}
# 精简 media_info
if t.media_info:
simplified["media_info"] = {
"title": t.media_info.title,
"en_title": t.media_info.en_title,
"year": t.media_info.year,
"type": t.media_info.type.value if t.media_info.type else None,
"season": t.media_info.season,
"tmdb_id": t.media_info.tmdb_id
}
# 精简 meta_info
if t.meta_info:
simplified["meta_info"] = {
"name": t.meta_info.name,
"cn_name": t.meta_info.cn_name,
"en_name": t.meta_info.en_name,
"year": t.meta_info.year,
"type": t.meta_info.type.value if t.meta_info.type else None,
"begin_season": t.meta_info.begin_season
}
simplified_torrents.append(simplified)
result_json = json.dumps(simplified_torrents, ensure_ascii=False, indent=2)
# 如果结果被裁剪,添加提示信息
if total_count > 50:
return f"注意:搜索结果共找到 {total_count} 条,为节省上下文空间,仅显示前 50 条结果。\n\n{result_json}"
await search_chain.async_save_cache(filtered_torrents, SEARCH_RESULT_CACHE_FILE)
result_json = json.dumps({
"total_count": len(filtered_torrents),
"message": "搜索完成。请使用 get_search_results 工具获取搜索结果。",
"all_sites": all_sites,
"search_site_ids": search_site_ids,
"filter_options": build_filter_options(filtered_torrents),
}, ensure_ascii=False, indent=2)
return result_json
else:
return f"未找到相关种子资源: {title}"
media_id = f"TMDB={tmdb_id}" if tmdb_id else f"豆瓣={douban_id}"
result_json = json.dumps({
"message": f"未找到相关种子资源: {media_id}",
"all_sites": all_sites,
"search_site_ids": search_site_ids,
}, ensure_ascii=False, indent=2)
return result_json
except Exception as e:
error_message = f"搜索种子时发生错误: {str(e)}"
logger.error(f"搜索种子失败: {e}", exc_info=True)

View File

@@ -1,22 +1,35 @@
"""搜索网络内容工具"""
import asyncio
import json
import random
import re
from typing import Optional, Type
from typing import Optional, Type, List, Dict
import httpx
from ddgs import DDGS
from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool
from app.core.config import settings
from app.log import logger
from app.utils.http import AsyncRequestUtils
# 搜索超时时间(秒)
SEARCH_TIMEOUT = 20
class SearchWebInput(BaseModel):
"""搜索网络内容工具的输入参数模型"""
explanation: str = Field(..., description="Clear explanation of why this tool is being used in the current context")
query: str = Field(..., description="The search query string to search for on the web")
max_results: Optional[int] = Field(5, description="Maximum number of search results to return (default: 5, max: 10)")
explanation: str = Field(
...,
description="Clear explanation of why this tool is being used in the current context",
)
query: str = Field(
..., description="The search query string to search for on the web"
)
max_results: Optional[int] = Field(
20,
description="Maximum number of search results to return (default: 5, max: 10)",
)
class SearchWebTool(MoviePilotTool):
@@ -27,167 +40,200 @@ class SearchWebTool(MoviePilotTool):
def get_tool_message(self, **kwargs) -> Optional[str]:
"""根据搜索参数生成友好的提示消息"""
query = kwargs.get("query", "")
max_results = kwargs.get("max_results", 5)
return f"正在搜索网络内容: {query} (最多返回 {max_results} 条结果)"
max_results = kwargs.get("max_results", 20)
return f"搜索网络内容: {query} (最多返回 {max_results} 条结果)"
async def run(self, query: str, max_results: Optional[int] = 5, **kwargs) -> str:
async def run(self, query: str, max_results: Optional[int] = 20, **kwargs) -> str:
"""
执行网络搜索
Args:
query: 搜索查询字符串
max_results: 最大返回结果数(默认20,最大20)
Returns:
格式化的搜索结果JSON字符串
"""
logger.info(f"执行工具: {self.name}, 参数: query={query}, max_results={max_results}")
logger.info(
f"执行工具: {self.name}, 参数: query={query}, max_results={max_results}"
)
try:
# 限制最大结果数
max_results = min(max(1, max_results or 5), 10)
# 使用DuckDuckGo API进行搜索
search_results = await self._search_duckduckgo_api(query, max_results)
if not search_results:
max_results = min(max(1, max_results or 20), 20)
results = []
# 1. 优先使用 Exa (如果配置了 API Key)
if settings.EXA_API_KEY:
logger.info("使用 Exa 进行搜索...")
results = await self._search_exa(query, max_results)
# 2. 如果没有结果或未配置 Exa,使用 Tavily (如果配置了 API Key)
if not results and settings.TAVILY_API_KEY:
logger.info("使用 Tavily 进行搜索...")
results = await self._search_tavily(query, max_results)
# 3. 如果没有结果或未配置 Tavily,使用 DuckDuckGo
if not results:
logger.info("使用 DuckDuckGo 进行搜索...")
results = await self._search_duckduckgo(query, max_results)
if not results:
return f"未找到与 '{query}' 相关的搜索结果"
# 裁剪结果以避免占用过多上下文
formatted_results = self._format_and_truncate_results(search_results, max_results)
result_json = json.dumps(formatted_results, ensure_ascii=False, indent=2)
return result_json
# 格式化并裁剪结果
formatted_results = self._format_and_truncate_results(results, max_results)
return json.dumps(formatted_results, ensure_ascii=False, indent=2)
except Exception as e:
error_message = f"搜索网络内容失败: {str(e)}"
logger.error(f"搜索网络内容失败: {e}", exc_info=True)
return error_message
@staticmethod
async def _search_duckduckgo_api(query: str, max_results: int) -> list:
"""
使用DuckDuckGo API进行搜索
Args:
query: 搜索查询
max_results: 最大结果数
Returns:
搜索结果列表
"""
async def _search_tavily(query: str, max_results: int) -> List[Dict]:
"""使用 Tavily API 进行搜索"""
try:
# DuckDuckGo Instant Answer API
api_url = "https://api.duckduckgo.com/"
params = {
"q": query,
"format": "json",
"no_html": "1",
"skip_disambig": "1"
}
# 使用代理(如果配置了)
http_utils = AsyncRequestUtils(
proxies=settings.PROXY,
timeout=10
)
data = await http_utils.get_json(api_url, params=params)
results = []
if data:
# 处理AbstractText摘要
if data.get("AbstractText"):
results.append({
"title": data.get("Heading", query),
"snippet": data.get("AbstractText", ""),
"url": data.get("AbstractURL", ""),
"source": "DuckDuckGo Abstract"
})
# 处理RelatedTopics相关主题
related_topics = data.get("RelatedTopics", [])
for topic in related_topics[:max_results - len(results)]:
if isinstance(topic, dict):
text = topic.get("Text", "")
first_url = topic.get("FirstURL", "")
if text and first_url:
# 提取标题(通常在" - "之前)
title = text.split(" - ")[0] if " - " in text else text[:100]
snippet = text
results.append({
"title": title.strip(),
"snippet": snippet,
"url": first_url,
"source": "DuckDuckGo Related"
})
# 处理Results搜索结果
api_results = data.get("Results", [])
for result in api_results[:max_results - len(results)]:
if isinstance(result, dict):
title = result.get("Text", "")
url = result.get("FirstURL", "")
if title and url:
results.append({
"title": title,
"snippet": result.get("Text", ""),
"url": url,
"source": "DuckDuckGo Results"
})
return results[:max_results]
async with httpx.AsyncClient(timeout=SEARCH_TIMEOUT) as client:
# 从设置中随机选择一个 API Key(如果有多个)
tavily_api_key = random.choice(settings.TAVILY_API_KEY)
response = await client.post(
"https://api.tavily.com/search",
json={
"api_key": tavity_api_key,
"query": query,
"search_depth": "basic",
"max_results": max_results,
"include_answer": False,
"include_images": False,
"include_raw_content": False,
},
)
response.raise_for_status()
data = response.json()
results = []
for result in data.get("results", []):
results.append(
{
"title": result.get("title", ""),
"snippet": result.get("content", ""),
"url": result.get("url", ""),
"source": "Tavily",
}
)
return results
except Exception as e:
logger.warning(f"DuckDuckGo API搜索失败: {e}")
logger.warning(f"Tavily 搜索失败: {e}")
return []
@staticmethod
def _format_and_truncate_results(results: list, max_results: int) -> dict:
"""
格式化并裁剪搜索结果以避免占用过多上下文
Args:
results: 原始搜索结果列表
max_results: 最大结果数
Returns:
格式化后的结果字典
"""
formatted = {
"total_results": len(results),
"results": []
}
# 限制结果数量
limited_results = results[:max_results]
for idx, result in enumerate(limited_results, 1):
title = result.get("title", "")[:200] # 限制标题长度
async def _search_exa(query: str, max_results: int) -> List[Dict]:
"""使用 Exa API 进行搜索"""
try:
async with httpx.AsyncClient(timeout=SEARCH_TIMEOUT) as client:
response = await client.post(
"https://api.exa.ai/search",
headers={
"x-api-key": settings.EXA_API_KEY,
"Content-Type": "application/json",
},
json={
"query": query,
"numResults": max_results,
"type": "auto",
"contents": {"highlights": {"maxCharacters": 2000}},
},
)
response.raise_for_status()
data = response.json()
results = []
for result in data.get("results", []):
highlights = result.get("highlights", [])
snippet = (
highlights[0] if highlights else result.get("text", "")[:500]
)
results.append(
{
"title": result.get("title", ""),
"snippet": snippet,
"url": result.get("url", ""),
"source": "Exa",
}
)
return results
except Exception as e:
logger.warning(f"Exa 搜索失败: {e}")
return []
@staticmethod
def _get_proxy_url(proxy_setting) -> Optional[str]:
"""从代理设置中提取代理URL"""
if not proxy_setting:
return None
if isinstance(proxy_setting, dict):
return proxy_setting.get("http") or proxy_setting.get("https")
return proxy_setting
async def _search_duckduckgo(self, query: str, max_results: int) -> List[Dict]:
"""使用 duckduckgo-search (DDGS) 进行搜索"""
try:
def sync_search():
results = []
ddgs_kwargs = {"timeout": SEARCH_TIMEOUT}
proxy_url = self._get_proxy_url(settings.PROXY)
if proxy_url:
ddgs_kwargs["proxy"] = proxy_url
try:
with DDGS(**ddgs_kwargs) as ddgs:
ddgs_gen = ddgs.text(query, max_results=max_results)
if ddgs_gen:
for result in ddgs_gen:
results.append(
{
"title": result.get("title", ""),
"snippet": result.get("body", ""),
"url": result.get("href", ""),
"source": "DuckDuckGo",
}
)
except Exception as err:
logger.warning(f"DuckDuckGo search process failed: {err}")
return results
loop = asyncio.get_running_loop()
return await loop.run_in_executor(None, sync_search)
except Exception as e:
logger.warning(f"DuckDuckGo 搜索失败: {e}")
return []
@staticmethod
def _format_and_truncate_results(results: List[Dict], max_results: int) -> Dict:
"""格式化并裁剪搜索结果"""
formatted = {"total_results": len(results), "results": []}
for idx, result in enumerate(results[:max_results], 1):
title = result.get("title", "")[:200]
snippet = result.get("snippet", "")
url = result.get("url", "")
source = result.get("source", "Unknown")
# 裁剪摘要,避免过长
max_snippet_length = 300 # 每个摘要最多300字符
# 裁剪摘要
max_snippet_length = 1000 # 增加到1000字符,提供更多上下文
if len(snippet) > max_snippet_length:
snippet = snippet[:max_snippet_length] + "..."
# 清理文本,移除多余的空白字符
snippet = re.sub(r'\s+', ' ', snippet).strip()
formatted["results"].append({
"rank": idx,
"title": title,
"snippet": snippet,
"url": url,
"source": source
})
# 添加提示信息
# 清理文本
snippet = re.sub(r"\s+", " ", snippet).strip()
formatted["results"].append(
{
"rank": idx,
"title": title,
"snippet": snippet,
"url": url,
"source": source,
}
)
if len(results) > max_results:
formatted["note"] = f"注意:共找到 {len(results)} 条结果,为节省上下文空间,仅显示前 {max_results} 条结果。"
formatted["note"] = f"仅显示前 {max_results} 条结果。"
return formatted
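The provider fallback above implies a particular configuration shape: settings.EXA_API_KEY as a single key and settings.TAVILY_API_KEY as a sequence (random.choice picks one entry per request). A hedged sketch of that shape (field names taken from the code above; types and defaults are assumptions):
from typing import Dict, List, Optional, Union

class Settings:
    # Single Exa key; an empty value disables the Exa provider.
    EXA_API_KEY: str = ""
    # One or more Tavily keys; random.choice selects one per request.
    TAVILY_API_KEY: List[str] = []
    # Proxy used by the DuckDuckGo fallback, e.g. {"http": "http://127.0.0.1:7890"} or a plain URL.
    PROXY: Optional[Union[Dict[str, str], str]] = None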

Some files were not shown because too many files have changed in this diff.