Compare commits

...

141 Commits

Author SHA1 Message Date
jxxghp
636c4be9fb 更新 version.py 2026-02-07 08:13:43 +08:00
jxxghp
6bec765a9d Merge pull request #5474 from jxxghp/copilot/optimize-file-move-implementation 2026-02-06 22:20:11 +08:00
copilot-swe-agent[bot]
d61d16ccc4 Restore the optimization - accidentally reverted in previous commit
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-02-06 14:15:29 +00:00
copilot-swe-agent[bot]
f2a5715b24 Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com> 2026-02-06 14:11:15 +00:00
copilot-swe-agent[bot]
c064c3781f Optimize SystemUtils.move to avoid triggering directory monitoring
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-02-06 14:03:03 +00:00
copilot-swe-agent[bot]
bb4dffe2a4 Initial plan 2026-02-06 13:59:59 +00:00
jxxghp
37cf3eeef3 Merge pull request #5473 from cddjr/feat_transfer_files_filter 2026-02-06 21:04:52 +08:00
景大侠
40395b2999 feat: 在构造待整理文件列表时引入过滤逻辑以简化后续处理 2026-02-06 20:56:26 +08:00
景大侠
32afe6445f fix: 整理成功事件缺少历史记录ID 2026-02-06 20:33:13 +08:00
jxxghp
793a991913 Merge remote-tracking branch 'origin/v2' into v2 2026-02-05 14:16:55 +08:00
jxxghp
d278224ff1 fix:优化第三方插件存储类型的检测提示 2026-02-05 14:16:50 +08:00
jxxghp
9b4d0ce6a8 Merge pull request #5466 from DDSRem-Dev/dev 2026-02-05 06:56:25 +08:00
DDSRem
a1829fe590 feat: u115 global rate limiting strategy 2026-02-04 23:24:14 +08:00
jxxghp
2b2b39365c Merge pull request #5464 from ChanningHe/enhance/discord 2026-02-04 18:08:38 +08:00
ChanningHe
1147930f3f fix: [slack&discord&telegram] handle special characters in config names 2026-02-04 14:09:40 +09:00
ChanningHe
636f338ed7 enhance: [discord] add _user_chat_mapping to chat in channel 2026-02-04 13:42:33 +09:00
ChanningHe
72365d00b4 enhance: discord debug information 2026-02-04 12:54:17 +09:00
jxxghp
19d8086732 Merge pull request #5460 from cddjr/fix_download_hash_overridden 2026-02-03 21:23:04 +08:00
大虾
30488418e5 修复 整理时download_hash参数被覆盖
导致后续文件均识别成同一个媒体信息
2026-02-03 18:59:32 +08:00
jxxghp
2f0badd74a Merge pull request #5457 from cddjr/fix_5449 2026-02-02 23:45:07 +08:00
jxxghp
6045b0579b Merge pull request #5455 from cddjr/fix_transfer_result_incorrect 2026-02-02 23:44:32 +08:00
景大侠
498f1fec74 修复 整理视频可能导致误删字幕及音轨 2026-02-02 23:18:46 +08:00
景大侠
f6a541f2b9 修复 覆盖整理失败时误报成功 2026-02-02 21:50:35 +08:00
jxxghp
8ce78eabca 更新 version.py 2026-02-02 18:44:30 +08:00
jxxghp
2c34c5309f Merge pull request #5454 from CHANTXU64/v2 2026-02-02 18:02:45 +08:00
jxxghp
77e680168a Merge pull request #5452 from 0honus0/v2 2026-02-02 17:22:00 +08:00
jxxghp
8a7e59742f Merge pull request #5451 from cddjr/fix_specials_season 2026-02-02 17:21:29 +08:00
jxxghp
42bac14770 Merge pull request #5450 from CHANTXU64/v2 2026-02-02 17:20:40 +08:00
CHANTXU64
8323834483 feat: 优化RSS订阅和网页抓取中发布日期(PubDate)的获取兼容性
- app/helper/rss.py: 优化RSS解析,支持带命名空间的日期标签(如 pubDate/published/updated)。
- app/modules/indexer/spider/__init__.py: 优化网页抓取,增加日期格式校验并对非标准格式进行自动归一化。
2026-02-02 16:52:04 +08:00
景大侠
1751caef62 fix: 补充几处season的判空 2026-02-02 15:01:12 +08:00
0honus0
d622d1474d 根据意见增加尾部逗号 2026-02-02 07:00:57 +00:00
0honus0
f28be2e7de 增加登录按钮xpath支持nicept网站 2026-02-02 06:52:48 +00:00
jxxghp
17773913ae fix: 统一了数据库查询中 season 参数的非空判断逻辑,以正确处理 season=0 的情况。 2026-02-02 14:23:51 +08:00
jxxghp
d469c2d3f9 refactor: 统一将布尔判断 if var: / if not var: 更改为显式的 if var is not None: / if var is None:,以正确处理 None 值。 2026-02-02 13:49:32 +08:00
CHANTXU64
4e74d32882 Fix: TMDB 剧集详情页不显示第 0 季(特别篇) #5444 2026-02-02 10:28:22 +08:00
jxxghp
7b8cd37a9b feat(transfer): enhance job removal methods for thread safety and strict checks 2026-02-01 16:58:32 +08:00
jxxghp
eda306d726 Merge pull request #5448 from cddjr/feat_japanese_subtitles 2026-02-01 16:25:56 +08:00
景大侠
94f3b1fe84 feat: 支持整理日语字幕 2026-02-01 16:04:22 +08:00
jxxghp
c50e3ba293 Merge pull request #5445 from jxxghp/copilot/analyze-task-loss-reason 2026-02-01 08:42:17 +08:00
copilot-swe-agent[bot]
eff7818912 Improve documentation and fix validation bug in add_task
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-01-31 16:44:01 +00:00
copilot-swe-agent[bot]
270bcff8f3 Fix task loss issue in do_transfer multi-threading batch adding
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-01-31 16:38:55 +00:00
copilot-swe-agent[bot]
e04963c2dc Initial plan 2026-01-31 16:33:59 +00:00
jxxghp
f369967c91 更新 version.py 2026-01-29 22:32:03 +08:00
jxxghp
cd982c5526 Merge pull request #5439 from DDSRem-Dev/dev 2026-01-29 22:30:28 +08:00
jxxghp
16e03c9d37 Merge pull request #5438 from cddjr/fix_scrape_follow_tmdb 2026-01-29 22:29:06 +08:00
DDSRem
d38b1f5364 feat: u115 support oauth 2026-01-29 22:14:10 +08:00
景大侠
f57ba4d05e 修复 整理时可能误跟随TMDB变化的问题 2026-01-29 15:04:42 +08:00
jxxghp
172eeaafcf 更新 version.py 2026-01-27 18:07:55 +08:00
jxxghp
3115ed28b2 fix: 历史记录删除源文件后,不在订阅的文件列表中显示 2026-01-26 21:47:26 +08:00
jxxghp
d8dc53805c feat(transfer): 整理事件增加历史记录ID 2026-01-26 21:29:05 +08:00
jxxghp
7218d10e1b feat(transfer): 拆分字幕和音频整理事件 2026-01-26 19:33:50 +08:00
jxxghp
89bf85f501 Merge pull request #5425 from xiaoQQya/develop 2026-01-26 18:41:42 +08:00
jxxghp
8334a468d0 feat(category): Add API endpoints for retrieving and saving category configuration 2026-01-26 12:53:26 +08:00
jxxghp
3da80ed077 Merge pull request #5423 from jxxghp/copilot/update-category-helper-integration 2026-01-26 12:35:05 +08:00
copilot-swe-agent[bot]
2883ccbe87 Move category methods to ChainBase and use consistent naming
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-01-26 04:32:11 +00:00
copilot-swe-agent[bot]
5d3443fee4 Use ruamel.yaml consistently in CategoryHelper
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-01-26 04:10:15 +00:00
copilot-swe-agent[bot]
27756a53db Implement proper architecture: module->chain->API with single CategoryHelper
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-01-26 04:07:56 +00:00
copilot-swe-agent[bot]
71cde6661d Improve comments for clarity
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-01-25 10:08:13 +00:00
copilot-swe-agent[bot]
a857337b31 Fix architecture - restore helper layer and use ModuleManager for reload trigger
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-01-25 10:06:01 +00:00
copilot-swe-agent[bot]
4ee21ffae4 Address code review feedback - use ruamel.yaml consistently and fix typo
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-01-25 09:58:28 +00:00
copilot-swe-agent[bot]
d8399f7e85 Consolidate CategoryHelper classes and add reload trigger
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-01-25 09:56:11 +00:00
copilot-swe-agent[bot]
574ac8d32f Initial plan 2026-01-25 09:52:31 +00:00
jxxghp
a2611bfa7d feat: Add search_imdbid to subscriptions and improve error message propagation and handling for existing subscriptions. 2026-01-25 14:57:46 +08:00
xiaoQQya
853badb76f fix: 更新站点 Rousi Pro 获取未读消息接口 2026-01-25 14:36:22 +08:00
jxxghp
5d69e1d2a5 Merge pull request #5419 from wikrin/subscribe-source-query-enhancement 2026-01-25 14:04:42 +08:00
jxxghp
6494f28bdb Fix: Remove isolated ToolMessage instances after message trimming to prevent OpenAI errors. 2026-01-25 13:42:29 +08:00
Attente
f55916bda2 feat(transfer): 支持按条件查询订阅获取自定义识别词用于文件转移 2026-01-25 11:34:03 +08:00
jxxghp
04691ee197 Merge remote-tracking branch 'origin/v2' into v2 2026-01-25 09:39:59 +08:00
jxxghp
2ac0e564e1 feat(category):新增二级分类维护API 2026-01-25 09:39:48 +08:00
jxxghp
6072a29a20 Merge pull request #5418 from wikrin/CNSUB-filter-rules-update 2026-01-25 08:17:20 +08:00
Attente
8658942385 feat(filter): 添加配置监听和改进中字过滤规则 2026-01-25 01:06:50 +08:00
jxxghp
cc4859950c Merge remote-tracking branch 'origin/v2' into v2 2026-01-24 19:24:22 +08:00
jxxghp
23b81ad6f1 feat(config):完善默认插件库 2026-01-24 19:24:15 +08:00
jxxghp
e3b9dca5c0 Merge pull request #5417 from cddjr/fix_u115_create_folder
fix(u115): 创建目录误报失败
2026-01-24 19:14:40 +08:00
景大侠
a2359a1ad2 fix(u115): 创建目录误报失败
- 解析响应时忽略20004错误码
- 根目录创建目录会报错ValueError
2026-01-24 17:48:53 +08:00
jxxghp
cb875b1b34 更新 version.py 2026-01-24 12:04:54 +08:00
jxxghp
b92a85b4bc Merge pull request #5415 from cddjr/fix_bluray_scrape 2026-01-24 11:43:44 +08:00
景大侠
8c7dd6bab2 修复 原盘目录不刮削 2026-01-24 11:42:00 +08:00
景大侠
aad7df64d7 简化原盘大小计算代码 2026-01-24 11:29:30 +08:00
jxxghp
8474342007 feat(agent):上下文超长时自动摘要 2026-01-24 11:24:59 +08:00
jxxghp
61ccb4be65 feat(agent): 新增命令行工具 2026-01-24 11:10:15 +08:00
jxxghp
1c6f69707c fix 增加模块异常traceback打印 2026-01-24 11:00:24 +08:00
jxxghp
e08e8c482a Merge pull request #5414 from jxxghp/copilot/fix-file-organization-error 2026-01-24 10:49:19 +08:00
copilot-swe-agent[bot]
548c1d2cab Add null check for schema access in IndexerModule
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-01-24 02:26:55 +00:00
copilot-swe-agent[bot]
5a071bf3d1 Add null check for schema.value access in FileManagerModule
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-01-24 02:25:55 +00:00
copilot-swe-agent[bot]
1bffcbd947 Initial plan 2026-01-24 02:22:25 +00:00
jxxghp
274a36a83a 更新 config.py 2026-01-24 10:04:37 +08:00
jxxghp
ec40f36114 fix(agent):修复智能体工具调用,优化媒体库查询工具 2026-01-24 09:46:19 +08:00
jxxghp
af19f274a7 Merge pull request #5413 from jxxghp/copilot/fix-runnable-lambda-error 2026-01-24 08:38:24 +08:00
copilot-swe-agent[bot]
2316004194 Fix 'RunnableLambda' object is not callable error by wrapping validated_trimmer
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-01-24 00:35:59 +00:00
copilot-swe-agent[bot]
98762198ef Initial plan 2026-01-24 00:33:35 +00:00
jxxghp
1469de22a4 Merge pull request #5412 from jxxghp/copilot/translate-comments-to-chinese 2026-01-24 08:27:11 +08:00
copilot-swe-agent[bot]
1e687f960a Translate English comments to Chinese in agent/__init__.py
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-01-24 00:25:21 +00:00
copilot-swe-agent[bot]
7f01b835fd Initial plan 2026-01-24 00:22:19 +00:00
jxxghp
e46b6c5c01 Merge pull request #5411 from jxxghp/copilot/fix-tool-call-exception-handling 2026-01-24 08:20:51 +08:00
copilot-swe-agent[bot]
74226ad8df Improve error message to include exception type for better debugging
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-01-24 00:18:43 +00:00
copilot-swe-agent[bot]
f8ae7be539 Fix: Ensure tool exceptions are stored in memory to maintain message chain integrity
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-01-24 00:18:06 +00:00
copilot-swe-agent[bot]
37b16e380d Initial plan 2026-01-24 00:14:13 +00:00
jxxghp
9ea3e9f652 Merge pull request #5409 from jxxghp/copilot/fix-agent-execution-error 2026-01-24 08:12:39 +08:00
copilot-swe-agent[bot]
54422b5181 Final refinements: fix falsy value handling and add warning for extra ToolMessages
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-01-24 00:10:00 +00:00
copilot-swe-agent[bot]
712995dcf3 Address code review feedback: fix ToolCall handling and add orphaned message filtering
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-01-24 00:08:25 +00:00
jxxghp
c2767b0fd6 Merge pull request #5410 from jxxghp/copilot/fix-media-exists-error 2026-01-24 08:08:03 +08:00
copilot-swe-agent[bot]
179cc61f65 Fix tool call integrity validation to skip orphaned ToolMessages
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-01-24 00:05:21 +00:00
copilot-swe-agent[bot]
f3b910d55a Fix AttributeError when mediainfo.type is None
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-01-24 00:04:02 +00:00
copilot-swe-agent[bot]
f4157b52ea Fix agent tool_calls integrity validation
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-01-24 00:02:47 +00:00
copilot-swe-agent[bot]
79710310ce Initial plan 2026-01-24 00:00:31 +00:00
copilot-swe-agent[bot]
3412498438 Initial plan 2026-01-23 23:57:27 +00:00
jxxghp
b896b07a08 fix search_web tool 2026-01-24 07:39:07 +08:00
jxxghp
379bff0622 Merge pull request #5407 from cddjr/fix_db 2026-01-24 06:45:54 +08:00
jxxghp
474f47aa9f Merge pull request #5406 from cddjr/fix_transfer 2026-01-24 06:45:10 +08:00
jxxghp
f1e26a4133 Merge pull request #5405 from cddjr/fix_modify_time_comparison 2026-01-24 06:44:05 +08:00
jxxghp
e37f881207 Merge pull request #5404 from jxxghp/copilot/reimplement-network-search-tool 2026-01-24 06:39:56 +08:00
大虾
306c0b707b Update database/versions/41ef1dd7467c_2_2_2.py
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
2026-01-24 02:53:14 +08:00
景大侠
08c448ee30 修复 迁移PG后可能卡启动的问题 2026-01-24 02:49:54 +08:00
景大侠
1532014067 修复 多下载器返回相同种子造成的重复整理 2026-01-24 01:41:48 +08:00
景大侠
fa9f604af9 修复 入库通知不显示集数
因过早清理作业导致
2026-01-24 01:17:23 +08:00
景大侠
3b3d0d6539 修复 文件列表接口中空值时间戳的比较逻辑 2026-01-23 23:52:43 +08:00
copilot-swe-agent[bot]
9641d33040 Fix generator handling and update error message to reference requirements.in
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-01-23 15:23:52 +00:00
copilot-swe-agent[bot]
eca339d107 Address code review comments: improve code organization and use modern asyncio
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-01-23 15:22:45 +00:00
copilot-swe-agent[bot]
ca18705d88 Reimplemented SearchWebTool using duckduckgo-search library
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-01-23 15:20:06 +00:00
copilot-swe-agent[bot]
8f17b52466 Initial plan 2026-01-23 15:16:09 +00:00
jxxghp
8cf84e722b fix agent error message 2026-01-23 22:50:59 +08:00
jxxghp
7c4d736b54 feat:Agent上下文裁剪 2026-01-23 22:47:18 +08:00
jxxghp
1b3ae6ab25 fix 下载器整理标签设置 2026-01-23 18:10:59 +08:00
jxxghp
a4ad08136e 更新 version.py 2026-01-23 14:33:41 +08:00
jxxghp
df5e7997c5 Merge pull request #5401 from jxxghp/copilot/check-jobview-logic 2026-01-23 07:21:46 +08:00
copilot-swe-agent[bot]
b2cb3768c1 Fix remove_job to use __get_id for consistent job removal
Co-authored-by: jxxghp <51039935+jxxghp@users.noreply.github.com>
2026-01-22 14:38:33 +00:00
copilot-swe-agent[bot]
fa169c5cd3 Initial plan 2026-01-22 14:34:18 +00:00
jxxghp
bbb3975b67 更新 transfer.py 2026-01-22 22:31:52 +08:00
jxxghp
4502a9c4fa fix:优化移动模式删除逻辑 2026-01-22 22:15:40 +08:00
jxxghp
86905a2670 Merge pull request #5399 from cddjr/fix_downloader_monitor 2026-01-22 21:41:25 +08:00
景大侠
b1e60a4867 修复 下载器监控 2026-01-22 21:34:50 +08:00
jxxghp
1efe3324fb fix:优化设置种子状态标签的时机 2026-01-22 08:24:23 +08:00
jxxghp
55c1e37d39 更新 query_subscribes.py 2026-01-22 08:05:41 +08:00
jxxghp
7fa700317c 更新 update_subscribe.py 2026-01-22 08:03:48 +08:00
jxxghp
bbe831a57c 优化 transfer.py 中任务处理逻辑,增强错误信息反馈 2026-01-21 23:55:20 +08:00
jxxghp
90c86c056c fix all_tasks 2026-01-21 23:30:39 +08:00
jxxghp
36f22a28df fix 完成状态计算 2026-01-21 23:23:37 +08:00
jxxghp
ac03c51e2c 更新 transfer.py 2026-01-21 23:06:29 +08:00
jxxghp
bd9e92f705 更新 transfer.py 2026-01-21 22:59:30 +08:00
jxxghp
281eff5eb2 更新 version.py 2026-01-21 22:54:31 +08:00
66 changed files with 1851 additions and 715 deletions

View File

@@ -1,12 +1,17 @@
import asyncio
from typing import Dict, List, Any
from typing import Dict, List, Any, Union
import json
import tiktoken
from langchain.agents import AgentExecutor, create_openai_tools_agent
from langchain.agents import AgentExecutor
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_community.callbacks import get_openai_callback
from langchain_core.chat_history import InMemoryChatMessageHistory
from langchain_core.messages import HumanMessage, AIMessage, ToolCall, ToolMessage, SystemMessage
from langchain_core.messages import HumanMessage, AIMessage, ToolCall, ToolMessage, SystemMessage, trim_messages
from langchain_core.runnables import RunnablePassthrough, RunnableLambda
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain.agents.format_scratchpad.openai_tools import format_to_openai_tool_messages
from langchain.agents.output_parsers.openai_tools import OpenAIToolsAgentOutputParser
from app.agent.callback import StreamingCallbackHandler
from app.agent.memory import conversation_manager
@@ -120,6 +125,7 @@ class MoviePilotAgent:
))
elif msg.get("role") == "system":
chat_history.add_message(SystemMessage(content=msg.get("content", "")))
return chat_history
@staticmethod
@@ -140,15 +146,140 @@ class MoviePilotAgent:
logger.error(f"初始化提示词失败: {e}")
raise e
@staticmethod
def _token_counter(messages: List[Union[HumanMessage, AIMessage, ToolMessage, SystemMessage]]) -> int:
"""
通用的Token计数器
"""
try:
# 尝试从模型获取编码集,如果失败则回退到 cl100k_base (大多数现代模型使用的编码)
try:
encoding = tiktoken.encoding_for_model(settings.LLM_MODEL)
except KeyError:
encoding = tiktoken.get_encoding("cl100k_base")
num_tokens = 0
for message in messages:
# 基础开销 (每个消息大约 3 个 token)
num_tokens += 3
# 1. 处理文本内容 (content)
if isinstance(message.content, str):
num_tokens += len(encoding.encode(message.content))
elif isinstance(message.content, list):
for part in message.content:
if isinstance(part, dict) and part.get("type") == "text":
num_tokens += len(encoding.encode(part.get("text", "")))
# 2. 处理工具调用 (仅 AIMessage 包含 tool_calls)
if getattr(message, "tool_calls", None):
for tool_call in message.tool_calls:
# 函数名
num_tokens += len(encoding.encode(tool_call.get("name", "")))
# 参数 (转为 JSON 估算)
args_str = json.dumps(tool_call.get("args", {}), ensure_ascii=False)
num_tokens += len(encoding.encode(args_str))
# 额外的结构开销 (ID 等)
num_tokens += 3
# 3. 处理角色权重
num_tokens += 1
# 加上回复的起始 Token (大约 3 个 token)
num_tokens += 3
return num_tokens
except Exception as e:
logger.error(f"Token计数失败: {e}")
# 发生错误时返回一个保守的估算值
return len(str(messages)) // 4
def _create_agent_executor(self) -> RunnableWithMessageHistory:
"""
创建Agent执行器
"""
try:
agent = create_openai_tools_agent(
llm=self.llm,
tools=self.tools,
prompt=self.prompt
# 消息裁剪器,防止上下文超出限制
base_trimmer = trim_messages(
max_tokens=settings.LLM_MAX_CONTEXT_TOKENS * 1000 * 0.8,
strategy="last",
token_counter=self._token_counter,
include_system=True,
allow_partial=False,
start_on="human",
)
# 包装trimmer在裁剪后验证工具调用的完整性
def validated_trimmer(messages):
# 如果输入是 PromptValue,转换为消息列表
if hasattr(messages, "to_messages"):
messages = messages.to_messages()
trimmed = base_trimmer.invoke(messages)
# 二次校验:确保不出现 broken tool chains
# 1. AIMessage with tool_calls 必须紧跟着对应的 ToolMessage
# 2. ToolMessage 必须有对应的 AIMessage 前置
safe_messages = []
i = 0
while i < len(trimmed):
msg = trimmed[i]
if isinstance(msg, AIMessage) and getattr(msg, "tool_calls", None):
# 检查工具调用序列是否完整
tool_calls = msg.tool_calls
is_valid_sequence = True
tool_results = []
# 向后查找对应的 ToolMessage
temp_i = i + 1
for tool_call in tool_calls:
if temp_i >= len(trimmed):
is_valid_sequence = False
break
next_msg = trimmed[temp_i]
if isinstance(next_msg, ToolMessage) and next_msg.tool_call_id == tool_call.get("id"):
tool_results.append(next_msg)
temp_i += 1
else:
is_valid_sequence = False
break
if is_valid_sequence:
# 序列完整,保留消息
safe_messages.append(msg)
safe_messages.extend(tool_results)
i = temp_i # 跳过已处理的工具结果
else:
# 序列不完整,丢弃该 AIMessage(后续的孤立 ToolMessage 会在下一次循环被当做 orphaned 处理掉)
logger.warning(f"移除无效的工具调用链: {len(tool_calls)} calls, incomplete results")
i += 1
continue
if isinstance(msg, ToolMessage):
# 如果在这里遇到 ToolMessage,说明它没有被上面的逻辑消费,则是孤立的或者顺序错乱
logger.warning("移除孤立的 ToolMessage")
i += 1
continue
# 其他类型的消息直接保留
safe_messages.append(msg)
i += 1
if len(safe_messages) < len(messages):
logger.info(f"LangChain消息上下文已裁剪: {len(messages)} -> {len(safe_messages)}")
return safe_messages
# 创建Agent执行链
agent = (
RunnablePassthrough.assign(
agent_scratchpad=lambda x: format_to_openai_tool_messages(
x["intermediate_steps"]
)
)
| self.prompt
| RunnableLambda(validated_trimmer)
| self.llm.bind_tools(self.tools)
| OpenAIToolsAgentOutputParser()
)
executor = AgentExecutor(
agent=agent,
@@ -169,11 +300,81 @@ class MoviePilotAgent:
logger.error(f"创建Agent执行器失败: {e}")
raise e
async def _summarize_history(self):
"""
总结提炼之前的对话和工具执行情况,并把会话总结变成新的系统提示词取代之前的对话
"""
try:
# 获取当前历史记录
chat_history = self.get_session_history(self.session_id)
messages = chat_history.messages
if not messages:
return
logger.info(f"会话 {self.session_id} 历史消息长度已超过 90%,开始总结并重置上下文...")
# 将消息转换为摘要所需的文本格式
history_text = ""
for msg in messages:
if isinstance(msg, HumanMessage):
history_text += f"用户: {msg.content}\n"
elif isinstance(msg, AIMessage):
history_text += f"智能体: {msg.content}\n"
if getattr(msg, "tool_calls", None):
for tool_call in msg.tool_calls:
history_text += f"智能体调用工具: {tool_call.get('name')},参数: {tool_call.get('args')}\n"
elif isinstance(msg, ToolMessage):
history_text += f"工具响应: {msg.content}\n"
elif isinstance(msg, SystemMessage):
history_text += f"系统: {msg.content}\n"
# 摘要提示词
summary_prompt = (
"Please provide a comprehensive and highly informational summary of the preceding conversation and tool executions. "
"Your goal is to condense the history while retaining all critical details for future reference. "
"Ensure you include:\n"
"1. User's core intents, specific requests, and any mentioned preferences.\n"
"2. Names of movies, TV shows, or other key entities discussed.\n"
"3. A concise log of tool calls made and their specific results/outcomes.\n"
"4. The current status of any tasks and any pending actions.\n"
"5. Any important context that would be necessary for the agent to continue the conversation seamlessly.\n"
"The summary should be dense with information and serve as the primary context for the next stage of the interaction."
)
# 调用 LLM 进行总结 (非流式)
summary_llm = LLMHelper.get_llm(streaming=False)
response = await summary_llm.ainvoke([
SystemMessage(content=summary_prompt),
HumanMessage(content=f"Here is the conversation history to summarize:\n{history_text}")
])
summary_content = str(response.content)
if not summary_content:
logger.warning("总结生成失败,跳过重置逻辑。")
return
# 清空原有的会话记录并插入新的系统总结
await conversation_manager.clear_memory(self.session_id, self.user_id)
await conversation_manager.add_conversation(
session_id=self.session_id,
user_id=self.user_id,
role="system",
content=f"<history_summary>\n{summary_content}\n</history_summary>"
)
logger.info(f"会话 {self.session_id} 历史摘要替换完成。")
except Exception as e:
logger.error(f"执行会话总结出错: {str(e)}")
async def process_message(self, message: str) -> str:
"""
处理用户消息
"""
try:
# 检查上下文长度是否超过 90%
history = self.get_session_history(self.session_id)
if self._token_counter(history.messages) > settings.LLM_MAX_CONTEXT_TOKENS * 1000 * 0.9:
await self._summarize_history()
# 添加用户消息到记忆
await conversation_manager.add_conversation(
self.session_id,
@@ -190,7 +391,8 @@ class MoviePilotAgent:
# 执行Agent
logger.info(f"Agent执行推理: session_id={self.session_id}, input={message}")
await self._execute_agent(input_context)
result = await self._execute_agent(input_context)
# 获取Agent回复
agent_message = await self.callback_handler.get_message()
@@ -208,7 +410,7 @@ class MoviePilotAgent:
content=agent_message
)
else:
agent_message = "很抱歉,智能体出错了,未能生成回复内容。"
agent_message = result.get("output") or "很抱歉,智能体出错了,未能生成回复内容。"
await self.send_agent_message(agent_message)
return agent_message
@@ -250,7 +452,7 @@ class MoviePilotAgent:
except Exception as e:
logger.error(f"Agent执行失败: {e}")
return {
"output": f"执行过程中发生错误: {str(e)}",
"output": str(e),
"intermediate_steps": [],
"token_usage": {}
}

View File

@@ -232,7 +232,7 @@ class ConversationMemoryManager:
return []
# 获取所有消息
return memory.messages
return memory.messages[:-1]
async def get_recent_messages(
self,

View File

@@ -1,4 +1,5 @@
import json
import uuid
from abc import ABCMeta, abstractmethod
from typing import Any, Optional
@@ -42,6 +43,9 @@ class MoviePilotTool(BaseTool, metaclass=ABCMeta):
# 获取工具调用前的agent消息
agent_message = await self._callback_handler.get_message()
# 生成唯一的工具调用ID
call_id = f"call_{str(uuid.uuid4())[:16]}"
# 记忆工具调用
await conversation_manager.add_conversation(
session_id=self._session_id,
@@ -49,8 +53,8 @@ class MoviePilotTool(BaseTool, metaclass=ABCMeta):
role="tool_call",
content=agent_message,
metadata={
"call_id": self.__class__.__name__,
"tool_name": self.__class__.__name__,
"call_id": call_id,
"tool_name": self.name,
"parameters": kwargs
}
)
@@ -61,22 +65,30 @@ class MoviePilotTool(BaseTool, metaclass=ABCMeta):
explanation = kwargs.get("explanation")
if explanation:
tool_message = explanation
# 合并agent消息和工具执行消息一起发送
messages = []
if agent_message:
messages.append(agent_message)
if tool_message:
messages.append(f"⚙️ => {tool_message}")
# 发送合并后的消息
if messages:
merged_message = "\n\n".join(messages)
await self.send_tool_message(merged_message, title="MoviePilot助手")
logger.debug(f'Executing tool {self.name} with args: {kwargs}')
result = await self.run(**kwargs)
logger.debug(f'Tool {self.name} executed with result: {result}')
# 执行工具,捕获异常确保结果总是被存储到记忆中
try:
result = await self.run(**kwargs)
logger.debug(f'Tool {self.name} executed with result: {result}')
except Exception as e:
# 记录异常详情
error_message = f"工具执行异常 ({type(e).__name__}): {str(e)}"
logger.error(f'Tool {self.name} execution failed: {e}', exc_info=True)
result = error_message
# 记忆工具调用结果
if isinstance(result, str):
@@ -85,13 +97,15 @@ class MoviePilotTool(BaseTool, metaclass=ABCMeta):
formated_result = str(result)
else:
formated_result = json.dumps(result, ensure_ascii=False, indent=2)
await conversation_manager.add_conversation(
session_id=self._session_id,
user_id=self._user_id,
role="tool_result",
content=formated_result,
metadata={
"call_id": self.__class__.__name__
"call_id": call_id,
"tool_name": self.name,
}
)

View File

@@ -39,6 +39,7 @@ from app.agent.tools.impl.query_directory_settings import QueryDirectorySettings
from app.agent.tools.impl.list_directory import ListDirectoryTool
from app.agent.tools.impl.query_transfer_history import QueryTransferHistoryTool
from app.agent.tools.impl.transfer_file import TransferFileTool
from app.agent.tools.impl.execute_command import ExecuteCommandTool
from app.core.plugin import PluginManager
from app.log import logger
from .base import MoviePilotTool
@@ -96,7 +97,8 @@ class MoviePilotToolFactory:
QuerySchedulersTool,
RunSchedulerTool,
QueryWorkflowsTool,
RunWorkflowTool
RunWorkflowTool,
ExecuteCommandTool
]
# 创建内置工具
for ToolClass in tool_definitions:

View File

@@ -108,6 +108,9 @@ class AddSubscribeTool(MoviePilotTool):
**subscribe_kwargs
)
if sid:
if message and "已存在" in message:
return f"订阅已存在:{title} ({year})。如需修改参数请先删除旧订阅。"
result_msg = f"成功添加订阅:{title} ({year})"
if subscribe_kwargs:
params = []

View File

@@ -0,0 +1,81 @@
"""执行Shell命令工具"""
import asyncio
from typing import Optional, Type
from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool
from app.log import logger
class ExecuteCommandInput(BaseModel):
"""执行Shell命令工具的输入参数模型"""
explanation: str = Field(..., description="Clear explanation of why this command is being executed")
command: str = Field(..., description="The shell command to execute")
timeout: Optional[int] = Field(60, description="Max execution time in seconds (default: 60)")
class ExecuteCommandTool(MoviePilotTool):
name: str = "execute_command"
description: str = "Safely execute shell commands on the server. Useful for system maintenance, checking status, or running custom scripts. Includes timeout and output limits."
args_schema: Type[BaseModel] = ExecuteCommandInput
def get_tool_message(self, **kwargs) -> Optional[str]:
"""根据命令生成友好的提示消息"""
command = kwargs.get("command", "")
return f"正在执行系统命令: {command}"
async def run(self, command: str, timeout: Optional[int] = 60, **kwargs) -> str:
logger.info(f"执行工具: {self.name}, 参数: command={command}, timeout={timeout}")
# 简单安全过滤
forbidden_keywords = ["rm -rf /", ":(){ :|:& };:", "dd if=/dev/zero", "mkfs", "reboot", "shutdown"]
for keyword in forbidden_keywords:
if keyword in command:
return f"错误:命令包含禁止使用的关键字 '{keyword}'"
try:
# 执行命令
process = await asyncio.create_subprocess_shell(
command,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE
)
try:
# 等待完成,带超时
stdout, stderr = await asyncio.wait_for(process.communicate(), timeout=timeout)
# 处理输出
stdout_str = stdout.decode('utf-8', errors='replace').strip()
stderr_str = stderr.decode('utf-8', errors='replace').strip()
exit_code = process.returncode
result = f"命令执行完成 (退出码: {exit_code})"
if stdout_str:
result += f"\n\n标准输出:\n{stdout_str}"
if stderr_str:
result += f"\n\n错误输出:\n{stderr_str}"
# 如果没有输出
if not stdout_str and not stderr_str:
result += "\n\n(无输出内容)"
# 限制输出长度,防止上下文过长
if len(result) > 3000:
result = result[:3000] + "\n\n...(输出内容过长,已截断)"
return result
except asyncio.TimeoutError:
# 超时处理
try:
process.kill()
except ProcessLookupError:
pass
return f"命令执行超时 (限制: {timeout}秒)"
except Exception as e:
logger.error(f"执行命令失败: {e}", exc_info=True)
return f"执行命令时发生错误: {str(e)}"

View File

@@ -8,6 +8,7 @@ from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool
from app.chain.mediaserver import MediaServerChain
from app.core.context import MediaInfo
from app.core.meta import MetaBase
from app.log import logger
from app.schemas.types import MediaType
@@ -51,47 +52,88 @@ class QueryLibraryExistsTool(MoviePilotTool):
try:
if not title:
return "请提供媒体标题进行查询"
# 创建 MediaInfo 对象
mediainfo = MediaInfo()
mediainfo.title = title
mediainfo.year = year
# 转换媒体类型
if media_type == "电影":
mediainfo.type = MediaType.MOVIE
elif media_type == "电视剧":
mediainfo.type = MediaType.TV
# media_type == "all" 时不设置类型,让媒体服务器自动判断
# 调用媒体服务器接口实时查询
media_chain = MediaServerChain()
# 1. 识别媒体信息(获取 TMDB ID 和各季的总集数等元数据)
meta = MetaBase(title=title)
if year:
meta.year = str(year)
if media_type == "电影":
meta.type = MediaType.MOVIE
elif media_type == "电视剧":
meta.type = MediaType.TV
# 使用识别方法补充信息
recognize_info = media_chain.recognize_media(meta=meta)
if recognize_info:
mediainfo = recognize_info
else:
# 识别失败,创建基本信息的 MediaInfo
mediainfo = MediaInfo()
mediainfo.title = title
mediainfo.year = year
if media_type == "电影":
mediainfo.type = MediaType.MOVIE
elif media_type == "电视剧":
mediainfo.type = MediaType.TV
# 2. 调用媒体服务器接口实时查询存在信息
existsinfo = media_chain.media_exists(mediainfo=mediainfo)
if not existsinfo:
return "媒体库中未找到相关媒体"
# 如果找到了,获取详细信息
# 3. 如果找到了,获取详细信息并组装结果
result_items = []
if existsinfo.itemid and existsinfo.server:
iteminfo = media_chain.iteminfo(server=existsinfo.server, item_id=existsinfo.itemid)
if iteminfo:
# 使用 model_dump() 转换为字典格式
item_dict = iteminfo.model_dump(exclude_none=True)
# 对于电视剧,补充已存在的季集详情及进度统计
if existsinfo.type == MediaType.TV:
# 注入已存在集信息 (Dict[int, list])
item_dict["seasoninfo"] = existsinfo.seasons
# 统计库中已存在的季集总数
if existsinfo.seasons:
item_dict["existing_episodes_count"] = sum(len(e) for e in existsinfo.seasons.values())
item_dict["seasons_existing_count"] = {str(s): len(e) for s, e in existsinfo.seasons.items()}
# 如果识别到了元数据,补充总计对比和进度概览
if mediainfo.seasons:
item_dict["seasons_total_count"] = {str(s): len(e) for s, e in mediainfo.seasons.items()}
# 进度概览,例如 "Season 1": "3/12"
item_dict["seasons_progress"] = {
f"{s}": f"{len(existsinfo.seasons.get(s, []))}/{len(mediainfo.seasons.get(s, []))}"
for s in mediainfo.seasons.keys() if (s in existsinfo.seasons or s > 0)
}
result_items.append(item_dict)
if result_items:
return json.dumps(result_items, ensure_ascii=False)
# 如果找到了但没有详细信息,返回基本信息
# 如果找到了但没有获取到 iteminfo,返回基本信息
result_dict = {
"title": mediainfo.title,
"year": mediainfo.year,
"type": existsinfo.type.value if existsinfo.type else None,
"server": existsinfo.server,
"server_type": existsinfo.server_type,
"itemid": existsinfo.itemid,
"seasons": existsinfo.seasons if existsinfo.seasons else {}
}
if existsinfo.type == MediaType.TV and existsinfo.seasons:
result_dict["existing_episodes_count"] = sum(len(e) for e in existsinfo.seasons.values())
result_dict["seasons_existing_count"] = {str(s): len(e) for s, e in existsinfo.seasons.items()}
if mediainfo.seasons:
result_dict["seasons_total_count"] = {str(s): len(e) for s, e in mediainfo.seasons.items()}
return json.dumps([result_dict], ensure_ascii=False)
except Exception as e:
logger.error(f"查询媒体库失败: {e}", exc_info=True)
return f"查询媒体库时发生错误: {str(e)}"

View File

@@ -14,7 +14,7 @@ class QuerySubscribesInput(BaseModel):
"""查询订阅工具的输入参数模型"""
explanation: str = Field(..., description="Clear explanation of why this tool is being used in the current context")
status: Optional[str] = Field("all",
description="Filter subscriptions by status: 'R' for enabled subscriptions, 'P' for disabled ones, 'all' for all subscriptions")
description="Filter subscriptions by status: 'R' for enabled subscriptions, 'S' for paused ones, 'all' for all subscriptions")
media_type: Optional[str] = Field("all",
description="Filter by media type: '电影' for films, '电视剧' for television series, 'all' for all types")
@@ -33,7 +33,7 @@ class QuerySubscribesTool(MoviePilotTool):
# 根据状态过滤条件生成提示
if status != "all":
status_map = {"R": "已启用", "P": "禁用"}
status_map = {"R": "已启用", "S": "暂停"}
parts.append(f"状态: {status_map.get(status, status)}")
# 根据媒体类型过滤条件生成提示

View File

@@ -63,7 +63,7 @@ class SearchMediaTool(MoviePilotTool):
if media_type:
if result.type != MediaType(media_type):
continue
if season and result.season != season:
if season is not None and result.season != season:
continue
filtered_results.append(result)

View File

@@ -80,7 +80,7 @@ class SearchTorrentsTool(MoviePilotTool):
if media_type and torrent.media_info:
if torrent.media_info.type != MediaType(media_type):
continue
if season and torrent.meta_info and torrent.meta_info.begin_season != season:
if season is not None and torrent.meta_info and torrent.meta_info.begin_season != season:
continue
# 使用正则表达式过滤标题(分辨率、质量等关键字)
if regex_pattern and torrent.torrent_info and torrent.torrent_info.title:

View File

@@ -1,22 +1,26 @@
"""搜索网络内容工具"""
import asyncio
import json
import re
from typing import Optional, Type
from typing import Optional, Type, List, Dict
import httpx
from ddgs import DDGS
from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool
from app.core.config import settings
from app.log import logger
from app.utils.http import AsyncRequestUtils
# 搜索超时时间(秒)
SEARCH_TIMEOUT = 20
class SearchWebInput(BaseModel):
"""搜索网络内容工具的输入参数模型"""
explanation: str = Field(..., description="Clear explanation of why this tool is being used in the current context")
query: str = Field(..., description="The search query string to search for on the web")
max_results: Optional[int] = Field(5, description="Maximum number of search results to return (default: 5, max: 10)")
max_results: Optional[int] = Field(5,
description="Maximum number of search results to return (default: 5, max: 10)")
class SearchWebTool(MoviePilotTool):
@@ -33,151 +37,137 @@ class SearchWebTool(MoviePilotTool):
async def run(self, query: str, max_results: Optional[int] = 5, **kwargs) -> str:
"""
执行网络搜索
Args:
query: 搜索查询字符串
max_results: 最大返回结果数默认5最大10
Returns:
格式化的搜索结果JSON字符串
"""
logger.info(f"执行工具: {self.name}, 参数: query={query}, max_results={max_results}")
try:
# 限制最大结果数
max_results = min(max(1, max_results or 5), 10)
# 使用DuckDuckGo API进行搜索
search_results = await self._search_duckduckgo_api(query, max_results)
if not search_results:
results = []
# 1. 优先使用 Tavily (如果配置了 API Key)
if settings.TAVILY_API_KEY:
logger.info("使用 Tavily 进行搜索...")
results = await self._search_tavily(query, max_results)
# 2. 如果没有结果或未配置 Tavily使用 DuckDuckGo
if not results:
logger.info("使用 DuckDuckGo 进行搜索...")
results = await self._search_duckduckgo(query, max_results)
if not results:
return f"未找到与 '{query}' 相关的搜索结果"
# 裁剪结果以避免占用过多上下文
formatted_results = self._format_and_truncate_results(search_results, max_results)
result_json = json.dumps(formatted_results, ensure_ascii=False, indent=2)
return result_json
# 格式化并裁剪结果
formatted_results = self._format_and_truncate_results(results, max_results)
return json.dumps(formatted_results, ensure_ascii=False, indent=2)
except Exception as e:
error_message = f"搜索网络内容失败: {str(e)}"
logger.error(f"搜索网络内容失败: {e}", exc_info=True)
return error_message
@staticmethod
async def _search_duckduckgo_api(query: str, max_results: int) -> list:
"""
使用DuckDuckGo API进行搜索
Args:
query: 搜索查询
max_results: 最大结果数
Returns:
搜索结果列表
"""
async def _search_tavily(query: str, max_results: int) -> List[Dict]:
"""使用 Tavily API 进行搜索"""
try:
# DuckDuckGo Instant Answer API
api_url = "https://api.duckduckgo.com/"
params = {
"q": query,
"format": "json",
"no_html": "1",
"skip_disambig": "1"
}
# 使用代理(如果配置了)
http_utils = AsyncRequestUtils(
proxies=settings.PROXY,
timeout=10
)
data = await http_utils.get_json(api_url, params=params)
results = []
if data:
# 处理AbstractText摘要
if data.get("AbstractText"):
async with httpx.AsyncClient(timeout=SEARCH_TIMEOUT) as client:
response = await client.post(
"https://api.tavily.com/search",
json={
"api_key": settings.TAVILY_API_KEY,
"query": query,
"search_depth": "basic",
"max_results": max_results,
"include_answer": False,
"include_images": False,
"include_raw_content": False,
}
)
response.raise_for_status()
data = response.json()
results = []
for result in data.get("results", []):
results.append({
"title": data.get("Heading", query),
"snippet": data.get("AbstractText", ""),
"url": data.get("AbstractURL", ""),
"source": "DuckDuckGo Abstract"
'title': result.get('title', ''),
'snippet': result.get('content', ''),
'url': result.get('url', ''),
'source': 'Tavily'
})
# 处理RelatedTopics相关主题
related_topics = data.get("RelatedTopics", [])
for topic in related_topics[:max_results - len(results)]:
if isinstance(topic, dict):
text = topic.get("Text", "")
first_url = topic.get("FirstURL", "")
if text and first_url:
# 提取标题(通常在" - "之前)
title = text.split(" - ")[0] if " - " in text else text[:100]
snippet = text
results.append({
"title": title.strip(),
"snippet": snippet,
"url": first_url,
"source": "DuckDuckGo Related"
})
# 处理Results搜索结果
api_results = data.get("Results", [])
for result in api_results[:max_results - len(results)]:
if isinstance(result, dict):
title = result.get("Text", "")
url = result.get("FirstURL", "")
if title and url:
results.append({
"title": title,
"snippet": result.get("Text", ""),
"url": url,
"source": "DuckDuckGo Results"
})
return results[:max_results]
return results
except Exception as e:
logger.warning(f"DuckDuckGo API搜索失败: {e}")
logger.warning(f"Tavily 搜索失败: {e}")
return []
@staticmethod
def _format_and_truncate_results(results: list, max_results: int) -> dict:
"""
格式化并裁剪搜索结果以避免占用过多上下文
Args:
results: 原始搜索结果列表
max_results: 最大结果数
Returns:
格式化后的结果字典
"""
def _get_proxy_url(proxy_setting) -> Optional[str]:
"""从代理设置中提取代理URL"""
if not proxy_setting:
return None
if isinstance(proxy_setting, dict):
return proxy_setting.get('http') or proxy_setting.get('https')
return proxy_setting
async def _search_duckduckgo(self, query: str, max_results: int) -> List[Dict]:
"""使用 duckduckgo-search (DDGS) 进行搜索"""
try:
def sync_search():
results = []
ddgs_kwargs = {
'timeout': SEARCH_TIMEOUT
}
proxy_url = self._get_proxy_url(settings.PROXY)
if proxy_url:
ddgs_kwargs['proxy'] = proxy_url
try:
with DDGS(**ddgs_kwargs) as ddgs:
ddgs_gen = ddgs.text(
query,
max_results=max_results
)
if ddgs_gen:
for result in ddgs_gen:
results.append({
'title': result.get('title', ''),
'snippet': result.get('body', ''),
'url': result.get('href', ''),
'source': 'DuckDuckGo'
})
except Exception as err:
logger.warning(f"DuckDuckGo search process failed: {err}")
return results
loop = asyncio.get_running_loop()
return await loop.run_in_executor(None, sync_search)
except Exception as e:
logger.warning(f"DuckDuckGo 搜索失败: {e}")
return []
@staticmethod
def _format_and_truncate_results(results: List[Dict], max_results: int) -> Dict:
"""格式化并裁剪搜索结果"""
formatted = {
"total_results": len(results),
"results": []
}
# 限制结果数量
limited_results = results[:max_results]
for idx, result in enumerate(limited_results, 1):
title = result.get("title", "")[:200] # 限制标题长度
for idx, result in enumerate(results[:max_results], 1):
title = result.get("title", "")[:200]
snippet = result.get("snippet", "")
url = result.get("url", "")
source = result.get("source", "Unknown")
# 裁剪摘要,避免过长
max_snippet_length = 300 # 每个摘要最多300字符
# 裁剪摘要
max_snippet_length = 500 # 增加到500字符提供更多上下文
if len(snippet) > max_snippet_length:
snippet = snippet[:max_snippet_length] + "..."
# 清理文本,移除多余的空白字符
# 清理文本
snippet = re.sub(r'\s+', ' ', snippet).strip()
formatted["results"].append({
"rank": idx,
"title": title,
@@ -185,9 +175,8 @@ class SearchWebTool(MoviePilotTool):
"url": url,
"source": source
})
# 添加提示信息
if len(results) > max_results:
formatted["note"] = f"注意:共找到 {len(results)} 条结果,为节省上下文空间,仅显示前 {max_results} 条结果。"
formatted["note"] = f"仅显示前 {max_results} 条结果。"
return formatted

View File

@@ -29,7 +29,7 @@ class UpdateSubscribeInput(BaseModel):
include: Optional[str] = Field(None, description="Include filter as regular expression (optional)")
exclude: Optional[str] = Field(None, description="Exclude filter as regular expression (optional)")
filter: Optional[str] = Field(None, description="Filter rule as regular expression (optional)")
state: Optional[str] = Field(None, description="Subscription state: 'R' for enabled, 'P' for pending, 'S' for stoped (optional)")
state: Optional[str] = Field(None, description="Subscription state: 'R' for enabled, 'P' for pending, 'S' for paused (optional)")
sites: Optional[List[int]] = Field(None, description="List of site IDs to search from (optional)")
downloader: Optional[str] = Field(None, description="Downloader name (optional)")
save_path: Optional[str] = Field(None, description="Save path for downloaded files (optional)")

View File

@@ -4,6 +4,7 @@ import jieba
from fastapi import APIRouter, Depends
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import Session
from pathlib import Path
from app import schemas
from app.chain.storage import StorageChain
@@ -11,7 +12,7 @@ from app.core.event import eventmanager
from app.core.security import verify_token
from app.db import get_async_db, get_db
from app.db.models import User
from app.db.models.downloadhistory import DownloadHistory
from app.db.models.downloadhistory import DownloadHistory, DownloadFiles
from app.db.models.transferhistory import TransferHistory
from app.db.user_oper import get_current_active_superuser_async, get_current_active_superuser
from app.schemas.types import EventType
@@ -98,6 +99,8 @@ def delete_transfer_history(history_in: schemas.TransferHistory,
state = StorageChain().delete_media_file(src_fileitem)
if not state:
return schemas.Response(success=False, message=f"{src_fileitem.path} 删除失败")
# 删除下载记录中关联的文件
DownloadFiles.delete_by_fullpath(db, Path(src_fileitem.path).as_posix())
# 发送事件
eventmanager.send_event(
EventType.DownloadFileDeleted,

View File

@@ -11,7 +11,10 @@ from app.core.context import Context
from app.core.event import eventmanager
from app.core.metainfo import MetaInfo, MetaInfoPath
from app.core.security import verify_token, verify_apitoken
from app.db.models import User
from app.db.user_oper import get_current_active_user, get_current_active_superuser
from app.schemas import MediaType, MediaRecognizeConvertEventData
from app.schemas.category import CategoryConfig
from app.schemas.types import ChainEventType
router = APIRouter()
@@ -131,6 +134,26 @@ def scrape(fileitem: schemas.FileItem,
return schemas.Response(success=True, message=f"{fileitem.path} 刮削完成")
@router.get("/category/config", summary="获取分类策略配置", response_model=schemas.Response)
def get_category_config(_: User = Depends(get_current_active_user)):
"""
获取分类策略配置
"""
config = MediaChain().category_config()
return schemas.Response(success=True, data=config.model_dump())
@router.post("/category/config", summary="保存分类策略配置", response_model=schemas.Response)
def save_category_config(config: CategoryConfig, _: User = Depends(get_current_active_superuser)):
"""
保存分类策略配置
"""
if MediaChain().save_category_config(config):
return schemas.Response(success=True, message="保存成功")
else:
return schemas.Response(success=False, message="保存失败")
@router.get("/category", summary="查询自动分类配置", response_model=dict)
async def category(_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
@@ -172,7 +195,7 @@ async def seasons(mediaid: Optional[str] = None,
tmdbid = int(mediaid[5:])
seasons_info = await TmdbChain().async_tmdb_seasons(tmdbid=tmdbid)
if seasons_info:
if season:
if season is not None:
return [sea for sea in seasons_info if sea.season_number == season]
return seasons_info
if title:
@@ -184,11 +207,11 @@ async def seasons(mediaid: Optional[str] = None,
if settings.RECOGNIZE_SOURCE == "themoviedb":
seasons_info = await TmdbChain().async_tmdb_seasons(tmdbid=mediainfo.tmdb_id)
if seasons_info:
if season:
if season is not None:
return [sea for sea in seasons_info if sea.season_number == season]
return seasons_info
else:
sea = season or 1
sea = season if season is not None else 1
return [schemas.MediaSeason(
season_number=sea,
poster_path=mediainfo.poster_path,

View File

@@ -54,7 +54,7 @@ async def exists_local(title: Optional[str] = None,
判断本地是否存在
"""
meta = MetaInfo(title)
if not season:
if season is None:
season = meta.begin_season
# 返回对象
ret_info = {}
@@ -83,7 +83,7 @@ def exists(media_in: schemas.MediaInfo,
existsinfo: schemas.ExistMediaInfo = MediaServerChain().media_exists(mediainfo=mediainfo)
if not existsinfo:
return {}
if media_in.season:
if media_in.season is not None:
return {
media_in.season: existsinfo.seasons.get(media_in.season) or []
}
@@ -101,7 +101,7 @@ def not_exists(media_in: schemas.MediaInfo,
mtype = MediaType(media_in.type) if media_in.type else None
if mtype:
meta.type = mtype
if media_in.season:
if media_in.season is not None:
meta.begin_season = media_in.season
meta.type = MediaType.TV
if media_in.year:

View File

@@ -1,4 +1,4 @@
from datetime import datetime
import math
from pathlib import Path
from typing import Any, List, Optional
@@ -31,6 +31,17 @@ def qrcode(name: str, _: schemas.TokenPayload = Depends(verify_token)) -> Any:
return schemas.Response(success=False, message=errmsg)
@router.get("/auth_url/{name}", summary="获取 OAuth2 授权 URL", response_model=schemas.Response)
def auth_url(name: str, _: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
获取 OAuth2 授权 URL
"""
auth_data, errmsg = StorageChain().generate_auth_url(name)
if auth_data:
return schemas.Response(success=True, data=auth_data)
return schemas.Response(success=False, message=errmsg)
@router.get("/check/{name}", summary="二维码登录确认", response_model=schemas.Response)
def check(name: str, ck: Optional[str] = None, t: Optional[str] = None,
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
@@ -83,7 +94,7 @@ def list_files(fileitem: schemas.FileItem,
if sort == "name":
file_list.sort(key=lambda x: StringUtils.natural_sort_key(x.name or ""))
else:
file_list.sort(key=lambda x: x.modify_time or datetime.min, reverse=True)
file_list.sort(key=lambda x: x.modify_time or -math.inf, reverse=True)
return file_list

View File

@@ -199,7 +199,7 @@ async def subscribe_mediaid(
# 使用名称检查订阅
if title_check and title:
meta = MetaInfo(title)
if season:
if season is not None:
meta.begin_season = season
result = await Subscribe.async_get_by_title(db, title=meta.name, season=meta.begin_season)

View File

@@ -26,6 +26,7 @@ from app.helper.service import ServiceConfigHelper
from app.log import logger
from app.schemas import TransferInfo, TransferTorrent, ExistMediaInfo, DownloadingTorrent, CommingMessage, Notification, \
WebhookEventInfo, TmdbEpisode, MediaPerson, FileItem, TransferDirectoryConf
from app.schemas.category import CategoryConfig
from app.schemas.types import TorrentStatus, MediaType, MediaImageType, EventType, MessageChannel
from app.utils.object import ObjectUtils
@@ -251,6 +252,7 @@ class ChainBase(metaclass=ABCMeta):
# 中止继续执行
break
except Exception as err:
logger.error(traceback.format_exc())
self.__handle_system_error(err, module_id, module_name, method, **kwargs)
return result
@@ -292,6 +294,7 @@ class ChainBase(metaclass=ABCMeta):
# 中止继续执行
break
except Exception as err:
logger.error(traceback.format_exc())
self.__handle_system_error(err, module_id, module_name, method, **kwargs)
return result
@@ -1060,6 +1063,18 @@ class ChainBase(metaclass=ABCMeta):
"""
return self.run_module("media_category")
def category_config(self) -> CategoryConfig:
"""
获取分类策略配置
"""
return self.run_module("load_category_config")
def save_category_config(self, config: CategoryConfig) -> bool:
"""
保存分类策略配置
"""
return self.run_module("save_category_config", config=config)
def register_commands(self, commands: Dict[str, dict]) -> None:
"""
注册菜单命令

View File

@@ -292,10 +292,6 @@ class DownloadChain(ChainBase):
# 登记下载记录
downloadhis = DownloadHistoryOper()
# 获取应用的识别词(如果有)
custom_words_str = None
if hasattr(_meta, 'apply_words') and _meta.apply_words:
custom_words_str = '\n'.join(_meta.apply_words)
downloadhis.add(
path=download_path.as_posix(),
type=_media.type.value,
@@ -319,7 +315,6 @@ class DownloadChain(ChainBase):
date=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
media_category=_media.category,
episode_group=_media.episode_group,
custom_words=custom_words_str,
note={"source": source}
)

View File

@@ -150,7 +150,7 @@ class MediaChain(ChainBase):
org_meta.year = year
org_meta.begin_season = season_number
org_meta.begin_episode = episode_number
if org_meta.begin_season or org_meta.begin_episode:
if org_meta.begin_season is not None or org_meta.begin_episode is not None:
org_meta.type = MediaType.TV
# 重新识别
return self.recognize_media(meta=org_meta)
@@ -958,10 +958,10 @@ class MediaChain(ChainBase):
year = None
if tmdbinfo.get('release_date'):
year = tmdbinfo['release_date'][:4]
elif tmdbinfo.get('seasons') and season:
elif tmdbinfo.get('seasons') and season is not None:
for seainfo in tmdbinfo['seasons']:
season_number = seainfo.get("season_number")
if not season_number:
if season_number is None:
continue
air_date = seainfo.get("air_date")
if air_date and season_number == season:

View File

@@ -40,7 +40,7 @@ class MessageChain(ChainBase):
# 用户会话信息 {userid: (session_id, last_time)}
_user_sessions: Dict[Union[str, int], tuple] = {}
# 会话超时时间(分钟)
_session_timeout_minutes: int = 15
_session_timeout_minutes: int = 30
@staticmethod
def __get_noexits_info(
@@ -842,8 +842,7 @@ class MessageChain(ChainBase):
return buttons
@staticmethod
def _get_or_create_session_id(userid: Union[str, int]) -> str:
def _get_or_create_session_id(self, userid: Union[str, int]) -> str:
"""
获取或创建会话ID
如果用户上次会话在15分钟内则复用相同的会话ID否则创建新的会话ID
@@ -851,34 +850,33 @@ class MessageChain(ChainBase):
current_time = datetime.now()
# 检查用户是否有已存在的会话
if userid in MessageChain._user_sessions:
session_id, last_time = MessageChain._user_sessions[userid]
if userid in self._user_sessions:
session_id, last_time = self._user_sessions[userid]
# 计算时间差
time_diff = current_time - last_time
# 如果时间差小于等于15分钟复用会话ID
if time_diff <= timedelta(minutes=MessageChain._session_timeout_minutes):
# 如果时间差小于等于xx分钟复用会话ID
if time_diff <= timedelta(minutes=self._session_timeout_minutes):
# 更新最后使用时间
MessageChain._user_sessions[userid] = (session_id, current_time)
self._user_sessions[userid] = (session_id, current_time)
logger.info(
f"复用会话ID: {session_id}, 用户: {userid}, 距离上次会话: {time_diff.total_seconds() / 60:.1f}分钟")
return session_id
# 创建新的会话ID
new_session_id = f"user_{userid}_{int(time.time())}"
MessageChain._user_sessions[userid] = (new_session_id, current_time)
self._user_sessions[userid] = (new_session_id, current_time)
logger.info(f"创建新会话ID: {new_session_id}, 用户: {userid}")
return new_session_id
@staticmethod
def clear_user_session(userid: Union[str, int]) -> bool:
def clear_user_session(self, userid: Union[str, int]) -> bool:
"""
清除指定用户的会话信息
返回是否成功清除
"""
if userid in MessageChain._user_sessions:
session_id, _ = MessageChain._user_sessions.pop(userid)
if userid in self._user_sessions:
session_id, _ = self._user_sessions.pop(userid)
logger.info(f"已清除用户 {userid} 的会话: {session_id}")
return True
return False
@@ -889,8 +887,8 @@ class MessageChain(ChainBase):
"""
# 获取并清除会话信息
session_id = None
if userid in MessageChain._user_sessions:
session_id, _ = MessageChain._user_sessions.pop(userid)
if userid in self._user_sessions:
session_id, _ = self._user_sessions.pop(userid)
logger.info(f"已清除用户 {userid} 的会话: {session_id}")
# 如果有会话ID同时清除智能体的会话记忆

View File

@@ -49,7 +49,7 @@ class SearchChain(ChainBase):
logger.error(f'{tmdbid} 媒体信息识别失败!')
return []
no_exists = None
if season:
if season is not None:
no_exists = {
tmdbid or doubanid: {
season: NotExistMediaInfo(episodes=[])
@@ -129,7 +129,7 @@ class SearchChain(ChainBase):
logger.error(f'{tmdbid} 媒体信息识别失败!')
return []
no_exists = None
if season:
if season is not None:
no_exists = {
tmdbid or doubanid: {
season: NotExistMediaInfo(episodes=[])
@@ -181,7 +181,7 @@ class SearchChain(ChainBase):
# 过滤剧集
season_episodes = {sea: info.episodes
for sea, info in no_exists[mediakey].items()}
elif mediainfo.season:
elif mediainfo.season is not None:
# 豆瓣只搜索当前季
season_episodes = {mediainfo.season: []}
else:

View File

@@ -31,6 +31,12 @@ class StorageChain(ChainBase):
"""
return self.run_module("generate_qrcode", storage=storage)
def generate_auth_url(self, storage: str) -> Optional[Tuple[dict, str]]:
"""
生成 OAuth2 授权 URL
"""
return self.run_module("generate_auth_url", storage=storage)
def check_login(self, storage: str, **kwargs) -> Optional[Tuple[dict, str]]:
"""
登录确认
@@ -150,7 +156,7 @@ class StorageChain(ChainBase):
"""
判断是否包含蓝光必备的文件夹
"""
required_files = ("BDMV", "CERTIFICATE")
required_files = {"BDMV", "CERTIFICATE"}
return any(
item.type == "dir" and item.name in required_files
for item in fileitems or []

View File

@@ -144,7 +144,7 @@ class SubscribeChain(ChainBase):
metainfo.year = year
if mtype:
metainfo.type = mtype
if season:
if season is not None:
metainfo.type = MediaType.TV
metainfo.begin_season = season
# 识别媒体信息
@@ -174,7 +174,7 @@ class SubscribeChain(ChainBase):
# 豆瓣标题处理
meta = MetaInfo(mediainfo.title)
mediainfo.title = meta.name
if not season:
if season is None:
season = meta.begin_season
# 使用名称识别兜底
@@ -188,7 +188,7 @@ class SubscribeChain(ChainBase):
# 总集数
if mediainfo.type == MediaType.TV:
if not season:
if season is None:
season = 1
# 总集数
if not kwargs.get('total_episode'):
@@ -292,7 +292,7 @@ class SubscribeChain(ChainBase):
"description": mediainfo.overview
})
# 返回结果
return sid, ""
return sid, err_msg
async def async_add(self, title: str, year: str,
mtype: MediaType = None,
@@ -321,7 +321,7 @@ class SubscribeChain(ChainBase):
metainfo.year = year
if mtype:
metainfo.type = mtype
if season:
if season is not None:
metainfo.type = MediaType.TV
metainfo.begin_season = season
# 识别媒体信息
@@ -351,7 +351,7 @@ class SubscribeChain(ChainBase):
# 豆瓣标题处理
meta = MetaInfo(mediainfo.title)
mediainfo.title = meta.name
if not season:
if season is None:
season = meta.begin_season
# 使用名称识别兜底
@@ -365,7 +365,7 @@ class SubscribeChain(ChainBase):
# 总集数
if mediainfo.type == MediaType.TV:
if not season:
if season is None:
season = 1
# 总集数
if not kwargs.get('total_episode'):
@@ -469,7 +469,7 @@ class SubscribeChain(ChainBase):
"description": mediainfo.overview
})
# 返回结果
return sid, ""
return sid, err_msg
@staticmethod
def exists(mediainfo: MediaInfo, meta: MetaBase = None):
@@ -530,7 +530,7 @@ class SubscribeChain(ChainBase):
# 生成元数据
meta = MetaInfo(subscribe.name)
meta.year = subscribe.year
meta.begin_season = subscribe.season or None
meta.begin_season = subscribe.season if subscribe.season is not None else None
try:
meta.type = MediaType(subscribe.type)
except ValueError:
@@ -1119,6 +1119,19 @@ class SubscribeChain(ChainBase):
})
logger.info(f'{subscribe.name} 订阅元数据更新完成')
def get_subscribe_by_source(self, source: str) -> Optional[Subscribe]:
"""
从来源获取订阅
"""
source_keyword = self.parse_subscribe_source_keyword(source)
if not source_keyword:
return None
# 只保留需要的字段动态获取订阅
valid_fields = {k: v for k, v in source_keyword.items()
if k in ["type", "season", "tmdbid", "doubanid", "bangumiid"]}
# 暂时不考虑订阅历史, 若有必要再添加
return SubscribeOper().get_by(**valid_fields)
@staticmethod
def follow():
"""
@@ -1655,7 +1668,7 @@ class SubscribeChain(ChainBase):
if download_his:
for his in download_his:
# 查询下载文件
files = downloadhis.get_files_by_hash(his.download_hash)
files = downloadhis.get_files_by_hash(his.download_hash, state=1)
if files:
for file in files:
# 识别文件名
@@ -1828,8 +1841,9 @@ class SubscribeChain(ChainBase):
def get_subscribe_source_keyword(subscribe: Subscribe) -> str:
"""
构造用于订阅来源的关键字字符串
:param subscribe: Subscribe 对象
:return: 格式化的订阅来源关键字字符串,格式为 "Subscribe|{...}"
:return str: 格式化的订阅来源关键字字符串,格式为 "Subscribe|{...}"
"""
source_keyword = {
'id': subscribe.id,
@@ -1844,3 +1858,24 @@ class SubscribeChain(ChainBase):
'bangumiid': subscribe.bangumiid
}
return f"Subscribe|{json.dumps(source_keyword, ensure_ascii=False)}"
@staticmethod
def parse_subscribe_source_keyword(source_keyword_str: str) -> Optional[dict]:
"""
解析订阅来源关键字字符串
:param source_keyword_str: 订阅来源关键字字符串,格式为 "Subscribe|{...}"
:return Dict: 如果解析失败则返回None
"""
if not source_keyword_str or not source_keyword_str.startswith("Subscribe|"):
return None
try:
# 分割字符串获取JSON部分
json_part = source_keyword_str.split("|", 1)[1]
# 解析JSON字符串
source_keyword = json.loads(json_part)
return source_keyword
except (IndexError, json.JSONDecodeError, TypeError) as e:
logger.error(f"解析订阅来源关键字失败: {e}")
return None

File diff suppressed because it is too large Load Diff

View File

@@ -209,6 +209,8 @@ class ConfigModel(BaseModel):
# ==================== 云盘配置 ====================
# 115 AppId
U115_APP_ID: str = "100196807"
# 115 OAuth2 Server 地址
U115_AUTH_SERVER: str = "https://movie-pilot.org"
# Alipan AppId
ALIPAN_APP_ID: str = "ac1bf04dc9fd4d9aaabb65b4a668d403"
@@ -337,7 +339,7 @@ class ConfigModel(BaseModel):
"https://github.com/thsrite/MoviePilot-Plugins,"
"https://github.com/honue/MoviePilot-Plugins,"
"https://github.com/InfinityPacer/MoviePilot-Plugins,"
"https://github.com/DDS-Derek/MoviePilot-Plugins,"
"https://github.com/DDSRem-Dev/MoviePilot-Plugins,"
"https://github.com/madrays/MoviePilot-Plugins,"
"https://github.com/justzerock/MoviePilot-Plugins,"
"https://github.com/KoWming/MoviePilot-Plugins,"
@@ -347,7 +349,12 @@ class ConfigModel(BaseModel):
"https://github.com/Aqr-K/MoviePilot-Plugins,"
"https://github.com/hotlcc/MoviePilot-Plugins-Third,"
"https://github.com/gxterry/MoviePilot-Plugins,"
"https://github.com/DzAvril/MoviePilot-Plugins")
"https://github.com/DzAvril/MoviePilot-Plugins,"
"https://github.com/mrtian2016/MoviePilot-Plugins,"
"https://github.com/Hqyel/MoviePilot-Plugins-Third,"
"https://github.com/xijin285/MoviePilot-Plugins,"
"https://github.com/Seed680/MoviePilot-Plugins,"
"https://github.com/imaliang/MoviePilot-Plugins")
# 插件安装数据共享
PLUGIN_STATISTIC_SHARE: bool = True
# 是否开启插件热加载
@@ -427,10 +434,12 @@ class ConfigModel(BaseModel):
LLM_API_KEY: Optional[str] = None
# LLM基础URL用于自定义API端点
LLM_BASE_URL: Optional[str] = "https://api.deepseek.com"
# LLM最大上下文Token数量K
LLM_MAX_CONTEXT_TOKENS: int = 64
# LLM温度参数
LLM_TEMPERATURE: float = 0.1
# LLM最大迭代次数
LLM_MAX_ITERATIONS: int = 15
LLM_MAX_ITERATIONS: int = 128
# LLM工具调用超时时间
LLM_TOOL_TIMEOUT: int = 300
# 是否启用详细日志
@@ -445,10 +454,14 @@ class ConfigModel(BaseModel):
AI_RECOMMEND_ENABLED: bool = False
# AI推荐用户偏好
AI_RECOMMEND_USER_PREFERENCE: str = ""
# Tavily API密钥用于网络搜索
TAVILY_API_KEY: str = "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"
# AI推荐条目数量限制
AI_RECOMMEND_MAX_ITEMS: int = 50
class Settings(BaseSettings, ConfigModel, LogConfigModel):
"""
系统配置类

View File

@@ -465,7 +465,7 @@ class MediaInfo:
for seainfo in info.get('seasons'):
# 季
season = seainfo.get("season_number")
if not season:
if season is None:
continue
# 集
episode_count = seainfo.get("episode_count")
@@ -545,9 +545,9 @@ class MediaInfo:
# 识别标题中的季
meta = MetaInfo(info.get("title"))
# 季
if not self.season:
if self.season is None:
self.season = meta.begin_season
if self.season:
if self.season is not None:
self.type = MediaType.TV
elif not self.type:
self.type = MediaType.MOVIE
@@ -607,13 +607,13 @@ class MediaInfo:
# 剧集
if self.type == MediaType.TV and not self.seasons:
meta = MetaInfo(info.get("title"))
season = meta.begin_season or 1
season = meta.begin_season if meta.begin_season is not None else 1
episodes_count = info.get("episodes_count")
if episodes_count:
self.seasons[season] = list(range(1, episodes_count + 1))
# 季年份
if self.type == MediaType.TV and not self.season_years:
season = self.season or 1
season = self.season if self.season is not None else 1
self.season_years = {
season: self.year
}
@@ -667,7 +667,7 @@ class MediaInfo:
# 识别标题中的季
meta = MetaInfo(self.title)
# 季
if not self.season:
if self.season is None:
self.season = meta.begin_season
# 评分
if not self.vote_average:
@@ -703,7 +703,7 @@ class MediaInfo:
# 剧集
if self.type == MediaType.TV and not self.seasons:
meta = MetaInfo(self.title)
season = meta.begin_season or 1
season = meta.begin_season if meta.begin_season is not None else 1
episodes_count = info.get("total_episodes")
if episodes_count:
self.seasons[season] = list(range(1, episodes_count + 1))

View File

@@ -535,7 +535,7 @@ class MetaBase(object):
def merge(self, meta: Self):
"""
并Meta信息
并Meta信息
"""
# 类型
if self.type == MediaType.UNKNOWN \

View File

@@ -49,7 +49,7 @@ class MediaServerOper(DbOper):
if not item:
return None
if kwargs.get("season"):
if kwargs.get("season") is not None:
# 判断季是否存在
if not item.seasoninfo:
return None
@@ -75,7 +75,7 @@ class MediaServerOper(DbOper):
if not item:
return None
if kwargs.get("season"):
if kwargs.get("season") is not None:
# 判断季是否存在
if not item.seasoninfo:
return None

View File

@@ -104,14 +104,14 @@ class DownloadHistory(Base):
# TMDBID + 类型
if tmdbid and mtype:
# 电视剧某季某集
if season and episode:
if season is not None and episode:
return db.query(DownloadHistory).filter(DownloadHistory.tmdbid == tmdbid,
DownloadHistory.type == mtype,
DownloadHistory.seasons == season,
DownloadHistory.episodes == episode).order_by(
DownloadHistory.id.desc()).all()
# 电视剧某季
elif season:
elif season is not None:
return db.query(DownloadHistory).filter(DownloadHistory.tmdbid == tmdbid,
DownloadHistory.type == mtype,
DownloadHistory.seasons == season).order_by(
@@ -124,14 +124,14 @@ class DownloadHistory(Base):
# 标题 + 年份
elif title and year:
# 电视剧某季某集
if season and episode:
if season is not None and episode:
return db.query(DownloadHistory).filter(DownloadHistory.title == title,
DownloadHistory.year == year,
DownloadHistory.seasons == season,
DownloadHistory.episodes == episode).order_by(
DownloadHistory.id.desc()).all()
# 电视剧某季
elif season:
elif season is not None:
return db.query(DownloadHistory).filter(DownloadHistory.title == title,
DownloadHistory.year == year,
DownloadHistory.seasons == season).order_by(
@@ -209,7 +209,7 @@ class DownloadFiles(Base):
@classmethod
@db_query
def get_by_hash(cls, db: Session, download_hash: str, state: Optional[int] = None):
if state:
if state is not None:
return db.query(cls).filter(cls.download_hash == download_hash,
cls.state == state).all()
else:

View File

@@ -93,7 +93,7 @@ class Subscribe(Base):
def exists(cls, db: Session, tmdbid: Optional[int] = None, doubanid: Optional[str] = None,
season: Optional[int] = None):
if tmdbid:
if season:
if season is not None:
return db.query(cls).filter(cls.tmdbid == tmdbid,
cls.season == season).first()
return db.query(cls).filter(cls.tmdbid == tmdbid).first()
@@ -106,7 +106,7 @@ class Subscribe(Base):
async def async_exists(cls, db: AsyncSession, tmdbid: Optional[int] = None, doubanid: Optional[str] = None,
season: Optional[int] = None):
if tmdbid:
if season:
if season is not None:
result = await db.execute(
select(cls).filter(cls.tmdbid == tmdbid, cls.season == season)
)
@@ -148,7 +148,7 @@ class Subscribe(Base):
@classmethod
@db_query
def get_by_title(cls, db: Session, title: str, season: Optional[int] = None):
if season:
if season is not None:
return db.query(cls).filter(cls.name == title,
cls.season == season).first()
return db.query(cls).filter(cls.name == title).first()
@@ -156,7 +156,7 @@ class Subscribe(Base):
@classmethod
@async_db_query
async def async_get_by_title(cls, db: AsyncSession, title: str, season: Optional[int] = None):
if season:
if season is not None:
result = await db.execute(
select(cls).filter(cls.name == title, cls.season == season)
)
@@ -169,7 +169,7 @@ class Subscribe(Base):
@classmethod
@db_query
def get_by_tmdbid(cls, db: Session, tmdbid: int, season: Optional[int] = None):
if season:
if season is not None:
return db.query(cls).filter(cls.tmdbid == tmdbid,
cls.season == season).all()
else:
@@ -178,7 +178,7 @@ class Subscribe(Base):
@classmethod
@async_db_query
async def async_get_by_tmdbid(cls, db: AsyncSession, tmdbid: int, season: Optional[int] = None):
if season:
if season is not None:
result = await db.execute(
select(cls).filter(cls.tmdbid == tmdbid, cls.season == season)
)
@@ -227,6 +227,66 @@ class Subscribe(Base):
)
return result.scalars().first()
@classmethod
@db_query
def get_by(cls, db: Session, type: str, season: Optional[str] = None,
           tmdbid: Optional[int] = None, doubanid: Optional[str] = None, bangumiid: Optional[str] = None):
    """
    Query a single subscription by media identifier.

    Exactly one identifier is used, checked in precedence order
    tmdbid > doubanid > bangumiid; the media type always participates
    in the filter, and season narrows a TMDB lookup when given.

    :param db: database session
    :param type: media type value to match
    :param season: optional season to narrow a TMDB match
    :param tmdbid: TMDB id (highest precedence)
    :param doubanid: Douban id
    :param bangumiid: Bangumi id
    :return: first matching row, or None when no identifier was supplied
    """
    # Build the filter conditions for whichever identifier is present.
    if tmdbid:
        conditions = [cls.tmdbid == tmdbid, cls.type == type]
        if season is not None:
            conditions.append(cls.season == season)
    elif doubanid:
        conditions = [cls.doubanid == doubanid, cls.type == type]
    elif bangumiid:
        conditions = [cls.bangumiid == bangumiid, cls.type == type]
    else:
        # Nothing to query by.
        return None
    return db.query(cls).filter(*conditions).first()
@classmethod
@async_db_query
async def async_get_by(cls, db: AsyncSession, type: str, season: Optional[str] = None,
                       tmdbid: Optional[int] = None, doubanid: Optional[str] = None, bangumiid: Optional[str] = None):
    """
    Async variant: query a single subscription by media identifier.

    Identifier precedence is tmdbid > doubanid > bangumiid; the media
    type always participates in the filter, and season narrows a TMDB
    lookup when given.

    :param db: async database session
    :param type: media type value to match
    :param season: optional season to narrow a TMDB match
    :param tmdbid: TMDB id (highest precedence)
    :param doubanid: Douban id
    :param bangumiid: Bangumi id
    :return: first matching row, or None when no identifier was supplied
    """
    # Build the filter conditions for whichever identifier is present.
    if tmdbid:
        conditions = [cls.tmdbid == tmdbid, cls.type == type]
        if season is not None:
            conditions.append(cls.season == season)
    elif doubanid:
        conditions = [cls.doubanid == doubanid, cls.type == type]
    elif bangumiid:
        conditions = [cls.bangumiid == bangumiid, cls.type == type]
    else:
        # Nothing to query by.
        return None
    result = await db.execute(select(cls).filter(*conditions))
    return result.scalars().first()
@db_update
def delete_by_tmdbid(self, db: Session, tmdbid: int, season: int):
subscrbies = self.get_by_tmdbid(db, tmdbid, season)

View File

@@ -99,7 +99,7 @@ class SubscribeHistory(Base):
def exists(cls, db: Session, tmdbid: Optional[int] = None, doubanid: Optional[str] = None,
season: Optional[int] = None):
if tmdbid:
if season:
if season is not None:
return db.query(cls).filter(cls.tmdbid == tmdbid,
cls.season == season).first()
return db.query(cls).filter(cls.tmdbid == tmdbid).first()
@@ -112,7 +112,7 @@ class SubscribeHistory(Base):
async def async_exists(cls, db: AsyncSession, tmdbid: Optional[int] = None, doubanid: Optional[str] = None,
season: Optional[int] = None):
if tmdbid:
if season:
if season is not None:
result = await db.execute(
select(cls).filter(cls.tmdbid == tmdbid, cls.season == season)
)

View File

@@ -266,14 +266,14 @@ class TransferHistory(Base):
# TMDBID + 类型
if tmdbid and mtype:
# 电视剧某季某集
if season and episode:
if season is not None and episode:
return db.query(cls).filter(cls.tmdbid == tmdbid,
cls.type == mtype,
cls.seasons == season,
cls.episodes == episode,
cls.dest == dest).all()
# 电视剧某季
elif season:
elif season is not None:
return db.query(cls).filter(cls.tmdbid == tmdbid,
cls.type == mtype,
cls.seasons == season).all()
@@ -290,14 +290,14 @@ class TransferHistory(Base):
# 标题 + 年份
elif title and year:
# 电视剧某季某集
if season and episode:
if season is not None and episode:
return db.query(cls).filter(cls.title == title,
cls.year == year,
cls.seasons == season,
cls.episodes == episode,
cls.dest == dest).all()
# 电视剧某季
elif season:
elif season is not None:
return db.query(cls).filter(cls.title == title,
cls.year == year,
cls.seasons == season).all()
@@ -312,7 +312,7 @@ class TransferHistory(Base):
return db.query(cls).filter(cls.title == title,
cls.year == year).all()
# 类型 + 转移路径emby webhook season无tmdbid场景
elif mtype and season and dest:
elif mtype and season is not None and dest:
# 电视剧某季
return db.query(cls).filter(cls.type == mtype,
cls.seasons == season,

View File

@@ -71,6 +71,7 @@ class SubscribeOper(DbOper):
"backdrop": mediainfo.get_backdrop_image(),
"vote": mediainfo.vote_average,
"description": mediainfo.overview,
"search_imdbid": 1 if kwargs.get('search_imdbid') else 0,
"date": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
})
if not subscribe:
@@ -91,7 +92,7 @@ class SubscribeOper(DbOper):
判断是否存在
"""
if tmdbid:
if season:
if season is not None:
return True if Subscribe.exists(self._db, tmdbid=tmdbid, season=season) else False
else:
return True if Subscribe.exists(self._db, tmdbid=tmdbid) else False
@@ -111,6 +112,20 @@ class SubscribeOper(DbOper):
"""
return await Subscribe.async_get(self._db, rid=sid)
def get_by(self, type: str, season: Optional[str] = None, tmdbid: Optional[int] = None,
           doubanid: Optional[str] = None, bangumiid: Optional[str] = None) -> Optional[Subscribe]:
    """
    Look up a subscription by media identifier (thin delegation to the model).

    :param type: media type value to match
    :param season: optional season filter (TMDB lookups only)
    :param tmdbid: TMDB id
    :param doubanid: Douban id
    :param bangumiid: Bangumi id
    :return: the matching Subscribe row, or None
    """
    return Subscribe.get_by(self._db, type, season=season, tmdbid=tmdbid,
                            doubanid=doubanid, bangumiid=bangumiid)

async def async_get_by(self, type: str, season: Optional[str] = None, tmdbid: Optional[int] = None,
                       doubanid: Optional[str] = None, bangumiid: Optional[str] = None) -> Optional[Subscribe]:
    """
    Async variant of :meth:`get_by` (thin delegation to the model).

    :param type: media type value to match
    :param season: optional season filter (TMDB lookups only)
    :param tmdbid: TMDB id
    :param doubanid: Douban id
    :param bangumiid: Bangumi id
    :return: the matching Subscribe row, or None
    """
    return await Subscribe.async_get_by(self._db, type, season=season, tmdbid=tmdbid,
                                        doubanid=doubanid, bangumiid=bangumiid)
def list(self, state: Optional[str] = None) -> List[Subscribe]:
"""
获取订阅列表
@@ -180,7 +195,7 @@ class SubscribeOper(DbOper):
判断是否存在订阅历史
"""
if tmdbid:
if season:
if season is not None:
return True if SubscribeHistory.exists(self._db, tmdbid=tmdbid, season=season) else False
else:
return True if SubscribeHistory.exists(self._db, tmdbid=tmdbid) else False

View File

@@ -125,7 +125,7 @@ class TransferHistoryOper(DbOper):
"""
新增转移成功历史记录
"""
self.add_force(
return self.add_force(
src=fileitem.path,
src_storage=fileitem.storage,
src_fileitem=fileitem.model_dump(),

View File

@@ -19,41 +19,42 @@ class CookieHelper:
"username": [
'//input[@name="username"]',
'//input[@id="form_item_username"]',
'//input[@id="username"]'
'//input[@id="username"]',
],
"password": [
'//input[@name="password"]',
'//input[@id="form_item_password"]',
'//input[@id="password"]',
'//input[@type="password"]'
'//input[@type="password"]',
],
"captcha": [
'//input[@name="imagestring"]',
'//input[@name="captcha"]',
'//input[@id="form_item_captcha"]',
'//input[@placeholder="驗證碼"]'
'//input[@placeholder="驗證碼"]',
],
"captcha_img": [
'//img[@alt="captcha"]/@src',
'//img[@alt="CAPTCHA"]/@src',
'//img[@alt="SECURITY CODE"]/@src',
'//img[@id="LAY-user-get-vercode"]/@src',
'//img[contains(@src,"/api/getCaptcha")]/@src'
'//img[contains(@src,"/api/getCaptcha")]/@src',
],
"submit": [
'//input[@type="submit"]',
'//button[@type="submit"]',
'//button[@lay-filter="login"]',
'//button[@lay-filter="formLogin"]',
'//input[@type="button"][@value="登录"]'
'//input[@type="button"][@value="登录"]',
'//input[@id="submit-btn"]',
],
"error": [
"//table[@class='main']//td[@class='text']/text()"
"//table[@class='main']//td[@class='text']/text()",
],
"twostep": [
'//input[@name="two_step_code"]',
'//input[@name="2fa_secret"]',
'//input[@name="otp"]'
'//input[@name="otp"]',
]
}

View File

@@ -539,7 +539,7 @@ class MessageTemplateHelper:
获取消息模板
"""
template_dict: dict[str, str] = SystemConfigOper().get(SystemConfigKey.NotificationTemplates)
return template_dict.get(f"{message.ctype.value}")
return template_dict.get(message.ctype.value)
class MessageQueueManager(metaclass=SingletonClass):

View File

@@ -382,7 +382,10 @@ class RssHelper:
size = int(size_attr)
# 发布日期
pubdate_nodes = item.xpath('.//pubDate | .//published | .//updated')
pubdate_nodes = item.xpath('./pubDate | ./published | ./updated')
if not pubdate_nodes:
pubdate_nodes = item.xpath('.//*[local-name()="pubDate"] | .//*[local-name()="published"] | .//*[local-name()="updated"]')
pubdate = ""
if pubdate_nodes and pubdate_nodes[0].text:
pubdate = StringUtils.get_time(pubdate_nodes[0].text)

View File

@@ -139,9 +139,23 @@ class DiscordModule(_ModuleBase, _MessageBase[Discord]):
发送通知消息
:param message: 消息通知对象
"""
for conf in self.get_configs().values():
# DEBUG: Log entry and configs
configs = self.get_configs()
logger.debug(f"[Discord] post_message 被调用message.source={message.source}, "
f"message.userid={message.userid}, message.channel={message.channel}")
logger.debug(f"[Discord] 当前配置数量: {len(configs)}, 配置名称: {list(configs.keys())}")
logger.debug(f"[Discord] 当前实例数量: {len(self.get_instances())}, 实例名称: {list(self.get_instances().keys())}")
if not configs:
logger.warning("[Discord] get_configs() 返回空,没有可用的 Discord 配置")
return
for conf in configs.values():
logger.debug(f"[Discord] 检查配置: name={conf.name}, type={conf.type}, enabled={conf.enabled}")
if not self.check_message(message, conf.name):
logger.debug(f"[Discord] check_message 返回 False跳过配置: {conf.name}")
continue
logger.debug(f"[Discord] check_message 通过,准备发送到: {conf.name}")
targets = message.targets
userid = message.userid
if not userid and targets is not None:
@@ -150,13 +164,18 @@ class DiscordModule(_ModuleBase, _MessageBase[Discord]):
logger.warn("用户没有指定 Discord 用户ID消息无法发送")
return
client: Discord = self.get_instance(conf.name)
logger.debug(f"[Discord] get_instance('{conf.name}') 返回: {client is not None}")
if client:
client.send_msg(title=message.title, text=message.text,
logger.debug(f"[Discord] 调用 client.send_msg, userid={userid}, title={message.title[:50] if message.title else None}...")
result = client.send_msg(title=message.title, text=message.text,
image=message.image, userid=userid, link=message.link,
buttons=message.buttons,
original_message_id=message.original_message_id,
original_chat_id=message.original_chat_id,
mtype=message.mtype)
logger.debug(f"[Discord] send_msg 返回结果: {result}")
else:
logger.warning(f"[Discord] 未找到配置 '{conf.name}' 对应的 Discord 客户端实例")
def post_medias_message(self, message: Notification, medias: List[MediaInfo]) -> None:
"""

View File

@@ -2,6 +2,7 @@ import asyncio
import re
import threading
from typing import Optional, List, Dict, Any, Tuple, Union
from urllib.parse import quote
import discord
from discord import app_commands
@@ -33,6 +34,9 @@ class Discord:
DISCORD_GUILD_ID: Optional[Union[str, int]] = None,
DISCORD_CHANNEL_ID: Optional[Union[str, int]] = None,
**kwargs):
logger.debug(f"[Discord] 初始化 Discord 实例: name={kwargs.get('name')}, "
f"GUILD_ID={DISCORD_GUILD_ID}, CHANNEL_ID={DISCORD_CHANNEL_ID}, "
f"TOKEN={'已配置' if DISCORD_BOT_TOKEN else '未配置'}")
if not DISCORD_BOT_TOKEN:
logger.error("Discord Bot Token 未配置!")
return
@@ -40,10 +44,14 @@ class Discord:
self._token = DISCORD_BOT_TOKEN
self._guild_id = self._to_int(DISCORD_GUILD_ID)
self._channel_id = self._to_int(DISCORD_CHANNEL_ID)
logger.debug(f"[Discord] 解析后的 ID: _guild_id={self._guild_id}, _channel_id={self._channel_id}")
base_ds_url = f"http://127.0.0.1:{settings.PORT}/api/v1/message/"
self._ds_url = f"{base_ds_url}?token={settings.API_TOKEN}"
if kwargs.get("name"):
self._ds_url = f"{self._ds_url}&source={kwargs.get('name')}"
# URL encode the source name to handle special characters in config names
encoded_name = quote(kwargs.get('name'), safe='')
self._ds_url = f"{self._ds_url}&source={encoded_name}"
logger.debug(f"[Discord] 消息回调 URL: {self._ds_url}")
intents = discord.Intents.default()
intents.message_content = True
@@ -59,6 +67,7 @@ class Discord:
self._thread: Optional[threading.Thread] = None
self._ready_event = threading.Event()
self._user_dm_cache: Dict[str, discord.DMChannel] = {}
self._user_chat_mapping: Dict[str, str] = {} # userid -> chat_id mapping for reply targeting
self._broadcast_channel = None
self._bot_user_id: Optional[int] = None
@@ -86,6 +95,9 @@ class Discord:
if not self._should_process_message(message):
return
# Update user-chat mapping for reply targeting
self._update_user_chat_mapping(str(message.author.id), str(message.channel.id))
cleaned_text = self._clean_bot_mention(message.content or "")
username = message.author.display_name or message.author.global_name or message.author.name
payload = {
@@ -112,6 +124,10 @@ class Discord:
except Exception as e:
logger.error(f"处理 Discord 交互响应失败:{e}")
# Update user-chat mapping for reply targeting
if interaction.user and interaction.channel:
self._update_user_chat_mapping(str(interaction.user.id), str(interaction.channel.id))
username = (interaction.user.display_name or interaction.user.global_name or interaction.user.name) \
if interaction.user else None
payload = {
@@ -168,13 +184,19 @@ class Discord:
original_message_id: Optional[Union[int, str]] = None,
original_chat_id: Optional[str] = None,
mtype: Optional['NotificationType'] = None) -> Optional[bool]:
logger.debug(f"[Discord] send_msg 被调用: userid={userid}, title={title[:50] if title else None}...")
logger.debug(f"[Discord] get_state() = {self.get_state()}, "
f"_ready_event.is_set() = {self._ready_event.is_set()}, "
f"_client = {self._client is not None}")
if not self.get_state():
logger.warning("[Discord] get_state() 返回 FalseBot 未就绪,无法发送消息")
return False
if not title and not text:
logger.warn("标题和内容不能同时为空")
return False
try:
logger.debug(f"[Discord] 准备异步发送消息...")
future = asyncio.run_coroutine_threadsafe(
self._send_message(title=title, text=text, image=image, userid=userid,
link=link, buttons=buttons,
@@ -182,7 +204,9 @@ class Discord:
original_chat_id=original_chat_id,
mtype=mtype),
self._loop)
return future.result(timeout=30)
result = future.result(timeout=30)
logger.debug(f"[Discord] 异步发送完成,结果: {result}")
return result
except Exception as err:
logger.error(f"发送 Discord 消息失败:{err}")
return False
@@ -254,7 +278,9 @@ class Discord:
original_message_id: Optional[Union[int, str]],
original_chat_id: Optional[str],
mtype: Optional['NotificationType'] = None) -> bool:
logger.debug(f"[Discord] _send_message: userid={userid}, original_chat_id={original_chat_id}")
channel = await self._resolve_channel(userid=userid, chat_id=original_chat_id)
logger.debug(f"[Discord] _resolve_channel 返回: {channel}, type={type(channel)}")
if not channel:
logger.error("未找到可用的 Discord 频道或私聊")
return False
@@ -264,11 +290,18 @@ class Discord:
content = None
if original_message_id and original_chat_id:
logger.debug(f"[Discord] 编辑现有消息: message_id={original_message_id}")
return await self._edit_message(chat_id=original_chat_id, message_id=original_message_id,
content=content, embed=embed, view=view)
await channel.send(content=content, embed=embed, view=view)
return True
logger.debug(f"[Discord] 发送新消息到频道: {channel}")
try:
await channel.send(content=content, embed=embed, view=view)
logger.debug("[Discord] 消息发送成功")
return True
except Exception as e:
logger.error(f"[Discord] 发送消息到频道失败: {e}")
return False
async def _send_list_message(self, embeds: List[discord.Embed],
userid: Optional[str],
@@ -515,26 +548,54 @@ class Discord:
return view
async def _resolve_channel(self, userid: Optional[str] = None, chat_id: Optional[str] = None):
# 优先使用明确的聊天 ID
"""
Resolve the channel to send messages to.
Priority order:
1. `chat_id` (original channel where user sent the message) - for contextual replies
2. `userid` mapping (channel where user last sent a message) - for contextual replies
3. Configured `_channel_id` (broadcast channel) - for system notifications
4. Any available text channel in configured guild - fallback
5. `userid` (DM) - for private conversations as a final fallback
"""
logger.debug(f"[Discord] _resolve_channel: userid={userid}, chat_id={chat_id}, "
f"_channel_id={self._channel_id}, _guild_id={self._guild_id}")
# Priority 1: Use explicit chat_id (reply to the same channel where user sent message)
if chat_id:
logger.debug(f"[Discord] 尝试通过 chat_id={chat_id} 获取原始频道")
channel = self._client.get_channel(int(chat_id))
if channel:
logger.debug(f"[Discord] 通过 get_channel 找到频道: {channel}")
return channel
try:
return await self._client.fetch_channel(int(chat_id))
channel = await self._client.fetch_channel(int(chat_id))
logger.debug(f"[Discord] 通过 fetch_channel 找到频道: {channel}")
return channel
except Exception as err:
logger.warn(f"通过 chat_id 获取 Discord 频道失败:{err}")
# 私聊
# Priority 2: Use user-chat mapping (reply to where the user last sent a message)
if userid:
dm = await self._get_dm_channel(str(userid))
if dm:
return dm
mapped_chat_id = self._get_user_chat_id(str(userid))
if mapped_chat_id:
logger.debug(f"[Discord] 从用户映射获取 chat_id={mapped_chat_id}")
channel = self._client.get_channel(int(mapped_chat_id))
if channel:
logger.debug(f"[Discord] 通过映射找到频道: {channel}")
return channel
try:
channel = await self._client.fetch_channel(int(mapped_chat_id))
logger.debug(f"[Discord] 通过 fetch_channel 找到映射频道: {channel}")
return channel
except Exception as err:
logger.warn(f"通过映射的 chat_id 获取 Discord 频道失败:{err}")
# 配置的广播频道
# Priority 3: Use configured broadcast channel (for system notifications)
if self._broadcast_channel:
logger.debug(f"[Discord] 使用缓存的广播频道: {self._broadcast_channel}")
return self._broadcast_channel
if self._channel_id:
logger.debug(f"[Discord] 尝试通过配置的 _channel_id={self._channel_id} 获取频道")
channel = self._client.get_channel(self._channel_id)
if not channel:
try:
@@ -544,9 +605,11 @@ class Discord:
channel = None
self._broadcast_channel = channel
if channel:
logger.debug(f"[Discord] 通过配置的频道ID找到频道: {channel}")
return channel
# 按 Guild 寻找一个可用文本频道
# Priority 4: Find any available text channel in guild (fallback)
logger.debug(f"[Discord] 尝试在 Guild 中寻找可用频道")
target_guilds = []
if self._guild_id:
guild = self._client.get_guild(self._guild_id)
@@ -554,22 +617,47 @@ class Discord:
target_guilds.append(guild)
else:
target_guilds = list(self._client.guilds)
logger.debug(f"[Discord] 目标 Guilds 数量: {len(target_guilds)}")
for guild in target_guilds:
for channel in guild.text_channels:
if guild.me and channel.permissions_for(guild.me).send_messages:
logger.debug(f"[Discord] 在 Guild 中找到可用频道: {channel}")
self._broadcast_channel = channel
return channel
# Priority 5: Fallback to DM (only if no channel available)
if userid:
logger.debug(f"[Discord] 回退到私聊: userid={userid}")
dm = await self._get_dm_channel(str(userid))
if dm:
logger.debug(f"[Discord] 获取到私聊频道: {dm}")
return dm
else:
logger.debug(f"[Discord] 无法获取用户 {userid} 的私聊频道")
return None
async def _get_dm_channel(self, userid: str) -> Optional[discord.DMChannel]:
logger.debug(f"[Discord] _get_dm_channel: userid={userid}")
if userid in self._user_dm_cache:
logger.debug(f"[Discord] 从缓存获取私聊频道: {self._user_dm_cache.get(userid)}")
return self._user_dm_cache.get(userid)
try:
user_obj = self._client.get_user(int(userid)) or await self._client.fetch_user(int(userid))
logger.debug(f"[Discord] 尝试获取/创建用户 {userid} 的私聊频道")
user_obj = self._client.get_user(int(userid))
logger.debug(f"[Discord] get_user 结果: {user_obj}")
if not user_obj:
user_obj = await self._client.fetch_user(int(userid))
logger.debug(f"[Discord] fetch_user 结果: {user_obj}")
if not user_obj:
logger.debug(f"[Discord] 无法找到用户 {userid}")
return None
dm = user_obj.dm_channel or await user_obj.create_dm()
dm = user_obj.dm_channel
logger.debug(f"[Discord] 用户现有 dm_channel: {dm}")
if not dm:
dm = await user_obj.create_dm()
logger.debug(f"[Discord] 创建新的 dm_channel: {dm}")
if dm:
self._user_dm_cache[userid] = dm
return dm
@@ -577,6 +665,25 @@ class Discord:
logger.error(f"获取 Discord 私聊失败:{err}")
return None
def _update_user_chat_mapping(self, userid: str, chat_id: str) -> None:
    """
    Remember the channel a user last wrote in, so replies target it.

    :param userid: Discord user ID
    :param chat_id: channel/chat ID of the user's most recent message
    """
    # Ignore incomplete pairs — both keys are needed for reply routing.
    if not userid or not chat_id:
        return
    self._user_chat_mapping[userid] = chat_id
    logger.debug(f"[Discord] 更新用户频道映射: userid={userid} -> chat_id={chat_id}")
def _get_user_chat_id(self, userid: str) -> Optional[str]:
    """
    Look up the channel ID where the given user last sent a message.

    :param userid: Discord user ID
    :return: mapped chat ID, or None when the user has no recorded channel
    """
    mapping = self._user_chat_mapping
    return mapping.get(userid)
def _should_process_message(self, message: discord.Message) -> bool:
if isinstance(message.channel, discord.DMChannel):
return True

View File

@@ -21,7 +21,7 @@ class DoubanScraper:
# 电影元数据文件
doc = self.__gen_movie_nfo_file(mediainfo=mediainfo)
else:
if season:
if season is not None:
# 季元数据文件
doc = self.__gen_tv_season_nfo_file(mediainfo=mediainfo, season=season)
else:
@@ -41,7 +41,7 @@ class DoubanScraper:
:param episode: 集号
"""
ret_dict = {}
if season:
if season is not None:
# 豆瓣无季图片
return {}
if episode:

View File

@@ -421,7 +421,7 @@ class Emby:
if str(tmdb_id) != str(item_info.tmdbid):
return None, {}
# 查集的信息
if not season:
if season is None:
season = None
try:
url = f"{self._host}emby/Shows/{item_id}/Episodes"
@@ -437,12 +437,12 @@ class Emby:
season_episodes = {}
for res_item in res_items:
season_index = res_item.get("ParentIndexNumber")
if not season_index:
if season_index is None:
continue
if season and season != season_index:
if season is not None and season != season_index:
continue
episode_index = res_item.get("IndexNumber")
if not episode_index:
if episode_index is None:
continue
if season_index not in season_episodes:
season_episodes[season_index] = []

View File

@@ -36,7 +36,7 @@ class FileManagerModule(_ModuleBase):
self._storage_schemas = ModuleHelper.load('app.modules.filemanager.storages',
filter_func=lambda _, obj: hasattr(obj, 'schema') and obj.schema)
# 获取存储类型
self._support_storages = [storage.schema.value for storage in self._storage_schemas]
self._support_storages = [storage.schema.value for storage in self._storage_schemas if storage.schema]
@staticmethod
def get_name() -> str:
@@ -95,12 +95,11 @@ class FileManagerModule(_ModuleBase):
return False, f"{d.name} 的下载目录 {download_path} 与媒体库目录 {library_path} 不在同一磁盘,无法硬链接"
# 存储
storage_oper = self.__get_storage_oper(d.storage)
if not storage_oper:
return False, f"{d.name} 的存储类型 {d.storage} 不支持"
if not storage_oper.check():
return False, f"{d.name} 的存储测试不通过"
if d.transfer_type and d.transfer_type not in storage_oper.support_transtype():
return False, f"{d.name} 的存储不支持 {d.transfer_type} 整理方式"
if storage_oper:
if not storage_oper.check():
return False, f"{d.name} 的存储测试不通过"
if d.transfer_type and d.transfer_type not in storage_oper.support_transtype():
return False, f"{d.name} 的存储不支持 {d.transfer_type} 整理方式"
return True, ""
@@ -197,6 +196,16 @@ class FileManagerModule(_ModuleBase):
return None
return storage_oper.generate_qrcode()
def generate_auth_url(self, storage: str) -> Optional[Tuple[dict, str]]:
    """
    Generate an OAuth2 authorization URL for the given storage backend.

    :param storage: storage schema identifier
    :return: (auth data dict, "") on success, or ({}, error message)
             when the storage does not support OAuth2 authorization
    """
    oper = self.__get_storage_oper(storage, "generate_auth_url")
    if oper:
        return oper.generate_auth_url()
    # No backend implements generate_auth_url for this storage type.
    logger.error(f"不支持 {storage} 的 OAuth2 授权")
    return {}, f"不支持 {storage} 的 OAuth2 授权"
def check_login(self, storage: str, **kwargs) -> Optional[Dict[str, str]]:
"""
登录确认
@@ -464,7 +473,7 @@ class FileManagerModule(_ModuleBase):
else:
# 未找到有效的媒体库目录
logger.error(
f"{mediainfo.type.value} {mediainfo.title_year} 未找到有效的媒体库目录,无法整理文件,源路径:{fileitem.path}")
f"{mediainfo.type.value if mediainfo.type else '未知类型'} {mediainfo.title_year} 未找到有效的媒体库目录,无法整理文件,源路径:{fileitem.path}")
return TransferInfo(success=False,
fileitem=fileitem,
message="未找到有效的媒体库目录")

View File

@@ -57,6 +57,12 @@ class StorageBase(metaclass=ABCMeta):
def generate_qrcode(self, *args, **kwargs) -> Optional[Tuple[dict, str]]:
pass
def generate_auth_url(self, *args, **kwargs) -> Optional[Tuple[dict, str]]:
    """
    Default OAuth2 authorization hook.

    Storage backends that support OAuth2 override this method; the base
    implementation only reports that authorization is unavailable.

    :return: an empty payload dict plus a human-readable message
    """
    payload: dict = {}
    message = "此存储不支持 OAuth2 授权"
    return payload, message
def check_login(self, *args, **kwargs) -> Optional[Dict[str, str]]:
pass

View File

@@ -3,7 +3,7 @@ import secrets
import time
from pathlib import Path
from threading import Lock
from typing import List, Optional, Tuple, Union, Dict
from typing import List, Optional, Tuple, Union
from hashlib import sha256
import oss2
@@ -20,7 +20,7 @@ from app.modules.filemanager.storages import transfer_process
from app.schemas.types import StorageSchema
from app.utils.singleton import WeakSingleton
from app.utils.string import StringUtils
from app.utils.limit import QpsRateLimiter
from app.utils.limit import QpsRateLimiter, RateStats
lock = Lock()
@@ -46,22 +46,23 @@ class U115Pan(StorageBase, metaclass=WeakSingleton):
# 文件块大小默认10MB
chunk_size = 10 * 1024 * 1024
# 流控重试间隔时间
retry_delay = 70
# 下载接口单独限流
download_endpoint = "/open/ufile/downurl"
# 风控触发后休眠时间(秒)
limit_sleep_seconds = 3600
def __init__(self):
super().__init__()
self._auth_state = {}
self.session = httpx.Client(follow_redirects=True, timeout=20.0)
self._init_session()
self.qps_limiter: Dict[str, QpsRateLimiter] = {
"/open/ufile/files": QpsRateLimiter(4),
"/open/folder/get_info": QpsRateLimiter(3),
"/open/ufile/move": QpsRateLimiter(2),
"/open/ufile/copy": QpsRateLimiter(2),
"/open/ufile/update": QpsRateLimiter(2),
"/open/ufile/delete": QpsRateLimiter(2),
}
# 接口限流
self._download_limiter = QpsRateLimiter(1)
self._api_limiter = QpsRateLimiter(3)
self._limit_until = 0.0
self._limit_lock = Lock()
# 总体 QPS/QPM/QPH 统计
self._rate_stats = RateStats(source="115")
def _init_session(self):
"""
@@ -105,6 +106,33 @@ class U115Pan(StorageBase, metaclass=WeakSingleton):
self.session.headers.update({"Authorization": f"Bearer {access_token}"})
return access_token
def generate_auth_url(self) -> Tuple[dict, str]:
    """
    Generate an OAuth2 authorization URL via the external auth server.

    Calls the U115 auth server's ``/u115/auth_url`` endpoint and, on
    success, caches the returned ``state`` in ``self._auth_state`` so a
    later login check can poll for the token.

    :return: ({"authUrl": ..., "state": ...}, "") on success,
             or ({}, error message) on any failure
    """
    try:
        # Ask the central auth server to open an authorization session.
        resp = self.session.get(f"{settings.U115_AUTH_SERVER}/u115/auth_url")
        if resp is None:
            return {}, "无法连接到授权服务器"
        result = resp.json()
        if not result.get("success"):
            return {}, result.get("message", "获取授权URL失败")
        data = result.get("data", {})
        auth_url = data.get("auth_url")
        state = data.get("state")
        # Both fields are required for the subsequent token polling.
        if not auth_url or not state:
            return {}, "授权服务器返回数据不完整"
        # Remember the state so the OAuth login check can poll with it.
        self._auth_state = {"state": state}
        return {"authUrl": auth_url, "state": state}, ""
    except Exception as e:
        logger.error(f"【115】获取授权 URL 失败: {str(e)}")
        return {}, f"获取授权 URL 失败: {str(e)}"
def generate_qrcode(self) -> Tuple[dict, str]:
"""
实现PKCE规范的设备授权二维码生成
@@ -141,8 +169,11 @@ class U115Pan(StorageBase, metaclass=WeakSingleton):
def check_login(self) -> Optional[Tuple[dict, str]]:
"""
改进的带PKCE校验的登录状态检查
检查授权状态
"""
if self._auth_state and self._auth_state.get("state"):
return self.__check_oauth_login()
if not self._auth_state:
return {}, "生成二维码失败"
try:
@@ -169,6 +200,47 @@ class U115Pan(StorageBase, metaclass=WeakSingleton):
except Exception as e:
return {}, str(e)
def __check_oauth_login(self) -> Tuple[dict, str]:
    """
    Poll the auth server for the OAuth2 authorization result.

    Uses the ``state`` previously stored by the auth-URL generation step.
    On completion, persists the returned tokens via ``set_config`` and
    clears the pending state; on expiry, clears the state as well.

    :return: (status dict, "") where status is 2 (authorized),
             -1 (expired) or 0 (still pending); or ({}, error message)
             on failure
    """
    state = self._auth_state.get("state")
    if not state:
        return {}, "state为空"
    try:
        # Query the token endpoint for the current authorization status.
        resp = self.session.get(
            f"{settings.U115_AUTH_SERVER}/u115/token", params={"state": state}
        )
        if resp is None:
            return {}, "无法连接到授权服务器"
        result = resp.json()
        status = result.get("status", "pending")
        if status == "completed":
            data = result.get("data", {})
            if data:
                # Persist the fresh token set alongside the refresh time.
                self.set_config(
                    {
                        "refresh_time": int(time.time()),
                        "access_token": data.get("access_token"),
                        "refresh_token": data.get("refresh_token"),
                        "expires_in": data.get("expires_in"),
                    }
                )
                # Authorization finished — drop the pending state.
                self._auth_state = {}
                return {"status": 2, "tip": "授权成功"}, ""
            return {}, "授权服务器返回数据不完整"
        elif status == "expired":
            # The authorization session timed out; caller must restart.
            self._auth_state = {}
            return {"status": -1, "tip": result.get("message", "授权已过期")}, ""
        else:
            # Still waiting for the user to approve on the 115 side.
            return {"status": 0, "tip": "等待用户授权"}, ""
    except Exception as e:
        logger.error(f"【115】检查授权状态失败: {str(e)}")
        return {}, f"检查授权状态失败: {str(e)}"
def __get_access_token(self) -> dict:
"""
确认登录后获取相关token
@@ -222,11 +294,24 @@ class U115Pan(StorageBase, metaclass=WeakSingleton):
# 错误日志标志
no_error_log = kwargs.pop("no_error_log", False)
# 重试次数
retry_times = kwargs.pop("retry_limit", 5)
retry_times = kwargs.pop("retry_limit", 3)
# qps 速率限制
if endpoint in self.qps_limiter:
self.qps_limiter[endpoint].acquire()
# 按接口类型限流
if endpoint == self.download_endpoint:
self._download_limiter.acquire()
else:
self._api_limiter.acquire()
self._rate_stats.record()
# 风控冷却期间阻止所有接口调用,统一等待
with self._limit_lock:
wait_until = self._limit_until
if wait_until > time.time():
wait_secs = wait_until - time.time()
logger.info(
f"【115】风控冷却中本请求等待 {wait_secs:.0f} 秒后再调用接口..."
)
time.sleep(wait_secs)
try:
resp = self.session.request(method, f"{self.base_url}{endpoint}", **kwargs)
@@ -240,13 +325,24 @@ class U115Pan(StorageBase, metaclass=WeakSingleton):
kwargs["retry_limit"] = retry_times
# 处理速率限制
if resp.status_code == 429:
reset_time = 5 + int(resp.headers.get("X-RateLimit-Reset", 60))
logger.debug(
f"【115】{method} 请求 {endpoint} 限流,等待{reset_time}秒后重试"
self._rate_stats.log_stats("warning")
if retry_times <= 0:
logger.error(
f"【115】{method} 请求 {endpoint} 触发限流(429),重试次数用尽!"
)
return None
with self._limit_lock:
self._limit_until = max(
self._limit_until,
time.time() + self.limit_sleep_seconds,
)
logger.warning(
f"【115】触发限流(429),全体接口进入风控冷却 {self.limit_sleep_seconds} 秒,随后重试..."
)
time.sleep(reset_time)
time.sleep(self.limit_sleep_seconds)
kwargs["retry_limit"] = retry_times - 1
kwargs["no_error_log"] = no_error_log
return self._request_api(method, endpoint, result_key, **kwargs)
# 处理请求错误
@@ -259,6 +355,7 @@ class U115Pan(StorageBase, metaclass=WeakSingleton):
)
return None
kwargs["retry_limit"] = retry_times - 1
kwargs["no_error_log"] = no_error_log
sleep_duration = 2 ** (5 - retry_times + 1)
logger.info(
f"【115】{method} 请求 {endpoint} 错误 {e},等待 {sleep_duration} 秒后重试..."
@@ -268,21 +365,28 @@ class U115Pan(StorageBase, metaclass=WeakSingleton):
# 返回数据
ret_data = resp.json()
if ret_data.get("code") != 0:
error_msg = ret_data.get("message")
if ret_data.get("code") not in (0, 20004):
error_msg = ret_data.get("message", "")
if not no_error_log:
logger.warn(f"【115】{method} 请求 {endpoint} 出错:{error_msg}")
if "已达到当前访问上限" in error_msg:
self._rate_stats.log_stats("warning")
if retry_times <= 0:
logger.error(
f"【115】{method} 请求 {endpoint} 达到访问上限,重试次数用尽!"
f"【115】{method} 请求 {endpoint} 触发风控(访问上限),重试次数用尽!"
)
return None
kwargs["retry_limit"] = retry_times - 1
logger.info(
f"【115】{method} 请求 {endpoint} 达到访问上限,等待 {self.retry_delay} 秒后重试..."
with self._limit_lock:
self._limit_until = max(
self._limit_until,
time.time() + self.limit_sleep_seconds,
)
logger.warning(
f"【115】触发风控(访问上限),全体接口进入风控冷却 {self.limit_sleep_seconds} 秒,随后重试..."
)
time.sleep(self.retry_delay)
time.sleep(self.limit_sleep_seconds)
kwargs["retry_limit"] = retry_times - 1
kwargs["no_error_log"] = no_error_log
return self._request_api(method, endpoint, result_key, **kwargs)
return None
@@ -386,7 +490,10 @@ class U115Pan(StorageBase, metaclass=WeakSingleton):
resp = self._request_api(
"POST",
"/open/folder/add",
data={"pid": int(parent_item.fileid or "0"), "file_name": name},
data={
"pid": 0 if parent_item.path == "/" else int(parent_item.fileid or 0),
"file_name": name,
},
)
if not resp:
return None
@@ -806,7 +913,7 @@ class U115Pan(StorageBase, metaclass=WeakSingleton):
def copy(self, fileitem: schemas.FileItem, path: Path, new_name: str) -> bool:
"""
企业级复制实现(支持目录递归复制)
复制
"""
if fileitem.fileid is None:
fileitem = self.get_item(Path(fileitem.path))
@@ -839,7 +946,7 @@ class U115Pan(StorageBase, metaclass=WeakSingleton):
def move(self, fileitem: schemas.FileItem, path: Path, new_name: str) -> bool:
"""
原子性移动操作实现
移动
"""
if fileitem.fileid is None:
fileitem = self.get_item(Path(fileitem.path))
@@ -877,7 +984,7 @@ class U115Pan(StorageBase, metaclass=WeakSingleton):
def usage(self) -> Optional[schemas.StorageUsage]:
"""
获取带有企业级配额信息的存储使用情况
存储使用情况
"""
try:
resp = self._request_api("GET", "/open/user/info", "data")

View File

@@ -150,10 +150,9 @@ class TransHandler:
if stream_fileitem := source_oper.get_item(
Path(fileitem.path) / "BDMV" / "STREAM"
):
fileitem.size = 0
files = source_oper.list(stream_fileitem) or []
for file in files:
fileitem.size += file.size
fileitem.size = sum(
file.size for file in source_oper.list(stream_fileitem) or []
)
# 整理目录
new_diritem, errmsg = self.__transfer_dir(fileitem=fileitem,
mediainfo=mediainfo,
@@ -296,6 +295,7 @@ class TransHandler:
elif overwrite_mode == 'never':
# 存在不覆盖
self.__update_result(result=result,
success=False,
message=f"媒体库存在同名文件,当前覆盖模式为不覆盖",
fileitem=fileitem,
target_item=target_item,
@@ -314,6 +314,9 @@ class TransHandler:
logger.info(
f"当前整理覆盖模式设置为 {overwrite_mode},仅保留最新版本,正在删除已有版本文件 ...")
self.__delete_version_files(target_oper, new_file)
else:
# 附加文件 总是需要覆盖
overflag = True
# 整理文件
new_item, err_msg = self.__transfer_file(fileitem=fileitem,
@@ -498,18 +501,23 @@ class TransHandler:
重命名字幕文件,补充附加信息
"""
# 字幕正则式
_zhcn_sub_re = r"([.\[(](((zh[-_])?(cn|ch[si]|sg|sc))|zho?" \
r"|chinese|(cn|ch[si]|sg|zho?|eng)[-_&]?(cn|ch[si]|sg|zho?|eng)" \
r"|简[体中]?)[.\])])" \
_zhcn_sub_re = r"([.\[(\s](((zh[-_])?(cn|ch[si]|sg|sc))|zho?" \
r"|chinese|(cn|ch[si]|sg|zho?)[-_&]?(cn|ch[si]|sg|zho?|eng|jap|ja|jpn)" \
r"|eng[-_&]?(cn|ch[si]|sg|zho?)|(jap|ja|jpn)[-_&]?(cn|ch[si]|sg|zho?)" \
r"|简[体中]?)[.\])\s])" \
r"|([\u4e00-\u9fa5]{0,3}[中双][\u4e00-\u9fa5]{0,2}[字文语][\u4e00-\u9fa5]{0,3})" \
r"|简体|简中|JPSC|sc_jp" \
r"|(?<![a-z0-9])gb(?![a-z0-9])"
_zhtw_sub_re = r"([.\[(](((zh[-_])?(hk|tw|cht|tc))" \
r"|(cht|eng)[-_&]?(cht|eng)" \
r"|繁[体中]?)[.\])])" \
_zhtw_sub_re = r"([.\[(\s](((zh[-_])?(hk|tw|cht|tc))" \
r"|cht[-_&]?(cht|eng|jap|ja|jpn)" \
r"|eng[-_&]?cht|(jap|ja|jpn)[-_&]?cht" \
r"|繁[体中]?)[.\])\s])" \
r"|繁体中[文字]|中[文字]繁体|繁体|JPTC|tc_jp" \
r"|(?<![a-z0-9])big5(?![a-z0-9])"
_eng_sub_re = r"[.\[(]eng[.\])]"
_ja_sub_re = r"([.\[(\s](ja-jp|jap|ja|jpn" \
r"|(jap|ja|jpn)[-_&]?eng|eng[-_&]?(jap|ja|jpn))[.\])\s])" \
r"|日本語|日語"
_eng_sub_re = r"[.\[(\s]eng[.\])\s]"
# 原文件后缀
file_ext = f".{sub_item.extension}"
@@ -521,12 +529,15 @@ class TransHandler:
new_file_type = ".chi.zh-cn"
elif re.search(_zhtw_sub_re, sub_item.name, re.I):
new_file_type = ".zh-tw"
elif re.search(_ja_sub_re, sub_item.name, re.I):
new_file_type = ".ja"
elif re.search(_eng_sub_re, sub_item.name, re.I):
new_file_type = ".eng"
# 添加默认字幕标识
if ((settings.DEFAULT_SUB == "zh-cn" and new_file_type == ".chi.zh-cn")
or (settings.DEFAULT_SUB == "zh-tw" and new_file_type == ".zh-tw")
or (settings.DEFAULT_SUB == "ja" and new_file_type == ".ja")
or (settings.DEFAULT_SUB == "eng" and new_file_type == ".eng")):
new_sub_tag = ".default" + new_file_type
else:
@@ -708,7 +719,7 @@ class TransHandler:
"""
获取目标路径
"""
if need_type_folder:
if need_type_folder and mediainfo.type:
target_path = target_path / mediainfo.type.value
if need_category_folder and mediainfo.category:
target_path = target_path / mediainfo.category
@@ -728,7 +739,7 @@ class TransHandler:
need_type_folder = target_dir.library_type_folder
if need_category_folder is None:
need_category_folder = target_dir.library_category_folder
if not target_dir.media_type and need_type_folder:
if not target_dir.media_type and need_type_folder and mediainfo.type:
# 一级自动分类
library_dir = Path(target_dir.library_path) / mediainfo.type.value
elif target_dir.media_type and need_type_folder:
@@ -790,8 +801,8 @@ class TransHandler:
continue
if media_file.type != "file":
continue
media_exts = settings.RMT_MEDIAEXT + settings.RMT_SUBEXT + settings.RMT_AUDIOEXT
if f".{media_file.extension.lower()}" not in media_exts:
# 当前只有视频文件需要保留最新版本,其余格式无需处理,以避免误删 (issue 5449)
if f".{media_file.extension.lower()}" not in settings.RMT_MEDIAEXT:
continue
# 识别文件中的季集信息
filemeta = MetaInfoPath(media_path)

View File

@@ -7,11 +7,12 @@ from app.helper.rule import RuleHelper
from app.log import logger
from app.modules import _ModuleBase
from app.modules.filter.RuleParser import RuleParser
from app.schemas.types import ModuleType, OtherModulesType
from app.schemas.types import ModuleType, OtherModulesType, SystemConfigKey
from app.utils.string import StringUtils
class FilterModule(_ModuleBase):
CONFIG_WATCH = {SystemConfigKey.CustomFilterRules.value}
# 规则解析器
parser: RuleParser = None
# 媒体信息
@@ -44,7 +45,8 @@ class FilterModule(_ModuleBase):
"include": [
r'[中国國繁简](/|\s|\\|\|)?[繁简英粤]|[英简繁](/|\s|\\|\|)?[中繁简]'
r'|繁體|简体|[中国國][字配]|国语|國語|中文|中字|简日|繁日|简繁|繁体'
r'|([\s,.-\[])(CHT|CHS|cht|chs)(|[\s,.-\]])'],
r'|([\s,.-\[])(chs|cht)(|[\s,.-\]])'
r'|(?<![a-z0-9])(gb|big5)(?![a-z0-9])'],
"exclude": [],
"tmdb": {
"original_language": "zh,cn"
@@ -203,8 +205,6 @@ class FilterModule(_ModuleBase):
if not rule_groups:
return torrent_list
self.media = mediainfo
# 重新加载自定义规则
self.__init_custom_rules()
# 查询规则表详情
groups = self.rulehelper.get_rule_group_by_media(media=mediainfo, group_names=rule_groups)
if groups:
@@ -227,7 +227,7 @@ class FilterModule(_ModuleBase):
for torrent in torrent_list:
# 能命中优先级的才返回
if not self.__get_order(torrent, rule_string):
logger.debug(f"种子 {torrent.site_name} - {torrent.title} {torrent.description} "
logger.debug(f"种子 {torrent.site_name} - {torrent.title} {torrent.description or ''} "
f"不匹配 {rule_name} 过滤规则")
continue
ret_torrents.append(torrent)

View File

@@ -434,7 +434,7 @@ class IndexerModule(_ModuleBase):
获取站点解析器
"""
for site_schema in self._site_schemas:
if site_schema.schema.value == site.get("schema"):
if site_schema.schema and site_schema.schema.value == site.get("schema"):
return site_schema(
site_name=site.get("name"),
url=site.get("url"),

View File

@@ -197,13 +197,13 @@ class RousiSiteUserInfo(SiteParserBase):
url=urljoin(self._base_url, "api/messages"),
params=params
)
if not res or res.status_code != 200 or not res.text:
if not res or res.status_code != 200 or res.json().get("code", -1) != 0:
logger.warn(f"{self._site_name} 站点解析消息失败,状态码: {res.status_code if res else '无响应'}")
return {
"messages": [],
"total_pages": 0
}
return res.json()
return res.json().get("data")
# 分页获取所有未读消息
page = 0

View File

@@ -428,6 +428,12 @@ class SiteSpider:
if pubdate_str:
pubdate_str = pubdate_str.replace('\n', ' ').strip()
self.torrents_info['pubdate'] = self.__filter_text(pubdate_str, selector.get('filters'))
if self.torrents_info.get('pubdate'):
try:
if not isinstance(self.torrents_info['pubdate'], datetime.datetime):
datetime.datetime.strptime(str(self.torrents_info['pubdate']), '%Y-%m-%d %H:%M:%S')
except (ValueError, TypeError):
self.torrents_info['pubdate'] = StringUtils.unify_datetime_str(str(self.torrents_info['pubdate']))
def __get_date_elapsed(self, torrent: Any):
# torrent date elapsed text

View File

@@ -409,7 +409,7 @@ class Jellyfin:
if tmdb_id and item_info.tmdbid:
if str(tmdb_id) != str(item_info.tmdbid):
return None, {}
if not season:
if season is None:
season = None
url = f"{self._host}Shows/{item_id}/Episodes"
params = {
@@ -427,12 +427,12 @@ class Jellyfin:
season_episodes = {}
for res_item in res_items:
season_index = res_item.get("ParentIndexNumber")
if not season_index:
if season_index is None:
continue
if season and season != season_index:
if season is not None and season != season_index:
continue
episode_index = res_item.get("IndexNumber")
if not episode_index:
if episode_index is None:
continue
if not season_episodes.get(season_index):
season_episodes[season_index] = []

View File

@@ -287,7 +287,7 @@ class Plex:
episodes = videos.episodes()
season_episodes = {}
for episode in episodes:
if season and episode.seasonNumber != int(season):
if season is not None and episode.seasonNumber != int(season):
continue
if episode.seasonNumber not in season_episodes:
season_episodes[episode.seasonNumber] = []

View File

@@ -1,6 +1,7 @@
import re
from threading import Lock
from typing import List, Optional
from urllib.parse import quote
import requests
from slack_bolt import App
@@ -42,7 +43,9 @@ class Slack:
# 标记消息来源
if kwargs.get("name"):
self._ds_url = f"{self._ds_url}&source={kwargs.get('name')}"
# URL encode the source name to handle special characters
encoded_name = quote(kwargs.get('name'), safe='')
self._ds_url = f"{self._ds_url}&source={encoded_name}"
# 注册消息响应
@slack_app.event("message")

View File

@@ -2,7 +2,7 @@ import asyncio
import re
import threading
from typing import Optional, List, Dict, Callable
from urllib.parse import urljoin
from urllib.parse import urljoin, quote
from telebot import TeleBot, apihelper
from telebot.types import BotCommand, InlineKeyboardMarkup, InlineKeyboardButton, InputMediaPhoto
@@ -65,7 +65,9 @@ class Telegram:
# 标记渠道来源
if kwargs.get("name"):
self._ds_url = f"{self._ds_url}&source={kwargs.get('name')}"
# URL encode the source name to handle special characters
encoded_name = quote(kwargs.get('name'), safe='')
self._ds_url = f"{self._ds_url}&source={encoded_name}"
@_bot.message_handler(commands=['start', 'help'])
def send_welcome(message):

View File

@@ -14,10 +14,12 @@ from app.modules.themoviedb.category import CategoryHelper
from app.modules.themoviedb.scraper import TmdbScraper
from app.modules.themoviedb.tmdb_cache import TmdbCache
from app.modules.themoviedb.tmdbapi import TmdbApi
from app.schemas.category import CategoryConfig
from app.schemas.types import MediaType, MediaImageType, ModuleType, MediaRecognizeType
from app.utils.http import RequestUtils
class TheMovieDbModule(_ModuleBase):
"""
TMDB媒体信息匹配
@@ -796,7 +798,7 @@ class TheMovieDbModule(_ModuleBase):
if not tmdb_info:
return []
return [schemas.TmdbSeason(**sea)
for sea in tmdb_info.get("seasons", []) if sea.get("season_number")]
for sea in tmdb_info.get("seasons", []) if sea.get("season_number") is not None]
def tmdb_group_seasons(self, group_id: str) -> List[schemas.TmdbSeason]:
"""
@@ -1166,7 +1168,7 @@ class TheMovieDbModule(_ModuleBase):
if not tmdb_info:
return []
return [schemas.TmdbSeason(**sea)
for sea in tmdb_info.get("seasons", []) if sea.get("season_number")]
for sea in tmdb_info.get("seasons", []) if sea.get("season_number") is not None]
async def async_tmdb_group_seasons(self, group_id: str) -> List[schemas.TmdbSeason]:
"""
@@ -1290,3 +1292,15 @@ class TheMovieDbModule(_ModuleBase):
self.tmdb.clear_cache()
self.cache.clear()
logger.info("TMDB缓存清除完成")
def load_category_config(self) -> CategoryConfig:
"""
加载分类配置
"""
return self.category.load()
def save_category_config(self, config: CategoryConfig) -> bool:
"""
保存分类配置
"""
return self.category.save(config)

View File

@@ -7,8 +7,23 @@ from ruamel.yaml import CommentedMap
from app.core.config import settings
from app.log import logger
from app.schemas.category import CategoryConfig
from app.utils.singleton import WeakSingleton
HEADER_COMMENTS = """####### 配置说明 #######
# 1. 该配置文件用于配置电影和电视剧的分类策略配置后程序会按照配置的分类策略名称进行分类配置文件采用yaml格式需要严格符合语法规则
# 2. 配置文件中的一级分类名称:`movie`、`tv` 为固定名称不可修改,二级名称同时也是目录名称,会按先后顺序匹配,匹配后程序会按这个名称建立二级目录
# 3. 支持的分类条件:
# `original_language` 语种,具体含义参考下方字典
# `production_countries` 国家或地区(电影)、`origin_country` 国家或地区(电视剧),具体含义参考下方字典
# `genre_ids` 内容类型,具体含义参考下方字典
# `release_year` 发行年份格式YYYY电影实际对应`release_date`字段,电视剧实际对应`first_air_date`字段,支持范围设定,如:`YYYY-YYYY`
# themoviedb 详情API返回的其它一级字段
# 4. 配置多项条件时需要同时满足,一个条件需要匹配多个值是使用`,`分隔
# 5. !条件值表示排除该值
"""
class CategoryHelper(metaclass=WeakSingleton):
"""
@@ -31,8 +46,8 @@ class CategoryHelper(metaclass=WeakSingleton):
shutil.copy(settings.INNER_CONFIG_PATH / "category.yaml", self._category_path)
with open(self._category_path, mode='r', encoding='utf-8') as f:
try:
yaml = ruamel.yaml.YAML()
self._categorys = yaml.load(f)
yaml_loader = ruamel.yaml.YAML()
self._categorys = yaml_loader.load(f)
except Exception as e:
logger.warn(f"二级分类策略配置文件格式出现严重错误!请检查:{str(e)}")
self._categorys = {}
@@ -44,6 +59,40 @@ class CategoryHelper(metaclass=WeakSingleton):
self._tv_categorys = self._categorys.get('tv')
logger.info(f"已加载二级分类策略 category.yaml")
def load(self) -> CategoryConfig:
    """
    Load the category strategy configuration from category.yaml.

    :return: the parsed ``CategoryConfig``; an empty default config when the
        file does not exist, fails to parse, or fails model validation.
    """
    config = CategoryConfig()
    if not self._category_path.exists():
        return config
    try:
        with open(self._category_path, 'r', encoding='utf-8') as f:
            yaml_loader = ruamel.yaml.YAML()
            data = yaml_loader.load(f)
            if data:
                # Validate the raw YAML mapping into the pydantic model
                config = CategoryConfig(**data)
    except Exception as e:
        # Best-effort: a broken file must not crash callers; fall back to
        # the empty default config
        logger.error(f"Load category config failed: {e}")
    return config
def save(self, config: CategoryConfig) -> bool:
    """
    Persist the category strategy configuration to category.yaml.

    Writes the explanatory header comments first, then the YAML dump of the
    config (fields set to None are omitted via ``exclude_none``).

    :param config: the category configuration to persist
    :return: True on success, False on any write/dump error.
    """
    data = config.model_dump(exclude_none=True)
    try:
        with open(self._category_path, 'w', encoding='utf-8') as f:
            f.write(HEADER_COMMENTS)
            yaml_dumper = ruamel.yaml.YAML()
            yaml_dumper.dump(data, f)
        # Reload after saving so the in-memory strategies reflect the file
        self.init()
        return True
    except Exception as e:
        logger.error(f"Save category config failed: {e}")
        return False
@property
def is_movie_category(self) -> bool:
"""

View File

@@ -167,7 +167,7 @@ class TmdbApi:
"""
记录匹配调试日志
"""
if season_number and season_year:
if season_number is not None and season_year:
logger.debug(f"正在识别{mtype.value}{name}, 季集={season_number}, 季集年份={season_year} ...")
else:
logger.debug(f"正在识别{mtype.value}{name}, 年份={year} ...")
@@ -473,7 +473,7 @@ class TmdbApi:
info = self._set_media_type(info, MediaType.MOVIE)
else:
# 有当前季和当前季集年份,使用精确匹配
if season_year and season_number:
if season_year and season_number is not None:
self._log_match_debug(mtype, name, season_year, season_number, season_year)
info = self.__search_tv_by_season(name,
season_year,
@@ -697,7 +697,7 @@ class TmdbApi:
return {}
ret_seasons = {}
for season_info in tv_info.get("seasons") or []:
if not season_info.get("season_number"):
if season_info.get("season_number") is None:
continue
ret_seasons[season_info.get("season_number")] = season_info
return ret_seasons
@@ -2028,7 +2028,7 @@ class TmdbApi:
info = self._set_media_type(info, MediaType.MOVIE)
else:
# 有当前季和当前季集年份,使用精确匹配
if season_year and season_number:
if season_year and season_number is not None:
self._log_match_debug(mtype, name, season_year, season_number, season_year)
info = await self.__async_search_tv_by_season(name,
season_year,

31
app/schemas/category.py Normal file
View File

@@ -0,0 +1,31 @@
from typing import Dict, Optional
from pydantic import BaseModel, ConfigDict
class CategoryRule(BaseModel):
    """
    Details of a single category rule.

    Each optional field is a match condition loaded from category.yaml.
    Per the config header: multiple values for one condition are separated
    by ``,``; a leading ``!`` on a value excludes it; all configured
    conditions must match simultaneously.
    """
    # Content/genre ids (TMDB `genre_ids`)
    genre_ids: Optional[str] = None
    # Original language
    original_language: Optional[str] = None
    # Country or region (TV shows)
    origin_country: Optional[str] = None
    # Country or region (movies)
    production_countries: Optional[str] = None
    # Release year: YYYY, or a YYYY-YYYY range
    release_year: Optional[str] = None

    # Accept arbitrary extra fields so any other top-level field returned by
    # the themoviedb detail API can also be used as a condition
    model_config = ConfigDict(extra='allow')
class CategoryConfig(BaseModel):
    """
    Category strategy configuration mirroring category.yaml: the fixed
    top-level keys `movie` and `tv` each map a secondary category name
    (also used as the directory name) to its rule.
    """
    # Movie category strategies, keyed by category/directory name
    movie: Optional[Dict[str, Optional[CategoryRule]]] = {}
    # TV category strategies, keyed by category/directory name
    tv: Optional[Dict[str, Optional[CategoryRule]]] = {}

View File

@@ -29,3 +29,10 @@ class RateLimitExceededException(LimitException):
这个异常通常用于本地限流逻辑(例如 RateLimiter当系统检测到函数调用频率过高时触发限流并抛出该异常。
"""
pass
class OperationInterrupted(KeyboardInterrupt):
    """
    Raised to indicate that an operation was interrupted.

    NOTE: inherits from KeyboardInterrupt (a BaseException), so generic
    ``except Exception`` handlers will NOT catch it — presumably so the
    interruption propagates past best-effort error handling; confirm with
    the raising call sites.
    """
    pass

View File

@@ -3,11 +3,11 @@ from typing import Optional, List, Any, Callable
from pydantic import BaseModel, Field
from app.schemas.tmdb import TmdbEpisode
from app.schemas.history import DownloadHistory
from app.schemas.context import MetaInfo, MediaInfo
from app.schemas.file import FileItem
from app.schemas.history import DownloadHistory
from app.schemas.system import TransferDirectoryConf
from app.schemas.tmdb import TmdbEpisode
class TransferTorrent(BaseModel):
@@ -124,14 +124,6 @@ class TransferInfo(BaseModel):
total_size: Optional[int] = Field(default=0)
# 失败清单
fail_list: Optional[list] = Field(default_factory=list)
# 处理字幕文件清单
subtitle_list: Optional[list] = Field(default_factory=list)
# 目标字幕文件清单
subtitle_list_new: Optional[list] = Field(default_factory=list)
# 处理音频文件清单
audio_list: Optional[list] = Field(default_factory=list)
# 目标音频文件清单
audio_list_new: Optional[list] = Field(default_factory=list)
# 错误信息
message: Optional[str] = None
# 是否需要刮削

View File

@@ -38,10 +38,18 @@ class EventType(Enum):
SiteUpdated = "site.updated"
# 站点已刷新
SiteRefreshed = "site.refreshed"
# 整理完成
# 媒体文件整理完成
TransferComplete = "transfer.complete"
# 整理失败
# 媒体文件整理失败
TransferFailed = "transfer.failed"
# 字幕整理完成
SubtitleTransferComplete = "transfer.subtitle.complete"
# 字幕整理失败
SubtitleTransferFailed = "transfer.subtitle.failed"
# 音频文件整理完成
AudioTransferComplete = "transfer.audio.complete"
# 音频文件整理失败
AudioTransferFailed = "transfer.audio.failed"
# 下载已添加
DownloadAdded = "download.added"
# 删除历史记录
@@ -88,6 +96,11 @@ EVENT_TYPE_NAMES = {
EventType.SiteUpdated: "站点已更新",
EventType.SiteRefreshed: "站点已刷新",
EventType.TransferComplete: "整理完成",
EventType.TransferFailed: "整理失败",
EventType.SubtitleTransferComplete: "字幕整理完成",
EventType.SubtitleTransferFailed: "字幕整理失败",
EventType.AudioTransferComplete: "音频整理完成",
EventType.AudioTransferFailed: "音频整理失败",
EventType.DownloadAdded: "添加下载",
EventType.HistoryDeleted: "删除历史记录",
EventType.DownloadFileDeleted: "删除下载源文件",

View File

@@ -98,8 +98,14 @@ class ExponentialBackoffRateLimiter(BaseRateLimiter):
每次触发限流时,等待时间会成倍增加,直到达到最大等待时间
"""
def __init__(self, base_wait: float = 60.0, max_wait: float = 600.0, backoff_factor: float = 2.0,
source: str = "", enable_logging: bool = True):
def __init__(
self,
base_wait: float = 60.0,
max_wait: float = 600.0,
backoff_factor: float = 2.0,
source: str = "",
enable_logging: bool = True,
):
"""
初始化 ExponentialBackoffRateLimiter 实例
:param base_wait: 基础等待时间(秒),默认值为 60 秒1 分钟)
@@ -156,7 +162,9 @@ class ExponentialBackoffRateLimiter(BaseRateLimiter):
current_time = time.time()
with self.lock:
self.next_allowed_time = current_time + self.current_wait
self.current_wait = min(self.current_wait * self.backoff_factor, self.max_wait)
self.current_wait = min(
self.current_wait * self.backoff_factor, self.max_wait
)
wait_time = self.next_allowed_time - current_time
self.log_warning(f"触发限流,将在 {wait_time:.2f} 秒后允许继续调用")
@@ -168,8 +176,13 @@ class WindowRateLimiter(BaseRateLimiter):
如果超过允许的最大调用次数,则限流直到窗口期结束
"""
def __init__(self, max_calls: int, window_seconds: float,
source: str = "", enable_logging: bool = True):
def __init__(
self,
max_calls: int,
window_seconds: float,
source: str = "",
enable_logging: bool = True,
):
"""
初始化 WindowRateLimiter 实例
:param max_calls: 在时间窗口内允许的最大调用次数
@@ -190,7 +203,10 @@ class WindowRateLimiter(BaseRateLimiter):
current_time = time.time()
with self.lock:
# 清理超出时间窗口的调用记录
while self.call_times and current_time - self.call_times[0] > self.window_seconds:
while (
self.call_times
and current_time - self.call_times[0] > self.window_seconds
):
self.call_times.popleft()
if len(self.call_times) < self.max_calls:
@@ -225,8 +241,12 @@ class CompositeRateLimiter(BaseRateLimiter):
当任意一个限流策略触发限流时,都会阻止调用
"""
def __init__(self, limiters: List[BaseRateLimiter], source: str = "", enable_logging: bool = True):
def __init__(
self,
limiters: List[BaseRateLimiter],
source: str = "",
enable_logging: bool = True,
):
"""
初始化 CompositeRateLimiter 实例
:param limiters: 要组合的限流器列表
@@ -263,7 +283,9 @@ class CompositeRateLimiter(BaseRateLimiter):
# 通用装饰器:自定义限流器实例
def rate_limit_handler(limiter: BaseRateLimiter, raise_on_limit: bool = False) -> Callable:
def rate_limit_handler(
limiter: BaseRateLimiter, raise_on_limit: bool = False
) -> Callable:
"""
通用装饰器,允许用户传递自定义的限流器实例,用于处理限流逻辑
该装饰器可灵活支持任意继承自 BaseRateLimiter 的限流器
@@ -344,8 +366,14 @@ def rate_limit_handler(limiter: BaseRateLimiter, raise_on_limit: bool = False) -
# 装饰器:指数退避限流
def rate_limit_exponential(base_wait: float = 60.0, max_wait: float = 600.0, backoff_factor: float = 2.0,
raise_on_limit: bool = False, source: str = "", enable_logging: bool = True) -> Callable:
def rate_limit_exponential(
base_wait: float = 60.0,
max_wait: float = 600.0,
backoff_factor: float = 2.0,
raise_on_limit: bool = False,
source: str = "",
enable_logging: bool = True,
) -> Callable:
"""
装饰器,用于应用指数退避限流策略
通过逐渐增加调用等待时间控制调用频率。每次触发限流时,等待时间会成倍增加,直到达到最大等待时间
@@ -359,14 +387,21 @@ def rate_limit_exponential(base_wait: float = 60.0, max_wait: float = 600.0, bac
:return: 装饰器函数
"""
# 实例化 ExponentialBackoffRateLimiter并传入相关参数
limiter = ExponentialBackoffRateLimiter(base_wait, max_wait, backoff_factor, source, enable_logging)
limiter = ExponentialBackoffRateLimiter(
base_wait, max_wait, backoff_factor, source, enable_logging
)
# 使用通用装饰器逻辑包装该限流器
return rate_limit_handler(limiter, raise_on_limit)
# 装饰器:时间窗口限流
def rate_limit_window(max_calls: int, window_seconds: float,
raise_on_limit: bool = False, source: str = "", enable_logging: bool = True) -> Callable:
def rate_limit_window(
max_calls: int,
window_seconds: float,
raise_on_limit: bool = False,
source: str = "",
enable_logging: bool = True,
) -> Callable:
"""
装饰器,用于应用时间窗口限流策略
在固定的时间窗口内限制调用次数,当调用次数超过最大值时,触发限流,直到时间窗口结束
@@ -407,3 +442,63 @@ class QpsRateLimiter:
self.next_call_time = max(now, self.next_call_time) + self.interval
if sleep_duration > 0:
time.sleep(sleep_duration)
class RateStats:
    """
    Request-rate statistics: records request timestamps and reports
    QPS / QPM / QPH (request counts over the last second / minute / hour).

    Access to the timestamp buffer is guarded by a lock, so instances can
    be shared between threads.
    """

    def __init__(self, window_seconds: float = 7200, source: str = ""):
        """
        :param window_seconds: retention window (seconds) for recorded
            timestamps, default 2 hours; must be >= 3600 for get_qph()
            to count a full hour
        :param source: label prefixed to log output
        """
        self._window = window_seconds
        self._source = source
        self._lock = threading.Lock()
        # Timestamps are appended by record(), hence in increasing order
        self._timestamps: deque = deque()

    def record(self) -> None:
        """
        Record one request and prune timestamps older than the window.
        """
        t = time.time()
        with self._lock:
            self._timestamps.append(t)
            while self._timestamps and t - self._timestamps[0] > self._window:
                self._timestamps.popleft()

    def _count_since(self, seconds: float) -> int:
        """
        Count requests recorded within the last ``seconds`` seconds.

        Timestamps are stored in increasing order, so scan from the newest
        backwards and stop at the first entry outside the horizon — the
        cost is proportional to the number of recent entries, not to the
        whole retention window.
        """
        t = time.time()
        with self._lock:
            count = 0
            for ts in reversed(self._timestamps):
                if t - ts > seconds:
                    break
                count += 1
            return count

    def get_qps(self) -> int:
        """
        Number of requests in the last 1 second.
        """
        return self._count_since(1.0)

    def get_qpm(self) -> int:
        """
        Number of requests in the last 1 minute.
        """
        return self._count_since(60.0)

    def get_qph(self) -> int:
        """
        Number of requests in the last 1 hour.
        """
        return self._count_since(3600.0)

    def log_stats(self, level: str = "info") -> None:
        """
        Log the current QPS/QPM/QPH at the given level; unknown level
        names fall back to info.
        """
        qps, qpm, qph = self.get_qps(), self.get_qpm(), self.get_qph()
        msg = f"QPS={qps} QPM={qpm} QPH={qph}"
        if self._source:
            msg = f"[{self._source}] {msg}"
        log_fn = getattr(logger, level, logger.info)
        log_fn(msg)

View File

@@ -166,10 +166,8 @@ class SystemUtils:
移动
"""
try:
# 当前目录改名
temp = src.replace(src.parent / dest.name)
# 移动到目标目录
shutil.move(temp, dest)
# 直接移动到目标路径,避免中间改名步骤触发目录监控
shutil.move(src, dest)
return 0, ""
except Exception as err:
return -1, str(err)

View File

@@ -6,8 +6,9 @@ Create Date: 2026-01-13 13:02:41.614029
"""
from app.db import ScopedSession
from app.db.models.systemconfig import SystemConfig
from alembic import op
from sqlalchemy import text
from app.log import logger
# revision identifiers, used by Alembic.
@@ -19,22 +20,28 @@ depends_on = None
def upgrade() -> None:
# systemconfig表 去重
with ScopedSession() as db:
try:
seen_keys = set()
# 按ID降序查询以便保留最新的配置
for item in db.query(SystemConfig).order_by(SystemConfig.id.desc()).all():
if item.key in seen_keys:
logger.warn(
f"已删除重复的SystemConfig项{item.key} 值:{item.value}"
)
db.delete(item)
else:
seen_keys.add(item.key)
db.commit()
except Exception as e:
logger.error(e)
db.rollback()
connection = op.get_bind()
select_stmt = text(
"""
SELECT id, key, value
FROM SystemConfig
WHERE id NOT IN (
SELECT MAX(id)
FROM SystemConfig
GROUP BY key
)
"""
)
to_delete = connection.execute(select_stmt).fetchall()
for row in to_delete:
logger.warn(
f"已删除重复的 SystemConfig 项key={row.key}, value={row.value}, id={row.id}"
)
delete_stmt = text("DELETE FROM SystemConfig WHERE id = :id")
connection.execute(delete_stmt, {"id": row.id})
logger.info("SystemConfig 表去重操作已完成。")
def downgrade() -> None:

View File

@@ -91,3 +91,4 @@ langchain-deepseek~=0.1.4
langchain-experimental~=0.3.4
openai~=1.108.2
google-generativeai~=0.8.5
ddgs~=9.10.0

View File

@@ -1,2 +1,2 @@
APP_VERSION = 'v2.9.5'
FRONTEND_VERSION = 'v2.9.5'
APP_VERSION = 'v2.9.10'
FRONTEND_VERSION = 'v2.9.10'