Compare commits

...

21 Commits

Author SHA1 Message Date
jxxghp
ad40b99313 更新 version.py 2026-04-07 13:24:13 +08:00
jxxghp
1e338e48ab fix(agent): 基于langgraph_step过滤中间步骤思考文本,抽离ThinkTagStripper类
- 利用metadata中的langgraph_step检测工具调用前的中间步骤,非VERBOSE模式下
  自动reset清除模型输出的计划/推理文本(如NEXT STEPS、tool call描述等)
- 将<think>标签流式剥离逻辑抽离为独立的_ThinkTagStripper类,简化主流程
2026-04-07 12:42:46 +08:00
jxxghp
ac9c9598f4 feat(agent): add tools for querying and updating custom identifiers 2026-04-07 09:00:15 +08:00
jxxghp
02cb5dfc31 refactor(agent): optimize database-operation skill to read DB info from system prompt <system_info> 2026-04-07 07:37:39 +08:00
jxxghp
8109ffb445 feat(agent): add /stop_agent command for emergency stop of agent reasoning
Add /stop_agent command that cancels the currently running agent reasoning
task without clearing the session or memory. Unlike /clear_session which
destroys the entire session, this allows users to stop a long-running or
stuck agent process and continue the conversation afterward.
2026-04-07 07:32:35 +08:00
jxxghp
0ecbcb89fa 更新 SKILL.md 2026-04-07 07:16:18 +08:00
jxxghp
8f38c06424 feat(agent): add database-operation skill for SQL access with auto SQLite/PostgreSQL detection 2026-04-07 00:43:28 +08:00
jxxghp
902394f86e fix(agent): resolve circular import by lazy-importing Command in run_slash_command and list_slash_commands 2026-04-07 00:16:09 +08:00
jxxghp
9fefd807f9 refactor(agent): rename list_all_commands to list_slash_commands and skill to command-dispatch 2026-04-07 00:00:10 +08:00
jxxghp
a8fb4a6d84 refactor(agent): rename run_plugin_command to run_slash_command to avoid confusion with execute_command (shell) 2026-04-06 23:53:49 +08:00
jxxghp
7806267e92 feat(agent): add command-execute skill for intelligent command dispatch
- Enhance run_plugin_command tool to support all registered commands (system preset + plugin + other), not just plugin commands
- Add list_all_commands tool to discover all available commands with descriptions and categories
- Add command-execute skill that guides the agent to recognize user intent from natural language and match it to available system/plugin commands
2026-04-06 23:45:48 +08:00
Attente
eb5e17a115 test: 补充媒体刮削路径、图片与事件流程测试 2026-04-06 11:28:00 +08:00
Attente
2ae98d628d feat(subscribe): 优化洗版订阅合集的识别 2026-04-05 21:39:44 +08:00
EkkoG
8b9dc0e77f 修复 QQbot渠道依旧会重复发送消息问题 2026-04-05 15:42:46 +08:00
Attente
2f151cea64 fix(helper): 统一redis缓存键 2026-04-05 13:55:54 +08:00
jxxghp
b777e8cab1 删除 .DS_Store 2026-04-04 14:06:05 +08:00
jxxghp
663e37bd03 refactor: SendMessageTool message_type 改为消息标题 2026-04-04 07:42:36 +08:00
jxxghp
8960620883 更新 __init__.py 2026-04-04 07:29:41 +08:00
jxxghp
5b892b3a63 fix: 修复Gemini 2.5思考模型工具调用时thought_signature缺失导致400错误
- Google provider统一使用ChatGoogleGenerativeAI原生接口,不再走OpenAI兼容端点
  (OpenAI协议不支持thought_signature字段,导致思考模型工具调用必然失败)
- 通过client_args传递代理配置,替代原来的OpenAI兼容端点+openai_proxy方案
- 修补langchain-google-genai的_is_gemini_3_or_later()以覆盖Gemini 2.5模型
- 自动适配httpx代理参数名(proxies/proxy),修复代理配置被静默丢弃的问题
2026-04-04 07:24:47 +08:00
jxxghp
974d5f2f49 fix: 修复获取Google模型列表阻塞事件循环及缺少代理配置的问题 2026-04-04 06:58:39 +08:00
DDSRem
f70881bb4f feat: TransferRename 事件增加 source_item 源文件信息 2026-04-03 17:51:05 +08:00
29 changed files with 2180 additions and 623 deletions

BIN
.DS_Store vendored

Binary file not shown.

View File

@@ -36,6 +36,79 @@ class AgentChain(ChainBase):
pass
class _ThinkTagStripper:
"""
流式剥离 <think>...</think> 标签的辅助类。
维护内部缓冲区,处理标签跨 token 边界被截断的情况。
"""
def __init__(self):
self.buffer = ""
self.in_think_tag = False
def reset(self):
"""重置状态"""
self.buffer = ""
self.in_think_tag = False
def process(self, text: str, on_output: Callable[[str], None]):
"""
将新文本送入处理,剥离 <think> 标签后通过 on_output 回调输出。
:param text: 新增的文本片段
:param on_output: 输出回调,接收过滤后的文本
:return: 本次调用是否通过 on_output 输出了内容
"""
self.buffer += text
emitted = False
while self.buffer:
if not self.in_think_tag:
start_idx = self.buffer.find("<think>")
if start_idx != -1:
if start_idx > 0:
on_output(self.buffer[:start_idx])
emitted = True
self.in_think_tag = True
self.buffer = self.buffer[start_idx + 7:]
else:
# 检查是否以 <think> 的不完整前缀结尾
partial_match = False
for i in range(6, 0, -1):
if self.buffer.endswith("<think>"[:i]):
if len(self.buffer) > i:
on_output(self.buffer[:-i])
emitted = True
self.buffer = self.buffer[-i:]
partial_match = True
break
if not partial_match:
on_output(self.buffer)
emitted = True
self.buffer = ""
else:
end_idx = self.buffer.find("</think>")
if end_idx != -1:
self.in_think_tag = False
self.buffer = self.buffer[end_idx + 8:]
else:
# 检查是否以 </think> 的不完整前缀结尾
partial_match = False
for i in range(7, 0, -1):
if self.buffer.endswith("</think>"[:i]):
self.buffer = self.buffer[-i:]
partial_match = True
break
if not partial_match:
self.buffer = ""
break
return emitted
def flush(self, on_output: Callable[[str], None]):
"""流式结束时,输出缓冲区中剩余的非思考内容"""
if self.buffer and not self.in_think_tag:
on_output(self.buffer)
self.buffer = ""
class MoviePilotAgent:
"""
MoviePilot AI智能体基于 LangChain v1 + LangGraph
@@ -218,8 +291,11 @@ class MoviePilotAgent:
:param config: Agent 运行配置
:param on_token: 收到有效 token 时的回调
"""
in_think_tag = False
buffer = ""
stripper = _ThinkTagStripper()
# 非VERBOSE模式下跟踪当前langgraph_step以检测中间步骤的模型输出
# 当模型在工具调用之前输出的"计划/思考"文本会在检测到tool_call时被清除
current_model_step = -1
has_emitted_in_step = False
async for chunk in agent.astream(
messages,
@@ -230,59 +306,41 @@ class MoviePilotAgent:
):
if chunk["type"] == "messages":
token, metadata = chunk["data"]
if (
token
and hasattr(token, "tool_call_chunks")
and not token.tool_call_chunks
):
# 跳过模型思考/推理内容(如 DeepSeek R1 的 reasoning_content
additional = getattr(token, "additional_kwargs", None)
if additional and additional.get("reasoning_content"):
continue
if token.content:
# content 可能是字符串或内容块列表,过滤掉思考类型的块
content = self._extract_text_content(token.content)
if content:
buffer += content
while buffer:
if not in_think_tag:
start_idx = buffer.find("<think>")
if start_idx != -1:
if start_idx > 0:
on_token(buffer[:start_idx])
in_think_tag = True
buffer = buffer[start_idx + 7 :]
else:
# 检查是否以 <think> 的前缀结尾
partial_match = False
for i in range(6, 0, -1):
if buffer.endswith("<think>"[:i]):
if len(buffer) > i:
on_token(buffer[:-i])
buffer = buffer[-i:]
partial_match = True
break
if not partial_match:
on_token(buffer)
buffer = ""
else:
end_idx = buffer.find("</think>")
if end_idx != -1:
in_think_tag = False
buffer = buffer[end_idx + 8 :]
else:
# 检查是否以 </think> 的前缀结尾
partial_match = False
for i in range(7, 0, -1):
if buffer.endswith("</think>"[:i]):
buffer = buffer[-i:]
partial_match = True
break
if not partial_match:
buffer = ""
if not token or not hasattr(token, "tool_call_chunks"):
continue
if buffer and not in_think_tag:
on_token(buffer)
# 获取当前步骤信息
step = metadata.get("langgraph_step", -1) if metadata else -1
if token.tool_call_chunks:
# 检测到工具调用token说明当前步骤是中间步骤
# 非VERBOSE模式下清除该步骤之前输出的"计划/思考"文本
if not settings.AI_AGENT_VERBOSE and has_emitted_in_step:
self.stream_handler.reset()
stripper.reset()
has_emitted_in_step = False
continue
# 以下处理纯文本tokentool_call_chunks为空
# 检测步骤变化重置步骤内emit跟踪
if step != current_model_step:
current_model_step = step
has_emitted_in_step = False
# 跳过模型思考/推理内容(如 DeepSeek R1 的 reasoning_content
additional = getattr(token, "additional_kwargs", None)
if additional and additional.get("reasoning_content"):
continue
if token.content:
# content 可能是字符串或内容块列表,过滤掉思考类型的块
content = self._extract_text_content(token.content)
if content:
if stripper.process(content, on_token):
has_emitted_in_step = True
stripper.flush(on_token)
async def _execute_agent(self, messages: List[BaseMessage]):
"""
@@ -604,6 +662,43 @@ class AgentManager:
return await agent.process(task.message, images=task.images)
async def stop_current_task(self, session_id: str):
    """
    Emergency-stop the agent reasoning task currently running for a session,
    while keeping the session and its memory intact.

    Unlike ``clear_session``, this does not destroy the agent instance or its
    memory, so the user can continue the conversation after the stop.

    :param session_id: identifier of the session whose task should be stopped
    :return: True if a running worker was cancelled or a pending queue drained
    """
    stopped = False
    worker = self._session_workers.get(session_id)
    if worker is not None:
        # Cancelling the worker raises CancelledError inside _execute_agent,
        # unwinding the in-flight reasoning task.
        worker.cancel()
        try:
            await worker
        except asyncio.CancelledError:
            pass
        self._session_workers.pop(session_id, None)  # noqa
        stopped = True
    # Drop any messages still waiting in this session's queue.
    queue = self._session_queues.get(session_id)
    if queue is not None:
        while not queue.empty():
            try:
                queue.get_nowait()
                queue.task_done()
            except asyncio.QueueEmpty:
                break
        self._session_queues.pop(session_id, None)
        stopped = True
    if stopped:
        logger.info(f"会话 {session_id} 的Agent推理已应急停止")
    else:
        logger.debug(f"会话 {session_id} 没有正在执行的Agent任务")
    return stopped
async def clear_session(self, session_id: str, user_id: str):
"""
清空会话

View File

@@ -38,7 +38,7 @@ class StreamingHandler:
"""
# 流式输出的刷新间隔(秒)
FLUSH_INTERVAL = 1.0
FLUSH_INTERVAL = 0.3
def __init__(self):
self._lock = threading.Lock()

View File

@@ -50,7 +50,10 @@ from app.agent.tools.impl.read_file import ReadFileTool
from app.agent.tools.impl.browse_webpage import BrowseWebpageTool
from app.agent.tools.impl.query_installed_plugins import QueryInstalledPluginsTool
from app.agent.tools.impl.query_plugin_capabilities import QueryPluginCapabilitiesTool
from app.agent.tools.impl.run_plugin_command import RunPluginCommandTool
from app.agent.tools.impl.run_slash_command import RunSlashCommandTool
from app.agent.tools.impl.list_slash_commands import ListSlashCommandsTool
from app.agent.tools.impl.query_custom_identifiers import QueryCustomIdentifiersTool
from app.agent.tools.impl.update_custom_identifiers import UpdateCustomIdentifiersTool
from app.core.plugin import PluginManager
from app.log import logger
from .base import MoviePilotTool
@@ -125,7 +128,10 @@ class MoviePilotToolFactory:
BrowseWebpageTool,
QueryInstalledPluginsTool,
QueryPluginCapabilitiesTool,
RunPluginCommandTool,
RunSlashCommandTool,
ListSlashCommandsTool,
QueryCustomIdentifiersTool,
UpdateCustomIdentifiersTool,
]
# 创建内置工具
for ToolClass in tool_definitions:

View File

@@ -0,0 +1,79 @@
"""查询所有可用斜杠命令工具(系统命令 + 插件命令)"""
import json
from typing import Optional, Type
from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool
from app.log import logger
class ListSlashCommandsInput(BaseModel):
    """查询所有可用斜杠命令工具的输入参数模型"""

    # NOTE(review): docstring kept verbatim — pydantic may surface model
    # docstrings in the generated tool schema.
    # Free-text justification the LLM must provide when invoking the tool.
    explanation: str = Field(
        ...,
        description="Clear explanation of why this tool is being used in the current context",
    )
class ListSlashCommandsTool(MoviePilotTool):
    # Tool metadata consumed by the agent framework.
    name: str = "list_slash_commands"
    description: str = (
        "List all available slash commands in the system, including system preset commands "
        "(e.g. /cookiecloud, /sites, /subscribes, /downloading, /transfer, /restart, etc.) "
        "and plugin-registered commands. "
        "Use this tool to discover what slash commands are available before executing them with run_slash_command. "
        "This is especially useful when the user describes an action in natural language and you need to "
        "find the matching command to fulfill their request."
    )
    args_schema: Type[BaseModel] = ListSlashCommandsInput
    require_admin: bool = True

    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Friendly progress message shown while the tool runs."""
        return "正在查询所有可用命令"

    @staticmethod
    def _describe(cmd: str, info: dict) -> dict:
        """Build the JSON description of one registered command entry."""
        entry = {
            "command": cmd,
            "description": info.get("description", ""),
        }
        if info.get("category"):
            entry["category"] = info["category"]
        # Tag the command origin: scheduler task, plugin command, or system preset.
        if info.get("type") == "scheduler":
            entry["type"] = "scheduler"
        elif info.get("pid"):
            entry["type"] = "plugin"
            entry["plugin_id"] = info["pid"]
        else:
            entry["type"] = "system"
        return entry

    async def run(self, **kwargs) -> str:
        """Return all registered slash commands as a JSON document."""
        logger.info(f"执行工具: {self.name}")
        try:
            # Imported lazily to avoid a circular import with app.command.
            from app.command import Command

            all_commands = Command().get_commands()
            if not all_commands:
                return "当前没有可用的命令"
            commands_list = [
                self._describe(cmd, info) for cmd, info in all_commands.items()
            ]
            return json.dumps(
                {"total": len(commands_list), "commands": commands_list},
                ensure_ascii=False,
                indent=2,
            )
        except Exception as e:
            logger.error(f"查询可用命令失败: {e}", exc_info=True)
            return json.dumps(
                {"success": False, "message": f"查询可用命令时发生错误: {str(e)}"},
                ensure_ascii=False,
            )

View File

@@ -0,0 +1,66 @@
"""查询自定义识别词工具"""
import json
from typing import Optional, Type
from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool
from app.db.systemconfig_oper import SystemConfigOper
from app.log import logger
from app.schemas.types import SystemConfigKey
class QueryCustomIdentifiersInput(BaseModel):
    """查询自定义识别词工具的输入参数模型"""

    # NOTE(review): docstring kept verbatim — pydantic may surface model
    # docstrings in the generated tool schema.
    # Free-text justification the LLM must provide when invoking the tool.
    explanation: str = Field(
        ...,
        description="Clear explanation of why this tool is being used in the current context",
    )
class QueryCustomIdentifiersTool(MoviePilotTool):
    # Tool metadata consumed by the agent framework.
    name: str = "query_custom_identifiers"
    description: str = (
        "Query all currently configured custom identifiers (自定义识别词). "
        "Returns the list of identifier rules used for preprocessing torrent/file names before media recognition. "
        "Use this tool to check existing rules before adding new ones to avoid duplicates."
    )
    args_schema: Type[BaseModel] = QueryCustomIdentifiersInput

    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Friendly progress message shown while the tool runs."""
        return "正在查询自定义识别词"

    async def run(self, **kwargs) -> str:
        """Return the configured custom identifier rules as a JSON document."""
        logger.info(f"执行工具: {self.name}")
        try:
            identifiers = SystemConfigOper().get(SystemConfigKey.CustomIdentifiers)
            if identifiers:
                payload = {
                    "success": True,
                    "count": len(identifiers),
                    "identifiers": identifiers,
                }
            else:
                # No rules configured: return an explicit empty result.
                payload = {
                    "success": True,
                    "count": 0,
                    "identifiers": [],
                    "message": "当前没有配置自定义识别词",
                }
            return json.dumps(payload, ensure_ascii=False, indent=2)
        except Exception as e:
            logger.error(f"查询自定义识别词失败: {e}")
            return json.dumps(
                {"success": False, "message": f"查询自定义识别词时发生错误: {str(e)}"},
                ensure_ascii=False,
            )

View File

@@ -29,7 +29,7 @@ class QueryPluginCapabilitiesTool(MoviePilotTool):
name: str = "query_plugin_capabilities"
description: str = (
"Query the capabilities of installed plugins, including supported commands and scheduled services. "
"Commands are slash-commands (e.g. /xxx) that can be executed via the run_plugin_command tool. "
"Commands are slash-commands (e.g. /xxx) that can be executed via the run_slash_command tool. "
"Scheduled services are periodic tasks that can be triggered via the run_scheduler tool. "
"Optionally specify a plugin_id to query a specific plugin, or omit to query all running plugins."
)

View File

@@ -1,4 +1,4 @@
"""运行插件命令工具"""
"""运行斜杠命令工具(系统命令 + 插件命令"""
import json
from typing import Optional, Type
@@ -7,13 +7,12 @@ from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool
from app.core.event import eventmanager
from app.core.plugin import PluginManager
from app.log import logger
from app.schemas.types import EventType, MessageChannel
class RunPluginCommandInput(BaseModel):
"""运行插件命令工具的输入参数模型"""
class RunSlashCommandInput(BaseModel):
"""运行斜杠命令工具的输入参数模型"""
explanation: str = Field(
...,
@@ -23,26 +22,30 @@ class RunPluginCommandInput(BaseModel):
...,
description="The slash command to execute, e.g. '/cookiecloud'. "
"Must start with '/'. Can include arguments after the command, e.g. '/command arg1 arg2'. "
"Use query_plugin_capabilities tool to discover available commands first.",
"Use query_plugin_capabilities tool to discover available plugin commands, "
"or list_slash_commands tool to discover all available commands (including system commands).",
)
class RunPluginCommandTool(MoviePilotTool):
name: str = "run_plugin_command"
class RunSlashCommandTool(MoviePilotTool):
name: str = "run_slash_command"
description: str = (
"Execute a plugin command by sending a CommandExcute event. "
"Plugin commands are slash-commands (starting with '/') registered by plugins. "
"Use the query_plugin_capabilities tool first to discover available commands and their descriptions. "
"Execute a slash command (system or plugin) by sending a CommandExcute event. "
"This tool supports ALL registered slash commands, including: "
"1) System preset commands (e.g. /cookiecloud, /sites, /subscribes, /downloading, /transfer, /restart, etc.) "
"2) Plugin commands registered by installed plugins. "
"Use the query_plugin_capabilities tool to discover plugin commands, "
"or the list_slash_commands tool to discover all available commands. "
"The command will be executed asynchronously. "
"Note: This tool triggers the command execution but the actual processing happens in the background."
)
args_schema: Type[BaseModel] = RunPluginCommandInput
args_schema: Type[BaseModel] = RunSlashCommandInput
require_admin: bool = True
def get_tool_message(self, **kwargs) -> Optional[str]:
"""生成友好的提示消息"""
command = kwargs.get("command", "")
return f"正在执行插件命令: {command}"
return f"正在执行命令: {command}"
async def run(self, command: str, **kwargs) -> str:
logger.info(f"执行工具: {self.name}, 参数: command={command}")
@@ -52,21 +55,19 @@ class RunPluginCommandTool(MoviePilotTool):
if not command.startswith("/"):
command = f"/{command}"
# 验证命令是否存在
plugin_manager = PluginManager()
registered_commands = plugin_manager.get_plugin_commands()
# 从全局 Command 单例中验证命令是否存在(包含系统预设命令 + 插件命令 + 其他命令)
from app.command import Command
cmd_name = command.split()[0]
matched_command = None
for cmd in registered_commands:
if cmd.get("cmd") == cmd_name:
matched_command = cmd
break
command_obj = Command()
matched_command = command_obj.get(cmd_name)
if not matched_command:
# 列出可用命令帮助用户
# 列出所有可用命令帮助用户
all_commands = command_obj.get_commands()
available_cmds = [
f"{cmd.get('cmd')} - {cmd.get('desc', '无描述')}"
for cmd in registered_commands
f"{cmd} - {info.get('description', '无描述')}"
for cmd, info in all_commands.items()
]
result = {
"success": False,
@@ -99,14 +100,16 @@ class RunPluginCommandTool(MoviePilotTool):
"success": True,
"message": f"命令 {cmd_name} 已触发执行",
"command": command,
"command_desc": matched_command.get("desc", ""),
"plugin_id": matched_command.get("pid", ""),
"command_desc": matched_command.get("description", ""),
}
# 如果是插件命令附加插件ID
if matched_command.get("pid"):
result["plugin_id"] = matched_command["pid"]
return json.dumps(result, ensure_ascii=False, indent=2)
except Exception as e:
logger.error(f"执行插件命令失败: {e}", exc_info=True)
logger.error(f"执行命令失败: {e}", exc_info=True)
return json.dumps(
{"success": False, "message": f"执行插件命令时发生错误: {str(e)}"},
{"success": False, "message": f"执行命令时发生错误: {str(e)}"},
ensure_ascii=False,
)

View File

@@ -20,8 +20,8 @@ class SendMessageInput(BaseModel):
description="The message content to send to the user (should be clear and informative)",
)
message_type: Optional[str] = Field(
"info",
description="Type of message: 'info' for general information, 'success' for successful operations, 'warning' for warnings, 'error' for error messages",
None,
description="Title of the message, a short summary of the message content",
)
@@ -34,30 +34,23 @@ class SendMessageTool(MoviePilotTool):
def get_tool_message(self, **kwargs) -> Optional[str]:
"""根据消息参数生成友好的提示消息"""
message = kwargs.get("message", "")
message_type = kwargs.get("message_type", "info")
type_map = {
"info": "信息",
"success": "成功",
"warning": "警告",
"error": "错误",
}
type_desc = type_map.get(message_type, message_type)
title = kwargs.get("message_type") or ""
# 截断过长的消息
if len(message) > 50:
message = message[:50] + "..."
return f"正在发送{type_desc}消息: {message}"
if title:
return f"正在发送消息: [{title}] {message}"
return f"正在发送消息: {message}"
async def run(
self, message: str, message_type: Optional[str] = None, **kwargs
) -> str:
logger.info(
f"执行工具: {self.name}, 参数: message={message}, message_type={message_type}"
)
title = message_type or ""
logger.info(f"执行工具: {self.name}, 参数: title={title}, message={message}")
try:
await self.send_tool_message(message, title=message_type)
await self.send_tool_message(message, title=title)
return "消息已发送"
except Exception as e:
logger.error(f"发送消息失败: {e}")

View File

@@ -0,0 +1,95 @@
"""更新自定义识别词工具"""
import json
from typing import List, Optional, Type
from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool
from app.db.systemconfig_oper import SystemConfigOper
from app.log import logger
from app.schemas.types import SystemConfigKey
class UpdateCustomIdentifiersInput(BaseModel):
    """更新自定义识别词工具的输入参数模型"""

    # NOTE(review): docstring kept verbatim — pydantic may surface model
    # docstrings in the generated tool schema.
    # Free-text justification the LLM must provide when invoking the tool.
    explanation: str = Field(
        ...,
        description="Clear explanation of why this tool is being used in the current context",
    )
    # Full replacement list of identifier rules (not a delta).
    identifiers: List[str] = Field(
        ...,
        description=(
            "The complete list of custom identifier rules to save. "
            "This REPLACES the entire existing list. "
            "Always query existing identifiers first, merge new rules, then pass the full list."
        ),
    )
class UpdateCustomIdentifiersTool(MoviePilotTool):
    # Tool metadata consumed by the agent framework.
    name: str = "update_custom_identifiers"
    description: str = (
        "Update the full list of custom identifiers (自定义识别词) used for preprocessing torrent/file names. "
        "This tool REPLACES all existing identifier rules with the provided list. "
        "IMPORTANT: Always use 'query_custom_identifiers' first to get existing rules, "
        "then merge new rules into the list before calling this tool to avoid accidentally deleting existing rules. "
        "Supported rule formats (spaces around operators are required): "
        "1) Block word: just the word/regex to remove; "
        "2) Replacement: '被替换词 => 替换词'; "
        "3) Episode offset: '前定位词 <> 后定位词 >> EP±N'; "
        "4) Combined: '被替换词 => 替换词 && 前定位词 <> 后定位词 >> EP±N'; "
        "Lines starting with '#' are comments. "
        "The replacement target supports: {[tmdbid=xxx;type=movie/tv;s=xxx;e=xxx]} for direct TMDB ID matching."
    )
    args_schema: Type[BaseModel] = UpdateCustomIdentifiersInput

    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Friendly progress message shown while the tool runs."""
        identifiers = kwargs.get("identifiers", [])
        return f"正在更新自定义识别词(共 {len(identifiers)} 条规则)"

    async def run(self, identifiers: List[str] = None, **kwargs) -> str:
        """
        Replace the stored custom identifier rules with the given list.

        :param identifiers: complete replacement list of rule strings
        :return: JSON string describing success/failure and the saved rules
        """
        logger.info(
            f"执行工具: {self.name}, 规则数量: {len(identifiers) if identifiers else 0}"
        )
        try:
            if identifiers is None:
                return json.dumps(
                    {"success": False, "message": "必须提供 identifiers 参数"},
                    ensure_ascii=False,
                )
            # Drop None entries AND empty strings. Bug fix: the original only
            # filtered None (`if i is not None`) although its comment claimed
            # to filter empty strings, letting "" slip into the saved rules.
            identifiers = [i for i in identifiers if i]
            system_config_oper = SystemConfigOper()
            # An empty list is stored as None to clear the config entry.
            value = identifiers if identifiers else None
            success = await system_config_oper.async_set(
                SystemConfigKey.CustomIdentifiers, value
            )
            if success:
                return json.dumps(
                    {
                        "success": True,
                        "message": f"自定义识别词已更新,共 {len(identifiers)} 条规则",
                        "count": len(identifiers),
                        "identifiers": identifiers,
                    },
                    ensure_ascii=False,
                    indent=2,
                )
            else:
                return json.dumps(
                    {"success": False, "message": "保存自定义识别词失败"},
                    ensure_ascii=False,
                )
        except Exception as e:
            logger.error(f"更新自定义识别词失败: {e}")
            return json.dumps(
                {"success": False, "message": f"更新自定义识别词时发生错误: {str(e)}"},
                ensure_ascii=False,
            )

View File

@@ -23,8 +23,11 @@ from app.core.module import ModuleManager
from app.core.security import verify_apitoken, verify_resource_token, verify_token
from app.db.models import User
from app.db.systemconfig_oper import SystemConfigOper
from app.db.user_oper import get_current_active_superuser, get_current_active_superuser_async, \
get_current_active_user_async
from app.db.user_oper import (
get_current_active_superuser,
get_current_active_superuser_async,
get_current_active_user_async,
)
from app.helper.llm import LLMHelper
from app.helper.mediaserver import MediaServerHelper
from app.helper.message import MessageHelper
@@ -47,12 +50,13 @@ router = APIRouter()
async def fetch_image(
url: str,
proxy: Optional[bool] = None,
use_cache: bool = False,
if_none_match: Optional[str] = None,
cookies: Optional[str | dict] = None,
allowed_domains: Optional[set[str]] = None) -> Optional[Response]:
url: str,
proxy: Optional[bool] = None,
use_cache: bool = False,
if_none_match: Optional[str] = None,
cookies: Optional[str | dict] = None,
allowed_domains: Optional[set[str]] = None,
) -> Optional[Response]:
"""
处理图片缓存逻辑支持HTTP缓存和磁盘缓存
"""
@@ -83,47 +87,57 @@ async def fetch_image(
return Response(
content=content,
media_type=UrlUtils.get_mime_type(url, "image/jpeg"),
headers=headers
headers=headers,
)
@router.get("/img/{proxy}", summary="图片代理")
async def proxy_img(
imgurl: str,
proxy: bool = False,
cache: bool = False,
use_cookies: bool = False,
if_none_match: Annotated[str | None, Header()] = None,
_: schemas.TokenPayload = Depends(verify_resource_token)
imgurl: str,
proxy: bool = False,
cache: bool = False,
use_cookies: bool = False,
if_none_match: Annotated[str | None, Header()] = None,
_: schemas.TokenPayload = Depends(verify_resource_token),
) -> Response:
"""
图片代理,可选是否使用代理服务器,支持 HTTP 缓存
"""
# 媒体服务器添加图片代理支持
hosts = [config.config.get("host") for config in MediaServerHelper().get_configs().values() if
config and config.config and config.config.get("host")]
hosts = [
config.config.get("host")
for config in MediaServerHelper().get_configs().values()
if config and config.config and config.config.get("host")
]
allowed_domains = set(settings.SECURITY_IMAGE_DOMAINS) | set(hosts)
cookies = (
MediaServerChain().get_image_cookies(server=None, image_url=imgurl)
if use_cookies
else None
)
return await fetch_image(url=imgurl, proxy=proxy, use_cache=cache, cookies=cookies,
if_none_match=if_none_match, allowed_domains=allowed_domains)
return await fetch_image(
url=imgurl,
proxy=proxy,
use_cache=cache,
cookies=cookies,
if_none_match=if_none_match,
allowed_domains=allowed_domains,
)
@router.get("/cache/image", summary="图片缓存")
async def cache_img(
url: str,
if_none_match: Annotated[str | None, Header()] = None,
_: schemas.TokenPayload = Depends(verify_resource_token)
url: str,
if_none_match: Annotated[str | None, Header()] = None,
_: schemas.TokenPayload = Depends(verify_resource_token),
) -> Response:
"""
本地缓存图片文件,支持 HTTP 缓存,如果启用全局图片缓存,则使用磁盘缓存
"""
# 如果没有启用全局图片缓存,则不使用磁盘缓存
return await fetch_image(url=url, use_cache=settings.GLOBAL_IMAGE_CACHE,
if_none_match=if_none_match)
return await fetch_image(
url=url, use_cache=settings.GLOBAL_IMAGE_CACHE, if_none_match=if_none_match
)
@router.get("/global", summary="查询非敏感系统设置", response_model=schemas.Response)
@@ -144,15 +158,18 @@ def get_global_setting(token: str):
}
)
# 追加版本信息(用于版本检查)
info.update({
"FRONTEND_VERSION": SystemChain.get_frontend_version(),
"BACKEND_VERSION": APP_VERSION
})
return schemas.Response(success=True,
data=info)
info.update(
{
"FRONTEND_VERSION": SystemChain.get_frontend_version(),
"BACKEND_VERSION": APP_VERSION,
}
)
return schemas.Response(success=True, data=info)
@router.get("/global/user", summary="查询用户相关系统设置", response_model=schemas.Response)
@router.get(
"/global/user", summary="查询用户相关系统设置", response_model=schemas.Response
)
async def get_user_global_setting(_: User = Depends(get_current_active_user_async)):
"""
查询用户相关系统设置(登录后获取)
@@ -164,7 +181,7 @@ async def get_user_global_setting(_: User = Depends(get_current_active_user_asyn
"RECOGNIZE_SOURCE",
"SEARCH_SOURCE",
"AI_RECOMMEND_ENABLED",
"PASSKEY_ALLOW_REGISTER_WITHOUT_OTP"
"PASSKEY_ALLOW_REGISTER_WITHOUT_OTP",
}
)
# 智能助手总开关未开启智能推荐状态强制返回False
@@ -173,13 +190,14 @@ async def get_user_global_setting(_: User = Depends(get_current_active_user_asyn
# 追加用户唯一ID和订阅分享管理权限
share_admin = SubscribeHelper().is_admin_user()
info.update({
"USER_UNIQUE_ID": SubscribeHelper().get_user_uuid(),
"SUBSCRIBE_SHARE_MANAGE": share_admin,
"WORKFLOW_SHARE_MANAGE": share_admin,
})
return schemas.Response(success=True,
data=info)
info.update(
{
"USER_UNIQUE_ID": SubscribeHelper().get_user_uuid(),
"SUBSCRIBE_SHARE_MANAGE": share_admin,
"WORKFLOW_SHARE_MANAGE": share_admin,
}
)
return schemas.Response(success=True, data=info)
@router.get("/env", summary="查询系统配置", response_model=schemas.Response)
@@ -187,22 +205,22 @@ async def get_env_setting(_: User = Depends(get_current_active_user_async)):
"""
查询系统环境变量,包括当前版本号(仅管理员)
"""
info = settings.model_dump(
exclude={"SECRET_KEY", "RESOURCE_SECRET_KEY"}
info = settings.model_dump(exclude={"SECRET_KEY", "RESOURCE_SECRET_KEY"})
info.update(
{
"VERSION": APP_VERSION,
"AUTH_VERSION": SitesHelper().auth_version,
"INDEXER_VERSION": SitesHelper().indexer_version,
"FRONTEND_VERSION": SystemChain().get_frontend_version(),
}
)
info.update({
"VERSION": APP_VERSION,
"AUTH_VERSION": SitesHelper().auth_version,
"INDEXER_VERSION": SitesHelper().indexer_version,
"FRONTEND_VERSION": SystemChain().get_frontend_version()
})
return schemas.Response(success=True,
data=info)
return schemas.Response(success=True, data=info)
@router.post("/env", summary="更新系统配置", response_model=schemas.Response)
async def set_env_setting(env: dict,
_: User = Depends(get_current_active_superuser_async)):
async def set_env_setting(
env: dict, _: User = Depends(get_current_active_superuser_async)
):
"""
更新系统环境变量(仅管理员)
"""
@@ -215,30 +233,31 @@ async def set_env_setting(env: dict,
return schemas.Response(
success=False,
message=f"{', '.join([v[1] for v in failed_updates.values()])}",
data={
"success_updates": success_updates,
"failed_updates": failed_updates
}
data={"success_updates": success_updates, "failed_updates": failed_updates},
)
if success_updates:
# 发送配置变更事件
await eventmanager.async_send_event(etype=EventType.ConfigChanged, data=ConfigChangeEventData(
key=success_updates.keys(),
change_type="update"
))
await eventmanager.async_send_event(
etype=EventType.ConfigChanged,
data=ConfigChangeEventData(
key=success_updates.keys(), change_type="update"
),
)
return schemas.Response(
success=True,
message="所有配置项更新成功",
data={
"success_updates": success_updates
}
data={"success_updates": success_updates},
)
@router.get("/progress/{process_type}", summary="实时进度")
async def get_progress(request: Request, process_type: str, _: schemas.TokenPayload = Depends(verify_resource_token)):
async def get_progress(
request: Request,
process_type: str,
_: schemas.TokenPayload = Depends(verify_resource_token),
):
"""
实时获取处理进度返回格式为SSE
"""
@@ -259,8 +278,7 @@ async def get_progress(request: Request, process_type: str, _: schemas.TokenPayl
@router.get("/setting/{key}", summary="查询系统设置", response_model=schemas.Response)
async def get_setting(key: str,
_: User = Depends(get_current_active_user_async)):
async def get_setting(key: str, _: User = Depends(get_current_active_user_async)):
"""
查询系统设置(仅管理员)
"""
@@ -268,16 +286,14 @@ async def get_setting(key: str,
value = getattr(settings, key)
else:
value = SystemConfigOper().get(key)
return schemas.Response(success=True, data={
"value": value
})
return schemas.Response(success=True, data={"value": value})
@router.post("/setting/{key}", summary="更新系统设置", response_model=schemas.Response)
async def set_setting(
key: str,
value: Annotated[Union[list, dict, bool, int, str] | None, Body()] = None,
_: User = Depends(get_current_active_superuser_async),
key: str,
value: Annotated[Union[list, dict, bool, int, str] | None, Body()] = None,
_: User = Depends(get_current_active_superuser_async),
):
"""
更新系统设置(仅管理员)
@@ -286,11 +302,10 @@ async def set_setting(
success, message = settings.update_setting(key=key, value=value)
if success:
# 发送配置变更事件
await eventmanager.async_send_event(etype=EventType.ConfigChanged, data=ConfigChangeEventData(
key=key,
value=value,
change_type="update"
))
await eventmanager.async_send_event(
etype=EventType.ConfigChanged,
data=ConfigChangeEventData(key=key, value=value, change_type="update"),
)
elif success is None:
success = True
return schemas.Response(success=success, message=message)
@@ -301,31 +316,40 @@ async def set_setting(
success = await SystemConfigOper().async_set(key, value)
if success:
# 发送配置变更事件
await eventmanager.async_send_event(etype=EventType.ConfigChanged, data=ConfigChangeEventData(
key=key,
value=value,
change_type="update"
))
await eventmanager.async_send_event(
etype=EventType.ConfigChanged,
data=ConfigChangeEventData(key=key, value=value, change_type="update"),
)
return schemas.Response(success=True)
else:
return schemas.Response(success=False, message=f"配置项 '{key}' 不存在")
@router.get("/llm-models", summary="获取LLM模型列表", response_model=schemas.Response)
async def get_llm_models(provider: str, api_key: str, base_url: Optional[str] = None, _: User = Depends(get_current_active_user_async)):
async def get_llm_models(
provider: str,
api_key: str,
base_url: Optional[str] = None,
_: User = Depends(get_current_active_user_async),
):
"""
获取LLM模型列表
"""
try:
models = LLMHelper().get_models(provider, api_key, base_url)
models = await asyncio.to_thread(
LLMHelper().get_models, provider, api_key, base_url
)
return schemas.Response(success=True, data=models)
except Exception as e:
return schemas.Response(success=False, message=str(e))
@router.get("/message", summary="实时消息")
async def get_message(request: Request, role: Optional[str] = "system",
_: schemas.TokenPayload = Depends(verify_resource_token)):
async def get_message(
request: Request,
role: Optional[str] = "system",
_: schemas.TokenPayload = Depends(verify_resource_token),
):
"""
实时获取系统消息返回格式为SSE
"""
@@ -346,8 +370,12 @@ async def get_message(request: Request, role: Optional[str] = "system",
@router.get("/logging", summary="实时日志")
async def get_logging(request: Request, length: Optional[int] = 50, logfile: Optional[str] = "moviepilot.log",
_: schemas.TokenPayload = Depends(verify_resource_token)):
async def get_logging(
request: Request,
length: Optional[int] = 50,
logfile: Optional[str] = "moviepilot.log",
_: schemas.TokenPayload = Depends(verify_resource_token),
):
"""
实时获取系统日志
length = -1 时, 返回text/plain
@@ -356,7 +384,9 @@ async def get_logging(request: Request, length: Optional[int] = 50, logfile: Opt
base_path = AsyncPath(settings.LOG_PATH)
log_path = base_path / logfile
if not await SecurityUtils.async_is_safe_path(base_path=base_path, user_path=log_path, allowed_suffixes={".log"}):
if not await SecurityUtils.async_is_safe_path(
base_path=base_path, user_path=log_path, allowed_suffixes={".log"}
):
raise HTTPException(status_code=404, detail="Not Found")
if not await log_path.exists() or not await log_path.is_file():
@@ -371,7 +401,9 @@ async def get_logging(request: Request, length: Optional[int] = 50, logfile: Opt
file_size = file_stat.st_size
# 读取历史日志
async with aiofiles.open(log_path, mode="r", encoding="utf-8", errors="ignore") as f:
async with aiofiles.open(
log_path, mode="r", encoding="utf-8", errors="ignore"
) as f:
# 优化大文件读取策略
if file_size > 100 * 1024:
# 只读取最后100KB的内容
@@ -380,9 +412,9 @@ async def get_logging(request: Request, length: Optional[int] = 50, logfile: Opt
await f.seek(position)
content = await f.read()
# 找到第一个完整的行
first_newline = content.find('\n')
first_newline = content.find("\n")
if first_newline != -1:
content = content[first_newline + 1:]
content = content[first_newline + 1 :]
else:
# 小文件直接读取全部内容
content = await f.read()
@@ -390,7 +422,7 @@ async def get_logging(request: Request, length: Optional[int] = 50, logfile: Opt
# 按行分割并添加到队列,只保留非空行
lines = [line.strip() for line in content.splitlines() if line.strip()]
# 只取最后N行
for line in lines[-max(length, 50):]:
for line in lines[-max(length, 50) :]:
lines_queue.append(line)
# 输出历史日志
@@ -398,7 +430,9 @@ async def get_logging(request: Request, length: Optional[int] = 50, logfile: Opt
yield f"data: {line}\n\n"
# 实时监听新日志
async with aiofiles.open(log_path, mode="r", encoding="utf-8", errors="ignore") as f:
async with aiofiles.open(
log_path, mode="r", encoding="utf-8", errors="ignore"
) as f:
# 移动文件指针到文件末尾,继续监听新增内容
await f.seek(0, 2)
# 记录初始文件大小
@@ -435,7 +469,9 @@ async def get_logging(request: Request, length: Optional[int] = 50, logfile: Opt
return Response(content="日志文件不存在!", media_type="text/plain")
try:
# 使用 aiofiles 异步读取文件
async with aiofiles.open(log_path, mode="r", encoding="utf-8", errors="ignore") as file:
async with aiofiles.open(
log_path, mode="r", encoding="utf-8", errors="ignore"
) as file:
text = await file.read()
# 倒序输出
text = "\n".join(text.split("\n")[::-1])
@@ -447,13 +483,16 @@ async def get_logging(request: Request, length: Optional[int] = 50, logfile: Opt
return StreamingResponse(log_generator(), media_type="text/event-stream")
@router.get("/versions", summary="查询Github所有Release版本", response_model=schemas.Response)
@router.get(
"/versions", summary="查询Github所有Release版本", response_model=schemas.Response
)
async def latest_version(_: schemas.TokenPayload = Depends(verify_token)):
"""
查询Github所有Release版本
"""
version_res = await AsyncRequestUtils(proxies=settings.PROXY, headers=settings.GITHUB_HEADERS).get_res(
f"https://api.github.com/repos/jxxghp/MoviePilot/releases")
version_res = await AsyncRequestUtils(
proxies=settings.PROXY, headers=settings.GITHUB_HEADERS
).get_res(f"https://api.github.com/repos/jxxghp/MoviePilot/releases")
if version_res:
ver_json = version_res.json()
if ver_json:
@@ -462,10 +501,12 @@ async def latest_version(_: schemas.TokenPayload = Depends(verify_token)):
@router.get("/ruletest", summary="过滤规则测试", response_model=schemas.Response)
def ruletest(title: str,
rulegroup_name: str,
subtitle: Optional[str] = None,
_: schemas.TokenPayload = Depends(verify_token)):
def ruletest(
title: str,
rulegroup_name: str,
subtitle: Optional[str] = None,
_: schemas.TokenPayload = Depends(verify_token),
):
"""
过滤规则测试,规则类型 1-订阅2-洗版3-搜索
"""
@@ -476,7 +517,9 @@ def ruletest(title: str,
# 查询规则组详情
rulegroup = RuleHelper().get_rule_group(rulegroup_name)
if not rulegroup:
return schemas.Response(success=False, message=f"过滤规则组 {rulegroup_name} 不存在!")
return schemas.Response(
success=False, message=f"过滤规则组 {rulegroup_name} 不存在!"
)
# 根据标题查询媒体信息
media_info = SearchChain().recognize_media(MetaInfo(title=title, subtitle=subtitle))
@@ -484,21 +527,22 @@ def ruletest(title: str,
return schemas.Response(success=False, message="未识别到媒体信息!")
# 过滤
result = SearchChain().filter_torrents(rule_groups=[rulegroup.name],
torrent_list=[torrent], mediainfo=media_info)
result = SearchChain().filter_torrents(
rule_groups=[rulegroup.name], torrent_list=[torrent], mediainfo=media_info
)
if not result:
return schemas.Response(success=False, message="不符合过滤规则!")
return schemas.Response(success=True, data={
"priority": 100 - result[0].pri_order + 1
})
return schemas.Response(
success=True, data={"priority": 100 - result[0].pri_order + 1}
)
@router.get("/nettest", summary="测试网络连通性")
async def nettest(
url: str,
proxy: bool,
include: Optional[str] = None,
_: schemas.TokenPayload = Depends(verify_token),
url: str,
proxy: bool,
include: Optional[str] = None,
_: schemas.TokenPayload = Depends(verify_token),
):
"""
测试网络连通性
@@ -570,21 +614,26 @@ async def nettest(
return schemas.Response(success=False, message=message, data={"time": time})
@router.get("/modulelist", summary="查询已加载的模块ID列表", response_model=schemas.Response)
@router.get(
"/modulelist", summary="查询已加载的模块ID列表", response_model=schemas.Response
)
def modulelist(_: schemas.TokenPayload = Depends(verify_token)):
"""
查询已加载的模块ID列表
"""
modules = [{
"id": k,
"name": v.get_name(),
} for k, v in ModuleManager().get_modules().items()]
return schemas.Response(success=True, data={
"modules": modules
})
modules = [
{
"id": k,
"name": v.get_name(),
}
for k, v in ModuleManager().get_modules().items()
]
return schemas.Response(success=True, data={"modules": modules})
@router.get("/moduletest/{moduleid}", summary="模块可用性测试", response_model=schemas.Response)
@router.get(
"/moduletest/{moduleid}", summary="模块可用性测试", response_model=schemas.Response
)
def moduletest(moduleid: str, _: schemas.TokenPayload = Depends(verify_token)):
"""
模块可用性测试接口
@@ -608,8 +657,7 @@ def restart_system(_: User = Depends(get_current_active_superuser)):
@router.get("/runscheduler", summary="运行服务", response_model=schemas.Response)
def run_scheduler(jobid: str,
_: User = Depends(get_current_active_superuser)):
def run_scheduler(jobid: str, _: User = Depends(get_current_active_superuser)):
"""
执行命令(仅管理员)
"""
@@ -622,9 +670,10 @@ def run_scheduler(jobid: str,
return schemas.Response(success=True)
@router.get("/runscheduler2", summary="运行服务API_TOKEN", response_model=schemas.Response)
def run_scheduler2(jobid: str,
_: Annotated[str, Depends(verify_apitoken)]):
@router.get(
"/runscheduler2", summary="运行服务API_TOKEN", response_model=schemas.Response
)
def run_scheduler2(jobid: str, _: Annotated[str, Depends(verify_apitoken)]):
"""
执行命令API_TOKEN认证
"""

View File

@@ -580,7 +580,7 @@ class MessageChain(ChainBase):
total = len(cache_list)
# 加一页
cache_list = cache_list[
(_current_page + 1) * self._page_size: (_current_page + 2)
(_current_page + 1) * self._page_size : (_current_page + 2)
* self._page_size
]
if not cache_list:
@@ -1134,6 +1134,59 @@ class MessageChain(ChainBase):
)
)
def remote_stop_agent(
    self,
    channel: MessageChannel,
    userid: Union[str, int],
    source: Optional[str] = None,
):
    """
    Emergency-stop the agent reasoning task currently running for a user
    (remote command entry point).

    Unlike /clear_session this does NOT drop the session or its memory:
    the session id is looked up without popping it, so the user can keep
    chatting after the stop.

    :param channel: message channel the command arrived on
    :param userid: id of the user issuing the command
    :param source: optional message source identifier
    """

    def _notify(title: str) -> None:
        # All three outcomes reply on the same channel/source/user.
        self.post_message(
            Notification(
                channel=channel,
                source=source,
                title=title,
                userid=userid,
            )
        )

    # Look up (do not pop) the user's session so it survives the stop.
    session_info = self._user_sessions.get(userid)
    if not session_info:
        _notify("您当前没有活跃的智能体会话")
        return

    session_id, _ = session_info
    stopped = False
    try:
        # The agent runs on the global event loop; hop threads safely and
        # wait up to 10s for the cancellation to take effect.
        future = asyncio.run_coroutine_threadsafe(
            agent_manager.stop_current_task(session_id=session_id),
            global_vars.loop,
        )
        stopped = future.result(timeout=10)
    except Exception as e:
        logger.warning(f"停止Agent推理失败: {e}")

    if stopped:
        _notify("智能体推理已应急停止,会话记忆已保留,您可以继续对话")
    else:
        _notify("当前没有正在执行的智能体任务")
def _handle_ai_message(
self,
text: str,

View File

@@ -593,11 +593,17 @@ class SubscribeChain(ChainBase):
# 洗版
if subscribe.best_version:
# 洗版时,非整季不要
if torrent_mediainfo.type == MediaType.TV:
if torrent_meta.episode_list:
logger.info(f'{subscribe.name} 正在洗版,{torrent_info.title} 不是整季')
continue
# 洗版时,不符合订阅集数的不要
if (
torrent_mediainfo.type == MediaType.TV
and not self._is_episode_range_covered(
meta=torrent_meta, subscribe=subscribe
)
):
logger.info(
f"{subscribe.name} 正在洗版,{torrent_info.title} 不符合订阅集数范围"
)
continue
# 洗版时,优先级小于等于已下载优先级的不要
if subscribe.current_priority \
and torrent_info.pri_order <= subscribe.current_priority:
@@ -985,11 +991,18 @@ class SubscribeChain(ChainBase):
)
continue
else:
# 洗版时,非整季不要
if meta.type == MediaType.TV:
if torrent_meta.episode_list:
logger.debug(f'{subscribe.name} 正在洗版,{torrent_info.title} 不是整季')
continue
# 洗版时,不符合订阅集数的不要
if (
meta.type == MediaType.TV
and not self._is_episode_range_covered(
meta=torrent_meta,
subscribe=subscribe,
)
):
logger.debug(
f"{subscribe.name} 正在洗版,{torrent_info.title} 不符合订阅集数范围"
)
continue
# 匹配订阅附加参数
if not torrenthelper.filter_torrent(torrent_info=torrent_info,
@@ -1821,6 +1834,23 @@ class SubscribeChain(ChainBase):
# 返回结果,表示媒体未完全下载或存在
return False, no_exists
@staticmethod
def _is_episode_range_covered(meta: MetaBase, subscribe: Subscribe) -> bool:
    """
    Check whether a torrent's episode range covers the subscription's
    required range (used by best-version / upgrade filtering).

    :param meta: parsed torrent metadata carrying ``episode_list``
    :param subscribe: subscription with ``start_episode`` / ``total_episode``
    :return: True when the torrent spans the whole subscribed range
    """
    episodes = meta.episode_list
    if not episodes:
        # No per-episode info means the torrent is a season/collection pack,
        # which by definition covers the whole range.
        return True
    min_ep = min(episodes)
    max_ep = max(episodes)
    start_ep = subscribe.start_episode or 1
    end_ep = subscribe.total_episode
    if not end_ep:
        # total_episode may be unset (None/0) for ongoing shows; comparing
        # max_ep >= None would raise TypeError, so only the start boundary
        # can be verified in that case.
        return min_ep <= start_ep
    return min_ep <= start_ep and max_ep >= end_ep
@staticmethod
def get_states_for_search(state: str) -> str:
"""

View File

@@ -45,109 +45,115 @@ class Command(metaclass=Singleton):
"id": "cookiecloud",
"type": "scheduler",
"description": "同步站点",
"category": "站点"
"category": "站点",
},
"/sites": {
"func": SiteChain().remote_list,
"description": "查询站点",
"category": "站点",
"data": {}
"data": {},
},
"/site_cookie": {
"func": SiteChain().remote_cookie,
"description": "更新站点Cookie",
"data": {}
"data": {},
},
"/site_statistic": {
"func": SiteChain().remote_refresh_userdatas,
"description": "站点数据统计",
"data": {}
"data": {},
},
"/site_enable": {
"func": SiteChain().remote_enable,
"description": "启用站点",
"data": {}
"data": {},
},
"/site_disable": {
"func": SiteChain().remote_disable,
"description": "禁用站点",
"data": {}
"data": {},
},
"/mediaserver_sync": {
"id": "mediaserver_sync",
"type": "scheduler",
"description": "同步媒体服务器",
"category": "管理"
"category": "管理",
},
"/subscribes": {
"func": SubscribeChain().remote_list,
"description": "查询订阅",
"category": "订阅",
"data": {}
"data": {},
},
"/subscribe_refresh": {
"id": "subscribe_refresh",
"type": "scheduler",
"description": "刷新订阅",
"category": "订阅"
"category": "订阅",
},
"/subscribe_search": {
"id": "subscribe_search",
"type": "scheduler",
"description": "搜索订阅",
"category": "订阅"
"category": "订阅",
},
"/subscribe_delete": {
"func": SubscribeChain().remote_delete,
"description": "删除订阅",
"data": {}
"data": {},
},
"/subscribe_tmdb": {
"id": "subscribe_tmdb",
"type": "scheduler",
"description": "订阅元数据更新"
"description": "订阅元数据更新",
},
"/downloading": {
"func": DownloadChain().remote_downloading,
"description": "正在下载",
"category": "管理",
"data": {}
"data": {},
},
"/transfer": {
"id": "transfer",
"type": "scheduler",
"description": "下载文件整理",
"category": "管理"
"category": "管理",
},
"/redo": {
"func": TransferChain().remote_transfer,
"description": "手动整理",
"data": {}
"data": {},
},
"/clear_cache": {
"func": SystemChain().remote_clear_cache,
"description": "清理缓存",
"category": "管理",
"data": {}
"data": {},
},
"/restart": {
"func": SystemChain().restart,
"description": "重启系统",
"category": "管理",
"data": {}
"data": {},
},
"/version": {
"func": SystemChain().version,
"description": "当前版本",
"category": "管理",
"data": {}
"data": {},
},
"/clear_session": {
"func": MessageChain().remote_clear_session,
"description": "清除会话",
"category": "管理",
"data": {}
}
"data": {},
},
"/stop_agent": {
"func": MessageChain().remote_stop_agent,
"description": "停止推理",
"category": "管理",
"data": {},
},
}
# 插件命令集合
self._plugin_commands = {}
@@ -182,7 +188,7 @@ class Command(metaclass=Singleton):
self._commands = {
**self._preset_commands,
**self._plugin_commands,
**self._other_commands
**self._other_commands,
}
# 强制触发注册
@@ -195,32 +201,50 @@ class Command(metaclass=Singleton):
event_data: CommandRegisterEventData = event.event_data
# 如果事件被取消,跳过命令注册
if event_data.cancel:
logger.debug(f"Command initialization canceled by event: {event_data.source}")
logger.debug(
f"Command initialization canceled by event: {event_data.source}"
)
return
# 如果拦截源与插件标识一致时,这里认为需要强制触发注册
if pid is not None and pid == event_data.source:
force_register = True
initial_commands = event_data.commands or {}
logger.debug(f"Registering command count from event: {len(initial_commands)}")
logger.debug(
f"Registering command count from event: {len(initial_commands)}"
)
else:
logger.debug(f"Registering initial command count: {len(initial_commands)}")
logger.debug(
f"Registering initial command count: {len(initial_commands)}"
)
# initial_commands 必须是 self._commands 的子集
filtered_initial_commands = DictUtils.filter_keys_to_subset(initial_commands, self._commands)
filtered_initial_commands = DictUtils.filter_keys_to_subset(
initial_commands, self._commands
)
# 如果 filtered_initial_commands 为空,则跳过注册
if not filtered_initial_commands and not force_register:
logger.debug("Filtered commands are empty, skipping registration.")
return
# 对比调整后的命令与当前命令
if filtered_initial_commands != self._registered_commands or force_register:
logger.debug("Command set has changed or force registration is enabled.")
if (
filtered_initial_commands != self._registered_commands
or force_register
):
logger.debug(
"Command set has changed or force registration is enabled."
)
self._registered_commands = filtered_initial_commands
CommandChain().register_commands(commands=filtered_initial_commands)
else:
logger.debug("Command set unchanged, skipping broadcast registration.")
logger.debug(
"Command set unchanged, skipping broadcast registration."
)
except Exception as e:
logger.error(f"Error occurred during command initialization in background: {e}", exc_info=True)
logger.error(
f"Error occurred during command initialization in background: {e}",
exc_info=True,
)
def __trigger_register_commands_event(self) -> tuple[Optional[Event], dict]:
"""
@@ -238,7 +262,7 @@ class Command(metaclass=Singleton):
command_data = {
"type": command_type,
"description": command.get("description"),
"category": command.get("category")
"category": command.get("category"),
}
# 如果有 pid则添加到命令数据中
plugin_id = command.get("pid")
@@ -253,7 +277,9 @@ class Command(metaclass=Singleton):
add_commands(self._other_commands, "other")
# 触发事件允许可以拦截和调整命令
event_data = CommandRegisterEventData(commands=commands, origin="CommandChain", service=None)
event_data = CommandRegisterEventData(
commands=commands, origin="CommandChain", service=None
)
event = eventmanager.send_event(ChainEventType.CommandRegister, event_data)
return event, commands
@@ -274,13 +300,19 @@ class Command(metaclass=Singleton):
"show": command.get("show", True),
"data": {
"etype": command.get("event"),
"data": command.get("data")
}
"data": command.get("data"),
},
}
return plugin_commands
def __run_command(self, command: Dict[str, any], data_str: Optional[str] = "",
channel: MessageChannel = None, source: Optional[str] = None, userid: Union[str, int] = None):
def __run_command(
self,
command: Dict[str, any],
data_str: Optional[str] = "",
channel: MessageChannel = None,
source: Optional[str] = None,
userid: Union[str, int] = None,
):
"""
运行定时服务
"""
@@ -292,7 +324,7 @@ class Command(metaclass=Singleton):
channel=channel,
source=source,
title=f"开始执行 {command.get('description')} ...",
userid=userid
userid=userid,
)
)
@@ -305,33 +337,33 @@ class Command(metaclass=Singleton):
channel=channel,
source=source,
title=f"{command.get('description')} 执行完成",
userid=userid
userid=userid,
)
)
else:
# 命令
cmd_data = copy.deepcopy(command['data']) if command.get('data') else {}
args_num = ObjectUtils.arguments(command['func'])
cmd_data = copy.deepcopy(command["data"]) if command.get("data") else {}
args_num = ObjectUtils.arguments(command["func"])
if args_num > 0:
if cmd_data:
# 有内置参数直接使用内置参数
data = cmd_data.get("data") or {}
data['channel'] = channel
data['source'] = source
data['user'] = userid
data["channel"] = channel
data["source"] = source
data["user"] = userid
if data_str:
data['arg_str'] = data_str
cmd_data['data'] = data
command['func'](**cmd_data)
data["arg_str"] = data_str
cmd_data["data"] = data
command["func"](**cmd_data)
elif args_num == 3:
# 没有输入参数只输入渠道来源、用户ID和消息来源
command['func'](channel, userid, source)
command["func"](channel, userid, source)
elif args_num > 3:
# 多个输入参数用户输入、用户ID
command['func'](data_str, channel, userid, source)
command["func"](data_str, channel, userid, source)
else:
# 没有参数
command['func']()
command["func"]()
def get_commands(self):
"""
@@ -345,9 +377,15 @@ class Command(metaclass=Singleton):
"""
return self._commands.get(cmd, {})
def register(self, cmd: str, func: Any, data: Optional[dict] = None,
desc: Optional[str] = None, category: Optional[str] = None,
show: bool = True) -> None:
def register(
self,
cmd: str,
func: Any,
data: Optional[dict] = None,
desc: Optional[str] = None,
category: Optional[str] = None,
show: bool = True,
) -> None:
"""
注册单个命令
"""
@@ -357,12 +395,17 @@ class Command(metaclass=Singleton):
"description": desc,
"category": category,
"data": data or {},
"show": show
"show": show,
}
def execute(self, cmd: str, data_str: Optional[str] = "",
channel: MessageChannel = None, source: Optional[str] = None,
userid: Union[str, int] = None) -> None:
def execute(
self,
cmd: str,
data_str: Optional[str] = "",
channel: MessageChannel = None,
source: Optional[str] = None,
userid: Union[str, int] = None,
) -> None:
"""
执行命令
"""
@@ -370,23 +413,32 @@ class Command(metaclass=Singleton):
if command:
try:
if userid:
logger.info(f"用户 {userid} 开始执行:{command.get('description')} ...")
logger.info(
f"用户 {userid} 开始执行:{command.get('description')} ..."
)
else:
logger.info(f"开始执行:{command.get('description')} ...")
# 执行命令
self.__run_command(command, data_str=data_str,
channel=channel, source=source, userid=userid)
self.__run_command(
command,
data_str=data_str,
channel=channel,
source=source,
userid=userid,
)
if userid:
logger.info(f"用户 {userid} {command.get('description')} 执行完成")
else:
logger.info(f"{command.get('description')} 执行完成")
except Exception as err:
logger.error(f"执行命令 {cmd} 出错:{str(err)} - {traceback.format_exc()}")
self.messagehelper.put(title=f"执行命令 {cmd} 出错",
message=str(err),
role="system")
logger.error(
f"执行命令 {cmd} 出错:{str(err)} - {traceback.format_exc()}"
)
self.messagehelper.put(
title=f"执行命令 {cmd} 出错", message=str(err), role="system"
)
@staticmethod
def send_plugin_event(etype: EventType, data: dict) -> None:
@@ -404,19 +456,24 @@ class Command(metaclass=Singleton):
}
"""
# 命令参数
event_str = event.event_data.get('cmd')
event_str = event.event_data.get("cmd")
# 消息渠道
event_channel = event.event_data.get('channel')
event_channel = event.event_data.get("channel")
# 消息来源
event_source = event.event_data.get('source')
event_source = event.event_data.get("source")
# 消息用户
event_user = event.event_data.get('user')
event_user = event.event_data.get("user")
if event_str:
cmd = event_str.split()[0]
args = " ".join(event_str.split()[1:])
if self.get(cmd):
self.execute(cmd=cmd, data_str=args,
channel=event_channel, source=event_source, userid=event_user)
self.execute(
cmd=cmd,
data_str=args,
channel=event_channel,
source=event_source,
userid=event_user,
)
@eventmanager.register(EventType.ModuleReload)
def module_reload_event(self, _: ManagerEvent) -> None:

View File

@@ -211,7 +211,7 @@ class CacheBackend(ABC):
"""
获取缓存的区
"""
return f"region:{region}" if region else "region:default"
return f"region:{region}" if region else "region:DEFAULT"
@staticmethod
def is_redis() -> bool:

View File

@@ -1,11 +1,61 @@
"""LLM模型相关辅助功能"""
import inspect
from typing import List
from app.core.config import settings
from app.log import logger
def _patch_gemini_thought_signature():
    """
    Monkey-patch langchain-google-genai so Gemini 2.5 thinking models get
    thought_signature handling.

    Upstream ``_is_gemini_3_or_later()`` only matches "gemini-3", so Gemini
    2.5 thinking models (gemini-2.5-flash / gemini-2.5-pro) lose their
    thought_signature on tool calls and the API rejects the request with a
    400. This widens the check to also match Gemini 2.5 models.

    Idempotent: a marker attribute on the module guards against re-patching.
    Failures are logged and swallowed so model setup never breaks.
    """
    try:
        import langchain_google_genai.chat_models as _cm

        # Skip if this process already applied the patch.
        if getattr(_cm, "_thought_signature_patched", False):
            return

        def _patched_is_gemini_3_or_later(model_name: str) -> bool:
            if not model_name:
                return False
            normalized = model_name.lower().replace("models/", "")
            # Gemini 2.5 thinking models also require thought_signature.
            return "gemini-3" in normalized or "gemini-2.5" in normalized

        _cm._is_gemini_3_or_later = _patched_is_gemini_3_or_later
        _cm._thought_signature_patched = True
        logger.debug(
            "已修补 langchain-google-genai thought_signature 兼容性(覆盖 Gemini 2.5 模型)"
        )
    except Exception as e:
        logger.warning(f"修补 langchain-google-genai thought_signature 失败: {e}")
def _get_httpx_proxy_key() -> str:
    """
    Return the proxy keyword argument name supported by the installed httpx.

    httpx < 0.28 accepts ``proxies`` (plural); >= 0.28 accepts ``proxy``
    (singular). The google-genai SDK silently drops any kwarg that is not in
    ``httpx.Client.__init__``'s signature, so the spelling must match the
    installed version exactly.
    """
    try:
        import httpx

        client_params = inspect.signature(httpx.Client.__init__).parameters
        return "proxy" if "proxy" in client_params else "proxies"
    except Exception:
        # Introspection failed (e.g. httpx missing): fall back to the
        # legacy plural spelling.
        return "proxies"
class LLMHelper:
"""LLM模型相关辅助功能"""
@@ -23,31 +73,27 @@ class LLMHelper:
raise ValueError("未配置LLM API Key")
if provider == "google":
# 修补 Gemini 2.5 思考模型的 thought_signature 兼容性
_patch_gemini_thought_signature()
# 统一使用 langchain-google-genai 原生接口
# 不使用 OpenAI 兼容端点,因其不支持 Gemini 思考模型的 thought_signature
# 会导致工具调用时报错 400
from langchain_google_genai import ChatGoogleGenerativeAI
client_args = None
if settings.PROXY_HOST:
# 通过代理使用 Google 的 OpenAI 兼容接口
from langchain_openai import ChatOpenAI
proxy_key = _get_httpx_proxy_key()
client_args = {proxy_key: settings.PROXY_HOST}
model = ChatOpenAI(
model=settings.LLM_MODEL,
api_key=api_key,
max_retries=3,
base_url="https://generativelanguage.googleapis.com/v1beta/openai",
temperature=settings.LLM_TEMPERATURE,
streaming=streaming,
stream_usage=True,
openai_proxy=settings.PROXY_HOST,
)
else:
# 使用 langchain-google-genai 原生接口v4 API 变更google_api_key → api_keymax_retries → retries
from langchain_google_genai import ChatGoogleGenerativeAI
model = ChatGoogleGenerativeAI(
model=settings.LLM_MODEL,
api_key=api_key,
retries=3,
temperature=settings.LLM_TEMPERATURE,
streaming=streaming
)
model = ChatGoogleGenerativeAI(
model=settings.LLM_MODEL,
api_key=api_key,
retries=3,
temperature=settings.LLM_TEMPERATURE,
streaming=streaming,
client_args=client_args,
)
elif provider == "deepseek":
from langchain_deepseek import ChatDeepSeek
@@ -78,13 +124,14 @@ class LLMHelper:
logger.info(f"使用LLM模型: {model.model}Profile: {model.profile}")
else:
model.profile = {
"max_input_tokens": settings.LLM_MAX_CONTEXT_TOKENS * 1000, # 转换为token单位
"max_input_tokens": settings.LLM_MAX_CONTEXT_TOKENS
* 1000, # 转换为token单位
}
return model
def get_models(
self, provider: str, api_key: str, base_url: str = None
self, provider: str, api_key: str, base_url: str = None
) -> List[str]:
"""获取模型列表"""
logger.info(f"获取 {provider} 模型列表...")
@@ -98,8 +145,18 @@ class LLMHelper:
"""获取Google模型列表使用 google-genai SDK v1"""
try:
from google import genai
from google.genai.types import HttpOptions
client = genai.Client(api_key=api_key)
http_options = None
if settings.PROXY_HOST:
proxy_key = _get_httpx_proxy_key()
proxy_args = {proxy_key: settings.PROXY_HOST}
http_options = HttpOptions(
client_args=proxy_args,
async_client_args=proxy_args,
)
client = genai.Client(api_key=api_key, http_options=http_options)
models = client.models.list()
return [
m.name
@@ -112,7 +169,7 @@ class LLMHelper:
@staticmethod
def _get_openai_compatible_models(
provider: str, api_key: str, base_url: str = None
provider: str, api_key: str, base_url: str = None
) -> List[str]:
"""获取OpenAI兼容模型列表"""
try:

View File

@@ -140,7 +140,7 @@ class RedisHelper(ConfigReloadMixin, metaclass=Singleton):
"""
获取缓存的区
"""
return f"region:{quote(region)}" if region else "region:DEFAULT"
return f"region:{region}" if region else "region:DEFAULT"
def __make_redis_key(self, region: str, key: str) -> str:
"""
@@ -370,7 +370,7 @@ class AsyncRedisHelper(ConfigReloadMixin, metaclass=Singleton):
"""
获取缓存的区
"""
return f"region:{region}" if region else "region:default"
return f"region:{region}" if region else "region:DEFAULT"
def __make_redis_key(self, region: str, key: str) -> str:
"""

View File

@@ -13,8 +13,14 @@ from app.helper.directory import DirectoryHelper
from app.helper.message import TemplateHelper
from app.log import logger
from app.modules.filemanager.storages import StorageBase
from app.schemas import TransferInfo, TmdbEpisode, TransferDirectoryConf, FileItem, TransferInterceptEventData, \
TransferRenameEventData
from app.schemas import (
TransferInfo,
TmdbEpisode,
TransferDirectoryConf,
FileItem,
TransferInterceptEventData,
TransferRenameEventData,
)
from app.schemas.types import MediaType, ChainEventType
from app.utils.system import SystemUtils
@@ -51,26 +57,27 @@ class TransHandler:
elif isinstance(current_value, bool):
current_value = value
elif isinstance(current_value, int):
current_value += (value or 0)
current_value += value or 0
else:
current_value = value
setattr(result, key, current_value)
def transfer_media(self,
fileitem: FileItem,
in_meta: MetaBase,
mediainfo: MediaInfo,
target_storage: str,
target_path: Path,
transfer_type: str,
source_oper: StorageBase,
target_oper: StorageBase,
need_scrape: Optional[bool] = False,
need_rename: Optional[bool] = True,
need_notify: Optional[bool] = True,
overwrite_mode: Optional[str] = None,
episodes_info: List[TmdbEpisode] = None
) -> TransferInfo:
def transfer_media(
self,
fileitem: FileItem,
in_meta: MetaBase,
mediainfo: MediaInfo,
target_storage: str,
target_path: Path,
transfer_type: str,
source_oper: StorageBase,
target_oper: StorageBase,
need_scrape: Optional[bool] = False,
need_rename: Optional[bool] = True,
need_notify: Optional[bool] = True,
overwrite_mode: Optional[str] = None,
episodes_info: List[TmdbEpisode] = None,
) -> TransferInfo:
"""
识别并整理一个文件或者一个目录下的所有文件
:param fileitem: 整理的文件对象,可能是一个文件也可以是一个目录
@@ -109,7 +116,9 @@ class TransHandler:
"""
if not _fileitem.extension:
return False
if f".{_fileitem.extension.lower()}" in (settings.RMT_SUBEXT + settings.RMT_AUDIOEXT):
if f".{_fileitem.extension.lower()}" in (
settings.RMT_SUBEXT + settings.RMT_AUDIOEXT
):
return True
return False
@@ -117,7 +126,6 @@ class TransHandler:
result = TransferInfo()
try:
# 重命名格式
rename_format = settings.RENAME_FORMAT(mediainfo.type)
@@ -128,9 +136,11 @@ class TransHandler:
new_path = self.get_rename_path(
path=target_path,
template_string=rename_format,
rename_dict=self.get_naming_dict(meta=in_meta,
mediainfo=mediainfo),
source_path=fileitem.path
rename_dict=self.get_naming_dict(
meta=in_meta, mediainfo=mediainfo
),
source_path=fileitem.path,
source_item=fileitem,
)
new_path = DirectoryHelper.get_media_root_path(
rename_format, rename_path=new_path
@@ -149,40 +159,46 @@ class TransHandler:
new_path = target_path / fileitem.name
# 原盘大小只计算STREAM目录内的文件大小
if stream_fileitem := source_oper.get_item(
Path(fileitem.path) / "BDMV" / "STREAM"
Path(fileitem.path) / "BDMV" / "STREAM"
):
fileitem.size = sum(
file.size for file in source_oper.list(stream_fileitem) or []
)
# 整理目录
new_diritem, errmsg = self.__transfer_dir(fileitem=fileitem,
mediainfo=mediainfo,
source_oper=source_oper,
target_oper=target_oper,
target_storage=target_storage,
target_path=new_path,
transfer_type=transfer_type,
result=result)
new_diritem, errmsg = self.__transfer_dir(
fileitem=fileitem,
mediainfo=mediainfo,
source_oper=source_oper,
target_oper=target_oper,
target_storage=target_storage,
target_path=new_path,
transfer_type=transfer_type,
result=result,
)
if not new_diritem:
logger.error(f"文件夹 {fileitem.path} 整理失败:{errmsg}")
self.__update_result(result=result,
success=False,
message=errmsg,
fileitem=fileitem,
transfer_type=transfer_type,
need_notify=need_notify)
self.__update_result(
result=result,
success=False,
message=errmsg,
fileitem=fileitem,
transfer_type=transfer_type,
need_notify=need_notify,
)
return result
logger.info(f"文件夹 {fileitem.path} 整理成功")
# 返回整理后的路径
self.__update_result(result=result,
success=True,
fileitem=fileitem,
target_item=new_diritem,
target_diritem=new_diritem,
need_scrape=need_scrape,
need_notify=need_notify,
transfer_type=transfer_type)
self.__update_result(
result=result,
success=True,
fileitem=fileitem,
target_item=new_diritem,
target_diritem=new_diritem,
need_scrape=need_scrape,
need_notify=need_notify,
transfer_type=transfer_type,
)
return result
else:
# 整理单个文件
@@ -190,13 +206,15 @@ class TransHandler:
# 电视剧
if in_meta.begin_episode is None:
logger.warn(f"文件 {fileitem.path} 整理失败:未识别到文件集数")
self.__update_result(result=result,
success=False,
message="未识别到文件集数",
fileitem=fileitem,
fail_list=[fileitem.path],
transfer_type=transfer_type,
need_notify=need_notify)
self.__update_result(
result=result,
success=False,
message="未识别到文件集数",
fileitem=fileitem,
fail_list=[fileitem.path],
transfer_type=transfer_type,
need_notify=need_notify,
)
return result
# 文件结束季为空
@@ -218,9 +236,10 @@ class TransHandler:
meta=in_meta,
mediainfo=mediainfo,
episodes_info=episodes_info,
file_ext=f".{fileitem.extension}"
file_ext=f".{fileitem.extension}",
),
source_path=fileitem.path
source_path=fileitem.path,
source_item=fileitem,
)
# 针对字幕文件,文件名中补充额外标识信息
@@ -250,13 +269,15 @@ class TransHandler:
target_diritem = target_oper.get_folder(folder_path)
if not target_diritem:
logger.error(f"目标目录 {folder_path} 获取失败")
self.__update_result(result=result,
success=False,
message=f"目标目录 {folder_path} 获取失败",
fileitem=fileitem,
fail_list=[fileitem.path],
transfer_type=transfer_type,
need_notify=need_notify)
self.__update_result(
result=result,
success=False,
message=f"目标目录 {folder_path} 获取失败",
fileitem=fileitem,
fail_list=[fileitem.path],
transfer_type=transfer_type,
need_notify=need_notify,
)
return result
# 判断是否要覆盖,附加文件强制覆盖
@@ -274,92 +295,112 @@ class TransHandler:
if not overflag:
# 目标文件已存在
logger.info(
f"目的文件系统中已经存在同名文件 {target_file},当前整理覆盖模式设置为 {overwrite_mode}")
if overwrite_mode == 'always':
f"目的文件系统中已经存在同名文件 {target_file},当前整理覆盖模式设置为 {overwrite_mode}"
)
if overwrite_mode == "always":
# 总是覆盖同名文件
overflag = True
elif overwrite_mode == 'size':
elif overwrite_mode == "size":
# 存在时大覆盖小
if target_item.size < fileitem.size:
logger.info(f"目标文件文件大小更小,将覆盖:{new_file}")
logger.info(
f"目标文件文件大小更小,将覆盖:{new_file}"
)
overflag = True
else:
self.__update_result(result=result,
success=False,
message=f"媒体库存在同名文件,且质量更好",
fileitem=fileitem,
target_item=target_item,
target_diritem=target_diritem,
fail_list=[fileitem.path],
transfer_type=transfer_type,
need_notify=need_notify)
self.__update_result(
result=result,
success=False,
message=f"媒体库存在同名文件,且质量更好",
fileitem=fileitem,
target_item=target_item,
target_diritem=target_diritem,
fail_list=[fileitem.path],
transfer_type=transfer_type,
need_notify=need_notify,
)
return result
elif overwrite_mode == 'never':
elif overwrite_mode == "never":
# 存在不覆盖
self.__update_result(result=result,
success=False,
message=f"媒体库存在同名文件,当前覆盖模式为不覆盖",
fileitem=fileitem,
target_item=target_item,
target_diritem=target_diritem,
fail_list=[fileitem.path],
transfer_type=transfer_type,
need_notify=need_notify)
self.__update_result(
result=result,
success=False,
message=f"媒体库存在同名文件,当前覆盖模式为不覆盖",
fileitem=fileitem,
target_item=target_item,
target_diritem=target_diritem,
fail_list=[fileitem.path],
transfer_type=transfer_type,
need_notify=need_notify,
)
return result
elif overwrite_mode == 'latest':
elif overwrite_mode == "latest":
# 仅保留最新版本
logger.info(f"当前整理覆盖模式设置为仅保留最新版本,将覆盖:{new_file}")
logger.info(
f"当前整理覆盖模式设置为仅保留最新版本,将覆盖:{new_file}"
)
overflag = True
else:
if overwrite_mode == 'latest':
if overwrite_mode == "latest":
# 文件不存在,但仅保留最新版本
logger.info(
f"当前整理覆盖模式设置为 {overwrite_mode},仅保留最新版本,正在删除已有版本文件 ...")
f"当前整理覆盖模式设置为 {overwrite_mode},仅保留最新版本,正在删除已有版本文件 ..."
)
self.__delete_version_files(target_oper, new_file)
else:
# 附加文件 总是需要覆盖
overflag = True
# 整理文件
new_item, err_msg = self.__transfer_file(fileitem=fileitem,
mediainfo=mediainfo,
target_storage=target_storage,
target_file=new_file,
transfer_type=transfer_type,
over_flag=overflag,
source_oper=source_oper,
target_oper=target_oper,
result=result)
new_item, err_msg = self.__transfer_file(
fileitem=fileitem,
mediainfo=mediainfo,
target_storage=target_storage,
target_file=new_file,
transfer_type=transfer_type,
over_flag=overflag,
source_oper=source_oper,
target_oper=target_oper,
result=result,
)
if not new_item:
logger.error(f"文件 {fileitem.path} 整理失败:{err_msg}")
self.__update_result(result=result,
success=False,
message=err_msg,
fileitem=fileitem,
fail_list=[fileitem.path],
transfer_type=transfer_type,
need_notify=need_notify)
self.__update_result(
result=result,
success=False,
message=err_msg,
fileitem=fileitem,
fail_list=[fileitem.path],
transfer_type=transfer_type,
need_notify=need_notify,
)
return result
logger.info(f"文件 {fileitem.path} 整理成功")
self.__update_result(result=result,
success=True,
fileitem=fileitem,
target_item=new_item,
target_diritem=target_diritem,
need_scrape=need_scrape,
transfer_type=transfer_type,
need_notify=need_notify)
self.__update_result(
result=result,
success=True,
fileitem=fileitem,
target_item=new_item,
target_diritem=target_diritem,
need_scrape=need_scrape,
transfer_type=transfer_type,
need_notify=need_notify,
)
return result
except Exception as e:
logger.error(f"媒体整理出错:{e}")
return TransferInfo(success=False, message=str(e))
@staticmethod
def __transfer_command(fileitem: FileItem, target_storage: str,
source_oper: StorageBase, target_oper: StorageBase,
target_file: Path, transfer_type: str,
) -> Tuple[Optional[FileItem], str]:
def __transfer_command(
fileitem: FileItem,
target_storage: str,
source_oper: StorageBase,
target_oper: StorageBase,
target_file: Path,
transfer_type: str,
) -> Tuple[Optional[FileItem], str]:
"""
处理单个文件
:param fileitem: 源文件
@@ -381,12 +422,15 @@ class TransHandler:
basename=_path.stem,
type="file",
size=_path.stat().st_size,
extension=_path.suffix.lstrip('.'),
modify_time=_path.stat().st_mtime
extension=_path.suffix.lstrip("."),
modify_time=_path.stat().st_mtime,
)
if (fileitem.storage != target_storage
and fileitem.storage != "local" and target_storage != "local"):
if (
fileitem.storage != target_storage
and fileitem.storage != "local"
and target_storage != "local"
):
return None, f"不支持 {fileitem.storage}{target_storage} 的文件整理"
if fileitem.storage == "local" and target_storage == "local":
@@ -419,20 +463,27 @@ class TransHandler:
target_fileitem = target_oper.get_folder(target_file.parent)
if target_fileitem:
# 上传文件
new_item = target_oper.upload(target_fileitem, filepath, target_file.name)
new_item = target_oper.upload(
target_fileitem, filepath, target_file.name
)
if new_item:
return new_item, ""
else:
return None, f"{fileitem.path} 上传 {target_storage} 失败"
else:
return None, f"{target_storage}{target_file.parent} 目录获取失败"
return (
None,
f"{target_storage}{target_file.parent} 目录获取失败",
)
elif transfer_type == "move":
# 移动
# 根据目的路径获取文件夹
target_fileitem = target_oper.get_folder(target_file.parent)
if target_fileitem:
# 上传文件
new_item = target_oper.upload(target_fileitem, filepath, target_file.name)
new_item = target_oper.upload(
target_fileitem, filepath, target_file.name
)
if new_item:
# 删除源文件
source_oper.delete(fileitem)
@@ -440,7 +491,10 @@ class TransHandler:
else:
return None, f"{fileitem.path} 上传 {target_storage} 失败"
else:
return None, f"{target_storage}{target_file.parent} 目录获取失败"
return (
None,
f"{target_storage}{target_file.parent} 目录获取失败",
)
elif fileitem.storage != "local" and target_storage == "local":
# 网盘到本地
if target_file.exists():
@@ -449,7 +503,9 @@ class TransHandler:
# 网盘到本地
if transfer_type in ["copy", "move"]:
# 下载
tmp_file = source_oper.download(fileitem=fileitem, path=target_file.parent)
tmp_file = source_oper.download(
fileitem=fileitem, path=target_file.parent
)
if tmp_file:
# 创建目录
if not target_file.parent.exists():
@@ -471,22 +527,32 @@ class TransHandler:
# 复制文件到新目录
target_fileitem = target_oper.get_folder(target_file.parent)
if target_fileitem:
if source_oper.copy(fileitem, Path(target_fileitem.path), target_file.name):
if source_oper.copy(
fileitem, Path(target_fileitem.path), target_file.name
):
return target_oper.get_item(target_file), ""
else:
return None, f"{target_storage}{fileitem.path} 复制文件失败"
else:
return None, f"{target_storage}{target_file.parent} 目录获取失败"
return (
None,
f"{target_storage}{target_file.parent} 目录获取失败",
)
elif transfer_type == "move":
# 移动文件到新目录
target_fileitem = target_oper.get_folder(target_file.parent)
if target_fileitem:
if source_oper.move(fileitem, Path(target_fileitem.path), target_file.name):
if source_oper.move(
fileitem, Path(target_fileitem.path), target_file.name
):
return target_oper.get_item(target_file), ""
else:
return None, f"{target_storage}{fileitem.path} 移动文件失败"
else:
return None, f"{target_storage}{target_file.parent} 目录获取失败"
return (
None,
f"{target_storage}{target_file.parent} 目录获取失败",
)
elif transfer_type == "link":
if source_oper.link(fileitem, target_file):
return target_oper.get_item(target_file), ""
@@ -503,22 +569,28 @@ class TransHandler:
重命名字幕文件,补充附加信息
"""
# 字幕正则式
_zhcn_sub_re = r"([.\[(\s](((zh[-_])?(cn|ch[si]|sg|sc))|zho?" \
r"|chinese|(cn|ch[si]|sg|zho?)[-_&]?(cn|ch[si]|sg|zho?|eng|jap|ja|jpn)" \
r"|eng[-_&]?(cn|ch[si]|sg|zho?)|(jap|ja|jpn)[-_&]?(cn|ch[si]|sg|zho?)" \
r"|简[体中]?)[.\])\s])" \
r"|([\u4e00-\u9fa5]{0,3}[中双][\u4e00-\u9fa5]{0,2}[字文语][\u4e00-\u9fa5]{0,3})" \
r"|简体|简中|JPSC|sc_jp" \
r"|(?<![a-z0-9])gb(?![a-z0-9])"
_zhtw_sub_re = r"([.\[(\s](((zh[-_])?(hk|tw|cht|tc))" \
r"|cht[-_&]?(cht|eng|jap|ja|jpn)" \
r"|eng[-_&]?cht|(jap|ja|jpn)[-_&]?cht" \
r"|繁[体中]?)[.\])\s])" \
r"|繁体中[文字]|中[文字]繁体|繁体|JPTC|tc_jp" \
r"|(?<![a-z0-9])big5(?![a-z0-9])"
_ja_sub_re = r"([.\[(\s](ja-jp|jap|ja|jpn" \
r"|(jap|ja|jpn)[-_&]?eng|eng[-_&]?(jap|ja|jpn))[.\])\s])" \
r"|日本語|日語"
_zhcn_sub_re = (
r"([.\[(\s](((zh[-_])?(cn|ch[si]|sg|sc))|zho?"
r"|chinese|(cn|ch[si]|sg|zho?)[-_&]?(cn|ch[si]|sg|zho?|eng|jap|ja|jpn)"
r"|eng[-_&]?(cn|ch[si]|sg|zho?)|(jap|ja|jpn)[-_&]?(cn|ch[si]|sg|zho?)"
r"|简[体中]?)[.\])\s])"
r"|([\u4e00-\u9fa5]{0,3}[中双][\u4e00-\u9fa5]{0,2}[字文语][\u4e00-\u9fa5]{0,3})"
r"|简体|简中|JPSC|sc_jp"
r"|(?<![a-z0-9])gb(?![a-z0-9])"
)
_zhtw_sub_re = (
r"([.\[(\s](((zh[-_])?(hk|tw|cht|tc))"
r"|cht[-_&]?(cht|eng|jap|ja|jpn)"
r"|eng[-_&]?cht|(jap|ja|jpn)[-_&]?cht"
r"|繁[体中]?)[.\])\s])"
r"|繁体中[文字]|中[文字]繁体|繁体|JPTC|tc_jp"
r"|(?<![a-z0-9])big5(?![a-z0-9])"
)
_ja_sub_re = (
r"([.\[(\s](ja-jp|jap|ja|jpn"
r"|(jap|ja|jpn)[-_&]?eng|eng[-_&]?(jap|ja|jpn))[.\])\s])"
r"|日本語|日語"
)
_eng_sub_re = r"[.\[(\s]eng[.\])\s]"
# 原文件后缀
@@ -537,20 +609,29 @@ class TransHandler:
new_file_type = ".eng"
# 添加默认字幕标识
if ((settings.DEFAULT_SUB == "zh-cn" and new_file_type == ".chi.zh-cn")
or (settings.DEFAULT_SUB == "zh-tw" and new_file_type == ".zh-tw")
or (settings.DEFAULT_SUB == "ja" and new_file_type == ".ja")
or (settings.DEFAULT_SUB == "eng" and new_file_type == ".eng")):
if (
(settings.DEFAULT_SUB == "zh-cn" and new_file_type == ".chi.zh-cn")
or (settings.DEFAULT_SUB == "zh-tw" and new_file_type == ".zh-tw")
or (settings.DEFAULT_SUB == "ja" and new_file_type == ".ja")
or (settings.DEFAULT_SUB == "eng" and new_file_type == ".eng")
):
new_sub_tag = ".default" + new_file_type
else:
new_sub_tag = new_file_type
return new_file.with_name(new_file.stem + new_sub_tag + file_ext)
def __transfer_dir(self, fileitem: FileItem, mediainfo: MediaInfo,
source_oper: StorageBase, target_oper: StorageBase,
transfer_type: str, target_storage: str, target_path: Path,
result: TransferInfo) -> Tuple[Optional[FileItem], str]:
def __transfer_dir(
self,
fileitem: FileItem,
mediainfo: MediaInfo,
source_oper: StorageBase,
target_oper: StorageBase,
transfer_type: str,
target_storage: str,
target_path: Path,
result: TransferInfo,
) -> Tuple[Optional[FileItem], str]:
"""
整理整个文件夹
:param fileitem: 源文件
@@ -570,7 +651,7 @@ class TransHandler:
mediainfo=mediainfo,
target_storage=target_storage,
target_path=target_path,
transfer_type=transfer_type
transfer_type=transfer_type,
)
event = eventmanager.send_event(ChainEventType.TransferIntercept, event_data)
if event and event.event_data:
@@ -579,25 +660,34 @@ class TransHandler:
if event_data.cancel:
logger.debug(
f"Transfer dir canceled by event: {event_data.source},"
f"Reason: {event_data.reason}")
f"Reason: {event_data.reason}"
)
return None, event_data.reason
# 处理所有文件
state, errmsg = self.__transfer_dir_files(fileitem=fileitem,
target_storage=target_storage,
source_oper=source_oper,
target_oper=target_oper,
target_path=target_path,
transfer_type=transfer_type,
result=result)
state, errmsg = self.__transfer_dir_files(
fileitem=fileitem,
target_storage=target_storage,
source_oper=source_oper,
target_oper=target_oper,
target_path=target_path,
transfer_type=transfer_type,
result=result,
)
if state:
return target_item, errmsg
else:
return None, errmsg
def __transfer_dir_files(self, fileitem: FileItem, target_storage: str,
source_oper: StorageBase, target_oper: StorageBase,
transfer_type: str, target_path: Path,
result: TransferInfo) -> Tuple[bool, str]:
def __transfer_dir_files(
self,
fileitem: FileItem,
target_storage: str,
source_oper: StorageBase,
target_oper: StorageBase,
transfer_type: str,
target_path: Path,
result: TransferInfo,
) -> Tuple[bool, str]:
"""
按目录结构整理目录下所有文件
:param fileitem: 源文件
@@ -613,24 +703,28 @@ class TransHandler:
if item.type == "dir":
# 递归整理目录
new_path = target_path / item.name
state, errmsg = self.__transfer_dir_files(fileitem=item,
target_storage=target_storage,
source_oper=source_oper,
target_oper=target_oper,
transfer_type=transfer_type,
target_path=new_path,
result=result)
state, errmsg = self.__transfer_dir_files(
fileitem=item,
target_storage=target_storage,
source_oper=source_oper,
target_oper=target_oper,
transfer_type=transfer_type,
target_path=new_path,
result=result,
)
if not state:
return False, errmsg
else:
# 整理文件
new_file = target_path / item.name
new_item, errmsg = self.__transfer_command(fileitem=item,
target_storage=target_storage,
source_oper=source_oper,
target_oper=target_oper,
target_file=new_file,
transfer_type=transfer_type)
new_item, errmsg = self.__transfer_command(
fileitem=item,
target_storage=target_storage,
source_oper=source_oper,
target_oper=target_oper,
target_file=new_file,
transfer_type=transfer_type,
)
if not new_item:
return False, errmsg
self.__update_result(
@@ -641,11 +735,18 @@ class TransHandler:
# 返回成功
return True, ""
def __transfer_file(self, fileitem: FileItem, mediainfo: MediaInfo,
source_oper: StorageBase, target_oper: StorageBase,
target_storage: str, target_file: Path,
transfer_type: str, result: TransferInfo,
over_flag: Optional[bool] = False) -> Tuple[Optional[FileItem], str]:
def __transfer_file(
self,
fileitem: FileItem,
mediainfo: MediaInfo,
source_oper: StorageBase,
target_oper: StorageBase,
target_storage: str,
target_file: Path,
transfer_type: str,
result: TransferInfo,
over_flag: Optional[bool] = False,
) -> Tuple[Optional[FileItem], str]:
"""
整理一个文件,同时处理其他相关文件
:param fileitem: 原文件
@@ -659,17 +760,17 @@ class TransHandler:
:param source_oper: 源存储操作对象
:param target_oper: 目标存储操作对象
"""
logger.info(f"正在整理文件:【{fileitem.storage}{fileitem.path} 到 【{target_storage}{target_file}"
f"操作类型:{transfer_type}")
logger.info(
f"正在整理文件:【{fileitem.storage}{fileitem.path} 到 【{target_storage}{target_file}"
f"操作类型:{transfer_type}"
)
event_data = TransferInterceptEventData(
fileitem=fileitem,
mediainfo=mediainfo,
target_storage=target_storage,
target_path=target_file,
transfer_type=transfer_type,
options={
"over_flag": over_flag
}
options={"over_flag": over_flag},
)
event = eventmanager.send_event(ChainEventType.TransferIntercept, event_data)
if event and event.event_data:
@@ -678,9 +779,12 @@ class TransHandler:
if event_data.cancel:
logger.debug(
f"Transfer file canceled by event: {event_data.source},"
f"Reason: {event_data.reason}")
f"Reason: {event_data.reason}"
)
return None, event_data.reason
if target_storage == "local" and (target_file.exists() or target_file.is_symlink()):
if target_storage == "local" and (
target_file.exists() or target_file.is_symlink()
):
if not over_flag:
logger.warn(f"文件已存在:{target_file}")
return None, f"{target_file} 已存在"
@@ -694,15 +798,19 @@ class TransHandler:
logger.warn(f"文件已存在:【{target_storage}{target_file}")
return None, f"{target_storage}{target_file} 已存在"
else:
logger.info(f"正在删除已存在的文件:【{target_storage}{target_file}")
logger.info(
f"正在删除已存在的文件:【{target_storage}{target_file}"
)
target_oper.delete(exists_item)
# 执行文件整理命令
new_item, errmsg = self.__transfer_command(fileitem=fileitem,
target_storage=target_storage,
source_oper=source_oper,
target_oper=target_oper,
target_file=target_file,
transfer_type=transfer_type)
new_item, errmsg = self.__transfer_command(
fileitem=fileitem,
target_storage=target_storage,
source_oper=source_oper,
target_oper=target_oper,
target_file=target_file,
transfer_type=transfer_type,
)
if new_item:
self.__update_result(
result=result,
@@ -716,8 +824,12 @@ class TransHandler:
return None, errmsg
@staticmethod
def get_dest_path(mediainfo: MediaInfo, target_path: Path,
need_type_folder: Optional[bool] = False, need_category_folder: Optional[bool] = False):
def get_dest_path(
mediainfo: MediaInfo,
target_path: Path,
need_type_folder: Optional[bool] = False,
need_category_folder: Optional[bool] = False,
):
"""
获取目标路径
"""
@@ -728,8 +840,12 @@ class TransHandler:
return target_path
@staticmethod
def get_dest_dir(mediainfo: MediaInfo, target_dir: TransferDirectoryConf,
need_type_folder: Optional[bool] = None, need_category_folder: Optional[bool] = None) -> Path:
def get_dest_dir(
mediainfo: MediaInfo,
target_dir: TransferDirectoryConf,
need_type_folder: Optional[bool] = None,
need_category_folder: Optional[bool] = None,
) -> Path:
"""
根据设置并装媒体库目录
:param mediainfo: 媒体信息
@@ -749,7 +865,11 @@ class TransHandler:
library_dir = Path(target_dir.library_path) / target_dir.media_type
else:
library_dir = Path(target_dir.library_path)
if not target_dir.media_category and need_category_folder and mediainfo.category:
if (
not target_dir.media_category
and need_category_folder
and mediainfo.category
):
# 二级自动分类
library_dir = library_dir / mediainfo.category
elif target_dir.media_category and need_category_folder:
@@ -759,8 +879,12 @@ class TransHandler:
return library_dir
@staticmethod
def get_naming_dict(meta: MetaBase, mediainfo: MediaInfo, file_ext: Optional[str] = None,
episodes_info: List[TmdbEpisode] = None) -> dict:
def get_naming_dict(
meta: MetaBase,
mediainfo: MediaInfo,
file_ext: Optional[str] = None,
episodes_info: List[TmdbEpisode] = None,
) -> dict:
"""
根据媒体信息返回Format字典
:param meta: 文件元数据
@@ -768,8 +892,12 @@ class TransHandler:
:param file_ext: 文件扩展名
:param episodes_info: 当前季的全部集信息
"""
return TemplateHelper().builder.build(meta=meta, mediainfo=mediainfo,
file_extension=file_ext, episodes_info=episodes_info)
return TemplateHelper().builder.build(
meta=meta,
mediainfo=mediainfo,
file_extension=file_ext,
episodes_info=episodes_info,
)
@staticmethod
def __delete_version_files(storage_oper: StorageBase, path: Path) -> bool:
@@ -816,14 +944,20 @@ class TransHandler:
return True
@staticmethod
def get_rename_path(template_string: str, rename_dict: dict,
path: Path = None, source_path: str = None) -> Path:
def get_rename_path(
template_string: str,
rename_dict: dict,
path: Optional[Path] = None,
source_path: Optional[str] = None,
source_item: Optional[FileItem] = None,
) -> Path:
"""
生成重命名后的完整路径,支持智能重命名事件
:param template_string: Jinja2 模板字符串
:param rename_dict: 渲染上下文,用于替换模板中的变量
:param path: 可选的基础路径,如果提供,将在其基础上拼接生成的路径
:param source_path: 源文件路径,即待整理的文件路径
:param source_item: 源文件信息,即待整理的文件信息
:return: 生成的完整路径
"""
# 创建jinja2模板对象
@@ -838,15 +972,18 @@ class TransHandler:
rename_dict=rename_dict,
render_str=render_str,
path=path,
source_path=source_path
source_path=source_path,
source_item=source_item,
)
event = eventmanager.send_event(ChainEventType.TransferRename, event_data)
# 检查事件返回的结果
if event and event.event_data:
event_data: TransferRenameEventData = event.event_data
if event_data.updated and event_data.updated_str:
logger.debug(f"Render string updated by event: "
f"{render_str} -> {event_data.updated_str} (source: {event_data.source})")
logger.debug(
f"Render string updated by event: "
f"{render_str} -> {event_data.updated_str} (source: {event_data.source})"
)
render_str = event_data.updated_str
# 目的路径

View File

@@ -19,6 +19,7 @@ class QQBotModule(_ModuleBase, _MessageBase[QQBot]):
"""QQ Bot 通知模块"""
def init_module(self) -> None:
self.stop()
super().init_service(service_name=QQBot.__name__.lower(), service_type=QQBot)
self._channel = MessageChannel.QQ

View File

@@ -6,7 +6,7 @@ QQ Bot Gateway WebSocket 客户端
import json
import threading
import time
from typing import Callable, Optional
from typing import Callable, List, Optional
import websocket
@@ -24,6 +24,7 @@ def run_gateway(
get_gateway_url_fn: Callable[[str], str],
on_message_fn: Callable[[dict], None],
stop_event: threading.Event,
ws_holder: List,
) -> None:
"""
在后台线程中运行 Gateway WebSocket 连接
@@ -34,20 +35,20 @@ def run_gateway(
:param get_gateway_url_fn: 获取 gateway URL 的函数 (token) -> url
:param on_message_fn: 收到消息时的回调 (payload_dict) -> None
:param stop_event: 停止事件set 时退出循环
:param ws_holder: 调用方持有的单元素列表,存放当前 WebSocketApp供 stop() 时 close 以打断 run_forever
"""
last_seq: Optional[int] = None
heartbeat_interval_ms: Optional[int] = None
heartbeat_timer: Optional[threading.Timer] = None
ws_ref: list = [] # 用于在闭包中保持 ws 引用
def send_heartbeat():
nonlocal heartbeat_timer
if stop_event.is_set():
return
try:
if ws_ref and ws_ref[0]:
if ws_holder and ws_holder[0]:
payload = {"op": 1, "d": last_seq}
ws_ref[0].send(json.dumps(payload))
ws_holder[0].send(json.dumps(payload))
logger.debug(f"[QQ Gateway:{config_name}] Heartbeat sent, seq={last_seq}")
except Exception as err:
logger.debug(f"[QQ Gateway:{config_name}] Heartbeat error: {err}")
@@ -87,7 +88,7 @@ def run_gateway(
"shard": [0, 1],
},
}
ws_ref[0].send(json.dumps(identify))
ws_holder[0].send(json.dumps(identify))
logger.info(f"[QQ Gateway:{config_name}] Identify sent")
# 启动心跳
@@ -139,8 +140,8 @@ def run_gateway(
elif op == 9: # Invalid Session
logger.warning(f"[QQ Gateway:{config_name}] Invalid session")
if ws_ref and ws_ref[0]:
ws_ref[0].close()
if ws_holder and ws_holder[0]:
ws_holder[0].close()
def on_ws_error(_, error):
logger.error(f"[QQ Gateway:{config_name}] WebSocket error: {error}")
@@ -149,6 +150,7 @@ def run_gateway(
logger.info(f"[QQ Gateway:{config_name}] WebSocket closed: {close_status_code} {close_msg}")
if heartbeat_timer:
heartbeat_timer.cancel()
ws_holder.clear()
reconnect_delays = [1, 2, 5, 10, 30, 60]
attempt = 0
@@ -165,8 +167,8 @@ def run_gateway(
on_error=on_ws_error,
on_close=on_ws_close,
)
ws_ref.clear()
ws_ref.append(ws)
ws_holder.clear()
ws_holder.append(ws)
# run_forever 会阻塞,需要传入 stop_event 的检查
# websocket-client 的 run_forever 支持 ping_interval, ping_timeout

View File

@@ -50,6 +50,9 @@ class QQBot:
:param QQ_GROUP_OPENID: 默认群组 openid群聊与 QQ_OPENID 二选一)
:param name: 配置名称,用于消息来源标识和 Gateway 接收
"""
self._gateway_stop = None
self._gateway_thread = None
self._gateway_ws_holder: list = []
if not QQ_APP_ID or not QQ_APP_SECRET:
logger.error("QQ Bot 配置不完整:缺少 AppID 或 AppSecret")
self._ready = False
@@ -151,6 +154,7 @@ class QQBot:
"get_gateway_url_fn": get_gateway_url,
"on_message_fn": self._on_gateway_message,
"stop_event": self._gateway_stop,
"ws_holder": self._gateway_ws_holder,
},
daemon=True,
)
@@ -161,10 +165,19 @@ class QQBot:
def stop(self) -> None:
"""停止 Gateway 连接"""
if self._gateway_stop:
if self._gateway_stop is not None:
self._gateway_stop.set()
if self._gateway_thread and self._gateway_thread.is_alive():
self._gateway_thread.join(timeout=5)
try:
if self._gateway_ws_holder:
self._gateway_ws_holder[0].close()
except Exception as e:
logger.debug(f"QQ Bot Gateway WebSocket close: {e}")
if self._gateway_thread is not None and self._gateway_thread.is_alive():
self._gateway_thread.join(timeout=20)
if self._gateway_thread.is_alive():
logger.warning(
"QQ Bot Gateway 线程在 stop 后仍未退出,可能存在重复收消息,请重启进程"
)
def get_state(self) -> bool:
"""获取就绪状态"""

View File

@@ -11,6 +11,7 @@ class Event(BaseModel):
"""
事件模型
"""
event_type: str = Field(..., description="事件类型")
event_data: Optional[dict] = Field(default={}, description="事件数据")
priority: Optional[int] = Field(0, description="事件优先级")
@@ -20,6 +21,7 @@ class BaseEventData(BaseModel):
"""
事件数据的基类,所有具体事件数据类应继承自此类
"""
pass
@@ -27,11 +29,14 @@ class ConfigChangeEventData(BaseEventData):
"""
ConfigChange 事件的数据模型
"""
key: set[str] = Field(..., description="配置项的键(集合类型)")
value: Optional[Any] = Field(default=None, description="配置项的新值")
change_type: str = Field(default="update", description="配置项的变更类型,如 'add', 'update', 'delete'")
change_type: str = Field(
default="update", description="配置项的变更类型,如 'add', 'update', 'delete'"
)
@field_validator('key', mode='before')
@field_validator("key", mode="before")
@classmethod
def convert_to_set(cls, v):
"""将输入的 str、list、dict.keys() 等转为 set"""
@@ -55,6 +60,7 @@ class ChainEventData(BaseEventData):
"""
链式事件数据的基类,所有具体事件数据类应继承自此类
"""
pass
@@ -73,12 +79,24 @@ class AuthCredentials(ChainEventData):
channel (Optional[str]): 认证渠道
service (Optional[str]): 服务名称
"""
# 输入参数
username: Optional[str] = Field(None, description="用户名,适用于 'password' 认证类型")
password: Optional[str] = Field(None, description="用户密码,适用于 'password' 认证类型")
mfa_code: Optional[str] = Field(None, description="一次性密码,目前仅适用于 'password' 认证类型")
code: Optional[str] = Field(None, description="授权码,适用于 'authorization_code' 认证类型")
grant_type: str = Field(..., description="认证类型,如 'password', 'authorization_code', 'client_credentials'")
username: Optional[str] = Field(
None, description="用户,适用于 'password' 认证类型"
)
password: Optional[str] = Field(
None, description="用户密码,适用于 'password' 认证类型"
)
mfa_code: Optional[str] = Field(
None, description="一次性密码,目前仅适用于 'password' 认证类型"
)
code: Optional[str] = Field(
None, description="授权码,适用于 'authorization_code' 认证类型"
)
grant_type: str = Field(
...,
description="认证类型,如 'password', 'authorization_code', 'client_credentials'",
)
# scope: List[str] = Field(default_factory=list, description="权限范围,如 ['read', 'write']")
# 输出参数
@@ -87,7 +105,7 @@ class AuthCredentials(ChainEventData):
channel: Optional[str] = Field(default=None, description="认证渠道")
service: Optional[str] = Field(default=None, description="服务名称")
@model_validator(mode='before')
@model_validator(mode="before")
@classmethod
def check_fields_based_on_grant_type(cls, values): # noqa
grant_type = values.get("grant_type")
@@ -97,7 +115,9 @@ class AuthCredentials(ChainEventData):
if grant_type == "password":
if not values.get("username") or not values.get("password"):
raise ValueError("username and password are required for grant_type 'password'")
raise ValueError(
"username and password are required for grant_type 'password'"
)
elif grant_type == "authorization_code":
if not values.get("code"):
@@ -122,11 +142,15 @@ class AuthInterceptCredentials(ChainEventData):
source (str): 拦截源,默认值为 "未知拦截源"
cancel (bool): 是否取消认证,默认值为 False
"""
# 输入参数
username: Optional[str] = Field(..., description="用户名")
channel: str = Field(..., description="认证渠道")
service: str = Field(..., description="服务名称")
status: str = Field(..., description="认证状态, 包含 'triggered' 表示认证触发,'completed' 表示认证成功")
status: str = Field(
...,
description="认证状态, 包含 'triggered' 表示认证触发,'completed' 表示认证成功",
)
token: Optional[str] = Field(default=None, description="认证令牌")
# 输出参数
@@ -148,6 +172,7 @@ class CommandRegisterEventData(ChainEventData):
source (str): 拦截源,默认值为 "未知拦截源"
cancel (bool): 是否取消认证,默认值为 False
"""
# 输入参数
commands: Dict[str, dict] = Field(..., description="菜单命令")
origin: str = Field(..., description="事件源")
@@ -169,18 +194,25 @@ class TransferRenameEventData(ChainEventData):
render_str (str): 渲染生成的字符串
path (Optional[Path]): 当前文件的目标路径
source_path (Optional[str]): 源文件路径,即待整理的文件路径
source_item (Optional[FileItem]): 源文件信息,即待整理的文件信息
# 输出参数
updated (bool): 是否已更新,默认值为 False
updated_str (str): 更新后的字符串
source (str): 拦截源,默认值为 "未知拦截源"
"""
# 输入参数
template_string: str = Field(..., description="模板字符串")
rename_dict: Dict[str, Any] = Field(..., description="渲染上下文")
path: Optional[Path] = Field(None, description="文件的目标路径")
render_str: str = Field(..., description="渲染生成的字符串")
source_path: Optional[str] = Field(None, description="源文件路径,即待整理的文件路径")
source_path: Optional[str] = Field(
None, description="源文件路径,即待整理的文件路径"
)
source_item: Optional[FileItem] = Field(
None, description="源文件信息,即待整理的文件信息"
)
# 输出参数
updated: bool = Field(default=False, description="是否已更新")
@@ -202,6 +234,7 @@ class ResourceSelectionEventData(BaseModel):
updated_contexts (Optional[List[Context]]): 已更新的资源上下文列表,默认值为 None
source (str): 更新源,默认值为 "未知更新源"
"""
# 输入参数
contexts: Any = Field(None, description="待选择的资源上下文列表")
downloader: Optional[str] = Field(None, description="下载器")
@@ -209,7 +242,9 @@ class ResourceSelectionEventData(BaseModel):
# 输出参数
updated: bool = Field(default=False, description="是否已更新")
updated_contexts: Optional[List[Any]] = Field(default=None, description="已更新的资源上下文列表")
updated_contexts: Optional[List[Any]] = Field(
default=None, description="已更新的资源上下文列表"
)
source: Optional[str] = Field(default="未知拦截源", description="拦截源")
@@ -231,6 +266,7 @@ class ResourceDownloadEventData(ChainEventData):
source (str): 拦截源,默认值为 "未知拦截源"
reason (str): 拦截原因,描述拦截的具体原因
"""
# 输入参数
context: Any = Field(None, description="当前资源上下文")
episodes: Optional[Set[int]] = Field(None, description="需要下载的集数")
@@ -262,6 +298,7 @@ class TransferInterceptEventData(ChainEventData):
source (str): 拦截源,默认值为 "未知拦截源"
reason (str): 拦截原因,描述拦截的具体原因
"""
# 输入参数
fileitem: FileItem = Field(..., description="源文件")
mediainfo: Any = Field(..., description="媒体信息")
@@ -280,12 +317,17 @@ class DiscoverMediaSource(BaseModel):
"""
探索媒体数据源的基类
"""
name: str = Field(..., description="数据源名称")
mediaid_prefix: str = Field(..., description="媒体ID的前缀不含:")
api_path: str = Field(..., description="媒体数据源API地址")
filter_params: Optional[Dict[str, Any]] = Field(default=None, description="过滤参数")
filter_params: Optional[Dict[str, Any]] = Field(
default=None, description="过滤参数"
)
filter_ui: Optional[List[dict]] = Field(default=[], description="过滤参数UI配置")
depends: Optional[Dict[str, list]] = Field(default=None, description="UI依赖关系字典")
depends: Optional[Dict[str, list]] = Field(
default=None, description="UI依赖关系字典"
)
class DiscoverSourceEventData(ChainEventData):
@@ -296,14 +338,18 @@ class DiscoverSourceEventData(ChainEventData):
# 输出参数
extra_sources (List[DiscoverMediaSource]): 额外媒体数据源
"""
# 输出参数
extra_sources: List[DiscoverMediaSource] = Field(default_factory=list, description="额外媒体数据源")
extra_sources: List[DiscoverMediaSource] = Field(
default_factory=list, description="额外媒体数据源"
)
class RecommendMediaSource(BaseModel):
"""
推荐媒体数据源的基类
"""
name: str = Field(..., description="数据源名称")
api_path: str = Field(..., description="媒体数据源API地址")
type: str = Field(..., description="类型")
@@ -317,8 +363,11 @@ class RecommendSourceEventData(ChainEventData):
# 输出参数
extra_sources (List[RecommendMediaSource]): 额外媒体数据源
"""
# 输出参数
extra_sources: List[RecommendMediaSource] = Field(default_factory=list, description="额外媒体数据源")
extra_sources: List[RecommendMediaSource] = Field(
default_factory=list, description="额外媒体数据源"
)
class MediaRecognizeConvertEventData(ChainEventData):
@@ -333,12 +382,15 @@ class MediaRecognizeConvertEventData(ChainEventData):
# 输出参数
media_dict (dict): TheMovieDb/豆瓣的媒体数据
"""
# 输入参数
mediaid: str = Field(..., description="媒体ID")
convert_type: str = Field(..., description="转换类型themoviedb/douban")
# 输出参数
media_dict: dict = Field(default_factory=dict, description="转换后的媒体信息TheMovieDb/豆瓣)")
media_dict: dict = Field(
default_factory=dict, description="转换后的媒体信息TheMovieDb/豆瓣)"
)
class StorageOperSelectionEventData(ChainEventData):
@@ -352,6 +404,7 @@ class StorageOperSelectionEventData(ChainEventData):
# 输出参数
storage_oper (Callable): 存储操作对象
"""
# 输入参数
storage: Optional[str] = Field(default=None, description="存储类型")

View File

@@ -0,0 +1,73 @@
---
name: command-dispatch
description: >-
Use this skill when the user's intent is to execute a system or plugin function. Applicable scenarios include:
1) The user sends a slash command starting with / (e.g. /cookiecloud, /sites, /subscribes, etc.);
2) The user describes an action in natural language that can be fulfilled by a system or plugin command
(e.g. "sync sites", "show subscriptions", "refresh subscriptions", "check downloads", etc.).
This skill helps you identify the user's intent, find the matching command, extract necessary parameters,
and execute the corresponding command.
allowed-tools: list_slash_commands query_plugin_capabilities run_slash_command
---
# Command Dispatch
Use this skill to identify user intent and dispatch the corresponding system or plugin command.
## When to Use
- The user sends a `/xxx` slash command (execute directly)
- The user describes an action in natural language, for example:
- "Sync sites" → `/cookiecloud`
- "Show my subscriptions" → `/subscribes`
- "Refresh subscriptions" → `/subscribe_refresh`
- "What's downloading?" → `/downloading`
- "Organize downloaded files" → `/transfer`
- "Clear cache" → `/clear_cache`
- "Restart the system" → `/restart`
- "Pause all QB tasks" → `/pause_torrents` (plugin command)
## Tools
- `list_slash_commands` — List all available slash commands (system + plugin), returns command name, description, and category
- `query_plugin_capabilities` — Query detailed plugin capabilities (commands, actions, scheduled services)
- `run_slash_command` — Execute a specified command (works for both system and plugin commands)
## Workflow
### Step 1: Identify User Intent
Determine whether the user's message is requesting the execution of a command:
- **Direct command**: Message starts with `/`, e.g. `/sites`, `/subscribes` → skip to Step 3
- **Natural language**: The user describes an actionable request → continue to Step 2
### Step 2: Find Matching Command
Use `list_slash_commands` to retrieve all available commands. Match the user's described intent against the `description` and `category` fields of each command.
If the user's description involves a specific plugin's functionality, additionally use `query_plugin_capabilities` to query that plugin's detailed capabilities.
**Matching strategy**:
- Prefer exact matches on command description
- Then narrow down by category and match
- If no matching command is found, inform the user that no corresponding function is available
### Step 3: Extract Parameters and Execute
Some commands support additional arguments (space-separated after the command), for example:
- `/redo <history_id>` — Manually re-organize a specific record
- `/subscribe_delete <name>` — Delete a specific subscription
Use `run_slash_command` to execute the command in the format `/command_name arg1 arg2`.
### Step 4: Report Result
Command execution is asynchronous. After triggering, inform the user that the command has started. If the command does not exist, list available commands for reference.
## Important Notes
- Command execution requires admin privileges; the tool will automatically check permissions
- Both system and plugin commands are executed via the `run_slash_command` tool — no need to distinguish between them
- If you are unsure which command matches the user's intent, use `list_slash_commands` first to look up before deciding
- Never guess non-existent commands; always select from the available command list

View File

@@ -0,0 +1,231 @@
---
name: database-operation
description: >-
Use this skill when you need to execute SQL against the MoviePilot database.
This skill guides you through connecting to the database and executing SQL statements.
The database type (SQLite or PostgreSQL) and connection details are provided in the system prompt <system_info>.
Applicable scenarios include:
1) The user asks about data statistics, counts, or aggregations that existing tools don't cover;
2) The user wants to inspect, modify, or fix raw database records;
3) The user asks to clean up data, update records, or perform database maintenance;
4) The user asks questions like "how many downloads", "show me site stats", "delete old records", etc.
allowed-tools: execute_command read_file
---
# Database Operation (数据库操作)
This skill guides you through executing SQL against the MoviePilot database. Both read and write operations are supported.
## Prerequisites
You need the following tools:
- `execute_command` - Execute shell commands to run database queries
## Getting Database Connection Info
The system prompt `<system_info>` section already contains all the database connection details you need:
- **数据库类型** — `sqlite` or `postgresql`
- **数据库** — Full connection info:
- For SQLite: the database file path, e.g. `SQLite (/config/db/moviepilot.db)`
- For PostgreSQL: the connection string, e.g. `PostgreSQL (user:password@host:port/database)`
**Do NOT run any detection commands.** Extract the database type and connection details directly from `<system_info>`.
## Executing Queries
### SQLite Mode
Extract the database file path from `<system_info>` (the path inside the parentheses after `SQLite`).
Use `execute_command` to run queries:
```bash
sqlite3 -header -column <DB_PATH> "YOUR SQL QUERY HERE;"
```
For JSON-formatted output (easier to parse):
```bash
sqlite3 -json <DB_PATH> "YOUR SQL QUERY HERE;"
```
**List all tables:**
```bash
sqlite3 -header -column <DB_PATH> "SELECT name FROM sqlite_master WHERE type='table' ORDER BY name;"
```
**View table schema:**
```bash
sqlite3 <DB_PATH> ".schema tablename"
```
### PostgreSQL Mode
Extract the connection parameters from `<system_info>` (parse `user:password@host:port/database` from the parentheses after `PostgreSQL`).
Use `execute_command` to run queries via `psql`:
```bash
PGPASSWORD=<password> psql -h <host> -p <port> -U <user> -d <database> -c "YOUR SQL QUERY HERE;"
```
**List all tables:**
```bash
PGPASSWORD=<password> psql -h <host> -p <port> -U <user> -d <database> -c "SELECT tablename FROM pg_tables WHERE schemaname='public' ORDER BY tablename;"
```
**View table schema:**
```bash
PGPASSWORD=<password> psql -h <host> -p <port> -U <user> -d <database> -c "\d tablename"
```
## Interpret Results
After executing the query, analyze the results and present them in a clear, user-friendly format. Use aggregation, sorting, and filtering as needed.
## Database Schema Reference
MoviePilot uses the following core tables:
### downloadhistory (下载历史)
Key columns: `id`, `path`, `type`, `title`, `year`, `tmdbid`, `imdbid`, `doubanid`, `seasons`, `episodes`, `downloader`, `download_hash`, `torrent_name`, `torrent_site`, `userid`, `username`, `date`, `media_category`
### downloadfiles (下载文件)
Key columns: `id`, `downloader`, `download_hash`, `fullpath`, `savepath`, `filepath`, `torrentname`, `state`
### transferhistory (整理历史)
Key columns: `id`, `src`, `dest`, `mode`, `type`, `category`, `title`, `year`, `tmdbid`, `seasons`, `episodes`, `download_hash`, `status` (boolean: true=success, false=failed), `errmsg`, `date`
### subscribe (订阅)
Key columns: `id`, `name`, `year`, `type`, `tmdbid`, `doubanid`, `season`, `total_episode`, `start_episode`, `lack_episode`, `state` ('N'=new, 'R'=running, 'S'=paused), `filter`, `include`, `exclude`, `quality`, `resolution`, `sites`, `best_version`, `date`, `username`
### subscribehistory (订阅历史)
Key columns: `id`, `name`, `year`, `type`, `tmdbid`, `doubanid`, `season`, `total_episode`, `start_episode`, `date`, `username`
### user (用户)
Key columns: `id`, `name`, `email`, `is_active`, `is_superuser`, `permissions`, `settings`
### site (站点)
Key columns: `id`, `name`, `domain`, `url`, `pri` (priority), `cookie`, `proxy`, `is_active`, `downloader`, `limit_interval`, `limit_count`
### siteuserdata (站点用户数据)
Key columns: `id`, `domain`, `name`, `username`, `user_level`, `bonus`, `upload`, `download`, `ratio`, `seeding`, `leeching`, `seeding_size`, `updated_day`
### sitestatistic (站点统计)
Key columns: `id`, `domain`, `success`, `fail`, `seconds`, `lst_state`, `lst_mod_date`
### mediaserveritem (媒体库条目)
Key columns: `id`, `server`, `library`, `item_id`, `item_type`, `title`, `original_title`, `year`, `tmdbid`, `imdbid`, `tvdbid`, `path`
### systemconfig (系统配置)
Key columns: `id`, `key`, `value` (JSON)
### userconfig (用户配置)
Key columns: `id`, `username`, `key`, `value` (JSON)
### plugindata (插件数据)
Key columns: `id`, `plugin_id`, `key`, `value` (JSON)
### message (消息)
Key columns: `id`, `channel`, `source`, `mtype`, `title`, `text`, `image`, `link`, `userid`, `reg_time`
### workflow (工作流)
Key columns: `id`, `name`, `description`, `timer`, `trigger_type`, `event_type`, `state` ('W'=waiting, 'R'=running), `run_count`, `actions`, `flows`, `last_time`
### passkey (通行密钥)
Key columns: `id`, `user_id`, `credential_id`, `public_key`, `name`, `created_at`, `last_used_at`, `is_active`
### siteicon (站点图标)
Key columns: `id`, `name`, `domain`, `url`, `base64`
## Common Query Examples
### Count total downloads
```sql
SELECT COUNT(*) AS total FROM downloadhistory;
```
### Recent download history
```sql
SELECT title, year, type, torrent_site, date FROM downloadhistory ORDER BY id DESC LIMIT 10;
```
### Failed transfers
```sql
SELECT id, title, src, errmsg, date FROM transferhistory WHERE status = 0 ORDER BY id DESC LIMIT 10; -- PostgreSQL: use status = false
```
### Active subscriptions
```sql
SELECT name, year, type, season, state, lack_episode FROM subscribe WHERE state = 'R';
```
### Site upload/download statistics
```sql
SELECT name, domain, upload, download, ratio, bonus, seeding, user_level FROM siteuserdata ORDER BY upload DESC;
```
### Media library statistics
```sql
SELECT server, library, COUNT(*) AS count FROM mediaserveritem GROUP BY server, library;
```
### Site access success rate
```sql
SELECT domain, success, fail, ROUND(success * 100.0 / (success + fail), 1) AS success_rate FROM sitestatistic WHERE success + fail > 0 ORDER BY success_rate DESC;
```
### Plugin data inspection
```sql
SELECT plugin_id, key FROM plugindata ORDER BY plugin_id, key;
```
### Delete old download history (write operation)
```sql
DELETE FROM downloadhistory WHERE date < '2024-01-01';
```
### Update subscription state (write operation)
```sql
UPDATE subscribe SET state = 'S' WHERE id = 123;
```
### Clean up failed transfer records (write operation)
```sql
DELETE FROM transferhistory WHERE status = 0 AND date < '2024-06-01';
```
## Safety Rules
1. **Confirm before writing** — For any `INSERT`, `UPDATE`, `DELETE`, `DROP`, `ALTER`, or `TRUNCATE` operation, always describe what the statement will do and ask the user to confirm before executing. For `SELECT` queries, execute directly without confirmation
2. **Back up before destructive operations** — Before executing `DELETE`, `DROP`, or `TRUNCATE` on important tables, suggest the user back up the data first (e.g., export with `.dump` for SQLite or `pg_dump` for PostgreSQL)
3. **Use WHERE clauses** — Never run `UPDATE` or `DELETE` without a `WHERE` clause unless the user explicitly intends to affect all rows
4. **Use LIMIT for queries** — When querying large tables with `SELECT`, add `LIMIT` to prevent excessive output
5. **Sensitive data** — The `site` table contains `cookie`, `apikey`, and `token` fields. NEVER display these values to the user. Exclude them from SELECT or replace with `'***'`
6. **Password data** — The `user` table contains `hashed_password` and `otp_secret` fields. NEVER display these values
7. **Output limits** — If the query results are very long, summarize or truncate them
## SQL Dialect Differences
When writing queries, be aware of differences between SQLite and PostgreSQL:
| Feature | SQLite | PostgreSQL |
|---------|--------|------------|
| Boolean values | `0` / `1` | `false` / `true` |
| String concat | `\|\|` | `\|\|` or `CONCAT()` |
| Current time | `datetime('now')` | `NOW()` |
| LIMIT syntax | `LIMIT n` | `LIMIT n` |
| JSON access | `json_extract(col, '$.key')` | `col->>'key'` |
| Identifier case | Case-insensitive | Unquoted identifiers folded to lowercase |
| LIKE | Case-insensitive | Use `ILIKE` for case-insensitive |
## Troubleshooting
- **sqlite3 not found**: The `sqlite3` CLI should be pre-installed in the MoviePilot Docker container. If missing, you can try using Python: `python3 -c "import sqlite3; ..."`
- **psql not found**: For PostgreSQL, if `psql` is not available, use Python: `python3 -c "import psycopg2; ..."`
- **Permission denied**: Database queries require admin privileges
- **Table not found**: Use the "list all tables" query first to verify table names

View File

@@ -0,0 +1,226 @@
---
name: generate-identifiers
description: >-
Use this skill when a user provides a torrent name or file name and wants to fix recognition issues,
or asks to add/manage custom identifiers (自定义识别词).
This skill generates identifier rules based on the WordsMatcher preprocessing logic,
checks for duplicates against existing rules, and saves them via MCP tools.
Applicable scenarios include:
1) A torrent or file name is incorrectly recognized (wrong title, season, episode, etc.);
2) The user wants to block unwanted keywords from torrent names;
3) The user needs episode offset rules for series with non-standard numbering;
4) The user wants to force recognition of a specific media by TMDB/Douban ID.
allowed-tools: query_custom_identifiers update_custom_identifiers recognize_media
---
# Generate Custom Identifiers (生成自定义识别词)
This skill helps generate custom identifier rules for MoviePilot's media recognition system. Custom identifiers preprocess torrent/file names before the recognition engine runs, correcting naming issues that cause misidentification.
## Prerequisites
You need the following tools:
- `query_custom_identifiers` - Query all existing custom identifier rules
- `update_custom_identifiers` - Save the updated identifier list (replaces the full list)
- `recognize_media` - Test recognition of a torrent title or file path (optional, for verification)
## Supported Rule Formats
There are **four formats**. Operators must have spaces on both sides.
### 1. Block Word (屏蔽词)
Removes matched text from the title. Supports regex.
```
REPACK
```
### 2. Replacement (被替换词 => 替换词)
Regex substitution. The left side is a regex pattern, the right side is the replacement (supports backreferences).
```
被替换词 => 替换词
```
**Special replacement for direct ID specification:**
```
被替换词 => {[tmdbid=xxx;type=movie/tv;s=xxx;e=xxx]}
被替换词 => {[doubanid=xxx;type=movie/tv;s=xxx;e=xxx]}
```
Where `s` (season) and `e` (episode) are optional.
### 3. Episode Offset (集偏移)
Shifts episode numbers found between the front and back delimiter words. `EP` is the placeholder for the original episode number.
```
前定位词 <> 后定位词 >> EP-12
```
### 4. Combined Replacement + Episode Offset
First performs replacement; episode offset only runs if replacement succeeded.
```
被替换词 => 替换词 && 前定位词 <> 后定位词 >> EP-12
```
### Comments
Lines starting with `#` are comments and will be skipped during processing.
## Important Rules for Writing Identifiers
1. **Regex support**: All patterns support regular expressions. Special characters (`. * + ? ^ $ { } [ ] ( ) | \`) must be escaped with `\` when matching literally.
2. **Spaces matter**: The operators ` => `, ` <> `, ` >> `, ` && ` must have spaces on both sides.
3. **One rule per string**: Each element in the identifiers list is one rule.
4. **EP placeholder**: In episode offset expressions, `EP` represents the original episode number. Common patterns:
- `EP-12` means subtract 12
- `EP+5` means add 5
- `EP*2` means multiply by 2
5. **Chinese number support**: Episode offset handles Chinese numbers (一二三四五六七八九十).
6. **Empty replacement**: Using nothing after `=>` is equivalent to a block word.
## Workflow
### Step 1: Analyze the Problem
Parse the torrent/file name provided by the user. Identify:
- What is being incorrectly recognized (title, season, episode, year, quality, etc.)
- What the correct recognition result should be
- Which identifier format(s) will solve the problem
### Step 2: Generate the Identifier Rule(s)
Write the rule using the appropriate format. Ensure:
- Regex special characters are properly escaped
- Add a comment line (starting with `#`) above the rule to describe what it does
- Test the regex mentally against the provided name to verify correctness
### Step 3: Query Existing Identifiers
Use the `query_custom_identifiers` tool to get all current rules:
```
query_custom_identifiers(explanation="Checking existing identifiers before adding new rules to avoid duplicates")
```
### Step 4: Check for Duplicates
Compare each new rule against the existing identifiers:
- **Exact duplicate**: The rule string is identical to an existing rule — skip it
- **Functional duplicate**: A different rule that produces the same effect on the same input (e.g., same regex pattern with trivial whitespace differences) — warn the user
- **Conflict**: An existing rule modifies the same text in a different way — warn the user and ask which to keep
### Step 5: Save the Updated Identifiers
Merge new non-duplicate rules into the existing list, then use `update_custom_identifiers` to save the **complete** list:
```
update_custom_identifiers(
explanation="Adding new identifier rules for [description]",
identifiers=["existing rule 1", "existing rule 2", "# new comment", "new rule"]
)
```
**CRITICAL**: Always include ALL existing rules in the list. This tool replaces the entire list.
### Step 6: Verify (Optional)
If the user wants to verify the rule works, use `recognize_media` to test:
```
recognize_media(explanation="Testing recognition after adding identifier", title="the torrent title to test")
```
### Step 7: Report
Tell the user:
- What rule(s) were added
- What effect they will have on the title
- Whether any duplicates or conflicts were found
## Common Scenarios and Examples
### Wrong Season/Episode Parsing
**User**: "种子名 `[SubGroup] My Show - 13 [1080P]`这是第二季第1集但被识别成第13集"
**Solution**: Episode offset to subtract 12:
```
# My Show 第二季集数偏移13->1
\[SubGroup\] <> \[1080P\] >> EP-12
```
### Unwanted Text Causing Wrong Identification
**User**: "种子名 `My.Show.2024.REPACK.1080p.mkv`REPACK导致识别异常"
**Solution**: Block word:
```
# 屏蔽REPACK标记
REPACK
```
### Non-Standard Naming
**User**: "文件名 `[OldName] EP01.mkv`,应该识别为 NewName"
**Solution**: Replacement:
```
# OldName替换为NewName
OldName => NewName
```
### Force TMDB ID Recognition
**User**: "种子名 `Some.Weird.Name.S01E01.1080p.mkv`识别不到TMDB ID是12345是电视剧"
**Solution**: Direct ID specification:
```
# 强制识别Some.Weird.Name为TMDB ID 12345
Some\.Weird\.Name => {[tmdbid=12345;type=tv;s=1]}
```
### Combined Fix
**User**: "种子名 `[Baha][OldTitle][13][1080P]`标题应该是NewTitle而且13应该是第二季第1集"
**Solution**: Combined replacement + episode offset:
```
# OldTitle替换为NewTitle并偏移集数
OldTitle => NewTitle && \[Baha\] <> \[1080P\] >> EP-12
```
### Multiple Episode Numbers in One Title
**User**: "种子名 `[Group] Title - 13-14 [1080P]`应该是第1-2集"
**Solution**: Episode offset (handles multiple numbers between delimiters):
```
# Title 集数偏移
\[Group\] <> \[1080P\] >> EP-12
```
## WordsMatcher Processing Logic Reference
The `WordsMatcher.prepare()` method (in `app/core/meta/words.py`) processes each rule in order:
1. Skip empty lines and lines starting with `#`
2. Detect format by checking operator presence:
- Contains ` => ` AND ` && ` AND ` >> ` AND ` <> ` → Combined format (4)
- Contains ` => ` → Replacement format (2)
- Contains ` >> ` AND ` <> ` → Episode offset format (3)
- Otherwise → Block word format (1)
3. For combined format, replacement runs first; episode offset only runs if replacement succeeded
4. Returns the modified title and a list of rules that were actually applied
5. Priority: per-subscribe `custom_words` parameter takes precedence over global `CustomIdentifiers`
## Safety Notes
- Always query existing rules first before updating
- Never remove existing rules unless the user explicitly asks
- Add comment lines before new rules for maintainability
- When uncertain about the correct approach, present multiple options and let the user choose

View File

@@ -10,6 +10,7 @@ from tests.test_mediascrape import (
)
from tests.test_metainfo import MetaInfoTest
from tests.test_object import ObjectUtilsTest
from tests.test_subscribe_chain import SubscribeChainTest
if __name__ == '__main__':
@@ -36,6 +37,9 @@ if __name__ == '__main__':
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestMediaScrapingTVDirectory))
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestMediaScrapeEvents))
# 测试订阅洗版匹配
suite.addTest(SubscribeChainTest('test_is_episode_range_covered'))
# 运行测试
runner = unittest.TextTestRunner()
runner.run(suite)

View File

@@ -2,7 +2,7 @@ import sys
import unittest
from pathlib import Path
from unittest.mock import patch, MagicMock
# ruff: noqa: E402
sys.modules['app.helper.sites'] = MagicMock()
sys.modules['app.db.systemconfig_oper'] = MagicMock()
sys.modules['app.db.systemconfig_oper'].SystemConfigOper.return_value.get.return_value = None
@@ -172,6 +172,62 @@ class TestMediaScrapingImages(unittest.TestCase):
self.assertEqual(len(calls), 1)
self.assertEqual(calls[0].kwargs["url"], "http://season01")
def test_scrape_episode_thumb_image_path(self):
fileitem = schemas.FileItem(path="/tv/Show/Season 1/S01E01.mp4", name="S01E01.mp4", type="file", storage="local")
parent_item = schemas.FileItem(path="/tv/Show/Season 1", name="Season 1", type="dir", storage="local")
mediainfo = MediaInfo()
self.media_chain.metadata_img.return_value = {
"thumb.jpg": "http://episode-thumb"
}
self.media_chain.scraping_policies.option.return_value = ScrapingOption("episode", "thumb", ScrapingPolicy.OVERWRITE)
self.media_chain.storagechain.get_file_item.return_value = None
self.media_chain._scrape_images_generic(
fileitem,
mediainfo,
ScrapingTarget.EPISODE,
parent_fileitem=parent_item,
season_number=1,
episode_number=1
)
self.media_chain.metadata_img.assert_called_once_with(
mediainfo=mediainfo,
season=1,
episode=1
)
self.media_chain._download_and_save_image.assert_called_once_with(
fileitem=parent_item,
path=Path("/tv/Show/Season 1/S01E01.jpg"),
url="http://episode-thumb"
)
def test_scrape_episode_thumb_image_path_via_parent_lookup(self):
fileitem = schemas.FileItem(path="/tv/Show/Season 1/S01E01.mp4", name="S01E01.mp4", type="file", storage="local")
parent_item = schemas.FileItem(path="/tv/Show/Season 1", name="Season 1", type="dir", storage="local")
mediainfo = MediaInfo()
self.media_chain.metadata_img.return_value = {
"thumb.jpg": "http://episode-thumb"
}
self.media_chain.scraping_policies.option.return_value = ScrapingOption("episode", "thumb", ScrapingPolicy.OVERWRITE)
self.media_chain.storagechain.get_parent_item.return_value = parent_item
self.media_chain.storagechain.get_file_item.return_value = None
self.media_chain._scrape_images_generic(
fileitem,
mediainfo,
ScrapingTarget.EPISODE,
season_number=1,
episode_number=1
)
self.media_chain.storagechain.get_parent_item.assert_called_once_with(fileitem)
self.media_chain._download_and_save_image.assert_called_once_with(
fileitem=parent_item,
path=Path("/tv/Show/Season 1/S01E01.jpg"),
url="http://episode-thumb"
)
@patch("app.chain.media.RequestUtils")
@patch("app.chain.media.NamedTemporaryFile")
@patch("app.chain.media.Path.chmod")
@@ -225,16 +281,22 @@ class TestMediaScrapingTVDirectory(unittest.TestCase):
def test_initialize_tv_directory_specials(self, mock_settings):
# mock specials directory recognition
mock_settings.RENAME_FORMAT_S0_NAMES = ["Specials", "SPs"]
mock_settings.RMT_MEDIAEXT = [".mp4", ".mkv"]
fileitem = schemas.FileItem(path="/tv/Show/Specials", name="Specials", type="dir", storage="local")
meta = MetaInfo("Show")
mediainfo = MediaInfo(type=MediaType.TV)
self.media_chain.storagechain.list_files.return_value = []
filepath = Path(fileitem.path)
self.media_chain._handle_tv_scraping(fileitem, meta, mediainfo, init_folder=True, parent=None, overwrite=False, recursive=True)
self.media_chain._initialize_tv_directory_metadata(
fileitem=fileitem,
filepath=filepath,
meta=meta,
mediainfo=mediainfo,
parent=None,
overwrite=False,
)
self.media_chain._scrape_nfo_generic.assert_called_with(
self.media_chain._scrape_nfo_generic.assert_called_once_with(
current_fileitem=fileitem,
meta=meta,
mediainfo=mediainfo,
@@ -242,7 +304,7 @@ class TestMediaScrapingTVDirectory(unittest.TestCase):
overwrite=False,
season_number=0
)
self.media_chain._scrape_images_generic.assert_called_with(
self.media_chain._scrape_images_generic.assert_called_once_with(
current_fileitem=fileitem,
mediainfo=mediainfo,
item_type=ScrapingTarget.SEASON,
@@ -251,15 +313,25 @@ class TestMediaScrapingTVDirectory(unittest.TestCase):
season_number=0
)
def test_initialize_tv_directory_season(self):
@patch("app.chain.media.settings")
def test_initialize_tv_directory_season(self, mock_settings):
mock_settings.RENAME_FORMAT_S0_NAMES = ["Specials", "SPs"]
fileitem = schemas.FileItem(path="/tv/Show/Season 1", name="Season 1", type="dir", storage="local")
meta = MetaInfo("Show")
mediainfo = MediaInfo(type=MediaType.TV)
self.media_chain.storagechain.list_files.return_value = []
filepath = Path(fileitem.path)
self.media_chain._handle_tv_scraping(fileitem, meta, mediainfo, init_folder=True, parent=None, overwrite=False, recursive=True)
self.media_chain._initialize_tv_directory_metadata(
fileitem=fileitem,
filepath=filepath,
meta=meta,
mediainfo=mediainfo,
parent=None,
overwrite=False,
)
self.media_chain._scrape_nfo_generic.assert_called_with(
self.media_chain._scrape_nfo_generic.assert_called_once_with(
current_fileitem=fileitem,
meta=meta,
mediainfo=mediainfo,
@@ -272,18 +344,17 @@ class TestMediaScrapingTVDirectory(unittest.TestCase):
class TestMediaScrapeEvents(unittest.TestCase):
def setUp(self):
self.media_chain = MediaChain()
self.media_chain.storagechain = MagicMock()
@patch("app.chain.media.MediaChain.scrape_metadata")
@patch("app.chain.media.StorageChain.get_item")
@patch("app.chain.media.StorageChain.get_parent_item")
def test_scrape_metadata_event_file(
self, mock_get_parent, mock_get_item, mock_scrape_metadata
self, mock_scrape_metadata
):
fileitem = schemas.FileItem(path="/movies/movie.mkv", name="movie.mkv", type="file", storage="local")
parent_item = schemas.FileItem(path="/movies", name="movies", type="dir", storage="local")
mock_get_item.return_value = fileitem
mock_get_parent.return_value = parent_item
self.media_chain.storagechain.get_item.return_value = fileitem
self.media_chain.storagechain.get_parent_item.return_value = parent_item
mediainfo = MediaInfo()
event = Event(
@@ -306,15 +377,13 @@ class TestMediaScrapeEvents(unittest.TestCase):
)
@patch("app.chain.media.MediaChain.scrape_metadata")
@patch("app.chain.media.StorageChain.get_item")
@patch("app.chain.media.StorageChain.is_bluray_folder")
def test_scrape_metadata_event_dir_bluray(
self, mock_is_bluray, mock_get_item, mock_scrape_metadata
self, mock_scrape_metadata
):
fileitem = schemas.FileItem(path="/movies/bluray_movie", name="bluray_movie", type="dir", storage="local")
mock_get_item.return_value = fileitem
mock_is_bluray.return_value = True
self.media_chain.storagechain.get_item.return_value = fileitem
self.media_chain.storagechain.is_bluray_folder.return_value = True
mediainfo = MediaInfo()
event = Event(
@@ -338,22 +407,19 @@ class TestMediaScrapeEvents(unittest.TestCase):
)
@patch("app.chain.media.MediaChain.scrape_metadata")
@patch("app.chain.media.StorageChain.get_item")
@patch("app.chain.media.StorageChain.is_bluray_folder")
@patch("app.chain.media.StorageChain.get_file_item")
def test_scrape_metadata_event_dir_with_filelist(
self, mock_get_file_item, mock_is_bluray, mock_get_item, mock_scrape_metadata
self, mock_scrape_metadata
):
fileitem = schemas.FileItem(path="/tv/show", name="show", type="dir", storage="local")
mock_get_item.return_value = fileitem
mock_is_bluray.return_value = False
self.media_chain.storagechain.get_item.return_value = fileitem
self.media_chain.storagechain.is_bluray_folder.return_value = False
def side_effect_get_file_item(storage, path):
path_str = str(path)
return schemas.FileItem(path=path_str, name=Path(path_str).name, type="dir" if "." not in path_str else "file", storage="local")
mock_get_file_item.side_effect = side_effect_get_file_item
self.media_chain.storagechain.get_file_item.side_effect = side_effect_get_file_item
mediainfo = MediaInfo()
event = Event(
@@ -377,13 +443,12 @@ class TestMediaScrapeEvents(unittest.TestCase):
self.assertIn("/tv/show/Season 1/S01E01.mp4", paths)
@patch("app.chain.media.MediaChain.scrape_metadata")
@patch("app.chain.media.StorageChain.get_item")
def test_scrape_metadata_event_dir_full(
self, mock_get_item, mock_scrape_metadata
self, mock_scrape_metadata
):
fileitem = schemas.FileItem(path="/movies/movie", name="movie", type="dir", storage="local")
mock_get_item.return_value = fileitem
self.media_chain.storagechain.get_item.return_value = fileitem
mediainfo = MediaInfo()
meta = MetaInfo("movie")
@@ -501,22 +566,19 @@ class TestMediaScrapeEvents(unittest.TestCase):
mock_handle_tv.assert_not_called()
@patch("app.chain.media.MediaChain.scrape_metadata")
@patch("app.chain.media.StorageChain.get_item")
@patch("app.chain.media.StorageChain.is_bluray_folder")
@patch("app.chain.media.StorageChain.get_file_item")
def test_scrape_metadata_event_dir_with_multiple_files(
self, mock_get_file_item, mock_is_bluray, mock_get_item, mock_scrape_metadata
self, mock_scrape_metadata
):
fileitem = schemas.FileItem(path="/movies/collection", name="collection", type="dir", storage="local")
mock_get_item.return_value = fileitem
mock_is_bluray.return_value = False
self.media_chain.storagechain.get_item.return_value = fileitem
self.media_chain.storagechain.is_bluray_folder.return_value = False
def side_effect_get_file_item(storage, path):
path_str = str(path)
return schemas.FileItem(path=path_str, name=Path(path_str).name, type="dir" if "." not in path_str else "file", storage="local")
mock_get_file_item.side_effect = side_effect_get_file_item
self.media_chain.storagechain.get_file_item.side_effect = side_effect_get_file_item
mediainfo = MediaInfo()
event = Event(
@@ -546,22 +608,19 @@ class TestMediaScrapeEvents(unittest.TestCase):
self.assertIn("/movies/collection/movie3.avi", paths)
@patch("app.chain.media.MediaChain.scrape_metadata")
@patch("app.chain.media.StorageChain.get_item")
@patch("app.chain.media.StorageChain.is_bluray_folder")
@patch("app.chain.media.StorageChain.get_file_item")
def test_scrape_metadata_event_dir_with_tv_multi_seasons_episodes(
self, mock_get_file_item, mock_is_bluray, mock_get_item, mock_scrape_metadata
self, mock_scrape_metadata
):
fileitem = schemas.FileItem(path="/tv/MultiSeasonShow", name="MultiSeasonShow", type="dir", storage="local")
mock_get_item.return_value = fileitem
mock_is_bluray.return_value = False
self.media_chain.storagechain.get_item.return_value = fileitem
self.media_chain.storagechain.is_bluray_folder.return_value = False
def side_effect_get_file_item(storage, path):
path_str = str(path)
return schemas.FileItem(path=path_str, name=Path(path_str).name, type="dir" if "." not in path_str else "file", storage="local")
mock_get_file_item.side_effect = side_effect_get_file_item
self.media_chain.storagechain.get_file_item.side_effect = side_effect_get_file_item
mediainfo = MediaInfo()
event = Event(

View File

@@ -0,0 +1,175 @@
from types import SimpleNamespace
from unittest import TestCase
from app.chain.subscribe import SubscribeChain
from app.core.metainfo import MetaInfo
class SubscribeChainTest(TestCase):
    """Unit tests for SubscribeChain's episode-range coverage check."""

    def test_is_episode_range_covered(self):
        """Verify ``_is_episode_range_covered`` against real-world torrent titles.

        Each case supplies a torrent title/subtitle plus the subscription's
        episode range (``start_episode``/``total_episode``) and the expected
        verdict: True when the release covers the whole subscribed range
        (complete seasons, "全N集" full packs, or titles where no episode
        info can be recognized), False for partial releases (single episodes
        or episode sub-ranges).
        """
        cases = [
            {
                "title": "Cherry Season S01 2014 2160p 60fps WEB-DL H265 AAC-XXX",
                "subtitle": "",
                "subscribe": {"start_episode": None, "total_episode": 51},
                "expected": True,
            },
            {
                "title": "【爪爪字幕组】★7月新番[欢迎来到实力至上主义的教室 第二季/Youkoso Jitsuryoku Shijou Shugi no Kyoushitsu e S2][11][1080p][HEVC][GB][MP4][招募翻译校对]",
                "subtitle": "",
                "subscribe": {"start_episode": None, "total_episode": 13},
                "expected": False,
            },
            {
                "title": "[秋叶原冥途战争][Akiba Maid Sensou][2022][WEB-DL][1080][TV Series][第01话][LeagueWEB]",
                "subtitle": "",
                "subscribe": {"start_episode": None, "total_episode": 12},
                "expected": False,
            },
            {
                "title": "Qi Refining for 3000 Years S01E06 2022 1080p B-Blobal WEB-DL X264 AAC-AnimeS@AdWeb",
                "subtitle": "",
                "subscribe": {"start_episode": None, "total_episode": 16},
                "expected": False,
            },
            {
                "title": "The Heart of Genius S01 13-14 2022 1080p WEB-DL H264 AAC",
                "subtitle": "",
                "subscribe": {"start_episode": None, "total_episode": 34},
                "expected": False,
            },
            {
                "title": "[xyx98]传颂之物/Utawarerumono/うたわれるもの[BDrip][1920x1080][TV 01-26 Fin][hevc-yuv420p10 flac_ac3][ENG PGS]",
                "subtitle": "",
                "subscribe": {"start_episode": None, "total_episode": 26},
                "expected": True,
            },
            {
                "title": "I Woke Up a Vampire S02 2023 2160p NF WEB-DL DDP5.1 Atmos H 265-HHWEB",
                "subtitle": "醒来变成吸血鬼 第二季 | 全8集 | 4K | 类型: 喜剧/家庭/奇幻 | 导演: TommyLynch | 主演: NikoCeci/ZebastinBorjeau/安娜·阿劳约/KaileenAngelicChang/KrisSiddiqi",
                "subscribe": {"start_episode": None, "total_episode": 8},
                "expected": True,
            },
            {
                "title": "Shadows of the Void S01 2024 1080p WEB-DL H264 AAC-HHWEB",
                "subtitle": "虚无边境 | 第01-02集 | 1080p | 类型: 动画 | 导演: 巴西 | 主演: 山新/周一菡/皇贞季/Kenz/李佳怡 [内嵌中字]",
                "subscribe": {"start_episode": None, "total_episode": 13},
                "expected": False,
            },
            {
                "title": "Mai Xiang S01 2019 2160p WEB-DL H.265 DDP2.0-HHWEB",
                "subtitle": "麦香 | 全36集 | 4K | 类型:剧情/爱情/家庭 | 主演:傅晶/章呈赫/王伟/沙景昌/何音",
                "subscribe": {"start_episode": None, "total_episode": 36},
                "expected": True,
            },
            {
                "title": "Jigokuraku S01E14-E25 2023 1080p CR WEB-DL x264 AAC-Nest@ADWeb",
                "subtitle": "地狱乐 / 地獄楽 / Hells Paradise [14-25Fin] [中日双语字幕]",
                "subscribe": {"start_episode": 14, "total_episode": 25},
                "expected": True,
            },
            {
                "title": "Jigokuraku S01 2023 1080p BluRay Remux AVC FLAC 2.0-AnimeF@ADE",
                "subtitle": "地狱乐/Hell's Paradise: Jigokuraku [01-13Fin] [中日双语字幕]",
                "subscribe": {"start_episode": None, "total_episode": 13},
                "expected": True,
            },
            {
                "title": "Jigokuraku S02E12 2026 1080p NF WEB-DL x264 AAC-ADWeb",
                "subtitle": "地狱乐 第二季 地獄楽 第二期 第12集 | 类型: 动画",
                "subscribe": {"start_episode": None, "total_episode": 12},
                "expected": False,
            },
            {
                "title": "Jigokuraku S02E05-E07 2026 1080p NF WEB-DL x264 AAC-ADWeb",
                "subtitle": "地狱乐 第二季 地獄楽 第二期 第05-07集 | 类型: 动画",
                "subscribe": {"start_episode": None, "total_episode": 12},
                "expected": False,
            },
            {
                "title": "Bungo Stray Dogs S01 2016 1080p KKTV WEB-DL x264 AAC-ADWeb",
                "subtitle": "文豪野犬 文豪ストレイドッグス 又名: 文豪Stray Dogs 第一季 全12集 | 类型: 剧情 / 动作 / 动画 主演: 上村祐翔 / 宫野真守 / 细谷佳正 *内嵌繁体字幕*",
                "subscribe": {"start_episode": None, "total_episode": 12},
                "expected": True,
            },
            {
                "title": "Bungou Stray Dogs S1+S2+S3+OAD 1080p BDRip HEVC FLAC-Snow-Raws",
                "subtitle": "文豪野犬 第1-3季",
                "subscribe": {"start_episode": None, "total_episode": 36},
                "expected": True,
            },
            {
                "title": "Bungou Stray Dogs S1+S2+S3+OAD 1080p BDRip HEVC FLAC-Snow-Raws",
                "subtitle": "文豪野犬 第1-3季",
                "subscribe": {"start_episode": None, "total_episode": 60},
                "expected": True,  # no episode numbers recognized -> treated as covering everything
            },
            {
                "title": "Fu Gui S01 2005 2160p WEB-DL H265 AAC-HHWEB",
                "subtitle": "福贵 | 全33集 | 4K | 类型: 剧情/家庭 | 导演: 朱正/袁进 | 主演: 陈创/刘敏涛/李丁/张鹰/温玉娟",
                "subscribe": {"start_episode": None, "total_episode": 33},
                "expected": True,
            },
            {
                "title": "The Story of Ming Lan S01 2018 2160p WEB-DL CHDWEB",
                "subtitle": "知否知否应是绿肥红瘦 全78集 | 2160p | 国语/中字 | 60帧高码TV版 | 类型:剧情/爱情/古装 | 主演:赵丽颖/冯绍峰/朱一龙/施诗/张佳宁",
                "subscribe": {"start_episode": None, "total_episode": 78},
                "expected": True,
            },
            {
                "title": "Love Beyond the Grave S01 2026 2160p WEB-DL H265 AAC-HHWEB",
                "subtitle": "白日提灯 / 慕胥辞 | 第18集 | 4K | 类型: 剧情 | 导演: 秦榛 | 主演: 迪丽热巴/陈飞宇/魏哲鸣/张俪/高鹤元",
                "subscribe": {"start_episode": None, "total_episode": 40},
                "expected": False,
            },
            {
                "title": "The Long Ballad S01 2021 2160p WEB-DL H265 AAC-HHWEB",
                "subtitle": "长歌行 | 全49集 | 4K | 类型: 剧情/爱情/古装 | 主演: 迪丽热巴/吴磊/刘宇宁/赵露思/方逸伦",
                "subscribe": {"start_episode": None, "total_episode": 49},
                "expected": True,
            },
            {
                "title": "The Long Ballad S01E01-E04 2021 2160p WEB-DL H265 AAC-HHWEB",
                "subtitle": "长歌行 | 第01-04集 | 4K | 类型: 剧情/爱情/古装 | 主演: 迪丽热巴/吴磊/刘宇宁/赵露思/方逸伦",
                "subscribe": {"start_episode": None, "total_episode": 49},
                "expected": False,
            },
            {
                "title": "Spy x Family S02 2023 1080p Baha WEB-DL x264 AAC-ADWeb",
                "subtitle": "间谍过家家 第二季 / SPY×FAMILY Season 2 [01-12Fin] [简繁内封字幕]",
                "subscribe": {"start_episode": None, "total_episode": 12},
                "expected": True,
            },
            {
                "title": "Spy x Family S02E03-E07 2023 1080p Baha WEB-DL x264 AAC-ADWeb",
                "subtitle": "间谍过家家 第二季 / SPY×FAMILY Season 2 第03-07集 [简繁内封字幕]",
                "subscribe": {"start_episode": None, "total_episode": 12},
                "expected": False,
            },
            {
                "title": "Naruto Shippuden S01-S21 Complete 1080p BluRay x264 AAC-ADWeb",
                "subtitle": "火影忍者 疾风传 全500集 [1080p][简中字幕]",
                "subscribe": {"start_episode": None, "total_episode": 500},
                "expected": True,
            },
            {
                "title": "Naruto Shippuden S01-S21 Complete 1080p BluRay x264 AAC-ADWeb",
                "subtitle": "火影忍者 疾风传 第01-500集 [1080p][简中字幕]",
                "subscribe": {"start_episode": 201, "total_episode": 500},
                "expected": True,
            },
        ]
        for case in cases:
            # custom_words=["#"] supplies a comment-only identifier list,
            # presumably to bypass the globally configured custom identifiers
            # during parsing — TODO confirm against WordsMatcher behavior.
            meta = MetaInfo(
                title=case["title"], subtitle=case["subtitle"], custom_words=["#"]
            )
            # SimpleNamespace stands in for the Subscribe ORM object; only
            # start_episode/total_episode are read by the method under test.
            subscribe = SimpleNamespace(**case["subscribe"])
            self.assertEqual(
                SubscribeChain._is_episode_range_covered(
                    meta=meta,
                    subscribe=subscribe,
                ),
                case["expected"],
            )

View File

@@ -1,2 +1,2 @@
APP_VERSION = 'v2.9.25'
FRONTEND_VERSION = 'v2.9.25'
APP_VERSION = 'v2.9.26'
FRONTEND_VERSION = 'v2.9.26'