feat: add agent tools for querying and managing filter rules and rule groups

- Add tools for querying built-in and custom filter rules, and for adding, updating, and deleting custom rules and rule groups
- Refactor filter module to use shared builtin rule definitions
- Enhance rule group querying to include syntax guidance and usage references
- Add unit tests for agent filter-rule tool registration and parsing logic
This commit is contained in:
jxxghp
2026-04-30 12:56:38 +08:00
parent abda9d3212
commit 28a2386f2f
15 changed files with 1776 additions and 166 deletions

View File

@@ -355,7 +355,7 @@ class ActivityLogMiddleware(AgentMiddleware[ActivityLogState, ContextT, Response
def modify_request(self, request: ModelRequest[ContextT]) -> ModelRequest[ContextT]:
"""将活动日志注入系统消息。"""
contents = request.state.get("activity_log_contents", {})
contents = request.state.get("activity_log_contents", {}) # noqa
activity_log_prompt = self._format_activity_log(contents)
new_system_message = append_to_system_message(

View File

@@ -16,6 +16,14 @@ from app.agent.tools.impl.test_site import TestSiteTool
from app.agent.tools.impl.query_subscribes import QuerySubscribesTool
from app.agent.tools.impl.query_subscribe_shares import QuerySubscribeSharesTool
from app.agent.tools.impl.query_rule_groups import QueryRuleGroupsTool
from app.agent.tools.impl.query_builtin_filter_rules import QueryBuiltinFilterRulesTool
from app.agent.tools.impl.query_custom_filter_rules import QueryCustomFilterRulesTool
from app.agent.tools.impl.add_custom_filter_rule import AddCustomFilterRuleTool
from app.agent.tools.impl.update_custom_filter_rule import UpdateCustomFilterRuleTool
from app.agent.tools.impl.delete_custom_filter_rule import DeleteCustomFilterRuleTool
from app.agent.tools.impl.add_rule_group import AddRuleGroupTool
from app.agent.tools.impl.update_rule_group import UpdateRuleGroupTool
from app.agent.tools.impl.delete_rule_group import DeleteRuleGroupTool
from app.agent.tools.impl.query_popular_subscribes import QueryPopularSubscribesTool
from app.agent.tools.impl.query_subscribe_history import QuerySubscribeHistoryTool
from app.agent.tools.impl.delete_subscribe import DeleteSubscribeTool
@@ -124,7 +132,15 @@ class MoviePilotToolFactory:
QuerySubscribesTool,
QuerySubscribeSharesTool,
QueryPopularSubscribesTool,
QueryBuiltinFilterRulesTool,
QueryCustomFilterRulesTool,
QueryRuleGroupsTool,
AddCustomFilterRuleTool,
UpdateCustomFilterRuleTool,
DeleteCustomFilterRuleTool,
AddRuleGroupTool,
UpdateRuleGroupTool,
DeleteRuleGroupTool,
QuerySubscribeHistoryTool,
DeleteSubscribeTool,
QueryDownloadTasksTool,

View File

@@ -0,0 +1,540 @@
"""过滤规则 Agent 工具共用的校验、查询和引用处理逻辑。"""
import copy
import re
from typing import Any, Dict, Iterable, Optional
from app.core.event import eventmanager
from app.db import AsyncSessionFactory
from app.db.models.subscribe import Subscribe
from app.db.systemconfig_oper import SystemConfigOper
from app.helper.rule import RuleHelper
from app.modules.filter.RuleParser import RuleParser
from app.modules.filter.builtin_rules import BUILTIN_RULE_SET
from app.schemas import CustomRule, FilterRuleGroup
from app.schemas.event import ConfigChangeEventData
from app.schemas.types import EventType, SystemConfigKey
# Custom rule IDs are restricted to ASCII letters and digits.
RULE_ID_PATTERN = re.compile(r"^[A-Za-z0-9]+$")
# Tokens inside a rule_string: either letter-led identifiers (CNSUB) or
# digit-led ones with at least two chars (4K, 1080P).
RULE_TOKEN_PATTERN = re.compile(r"[A-Za-z][A-Za-z0-9]*|[0-9][A-Za-z0-9]+")
# Accepts a single number or a 'low-high' range, decimals allowed.
NUMERIC_RANGE_PATTERN = re.compile(
    r"^\d+(?:\.\d+)?(?:\s*-\s*\d+(?:\.\d+)?)?$"
)
# English aliases are folded onto the canonical Chinese values the backend stores.
MEDIA_TYPE_ALIASES = {
    "movie": "电影",
    "film": "电影",
    "tv": "电视剧",
    "series": "电视剧",
    "show": "电视剧",
    "电影": "电影",
    "电视剧": "电视剧",
}
# Machine-readable syntax guidance returned to the agent alongside query results.
RULE_STRING_SYNTAX = {
    "level_separator": ">",
    "and_operator": "&",
    "not_operator": "!",
    "supported_grouping": "Parentheses are supported inside a single level.",
    "spacing_note": "Prefer spaces around '&', and '>' for readability; use '!RULE' for negation.",
    "match_order": "Levels are evaluated from left to right. The first matched level wins and stops further matching.",
    "match_result": "If no level matches, the torrent is filtered out. If a level matches, the torrent is kept.",
    "writing_workflow": [
        "First query built-in rules and custom rules to learn valid rule IDs.",
        "Compose one priority level with '&', '!' and optional parentheses.",
        "Join multiple priority levels with '>' from highest priority to lowest priority.",
        "Use spaces around '&', and '>' for readability.",
    ],
    "examples": [
        {
            "description": "Prefer torrents with special subtitles and Chinese dubbing at 4K, otherwise fall back to Chinese subtitles and Chinese dubbing at 4K.",
            "rule_string": "SPECSUB & CNVOI & 4K & !BLU & !REMUX & !WEBDL > CNSUB & CNVOI & 4K & !BLU & !REMUX & !WEBDL",
        },
        {
            "description": "Inside one level, require 4K and reject Blu-ray source.",
            "rule_string": "4K & !BLU",
        },
        {
            "description": "Inside one level, accept either special subtitles or Chinese subtitles, then also require 1080P.",
            "rule_string": "(SPECSUB | CNSUB) & 1080P",
        },
    ],
}
def normalize_optional_text(value: Optional[str]) -> Optional[str]:
    """Collapse blank or whitespace-only input to None so meaningless empty values are never stored."""
    if value is None:
        return None
    stripped = str(value).strip()
    if not stripped:
        return None
    return stripped
def normalize_media_type(value: Optional[str]) -> Optional[str]:
    """Normalize English/Chinese media-type input to the canonical Chinese value used by the backend.

    Args:
        value: raw input such as 'movie', 'tv', '电影', '电视剧' or an alias; may be None/blank.

    Returns:
        '电影' or '电视剧', or None when the input is empty.

    Raises:
        ValueError: for any unsupported media type.
    """
    value = normalize_optional_text(value)
    if not value:
        return None
    normalized = MEDIA_TYPE_ALIASES.get(value.lower(), value)
    if normalized not in {"电影", "电视剧"}:
        # Separate the accepted options so the error message is readable.
        raise ValueError(
            "media_type 仅支持 '电影'、'电视剧'、'movie'、'tv'"
        )
    return normalized
def validate_numeric_range(
    field_name: str, value: Optional[str]
) -> Optional[str]:
    """Validate single-value or range fields such as size_range / publish_time.

    Args:
        field_name: field name used in error messages.
        value: raw input like '1000' or '1000-5000'; may be None/blank.

    Returns:
        The stripped value, or None when the input is empty.

    Raises:
        ValueError: on malformed input or an inverted range.
    """
    value = normalize_optional_text(value)
    if not value:
        return None
    if not NUMERIC_RANGE_PATTERN.match(value):
        # Separate the two example forms so the message is readable.
        raise ValueError(
            f"{field_name} 格式无效,支持 '1000' 或 '1000-5000' 这类数字区间格式"
        )
    parts = [float(item.strip()) for item in value.split("-")]
    if len(parts) == 2 and parts[0] > parts[1]:
        raise ValueError(f"{field_name} 区间起始值不能大于结束值")
    return value
def validate_seeders(value: Optional[str]) -> Optional[str]:
    """Reject values that a later int() parse would choke on; seeders must be a non-negative integer."""
    normalized = normalize_optional_text(value)
    if normalized is None:
        return None
    if not normalized.isdigit():
        raise ValueError("seeders 必须是非负整数")
    return normalized
def get_builtin_rules() -> Dict[str, dict]:
    """Return a deep copy of the built-in rules so callers cannot mutate the shared constant."""
    return {
        rule_id: copy.deepcopy(payload)
        for rule_id, payload in BUILTIN_RULE_SET.items()
    }
def get_custom_rules() -> list[CustomRule]:
    """Fetch the currently configured custom filter rules via RuleHelper."""
    return RuleHelper().get_custom_rules()
def get_rule_groups() -> list[FilterRuleGroup]:
    """Fetch the currently configured filter rule groups via RuleHelper."""
    return RuleHelper().get_rule_groups()
def build_custom_rule_map(rules: Optional[Iterable["CustomRule"]] = None) -> Dict[str, "CustomRule"]:
    """Index custom rules by their non-empty id; loads the current rules when none are given."""
    source = rules if rules else get_custom_rules()
    mapping: Dict[str, "CustomRule"] = {}
    for rule in source:
        if rule.id:
            mapping[rule.id] = rule
    return mapping
def build_rule_group_map(
    groups: Optional[Iterable["FilterRuleGroup"]] = None,
) -> Dict[str, "FilterRuleGroup"]:
    """Index rule groups by their non-empty name; loads the current groups when none are given."""
    source = groups if groups else get_rule_groups()
    mapping: Dict[str, "FilterRuleGroup"] = {}
    for group in source:
        if group.name:
            mapping[group.name] = group
    return mapping
def extract_rule_tokens(rule_string: Optional[str]) -> list[str]:
    """Pull rule IDs out of a rule string, de-duplicated while keeping first-seen order."""
    if not rule_string:
        return []
    # An ordered dict keyed by token de-duplicates without losing order,
    # which keeps display and error output stable.
    seen: dict = {}
    for token in RULE_TOKEN_PATTERN.findall(rule_string):
        seen.setdefault(token, None)
    return list(seen)
def parse_rule_string(rule_string: str) -> dict:
    """Parse a rule string with the backend's own RuleParser and expose per-level metadata.

    Returns a dict with the normalized rule_string, one entry per priority
    level, and the de-duplicated list of referenced rule IDs.

    Raises:
        ValueError: when the string is empty, a level is blank, or a level
            fails to parse.
    """
    normalized = normalize_optional_text(rule_string)
    if not normalized:
        raise ValueError("rule_string 不能为空")
    levels = [segment.strip() for segment in normalized.split(">")]
    # Reject blank levels up front so the syntax errors below stay meaningful.
    for segment in levels:
        if not segment:
            raise ValueError("rule_string 不能包含空层级,请检查 '>' 两侧内容")
    parser = RuleParser()
    parsed_levels = []
    for priority, expression in enumerate(levels, start=1):
        try:
            parser.parse(expression)
        except Exception as exc:  # pragma: no cover - depends on pyparsing's concrete exceptions
            raise ValueError(f"规则串第 {priority} 层语法错误: {exc}") from exc
        parsed_levels.append(
            {
                "priority": priority,
                "expression": expression,
                "referenced_rules": extract_rule_tokens(expression),
            }
        )
    return {
        "rule_string": " > ".join(levels),
        "levels": parsed_levels,
        "referenced_rules": extract_rule_tokens(normalized),
    }
def validate_rule_string(rule_string: str, available_rule_ids: Iterable[str]) -> dict:
    """Validate both the syntax of a rule string and that every referenced rule exists."""
    parsed = parse_rule_string(rule_string)
    known_ids = set(available_rule_ids)
    missing = sorted(set(parsed["referenced_rules"]) - known_ids)
    if missing:
        raise ValueError(
            f"rule_string 引用了不存在的规则: {', '.join(missing)}"
        )
    return parsed
def serialize_builtin_rule(rule_id: str, payload: dict) -> dict:
    """Shape a built-in rule into an agent-friendly dict without mutating the shared payload."""
    # Deep-copy first so nested structures in the shared constant stay untouched.
    return {**copy.deepcopy(payload), "id": rule_id, "source": "builtin"}
def serialize_custom_rule(rule: "CustomRule", group_refs: Optional[list[str]] = None) -> dict:
    """Dump a custom rule and attach the rule groups that reference it."""
    serialized = rule.model_dump(exclude_none=True)
    serialized["source"] = "custom"
    if group_refs:
        serialized["referenced_by_rule_groups"] = group_refs
    else:
        serialized["referenced_by_rule_groups"] = []
    return serialized
def serialize_rule_group(group: "FilterRuleGroup", usage: Optional[dict] = None) -> dict:
    """Dump a rule group, attaching parsed priority levels when possible plus usage info."""
    serialized = group.model_dump(exclude_none=True)
    rule_string = group.rule_string
    if not rule_string:
        serialized["syntax_valid"] = False
        serialized["syntax_error"] = "rule_string 为空"
        serialized["referenced_rules"] = []
    else:
        try:
            parsed = parse_rule_string(rule_string)
        except ValueError as exc:
            # Still report the raw tokens so the agent can see what was referenced.
            serialized["syntax_valid"] = False
            serialized["syntax_error"] = str(exc)
            serialized["referenced_rules"] = extract_rule_tokens(rule_string)
        else:
            serialized["levels"] = parsed["levels"]
            serialized["referenced_rules"] = parsed["referenced_rules"]
            serialized["syntax_valid"] = True
    serialized["usage"] = usage if usage else default_rule_group_usage()
    return serialized
def default_rule_group_usage() -> dict:
    """Return a fresh usage record with no global settings or subscriptions referencing the group."""
    usage = dict.fromkeys(
        (
            "used_in_global_search",
            "used_in_global_subscribe",
            "used_in_global_best_version",
        ),
        False,
    )
    usage["subscribes"] = []
    return usage
async def collect_rule_group_usages(
    group_names: Optional[Iterable[str]] = None,
) -> Dict[str, dict]:
    """Collect how rule groups are referenced by global settings and subscriptions.

    Args:
        group_names: optional names to restrict the report to; when omitted,
            every group referenced anywhere gets an entry.

    Returns:
        Mapping of group name -> usage dict (shape of default_rule_group_usage()).
    """
    target_names = set(group_names or [])
    # Global filter-group selections for the search / subscribe / best-version flows.
    search_groups = set(
        SystemConfigOper().get(SystemConfigKey.SearchFilterRuleGroups) or []
    )
    subscribe_groups = set(
        SystemConfigOper().get(SystemConfigKey.SubscribeFilterRuleGroups) or []
    )
    best_version_groups = set(
        SystemConfigOper().get(SystemConfigKey.BestVersionFilterRuleGroups) or []
    )
    # Pre-seed requested names so they appear even with zero references.
    usage_map = {
        name: default_rule_group_usage()
        for name in target_names
    }
    def ensure_usage(name: str) -> dict:
        # Lazily create entries for groups discovered while scanning.
        if name not in usage_map:
            usage_map[name] = default_rule_group_usage()
        return usage_map[name]
    for name in search_groups:
        if target_names and name not in target_names:
            continue
        ensure_usage(name)["used_in_global_search"] = True
    for name in subscribe_groups:
        if target_names and name not in target_names:
            continue
        ensure_usage(name)["used_in_global_subscribe"] = True
    for name in best_version_groups:
        if target_names and name not in target_names:
            continue
        ensure_usage(name)["used_in_global_best_version"] = True
    # Scan every subscription's filter_groups for per-subscription references.
    async with AsyncSessionFactory() as db:
        subscribes = await Subscribe.async_list(db)
        for subscribe in subscribes:
            filter_groups = subscribe.filter_groups or []
            for name in filter_groups:
                if target_names and name not in target_names:
                    continue
                ensure_usage(name)["subscribes"].append(
                    {
                        "subscribe_id": subscribe.id,
                        "name": subscribe.name,
                        "season": subscribe.season,
                        "type": subscribe.type,
                        "username": subscribe.username,
                        "best_version": bool(subscribe.best_version),
                    }
                )
    return usage_map
def collect_custom_rule_group_refs(
    rule_groups: Iterable["FilterRuleGroup"],
    rule_ids: Optional[Iterable[str]] = None,
) -> Dict[str, list[str]]:
    """Map each custom rule id to the sorted names of rule groups that reference it."""
    wanted = set(rule_ids or [])
    # Requested ids always get an entry, even with zero references.
    refs: Dict[str, list[str]] = {rule_id: [] for rule_id in wanted}
    for group in rule_groups:
        if not group.name or not group.rule_string:
            continue
        for token in set(extract_rule_tokens(group.rule_string)):
            if wanted and token not in wanted:
                continue
            refs.setdefault(token, []).append(group.name)
    for group_names in refs.values():
        group_names.sort()
    return refs
def normalize_custom_rule(
    rule_id: str,
    name: str,
    include: Optional[str],
    exclude: Optional[str],
    size_range: Optional[str],
    seeders: Optional[str],
    publish_time: Optional[str],
    existing_rules: Iterable[CustomRule],
    original_rule_id: Optional[str] = None,
) -> CustomRule:
    """Single validation entry point for adding/updating a custom rule.

    Centralizes every check so callers never duplicate validation logic.

    Args:
        rule_id: candidate id; letters and digits only.
        name: display name; must be unique among custom rules.
        include/exclude: optional regex filters, passed through as-is.
        size_range/publish_time: optional single value or 'low-high' range.
        seeders: optional non-negative integer as a string.
        existing_rules: current custom rules used for uniqueness checks.
        original_rule_id: when updating, the id being replaced — exempts the
            rule's own previous id/name from the conflict checks.

    Returns:
        A normalized CustomRule ready to be persisted.

    Raises:
        ValueError: on any failed validation (empty fields, bad id format,
            conflict with a built-in rule, duplicate id or name, bad ranges).
    """
    normalized_rule_id = normalize_optional_text(rule_id)
    normalized_name = normalize_optional_text(name)
    if not normalized_rule_id:
        raise ValueError("rule_id 不能为空")
    if not normalized_name:
        raise ValueError("name 不能为空")
    if not RULE_ID_PATTERN.match(normalized_rule_id):
        raise ValueError("rule_id 仅支持英文字母和数字")
    # Built-in rule ids are reserved; only the rule's own previous id may reuse one.
    if (
        normalized_rule_id in BUILTIN_RULE_SET
        and normalized_rule_id != original_rule_id
    ):
        raise ValueError(
            f"rule_id '{normalized_rule_id}' 与内置规则冲突,不能覆盖内置规则"
        )
    for existing_rule in existing_rules:
        # Skip the rule being updated (matched by original_rule_id) in both checks.
        if (
            existing_rule.id == normalized_rule_id
            and existing_rule.id != original_rule_id
        ):
            raise ValueError(f"rule_id '{normalized_rule_id}' 已存在")
        if (
            existing_rule.name == normalized_name
            and existing_rule.id != original_rule_id
        ):
            raise ValueError(f"规则名称 '{normalized_name}' 已存在")
    return CustomRule(
        id=normalized_rule_id,
        name=normalized_name,
        include=normalize_optional_text(include),
        exclude=normalize_optional_text(exclude),
        size_range=validate_numeric_range("size_range", size_range),
        seeders=validate_seeders(seeders),
        publish_time=validate_numeric_range("publish_time", publish_time),
    )
def normalize_rule_group(
    name: str,
    rule_string: str,
    media_type: Optional[str],
    category: Optional[str],
    existing_groups: Iterable["FilterRuleGroup"],
    available_rule_ids: Iterable[str],
    original_name: Optional[str] = None,
) -> tuple["FilterRuleGroup", dict]:
    """Shared validation for creating/updating a rule group: name, scope, and rule string."""
    group_name = normalize_optional_text(name)
    if not group_name:
        raise ValueError("规则组名称不能为空")
    for existing in existing_groups:
        if existing.name != group_name:
            continue
        if existing.name == original_name:
            # Updating a group may keep its own name.
            continue
        raise ValueError(f"规则组名称 '{group_name}' 已存在")
    scoped_media_type = normalize_media_type(media_type)
    scoped_category = normalize_optional_text(category)
    if scoped_category and not scoped_media_type:
        raise ValueError("设置 category 时必须同时设置 media_type")
    parsed = validate_rule_string(rule_string, available_rule_ids)
    group = FilterRuleGroup(
        name=group_name,
        rule_string=parsed["rule_string"],
        media_type=scoped_media_type,
        category=scoped_category,
    )
    return group, parsed
async def save_system_config(
    key: SystemConfigKey, value: Any
) -> Optional[bool]:
    """Persist a config value through the shared oper and emit a ConfigChanged event on success."""
    payload = value
    if isinstance(payload, list):
        # Drop None/empty entries; an empty list is stored as None.
        cleaned = [
            item
            for item in payload
            if item is not None and item != ""
        ]
        payload = cleaned if cleaned else None
    success = await SystemConfigOper().async_set(key, payload)
    if success:
        event_data = ConfigChangeEventData(
            key=key,
            value=payload,
            change_type="update",
        )
        await eventmanager.async_send_event(
            etype=EventType.ConfigChanged,
            data=event_data,
        )
    return success
def replace_rule_id_in_rule_string(
    rule_string: str, old_rule_id: str, new_rule_id: str
) -> str:
    """Swap one rule id for another, matching only whole tokens so similar ids survive."""
    token_re = re.compile(
        r"(?<![A-Za-z0-9])" + re.escape(old_rule_id) + r"(?![A-Za-z0-9])"
    )
    return token_re.sub(new_rule_id, rule_string)
def replace_group_name_in_list(
    values: Optional[Iterable[str]], old_name: str, new_name: str
) -> list[str]:
    """Rename group references in a config list, dropping duplicates while keeping order."""
    renamed = (new_name if item == old_name else item for item in values or [])
    deduped: list[str] = []
    for item in renamed:
        if item not in deduped:
            deduped.append(item)
    return deduped
async def rename_rule_group_references(old_name: str, new_name: str) -> dict:
    """After a rule group is renamed, update global settings and subscription references.

    Args:
        old_name: the group's previous name.
        new_name: the group's new name.

    Returns:
        Dict describing what changed: updated global setting keys and the
        subscriptions whose filter_groups were rewritten.
    """
    changed = {
        "global_settings": {},
        "subscribes": [],
    }
    # The three global config keys that may reference rule groups by name.
    for config_key in (
        SystemConfigKey.SearchFilterRuleGroups,
        SystemConfigKey.SubscribeFilterRuleGroups,
        SystemConfigKey.BestVersionFilterRuleGroups,
    ):
        original = SystemConfigOper().get(config_key) or []
        updated = replace_group_name_in_list(original, old_name, new_name)
        # Only write back (and record) when the list actually changed.
        if updated != original:
            await save_system_config(config_key, updated)
            changed["global_settings"][config_key.value] = updated
    # Rewrite filter_groups on every subscription that referenced the old name.
    async with AsyncSessionFactory() as db:
        subscribes = await Subscribe.async_list(db)
        for subscribe in subscribes:
            original = subscribe.filter_groups or []
            updated = replace_group_name_in_list(original, old_name, new_name)
            if updated == original:
                continue
            await subscribe.async_update(db, {"filter_groups": updated})
            changed["subscribes"].append(
                {
                    "subscribe_id": subscribe.id,
                    "name": subscribe.name,
                    "season": subscribe.season,
                    "filter_groups": updated,
                }
            )
    return changed
async def remove_rule_group_references(group_name: str) -> dict:
    """After a rule group is deleted, clean dangling references from settings and subscriptions.

    Args:
        group_name: the deleted group's name.

    Returns:
        Dict describing what changed: updated global setting keys and the
        subscriptions whose filter_groups were rewritten.
    """
    changed = {
        "global_settings": {},
        "subscribes": [],
    }
    # The three global config keys that may reference rule groups by name.
    for config_key in (
        SystemConfigKey.SearchFilterRuleGroups,
        SystemConfigKey.SubscribeFilterRuleGroups,
        SystemConfigKey.BestVersionFilterRuleGroups,
    ):
        original = SystemConfigOper().get(config_key) or []
        updated = [value for value in original if value != group_name]
        # Only write back (and record) when something was removed.
        if updated != original:
            await save_system_config(config_key, updated)
            changed["global_settings"][config_key.value] = updated
    # Strip the deleted group from every subscription that referenced it.
    async with AsyncSessionFactory() as db:
        subscribes = await Subscribe.async_list(db)
        for subscribe in subscribes:
            original = subscribe.filter_groups or []
            updated = [value for value in original if value != group_name]
            if updated == original:
                continue
            await subscribe.async_update(db, {"filter_groups": updated})
            changed["subscribes"].append(
                {
                    "subscribe_id": subscribe.id,
                    "name": subscribe.name,
                    "season": subscribe.season,
                    "filter_groups": updated,
                }
            )
    return changed

View File

@@ -0,0 +1,111 @@
"""新增自定义过滤规则工具。"""
import json
from typing import Optional, Type
from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool
from app.agent.tools.impl._filter_rule_utils import (
get_custom_rules,
normalize_custom_rule,
save_system_config,
serialize_custom_rule,
)
from app.log import logger
from app.schemas.types import SystemConfigKey
class AddCustomFilterRuleInput(BaseModel):
    """Input schema for the add-custom-filter-rule tool."""
    # Required free-text rationale; used for auditing why the agent invoked the tool.
    explanation: str = Field(
        ...,
        description="Clear explanation of why this tool is being used in the current context",
    )
    rule_id: str = Field(
        ...,
        description="Unique custom rule ID. Only letters and numbers are allowed.",
    )
    name: str = Field(..., description="Display name of the custom rule.")
    include: Optional[str] = Field(
        None, description="Optional include regex for the rule."
    )
    exclude: Optional[str] = Field(
        None, description="Optional exclude regex for the rule."
    )
    size_range: Optional[str] = Field(
        None, description="Optional size range in MB, for example '1000-5000'."
    )
    seeders: Optional[str] = Field(
        None, description="Optional minimum seeder count as a non-negative integer."
    )
    publish_time: Optional[str] = Field(
        None,
        description="Optional publish-time filter in minutes, for example '60' or '60-1440'.",
    )
class AddCustomFilterRuleTool(MoviePilotTool):
    """Agent tool that appends a new custom filter rule to CustomFilterRules."""
    name: str = "add_custom_filter_rule"
    description: str = (
        "Add a custom filter rule to CustomFilterRules. "
        "The new rule can then be referenced by rule ID inside filter rule groups."
    )
    args_schema: Type[BaseModel] = AddCustomFilterRuleInput
    # Mutating configuration is restricted to admin users.
    require_admin: bool = True
    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Human-readable progress message shown while the tool runs."""
        return f"新增自定义过滤规则 {kwargs.get('rule_id', '')}"
    async def run(
        self,
        rule_id: str,
        name: str,
        include: Optional[str] = None,
        exclude: Optional[str] = None,
        size_range: Optional[str] = None,
        seeders: Optional[str] = None,
        publish_time: Optional[str] = None,
        **kwargs,
    ) -> str:
        """Validate the rule, append it to the stored list, and persist the config.

        Returns a JSON string: on success it includes the serialized rule and
        the new rule count; on any error a failure payload is returned instead
        of raising.
        """
        logger.info(f"执行工具: {self.name}, rule_id={rule_id}")
        try:
            custom_rules = get_custom_rules()
            # normalize_custom_rule performs all id/name/format/uniqueness validation.
            new_rule = normalize_custom_rule(
                rule_id=rule_id,
                name=name,
                include=include,
                exclude=exclude,
                size_range=size_range,
                seeders=seeders,
                publish_time=publish_time,
                existing_rules=custom_rules,
            )
            custom_rules.append(new_rule)
            await save_system_config(
                SystemConfigKey.CustomFilterRules,
                [rule.model_dump(exclude_none=True) for rule in custom_rules],
            )
            return json.dumps(
                {
                    "success": True,
                    "message": f"已新增自定义过滤规则 {new_rule.id}",
                    "custom_rule": serialize_custom_rule(new_rule),
                    "count": len(custom_rules),
                },
                ensure_ascii=False,
                indent=2,
            )
        except Exception as exc:
            logger.error(f"新增自定义过滤规则失败: {exc}", exc_info=True)
            return json.dumps(
                {
                    "success": False,
                    "message": f"新增自定义过滤规则失败: {exc}",
                },
                ensure_ascii=False,
            )

View File

@@ -0,0 +1,115 @@
"""新增过滤规则组工具。"""
import json
from typing import Optional, Type
from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool
from app.agent.tools.impl._filter_rule_utils import (
build_custom_rule_map,
collect_rule_group_usages,
get_builtin_rules,
get_custom_rules,
get_rule_groups,
normalize_rule_group,
save_system_config,
serialize_rule_group,
)
from app.log import logger
from app.schemas.types import SystemConfigKey
class AddRuleGroupInput(BaseModel):
    """Input schema for the add-rule-group tool."""
    # Required free-text rationale; used for auditing why the agent invoked the tool.
    explanation: str = Field(
        ...,
        description="Clear explanation of why this tool is being used in the current context",
    )
    name: str = Field(..., description="New rule group name.")
    rule_string: str = Field(
        ...,
        description=(
            "Rule expression using built-in/custom rule IDs. "
            "Use '&', '!' inside one level, and use '>' between priority levels. "
            "Example: 'SPECSUB & CNVOI & 4K & !BLU > CNSUB & CNVOI & 4K & !BLU'."
        ),
    )
    media_type: Optional[str] = Field(
        None,
        description="Optional media type scope: '电影', '电视剧', 'movie', or 'tv'.",
    )
    category: Optional[str] = Field(
        None,
        description="Optional media category. Only valid when media_type is set.",
    )
class AddRuleGroupTool(MoviePilotTool):
    """Agent tool that creates a new filter rule group in UserFilterRuleGroups."""
    name: str = "add_rule_group"
    description: str = (
        "Add a new filter rule group to UserFilterRuleGroups. "
        "Rule groups are matched level by level from left to right and can be linked to search/subscription flows. "
        "Before calling this tool, first use query_builtin_filter_rules and query_custom_filter_rules to confirm valid rule IDs, "
        "and optionally use query_rule_groups to imitate existing rule_string patterns."
    )
    args_schema: Type[BaseModel] = AddRuleGroupInput
    # Mutating configuration is restricted to admin users.
    require_admin: bool = True
    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Human-readable progress message shown while the tool runs."""
        return f"新增规则组 {kwargs.get('name', '')}"
    async def run(
        self,
        name: str,
        rule_string: str,
        media_type: Optional[str] = None,
        category: Optional[str] = None,
        **kwargs,
    ) -> str:
        """Validate and persist a new rule group, then report its usage.

        Returns a JSON string: on success it includes the serialized group
        (with parsed levels and usage) and the new group count; on any error a
        failure payload is returned instead of raising.
        """
        logger.info(f"执行工具: {self.name}, name={name}")
        try:
            custom_rules = get_custom_rules()
            # rule_string may reference both built-in and custom rule IDs.
            available_rule_ids = set(get_builtin_rules().keys()) | set(
                build_custom_rule_map(custom_rules).keys()
            )
            rule_groups = get_rule_groups()
            # normalize_rule_group validates the name, scope and rule_string syntax.
            new_group, _ = normalize_rule_group(
                name=name,
                rule_string=rule_string,
                media_type=media_type,
                category=category,
                existing_groups=rule_groups,
                available_rule_ids=available_rule_ids,
            )
            rule_groups.append(new_group)
            await save_system_config(
                SystemConfigKey.UserFilterRuleGroups,
                [group.model_dump(exclude_none=True) for group in rule_groups],
            )
            usage = await collect_rule_group_usages([new_group.name])
            return json.dumps(
                {
                    "success": True,
                    "message": f"已新增规则组 {new_group.name}",
                    "rule_group": serialize_rule_group(
                        new_group, usage.get(new_group.name)
                    ),
                    "count": len(rule_groups),
                },
                ensure_ascii=False,
                indent=2,
            )
        except Exception as exc:
            logger.error(f"新增规则组失败: {exc}", exc_info=True)
            return json.dumps(
                {
                    "success": False,
                    "message": f"新增规则组失败: {exc}",
                },
                ensure_ascii=False,
            )

View File

@@ -0,0 +1,97 @@
"""删除自定义过滤规则工具。"""
import json
from typing import Optional, Type
from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool
from app.agent.tools.impl._filter_rule_utils import (
collect_custom_rule_group_refs,
get_custom_rules,
get_rule_groups,
save_system_config,
)
from app.log import logger
from app.schemas.types import SystemConfigKey
class DeleteCustomFilterRuleInput(BaseModel):
    """Input schema for the delete-custom-filter-rule tool."""
    # Required free-text rationale; used for auditing why the agent invoked the tool.
    explanation: str = Field(
        ...,
        description="Clear explanation of why this tool is being used in the current context",
    )
    rule_id: str = Field(..., description="Custom rule ID to delete.")
class DeleteCustomFilterRuleTool(MoviePilotTool):
    """Agent tool that removes a custom filter rule, refusing while rule groups still reference it."""
    name: str = "delete_custom_filter_rule"
    description: str = (
        "Delete a custom filter rule from CustomFilterRules. "
        "If the rule is still referenced by rule groups, the deletion is blocked to avoid breaking rule_string expressions."
    )
    args_schema: Type[BaseModel] = DeleteCustomFilterRuleInput
    # Mutating configuration is restricted to admin users.
    require_admin: bool = True
    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Human-readable progress message shown while the tool runs."""
        return f"删除自定义过滤规则 {kwargs.get('rule_id', '')}"
    async def run(self, rule_id: str, **kwargs) -> str:
        """Delete the rule unless a rule group still references it.

        Returns a JSON string; failure payloads are returned (not raised) when
        the rule is missing, still referenced, or an unexpected error occurs.
        """
        logger.info(f"执行工具: {self.name}, rule_id={rule_id}")
        try:
            custom_rules = get_custom_rules()
            target_rule = next((rule for rule in custom_rules if rule.id == rule_id), None)
            if not target_rule:
                return json.dumps(
                    {
                        "success": False,
                        "message": f"自定义过滤规则 '{rule_id}' 不存在",
                    },
                    ensure_ascii=False,
                )
            # Block deletion while any rule group's rule_string still references the id.
            refs = collect_custom_rule_group_refs(get_rule_groups(), [rule_id]).get(
                rule_id, []
            )
            if refs:
                return json.dumps(
                    {
                        "success": False,
                        "message": (
                            f"自定义过滤规则 '{rule_id}' 仍被规则组引用,无法删除。"
                        ),
                        "referenced_by_rule_groups": refs,
                    },
                    ensure_ascii=False,
                    indent=2,
                )
            remaining_rules = [
                rule for rule in custom_rules if rule.id != rule_id
            ]
            await save_system_config(
                SystemConfigKey.CustomFilterRules,
                [rule.model_dump(exclude_none=True) for rule in remaining_rules],
            )
            return json.dumps(
                {
                    "success": True,
                    "message": f"已删除自定义过滤规则 {rule_id}",
                    "count": len(remaining_rules),
                },
                ensure_ascii=False,
                indent=2,
            )
        except Exception as exc:
            logger.error(f"删除自定义过滤规则失败: {exc}", exc_info=True)
            return json.dumps(
                {
                    "success": False,
                    "message": f"删除自定义过滤规则失败: {exc}",
                },
                ensure_ascii=False,
            )

View File

@@ -0,0 +1,81 @@
"""删除过滤规则组工具。"""
import json
from typing import Optional, Type
from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool
from app.agent.tools.impl._filter_rule_utils import (
get_rule_groups,
remove_rule_group_references,
save_system_config,
)
from app.log import logger
from app.schemas.types import SystemConfigKey
class DeleteRuleGroupInput(BaseModel):
    """Input schema for the delete-rule-group tool."""
    # Required free-text rationale; used for auditing why the agent invoked the tool.
    explanation: str = Field(
        ...,
        description="Clear explanation of why this tool is being used in the current context",
    )
    name: str = Field(..., description="Rule group name to delete.")
class DeleteRuleGroupTool(MoviePilotTool):
    """Agent tool that deletes a rule group and cleans up dangling references."""
    name: str = "delete_rule_group"
    description: str = (
        "Delete a filter rule group from UserFilterRuleGroups. "
        "The tool also removes dangling references from global settings and subscriptions."
    )
    args_schema: Type[BaseModel] = DeleteRuleGroupInput
    # Mutating configuration is restricted to admin users.
    require_admin: bool = True
    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Human-readable progress message shown while the tool runs."""
        return f"删除规则组 {kwargs.get('name', '')}"
    async def run(self, name: str, **kwargs) -> str:
        """Delete the group, then strip its name from global settings and subscriptions.

        Returns a JSON string including what references were updated; failure
        payloads are returned (not raised) on missing group or unexpected error.
        """
        logger.info(f"执行工具: {self.name}, name={name}")
        try:
            rule_groups = get_rule_groups()
            if not any(group.name == name for group in rule_groups):
                return json.dumps(
                    {
                        "success": False,
                        "message": f"规则组 '{name}' 不存在",
                    },
                    ensure_ascii=False,
                )
            remaining_groups = [
                group for group in rule_groups if group.name != name
            ]
            await save_system_config(
                SystemConfigKey.UserFilterRuleGroups,
                [group.model_dump(exclude_none=True) for group in remaining_groups],
            )
            # Remove dangling references after the group itself is gone.
            reference_changes = await remove_rule_group_references(name)
            return json.dumps(
                {
                    "success": True,
                    "message": f"已删除规则组 {name}",
                    "count": len(remaining_groups),
                    "reference_updates": reference_changes,
                },
                ensure_ascii=False,
                indent=2,
            )
        except Exception as exc:
            logger.error(f"删除规则组失败: {exc}", exc_info=True)
            return json.dumps(
                {
                    "success": False,
                    "message": f"删除规则组失败: {exc}",
                },
                ensure_ascii=False,
            )

View File

@@ -0,0 +1,85 @@
"""查询内置过滤规则工具。"""
import json
from typing import Optional, Type, List
from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool
from app.agent.tools.impl._filter_rule_utils import (
get_builtin_rules,
serialize_builtin_rule,
RULE_STRING_SYNTAX,
)
from app.log import logger
class QueryBuiltinFilterRulesInput(BaseModel):
    """Input schema for the query-builtin-filter-rules tool."""
    # Required free-text rationale; used for auditing why the agent invoked the tool.
    explanation: str = Field(
        ...,
        description="Clear explanation of why this tool is being used in the current context",
    )
    rule_ids: Optional[List[str]] = Field(
        None,
        description="Optional list of built-in rule IDs to query. If omitted, return all built-in rules.",
    )
class QueryBuiltinFilterRulesTool(MoviePilotTool):
    """Agent tool that lists built-in filter rules plus rule_string syntax guidance."""
    name: str = "query_builtin_filter_rules"
    description: str = (
        "Query built-in filter rules defined by the backend filter module. "
        "These rule IDs can be used directly inside rule_string expressions for filter rule groups. "
        "Use this tool before add_rule_group or update_rule_group to learn valid built-in rule IDs."
    )
    args_schema: Type[BaseModel] = QueryBuiltinFilterRulesInput
    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Human-readable progress message shown while the tool runs."""
        rule_ids = kwargs.get("rule_ids") or []
        if rule_ids:
            return f"查询内置过滤规则: {', '.join(rule_ids)}"
        return "查询所有内置过滤规则"
    async def run(
        self,
        rule_ids: Optional[List[str]] = None,
        **kwargs,
    ) -> str:
        """Return the (optionally filtered) built-in rules as a JSON string.

        The response also carries RULE_STRING_SYNTAX so the agent can compose
        valid rule_string expressions; errors yield a failure payload.
        """
        logger.info(f"执行工具: {self.name}")
        try:
            builtin_rules = get_builtin_rules()
            if rule_ids:
                # Narrow to the requested IDs; unknown IDs are silently dropped.
                target_ids = set(rule_ids)
                builtin_rules = {
                    rule_id: payload
                    for rule_id, payload in builtin_rules.items()
                    if rule_id in target_ids
                }
            serialized = [
                serialize_builtin_rule(rule_id, payload)
                for rule_id, payload in builtin_rules.items()
            ]
            return json.dumps(
                {
                    "success": True,
                    "count": len(serialized),
                    "rule_string_syntax": RULE_STRING_SYNTAX,
                    "rules": serialized,
                },
                ensure_ascii=False,
                indent=2,
            )
        except Exception as exc:
            logger.error(f"查询内置过滤规则失败: {exc}", exc_info=True)
            return json.dumps(
                {
                    "success": False,
                    "message": f"查询内置过滤规则失败: {exc}",
                    "rules": [],
                },
                ensure_ascii=False,
            )

View File

@@ -0,0 +1,95 @@
"""查询自定义过滤规则工具。"""
import json
from typing import Optional, Type, List
from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool
from app.agent.tools.impl._filter_rule_utils import (
collect_custom_rule_group_refs,
get_custom_rules,
get_rule_groups,
serialize_custom_rule,
)
from app.log import logger
class QueryCustomFilterRulesInput(BaseModel):
    """Input schema for the query-custom-filter-rules tool."""
    # Required free-text rationale; used for auditing why the agent invoked the tool.
    explanation: str = Field(
        ...,
        description="Clear explanation of why this tool is being used in the current context",
    )
    rule_ids: Optional[List[str]] = Field(
        None,
        description="Optional list of custom rule IDs to query. If omitted, return all custom rules.",
    )
    include_group_refs: bool = Field(
        True,
        description="Whether to include which rule groups reference each custom rule.",
    )
class QueryCustomFilterRulesTool(MoviePilotTool):
    """Agent tool that lists custom filter rules, optionally with rule-group back-references."""
    name: str = "query_custom_filter_rules"
    description: str = (
        "Query custom filter rules stored in CustomFilterRules. "
        "Custom rules can be referenced from rule_string expressions in filter rule groups. "
        "Use this tool before add_rule_group or update_rule_group to learn valid custom rule IDs."
    )
    args_schema: Type[BaseModel] = QueryCustomFilterRulesInput
    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Human-readable progress message shown while the tool runs."""
        rule_ids = kwargs.get("rule_ids") or []
        if rule_ids:
            return f"查询自定义过滤规则: {', '.join(rule_ids)}"
        return "查询所有自定义过滤规则"
    async def run(
        self,
        rule_ids: Optional[List[str]] = None,
        include_group_refs: bool = True,
        **kwargs,
    ) -> str:
        """Return the (optionally filtered) custom rules as a JSON string.

        When include_group_refs is True, each rule carries the names of rule
        groups whose rule_string references it; errors yield a failure payload.
        """
        logger.info(f"执行工具: {self.name}")
        try:
            custom_rules = get_custom_rules()
            if rule_ids:
                # Narrow to the requested IDs; unknown IDs are silently dropped.
                target_ids = set(rule_ids)
                custom_rules = [
                    rule for rule in custom_rules if rule.id in target_ids
                ]
            refs = {}
            if include_group_refs:
                refs = collect_custom_rule_group_refs(
                    get_rule_groups(),
                    [rule.id for rule in custom_rules if rule.id],
                )
            serialized = [
                serialize_custom_rule(rule, refs.get(rule.id))
                for rule in custom_rules
            ]
            return json.dumps(
                {
                    "success": True,
                    "count": len(serialized),
                    "rules": serialized,
                },
                ensure_ascii=False,
                indent=2,
            )
        except Exception as exc:
            logger.error(f"查询自定义过滤规则失败: {exc}", exc_info=True)
            return json.dumps(
                {
                    "success": False,
                    "message": f"查询自定义过滤规则失败: {exc}",
                    "rules": [],
                },
                ensure_ascii=False,
            )

View File

@@ -1,63 +1,104 @@
"""查询规则组工具"""
"""查询过滤规则组工具"""
import json
from typing import Optional, Type
from typing import Optional, Type, List
from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool
from app.helper.rule import RuleHelper
from app.agent.tools.impl._filter_rule_utils import (
collect_rule_group_usages,
get_rule_groups,
serialize_rule_group,
RULE_STRING_SYNTAX,
)
from app.log import logger
class QueryRuleGroupsInput(BaseModel):
"""查询规则组工具的输入参数模型"""
explanation: str = Field(..., description="Clear explanation of why this tool is being used in the current context")
explanation: str = Field(
...,
description="Clear explanation of why this tool is being used in the current context",
)
group_names: Optional[List[str]] = Field(
None,
description="Optional list of rule group names to query. If omitted, return all rule groups.",
)
include_usage: bool = Field(
True,
description="Whether to include where each rule group is referenced by global settings or subscriptions.",
)
class QueryRuleGroupsTool(MoviePilotTool):
name: str = "query_rule_groups"
description: str = "Query all filter rule groups available in the system. Rule groups are used to filter torrents when searching or subscribing. Returns rule group names, media types, and categories, but excludes rule_string to keep results concise."
description: str = (
"Query filter rule groups (过滤规则组 / 优先级规则组). "
"Each rule group contains a rule_string made of built-in rules and/or custom rules. "
"Inside one level use '&', '|', '!' and optional parentheses; use '>' between levels. "
"Levels are evaluated from left to right, and the first matched level wins. "
"The result includes parsed levels and syntax guidance so the agent can learn existing patterns before writing a new rule group."
)
args_schema: Type[BaseModel] = QueryRuleGroupsInput
def get_tool_message(self, **kwargs) -> Optional[str]:
"""根据查询参数生成友好的提示消息"""
group_names = kwargs.get("group_names") or []
if group_names:
return f"查询规则组: {', '.join(group_names)}"
return "查询所有规则组"
@staticmethod
def _load_rule_groups() -> dict:
"""从内存配置缓存中读取规则组。"""
rule_groups = RuleHelper().get_rule_groups()
if not rule_groups:
return {
"message": "未找到任何规则组",
"rule_groups": [],
}
simplified_groups = [
{
"name": group.name,
"media_type": group.media_type,
"category": group.category,
}
for group in rule_groups
]
return {
"message": f"找到 {len(simplified_groups)} 个规则组",
"rule_groups": simplified_groups,
}
async def run(self, **kwargs) -> str:
async def run(
self,
group_names: Optional[List[str]] = None,
include_usage: bool = True,
**kwargs,
) -> str:
logger.info(f"执行工具: {self.name}")
try:
result = self._load_rule_groups()
return json.dumps(result, ensure_ascii=False, indent=2)
except Exception as e:
error_message = f"查询规则组失败: {str(e)}"
logger.error(f"查询规则组失败: {e}", exc_info=True)
return json.dumps({
"success": False,
"message": error_message,
"rule_groups": []
}, ensure_ascii=False)
rule_groups = get_rule_groups()
if group_names:
target_names = set(group_names)
rule_groups = [
group for group in rule_groups if group.name in target_names
]
usage_map = {}
if include_usage:
usage_map = await collect_rule_group_usages(
[group.name for group in rule_groups if group.name]
)
serialized = [
serialize_rule_group(group, usage_map.get(group.name))
for group in rule_groups
]
message = (
f"找到 {len(serialized)} 个规则组"
if serialized
else "未找到任何规则组"
)
return json.dumps(
{
"success": True,
"message": message,
"count": len(serialized),
"rule_string_syntax": RULE_STRING_SYNTAX,
"rule_groups": serialized,
},
ensure_ascii=False,
indent=2,
)
except Exception as exc:
logger.error(f"查询规则组失败: {exc}", exc_info=True)
return json.dumps(
{
"success": False,
"message": f"查询规则组失败: {exc}",
"rule_groups": [],
},
ensure_ascii=False,
)

View File

@@ -0,0 +1,190 @@
"""更新自定义过滤规则工具。"""
import json
from typing import Optional, Type
from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool
from app.agent.tools.impl._filter_rule_utils import (
collect_custom_rule_group_refs,
get_custom_rules,
get_rule_groups,
normalize_custom_rule,
replace_rule_id_in_rule_string,
save_system_config,
serialize_custom_rule,
)
from app.log import logger
from app.schemas.types import SystemConfigKey
class UpdateCustomFilterRuleInput(BaseModel):
    """更新自定义过滤规则工具的输入参数模型"""

    # NOTE(review): the Field descriptions below appear to be surfaced to the
    # agent as the tool's argument schema — keep them accurate and concise.
    explanation: str = Field(
        ...,
        description="Clear explanation of why this tool is being used in the current context",
    )
    # ID of the custom rule to edit; must reference an existing rule.
    current_rule_id: str = Field(
        ..., description="Existing custom rule ID to update."
    )
    # Optional rename target; None keeps the current ID.
    new_rule_id: Optional[str] = Field(
        None,
        description="New rule ID. If omitted, keep the original rule ID.",
    )
    name: Optional[str] = Field(
        None, description="New display name. If omitted, keep the original name."
    )
    # For every optional filter field below: None keeps the stored value,
    # while an empty string explicitly clears the field.
    include: Optional[str] = Field(
        None,
        description="New include regex. Pass an empty string to clear it.",
    )
    exclude: Optional[str] = Field(
        None,
        description="New exclude regex. Pass an empty string to clear it.",
    )
    size_range: Optional[str] = Field(
        None,
        description="New size range in MB. Pass an empty string to clear it.",
    )
    seeders: Optional[str] = Field(
        None,
        description="New minimum seeder count. Pass an empty string to clear it.",
    )
    publish_time: Optional[str] = Field(
        None,
        description="New publish-time filter in minutes. Pass an empty string to clear it.",
    )
class UpdateCustomFilterRuleTool(MoviePilotTool):
    """Agent tool that updates one existing custom filter rule.

    When the rule ID is renamed, every rule group whose rule_string
    references the old ID is rewritten to the new ID before the rule
    itself is saved.
    """

    name: str = "update_custom_filter_rule"
    description: str = (
        "Update an existing custom filter rule. "
        "If the rule ID is renamed, all rule groups that reference the old ID are updated automatically."
    )
    args_schema: Type[BaseModel] = UpdateCustomFilterRuleInput
    # Editing persisted filter configuration is restricted to administrators.
    require_admin: bool = True

    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Return a short progress message; shows 'old -> new' on a rename."""
        current_rule_id = kwargs.get("current_rule_id", "")
        new_rule_id = kwargs.get("new_rule_id")
        if new_rule_id and new_rule_id != current_rule_id:
            return f"更新自定义过滤规则 {current_rule_id} -> {new_rule_id}"
        return f"更新自定义过滤规则 {current_rule_id}"

    async def run(
        self,
        current_rule_id: str,
        new_rule_id: Optional[str] = None,
        name: Optional[str] = None,
        include: Optional[str] = None,
        exclude: Optional[str] = None,
        size_range: Optional[str] = None,
        seeders: Optional[str] = None,
        publish_time: Optional[str] = None,
        **kwargs,
    ) -> str:
        """Update the rule and persist the rule list plus any rewritten groups.

        For each optional field, ``None`` keeps the stored value and an empty
        string clears it (see the input schema descriptions). Returns a JSON
        string with ``success``, the serialized rule, and the names of rule
        groups rewritten because of an ID rename.
        """
        logger.info(f"执行工具: {self.name}, current_rule_id={current_rule_id}")
        try:
            custom_rules = get_custom_rules()
            rule_map = {rule.id: rule for rule in custom_rules if rule.id}
            current_rule = rule_map.get(current_rule_id)
            if not current_rule:
                return json.dumps(
                    {
                        "success": False,
                        "message": f"自定义过滤规则 '{current_rule_id}' 不存在",
                    },
                    ensure_ascii=False,
                )
            # Merge the request onto the stored rule; normalize_custom_rule
            # validates the result against the existing rules.
            updated_rule = normalize_custom_rule(
                rule_id=new_rule_id or current_rule.id,
                name=name if name is not None else current_rule.name,
                include=include if include is not None else current_rule.include,
                exclude=exclude if exclude is not None else current_rule.exclude,
                size_range=(
                    size_range if size_range is not None else current_rule.size_range
                ),
                seeders=seeders if seeders is not None else current_rule.seeders,
                publish_time=(
                    publish_time
                    if publish_time is not None
                    else current_rule.publish_time
                ),
                existing_rules=custom_rules,
                original_rule_id=current_rule.id,
            )
            rule_groups = get_rule_groups()
            updated_rule_groups = rule_groups
            # Names of rule groups whose rule_string was rewritten by a rename.
            renamed_group_refs = []
            if updated_rule.id != current_rule.id:
                # The rule was renamed: rewrite references inside each group's
                # rule_string, keeping track of which groups actually changed.
                updated_rule_groups = []
                for group in rule_groups:
                    if not group.rule_string:
                        updated_rule_groups.append(group)
                        continue
                    new_rule_string = replace_rule_id_in_rule_string(
                        group.rule_string,
                        current_rule.id,
                        updated_rule.id,
                    )
                    if new_rule_string == group.rule_string:
                        updated_rule_groups.append(group)
                        continue
                    renamed_group_refs.append(group.name)
                    updated_rule_groups.append(
                        group.model_copy(update={"rule_string": new_rule_string})
                    )
            # Save the rule-group references first and the rule itself second,
            # so a filter-module reload never observes a group pointing at a
            # rule ID that has not been synchronized yet.
            await save_system_config(
                SystemConfigKey.UserFilterRuleGroups,
                [
                    group.model_dump(exclude_none=True)
                    for group in updated_rule_groups
                ],
            )
            final_rules = []
            for rule in custom_rules:
                if rule.id == current_rule.id:
                    final_rules.append(updated_rule)
                else:
                    final_rules.append(rule)
            await save_system_config(
                SystemConfigKey.CustomFilterRules,
                [rule.model_dump(exclude_none=True) for rule in final_rules],
            )
            updated_refs = collect_custom_rule_group_refs(
                updated_rule_groups,
                [updated_rule.id],
            )
            return json.dumps(
                {
                    "success": True,
                    "message": f"已更新自定义过滤规则 {updated_rule.id}",
                    "custom_rule": serialize_custom_rule(
                        updated_rule,
                        updated_refs.get(updated_rule.id),
                    ),
                    "rule_groups_updated_for_rule_id_rename": renamed_group_refs,
                },
                ensure_ascii=False,
                indent=2,
            )
        except Exception as exc:
            logger.error(f"更新自定义过滤规则失败: {exc}", exc_info=True)
            return json.dumps(
                {
                    "success": False,
                    "message": f"更新自定义过滤规则失败: {exc}",
                },
                ensure_ascii=False,
            )

View File

@@ -0,0 +1,157 @@
"""更新过滤规则组工具。"""
import json
from typing import Optional, Type
from pydantic import BaseModel, Field
from app.agent.tools.base import MoviePilotTool
from app.agent.tools.impl._filter_rule_utils import (
build_custom_rule_map,
collect_rule_group_usages,
get_builtin_rules,
get_custom_rules,
get_rule_groups,
normalize_rule_group,
rename_rule_group_references,
save_system_config,
serialize_rule_group,
)
from app.log import logger
from app.schemas.types import SystemConfigKey
class UpdateRuleGroupInput(BaseModel):
    """更新过滤规则组工具的输入参数模型"""

    # NOTE(review): the Field descriptions below appear to be surfaced to the
    # agent as the tool's argument schema — keep them accurate and concise.
    explanation: str = Field(
        ...,
        description="Clear explanation of why this tool is being used in the current context",
    )
    # Name of the rule group to edit; must reference an existing group.
    current_name: str = Field(..., description="Existing rule group name to update.")
    # Optional rename target; None keeps the current name.
    new_name: Optional[str] = Field(
        None,
        description="New rule group name. If omitted, keep the original name.",
    )
    # Priority expression over built-in/custom rule IDs; None keeps it.
    rule_string: Optional[str] = Field(
        None,
        description=(
            "New rule_string. If omitted, keep the original rule_string. "
            "Example: 'SPECSUB & CNVOI & 4K & !BLU > CNSUB & CNVOI & 4K & !BLU'."
        ),
    )
    # For the two fields below: None keeps the stored value, while an empty
    # string explicitly clears the field.
    media_type: Optional[str] = Field(
        None,
        description="New media type scope. Pass an empty string to clear it.",
    )
    category: Optional[str] = Field(
        None,
        description="New category. Pass an empty string to clear it.",
    )
class UpdateRuleGroupTool(MoviePilotTool):
    """Agent tool that edits an existing filter rule group and persists it."""

    name: str = "update_rule_group"
    description: str = (
        "Update a filter rule group. "
        "If the rule group name changes, its references in global search/subscription settings and per-subscription bindings are updated automatically. "
        "Before changing rule_string, first use query_builtin_filter_rules and query_custom_filter_rules to confirm valid rule IDs."
    )
    args_schema: Type[BaseModel] = UpdateRuleGroupInput
    require_admin: bool = True

    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Describe the pending update, showing both names when renaming."""
        old_name = kwargs.get("current_name", "")
        renamed_to = kwargs.get("new_name")
        if not renamed_to or renamed_to == old_name:
            return f"更新规则组 {old_name}"
        return f"更新规则组 {old_name} -> {renamed_to}"

    async def run(
        self,
        current_name: str,
        new_name: Optional[str] = None,
        rule_string: Optional[str] = None,
        media_type: Optional[str] = None,
        category: Optional[str] = None,
        **kwargs,
    ) -> str:
        """Merge the request onto the stored group, validate, save and report usage."""
        logger.info(f"执行工具: {self.name}, current_name={current_name}")
        try:
            existing_groups = get_rule_groups()
            by_name = {g.name: g for g in existing_groups if g.name}
            target = by_name.get(current_name)
            if target is None:
                return json.dumps(
                    {
                        "success": False,
                        "message": f"规则组 '{current_name}' 不存在",
                    },
                    ensure_ascii=False,
                )
            # Rule IDs that a rule_string may legally reference: built-ins
            # plus all configured custom rules.
            valid_rule_ids = {
                *get_builtin_rules(),
                *build_custom_rule_map(get_custom_rules()),
            }
            # None means "keep the stored value" for every optional field.
            merged_rule_string = (
                target.rule_string if rule_string is None else rule_string
            )
            merged_media_type = (
                target.media_type if media_type is None else media_type
            )
            merged_category = target.category if category is None else category
            updated_group, _ = normalize_rule_group(
                name=new_name or target.name,
                rule_string=merged_rule_string,
                media_type=merged_media_type,
                category=merged_category,
                existing_groups=existing_groups,
                available_rule_ids=valid_rule_ids,
                original_name=target.name,
            )
            final_groups = [
                updated_group if group.name == target.name else group
                for group in existing_groups
            ]
            await save_system_config(
                SystemConfigKey.UserFilterRuleGroups,
                [group.model_dump(exclude_none=True) for group in final_groups],
            )
            # Only a rename requires touching external references.
            reference_changes = {}
            if updated_group.name != target.name:
                reference_changes = await rename_rule_group_references(
                    target.name,
                    updated_group.name,
                )
            usage = await collect_rule_group_usages([updated_group.name])
            return json.dumps(
                {
                    "success": True,
                    "message": f"已更新规则组 {updated_group.name}",
                    "rule_group": serialize_rule_group(
                        updated_group, usage.get(updated_group.name)
                    ),
                    "reference_updates": reference_changes,
                },
                ensure_ascii=False,
                indent=2,
            )
        except Exception as exc:
            logger.error(f"更新规则组失败: {exc}", exc_info=True)
            return json.dumps(
                {
                    "success": False,
                    "message": f"更新规则组失败: {exc}",
                },
                ensure_ascii=False,
            )

View File

@@ -1,14 +1,13 @@
import re
from copy import deepcopy
from typing import List, Tuple, Union, Dict, Optional
from app.core.context import TorrentInfo, MediaInfo
from app.core.metainfo import MetaInfo
from app.helper.rule import RuleHelper
from app.log import logger
from app.modules import _ModuleBase
from app.modules.filter.RuleParser import RuleParser
from app.modules.filter.builtin_rules import BUILTIN_RULE_SET
from app.schemas.types import ModuleType, OtherModulesType, SystemConfigKey
from app.utils.string import StringUtils
class FilterModule(_ModuleBase):
@@ -18,128 +17,10 @@ class FilterModule(_ModuleBase):
# 媒体信息
media: MediaInfo = None
# 内置规则集
rule_set: Dict[str, dict] = {
# 蓝光原盘
"BLU": {
"include": [r'(?i)(\bBlu-?Ray\b.*\b(?:VC-?1|AVC|MPEG-?2)\b|\b(?:UHD|4K|2160p)\b(?:.*Blu-?Ray)?.*\b(?:HEVC|H\.?265)\b|\bBlu-?Ray\b.*\b(?:UHD|4K|2160p)\b.*\b(?:HEVC|H\.?265)\b|\b(?:COMPLETE|FULL)\b.*\b(?:(?:UHD|4K|2160p)\b.*)?Blu-?Ray\b|\b(BD25|BD50|BD66|BD100|BDMV|MiniBD)\b)'],
"exclude": [r'(?i)(\b[XH]\.?264\b|\b[XH]\.?265\b|\bWEB-?DL\b|\bWEB-?RIP\b|\bHDTV(?:RIP)?\b|\bREMUX\b|\bBDRip\b|\bBRRip\b|\bHDRip\b|\bENCODE\b|\b(?<!WEB-|HDTV)RIP\b)']
},
# 4K
"4K": {
"include": [r'4k|2160p|x2160'],
"exclude": []
},
# 1080P
"1080P": {
"include": [r'1080[pi]|x1080'],
"exclude": []
},
# 720P
"720P": {
"include": [r'720[pi]|x720'],
"exclude": []
},
# 中字
"CNSUB": {
"include": [
r'[中国國繁简](/|\s|\\|\|)?[繁简英粤]|[英简繁](/|\s|\\|\|)?[中繁简]'
r'|繁體|简体|[中国國][字配]|国语|國語|中文|中字|简日|繁日|简繁|繁体'
r'|([\s,.-\[])(chs|cht)(|[\s,.-\]])'
r'|(?<![a-z0-9])(gb|big5)(?![a-z0-9])'],
"exclude": [],
"tmdb": {
"original_language": "zh,cn"
}
},
# 官种
"GZ": {
"include": [r'官方', r'官种', r'官组'],
"match": ["labels"]
},
# 特效字幕
"SPECSUB": {
"include": [r'特效'],
"exclude": []
},
# BluRay
"BLURAY": {
"include": [r'Blu-?Ray'],
"exclude": []
},
# UHD
"UHD": {
"include": [r'UHD|UltraHD'],
"exclude": []
},
# H265
"H265": {
"include": [r'[Hx].?265|HEVC'],
"exclude": []
},
# H264
"H264": {
"include": [r'[Hx].?264|AVC'],
"exclude": []
},
# 杜比视界
"DOLBY": {
"include": [r"Dolby[\s.]+Vision|DOVI|[\s.]+DV[\s.]+|杜比视界"],
"exclude": []
},
# 杜比全景声
"ATMOS": {
"include": [r"Dolby[\s.+]+Atmos|Atmos|杜比全景[声聲]"],
"exclude": []
},
# HDR
"HDR": {
"include": [r"[\s.]+HDR[\s.]+|HDR10|HDR10\+"],
"exclude": []
},
# SDR
"SDR": {
"include": [r"[\s.]+SDR[\s.]+"],
"exclude": []
},
# 重编码
"REMUX": {
"include": [r'REMUX'],
"exclude": []
},
# WEB-DL
"WEBDL": {
"include": [r'WEB-?DL|WEB-?RIP'],
"exclude": []
},
# 免费
"FREE": {
"downloadvolumefactor": 0
},
# 国语配音
"CNVOI": {
"include": [r'[国國][语語]配音|[国國]配|[国國][语語]'],
"exclude": [],
"tmdb": {
"original_language": "zh"
}
},
# 粤语配音
"HKVOI": {
"include": [r'粤语配音|粤语'],
"exclude": []
},
# 60FPS
"60FPS": {
"include": [r'60fps|60帧'],
"exclude": []
},
# 3D
"3D": {
"include": [r'3D'],
"exclude": []
},
}
# 保留一份只读内置规则定义,方便查询工具准确区分“内置规则”和“自定义规则”。
builtin_rule_set: Dict[str, dict] = deepcopy(BUILTIN_RULE_SET)
# 运行期规则集 = 内置规则 + 自定义规则覆盖。
rule_set: Dict[str, dict] = {}
def __init__(self):
super().__init__()
@@ -147,6 +28,8 @@ class FilterModule(_ModuleBase):
def init_module(self) -> None:
self.parser = RuleParser()
# 每次重载都先恢复为纯内置规则,避免旧的自定义规则残留在内存里。
self.rule_set = deepcopy(self.builtin_rule_set)
self.__init_custom_rules()
def __init_custom_rules(self):

View File

@@ -0,0 +1,131 @@
"""过滤器内置规则定义。"""
from typing import Dict
# 内置规则只在这里维护一份,便于过滤模块和 Agent 工具共享同一套事实来源。
# Single source of truth for built-in filter rules, shared by the filter
# module and the agent query tools. Each entry maps a rule ID to regex
# include/exclude lists plus optional extra match criteria.
BUILTIN_RULE_SET: Dict[str, dict] = {
    # Full Blu-ray disc releases (BD25/BD50/BD66/BD100, BDMV, MiniBD); the
    # exclude list rejects re-encodes, WEB and HDTV rips that merely mention
    # Blu-ray in the title.
    "BLU": {
        "include": [
            r"(?i)(\bBlu-?Ray\b.*\b(?:VC-?1|AVC|MPEG-?2)\b|\b(?:UHD|4K|2160p)\b(?:.*Blu-?Ray)?.*\b(?:HEVC|H\.?265)\b|\bBlu-?Ray\b.*\b(?:UHD|4K|2160p)\b.*\b(?:HEVC|H\.?265)\b|\b(?:COMPLETE|FULL)\b.*\b(?:(?:UHD|4K|2160p)\b.*)?Blu-?Ray\b|\b(BD25|BD50|BD66|BD100|BDMV|MiniBD)\b)"
        ],
        "exclude": [
            r"(?i)(\b[XH]\.?264\b|\b[XH]\.?265\b|\bWEB-?DL\b|\bWEB-?RIP\b|\bHDTV(?:RIP)?\b|\bREMUX\b|\bBDRip\b|\bBRRip\b|\bHDRip\b|\bENCODE\b|\b(?<!WEB-|HDTV)RIP\b)"
        ],
    },
    # 4K / 2160p resolution
    "4K": {
        "include": [r"4k|2160p|x2160"],
        "exclude": [],
    },
    # 1080p/1080i resolution
    "1080P": {
        "include": [r"1080[pi]|x1080"],
        "exclude": [],
    },
    # 720p/720i resolution
    "720P": {
        "include": [r"720[pi]|x720"],
        "exclude": [],
    },
    # Chinese subtitles; the "tmdb" hint additionally matches titles whose
    # original language is Chinese.
    "CNSUB": {
        "include": [
            r"[中国國繁简](/|\s|\\|\|)?[繁简英粤]|[英简繁](/|\s|\\|\|)?[中繁简]"
            r"|繁體|简体|[中国國][字配]|国语|國語|中文|中字|简日|繁日|简繁|繁体"
            r"|([\s,.-\[])(chs|cht)(|[\s,.-\]])"
            r"|(?<![a-z0-9])(gb|big5)(?![a-z0-9])"
        ],
        "exclude": [],
        "tmdb": {
            "original_language": "zh,cn",
        },
    },
    # Official releases; matched against torrent labels rather than the
    # title (see the "match" key).
    "GZ": {
        "include": [r"官方", r"官种", r"官组"],
        "match": ["labels"],
    },
    # Special-effects (styled) subtitles
    "SPECSUB": {
        "include": [r"特效"],
        "exclude": [],
    },
    # Any Blu-ray source
    "BLURAY": {
        "include": [r"Blu-?Ray"],
        "exclude": [],
    },
    # UHD / UltraHD
    "UHD": {
        "include": [r"UHD|UltraHD"],
        "exclude": [],
    },
    # H.265 / HEVC codec
    "H265": {
        "include": [r"[Hx].?265|HEVC"],
        "exclude": [],
    },
    # H.264 / AVC codec
    "H264": {
        "include": [r"[Hx].?264|AVC"],
        "exclude": [],
    },
    # Dolby Vision
    "DOLBY": {
        "include": [r"Dolby[\s.]+Vision|DOVI|[\s.]+DV[\s.]+|杜比视界"],
        "exclude": [],
    },
    # Dolby Atmos
    "ATMOS": {
        "include": [r"Dolby[\s.+]+Atmos|Atmos|杜比全景[声聲]"],
        "exclude": [],
    },
    # HDR / HDR10 / HDR10+
    "HDR": {
        "include": [r"[\s.]+HDR[\s.]+|HDR10|HDR10\+"],
        "exclude": [],
    },
    # SDR (standard dynamic range)
    "SDR": {
        "include": [r"[\s.]+SDR[\s.]+"],
        "exclude": [],
    },
    # REMUX releases
    "REMUX": {
        "include": [r"REMUX"],
        "exclude": [],
    },
    # WEB-DL / WEB-RIP sources
    "WEBDL": {
        "include": [r"WEB-?DL|WEB-?RIP"],
        "exclude": [],
    },
    # Free torrents (site-side download volume factor of 0); this rule has
    # no regex criteria at all.
    "FREE": {
        "downloadvolumefactor": 0,
    },
    # Mandarin dub; the "tmdb" hint also matches Chinese original-language
    # titles.
    "CNVOI": {
        "include": [r"[国國][语語]配音|[国國]配|[国國][语語]"],
        "exclude": [],
        "tmdb": {
            "original_language": "zh",
        },
    },
    # Cantonese dub
    "HKVOI": {
        "include": [r"粤语配音|粤语"],
        "exclude": [],
    },
    # 60 frames per second
    "60FPS": {
        "include": [r"60fps|60帧"],
        "exclude": [],
    },
    # 3D releases
    "3D": {
        "include": [r"3D"],
        "exclude": [],
    },
}

View File

@@ -0,0 +1,68 @@
import asyncio
import json
import unittest
from unittest.mock import patch
from app.agent.tools.factory import MoviePilotToolFactory
from app.agent.tools.impl._filter_rule_utils import parse_rule_string
from app.agent.tools.impl.query_builtin_filter_rules import (
QueryBuiltinFilterRulesTool,
)
class TestAgentFilterRuleTools(unittest.TestCase):
    """Unit tests for agent filter-rule tool registration and parsing."""

    def test_factory_registers_filter_rule_tools(self):
        # Stub out plugin-provided tools so only built-in tools are created.
        with patch(
            "app.agent.tools.factory.PluginManager.get_plugin_agent_tools",
            return_value=[],
        ):
            created = MoviePilotToolFactory.create_tools(
                session_id="filter-rule-session",
                user_id="10001",
            )
        registered = {tool.name for tool in created}
        required = {
            "query_builtin_filter_rules",
            "query_custom_filter_rules",
            "query_rule_groups",
            "add_custom_filter_rule",
            "update_custom_filter_rule",
            "delete_custom_filter_rule",
            "add_rule_group",
            "update_rule_group",
            "delete_rule_group",
        }
        self.assertTrue(required.issubset(registered))

    def test_query_builtin_filter_rules_returns_requested_rules(self):
        tool = QueryBuiltinFilterRulesTool(
            session_id="filter-rule-session",
            user_id="10001",
        )
        raw = asyncio.run(tool.run(rule_ids=["BLU", "4K"]))
        payload = json.loads(raw)
        self.assertTrue(payload["success"])
        returned_ids = {entry["id"] for entry in payload["rules"]}
        self.assertEqual({"BLU", "4K"}, returned_ids)
        self.assertEqual(">", payload["rule_string_syntax"]["level_separator"])

    def test_parse_rule_string_splits_priority_levels(self):
        expression = "SPECSUB & CNVOI & 4K & !BLU > CNSUB & CNVOI & 4K & !BLU"
        parsed = parse_rule_string(expression)
        self.assertEqual(2, len(parsed["levels"]))
        self.assertEqual(
            ["SPECSUB", "CNVOI", "4K", "BLU", "CNSUB"],
            parsed["referenced_rules"],
        )
        self.assertEqual(
            "SPECSUB & CNVOI & 4K & !BLU",
            parsed["levels"][0]["expression"],
        )
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()