mirror of
https://github.com/jxxghp/MoviePilot.git
synced 2026-05-09 05:12:39 +08:00
Compare commits
32 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
f08a7b9eb3 | ||
|
|
a6fa764e2a | ||
|
|
01676668f1 | ||
|
|
8e5e4f460d | ||
|
|
f907b8a84d | ||
|
|
a3a4285f90 | ||
|
|
0979163b79 | ||
|
|
248a25eaee | ||
|
|
f95b1fa68a | ||
|
|
d2b5d69051 | ||
|
|
3ca419b735 | ||
|
|
50e275a2f9 | ||
|
|
aeccf78957 | ||
|
|
cb3cef70e5 | ||
|
|
b9bd303bf8 | ||
|
|
57d4786a7f | ||
|
|
df031455b2 | ||
|
|
30059eff4f | ||
|
|
bc289b48c8 | ||
|
|
067d8b99b8 | ||
|
|
00a6a9c42d | ||
|
|
070425d446 | ||
|
|
7405883444 | ||
|
|
66959937ed | ||
|
|
e431efbcba | ||
|
|
ba00baa5a0 | ||
|
|
0fb5d4a164 | ||
|
|
1ac717b67f | ||
|
|
273cbd447e | ||
|
|
cee41567a2 | ||
|
|
1aae5eb1a6 | ||
|
|
28a4c81aff |
@@ -27,6 +27,7 @@ from app.agent.tools.impl.search_person_credits import SearchPersonCreditsTool
|
||||
from app.agent.tools.impl.recognize_media import RecognizeMediaTool
|
||||
from app.agent.tools.impl.scrape_metadata import ScrapeMetadataTool
|
||||
from app.agent.tools.impl.query_episode_schedule import QueryEpisodeScheduleTool
|
||||
from app.agent.tools.impl.query_media_detail import QueryMediaDetailTool
|
||||
from app.agent.tools.impl.search_torrents import SearchTorrentsTool
|
||||
from app.agent.tools.impl.search_web import SearchWebTool
|
||||
from app.agent.tools.impl.send_message import SendMessageTool
|
||||
@@ -61,6 +62,7 @@ class MoviePilotToolFactory:
|
||||
RecognizeMediaTool,
|
||||
ScrapeMetadataTool,
|
||||
QueryEpisodeScheduleTool,
|
||||
QueryMediaDetailTool,
|
||||
AddSubscribeTool,
|
||||
UpdateSubscribeTool,
|
||||
SearchSubscribeTool,
|
||||
|
||||
@@ -29,7 +29,8 @@ class QueryDownloadTasksTool(MoviePilotTool):
|
||||
description: str = "Query download status and list download tasks. Can query all active downloads, or search for specific tasks by hash or title. Shows download progress, completion status, and task details from configured downloaders."
|
||||
args_schema: Type[BaseModel] = QueryDownloadTasksInput
|
||||
|
||||
def _get_all_torrents(self, download_chain: DownloadChain, downloader: Optional[str] = None) -> List[Union[TransferTorrent, DownloadingTorrent]]:
|
||||
@staticmethod
|
||||
def _get_all_torrents(download_chain: DownloadChain, downloader: Optional[str] = None) -> List[Union[TransferTorrent, DownloadingTorrent]]:
|
||||
"""
|
||||
查询所有状态的任务(包括下载中和已完成的任务)
|
||||
"""
|
||||
|
||||
120
app/agent/tools/impl/query_media_detail.py
Normal file
120
app/agent/tools/impl/query_media_detail.py
Normal file
@@ -0,0 +1,120 @@
|
||||
"""查询媒体详情工具"""
|
||||
|
||||
import json
|
||||
from typing import Optional, Type
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from app.agent.tools.base import MoviePilotTool
|
||||
from app.chain.media import MediaChain
|
||||
from app.log import logger
|
||||
from app.schemas import MediaType
|
||||
|
||||
|
||||
class QueryMediaDetailInput(BaseModel):
    """Input schema for the query-media-detail tool."""
    # Why the agent invoked this tool (required by the tool framework for tracing)
    explanation: str = Field(..., description="Clear explanation of why this tool is being used in the current context")
    # TMDB identifier of the target media item
    tmdb_id: int = Field(..., description="TMDB ID of the media (movie or TV series)")
    # Expected values: "movie" or "tv" (case-insensitive, see QueryMediaDetailTool.run)
    media_type: str = Field(..., description="Media type: 'movie' or 'tv'")
|
||||
|
||||
|
||||
class QueryMediaDetailTool(MoviePilotTool):
    """Agent tool that fetches trimmed media details from TMDB by ID."""
    # Tool identifier used by the agent framework for dispatch
    name: str = "query_media_detail"
    description: str = "Query detailed media information from TMDB by ID and media_type. IMPORTANT: Convert search results type: '电影'→'movie', '电视剧'→'tv'. Returns core metadata including title, year, overview, status, genres, directors, actors, and season count for TV series."
    args_schema: Type[BaseModel] = QueryMediaDetailInput

    def get_tool_message(self, **kwargs) -> Optional[str]:
        """Build a user-friendly progress message from the query parameters."""
        tmdb_id = kwargs.get("tmdb_id")
        return f"正在查询媒体详情: TMDB ID {tmdb_id}"

    async def run(self, tmdb_id: int, media_type: str, **kwargs) -> str:
        """
        Query TMDB media details and return them as a JSON string.

        :param tmdb_id: TMDB ID of the movie or TV series
        :param media_type: "movie" or "tv" (case-insensitive); any other value
                           leaves the type undetermined (mtype stays None)
        :return: JSON string — on success the trimmed metadata, on failure a
                 payload with ``success: False`` and an error message
        """
        logger.info(f"执行工具: {self.name}, 参数: tmdb_id={tmdb_id}, media_type={media_type}")

        try:
            media_chain = MediaChain()

            # Map the string media_type onto the MediaType enum; unrecognized
            # values fall through with mtype = None
            mtype = None
            if media_type:
                if media_type.lower() == 'movie':
                    mtype = MediaType.MOVIE
                elif media_type.lower() == 'tv':
                    mtype = MediaType.TV

            mediainfo = await media_chain.async_recognize_media(tmdbid=tmdb_id, mtype=mtype)

            if not mediainfo:
                return json.dumps({
                    "success": False,
                    "message": f"未找到 TMDB ID {tmdb_id} 的媒体信息"
                }, ensure_ascii=False)

            # Trim genres down to their names only
            genres = [g.get("name") for g in (mediainfo.genres or []) if g.get("name")]

            # Trim directors down to name and job only
            directors = [
                {
                    "name": d.get("name"),
                    "job": d.get("job")
                }
                for d in (mediainfo.directors or [])
                if d.get("name")
            ]

            # Trim actors down to name and character only
            actors = [
                {
                    "name": a.get("name"),
                    "character": a.get("character")
                }
                for a in (mediainfo.actors or [])
                if a.get("name")
            ]

            # Base media detail payload (common to movies and TV)
            result = {
                "success": True,
                "tmdb_id": tmdb_id,
                "type": mediainfo.type.value if mediainfo.type else None,
                "title": mediainfo.title,
                "year": mediainfo.year,
                "overview": mediainfo.overview,
                "status": mediainfo.status,
                "genres": genres,
                "directors": directors,
                "actors": actors
            }

            # TV series carry extra season-level information
            if mediainfo.type == MediaType.TV:
                # Trim season_info down to a basic per-season summary
                season_info = [
                    {
                        "season_number": s.get("season_number"),
                        "name": s.get("name"),
                        "episode_count": s.get("episode_count"),
                        "air_date": s.get("air_date")
                    }
                    for s in (mediainfo.season_info or [])
                    if s.get("season_number") is not None
                ]

                result.update({
                    "number_of_seasons": mediainfo.number_of_seasons,
                    "number_of_episodes": mediainfo.number_of_episodes,
                    "first_air_date": mediainfo.first_air_date,
                    "last_air_date": mediainfo.last_air_date,
                    "season_info": season_info
                })

            return json.dumps(result, ensure_ascii=False, indent=2)

        except Exception as e:
            # Any failure is reported back to the agent as a JSON error payload
            # rather than raised, so the tool call itself never crashes.
            error_message = f"查询媒体详情失败: {str(e)}"
            logger.error(f"查询媒体详情失败: {e}", exc_info=True)
            return json.dumps({
                "success": False,
                "message": error_message,
                "tmdb_id": tmdb_id
            }, ensure_ascii=False)
|
||||
@@ -151,18 +151,21 @@ class MoviePilotToolsManager:
|
||||
normalized[key] = int(value)
|
||||
except (ValueError, TypeError):
|
||||
logger.warning(f"无法将参数 {key}='{value}' 转换为整数,保持原值")
|
||||
normalized[key] = value
|
||||
normalized[key] = None
|
||||
elif field_type == "number" and isinstance(value, str):
|
||||
try:
|
||||
normalized[key] = float(value)
|
||||
except (ValueError, TypeError):
|
||||
logger.warning(f"无法将参数 {key}='{value}' 转换为浮点数,保持原值")
|
||||
normalized[key] = value
|
||||
elif field_type == "boolean" and isinstance(value, str):
|
||||
# 转换字符串为布尔值
|
||||
normalized[key] = value.lower() in ("true", "1", "yes", "on")
|
||||
normalized[key] = None
|
||||
elif field_type == "boolean":
|
||||
if isinstance(value, str):
|
||||
normalized[key] = value.lower() in ("true", "1", "yes", "on")
|
||||
elif isinstance(value, (int, float)):
|
||||
normalized[key] = value != 0
|
||||
else:
|
||||
normalized[key] = True
|
||||
else:
|
||||
# 其他类型保持原样
|
||||
normalized[key] = value
|
||||
|
||||
return normalized
|
||||
@@ -199,7 +202,11 @@ class MoviePilotToolsManager:
|
||||
elif isinstance(result, int, float):
|
||||
formated_result = str(result)
|
||||
else:
|
||||
formated_result = json.dumps(result, ensure_ascii=False, indent=2)
|
||||
try:
|
||||
formated_result = json.dumps(result, ensure_ascii=False, indent=2)
|
||||
except Exception as e:
|
||||
logger.warning(f"结果转换为JSON失败: {e}, 使用字符串表示")
|
||||
formated_result = str(result)
|
||||
|
||||
return formated_result
|
||||
except Exception as e:
|
||||
|
||||
@@ -4,6 +4,7 @@ import pickle
|
||||
import traceback
|
||||
from abc import ABCMeta
|
||||
from collections.abc import Callable
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from typing import Optional, Any, Tuple, List, Set, Union, Dict
|
||||
|
||||
@@ -849,6 +850,8 @@ class ChainBase(metaclass=ABCMeta):
|
||||
:param kwargs: 其他参数(覆盖业务对象属性值)
|
||||
:return: 成功或失败
|
||||
"""
|
||||
# 添加格式化的时间参数
|
||||
kwargs.setdefault('current_time', datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
|
||||
# 渲染消息
|
||||
message = MessageTemplateHelper.render(message=message, meta=meta, mediainfo=mediainfo,
|
||||
torrentinfo=torrentinfo, transferinfo=transferinfo, **kwargs)
|
||||
@@ -932,6 +935,8 @@ class ChainBase(metaclass=ABCMeta):
|
||||
:param kwargs: 其他参数(覆盖业务对象属性值)
|
||||
:return: 成功或失败
|
||||
"""
|
||||
# 添加格式化的时间参数
|
||||
kwargs.setdefault('current_time', datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
|
||||
# 渲染消息
|
||||
message = MessageTemplateHelper.render(message=message, meta=meta, mediainfo=mediainfo,
|
||||
torrentinfo=torrentinfo, transferinfo=transferinfo, **kwargs)
|
||||
|
||||
@@ -618,7 +618,7 @@ class MediaChain(ChainBase):
|
||||
should_scrape = True # 未知类型默认刮削
|
||||
|
||||
if should_scrape:
|
||||
image_path = filepath.with_name(image_name)
|
||||
image_path = filepath / image_name
|
||||
if overwrite or not storagechain.get_file_item(storage=fileitem.storage,
|
||||
path=image_path):
|
||||
# 流式下载图片并直接保存
|
||||
|
||||
@@ -195,10 +195,14 @@ class MessageChain(ChainBase):
|
||||
if text.isdigit():
|
||||
# 用户选择了具体的条目
|
||||
# 缓存
|
||||
cache_data: dict = user_cache.get(userid).copy()
|
||||
cache_data: dict = user_cache.get(userid)
|
||||
if not cache_data:
|
||||
# 发送消息
|
||||
self.post_message(Notification(channel=channel, source=source, title="输入有误!", userid=userid))
|
||||
return
|
||||
cache_data = cache_data.copy()
|
||||
# 选择项目
|
||||
if not cache_data \
|
||||
or not cache_data.get('items') \
|
||||
if not cache_data.get('items') \
|
||||
or len(cache_data.get('items')) < int(text):
|
||||
# 发送消息
|
||||
self.post_message(Notification(channel=channel, source=source, title="输入有误!", userid=userid))
|
||||
@@ -370,12 +374,13 @@ class MessageChain(ChainBase):
|
||||
del cache_data
|
||||
elif text.lower() == "p":
|
||||
# 上一页
|
||||
cache_data: dict = user_cache.get(userid).copy()
|
||||
cache_data: dict = user_cache.get(userid)
|
||||
if not cache_data:
|
||||
# 没有缓存
|
||||
self.post_message(Notification(
|
||||
channel=channel, source=source, title="输入有误!", userid=userid))
|
||||
return
|
||||
cache_data = cache_data.copy()
|
||||
try:
|
||||
if _current_page == 0:
|
||||
# 第一页
|
||||
@@ -422,12 +427,13 @@ class MessageChain(ChainBase):
|
||||
del cache_data
|
||||
elif text.lower() == "n":
|
||||
# 下一页
|
||||
cache_data: dict = user_cache.get(userid).copy()
|
||||
cache_data: dict = user_cache.get(userid)
|
||||
if not cache_data:
|
||||
# 没有缓存
|
||||
self.post_message(Notification(
|
||||
channel=channel, source=source, title="输入有误!", userid=userid))
|
||||
return
|
||||
cache_data = cache_data.copy()
|
||||
try:
|
||||
cache_type: str = cache_data.get('type')
|
||||
# 产生副本,避免修改原值
|
||||
|
||||
@@ -44,6 +44,7 @@ class SiteChain(ChainBase):
|
||||
"star-space.net": self.__indexphp_test,
|
||||
"yemapt.org": self.__yema_test,
|
||||
"hddolby.com": self.__hddolby_test,
|
||||
"rousi.pro": self.__rousi_test,
|
||||
}
|
||||
|
||||
def refresh_userdata(self, site: dict = None) -> Optional[SiteUserData]:
|
||||
@@ -249,6 +250,32 @@ class SiteChain(ChainBase):
|
||||
else:
|
||||
return False, f"错误:{res.status_code} {res.reason}"
|
||||
|
||||
@staticmethod
def __rousi_test(site: Site) -> Tuple[bool, str]:
    """
    Check whether the site login is valid: rousi.

    Calls the rousi API v1 /profile endpoint with the site's apikey as a
    Bearer token and interprets the response.

    :param site: site record carrying url, apikey, proxy and timeout settings
    :return: (success flag, human-readable status message)
    """
    url = f"https://{StringUtils.get_url_domain(site.url)}/api/v1/profile"
    headers = {
        "Content-Type": "application/json",
        "Accept": "application/json",
        "Authorization": f"Bearer {site.apikey}",
    }
    res = RequestUtils(
        headers=headers,
        proxies=settings.PROXY if site.proxy else None,
        timeout=site.timeout or 15
    ).get_res(url=url)
    if res is None:
        # No response at all — site unreachable
        return False, "无法打开网站!"
    if res.status_code == 200:
        user_info = res.json()
        # API contract: code == 0 means the token is accepted
        if user_info and user_info.get("code") == 0:
            return True, "连接成功"
        return False, "APIKEY已过期"
    else:
        return False, f"错误:{res.status_code} {res.reason}"
|
||||
|
||||
@staticmethod
|
||||
def __parse_favicon(url: str, cookie: str, ua: str) -> Tuple[str, Optional[str]]:
|
||||
"""
|
||||
|
||||
@@ -278,7 +278,7 @@ class ConfigModel(BaseModel):
|
||||
# 搜索多个名称
|
||||
SEARCH_MULTIPLE_NAME: bool = False
|
||||
# 最大搜索名称数量
|
||||
MAX_SEARCH_NAME_LIMIT: int = 2
|
||||
MAX_SEARCH_NAME_LIMIT: int = 3
|
||||
|
||||
# ==================== 下载配置 ====================
|
||||
# 种子标签
|
||||
|
||||
@@ -71,12 +71,14 @@ def MetaInfoPath(path: Path) -> MetaBase:
|
||||
file_meta = MetaInfo(title=path.name)
|
||||
# 上级目录元数据
|
||||
dir_meta = MetaInfo(title=path.parent.name)
|
||||
# 合并元数据
|
||||
file_meta.merge(dir_meta)
|
||||
if file_meta.type == MediaType.TV or dir_meta.type != MediaType.TV:
|
||||
# 合并元数据
|
||||
file_meta.merge(dir_meta)
|
||||
# 上上级目录元数据
|
||||
root_meta = MetaInfo(title=path.parent.parent.name)
|
||||
# 合并元数据
|
||||
file_meta.merge(root_meta)
|
||||
if file_meta.type == MediaType.TV or root_meta.type != MediaType.TV:
|
||||
# 合并元数据
|
||||
file_meta.merge(root_meta)
|
||||
return file_meta
|
||||
|
||||
|
||||
|
||||
@@ -12,6 +12,7 @@ from app.modules.indexer.spider import SiteSpider
|
||||
from app.modules.indexer.spider.haidan import HaiDanSpider
|
||||
from app.modules.indexer.spider.hddolby import HddolbySpider
|
||||
from app.modules.indexer.spider.mtorrent import MTorrentSpider
|
||||
from app.modules.indexer.spider.rousi import RousiSpider
|
||||
from app.modules.indexer.spider.tnode import TNodeSpider
|
||||
from app.modules.indexer.spider.torrentleech import TorrentLeech
|
||||
from app.modules.indexer.spider.yema import YemaSpider
|
||||
@@ -212,6 +213,13 @@ class IndexerModule(_ModuleBase):
|
||||
mtype=mtype,
|
||||
page=page
|
||||
)
|
||||
elif site.get('parser') == "RousiPro":
|
||||
error_flag, result = RousiSpider(site).search(
|
||||
keyword=search_word,
|
||||
mtype=mtype,
|
||||
cat=cat,
|
||||
page=page
|
||||
)
|
||||
else:
|
||||
error_flag, result = self.__spider_search(
|
||||
search_word=search_word,
|
||||
@@ -300,6 +308,13 @@ class IndexerModule(_ModuleBase):
|
||||
mtype=mtype,
|
||||
page=page
|
||||
)
|
||||
elif site.get('parser') == "RousiPro":
|
||||
error_flag, result = await RousiSpider(site).async_search(
|
||||
keyword=search_word,
|
||||
mtype=mtype,
|
||||
cat=cat,
|
||||
page=page
|
||||
)
|
||||
else:
|
||||
error_flag, result = await self.__async_spider_search(
|
||||
search_word=search_word,
|
||||
|
||||
@@ -35,6 +35,7 @@ class SiteSchema(Enum):
|
||||
HDDolby = "HDDolby"
|
||||
Zhixing = "Zhixing"
|
||||
Bitpt = "Bitpt"
|
||||
RousiPro = "RousiPro"
|
||||
|
||||
|
||||
class SiteParserBase(metaclass=ABCMeta):
|
||||
|
||||
164
app/modules/indexer/parser/rousi.py
Normal file
164
app/modules/indexer/parser/rousi.py
Normal file
@@ -0,0 +1,164 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
import json
|
||||
from typing import Optional, Tuple
|
||||
|
||||
from app.log import logger
|
||||
from app.modules.indexer.parser import SiteParserBase, SiteSchema
|
||||
from app.utils.string import StringUtils
|
||||
|
||||
|
||||
class RousiSiteUserInfo(SiteParserBase):
    """
    Site parser for rousi.pro.

    Uses the site's API v1, authenticating with the Passkey as a
    Bearer token.
    """
    # Parser schema identifier registered with SiteParserBase
    schema = SiteSchema.RousiPro
    # Authenticate via API key instead of cookies
    request_mode = "apikey"

    def _parse_site_page(self, html_text: str):
        """
        Configure the API request URL and headers.

        Uses the API v1 /profile endpoint to obtain the user information.
        """
        self._base_url = f"https://{StringUtils.get_url_domain(self._site_url)}"
        self._user_basic_page = "api/v1/profile?include_fields[user]=seeding_leeching_data"
        self._user_basic_params = {}
        self._user_basic_headers = {
            "Content-Type": "application/json",
            "Accept": "application/json",
            "Authorization": f"Bearer {self.apikey}"
        }

        # The rousi.pro API v1 returns everything from the single profile
        # endpoint, so no additional pages need to be fetched.
        self._user_traffic_page = None
        self._user_detail_page = None
        self._torrent_seeding_page = None
        self._user_mail_unread_page = None
        self._sys_mail_unread_page = None

    def _parse_logged_in(self, html_text):
        """
        Report whether login succeeded.

        In API-key mode success is decided by the HTTP status code, so this
        always returns True.
        """
        return True

    def _parse_user_base_info(self, html_text: str):
        """
        Parse the user's basic information.

        The API v1 profile endpoint returns the complete user record in one
        call, including upload/download traffic and seeding data.

        Example API response:
        {
            "code": 0,
            "message": "success",
            "data": {
                "id": 1,
                "username": "example",
                "level_text": "Lv.5",
                "registered_at": "2024-01-01T00:00:00Z",
                "uploaded": 1073741824,
                "downloaded": 536870912,
                "ratio": 2.0,
                "karma": 1000.5,
                "seeding_leeching_data": {
                    "seeding_count": 10,
                    "seeding_size": 10737418240,
                    "leeching_count": 2,
                    "leeching_size": 2147483648
                }
            }
        }
        """
        if not html_text:
            return

        try:
            data = json.loads(html_text)
        except json.JSONDecodeError:
            logger.error(f"{self._site_name} JSON 解析失败")
            return

        # NOTE(review): if json.loads produced None (payload "null"), the
        # data.get calls below would raise — presumably the API never
        # returns a bare null; confirm against the API.
        if not data or data.get("code") != 0:
            self.err_msg = data.get("message", "未知错误")
            logger.warn(f"{self._site_name} API 错误: {self.err_msg}")
            return

        user_info = data.get("data")
        if not user_info:
            return

        # Basic identity fields
        self.userid = user_info.get("id")
        self.username = user_info.get("username")
        self.user_level = user_info.get("level_text") or user_info.get("role_text")

        # Registration time: normalized to YYYY-MM-DD HH:MM:SS
        join_at = StringUtils.unify_datetime_str(user_info.get("registered_at"))
        if join_at:
            # Ensure the YYYY-MM-DD HH:MM:SS format (19 characters)
            if len(join_at) >= 19:
                self.join_at = join_at[:19]
            else:
                self.join_at = join_at

        # Traffic information
        self.upload = int(user_info.get("uploaded") or 0)
        self.download = int(user_info.get("downloaded") or 0)
        self.ratio = round(float(user_info.get("ratio") or 0), 2)

        # Bonus points (the site calls this "karma")
        self.bonus = float(user_info.get("karma") or 0)

        # Seeding / leeching statistics
        sl_data = user_info.get("seeding_leeching_data", {})
        self.seeding = int(sl_data.get("seeding_count") or 0)
        self.seeding_size = int(sl_data.get("seeding_size") or 0)
        self.leeching = int(sl_data.get("leeching_count") or 0)
        self.leeching_size = int(sl_data.get("leeching_size") or 0)

    def _parse_user_traffic_info(self, html_text: str):
        """
        Parse user traffic information.

        The rousi.pro API v1 already covers this in _parse_user_base_info,
        so nothing is needed here.
        """
        pass

    def _parse_user_detail_info(self, html_text: str):
        """
        Parse detailed user information.

        The rousi.pro API v1 already covers this in _parse_user_base_info,
        so nothing is needed here.
        """
        pass

    def _parse_user_torrent_seeding_info(self, html_text: str, multi_page: Optional[bool] = False) -> Optional[str]:
        """
        Parse the user's seeding information.

        The rousi.pro API v1 already provides seeding data via
        seeding_leeching_data in _parse_user_base_info.

        :param html_text: page content
        :param multi_page: whether the data spans multiple pages
        :return: next page URL (None when there is no next page)
        """
        return None

    def _parse_message_unread_links(self, html_text: str, msg_links: list) -> Optional[str]:
        """
        Parse unread message links.

        The rousi.pro API v1 does not yet expose a messages endpoint.

        :param html_text: page content
        :param msg_links: list of message links
        :return: next page URL (None when there is no next page)
        """
        return None

    def _parse_message_content(self, html_text) -> Tuple[Optional[str], Optional[str], Optional[str]]:
        """
        Parse a message's content.

        The rousi.pro API v1 does not yet expose a messages endpoint.

        :param html_text: page content
        :return: (title, date, content)
        """
        return None, None, None
|
||||
@@ -118,24 +118,36 @@ class MTorrentSpider:
|
||||
labels_value = self._labels.get(result.get('labels') or "0") or ""
|
||||
if labels_value:
|
||||
labels = labels_value.split()
|
||||
status = result.get('status', {})
|
||||
torrent = {
|
||||
'title': result.get('name'),
|
||||
'description': result.get('smallDescr'),
|
||||
'enclosure': self.__get_download_url(result.get('id')),
|
||||
'pubdate': StringUtils.format_timestamp(result.get('createdDate')),
|
||||
'size': int(result.get('size') or '0'),
|
||||
'seeders': int(result.get('status', {}).get("seeders") or '0'),
|
||||
'peers': int(result.get('status', {}).get("leechers") or '0'),
|
||||
'grabs': int(result.get('status', {}).get("timesCompleted") or '0'),
|
||||
'downloadvolumefactor': self.__get_downloadvolumefactor(result.get('status', {}).get("discount")),
|
||||
'uploadvolumefactor': self.__get_uploadvolumefactor(result.get('status', {}).get("discount")),
|
||||
'seeders': int(status.get("seeders") or '0'),
|
||||
'peers': int(status.get("leechers") or '0'),
|
||||
'grabs': int(status.get("timesCompleted") or '0'),
|
||||
'downloadvolumefactor': self.__get_downloadvolumefactor(status.get("discount")),
|
||||
'uploadvolumefactor': self.__get_uploadvolumefactor(status.get("discount")),
|
||||
'page_url': self._pageurl % (self._url, result.get('id')),
|
||||
'imdbid': self.__find_imdbid(result.get('imdb')),
|
||||
'labels': labels,
|
||||
'category': category
|
||||
}
|
||||
if discount_end_time := (result.get('status') or {}).get('discountEndTime'):
|
||||
if discount_end_time := status.get('discountEndTime'):
|
||||
torrent['freedate'] = StringUtils.format_timestamp(discount_end_time)
|
||||
# 解析全站促销时的规则(当前馒头只有下载促销)
|
||||
if promotion_rule := status.get("promotionRule"):
|
||||
discount = promotion_rule.get("discount", "NORMAL")
|
||||
torrent["downloadvolumefactor"] = self.__get_downloadvolumefactor(discount)
|
||||
if end_time := promotion_rule.get("endTime"):
|
||||
torrent["freedate"] = StringUtils.format_timestamp(end_time)
|
||||
if mall_single_free := status.get("mallSingleFree"):
|
||||
if mall_single_free.get("status") == "ONGOING":
|
||||
torrent["downloadvolumefactor"] = self.__get_downloadvolumefactor("FREE")
|
||||
if end_date := mall_single_free.get("endDate"):
|
||||
torrent["freedate"] = StringUtils.format_timestamp(end_date)
|
||||
torrents.append(torrent)
|
||||
return torrents
|
||||
|
||||
|
||||
289
app/modules/indexer/spider/rousi.py
Normal file
289
app/modules/indexer/spider/rousi.py
Normal file
@@ -0,0 +1,289 @@
|
||||
import base64
|
||||
import json
|
||||
from typing import List, Optional, Tuple
|
||||
|
||||
from app.core.config import settings
|
||||
from app.db.systemconfig_oper import SystemConfigOper
|
||||
from app.log import logger
|
||||
from app.schemas import MediaType
|
||||
from app.utils.http import RequestUtils, AsyncRequestUtils
|
||||
from app.utils.string import StringUtils
|
||||
|
||||
|
||||
class RousiSpider:
    """
    Rousi.pro API v1 spider.

    Searches torrents through the site's API v1:
    - Authentication: Bearer token (Passkey)
    - Search endpoint: /api/v1/torrents
    - Detail endpoint: /api/v1/torrents/:id
    """
    # Indexer (site) record ID
    _indexerid = None
    # Bare domain of the site URL
    _domain = None
    # Full site URL as configured
    _url = None
    # Site display name, used in log messages
    _name = ""
    # Proxy settings (None when the site does not use a proxy)
    _proxy = None
    _cookie = None
    _ua = None
    # Page size requested from the API
    _size = 100
    # URL templates; %s is filled with the domain (and torrent id)
    _searchurl = "https://%s/api/v1/torrents"
    _downloadurl = "https://%s/api/v1/torrents/%s"
    _timeout = 15

    # Category definitions.
    # The API does not support multi-category search; only one category
    # is used per request.
    _movie_category = 'movie'
    _tv_category = 'tv'

    # API KEY (the site's Passkey)
    _apikey = None

    def __init__(self, indexer: dict):
        """
        Initialize the spider from an indexer (site) configuration dict.

        :param indexer: site configuration with keys such as id, domain,
                        name, proxy, cookie, ua, apikey, timeout
        """
        self.systemconfig = SystemConfigOper()
        if indexer:
            self._indexerid = indexer.get('id')
            self._url = indexer.get('domain')
            self._domain = StringUtils.get_url_domain(self._url)
            # Bake the domain into the URL templates ("%s" is kept as a
            # placeholder for the torrent id in the download URL)
            self._searchurl = self._searchurl % self._domain
            self._downloadurl = self._downloadurl % (self._domain, "%s")
            self._name = indexer.get('name')
            if indexer.get('proxy'):
                self._proxy = settings.PROXY
            self._cookie = indexer.get('cookie')
            self._ua = indexer.get('ua')
            self._apikey = indexer.get('apikey')
            self._timeout = indexer.get('timeout') or 15

    def __get_params(self, keyword: str, mtype: MediaType = None, cat: Optional[str] = None, page: Optional[int] = 0) -> dict:
        """
        Build the API request parameters.

        :param keyword: search keyword
        :param mtype: media type (MOVIE/TV)
        :param cat: user-selected category IDs (comma-separated string)
        :param page: page number (0-based here; the API is 1-based)
        :return: request parameter dict
        """
        params = {
            "page": int(page) + 1,
            "page_size": self._size
        }
        if keyword:
            params["keyword"] = keyword

        # The API does not support multi-category search — a single
        # "category" parameter only. Prefer the user-selected category and
        # fall back to inferring one from mtype.
        if cat:
            # The user picked specific categories: map the category IDs
            # back to the API's category names
            category_names = self.__get_category_names_by_ids(cat)
            if category_names:
                # If several categories were selected, use only the first
                params["category"] = category_names[0]
        elif mtype:
            # No category selected: infer from the media type
            if mtype == MediaType.MOVIE:
                params["category"] = self._movie_category
            elif mtype == MediaType.TV:
                params["category"] = self._tv_category

        return params

    def __get_category_names_by_ids(self, cat: str) -> Optional[list]:
        """
        Map user-selected category IDs to the API's category names.

        :param cat: comma-separated category IDs, e.g. "1,2,3"
        :return: list of API category names (e.g. ["movie", "tv",
                 "documentary"]), or None when nothing maps
        """
        if not cat:
            return None

        # ID -> category name mapping
        id_to_name = {
            '1': 'movie',
            '2': 'tv',
            '3': 'documentary',
            '4': 'animation',
            '6': 'variety'
        }

        # Split the IDs and translate each known one to a category name
        cat_ids = [c.strip() for c in cat.split(',') if c.strip()]
        category_names = [id_to_name.get(cat_id) for cat_id in cat_ids if cat_id in id_to_name]

        return category_names if category_names else None

    def __process_response(self, res) -> Tuple[bool, List[dict]]:
        """
        Process an API response.

        :param res: response object from RequestUtils/AsyncRequestUtils
        :return: (error flag, list of torrents) — the flag is True when an
                 error occurred
        """
        if res and res.status_code == 200:
            try:
                data = res.json()
                # API contract: code == 0 means success
                if data.get('code') == 0:
                    results = data.get('data', {}).get('torrents', [])
                    return False, self.__parse_result(results)
                else:
                    logger.warn(f"{self._name} 搜索失败,错误信息:{data.get('message')}")
                    return True, []
            except Exception as e:
                logger.warn(f"{self._name} 解析响应失败:{e}")
                return True, []
        elif res is not None:
            logger.warn(f"{self._name} 搜索失败,HTTP 错误码:{res.status_code}")
            return True, []
        else:
            logger.warn(f"{self._name} 搜索失败,无法连接 {self._domain}")
            return True, []

    def __parse_result(self, results: List[dict]) -> List[dict]:
        """
        Parse search results.

        Converts the API's torrent records into MoviePilot's standard
        torrent dict format.

        :param results: torrent list as returned by the API
        :return: list of normalized torrent dicts
        """
        torrents = []
        if not results:
            return torrents

        for result in results:
            # Resolve the category: the API may return a dict (slug/name)
            # or a plain string
            raw_cat = result.get('category')
            cat_val = None

            category = MediaType.UNKNOWN.value

            if isinstance(raw_cat, dict):
                cat_val = raw_cat.get('slug') or raw_cat.get('name')
            elif isinstance(raw_cat, str):
                cat_val = raw_cat

            if cat_val:
                cat_val = str(cat_val).lower()
                if cat_val == self._movie_category:
                    category = MediaType.MOVIE.value
                elif cat_val == self._tv_category:
                    category = MediaType.TV.value
                else:
                    category = MediaType.UNKNOWN.value

            # Resolve promotion (discount) information.
            # The API backend already applies site-wide promotion priority,
            # so the returned promotion data is used as-is.
            downloadvolumefactor = 1.0
            uploadvolumefactor = 1.0
            freedate = None

            promotion = result.get('promotion')
            if promotion and promotion.get('is_active'):
                downloadvolumefactor = float(promotion.get('down_multiplier', 1.0))
                uploadvolumefactor = float(promotion.get('up_multiplier', 1.0))
                # Promotion expiry, normalized to YYYY-MM-DD HH:MM:SS
                if promotion.get('until'):
                    freedate = StringUtils.unify_datetime_str(promotion.get('until'))

            torrent = {
                'title': result.get('title'),
                'description': result.get('subtitle'),
                'enclosure': self.__get_download_url(result.get('id')),
                'pubdate': StringUtils.unify_datetime_str(result.get('created_at')),
                'size': int(result.get('size') or 0),
                'seeders': int(result.get('seeders') or 0),
                'peers': int(result.get('leechers') or 0),
                'grabs': int(result.get('downloads') or 0),
                'downloadvolumefactor': downloadvolumefactor,
                'uploadvolumefactor': uploadvolumefactor,
                'freedate': freedate,
                'page_url': f"https://{self._domain}/torrent/{result.get('uuid')}",
                'labels': [],
                'category': category
            }
            torrents.append(torrent)
        return torrents

    def search(self, keyword: str, mtype: MediaType = None, cat: Optional[str] = None, page: Optional[int] = 0) -> Tuple[bool, List[dict]]:
        """
        Search torrents synchronously.

        :param keyword: search keyword
        :param mtype: media type (MOVIE/TV)
        :param cat: user-selected category IDs (comma-separated)
        :param page: page number (0-based)
        :return: (error flag, list of torrents)
        """
        if not self._apikey:
            logger.warn(f"{self._name} 未配置 API Key (Passkey)")
            return True, []

        params = self.__get_params(keyword, mtype, cat, page)
        headers = {
            "Authorization": f"Bearer {self._apikey}",
            "Accept": "application/json"
        }

        res = RequestUtils(
            headers=headers,
            proxies=self._proxy,
            timeout=self._timeout
        ).get_res(url=self._searchurl, params=params)

        return self.__process_response(res)

    async def async_search(self, keyword: str, mtype: MediaType = None, cat: Optional[str] = None, page: Optional[int] = 0) -> Tuple[bool, List[dict]]:
        """
        Search torrents asynchronously.

        :param keyword: search keyword
        :param mtype: media type (MOVIE/TV)
        :param cat: user-selected category IDs (comma-separated)
        :param page: page number (0-based)
        :return: (error flag, list of torrents)
        """
        if not self._apikey:
            logger.warn(f"{self._name} 未配置 API Key (Passkey)")
            return True, []

        params = self.__get_params(keyword, mtype, cat, page)
        headers = {
            "Authorization": f"Bearer {self._apikey}",
            "Accept": "application/json"
        }

        res = await AsyncRequestUtils(
            headers=headers,
            proxies=self._proxy,
            timeout=self._timeout
        ).get_res(url=self._searchurl, params=params)

        return self.__process_response(res)

    def __get_download_url(self, torrent_id: int) -> str:
        """
        Build the torrent download link.

        Encodes a request recipe as base64 so that MoviePilot knows how to
        obtain the real download address: it first calls the detail
        endpoint and then extracts data.download_url from the response.

        :param torrent_id: torrent ID
        :return: base64-encoded request-config string + detail endpoint URL
        """
        url = self._downloadurl % torrent_id
        # MoviePilot interprets this special URL format as:
        # 1. request the URL with the given method and headers
        # 2. extract the field named by "result" from the JSON response
        #    and use it as the real download address
        params = {
            'method': 'get',
            'header': {
                'Authorization': f'Bearer {self._apikey}',
                'Accept': 'application/json'
            },
            'result': 'data.download_url'
        }
        base64_str = base64.b64encode(json.dumps(params).encode('utf-8')).decode('utf-8')
        return f"[{base64_str}]{url}"
|
||||
@@ -85,7 +85,11 @@ class SubtitleModule(_ModuleBase):
|
||||
)
|
||||
# TODO 其它采用API访问的站点
|
||||
# 普通站点通过解析网站代码的方式获取
|
||||
request = RequestUtils(cookies=torrent.site_cookie, ua=torrent.site_ua)
|
||||
request = RequestUtils(
|
||||
cookies=torrent.site_cookie,
|
||||
ua=torrent.site_ua,
|
||||
proxies=settings.PROXY if torrent.site_proxy else None,
|
||||
)
|
||||
res = request.get_res(torrent.page_url)
|
||||
if res and res.status_code == 200:
|
||||
if not res.text:
|
||||
@@ -176,7 +180,11 @@ class SubtitleModule(_ModuleBase):
|
||||
logger.warn(f"{torrent.page_url} 页面未找到字幕下载链接")
|
||||
return
|
||||
# 下载所有字幕文件
|
||||
request = RequestUtils(cookies=torrent.site_cookie, ua=torrent.site_ua)
|
||||
request = RequestUtils(
|
||||
cookies=torrent.site_cookie,
|
||||
ua=torrent.site_ua,
|
||||
proxies=settings.PROXY if torrent.site_proxy else None,
|
||||
)
|
||||
for sublink in sublink_list:
|
||||
logger.info(f"找到字幕下载链接:{sublink},开始下载...")
|
||||
# 下载
|
||||
|
||||
@@ -1117,4 +1117,19 @@ meta_cases = [{
|
||||
"audio_codec": "",
|
||||
"tmdbid": 19995
|
||||
}
|
||||
}, {
|
||||
"path": "/movies/DouBan_IMDB.TOP250.Movies.Mixed.Collection.20240501.FRDS/为奴十二年.12.Years.a.Slave.2013.BluRay.1080p.x265.10bit.2Audio.MNHD-FRDS/12.Years.a.Slave.2013.BluRay.1080p.x265.10bit.2Audio.MNHD-FRDS.mkv",
|
||||
"target": {
|
||||
"type": "未知",
|
||||
"cn_name": "",
|
||||
"en_name": "12 Years A Slave",
|
||||
"year": "2013",
|
||||
"part": "",
|
||||
"season": "",
|
||||
"episode": "",
|
||||
"restype": "BluRay",
|
||||
"pix": "1080p",
|
||||
"video_codec": "x265 10bit",
|
||||
"audio_codec": "2Audio"
|
||||
}
|
||||
}]
|
||||
|
||||
@@ -1,2 +1,2 @@
|
||||
APP_VERSION = 'v2.9.1'
|
||||
FRONTEND_VERSION = 'v2.9.1'
|
||||
APP_VERSION = 'v2.9.3'
|
||||
FRONTEND_VERSION = 'v2.9.3'
|
||||
|
||||
Reference in New Issue
Block a user