mirror of
https://github.com/jxxghp/MoviePilot.git
synced 2026-05-10 17:42:45 +08:00
Compare commits
92 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
8bd6ccb0de | ||
|
|
ed8895dfbb | ||
|
|
a55632051b | ||
|
|
7e347a458d | ||
|
|
cce71f23e2 | ||
|
|
d68461a127 | ||
|
|
1bd12a9411 | ||
|
|
4086ba4763 | ||
|
|
6a9cdf71d7 | ||
|
|
a9644c4f86 | ||
|
|
cf62ad5e8e | ||
|
|
f8ed16666c | ||
|
|
37926b4c19 | ||
|
|
b080a2003f | ||
|
|
ab0008be86 | ||
|
|
4a42b0d000 | ||
|
|
e3d4b19dac | ||
|
|
403d600db4 | ||
|
|
835e6e8891 | ||
|
|
eec25113b5 | ||
|
|
a7c4161f91 | ||
|
|
799eb9e6ef | ||
|
|
88993cb67b | ||
|
|
0dc9c98c06 | ||
|
|
c1c91cec44 | ||
|
|
19b6927320 | ||
|
|
0889ebc8b8 | ||
|
|
fb249c0ea5 | ||
|
|
feb22ff0a7 | ||
|
|
3c95156ce1 | ||
|
|
8b6dca6a46 | ||
|
|
43907eea26 | ||
|
|
67145a80d0 | ||
|
|
0b3138fec6 | ||
|
|
b84896b4f9 | ||
|
|
efd046d2f8 | ||
|
|
06fcf817bb | ||
|
|
16a94d9054 | ||
|
|
5bf502188d | ||
|
|
5269b4bc82 | ||
|
|
e3f8ed9886 | ||
|
|
74de554fb0 | ||
|
|
b41de1a982 | ||
|
|
25f7d9ccdd | ||
|
|
9646745181 | ||
|
|
1317d9c4f0 | ||
|
|
351029a842 | ||
|
|
15e1fb61ac | ||
|
|
1889a829b5 | ||
|
|
53a14fce38 | ||
|
|
d9ed7b09c7 | ||
|
|
4dcb18f00e | ||
|
|
0a52fe0a7a | ||
|
|
e5a4d11cf9 | ||
|
|
6c233f13de | ||
|
|
00aee3496c | ||
|
|
77ae40e3d6 | ||
|
|
68cba44476 | ||
|
|
b86d06f632 | ||
|
|
0b7cf305a0 | ||
|
|
21ae36bc3a | ||
|
|
4e2d9e9165 | ||
|
|
6cee308894 | ||
|
|
b8f4cd5fea | ||
|
|
aa1557ad9e | ||
|
|
f03da6daca | ||
|
|
30eb4385d4 | ||
|
|
4c9afcc1a8 | ||
|
|
dd47432a45 | ||
|
|
0ba6974bd6 | ||
|
|
827d8f6d84 | ||
|
|
943a462c69 | ||
|
|
a1bc773fb5 | ||
|
|
ac169b7d22 | ||
|
|
eecbbfea3a | ||
|
|
635ddb044e | ||
|
|
1a6123489d | ||
|
|
4e69195a8d | ||
|
|
e48c8ee652 | ||
|
|
7df07b86b9 | ||
|
|
5e2ad34864 | ||
|
|
e9a147d43c | ||
|
|
a340ee045e | ||
|
|
12405f3c34 | ||
|
|
1e465ee231 | ||
|
|
f06c24c23e | ||
|
|
4b93ee4843 | ||
|
|
c022e05ab9 | ||
|
|
c2a0d9d657 | ||
|
|
6fcf2c2f1f | ||
|
|
bc37daef58 | ||
|
|
fab5995c4e |
73
app/actions/__init__.py
Normal file
73
app/actions/__init__.py
Normal file
@@ -0,0 +1,73 @@
|
|||||||
|
from abc import ABC, abstractmethod
|
||||||
|
|
||||||
|
from app.chain import ChainBase
|
||||||
|
from app.schemas import ActionContext, ActionParams
|
||||||
|
|
||||||
|
|
||||||
|
class ActionChain(ChainBase):
    # Thin marker subclass: gives workflow actions their own chain entry
    # point (e.g. list_torrents / recognize_media) without adding behavior.
    pass
|
||||||
|
|
||||||
|
|
||||||
|
class BaseAction(ABC):
    """
    Base class for workflow actions.

    Subclasses expose ``name``/``description``/``data`` as class-level
    properties and implement ``execute`` to transform an ``ActionContext``.
    """

    # Completion flag. Rebinding it in job_done() creates an instance
    # attribute, so the class-level immutable default is safely shared.
    _done_flag = False
    # Human-readable result message, set by job_done().
    _message = ""

    # NOTE(review): chaining @classmethod with @property was deprecated in
    # Python 3.11 and removed in 3.13 — confirm the project's target Python
    # version before upgrading. Callers access these as `cls.name` (no call).
    @classmethod
    @property
    @abstractmethod
    def name(cls) -> str:
        """Display name of the action."""
        pass

    @classmethod
    @property
    @abstractmethod
    def description(cls) -> str:
        """Short description of what the action does."""
        pass

    @classmethod
    @property
    @abstractmethod
    def data(cls) -> dict:
        """Default parameter dict (the action's params model as a dict)."""
        pass

    @property
    def done(self) -> bool:
        """
        Whether the action has finished (set via job_done()).
        """
        return self._done_flag

    @property
    @abstractmethod
    def success(self) -> bool:
        """
        Whether the action succeeded; defined by each subclass.
        """
        pass

    @property
    def message(self) -> str:
        """
        Result message of the last execution.
        """
        return self._message

    def job_done(self, message: str = None):
        """
        Mark the action as finished, optionally recording a message.

        Note: when called without a message, _message becomes None rather
        than keeping the previous value.
        """
        self._message = message
        self._done_flag = True

    @abstractmethod
    def execute(self, workflow_id: int, params: ActionParams, context: ActionContext) -> ActionContext:
        """
        Run the action and return the (possibly mutated) context.

        :param workflow_id: id used to poll global_vars.is_workflow_stopped()
        :param params: action parameters
        :param context: shared workflow context passed between actions
        """
        raise NotImplementedError
|
||||||
108
app/actions/add_download.py
Normal file
108
app/actions/add_download.py
Normal file
@@ -0,0 +1,108 @@
|
|||||||
|
from typing import Optional
|
||||||
|
|
||||||
|
from pydantic import Field
|
||||||
|
|
||||||
|
from app.actions import BaseAction
|
||||||
|
from app.chain.download import DownloadChain
|
||||||
|
from app.chain.media import MediaChain
|
||||||
|
from app.core.config import global_vars
|
||||||
|
from app.core.metainfo import MetaInfo
|
||||||
|
from app.log import logger
|
||||||
|
from app.schemas import ActionParams, ActionContext, DownloadTask, MediaType
|
||||||
|
|
||||||
|
|
||||||
|
class AddDownloadParams(ActionParams):
    """
    Parameters for the "add download" action.
    """
    # Target downloader name; None lets the download chain pick the default.
    downloader: Optional[str] = Field(None, description="下载器")
    # Save path on the downloader; None uses the configured default.
    save_path: Optional[str] = Field(None, description="保存路径")
    # When True, skip torrents whose media already exists in the library.
    only_lack: Optional[bool] = Field(False, description="仅下载缺失的资源")
|
||||||
|
|
||||||
|
|
||||||
|
class AddDownloadAction(BaseAction):
    """
    Add download tasks for every torrent in the workflow context.

    Success means no torrent failed recognition or submission.
    """

    def __init__(self):
        super().__init__()
        self.downloadchain = DownloadChain()
        self.mediachain = MediaChain()
        # FIX: these were class-level attributes. A class-level list is
        # shared by ALL instances, so download ids accumulated across
        # workflow runs. Keep per-instance state instead.
        self._added_downloads = []
        self._has_error = False

    @classmethod
    @property
    def name(cls) -> str:
        return "添加下载"

    @classmethod
    @property
    def description(cls) -> str:
        return "根据资源列表添加下载任务"

    @classmethod
    @property
    def data(cls) -> dict:
        return AddDownloadParams().dict()

    @property
    def success(self) -> bool:
        return not self._has_error

    def execute(self, workflow_id: int, params: dict, context: ActionContext) -> ActionContext:
        """
        Submit every torrent in context.torrents to the downloader and
        append the resulting DownloadTask entries to context.downloads.
        """
        params = AddDownloadParams(**params)
        for t in context.torrents:
            if global_vars.is_workflow_stopped(workflow_id):
                break
            # Fill in metadata / media info if an upstream action did not.
            if not t.meta_info:
                t.meta_info = MetaInfo(title=t.title, subtitle=t.description)
            if not t.media_info:
                t.media_info = self.mediachain.recognize_media(meta=t.meta_info)
                if not t.media_info:
                    self._has_error = True
                    logger.warning(f"{t.title} 未识别到媒体信息,无法下载")
                    continue
            if params.only_lack:
                # Skip resources that already exist in the media library.
                exists_info = self.downloadchain.media_exists(t.media_info)
                if exists_info:
                    if t.media_info.type == MediaType.MOVIE:
                        # Movies: any existing copy means skip.
                        logger.warning(f"{t.title} 媒体库中已存在,跳过")
                        continue
                    else:
                        # TV shows: compare per-season / per-episode.
                        exists_seasons = exists_info.seasons or {}
                        if len(t.meta_info.season_list) > 1:
                            # Multi-season packs are never downloaded here.
                            logger.warning(f"{t.meta_info.title} 有多季,跳过")
                            continue
                        else:
                            exists_episodes = exists_seasons.get(t.meta_info.begin_season)
                            if exists_episodes:
                                if set(t.meta_info.episode_list).issubset(exists_episodes):
                                    logger.warning(f"{t.meta_info.title} 第 {t.meta_info.begin_season} 季第 {t.meta_info.episode_list} 集已存在,跳过")
                                    continue

            did = self.downloadchain.download_single(context=t,
                                                     downloader=params.downloader,
                                                     save_path=params.save_path)
            if did:
                self._added_downloads.append(did)
            else:
                self._has_error = True

        if self._added_downloads:
            logger.info(f"已添加 {len(self._added_downloads)} 个下载任务")
            context.downloads.extend(
                [DownloadTask(download_id=did, downloader=params.downloader) for did in self._added_downloads]
            )

        self.job_done(f"已添加 {len(self._added_downloads)} 个下载任务")
        return context
|
||||||
81
app/actions/add_subscribe.py
Normal file
81
app/actions/add_subscribe.py
Normal file
@@ -0,0 +1,81 @@
|
|||||||
|
from app.actions import BaseAction
|
||||||
|
from app.chain.subscribe import SubscribeChain
|
||||||
|
from app.core.config import settings, global_vars
|
||||||
|
from app.core.context import MediaInfo
|
||||||
|
from app.db.subscribe_oper import SubscribeOper
|
||||||
|
from app.log import logger
|
||||||
|
from app.schemas import ActionParams, ActionContext
|
||||||
|
|
||||||
|
|
||||||
|
class AddSubscribeParams(ActionParams):
    """
    Parameters for the "add subscribe" action (currently none beyond
    the ActionParams base).
    """
    pass
|
||||||
|
|
||||||
|
|
||||||
|
class AddSubscribeAction(BaseAction):
    """
    Create a subscription for each media item in the context that does
    not already have one.
    """

    def __init__(self):
        super().__init__()
        self.subscribechain = SubscribeChain()
        self.subscribeoper = SubscribeOper()
        # FIX: these were class-level attributes; the mutable list was
        # shared by all instances and grew across workflow runs.
        self._added_subscribes = []
        self._has_error = False

    @classmethod
    @property
    def name(cls) -> str:
        return "添加订阅"

    @classmethod
    @property
    def description(cls) -> str:
        return "根据媒体列表添加订阅"

    @classmethod
    @property
    def data(cls) -> dict:
        return AddSubscribeParams().dict()

    @property
    def success(self) -> bool:
        return not self._has_error

    def execute(self, workflow_id: int, params: dict, context: ActionContext) -> ActionContext:
        """
        Add a subscription for every item in context.medias that is not
        subscribed yet, then append the created records to
        context.subscribes.
        """
        for media in context.medias:
            if global_vars.is_workflow_stopped(workflow_id):
                break
            mediainfo = MediaInfo()
            mediainfo.from_dict(media.dict())
            if self.subscribechain.exists(mediainfo):
                logger.info(f"{media.title} 已存在订阅")
                continue
            # Create the subscription on behalf of the superuser.
            sid, message = self.subscribechain.add(mtype=mediainfo.type,
                                                   title=mediainfo.title,
                                                   year=mediainfo.year,
                                                   tmdbid=mediainfo.tmdb_id,
                                                   season=mediainfo.season,
                                                   doubanid=mediainfo.douban_id,
                                                   bangumiid=mediainfo.bangumi_id,
                                                   username=settings.SUPERUSER)
            if sid:
                self._added_subscribes.append(sid)
            else:
                self._has_error = True

        if self._added_subscribes:
            logger.info(f"已添加 {len(self._added_subscribes)} 个订阅")
            for sid in self._added_subscribes:
                context.subscribes.append(self.subscribeoper.get(sid))

        self.job_done(f"已添加 {len(self._added_subscribes)} 个订阅")
        return context
|
||||||
67
app/actions/fetch_downloads.py
Normal file
67
app/actions/fetch_downloads.py
Normal file
@@ -0,0 +1,67 @@
|
|||||||
|
from app.actions import BaseAction, ActionChain
|
||||||
|
from app.core.config import global_vars
|
||||||
|
from app.schemas import ActionParams, ActionContext
|
||||||
|
from app.log import logger
|
||||||
|
|
||||||
|
|
||||||
|
class FetchDownloadsParams(ActionParams):
    """
    Parameters for the "fetch downloads" action (currently none beyond
    the ActionParams base).
    """
    pass
|
||||||
|
|
||||||
|
|
||||||
|
class FetchDownloadsAction(BaseAction):
    """
    Refresh the completion state of the download tasks carried in the
    workflow context.
    """

    def __init__(self):
        super().__init__()
        self.chain = ActionChain()

    @classmethod
    @property
    def name(cls) -> str:
        return "获取下载任务"

    @classmethod
    @property
    def description(cls) -> str:
        return "获取下载队列中的任务状态"

    @classmethod
    @property
    def data(cls) -> dict:
        return FetchDownloadsParams().dict()

    @property
    def success(self) -> bool:
        return self.done

    def execute(self, workflow_id: int, params: dict, context: ActionContext) -> ActionContext:
        """
        Update the `completed` flag (and path) of each task in
        context.downloads from the downloader's torrent list.
        """
        # FIX: iterate the downloads carried in the context. The original
        # looped over a class-level `_downloads` list that was never
        # populated, so no task was ever refreshed. Also dropped the
        # unused `__all_complete` local.
        for download in context.downloads:
            if global_vars.is_workflow_stopped(workflow_id):
                break
            logger.info(f"获取下载任务 {download.download_id} 状态 ...")
            torrents = self.chain.list_torrents(hashs=[download.download_id])
            if not torrents:
                # Task no longer in the downloader: treat as finished.
                download.completed = True
                continue
            for t in torrents:
                download.path = t.path
                if t.progress >= 100:
                    logger.info(f"下载任务 {download.download_id} 已完成")
                    download.completed = True
                else:
                    logger.info(f"下载任务 {download.download_id} 未完成")
                    download.completed = False
        # all() over an empty list is True, so an empty queue counts as done.
        if all(d.completed for d in context.downloads):
            self.job_done()
        return context
|
||||||
170
app/actions/fetch_medias.py
Normal file
170
app/actions/fetch_medias.py
Normal file
@@ -0,0 +1,170 @@
|
|||||||
|
from typing import List, Optional
|
||||||
|
|
||||||
|
from pydantic import Field
|
||||||
|
|
||||||
|
from app.actions import BaseAction
|
||||||
|
from app.chain.recommend import RecommendChain
|
||||||
|
from app.schemas import ActionParams, ActionContext
|
||||||
|
from app.core.config import settings, global_vars
|
||||||
|
from app.core.event import eventmanager
|
||||||
|
from app.log import logger
|
||||||
|
from app.schemas import RecommendSourceEventData, MediaInfo
|
||||||
|
from app.schemas.types import ChainEventType
|
||||||
|
from app.utils.http import RequestUtils
|
||||||
|
|
||||||
|
|
||||||
|
class FetchMediasParams(ActionParams):
    """
    Parameters for the "fetch medias" action.
    """
    # "ranking" uses the built-in/extra recommend sources by name;
    # any other value falls through to a raw internal API call.
    source_type: Optional[str] = Field("ranking", description="来源")
    # Names of the ranking sources to query (matched against source "name").
    sources: Optional[List[str]] = Field([], description="榜单")
    # Internal API path used when source_type is not "ranking".
    api_path: Optional[str] = Field(None, description="API路径")
|
||||||
|
|
||||||
|
|
||||||
|
class FetchMediasAction(BaseAction):
    """
    Fetch media lists (rankings / recommendations) into the context.

    Sources are the built-in RecommendChain rankings plus any extra
    sources contributed by plugins via the RecommendSource chain event.
    """

    def __init__(self):
        super().__init__()
        # FIX: `_medias` was a class-level mutable list shared by every
        # instance (results accumulated across runs); the old class
        # attribute `_inner_sources` was dead code — name mangling means
        # `self.__inner_sources` never referenced it.
        self._medias = []
        # Hoist the single RecommendChain instance instead of constructing
        # one per entry.
        recommendchain = RecommendChain()
        self.__inner_sources = [
            {
                "func": recommendchain.tmdb_trending,
                "name": '流行趋势',
            },
            {
                "func": recommendchain.douban_movie_showing,
                "name": '正在热映',
            },
            {
                "func": recommendchain.bangumi_calendar,
                "name": 'Bangumi每日放送',
            },
            {
                "func": recommendchain.tmdb_movies,
                "name": 'TMDB热门电影',
            },
            {
                "func": recommendchain.tmdb_tvs,
                "name": 'TMDB热门电视剧',
            },
            {
                "func": recommendchain.douban_movie_hot,
                "name": '豆瓣热门电影',
            },
            {
                "func": recommendchain.douban_tv_hot,
                "name": '豆瓣热门电视剧',
            },
            {
                "func": recommendchain.douban_tv_animation,
                "name": '豆瓣热门动漫',
            },
            {
                "func": recommendchain.douban_movies,
                "name": '豆瓣最新电影',
            },
            {
                "func": recommendchain.douban_tvs,
                "name": '豆瓣最新电视剧',
            },
            {
                "func": recommendchain.douban_movie_top250,
                "name": '豆瓣电影TOP250',
            },
            {
                "func": recommendchain.douban_tv_weekly_chinese,
                "name": '豆瓣国产剧集榜',
            },
            {
                "func": recommendchain.douban_tv_weekly_global,
                "name": '豆瓣全球剧集榜',
            }
        ]

        # Broadcast an event asking plugins for extra recommend sources.
        event_data = RecommendSourceEventData()
        event = eventmanager.send_event(ChainEventType.RecommendSource, event_data)
        # Merge any sources returned through the event context.
        if event and event.event_data:
            event_data: RecommendSourceEventData = event.event_data
            if event_data.extra_sources:
                self.__inner_sources.extend([s.dict() for s in event_data.extra_sources])

    @classmethod
    @property
    def name(cls) -> str:
        return "获取媒体数据"

    @classmethod
    @property
    def description(cls) -> str:
        return "获取榜单等媒体数据列表"

    @classmethod
    @property
    def data(cls) -> dict:
        return FetchMediasParams().dict()

    @property
    def success(self) -> bool:
        return True if self._medias else False

    def __get_source(self, source: str):
        """
        Look up a registered source dict by its display name; None if absent.
        """
        for s in self.__inner_sources:
            if s['name'] == source:
                return s
        return None

    def execute(self, workflow_id: int, params: dict, context: ActionContext) -> ActionContext:
        """
        Fetch media data and extend context.medias with the results.
        """
        params = FetchMediasParams(**params)
        if params.source_type == "ranking":
            for name in params.sources:
                if global_vars.is_workflow_stopped(workflow_id):
                    break
                source = self.__get_source(name)
                if not source:
                    continue
                logger.info(f"获取媒体数据 {source} ...")
                results = []
                if source.get("func"):
                    results = source['func']()
                else:
                    # Plugin sources expose an API path instead of a callable.
                    api_url = f"http://127.0.0.1:{settings.PORT}/api/v1/{source['api_path']}?token={settings.API_TOKEN}"
                    res = RequestUtils(timeout=15).post_res(api_url)
                    if res:
                        results = res.json()
                if results:
                    logger.info(f"{name} 获取到 {len(results)} 条数据")
                    self._medias.extend([MediaInfo(**r) for r in results])
                else:
                    logger.error(f"{name} 获取数据失败")
        else:
            # Non-ranking mode: call the internal API path directly.
            api_url = f"http://127.0.0.1:{settings.PORT}{params.api_path}?token={settings.API_TOKEN}"
            res = RequestUtils(timeout=15).post_res(api_url)
            if res:
                results = res.json()
                if results:
                    logger.info(f"{params.api_path} 获取到 {len(results)} 条数据")
                    self._medias.extend([MediaInfo(**r) for r in results])

        if self._medias:
            context.medias.extend(self._medias)

        self.job_done(f"获取到 {len(self._medias)} 条媒数据")
        return context
|
||||||
112
app/actions/fetch_rss.py
Normal file
112
app/actions/fetch_rss.py
Normal file
@@ -0,0 +1,112 @@
|
|||||||
|
from typing import Optional
|
||||||
|
|
||||||
|
from pydantic import Field
|
||||||
|
|
||||||
|
from app.actions import BaseAction, ActionChain
|
||||||
|
from app.core.config import settings, global_vars
|
||||||
|
from app.core.context import Context
|
||||||
|
from app.core.metainfo import MetaInfo
|
||||||
|
from app.helper.rss import RssHelper
|
||||||
|
from app.log import logger
|
||||||
|
from app.schemas import ActionParams, ActionContext, TorrentInfo
|
||||||
|
|
||||||
|
|
||||||
|
class FetchRssParams(ActionParams):
    """
    Parameters for the "fetch RSS" action.
    """
    # RSS feed URL. NOTE(review): annotated `str` but defaults to None —
    # execute() guards on falsy url, so this works, but `Optional[str]`
    # would match the actual contract.
    url: str = Field(None, description="RSS地址")
    # Use the configured proxy for the request.
    proxy: Optional[bool] = Field(False, description="是否使用代理")
    # Request timeout in seconds.
    timeout: Optional[int] = Field(15, description="超时时间")
    # Optional request headers.
    content_type: Optional[str] = Field(None, description="Content-Type")
    referer: Optional[str] = Field(None, description="Referer")
    ua: Optional[str] = Field(None, description="User-Agent")
|
||||||
|
|
||||||
|
|
||||||
|
class FetchRssAction(BaseAction):
    """
    Fetch an RSS feed, recognize each entry's media info and append the
    resulting torrent contexts to context.torrents.
    """

    def __init__(self):
        super().__init__()
        self.rsshelper = RssHelper()
        self.chain = ActionChain()
        # FIX: these were class-level attributes; the mutable list was
        # shared by all instances and grew across workflow runs.
        self._rss_torrents = []
        self._has_error = False

    @classmethod
    @property
    def name(cls) -> str:
        return "获取RSS资源"

    @classmethod
    @property
    def description(cls) -> str:
        return "订阅RSS地址获取资源"

    @classmethod
    @property
    def data(cls) -> dict:
        return FetchRssParams().dict()

    @property
    def success(self) -> bool:
        return not self._has_error

    def execute(self, workflow_id: int, params: dict, context: ActionContext) -> ActionContext:
        """
        Request the RSS URL, parse entries into TorrentInfo objects and
        extend context.torrents with the recognized ones.
        """
        params = FetchRssParams(**params)
        if not params.url:
            return context

        # Build optional request headers.
        headers = {}
        if params.content_type:
            headers["Content-Type"] = params.content_type
        if params.referer:
            headers["Referer"] = params.referer
        if params.ua:
            headers["User-Agent"] = params.ua

        rss_items = self.rsshelper.parse(url=params.url,
                                         proxy=settings.PROXY if params.proxy else None,
                                         timeout=params.timeout,
                                         headers=headers)
        # None/False signals a failed request; an empty list is "no data".
        if rss_items is None or rss_items is False:
            logger.error(f'RSS地址 {params.url} 请求失败!')
            self._has_error = True
            return context

        if not rss_items:
            logger.error(f'RSS地址 {params.url} 未获取到RSS数据!')
            return context

        # Assemble torrent contexts from the feed entries.
        for item in rss_items:
            if global_vars.is_workflow_stopped(workflow_id):
                break
            if not item.get("title"):
                continue
            torrentinfo = TorrentInfo(
                title=item.get("title"),
                enclosure=item.get("enclosure"),
                page_url=item.get("link"),
                size=item.get("size"),
                pubdate=item["pubdate"].strftime("%Y-%m-%d %H:%M:%S") if item.get("pubdate") else None,
            )
            meta = MetaInfo(title=torrentinfo.title, subtitle=torrentinfo.description)
            mediainfo = self.chain.recognize_media(meta)
            if not mediainfo:
                logger.warning(f"{torrentinfo.title} 未识别到媒体信息")
                continue
            self._rss_torrents.append(Context(meta_info=meta, media_info=mediainfo, torrent_info=torrentinfo))

        if self._rss_torrents:
            logger.info(f"获取到 {len(self._rss_torrents)} 个RSS资源")
            context.torrents.extend(self._rss_torrents)

        self.job_done(f"获取到 {len(self._rss_torrents)} 个资源")
        return context
|
||||||
101
app/actions/fetch_torrents.py
Normal file
101
app/actions/fetch_torrents.py
Normal file
@@ -0,0 +1,101 @@
|
|||||||
|
import random
|
||||||
|
import time
|
||||||
|
from typing import Optional, List
|
||||||
|
|
||||||
|
from pydantic import Field
|
||||||
|
|
||||||
|
from app.actions import BaseAction
|
||||||
|
from app.chain.search import SearchChain
|
||||||
|
from app.core.config import global_vars
|
||||||
|
from app.log import logger
|
||||||
|
from app.schemas import ActionParams, ActionContext, MediaType
|
||||||
|
|
||||||
|
|
||||||
|
class FetchTorrentsParams(ActionParams):
    """
    Parameters for the "fetch torrents" (site search) action.
    """
    # "keyword" searches by name; any other value searches by the media
    # ids already present in the context.
    search_type: Optional[str] = Field("keyword", description="搜索类型")
    # Keyword-mode filters.
    name: Optional[str] = Field(None, description="资源名称")
    year: Optional[str] = Field(None, description="年份")
    type: Optional[str] = Field(None, description="资源类型 (电影/电视剧)")
    season: Optional[int] = Field(None, description="季度")
    # Site ids to search; empty means all configured sites.
    sites: Optional[List[int]] = Field([], description="站点列表")
|
||||||
|
|
||||||
|
|
||||||
|
class FetchTorrentsAction(BaseAction):
    """
    Search configured sites for torrents, either by keyword or by the
    media items already collected in the context.
    """

    def __init__(self):
        super().__init__()
        self.searchchain = SearchChain()
        # FIX: `_torrents` was a class-level mutable list shared by every
        # instance, so results accumulated across workflow runs.
        self._torrents = []

    @classmethod
    @property
    def name(cls) -> str:
        return "搜索站点资源"

    @classmethod
    @property
    def description(cls) -> str:
        return "搜索站点种子资源列表"

    @classmethod
    @property
    def data(cls) -> dict:
        return FetchTorrentsParams().dict()

    @property
    def success(self) -> bool:
        return self.done

    def execute(self, workflow_id: int, params: dict, context: ActionContext) -> ActionContext:
        """
        Search sites and extend context.torrents with the results.
        """
        params = FetchTorrentsParams(**params)
        if params.search_type == "keyword":
            # Keyword search with post-filtering by year / type / season.
            torrents = self.searchchain.search_by_title(title=params.name, sites=params.sites, cache_local=False)
            for torrent in torrents:
                if global_vars.is_workflow_stopped(workflow_id):
                    break
                if params.year and torrent.meta_info.year != params.year:
                    continue
                if params.type and torrent.media_info and torrent.media_info.type != MediaType(params.type):
                    continue
                if params.season and torrent.meta_info.begin_season != params.season:
                    continue
                # Recognize media info; unrecognized results are dropped.
                torrent.media_info = self.searchchain.recognize_media(torrent.meta_info)
                if not torrent.media_info:
                    logger.warning(f"{torrent.torrent_info.title} 未识别到媒体信息")
                    continue
                self._torrents.append(torrent)
        else:
            # Search by the media list collected earlier in the workflow.
            for media in context.medias:
                if global_vars.is_workflow_stopped(workflow_id):
                    break
                torrents = self.searchchain.search_by_id(tmdbid=media.tmdb_id,
                                                         doubanid=media.douban_id,
                                                         mtype=MediaType(media.type),
                                                         sites=params.sites)
                for torrent in torrents:
                    self._torrents.append(torrent)

                # Random 10–60 s pause between media items to avoid
                # hammering the sites.
                sleep_time = random.randint(10, 60)
                logger.info(f"随机休眠 {sleep_time} 秒 ...")
                time.sleep(sleep_time)

        if self._torrents:
            context.torrents.extend(self._torrents)
            logger.info(f"共搜索到 {len(self._torrents)} 条资源")

        self.job_done(f"搜索到 {len(self._torrents)} 个资源")
        return context
|
||||||
68
app/actions/filter_medias.py
Normal file
68
app/actions/filter_medias.py
Normal file
@@ -0,0 +1,68 @@
|
|||||||
|
from typing import Optional
|
||||||
|
|
||||||
|
from pydantic import Field
|
||||||
|
|
||||||
|
from app.actions import BaseAction
|
||||||
|
from app.core.config import global_vars
|
||||||
|
from app.schemas import ActionParams, ActionContext
|
||||||
|
|
||||||
|
|
||||||
|
class FilterMediasParams(ActionParams):
    """
    Parameters for the "filter medias" action; unset fields skip that check.
    """
    # Media type, compared directly against media.type.
    type: Optional[str] = Field(None, description="媒体类型 (电影/电视剧)")
    # Secondary category, compared against media.category.
    category: Optional[str] = Field(None, description="媒体类别 (二级分类)")
    # Minimum vote average to keep an item.
    vote: Optional[int] = Field(0, description="评分")
    # Exact-match release year.
    year: Optional[str] = Field(None, description="年份")
|
||||||
|
|
||||||
|
|
||||||
|
class FilterMediasAction(BaseAction):
    """
    Filter the media list in the context by type / category / vote / year.
    """

    def __init__(self):
        super().__init__()
        # FIX: `_medias` was a class-level mutable list shared by every
        # instance (and there was no __init__ at all), so filtered results
        # accumulated across workflow runs.
        self._medias = []

    @classmethod
    @property
    def name(cls) -> str:
        return "过滤媒体数据"

    @classmethod
    @property
    def description(cls) -> str:
        return "对媒体数据列表进行过滤"

    @classmethod
    @property
    def data(cls) -> dict:
        return FilterMediasParams().dict()

    @property
    def success(self) -> bool:
        return self.done

    def execute(self, workflow_id: int, params: dict, context: ActionContext) -> ActionContext:
        """
        Keep only the context.medias entries that pass every configured
        filter.
        """
        params = FilterMediasParams(**params)
        for media in context.medias:
            if global_vars.is_workflow_stopped(workflow_id):
                break
            if params.type and media.type != params.type:
                continue
            if params.category and media.category != params.category:
                continue
            if params.vote and media.vote_average < params.vote:
                continue
            if params.year and media.year != params.year:
                continue
            self._medias.append(media)

        # NOTE(review): when everything is filtered out, context.medias is
        # intentionally left unchanged (original behavior) — confirm this
        # is the desired semantics.
        if self._medias:
            context.medias = self._medias

        self.job_done(f"过滤后剩余 {len(self._medias)} 条媒体数据")
        return context
|
||||||
84
app/actions/filter_torrents.py
Normal file
84
app/actions/filter_torrents.py
Normal file
@@ -0,0 +1,84 @@
|
|||||||
|
from typing import Optional, List
|
||||||
|
|
||||||
|
from pydantic import Field
|
||||||
|
|
||||||
|
from app.actions import BaseAction, ActionChain
|
||||||
|
from app.core.config import global_vars
|
||||||
|
from app.helper.torrent import TorrentHelper
|
||||||
|
from app.schemas import ActionParams, ActionContext
|
||||||
|
|
||||||
|
|
||||||
|
class FilterTorrentsParams(ActionParams):
    """
    Parameters for the "filter torrents" action; unset fields skip that
    check.
    """
    # Rule-group names applied via the chain's filter_torrents.
    rule_groups: Optional[List[str]] = Field([], description="规则组")
    # Attribute filters applied via TorrentHelper.filter_torrent.
    quality: Optional[str] = Field(None, description="资源质量")
    resolution: Optional[str] = Field(None, description="资源分辨率")
    effect: Optional[str] = Field(None, description="特效")
    include: Optional[str] = Field(None, description="包含规则")
    exclude: Optional[str] = Field(None, description="排除规则")
    # Size range in MB.
    size: Optional[str] = Field(None, description="资源大小范围(MB)")
|
||||||
|
|
||||||
|
|
||||||
|
class FilterTorrentsAction(BaseAction):
    """
    Filter the torrent list in the context by attribute filters and
    rule groups.
    """

    def __init__(self):
        super().__init__()
        self.torrenthelper = TorrentHelper()
        self.chain = ActionChain()
        # FIX: `_torrents` was a class-level mutable list shared by every
        # instance, so filtered results accumulated across workflow runs.
        self._torrents = []

    @classmethod
    @property
    def name(cls) -> str:
        return "过滤资源"

    @classmethod
    @property
    def description(cls) -> str:
        return "对资源列表数据进行过滤"

    @classmethod
    @property
    def data(cls) -> dict:
        return FilterTorrentsParams().dict()

    @property
    def success(self) -> bool:
        return self.done

    def execute(self, workflow_id: int, params: dict, context: ActionContext) -> ActionContext:
        """
        Keep only the context.torrents entries that pass both the
        attribute filter and the rule-group filter; the filtered list
        replaces context.torrents (even when empty).
        """
        params = FilterTorrentsParams(**params)
        for torrent in context.torrents:
            if global_vars.is_workflow_stopped(workflow_id):
                break
            # First pass: plain attribute filters.
            if self.torrenthelper.filter_torrent(
                    torrent_info=torrent.torrent_info,
                    filter_params={
                        "quality": params.quality,
                        "resolution": params.resolution,
                        "effect": params.effect,
                        "include": params.include,
                        "exclude": params.exclude,
                        "size": params.size
                    }
            ):
                # Second pass: configured rule groups.
                if self.chain.filter_torrents(
                        rule_groups=params.rule_groups,
                        torrent_list=[torrent.torrent_info],
                        mediainfo=torrent.media_info
                ):
                    self._torrents.append(torrent)

        context.torrents = self._torrents

        self.job_done(f"过滤后剩余 {len(self._torrents)} 个资源")
        return context
|
||||||
77
app/actions/scan_file.py
Normal file
77
app/actions/scan_file.py
Normal file
@@ -0,0 +1,77 @@
|
|||||||
|
from pathlib import Path
|
||||||
|
from typing import Optional
|
||||||
|
|
||||||
|
from pydantic import Field
|
||||||
|
|
||||||
|
from app.actions import BaseAction
|
||||||
|
from app.chain.storage import StorageChain
|
||||||
|
from app.core.config import global_vars, settings
|
||||||
|
from app.log import logger
|
||||||
|
from app.schemas import ActionParams, ActionContext
|
||||||
|
|
||||||
|
|
||||||
|
class ScanFileParams(ActionParams):
    """
    Parameters for the directory-scan action.
    """
    # Storage backend to scan ("local" by default).
    storage: Optional[str] = Field("local", description="存储")
    # Directory inside the storage to scan recursively.
    directory: Optional[str] = Field(None, description="目录")
|
||||||
|
|
||||||
|
|
||||||
|
class ScanFileAction(BaseAction):
    """
    扫描目录 — recursively scan a storage directory and queue all media
    files into the workflow context.
    """

    def __init__(self):
        super().__init__()
        self.storagechain = StorageChain()
        # Instance-level state: class-level mutable defaults would be
        # shared between action instances and leak results across runs.
        self._fileitems = []
        self._has_error = False

    @classmethod
    @property
    def name(cls) -> str:
        return "扫描目录"

    @classmethod
    @property
    def description(cls) -> str:
        return "扫描目录文件到队列"

    @classmethod
    @property
    def data(cls) -> dict:
        return ScanFileParams().dict()

    @property
    def success(self) -> bool:
        return not self._has_error

    def execute(self, workflow_id: int, params: dict, context: ActionContext) -> ActionContext:
        """
        Scan every file under the configured directory and append media
        files to ``context.fileitems``.

        :param workflow_id: id of the running workflow, used to honour stop requests
        :param params: raw parameter dict, validated against ``ScanFileParams``
        :param context: shared workflow execution context
        :return: context with the scanned media files appended
        """
        params = ScanFileParams(**params)
        if not params.storage or not params.directory:
            return context
        fileitem = self.storagechain.get_file_item(params.storage, Path(params.directory))
        if not fileitem:
            logger.error(f"目录不存在: 【{params.storage}】{params.directory}")
            self._has_error = True
            return context
        files = self.storagechain.list_files(fileitem, recursion=True)
        for file in files:
            if global_vars.is_workflow_stopped(workflow_id):
                break
            # Keep only recognised media extensions.
            if not file.extension or f".{file.extension.lower()}" not in settings.RMT_MEDIAEXT:
                continue
            # BUGFIX: append the scanned file itself; the original appended
            # the root directory item once per matching file.
            self._fileitems.append(file)

        if self._fileitems:
            context.fileitems.extend(self._fileitems)

        self.job_done(f"扫描到 {len(self._fileitems)} 个文件")
        return context
|
||||||
72
app/actions/scrape_file.py
Normal file
72
app/actions/scrape_file.py
Normal file
@@ -0,0 +1,72 @@
|
|||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
from app.actions import BaseAction
|
||||||
|
from app.core.config import global_vars
|
||||||
|
from app.schemas import ActionParams, ActionContext
|
||||||
|
from app.chain.media import MediaChain
|
||||||
|
from app.chain.storage import StorageChain
|
||||||
|
from app.core.metainfo import MetaInfoPath
|
||||||
|
from app.log import logger
|
||||||
|
|
||||||
|
|
||||||
|
class ScrapeFileParams(ActionParams):
    """
    Parameters for the scrape action (currently none are defined).
    """
    pass
|
||||||
|
|
||||||
|
|
||||||
|
class ScrapeFileAction(BaseAction):
    """
    刮削文件 — scrape media metadata and images for every queued file.
    """

    def __init__(self):
        super().__init__()
        self.storagechain = StorageChain()
        self.mediachain = MediaChain()
        # Instance-level state: class-level mutable defaults would be
        # shared between action instances and leak results across runs.
        self._scraped_files = []
        self._has_error = False

    @classmethod
    @property
    def name(cls) -> str:
        return "刮削文件"

    @classmethod
    @property
    def description(cls) -> str:
        return "刮削媒体信息和图片"

    @classmethod
    @property
    def data(cls) -> dict:
        return ScrapeFileParams().dict()

    @property
    def success(self) -> bool:
        return not self._has_error

    def execute(self, workflow_id: int, params: dict, context: ActionContext) -> ActionContext:
        """
        Scrape metadata for every file in ``context.fileitems``.

        Files already scraped by this instance or no longer present in
        storage are skipped; files whose media cannot be recognised are
        skipped and mark the action as failed.

        :param workflow_id: id of the running workflow, used to honour stop requests
        :param params: raw parameter dict (unused — ``ScrapeFileParams`` is empty)
        :param context: shared workflow execution context
        :return: the (unmodified) context
        """
        for fileitem in context.fileitems:
            if global_vars.is_workflow_stopped(workflow_id):
                break
            if fileitem in self._scraped_files:
                continue
            # Skip files that disappeared from storage since they were queued.
            if not self.storagechain.exists(fileitem):
                continue
            meta = MetaInfoPath(Path(fileitem.path))
            mediainfo = self.mediachain.recognize_media(meta)
            if not mediainfo:
                self._has_error = True
                logger.info(f"{fileitem.path} 未识别到媒体信息,无法刮削")
                continue
            self.mediachain.scrape_metadata(fileitem=fileitem, meta=meta, mediainfo=mediainfo)
            self._scraped_files.append(fileitem)

        self.job_done(f"成功刮削了 {len(self._scraped_files)} 个文件")
        return context
|
||||||
48
app/actions/send_event.py
Normal file
48
app/actions/send_event.py
Normal file
@@ -0,0 +1,48 @@
|
|||||||
|
from app.actions import BaseAction
|
||||||
|
from app.core.event import eventmanager
|
||||||
|
from app.schemas import ActionParams, ActionContext
|
||||||
|
from app.schemas.types import ChainEventType
|
||||||
|
|
||||||
|
|
||||||
|
class SendEventParams(ActionParams):
    """
    Parameters for the send-event action (currently none are defined).
    """
    pass
|
||||||
|
|
||||||
|
|
||||||
|
class SendEventAction(BaseAction):
    """
    发送事件 — broadcast a workflow-execution event so that plugins can
    inspect and modify the execution context.
    """

    @classmethod
    @property
    def name(cls) -> str:
        return "发送事件"

    @classmethod
    @property
    def description(cls) -> str:
        return "发送任务执行事件"

    @classmethod
    @property
    def data(cls) -> dict:
        return SendEventParams().dict()

    @property
    def success(self) -> bool:
        return self.done

    def execute(self, workflow_id: int, params: dict, context: ActionContext) -> ActionContext:
        """
        Emit a WorkflowExecution chain event carrying the current context
        and adopt whatever context the event handlers hand back.
        """
        # Broadcast the event; handlers may replace the execution context.
        event = eventmanager.send_event(ChainEventType.WorkflowExecution, context)
        updated_context = event.event_data if event else None
        if updated_context:
            context = updated_context

        self.job_done()
        return context
|
||||||
73
app/actions/send_message.py
Normal file
73
app/actions/send_message.py
Normal file
@@ -0,0 +1,73 @@
|
|||||||
|
from typing import List, Optional, Union
|
||||||
|
|
||||||
|
from pydantic import Field
|
||||||
|
|
||||||
|
from app.actions import BaseAction, ActionChain
|
||||||
|
from app.schemas import ActionParams, ActionContext, Notification
|
||||||
|
|
||||||
|
|
||||||
|
class SendMessageParams(ActionParams):
    """
    Parameters for the send-message action.
    """
    # Message channels to send through; empty means the default channel.
    client: Optional[List[str]] = Field([], description="消息渠道")
    # Target user id for the notification (None = broadcast default).
    userid: Optional[Union[str, int]] = Field(None, description="用户ID")
|
||||||
|
|
||||||
|
|
||||||
|
class SendMessageAction(BaseAction):
    """
    发送消息 — post a summary of the workflow execution to the configured
    message channels.
    """

    def __init__(self):
        super().__init__()
        self.chain = ActionChain()

    @classmethod
    @property
    def name(cls) -> str:
        return "发送消息"

    @classmethod
    @property
    def description(cls) -> str:
        return "发送任务执行消息"

    @classmethod
    @property
    def data(cls) -> dict:
        return SendMessageParams().dict()

    @property
    def success(self) -> bool:
        return self.done

    def execute(self, workflow_id: int, params: dict, context: ActionContext) -> ActionContext:
        """
        Compose a progress/history summary and post it to each configured
        message client (or the default client when none is configured).
        """
        params = SendMessageParams(**params)
        # Build the message body: overall progress, then one numbered line
        # per history entry that carries a message.
        parts = [f"当前进度:{context.progress}%"]
        counter = 1
        for record in (context.execute_history or []):
            if not record.message:
                continue
            parts.append(f"\n{counter}. {record.action}:{record.message}")
            counter += 1
        msg_text = "".join(parts)
        # An empty client list means "send via the default channel".
        targets = params.client or [None]
        for target in targets:
            self.chain.post_message(
                Notification(
                    source=target,
                    userid=params.userid,
                    title="【工作流执行结果】",
                    text=msg_text,
                    link="#/workflow"
                )
            )

        self.job_done()
        return context
|
||||||
120
app/actions/transfer_file.py
Normal file
120
app/actions/transfer_file.py
Normal file
@@ -0,0 +1,120 @@
|
|||||||
|
import copy
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Optional
|
||||||
|
|
||||||
|
from pydantic import Field
|
||||||
|
|
||||||
|
from app.actions import BaseAction
|
||||||
|
from app.core.config import global_vars
|
||||||
|
from app.db.transferhistory_oper import TransferHistoryOper
|
||||||
|
from app.schemas import ActionParams, ActionContext
|
||||||
|
from app.chain.storage import StorageChain
|
||||||
|
from app.chain.transfer import TransferChain
|
||||||
|
from app.log import logger
|
||||||
|
|
||||||
|
|
||||||
|
class TransferFileParams(ActionParams):
    """
    Parameters for the file-organise (transfer) action.
    """
    # Where the files come from: "downloads" pulls from completed download
    # tasks, anything else consumes the context's fileitems queue.
    source: Optional[str] = Field("downloads", description="来源")
|
||||||
|
|
||||||
|
|
||||||
|
class TransferFileAction(BaseAction):
    """
    整理文件 — organise files coming either from completed download tasks
    or from the context's fileitems queue.
    """

    def __init__(self):
        super().__init__()
        self.transferchain = TransferChain()
        self.storagechain = StorageChain()
        self.transferhis = TransferHistoryOper()
        # Instance-level state: class-level mutable defaults would be
        # shared between action instances and leak results across runs.
        self._fileitems = []
        self._has_error = False

    @classmethod
    @property
    def name(cls) -> str:
        return "整理文件"

    @classmethod
    @property
    def description(cls) -> str:
        return "整理队列中的文件"

    @classmethod
    @property
    def data(cls) -> dict:
        return TransferFileParams().dict()

    @property
    def success(self) -> bool:
        return not self._has_error

    def execute(self, workflow_id: int, params: dict, context: ActionContext) -> ActionContext:
        """
        Organise files from downloads / fileitems and record the results
        back into ``context.fileitems``.

        :param workflow_id: id of the running workflow, used to honour stop requests
        :param params: raw parameter dict, validated against ``TransferFileParams``
        :param context: shared workflow execution context
        :return: context with successfully organised files appended
        """

        def check_continue():
            """
            Return False when the workflow has been stopped by the user.
            """
            if global_vars.is_workflow_stopped(workflow_id):
                return False
            return True

        params = TransferFileParams(**params)
        if params.source == "downloads":
            # Organise files produced by download tasks.
            for download in context.downloads:
                if not check_continue():
                    break
                if not download.completed:
                    logger.info(f"下载任务 {download.download_id} 未完成")
                    continue
                fileitem = self.storagechain.get_file_item(storage="local", path=Path(download.path))
                if not fileitem:
                    logger.info(f"文件 {download.path} 不存在")
                    continue
                transferd = self.transferhis.get_by_src(fileitem.path, storage=fileitem.storage)
                if transferd:
                    # Already organised — skip.
                    continue
                logger.info(f"开始整理文件 {download.path} ...")
                # Pass continue_callback so a long transfer can also be
                # interrupted by a workflow stop (consistent with the
                # fileitems branch below, which the original omitted here).
                state, errmsg = self.transferchain.do_transfer(fileitem, background=False,
                                                               continue_callback=check_continue)
                if not state:
                    self._has_error = True
                    logger.error(f"整理文件 {download.path} 失败: {errmsg}")
                    continue
                logger.info(f"整理文件 {download.path} 完成")
                self._fileitems.append(fileitem)
        else:
            # Organise files already queued in context.fileitems; iterate a
            # deep copy because items are removed from the live list below.
            for fileitem in copy.deepcopy(context.fileitems):
                if not check_continue():
                    break
                transferd = self.transferhis.get_by_src(fileitem.path, storage=fileitem.storage)
                if transferd:
                    # Already organised — skip.
                    continue
                logger.info(f"开始整理文件 {fileitem.path} ...")
                state, errmsg = self.transferchain.do_transfer(fileitem, background=False,
                                                               continue_callback=check_continue)
                if not state:
                    self._has_error = True
                    logger.error(f"整理文件 {fileitem.path} 失败: {errmsg}")
                    continue
                logger.info(f"整理文件 {fileitem.path} 完成")
                # Remove the organised file from the pending queue
                # (relies on fileitem equality between the copy and the
                # original — presumably pydantic value equality; confirm).
                context.fileitems.remove(fileitem)
                self._fileitems.append(fileitem)

        if self._fileitems:
            context.fileitems.extend(self._fileitems)

        self.job_done()
        return context
|
||||||
@@ -2,7 +2,7 @@ from fastapi import APIRouter
|
|||||||
|
|
||||||
from app.api.endpoints import login, user, site, message, webhook, subscribe, \
|
from app.api.endpoints import login, user, site, message, webhook, subscribe, \
|
||||||
media, douban, search, plugin, tmdb, history, system, download, dashboard, \
|
media, douban, search, plugin, tmdb, history, system, download, dashboard, \
|
||||||
transfer, mediaserver, bangumi, storage, discover
|
transfer, mediaserver, bangumi, storage, discover, recommend, workflow
|
||||||
|
|
||||||
api_router = APIRouter()
|
api_router = APIRouter()
|
||||||
api_router.include_router(login.router, prefix="/login", tags=["login"])
|
api_router.include_router(login.router, prefix="/login", tags=["login"])
|
||||||
@@ -25,3 +25,5 @@ api_router.include_router(transfer.router, prefix="/transfer", tags=["transfer"]
|
|||||||
api_router.include_router(mediaserver.router, prefix="/mediaserver", tags=["mediaserver"])
|
api_router.include_router(mediaserver.router, prefix="/mediaserver", tags=["mediaserver"])
|
||||||
api_router.include_router(bangumi.router, prefix="/bangumi", tags=["bangumi"])
|
api_router.include_router(bangumi.router, prefix="/bangumi", tags=["bangumi"])
|
||||||
api_router.include_router(discover.router, prefix="/discover", tags=["discover"])
|
api_router.include_router(discover.router, prefix="/discover", tags=["discover"])
|
||||||
|
api_router.include_router(recommend.router, prefix="/recommend", tags=["recommend"])
|
||||||
|
api_router.include_router(workflow.router, prefix="/workflow", tags=["workflow"])
|
||||||
|
|||||||
@@ -4,38 +4,12 @@ from fastapi import APIRouter, Depends
|
|||||||
|
|
||||||
from app import schemas
|
from app import schemas
|
||||||
from app.chain.bangumi import BangumiChain
|
from app.chain.bangumi import BangumiChain
|
||||||
from app.chain.recommend import RecommendChain
|
|
||||||
from app.core.context import MediaInfo
|
from app.core.context import MediaInfo
|
||||||
from app.core.security import verify_token
|
from app.core.security import verify_token
|
||||||
|
|
||||||
router = APIRouter()
|
router = APIRouter()
|
||||||
|
|
||||||
|
|
||||||
@router.get("/calendar", summary="Bangumi每日放送", response_model=List[schemas.MediaInfo])
|
|
||||||
def calendar(page: int = 1,
|
|
||||||
count: int = 30,
|
|
||||||
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
|
||||||
"""
|
|
||||||
浏览Bangumi每日放送
|
|
||||||
"""
|
|
||||||
return RecommendChain().bangumi_calendar(page=page, count=count)
|
|
||||||
|
|
||||||
|
|
||||||
@router.get("/subjects", summary="搜索Bangumi", response_model=List[schemas.MediaInfo])
|
|
||||||
def bangumi_subjects(type: int = 2,
|
|
||||||
cat: int = None,
|
|
||||||
sort: str = 'rank',
|
|
||||||
year: int = None,
|
|
||||||
page: int = 1,
|
|
||||||
count: int = 30,
|
|
||||||
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
|
||||||
"""
|
|
||||||
搜索Bangumi
|
|
||||||
"""
|
|
||||||
return RecommendChain().bangumi_discover(type=type, cat=cat, sort=sort, year=year,
|
|
||||||
page=page, count=count)
|
|
||||||
|
|
||||||
|
|
||||||
@router.get("/credits/{bangumiid}", summary="查询Bangumi演职员表", response_model=List[schemas.MediaPerson])
|
@router.get("/credits/{bangumiid}", summary="查询Bangumi演职员表", response_model=List[schemas.MediaPerson])
|
||||||
def bangumi_credits(bangumiid: int,
|
def bangumi_credits(bangumiid: int,
|
||||||
page: int = 1,
|
page: int = 1,
|
||||||
|
|||||||
@@ -6,7 +6,10 @@ from app import schemas
|
|||||||
from app.core.event import eventmanager
|
from app.core.event import eventmanager
|
||||||
from app.core.security import verify_token
|
from app.core.security import verify_token
|
||||||
from app.schemas import DiscoverSourceEventData
|
from app.schemas import DiscoverSourceEventData
|
||||||
from app.schemas.types import ChainEventType
|
from app.schemas.types import ChainEventType, MediaType
|
||||||
|
from chain.bangumi import BangumiChain
|
||||||
|
from chain.douban import DoubanChain
|
||||||
|
from chain.tmdb import TmdbChain
|
||||||
|
|
||||||
router = APIRouter()
|
router = APIRouter()
|
||||||
|
|
||||||
@@ -16,7 +19,7 @@ def source(_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
|||||||
"""
|
"""
|
||||||
获取探索数据源
|
获取探索数据源
|
||||||
"""
|
"""
|
||||||
# 广播事件,请示额外的发现数据源支持
|
# 广播事件,请示额外的探索数据源支持
|
||||||
event_data = DiscoverSourceEventData()
|
event_data = DiscoverSourceEventData()
|
||||||
event = eventmanager.send_event(ChainEventType.DiscoverSource, event_data)
|
event = eventmanager.send_event(ChainEventType.DiscoverSource, event_data)
|
||||||
# 使用事件返回的上下文数据
|
# 使用事件返回的上下文数据
|
||||||
@@ -25,3 +28,103 @@ def source(_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
|||||||
if event_data.extra_sources:
|
if event_data.extra_sources:
|
||||||
return event_data.extra_sources
|
return event_data.extra_sources
|
||||||
return []
|
return []
|
||||||
|
|
||||||
|
|
||||||
|
@router.get("/bangumi", summary="探索Bangumi", response_model=List[schemas.MediaInfo])
def bangumi(type: int = 2,
            cat: int = None,
            sort: str = 'rank',
            year: int = None,
            page: int = 1,
            count: int = 30,
            _: schemas.TokenPayload = Depends(verify_token)) -> Any:
    """
    Explore Bangumi subjects; pagination is translated to limit/offset.
    """
    medias = BangumiChain().discover(type=type, cat=cat, sort=sort, year=year,
                                     limit=count, offset=(page - 1) * count)
    if medias:
        return [media.to_dict() for media in medias]
    return []


@router.get("/douban_movies", summary="探索豆瓣电影", response_model=List[schemas.MediaInfo])
def douban_movies(sort: str = "R",
                  tags: str = "",
                  page: int = 1,
                  count: int = 30,
                  _: schemas.TokenPayload = Depends(verify_token)) -> Any:
    """
    Explore Douban movies.
    """
    movies = DoubanChain().douban_discover(mtype=MediaType.MOVIE,
                                           sort=sort, tags=tags, page=page, count=count)
    return [media.to_dict() for media in movies] if movies else []


@router.get("/douban_tvs", summary="探索豆瓣剧集", response_model=List[schemas.MediaInfo])
def douban_tvs(sort: str = "R",
               tags: str = "",
               page: int = 1,
               count: int = 30,
               _: schemas.TokenPayload = Depends(verify_token)) -> Any:
    """
    Explore Douban TV series.
    """
    tvs = DoubanChain().douban_discover(mtype=MediaType.TV,
                                        sort=sort, tags=tags, page=page, count=count)
    return [media.to_dict() for media in tvs] if tvs else []


@router.get("/tmdb_movies", summary="探索TMDB电影", response_model=List[schemas.MediaInfo])
def tmdb_movies(sort_by: str = "popularity.desc",
                with_genres: str = "",
                with_original_language: str = "",
                with_keywords: str = "",
                with_watch_providers: str = "",
                vote_average: float = 0,
                vote_count: int = 0,
                release_date: str = "",
                page: int = 1,
                _: schemas.TokenPayload = Depends(verify_token)) -> Any:
    """
    Explore TMDB movies via the discover API.
    """
    movies = TmdbChain().tmdb_discover(mtype=MediaType.MOVIE,
                                       sort_by=sort_by,
                                       with_genres=with_genres,
                                       with_original_language=with_original_language,
                                       with_keywords=with_keywords,
                                       with_watch_providers=with_watch_providers,
                                       vote_average=vote_average,
                                       vote_count=vote_count,
                                       release_date=release_date,
                                       page=page)
    return [movie.to_dict() for movie in movies] if movies else []


@router.get("/tmdb_tvs", summary="探索TMDB剧集", response_model=List[schemas.MediaInfo])
def tmdb_tvs(sort_by: str = "popularity.desc",
             with_genres: str = "",
             with_original_language: str = "",
             with_keywords: str = "",
             with_watch_providers: str = "",
             vote_average: float = 0,
             vote_count: int = 0,
             release_date: str = "",
             page: int = 1,
             _: schemas.TokenPayload = Depends(verify_token)) -> Any:
    """
    Explore TMDB TV series via the discover API.
    """
    tvs = TmdbChain().tmdb_discover(mtype=MediaType.TV,
                                    sort_by=sort_by,
                                    with_genres=with_genres,
                                    with_original_language=with_original_language,
                                    with_keywords=with_keywords,
                                    with_watch_providers=with_watch_providers,
                                    vote_average=vote_average,
                                    vote_count=vote_count,
                                    release_date=release_date,
                                    page=page)
    return [tv.to_dict() for tv in tvs] if tvs else []
||||||
|
|||||||
@@ -4,7 +4,6 @@ from fastapi import APIRouter, Depends
|
|||||||
|
|
||||||
from app import schemas
|
from app import schemas
|
||||||
from app.chain.douban import DoubanChain
|
from app.chain.douban import DoubanChain
|
||||||
from app.chain.recommend import RecommendChain
|
|
||||||
from app.core.context import MediaInfo
|
from app.core.context import MediaInfo
|
||||||
from app.core.security import verify_token
|
from app.core.security import verify_token
|
||||||
from app.schemas import MediaType
|
from app.schemas import MediaType
|
||||||
@@ -34,100 +33,6 @@ def douban_person_credits(person_id: int,
|
|||||||
return []
|
return []
|
||||||
|
|
||||||
|
|
||||||
@router.get("/showing", summary="豆瓣正在热映", response_model=List[schemas.MediaInfo])
|
|
||||||
def movie_showing(page: int = 1,
|
|
||||||
count: int = 30,
|
|
||||||
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
|
||||||
"""
|
|
||||||
浏览豆瓣正在热映
|
|
||||||
"""
|
|
||||||
return RecommendChain().douban_movie_showing(page=page, count=count)
|
|
||||||
|
|
||||||
|
|
||||||
@router.get("/movies", summary="豆瓣电影", response_model=List[schemas.MediaInfo])
|
|
||||||
def douban_movies(sort: str = "R",
|
|
||||||
tags: str = "",
|
|
||||||
page: int = 1,
|
|
||||||
count: int = 30,
|
|
||||||
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
|
||||||
"""
|
|
||||||
浏览豆瓣电影信息
|
|
||||||
"""
|
|
||||||
return RecommendChain().douban_movies(sort=sort, tags=tags, page=page, count=count)
|
|
||||||
|
|
||||||
|
|
||||||
@router.get("/tvs", summary="豆瓣剧集", response_model=List[schemas.MediaInfo])
|
|
||||||
def douban_tvs(sort: str = "R",
|
|
||||||
tags: str = "",
|
|
||||||
page: int = 1,
|
|
||||||
count: int = 30,
|
|
||||||
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
|
||||||
"""
|
|
||||||
浏览豆瓣剧集信息
|
|
||||||
"""
|
|
||||||
return RecommendChain().douban_tvs(sort=sort, tags=tags, page=page, count=count)
|
|
||||||
|
|
||||||
|
|
||||||
@router.get("/movie_top250", summary="豆瓣电影TOP250", response_model=List[schemas.MediaInfo])
|
|
||||||
def movie_top250(page: int = 1,
|
|
||||||
count: int = 30,
|
|
||||||
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
|
||||||
"""
|
|
||||||
浏览豆瓣剧集信息
|
|
||||||
"""
|
|
||||||
return RecommendChain().douban_movie_top250(page=page, count=count)
|
|
||||||
|
|
||||||
|
|
||||||
@router.get("/tv_weekly_chinese", summary="豆瓣国产剧集周榜", response_model=List[schemas.MediaInfo])
|
|
||||||
def tv_weekly_chinese(page: int = 1,
|
|
||||||
count: int = 30,
|
|
||||||
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
|
||||||
"""
|
|
||||||
中国每周剧集口碑榜
|
|
||||||
"""
|
|
||||||
return RecommendChain().douban_tv_weekly_chinese(page=page, count=count)
|
|
||||||
|
|
||||||
|
|
||||||
@router.get("/tv_weekly_global", summary="豆瓣全球剧集周榜", response_model=List[schemas.MediaInfo])
|
|
||||||
def tv_weekly_global(page: int = 1,
|
|
||||||
count: int = 30,
|
|
||||||
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
|
||||||
"""
|
|
||||||
全球每周剧集口碑榜
|
|
||||||
"""
|
|
||||||
return RecommendChain().douban_tv_weekly_global(page=page, count=count)
|
|
||||||
|
|
||||||
|
|
||||||
@router.get("/tv_animation", summary="豆瓣动画剧集", response_model=List[schemas.MediaInfo])
|
|
||||||
def tv_animation(page: int = 1,
|
|
||||||
count: int = 30,
|
|
||||||
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
|
||||||
"""
|
|
||||||
热门动画剧集
|
|
||||||
"""
|
|
||||||
return RecommendChain().douban_tv_animation(page=page, count=count)
|
|
||||||
|
|
||||||
|
|
||||||
@router.get("/movie_hot", summary="豆瓣热门电影", response_model=List[schemas.MediaInfo])
|
|
||||||
def movie_hot(page: int = 1,
|
|
||||||
count: int = 30,
|
|
||||||
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
|
||||||
"""
|
|
||||||
热门电影
|
|
||||||
"""
|
|
||||||
return RecommendChain().douban_movie_hot(page=page, count=count)
|
|
||||||
|
|
||||||
|
|
||||||
@router.get("/tv_hot", summary="豆瓣热门电视剧", response_model=List[schemas.MediaInfo])
|
|
||||||
def tv_hot(page: int = 1,
|
|
||||||
count: int = 30,
|
|
||||||
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
|
||||||
"""
|
|
||||||
热门电视剧
|
|
||||||
"""
|
|
||||||
return RecommendChain().douban_tv_hot(page=page, count=count)
|
|
||||||
|
|
||||||
|
|
||||||
@router.get("/credits/{doubanid}/{type_name}", summary="豆瓣演员阵容", response_model=List[schemas.MediaPerson])
|
@router.get("/credits/{doubanid}/{type_name}", summary="豆瓣演员阵容", response_model=List[schemas.MediaPerson])
|
||||||
def douban_credits(doubanid: str,
|
def douban_credits(doubanid: str,
|
||||||
type_name: str,
|
type_name: str,
|
||||||
|
|||||||
@@ -5,6 +5,7 @@ from fastapi import APIRouter, Depends
|
|||||||
|
|
||||||
from app import schemas
|
from app import schemas
|
||||||
from app.chain.media import MediaChain
|
from app.chain.media import MediaChain
|
||||||
|
from app.chain.tmdb import TmdbChain
|
||||||
from app.core.config import settings
|
from app.core.config import settings
|
||||||
from app.core.context import Context
|
from app.core.context import Context
|
||||||
from app.core.event import eventmanager
|
from app.core.event import eventmanager
|
||||||
@@ -74,6 +75,7 @@ def search(title: str,
|
|||||||
"""
|
"""
|
||||||
模糊搜索媒体/人物信息列表 media:媒体信息,person:人物信息
|
模糊搜索媒体/人物信息列表 media:媒体信息,person:人物信息
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __get_source(obj: Union[schemas.MediaInfo, schemas.MediaPerson, dict]):
|
def __get_source(obj: Union[schemas.MediaInfo, schemas.MediaPerson, dict]):
|
||||||
"""
|
"""
|
||||||
获取对象属性
|
获取对象属性
|
||||||
@@ -133,9 +135,52 @@ def category(_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
|||||||
return MediaChain().media_category() or {}
|
return MediaChain().media_category() or {}
|
||||||
|
|
||||||
|
|
||||||
|
@router.get("/seasons", summary="查询媒体季信息", response_model=List[schemas.MediaSeason])
def seasons(mediaid: str = None,
            title: str = None,
            year: int = None,
            season: int = None,
            _: schemas.TokenPayload = Depends(verify_token)) -> Any:
    """
    Query season information for a TV media item, either by media id
    ("tmdb:<id>") or by title (+ optional year). When a season number is
    given, the result is narrowed to that season.
    """
    if mediaid:
        if mediaid.startswith("tmdb:"):
            tmdbid = int(mediaid[5:])
            seasons_info = TmdbChain().tmdb_seasons(tmdbid=tmdbid)
            if seasons_info:
                if season:
                    return [sea for sea in seasons_info if sea.season_number == season]
                return seasons_info
    if title:
        meta = MetaInfo(title)
        if year:
            meta.year = year
        mediainfo = MediaChain().recognize_media(meta, mtype=MediaType.TV)
        if mediainfo:
            if settings.RECOGNIZE_SOURCE == "themoviedb":
                seasons_info = TmdbChain().tmdb_seasons(tmdbid=mediainfo.tmdb_id)
                if seasons_info:
                    if season:
                        return [sea for sea in seasons_info if sea.season_number == season]
                    return seasons_info
            else:
                sea = season or 1
                # BUGFIX: response_model is List[MediaSeason] — the single
                # synthesised season must be wrapped in a list, otherwise
                # FastAPI response validation fails.
                return [schemas.MediaSeason(
                    season_number=sea,
                    poster_path=mediainfo.poster_path,
                    name=f"第 {sea} 季",
                    air_date=mediainfo.release_date,
                    overview=mediainfo.overview,
                    vote_average=mediainfo.vote_average,
                    episode_count=mediainfo.number_of_episodes
                )]
    return []
|
||||||
|
|
||||||
|
|
||||||
@router.get("/{mediaid}", summary="查询媒体详情", response_model=schemas.MediaInfo)
|
@router.get("/{mediaid}", summary="查询媒体详情", response_model=schemas.MediaInfo)
|
||||||
def media_info(mediaid: str, type_name: str, title: str = None, year: int = None,
|
def detail(mediaid: str, type_name: str, title: str = None, year: int = None,
|
||||||
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||||
"""
|
"""
|
||||||
根据媒体ID查询themoviedb或豆瓣媒体信息,type_name: 电影/电视剧
|
根据媒体ID查询themoviedb或豆瓣媒体信息,type_name: 电影/电视剧
|
||||||
"""
|
"""
|
||||||
|
|||||||
191
app/api/endpoints/recommend.py
Normal file
191
app/api/endpoints/recommend.py
Normal file
@@ -0,0 +1,191 @@
|
|||||||
|
from typing import Any, List

from fastapi import APIRouter, Depends

from app import schemas
# BUGFIX: the original imported "chain.recommend" and "schemas" without the
# "app." package prefix — inconsistent with every other import in this file
# (and with discover.py's "from app.schemas import DiscoverSourceEventData")
# and unresolvable at runtime.
from app.chain.recommend import RecommendChain
from app.core.event import eventmanager
from app.core.security import verify_token
from app.schemas import RecommendSourceEventData
from app.schemas.types import ChainEventType

router = APIRouter()


@router.get("/source", summary="获取推荐数据源", response_model=List[schemas.RecommendMediaSource])
def source(_: schemas.TokenPayload = Depends(verify_token)) -> Any:
    """
    Return the list of recommend data sources, including any extra sources
    contributed by plugins.
    """
    # Broadcast an event asking plugins for additional recommend sources.
    event_data = RecommendSourceEventData()
    event = eventmanager.send_event(ChainEventType.RecommendSource, event_data)
    # Prefer the context data returned by the event handlers.
    if event and event.event_data:
        event_data: RecommendSourceEventData = event.event_data
        if event_data.extra_sources:
            return event_data.extra_sources
    return []
|
||||||
|
|
||||||
|
|
||||||
|
@router.get("/bangumi_calendar", summary="Bangumi每日放送", response_model=List[schemas.MediaInfo])
|
||||||
|
def bangumi_calendar(page: int = 1,
|
||||||
|
count: int = 30,
|
||||||
|
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||||
|
"""
|
||||||
|
浏览Bangumi每日放送
|
||||||
|
"""
|
||||||
|
return RecommendChain().bangumi_calendar(page=page, count=count)
|
||||||
|
|
||||||
|
|
||||||
|
@router.get("/douban_showing", summary="豆瓣正在热映", response_model=List[schemas.MediaInfo])
|
||||||
|
def douban_showing(page: int = 1,
|
||||||
|
count: int = 30,
|
||||||
|
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||||
|
"""
|
||||||
|
浏览豆瓣正在热映
|
||||||
|
"""
|
||||||
|
return RecommendChain().douban_movie_showing(page=page, count=count)
|
||||||
|
|
||||||
|
|
||||||
|
@router.get("/douban_movies", summary="豆瓣电影", response_model=List[schemas.MediaInfo])
|
||||||
|
def douban_movies(sort: str = "R",
|
||||||
|
tags: str = "",
|
||||||
|
page: int = 1,
|
||||||
|
count: int = 30,
|
||||||
|
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||||
|
"""
|
||||||
|
浏览豆瓣电影信息
|
||||||
|
"""
|
||||||
|
return RecommendChain().douban_movies(sort=sort, tags=tags, page=page, count=count)
|
||||||
|
|
||||||
|
|
||||||
|
@router.get("/douban_tvs", summary="豆瓣剧集", response_model=List[schemas.MediaInfo])
|
||||||
|
def douban_tvs(sort: str = "R",
|
||||||
|
tags: str = "",
|
||||||
|
page: int = 1,
|
||||||
|
count: int = 30,
|
||||||
|
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||||
|
"""
|
||||||
|
浏览豆瓣剧集信息
|
||||||
|
"""
|
||||||
|
return RecommendChain().douban_tvs(sort=sort, tags=tags, page=page, count=count)
|
||||||
|
|
||||||
|
|
||||||
|
@router.get("/douban_movie_top250", summary="豆瓣电影TOP250", response_model=List[schemas.MediaInfo])
|
||||||
|
def douban_movie_top250(page: int = 1,
|
||||||
|
count: int = 30,
|
||||||
|
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||||
|
"""
|
||||||
|
浏览豆瓣剧集信息
|
||||||
|
"""
|
||||||
|
return RecommendChain().douban_movie_top250(page=page, count=count)
|
||||||
|
|
||||||
|
|
||||||
|
@router.get("/douban_tv_weekly_chinese", summary="豆瓣国产剧集周榜", response_model=List[schemas.MediaInfo])
|
||||||
|
def douban_tv_weekly_chinese(page: int = 1,
|
||||||
|
count: int = 30,
|
||||||
|
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||||
|
"""
|
||||||
|
中国每周剧集口碑榜
|
||||||
|
"""
|
||||||
|
return RecommendChain().douban_tv_weekly_chinese(page=page, count=count)
|
||||||
|
|
||||||
|
|
||||||
|
@router.get("/douban_tv_weekly_global", summary="豆瓣全球剧集周榜", response_model=List[schemas.MediaInfo])
|
||||||
|
def douban_tv_weekly_global(page: int = 1,
|
||||||
|
count: int = 30,
|
||||||
|
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||||
|
"""
|
||||||
|
全球每周剧集口碑榜
|
||||||
|
"""
|
||||||
|
return RecommendChain().douban_tv_weekly_global(page=page, count=count)
|
||||||
|
|
||||||
|
|
||||||
|
@router.get("/douban_tv_animation", summary="豆瓣动画剧集", response_model=List[schemas.MediaInfo])
|
||||||
|
def douban_tv_animation(page: int = 1,
|
||||||
|
count: int = 30,
|
||||||
|
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||||
|
"""
|
||||||
|
热门动画剧集
|
||||||
|
"""
|
||||||
|
return RecommendChain().douban_tv_animation(page=page, count=count)
|
||||||
|
|
||||||
|
|
||||||
|
@router.get("/douban_movie_hot", summary="豆瓣热门电影", response_model=List[schemas.MediaInfo])
|
||||||
|
def douban_movie_hot(page: int = 1,
|
||||||
|
count: int = 30,
|
||||||
|
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||||
|
"""
|
||||||
|
热门电影
|
||||||
|
"""
|
||||||
|
return RecommendChain().douban_movie_hot(page=page, count=count)
|
||||||
|
|
||||||
|
|
||||||
|
@router.get("/douban_tv_hot", summary="豆瓣热门电视剧", response_model=List[schemas.MediaInfo])
|
||||||
|
def douban_tv_hot(page: int = 1,
|
||||||
|
count: int = 30,
|
||||||
|
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||||
|
"""
|
||||||
|
热门电视剧
|
||||||
|
"""
|
||||||
|
return RecommendChain().douban_tv_hot(page=page, count=count)
|
||||||
|
|
||||||
|
|
||||||
|
@router.get("/tmdb_movies", summary="TMDB电影", response_model=List[schemas.MediaInfo])
|
||||||
|
def tmdb_movies(sort_by: str = "popularity.desc",
|
||||||
|
with_genres: str = "",
|
||||||
|
with_original_language: str = "",
|
||||||
|
with_keywords: str = "",
|
||||||
|
with_watch_providers: str = "",
|
||||||
|
vote_average: float = 0,
|
||||||
|
vote_count: int = 0,
|
||||||
|
release_date: str = "",
|
||||||
|
page: int = 1,
|
||||||
|
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||||
|
"""
|
||||||
|
浏览TMDB电影信息
|
||||||
|
"""
|
||||||
|
return RecommendChain().tmdb_movies(sort_by=sort_by,
|
||||||
|
with_genres=with_genres,
|
||||||
|
with_original_language=with_original_language,
|
||||||
|
with_keywords=with_keywords,
|
||||||
|
with_watch_providers=with_watch_providers,
|
||||||
|
vote_average=vote_average,
|
||||||
|
vote_count=vote_count,
|
||||||
|
release_date=release_date,
|
||||||
|
page=page)
|
||||||
|
|
||||||
|
|
||||||
|
@router.get("/tmdb_tvs", summary="TMDB剧集", response_model=List[schemas.MediaInfo])
|
||||||
|
def tmdb_tvs(sort_by: str = "popularity.desc",
|
||||||
|
with_genres: str = "",
|
||||||
|
with_original_language: str = "",
|
||||||
|
with_keywords: str = "",
|
||||||
|
with_watch_providers: str = "",
|
||||||
|
vote_average: float = 0,
|
||||||
|
vote_count: int = 0,
|
||||||
|
release_date: str = "",
|
||||||
|
page: int = 1,
|
||||||
|
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||||
|
"""
|
||||||
|
浏览TMDB剧集信息
|
||||||
|
"""
|
||||||
|
return RecommendChain().tmdb_tvs(sort_by=sort_by,
|
||||||
|
with_genres=with_genres,
|
||||||
|
with_original_language=with_original_language,
|
||||||
|
with_keywords=with_keywords,
|
||||||
|
with_watch_providers=with_watch_providers,
|
||||||
|
vote_average=vote_average,
|
||||||
|
vote_count=vote_count,
|
||||||
|
release_date=release_date,
|
||||||
|
page=page)
|
||||||
|
|
||||||
|
|
||||||
|
@router.get("/tmdb_trending", summary="TMDB流行趋势", response_model=List[schemas.MediaInfo])
|
||||||
|
def tmdb_trending(page: int = 1,
|
||||||
|
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||||
|
"""
|
||||||
|
TMDB流行趋势
|
||||||
|
"""
|
||||||
|
return RecommendChain().tmdb_trending(page=page)
|
||||||
@@ -31,6 +31,7 @@ def search_by_id(mediaid: str,
|
|||||||
title: str = None,
|
title: str = None,
|
||||||
year: int = None,
|
year: int = None,
|
||||||
season: str = None,
|
season: str = None,
|
||||||
|
sites: str = None,
|
||||||
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||||
"""
|
"""
|
||||||
根据TMDBID/豆瓣ID精确搜索站点资源 tmdb:/douban:/bangumi:
|
根据TMDBID/豆瓣ID精确搜索站点资源 tmdb:/douban:/bangumi:
|
||||||
@@ -39,6 +40,10 @@ def search_by_id(mediaid: str,
|
|||||||
mtype = MediaType(mtype)
|
mtype = MediaType(mtype)
|
||||||
if season:
|
if season:
|
||||||
season = int(season)
|
season = int(season)
|
||||||
|
if sites:
|
||||||
|
site_list = [int(site) for site in sites.split(",") if site]
|
||||||
|
else:
|
||||||
|
site_list = None
|
||||||
torrents = None
|
torrents = None
|
||||||
# 根据前缀识别媒体ID
|
# 根据前缀识别媒体ID
|
||||||
if mediaid.startswith("tmdb:"):
|
if mediaid.startswith("tmdb:"):
|
||||||
@@ -48,11 +53,13 @@ def search_by_id(mediaid: str,
|
|||||||
doubaninfo = MediaChain().get_doubaninfo_by_tmdbid(tmdbid=tmdbid, mtype=mtype)
|
doubaninfo = MediaChain().get_doubaninfo_by_tmdbid(tmdbid=tmdbid, mtype=mtype)
|
||||||
if doubaninfo:
|
if doubaninfo:
|
||||||
torrents = SearchChain().search_by_id(doubanid=doubaninfo.get("id"),
|
torrents = SearchChain().search_by_id(doubanid=doubaninfo.get("id"),
|
||||||
mtype=mtype, area=area, season=season)
|
mtype=mtype, area=area, season=season,
|
||||||
|
sites=site_list)
|
||||||
else:
|
else:
|
||||||
return schemas.Response(success=False, message="未识别到豆瓣媒体信息")
|
return schemas.Response(success=False, message="未识别到豆瓣媒体信息")
|
||||||
else:
|
else:
|
||||||
torrents = SearchChain().search_by_id(tmdbid=tmdbid, mtype=mtype, area=area, season=season)
|
torrents = SearchChain().search_by_id(tmdbid=tmdbid, mtype=mtype, area=area, season=season,
|
||||||
|
sites=site_list)
|
||||||
elif mediaid.startswith("douban:"):
|
elif mediaid.startswith("douban:"):
|
||||||
doubanid = mediaid.replace("douban:", "")
|
doubanid = mediaid.replace("douban:", "")
|
||||||
if settings.RECOGNIZE_SOURCE == "themoviedb":
|
if settings.RECOGNIZE_SOURCE == "themoviedb":
|
||||||
@@ -62,11 +69,13 @@ def search_by_id(mediaid: str,
|
|||||||
if tmdbinfo.get('season') and not season:
|
if tmdbinfo.get('season') and not season:
|
||||||
season = tmdbinfo.get('season')
|
season = tmdbinfo.get('season')
|
||||||
torrents = SearchChain().search_by_id(tmdbid=tmdbinfo.get("id"),
|
torrents = SearchChain().search_by_id(tmdbid=tmdbinfo.get("id"),
|
||||||
mtype=mtype, area=area, season=season)
|
mtype=mtype, area=area, season=season,
|
||||||
|
sites=site_list)
|
||||||
else:
|
else:
|
||||||
return schemas.Response(success=False, message="未识别到TMDB媒体信息")
|
return schemas.Response(success=False, message="未识别到TMDB媒体信息")
|
||||||
else:
|
else:
|
||||||
torrents = SearchChain().search_by_id(doubanid=doubanid, mtype=mtype, area=area, season=season)
|
torrents = SearchChain().search_by_id(doubanid=doubanid, mtype=mtype, area=area, season=season,
|
||||||
|
sites=site_list)
|
||||||
elif mediaid.startswith("bangumi:"):
|
elif mediaid.startswith("bangumi:"):
|
||||||
bangumiid = int(mediaid.replace("bangumi:", ""))
|
bangumiid = int(mediaid.replace("bangumi:", ""))
|
||||||
if settings.RECOGNIZE_SOURCE == "themoviedb":
|
if settings.RECOGNIZE_SOURCE == "themoviedb":
|
||||||
@@ -74,7 +83,8 @@ def search_by_id(mediaid: str,
|
|||||||
tmdbinfo = MediaChain().get_tmdbinfo_by_bangumiid(bangumiid=bangumiid)
|
tmdbinfo = MediaChain().get_tmdbinfo_by_bangumiid(bangumiid=bangumiid)
|
||||||
if tmdbinfo:
|
if tmdbinfo:
|
||||||
torrents = SearchChain().search_by_id(tmdbid=tmdbinfo.get("id"),
|
torrents = SearchChain().search_by_id(tmdbid=tmdbinfo.get("id"),
|
||||||
mtype=mtype, area=area, season=season)
|
mtype=mtype, area=area, season=season,
|
||||||
|
sites=site_list)
|
||||||
else:
|
else:
|
||||||
return schemas.Response(success=False, message="未识别到TMDB媒体信息")
|
return schemas.Response(success=False, message="未识别到TMDB媒体信息")
|
||||||
else:
|
else:
|
||||||
@@ -82,7 +92,8 @@ def search_by_id(mediaid: str,
|
|||||||
doubaninfo = MediaChain().get_doubaninfo_by_bangumiid(bangumiid=bangumiid)
|
doubaninfo = MediaChain().get_doubaninfo_by_bangumiid(bangumiid=bangumiid)
|
||||||
if doubaninfo:
|
if doubaninfo:
|
||||||
torrents = SearchChain().search_by_id(doubanid=doubaninfo.get("id"),
|
torrents = SearchChain().search_by_id(doubanid=doubaninfo.get("id"),
|
||||||
mtype=mtype, area=area, season=season)
|
mtype=mtype, area=area, season=season,
|
||||||
|
sites=site_list)
|
||||||
else:
|
else:
|
||||||
return schemas.Response(success=False, message="未识别到豆瓣媒体信息")
|
return schemas.Response(success=False, message="未识别到豆瓣媒体信息")
|
||||||
else:
|
else:
|
||||||
@@ -133,12 +144,13 @@ def search_by_id(mediaid: str,
|
|||||||
@router.get("/title", summary="模糊搜索资源", response_model=schemas.Response)
|
@router.get("/title", summary="模糊搜索资源", response_model=schemas.Response)
|
||||||
def search_by_title(keyword: str = None,
|
def search_by_title(keyword: str = None,
|
||||||
page: int = 0,
|
page: int = 0,
|
||||||
site: int = None,
|
sites: str = None,
|
||||||
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||||
"""
|
"""
|
||||||
根据名称模糊搜索站点资源,支持分页,关键词为空是返回首页资源
|
根据名称模糊搜索站点资源,支持分页,关键词为空是返回首页资源
|
||||||
"""
|
"""
|
||||||
torrents = SearchChain().search_by_title(title=keyword, page=page, site=site)
|
torrents = SearchChain().search_by_title(title=keyword, page=page,
|
||||||
|
sites=[int(site) for site in sites.split(",") if site] if sites else None)
|
||||||
if not torrents:
|
if not torrents:
|
||||||
return schemas.Response(success=False, message="未搜索到任何资源")
|
return schemas.Response(success=False, message="未搜索到任何资源")
|
||||||
return schemas.Response(success=True, data=[torrent.to_dict() for torrent in torrents])
|
return schemas.Response(success=True, data=[torrent.to_dict() for torrent in torrents])
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
from typing import List, Any
|
from typing import List, Any, Dict
|
||||||
|
|
||||||
from fastapi import APIRouter, Depends, HTTPException
|
from fastapi import APIRouter, Depends, HTTPException
|
||||||
from sqlalchemy.orm import Session
|
from sqlalchemy.orm import Session
|
||||||
@@ -259,8 +259,41 @@ def site_icon(site_id: int,
|
|||||||
})
|
})
|
||||||
|
|
||||||
|
|
||||||
|
@router.get("/category/{site_id}", summary="站点分类", response_model=List[schemas.SiteCategory])
|
||||||
|
def site_category(site_id: int,
|
||||||
|
db: Session = Depends(get_db),
|
||||||
|
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||||
|
"""
|
||||||
|
获取站点分类
|
||||||
|
"""
|
||||||
|
site = Site.get(db, site_id)
|
||||||
|
if not site:
|
||||||
|
raise HTTPException(
|
||||||
|
status_code=404,
|
||||||
|
detail=f"站点 {site_id} 不存在",
|
||||||
|
)
|
||||||
|
indexer = SitesHelper().get_indexer(site.domain)
|
||||||
|
if not indexer:
|
||||||
|
raise HTTPException(
|
||||||
|
status_code=404,
|
||||||
|
detail=f"站点 {site.domain} 不支持",
|
||||||
|
)
|
||||||
|
category: Dict[str, List[dict]] = indexer.get('category') or []
|
||||||
|
if not category:
|
||||||
|
return []
|
||||||
|
result = []
|
||||||
|
for cats in category.values():
|
||||||
|
for cat in cats:
|
||||||
|
if cat not in result:
|
||||||
|
result.append(cat)
|
||||||
|
return result
|
||||||
|
|
||||||
|
|
||||||
@router.get("/resource/{site_id}", summary="站点资源", response_model=List[schemas.TorrentInfo])
|
@router.get("/resource/{site_id}", summary="站点资源", response_model=List[schemas.TorrentInfo])
|
||||||
def site_resource(site_id: int,
|
def site_resource(site_id: int,
|
||||||
|
keyword: str = None,
|
||||||
|
cat: str = None,
|
||||||
|
page: int = 0,
|
||||||
db: Session = Depends(get_db),
|
db: Session = Depends(get_db),
|
||||||
_: schemas.TokenPayload = Depends(get_current_active_superuser)) -> Any:
|
_: schemas.TokenPayload = Depends(get_current_active_superuser)) -> Any:
|
||||||
"""
|
"""
|
||||||
@@ -272,7 +305,7 @@ def site_resource(site_id: int,
|
|||||||
status_code=404,
|
status_code=404,
|
||||||
detail=f"站点 {site_id} 不存在",
|
detail=f"站点 {site_id} 不存在",
|
||||||
)
|
)
|
||||||
torrents = TorrentsChain().browse(domain=site.domain)
|
torrents = TorrentsChain().browse(domain=site.domain, keyword=keyword, cat=cat, page=page)
|
||||||
if not torrents:
|
if not torrents:
|
||||||
return []
|
return []
|
||||||
return [torrent.to_dict() for torrent in torrents]
|
return [torrent.to_dict() for torrent in torrents]
|
||||||
|
|||||||
@@ -8,6 +8,7 @@ from pathlib import Path
|
|||||||
from typing import Optional, Union
|
from typing import Optional, Union
|
||||||
|
|
||||||
import aiofiles
|
import aiofiles
|
||||||
|
import pillow_avif # noqa 用于自动注册AVIF支持
|
||||||
from PIL import Image
|
from PIL import Image
|
||||||
from fastapi import APIRouter, Depends, HTTPException, Header, Request, Response
|
from fastapi import APIRouter, Depends, HTTPException, Header, Request, Response
|
||||||
from fastapi.responses import StreamingResponse
|
from fastapi.responses import StreamingResponse
|
||||||
@@ -50,7 +51,6 @@ def fetch_image(
|
|||||||
"""
|
"""
|
||||||
处理图片缓存逻辑,支持HTTP缓存和磁盘缓存
|
处理图片缓存逻辑,支持HTTP缓存和磁盘缓存
|
||||||
"""
|
"""
|
||||||
|
|
||||||
if not url:
|
if not url:
|
||||||
raise HTTPException(status_code=404, detail="URL not provided")
|
raise HTTPException(status_code=404, detail="URL not provided")
|
||||||
|
|
||||||
@@ -68,6 +68,10 @@ def fetch_image(
|
|||||||
sanitized_path = SecurityUtils.sanitize_url_path(url)
|
sanitized_path = SecurityUtils.sanitize_url_path(url)
|
||||||
cache_path = settings.CACHE_PATH / "images" / sanitized_path
|
cache_path = settings.CACHE_PATH / "images" / sanitized_path
|
||||||
|
|
||||||
|
# 没有文件类型,则添加后缀,在恶意文件类型和实际需求下的折衷选择
|
||||||
|
if not cache_path.suffix:
|
||||||
|
cache_path = cache_path.with_suffix(".jpg")
|
||||||
|
|
||||||
# 确保缓存路径和文件类型合法
|
# 确保缓存路径和文件类型合法
|
||||||
if not SecurityUtils.is_safe_path(settings.CACHE_PATH, cache_path, settings.SECURITY_IMAGE_SUFFIXES):
|
if not SecurityUtils.is_safe_path(settings.CACHE_PATH, cache_path, settings.SECURITY_IMAGE_SUFFIXES):
|
||||||
raise HTTPException(status_code=400, detail="Invalid cache path or file type")
|
raise HTTPException(status_code=400, detail="Invalid cache path or file type")
|
||||||
@@ -88,7 +92,8 @@ def fetch_image(
|
|||||||
# 请求远程图片
|
# 请求远程图片
|
||||||
referer = "https://movie.douban.com/" if "doubanio.com" in url else None
|
referer = "https://movie.douban.com/" if "doubanio.com" in url else None
|
||||||
proxies = settings.PROXY if proxy else None
|
proxies = settings.PROXY if proxy else None
|
||||||
response = RequestUtils(ua=settings.USER_AGENT, proxies=proxies, referer=referer).get_res(url=url)
|
response = RequestUtils(ua=settings.USER_AGENT, proxies=proxies, referer=referer,
|
||||||
|
accept_type="image/avif,image/webp,image/apng,*/*").get_res(url=url)
|
||||||
if not response:
|
if not response:
|
||||||
raise HTTPException(status_code=502, detail="Failed to fetch the image from the remote server")
|
raise HTTPException(status_code=502, detail="Failed to fetch the image from the remote server")
|
||||||
|
|
||||||
|
|||||||
@@ -3,7 +3,6 @@ from typing import List, Any
|
|||||||
from fastapi import APIRouter, Depends
|
from fastapi import APIRouter, Depends
|
||||||
|
|
||||||
from app import schemas
|
from app import schemas
|
||||||
from app.chain.recommend import RecommendChain
|
|
||||||
from app.chain.tmdb import TmdbChain
|
from app.chain.tmdb import TmdbChain
|
||||||
from app.core.security import verify_token
|
from app.core.security import verify_token
|
||||||
from app.schemas.types import MediaType
|
from app.schemas.types import MediaType
|
||||||
@@ -114,65 +113,6 @@ def tmdb_person_credits(person_id: int,
|
|||||||
return []
|
return []
|
||||||
|
|
||||||
|
|
||||||
@router.get("/movies", summary="TMDB电影", response_model=List[schemas.MediaInfo])
|
|
||||||
def tmdb_movies(sort_by: str = "popularity.desc",
|
|
||||||
with_genres: str = "",
|
|
||||||
with_original_language: str = "",
|
|
||||||
with_keywords: str = "",
|
|
||||||
with_watch_providers: str = "",
|
|
||||||
vote_average: float = 0,
|
|
||||||
vote_count: int = 0,
|
|
||||||
release_date: str = "",
|
|
||||||
page: int = 1,
|
|
||||||
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
|
||||||
"""
|
|
||||||
浏览TMDB电影信息
|
|
||||||
"""
|
|
||||||
return RecommendChain().tmdb_movies(sort_by=sort_by,
|
|
||||||
with_genres=with_genres,
|
|
||||||
with_original_language=with_original_language,
|
|
||||||
with_keywords=with_keywords,
|
|
||||||
with_watch_providers=with_watch_providers,
|
|
||||||
vote_average=vote_average,
|
|
||||||
vote_count=vote_count,
|
|
||||||
release_date=release_date,
|
|
||||||
page=page)
|
|
||||||
|
|
||||||
|
|
||||||
@router.get("/tvs", summary="TMDB剧集", response_model=List[schemas.MediaInfo])
|
|
||||||
def tmdb_tvs(sort_by: str = "popularity.desc",
|
|
||||||
with_genres: str = "",
|
|
||||||
with_original_language: str = "",
|
|
||||||
with_keywords: str = "",
|
|
||||||
with_watch_providers: str = "",
|
|
||||||
vote_average: float = 0,
|
|
||||||
vote_count: int = 0,
|
|
||||||
release_date: str = "",
|
|
||||||
page: int = 1,
|
|
||||||
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
|
||||||
"""
|
|
||||||
浏览TMDB剧集信息
|
|
||||||
"""
|
|
||||||
return RecommendChain().tmdb_tvs(sort_by=sort_by,
|
|
||||||
with_genres=with_genres,
|
|
||||||
with_original_language=with_original_language,
|
|
||||||
with_keywords=with_keywords,
|
|
||||||
with_watch_providers=with_watch_providers,
|
|
||||||
vote_average=vote_average,
|
|
||||||
vote_count=vote_count,
|
|
||||||
release_date=release_date,
|
|
||||||
page=page)
|
|
||||||
|
|
||||||
|
|
||||||
@router.get("/trending", summary="TMDB流行趋势", response_model=List[schemas.MediaInfo])
|
|
||||||
def tmdb_trending(page: int = 1,
|
|
||||||
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
|
||||||
"""
|
|
||||||
TMDB流行趋势
|
|
||||||
"""
|
|
||||||
return RecommendChain().tmdb_trending(page=page)
|
|
||||||
|
|
||||||
|
|
||||||
@router.get("/{tmdbid}/{season}", summary="TMDB季所有集", response_model=List[schemas.TmdbEpisode])
|
@router.get("/{tmdbid}/{season}", summary="TMDB季所有集", response_model=List[schemas.TmdbEpisode])
|
||||||
def tmdb_season_episodes(tmdbid: int, season: int,
|
def tmdb_season_episodes(tmdbid: int, season: int,
|
||||||
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||||
|
|||||||
134
app/api/endpoints/workflow.py
Normal file
134
app/api/endpoints/workflow.py
Normal file
@@ -0,0 +1,134 @@
|
|||||||
|
from datetime import datetime
|
||||||
|
from typing import List, Any
|
||||||
|
|
||||||
|
from fastapi import APIRouter, Depends
|
||||||
|
from sqlalchemy.orm import Session
|
||||||
|
|
||||||
|
from app import schemas
|
||||||
|
from app.core.config import global_vars
|
||||||
|
from app.core.workflow import WorkFlowManager
|
||||||
|
from app.db import get_db
|
||||||
|
from app.db.models.workflow import Workflow
|
||||||
|
from app.db.user_oper import get_current_active_user
|
||||||
|
from app.chain.workflow import WorkflowChain
|
||||||
|
from app.scheduler import Scheduler
|
||||||
|
|
||||||
|
router = APIRouter()
|
||||||
|
|
||||||
|
|
||||||
|
@router.get("/", summary="所有工作流", response_model=List[schemas.Workflow])
|
||||||
|
def list_workflows(db: Session = Depends(get_db),
|
||||||
|
_: schemas.TokenPayload = Depends(get_current_active_user)) -> Any:
|
||||||
|
"""
|
||||||
|
获取工作流列表
|
||||||
|
"""
|
||||||
|
return Workflow.list(db)
|
||||||
|
|
||||||
|
|
||||||
|
@router.post("/", summary="创建工作流", response_model=schemas.Response)
|
||||||
|
def create_workflow(workflow: schemas.Workflow,
|
||||||
|
db: Session = Depends(get_db),
|
||||||
|
_: schemas.TokenPayload = Depends(get_current_active_user)) -> Any:
|
||||||
|
"""
|
||||||
|
创建工作流
|
||||||
|
"""
|
||||||
|
if Workflow.get_by_name(db, workflow.name):
|
||||||
|
return schemas.Response(success=False, message="已存在相同名称的工作流")
|
||||||
|
if not workflow.add_time:
|
||||||
|
workflow.add_time = datetime.strftime(datetime.now(), "%Y-%m-%d %H:%M:%S")
|
||||||
|
if not workflow.state:
|
||||||
|
workflow.state = "P"
|
||||||
|
Workflow(**workflow.dict()).create(db)
|
||||||
|
return schemas.Response(success=True, message="创建工作流成功")
|
||||||
|
|
||||||
|
|
||||||
|
@router.get("/actions", summary="所有动作", response_model=List[dict])
|
||||||
|
def list_actions(_: schemas.TokenPayload = Depends(get_current_active_user)) -> Any:
|
||||||
|
"""
|
||||||
|
获取所有动作
|
||||||
|
"""
|
||||||
|
return WorkFlowManager().list_actions()
|
||||||
|
|
||||||
|
|
||||||
|
@router.get("/{workflow_id}", summary="工作流详情", response_model=schemas.Workflow)
|
||||||
|
def get_workflow(workflow_id: int,
|
||||||
|
db: Session = Depends(get_db),
|
||||||
|
_: schemas.TokenPayload = Depends(get_current_active_user)) -> Any:
|
||||||
|
"""
|
||||||
|
获取工作流详情
|
||||||
|
"""
|
||||||
|
return Workflow.get(db, workflow_id)
|
||||||
|
|
||||||
|
|
||||||
|
@router.put("/{workflow_id}", summary="更新工作流", response_model=schemas.Response)
|
||||||
|
def update_workflow(workflow: schemas.Workflow,
|
||||||
|
db: Session = Depends(get_db),
|
||||||
|
_: schemas.TokenPayload = Depends(get_current_active_user)) -> Any:
|
||||||
|
"""
|
||||||
|
更新工作流
|
||||||
|
"""
|
||||||
|
wf = Workflow.get(db, workflow.id)
|
||||||
|
if not wf:
|
||||||
|
return schemas.Response(success=False, message="工作流不存在")
|
||||||
|
wf.update(db, workflow.dict())
|
||||||
|
return schemas.Response(success=True, message="更新成功")
|
||||||
|
|
||||||
|
|
||||||
|
@router.delete("/{workflow_id}", summary="删除工作流", response_model=schemas.Response)
|
||||||
|
def delete_workflow(workflow_id: int,
|
||||||
|
db: Session = Depends(get_db),
|
||||||
|
_: schemas.TokenPayload = Depends(get_current_active_user)) -> Any:
|
||||||
|
"""
|
||||||
|
删除工作流
|
||||||
|
"""
|
||||||
|
workflow = Workflow.get(db, workflow_id)
|
||||||
|
if not workflow:
|
||||||
|
return schemas.Response(success=False, message="工作流不存在")
|
||||||
|
Scheduler().remove_workflow_job(workflow)
|
||||||
|
Workflow.delete(db, workflow_id)
|
||||||
|
return schemas.Response(success=True, message="删除成功")
|
||||||
|
|
||||||
|
|
||||||
|
@router.post("/{workflow_id}/run", summary="执行工作流", response_model=schemas.Response)
|
||||||
|
def run_workflow(workflow_id: int,
|
||||||
|
from_begin: bool = True,
|
||||||
|
_: schemas.TokenPayload = Depends(get_current_active_user)) -> Any:
|
||||||
|
"""
|
||||||
|
执行工作流
|
||||||
|
"""
|
||||||
|
state, errmsg = WorkflowChain().process(workflow_id, from_begin=from_begin)
|
||||||
|
if not state:
|
||||||
|
return schemas.Response(success=False, message=errmsg)
|
||||||
|
return schemas.Response(success=True)
|
||||||
|
|
||||||
|
|
||||||
|
@router.post("/{workflow_id}/start", summary="启用工作流", response_model=schemas.Response)
|
||||||
|
def start_workflow(workflow_id: int,
|
||||||
|
db: Session = Depends(get_db),
|
||||||
|
_: schemas.TokenPayload = Depends(get_current_active_user)) -> Any:
|
||||||
|
"""
|
||||||
|
启用工作流
|
||||||
|
"""
|
||||||
|
workflow = Workflow.get(db, workflow_id)
|
||||||
|
if not workflow:
|
||||||
|
return schemas.Response(success=False, message="工作流不存在")
|
||||||
|
Scheduler().update_workflow_job(workflow)
|
||||||
|
global_vars.workflow_resume(workflow_id)
|
||||||
|
workflow.update_state(db, workflow_id, "W")
|
||||||
|
return schemas.Response(success=True)
|
||||||
|
|
||||||
|
|
||||||
|
@router.post("/{workflow_id}/pause", summary="停用工作流", response_model=schemas.Response)
|
||||||
|
def pause_workflow(workflow_id: int,
|
||||||
|
db: Session = Depends(get_db),
|
||||||
|
_: schemas.TokenPayload = Depends(get_current_active_user)) -> Any:
|
||||||
|
"""
|
||||||
|
停用工作流
|
||||||
|
"""
|
||||||
|
workflow = Workflow.get(db, workflow_id)
|
||||||
|
if not workflow:
|
||||||
|
return schemas.Response(success=False, message="工作流不存在")
|
||||||
|
Scheduler().remove_workflow_job(workflow)
|
||||||
|
global_vars.stop_workflow(workflow_id)
|
||||||
|
workflow.update_state(db, workflow_id, "P")
|
||||||
|
return schemas.Response(success=True)
|
||||||
@@ -7,7 +7,6 @@ from pathlib import Path
|
|||||||
from typing import Optional, Any, Tuple, List, Set, Union, Dict
|
from typing import Optional, Any, Tuple, List, Set, Union, Dict
|
||||||
|
|
||||||
from qbittorrentapi import TorrentFilesList
|
from qbittorrentapi import TorrentFilesList
|
||||||
from ruamel.yaml import CommentedMap
|
|
||||||
from transmission_rpc import File
|
from transmission_rpc import File
|
||||||
|
|
||||||
from app.core.config import settings
|
from app.core.config import settings
|
||||||
@@ -77,7 +76,7 @@ class ChainBase(metaclass=ABCMeta):
|
|||||||
"""
|
"""
|
||||||
cache_path = settings.TEMP_PATH / filename
|
cache_path = settings.TEMP_PATH / filename
|
||||||
if cache_path.exists():
|
if cache_path.exists():
|
||||||
Path(cache_path).unlink()
|
cache_path.unlink()
|
||||||
|
|
||||||
def run_module(self, method: str, *args, **kwargs) -> Any:
|
def run_module(self, method: str, *args, **kwargs) -> Any:
|
||||||
"""
|
"""
|
||||||
@@ -308,7 +307,7 @@ class ChainBase(metaclass=ABCMeta):
|
|||||||
"""
|
"""
|
||||||
return self.run_module("search_collections", name=name)
|
return self.run_module("search_collections", name=name)
|
||||||
|
|
||||||
def search_torrents(self, site: CommentedMap,
|
def search_torrents(self, site: dict,
|
||||||
keywords: List[str],
|
keywords: List[str],
|
||||||
mtype: MediaType = None,
|
mtype: MediaType = None,
|
||||||
page: int = 0) -> List[TorrentInfo]:
|
page: int = 0) -> List[TorrentInfo]:
|
||||||
@@ -323,13 +322,16 @@ class ChainBase(metaclass=ABCMeta):
|
|||||||
return self.run_module("search_torrents", site=site, keywords=keywords,
|
return self.run_module("search_torrents", site=site, keywords=keywords,
|
||||||
mtype=mtype, page=page)
|
mtype=mtype, page=page)
|
||||||
|
|
||||||
def refresh_torrents(self, site: CommentedMap) -> List[TorrentInfo]:
|
def refresh_torrents(self, site: dict, keyword: str = None, cat: str = None, page: int = 0) -> List[TorrentInfo]:
|
||||||
"""
|
"""
|
||||||
获取站点最新一页的种子,多个站点需要多线程处理
|
获取站点最新一页的种子,多个站点需要多线程处理
|
||||||
:param site: 站点
|
:param site: 站点
|
||||||
|
:param keyword: 标题
|
||||||
|
:param cat: 分类
|
||||||
|
:param page: 页码
|
||||||
:reutrn: 种子资源列表
|
:reutrn: 种子资源列表
|
||||||
"""
|
"""
|
||||||
return self.run_module("refresh_torrents", site=site)
|
return self.run_module("refresh_torrents", site=site, keyword=keyword, cat=cat, page=page)
|
||||||
|
|
||||||
def filter_torrents(self, rule_groups: List[str],
|
def filter_torrents(self, rule_groups: List[str],
|
||||||
torrent_list: List[TorrentInfo],
|
torrent_list: List[TorrentInfo],
|
||||||
|
|||||||
@@ -1,8 +1,9 @@
|
|||||||
import io
|
import io
|
||||||
import tempfile
|
import tempfile
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
from typing import Any, List
|
from typing import List
|
||||||
|
|
||||||
|
import pillow_avif # noqa 用于自动注册AVIF支持
|
||||||
from PIL import Image
|
from PIL import Image
|
||||||
|
|
||||||
from app.chain import ChainBase
|
from app.chain import ChainBase
|
||||||
@@ -116,6 +117,10 @@ class RecommendChain(ChainBase, metaclass=Singleton):
|
|||||||
sanitized_path = SecurityUtils.sanitize_url_path(url)
|
sanitized_path = SecurityUtils.sanitize_url_path(url)
|
||||||
cache_path = settings.CACHE_PATH / "images" / sanitized_path
|
cache_path = settings.CACHE_PATH / "images" / sanitized_path
|
||||||
|
|
||||||
|
# 没有文件类型,则添加后缀,在恶意文件类型和实际需求下的折衷选择
|
||||||
|
if not cache_path.suffix:
|
||||||
|
cache_path = cache_path.with_suffix(".jpg")
|
||||||
|
|
||||||
# 确保缓存路径和文件类型合法
|
# 确保缓存路径和文件类型合法
|
||||||
if not SecurityUtils.is_safe_path(settings.CACHE_PATH, cache_path, settings.SECURITY_IMAGE_SUFFIXES):
|
if not SecurityUtils.is_safe_path(settings.CACHE_PATH, cache_path, settings.SECURITY_IMAGE_SUFFIXES):
|
||||||
logger.debug(f"Invalid cache path or file type for URL: {url}, sanitized path: {sanitized_path}")
|
logger.debug(f"Invalid cache path or file type for URL: {url}, sanitized path: {sanitized_path}")
|
||||||
@@ -225,23 +230,6 @@ class RecommendChain(ChainBase, metaclass=Singleton):
|
|||||||
medias = self.bangumichain.calendar()
|
medias = self.bangumichain.calendar()
|
||||||
return [media.to_dict() for media in medias[(page - 1) * count: page * count]] if medias else []
|
return [media.to_dict() for media in medias[(page - 1) * count: page * count]] if medias else []
|
||||||
|
|
||||||
@log_execution_time(logger=logger)
|
|
||||||
@cached(ttl=recommend_ttl, region=recommend_cache_region)
|
|
||||||
def bangumi_discover(self, type: int = 2,
|
|
||||||
cat: int = None,
|
|
||||||
sort: str = 'rank',
|
|
||||||
year: int = None,
|
|
||||||
count: int = 30,
|
|
||||||
page: int = 1) -> List[dict]:
|
|
||||||
"""
|
|
||||||
搜索Bangumi
|
|
||||||
"""
|
|
||||||
medias = self.bangumichain.discover(type=type, cat=cat, sort=sort, year=year,
|
|
||||||
limit=count, offset=(page - 1) * count)
|
|
||||||
if medias:
|
|
||||||
return [media.to_dict() for media in medias]
|
|
||||||
return []
|
|
||||||
|
|
||||||
@log_execution_time(logger=logger)
|
@log_execution_time(logger=logger)
|
||||||
@cached(ttl=recommend_ttl, region=recommend_cache_region)
|
@cached(ttl=recommend_ttl, region=recommend_cache_region)
|
||||||
def douban_movie_showing(self, page: int = 1, count: int = 30) -> List[dict]:
|
def douban_movie_showing(self, page: int = 1, count: int = 30) -> List[dict]:
|
||||||
|
|||||||
@@ -35,7 +35,8 @@ class SearchChain(ChainBase):
|
|||||||
self.torrenthelper = TorrentHelper()
|
self.torrenthelper = TorrentHelper()
|
||||||
|
|
||||||
def search_by_id(self, tmdbid: int = None, doubanid: str = None,
|
def search_by_id(self, tmdbid: int = None, doubanid: str = None,
|
||||||
mtype: MediaType = None, area: str = "title", season: int = None) -> List[Context]:
|
mtype: MediaType = None, area: str = "title", season: int = None,
|
||||||
|
sites: List[int] = None) -> List[Context]:
|
||||||
"""
|
"""
|
||||||
根据TMDBID/豆瓣ID搜索资源,精确匹配,不过滤本地存在的资源
|
根据TMDBID/豆瓣ID搜索资源,精确匹配,不过滤本地存在的资源
|
||||||
:param tmdbid: TMDB ID
|
:param tmdbid: TMDB ID
|
||||||
@@ -43,6 +44,7 @@ class SearchChain(ChainBase):
|
|||||||
:param mtype: 媒体,电影 or 电视剧
|
:param mtype: 媒体,电影 or 电视剧
|
||||||
:param area: 搜索范围,title or imdbid
|
:param area: 搜索范围,title or imdbid
|
||||||
:param season: 季数
|
:param season: 季数
|
||||||
|
:param sites: 站点ID列表
|
||||||
"""
|
"""
|
||||||
mediainfo = self.recognize_media(tmdbid=tmdbid, doubanid=doubanid, mtype=mtype)
|
mediainfo = self.recognize_media(tmdbid=tmdbid, doubanid=doubanid, mtype=mtype)
|
||||||
if not mediainfo:
|
if not mediainfo:
|
||||||
@@ -55,25 +57,27 @@ class SearchChain(ChainBase):
|
|||||||
season: NotExistMediaInfo(episodes=[])
|
season: NotExistMediaInfo(episodes=[])
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
results = self.process(mediainfo=mediainfo, area=area, no_exists=no_exists)
|
results = self.process(mediainfo=mediainfo, sites=sites, area=area, no_exists=no_exists)
|
||||||
# 保存到本地文件
|
# 保存到本地文件
|
||||||
bytes_results = pickle.dumps(results)
|
bytes_results = pickle.dumps(results)
|
||||||
self.save_cache(bytes_results, self.__result_temp_file)
|
self.save_cache(bytes_results, self.__result_temp_file)
|
||||||
return results
|
return results
|
||||||
|
|
||||||
def search_by_title(self, title: str, page: int = 0, site: int = None) -> List[Context]:
|
def search_by_title(self, title: str, page: int = 0,
|
||||||
|
sites: List[int] = None, cache_local: bool = True) -> List[Context]:
|
||||||
"""
|
"""
|
||||||
根据标题搜索资源,不识别不过滤,直接返回站点内容
|
根据标题搜索资源,不识别不过滤,直接返回站点内容
|
||||||
:param title: 标题,为空时返回所有站点首页内容
|
:param title: 标题,为空时返回所有站点首页内容
|
||||||
:param page: 页码
|
:param page: 页码
|
||||||
:param site: 站点ID
|
:param sites: 站点ID列表
|
||||||
|
:param cache_local: 是否缓存到本地
|
||||||
"""
|
"""
|
||||||
if title:
|
if title:
|
||||||
logger.info(f'开始搜索资源,关键词:{title} ...')
|
logger.info(f'开始搜索资源,关键词:{title} ...')
|
||||||
else:
|
else:
|
||||||
logger.info(f'开始浏览资源,站点:{site} ...')
|
logger.info(f'开始浏览资源,站点:{sites} ...')
|
||||||
# 搜索
|
# 搜索
|
||||||
torrents = self.__search_all_sites(keywords=[title], sites=[site] if site else None, page=page) or []
|
torrents = self.__search_all_sites(keywords=[title], sites=sites, page=page) or []
|
||||||
if not torrents:
|
if not torrents:
|
||||||
logger.warn(f'{title} 未搜索到资源')
|
logger.warn(f'{title} 未搜索到资源')
|
||||||
return []
|
return []
|
||||||
@@ -81,8 +85,9 @@ class SearchChain(ChainBase):
|
|||||||
contexts = [Context(meta_info=MetaInfo(title=torrent.title, subtitle=torrent.description),
|
contexts = [Context(meta_info=MetaInfo(title=torrent.title, subtitle=torrent.description),
|
||||||
torrent_info=torrent) for torrent in torrents]
|
torrent_info=torrent) for torrent in torrents]
|
||||||
# 保存到本地文件
|
# 保存到本地文件
|
||||||
bytes_results = pickle.dumps(contexts)
|
if cache_local:
|
||||||
self.save_cache(bytes_results, self.__result_temp_file)
|
bytes_results = pickle.dumps(contexts)
|
||||||
|
self.save_cache(bytes_results, self.__result_temp_file)
|
||||||
return contexts
|
return contexts
|
||||||
|
|
||||||
def last_search_results(self) -> List[Context]:
|
def last_search_results(self) -> List[Context]:
|
||||||
|
|||||||
@@ -6,7 +6,6 @@ from typing import Optional, Tuple, Union, Dict
|
|||||||
from urllib.parse import urljoin
|
from urllib.parse import urljoin
|
||||||
|
|
||||||
from lxml import etree
|
from lxml import etree
|
||||||
from ruamel.yaml import CommentedMap
|
|
||||||
|
|
||||||
from app.chain import ChainBase
|
from app.chain import ChainBase
|
||||||
from app.core.config import global_vars, settings
|
from app.core.config import global_vars, settings
|
||||||
@@ -55,7 +54,7 @@ class SiteChain(ChainBase):
|
|||||||
"yemapt.org": self.__yema_test,
|
"yemapt.org": self.__yema_test,
|
||||||
}
|
}
|
||||||
|
|
||||||
def refresh_userdata(self, site: CommentedMap = None) -> Optional[SiteUserData]:
|
def refresh_userdata(self, site: dict = None) -> Optional[SiteUserData]:
|
||||||
"""
|
"""
|
||||||
刷新站点的用户数据
|
刷新站点的用户数据
|
||||||
:param site: 站点
|
:param site: 站点
|
||||||
|
|||||||
@@ -84,6 +84,12 @@ class StorageChain(ChainBase):
|
|||||||
"""
|
"""
|
||||||
return self.run_module("rename_file", fileitem=fileitem, name=name)
|
return self.run_module("rename_file", fileitem=fileitem, name=name)
|
||||||
|
|
||||||
|
def exists(self, fileitem: schemas.FileItem) -> Optional[bool]:
|
||||||
|
"""
|
||||||
|
判断文件或目录是否存在
|
||||||
|
"""
|
||||||
|
return True if self.get_item(fileitem) else False
|
||||||
|
|
||||||
def get_item(self, fileitem: schemas.FileItem) -> Optional[schemas.FileItem]:
|
def get_item(self, fileitem: schemas.FileItem) -> Optional[schemas.FileItem]:
|
||||||
"""
|
"""
|
||||||
查询目录或文件
|
查询目录或文件
|
||||||
|
|||||||
@@ -73,17 +73,20 @@ class TorrentsChain(ChainBase, metaclass=Singleton):
|
|||||||
logger.info(f'种子缓存数据清理完成')
|
logger.info(f'种子缓存数据清理完成')
|
||||||
|
|
||||||
@cached(cache=TTLCache(maxsize=128, ttl=595))
|
@cached(cache=TTLCache(maxsize=128, ttl=595))
|
||||||
def browse(self, domain: str) -> List[TorrentInfo]:
|
def browse(self, domain: str, keyword: str = None, cat: str = None, page: int = 0) -> List[TorrentInfo]:
|
||||||
"""
|
"""
|
||||||
浏览站点首页内容,返回种子清单,TTL缓存10分钟
|
浏览站点首页内容,返回种子清单,TTL缓存10分钟
|
||||||
:param domain: 站点域名
|
:param domain: 站点域名
|
||||||
|
:param keyword: 搜索标题
|
||||||
|
:param cat: 搜索分类
|
||||||
|
:param page: 页码
|
||||||
"""
|
"""
|
||||||
logger.info(f'开始获取站点 {domain} 最新种子 ...')
|
logger.info(f'开始获取站点 {domain} 最新种子 ...')
|
||||||
site = self.siteshelper.get_indexer(domain)
|
site = self.siteshelper.get_indexer(domain)
|
||||||
if not site:
|
if not site:
|
||||||
logger.error(f'站点 {domain} 不存在!')
|
logger.error(f'站点 {domain} 不存在!')
|
||||||
return []
|
return []
|
||||||
return self.refresh_torrents(site=site)
|
return self.refresh_torrents(site=site, keyword=keyword, cat=cat, page=page)
|
||||||
|
|
||||||
@cached(cache=TTLCache(maxsize=128, ttl=295))
|
@cached(cache=TTLCache(maxsize=128, ttl=295))
|
||||||
def rss(self, domain: str) -> List[TorrentInfo]:
|
def rss(self, domain: str) -> List[TorrentInfo]:
|
||||||
|
|||||||
@@ -670,13 +670,10 @@ class TransferChain(ChainBase, metaclass=Singleton):
|
|||||||
self.jobview.add_task(task, state=curr_task.state if curr_task else "waiting")
|
self.jobview.add_task(task, state=curr_task.state if curr_task else "waiting")
|
||||||
|
|
||||||
# 获取集数据
|
# 获取集数据
|
||||||
if not task.episodes_info and task.mediainfo.type == MediaType.TV:
|
if task.mediainfo.type == MediaType.TV and not task.episodes_info:
|
||||||
if task.meta.begin_season is None:
|
|
||||||
task.meta.begin_season = 1
|
|
||||||
task.mediainfo.season = task.mediainfo.season or task.meta.begin_season
|
|
||||||
task.episodes_info = self.tmdbchain.tmdb_episodes(
|
task.episodes_info = self.tmdbchain.tmdb_episodes(
|
||||||
tmdbid=task.mediainfo.tmdb_id,
|
tmdbid=task.mediainfo.tmdb_id,
|
||||||
season=task.mediainfo.season
|
season=task.mediainfo.season or task.meta.begin_season or 1
|
||||||
)
|
)
|
||||||
|
|
||||||
# 查询整理目标目录
|
# 查询整理目标目录
|
||||||
@@ -908,7 +905,7 @@ class TransferChain(ChainBase, metaclass=Singleton):
|
|||||||
season: int = None, epformat: EpisodeFormat = None, min_filesize: int = 0,
|
season: int = None, epformat: EpisodeFormat = None, min_filesize: int = 0,
|
||||||
downloader: str = None, download_hash: str = None,
|
downloader: str = None, download_hash: str = None,
|
||||||
force: bool = False, background: bool = True,
|
force: bool = False, background: bool = True,
|
||||||
manual: bool = False) -> Tuple[bool, str]:
|
manual: bool = False, continue_callback: Callable = None) -> Tuple[bool, str]:
|
||||||
"""
|
"""
|
||||||
执行一个复杂目录的整理操作
|
执行一个复杂目录的整理操作
|
||||||
:param fileitem: 文件项
|
:param fileitem: 文件项
|
||||||
@@ -929,6 +926,7 @@ class TransferChain(ChainBase, metaclass=Singleton):
|
|||||||
:param force: 是否强制整理
|
:param force: 是否强制整理
|
||||||
:param background: 是否后台运行
|
:param background: 是否后台运行
|
||||||
:param manual: 是否手动整理
|
:param manual: 是否手动整理
|
||||||
|
:param continue_callback: 继续处理回调
|
||||||
返回:成功标识,错误信息
|
返回:成功标识,错误信息
|
||||||
"""
|
"""
|
||||||
|
|
||||||
@@ -994,6 +992,8 @@ class TransferChain(ChainBase, metaclass=Singleton):
|
|||||||
for file_item, bluray_dir in file_items:
|
for file_item, bluray_dir in file_items:
|
||||||
if global_vars.is_system_stopped:
|
if global_vars.is_system_stopped:
|
||||||
break
|
break
|
||||||
|
if continue_callback and not continue_callback():
|
||||||
|
break
|
||||||
file_path = Path(file_item.path)
|
file_path = Path(file_item.path)
|
||||||
# 回收站及隐藏的文件不处理
|
# 回收站及隐藏的文件不处理
|
||||||
if file_item.path.find('/@Recycle/') != -1 \
|
if file_item.path.find('/@Recycle/') != -1 \
|
||||||
@@ -1114,6 +1114,8 @@ class TransferChain(ChainBase, metaclass=Singleton):
|
|||||||
for transfer_task in transfer_tasks:
|
for transfer_task in transfer_tasks:
|
||||||
if global_vars.is_system_stopped:
|
if global_vars.is_system_stopped:
|
||||||
break
|
break
|
||||||
|
if continue_callback and not continue_callback():
|
||||||
|
break
|
||||||
# 更新进度
|
# 更新进度
|
||||||
__process_msg = f"正在整理 ({processed_num + fail_num + 1}/{total_num}){transfer_task.fileitem.name} ..."
|
__process_msg = f"正在整理 ({processed_num + fail_num + 1}/{total_num}){transfer_task.fileitem.name} ..."
|
||||||
logger.info(__process_msg)
|
logger.info(__process_msg)
|
||||||
|
|||||||
247
app/chain/workflow.py
Normal file
247
app/chain/workflow.py
Normal file
@@ -0,0 +1,247 @@
|
|||||||
|
import base64
|
||||||
|
import pickle
|
||||||
|
import threading
|
||||||
|
from collections import defaultdict, deque
|
||||||
|
from concurrent.futures import ThreadPoolExecutor
|
||||||
|
from time import sleep
|
||||||
|
from typing import List, Tuple
|
||||||
|
|
||||||
|
from pydantic.fields import Callable
|
||||||
|
|
||||||
|
from app.chain import ChainBase
|
||||||
|
from app.core.config import global_vars
|
||||||
|
from app.core.workflow import WorkFlowManager
|
||||||
|
from app.db.models import Workflow
|
||||||
|
from app.db.workflow_oper import WorkflowOper
|
||||||
|
from app.log import logger
|
||||||
|
from app.schemas import ActionContext, ActionFlow, Action, ActionExecution
|
||||||
|
|
||||||
|
|
||||||
|
class WorkflowExecutor:
|
||||||
|
"""
|
||||||
|
工作流执行器
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, workflow: Workflow, step_callback: Callable = None):
|
||||||
|
"""
|
||||||
|
初始化工作流执行器
|
||||||
|
:param workflow: 工作流对象
|
||||||
|
:param step_callback: 步骤回调函数
|
||||||
|
"""
|
||||||
|
# 工作流数据
|
||||||
|
self.workflow = workflow
|
||||||
|
self.step_callback = step_callback
|
||||||
|
self.actions = {action['id']: Action(**action) for action in workflow.actions}
|
||||||
|
self.flows = [ActionFlow(**flow) for flow in workflow.flows]
|
||||||
|
self.total_actions = len(self.actions)
|
||||||
|
self.finished_actions = 0
|
||||||
|
|
||||||
|
self.success = True
|
||||||
|
self.errmsg = ""
|
||||||
|
|
||||||
|
# 工作流管理器
|
||||||
|
self.workflowmanager = WorkFlowManager()
|
||||||
|
# 线程安全队列
|
||||||
|
self.queue = deque()
|
||||||
|
# 锁用于保证线程安全
|
||||||
|
self.lock = threading.Lock()
|
||||||
|
# 线程池
|
||||||
|
self.executor = ThreadPoolExecutor()
|
||||||
|
# 跟踪运行中的任务数
|
||||||
|
self.running_tasks = 0
|
||||||
|
|
||||||
|
# 构建邻接表、入度表
|
||||||
|
self.adjacency = defaultdict(list)
|
||||||
|
self.indegree = defaultdict(int)
|
||||||
|
for flow in self.flows:
|
||||||
|
source = flow.source
|
||||||
|
target = flow.target
|
||||||
|
self.adjacency[source].append(target)
|
||||||
|
self.indegree[target] += 1
|
||||||
|
|
||||||
|
# 初始化所有节点的入度(确保未被引用的节点入度为0)
|
||||||
|
for action_id in self.actions:
|
||||||
|
if action_id not in self.indegree:
|
||||||
|
self.indegree[action_id] = 0
|
||||||
|
|
||||||
|
# 初始上下文
|
||||||
|
if workflow.current_action and workflow.context:
|
||||||
|
# Base64解码
|
||||||
|
decoded_data = base64.b64decode(workflow.context["content"])
|
||||||
|
# 反序列化数据
|
||||||
|
self.context = pickle.loads(decoded_data)
|
||||||
|
else:
|
||||||
|
self.context = ActionContext()
|
||||||
|
|
||||||
|
# 初始化队列:入度为0的节点
|
||||||
|
for action_id in self.actions:
|
||||||
|
if self.indegree[action_id] == 0:
|
||||||
|
self.queue.append(action_id)
|
||||||
|
|
||||||
|
def execute(self):
|
||||||
|
"""
|
||||||
|
执行工作流
|
||||||
|
"""
|
||||||
|
while True:
|
||||||
|
with self.lock:
|
||||||
|
# 退出条件:队列为空且无运行任务
|
||||||
|
if not self.queue and self.running_tasks == 0:
|
||||||
|
break
|
||||||
|
# 退出条件:出现了错误
|
||||||
|
if not self.success:
|
||||||
|
break
|
||||||
|
if not self.queue:
|
||||||
|
sleep(1)
|
||||||
|
continue
|
||||||
|
# 取出队首节点
|
||||||
|
node_id = self.queue.popleft()
|
||||||
|
# 标记任务开始
|
||||||
|
self.running_tasks += 1
|
||||||
|
|
||||||
|
# 已停机
|
||||||
|
if global_vars.is_workflow_stopped(self.workflow.id):
|
||||||
|
global_vars.workflow_resume(self.workflow.id)
|
||||||
|
break
|
||||||
|
|
||||||
|
# 已执行的跳过
|
||||||
|
if (self.workflow.current_action
|
||||||
|
and node_id in self.workflow.current_action.split(',')):
|
||||||
|
continue
|
||||||
|
|
||||||
|
# 提交任务到线程池
|
||||||
|
future = self.executor.submit(
|
||||||
|
self.execute_node,
|
||||||
|
self.workflow.id,
|
||||||
|
node_id,
|
||||||
|
self.context
|
||||||
|
)
|
||||||
|
future.add_done_callback(self.on_node_complete)
|
||||||
|
|
||||||
|
def execute_node(self, workflow_id: int, node_id: int,
|
||||||
|
context: ActionContext) -> Tuple[Action, bool, str, ActionContext]:
|
||||||
|
"""
|
||||||
|
执行单个节点操作,返回修改后的上下文和节点ID
|
||||||
|
"""
|
||||||
|
action = self.actions[node_id]
|
||||||
|
state, message, result_ctx = self.workflowmanager.excute(workflow_id, action, context=context)
|
||||||
|
return action, state, message, result_ctx
|
||||||
|
|
||||||
|
def on_node_complete(self, future):
|
||||||
|
"""
|
||||||
|
节点完成回调:更新上下文、处理后继节点
|
||||||
|
"""
|
||||||
|
action, state, message, result_ctx = future.result()
|
||||||
|
|
||||||
|
try:
|
||||||
|
self.finished_actions += 1
|
||||||
|
# 更新当前进度
|
||||||
|
self.context.progress = round(self.finished_actions / self.total_actions) * 100
|
||||||
|
|
||||||
|
# 补充执行历史
|
||||||
|
self.context.execute_history.append(
|
||||||
|
ActionExecution(
|
||||||
|
action=action.name,
|
||||||
|
result=state,
|
||||||
|
message=message
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
|
# 节点执行失败
|
||||||
|
if not state:
|
||||||
|
self.success = False
|
||||||
|
self.errmsg = f"{action.name} 失败"
|
||||||
|
return
|
||||||
|
|
||||||
|
with self.lock:
|
||||||
|
# 更新主上下文
|
||||||
|
self.merge_context(result_ctx)
|
||||||
|
# 回调
|
||||||
|
if self.step_callback:
|
||||||
|
self.step_callback(action, self.context)
|
||||||
|
|
||||||
|
# 处理后继节点
|
||||||
|
successors = self.adjacency.get(action.id, [])
|
||||||
|
for succ_id in successors:
|
||||||
|
with self.lock:
|
||||||
|
self.indegree[succ_id] -= 1
|
||||||
|
if self.indegree[succ_id] == 0:
|
||||||
|
self.queue.append(succ_id)
|
||||||
|
finally:
|
||||||
|
# 标记任务完成
|
||||||
|
with self.lock:
|
||||||
|
self.running_tasks -= 1
|
||||||
|
|
||||||
|
def merge_context(self, context: ActionContext):
|
||||||
|
"""
|
||||||
|
合并上下文
|
||||||
|
"""
|
||||||
|
for key, value in context.dict().items():
|
||||||
|
if not getattr(self.context, key, None):
|
||||||
|
setattr(self.context, key, value)
|
||||||
|
|
||||||
|
|
||||||
|
class WorkflowChain(ChainBase):
|
||||||
|
"""
|
||||||
|
工作流链
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
super().__init__()
|
||||||
|
self.workflowoper = WorkflowOper()
|
||||||
|
|
||||||
|
def process(self, workflow_id: int, from_begin: bool = True) -> Tuple[bool, str]:
|
||||||
|
"""
|
||||||
|
处理工作流
|
||||||
|
:param workflow_id: 工作流ID
|
||||||
|
:param from_begin: 是否从头开始,默认为True
|
||||||
|
"""
|
||||||
|
|
||||||
|
def save_step(action: Action, context: ActionContext):
|
||||||
|
"""
|
||||||
|
保存上下文到数据库
|
||||||
|
"""
|
||||||
|
# 序列化数据
|
||||||
|
serialized_data = pickle.dumps(context)
|
||||||
|
# 使用Base64编码字节流
|
||||||
|
encoded_data = base64.b64encode(serialized_data).decode('utf-8')
|
||||||
|
self.workflowoper.step(workflow_id, action_id=action.id, context={
|
||||||
|
"content": encoded_data
|
||||||
|
})
|
||||||
|
|
||||||
|
# 重置工作流
|
||||||
|
if from_begin:
|
||||||
|
self.workflowoper.reset(workflow_id)
|
||||||
|
|
||||||
|
# 查询工作流数据
|
||||||
|
workflow = self.workflowoper.get(workflow_id)
|
||||||
|
if not workflow:
|
||||||
|
logger.warn(f"工作流 {workflow_id} 不存在")
|
||||||
|
return False, "工作流不存在"
|
||||||
|
if not workflow.actions:
|
||||||
|
logger.warn(f"工作流 {workflow.name} 无动作")
|
||||||
|
return False, "工作流无动作"
|
||||||
|
if not workflow.flows:
|
||||||
|
logger.warn(f"工作流 {workflow.name} 无流程")
|
||||||
|
return False, "工作流无流程"
|
||||||
|
|
||||||
|
logger.info(f"开始处理 {workflow.name},共 {len(workflow.actions)} 个动作 ...")
|
||||||
|
self.workflowoper.start(workflow_id)
|
||||||
|
|
||||||
|
# 执行工作流
|
||||||
|
executor = WorkflowExecutor(workflow, step_callback=save_step)
|
||||||
|
executor.execute()
|
||||||
|
|
||||||
|
if not executor.success:
|
||||||
|
logger.info(f"工作流 {workflow.name} 执行失败:{executor.errmsg}")
|
||||||
|
self.workflowoper.fail(workflow_id, result=executor.errmsg)
|
||||||
|
return False, executor.errmsg
|
||||||
|
else:
|
||||||
|
logger.info(f"工作流 {workflow.name} 执行完成")
|
||||||
|
self.workflowoper.success(workflow_id)
|
||||||
|
return True, ""
|
||||||
|
|
||||||
|
def get_workflows(self) -> List[Workflow]:
|
||||||
|
"""
|
||||||
|
获取工作流列表
|
||||||
|
"""
|
||||||
|
return self.workflowoper.list_enabled()
|
||||||
@@ -118,7 +118,7 @@ class ConfigModel(BaseModel):
|
|||||||
# 自动检查和更新站点资源包(站点索引、认证等)
|
# 自动检查和更新站点资源包(站点索引、认证等)
|
||||||
AUTO_UPDATE_RESOURCE: bool = True
|
AUTO_UPDATE_RESOURCE: bool = True
|
||||||
# 是否启用DOH解析域名
|
# 是否启用DOH解析域名
|
||||||
DOH_ENABLE: bool = True
|
DOH_ENABLE: bool = False
|
||||||
# 使用 DOH 解析的域名列表
|
# 使用 DOH 解析的域名列表
|
||||||
DOH_DOMAINS: str = ("api.themoviedb.org,"
|
DOH_DOMAINS: str = ("api.themoviedb.org,"
|
||||||
"api.tmdb.org,"
|
"api.tmdb.org,"
|
||||||
@@ -236,11 +236,18 @@ class ConfigModel(BaseModel):
|
|||||||
"doubanio.com",
|
"doubanio.com",
|
||||||
"lain.bgm.tv",
|
"lain.bgm.tv",
|
||||||
"raw.githubusercontent.com",
|
"raw.githubusercontent.com",
|
||||||
"github.com"]
|
"github.com",
|
||||||
|
"thetvdb.com",
|
||||||
|
"cctvpic.com",
|
||||||
|
"iqiyipic.com",
|
||||||
|
"hdslb.com",
|
||||||
|
"cmvideo.cn",
|
||||||
|
"ykimg.com",
|
||||||
|
"qpic.cn"]
|
||||||
)
|
)
|
||||||
# 允许的图片文件后缀格式
|
# 允许的图片文件后缀格式
|
||||||
SECURITY_IMAGE_SUFFIXES: List[str] = Field(
|
SECURITY_IMAGE_SUFFIXES: List[str] = Field(
|
||||||
default_factory=lambda: [".jpg", ".jpeg", ".png", ".webp", ".gif", ".svg"]
|
default_factory=lambda: [".jpg", ".jpeg", ".png", ".webp", ".gif", ".svg", ".avif"]
|
||||||
)
|
)
|
||||||
# 重命名时支持的S0别名
|
# 重命名时支持的S0别名
|
||||||
RENAME_FORMAT_S0_NAMES: List[str] = Field(
|
RENAME_FORMAT_S0_NAMES: List[str] = Field(
|
||||||
@@ -248,6 +255,8 @@ class ConfigModel(BaseModel):
|
|||||||
)
|
)
|
||||||
# 启用分词搜索
|
# 启用分词搜索
|
||||||
TOKENIZED_SEARCH: bool = False
|
TOKENIZED_SEARCH: bool = False
|
||||||
|
# 为指定默认字幕添加.default后缀
|
||||||
|
DEFAULT_SUB: Optional[str] = "zh-cn"
|
||||||
|
|
||||||
|
|
||||||
class Settings(BaseSettings, ConfigModel, LogConfigModel):
|
class Settings(BaseSettings, ConfigModel, LogConfigModel):
|
||||||
@@ -598,6 +607,8 @@ class GlobalVar(object):
|
|||||||
STOP_EVENT: threading.Event = threading.Event()
|
STOP_EVENT: threading.Event = threading.Event()
|
||||||
# webpush订阅
|
# webpush订阅
|
||||||
SUBSCRIPTIONS: List[dict] = []
|
SUBSCRIPTIONS: List[dict] = []
|
||||||
|
# 需应急停止的工作流
|
||||||
|
EMERGENCY_STOP_WORKFLOWS: List[str] = []
|
||||||
|
|
||||||
def stop_system(self):
|
def stop_system(self):
|
||||||
"""
|
"""
|
||||||
@@ -624,6 +635,26 @@ class GlobalVar(object):
|
|||||||
"""
|
"""
|
||||||
self.SUBSCRIPTIONS.append(subscription)
|
self.SUBSCRIPTIONS.append(subscription)
|
||||||
|
|
||||||
|
def stop_workflow(self, workflow_id: str):
|
||||||
|
"""
|
||||||
|
停止工作流
|
||||||
|
"""
|
||||||
|
if workflow_id not in self.EMERGENCY_STOP_WORKFLOWS:
|
||||||
|
self.EMERGENCY_STOP_WORKFLOWS.append(workflow_id)
|
||||||
|
|
||||||
|
def workflow_resume(self, workflow_id: str):
|
||||||
|
"""
|
||||||
|
恢复工作流
|
||||||
|
"""
|
||||||
|
if workflow_id in self.EMERGENCY_STOP_WORKFLOWS:
|
||||||
|
self.EMERGENCY_STOP_WORKFLOWS.remove(workflow_id)
|
||||||
|
|
||||||
|
def is_workflow_stopped(self, workflow_id: str):
|
||||||
|
"""
|
||||||
|
是否停止工作流
|
||||||
|
"""
|
||||||
|
return self.is_system_stopped or workflow_id in self.EMERGENCY_STOP_WORKFLOWS
|
||||||
|
|
||||||
|
|
||||||
# 实例化配置
|
# 实例化配置
|
||||||
settings = Settings()
|
settings = Settings()
|
||||||
|
|||||||
@@ -793,10 +793,9 @@ class PluginManager(metaclass=Singleton):
|
|||||||
# 已安装插件
|
# 已安装插件
|
||||||
installed_apps = self.systemconfig.get(SystemConfigKey.UserInstalledPlugins) or []
|
installed_apps = self.systemconfig.get(SystemConfigKey.UserInstalledPlugins) or []
|
||||||
# 获取在线插件
|
# 获取在线插件
|
||||||
online_plugins = self.pluginhelper.get_plugins(market, package_version) or {}
|
online_plugins = self.pluginhelper.get_plugins(market, package_version)
|
||||||
if not online_plugins:
|
if online_plugins is None:
|
||||||
if not package_version:
|
logger.warning(f"获取{package_version if package_version else ''}插件库失败:{market},请检查 GitHub 网络连接")
|
||||||
logger.warning(f"获取插件库失败:{market},请检查 GitHub 网络连接")
|
|
||||||
return []
|
return []
|
||||||
ret_plugins = []
|
ret_plugins = []
|
||||||
add_time = len(online_plugins)
|
add_time = len(online_plugins)
|
||||||
|
|||||||
110
app/core/workflow.py
Normal file
110
app/core/workflow.py
Normal file
@@ -0,0 +1,110 @@
|
|||||||
|
from time import sleep
|
||||||
|
from typing import Dict, Any, Tuple, List
|
||||||
|
|
||||||
|
from app.core.config import global_vars
|
||||||
|
from app.helper.module import ModuleHelper
|
||||||
|
from app.log import logger
|
||||||
|
from app.schemas import Action, ActionContext
|
||||||
|
from app.utils.singleton import Singleton
|
||||||
|
|
||||||
|
|
||||||
|
class WorkFlowManager(metaclass=Singleton):
|
||||||
|
"""
|
||||||
|
工作流管理器
|
||||||
|
"""
|
||||||
|
|
||||||
|
# 所有动作定义
|
||||||
|
_actions: Dict[str, Any] = {}
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
self.init()
|
||||||
|
|
||||||
|
def init(self):
|
||||||
|
"""
|
||||||
|
初始化
|
||||||
|
"""
|
||||||
|
|
||||||
|
def filter_func(obj: Any):
|
||||||
|
"""
|
||||||
|
过滤函数,确保只加载新定义的类
|
||||||
|
"""
|
||||||
|
if not isinstance(obj, type):
|
||||||
|
return False
|
||||||
|
if not hasattr(obj, 'execute') or not hasattr(obj, "name"):
|
||||||
|
return False
|
||||||
|
if obj.__name__ == "BaseAction":
|
||||||
|
return False
|
||||||
|
return obj.__module__.startswith("app.actions")
|
||||||
|
|
||||||
|
# 加载所有动作
|
||||||
|
self._actions = {}
|
||||||
|
actions = ModuleHelper.load(
|
||||||
|
"app.actions",
|
||||||
|
filter_func=lambda _, obj: filter_func(obj)
|
||||||
|
)
|
||||||
|
for action in actions:
|
||||||
|
logger.debug(f"加载动作: {action.__name__}")
|
||||||
|
try:
|
||||||
|
self._actions[action.__name__] = action
|
||||||
|
except Exception as err:
|
||||||
|
logger.error(f"加载动作失败: {action.__name__} - {err}")
|
||||||
|
|
||||||
|
def stop(self):
|
||||||
|
"""
|
||||||
|
停止
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
def excute(self, workflow_id: int, action: Action,
|
||||||
|
context: ActionContext = None) -> Tuple[bool, str, ActionContext]:
|
||||||
|
"""
|
||||||
|
执行工作流动作
|
||||||
|
"""
|
||||||
|
if not context:
|
||||||
|
context = ActionContext()
|
||||||
|
if action.type in self._actions:
|
||||||
|
# 实例化
|
||||||
|
action_obj = self._actions[action.type]()
|
||||||
|
# 执行
|
||||||
|
logger.info(f"执行动作: {action.id} - {action.name}")
|
||||||
|
try:
|
||||||
|
result_context = action_obj.execute(workflow_id, action.data, context)
|
||||||
|
except Exception as err:
|
||||||
|
logger.error(f"{action.name} 执行失败: {err}")
|
||||||
|
return False, f"{err}", context
|
||||||
|
loop = action.data.get("loop")
|
||||||
|
loop_interval = action.data.get("loop_interval")
|
||||||
|
if loop and loop_interval:
|
||||||
|
while not action_obj.done:
|
||||||
|
if global_vars.is_workflow_stopped(workflow_id):
|
||||||
|
break
|
||||||
|
# 等待
|
||||||
|
logger.info(f"{action.name} 等待 {loop_interval} 秒后继续执行 ...")
|
||||||
|
sleep(loop_interval)
|
||||||
|
# 执行
|
||||||
|
logger.info(f"继续执行动作: {action.id} - {action.name}")
|
||||||
|
result_context = action_obj.execute(workflow_id, action.data, result_context)
|
||||||
|
if action_obj.success:
|
||||||
|
logger.info(f"{action.name} 执行成功")
|
||||||
|
else:
|
||||||
|
logger.error(f"{action.name} 执行失败!")
|
||||||
|
return action_obj.success, action_obj.message, result_context
|
||||||
|
else:
|
||||||
|
logger.error(f"未找到动作: {action.type} - {action.name}")
|
||||||
|
return False, " ", context
|
||||||
|
|
||||||
|
def list_actions(self) -> List[dict]:
|
||||||
|
"""
|
||||||
|
获取所有动作
|
||||||
|
"""
|
||||||
|
return [
|
||||||
|
{
|
||||||
|
"type": key,
|
||||||
|
"name": action.name,
|
||||||
|
"description": action.description,
|
||||||
|
"data": {
|
||||||
|
"label": action.name,
|
||||||
|
**action.data
|
||||||
|
}
|
||||||
|
} for key, action in self._actions.items()
|
||||||
|
]
|
||||||
@@ -8,3 +8,4 @@ from .systemconfig import SystemConfig
|
|||||||
from .transferhistory import TransferHistory
|
from .transferhistory import TransferHistory
|
||||||
from .user import User
|
from .user import User
|
||||||
from .userconfig import UserConfig
|
from .userconfig import UserConfig
|
||||||
|
from .workflow import Workflow
|
||||||
|
|||||||
101
app/db/models/workflow.py
Normal file
101
app/db/models/workflow.py
Normal file
@@ -0,0 +1,101 @@
|
|||||||
|
from datetime import datetime
|
||||||
|
|
||||||
|
from sqlalchemy import Column, Integer, JSON, Sequence, String, and_
|
||||||
|
|
||||||
|
from app.db import Base, db_query, db_update
|
||||||
|
|
||||||
|
|
||||||
|
class Workflow(Base):
|
||||||
|
"""
|
||||||
|
工作流表
|
||||||
|
"""
|
||||||
|
# ID
|
||||||
|
id = Column(Integer, Sequence('id'), primary_key=True, index=True)
|
||||||
|
# 名称
|
||||||
|
name = Column(String, index=True, nullable=False)
|
||||||
|
# 描述
|
||||||
|
description = Column(String)
|
||||||
|
# 定时器
|
||||||
|
timer = Column(String)
|
||||||
|
# 状态:W-等待 R-运行中 P-暂停 S-成功 F-失败
|
||||||
|
state = Column(String, nullable=False, index=True, default='W')
|
||||||
|
# 已执行动作(,分隔)
|
||||||
|
current_action = Column(String)
|
||||||
|
# 任务执行结果
|
||||||
|
result = Column(String)
|
||||||
|
# 已执行次数
|
||||||
|
run_count = Column(Integer, default=0)
|
||||||
|
# 任务列表
|
||||||
|
actions = Column(JSON, default=list)
|
||||||
|
# 任务流
|
||||||
|
flows = Column(JSON, default=list)
|
||||||
|
# 执行上下文
|
||||||
|
context = Column(JSON, default=dict)
|
||||||
|
# 创建时间
|
||||||
|
add_time = Column(String, default=datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
|
||||||
|
# 最后执行时间
|
||||||
|
last_time = Column(String)
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
@db_query
|
||||||
|
def get_enabled_workflows(db):
|
||||||
|
return db.query(Workflow).filter(Workflow.state != 'P').all()
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
@db_query
|
||||||
|
def get_by_name(db, name: str):
|
||||||
|
return db.query(Workflow).filter(Workflow.name == name).first()
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
@db_update
|
||||||
|
def update_state(db, wid: int, state: str):
|
||||||
|
db.query(Workflow).filter(Workflow.id == wid).update({"state": state})
|
||||||
|
return True
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
@db_update
|
||||||
|
def start(db, wid: int):
|
||||||
|
db.query(Workflow).filter(Workflow.id == wid).update({
|
||||||
|
"state": 'R'
|
||||||
|
})
|
||||||
|
return True
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
@db_update
|
||||||
|
def fail(db, wid: int, result: str):
|
||||||
|
db.query(Workflow).filter(and_(Workflow.id == wid, Workflow.state != "P")).update({
|
||||||
|
"state": 'F',
|
||||||
|
"result": result,
|
||||||
|
"last_time": datetime.now().strftime('%Y-%m-%d %H:%M:%S')
|
||||||
|
})
|
||||||
|
return True
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
@db_update
|
||||||
|
def success(db, wid: int, result: str = None):
|
||||||
|
db.query(Workflow).filter(and_(Workflow.id == wid, Workflow.state != "P")).update({
|
||||||
|
"state": 'S',
|
||||||
|
"result": result,
|
||||||
|
"run_count": Workflow.run_count + 1,
|
||||||
|
"last_time": datetime.now().strftime('%Y-%m-%d %H:%M:%S')
|
||||||
|
})
|
||||||
|
return True
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
@db_update
|
||||||
|
def reset(db, wid: int):
|
||||||
|
db.query(Workflow).filter(Workflow.id == wid).update({
|
||||||
|
"state": 'W',
|
||||||
|
"result": None,
|
||||||
|
"current_action": None,
|
||||||
|
})
|
||||||
|
return True
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
@db_update
|
||||||
|
def update_current_action(db, wid: int, action_id: str, context: dict):
|
||||||
|
db.query(Workflow).filter(Workflow.id == wid).update({
|
||||||
|
"current_action": f"{Workflow.current_action},{action_id}" if Workflow.current_action else action_id,
|
||||||
|
"context": context
|
||||||
|
})
|
||||||
|
return True
|
||||||
68
app/db/workflow_oper.py
Normal file
68
app/db/workflow_oper.py
Normal file
@@ -0,0 +1,68 @@
|
|||||||
|
from typing import List, Tuple
|
||||||
|
|
||||||
|
from app.db import DbOper
|
||||||
|
from app.db.models.workflow import Workflow
|
||||||
|
|
||||||
|
|
||||||
|
class WorkflowOper(DbOper):
|
||||||
|
"""
|
||||||
|
工作流管理
|
||||||
|
"""
|
||||||
|
|
||||||
|
def add(self, **kwargs) -> Tuple[bool, str]:
|
||||||
|
"""
|
||||||
|
新增工作流
|
||||||
|
"""
|
||||||
|
wf = Workflow(**kwargs)
|
||||||
|
if not wf.get_by_name(self._db, kwargs.get("name")):
|
||||||
|
wf.create(self._db)
|
||||||
|
return True, "新增工作流成功"
|
||||||
|
return False, "工作流已存在"
|
||||||
|
|
||||||
|
def get(self, wid: int) -> Workflow:
|
||||||
|
"""
|
||||||
|
查询单个工作流
|
||||||
|
"""
|
||||||
|
return Workflow.get(self._db, wid)
|
||||||
|
|
||||||
|
def list_enabled(self) -> List[Workflow]:
|
||||||
|
"""
|
||||||
|
获取启用的工作流列表
|
||||||
|
"""
|
||||||
|
return Workflow.get_enabled_workflows(self._db)
|
||||||
|
|
||||||
|
def get_by_name(self, name: str) -> Workflow:
|
||||||
|
"""
|
||||||
|
按名称获取工作流
|
||||||
|
"""
|
||||||
|
return Workflow.get_by_name(self._db, name)
|
||||||
|
|
||||||
|
def start(self, wid: int) -> bool:
|
||||||
|
"""
|
||||||
|
启动
|
||||||
|
"""
|
||||||
|
return Workflow.start(self._db, wid)
|
||||||
|
|
||||||
|
def success(self, wid: int, result: str = None) -> bool:
|
||||||
|
"""
|
||||||
|
成功
|
||||||
|
"""
|
||||||
|
return Workflow.success(self._db, wid, result)
|
||||||
|
|
||||||
|
def fail(self, wid: int, result: str) -> bool:
|
||||||
|
"""
|
||||||
|
失败
|
||||||
|
"""
|
||||||
|
return Workflow.fail(self._db, wid, result)
|
||||||
|
|
||||||
|
def step(self, wid: int, action_id: str, context: dict) -> bool:
|
||||||
|
"""
|
||||||
|
步进
|
||||||
|
"""
|
||||||
|
return Workflow.update_current_action(self._db, wid, action_id, context)
|
||||||
|
|
||||||
|
def reset(self, wid: int) -> bool:
|
||||||
|
"""
|
||||||
|
重置
|
||||||
|
"""
|
||||||
|
return Workflow.reset(self._db, wid)
|
||||||
@@ -23,6 +23,7 @@ class ModuleHelper:
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
submodules: list = []
|
submodules: list = []
|
||||||
|
loaded_modules = set()
|
||||||
packages = importlib.import_module(package_path)
|
packages = importlib.import_module(package_path)
|
||||||
for importer, package_name, _ in pkgutil.iter_modules(packages.__path__):
|
for importer, package_name, _ in pkgutil.iter_modules(packages.__path__):
|
||||||
try:
|
try:
|
||||||
@@ -35,6 +36,9 @@ class ModuleHelper:
|
|||||||
if name.startswith('_'):
|
if name.startswith('_'):
|
||||||
continue
|
continue
|
||||||
if isinstance(obj, type) and filter_func(name, obj):
|
if isinstance(obj, type) and filter_func(name, obj):
|
||||||
|
if name in loaded_modules:
|
||||||
|
continue
|
||||||
|
loaded_modules.add(name)
|
||||||
submodules.append(obj)
|
submodules.append(obj)
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
logger.debug(f'加载模块 {package_name} 失败:{str(err)} - {traceback.format_exc()}')
|
logger.debug(f'加载模块 {package_name} 失败:{str(err)} - {traceback.format_exc()}')
|
||||||
|
|||||||
@@ -63,6 +63,7 @@ class PluginHelper(metaclass=Singleton):
|
|||||||
return json.loads(res.text)
|
return json.loads(res.text)
|
||||||
except json.JSONDecodeError:
|
except json.JSONDecodeError:
|
||||||
logger.error(f"插件包数据解析失败:{res.text}")
|
logger.error(f"插件包数据解析失败:{res.text}")
|
||||||
|
return None
|
||||||
return {}
|
return {}
|
||||||
|
|
||||||
def get_plugin_package_version(self, pid: str, repo_url: str, package_version: str = None) -> Optional[str]:
|
def get_plugin_package_version(self, pid: str, repo_url: str, package_version: str = None) -> Optional[str]:
|
||||||
|
|||||||
@@ -225,27 +225,27 @@ class RssHelper:
|
|||||||
}
|
}
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def parse(url, proxy: bool = False, timeout: int = 15, headers: dict = None) -> Union[List[dict], None]:
|
def parse(url, proxy: bool = False, timeout: int = 15, headers: dict = None) -> Union[List[dict], None, bool]:
|
||||||
"""
|
"""
|
||||||
解析RSS订阅URL,获取RSS中的种子信息
|
解析RSS订阅URL,获取RSS中的种子信息
|
||||||
:param url: RSS地址
|
:param url: RSS地址
|
||||||
:param proxy: 是否使用代理
|
:param proxy: 是否使用代理
|
||||||
:param timeout: 请求超时
|
:param timeout: 请求超时
|
||||||
:param headers: 自定义请求头
|
:param headers: 自定义请求头
|
||||||
:return: 种子信息列表,如为None代表Rss过期
|
:return: 种子信息列表,如为None代表Rss过期,如果为False则为错误
|
||||||
"""
|
"""
|
||||||
# 开始处理
|
# 开始处理
|
||||||
ret_array: list = []
|
ret_array: list = []
|
||||||
if not url:
|
if not url:
|
||||||
return []
|
return False
|
||||||
try:
|
try:
|
||||||
ret = RequestUtils(proxies=settings.PROXY if proxy else None,
|
ret = RequestUtils(proxies=settings.PROXY if proxy else None,
|
||||||
timeout=timeout, headers=headers).get_res(url)
|
timeout=timeout, headers=headers).get_res(url)
|
||||||
if not ret:
|
if not ret:
|
||||||
return []
|
return False
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
logger.error(f"获取RSS失败:{str(err)} - {traceback.format_exc()}")
|
logger.error(f"获取RSS失败:{str(err)} - {traceback.format_exc()}")
|
||||||
return []
|
return False
|
||||||
if ret:
|
if ret:
|
||||||
ret_xml = ""
|
ret_xml = ""
|
||||||
try:
|
try:
|
||||||
@@ -322,6 +322,7 @@ class RssHelper:
|
|||||||
]
|
]
|
||||||
if ret_xml in _rss_expired_msg:
|
if ret_xml in _rss_expired_msg:
|
||||||
return None
|
return None
|
||||||
|
return False
|
||||||
return ret_array
|
return ret_array
|
||||||
|
|
||||||
def get_rss_link(self, url: str, cookie: str, ua: str, proxy: bool = False) -> Tuple[str, str]:
|
def get_rss_link(self, url: str, cookie: str, ua: str, proxy: bool = False) -> Tuple[str, str]:
|
||||||
|
|||||||
@@ -33,7 +33,7 @@ class RuleHelper:
|
|||||||
return group
|
return group
|
||||||
return None
|
return None
|
||||||
|
|
||||||
def get_rule_group_by_media(self, media: MediaInfo, group_names: list = None) -> List[FilterRuleGroup]:
|
def get_rule_group_by_media(self, media: MediaInfo = None, group_names: list = None) -> List[FilterRuleGroup]:
|
||||||
"""
|
"""
|
||||||
根据媒体信息获取规则组
|
根据媒体信息获取规则组
|
||||||
"""
|
"""
|
||||||
@@ -44,9 +44,9 @@ class RuleHelper:
|
|||||||
for group in rule_groups:
|
for group in rule_groups:
|
||||||
if not group.media_type:
|
if not group.media_type:
|
||||||
ret_groups.append(group)
|
ret_groups.append(group)
|
||||||
elif not group.category and group.media_type == media.type.value:
|
elif media and not group.category and group.media_type == media.type.value:
|
||||||
ret_groups.append(group)
|
ret_groups.append(group)
|
||||||
elif group.category == media.category:
|
elif media and group.category == media.category:
|
||||||
ret_groups.append(group)
|
ret_groups.append(group)
|
||||||
return ret_groups
|
return ret_groups
|
||||||
|
|
||||||
|
|||||||
@@ -445,6 +445,27 @@ class TorrentHelper(metaclass=Singleton):
|
|||||||
logger.info(f"{torrent_info.title} 不匹配特效规则 {effect}")
|
logger.info(f"{torrent_info.title} 不匹配特效规则 {effect}")
|
||||||
return False
|
return False
|
||||||
|
|
||||||
|
# 大小
|
||||||
|
size_range = filter_params.get("size")
|
||||||
|
if size_range:
|
||||||
|
if size_range.find("-") != -1:
|
||||||
|
# 区间
|
||||||
|
size_min, size_max = size_range.split("-")
|
||||||
|
size_min = float(size_min.strip()) * 1024 * 1024
|
||||||
|
size_max = float(size_max.strip()) * 1024 * 1024
|
||||||
|
if torrent_info.size < size_min or torrent_info.size > size_max:
|
||||||
|
return False
|
||||||
|
elif size_range.startswith(">"):
|
||||||
|
# 大于
|
||||||
|
size_min = float(size_range[1:].strip()) * 1024 * 1024
|
||||||
|
if torrent_info.size < size_min:
|
||||||
|
return False
|
||||||
|
elif size_range.startswith("<"):
|
||||||
|
# 小于
|
||||||
|
size_max = float(size_range[1:].strip()) * 1024 * 1024
|
||||||
|
if torrent_info.size > size_max:
|
||||||
|
return False
|
||||||
|
|
||||||
return True
|
return True
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
|
|||||||
@@ -676,11 +676,15 @@ class FileManagerModule(_ModuleBase):
|
|||||||
".zh-tw": ".繁体中文"
|
".zh-tw": ".繁体中文"
|
||||||
}
|
}
|
||||||
new_sub_tag_list = [
|
new_sub_tag_list = [
|
||||||
new_file_type if t == 0 else "%s%s(%s)" % (new_file_type,
|
(".default" + new_file_type if (
|
||||||
new_sub_tag_dict.get(
|
(settings.DEFAULT_SUB == "zh-cn" and new_file_type == ".chi.zh-cn") or
|
||||||
new_file_type, ""
|
(settings.DEFAULT_SUB == "zh-tw" and new_file_type == ".zh-tw") or
|
||||||
),
|
(settings.DEFAULT_SUB == "eng" and new_file_type == ".eng")
|
||||||
t) for t in range(6)
|
) else new_file_type) if t == 0 else "%s%s(%s)" % (new_file_type,
|
||||||
|
new_sub_tag_dict.get(
|
||||||
|
new_file_type, ""
|
||||||
|
),
|
||||||
|
t) for t in range(6)
|
||||||
]
|
]
|
||||||
for new_sub_tag in new_sub_tag_list:
|
for new_sub_tag in new_sub_tag_list:
|
||||||
new_file: Path = target_file.with_name(target_file.stem + new_sub_tag + file_ext)
|
new_file: Path = target_file.with_name(target_file.stem + new_sub_tag + file_ext)
|
||||||
|
|||||||
@@ -67,14 +67,14 @@ class Alist(StorageBase, metaclass=Singleton):
|
|||||||
return self.__generate_token
|
return self.__generate_token
|
||||||
|
|
||||||
@property
|
@property
|
||||||
@cached(maxsize=1, ttl=60 * 60 * 24 * 2 - 60 * 5)
|
@cached(maxsize=1, ttl=60 * 60 * 24 * 2 - 60 * 5, skip_empty=True)
|
||||||
def __generate_token(self) -> str:
|
def __generate_token(self) -> str:
|
||||||
"""
|
"""
|
||||||
如果设置永久令牌则返回永久令牌,否则使用账号密码生成一个临时 token
|
如果设置永久令牌则返回永久令牌,否则使用账号密码生成一个临时 token
|
||||||
缓存2天,提前5分钟更新
|
缓存2天,提前5分钟更新
|
||||||
"""
|
"""
|
||||||
conf = self.get_conf()
|
conf = self.get_conf()
|
||||||
token = conf.get("token")
|
token = conf.get("token")
|
||||||
if token:
|
if token:
|
||||||
return str(token)
|
return str(token)
|
||||||
resp: Response = RequestUtils(headers={
|
resp: Response = RequestUtils(headers={
|
||||||
|
|||||||
@@ -1,17 +1,14 @@
|
|||||||
from datetime import datetime
|
from datetime import datetime
|
||||||
from typing import List, Optional, Tuple, Union
|
from typing import List, Optional, Tuple, Union
|
||||||
|
|
||||||
from ruamel.yaml import CommentedMap
|
|
||||||
|
|
||||||
from app.core.config import settings
|
from app.core.config import settings
|
||||||
from app.core.context import TorrentInfo
|
from app.core.context import TorrentInfo
|
||||||
from app.db.site_oper import SiteOper
|
from app.db.site_oper import SiteOper
|
||||||
from app.helper.module import ModuleHelper
|
from app.helper.module import ModuleHelper
|
||||||
from app.helper.sites import SitesHelper
|
from app.helper.sites import SitesHelper, SiteSpider
|
||||||
from app.log import logger
|
from app.log import logger
|
||||||
from app.modules import _ModuleBase
|
from app.modules import _ModuleBase
|
||||||
from app.modules.indexer.parser import SiteParserBase
|
from app.modules.indexer.parser import SiteParserBase
|
||||||
from app.modules.indexer.spider import TorrentSpider
|
|
||||||
from app.modules.indexer.spider.haidan import HaiDanSpider
|
from app.modules.indexer.spider.haidan import HaiDanSpider
|
||||||
from app.modules.indexer.spider.mtorrent import MTorrentSpider
|
from app.modules.indexer.spider.mtorrent import MTorrentSpider
|
||||||
from app.modules.indexer.spider.tnode import TNodeSpider
|
from app.modules.indexer.spider.tnode import TNodeSpider
|
||||||
@@ -76,15 +73,17 @@ class IndexerModule(_ModuleBase):
|
|||||||
def init_setting(self) -> Tuple[str, Union[str, bool]]:
|
def init_setting(self) -> Tuple[str, Union[str, bool]]:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
def search_torrents(self, site: CommentedMap,
|
def search_torrents(self, site: dict,
|
||||||
keywords: List[str] = None,
|
keywords: List[str] = None,
|
||||||
mtype: MediaType = None,
|
mtype: MediaType = None,
|
||||||
|
cat: str = None,
|
||||||
page: int = 0) -> List[TorrentInfo]:
|
page: int = 0) -> List[TorrentInfo]:
|
||||||
"""
|
"""
|
||||||
搜索一个站点
|
搜索一个站点
|
||||||
:param site: 站点
|
:param site: 站点
|
||||||
:param keywords: 搜索关键词列表
|
:param keywords: 搜索关键词列表
|
||||||
:param mtype: 媒体类型
|
:param mtype: 媒体类型
|
||||||
|
:param cat: 分类
|
||||||
:param page: 页码
|
:param page: 页码
|
||||||
:return: 资源列表
|
:return: 资源列表
|
||||||
"""
|
"""
|
||||||
@@ -159,6 +158,7 @@ class IndexerModule(_ModuleBase):
|
|||||||
search_word=search_word,
|
search_word=search_word,
|
||||||
indexer=site,
|
indexer=site,
|
||||||
mtype=mtype,
|
mtype=mtype,
|
||||||
|
cat=cat,
|
||||||
page=page
|
page=page
|
||||||
)
|
)
|
||||||
if error_flag:
|
if error_flag:
|
||||||
@@ -204,35 +204,42 @@ class IndexerModule(_ModuleBase):
|
|||||||
return __remove_duplicate(torrents)
|
return __remove_duplicate(torrents)
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def __spider_search(indexer: CommentedMap,
|
def __spider_search(indexer: dict,
|
||||||
search_word: str = None,
|
search_word: str = None,
|
||||||
mtype: MediaType = None,
|
mtype: MediaType = None,
|
||||||
|
cat: str = None,
|
||||||
page: int = 0) -> Tuple[bool, List[dict]]:
|
page: int = 0) -> Tuple[bool, List[dict]]:
|
||||||
"""
|
"""
|
||||||
根据关键字搜索单个站点
|
根据关键字搜索单个站点
|
||||||
:param: indexer: 站点配置
|
:param: indexer: 站点配置
|
||||||
:param: search_word: 关键字
|
:param: search_word: 关键字
|
||||||
|
:param: cat: 分类
|
||||||
:param: page: 页码
|
:param: page: 页码
|
||||||
:param: mtype: 媒体类型
|
:param: mtype: 媒体类型
|
||||||
:param: timeout: 超时时间
|
:param: timeout: 超时时间
|
||||||
:return: 是否发生错误, 种子列表
|
:return: 是否发生错误, 种子列表
|
||||||
"""
|
"""
|
||||||
_spider = TorrentSpider(indexer=indexer,
|
_spider = SiteSpider(indexer=indexer,
|
||||||
mtype=mtype,
|
keyword=search_word,
|
||||||
keyword=search_word,
|
mtype=mtype,
|
||||||
page=page)
|
cat=cat,
|
||||||
|
page=page)
|
||||||
|
|
||||||
return _spider.is_error, _spider.get_torrents()
|
return _spider.is_error, _spider.get_torrents()
|
||||||
|
|
||||||
def refresh_torrents(self, site: CommentedMap) -> Optional[List[TorrentInfo]]:
|
def refresh_torrents(self, site: dict,
|
||||||
|
keyword: str = None, cat: str = None, page: int = 0) -> Optional[List[TorrentInfo]]:
|
||||||
"""
|
"""
|
||||||
获取站点最新一页的种子,多个站点需要多线程处理
|
获取站点最新一页的种子,多个站点需要多线程处理
|
||||||
:param site: 站点
|
:param site: 站点
|
||||||
|
:param keyword: 关键字
|
||||||
|
:param cat: 分类
|
||||||
|
:param page: 页码
|
||||||
:reutrn: 种子资源列表
|
:reutrn: 种子资源列表
|
||||||
"""
|
"""
|
||||||
return self.search_torrents(site=site)
|
return self.search_torrents(site=site, keywords=[keyword], cat=cat, page=page)
|
||||||
|
|
||||||
def refresh_userdata(self, site: CommentedMap) -> Optional[SiteUserData]:
|
def refresh_userdata(self, site: dict) -> Optional[SiteUserData]:
|
||||||
"""
|
"""
|
||||||
刷新站点的用户数据
|
刷新站点的用户数据
|
||||||
:param site: 站点
|
:param site: 站点
|
||||||
|
|||||||
@@ -207,13 +207,14 @@ class NexusPhpSiteUserInfo(SiteParserBase):
|
|||||||
|
|
||||||
# 是否存在下页数据
|
# 是否存在下页数据
|
||||||
next_page = None
|
next_page = None
|
||||||
next_page_text = html.xpath('//a[contains(.//text(), "下一页") or contains(.//text(), "下一頁") or contains(.//text(), ">")]/@href')
|
next_page_text = html.xpath(
|
||||||
|
'//a[contains(.//text(), "下一页") or contains(.//text(), "下一頁") or contains(.//text(), ">")]/@href')
|
||||||
#防止识别到详情页
|
|
||||||
|
# 防止识别到详情页
|
||||||
while next_page_text:
|
while next_page_text:
|
||||||
next_page = next_page_text.pop().strip()
|
next_page = next_page_text.pop().strip()
|
||||||
if not next_page.startswith('details.php'):
|
if not next_page.startswith('details.php'):
|
||||||
break;
|
break
|
||||||
next_page = None
|
next_page = None
|
||||||
|
|
||||||
# fix up page url
|
# fix up page url
|
||||||
|
|||||||
@@ -1,742 +0,0 @@
|
|||||||
import copy
|
|
||||||
import datetime
|
|
||||||
import re
|
|
||||||
import traceback
|
|
||||||
from typing import List
|
|
||||||
from urllib.parse import quote, urlencode, urlparse, parse_qs
|
|
||||||
|
|
||||||
from jinja2 import Template
|
|
||||||
from pyquery import PyQuery
|
|
||||||
from ruamel.yaml import CommentedMap
|
|
||||||
|
|
||||||
from app.core.config import settings
|
|
||||||
from app.helper.browser import PlaywrightHelper
|
|
||||||
from app.log import logger
|
|
||||||
from app.schemas.types import MediaType
|
|
||||||
from app.utils.http import RequestUtils
|
|
||||||
from app.utils.string import StringUtils
|
|
||||||
|
|
||||||
|
|
||||||
class TorrentSpider:
|
|
||||||
# 是否出现错误
|
|
||||||
is_error: bool = False
|
|
||||||
# 索引器ID
|
|
||||||
indexerid: int = None
|
|
||||||
# 索引器名称
|
|
||||||
indexername: str = None
|
|
||||||
# 站点域名
|
|
||||||
domain: str = None
|
|
||||||
# 站点Cookie
|
|
||||||
cookie: str = None
|
|
||||||
# 站点UA
|
|
||||||
ua: str = None
|
|
||||||
# Requests 代理
|
|
||||||
proxies: dict = None
|
|
||||||
# playwright 代理
|
|
||||||
proxy_server: dict = None
|
|
||||||
# 是否渲染
|
|
||||||
render: bool = False
|
|
||||||
# Referer
|
|
||||||
referer: str = None
|
|
||||||
# 搜索关键字
|
|
||||||
keyword: str = None
|
|
||||||
# 媒体类型
|
|
||||||
mtype: MediaType = None
|
|
||||||
# 搜索路径、方式配置
|
|
||||||
search: dict = {}
|
|
||||||
# 批量搜索配置
|
|
||||||
batch: dict = {}
|
|
||||||
# 浏览配置
|
|
||||||
browse: dict = {}
|
|
||||||
# 站点分类配置
|
|
||||||
category: dict = {}
|
|
||||||
# 站点种子列表配置
|
|
||||||
list: dict = {}
|
|
||||||
# 站点种子字段配置
|
|
||||||
fields: dict = {}
|
|
||||||
# 页码
|
|
||||||
page: int = 0
|
|
||||||
# 搜索条数, 默认: 100条
|
|
||||||
result_num: int = 100
|
|
||||||
# 单个种子信息
|
|
||||||
torrents_info: dict = {}
|
|
||||||
# 种子列表
|
|
||||||
torrents_info_array: list = []
|
|
||||||
# 搜索超时, 默认: 15秒
|
|
||||||
_timeout = 15
|
|
||||||
|
|
||||||
def __init__(self,
|
|
||||||
indexer: CommentedMap,
|
|
||||||
keyword: [str, list] = None,
|
|
||||||
page: int = 0,
|
|
||||||
referer: str = None,
|
|
||||||
mtype: MediaType = None):
|
|
||||||
"""
|
|
||||||
设置查询参数
|
|
||||||
:param indexer: 索引器
|
|
||||||
:param keyword: 搜索关键字,如果数组则为批量搜索
|
|
||||||
:param page: 页码
|
|
||||||
:param referer: Referer
|
|
||||||
:param mtype: 媒体类型
|
|
||||||
"""
|
|
||||||
if not indexer:
|
|
||||||
return
|
|
||||||
self.keyword = keyword
|
|
||||||
self.mtype = mtype
|
|
||||||
self.indexerid = indexer.get('id')
|
|
||||||
self.indexername = indexer.get('name')
|
|
||||||
self.search = indexer.get('search')
|
|
||||||
self.batch = indexer.get('batch')
|
|
||||||
self.browse = indexer.get('browse')
|
|
||||||
self.category = indexer.get('category')
|
|
||||||
self.list = indexer.get('torrents').get('list', {})
|
|
||||||
self.fields = indexer.get('torrents').get('fields')
|
|
||||||
self.render = indexer.get('render')
|
|
||||||
self.domain = indexer.get('domain')
|
|
||||||
self.result_num = int(indexer.get('result_num') or 100)
|
|
||||||
self._timeout = int(indexer.get('timeout') or 15)
|
|
||||||
self.page = page
|
|
||||||
if self.domain and not str(self.domain).endswith("/"):
|
|
||||||
self.domain = self.domain + "/"
|
|
||||||
if indexer.get('ua'):
|
|
||||||
self.ua = indexer.get('ua') or settings.USER_AGENT
|
|
||||||
else:
|
|
||||||
self.ua = settings.USER_AGENT
|
|
||||||
if indexer.get('proxy'):
|
|
||||||
self.proxies = settings.PROXY
|
|
||||||
self.proxy_server = settings.PROXY_SERVER
|
|
||||||
if indexer.get('cookie'):
|
|
||||||
self.cookie = indexer.get('cookie')
|
|
||||||
if referer:
|
|
||||||
self.referer = referer
|
|
||||||
self.torrents_info_array = []
|
|
||||||
|
|
||||||
def get_torrents(self) -> List[dict]:
|
|
||||||
"""
|
|
||||||
开始请求
|
|
||||||
"""
|
|
||||||
if not self.search or not self.domain:
|
|
||||||
return []
|
|
||||||
|
|
||||||
# 种子搜索相对路径
|
|
||||||
paths = self.search.get('paths', [])
|
|
||||||
torrentspath = ""
|
|
||||||
if len(paths) == 1:
|
|
||||||
torrentspath = paths[0].get('path', '')
|
|
||||||
else:
|
|
||||||
for path in paths:
|
|
||||||
if path.get("type") == "all" and not self.mtype:
|
|
||||||
torrentspath = path.get('path')
|
|
||||||
break
|
|
||||||
elif path.get("type") == "movie" and self.mtype == MediaType.MOVIE:
|
|
||||||
torrentspath = path.get('path')
|
|
||||||
break
|
|
||||||
elif path.get("type") == "tv" and self.mtype == MediaType.TV:
|
|
||||||
torrentspath = path.get('path')
|
|
||||||
break
|
|
||||||
|
|
||||||
# 精确搜索
|
|
||||||
if self.keyword:
|
|
||||||
|
|
||||||
if isinstance(self.keyword, list):
|
|
||||||
# 批量查询
|
|
||||||
if self.batch:
|
|
||||||
delimiter = self.batch.get('delimiter') or ' '
|
|
||||||
space_replace = self.batch.get('space_replace') or ' '
|
|
||||||
search_word = delimiter.join([str(k).replace(' ',
|
|
||||||
space_replace) for k in self.keyword])
|
|
||||||
else:
|
|
||||||
search_word = " ".join(self.keyword)
|
|
||||||
# 查询模式:或
|
|
||||||
search_mode = "1"
|
|
||||||
else:
|
|
||||||
# 单个查询
|
|
||||||
search_word = self.keyword
|
|
||||||
# 查询模式与
|
|
||||||
search_mode = "0"
|
|
||||||
|
|
||||||
# 搜索URL
|
|
||||||
indexer_params = self.search.get("params", {}).copy()
|
|
||||||
if indexer_params:
|
|
||||||
search_area = indexer_params.get('search_area')
|
|
||||||
# search_area非0表示支持imdbid搜索
|
|
||||||
if (search_area and
|
|
||||||
(not self.keyword or not self.keyword.startswith('tt'))):
|
|
||||||
# 支持imdbid搜索,但关键字不是imdbid时,不启用imdbid搜索
|
|
||||||
indexer_params.pop('search_area')
|
|
||||||
# 变量字典
|
|
||||||
inputs_dict = {
|
|
||||||
"keyword": search_word
|
|
||||||
}
|
|
||||||
# 查询参数,默认查询标题
|
|
||||||
params = {
|
|
||||||
"search_mode": search_mode,
|
|
||||||
"search_area": 0,
|
|
||||||
"page": self.page or 0,
|
|
||||||
"notnewword": 1
|
|
||||||
}
|
|
||||||
# 额外参数
|
|
||||||
for key, value in indexer_params.items():
|
|
||||||
params.update({
|
|
||||||
"%s" % key: str(value).format(**inputs_dict)
|
|
||||||
})
|
|
||||||
# 分类条件
|
|
||||||
if self.category:
|
|
||||||
if self.mtype == MediaType.TV:
|
|
||||||
cats = self.category.get("tv") or []
|
|
||||||
elif self.mtype == MediaType.MOVIE:
|
|
||||||
cats = self.category.get("movie") or []
|
|
||||||
else:
|
|
||||||
cats = (self.category.get("movie") or []) + (self.category.get("tv") or [])
|
|
||||||
for cat in cats:
|
|
||||||
if self.category.get("field"):
|
|
||||||
value = params.get(self.category.get("field"), "")
|
|
||||||
params.update({
|
|
||||||
"%s" % self.category.get("field"): value + self.category.get("delimiter",
|
|
||||||
' ') + cat.get("id")
|
|
||||||
})
|
|
||||||
else:
|
|
||||||
params.update({
|
|
||||||
"cat%s" % cat.get("id"): 1
|
|
||||||
})
|
|
||||||
searchurl = self.domain + torrentspath + "?" + urlencode(params)
|
|
||||||
else:
|
|
||||||
# 变量字典
|
|
||||||
inputs_dict = {
|
|
||||||
"keyword": quote(search_word),
|
|
||||||
"page": self.page or 0
|
|
||||||
}
|
|
||||||
# 无额外参数
|
|
||||||
searchurl = self.domain + str(torrentspath).format(**inputs_dict)
|
|
||||||
|
|
||||||
# 列表浏览
|
|
||||||
else:
|
|
||||||
# 变量字典
|
|
||||||
inputs_dict = {
|
|
||||||
"page": self.page or 0,
|
|
||||||
"keyword": ""
|
|
||||||
}
|
|
||||||
# 有单独浏览路径
|
|
||||||
if self.browse:
|
|
||||||
torrentspath = self.browse.get("path")
|
|
||||||
if self.browse.get("start"):
|
|
||||||
start_page = int(self.browse.get("start")) + int(self.page or 0)
|
|
||||||
inputs_dict.update({
|
|
||||||
"page": start_page
|
|
||||||
})
|
|
||||||
elif self.page:
|
|
||||||
torrentspath = torrentspath + f"?page={self.page}"
|
|
||||||
# 搜索Url
|
|
||||||
searchurl = self.domain + str(torrentspath).format(**inputs_dict)
|
|
||||||
|
|
||||||
logger.info(f"开始请求:{searchurl}")
|
|
||||||
|
|
||||||
if self.render:
|
|
||||||
# 浏览器仿真
|
|
||||||
page_source = PlaywrightHelper().get_page_source(
|
|
||||||
url=searchurl,
|
|
||||||
cookies=self.cookie,
|
|
||||||
ua=self.ua,
|
|
||||||
proxies=self.proxy_server,
|
|
||||||
timeout=self._timeout
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
# requests请求
|
|
||||||
ret = RequestUtils(
|
|
||||||
ua=self.ua,
|
|
||||||
cookies=self.cookie,
|
|
||||||
timeout=self._timeout,
|
|
||||||
referer=self.referer,
|
|
||||||
proxies=self.proxies
|
|
||||||
).get_res(searchurl, allow_redirects=True)
|
|
||||||
page_source = RequestUtils.get_decoded_html_content(ret,
|
|
||||||
settings.ENCODING_DETECTION_PERFORMANCE_MODE,
|
|
||||||
settings.ENCODING_DETECTION_MIN_CONFIDENCE)
|
|
||||||
|
|
||||||
# 解析
|
|
||||||
return self.parse(page_source)
|
|
||||||
|
|
||||||
def __get_title(self, torrent):
|
|
||||||
# title default text
|
|
||||||
if 'title' not in self.fields:
|
|
||||||
return
|
|
||||||
selector = self.fields.get('title', {})
|
|
||||||
if 'selector' in selector:
|
|
||||||
title = torrent(selector.get('selector', '')).clone()
|
|
||||||
self.__remove(title, selector)
|
|
||||||
items = self.__attribute_or_text(title, selector)
|
|
||||||
self.torrents_info['title'] = self.__index(items, selector)
|
|
||||||
elif 'text' in selector:
|
|
||||||
render_dict = {}
|
|
||||||
if "title_default" in self.fields:
|
|
||||||
title_default_selector = self.fields.get('title_default', {})
|
|
||||||
title_default_item = torrent(title_default_selector.get('selector', '')).clone()
|
|
||||||
self.__remove(title_default_item, title_default_selector)
|
|
||||||
items = self.__attribute_or_text(title_default_item, selector)
|
|
||||||
title_default = self.__index(items, title_default_selector)
|
|
||||||
render_dict.update({'title_default': title_default})
|
|
||||||
if "title_optional" in self.fields:
|
|
||||||
title_optional_selector = self.fields.get('title_optional', {})
|
|
||||||
title_optional_item = torrent(title_optional_selector.get('selector', '')).clone()
|
|
||||||
self.__remove(title_optional_item, title_optional_selector)
|
|
||||||
items = self.__attribute_or_text(title_optional_item, title_optional_selector)
|
|
||||||
title_optional = self.__index(items, title_optional_selector)
|
|
||||||
render_dict.update({'title_optional': title_optional})
|
|
||||||
self.torrents_info['title'] = Template(selector.get('text')).render(fields=render_dict)
|
|
||||||
self.torrents_info['title'] = self.__filter_text(self.torrents_info.get('title'),
|
|
||||||
selector.get('filters'))
|
|
||||||
|
|
||||||
def __get_description(self, torrent):
|
|
||||||
# title optional text
|
|
||||||
if 'description' not in self.fields:
|
|
||||||
return
|
|
||||||
selector = self.fields.get('description', {})
|
|
||||||
if "selector" in selector \
|
|
||||||
or "selectors" in selector:
|
|
||||||
description = torrent(selector.get('selector', selector.get('selectors', ''))).clone()
|
|
||||||
if description:
|
|
||||||
self.__remove(description, selector)
|
|
||||||
items = self.__attribute_or_text(description, selector)
|
|
||||||
self.torrents_info['description'] = self.__index(items, selector)
|
|
||||||
elif "text" in selector:
|
|
||||||
render_dict = {}
|
|
||||||
if "tags" in self.fields:
|
|
||||||
tags_selector = self.fields.get('tags', {})
|
|
||||||
tags_item = torrent(tags_selector.get('selector', '')).clone()
|
|
||||||
self.__remove(tags_item, tags_selector)
|
|
||||||
items = self.__attribute_or_text(tags_item, tags_selector)
|
|
||||||
tag = self.__index(items, tags_selector)
|
|
||||||
render_dict.update({'tags': tag})
|
|
||||||
if "subject" in self.fields:
|
|
||||||
subject_selector = self.fields.get('subject', {})
|
|
||||||
subject_item = torrent(subject_selector.get('selector', '')).clone()
|
|
||||||
self.__remove(subject_item, subject_selector)
|
|
||||||
items = self.__attribute_or_text(subject_item, subject_selector)
|
|
||||||
subject = self.__index(items, subject_selector)
|
|
||||||
render_dict.update({'subject': subject})
|
|
||||||
if "description_free_forever" in self.fields:
|
|
||||||
description_free_forever_selector = self.fields.get("description_free_forever", {})
|
|
||||||
description_free_forever_item = torrent(description_free_forever_selector.get("selector", '')).clone()
|
|
||||||
self.__remove(description_free_forever_item, description_free_forever_selector)
|
|
||||||
items = self.__attribute_or_text(description_free_forever_item, description_free_forever_selector)
|
|
||||||
description_free_forever = self.__index(items, description_free_forever_selector)
|
|
||||||
render_dict.update({"description_free_forever": description_free_forever})
|
|
||||||
if "description_normal" in self.fields:
|
|
||||||
description_normal_selector = self.fields.get("description_normal", {})
|
|
||||||
description_normal_item = torrent(description_normal_selector.get("selector", '')).clone()
|
|
||||||
self.__remove(description_normal_item, description_normal_selector)
|
|
||||||
items = self.__attribute_or_text(description_normal_item, description_normal_selector)
|
|
||||||
description_normal = self.__index(items, description_normal_selector)
|
|
||||||
render_dict.update({"description_normal": description_normal})
|
|
||||||
self.torrents_info['description'] = Template(selector.get('text')).render(fields=render_dict)
|
|
||||||
self.torrents_info['description'] = self.__filter_text(self.torrents_info.get('description'),
|
|
||||||
selector.get('filters'))
|
|
||||||
|
|
||||||
def __get_detail(self, torrent):
|
|
||||||
# details page text
|
|
||||||
if 'details' not in self.fields:
|
|
||||||
return
|
|
||||||
selector = self.fields.get('details', {})
|
|
||||||
details = torrent(selector.get('selector', '')).clone()
|
|
||||||
self.__remove(details, selector)
|
|
||||||
items = self.__attribute_or_text(details, selector)
|
|
||||||
item = self.__index(items, selector)
|
|
||||||
detail_link = self.__filter_text(item, selector.get('filters'))
|
|
||||||
if detail_link:
|
|
||||||
if not detail_link.startswith("http"):
|
|
||||||
if detail_link.startswith("//"):
|
|
||||||
self.torrents_info['page_url'] = self.domain.split(":")[0] + ":" + detail_link
|
|
||||||
elif detail_link.startswith("/"):
|
|
||||||
self.torrents_info['page_url'] = self.domain + detail_link[1:]
|
|
||||||
else:
|
|
||||||
self.torrents_info['page_url'] = self.domain + detail_link
|
|
||||||
else:
|
|
||||||
self.torrents_info['page_url'] = detail_link
|
|
||||||
|
|
||||||
def __get_download(self, torrent):
|
|
||||||
# download link text
|
|
||||||
if 'download' not in self.fields:
|
|
||||||
return
|
|
||||||
selector = self.fields.get('download', {})
|
|
||||||
download = torrent(selector.get('selector', '')).clone()
|
|
||||||
self.__remove(download, selector)
|
|
||||||
items = self.__attribute_or_text(download, selector)
|
|
||||||
item = self.__index(items, selector)
|
|
||||||
download_link = self.__filter_text(item, selector.get('filters'))
|
|
||||||
if download_link:
|
|
||||||
if not download_link.startswith("http") \
|
|
||||||
and not download_link.startswith("magnet"):
|
|
||||||
_scheme, _domain = StringUtils.get_url_netloc(self.domain)
|
|
||||||
if _domain in download_link:
|
|
||||||
if download_link.startswith("/"):
|
|
||||||
self.torrents_info['enclosure'] = f"{_scheme}:{download_link}"
|
|
||||||
else:
|
|
||||||
self.torrents_info['enclosure'] = f"{_scheme}://{download_link}"
|
|
||||||
else:
|
|
||||||
if download_link.startswith("/"):
|
|
||||||
self.torrents_info['enclosure'] = f"{self.domain}{download_link[1:]}"
|
|
||||||
else:
|
|
||||||
self.torrents_info['enclosure'] = f"{self.domain}{download_link}"
|
|
||||||
else:
|
|
||||||
self.torrents_info['enclosure'] = download_link
|
|
||||||
|
|
||||||
def __get_imdbid(self, torrent):
|
|
||||||
# imdbid
|
|
||||||
if "imdbid" not in self.fields:
|
|
||||||
return
|
|
||||||
selector = self.fields.get('imdbid', {})
|
|
||||||
imdbid = torrent(selector.get('selector', '')).clone()
|
|
||||||
self.__remove(imdbid, selector)
|
|
||||||
items = self.__attribute_or_text(imdbid, selector)
|
|
||||||
item = self.__index(items, selector)
|
|
||||||
self.torrents_info['imdbid'] = item
|
|
||||||
self.torrents_info['imdbid'] = self.__filter_text(self.torrents_info.get('imdbid'),
|
|
||||||
selector.get('filters'))
|
|
||||||
|
|
||||||
def __get_size(self, torrent):
|
|
||||||
# torrent size int
|
|
||||||
if 'size' not in self.fields:
|
|
||||||
return
|
|
||||||
selector = self.fields.get('size', {})
|
|
||||||
size = torrent(selector.get('selector', selector.get("selectors", ''))).clone()
|
|
||||||
self.__remove(size, selector)
|
|
||||||
items = self.__attribute_or_text(size, selector)
|
|
||||||
item = self.__index(items, selector)
|
|
||||||
if item:
|
|
||||||
size_val = item.replace("\n", "").strip()
|
|
||||||
size_val = self.__filter_text(size_val,
|
|
||||||
selector.get('filters'))
|
|
||||||
self.torrents_info['size'] = StringUtils.num_filesize(size_val)
|
|
||||||
else:
|
|
||||||
self.torrents_info['size'] = 0
|
|
||||||
|
|
||||||
def __get_leechers(self, torrent):
|
|
||||||
# torrent leechers int
|
|
||||||
if 'leechers' not in self.fields:
|
|
||||||
return
|
|
||||||
selector = self.fields.get('leechers', {})
|
|
||||||
leechers = torrent(selector.get('selector', '')).clone()
|
|
||||||
self.__remove(leechers, selector)
|
|
||||||
items = self.__attribute_or_text(leechers, selector)
|
|
||||||
item = self.__index(items, selector)
|
|
||||||
if item:
|
|
||||||
peers_val = item.split("/")[0]
|
|
||||||
peers_val = peers_val.replace(",", "")
|
|
||||||
peers_val = self.__filter_text(peers_val,
|
|
||||||
selector.get('filters'))
|
|
||||||
self.torrents_info['peers'] = int(peers_val) if peers_val and peers_val.isdigit() else 0
|
|
||||||
else:
|
|
||||||
self.torrents_info['peers'] = 0
|
|
||||||
|
|
||||||
def __get_seeders(self, torrent):
|
|
||||||
# torrent leechers int
|
|
||||||
if 'seeders' not in self.fields:
|
|
||||||
return
|
|
||||||
selector = self.fields.get('seeders', {})
|
|
||||||
seeders = torrent(selector.get('selector', '')).clone()
|
|
||||||
self.__remove(seeders, selector)
|
|
||||||
items = self.__attribute_or_text(seeders, selector)
|
|
||||||
item = self.__index(items, selector)
|
|
||||||
if item:
|
|
||||||
seeders_val = item.split("/")[0]
|
|
||||||
seeders_val = seeders_val.replace(",", "")
|
|
||||||
seeders_val = self.__filter_text(seeders_val,
|
|
||||||
selector.get('filters'))
|
|
||||||
self.torrents_info['seeders'] = int(seeders_val) if seeders_val and seeders_val.isdigit() else 0
|
|
||||||
else:
|
|
||||||
self.torrents_info['seeders'] = 0
|
|
||||||
|
|
||||||
def __get_grabs(self, torrent):
|
|
||||||
# torrent grabs int
|
|
||||||
if 'grabs' not in self.fields:
|
|
||||||
return
|
|
||||||
selector = self.fields.get('grabs', {})
|
|
||||||
grabs = torrent(selector.get('selector', '')).clone()
|
|
||||||
self.__remove(grabs, selector)
|
|
||||||
items = self.__attribute_or_text(grabs, selector)
|
|
||||||
item = self.__index(items, selector)
|
|
||||||
if item:
|
|
||||||
grabs_val = item.split("/")[0]
|
|
||||||
grabs_val = grabs_val.replace(",", "")
|
|
||||||
grabs_val = self.__filter_text(grabs_val,
|
|
||||||
selector.get('filters'))
|
|
||||||
self.torrents_info['grabs'] = int(grabs_val) if grabs_val and grabs_val.isdigit() else 0
|
|
||||||
else:
|
|
||||||
self.torrents_info['grabs'] = 0
|
|
||||||
|
|
||||||
def __get_pubdate(self, torrent):
|
|
||||||
# torrent pubdate yyyy-mm-dd hh:mm:ss
|
|
||||||
if 'date_added' not in self.fields:
|
|
||||||
return
|
|
||||||
selector = self.fields.get('date_added', {})
|
|
||||||
pubdate = torrent(selector.get('selector', '')).clone()
|
|
||||||
self.__remove(pubdate, selector)
|
|
||||||
items = self.__attribute_or_text(pubdate, selector)
|
|
||||||
pubdate_str = self.__index(items, selector)
|
|
||||||
if pubdate_str:
|
|
||||||
pubdate_str = pubdate_str.replace('\n', ' ').strip()
|
|
||||||
self.torrents_info['pubdate'] = self.__filter_text(pubdate_str,
|
|
||||||
selector.get('filters'))
|
|
||||||
|
|
||||||
def __get_date_elapsed(self, torrent):
|
|
||||||
# torrent data elaspsed text
|
|
||||||
if 'date_elapsed' not in self.fields:
|
|
||||||
return
|
|
||||||
selector = self.fields.get('date_elapsed', {})
|
|
||||||
date_elapsed = torrent(selector.get('selector', '')).clone()
|
|
||||||
self.__remove(date_elapsed, selector)
|
|
||||||
items = self.__attribute_or_text(date_elapsed, selector)
|
|
||||||
self.torrents_info['date_elapsed'] = self.__index(items, selector)
|
|
||||||
self.torrents_info['date_elapsed'] = self.__filter_text(self.torrents_info.get('date_elapsed'),
|
|
||||||
selector.get('filters'))
|
|
||||||
|
|
||||||
def __get_downloadvolumefactor(self, torrent):
|
|
||||||
# downloadvolumefactor int
|
|
||||||
selector = self.fields.get('downloadvolumefactor', {})
|
|
||||||
if not selector:
|
|
||||||
return
|
|
||||||
self.torrents_info['downloadvolumefactor'] = 1
|
|
||||||
if 'case' in selector:
|
|
||||||
for downloadvolumefactorselector in list(selector.get('case', {}).keys()):
|
|
||||||
downloadvolumefactor = torrent(downloadvolumefactorselector)
|
|
||||||
if len(downloadvolumefactor) > 0:
|
|
||||||
self.torrents_info['downloadvolumefactor'] = selector.get('case', {}).get(
|
|
||||||
downloadvolumefactorselector)
|
|
||||||
break
|
|
||||||
elif "selector" in selector:
|
|
||||||
downloadvolume = torrent(selector.get('selector', '')).clone()
|
|
||||||
self.__remove(downloadvolume, selector)
|
|
||||||
items = self.__attribute_or_text(downloadvolume, selector)
|
|
||||||
item = self.__index(items, selector)
|
|
||||||
if item:
|
|
||||||
downloadvolumefactor = re.search(r'(\d+\.?\d*)', item)
|
|
||||||
if downloadvolumefactor:
|
|
||||||
self.torrents_info['downloadvolumefactor'] = int(downloadvolumefactor.group(1))
|
|
||||||
|
|
||||||
def __get_uploadvolumefactor(self, torrent):
|
|
||||||
# uploadvolumefactor int
|
|
||||||
selector = self.fields.get('uploadvolumefactor', {})
|
|
||||||
if not selector:
|
|
||||||
return
|
|
||||||
self.torrents_info['uploadvolumefactor'] = 1
|
|
||||||
if 'case' in selector:
|
|
||||||
for uploadvolumefactorselector in list(selector.get('case', {}).keys()):
|
|
||||||
uploadvolumefactor = torrent(uploadvolumefactorselector)
|
|
||||||
if len(uploadvolumefactor) > 0:
|
|
||||||
self.torrents_info['uploadvolumefactor'] = selector.get('case', {}).get(
|
|
||||||
uploadvolumefactorselector)
|
|
||||||
break
|
|
||||||
elif "selector" in selector:
|
|
||||||
uploadvolume = torrent(selector.get('selector', '')).clone()
|
|
||||||
self.__remove(uploadvolume, selector)
|
|
||||||
items = self.__attribute_or_text(uploadvolume, selector)
|
|
||||||
item = self.__index(items, selector)
|
|
||||||
if item:
|
|
||||||
uploadvolumefactor = re.search(r'(\d+\.?\d*)', item)
|
|
||||||
if uploadvolumefactor:
|
|
||||||
self.torrents_info['uploadvolumefactor'] = int(uploadvolumefactor.group(1))
|
|
||||||
|
|
||||||
def __get_labels(self, torrent):
|
|
||||||
# labels ['label1', 'label2']
|
|
||||||
if 'labels' not in self.fields:
|
|
||||||
return
|
|
||||||
selector = self.fields.get('labels', {})
|
|
||||||
labels = torrent(selector.get("selector", "")).clone()
|
|
||||||
self.__remove(labels, selector)
|
|
||||||
items = self.__attribute_or_text(labels, selector)
|
|
||||||
if items:
|
|
||||||
self.torrents_info['labels'] = [item for item in items if item]
|
|
||||||
else:
|
|
||||||
self.torrents_info['labels'] = []
|
|
||||||
|
|
||||||
def __get_free_date(self, torrent):
|
|
||||||
# free date yyyy-mm-dd hh:mm:ss
|
|
||||||
if 'freedate' not in self.fields:
|
|
||||||
return
|
|
||||||
selector = self.fields.get('freedate', {})
|
|
||||||
freedate = torrent(selector.get('selector', '')).clone()
|
|
||||||
self.__remove(freedate, selector)
|
|
||||||
items = self.__attribute_or_text(freedate, selector)
|
|
||||||
self.torrents_info['freedate'] = self.__index(items, selector)
|
|
||||||
self.torrents_info['freedate'] = self.__filter_text(self.torrents_info.get('freedate'),
|
|
||||||
selector.get('filters'))
|
|
||||||
|
|
||||||
def __get_hit_and_run(self, torrent):
|
|
||||||
# hitandrun True/False
|
|
||||||
if 'hr' not in self.fields:
|
|
||||||
return
|
|
||||||
selector = self.fields.get('hr', {})
|
|
||||||
hit_and_run = torrent(selector.get('selector', ''))
|
|
||||||
if hit_and_run:
|
|
||||||
self.torrents_info['hit_and_run'] = True
|
|
||||||
else:
|
|
||||||
self.torrents_info['hit_and_run'] = False
|
|
||||||
|
|
||||||
def __get_category(self, torrent):
|
|
||||||
# category 电影/电视剧
|
|
||||||
if 'category' not in self.fields:
|
|
||||||
return
|
|
||||||
selector = self.fields.get('category', {})
|
|
||||||
category = torrent(selector.get('selector', '')).clone()
|
|
||||||
self.__remove(category, selector)
|
|
||||||
items = self.__attribute_or_text(category, selector)
|
|
||||||
category_value = self.__index(items, selector)
|
|
||||||
category_value = self.__filter_text(category_value,
|
|
||||||
selector.get('filters'))
|
|
||||||
if category_value and self.category:
|
|
||||||
tv_cats = [str(cat.get("id")) for cat in self.category.get("tv") or []]
|
|
||||||
movie_cats = [str(cat.get("id")) for cat in self.category.get("movie") or []]
|
|
||||||
if category_value in tv_cats \
|
|
||||||
and category_value not in movie_cats:
|
|
||||||
self.torrents_info['category'] = MediaType.TV.value
|
|
||||||
elif category_value in movie_cats:
|
|
||||||
self.torrents_info['category'] = MediaType.MOVIE.value
|
|
||||||
else:
|
|
||||||
self.torrents_info['category'] = MediaType.UNKNOWN.value
|
|
||||||
else:
|
|
||||||
self.torrents_info['category'] = MediaType.UNKNOWN.value
|
|
||||||
|
|
||||||
def get_info(self, torrent) -> dict:
|
|
||||||
"""
|
|
||||||
解析单条种子数据
|
|
||||||
"""
|
|
||||||
self.torrents_info = {}
|
|
||||||
try:
|
|
||||||
# 标题
|
|
||||||
self.__get_title(torrent)
|
|
||||||
# 描述
|
|
||||||
self.__get_description(torrent)
|
|
||||||
# 详情页面
|
|
||||||
self.__get_detail(torrent)
|
|
||||||
# 下载链接
|
|
||||||
self.__get_download(torrent)
|
|
||||||
# 完成数
|
|
||||||
self.__get_grabs(torrent)
|
|
||||||
# 下载数
|
|
||||||
self.__get_leechers(torrent)
|
|
||||||
# 做种数
|
|
||||||
self.__get_seeders(torrent)
|
|
||||||
# 大小
|
|
||||||
self.__get_size(torrent)
|
|
||||||
# IMDBID
|
|
||||||
self.__get_imdbid(torrent)
|
|
||||||
# 下载系数
|
|
||||||
self.__get_downloadvolumefactor(torrent)
|
|
||||||
# 上传系数
|
|
||||||
self.__get_uploadvolumefactor(torrent)
|
|
||||||
# 发布时间
|
|
||||||
self.__get_pubdate(torrent)
|
|
||||||
# 已发布时间
|
|
||||||
self.__get_date_elapsed(torrent)
|
|
||||||
# 免费载止时间
|
|
||||||
self.__get_free_date(torrent)
|
|
||||||
# 标签
|
|
||||||
self.__get_labels(torrent)
|
|
||||||
# HR
|
|
||||||
self.__get_hit_and_run(torrent)
|
|
||||||
# 分类
|
|
||||||
self.__get_category(torrent)
|
|
||||||
|
|
||||||
except Exception as err:
|
|
||||||
logger.error("%s 搜索出现错误:%s" % (self.indexername, str(err)))
|
|
||||||
return self.torrents_info
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def __filter_text(text: str, filters: list):
|
|
||||||
"""
|
|
||||||
对文件进行处理
|
|
||||||
"""
|
|
||||||
if not text or not filters or not isinstance(filters, list):
|
|
||||||
return text
|
|
||||||
if not isinstance(text, str):
|
|
||||||
text = str(text)
|
|
||||||
for filter_item in filters:
|
|
||||||
if not text:
|
|
||||||
break
|
|
||||||
method_name = filter_item.get("name")
|
|
||||||
try:
|
|
||||||
args = filter_item.get("args")
|
|
||||||
if method_name == "re_search" and isinstance(args, list):
|
|
||||||
rematch = re.search(r"%s" % args[0], text)
|
|
||||||
if rematch:
|
|
||||||
text = rematch.group(args[-1])
|
|
||||||
elif method_name == "split" and isinstance(args, list):
|
|
||||||
text = text.split(r"%s" % args[0])[args[-1]]
|
|
||||||
elif method_name == "replace" and isinstance(args, list):
|
|
||||||
text = text.replace(r"%s" % args[0], r"%s" % args[-1])
|
|
||||||
elif method_name == "dateparse" and isinstance(args, str):
|
|
||||||
text = text.replace("\n", " ").strip()
|
|
||||||
text = datetime.datetime.strptime(text, r"%s" % args)
|
|
||||||
elif method_name == "strip":
|
|
||||||
text = text.strip()
|
|
||||||
elif method_name == "appendleft":
|
|
||||||
text = f"{args}{text}"
|
|
||||||
elif method_name == "querystring":
|
|
||||||
parsed_url = urlparse(str(text))
|
|
||||||
query_params = parse_qs(parsed_url.query)
|
|
||||||
param_value = query_params.get(args)
|
|
||||||
text = param_value[0] if param_value else ''
|
|
||||||
except Exception as err:
|
|
||||||
logger.debug(f'过滤器 {method_name} 处理失败:{str(err)} - {traceback.format_exc()}')
|
|
||||||
return text.strip()
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def __remove(item, selector):
|
|
||||||
"""
|
|
||||||
移除元素
|
|
||||||
"""
|
|
||||||
if selector and "remove" in selector:
|
|
||||||
removelist = selector.get('remove', '').split(', ')
|
|
||||||
for v in removelist:
|
|
||||||
item.remove(v)
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def __attribute_or_text(item, selector: dict):
|
|
||||||
if not selector:
|
|
||||||
return item
|
|
||||||
if not item:
|
|
||||||
return []
|
|
||||||
if 'attribute' in selector:
|
|
||||||
items = [i.attr(selector.get('attribute')) for i in item.items() if i]
|
|
||||||
else:
|
|
||||||
items = [i.text() for i in item.items() if i]
|
|
||||||
return items
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def __index(items: list, selector: dict):
|
|
||||||
if not items:
|
|
||||||
return None
|
|
||||||
if selector:
|
|
||||||
if "contents" in selector \
|
|
||||||
and len(items) > int(selector.get("contents")):
|
|
||||||
items = items[0].split("\n")[selector.get("contents")]
|
|
||||||
elif "index" in selector \
|
|
||||||
and len(items) > int(selector.get("index")):
|
|
||||||
items = items[int(selector.get("index"))]
|
|
||||||
if isinstance(items, list):
|
|
||||||
items = items[0]
|
|
||||||
return items
|
|
||||||
|
|
||||||
def parse(self, html_text: str) -> List[dict]:
|
|
||||||
"""
|
|
||||||
解析整个页面
|
|
||||||
"""
|
|
||||||
if not html_text:
|
|
||||||
self.is_error = True
|
|
||||||
return []
|
|
||||||
# 清空旧结果
|
|
||||||
self.torrents_info_array = []
|
|
||||||
try:
|
|
||||||
# 解析站点文本对象
|
|
||||||
html_doc = PyQuery(html_text)
|
|
||||||
# 种子筛选器
|
|
||||||
torrents_selector = self.list.get('selector', '')
|
|
||||||
# 遍历种子html列表
|
|
||||||
for torn in html_doc(torrents_selector):
|
|
||||||
self.torrents_info_array.append(copy.deepcopy(self.get_info(PyQuery(torn))))
|
|
||||||
if len(self.torrents_info_array) >= int(self.result_num):
|
|
||||||
break
|
|
||||||
return self.torrents_info_array
|
|
||||||
except Exception as err:
|
|
||||||
self.is_error = True
|
|
||||||
logger.warn(f"错误:{self.indexername} {str(err)}")
|
|
||||||
|
|||||||
@@ -1,8 +1,6 @@
|
|||||||
import urllib.parse
|
import urllib.parse
|
||||||
from typing import Tuple, List
|
from typing import Tuple, List
|
||||||
|
|
||||||
from ruamel.yaml import CommentedMap
|
|
||||||
|
|
||||||
from app.core.config import settings
|
from app.core.config import settings
|
||||||
from app.db.systemconfig_oper import SystemConfigOper
|
from app.db.systemconfig_oper import SystemConfigOper
|
||||||
from app.log import logger
|
from app.log import logger
|
||||||
@@ -51,7 +49,7 @@ class HaiDanSpider:
|
|||||||
"7": 1
|
"7": 1
|
||||||
}
|
}
|
||||||
|
|
||||||
def __init__(self, indexer: CommentedMap):
|
def __init__(self, indexer: dict):
|
||||||
self.systemconfig = SystemConfigOper()
|
self.systemconfig = SystemConfigOper()
|
||||||
if indexer:
|
if indexer:
|
||||||
self._indexerid = indexer.get('id')
|
self._indexerid = indexer.get('id')
|
||||||
|
|||||||
@@ -3,8 +3,6 @@ import json
|
|||||||
import re
|
import re
|
||||||
from typing import Tuple, List
|
from typing import Tuple, List
|
||||||
|
|
||||||
from ruamel.yaml import CommentedMap
|
|
||||||
|
|
||||||
from app.core.config import settings
|
from app.core.config import settings
|
||||||
from app.db.systemconfig_oper import SystemConfigOper
|
from app.db.systemconfig_oper import SystemConfigOper
|
||||||
from app.log import logger
|
from app.log import logger
|
||||||
@@ -51,7 +49,7 @@ class MTorrentSpider:
|
|||||||
"7": "DIY 国配 中字"
|
"7": "DIY 国配 中字"
|
||||||
}
|
}
|
||||||
|
|
||||||
def __init__(self, indexer: CommentedMap):
|
def __init__(self, indexer: dict):
|
||||||
self.systemconfig = SystemConfigOper()
|
self.systemconfig = SystemConfigOper()
|
||||||
if indexer:
|
if indexer:
|
||||||
self._indexerid = indexer.get('id')
|
self._indexerid = indexer.get('id')
|
||||||
|
|||||||
@@ -1,8 +1,6 @@
|
|||||||
import re
|
import re
|
||||||
from typing import Tuple, List
|
from typing import Tuple, List
|
||||||
|
|
||||||
from ruamel.yaml import CommentedMap
|
|
||||||
|
|
||||||
from app.core.config import settings
|
from app.core.config import settings
|
||||||
from app.log import logger
|
from app.log import logger
|
||||||
from app.utils.http import RequestUtils
|
from app.utils.http import RequestUtils
|
||||||
@@ -23,7 +21,7 @@ class TNodeSpider:
|
|||||||
_downloadurl = "%sapi/torrent/download/%s"
|
_downloadurl = "%sapi/torrent/download/%s"
|
||||||
_pageurl = "%storrent/info/%s"
|
_pageurl = "%storrent/info/%s"
|
||||||
|
|
||||||
def __init__(self, indexer: CommentedMap):
|
def __init__(self, indexer: dict):
|
||||||
if indexer:
|
if indexer:
|
||||||
self._indexerid = indexer.get('id')
|
self._indexerid = indexer.get('id')
|
||||||
self._domain = indexer.get('domain')
|
self._domain = indexer.get('domain')
|
||||||
|
|||||||
@@ -1,8 +1,6 @@
|
|||||||
from typing import List, Tuple
|
from typing import List, Tuple
|
||||||
from urllib.parse import quote
|
from urllib.parse import quote
|
||||||
|
|
||||||
from ruamel.yaml import CommentedMap
|
|
||||||
|
|
||||||
from app.core.config import settings
|
from app.core.config import settings
|
||||||
from app.log import logger
|
from app.log import logger
|
||||||
from app.utils.http import RequestUtils
|
from app.utils.http import RequestUtils
|
||||||
@@ -19,7 +17,7 @@ class TorrentLeech:
|
|||||||
_pageurl = "%storrent/%s"
|
_pageurl = "%storrent/%s"
|
||||||
_timeout = 15
|
_timeout = 15
|
||||||
|
|
||||||
def __init__(self, indexer: CommentedMap):
|
def __init__(self, indexer: dict):
|
||||||
self._indexer = indexer
|
self._indexer = indexer
|
||||||
if indexer.get('proxy'):
|
if indexer.get('proxy'):
|
||||||
self._proxy = settings.PROXY
|
self._proxy = settings.PROXY
|
||||||
|
|||||||
@@ -1,7 +1,5 @@
|
|||||||
from typing import Tuple, List
|
from typing import Tuple, List
|
||||||
|
|
||||||
from ruamel.yaml import CommentedMap
|
|
||||||
|
|
||||||
from app.core.config import settings
|
from app.core.config import settings
|
||||||
from app.db.systemconfig_oper import SystemConfigOper
|
from app.db.systemconfig_oper import SystemConfigOper
|
||||||
from app.log import logger
|
from app.log import logger
|
||||||
@@ -46,7 +44,7 @@ class YemaSpider:
|
|||||||
"12": "完结",
|
"12": "完结",
|
||||||
}
|
}
|
||||||
|
|
||||||
def __init__(self, indexer: CommentedMap):
|
def __init__(self, indexer: dict):
|
||||||
self.systemconfig = SystemConfigOper()
|
self.systemconfig = SystemConfigOper()
|
||||||
if indexer:
|
if indexer:
|
||||||
self._indexerid = indexer.get('id')
|
self._indexerid = indexer.get('id')
|
||||||
|
|||||||
@@ -239,7 +239,9 @@ class QbittorrentModule(_ModuleBase, _DownloaderBase[Qbittorrent]):
|
|||||||
path=torrent_path,
|
path=torrent_path,
|
||||||
hash=torrent.get('hash'),
|
hash=torrent.get('hash'),
|
||||||
size=torrent.get('total_size'),
|
size=torrent.get('total_size'),
|
||||||
tags=torrent.get('tags')
|
tags=torrent.get('tags'),
|
||||||
|
progress=torrent.get('progress') * 100,
|
||||||
|
state="paused" if torrent.get('state') in ("paused", "pausedDL") else "downloading",
|
||||||
))
|
))
|
||||||
elif status == TorrentStatus.TRANSFER:
|
elif status == TorrentStatus.TRANSFER:
|
||||||
# 获取已完成且未整理的
|
# 获取已完成且未整理的
|
||||||
|
|||||||
@@ -118,11 +118,12 @@ class TheMovieDbModule(_ModuleBase):
|
|||||||
|
|
||||||
# 识别匹配
|
# 识别匹配
|
||||||
if not cache_info or not cache:
|
if not cache_info or not cache:
|
||||||
|
info = None
|
||||||
# 缓存没有或者强制不使用缓存
|
# 缓存没有或者强制不使用缓存
|
||||||
if tmdbid:
|
if tmdbid:
|
||||||
# 直接查询详情
|
# 直接查询详情
|
||||||
info = self.tmdb.get_info(mtype=mtype, tmdbid=tmdbid)
|
info = self.tmdb.get_info(mtype=mtype, tmdbid=tmdbid)
|
||||||
elif meta:
|
if not info and meta:
|
||||||
info = {}
|
info = {}
|
||||||
# 简体名称
|
# 简体名称
|
||||||
zh_name = zhconv.convert(meta.cn_name, "zh-hans") if meta.cn_name else None
|
zh_name = zhconv.convert(meta.cn_name, "zh-hans") if meta.cn_name else None
|
||||||
@@ -172,8 +173,8 @@ class TheMovieDbModule(_ModuleBase):
|
|||||||
if info and not info.get("genres"):
|
if info and not info.get("genres"):
|
||||||
info = self.tmdb.get_info(mtype=info.get("media_type"),
|
info = self.tmdb.get_info(mtype=info.get("media_type"),
|
||||||
tmdbid=info.get("id"))
|
tmdbid=info.get("id"))
|
||||||
else:
|
elif not info:
|
||||||
logger.error("识别媒体信息时未提供元数据或tmdbid")
|
logger.error("识别媒体信息时未提供元数据或唯一且有效的tmdbid")
|
||||||
return None
|
return None
|
||||||
|
|
||||||
# 保存到缓存
|
# 保存到缓存
|
||||||
|
|||||||
@@ -578,7 +578,7 @@ class TmdbApi:
|
|||||||
genre_ids.append(genre.get('id'))
|
genre_ids.append(genre.get('id'))
|
||||||
return genre_ids
|
return genre_ids
|
||||||
|
|
||||||
# 查询TMDB详ngeq
|
# 查询TMDB详情
|
||||||
if mtype == MediaType.MOVIE:
|
if mtype == MediaType.MOVIE:
|
||||||
tmdb_info = self.__get_movie_detail(tmdbid)
|
tmdb_info = self.__get_movie_detail(tmdbid)
|
||||||
if tmdb_info:
|
if tmdb_info:
|
||||||
@@ -588,13 +588,20 @@ class TmdbApi:
|
|||||||
if tmdb_info:
|
if tmdb_info:
|
||||||
tmdb_info['media_type'] = MediaType.TV
|
tmdb_info['media_type'] = MediaType.TV
|
||||||
else:
|
else:
|
||||||
tmdb_info = self.__get_tv_detail(tmdbid)
|
tmdb_info_tv = self.__get_tv_detail(tmdbid)
|
||||||
if tmdb_info:
|
tmdb_info_movie = self.__get_movie_detail(tmdbid)
|
||||||
|
if tmdb_info_tv and tmdb_info_movie:
|
||||||
|
tmdb_info = None
|
||||||
|
logger.warn(f"无法判断tmdb_id:{tmdbid} 是电影还是电视剧")
|
||||||
|
elif tmdb_info_tv:
|
||||||
|
tmdb_info = tmdb_info_tv
|
||||||
tmdb_info['media_type'] = MediaType.TV
|
tmdb_info['media_type'] = MediaType.TV
|
||||||
|
elif tmdb_info_movie:
|
||||||
|
tmdb_info = tmdb_info_movie
|
||||||
|
tmdb_info['media_type'] = MediaType.MOVIE
|
||||||
else:
|
else:
|
||||||
tmdb_info = self.__get_movie_detail(tmdbid)
|
tmdb_info = None
|
||||||
if tmdb_info:
|
logger.warn(f"tmdb_id:{tmdbid} 未查询到媒体信息")
|
||||||
tmdb_info['media_type'] = MediaType.MOVIE
|
|
||||||
|
|
||||||
if tmdb_info:
|
if tmdb_info:
|
||||||
# 转换genreid
|
# 转换genreid
|
||||||
|
|||||||
@@ -246,7 +246,9 @@ class TransmissionModule(_ModuleBase, _DownloaderBase[Transmission]):
|
|||||||
title=torrent.name,
|
title=torrent.name,
|
||||||
path=Path(torrent.download_dir) / torrent.name,
|
path=Path(torrent.download_dir) / torrent.name,
|
||||||
hash=torrent.hashString,
|
hash=torrent.hashString,
|
||||||
tags=",".join(torrent.labels or [])
|
tags=",".join(torrent.labels or []),
|
||||||
|
progress=torrent.progress,
|
||||||
|
state="paused" if torrent.status == "stopped" else "downloading",
|
||||||
))
|
))
|
||||||
elif status == TorrentStatus.DOWNLOADING:
|
elif status == TorrentStatus.DOWNLOADING:
|
||||||
# 获取正在下载的任务
|
# 获取正在下载的任务
|
||||||
|
|||||||
@@ -32,13 +32,13 @@ class WeChat:
|
|||||||
_proxy = None
|
_proxy = None
|
||||||
|
|
||||||
# 企业微信发送消息URL
|
# 企业微信发送消息URL
|
||||||
_send_msg_url = "/cgi-bin/message/send?access_token={access_token}"
|
_send_msg_url = "cgi-bin/message/send?access_token={access_token}"
|
||||||
# 企业微信获取TokenURL
|
# 企业微信获取TokenURL
|
||||||
_token_url = "/cgi-bin/gettoken?corpid={corpid}&corpsecret={corpsecret}"
|
_token_url = "cgi-bin/gettoken?corpid={corpid}&corpsecret={corpsecret}"
|
||||||
# 企业微信创建菜单URL
|
# 企业微信创建菜单URL
|
||||||
_create_menu_url = "/cgi-bin/menu/create?access_token={access_token}&agentid={agentid}"
|
_create_menu_url = "cgi-bin/menu/create?access_token={access_token}&agentid={agentid}"
|
||||||
# 企业微信删除菜单URL
|
# 企业微信删除菜单URL
|
||||||
_delete_menu_url = "/cgi-bin/menu/delete?access_token={access_token}&agentid={agentid}"
|
_delete_menu_url = "cgi-bin/menu/delete?access_token={access_token}&agentid={agentid}"
|
||||||
|
|
||||||
def __init__(self, WECHAT_CORPID: str = None, WECHAT_APP_SECRET: str = None,
|
def __init__(self, WECHAT_CORPID: str = None, WECHAT_APP_SECRET: str = None,
|
||||||
WECHAT_APP_ID: str = None, WECHAT_PROXY: str = None, **kwargs):
|
WECHAT_APP_ID: str = None, WECHAT_PROXY: str = None, **kwargs):
|
||||||
|
|||||||
169
app/scheduler.py
169
app/scheduler.py
@@ -7,6 +7,7 @@ import pytz
|
|||||||
from apscheduler.executors.pool import ThreadPoolExecutor
|
from apscheduler.executors.pool import ThreadPoolExecutor
|
||||||
from apscheduler.jobstores.base import JobLookupError
|
from apscheduler.jobstores.base import JobLookupError
|
||||||
from apscheduler.schedulers.background import BackgroundScheduler
|
from apscheduler.schedulers.background import BackgroundScheduler
|
||||||
|
from apscheduler.triggers.cron import CronTrigger
|
||||||
|
|
||||||
from app import schemas
|
from app import schemas
|
||||||
from app.chain import ChainBase
|
from app.chain import ChainBase
|
||||||
@@ -16,13 +17,14 @@ from app.chain.site import SiteChain
|
|||||||
from app.chain.subscribe import SubscribeChain
|
from app.chain.subscribe import SubscribeChain
|
||||||
from app.chain.tmdb import TmdbChain
|
from app.chain.tmdb import TmdbChain
|
||||||
from app.chain.transfer import TransferChain
|
from app.chain.transfer import TransferChain
|
||||||
|
from app.chain.workflow import WorkflowChain
|
||||||
from app.core.config import settings
|
from app.core.config import settings
|
||||||
from app.core.event import EventManager
|
from app.core.event import EventManager
|
||||||
from app.core.plugin import PluginManager
|
from app.core.plugin import PluginManager
|
||||||
from app.db.systemconfig_oper import SystemConfigOper
|
from app.db.systemconfig_oper import SystemConfigOper
|
||||||
from app.helper.sites import SitesHelper
|
from app.helper.sites import SitesHelper
|
||||||
from app.log import logger
|
from app.log import logger
|
||||||
from app.schemas import Notification, NotificationType
|
from app.schemas import Notification, NotificationType, Workflow
|
||||||
from app.schemas.types import EventType, SystemConfigKey
|
from app.schemas.types import EventType, SystemConfigKey
|
||||||
from app.utils.singleton import Singleton
|
from app.utils.singleton import Singleton
|
||||||
from app.utils.timer import TimerUtils
|
from app.utils.timer import TimerUtils
|
||||||
@@ -345,6 +347,10 @@ class Scheduler(metaclass=Singleton):
|
|||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
|
# 初始化工作流服务
|
||||||
|
self.init_workflow_jobs()
|
||||||
|
|
||||||
|
# 初始化插件服务
|
||||||
self.init_plugin_jobs()
|
self.init_plugin_jobs()
|
||||||
|
|
||||||
# 打印服务
|
# 打印服务
|
||||||
@@ -401,52 +407,42 @@ class Scheduler(metaclass=Singleton):
|
|||||||
for pid in PluginManager().get_running_plugin_ids():
|
for pid in PluginManager().get_running_plugin_ids():
|
||||||
self.update_plugin_job(pid)
|
self.update_plugin_job(pid)
|
||||||
|
|
||||||
def update_plugin_job(self, pid: str):
|
def init_workflow_jobs(self):
|
||||||
"""
|
"""
|
||||||
更新插件定时服务
|
初始化工作流定时服务
|
||||||
"""
|
"""
|
||||||
if not self._scheduler or not pid:
|
for workflow in WorkflowChain().get_workflows() or []:
|
||||||
|
self.update_workflow_job(workflow)
|
||||||
|
|
||||||
|
def remove_workflow_job(self, workflow: Workflow):
|
||||||
|
"""
|
||||||
|
移除工作流服务
|
||||||
|
"""
|
||||||
|
if not self._scheduler:
|
||||||
return
|
return
|
||||||
# 移除该插件的全部服务
|
|
||||||
self.remove_plugin_job(pid)
|
|
||||||
# 获取插件服务列表
|
|
||||||
with self._lock:
|
with self._lock:
|
||||||
try:
|
job_id = f"workflow-{workflow.id}"
|
||||||
plugin_services = PluginManager().get_plugin_services(pid=pid)
|
service = self._jobs.pop(job_id, None)
|
||||||
except Exception as e:
|
if not service:
|
||||||
logger.error(f"运行插件 {pid} 服务失败:{str(e)} - {traceback.format_exc()}")
|
|
||||||
return
|
return
|
||||||
# 获取插件名称
|
try:
|
||||||
plugin_name = PluginManager().get_plugin_attr(pid, "plugin_name")
|
# 在调度器中查找并移除对应的 job
|
||||||
# 开始注册插件服务
|
job_removed = False
|
||||||
for service in plugin_services:
|
for job in list(self._scheduler.get_jobs()):
|
||||||
try:
|
if job_id == job.id:
|
||||||
sid = f"{service['id']}"
|
try:
|
||||||
job_id = sid.split("|")[0]
|
self._scheduler.remove_job(job.id)
|
||||||
self.remove_plugin_job(pid, job_id)
|
job_removed = True
|
||||||
self._jobs[job_id] = {
|
except JobLookupError:
|
||||||
"func": service["func"],
|
pass
|
||||||
"name": service["name"],
|
break
|
||||||
"pid": pid,
|
if job_removed:
|
||||||
"plugin_name": plugin_name,
|
logger.info(f"移除工作流服务:{service.get('name')}")
|
||||||
"kwargs": service.get("func_kwargs") or {},
|
except Exception as e:
|
||||||
"running": False,
|
logger.error(f"移除工作流服务失败:{str(e)} - {job_id}: {service}")
|
||||||
}
|
SchedulerChain().messagehelper.put(title=f"工作流 {workflow.name} 服务移除失败",
|
||||||
self._scheduler.add_job(
|
message=str(e),
|
||||||
self.start,
|
role="system")
|
||||||
service["trigger"],
|
|
||||||
id=sid,
|
|
||||||
name=service["name"],
|
|
||||||
**(service.get("kwargs") or {}),
|
|
||||||
kwargs={"job_id": job_id},
|
|
||||||
replace_existing=True
|
|
||||||
)
|
|
||||||
logger.info(f"注册插件{plugin_name}服务:{service['name']} - {service['trigger']}")
|
|
||||||
except Exception as e:
|
|
||||||
logger.error(f"注册插件{plugin_name}服务失败:{str(e)} - {service}")
|
|
||||||
SchedulerChain().messagehelper.put(title=f"插件 {plugin_name} 服务注册失败",
|
|
||||||
message=str(e),
|
|
||||||
role="system")
|
|
||||||
|
|
||||||
def remove_plugin_job(self, pid: str, job_id: str = None):
|
def remove_plugin_job(self, pid: str, job_id: str = None):
|
||||||
"""
|
"""
|
||||||
@@ -494,6 +490,87 @@ class Scheduler(metaclass=Singleton):
|
|||||||
message=str(e),
|
message=str(e),
|
||||||
role="system")
|
role="system")
|
||||||
|
|
||||||
|
def update_workflow_job(self, workflow: Workflow):
|
||||||
|
"""
|
||||||
|
更新工作流定时服务
|
||||||
|
"""
|
||||||
|
if not self._scheduler:
|
||||||
|
return
|
||||||
|
|
||||||
|
# 移除该工作流的全部服务
|
||||||
|
self.remove_workflow_job(workflow)
|
||||||
|
# 添加工作流服务
|
||||||
|
with self._lock:
|
||||||
|
try:
|
||||||
|
job_id = f"workflow-{workflow.id}"
|
||||||
|
self._jobs[job_id] = {
|
||||||
|
"func": WorkflowChain().process,
|
||||||
|
"name": workflow.name,
|
||||||
|
"provider_name": "工作流",
|
||||||
|
"running": False,
|
||||||
|
}
|
||||||
|
self._scheduler.add_job(
|
||||||
|
self.start,
|
||||||
|
trigger=CronTrigger.from_crontab(workflow.timer),
|
||||||
|
id=job_id,
|
||||||
|
name=workflow.name,
|
||||||
|
kwargs={"job_id": job_id, "workflow_id": workflow.id},
|
||||||
|
replace_existing=True
|
||||||
|
)
|
||||||
|
logger.info(f"注册工作流服务:{workflow.name} - {workflow.timer}")
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"注册工作流服务失败:{workflow.name} - {str(e)}")
|
||||||
|
SchedulerChain().messagehelper.put(title=f"工作流 {workflow.name} 服务注册失败",
|
||||||
|
message=str(e),
|
||||||
|
role="system")
|
||||||
|
|
||||||
|
def update_plugin_job(self, pid: str):
|
||||||
|
"""
|
||||||
|
更新插件定时服务
|
||||||
|
"""
|
||||||
|
if not self._scheduler or not pid:
|
||||||
|
return
|
||||||
|
# 移除该插件的全部服务
|
||||||
|
self.remove_plugin_job(pid)
|
||||||
|
# 获取插件服务列表
|
||||||
|
with self._lock:
|
||||||
|
try:
|
||||||
|
plugin_services = PluginManager().get_plugin_services(pid=pid)
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"运行插件 {pid} 服务失败:{str(e)} - {traceback.format_exc()}")
|
||||||
|
return
|
||||||
|
# 获取插件名称
|
||||||
|
plugin_name = PluginManager().get_plugin_attr(pid, "plugin_name")
|
||||||
|
# 开始注册插件服务
|
||||||
|
for service in plugin_services:
|
||||||
|
try:
|
||||||
|
sid = f"{service['id']}"
|
||||||
|
job_id = sid.split("|")[0]
|
||||||
|
self.remove_plugin_job(pid, job_id)
|
||||||
|
self._jobs[job_id] = {
|
||||||
|
"func": service["func"],
|
||||||
|
"name": service["name"],
|
||||||
|
"pid": pid,
|
||||||
|
"provider_name": plugin_name,
|
||||||
|
"kwargs": service.get("func_kwargs") or {},
|
||||||
|
"running": False,
|
||||||
|
}
|
||||||
|
self._scheduler.add_job(
|
||||||
|
self.start,
|
||||||
|
service["trigger"],
|
||||||
|
id=sid,
|
||||||
|
name=service["name"],
|
||||||
|
**(service.get("kwargs") or {}),
|
||||||
|
kwargs={"job_id": job_id},
|
||||||
|
replace_existing=True
|
||||||
|
)
|
||||||
|
logger.info(f"注册插件{plugin_name}服务:{service['name']} - {service['trigger']}")
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"注册插件{plugin_name}服务失败:{str(e)} - {service}")
|
||||||
|
SchedulerChain().messagehelper.put(title=f"插件 {plugin_name} 服务注册失败",
|
||||||
|
message=str(e),
|
||||||
|
role="system")
|
||||||
|
|
||||||
def list(self) -> List[schemas.ScheduleInfo]:
|
def list(self) -> List[schemas.ScheduleInfo]:
|
||||||
"""
|
"""
|
||||||
当前所有任务
|
当前所有任务
|
||||||
@@ -511,14 +588,14 @@ class Scheduler(metaclass=Singleton):
|
|||||||
# 将正在运行的任务提取出来 (保障一次性任务正常显示)
|
# 将正在运行的任务提取出来 (保障一次性任务正常显示)
|
||||||
for job_id, service in self._jobs.items():
|
for job_id, service in self._jobs.items():
|
||||||
name = service.get("name")
|
name = service.get("name")
|
||||||
plugin_name = service.get("plugin_name")
|
provider_name = service.get("provider_name")
|
||||||
if service.get("running") and name and plugin_name:
|
if service.get("running") and name and provider_name:
|
||||||
if name not in added:
|
if name not in added:
|
||||||
added.append(name)
|
added.append(name)
|
||||||
schedulers.append(schemas.ScheduleInfo(
|
schedulers.append(schemas.ScheduleInfo(
|
||||||
id=job_id,
|
id=job_id,
|
||||||
name=name,
|
name=name,
|
||||||
provider=plugin_name,
|
provider=provider_name,
|
||||||
status="正在运行",
|
status="正在运行",
|
||||||
))
|
))
|
||||||
# 获取其他待执行任务
|
# 获取其他待执行任务
|
||||||
@@ -538,7 +615,7 @@ class Scheduler(metaclass=Singleton):
|
|||||||
schedulers.append(schemas.ScheduleInfo(
|
schedulers.append(schemas.ScheduleInfo(
|
||||||
id=job_id,
|
id=job_id,
|
||||||
name=job.name,
|
name=job.name,
|
||||||
provider=service.get("plugin_name", "[系统]"),
|
provider=service.get("provider_name", "[系统]"),
|
||||||
status=status,
|
status=status,
|
||||||
next_run=next_run
|
next_run=next_run
|
||||||
))
|
))
|
||||||
|
|||||||
@@ -19,3 +19,5 @@ from .file import *
|
|||||||
from .exception import *
|
from .exception import *
|
||||||
from .system import *
|
from .system import *
|
||||||
from .event import *
|
from .event import *
|
||||||
|
from .workflow import *
|
||||||
|
from .download import *
|
||||||
|
|||||||
@@ -242,6 +242,19 @@ class Context(BaseModel):
|
|||||||
torrent_info: Optional[TorrentInfo] = None
|
torrent_info: Optional[TorrentInfo] = None
|
||||||
|
|
||||||
|
|
||||||
|
class MediaSeason(BaseModel):
|
||||||
|
"""
|
||||||
|
季信息
|
||||||
|
"""
|
||||||
|
air_date: Optional[str] = None
|
||||||
|
episode_count: Optional[int] = None
|
||||||
|
name: Optional[str] = None
|
||||||
|
overview: Optional[str] = None
|
||||||
|
poster_path: Optional[str] = None
|
||||||
|
season_number: Optional[int] = None
|
||||||
|
vote_average: Optional[float] = None
|
||||||
|
|
||||||
|
|
||||||
class MediaPerson(BaseModel):
|
class MediaPerson(BaseModel):
|
||||||
"""
|
"""
|
||||||
媒体人物信息
|
媒体人物信息
|
||||||
|
|||||||
13
app/schemas/download.py
Normal file
13
app/schemas/download.py
Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
from typing import Optional
|
||||||
|
|
||||||
|
from pydantic import BaseModel, Field
|
||||||
|
|
||||||
|
|
||||||
|
class DownloadTask(BaseModel):
|
||||||
|
"""
|
||||||
|
下载任务
|
||||||
|
"""
|
||||||
|
download_id: Optional[str] = Field(None, description="任务ID")
|
||||||
|
downloader: Optional[str] = Field(None, description="下载器")
|
||||||
|
path: Optional[str] = Field(None, description="下载路径")
|
||||||
|
completed: Optional[bool] = Field(False, description="是否完成")
|
||||||
@@ -6,6 +6,15 @@ from pydantic import BaseModel, Field, root_validator
|
|||||||
from app.schemas import MessageChannel, FileItem
|
from app.schemas import MessageChannel, FileItem
|
||||||
|
|
||||||
|
|
||||||
|
class Event(BaseModel):
|
||||||
|
"""
|
||||||
|
事件模型
|
||||||
|
"""
|
||||||
|
event_type: str = Field(..., description="事件类型")
|
||||||
|
event_data: Optional[dict] = Field({}, description="事件数据")
|
||||||
|
priority: Optional[int] = Field(0, description="事件优先级")
|
||||||
|
|
||||||
|
|
||||||
class BaseEventData(BaseModel):
|
class BaseEventData(BaseModel):
|
||||||
"""
|
"""
|
||||||
事件数据的基类,所有具体事件数据类应继承自此类
|
事件数据的基类,所有具体事件数据类应继承自此类
|
||||||
@@ -244,6 +253,7 @@ class DiscoverMediaSource(BaseModel):
|
|||||||
api_path: str = Field(..., description="媒体数据源API地址")
|
api_path: str = Field(..., description="媒体数据源API地址")
|
||||||
filter_params: Optional[Dict[str, Any]] = Field(default=None, description="过滤参数")
|
filter_params: Optional[Dict[str, Any]] = Field(default=None, description="过滤参数")
|
||||||
filter_ui: Optional[List[dict]] = Field(default=[], description="过滤参数UI配置")
|
filter_ui: Optional[List[dict]] = Field(default=[], description="过滤参数UI配置")
|
||||||
|
depends: Optional[Dict[str, list]] = Field(default=None, description="UI依赖关系字典")
|
||||||
|
|
||||||
|
|
||||||
class DiscoverSourceEventData(ChainEventData):
|
class DiscoverSourceEventData(ChainEventData):
|
||||||
@@ -258,6 +268,26 @@ class DiscoverSourceEventData(ChainEventData):
|
|||||||
extra_sources: List[DiscoverMediaSource] = Field(default_factory=list, description="额外媒体数据源")
|
extra_sources: List[DiscoverMediaSource] = Field(default_factory=list, description="额外媒体数据源")
|
||||||
|
|
||||||
|
|
||||||
|
class RecommendMediaSource(BaseModel):
|
||||||
|
"""
|
||||||
|
推荐媒体数据源的基类
|
||||||
|
"""
|
||||||
|
name: str = Field(..., description="数据源名称")
|
||||||
|
api_path: str = Field(..., description="媒体数据源API地址")
|
||||||
|
|
||||||
|
|
||||||
|
class RecommendSourceEventData(ChainEventData):
|
||||||
|
"""
|
||||||
|
RecommendSource 事件的数据模型
|
||||||
|
|
||||||
|
Attributes:
|
||||||
|
# 输出参数
|
||||||
|
extra_sources (List[RecommendMediaSource]): 额外媒体数据源
|
||||||
|
"""
|
||||||
|
# 输出参数
|
||||||
|
extra_sources: List[RecommendMediaSource] = Field(default_factory=list, description="额外媒体数据源")
|
||||||
|
|
||||||
|
|
||||||
class MediaRecognizeConvertEventData(ChainEventData):
|
class MediaRecognizeConvertEventData(ChainEventData):
|
||||||
"""
|
"""
|
||||||
MediaRecognizeConvert 事件的数据模型
|
MediaRecognizeConvert 事件的数据模型
|
||||||
|
|||||||
@@ -115,3 +115,9 @@ class SiteUserData(BaseModel):
|
|||||||
class SiteAuth(BaseModel):
|
class SiteAuth(BaseModel):
|
||||||
site: Optional[str] = None
|
site: Optional[str] = None
|
||||||
params: Optional[Dict[str, Union[int, str]]] = Field(default_factory=dict)
|
params: Optional[Dict[str, Union[int, str]]] = Field(default_factory=dict)
|
||||||
|
|
||||||
|
|
||||||
|
class SiteCategory(BaseModel):
|
||||||
|
id: Optional[int] = None
|
||||||
|
cat: Optional[str] = None
|
||||||
|
desc: Optional[str] = None
|
||||||
|
|||||||
@@ -21,6 +21,8 @@ class TransferTorrent(BaseModel):
|
|||||||
tags: Optional[str] = None
|
tags: Optional[str] = None
|
||||||
size: Optional[int] = 0
|
size: Optional[int] = 0
|
||||||
userid: Optional[str] = None
|
userid: Optional[str] = None
|
||||||
|
progress: Optional[float] = 0
|
||||||
|
state: Optional[str] = None
|
||||||
|
|
||||||
|
|
||||||
class DownloadingTorrent(BaseModel):
|
class DownloadingTorrent(BaseModel):
|
||||||
|
|||||||
@@ -81,10 +81,14 @@ class ChainEventType(Enum):
|
|||||||
ResourceSelection = "resource.selection"
|
ResourceSelection = "resource.selection"
|
||||||
# 资源下载
|
# 资源下载
|
||||||
ResourceDownload = "resource.download"
|
ResourceDownload = "resource.download"
|
||||||
# 发现数据源
|
# 探索数据源
|
||||||
DiscoverSource = "discover.source"
|
DiscoverSource = "discover.source"
|
||||||
# 媒体识别转换
|
# 媒体识别转换
|
||||||
MediaRecognizeConvert = "media.recognize.convert"
|
MediaRecognizeConvert = "media.recognize.convert"
|
||||||
|
# 推荐数据源
|
||||||
|
RecommendSource = "recommend.source"
|
||||||
|
# 工作流执行
|
||||||
|
WorkflowExecution = "workflow.execution"
|
||||||
|
|
||||||
|
|
||||||
# 系统配置Key字典
|
# 系统配置Key字典
|
||||||
|
|||||||
84
app/schemas/workflow.py
Normal file
84
app/schemas/workflow.py
Normal file
@@ -0,0 +1,84 @@
|
|||||||
|
from typing import Optional, List
|
||||||
|
|
||||||
|
from pydantic import BaseModel, Field
|
||||||
|
|
||||||
|
from app.schemas.context import Context, MediaInfo
|
||||||
|
from app.schemas.download import DownloadTask
|
||||||
|
from app.schemas.file import FileItem
|
||||||
|
from app.schemas.site import Site
|
||||||
|
from app.schemas.subscribe import Subscribe
|
||||||
|
|
||||||
|
|
||||||
|
class Workflow(BaseModel):
|
||||||
|
"""
|
||||||
|
工作流信息
|
||||||
|
"""
|
||||||
|
id: Optional[int] = Field(None, description="工作流ID")
|
||||||
|
name: Optional[str] = Field(None, description="工作流名称")
|
||||||
|
description: Optional[str] = Field(None, description="工作流描述")
|
||||||
|
timer: Optional[str] = Field(None, description="定时器")
|
||||||
|
state: Optional[str] = Field(None, description="状态")
|
||||||
|
current_action: Optional[str] = Field(None, description="已执行动作")
|
||||||
|
result: Optional[str] = Field(None, description="任务执行结果")
|
||||||
|
run_count: Optional[int] = Field(0, description="已执行次数")
|
||||||
|
actions: Optional[list] = Field([], description="任务列表")
|
||||||
|
flows: Optional[list] = Field([], description="任务流")
|
||||||
|
add_time: Optional[str] = Field(None, description="创建时间")
|
||||||
|
last_time: Optional[str] = Field(None, description="最后执行时间")
|
||||||
|
|
||||||
|
class Config:
|
||||||
|
orm_mode = True
|
||||||
|
|
||||||
|
|
||||||
|
class ActionParams(BaseModel):
|
||||||
|
"""
|
||||||
|
动作基础参数
|
||||||
|
"""
|
||||||
|
loop: Optional[bool] = Field(False, description="是否需要循环")
|
||||||
|
loop_interval: Optional[int] = Field(0, description="循环间隔 (秒)")
|
||||||
|
|
||||||
|
|
||||||
|
class Action(BaseModel):
|
||||||
|
"""
|
||||||
|
动作信息
|
||||||
|
"""
|
||||||
|
id: Optional[str] = Field(None, description="动作ID")
|
||||||
|
type: Optional[str] = Field(None, description="动作类型 (类名)")
|
||||||
|
name: Optional[str] = Field(None, description="动作名称")
|
||||||
|
description: Optional[str] = Field(None, description="动作描述")
|
||||||
|
position: Optional[dict] = Field({}, description="位置")
|
||||||
|
data: Optional[dict] = Field({}, description="参数")
|
||||||
|
|
||||||
|
|
||||||
|
class ActionExecution(BaseModel):
|
||||||
|
"""
|
||||||
|
动作执行情况
|
||||||
|
"""
|
||||||
|
action: Optional[str] = Field(None, description="当前动作(名称)")
|
||||||
|
result: Optional[bool] = Field(None, description="执行结果")
|
||||||
|
message: Optional[str] = Field(None, description="执行消息")
|
||||||
|
|
||||||
|
|
||||||
|
class ActionContext(BaseModel):
|
||||||
|
"""
|
||||||
|
动作基础上下文,各动作通用数据
|
||||||
|
"""
|
||||||
|
content: Optional[str] = Field(None, description="文本类内容")
|
||||||
|
torrents: Optional[List[Context]] = Field([], description="资源列表")
|
||||||
|
medias: Optional[List[MediaInfo]] = Field([], description="媒体列表")
|
||||||
|
fileitems: Optional[List[FileItem]] = Field([], description="文件列表")
|
||||||
|
downloads: Optional[List[DownloadTask]] = Field([], description="下载任务列表")
|
||||||
|
sites: Optional[List[Site]] = Field([], description="站点列表")
|
||||||
|
subscribes: Optional[List[Subscribe]] = Field([], description="订阅列表")
|
||||||
|
execute_history: Optional[List[ActionExecution]] = Field([], description="执行历史")
|
||||||
|
progress: Optional[int] = Field(0, description="执行进度(%)")
|
||||||
|
|
||||||
|
|
||||||
|
class ActionFlow(BaseModel):
|
||||||
|
"""
|
||||||
|
工作流流程
|
||||||
|
"""
|
||||||
|
id: Optional[str] = Field(None, description="流程ID")
|
||||||
|
source: Optional[str] = Field(None, description="源动作")
|
||||||
|
target: Optional[str] = Field(None, description="目标动作")
|
||||||
|
animated: Optional[bool] = Field(True, description="是否动画流程")
|
||||||
@@ -3,6 +3,7 @@ from contextlib import asynccontextmanager
|
|||||||
|
|
||||||
from fastapi import FastAPI
|
from fastapi import FastAPI
|
||||||
|
|
||||||
|
from app.startup.workflow_initializer import init_workflow, stop_workflow
|
||||||
from app.startup.modules_initializer import shutdown_modules, start_modules
|
from app.startup.modules_initializer import shutdown_modules, start_modules
|
||||||
from app.startup.plugins_initializer import init_plugins_async
|
from app.startup.plugins_initializer import init_plugins_async
|
||||||
from app.startup.routers_initializer import init_routers
|
from app.startup.routers_initializer import init_routers
|
||||||
@@ -16,6 +17,8 @@ async def lifespan(app: FastAPI):
|
|||||||
print("Starting up...")
|
print("Starting up...")
|
||||||
# 启动模块
|
# 启动模块
|
||||||
start_modules(app)
|
start_modules(app)
|
||||||
|
# 初始化工作流动作
|
||||||
|
init_workflow(app)
|
||||||
# 初始化路由
|
# 初始化路由
|
||||||
init_routers(app)
|
init_routers(app)
|
||||||
# 初始化插件
|
# 初始化插件
|
||||||
@@ -35,3 +38,6 @@ async def lifespan(app: FastAPI):
|
|||||||
print(f"Error during plugin installation shutdown: {e}")
|
print(f"Error during plugin installation shutdown: {e}")
|
||||||
# 清理模块
|
# 清理模块
|
||||||
shutdown_modules(app)
|
shutdown_modules(app)
|
||||||
|
# 关闭工作流
|
||||||
|
stop_workflow(app)
|
||||||
|
|
||||||
|
|||||||
17
app/startup/workflow_initializer.py
Normal file
17
app/startup/workflow_initializer.py
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
from fastapi import FastAPI
|
||||||
|
|
||||||
|
from app.core.workflow import WorkFlowManager
|
||||||
|
|
||||||
|
|
||||||
|
def init_workflow(_: FastAPI):
|
||||||
|
"""
|
||||||
|
初始化动作
|
||||||
|
"""
|
||||||
|
WorkFlowManager()
|
||||||
|
|
||||||
|
|
||||||
|
def stop_workflow(_: FastAPI):
|
||||||
|
"""
|
||||||
|
停止动作
|
||||||
|
"""
|
||||||
|
WorkFlowManager().stop()
|
||||||
@@ -42,7 +42,7 @@ class SecurityUtils:
|
|||||||
@staticmethod
|
@staticmethod
|
||||||
def is_safe_url(url: str, allowed_domains: Union[Set[str], List[str]], strict: bool = False) -> bool:
|
def is_safe_url(url: str, allowed_domains: Union[Set[str], List[str]], strict: bool = False) -> bool:
|
||||||
"""
|
"""
|
||||||
验证URL是否在允许的域名列表中,包括带有端口的域名。
|
验证URL是否在允许的域名列表中,包括带有端口的域名
|
||||||
|
|
||||||
:param url: 需要验证的 URL
|
:param url: 需要验证的 URL
|
||||||
:param allowed_domains: 允许的域名集合,域名可以包含端口
|
:param allowed_domains: 允许的域名集合,域名可以包含端口
|
||||||
|
|||||||
@@ -63,3 +63,5 @@ OCR_HOST=https://movie-pilot.org
|
|||||||
PLUGIN_MARKET=https://github.com/jxxghp/MoviePilot-Plugins,https://github.com/thsrite/MoviePilot-Plugins,https://github.com/InfinityPacer/MoviePilot-Plugins,https://github.com/honue/MoviePilot-Plugins
|
PLUGIN_MARKET=https://github.com/jxxghp/MoviePilot-Plugins,https://github.com/thsrite/MoviePilot-Plugins,https://github.com/InfinityPacer/MoviePilot-Plugins,https://github.com/honue/MoviePilot-Plugins
|
||||||
# 搜索多个名称,true/false,为true时搜索时会同时搜索中英文及原始名称,搜索结果会更全面,但会增加搜索时间;为false时其中一个名称搜索到结果或全部名称搜索完毕即停止
|
# 搜索多个名称,true/false,为true时搜索时会同时搜索中英文及原始名称,搜索结果会更全面,但会增加搜索时间;为false时其中一个名称搜索到结果或全部名称搜索完毕即停止
|
||||||
SEARCH_MULTIPLE_NAME=true
|
SEARCH_MULTIPLE_NAME=true
|
||||||
|
# 为指定字幕添加.default后缀设置为默认字幕,支持为'zh-cn','zh-tw','eng'添加默认字幕,未定义或设置为None则不添加
|
||||||
|
DEFAULT_SUB=None
|
||||||
|
|||||||
24
database/versions/279a949d81b6_2_1_1.py
Normal file
24
database/versions/279a949d81b6_2_1_1.py
Normal file
@@ -0,0 +1,24 @@
|
|||||||
|
"""2.1.1
|
||||||
|
|
||||||
|
Revision ID: 279a949d81b6
|
||||||
|
Revises: ca5461f314f2
|
||||||
|
Create Date: 2025-02-14 19:02:24.989349
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
from app.chain.torrents import TorrentsChain
|
||||||
|
|
||||||
|
# revision identifiers, used by Alembic.
|
||||||
|
revision = '279a949d81b6'
|
||||||
|
down_revision = 'ca5461f314f2'
|
||||||
|
branch_labels = None
|
||||||
|
depends_on = None
|
||||||
|
|
||||||
|
|
||||||
|
def upgrade() -> None:
|
||||||
|
# 清理一次缓存
|
||||||
|
TorrentsChain().clear_torrents()
|
||||||
|
|
||||||
|
|
||||||
|
def downgrade() -> None:
|
||||||
|
pass
|
||||||
@@ -7,9 +7,8 @@ Create Date: 2024-12-24 13:29:32.225532
|
|||||||
"""
|
"""
|
||||||
import contextlib
|
import contextlib
|
||||||
|
|
||||||
from alembic import op
|
|
||||||
import sqlalchemy as sa
|
import sqlalchemy as sa
|
||||||
from sqlalchemy.dialects import sqlite
|
from alembic import op
|
||||||
|
|
||||||
# revision identifiers, used by Alembic.
|
# revision identifiers, used by Alembic.
|
||||||
revision = '55390f1f77c1'
|
revision = '55390f1f77c1'
|
||||||
|
|||||||
29
database/versions/610bb05ddeef_2_1_2.py
Normal file
29
database/versions/610bb05ddeef_2_1_2.py
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
"""2.1.2
|
||||||
|
|
||||||
|
Revision ID: 610bb05ddeef
|
||||||
|
Revises: 279a949d81b6
|
||||||
|
Create Date: 2025-02-24 07:52:00.042837
|
||||||
|
|
||||||
|
"""
|
||||||
|
import contextlib
|
||||||
|
|
||||||
|
from alembic import op
|
||||||
|
import sqlalchemy as sa
|
||||||
|
from sqlalchemy.dialects import sqlite
|
||||||
|
|
||||||
|
# revision identifiers, used by Alembic.
|
||||||
|
revision = '610bb05ddeef'
|
||||||
|
down_revision = '279a949d81b6'
|
||||||
|
branch_labels = None
|
||||||
|
depends_on = None
|
||||||
|
|
||||||
|
|
||||||
|
def upgrade() -> None:
|
||||||
|
# ### commands auto generated by Alembic - please adjust! ###
|
||||||
|
with contextlib.suppress(Exception):
|
||||||
|
op.add_column('workflow', sa.Column('flows', sa.JSON(), nullable=True))
|
||||||
|
# ### end Alembic commands ###
|
||||||
|
|
||||||
|
|
||||||
|
def downgrade() -> None:
|
||||||
|
pass
|
||||||
@@ -7,9 +7,8 @@ Create Date: 2025-02-06 18:28:00.644571
|
|||||||
"""
|
"""
|
||||||
import contextlib
|
import contextlib
|
||||||
|
|
||||||
from alembic import op
|
|
||||||
import sqlalchemy as sa
|
import sqlalchemy as sa
|
||||||
from sqlalchemy.dialects import sqlite
|
from alembic import op
|
||||||
|
|
||||||
# revision identifiers, used by Alembic.
|
# revision identifiers, used by Alembic.
|
||||||
revision = 'ca5461f314f2'
|
revision = 'ca5461f314f2'
|
||||||
|
|||||||
@@ -32,6 +32,7 @@ func_timeout==4.3.5
|
|||||||
bs4~=0.0.1
|
bs4~=0.0.1
|
||||||
beautifulsoup4~=4.12.2
|
beautifulsoup4~=4.12.2
|
||||||
pillow~=10.4.0
|
pillow~=10.4.0
|
||||||
|
pillow-avif-plugin~=1.4.6
|
||||||
pyTelegramBotAPI~=4.12.0
|
pyTelegramBotAPI~=4.12.0
|
||||||
playwright~=1.37.0
|
playwright~=1.37.0
|
||||||
cf-clearance~=0.31.0
|
cf-clearance~=0.31.0
|
||||||
|
|||||||
28
setup.py
28
setup.py
@@ -1,20 +1,22 @@
|
|||||||
|
|
||||||
from distutils.core import setup
|
from distutils.core import setup
|
||||||
|
|
||||||
from Cython.Build import cythonize
|
from Cython.Build import cythonize
|
||||||
|
|
||||||
|
|
||||||
module_list = ['app/helper/sites.py']
|
module_list = ['app/helper/sites.py']
|
||||||
|
|
||||||
setup(
|
setup(
|
||||||
name="",
|
name="MoviePilot",
|
||||||
author="",
|
author="jxxghp",
|
||||||
zip_safe=False,
|
zip_safe=False,
|
||||||
include_package_data=True,
|
include_package_data=True,
|
||||||
ext_modules=cythonize(
|
ext_modules=cythonize(
|
||||||
module_list=module_list,
|
module_list=module_list,
|
||||||
nthreads=0,
|
nthreads=0,
|
||||||
compiler_directives={"language_level": "3"},
|
compiler_directives={
|
||||||
),
|
"language_level": "3",
|
||||||
script_args=["build_ext", "-j", '2', "--inplace"],
|
"binding": False,
|
||||||
)
|
"nonecheck": False
|
||||||
|
},
|
||||||
|
),
|
||||||
|
script_args=["build_ext", "-j", '2', "--inplace"],
|
||||||
|
)
|
||||||
|
|||||||
@@ -1,2 +1,2 @@
|
|||||||
APP_VERSION = 'v2.2.7'
|
APP_VERSION = 'v2.3.2'
|
||||||
FRONTEND_VERSION = 'v2.2.7'
|
FRONTEND_VERSION = 'v2.3.2'
|
||||||
|
|||||||
Reference in New Issue
Block a user