Mirror of https://github.com/jxxghp/MoviePilot.git (synced 2026-05-07 16:53:03 +08:00)

Compare commits: 117 commits
app/actions/__init__.py (new file, 106 lines)
@@ -0,0 +1,106 @@
from abc import ABC, abstractmethod
from typing import Union

from app.chain import ChainBase
from app.db.systemconfig_oper import SystemConfigOper
from app.schemas import ActionContext, ActionParams


class ActionChain(ChainBase):
    pass


class BaseAction(ABC):
    """
    Base class for workflow actions
    """

    # Action ID
    _action_id = None
    # Completion flag
    _done_flag = False
    # Execution message
    _message = ""
    # Cache key template
    _cache_key = "WorkflowCache-%s"

    def __init__(self, action_id: str):
        self._action_id = action_id
        self.systemconfigoper = SystemConfigOper()

    @classmethod
    @property
    @abstractmethod
    def name(cls) -> str:  # noqa
        pass

    @classmethod
    @property
    @abstractmethod
    def description(cls) -> str:  # noqa
        pass

    @classmethod
    @property
    @abstractmethod
    def data(cls) -> dict:  # noqa
        pass

    @property
    def done(self) -> bool:
        """
        Whether the action has finished
        """
        return self._done_flag

    @property
    @abstractmethod
    def success(self) -> bool:
        """
        Whether the action succeeded
        """
        pass

    @property
    def message(self) -> str:
        """
        Execution message
        """
        return self._message

    def job_done(self, message: str = None):
        """
        Mark the action as finished
        """
        self._message = message
        self._done_flag = True

    def check_cache(self, workflow_id: int, key: str) -> bool:
        """
        Check whether this key has already been processed
        """
        workflow_key = self._cache_key % workflow_id
        workflow_cache = self.systemconfigoper.get(workflow_key) or {}
        action_cache = workflow_cache.get(self._action_id) or []
        return key in action_cache

    def save_cache(self, workflow_id: int, data: Union[list, str]):
        """
        Save processed keys to the cache
        """
        workflow_key = self._cache_key % workflow_id
        workflow_cache = self.systemconfigoper.get(workflow_key) or {}
        action_cache = workflow_cache.get(self._action_id) or []
        if isinstance(data, list):
            action_cache.extend(data)
        else:
            action_cache.append(data)
        workflow_cache[self._action_id] = action_cache
        self.systemconfigoper.set(workflow_key, workflow_cache)

    @abstractmethod
    def execute(self, workflow_id: int, params: ActionParams, context: ActionContext) -> ActionContext:
        """
        Execute the action
        """
        raise NotImplementedError

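BaseAction fixes the contract that every workflow action in this diff implements: class-level name, description and data properties, a done/success/message status surface, a per-workflow cache keyed by "WorkflowCache-<id>", and an execute() method that receives and returns the shared ActionContext. The following minimal subclass is only an illustration and is not part of this diff; its class name and strings are made up, and it only uses members shown above (context.torrents appears in the actions below):

from app.actions import BaseAction
from app.log import logger
from app.schemas import ActionContext


class LogTorrentsAction(BaseAction):
    """
    Illustrative only: log how many torrents are currently in the context.
    """

    @classmethod
    @property
    def name(cls) -> str:  # noqa
        return "Log torrents"

    @classmethod
    @property
    def description(cls) -> str:  # noqa
        return "Log the number of torrents in the context"

    @classmethod
    @property
    def data(cls) -> dict:  # noqa
        return {}

    @property
    def success(self) -> bool:
        return self.done

    def execute(self, workflow_id: int, params: dict, context: ActionContext) -> ActionContext:
        # Read from the shared context, report a result, and pass the context on unchanged
        logger.info(f"workflow {workflow_id}: {len(context.torrents)} torrents in context")
        self.job_done(f"{len(context.torrents)} torrents")
        return context
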
app/actions/add_download.py (new file, 121 lines)
@@ -0,0 +1,121 @@
from typing import Optional

from pydantic import Field

from app.actions import BaseAction
from app.chain.download import DownloadChain
from app.chain.media import MediaChain
from app.core.config import global_vars
from app.core.metainfo import MetaInfo
from app.log import logger
from app.schemas import ActionParams, ActionContext, DownloadTask, MediaType


class AddDownloadParams(ActionParams):
    """
    Parameters for adding downloads
    """
    downloader: Optional[str] = Field(default=None, description="下载器")
    save_path: Optional[str] = Field(default=None, description="保存路径")
    labels: Optional[str] = Field(default=None, description="标签(,分隔)")
    only_lack: Optional[bool] = Field(default=False, description="仅下载缺失的资源")


class AddDownloadAction(BaseAction):
    """
    Add downloads
    """

    # Downloads that have been added
    _added_downloads = []
    _has_error = False

    def __init__(self, action_id: str):
        super().__init__(action_id)
        self.downloadchain = DownloadChain()
        self.mediachain = MediaChain()
        self._added_downloads = []
        self._has_error = False

    @classmethod
    @property
    def name(cls) -> str:  # noqa
        return "添加下载"

    @classmethod
    @property
    def description(cls) -> str:  # noqa
        return "根据资源列表添加下载任务"

    @classmethod
    @property
    def data(cls) -> dict:  # noqa
        return AddDownloadParams().dict()

    @property
    def success(self) -> bool:
        return not self._has_error

    def execute(self, workflow_id: int, params: dict, context: ActionContext) -> ActionContext:
        """
        Add the torrents in the context as download tasks
        """
        params = AddDownloadParams(**params)
        _started = False
        for t in context.torrents:
            if global_vars.is_workflow_stopped(workflow_id):
                break
            # Check the cache
            cache_key = f"{t.torrent_info.site}-{t.torrent_info.title}"
            if self.check_cache(workflow_id, cache_key):
                logger.info(f"{t.torrent_info.title} 已添加过下载,跳过")
                continue
            if not t.meta_info:
                t.meta_info = MetaInfo(title=t.torrent_info.title, subtitle=t.torrent_info.description)
            if not t.media_info:
                t.media_info = self.mediachain.recognize_media(meta=t.meta_info)
                if not t.media_info:
                    self._has_error = True
                    logger.warning(f"{t.torrent_info.title} 未识别到媒体信息,无法下载")
                    continue
            if params.only_lack:
                exists_info = self.downloadchain.media_exists(t.media_info)
                if exists_info:
                    if t.media_info.type == MediaType.MOVIE:
                        # Movie
                        logger.warning(f"{t.torrent_info.title} 媒体库中已存在,跳过")
                        continue
                    else:
                        # TV series
                        exists_seasons = exists_info.seasons or {}
                        if len(t.meta_info.season_list) > 1:
                            # Do not download multi-season torrents
                            logger.warning(f"{t.meta_info.title} 有多季,跳过")
                            continue
                        else:
                            exists_episodes = exists_seasons.get(t.meta_info.begin_season)
                            if exists_episodes:
                                if set(t.meta_info.episode_list).issubset(exists_episodes):
                                    logger.warning(f"{t.meta_info.title} 第 {t.meta_info.begin_season} 季第 {t.meta_info.episode_list} 集已存在,跳过")
                                    continue

            _started = True
            did = self.downloadchain.download_single(context=t,
                                                     downloader=params.downloader,
                                                     save_path=params.save_path,
                                                     label=params.labels)
            if did:
                self._added_downloads.append(did)
                # Save to the cache
                self.save_cache(workflow_id, cache_key)

        if self._added_downloads:
            logger.info(f"已添加 {len(self._added_downloads)} 个下载任务")
            context.downloads.extend(
                [DownloadTask(download_id=did, downloader=params.downloader) for did in self._added_downloads]
            )
        elif _started:
            self._has_error = True

        self.job_done(f"已添加 {len(self._added_downloads)} 个下载任务")
        return context

app/actions/add_subscribe.py (new file, 92 lines)
@@ -0,0 +1,92 @@
from app.actions import BaseAction
from app.chain.subscribe import SubscribeChain
from app.core.config import settings, global_vars
from app.core.context import MediaInfo
from app.db.subscribe_oper import SubscribeOper
from app.log import logger
from app.schemas import ActionParams, ActionContext


class AddSubscribeParams(ActionParams):
    """
    Parameters for adding subscriptions
    """
    pass


class AddSubscribeAction(BaseAction):
    """
    Add subscriptions
    """

    _added_subscribes = []
    _has_error = False

    def __init__(self, action_id: str):
        super().__init__(action_id)
        self.subscribechain = SubscribeChain()
        self.subscribeoper = SubscribeOper()
        self._added_subscribes = []
        self._has_error = False

    @classmethod
    @property
    def name(cls) -> str:  # noqa
        return "添加订阅"

    @classmethod
    @property
    def description(cls) -> str:  # noqa
        return "根据媒体列表添加订阅"

    @classmethod
    @property
    def data(cls) -> dict:  # noqa
        return AddSubscribeParams().dict()

    @property
    def success(self) -> bool:
        return not self._has_error

    def execute(self, workflow_id: int, params: dict, context: ActionContext) -> ActionContext:
        """
        Add subscriptions for the items in medias when they do not exist yet
        """
        _started = False
        for media in context.medias:
            if global_vars.is_workflow_stopped(workflow_id):
                break
            # Check the cache
            cache_key = f"{media.type}-{media.title}-{media.year}-{media.season}"
            if self.check_cache(workflow_id, cache_key):
                logger.info(f"{media.title} {media.year} 已添加过订阅,跳过")
                continue
            mediainfo = MediaInfo()
            mediainfo.from_dict(media.dict())
            if self.subscribechain.exists(mediainfo):
                logger.info(f"{media.title} 已存在订阅")
                continue
            # Add the subscription
            _started = True
            sid, message = self.subscribechain.add(mtype=mediainfo.type,
                                                   title=mediainfo.title,
                                                   year=mediainfo.year,
                                                   tmdbid=mediainfo.tmdb_id,
                                                   season=mediainfo.season,
                                                   doubanid=mediainfo.douban_id,
                                                   bangumiid=mediainfo.bangumi_id,
                                                   username=settings.SUPERUSER)
            if sid:
                self._added_subscribes.append(sid)
                # Save to the cache
                self.save_cache(workflow_id, cache_key)

        if self._added_subscribes:
            logger.info(f"已添加 {len(self._added_subscribes)} 个订阅")
            for sid in self._added_subscribes:
                context.subscribes.append(self.subscribeoper.get(sid))
        elif _started:
            self._has_error = True

        self.job_done(f"已添加 {len(self._added_subscribes)} 个订阅")
        return context

app/actions/fetch_downloads.py (new file, 68 lines)
@@ -0,0 +1,68 @@
from app.actions import BaseAction, ActionChain
from app.core.config import global_vars
from app.schemas import ActionParams, ActionContext
from app.log import logger


class FetchDownloadsParams(ActionParams):
    """
    Parameters for fetching download tasks
    """
    pass


class FetchDownloadsAction(BaseAction):
    """
    Fetch download tasks
    """

    _downloads = []

    def __init__(self, action_id: str):
        super().__init__(action_id)
        self.chain = ActionChain()
        self._downloads = []

    @classmethod
    @property
    def name(cls) -> str:  # noqa
        return "获取下载任务"

    @classmethod
    @property
    def description(cls) -> str:  # noqa
        return "获取下载队列中的任务状态"

    @classmethod
    @property
    def data(cls) -> dict:  # noqa
        return FetchDownloadsParams().dict()

    @property
    def success(self) -> bool:
        return self.done

    def execute(self, workflow_id: int, params: dict, context: ActionContext) -> ActionContext:
        """
        Update the status of the download tasks in downloads
        """
        __all_complete = False
        for download in self._downloads:
            if global_vars.is_workflow_stopped(workflow_id):
                break
            logger.info(f"获取下载任务 {download.download_id} 状态 ...")
            torrents = self.chain.list_torrents(hashs=[download.download_id])
            if not torrents:
                download.completed = True
                continue
            for t in torrents:
                download.path = t.path
                if t.progress >= 100:
                    logger.info(f"下载任务 {download.download_id} 已完成")
                    download.completed = True
                else:
                    logger.info(f"下载任务 {download.download_id} 未完成")
                    download.completed = False
        if all([d.completed for d in self._downloads]):
            self.job_done()
        return context

app/actions/fetch_medias.py (new file, 176 lines)
@@ -0,0 +1,176 @@
from typing import List, Optional

from pydantic import Field

from app.actions import BaseAction
from app.chain.recommend import RecommendChain
from app.schemas import ActionParams, ActionContext
from app.core.config import settings, global_vars
from app.core.event import eventmanager
from app.log import logger
from app.schemas import RecommendSourceEventData, MediaInfo
from app.schemas.types import ChainEventType
from app.utils.http import RequestUtils


class FetchMediasParams(ActionParams):
    """
    Parameters for fetching media data
    """
    source_type: Optional[str] = Field(default="ranking", description="来源")
    sources: Optional[List[str]] = Field(default=[], description="榜单")
    api_path: Optional[str] = Field(default=None, description="API路径")


class FetchMediasAction(BaseAction):
    """
    Fetch media data
    """

    _inner_sources = []
    _medias = []
    _has_error = False

    def __init__(self, action_id: str):
        super().__init__(action_id)

        self._medias = []
        self._has_error = False
        self.__inner_sources = [
            {
                "func": RecommendChain().tmdb_trending,
                "name": '流行趋势',
            },
            {
                "func": RecommendChain().douban_movie_showing,
                "name": '正在热映',
            },
            {
                "func": RecommendChain().bangumi_calendar,
                "name": 'Bangumi每日放送',
            },
            {
                "func": RecommendChain().tmdb_movies,
                "name": 'TMDB热门电影',
            },
            {
                "func": RecommendChain().tmdb_tvs,
                "name": 'TMDB热门电视剧',
            },
            {
                "func": RecommendChain().douban_movie_hot,
                "name": '豆瓣热门电影',
            },
            {
                "func": RecommendChain().douban_tv_hot,
                "name": '豆瓣热门电视剧',
            },
            {
                "func": RecommendChain().douban_tv_animation,
                "name": '豆瓣热门动漫',
            },
            {
                "func": RecommendChain().douban_movies,
                "name": '豆瓣最新电影',
            },
            {
                "func": RecommendChain().douban_tvs,
                "name": '豆瓣最新电视剧',
            },
            {
                "func": RecommendChain().douban_movie_top250,
                "name": '豆瓣电影TOP250',
            },
            {
                "func": RecommendChain().douban_tv_weekly_chinese,
                "name": '豆瓣国产剧集榜',
            },
            {
                "func": RecommendChain().douban_tv_weekly_global,
                "name": '豆瓣全球剧集榜',
            }
        ]

        # Broadcast an event to request additional recommendation data sources
        event_data = RecommendSourceEventData()
        event = eventmanager.send_event(ChainEventType.RecommendSource, event_data)
        # Use the context data returned by the event
        if event and event.event_data:
            event_data: RecommendSourceEventData = event.event_data
            if event_data.extra_sources:
                self.__inner_sources.extend([s.dict() for s in event_data.extra_sources])

    @classmethod
    @property
    def name(cls) -> str:  # noqa
        return "获取媒体数据"

    @classmethod
    @property
    def description(cls) -> str:  # noqa
        return "获取榜单等媒体数据列表"

    @classmethod
    @property
    def data(cls) -> dict:  # noqa
        return FetchMediasParams().dict()

    @property
    def success(self) -> bool:
        return not self._has_error

    def __get_source(self, source: str):
        """
        Look up a data source by name
        """
        for s in self.__inner_sources:
            if s['name'] == source:
                return s
        return None

    def execute(self, workflow_id: int, params: dict, context: ActionContext) -> ActionContext:
        """
        Fetch media data and fill it into medias
        """
        params = FetchMediasParams(**params)
        try:
            if params.source_type == "ranking":
                for name in params.sources:
                    if global_vars.is_workflow_stopped(workflow_id):
                        break
                    source = self.__get_source(name)
                    if not source:
                        continue
                    logger.info(f"获取媒体数据 {source} ...")
                    results = []
                    if source.get("func"):
                        results = source['func']()
                    else:
                        # Call the internal API to fetch the data
                        api_url = f"http://127.0.0.1:{settings.PORT}/api/v1/{source['api_path']}?token={settings.API_TOKEN}"
                        res = RequestUtils(timeout=15).post_res(api_url)
                        if res:
                            results = res.json()
                    if results:
                        logger.info(f"{name} 获取到 {len(results)} 条数据")
                        self._medias.extend([MediaInfo(**r) for r in results])
                    else:
                        logger.error(f"{name} 获取数据失败")
            else:
                # Call the internal API to fetch the data
                api_url = f"http://127.0.0.1:{settings.PORT}{params.api_path}?token={settings.API_TOKEN}"
                res = RequestUtils(timeout=15).post_res(api_url)
                if res:
                    results = res.json()
                    if results:
                        logger.info(f"{params.api_path} 获取到 {len(results)} 条数据")
                        self._medias.extend([MediaInfo(**r) for r in results])
        except Exception as e:
            logger.error(f"获取媒体数据失败: {e}")
            self._has_error = True

        if self._medias:
            context.medias.extend(self._medias)

        self.job_done(f"获取到 {len(self._medias)} 条媒体数据")
        return context

app/actions/fetch_rss.py (new file, 117 lines)
@@ -0,0 +1,117 @@
from typing import Optional

from pydantic import Field

from app.actions import BaseAction, ActionChain
from app.core.config import settings, global_vars
from app.core.context import Context
from app.core.metainfo import MetaInfo
from app.helper.rss import RssHelper
from app.log import logger
from app.schemas import ActionParams, ActionContext, TorrentInfo


class FetchRssParams(ActionParams):
    """
    Parameters for fetching an RSS feed
    """
    url: str = Field(default=None, description="RSS地址")
    proxy: Optional[bool] = Field(default=False, description="是否使用代理")
    timeout: Optional[int] = Field(default=15, description="超时时间")
    content_type: Optional[str] = Field(default=None, description="Content-Type")
    referer: Optional[str] = Field(default=None, description="Referer")
    ua: Optional[str] = Field(default=None, description="User-Agent")
    match_media: Optional[str] = Field(default=None, description="匹配媒体信息")


class FetchRssAction(BaseAction):
    """
    Fetch RSS resources
    """

    _rss_torrents = []
    _has_error = False

    def __init__(self, action_id: str):
        super().__init__(action_id)
        self.rsshelper = RssHelper()
        self.chain = ActionChain()
        self._rss_torrents = []
        self._has_error = False

    @classmethod
    @property
    def name(cls) -> str:  # noqa
        return "获取RSS资源"

    @classmethod
    @property
    def description(cls) -> str:  # noqa
        return "订阅RSS地址获取资源"

    @classmethod
    @property
    def data(cls) -> dict:  # noqa
        return FetchRssParams().dict()

    @property
    def success(self) -> bool:
        return not self._has_error

    def execute(self, workflow_id: int, params: dict, context: ActionContext) -> ActionContext:
        """
        Request the RSS url and parse the response into a torrent list
        """
        params = FetchRssParams(**params)
        if not params.url:
            return context

        headers = {}
        if params.content_type:
            headers["Content-Type"] = params.content_type
        if params.referer:
            headers["Referer"] = params.referer
        if params.ua:
            headers["User-Agent"] = params.ua

        rss_items = self.rsshelper.parse(url=params.url,
                                         proxy=settings.PROXY if params.proxy else None,
                                         timeout=params.timeout,
                                         headers=headers)
        if rss_items is None or rss_items is False:
            logger.error(f'RSS地址 {params.url} 请求失败!')
            self._has_error = True
            return context

        if not rss_items:
            logger.error(f'RSS地址 {params.url} 未获取到RSS数据!')
            return context

        # Assemble the torrents
        for item in rss_items:
            if global_vars.is_workflow_stopped(workflow_id):
                break
            if not item.get("title"):
                continue
            torrentinfo = TorrentInfo(
                title=item.get("title"),
                enclosure=item.get("enclosure"),
                page_url=item.get("link"),
                size=item.get("size"),
                pubdate=item["pubdate"].strftime("%Y-%m-%d %H:%M:%S") if item.get("pubdate") else None,
            )
            meta = MetaInfo(title=torrentinfo.title, subtitle=torrentinfo.description)
            mediainfo = None
            if params.match_media:
                mediainfo = self.chain.recognize_media(meta)
                if not mediainfo:
                    logger.warning(f"{torrentinfo.title} 未识别到媒体信息")
                    continue
            self._rss_torrents.append(Context(meta_info=meta, media_info=mediainfo, torrent_info=torrentinfo))

        if self._rss_torrents:
            logger.info(f"获取到 {len(self._rss_torrents)} 个RSS资源")
            context.torrents.extend(self._rss_torrents)

        self.job_done(f"获取到 {len(self._rss_torrents)} 个资源")
        return context

app/actions/fetch_torrents.py (new file, 104 lines)
@@ -0,0 +1,104 @@
import random
import time
from typing import Optional, List

from pydantic import Field

from app.actions import BaseAction
from app.chain.search import SearchChain
from app.core.config import global_vars
from app.log import logger
from app.schemas import ActionParams, ActionContext, MediaType


class FetchTorrentsParams(ActionParams):
    """
    Parameters for searching site torrents
    """
    search_type: Optional[str] = Field(default="keyword", description="搜索类型")
    name: Optional[str] = Field(default=None, description="资源名称")
    year: Optional[str] = Field(default=None, description="年份")
    type: Optional[str] = Field(default=None, description="资源类型 (电影/电视剧)")
    season: Optional[int] = Field(default=None, description="季度")
    sites: Optional[List[int]] = Field(default=[], description="站点列表")
    match_media: Optional[bool] = Field(default=False, description="匹配媒体信息")


class FetchTorrentsAction(BaseAction):
    """
    Search site torrents
    """

    _torrents = []

    def __init__(self, action_id: str):
        super().__init__(action_id)
        self.searchchain = SearchChain()
        self._torrents = []

    @classmethod
    @property
    def name(cls) -> str:  # noqa
        return "搜索站点资源"

    @classmethod
    @property
    def description(cls) -> str:  # noqa
        return "搜索站点种子资源列表"

    @classmethod
    @property
    def data(cls) -> dict:  # noqa
        return FetchTorrentsParams().dict()

    @property
    def success(self) -> bool:
        return self.done

    def execute(self, workflow_id: int, params: dict, context: ActionContext) -> ActionContext:
        """
        Search the sites and collect the torrent list
        """
        params = FetchTorrentsParams(**params)
        if params.search_type == "keyword":
            # Search by keyword
            torrents = self.searchchain.search_by_title(title=params.name, sites=params.sites, cache_local=False)
            for torrent in torrents:
                if global_vars.is_workflow_stopped(workflow_id):
                    break
                if params.year and torrent.meta_info.year != params.year:
                    continue
                if params.type and torrent.media_info and torrent.media_info.type != MediaType(params.type):
                    continue
                if params.season and torrent.meta_info.begin_season != params.season:
                    continue
                # Recognize media info
                if params.match_media:
                    torrent.media_info = self.searchchain.recognize_media(torrent.meta_info)
                    if not torrent.media_info:
                        logger.warning(f"{torrent.torrent_info.title} 未识别到媒体信息")
                        continue
                self._torrents.append(torrent)
        else:
            # Search for each media item in the context
            for media in context.medias:
                if global_vars.is_workflow_stopped(workflow_id):
                    break
                torrents = self.searchchain.search_by_id(tmdbid=media.tmdb_id,
                                                         doubanid=media.douban_id,
                                                         mtype=MediaType(media.type),
                                                         sites=params.sites)
                for torrent in torrents:
                    self._torrents.append(torrent)

                # Sleep for a random 5-30 seconds
                sleep_time = random.randint(5, 30)
                logger.info(f"随机休眠 {sleep_time} 秒 ...")
                time.sleep(sleep_time)

        if self._torrents:
            context.torrents.extend(self._torrents)
            logger.info(f"共搜索到 {len(self._torrents)} 条资源")

        self.job_done(f"搜索到 {len(self._torrents)} 个资源")
        return context

app/actions/filter_medias.py (new file, 71 lines)
@@ -0,0 +1,71 @@
from typing import Optional

from pydantic import Field

from app.actions import BaseAction
from app.core.config import global_vars
from app.log import logger
from app.schemas import ActionParams, ActionContext


class FilterMediasParams(ActionParams):
    """
    Parameters for filtering media data
    """
    type: Optional[str] = Field(default=None, description="媒体类型 (电影/电视剧)")
    vote: Optional[int] = Field(default=0, description="评分")
    year: Optional[str] = Field(default=None, description="年份")


class FilterMediasAction(BaseAction):
    """
    Filter media data
    """

    _medias = []

    def __init__(self, action_id: str):
        super().__init__(action_id)
        self._medias = []

    @classmethod
    @property
    def name(cls) -> str:  # noqa
        return "过滤媒体数据"

    @classmethod
    @property
    def description(cls) -> str:  # noqa
        return "对媒体数据列表进行过滤"

    @classmethod
    @property
    def data(cls) -> dict:  # noqa
        return FilterMediasParams().dict()

    @property
    def success(self) -> bool:
        return self.done

    def execute(self, workflow_id: int, params: dict, context: ActionContext) -> ActionContext:
        """
        Filter the media data in medias
        """
        params = FilterMediasParams(**params)
        for media in context.medias:
            if global_vars.is_workflow_stopped(workflow_id):
                break
            if params.type and media.type != params.type:
                continue
            if params.vote and media.vote_average < params.vote:
                continue
            if params.year and media.year != params.year:
                continue
            self._medias.append(media)

        logger.info(f"过滤后剩余 {len(self._medias)} 条媒体数据")

        context.medias = self._medias

        self.job_done(f"过滤后剩余 {len(self._medias)} 条媒体数据")
        return context

app/actions/filter_torrents.py (new file, 88 lines)
@@ -0,0 +1,88 @@
from typing import Optional, List

from pydantic import Field

from app.actions import BaseAction, ActionChain
from app.core.config import global_vars
from app.helper.torrent import TorrentHelper
from app.log import logger
from app.schemas import ActionParams, ActionContext


class FilterTorrentsParams(ActionParams):
    """
    Parameters for filtering torrents
    """
    rule_groups: Optional[List[str]] = Field(default=[], description="规则组")
    quality: Optional[str] = Field(default=None, description="资源质量")
    resolution: Optional[str] = Field(default=None, description="资源分辨率")
    effect: Optional[str] = Field(default=None, description="特效")
    include: Optional[str] = Field(default=None, description="包含规则")
    exclude: Optional[str] = Field(default=None, description="排除规则")
    size: Optional[str] = Field(default=None, description="资源大小范围(MB)")


class FilterTorrentsAction(BaseAction):
    """
    Filter torrents
    """

    _torrents = []

    def __init__(self, action_id: str):
        super().__init__(action_id)
        self.torrenthelper = TorrentHelper()
        self.chain = ActionChain()
        self._torrents = []

    @classmethod
    @property
    def name(cls) -> str:  # noqa
        return "过滤资源"

    @classmethod
    @property
    def description(cls) -> str:  # noqa
        return "对资源列表数据进行过滤"

    @classmethod
    @property
    def data(cls) -> dict:  # noqa
        return FilterTorrentsParams().dict()

    @property
    def success(self) -> bool:
        return self.done

    def execute(self, workflow_id: int, params: dict, context: ActionContext) -> ActionContext:
        """
        Filter the torrents in torrents
        """
        params = FilterTorrentsParams(**params)
        for torrent in context.torrents:
            if global_vars.is_workflow_stopped(workflow_id):
                break
            if self.torrenthelper.filter_torrent(
                    torrent_info=torrent.torrent_info,
                    filter_params={
                        "quality": params.quality,
                        "resolution": params.resolution,
                        "effect": params.effect,
                        "include": params.include,
                        "exclude": params.exclude,
                        "size": params.size
                    }
            ):
                if self.chain.filter_torrents(
                        rule_groups=params.rule_groups,
                        torrent_list=[torrent.torrent_info],
                        mediainfo=torrent.media_info
                ):
                    self._torrents.append(torrent)

        logger.info(f"过滤后剩余 {len(self._torrents)} 个资源")

        context.torrents = self._torrents

        self.job_done(f"过滤后剩余 {len(self._torrents)} 个资源")
        return context

app/actions/scan_file.py (new file, 86 lines)
@@ -0,0 +1,86 @@
from pathlib import Path
from typing import Optional

from pydantic import Field

from app.actions import BaseAction
from app.chain.storage import StorageChain
from app.core.config import global_vars, settings
from app.log import logger
from app.schemas import ActionParams, ActionContext


class ScanFileParams(ActionParams):
    """
    Parameters for scanning a directory
    """
    # Storage
    storage: Optional[str] = Field(default="local", description="存储")
    directory: Optional[str] = Field(default=None, description="目录")


class ScanFileAction(BaseAction):
    """
    Scan a directory for files
    """

    _fileitems = []
    _has_error = False

    def __init__(self, action_id: str):
        super().__init__(action_id)
        self.storagechain = StorageChain()
        self._fileitems = []
        self._has_error = False

    @classmethod
    @property
    def name(cls) -> str:  # noqa
        return "扫描目录"

    @classmethod
    @property
    def description(cls) -> str:  # noqa
        return "扫描目录文件到队列"

    @classmethod
    @property
    def data(cls) -> dict:  # noqa
        return ScanFileParams().dict()

    @property
    def success(self) -> bool:
        return not self._has_error

    def execute(self, workflow_id: int, params: dict, context: ActionContext) -> ActionContext:
        """
        Scan all files in the directory and record them in fileitems
        """
        params = ScanFileParams(**params)
        if not params.storage or not params.directory:
            return context
        fileitem = self.storagechain.get_file_item(params.storage, Path(params.directory))
        if not fileitem:
            logger.error(f"目录不存在: 【{params.storage}】{params.directory}")
            self._has_error = True
            return context
        files = self.storagechain.list_files(fileitem, recursion=True)
        for file in files:
            if global_vars.is_workflow_stopped(workflow_id):
                break
            if not file.extension or f".{file.extension.lower()}" not in settings.RMT_MEDIAEXT:
                continue
            # Check the cache
            cache_key = f"{file.path}"
            if self.check_cache(workflow_id, cache_key):
                logger.info(f"{file.path} 已处理过,跳过")
                continue
            self._fileitems.append(file)
            # Save to the cache
            self.save_cache(workflow_id, cache_key)

        if self._fileitems:
            context.fileitems.extend(self._fileitems)

        self.job_done(f"扫描到 {len(self._fileitems)} 个文件")
        return context

app/actions/scrape_file.py (new file, 86 lines)
@@ -0,0 +1,86 @@
from pathlib import Path

from app.actions import BaseAction
from app.core.config import global_vars
from app.schemas import ActionParams, ActionContext
from app.chain.media import MediaChain
from app.chain.storage import StorageChain
from app.core.metainfo import MetaInfoPath
from app.log import logger


class ScrapeFileParams(ActionParams):
    """
    Parameters for scraping files
    """
    pass


class ScrapeFileAction(BaseAction):
    """
    Scrape files
    """

    _scraped_files = []
    _has_error = False

    def __init__(self, action_id: str):
        super().__init__(action_id)
        self.storagechain = StorageChain()
        self.mediachain = MediaChain()
        self._scraped_files = []
        self._has_error = False

    @classmethod
    @property
    def name(cls) -> str:  # noqa
        return "刮削文件"

    @classmethod
    @property
    def description(cls) -> str:  # noqa
        return "刮削媒体信息和图片"

    @classmethod
    @property
    def data(cls) -> dict:  # noqa
        return ScrapeFileParams().dict()

    @property
    def success(self) -> bool:
        return not self._has_error

    def execute(self, workflow_id: int, params: dict, context: ActionContext) -> ActionContext:
        """
        Scrape all the files in fileitems
        """
        # Failure count
        _failed_count = 0
        for fileitem in context.fileitems:
            if global_vars.is_workflow_stopped(workflow_id):
                break
            if fileitem in self._scraped_files:
                continue
            if not self.storagechain.exists(fileitem):
                continue
            # Check the cache
            cache_key = f"{fileitem.path}"
            if self.check_cache(workflow_id, cache_key):
                logger.info(f"{fileitem.path} 已刮削过,跳过")
                continue
            meta = MetaInfoPath(Path(fileitem.path))
            mediainfo = self.mediachain.recognize_media(meta)
            if not mediainfo:
                _failed_count += 1
                logger.info(f"{fileitem.path} 未识别到媒体信息,无法刮削")
                continue
            self.mediachain.scrape_metadata(fileitem=fileitem, meta=meta, mediainfo=mediainfo)
            self._scraped_files.append(fileitem)
            # Save to the cache
            self.save_cache(workflow_id, cache_key)

        if not self._scraped_files and _failed_count:
            self._has_error = True

        self.job_done(f"成功刮削 {len(self._scraped_files)} 个文件,失败 {_failed_count} 个")
        return context

app/actions/send_event.py (new file, 48 lines)
@@ -0,0 +1,48 @@
from app.actions import BaseAction
from app.core.event import eventmanager
from app.schemas import ActionParams, ActionContext
from app.schemas.types import ChainEventType


class SendEventParams(ActionParams):
    """
    Parameters for sending events
    """
    pass


class SendEventAction(BaseAction):
    """
    Send an event
    """

    @classmethod
    @property
    def name(cls) -> str:  # noqa
        return "发送事件"

    @classmethod
    @property
    def description(cls) -> str:  # noqa
        return "发送任务执行事件"

    @classmethod
    @property
    def data(cls) -> dict:  # noqa
        return SendEventParams().dict()

    @property
    def success(self) -> bool:
        return self.done

    def execute(self, workflow_id: int, params: dict, context: ActionContext) -> ActionContext:
        """
        Send a workflow event so that plugins can intervene in the workflow execution
        """
        # Broadcast the workflow execution event and update the execution context with the result
        event = eventmanager.send_event(ChainEventType.WorkflowExecution, context)
        if event and event.event_data:
            context = event.event_data

        self.job_done()
        return context

app/actions/send_message.py (new file, 74 lines)
@@ -0,0 +1,74 @@
from typing import List, Optional, Union

from pydantic import Field

from app.actions import BaseAction, ActionChain
from app.schemas import ActionParams, ActionContext, Notification
from app.core.config import settings


class SendMessageParams(ActionParams):
    """
    Parameters for sending messages
    """
    client: Optional[List[str]] = Field(default=[], description="消息渠道")
    userid: Optional[Union[str, int]] = Field(default=None, description="用户ID")


class SendMessageAction(BaseAction):
    """
    Send messages
    """

    def __init__(self, action_id: str):
        super().__init__(action_id)
        self.chain = ActionChain()

    @classmethod
    @property
    def name(cls) -> str:  # noqa
        return "发送消息"

    @classmethod
    @property
    def description(cls) -> str:  # noqa
        return "发送任务执行消息"

    @classmethod
    @property
    def data(cls) -> dict:  # noqa
        return SendMessageParams().dict()

    @property
    def success(self) -> bool:
        return self.done

    def execute(self, workflow_id: int, params: dict, context: ActionContext) -> ActionContext:
        """
        Send the execution summary message
        """
        params = SendMessageParams(**params)
        msg_text = f"当前进度:{context.progress}%"
        index = 1
        if context.execute_history:
            for history in context.execute_history:
                if not history.message:
                    continue
                msg_text += f"\n{index}. {history.action}:{history.message}"
                index += 1
        # Send the message
        if not params.client:
            params.client = [""]
        for client in params.client:
            self.chain.post_message(
                Notification(
                    source=client,
                    userid=params.userid,
                    title="【工作流执行结果】",
                    text=msg_text,
                    link=settings.MP_DOMAIN("#/workflow")
                )
            )

        self.job_done()
        return context

app/actions/transfer_file.py (new file, 139 lines)
@@ -0,0 +1,139 @@
import copy
from pathlib import Path
from typing import Optional

from pydantic import Field

from app.actions import BaseAction
from app.core.config import global_vars
from app.db.transferhistory_oper import TransferHistoryOper
from app.schemas import ActionParams, ActionContext
from app.chain.storage import StorageChain
from app.chain.transfer import TransferChain
from app.log import logger


class TransferFileParams(ActionParams):
    """
    Parameters for organizing files
    """
    # Source
    source: Optional[str] = Field(default="downloads", description="来源")


class TransferFileAction(BaseAction):
    """
    Organize files
    """

    _fileitems = []
    _has_error = False

    def __init__(self, action_id: str):
        super().__init__(action_id)
        self.transferchain = TransferChain()
        self.storagechain = StorageChain()
        self.transferhis = TransferHistoryOper()
        self._fileitems = []
        self._has_error = False

    @classmethod
    @property
    def name(cls) -> str:  # noqa
        return "整理文件"

    @classmethod
    @property
    def description(cls) -> str:  # noqa
        return "整理队列中的文件"

    @classmethod
    @property
    def data(cls) -> dict:  # noqa
        return TransferFileParams().dict()

    @property
    def success(self) -> bool:
        return not self._has_error

    def execute(self, workflow_id: int, params: dict, context: ActionContext) -> ActionContext:
        """
        Organize files from downloads / fileitems and record the results in fileitems
        """

        def check_continue():
            """
            Check whether to continue organizing files
            """
            if global_vars.is_workflow_stopped(workflow_id):
                return False
            return True

        params = TransferFileParams(**params)
        # Failure count
        _failed_count = 0
        if params.source == "downloads":
            # Organize files from download tasks
            for download in context.downloads:
                if global_vars.is_workflow_stopped(workflow_id):
                    break
                if not download.completed:
                    logger.info(f"下载任务 {download.download_id} 未完成")
                    continue
                # Check the cache
                cache_key = f"{download.download_id}"
                if self.check_cache(workflow_id, cache_key):
                    logger.info(f"{download.path} 已整理过,跳过")
                    continue
                fileitem = self.storagechain.get_file_item(storage="local", path=Path(download.path))
                if not fileitem:
                    logger.info(f"文件 {download.path} 不存在")
                    continue
                transferd = self.transferhis.get_by_src(fileitem.path, storage=fileitem.storage)
                if transferd:
                    # Skip files that have already been organized
                    continue
                logger.info(f"开始整理文件 {download.path} ...")
                state, errmsg = self.transferchain.do_transfer(fileitem, background=False)
                if not state:
                    _failed_count += 1
                    logger.error(f"整理文件 {download.path} 失败: {errmsg}")
                    continue
                logger.info(f"整理文件 {download.path} 完成")
                self._fileitems.append(fileitem)
                self.save_cache(workflow_id, cache_key)
        else:
            # Organize files from fileitems
            for fileitem in copy.deepcopy(context.fileitems):
                if not check_continue():
                    break
                # Check the cache
                cache_key = f"{fileitem.path}"
                if self.check_cache(workflow_id, cache_key):
                    logger.info(f"{fileitem.path} 已整理过,跳过")
                    continue
                transferd = self.transferhis.get_by_src(fileitem.path, storage=fileitem.storage)
                if transferd:
                    # Skip files that have already been organized
                    continue
                logger.info(f"开始整理文件 {fileitem.path} ...")
                state, errmsg = self.transferchain.do_transfer(fileitem, background=False,
                                                               continue_callback=check_continue)
                if not state:
                    _failed_count += 1
                    logger.error(f"整理文件 {fileitem.path} 失败: {errmsg}")
                    continue
                logger.info(f"整理文件 {fileitem.path} 完成")
                # Remove the organized file from fileitems
                context.fileitems.remove(fileitem)
                self._fileitems.append(fileitem)
                # Record the organized file
                self.save_cache(workflow_id, cache_key)

        if self._fileitems:
            context.fileitems.extend(self._fileitems)
        elif _failed_count:
            self._has_error = True

        self.job_done(f"整理成功 {len(self._fileitems)} 个文件,失败 {_failed_count} 个")
        return context

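Each action above reads from and appends to the same shared ActionContext (medias, torrents, downloads, fileitems), so a workflow amounts to an ordered fold over a list of actions. The real dispatch lives in WorkFlowManager / WorkflowChain, which the new API endpoints below reference but this diff does not include; the loop here is only a hand-rolled sketch of that data flow, with placeholder workflow id and parameter values and the assumption that ActionContext defaults its list fields to empty:

from app.actions.add_download import AddDownloadAction
from app.actions.fetch_rss import FetchRssAction
from app.actions.filter_torrents import FilterTorrentsAction
from app.schemas import ActionContext

workflow_id = 1                  # illustrative workflow id
context = ActionContext()        # assumes the context fields default to empty lists

# Ordered steps: fetch an RSS feed, filter the torrents, queue the downloads.
steps = [
    (FetchRssAction("fetch_rss"), {"url": "https://example.org/feed.xml"}),
    (FilterTorrentsAction("filter_torrents"), {"include": "1080p", "exclude": "HDTV"}),
    (AddDownloadAction("add_download"), {"only_lack": True}),
]
for action, params in steps:
    # Every execute() takes the previous context and returns the updated one
    context = action.execute(workflow_id, params, context)
    if not action.success:
        break
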
@@ -2,7 +2,7 @@ from fastapi import APIRouter
 
 from app.api.endpoints import login, user, site, message, webhook, subscribe, \
     media, douban, search, plugin, tmdb, history, system, download, dashboard, \
-    transfer, mediaserver, bangumi, storage, discover, recommend
+    transfer, mediaserver, bangumi, storage, discover, recommend, workflow
 
 api_router = APIRouter()
 api_router.include_router(login.router, prefix="/login", tags=["login"])
@@ -26,3 +26,4 @@ api_router.include_router(mediaserver.router, prefix="/mediaserver", tags=["medi
 api_router.include_router(bangumi.router, prefix="/bangumi", tags=["bangumi"])
 api_router.include_router(discover.router, prefix="/discover", tags=["discover"])
 api_router.include_router(recommend.router, prefix="/recommend", tags=["recommend"])
+api_router.include_router(workflow.router, prefix="/workflow", tags=["workflow"])

@@ -29,8 +29,9 @@ def search_by_id(mediaid: str,
                  mtype: str = None,
                  area: str = "title",
                  title: str = None,
-                 year: int = None,
+                 year: str = None,
                  season: str = None,
+                 sites: str = None,
                  _: schemas.TokenPayload = Depends(verify_token)) -> Any:
     """
     根据TMDBID/豆瓣ID精确搜索站点资源 tmdb:/douban:/bangumi:
@@ -39,6 +40,10 @@
         mtype = MediaType(mtype)
     if season:
         season = int(season)
+    if sites:
+        site_list = [int(site) for site in sites.split(",") if site]
+    else:
+        site_list = None
     torrents = None
     # 根据前缀识别媒体ID
     if mediaid.startswith("tmdb:"):
@@ -48,11 +53,13 @@
             doubaninfo = MediaChain().get_doubaninfo_by_tmdbid(tmdbid=tmdbid, mtype=mtype)
             if doubaninfo:
                 torrents = SearchChain().search_by_id(doubanid=doubaninfo.get("id"),
-                                                      mtype=mtype, area=area, season=season)
+                                                      mtype=mtype, area=area, season=season,
+                                                      sites=site_list)
             else:
                 return schemas.Response(success=False, message="未识别到豆瓣媒体信息")
         else:
-            torrents = SearchChain().search_by_id(tmdbid=tmdbid, mtype=mtype, area=area, season=season)
+            torrents = SearchChain().search_by_id(tmdbid=tmdbid, mtype=mtype, area=area, season=season,
+                                                  sites=site_list)
     elif mediaid.startswith("douban:"):
         doubanid = mediaid.replace("douban:", "")
         if settings.RECOGNIZE_SOURCE == "themoviedb":
@@ -62,11 +69,13 @@
                 if tmdbinfo.get('season') and not season:
                     season = tmdbinfo.get('season')
                 torrents = SearchChain().search_by_id(tmdbid=tmdbinfo.get("id"),
-                                                      mtype=mtype, area=area, season=season)
+                                                      mtype=mtype, area=area, season=season,
+                                                      sites=site_list)
             else:
                 return schemas.Response(success=False, message="未识别到TMDB媒体信息")
         else:
-            torrents = SearchChain().search_by_id(doubanid=doubanid, mtype=mtype, area=area, season=season)
+            torrents = SearchChain().search_by_id(doubanid=doubanid, mtype=mtype, area=area, season=season,
+                                                  sites=site_list)
     elif mediaid.startswith("bangumi:"):
         bangumiid = int(mediaid.replace("bangumi:", ""))
         if settings.RECOGNIZE_SOURCE == "themoviedb":
@@ -74,7 +83,8 @@
             tmdbinfo = MediaChain().get_tmdbinfo_by_bangumiid(bangumiid=bangumiid)
             if tmdbinfo:
                 torrents = SearchChain().search_by_id(tmdbid=tmdbinfo.get("id"),
-                                                      mtype=mtype, area=area, season=season)
+                                                      mtype=mtype, area=area, season=season,
+                                                      sites=site_list)
             else:
                 return schemas.Response(success=False, message="未识别到TMDB媒体信息")
         else:
@@ -82,7 +92,8 @@
             doubaninfo = MediaChain().get_doubaninfo_by_bangumiid(bangumiid=bangumiid)
             if doubaninfo:
                 torrents = SearchChain().search_by_id(doubanid=doubaninfo.get("id"),
-                                                      mtype=mtype, area=area, season=season)
+                                                      mtype=mtype, area=area, season=season,
+                                                      sites=site_list)
             else:
                 return schemas.Response(success=False, message="未识别到豆瓣媒体信息")
         else:
@@ -133,12 +144,13 @@
 @router.get("/title", summary="模糊搜索资源", response_model=schemas.Response)
 def search_by_title(keyword: str = None,
                     page: int = 0,
-                    site: int = None,
+                    sites: str = None,
                     _: schemas.TokenPayload = Depends(verify_token)) -> Any:
     """
     根据名称模糊搜索站点资源,支持分页,关键词为空是返回首页资源
     """
-    torrents = SearchChain().search_by_title(title=keyword, page=page, site=site)
+    torrents = SearchChain().search_by_title(title=keyword, page=page,
+                                             sites=[int(site) for site in sites.split(",") if site] if sites else None)
     if not torrents:
         return schemas.Response(success=False, message="未搜索到任何资源")
     return schemas.Response(success=True, data=[torrent.to_dict() for torrent in torrents])

@@ -1,4 +1,4 @@
-from typing import List, Any
+from typing import List, Any, Dict
 
 from fastapi import APIRouter, Depends, HTTPException
 from sqlalchemy.orm import Session
@@ -259,8 +259,41 @@
     })
 
 
+@router.get("/category/{site_id}", summary="站点分类", response_model=List[schemas.SiteCategory])
+def site_category(site_id: int,
+                  db: Session = Depends(get_db),
+                  _: schemas.TokenPayload = Depends(verify_token)) -> Any:
+    """
+    获取站点分类
+    """
+    site = Site.get(db, site_id)
+    if not site:
+        raise HTTPException(
+            status_code=404,
+            detail=f"站点 {site_id} 不存在",
+        )
+    indexer = SitesHelper().get_indexer(site.domain)
+    if not indexer:
+        raise HTTPException(
+            status_code=404,
+            detail=f"站点 {site.domain} 不支持",
+        )
+    category: Dict[str, List[dict]] = indexer.get('category') or []
+    if not category:
+        return []
+    result = []
+    for cats in category.values():
+        for cat in cats:
+            if cat not in result:
+                result.append(cat)
+    return result
+
+
 @router.get("/resource/{site_id}", summary="站点资源", response_model=List[schemas.TorrentInfo])
 def site_resource(site_id: int,
+                  keyword: str = None,
+                  cat: str = None,
+                  page: int = 0,
                   db: Session = Depends(get_db),
                   _: schemas.TokenPayload = Depends(get_current_active_superuser)) -> Any:
     """
@@ -272,7 +305,7 @@ def site_resource(site_id: int,
             status_code=404,
             detail=f"站点 {site_id} 不存在",
         )
-    torrents = TorrentsChain().browse(domain=site.domain)
+    torrents = TorrentsChain().browse(domain=site.domain, keyword=keyword, cat=cat, page=page)
     if not torrents:
         return []
     return [torrent.to_dict() for torrent in torrents]

@@ -8,6 +8,7 @@ from pathlib import Path
 from typing import Optional, Union
 
 import aiofiles
+import pillow_avif # noqa 用于自动注册AVIF支持
 from PIL import Image
 from fastapi import APIRouter, Depends, HTTPException, Header, Request, Response
 from fastapi.responses import StreamingResponse
@@ -23,7 +24,7 @@ from app.db.models import User
 from app.db.systemconfig_oper import SystemConfigOper
 from app.db.user_oper import get_current_active_superuser
 from app.helper.mediaserver import MediaServerHelper
-from app.helper.message import MessageHelper
+from app.helper.message import MessageHelper, MessageQueueManager
 from app.helper.progress import ProgressHelper
 from app.helper.rule import RuleHelper
 from app.helper.sites import SitesHelper
@@ -50,7 +51,6 @@ def fetch_image(
     """
     处理图片缓存逻辑,支持HTTP缓存和磁盘缓存
     """
-
     if not url:
         raise HTTPException(status_code=404, detail="URL not provided")
 
@@ -68,6 +68,10 @@
     sanitized_path = SecurityUtils.sanitize_url_path(url)
     cache_path = settings.CACHE_PATH / "images" / sanitized_path
 
+    # 没有文件类型,则添加后缀,在恶意文件类型和实际需求下的折衷选择
+    if not cache_path.suffix:
+        cache_path = cache_path.with_suffix(".jpg")
+
     # 确保缓存路径和文件类型合法
     if not SecurityUtils.is_safe_path(settings.CACHE_PATH, cache_path, settings.SECURITY_IMAGE_SUFFIXES):
         raise HTTPException(status_code=400, detail="Invalid cache path or file type")
@@ -88,7 +92,8 @@
     # 请求远程图片
     referer = "https://movie.douban.com/" if "doubanio.com" in url else None
     proxies = settings.PROXY if proxy else None
-    response = RequestUtils(ua=settings.USER_AGENT, proxies=proxies, referer=referer).get_res(url=url)
+    response = RequestUtils(ua=settings.USER_AGENT, proxies=proxies, referer=referer,
+                            accept_type="image/avif,image/webp,image/apng,*/*").get_res(url=url)
     if not response:
         raise HTTPException(status_code=502, detail="Failed to fetch the image from the remote server")
 
@@ -474,6 +479,7 @@ def reload_module(_: User = Depends(get_current_active_superuser)):
     """
     重新加载模块(仅管理员)
     """
+    MessageQueueManager().init_config()
     ModuleManager().reload()
     Scheduler().init()
     Monitor().init()

app/api/endpoints/workflow.py (new file, 162 lines)
@@ -0,0 +1,162 @@
|
||||
from datetime import datetime
|
||||
from typing import List, Any
|
||||
|
||||
from fastapi import APIRouter, Depends
|
||||
from sqlalchemy.orm import Session
|
||||
|
||||
from app import schemas
|
||||
from app.core.config import global_vars
|
||||
from app.core.workflow import WorkFlowManager
|
||||
from app.db import get_db
|
||||
from app.db.models.workflow import Workflow
|
||||
from app.db.systemconfig_oper import SystemConfigOper
|
||||
from app.db.user_oper import get_current_active_user
|
||||
from app.chain.workflow import WorkflowChain
|
||||
from app.scheduler import Scheduler
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
|
||||
@router.get("/", summary="所有工作流", response_model=List[schemas.Workflow])
|
||||
def list_workflows(db: Session = Depends(get_db),
|
||||
_: schemas.TokenPayload = Depends(get_current_active_user)) -> Any:
|
||||
"""
|
||||
获取工作流列表
|
||||
"""
|
||||
return Workflow.list(db)
|
||||
|
||||
|
||||
@router.post("/", summary="创建工作流", response_model=schemas.Response)
|
||||
def create_workflow(workflow: schemas.Workflow,
|
||||
db: Session = Depends(get_db),
|
||||
_: schemas.TokenPayload = Depends(get_current_active_user)) -> Any:
|
||||
"""
|
||||
创建工作流
|
||||
"""
|
||||
if Workflow.get_by_name(db, workflow.name):
|
||||
return schemas.Response(success=False, message="已存在相同名称的工作流")
|
||||
if not workflow.add_time:
|
||||
workflow.add_time = datetime.strftime(datetime.now(), "%Y-%m-%d %H:%M:%S")
|
||||
if not workflow.state:
|
||||
workflow.state = "P"
|
||||
Workflow(**workflow.dict()).create(db)
|
||||
return schemas.Response(success=True, message="创建工作流成功")
|
||||
|
||||
|
||||
@router.get("/actions", summary="所有动作", response_model=List[dict])
|
||||
def list_actions(_: schemas.TokenPayload = Depends(get_current_active_user)) -> Any:
|
||||
"""
|
||||
获取所有动作
|
||||
"""
|
||||
return WorkFlowManager().list_actions()
|
||||
|
||||
|
||||
@router.get("/{workflow_id}", summary="工作流详情", response_model=schemas.Workflow)
|
||||
def get_workflow(workflow_id: int,
|
||||
db: Session = Depends(get_db),
|
||||
_: schemas.TokenPayload = Depends(get_current_active_user)) -> Any:
|
||||
"""
|
||||
获取工作流详情
|
||||
"""
|
||||
return Workflow.get(db, workflow_id)
|
||||
|
||||
|
||||
@router.put("/{workflow_id}", summary="更新工作流", response_model=schemas.Response)
|
||||
def update_workflow(workflow: schemas.Workflow,
|
||||
db: Session = Depends(get_db),
|
||||
_: schemas.TokenPayload = Depends(get_current_active_user)) -> Any:
|
||||
"""
|
||||
更新工作流
|
||||
"""
|
||||
wf = Workflow.get(db, workflow.id)
|
||||
if not wf:
|
||||
return schemas.Response(success=False, message="工作流不存在")
|
||||
wf.update(db, workflow.dict())
|
||||
return schemas.Response(success=True, message="更新成功")
|
||||
|
||||
|
||||
@router.delete("/{workflow_id}", summary="删除工作流", response_model=schemas.Response)
|
||||
def delete_workflow(workflow_id: int,
|
||||
db: Session = Depends(get_db),
|
||||
_: schemas.TokenPayload = Depends(get_current_active_user)) -> Any:
|
||||
"""
|
||||
删除工作流
|
||||
"""
|
||||
workflow = Workflow.get(db, workflow_id)
|
||||
if not workflow:
|
||||
return schemas.Response(success=False, message="工作流不存在")
|
||||
# 删除定时任务
|
||||
Scheduler().remove_workflow_job(workflow)
|
||||
# 删除工作流
|
||||
Workflow.delete(db, workflow_id)
|
||||
# 删除缓存
|
||||
SystemConfigOper().delete(f"WorkflowCache-{workflow_id}")
|
||||
return schemas.Response(success=True, message="删除成功")
|
||||
|
||||
|
||||
@router.post("/{workflow_id}/run", summary="执行工作流", response_model=schemas.Response)
|
||||
def run_workflow(workflow_id: int,
|
||||
from_begin: bool = True,
|
||||
_: schemas.TokenPayload = Depends(get_current_active_user)) -> Any:
|
||||
"""
|
||||
执行工作流
|
||||
"""
|
||||
state, errmsg = WorkflowChain().process(workflow_id, from_begin=from_begin)
|
||||
if not state:
|
||||
return schemas.Response(success=False, message=errmsg)
|
||||
return schemas.Response(success=True)
|
||||
|
||||
|
||||
@router.post("/{workflow_id}/start", summary="启用工作流", response_model=schemas.Response)
|
||||
def start_workflow(workflow_id: int,
|
||||
db: Session = Depends(get_db),
|
||||
_: schemas.TokenPayload = Depends(get_current_active_user)) -> Any:
|
||||
"""
|
||||
启用工作流
|
||||
"""
|
||||
workflow = Workflow.get(db, workflow_id)
|
||||
if not workflow:
|
||||
return schemas.Response(success=False, message="工作流不存在")
|
||||
# 添加定时任务
|
||||
Scheduler().update_workflow_job(workflow)
|
||||
# 更新状态
|
||||
workflow.update_state(db, workflow_id, "W")
|
||||
return schemas.Response(success=True)
|
||||
|
||||
|
||||
@router.post("/{workflow_id}/pause", summary="停用工作流", response_model=schemas.Response)
|
||||
def pause_workflow(workflow_id: int,
|
||||
db: Session = Depends(get_db),
|
||||
_: schemas.TokenPayload = Depends(get_current_active_user)) -> Any:
|
||||
"""
|
||||
停用工作流
|
||||
"""
|
||||
workflow = Workflow.get(db, workflow_id)
|
||||
if not workflow:
|
||||
return schemas.Response(success=False, message="工作流不存在")
|
||||
# 删除定时任务
|
||||
Scheduler().remove_workflow_job(workflow)
|
||||
# 停止工作流
|
||||
global_vars.stop_workflow(workflow_id)
|
||||
# 更新状态
|
||||
workflow.update_state(db, workflow_id, "P")
|
||||
return schemas.Response(success=True)
|
||||
|
||||
|
||||
@router.post("/{workflow_id}/reset", summary="重置工作流", response_model=schemas.Response)
|
||||
def reset_workflow(workflow_id: int,
|
||||
db: Session = Depends(get_db),
|
||||
_: schemas.TokenPayload = Depends(get_current_active_user)) -> Any:
|
||||
"""
|
||||
重置工作流
|
||||
"""
|
||||
workflow = Workflow.get(db, workflow_id)
|
||||
if not workflow:
|
||||
return schemas.Response(success=False, message="工作流不存在")
|
||||
# 停止工作流
|
||||
global_vars.stop_workflow(workflow_id)
|
||||
# 重置工作流
|
||||
workflow.reset(db, workflow_id)
|
||||
# 删除缓存
|
||||
SystemConfigOper().delete(f"WorkflowCache-{workflow_id}")
|
||||
return schemas.Response(success=True)
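A minimal usage sketch of the new endpoints, assuming the router is mounted under /api/v1/workflow and a valid user access token; base URL, prefix and auth header are assumptions, not part of this diff:

import requests

token = "<user-access-token>"                    # illustrative placeholder
BASE = "http://localhost:3001/api/v1/workflow"   # assumed host and route prefix
headers = {"Authorization": f"Bearer {token}"}   # assumed bearer-token auth

requests.post(f"{BASE}/1/start", headers=headers)                                      # enable workflow 1
resp = requests.post(f"{BASE}/1/run", params={"from_begin": "true"}, headers=headers)  # run it from the beginning
print(resp.json())  # {"success": true} or {"success": false, "message": "..."}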
|
||||
@@ -7,7 +7,6 @@ from pathlib import Path
|
||||
from typing import Optional, Any, Tuple, List, Set, Union, Dict
|
||||
|
||||
from qbittorrentapi import TorrentFilesList
|
||||
from ruamel.yaml import CommentedMap
|
||||
from transmission_rpc import File
|
||||
|
||||
from app.core.config import settings
|
||||
@@ -17,7 +16,7 @@ from app.core.meta import MetaBase
|
||||
from app.core.module import ModuleManager
|
||||
from app.db.message_oper import MessageOper
|
||||
from app.db.user_oper import UserOper
|
||||
from app.helper.message import MessageHelper
|
||||
from app.helper.message import MessageHelper, MessageQueueManager
|
||||
from app.helper.service import ServiceConfigHelper
|
||||
from app.log import logger
|
||||
from app.schemas import TransferInfo, TransferTorrent, ExistMediaInfo, DownloadingTorrent, CommingMessage, Notification, \
|
||||
@@ -39,6 +38,9 @@ class ChainBase(metaclass=ABCMeta):
|
||||
self.eventmanager = EventManager()
|
||||
self.messageoper = MessageOper()
|
||||
self.messagehelper = MessageHelper()
|
||||
self.messagequeue = MessageQueueManager(
|
||||
send_callback=self.run_module
|
||||
)
|
||||
self.useroper = UserOper()
|
||||
|
||||
@staticmethod
|
||||
@@ -77,7 +79,7 @@ class ChainBase(metaclass=ABCMeta):
|
||||
"""
|
||||
cache_path = settings.TEMP_PATH / filename
|
||||
if cache_path.exists():
|
||||
Path(cache_path).unlink()
|
||||
cache_path.unlink()
|
||||
|
||||
def run_module(self, method: str, *args, **kwargs) -> Any:
|
||||
"""
|
||||
@@ -308,7 +310,7 @@ class ChainBase(metaclass=ABCMeta):
|
||||
"""
|
||||
return self.run_module("search_collections", name=name)
|
||||
|
||||
def search_torrents(self, site: CommentedMap,
|
||||
def search_torrents(self, site: dict,
|
||||
keywords: List[str],
|
||||
mtype: MediaType = None,
|
||||
page: int = 0) -> List[TorrentInfo]:
|
||||
@@ -323,13 +325,16 @@ class ChainBase(metaclass=ABCMeta):
|
||||
return self.run_module("search_torrents", site=site, keywords=keywords,
|
||||
mtype=mtype, page=page)
|
||||
|
||||
def refresh_torrents(self, site: CommentedMap) -> List[TorrentInfo]:
|
||||
def refresh_torrents(self, site: dict, keyword: str = None, cat: str = None, page: int = 0) -> List[TorrentInfo]:
|
||||
"""
|
||||
获取站点最新一页的种子,多个站点需要多线程处理
|
||||
:param site: 站点
|
||||
:param keyword: 标题
|
||||
:param cat: 分类
|
||||
:param page: 页码
|
||||
:return: 种子资源列表
|
||||
"""
|
||||
return self.run_module("refresh_torrents", site=site)
|
||||
return self.run_module("refresh_torrents", site=site, keyword=keyword, cat=cat, page=page)
|
||||
|
||||
def filter_torrents(self, rule_groups: List[str],
|
||||
torrent_list: List[TorrentInfo],
|
||||
@@ -345,7 +350,7 @@ class ChainBase(metaclass=ABCMeta):
|
||||
torrent_list=torrent_list, mediainfo=mediainfo)
|
||||
|
||||
def download(self, content: Union[Path, str], download_dir: Path, cookie: str,
|
||||
episodes: Set[int] = None, category: str = None,
|
||||
episodes: Set[int] = None, category: str = None, label: str = None,
|
||||
downloader: str = None
|
||||
) -> Optional[Tuple[Optional[str], Optional[str], Optional[str], str]]:
|
||||
"""
|
||||
@@ -355,11 +360,12 @@ class ChainBase(metaclass=ABCMeta):
|
||||
:param cookie: cookie
|
||||
:param episodes: 需要下载的集数
|
||||
:param category: 种子分类
|
||||
:param label: 标签
|
||||
:param downloader: 下载器
|
||||
:return: 下载器名称、种子Hash、种子文件布局、错误原因
|
||||
"""
|
||||
return self.run_module("download", content=content, download_dir=download_dir,
|
||||
cookie=cookie, episodes=episodes, category=category,
|
||||
cookie=cookie, episodes=episodes, category=category, label=label,
|
||||
downloader=downloader)
|
||||
|
||||
def download_added(self, context: Context, download_dir: Path, torrent_path: Path = None) -> None:
|
||||
@@ -488,11 +494,6 @@ class ChainBase(metaclass=ABCMeta):
|
||||
:param message: 消息体
|
||||
:return: 成功或失败
|
||||
"""
|
||||
logger.info(f"发送消息:channel={message.channel},"
|
||||
f"source={message.source},"
|
||||
f"title={message.title}, "
|
||||
f"text={message.text},"
|
||||
f"userid={message.userid}")
|
||||
# 保存原消息
|
||||
self.messagehelper.put(message, role="user", title=message.title)
|
||||
self.messageoper.add(**message.dict())
|
||||
@@ -542,13 +543,13 @@ class ChainBase(metaclass=ABCMeta):
|
||||
# 按设定发送
|
||||
self.eventmanager.send_event(etype=EventType.NoticeMessage,
|
||||
data={**send_message.dict(), "type": send_message.mtype})
|
||||
self.run_module("post_message", message=send_message)
|
||||
self.messagequeue.send_message("post_message", message=send_message)
|
||||
if not send_orignal:
|
||||
return
|
||||
# 发送消息事件
|
||||
self.eventmanager.send_event(etype=EventType.NoticeMessage, data={**message.dict(), "type": message.mtype})
|
||||
# 按原消息发送
|
||||
self.run_module("post_message", message=message)
|
||||
self.messagequeue.send_message("post_message", message=message)
|
||||
|
||||
def post_medias_message(self, message: Notification, medias: List[MediaInfo]) -> None:
|
||||
"""
|
||||
@@ -560,7 +561,7 @@ class ChainBase(metaclass=ABCMeta):
|
||||
note_list = [media.to_dict() for media in medias]
|
||||
self.messagehelper.put(message, role="user", note=note_list, title=message.title)
|
||||
self.messageoper.add(**message.dict(), note=note_list)
|
||||
return self.run_module("post_medias_message", message=message, medias=medias)
|
||||
return self.messagequeue.send_message("post_medias_message", message=message, medias=medias)
|
||||
|
||||
def post_torrents_message(self, message: Notification, torrents: List[Context]) -> None:
|
||||
"""
|
||||
@@ -572,7 +573,7 @@ class ChainBase(metaclass=ABCMeta):
|
||||
note_list = [torrent.torrent_info.to_dict() for torrent in torrents]
|
||||
self.messagehelper.put(message, role="user", note=note_list, title=message.title)
|
||||
self.messageoper.add(**message.dict(), note=note_list)
|
||||
return self.run_module("post_torrents_message", message=message, torrents=torrents)
|
||||
return self.messagequeue.send_message("post_torrents_message", message=message, torrents=torrents)
|
||||
|
||||
def metadata_img(self, mediainfo: MediaInfo, season: int = None, episode: int = None) -> Optional[dict]:
|
||||
"""
|
||||
|
||||
@@ -209,7 +209,8 @@ class DownloadChain(ChainBase):
|
||||
save_path: str = None,
|
||||
userid: Union[str, int] = None,
|
||||
username: str = None,
|
||||
media_category: str = None) -> Optional[str]:
|
||||
media_category: str = None,
|
||||
label: str = None) -> Optional[str]:
|
||||
"""
|
||||
下载及发送通知
|
||||
:param context: 资源上下文
|
||||
@@ -222,6 +223,7 @@ class DownloadChain(ChainBase):
|
||||
:param userid: 用户ID
|
||||
:param username: 调用下载的用户名/插件名
|
||||
:param media_category: 自定义媒体类别
|
||||
:param label: 自定义标签
|
||||
"""
|
||||
# 发送资源下载事件,允许外部拦截下载
|
||||
event_data = ResourceDownloadEventData(
|
||||
@@ -310,6 +312,7 @@ class DownloadChain(ChainBase):
|
||||
episodes=episodes,
|
||||
download_dir=download_dir,
|
||||
category=_media.category,
|
||||
label=label,
|
||||
downloader=downloader or _site_downloader)
|
||||
if result:
|
||||
_downloader, _hash, _layout, error_msg = result
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import threading
|
||||
from typing import List, Union, Optional, Generator
|
||||
from typing import List, Union, Optional, Generator, Any
|
||||
|
||||
from app.chain import ChainBase
|
||||
from app.core.cache import cached
|
||||
@@ -27,8 +27,8 @@ class MediaServerChain(ChainBase):
|
||||
"""
|
||||
return self.run_module("mediaserver_librarys", server=server, username=username, hidden=hidden)
|
||||
|
||||
def items(self, server: str, library_id: Union[str, int], start_index: int = 0, limit: Optional[int] = -1) \
|
||||
-> Optional[Generator]:
|
||||
def items(self, server: str, library_id: Union[str, int],
|
||||
start_index: int = 0, limit: Optional[int] = -1) -> Generator[Any, None, None]:
|
||||
"""
|
||||
获取媒体服务器项目列表,支持分页和不分页逻辑,默认不分页获取所有数据
|
||||
|
||||
|
||||
@@ -3,6 +3,7 @@ import tempfile
|
||||
from pathlib import Path
|
||||
from typing import List
|
||||
|
||||
import pillow_avif # noqa 用于自动注册AVIF支持
|
||||
from PIL import Image
|
||||
|
||||
from app.chain import ChainBase
|
||||
@@ -116,6 +117,10 @@ class RecommendChain(ChainBase, metaclass=Singleton):
|
||||
sanitized_path = SecurityUtils.sanitize_url_path(url)
|
||||
cache_path = settings.CACHE_PATH / "images" / sanitized_path
|
||||
|
||||
# 没有文件类型,则添加后缀,在恶意文件类型和实际需求下的折衷选择
|
||||
if not cache_path.suffix:
|
||||
cache_path = cache_path.with_suffix(".jpg")
|
||||
|
||||
# 确保缓存路径和文件类型合法
|
||||
if not SecurityUtils.is_safe_path(settings.CACHE_PATH, cache_path, settings.SECURITY_IMAGE_SUFFIXES):
|
||||
logger.debug(f"Invalid cache path or file type for URL: {url}, sanitized path: {sanitized_path}")
|
||||
|
||||
@@ -35,7 +35,8 @@ class SearchChain(ChainBase):
|
||||
self.torrenthelper = TorrentHelper()
|
||||
|
||||
def search_by_id(self, tmdbid: int = None, doubanid: str = None,
|
||||
mtype: MediaType = None, area: str = "title", season: int = None) -> List[Context]:
|
||||
mtype: MediaType = None, area: str = "title", season: int = None,
|
||||
sites: List[int] = None) -> List[Context]:
|
||||
"""
|
||||
根据TMDBID/豆瓣ID搜索资源,精确匹配,不过滤本地存在的资源
|
||||
:param tmdbid: TMDB ID
|
||||
@@ -43,6 +44,7 @@ class SearchChain(ChainBase):
|
||||
:param mtype: 媒体,电影 or 电视剧
|
||||
:param area: 搜索范围,title or imdbid
|
||||
:param season: 季数
|
||||
:param sites: 站点ID列表
|
||||
"""
|
||||
mediainfo = self.recognize_media(tmdbid=tmdbid, doubanid=doubanid, mtype=mtype)
|
||||
if not mediainfo:
|
||||
@@ -55,25 +57,27 @@ class SearchChain(ChainBase):
|
||||
season: NotExistMediaInfo(episodes=[])
|
||||
}
|
||||
}
|
||||
results = self.process(mediainfo=mediainfo, area=area, no_exists=no_exists)
|
||||
results = self.process(mediainfo=mediainfo, sites=sites, area=area, no_exists=no_exists)
|
||||
# 保存到本地文件
|
||||
bytes_results = pickle.dumps(results)
|
||||
self.save_cache(bytes_results, self.__result_temp_file)
|
||||
return results
|
||||
|
||||
def search_by_title(self, title: str, page: int = 0, site: int = None) -> List[Context]:
|
||||
def search_by_title(self, title: str, page: int = 0,
|
||||
sites: List[int] = None, cache_local: bool = True) -> List[Context]:
|
||||
"""
|
||||
根据标题搜索资源,不识别不过滤,直接返回站点内容
|
||||
:param title: 标题,为空时返回所有站点首页内容
|
||||
:param page: 页码
|
||||
:param site: 站点ID
|
||||
:param sites: 站点ID列表
|
||||
:param cache_local: 是否缓存到本地
|
||||
"""
|
||||
if title:
|
||||
logger.info(f'开始搜索资源,关键词:{title} ...')
|
||||
else:
|
||||
logger.info(f'开始浏览资源,站点:{site} ...')
|
||||
logger.info(f'开始浏览资源,站点:{sites} ...')
|
||||
# 搜索
|
||||
torrents = self.__search_all_sites(keywords=[title], sites=[site] if site else None, page=page) or []
|
||||
torrents = self.__search_all_sites(keywords=[title], sites=sites, page=page) or []
|
||||
if not torrents:
|
||||
logger.warn(f'{title} 未搜索到资源')
|
||||
return []
|
||||
@@ -81,8 +85,9 @@ class SearchChain(ChainBase):
|
||||
contexts = [Context(meta_info=MetaInfo(title=torrent.title, subtitle=torrent.description),
|
||||
torrent_info=torrent) for torrent in torrents]
|
||||
# 保存到本地文件
|
||||
bytes_results = pickle.dumps(contexts)
|
||||
self.save_cache(bytes_results, self.__result_temp_file)
|
||||
if cache_local:
|
||||
bytes_results = pickle.dumps(contexts)
|
||||
self.save_cache(bytes_results, self.__result_temp_file)
|
||||
return contexts
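A hedged usage sketch of the widened signature; the site IDs and keyword are illustrative values, and cache_local=False skips writing the pickled result file:

contexts = SearchChain().search_by_title("Dune", page=0, sites=[1, 2], cache_local=False)
for context in contexts:
    print(context.torrent_info.title)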
|
||||
|
||||
def last_search_results(self) -> List[Context]:
|
||||
@@ -307,11 +312,6 @@ class SearchChain(ChainBase):
|
||||
for indexer in self.siteshelper.get_indexers():
|
||||
# 检查站点索引开关
|
||||
if not sites or indexer.get("id") in sites:
|
||||
# 站点流控
|
||||
state, msg = self.siteshelper.check(indexer.get("domain"))
|
||||
if state:
|
||||
logger.warn(msg)
|
||||
continue
|
||||
indexer_sites.append(indexer)
|
||||
if not indexer_sites:
|
||||
logger.warn('未开启任何有效站点,无法搜索资源')
|
||||
|
||||
@@ -6,7 +6,6 @@ from typing import Optional, Tuple, Union, Dict
|
||||
from urllib.parse import urljoin
|
||||
|
||||
from lxml import etree
|
||||
from ruamel.yaml import CommentedMap
|
||||
|
||||
from app.chain import ChainBase
|
||||
from app.core.config import global_vars, settings
|
||||
@@ -53,9 +52,10 @@ class SiteChain(ChainBase):
|
||||
"1ptba.com": self.__indexphp_test,
|
||||
"star-space.net": self.__indexphp_test,
|
||||
"yemapt.org": self.__yema_test,
|
||||
"hddolby.com": self.__hddolby_test,
|
||||
}
|
||||
|
||||
def refresh_userdata(self, site: CommentedMap = None) -> Optional[SiteUserData]:
|
||||
def refresh_userdata(self, site: dict = None) -> Optional[SiteUserData]:
|
||||
"""
|
||||
刷新站点的用户数据
|
||||
:param site: 站点
|
||||
@@ -252,6 +252,32 @@ class SiteChain(ChainBase):
|
||||
site.url = f"{site.url}index.php"
|
||||
return self.__test(site)
|
||||
|
||||
@staticmethod
|
||||
def __hddolby_test(site: Site) -> Tuple[bool, str]:
|
||||
"""
|
||||
判断站点是否已经登录:hddolby
|
||||
"""
|
||||
url = f"{site.url}api/v1/user/data"
|
||||
headers = {
|
||||
"Content-Type": "application/json",
|
||||
"Accept": "application/json, text/plain, */*",
|
||||
"x-api-key": site.apikey,
|
||||
}
|
||||
res = RequestUtils(
|
||||
headers=headers,
|
||||
proxies=settings.PROXY if site.proxy else None,
|
||||
timeout=site.timeout or 15
|
||||
).get_res(url=url)
|
||||
if res is None:
|
||||
return False, "无法打开网站!"
|
||||
if res.status_code == 200:
|
||||
user_info = res.json()
|
||||
if user_info and user_info.get("status") == 0:
|
||||
return True, "连接成功"
|
||||
return False, "APIKEY已过期"
|
||||
else:
|
||||
return False, f"错误:{res.status_code} {res.reason}"
|
||||
|
||||
@staticmethod
|
||||
def __parse_favicon(url: str, cookie: str, ua: str) -> Tuple[str, Optional[str]]:
|
||||
"""
|
||||
|
||||
@@ -84,6 +84,12 @@ class StorageChain(ChainBase):
|
||||
"""
|
||||
return self.run_module("rename_file", fileitem=fileitem, name=name)
|
||||
|
||||
def exists(self, fileitem: schemas.FileItem) -> Optional[bool]:
|
||||
"""
|
||||
判断文件或目录是否存在
|
||||
"""
|
||||
return True if self.get_item(fileitem) else False
|
||||
|
||||
def get_item(self, fileitem: schemas.FileItem) -> Optional[schemas.FileItem]:
|
||||
"""
|
||||
查询目录或文件
|
||||
|
||||
@@ -1262,7 +1262,7 @@ class SubscribeChain(ChainBase, metaclass=Singleton):
|
||||
订阅相关的下载和文件信息
|
||||
"""
|
||||
if not subscribe:
|
||||
return
|
||||
return None
|
||||
|
||||
# 返回订阅数据
|
||||
subscribe_info = schemas.SubscrbieInfo()
|
||||
|
||||
@@ -73,17 +73,20 @@ class TorrentsChain(ChainBase, metaclass=Singleton):
|
||||
logger.info(f'种子缓存数据清理完成')
|
||||
|
||||
@cached(cache=TTLCache(maxsize=128, ttl=595))
|
||||
def browse(self, domain: str) -> List[TorrentInfo]:
|
||||
def browse(self, domain: str, keyword: str = None, cat: str = None, page: int = 0) -> List[TorrentInfo]:
|
||||
"""
|
||||
浏览站点首页内容,返回种子清单,TTL缓存10分钟
|
||||
:param domain: 站点域名
|
||||
:param keyword: 搜索标题
|
||||
:param cat: 搜索分类
|
||||
:param page: 页码
|
||||
"""
|
||||
logger.info(f'开始获取站点 {domain} 最新种子 ...')
|
||||
site = self.siteshelper.get_indexer(domain)
|
||||
if not site:
|
||||
logger.error(f'站点 {domain} 不存在!')
|
||||
return []
|
||||
return self.refresh_torrents(site=site)
|
||||
return self.refresh_torrents(site=site, keyword=keyword, cat=cat, page=page)
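A short sketch of the extended browse() call; the domain and keyword are illustrative. Since the method is wrapped in a TTL cache, presumably keyed on the full argument set, different keyword/cat/page combinations are cached independently for roughly ten minutes:

chain = TorrentsChain()
latest = chain.browse("example-site.org")                            # first page of the site's newest torrents
matches = chain.browse("example-site.org", keyword="Dune", page=1)   # keyword search, second page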
|
||||
|
||||
@cached(cache=TTLCache(maxsize=128, ttl=295))
|
||||
def rss(self, domain: str) -> List[TorrentInfo]:
|
||||
|
||||
@@ -606,7 +606,7 @@ class TransferChain(ChainBase, metaclass=Singleton):
|
||||
logger.error(f"整理队列处理出现错误:{e} - {traceback.format_exc()}")
|
||||
|
||||
def __handle_transfer(self, task: TransferTask,
|
||||
callback: Optional[Callable] = None) -> Tuple[bool, str]:
|
||||
callback: Optional[Callable] = None) -> Optional[Tuple[bool, str]]:
|
||||
"""
|
||||
处理整理任务
|
||||
"""
|
||||
@@ -670,13 +670,18 @@ class TransferChain(ChainBase, metaclass=Singleton):
|
||||
self.jobview.add_task(task, state=curr_task.state if curr_task else "waiting")
|
||||
|
||||
# 获取集数据
|
||||
if not task.episodes_info and task.mediainfo.type == MediaType.TV:
|
||||
if task.meta.begin_season is None:
|
||||
task.meta.begin_season = 1
|
||||
task.mediainfo.season = task.mediainfo.season or task.meta.begin_season
|
||||
if task.mediainfo.type == MediaType.TV and not task.episodes_info:
|
||||
# 注意 season 为 0 的情况
|
||||
season_num = task.mediainfo.season
|
||||
if season_num is None and task.meta.season_seq:
|
||||
if task.meta.season_seq.isdigit():
|
||||
season_num = int(task.meta.season_seq)
|
||||
# 默认值1
|
||||
if season_num is None:
|
||||
season_num = 1
|
||||
task.episodes_info = self.tmdbchain.tmdb_episodes(
|
||||
tmdbid=task.mediainfo.tmdb_id,
|
||||
season=task.mediainfo.season
|
||||
season=season_num
|
||||
)
|
||||
|
||||
# 查询整理目标目录
|
||||
@@ -908,7 +913,7 @@ class TransferChain(ChainBase, metaclass=Singleton):
|
||||
season: int = None, epformat: EpisodeFormat = None, min_filesize: int = 0,
|
||||
downloader: str = None, download_hash: str = None,
|
||||
force: bool = False, background: bool = True,
|
||||
manual: bool = False) -> Tuple[bool, str]:
|
||||
manual: bool = False, continue_callback: Callable = None) -> Tuple[bool, str]:
|
||||
"""
|
||||
执行一个复杂目录的整理操作
|
||||
:param fileitem: 文件项
|
||||
@@ -929,6 +934,7 @@ class TransferChain(ChainBase, metaclass=Singleton):
|
||||
:param force: 是否强制整理
|
||||
:param background: 是否后台运行
|
||||
:param manual: 是否手动整理
|
||||
:param continue_callback: 继续处理回调
|
||||
返回:成功标识,错误信息
|
||||
"""
|
||||
|
||||
@@ -994,6 +1000,8 @@ class TransferChain(ChainBase, metaclass=Singleton):
|
||||
for file_item, bluray_dir in file_items:
|
||||
if global_vars.is_system_stopped:
|
||||
break
|
||||
if continue_callback and not continue_callback():
|
||||
break
|
||||
file_path = Path(file_item.path)
|
||||
# 回收站及隐藏的文件不处理
|
||||
if file_item.path.find('/@Recycle/') != -1 \
|
||||
@@ -1114,6 +1122,8 @@ class TransferChain(ChainBase, metaclass=Singleton):
|
||||
for transfer_task in transfer_tasks:
|
||||
if global_vars.is_system_stopped:
|
||||
break
|
||||
if continue_callback and not continue_callback():
|
||||
break
|
||||
# 更新进度
|
||||
__process_msg = f"正在整理 ({processed_num + fail_num + 1}/{total_num}){transfer_task.fileitem.name} ..."
|
||||
logger.info(__process_msg)
|
||||
|
||||
250
app/chain/workflow.py
Normal file
@@ -0,0 +1,250 @@
|
||||
import base64
|
||||
import pickle
|
||||
import threading
|
||||
from collections import defaultdict, deque
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
from time import sleep
|
||||
from typing import List, Tuple
|
||||
|
||||
from pydantic.fields import Callable
|
||||
|
||||
from app.chain import ChainBase
|
||||
from app.core.config import global_vars
|
||||
from app.core.workflow import WorkFlowManager
|
||||
from app.db.models import Workflow
|
||||
from app.db.workflow_oper import WorkflowOper
|
||||
from app.log import logger
|
||||
from app.schemas import ActionContext, ActionFlow, Action, ActionExecution
|
||||
|
||||
|
||||
class WorkflowExecutor:
|
||||
"""
|
||||
工作流执行器
|
||||
"""
|
||||
|
||||
def __init__(self, workflow: Workflow, step_callback: Callable = None):
|
||||
"""
|
||||
初始化工作流执行器
|
||||
:param workflow: 工作流对象
|
||||
:param step_callback: 步骤回调函数
|
||||
"""
|
||||
# 工作流数据
|
||||
self.workflow = workflow
|
||||
self.step_callback = step_callback
|
||||
self.actions = {action['id']: Action(**action) for action in workflow.actions}
|
||||
self.flows = [ActionFlow(**flow) for flow in workflow.flows]
|
||||
self.total_actions = len(self.actions)
|
||||
self.finished_actions = 0
|
||||
|
||||
self.success = True
|
||||
self.errmsg = ""
|
||||
|
||||
# 工作流管理器
|
||||
self.workflowmanager = WorkFlowManager()
|
||||
# 线程安全队列
|
||||
self.queue = deque()
|
||||
# 锁用于保证线程安全
|
||||
self.lock = threading.Lock()
|
||||
# 线程池
|
||||
self.executor = ThreadPoolExecutor()
|
||||
# 跟踪运行中的任务数
|
||||
self.running_tasks = 0
|
||||
|
||||
# 构建邻接表、入度表
|
||||
self.adjacency = defaultdict(list)
|
||||
self.indegree = defaultdict(int)
|
||||
for flow in self.flows:
|
||||
source = flow.source
|
||||
target = flow.target
|
||||
self.adjacency[source].append(target)
|
||||
self.indegree[target] += 1
|
||||
|
||||
# 初始化所有节点的入度(确保未被引用的节点入度为0)
|
||||
for action_id in self.actions:
|
||||
if action_id not in self.indegree:
|
||||
self.indegree[action_id] = 0
|
||||
|
||||
# 初始上下文
|
||||
if workflow.current_action and workflow.context:
|
||||
logger.info(f"工作流已执行动作:{workflow.current_action}")
|
||||
# Base64解码
|
||||
decoded_data = base64.b64decode(workflow.context["content"])
|
||||
# 反序列化数据
|
||||
self.context = pickle.loads(decoded_data)
|
||||
else:
|
||||
self.context = ActionContext()
|
||||
|
||||
# 恢复工作流
|
||||
global_vars.workflow_resume(self.workflow.id)
|
||||
# 初始化队列,添加入度为0的节点
|
||||
for action_id in self.actions:
|
||||
if self.indegree[action_id] == 0:
|
||||
self.queue.append(action_id)
|
||||
|
||||
def execute(self):
|
||||
"""
|
||||
执行工作流
|
||||
"""
|
||||
while True:
|
||||
with self.lock:
|
||||
# 退出条件:队列为空且无运行任务
|
||||
if not self.queue and self.running_tasks == 0:
|
||||
break
|
||||
# 退出条件:出现了错误
|
||||
if not self.success:
|
||||
break
|
||||
if not self.queue:
|
||||
sleep(0.1)
|
||||
continue
|
||||
# 取出队首节点
|
||||
node_id = self.queue.popleft()
|
||||
# 标记任务开始
|
||||
self.running_tasks += 1
|
||||
|
||||
# 已停机
|
||||
if global_vars.is_workflow_stopped(self.workflow.id):
|
||||
global_vars.workflow_resume(self.workflow.id)
|
||||
break
|
||||
|
||||
# 已执行的跳过
|
||||
if (self.workflow.current_action
|
||||
and node_id in self.workflow.current_action.split(',')):
|
||||
continue
|
||||
|
||||
# 提交任务到线程池
|
||||
future = self.executor.submit(
|
||||
self.execute_node,
|
||||
self.workflow.id,
|
||||
node_id,
|
||||
self.context
|
||||
)
|
||||
future.add_done_callback(self.on_node_complete)
|
||||
|
||||
def execute_node(self, workflow_id: int, node_id: int,
|
||||
context: ActionContext) -> Tuple[Action, bool, str, ActionContext]:
|
||||
"""
|
||||
执行单个节点操作,返回修改后的上下文和节点ID
|
||||
"""
|
||||
action = self.actions[node_id]
|
||||
state, message, result_ctx = self.workflowmanager.excute(workflow_id, action, context=context)
|
||||
return action, state, message, result_ctx
|
||||
|
||||
def on_node_complete(self, future):
|
||||
"""
|
||||
节点完成回调:更新上下文、处理后继节点
|
||||
"""
|
||||
action, state, message, result_ctx = future.result()
|
||||
|
||||
try:
|
||||
self.finished_actions += 1
|
||||
# 更新当前进度
|
||||
self.context.progress = round(self.finished_actions / self.total_actions * 100)
|
||||
|
||||
# 补充执行历史
|
||||
self.context.execute_history.append(
|
||||
ActionExecution(
|
||||
action=action.name,
|
||||
result=state,
|
||||
message=message
|
||||
)
|
||||
)
|
||||
|
||||
# 节点执行失败
|
||||
if not state:
|
||||
self.success = False
|
||||
self.errmsg = f"{action.name} 失败"
|
||||
return
|
||||
|
||||
with self.lock:
|
||||
# 更新主上下文
|
||||
self.merge_context(result_ctx)
|
||||
# 回调
|
||||
if self.step_callback:
|
||||
self.step_callback(action, self.context)
|
||||
|
||||
# 处理后继节点
|
||||
successors = self.adjacency.get(action.id, [])
|
||||
for succ_id in successors:
|
||||
with self.lock:
|
||||
self.indegree[succ_id] -= 1
|
||||
if self.indegree[succ_id] == 0:
|
||||
self.queue.append(succ_id)
|
||||
finally:
|
||||
# 标记任务完成
|
||||
with self.lock:
|
||||
self.running_tasks -= 1
|
||||
|
||||
def merge_context(self, context: ActionContext):
|
||||
"""
|
||||
合并上下文
|
||||
"""
|
||||
for key, value in context.dict().items():
|
||||
if not getattr(self.context, key, None):
|
||||
setattr(self.context, key, value)
|
||||
|
||||
|
||||
class WorkflowChain(ChainBase):
|
||||
"""
|
||||
工作流链
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self.workflowoper = WorkflowOper()
|
||||
|
||||
def process(self, workflow_id: int, from_begin: bool = True) -> Tuple[bool, str]:
|
||||
"""
|
||||
处理工作流
|
||||
:param workflow_id: 工作流ID
|
||||
:param from_begin: 是否从头开始,默认为True
|
||||
"""
|
||||
|
||||
def save_step(action: Action, context: ActionContext):
|
||||
"""
|
||||
保存上下文到数据库
|
||||
"""
|
||||
# 序列化数据
|
||||
serialized_data = pickle.dumps(context)
|
||||
# 使用Base64编码字节流
|
||||
encoded_data = base64.b64encode(serialized_data).decode('utf-8')
|
||||
self.workflowoper.step(workflow_id, action_id=action.id, context={
|
||||
"content": encoded_data
|
||||
})
|
||||
|
||||
# 重置工作流
|
||||
if from_begin:
|
||||
self.workflowoper.reset(workflow_id)
|
||||
|
||||
# 查询工作流数据
|
||||
workflow = self.workflowoper.get(workflow_id)
|
||||
if not workflow:
|
||||
logger.warn(f"工作流 {workflow_id} 不存在")
|
||||
return False, "工作流不存在"
|
||||
if not workflow.actions:
|
||||
logger.warn(f"工作流 {workflow.name} 无动作")
|
||||
return False, "工作流无动作"
|
||||
if not workflow.flows:
|
||||
logger.warn(f"工作流 {workflow.name} 无流程")
|
||||
return False, "工作流无流程"
|
||||
|
||||
logger.info(f"开始处理 {workflow.name},共 {len(workflow.actions)} 个动作 ...")
|
||||
self.workflowoper.start(workflow_id)
|
||||
|
||||
# 执行工作流
|
||||
executor = WorkflowExecutor(workflow, step_callback=save_step)
|
||||
executor.execute()
|
||||
|
||||
if not executor.success:
|
||||
logger.info(f"工作流 {workflow.name} 执行失败:{executor.errmsg}")
|
||||
self.workflowoper.fail(workflow_id, result=executor.errmsg)
|
||||
return False, executor.errmsg
|
||||
else:
|
||||
logger.info(f"工作流 {workflow.name} 执行完成")
|
||||
self.workflowoper.success(workflow_id)
|
||||
return True, ""
|
||||
|
||||
def get_workflows(self) -> List[Workflow]:
|
||||
"""
|
||||
获取工作流列表
|
||||
"""
|
||||
return self.workflowoper.list_enabled()
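The executor above is essentially Kahn's algorithm over the action graph: nodes with indegree 0 are queued, and completing a node decrements the indegree of its successors. A standalone sketch of the same idea with made-up node names (the real executor submits each ready node to a thread pool instead of printing it):

from collections import defaultdict, deque

flows = [("fetch", "filter"), ("filter", "download"), ("fetch", "notify")]
adjacency, indegree = defaultdict(list), defaultdict(int)
nodes = {n for flow in flows for n in flow}
for source, target in flows:
    adjacency[source].append(target)
    indegree[target] += 1

queue = deque(n for n in nodes if indegree[n] == 0)
while queue:
    node = queue.popleft()
    print("run", node)
    for succ in adjacency[node]:
        indegree[succ] -= 1
        if indegree[succ] == 0:
            queue.append(succ)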
|
||||
@@ -247,7 +247,7 @@ class ConfigModel(BaseModel):
|
||||
)
|
||||
# 允许的图片文件后缀格式
|
||||
SECURITY_IMAGE_SUFFIXES: List[str] = Field(
|
||||
default_factory=lambda: [".jpg", ".jpeg", ".png", ".webp", ".gif", ".svg"]
|
||||
default_factory=lambda: [".jpg", ".jpeg", ".png", ".webp", ".gif", ".svg", ".avif"]
|
||||
)
|
||||
# 重命名时支持的S0别名
|
||||
RENAME_FORMAT_S0_NAMES: List[str] = Field(
|
||||
@@ -255,6 +255,8 @@ class ConfigModel(BaseModel):
|
||||
)
|
||||
# 启用分词搜索
|
||||
TOKENIZED_SEARCH: bool = False
|
||||
# 为指定默认字幕添加.default后缀
|
||||
DEFAULT_SUB: Optional[str] = "zh-cn"
|
||||
|
||||
|
||||
class Settings(BaseSettings, ConfigModel, LogConfigModel):
|
||||
@@ -361,7 +363,7 @@ class Settings(BaseSettings, ConfigModel, LogConfigModel):
|
||||
raise ValueError(f"配置项 '{field_name}' 的值 '{value}' 无法转换成正确的类型") from e
|
||||
logger.error(
|
||||
f"配置项 '{field_name}' 的值 '{value}' 无法转换成正确的类型,使用默认值 '{default}',错误信息: {e}")
|
||||
return default, True
|
||||
return default, True
|
||||
|
||||
@validator('*', pre=True, always=True)
|
||||
def generic_type_validator(cls, value: Any, field): # noqa
|
||||
@@ -605,6 +607,8 @@ class GlobalVar(object):
|
||||
STOP_EVENT: threading.Event = threading.Event()
|
||||
# webpush订阅
|
||||
SUBSCRIPTIONS: List[dict] = []
|
||||
# 需应急停止的工作流
|
||||
EMERGENCY_STOP_WORKFLOWS: List[str] = []
|
||||
|
||||
def stop_system(self):
|
||||
"""
|
||||
@@ -631,6 +635,26 @@ class GlobalVar(object):
|
||||
"""
|
||||
self.SUBSCRIPTIONS.append(subscription)
|
||||
|
||||
def stop_workflow(self, workflow_id: str):
|
||||
"""
|
||||
停止工作流
|
||||
"""
|
||||
if workflow_id not in self.EMERGENCY_STOP_WORKFLOWS:
|
||||
self.EMERGENCY_STOP_WORKFLOWS.append(workflow_id)
|
||||
|
||||
def workflow_resume(self, workflow_id: str):
|
||||
"""
|
||||
恢复工作流
|
||||
"""
|
||||
if workflow_id in self.EMERGENCY_STOP_WORKFLOWS:
|
||||
self.EMERGENCY_STOP_WORKFLOWS.remove(workflow_id)
|
||||
|
||||
def is_workflow_stopped(self, workflow_id: str):
|
||||
"""
|
||||
是否停止工作流
|
||||
"""
|
||||
return self.is_system_stopped or workflow_id in self.EMERGENCY_STOP_WORKFLOWS
|
||||
|
||||
|
||||
# 实例化配置
|
||||
settings = Settings()
|
||||
|
||||
@@ -121,7 +121,7 @@ class ModuleManager(metaclass=Singleton):
|
||||
获取实现了同一方法的模块列表
|
||||
"""
|
||||
if not self._running_modules:
|
||||
return []
|
||||
return
|
||||
for _, module in self._running_modules.items():
|
||||
if hasattr(module, method) \
|
||||
and ObjectUtils.check_method(getattr(module, method)):
|
||||
@@ -132,7 +132,7 @@ class ModuleManager(metaclass=Singleton):
|
||||
获取指定类型的模块列表
|
||||
"""
|
||||
if not self._running_modules:
|
||||
return []
|
||||
return
|
||||
for _, module in self._running_modules.items():
|
||||
if hasattr(module, 'get_type') \
|
||||
and module.get_type() == module_type:
|
||||
@@ -143,7 +143,7 @@ class ModuleManager(metaclass=Singleton):
|
||||
获取指定子类型的模块
|
||||
"""
|
||||
if not self._running_modules:
|
||||
return []
|
||||
return
|
||||
for _, module in self._running_modules.items():
|
||||
if hasattr(module, 'get_subtype') \
|
||||
and module.get_subtype() == module_subtype:
|
||||
|
||||
@@ -793,10 +793,9 @@ class PluginManager(metaclass=Singleton):
|
||||
# 已安装插件
|
||||
installed_apps = self.systemconfig.get(SystemConfigKey.UserInstalledPlugins) or []
|
||||
# 获取在线插件
|
||||
online_plugins = self.pluginhelper.get_plugins(market, package_version) or {}
|
||||
if not online_plugins:
|
||||
if not package_version:
|
||||
logger.warning(f"获取插件库失败:{market},请检查 GitHub 网络连接")
|
||||
online_plugins = self.pluginhelper.get_plugins(market, package_version)
|
||||
if online_plugins is None:
|
||||
logger.warning(f"获取{package_version if package_version else ''}插件库失败:{market},请检查 GitHub 网络连接")
|
||||
return []
|
||||
ret_plugins = []
|
||||
add_time = len(online_plugins)
|
||||
|
||||
@@ -4,7 +4,8 @@ import hmac
|
||||
import json
|
||||
import os
|
||||
import traceback
|
||||
from datetime import datetime, timedelta
|
||||
import datetime
|
||||
from datetime import timedelta
|
||||
from typing import Any, Union, Annotated, Optional
|
||||
|
||||
import jwt
|
||||
@@ -69,13 +70,13 @@ def create_access_token(
|
||||
if expires_delta is not None:
|
||||
if expires_delta.total_seconds() <= 0:
|
||||
raise ValueError("过期时间必须为正数")
|
||||
expire = datetime.utcnow() + expires_delta
|
||||
expire = datetime.datetime.now(datetime.UTC) + expires_delta
|
||||
else:
|
||||
expire = datetime.utcnow() + default_expire
|
||||
expire = datetime.datetime.now(datetime.UTC) + default_expire
|
||||
|
||||
to_encode = {
|
||||
"exp": expire,
|
||||
"iat": datetime.utcnow(),
|
||||
"iat": datetime.datetime.now(datetime.UTC),
|
||||
"sub": str(userid),
|
||||
"username": username,
|
||||
"super_user": super_user,
|
||||
@@ -102,7 +103,7 @@ def __set_or_refresh_resource_token_cookie(request: Request, response: Response,
|
||||
decoded_token = jwt.decode(resource_token, settings.RESOURCE_SECRET_KEY, algorithms=[ALGORITHM])
|
||||
exp = decoded_token.get("exp")
|
||||
if exp:
|
||||
remaining_time = datetime.utcfromtimestamp(exp) - datetime.utcnow()
|
||||
remaining_time = datetime.datetime.fromtimestamp(exp, tz=datetime.UTC) - datetime.datetime.now(datetime.UTC)
|
||||
# 根据剩余时长提前刷新令牌
|
||||
if remaining_time < timedelta(seconds=(settings.RESOURCE_ACCESS_TOKEN_EXPIRE_SECONDS / 3)):
|
||||
raise jwt.ExpiredSignatureError
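The naive utcnow()/utcfromtimestamp() calls are replaced with timezone-aware equivalents; note that datetime.UTC requires Python 3.11+ (datetime.timezone.utc on older interpreters). A minimal sketch of the remaining-lifetime check, with an illustrative timestamp and threshold:

import datetime
from datetime import timedelta

exp = 1800000000  # illustrative "exp" claim taken from a decoded token
remaining = datetime.datetime.fromtimestamp(exp, tz=datetime.UTC) - datetime.datetime.now(datetime.UTC)
needs_refresh = remaining < timedelta(seconds=600)  # illustrative refresh threshold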
|
||||
|
||||
112
app/core/workflow.py
Normal file
@@ -0,0 +1,112 @@
|
||||
from time import sleep
|
||||
from typing import Dict, Any, Tuple, List
|
||||
|
||||
from app.core.config import global_vars
|
||||
from app.helper.module import ModuleHelper
|
||||
from app.log import logger
|
||||
from app.schemas import Action, ActionContext
|
||||
from app.utils.singleton import Singleton
|
||||
|
||||
|
||||
class WorkFlowManager(metaclass=Singleton):
|
||||
"""
|
||||
工作流管理器
|
||||
"""
|
||||
|
||||
# 所有动作定义
|
||||
_actions: Dict[str, Any] = {}
|
||||
|
||||
def __init__(self):
|
||||
self.init()
|
||||
|
||||
def init(self):
|
||||
"""
|
||||
初始化
|
||||
"""
|
||||
|
||||
def filter_func(obj: Any):
|
||||
"""
|
||||
过滤函数,确保只加载新定义的类
|
||||
"""
|
||||
if not isinstance(obj, type):
|
||||
return False
|
||||
if not hasattr(obj, 'execute') or not hasattr(obj, "name"):
|
||||
return False
|
||||
if obj.__name__ == "BaseAction":
|
||||
return False
|
||||
return obj.__module__.startswith("app.actions")
|
||||
|
||||
# 加载所有动作
|
||||
self._actions = {}
|
||||
actions = ModuleHelper.load(
|
||||
"app.actions",
|
||||
filter_func=lambda _, obj: filter_func(obj)
|
||||
)
|
||||
for action in actions:
|
||||
logger.debug(f"加载动作: {action.__name__}")
|
||||
try:
|
||||
self._actions[action.__name__] = action
|
||||
except Exception as err:
|
||||
logger.error(f"加载动作失败: {action.__name__} - {err}")
|
||||
|
||||
def stop(self):
|
||||
"""
|
||||
停止
|
||||
"""
|
||||
pass
|
||||
|
||||
def excute(self, workflow_id: int, action: Action,
|
||||
context: ActionContext = None) -> Tuple[bool, str, ActionContext]:
|
||||
"""
|
||||
执行工作流动作
|
||||
"""
|
||||
if not context:
|
||||
context = ActionContext()
|
||||
if action.type in self._actions:
|
||||
# 实例化之前,清理掉类对象的数据
|
||||
|
||||
# 实例化
|
||||
action_obj = self._actions[action.type](action.id)
|
||||
# 执行
|
||||
logger.info(f"执行动作: {action.id} - {action.name}")
|
||||
try:
|
||||
result_context = action_obj.execute(workflow_id, action.data, context)
|
||||
except Exception as err:
|
||||
logger.error(f"{action.name} 执行失败: {err}")
|
||||
return False, f"{err}", context
|
||||
loop = action.data.get("loop")
|
||||
loop_interval = action.data.get("loop_interval")
|
||||
if loop and loop_interval:
|
||||
while not action_obj.done:
|
||||
if global_vars.is_workflow_stopped(workflow_id):
|
||||
break
|
||||
# 等待
|
||||
logger.info(f"{action.name} 等待 {loop_interval} 秒后继续执行 ...")
|
||||
sleep(loop_interval)
|
||||
# 执行
|
||||
logger.info(f"继续执行动作: {action.id} - {action.name}")
|
||||
result_context = action_obj.execute(workflow_id, action.data, result_context)
|
||||
if action_obj.success:
|
||||
logger.info(f"{action.name} 执行成功")
|
||||
else:
|
||||
logger.error(f"{action.name} 执行失败!")
|
||||
return action_obj.success, action_obj.message, result_context
|
||||
else:
|
||||
logger.error(f"未找到动作: {action.type} - {action.name}")
|
||||
return False, f"未找到动作:{action.type}", context
|
||||
|
||||
def list_actions(self) -> List[dict]:
|
||||
"""
|
||||
获取所有动作
|
||||
"""
|
||||
return [
|
||||
{
|
||||
"type": key,
|
||||
"name": action.name,
|
||||
"description": action.description,
|
||||
"data": {
|
||||
"label": action.name,
|
||||
**action.data
|
||||
}
|
||||
} for key, action in self._actions.items()
|
||||
]
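Actions are discovered purely by module path and duck typing: filter_func only requires a class under app.actions that exposes execute and name. A hypothetical minimal action that would be picked up (real actions would subclass the project's BaseAction; the done/success/message members are assumed from how excute() drives the instance):

# hypothetical app/actions/print_action.py -- illustrative only, not part of this diff
from app.log import logger
from app.schemas import ActionContext


class PrintAction:
    # class-level metadata read by WorkFlowManager.list_actions()
    name = "Print context"
    description = "Log the current workflow context"
    data = {}

    def __init__(self, action_id: str):
        self.action_id = action_id
        self.done = True       # polled by excute() when loop/loop_interval are set
        self.success = True
        self.message = ""

    def execute(self, workflow_id: int, params: dict, context: ActionContext) -> ActionContext:
        logger.info(f"workflow {workflow_id}: {context}")
        return context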
|
||||
@@ -8,3 +8,4 @@ from .systemconfig import SystemConfig
|
||||
from .transferhistory import TransferHistory
|
||||
from .user import User
|
||||
from .userconfig import UserConfig
|
||||
from .workflow import Workflow
|
||||
|
||||
102
app/db/models/workflow.py
Normal file
@@ -0,0 +1,102 @@
|
||||
from datetime import datetime
|
||||
|
||||
from sqlalchemy import Column, Integer, JSON, Sequence, String, and_
|
||||
|
||||
from app.db import Base, db_query, db_update
|
||||
|
||||
|
||||
class Workflow(Base):
|
||||
"""
|
||||
工作流表
|
||||
"""
|
||||
# ID
|
||||
id = Column(Integer, Sequence('id'), primary_key=True, index=True)
|
||||
# 名称
|
||||
name = Column(String, index=True, nullable=False)
|
||||
# 描述
|
||||
description = Column(String)
|
||||
# 定时器
|
||||
timer = Column(String)
|
||||
# 状态:W-等待 R-运行中 P-暂停 S-成功 F-失败
|
||||
state = Column(String, nullable=False, index=True, default='W')
|
||||
# 已执行动作(,分隔)
|
||||
current_action = Column(String)
|
||||
# 任务执行结果
|
||||
result = Column(String)
|
||||
# 已执行次数
|
||||
run_count = Column(Integer, default=0)
|
||||
# 任务列表
|
||||
actions = Column(JSON, default=list)
|
||||
# 任务流
|
||||
flows = Column(JSON, default=list)
|
||||
# 执行上下文
|
||||
context = Column(JSON, default=dict)
|
||||
# 创建时间
|
||||
add_time = Column(String, default=datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
|
||||
# 最后执行时间
|
||||
last_time = Column(String)
|
||||
|
||||
@staticmethod
|
||||
@db_query
|
||||
def get_enabled_workflows(db):
|
||||
return db.query(Workflow).filter(Workflow.state != 'P').all()
|
||||
|
||||
@staticmethod
|
||||
@db_query
|
||||
def get_by_name(db, name: str):
|
||||
return db.query(Workflow).filter(Workflow.name == name).first()
|
||||
|
||||
@staticmethod
|
||||
@db_update
|
||||
def update_state(db, wid: int, state: str):
|
||||
db.query(Workflow).filter(Workflow.id == wid).update({"state": state})
|
||||
return True
|
||||
|
||||
@staticmethod
|
||||
@db_update
|
||||
def start(db, wid: int):
|
||||
db.query(Workflow).filter(Workflow.id == wid).update({
|
||||
"state": 'R'
|
||||
})
|
||||
return True
|
||||
|
||||
@staticmethod
|
||||
@db_update
|
||||
def fail(db, wid: int, result: str):
|
||||
db.query(Workflow).filter(and_(Workflow.id == wid, Workflow.state != "P")).update({
|
||||
"state": 'F',
|
||||
"result": result,
|
||||
"last_time": datetime.now().strftime('%Y-%m-%d %H:%M:%S')
|
||||
})
|
||||
return True
|
||||
|
||||
@staticmethod
|
||||
@db_update
|
||||
def success(db, wid: int, result: str = None):
|
||||
db.query(Workflow).filter(and_(Workflow.id == wid, Workflow.state != "P")).update({
|
||||
"state": 'S',
|
||||
"result": result,
|
||||
"run_count": Workflow.run_count + 1,
|
||||
"last_time": datetime.now().strftime('%Y-%m-%d %H:%M:%S')
|
||||
})
|
||||
return True
|
||||
|
||||
@staticmethod
|
||||
@db_update
|
||||
def reset(db, wid: int):
|
||||
db.query(Workflow).filter(Workflow.id == wid).update({
|
||||
"state": 'W',
|
||||
"result": None,
|
||||
"current_action": None,
|
||||
"run_count": 0,
|
||||
})
|
||||
return True
|
||||
|
||||
@staticmethod
|
||||
@db_update
|
||||
def update_current_action(db, wid: int, action_id: str, context: dict):
|
||||
db.query(Workflow).filter(Workflow.id == wid).update({
|
||||
"current_action": Workflow.current_action + f",{action_id}" if Workflow.current_action else action_id,
|
||||
"context": context
|
||||
})
|
||||
return True
|
||||
68
app/db/workflow_oper.py
Normal file
@@ -0,0 +1,68 @@
|
||||
from typing import List, Tuple
|
||||
|
||||
from app.db import DbOper
|
||||
from app.db.models.workflow import Workflow
|
||||
|
||||
|
||||
class WorkflowOper(DbOper):
|
||||
"""
|
||||
工作流管理
|
||||
"""
|
||||
|
||||
def add(self, **kwargs) -> Tuple[bool, str]:
|
||||
"""
|
||||
新增工作流
|
||||
"""
|
||||
wf = Workflow(**kwargs)
|
||||
if not wf.get_by_name(self._db, kwargs.get("name")):
|
||||
wf.create(self._db)
|
||||
return True, "新增工作流成功"
|
||||
return False, "工作流已存在"
|
||||
|
||||
def get(self, wid: int) -> Workflow:
|
||||
"""
|
||||
查询单个工作流
|
||||
"""
|
||||
return Workflow.get(self._db, wid)
|
||||
|
||||
def list_enabled(self) -> List[Workflow]:
|
||||
"""
|
||||
获取启用的工作流列表
|
||||
"""
|
||||
return Workflow.get_enabled_workflows(self._db)
|
||||
|
||||
def get_by_name(self, name: str) -> Workflow:
|
||||
"""
|
||||
按名称获取工作流
|
||||
"""
|
||||
return Workflow.get_by_name(self._db, name)
|
||||
|
||||
def start(self, wid: int) -> bool:
|
||||
"""
|
||||
启动
|
||||
"""
|
||||
return Workflow.start(self._db, wid)
|
||||
|
||||
def success(self, wid: int, result: str = None) -> bool:
|
||||
"""
|
||||
成功
|
||||
"""
|
||||
return Workflow.success(self._db, wid, result)
|
||||
|
||||
def fail(self, wid: int, result: str) -> bool:
|
||||
"""
|
||||
失败
|
||||
"""
|
||||
return Workflow.fail(self._db, wid, result)
|
||||
|
||||
def step(self, wid: int, action_id: str, context: dict) -> bool:
|
||||
"""
|
||||
步进
|
||||
"""
|
||||
return Workflow.update_current_action(self._db, wid, action_id, context)
|
||||
|
||||
def reset(self, wid: int) -> bool:
|
||||
"""
|
||||
重置
|
||||
"""
|
||||
return Workflow.reset(self._db, wid)
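A sketch of the lifecycle as WorkflowChain drives it (the workflow id and action id are illustrative): start() marks the row as running, step() records each finished action together with the serialized context, and success()/fail() close the run out.

oper = WorkflowOper()
oper.start(1)
oper.step(1, action_id="fetch", context={"content": "<base64 of the pickled ActionContext>"})
oper.success(1)                      # or: oper.fail(1, result="fetch failed")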
|
||||
@@ -1,4 +1,4 @@
|
||||
from typing import Callable, Any
|
||||
from typing import Callable, Any, Optional
|
||||
|
||||
from playwright.sync_api import sync_playwright, Page
|
||||
from cf_clearance import sync_cf_retry, sync_stealth
|
||||
@@ -61,7 +61,7 @@ class PlaywrightHelper:
|
||||
ua: str = None,
|
||||
proxies: dict = None,
|
||||
headless: bool = False,
|
||||
timeout: int = 20) -> str:
|
||||
timeout: int = 20) -> Optional[str]:
|
||||
"""
|
||||
获取网页源码
|
||||
:param url: 网页地址
|
||||
|
||||
@@ -1,9 +1,152 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import queue
|
||||
import threading
|
||||
import time
|
||||
from typing import Optional, Any, Union
|
||||
from datetime import datetime
|
||||
from typing import Any, Union
|
||||
from typing import List, Optional, Callable
|
||||
|
||||
from app.utils.singleton import Singleton
|
||||
from app.core.config import global_vars
|
||||
from app.db.systemconfig_oper import SystemConfigOper
|
||||
from app.schemas.types import SystemConfigKey
|
||||
from app.utils.singleton import Singleton, SingletonClass
|
||||
from app.log import logger
|
||||
|
||||
|
||||
class MessageQueueManager(metaclass=SingletonClass):
|
||||
"""
|
||||
消息发送队列管理器
|
||||
"""
|
||||
|
||||
schedule_periods: List[tuple[int, int, int, int]] = []
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
send_callback: Optional[Callable] = None,
|
||||
check_interval: int = 10
|
||||
) -> None:
|
||||
"""
|
||||
消息队列管理器初始化
|
||||
|
||||
:param send_callback: 实际发送消息的回调函数
|
||||
:param check_interval: 时间检查间隔(秒)
|
||||
"""
|
||||
self.init_config()
|
||||
|
||||
self.queue: queue.Queue[Any] = queue.Queue()
|
||||
self.send_callback = send_callback
|
||||
self.check_interval = check_interval
|
||||
|
||||
self._running = True
|
||||
self.thread = threading.Thread(target=self._monitor_loop, daemon=True)
|
||||
self.thread.start()
|
||||
|
||||
def init_config(self):
|
||||
"""
|
||||
初始化配置
|
||||
"""
|
||||
self.schedule_periods = self._parse_schedule(
|
||||
SystemConfigOper().get(SystemConfigKey.NotificationSendTime)
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def _parse_schedule(periods: Union[list, dict]) -> List[tuple[int, int, int, int]]:
|
||||
"""
|
||||
将字符串时间格式转换为分钟数元组
|
||||
"""
|
||||
parsed = []
|
||||
if not periods:
|
||||
return parsed
|
||||
if not isinstance(periods, list):
|
||||
periods = [periods]
|
||||
for period in periods:
|
||||
if not period:
|
||||
continue
|
||||
start_h, start_m = map(int, period['start'].split(':'))
|
||||
end_h, end_m = map(int, period['end'].split(':'))
|
||||
parsed.append((start_h, start_m, end_h, end_m))
|
||||
return parsed
|
||||
|
||||
@staticmethod
|
||||
def _time_to_minutes(time_str: str) -> int:
|
||||
"""
|
||||
将 'HH:MM' 格式转换为分钟数
|
||||
"""
|
||||
hours, minutes = map(int, time_str.split(':'))
|
||||
return hours * 60 + minutes
|
||||
|
||||
def _is_in_scheduled_time(self, current_time: datetime) -> bool:
|
||||
"""
|
||||
检查当前时间是否在允许发送的时间段内
|
||||
"""
|
||||
if not self.schedule_periods:
|
||||
return True
|
||||
current_minutes = current_time.hour * 60 + current_time.minute
|
||||
for period in self.schedule_periods:
|
||||
s_h, s_m, e_h, e_m = period
|
||||
start = s_h * 60 + s_m
|
||||
end = e_h * 60 + e_m
|
||||
|
||||
if start <= end:
|
||||
if start <= current_minutes <= end:
|
||||
return True
|
||||
else:
|
||||
if current_minutes >= start or current_minutes <= end:
|
||||
return True
|
||||
return False
|
||||
|
||||
def send_message(self, *args, **kwargs) -> None:
|
||||
"""
|
||||
发送消息(立即发送或加入队列)
|
||||
"""
|
||||
if self._is_in_scheduled_time(datetime.now()):
|
||||
self._send(*args, **kwargs)
|
||||
else:
|
||||
self.queue.put({
|
||||
"args": args,
|
||||
"kwargs": kwargs
|
||||
})
|
||||
logger.info(f"消息已加入队列,当前队列长度:{self.queue.qsize()}")
|
||||
|
||||
def _send(self, *args, **kwargs) -> None:
|
||||
"""
|
||||
实际发送消息(可通过回调函数自定义)
|
||||
"""
|
||||
if self.send_callback:
|
||||
try:
|
||||
logger.info(f"发送消息:{kwargs}")
|
||||
self.send_callback(*args, **kwargs)
|
||||
except Exception as e:
|
||||
logger.error(f"发送消息错误:{str(e)}")
|
||||
|
||||
def _monitor_loop(self) -> None:
|
||||
"""
|
||||
后台线程循环检查时间并处理队列
|
||||
"""
|
||||
while self._running:
|
||||
current_time = datetime.now()
|
||||
if self._is_in_scheduled_time(current_time):
|
||||
while not self.queue.empty():
|
||||
if global_vars.is_system_stopped:
|
||||
break
|
||||
if not self._is_in_scheduled_time(datetime.now()):
|
||||
break
|
||||
try:
|
||||
message = self.queue.get_nowait()
|
||||
self._send(*message['args'], **message['kwargs'])
|
||||
logger.info(f"队列剩余消息:{self.queue.qsize()}")
|
||||
except queue.Empty:
|
||||
break
|
||||
time.sleep(self.check_interval)
|
||||
|
||||
def stop(self) -> None:
|
||||
"""
|
||||
停止队列管理器
|
||||
"""
|
||||
self._running = False
|
||||
self.thread.join()
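A schedule period whose end is earlier than its start is treated as spanning midnight. A small illustration with made-up times:

periods = MessageQueueManager._parse_schedule([{"start": "22:00", "end": "07:30"}])
# periods == [(22, 0, 7, 30)]; because 22:00 > 07:30 the window wraps past midnight,
# so 23:15 and 06:00 both count as inside the sending window while 12:00 does not.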
|
||||
|
||||
|
||||
class MessageHelper(metaclass=Singleton):
|
||||
|
||||
@@ -23,6 +23,7 @@ class ModuleHelper:
|
||||
"""
|
||||
|
||||
submodules: list = []
|
||||
loaded_modules = set()
|
||||
packages = importlib.import_module(package_path)
|
||||
for importer, package_name, _ in pkgutil.iter_modules(packages.__path__):
|
||||
try:
|
||||
@@ -35,6 +36,9 @@ class ModuleHelper:
|
||||
if name.startswith('_'):
|
||||
continue
|
||||
if isinstance(obj, type) and filter_func(name, obj):
|
||||
if name in loaded_modules:
|
||||
continue
|
||||
loaded_modules.add(name)
|
||||
submodules.append(obj)
|
||||
except Exception as err:
|
||||
logger.debug(f'加载模块 {package_name} 失败:{str(err)} - {traceback.format_exc()}')
|
||||
|
||||
@@ -63,6 +63,7 @@ class PluginHelper(metaclass=Singleton):
|
||||
return json.loads(res.text)
|
||||
except json.JSONDecodeError:
|
||||
logger.error(f"插件包数据解析失败:{res.text}")
|
||||
return None
|
||||
return {}
|
||||
|
||||
def get_plugin_package_version(self, pid: str, repo_url: str, package_version: str = None) -> Optional[str]:
|
||||
@@ -447,58 +448,6 @@ class PluginHelper(metaclass=Singleton):
|
||||
if plugin_dir.exists():
|
||||
shutil.rmtree(plugin_dir, ignore_errors=True)
|
||||
|
||||
@staticmethod
|
||||
def __pip_uninstall_and_install_with_fallback(requirements_file: Path) -> Tuple[bool, str]:
|
||||
"""
|
||||
先卸载 requirements.txt 中的依赖,再按照自动降级策略重新安装,不使用 PIP 缓存
|
||||
|
||||
:param requirements_file: 依赖的 requirements.txt 文件路径
|
||||
:return: (是否成功, 错误信息)
|
||||
"""
|
||||
# 读取 requirements.txt 文件中的依赖列表
|
||||
try:
|
||||
with open(requirements_file, "r", encoding="utf-8") as f:
|
||||
dependencies = [line.strip() for line in f if line.strip() and not line.startswith("#")]
|
||||
except Exception as e:
|
||||
return False, f"无法读取 requirements.txt 文件:{str(e)}"
|
||||
|
||||
# 1. 先卸载所有依赖包
|
||||
for dep in dependencies:
|
||||
pip_uninstall_command = ["pip", "uninstall", "-y", dep]
|
||||
logger.debug(f"尝试卸载依赖:{dep},命令:{' '.join(pip_uninstall_command)}")
|
||||
success, message = SystemUtils.execute_with_subprocess(pip_uninstall_command)
|
||||
if success:
|
||||
logger.debug(f"依赖 {dep} 卸载成功,输出:{message}")
|
||||
else:
|
||||
error_message = f"卸载依赖 {dep} 失败,错误信息:{message}"
|
||||
logger.error(error_message)
|
||||
|
||||
# 2. 重新安装所有依赖,使用自动降级策略
|
||||
strategies = []
|
||||
|
||||
# 添加策略到列表中
|
||||
if settings.PIP_PROXY:
|
||||
strategies.append(("镜像站",
|
||||
["pip", "install", "-r", str(requirements_file),
|
||||
"-i", settings.PIP_PROXY, "--no-cache-dir"]))
|
||||
if settings.PROXY_HOST:
|
||||
strategies.append(("代理",
|
||||
["pip", "install", "-r", str(requirements_file),
|
||||
"--proxy", settings.PROXY_HOST, "--no-cache-dir"]))
|
||||
strategies.append(("直连", ["pip", "install", "-r", str(requirements_file), "--no-cache-dir"]))
|
||||
|
||||
# 遍历策略进行安装
|
||||
for strategy_name, pip_command in strategies:
|
||||
logger.debug(f"[PIP] 尝试使用策略:{strategy_name} 安装依赖,命令:{' '.join(pip_command)}")
|
||||
success, message = SystemUtils.execute_with_subprocess(pip_command)
|
||||
if success:
|
||||
logger.debug(f"[PIP] 策略:{strategy_name} 安装依赖成功,输出:{message}")
|
||||
return True, message
|
||||
else:
|
||||
logger.error(f"[PIP] 策略:{strategy_name} 安装依赖失败,错误信息:{message}")
|
||||
|
||||
return False, "[PIP] 所有策略均安装依赖失败,请检查网络连接或 PIP 配置"
|
||||
|
||||
@staticmethod
|
||||
def __pip_install_with_fallback(requirements_file: Path) -> Tuple[bool, str]:
|
||||
"""
|
||||
|
||||
@@ -225,27 +225,27 @@ class RssHelper:
|
||||
}
|
||||
|
||||
@staticmethod
|
||||
def parse(url, proxy: bool = False, timeout: int = 15, headers: dict = None) -> Union[List[dict], None]:
|
||||
def parse(url, proxy: bool = False, timeout: int = 15, headers: dict = None) -> Union[List[dict], None, bool]:
|
||||
"""
|
||||
解析RSS订阅URL,获取RSS中的种子信息
|
||||
:param url: RSS地址
|
||||
:param proxy: 是否使用代理
|
||||
:param timeout: 请求超时
|
||||
:param headers: 自定义请求头
|
||||
:return: 种子信息列表,如为None代表Rss过期
|
||||
:return: 种子信息列表,如为None代表Rss过期,如果为False则为错误
|
||||
"""
|
||||
# 开始处理
|
||||
ret_array: list = []
|
||||
if not url:
|
||||
return []
|
||||
return False
|
||||
try:
|
||||
ret = RequestUtils(proxies=settings.PROXY if proxy else None,
|
||||
timeout=timeout, headers=headers).get_res(url)
|
||||
if not ret:
|
||||
return []
|
||||
return False
|
||||
except Exception as err:
|
||||
logger.error(f"获取RSS失败:{str(err)} - {traceback.format_exc()}")
|
||||
return []
|
||||
return False
|
||||
if ret:
|
||||
ret_xml = ""
|
||||
try:
|
||||
@@ -322,6 +322,7 @@ class RssHelper:
|
||||
]
|
||||
if ret_xml in _rss_expired_msg:
|
||||
return None
|
||||
return False
|
||||
return ret_array
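Callers of parse() now have to distinguish three outcomes: a list of entries, None for an expired RSS link, and False for a request or parsing failure. A hedged handling sketch (the URL is illustrative):

result = RssHelper.parse("https://example-site.org/rss", proxy=False)
if result is None:
    print("RSS link has expired, regenerate it")
elif result is False:
    print("request or parsing failed, keep previous data")
else:
    for item in result:
        print(item.get("title"))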
|
||||
|
||||
def get_rss_link(self, url: str, cookie: str, ua: str, proxy: bool = False) -> Tuple[str, str]:
|
||||
|
||||
@@ -33,7 +33,7 @@ class RuleHelper:
|
||||
return group
|
||||
return None
|
||||
|
||||
def get_rule_group_by_media(self, media: MediaInfo, group_names: list = None) -> List[FilterRuleGroup]:
|
||||
def get_rule_group_by_media(self, media: MediaInfo = None, group_names: list = None) -> List[FilterRuleGroup]:
|
||||
"""
|
||||
根据媒体信息获取规则组
|
||||
"""
|
||||
@@ -44,9 +44,9 @@ class RuleHelper:
|
||||
for group in rule_groups:
|
||||
if not group.media_type:
|
||||
ret_groups.append(group)
|
||||
elif not group.category and group.media_type == media.type.value:
|
||||
elif media and not group.category and group.media_type == media.type.value:
|
||||
ret_groups.append(group)
|
||||
elif group.category == media.category:
|
||||
elif media and group.category == media.category:
|
||||
ret_groups.append(group)
|
||||
return ret_groups
|
||||
|
||||
|
||||
@@ -445,6 +445,27 @@ class TorrentHelper(metaclass=Singleton):
|
||||
logger.info(f"{torrent_info.title} 不匹配特效规则 {effect}")
|
||||
return False
|
||||
|
||||
# 大小
|
||||
size_range = filter_params.get("size")
|
||||
if size_range:
|
||||
if size_range.find("-") != -1:
|
||||
# 区间
|
||||
size_min, size_max = size_range.split("-")
|
||||
size_min = float(size_min.strip()) * 1024 * 1024
|
||||
size_max = float(size_max.strip()) * 1024 * 1024
|
||||
if torrent_info.size < size_min or torrent_info.size > size_max:
|
||||
return False
|
||||
elif size_range.startswith(">"):
|
||||
# 大于
|
||||
size_min = float(size_range[1:].strip()) * 1024 * 1024
|
||||
if torrent_info.size < size_min:
|
||||
return False
|
||||
elif size_range.startswith("<"):
|
||||
# 小于
|
||||
size_max = float(size_range[1:].strip()) * 1024 * 1024
|
||||
if torrent_info.size > size_max:
|
||||
return False
|
||||
|
||||
return True
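The new size rule accepts three shapes, each interpreted in MB and compared against torrent_info.size in bytes: an inclusive range, a lower bound and an upper bound. Illustrative values:

filter_params = {"size": "1000-20000"}  # keep torrents between roughly 1 GB and 20 GB
filter_params = {"size": ">500"}        # keep torrents larger than roughly 500 MB
filter_params = {"size": "<30000"}      # keep torrents smaller than roughly 30 GB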
|
||||
|
||||
@staticmethod
|
||||
|
||||
12
app/log.py
@@ -246,12 +246,12 @@ class LoggerManager:
|
||||
else:
|
||||
# 使用默认日志文件
|
||||
logfile = self._default_log_file
|
||||
|
||||
# 获取调用者的模块的logger
|
||||
_logger = self._loggers.get(logfile)
|
||||
if not _logger:
|
||||
_logger = self.__setup_logger(log_file=logfile)
|
||||
self._loggers[logfile] = _logger
|
||||
with LoggerManager._lock: # 添加锁
|
||||
# 获取调用者的模块的logger
|
||||
_logger = self._loggers.get(logfile)
|
||||
if not _logger:
|
||||
_logger = self.__setup_logger(log_file=logfile)
|
||||
self._loggers[logfile] = _logger
|
||||
# 调用logger的方法打印日志
|
||||
if hasattr(_logger, method):
|
||||
log_method = getattr(_logger, method)
|
||||
|
||||
@@ -3,7 +3,7 @@ import re
|
||||
import traceback
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from typing import List, Optional, Union, Dict, Generator, Tuple
|
||||
from typing import List, Optional, Union, Dict, Generator, Tuple, Any
|
||||
|
||||
from requests import Response
|
||||
|
||||
@@ -13,6 +13,7 @@ from app.log import logger
|
||||
from app.schemas.types import MediaType
|
||||
from app.utils.http import RequestUtils
|
||||
from app.utils.url import UrlUtils
|
||||
from app.schemas import MediaServerItem
|
||||
|
||||
|
||||
class Emby:
|
||||
@@ -545,7 +546,7 @@ class Emby:
|
||||
return False
|
||||
return False
|
||||
|
||||
def refresh_library_by_items(self, items: List[schemas.RefreshMediaItem]) -> bool:
|
||||
def refresh_library_by_items(self, items: List[schemas.RefreshMediaItem]) -> Optional[bool]:
|
||||
"""
|
||||
按类型、名称、年份来刷新媒体库
|
||||
:param items: 已识别的需要刷新媒体库的媒体信息列表
|
||||
@@ -668,8 +669,8 @@ class Emby:
|
||||
logger.error(f"连接/Users/{self.user}/Items/{itemid}出错:" + str(e))
|
||||
return None
|
||||
|
||||
def get_items(self, parent: Union[str, int], start_index: int = 0, limit: Optional[int] = -1) \
|
||||
-> Optional[Generator]:
|
||||
def get_items(self, parent: Union[str, int], start_index: int = 0,
|
||||
limit: Optional[int] = -1) -> Generator[MediaServerItem | None | Any, Any, None]:
|
||||
"""
|
||||
获取媒体服务器项目列表,支持分页和不分页逻辑,默认不分页获取所有数据
|
||||
|
||||
|
||||
@@ -676,11 +676,15 @@ class FileManagerModule(_ModuleBase):
                ".zh-tw": ".繁体中文"
            }
            new_sub_tag_list = [
                new_file_type if t == 0 else "%s%s(%s)" % (new_file_type,
                                                           new_sub_tag_dict.get(
                                                               new_file_type, ""
                                                           ),
                                                           t) for t in range(6)
                (".default" + new_file_type if (
                        (settings.DEFAULT_SUB == "zh-cn" and new_file_type == ".chi.zh-cn") or
                        (settings.DEFAULT_SUB == "zh-tw" and new_file_type == ".zh-tw") or
                        (settings.DEFAULT_SUB == "eng" and new_file_type == ".eng")
                ) else new_file_type) if t == 0 else "%s%s(%s)" % (new_file_type,
                                                                   new_sub_tag_dict.get(
                                                                       new_file_type, ""
                                                                   ),
                                                                   t) for t in range(6)
            ]
            for new_sub_tag in new_sub_tag_list:
                new_file: Path = target_file.with_name(target_file.stem + new_sub_tag + file_ext)
@@ -67,14 +67,14 @@ class Alist(StorageBase, metaclass=Singleton):
        return self.__generate_token

    @property
    @cached(maxsize=1, ttl=60 * 60 * 24 * 2 - 60 * 5)
    @cached(maxsize=1, ttl=60 * 60 * 24 * 2 - 60 * 5, skip_empty=True)
    def __generate_token(self) -> str:
        """
        如果设置永久令牌则返回永久令牌,否则使用账号密码生成一个临时 token
        缓存2天,提前5分钟更新
        """
        conf = self.get_conf()
        token = conf.get("token")
        token = conf.get("token")
        if token:
            return str(token)
        resp: Response = RequestUtils(headers={
@@ -201,12 +201,12 @@ class Alist(StorageBase, metaclass=Singleton):

        if resp is None:
            logging.warning(f"请求获取目录 {fileitem.path} 的文件列表失败,无法连接alist服务")
            return
            return None
        if resp.status_code != 200:
            logging.warning(
                f"请求获取目录 {fileitem.path} 的文件列表失败,状态码:{resp.status_code}"
            )
            return
            return None

        result = resp.json()

@@ -214,7 +214,7 @@ class Alist(StorageBase, metaclass=Singleton):
            logging.warning(
                f'获取目录 {fileitem.path} 的文件列表失败,错误信息:{result["message"]}'
            )
            return
            return None

        return [
            schemas.FileItem(
@@ -259,15 +259,15 @@ class Alist(StorageBase, metaclass=Singleton):
        """
        if resp is None:
            logging.warning(f"请求创建目录 {path} 失败,无法连接alist服务")
            return
            return None
        if resp.status_code != 200:
            logging.warning(f"请求创建目录 {path} 失败,状态码:{resp.status_code}")
            return
            return None

        result = resp.json()
        if result["code"] != 200:
            logging.warning(f'创建目录 {path} 失败,错误信息:{result["message"]}')
            return
            return None

        return self.get_item(path)

@@ -349,15 +349,15 @@ class Alist(StorageBase, metaclass=Singleton):
        """
        if resp is None:
            logging.warning(f"请求获取文件 {path} 失败,无法连接alist服务")
            return
            return None
        if resp.status_code != 200:
            logging.warning(f"请求获取文件 {path} 失败,状态码:{resp.status_code}")
            return
            return None

        result = resp.json()
        if result["code"] != 200:
            logging.debug(f'获取文件 {path} 失败,错误信息:{result["message"]}')
            return
            return None

        return schemas.FileItem(
            storage=self.schema.value,
@@ -513,15 +513,15 @@ class Alist(StorageBase, metaclass=Singleton):
        """
        if not resp:
            logging.warning(f"请求获取文件 {path} 失败,无法连接alist服务")
            return
            return None
        if resp.status_code != 200:
            logging.warning(f"请求获取文件 {path} 失败,状态码:{resp.status_code}")
            return
            return None

        result = resp.json()
        if result["code"] != 200:
            logging.warning(f'获取文件 {path} 失败,错误信息:{result["message"]}')
            return
            return None

        if result["data"]["raw_url"]:
            download_url = result["data"]["raw_url"]
@@ -569,7 +569,7 @@ class Alist(StorageBase, metaclass=Singleton):

        if resp.status_code != 200:
            logging.warning(f"请求上传文件 {path} 失败,状态码:{resp.status_code}")
            return
            return None

        new_item = self.get_item(Path(fileitem.path) / path.name)
        if new_item and new_name and new_name != path.name:
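The Alist change adds skip_empty=True to the two-day token cache so a failed (empty) token is not cached for the whole TTL. A toy decorator showing what that flag implies; this is a stand-in, not MoviePilot's actual cached implementation:

import time
from functools import wraps

def cached(ttl: float, skip_empty: bool = False):
    """Tiny TTL cache; when skip_empty is set, falsy results are never stored."""
    def decorator(func):
        store = {}
        @wraps(func)
        def wrapper(*args):
            hit = store.get(args)
            if hit and hit[1] > time.time():
                return hit[0]
            value = func(*args)
            if value or not skip_empty:
                store[args] = (value, time.time() + ttl)
            return value
        return wrapper
    return decorator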
@@ -52,7 +52,7 @@ class FilterModule(_ModuleBase):
        },
        # 官种
        "GZ": {
            "include": [r'官方', r'官种'],
            "include": [r'官方', r'官种', r'官组'],
            "match": ["labels"]
        },
        # 特效字幕
@@ -259,7 +259,7 @@ class FilterModule(_ModuleBase):

        return None if not matched else torrent

    def __match_group(self, torrent: TorrentInfo, rule_group: Union[list, str]) -> bool:
    def __match_group(self, torrent: TorrentInfo, rule_group: Union[list, str]) -> Optional[bool]:
        """
        判断种子是否匹配规则组
        """
@@ -1,18 +1,16 @@
from datetime import datetime
from typing import List, Optional, Tuple, Union

from ruamel.yaml import CommentedMap

from app.core.config import settings
from app.core.context import TorrentInfo
from app.db.site_oper import SiteOper
from app.helper.module import ModuleHelper
from app.helper.sites import SitesHelper
from app.helper.sites import SitesHelper, SiteSpider
from app.log import logger
from app.modules import _ModuleBase
from app.modules.indexer.parser import SiteParserBase
from app.modules.indexer.spider import TorrentSpider
from app.modules.indexer.spider.haidan import HaiDanSpider
from app.modules.indexer.spider.hddolby import HddolbySpider
from app.modules.indexer.spider.mtorrent import MTorrentSpider
from app.modules.indexer.spider.tnode import TNodeSpider
from app.modules.indexer.spider.torrentleech import TorrentLeech
@@ -76,15 +74,17 @@ class IndexerModule(_ModuleBase):
    def init_setting(self) -> Tuple[str, Union[str, bool]]:
        pass

    def search_torrents(self, site: CommentedMap,
    def search_torrents(self, site: dict,
                        keywords: List[str] = None,
                        mtype: MediaType = None,
                        cat: str = None,
                        page: int = 0) -> List[TorrentInfo]:
        """
        搜索一个站点
        :param site: 站点
        :param keywords: 搜索关键词列表
        :param mtype: 媒体类型
        :param cat: 分类
        :param page: 页码
        :return: 资源列表
        """
@@ -122,6 +122,12 @@ class IndexerModule(_ModuleBase):
            logger.warn(f"{site.get('name')} 不支持中文搜索")
            continue

        # 站点流控
        state, msg = SitesHelper().check(StringUtils.get_url_domain(site.get("domain")))
        if state:
            logger.warn(msg)
            continue

        # 去除搜索关键字中的特殊字符
        if search_word:
            search_word = StringUtils.clear(search_word, replace_word=" ", allow_space=True)
@@ -154,11 +160,18 @@ class IndexerModule(_ModuleBase):
                keyword=search_word,
                mtype=mtype
            )
        elif site.get('parser') == "HDDolby":
            error_flag, result = HddolbySpider(site).search(
                keyword=search_word,
                mtype=mtype,
                page=page
            )
        else:
            error_flag, result = self.__spider_search(
                search_word=search_word,
                indexer=site,
                mtype=mtype,
                cat=cat,
                page=page
            )
        if error_flag:
@@ -204,35 +217,42 @@ class IndexerModule(_ModuleBase):
        return __remove_duplicate(torrents)

    @staticmethod
    def __spider_search(indexer: CommentedMap,
    def __spider_search(indexer: dict,
                        search_word: str = None,
                        mtype: MediaType = None,
                        cat: str = None,
                        page: int = 0) -> Tuple[bool, List[dict]]:
        """
        根据关键字搜索单个站点
        :param: indexer: 站点配置
        :param: search_word: 关键字
        :param: cat: 分类
        :param: page: 页码
        :param: mtype: 媒体类型
        :param: timeout: 超时时间
        :return: 是否发生错误, 种子列表
        """
        _spider = TorrentSpider(indexer=indexer,
                                mtype=mtype,
                                keyword=search_word,
                                page=page)
        _spider = SiteSpider(indexer=indexer,
                             keyword=search_word,
                             mtype=mtype,
                             cat=cat,
                             page=page)

        return _spider.is_error, _spider.get_torrents()

    def refresh_torrents(self, site: CommentedMap) -> Optional[List[TorrentInfo]]:
    def refresh_torrents(self, site: dict,
                         keyword: str = None, cat: str = None, page: int = 0) -> Optional[List[TorrentInfo]]:
        """
        获取站点最新一页的种子,多个站点需要多线程处理
        :param site: 站点
        :param keyword: 关键字
        :param cat: 分类
        :param page: 页码
        :return: 种子资源列表
        """
        return self.search_torrents(site=site)
        return self.search_torrents(site=site, keywords=[keyword], cat=cat, page=page)

    def refresh_userdata(self, site: CommentedMap) -> Optional[SiteUserData]:
    def refresh_userdata(self, site: dict) -> Optional[SiteUserData]:
        """
        刷新站点的用户数据
        :param site: 站点
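With the new signatures, refreshing a site's front page and searching it share one code path and differ only in arguments. A hedged usage sketch; the import path and the site dict contents are assumptions, not values from the commit:

from app.modules.indexer import IndexerModule

site = {"name": "demo", "domain": "https://pt.example.org/", "parser": None}  # illustrative config
module = IndexerModule()

# Browse the newest torrents (front page, no keyword)
latest = module.refresh_torrents(site=site, keyword=None, cat=None, page=0)

# Keyword search on the same site
results = module.search_torrents(site=site, keywords=["Dune"], cat=None, page=1)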
@@ -32,6 +32,7 @@ class SiteSchema(Enum):
    TNode = "TNode"
    MTorrent = "MTorrent"
    Yema = "Yema"
    HDDolby = "HDDolby"


class SiteParserBase(metaclass=ABCMeta):
@@ -155,11 +156,17 @@ class SiteParserBase(metaclass=ABCMeta):
        解析站点信息
        :return:
        """
        # 获取站点首页html
        self._index_html = self._get_page_content(url=self._site_url)
        # 检查是否已经登录
        if not self._parse_logged_in(self._index_html):
            return
        # Cookie模式时,获取站点首页html
        if self.request_mode == "apikey":
            if not self.apikey and not self.token:
                logger.warn(f"{self._site_name} 未设置cookie 或 apikey/token,跳过后续操作")
                return
            self._index_html = {}
        else:
            # 检查是否已经登录
            self._index_html = self._get_page_content(url=self._site_url)
            if not self._parse_logged_in(self._index_html):
                return
        # 解析站点页面
        self._parse_site_page(self._index_html)
        # 解析用户基础信息
@@ -293,9 +300,13 @@ class SiteParserBase(metaclass=ABCMeta):
        req_headers = None
        proxies = settings.PROXY if self._proxy else None
        if self._ua or headers or self._addition_headers:
            req_headers = {
                "User-Agent": f"{self._ua}"
            }

            if self.request_mode == "apikey":
                req_headers = {}
            else:
                req_headers = {
                    "User-Agent": f"{self._ua}"
                }

            if headers:
                req_headers.update(headers)
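The parser base now separates an apikey request mode (no cookie login, the key travels in a header) from the default cookie mode. A reduced sketch of that header selection, with made-up parameter names standing in for the parser's attributes:

def build_headers(request_mode: str, ua: str, apikey: str | None, extra: dict | None) -> dict:
    # apikey mode: the key goes into a dedicated header and no User-Agent is forced
    if request_mode == "apikey":
        headers = {"x-api-key": apikey or ""}
    else:
        headers = {"User-Agent": ua}
    if extra:
        headers.update(extra)
    return headers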
157 app/modules/indexer/parser/hddolby.py Normal file
@@ -0,0 +1,157 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
import json
|
||||
from typing import Optional, Tuple
|
||||
|
||||
from app.modules.indexer.parser import SiteParserBase, SiteSchema
|
||||
from app.utils.string import StringUtils
|
||||
|
||||
|
||||
class HDDolbySiteUserInfo(SiteParserBase):
|
||||
schema = SiteSchema.HDDolby
|
||||
request_mode = "apikey"
|
||||
|
||||
# 用户级别字典
|
||||
HDDolby_sysRoleList = {
|
||||
"0": "Peasant",
|
||||
"1": "User",
|
||||
"2": "Power User",
|
||||
"3": "Elite User",
|
||||
"4": "Crazy User",
|
||||
"5": "Insane User",
|
||||
"6": "Veteran User",
|
||||
"7": "Extreme User",
|
||||
"8": "Ultimate User",
|
||||
"9": "Nexus Master",
|
||||
"10": "VIP",
|
||||
"11": "Retiree",
|
||||
"12": "Helper",
|
||||
"13": "Seeder",
|
||||
"14": "Transferrer",
|
||||
"15": "Uploader",
|
||||
"16": "Torrent Manager",
|
||||
"17": "Forum Moderator",
|
||||
"18": "Coder",
|
||||
"19": "Moderator",
|
||||
"20": "Administrator",
|
||||
"21": "Sysop",
|
||||
"22": "Staff Leader",
|
||||
}
|
||||
|
||||
def _parse_site_page(self, html_text: str):
|
||||
"""
|
||||
获取站点页面地址
|
||||
"""
|
||||
# 更换api地址
|
||||
self._base_url = f"https://api.{StringUtils.get_url_domain(self._base_url)}"
|
||||
self._user_traffic_page = None
|
||||
self._user_detail_page = None
|
||||
self._user_basic_page = "api/v1/user/data"
|
||||
self._user_basic_params = {}
|
||||
self._user_basic_headers = {
|
||||
"Content-Type": "application/json",
|
||||
"Accept": "application/json, text/plain, */*"
|
||||
}
|
||||
self._sys_mail_unread_page = None
|
||||
self._user_mail_unread_page = None
|
||||
self._mail_unread_params = {}
|
||||
self._torrent_seeding_page = "api/v1/user/peers"
|
||||
self._torrent_seeding_params = {}
|
||||
self._torrent_seeding_headers = {
|
||||
"Content-Type": "application/json",
|
||||
"Accept": "application/json, text/plain, */*"
|
||||
}
|
||||
self._addition_headers = {
|
||||
"x-api-key": self.apikey,
|
||||
}
|
||||
|
||||
def _parse_logged_in(self, html_text):
|
||||
"""
|
||||
判断是否登录成功, 通过判断是否存在用户信息
|
||||
暂时跳过检测,待后续优化
|
||||
:param html_text:
|
||||
:return:
|
||||
"""
|
||||
return True
|
||||
|
||||
def _parse_user_base_info(self, html_text: str):
|
||||
"""
|
||||
解析用户基本信息,这里把_parse_user_traffic_info和_parse_user_detail_info合并到这里
|
||||
"""
|
||||
if not html_text:
|
||||
return None
|
||||
detail = json.loads(html_text)
|
||||
if not detail or detail.get("status") != 0:
|
||||
return
|
||||
user_infos = detail.get("data")
|
||||
"""
|
||||
{
|
||||
"id": "1",
|
||||
"added": "2019-03-03 15:30:36",
|
||||
"last_access": "2025-02-18 19:48:04",
|
||||
"class": "22",
|
||||
"uploaded": "852071699418375",
|
||||
"downloaded": "1885536536176",
|
||||
"seedbonus": "99774808.0",
|
||||
"sebonus": "3739023.7",
|
||||
"unread_messages": "0",
|
||||
}
|
||||
"""
|
||||
if not user_infos:
|
||||
return
|
||||
user_info = user_infos[0]
|
||||
self.userid = user_info.get("id")
|
||||
self.username = user_info.get("username")
|
||||
self.user_level = self.HDDolby_sysRoleList.get(user_info.get("class") or "1")
|
||||
self.join_at = user_info.get("added")
|
||||
self.upload = int(user_info.get("uploaded") or '0')
|
||||
self.download = int(user_info.get("downloaded") or '0')
|
||||
self.ratio = round(self.upload / self.download, 2) if self.download else 0
|
||||
self.bonus = float(user_info.get("seedbonus") or "0")
|
||||
self.message_unread = int(user_info.get("unread_messages") or '0')
|
||||
|
||||
def _parse_user_traffic_info(self, html_text: str):
|
||||
"""
|
||||
解析用户流量信息
|
||||
"""
|
||||
pass
|
||||
|
||||
def _parse_user_detail_info(self, html_text: str):
|
||||
"""
|
||||
解析用户详细信息
|
||||
"""
|
||||
pass
|
||||
|
||||
def _parse_user_torrent_seeding_info(self, html_text: str, multi_page: bool = False) -> Optional[str]:
|
||||
"""
|
||||
解析用户做种信息
|
||||
"""
|
||||
if not html_text:
|
||||
return None
|
||||
seeding_info = json.loads(html_text)
|
||||
if not seeding_info or seeding_info.get("status") != 0:
|
||||
return None
|
||||
torrents = seeding_info.get("data", [])
|
||||
page_seeding_size = 0
|
||||
page_seeding_info = []
|
||||
for info in torrents:
|
||||
size = info.get("size")
|
||||
seeder = info.get("seeders") or 1
|
||||
page_seeding_size += size
|
||||
page_seeding_info.append([seeder, size])
|
||||
self.seeding += len(torrents)
|
||||
self.seeding_size += page_seeding_size
|
||||
self.seeding_info.extend(page_seeding_info)
|
||||
|
||||
return None
|
||||
|
||||
def _parse_message_unread_links(self, html_text: str, msg_links: list) -> Optional[str]:
|
||||
"""
|
||||
解析未读消息链接,这里直接读出详情
|
||||
"""
|
||||
pass
|
||||
|
||||
def _parse_message_content(self, html_text) -> Tuple[Optional[str], Optional[str], Optional[str]]:
|
||||
"""
|
||||
解析消息内容
|
||||
"""
|
||||
pass
|
||||
@@ -54,7 +54,7 @@ class IptSiteUserInfo(SiteParserBase):
    def _parse_user_torrent_seeding_info(self, html_text: str, multi_page: bool = False) -> Optional[str]:
        html = etree.HTML(html_text)
        if not StringUtils.is_valid_html_element(html):
            return
            return None
        # seeding start
        seeding_end_pos = 3
        if html.xpath('//tr/td[text() = "Leechers"]'):
@@ -207,13 +207,14 @@ class NexusPhpSiteUserInfo(SiteParserBase):

        # 是否存在下页数据
        next_page = None
        next_page_text = html.xpath('//a[contains(.//text(), "下一页") or contains(.//text(), "下一頁") or contains(.//text(), ">")]/@href')

        #防止识别到详情页
        next_page_text = html.xpath(
            '//a[contains(.//text(), "下一页") or contains(.//text(), "下一頁") or contains(.//text(), ">")]/@href')

        # 防止识别到详情页
        while next_page_text:
            next_page = next_page_text.pop().strip()
            if not next_page.startswith('details.php'):
                break;
                break
            next_page = None

        # fix up page url
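The NexusPhp fix keeps popping candidate next-page links until one is not a details.php URL, so a torrent detail link can no longer be mistaken for pagination. The same loop as a standalone helper (the sample hrefs are illustrative):

def pick_next_page(hrefs: list[str]) -> str | None:
    """Return the last pagination href that is not a torrent details link."""
    next_page = None
    while hrefs:
        candidate = hrefs.pop().strip()
        if not candidate.startswith("details.php"):
            next_page = candidate
            break
    return next_page

# pick_next_page(["details.php?id=1", "torrents.php?page=2"]) -> "torrents.php?page=2"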
@@ -65,7 +65,7 @@ class TNodeSiteUserInfo(SiteParserBase):
        """
        seeding_info = json.loads(html_text)
        if seeding_info.get("status") != 200:
            return
            return None

        torrents = seeding_info.get("data", {}).get("torrents", [])
@@ -1,742 +0,0 @@
|
||||
import copy
|
||||
import datetime
|
||||
import re
|
||||
import traceback
|
||||
from typing import List
|
||||
from urllib.parse import quote, urlencode, urlparse, parse_qs
|
||||
|
||||
from jinja2 import Template
|
||||
from pyquery import PyQuery
|
||||
from ruamel.yaml import CommentedMap
|
||||
|
||||
from app.core.config import settings
|
||||
from app.helper.browser import PlaywrightHelper
|
||||
from app.log import logger
|
||||
from app.schemas.types import MediaType
|
||||
from app.utils.http import RequestUtils
|
||||
from app.utils.string import StringUtils
|
||||
|
||||
|
||||
class TorrentSpider:
|
||||
# 是否出现错误
|
||||
is_error: bool = False
|
||||
# 索引器ID
|
||||
indexerid: int = None
|
||||
# 索引器名称
|
||||
indexername: str = None
|
||||
# 站点域名
|
||||
domain: str = None
|
||||
# 站点Cookie
|
||||
cookie: str = None
|
||||
# 站点UA
|
||||
ua: str = None
|
||||
# Requests 代理
|
||||
proxies: dict = None
|
||||
# playwright 代理
|
||||
proxy_server: dict = None
|
||||
# 是否渲染
|
||||
render: bool = False
|
||||
# Referer
|
||||
referer: str = None
|
||||
# 搜索关键字
|
||||
keyword: str = None
|
||||
# 媒体类型
|
||||
mtype: MediaType = None
|
||||
# 搜索路径、方式配置
|
||||
search: dict = {}
|
||||
# 批量搜索配置
|
||||
batch: dict = {}
|
||||
# 浏览配置
|
||||
browse: dict = {}
|
||||
# 站点分类配置
|
||||
category: dict = {}
|
||||
# 站点种子列表配置
|
||||
list: dict = {}
|
||||
# 站点种子字段配置
|
||||
fields: dict = {}
|
||||
# 页码
|
||||
page: int = 0
|
||||
# 搜索条数, 默认: 100条
|
||||
result_num: int = 100
|
||||
# 单个种子信息
|
||||
torrents_info: dict = {}
|
||||
# 种子列表
|
||||
torrents_info_array: list = []
|
||||
# 搜索超时, 默认: 15秒
|
||||
_timeout = 15
|
||||
|
||||
def __init__(self,
|
||||
indexer: CommentedMap,
|
||||
keyword: [str, list] = None,
|
||||
page: int = 0,
|
||||
referer: str = None,
|
||||
mtype: MediaType = None):
|
||||
"""
|
||||
设置查询参数
|
||||
:param indexer: 索引器
|
||||
:param keyword: 搜索关键字,如果数组则为批量搜索
|
||||
:param page: 页码
|
||||
:param referer: Referer
|
||||
:param mtype: 媒体类型
|
||||
"""
|
||||
if not indexer:
|
||||
return
|
||||
self.keyword = keyword
|
||||
self.mtype = mtype
|
||||
self.indexerid = indexer.get('id')
|
||||
self.indexername = indexer.get('name')
|
||||
self.search = indexer.get('search')
|
||||
self.batch = indexer.get('batch')
|
||||
self.browse = indexer.get('browse')
|
||||
self.category = indexer.get('category')
|
||||
self.list = indexer.get('torrents').get('list', {})
|
||||
self.fields = indexer.get('torrents').get('fields')
|
||||
self.render = indexer.get('render')
|
||||
self.domain = indexer.get('domain')
|
||||
self.result_num = int(indexer.get('result_num') or 100)
|
||||
self._timeout = int(indexer.get('timeout') or 15)
|
||||
self.page = page
|
||||
if self.domain and not str(self.domain).endswith("/"):
|
||||
self.domain = self.domain + "/"
|
||||
if indexer.get('ua'):
|
||||
self.ua = indexer.get('ua') or settings.USER_AGENT
|
||||
else:
|
||||
self.ua = settings.USER_AGENT
|
||||
if indexer.get('proxy'):
|
||||
self.proxies = settings.PROXY
|
||||
self.proxy_server = settings.PROXY_SERVER
|
||||
if indexer.get('cookie'):
|
||||
self.cookie = indexer.get('cookie')
|
||||
if referer:
|
||||
self.referer = referer
|
||||
self.torrents_info_array = []
|
||||
|
||||
def get_torrents(self) -> List[dict]:
|
||||
"""
|
||||
开始请求
|
||||
"""
|
||||
if not self.search or not self.domain:
|
||||
return []
|
||||
|
||||
# 种子搜索相对路径
|
||||
paths = self.search.get('paths', [])
|
||||
torrentspath = ""
|
||||
if len(paths) == 1:
|
||||
torrentspath = paths[0].get('path', '')
|
||||
else:
|
||||
for path in paths:
|
||||
if path.get("type") == "all" and not self.mtype:
|
||||
torrentspath = path.get('path')
|
||||
break
|
||||
elif path.get("type") == "movie" and self.mtype == MediaType.MOVIE:
|
||||
torrentspath = path.get('path')
|
||||
break
|
||||
elif path.get("type") == "tv" and self.mtype == MediaType.TV:
|
||||
torrentspath = path.get('path')
|
||||
break
|
||||
|
||||
# 精确搜索
|
||||
if self.keyword:
|
||||
|
||||
if isinstance(self.keyword, list):
|
||||
# 批量查询
|
||||
if self.batch:
|
||||
delimiter = self.batch.get('delimiter') or ' '
|
||||
space_replace = self.batch.get('space_replace') or ' '
|
||||
search_word = delimiter.join([str(k).replace(' ',
|
||||
space_replace) for k in self.keyword])
|
||||
else:
|
||||
search_word = " ".join(self.keyword)
|
||||
# 查询模式:或
|
||||
search_mode = "1"
|
||||
else:
|
||||
# 单个查询
|
||||
search_word = self.keyword
|
||||
# 查询模式与
|
||||
search_mode = "0"
|
||||
|
||||
# 搜索URL
|
||||
indexer_params = self.search.get("params", {}).copy()
|
||||
if indexer_params:
|
||||
search_area = indexer_params.get('search_area')
|
||||
# search_area非0表示支持imdbid搜索
|
||||
if (search_area and
|
||||
(not self.keyword or not self.keyword.startswith('tt'))):
|
||||
# 支持imdbid搜索,但关键字不是imdbid时,不启用imdbid搜索
|
||||
indexer_params.pop('search_area')
|
||||
# 变量字典
|
||||
inputs_dict = {
|
||||
"keyword": search_word
|
||||
}
|
||||
# 查询参数,默认查询标题
|
||||
params = {
|
||||
"search_mode": search_mode,
|
||||
"search_area": 0,
|
||||
"page": self.page or 0,
|
||||
"notnewword": 1
|
||||
}
|
||||
# 额外参数
|
||||
for key, value in indexer_params.items():
|
||||
params.update({
|
||||
"%s" % key: str(value).format(**inputs_dict)
|
||||
})
|
||||
# 分类条件
|
||||
if self.category:
|
||||
if self.mtype == MediaType.TV:
|
||||
cats = self.category.get("tv") or []
|
||||
elif self.mtype == MediaType.MOVIE:
|
||||
cats = self.category.get("movie") or []
|
||||
else:
|
||||
cats = (self.category.get("movie") or []) + (self.category.get("tv") or [])
|
||||
for cat in cats:
|
||||
if self.category.get("field"):
|
||||
value = params.get(self.category.get("field"), "")
|
||||
params.update({
|
||||
"%s" % self.category.get("field"): value + self.category.get("delimiter",
|
||||
' ') + cat.get("id")
|
||||
})
|
||||
else:
|
||||
params.update({
|
||||
"cat%s" % cat.get("id"): 1
|
||||
})
|
||||
searchurl = self.domain + torrentspath + "?" + urlencode(params)
|
||||
else:
|
||||
# 变量字典
|
||||
inputs_dict = {
|
||||
"keyword": quote(search_word),
|
||||
"page": self.page or 0
|
||||
}
|
||||
# 无额外参数
|
||||
searchurl = self.domain + str(torrentspath).format(**inputs_dict)
|
||||
|
||||
# 列表浏览
|
||||
else:
|
||||
# 变量字典
|
||||
inputs_dict = {
|
||||
"page": self.page or 0,
|
||||
"keyword": ""
|
||||
}
|
||||
# 有单独浏览路径
|
||||
if self.browse:
|
||||
torrentspath = self.browse.get("path")
|
||||
if self.browse.get("start"):
|
||||
start_page = int(self.browse.get("start")) + int(self.page or 0)
|
||||
inputs_dict.update({
|
||||
"page": start_page
|
||||
})
|
||||
elif self.page:
|
||||
torrentspath = torrentspath + f"?page={self.page}"
|
||||
# 搜索Url
|
||||
searchurl = self.domain + str(torrentspath).format(**inputs_dict)
|
||||
|
||||
logger.info(f"开始请求:{searchurl}")
|
||||
|
||||
if self.render:
|
||||
# 浏览器仿真
|
||||
page_source = PlaywrightHelper().get_page_source(
|
||||
url=searchurl,
|
||||
cookies=self.cookie,
|
||||
ua=self.ua,
|
||||
proxies=self.proxy_server,
|
||||
timeout=self._timeout
|
||||
)
|
||||
else:
|
||||
# requests请求
|
||||
ret = RequestUtils(
|
||||
ua=self.ua,
|
||||
cookies=self.cookie,
|
||||
timeout=self._timeout,
|
||||
referer=self.referer,
|
||||
proxies=self.proxies
|
||||
).get_res(searchurl, allow_redirects=True)
|
||||
page_source = RequestUtils.get_decoded_html_content(ret,
|
||||
settings.ENCODING_DETECTION_PERFORMANCE_MODE,
|
||||
settings.ENCODING_DETECTION_MIN_CONFIDENCE)
|
||||
|
||||
# 解析
|
||||
return self.parse(page_source)
|
||||
|
||||
def __get_title(self, torrent):
|
||||
# title default text
|
||||
if 'title' not in self.fields:
|
||||
return
|
||||
selector = self.fields.get('title', {})
|
||||
if 'selector' in selector:
|
||||
title = torrent(selector.get('selector', '')).clone()
|
||||
self.__remove(title, selector)
|
||||
items = self.__attribute_or_text(title, selector)
|
||||
self.torrents_info['title'] = self.__index(items, selector)
|
||||
elif 'text' in selector:
|
||||
render_dict = {}
|
||||
if "title_default" in self.fields:
|
||||
title_default_selector = self.fields.get('title_default', {})
|
||||
title_default_item = torrent(title_default_selector.get('selector', '')).clone()
|
||||
self.__remove(title_default_item, title_default_selector)
|
||||
items = self.__attribute_or_text(title_default_item, selector)
|
||||
title_default = self.__index(items, title_default_selector)
|
||||
render_dict.update({'title_default': title_default})
|
||||
if "title_optional" in self.fields:
|
||||
title_optional_selector = self.fields.get('title_optional', {})
|
||||
title_optional_item = torrent(title_optional_selector.get('selector', '')).clone()
|
||||
self.__remove(title_optional_item, title_optional_selector)
|
||||
items = self.__attribute_or_text(title_optional_item, title_optional_selector)
|
||||
title_optional = self.__index(items, title_optional_selector)
|
||||
render_dict.update({'title_optional': title_optional})
|
||||
self.torrents_info['title'] = Template(selector.get('text')).render(fields=render_dict)
|
||||
self.torrents_info['title'] = self.__filter_text(self.torrents_info.get('title'),
|
||||
selector.get('filters'))
|
||||
|
||||
def __get_description(self, torrent):
|
||||
# title optional text
|
||||
if 'description' not in self.fields:
|
||||
return
|
||||
selector = self.fields.get('description', {})
|
||||
if "selector" in selector \
|
||||
or "selectors" in selector:
|
||||
description = torrent(selector.get('selector', selector.get('selectors', ''))).clone()
|
||||
if description:
|
||||
self.__remove(description, selector)
|
||||
items = self.__attribute_or_text(description, selector)
|
||||
self.torrents_info['description'] = self.__index(items, selector)
|
||||
elif "text" in selector:
|
||||
render_dict = {}
|
||||
if "tags" in self.fields:
|
||||
tags_selector = self.fields.get('tags', {})
|
||||
tags_item = torrent(tags_selector.get('selector', '')).clone()
|
||||
self.__remove(tags_item, tags_selector)
|
||||
items = self.__attribute_or_text(tags_item, tags_selector)
|
||||
tag = self.__index(items, tags_selector)
|
||||
render_dict.update({'tags': tag})
|
||||
if "subject" in self.fields:
|
||||
subject_selector = self.fields.get('subject', {})
|
||||
subject_item = torrent(subject_selector.get('selector', '')).clone()
|
||||
self.__remove(subject_item, subject_selector)
|
||||
items = self.__attribute_or_text(subject_item, subject_selector)
|
||||
subject = self.__index(items, subject_selector)
|
||||
render_dict.update({'subject': subject})
|
||||
if "description_free_forever" in self.fields:
|
||||
description_free_forever_selector = self.fields.get("description_free_forever", {})
|
||||
description_free_forever_item = torrent(description_free_forever_selector.get("selector", '')).clone()
|
||||
self.__remove(description_free_forever_item, description_free_forever_selector)
|
||||
items = self.__attribute_or_text(description_free_forever_item, description_free_forever_selector)
|
||||
description_free_forever = self.__index(items, description_free_forever_selector)
|
||||
render_dict.update({"description_free_forever": description_free_forever})
|
||||
if "description_normal" in self.fields:
|
||||
description_normal_selector = self.fields.get("description_normal", {})
|
||||
description_normal_item = torrent(description_normal_selector.get("selector", '')).clone()
|
||||
self.__remove(description_normal_item, description_normal_selector)
|
||||
items = self.__attribute_or_text(description_normal_item, description_normal_selector)
|
||||
description_normal = self.__index(items, description_normal_selector)
|
||||
render_dict.update({"description_normal": description_normal})
|
||||
self.torrents_info['description'] = Template(selector.get('text')).render(fields=render_dict)
|
||||
self.torrents_info['description'] = self.__filter_text(self.torrents_info.get('description'),
|
||||
selector.get('filters'))
|
||||
|
||||
def __get_detail(self, torrent):
|
||||
# details page text
|
||||
if 'details' not in self.fields:
|
||||
return
|
||||
selector = self.fields.get('details', {})
|
||||
details = torrent(selector.get('selector', '')).clone()
|
||||
self.__remove(details, selector)
|
||||
items = self.__attribute_or_text(details, selector)
|
||||
item = self.__index(items, selector)
|
||||
detail_link = self.__filter_text(item, selector.get('filters'))
|
||||
if detail_link:
|
||||
if not detail_link.startswith("http"):
|
||||
if detail_link.startswith("//"):
|
||||
self.torrents_info['page_url'] = self.domain.split(":")[0] + ":" + detail_link
|
||||
elif detail_link.startswith("/"):
|
||||
self.torrents_info['page_url'] = self.domain + detail_link[1:]
|
||||
else:
|
||||
self.torrents_info['page_url'] = self.domain + detail_link
|
||||
else:
|
||||
self.torrents_info['page_url'] = detail_link
|
||||
|
||||
def __get_download(self, torrent):
|
||||
# download link text
|
||||
if 'download' not in self.fields:
|
||||
return
|
||||
selector = self.fields.get('download', {})
|
||||
download = torrent(selector.get('selector', '')).clone()
|
||||
self.__remove(download, selector)
|
||||
items = self.__attribute_or_text(download, selector)
|
||||
item = self.__index(items, selector)
|
||||
download_link = self.__filter_text(item, selector.get('filters'))
|
||||
if download_link:
|
||||
if not download_link.startswith("http") \
|
||||
and not download_link.startswith("magnet"):
|
||||
_scheme, _domain = StringUtils.get_url_netloc(self.domain)
|
||||
if _domain in download_link:
|
||||
if download_link.startswith("/"):
|
||||
self.torrents_info['enclosure'] = f"{_scheme}:{download_link}"
|
||||
else:
|
||||
self.torrents_info['enclosure'] = f"{_scheme}://{download_link}"
|
||||
else:
|
||||
if download_link.startswith("/"):
|
||||
self.torrents_info['enclosure'] = f"{self.domain}{download_link[1:]}"
|
||||
else:
|
||||
self.torrents_info['enclosure'] = f"{self.domain}{download_link}"
|
||||
else:
|
||||
self.torrents_info['enclosure'] = download_link
|
||||
|
||||
def __get_imdbid(self, torrent):
|
||||
# imdbid
|
||||
if "imdbid" not in self.fields:
|
||||
return
|
||||
selector = self.fields.get('imdbid', {})
|
||||
imdbid = torrent(selector.get('selector', '')).clone()
|
||||
self.__remove(imdbid, selector)
|
||||
items = self.__attribute_or_text(imdbid, selector)
|
||||
item = self.__index(items, selector)
|
||||
self.torrents_info['imdbid'] = item
|
||||
self.torrents_info['imdbid'] = self.__filter_text(self.torrents_info.get('imdbid'),
|
||||
selector.get('filters'))
|
||||
|
||||
def __get_size(self, torrent):
|
||||
# torrent size int
|
||||
if 'size' not in self.fields:
|
||||
return
|
||||
selector = self.fields.get('size', {})
|
||||
size = torrent(selector.get('selector', selector.get("selectors", ''))).clone()
|
||||
self.__remove(size, selector)
|
||||
items = self.__attribute_or_text(size, selector)
|
||||
item = self.__index(items, selector)
|
||||
if item:
|
||||
size_val = item.replace("\n", "").strip()
|
||||
size_val = self.__filter_text(size_val,
|
||||
selector.get('filters'))
|
||||
self.torrents_info['size'] = StringUtils.num_filesize(size_val)
|
||||
else:
|
||||
self.torrents_info['size'] = 0
|
||||
|
||||
def __get_leechers(self, torrent):
|
||||
# torrent leechers int
|
||||
if 'leechers' not in self.fields:
|
||||
return
|
||||
selector = self.fields.get('leechers', {})
|
||||
leechers = torrent(selector.get('selector', '')).clone()
|
||||
self.__remove(leechers, selector)
|
||||
items = self.__attribute_or_text(leechers, selector)
|
||||
item = self.__index(items, selector)
|
||||
if item:
|
||||
peers_val = item.split("/")[0]
|
||||
peers_val = peers_val.replace(",", "")
|
||||
peers_val = self.__filter_text(peers_val,
|
||||
selector.get('filters'))
|
||||
self.torrents_info['peers'] = int(peers_val) if peers_val and peers_val.isdigit() else 0
|
||||
else:
|
||||
self.torrents_info['peers'] = 0
|
||||
|
||||
def __get_seeders(self, torrent):
|
||||
# torrent leechers int
|
||||
if 'seeders' not in self.fields:
|
||||
return
|
||||
selector = self.fields.get('seeders', {})
|
||||
seeders = torrent(selector.get('selector', '')).clone()
|
||||
self.__remove(seeders, selector)
|
||||
items = self.__attribute_or_text(seeders, selector)
|
||||
item = self.__index(items, selector)
|
||||
if item:
|
||||
seeders_val = item.split("/")[0]
|
||||
seeders_val = seeders_val.replace(",", "")
|
||||
seeders_val = self.__filter_text(seeders_val,
|
||||
selector.get('filters'))
|
||||
self.torrents_info['seeders'] = int(seeders_val) if seeders_val and seeders_val.isdigit() else 0
|
||||
else:
|
||||
self.torrents_info['seeders'] = 0
|
||||
|
||||
def __get_grabs(self, torrent):
|
||||
# torrent grabs int
|
||||
if 'grabs' not in self.fields:
|
||||
return
|
||||
selector = self.fields.get('grabs', {})
|
||||
grabs = torrent(selector.get('selector', '')).clone()
|
||||
self.__remove(grabs, selector)
|
||||
items = self.__attribute_or_text(grabs, selector)
|
||||
item = self.__index(items, selector)
|
||||
if item:
|
||||
grabs_val = item.split("/")[0]
|
||||
grabs_val = grabs_val.replace(",", "")
|
||||
grabs_val = self.__filter_text(grabs_val,
|
||||
selector.get('filters'))
|
||||
self.torrents_info['grabs'] = int(grabs_val) if grabs_val and grabs_val.isdigit() else 0
|
||||
else:
|
||||
self.torrents_info['grabs'] = 0
|
||||
|
||||
def __get_pubdate(self, torrent):
|
||||
# torrent pubdate yyyy-mm-dd hh:mm:ss
|
||||
if 'date_added' not in self.fields:
|
||||
return
|
||||
selector = self.fields.get('date_added', {})
|
||||
pubdate = torrent(selector.get('selector', '')).clone()
|
||||
self.__remove(pubdate, selector)
|
||||
items = self.__attribute_or_text(pubdate, selector)
|
||||
pubdate_str = self.__index(items, selector)
|
||||
if pubdate_str:
|
||||
pubdate_str = pubdate_str.replace('\n', ' ').strip()
|
||||
self.torrents_info['pubdate'] = self.__filter_text(pubdate_str,
|
||||
selector.get('filters'))
|
||||
|
||||
def __get_date_elapsed(self, torrent):
|
||||
# torrent data elaspsed text
|
||||
if 'date_elapsed' not in self.fields:
|
||||
return
|
||||
selector = self.fields.get('date_elapsed', {})
|
||||
date_elapsed = torrent(selector.get('selector', '')).clone()
|
||||
self.__remove(date_elapsed, selector)
|
||||
items = self.__attribute_or_text(date_elapsed, selector)
|
||||
self.torrents_info['date_elapsed'] = self.__index(items, selector)
|
||||
self.torrents_info['date_elapsed'] = self.__filter_text(self.torrents_info.get('date_elapsed'),
|
||||
selector.get('filters'))
|
||||
|
||||
def __get_downloadvolumefactor(self, torrent):
|
||||
# downloadvolumefactor int
|
||||
selector = self.fields.get('downloadvolumefactor', {})
|
||||
if not selector:
|
||||
return
|
||||
self.torrents_info['downloadvolumefactor'] = 1
|
||||
if 'case' in selector:
|
||||
for downloadvolumefactorselector in list(selector.get('case', {}).keys()):
|
||||
downloadvolumefactor = torrent(downloadvolumefactorselector)
|
||||
if len(downloadvolumefactor) > 0:
|
||||
self.torrents_info['downloadvolumefactor'] = selector.get('case', {}).get(
|
||||
downloadvolumefactorselector)
|
||||
break
|
||||
elif "selector" in selector:
|
||||
downloadvolume = torrent(selector.get('selector', '')).clone()
|
||||
self.__remove(downloadvolume, selector)
|
||||
items = self.__attribute_or_text(downloadvolume, selector)
|
||||
item = self.__index(items, selector)
|
||||
if item:
|
||||
downloadvolumefactor = re.search(r'(\d+\.?\d*)', item)
|
||||
if downloadvolumefactor:
|
||||
self.torrents_info['downloadvolumefactor'] = int(downloadvolumefactor.group(1))
|
||||
|
||||
def __get_uploadvolumefactor(self, torrent):
|
||||
# uploadvolumefactor int
|
||||
selector = self.fields.get('uploadvolumefactor', {})
|
||||
if not selector:
|
||||
return
|
||||
self.torrents_info['uploadvolumefactor'] = 1
|
||||
if 'case' in selector:
|
||||
for uploadvolumefactorselector in list(selector.get('case', {}).keys()):
|
||||
uploadvolumefactor = torrent(uploadvolumefactorselector)
|
||||
if len(uploadvolumefactor) > 0:
|
||||
self.torrents_info['uploadvolumefactor'] = selector.get('case', {}).get(
|
||||
uploadvolumefactorselector)
|
||||
break
|
||||
elif "selector" in selector:
|
||||
uploadvolume = torrent(selector.get('selector', '')).clone()
|
||||
self.__remove(uploadvolume, selector)
|
||||
items = self.__attribute_or_text(uploadvolume, selector)
|
||||
item = self.__index(items, selector)
|
||||
if item:
|
||||
uploadvolumefactor = re.search(r'(\d+\.?\d*)', item)
|
||||
if uploadvolumefactor:
|
||||
self.torrents_info['uploadvolumefactor'] = int(uploadvolumefactor.group(1))
|
||||
|
||||
def __get_labels(self, torrent):
|
||||
# labels ['label1', 'label2']
|
||||
if 'labels' not in self.fields:
|
||||
return
|
||||
selector = self.fields.get('labels', {})
|
||||
labels = torrent(selector.get("selector", "")).clone()
|
||||
self.__remove(labels, selector)
|
||||
items = self.__attribute_or_text(labels, selector)
|
||||
if items:
|
||||
self.torrents_info['labels'] = [item for item in items if item]
|
||||
else:
|
||||
self.torrents_info['labels'] = []
|
||||
|
||||
def __get_free_date(self, torrent):
|
||||
# free date yyyy-mm-dd hh:mm:ss
|
||||
if 'freedate' not in self.fields:
|
||||
return
|
||||
selector = self.fields.get('freedate', {})
|
||||
freedate = torrent(selector.get('selector', '')).clone()
|
||||
self.__remove(freedate, selector)
|
||||
items = self.__attribute_or_text(freedate, selector)
|
||||
self.torrents_info['freedate'] = self.__index(items, selector)
|
||||
self.torrents_info['freedate'] = self.__filter_text(self.torrents_info.get('freedate'),
|
||||
selector.get('filters'))
|
||||
|
||||
def __get_hit_and_run(self, torrent):
|
||||
# hitandrun True/False
|
||||
if 'hr' not in self.fields:
|
||||
return
|
||||
selector = self.fields.get('hr', {})
|
||||
hit_and_run = torrent(selector.get('selector', ''))
|
||||
if hit_and_run:
|
||||
self.torrents_info['hit_and_run'] = True
|
||||
else:
|
||||
self.torrents_info['hit_and_run'] = False
|
||||
|
||||
def __get_category(self, torrent):
|
||||
# category 电影/电视剧
|
||||
if 'category' not in self.fields:
|
||||
return
|
||||
selector = self.fields.get('category', {})
|
||||
category = torrent(selector.get('selector', '')).clone()
|
||||
self.__remove(category, selector)
|
||||
items = self.__attribute_or_text(category, selector)
|
||||
category_value = self.__index(items, selector)
|
||||
category_value = self.__filter_text(category_value,
|
||||
selector.get('filters'))
|
||||
if category_value and self.category:
|
||||
tv_cats = [str(cat.get("id")) for cat in self.category.get("tv") or []]
|
||||
movie_cats = [str(cat.get("id")) for cat in self.category.get("movie") or []]
|
||||
if category_value in tv_cats \
|
||||
and category_value not in movie_cats:
|
||||
self.torrents_info['category'] = MediaType.TV.value
|
||||
elif category_value in movie_cats:
|
||||
self.torrents_info['category'] = MediaType.MOVIE.value
|
||||
else:
|
||||
self.torrents_info['category'] = MediaType.UNKNOWN.value
|
||||
else:
|
||||
self.torrents_info['category'] = MediaType.UNKNOWN.value
|
||||
|
||||
def get_info(self, torrent) -> dict:
|
||||
"""
|
||||
解析单条种子数据
|
||||
"""
|
||||
self.torrents_info = {}
|
||||
try:
|
||||
# 标题
|
||||
self.__get_title(torrent)
|
||||
# 描述
|
||||
self.__get_description(torrent)
|
||||
# 详情页面
|
||||
self.__get_detail(torrent)
|
||||
# 下载链接
|
||||
self.__get_download(torrent)
|
||||
# 完成数
|
||||
self.__get_grabs(torrent)
|
||||
# 下载数
|
||||
self.__get_leechers(torrent)
|
||||
# 做种数
|
||||
self.__get_seeders(torrent)
|
||||
# 大小
|
||||
self.__get_size(torrent)
|
||||
# IMDBID
|
||||
self.__get_imdbid(torrent)
|
||||
# 下载系数
|
||||
self.__get_downloadvolumefactor(torrent)
|
||||
# 上传系数
|
||||
self.__get_uploadvolumefactor(torrent)
|
||||
# 发布时间
|
||||
self.__get_pubdate(torrent)
|
||||
# 已发布时间
|
||||
self.__get_date_elapsed(torrent)
|
||||
# 免费载止时间
|
||||
self.__get_free_date(torrent)
|
||||
# 标签
|
||||
self.__get_labels(torrent)
|
||||
# HR
|
||||
self.__get_hit_and_run(torrent)
|
||||
# 分类
|
||||
self.__get_category(torrent)
|
||||
|
||||
except Exception as err:
|
||||
logger.error("%s 搜索出现错误:%s" % (self.indexername, str(err)))
|
||||
return self.torrents_info
|
||||
|
||||
@staticmethod
|
||||
def __filter_text(text: str, filters: list):
|
||||
"""
|
||||
对文件进行处理
|
||||
"""
|
||||
if not text or not filters or not isinstance(filters, list):
|
||||
return text
|
||||
if not isinstance(text, str):
|
||||
text = str(text)
|
||||
for filter_item in filters:
|
||||
if not text:
|
||||
break
|
||||
method_name = filter_item.get("name")
|
||||
try:
|
||||
args = filter_item.get("args")
|
||||
if method_name == "re_search" and isinstance(args, list):
|
||||
rematch = re.search(r"%s" % args[0], text)
|
||||
if rematch:
|
||||
text = rematch.group(args[-1])
|
||||
elif method_name == "split" and isinstance(args, list):
|
||||
text = text.split(r"%s" % args[0])[args[-1]]
|
||||
elif method_name == "replace" and isinstance(args, list):
|
||||
text = text.replace(r"%s" % args[0], r"%s" % args[-1])
|
||||
elif method_name == "dateparse" and isinstance(args, str):
|
||||
text = text.replace("\n", " ").strip()
|
||||
text = datetime.datetime.strptime(text, r"%s" % args)
|
||||
elif method_name == "strip":
|
||||
text = text.strip()
|
||||
elif method_name == "appendleft":
|
||||
text = f"{args}{text}"
|
||||
elif method_name == "querystring":
|
||||
parsed_url = urlparse(str(text))
|
||||
query_params = parse_qs(parsed_url.query)
|
||||
param_value = query_params.get(args)
|
||||
text = param_value[0] if param_value else ''
|
||||
except Exception as err:
|
||||
logger.debug(f'过滤器 {method_name} 处理失败:{str(err)} - {traceback.format_exc()}')
|
||||
return text.strip()
|
||||
|
||||
@staticmethod
|
||||
def __remove(item, selector):
|
||||
"""
|
||||
移除元素
|
||||
"""
|
||||
if selector and "remove" in selector:
|
||||
removelist = selector.get('remove', '').split(', ')
|
||||
for v in removelist:
|
||||
item.remove(v)
|
||||
|
||||
@staticmethod
|
||||
def __attribute_or_text(item, selector: dict):
|
||||
if not selector:
|
||||
return item
|
||||
if not item:
|
||||
return []
|
||||
if 'attribute' in selector:
|
||||
items = [i.attr(selector.get('attribute')) for i in item.items() if i]
|
||||
else:
|
||||
items = [i.text() for i in item.items() if i]
|
||||
return items
|
||||
|
||||
@staticmethod
|
||||
def __index(items: list, selector: dict):
|
||||
if not items:
|
||||
return None
|
||||
if selector:
|
||||
if "contents" in selector \
|
||||
and len(items) > int(selector.get("contents")):
|
||||
items = items[0].split("\n")[selector.get("contents")]
|
||||
elif "index" in selector \
|
||||
and len(items) > int(selector.get("index")):
|
||||
items = items[int(selector.get("index"))]
|
||||
if isinstance(items, list):
|
||||
items = items[0]
|
||||
return items
|
||||
|
||||
def parse(self, html_text: str) -> List[dict]:
|
||||
"""
|
||||
解析整个页面
|
||||
"""
|
||||
if not html_text:
|
||||
self.is_error = True
|
||||
return []
|
||||
# 清空旧结果
|
||||
self.torrents_info_array = []
|
||||
try:
|
||||
# 解析站点文本对象
|
||||
html_doc = PyQuery(html_text)
|
||||
# 种子筛选器
|
||||
torrents_selector = self.list.get('selector', '')
|
||||
# 遍历种子html列表
|
||||
for torn in html_doc(torrents_selector):
|
||||
self.torrents_info_array.append(copy.deepcopy(self.get_info(PyQuery(torn))))
|
||||
if len(self.torrents_info_array) >= int(self.result_num):
|
||||
break
|
||||
return self.torrents_info_array
|
||||
except Exception as err:
|
||||
self.is_error = True
|
||||
logger.warn(f"错误:{self.indexername} {str(err)}")
|
||||
|
||||
@@ -1,8 +1,6 @@
import urllib.parse
from typing import Tuple, List

from ruamel.yaml import CommentedMap

from app.core.config import settings
from app.db.systemconfig_oper import SystemConfigOper
from app.log import logger
@@ -51,7 +49,7 @@ class HaiDanSpider:
        "7": 1
    }

    def __init__(self, indexer: CommentedMap):
    def __init__(self, indexer: dict):
        self.systemconfig = SystemConfigOper()
        if indexer:
            self._indexerid = indexer.get('id')
211 app/modules/indexer/spider/hddolby.py Normal file
@@ -0,0 +1,211 @@
|
||||
from typing import Tuple, List
|
||||
|
||||
from app.core.config import settings
|
||||
from app.db.systemconfig_oper import SystemConfigOper
|
||||
from app.log import logger
|
||||
from app.schemas import MediaType
|
||||
from app.utils.http import RequestUtils
|
||||
from app.utils.string import StringUtils
|
||||
|
||||
|
||||
class HddolbySpider:
|
||||
"""
|
||||
HDDolby API
|
||||
"""
|
||||
_indexerid = None
|
||||
_domain = None
|
||||
_domain_host = None
|
||||
_name = ""
|
||||
_proxy = None
|
||||
_cookie = None
|
||||
_ua = None
|
||||
_apikey = None
|
||||
_size = 40
|
||||
_pageurl = None
|
||||
_timeout = 15
|
||||
_searchurl = None
|
||||
|
||||
# 分类
|
||||
_movie_category = [401, 405]
|
||||
_tv_category = [402, 403, 404, 405]
|
||||
|
||||
# 标签
|
||||
_labels = {
|
||||
"gf": "官方",
|
||||
"gy": "国语",
|
||||
"yy": "粤语",
|
||||
"ja": "日语",
|
||||
"ko": "韩语",
|
||||
"zz": "中文字幕",
|
||||
"jz": "禁转",
|
||||
"xz": "限转",
|
||||
"diy": "DIY",
|
||||
"sf": "首发",
|
||||
"yq": "应求",
|
||||
"m0": "零魔",
|
||||
"yc": "原创",
|
||||
"gz": "官字",
|
||||
"db": "Dolby Vision",
|
||||
"hdr10": "HDR10",
|
||||
"hdrm": "HDR10+",
|
||||
"tx": "特效",
|
||||
"lz": "连载",
|
||||
"wj": "完结",
|
||||
"hdrv": "HDR Vivid",
|
||||
"hlg": "HLG",
|
||||
"hq": "高码率",
|
||||
"hfr": "高帧率",
|
||||
}
|
||||
|
||||
def __init__(self, indexer: dict):
|
||||
self.systemconfig = SystemConfigOper()
|
||||
if indexer:
|
||||
self._indexerid = indexer.get('id')
|
||||
self._domain = indexer.get('domain')
|
||||
self._domain_host = StringUtils.get_url_domain(self._domain)
|
||||
self._name = indexer.get('name')
|
||||
if indexer.get('proxy'):
|
||||
self._proxy = settings.PROXY
|
||||
self._cookie = indexer.get('cookie')
|
||||
self._ua = indexer.get('ua')
|
||||
self._apikey = indexer.get('apikey')
|
||||
self._timeout = indexer.get('timeout') or 15
|
||||
self._searchurl = f"https://api.{self._domain_host}/api/v1/torrent/search"
|
||||
self._pageurl = f"{self._domain}details.php?id=%s&hit=1"
|
||||
|
||||
def search(self, keyword: str, mtype: MediaType = None, page: int = 0) -> Tuple[bool, List[dict]]:
|
||||
"""
|
||||
搜索
|
||||
"""
|
||||
|
||||
if mtype == MediaType.TV:
|
||||
categories = self._tv_category
|
||||
elif mtype == MediaType.MOVIE:
|
||||
categories = self._movie_category
|
||||
else:
|
||||
categories = list(set(self._movie_category + self._tv_category))
|
||||
|
||||
# 输入参数
|
||||
params = {
|
||||
"keyword": keyword,
|
||||
"page_number": page,
|
||||
"page_size": 100,
|
||||
"categories": categories,
|
||||
"visible": 1,
|
||||
}
|
||||
|
||||
res = RequestUtils(
|
||||
headers={
|
||||
"Content-Type": "application/json",
|
||||
"Accept": "application/json, text/plain, */*",
|
||||
"x-api-key": self._apikey
|
||||
},
|
||||
cookies=self._cookie,
|
||||
proxies=self._proxy,
|
||||
referer=f"{self._domain}",
|
||||
timeout=self._timeout
|
||||
).post_res(url=self._searchurl, json=params)
|
||||
torrents = []
|
||||
if res and res.status_code == 200:
|
||||
results = res.json().get('data', []) or []
|
||||
for result in results:
|
||||
"""
|
||||
{
|
||||
"id": 120202,
|
||||
"promotion_time_type": 0,
|
||||
"promotion_until": "0000-00-00 00:00:00",
|
||||
"category": 402,
|
||||
"medium": 6,
|
||||
"codec": 1,
|
||||
"standard": 2,
|
||||
"team": 10,
|
||||
"audiocodec": 14,
|
||||
"leechers": 0,
|
||||
"seeders": 1,
|
||||
"name": "[DBY] Lost S06 2010 Complete 1080p Netflix WEB-DL AVC DDP5.1-DBTV",
|
||||
"small_descr": "lost ",
|
||||
"times_completed": 0,
|
||||
"size": 33665425886,
|
||||
"added": "2025-02-18 19:47:56",
|
||||
"url": 0,
|
||||
"hr": 0,
|
||||
"tmdb_type": "tv",
|
||||
"tmdb_id": 4607,
|
||||
"imdb_id": null,
|
||||
"tags": "gf"
|
||||
}
|
||||
"""
|
||||
# 类别
|
||||
category_value = result.get('category')
|
||||
if category_value in self._tv_category:
|
||||
category = MediaType.TV.value
|
||||
elif category_value in self._movie_category:
|
||||
category = MediaType.MOVIE.value
|
||||
else:
|
||||
category = MediaType.UNKNOWN.value
|
||||
# 标签
|
||||
torrentLabelIds = result.get('tags', "").split(";") or []
|
||||
torrentLabels = []
|
||||
for labelId in torrentLabelIds:
|
||||
if self._labels.get(labelId) is not None:
|
||||
torrentLabels.append(self._labels.get(labelId))
|
||||
# 种子信息
|
||||
torrent = {
|
||||
'title': result.get('name'),
|
||||
'description': result.get('small_descr'),
|
||||
'enclosure': self.__get_download_url(result.get('id'), result.get('downhash')),
|
||||
'pubdate': result.get('added'),
|
||||
'size': result.get('size'),
|
||||
'seeders': result.get('seeders'),
|
||||
'peers': result.get('leechers'),
|
||||
'grabs': result.get('times_completed'),
|
||||
'downloadvolumefactor': self.__get_downloadvolumefactor(result.get('promotion_time_type')),
|
||||
'uploadvolumefactor': self.__get_uploadvolumefactor(result.get('promotion_time_type')),
|
||||
'freedate': result.get('promotion_until'),
|
||||
'page_url': self._pageurl % result.get('id'),
|
||||
'labels': torrentLabels,
|
||||
'category': category
|
||||
}
|
||||
torrents.append(torrent)
|
||||
elif res is not None:
|
||||
logger.warn(f"{self._name} 搜索失败,错误码:{res.status_code}")
|
||||
return True, []
|
||||
else:
|
||||
logger.warn(f"{self._name} 搜索失败,无法连接 {self._domain}")
|
||||
return True, []
|
||||
return False, torrents
|
||||
|
||||
@staticmethod
|
||||
def __get_downloadvolumefactor(discount: int) -> float:
|
||||
"""
|
||||
获取下载系数
|
||||
"""
|
||||
discount_dict = {
|
||||
2: 0,
|
||||
5: 0.5,
|
||||
6: 1,
|
||||
7: 0.3
|
||||
}
|
||||
if discount:
|
||||
return discount_dict.get(discount, 1)
|
||||
return 1
|
||||
|
||||
@staticmethod
|
||||
def __get_uploadvolumefactor(discount: int) -> float:
|
||||
"""
|
||||
获取上传系数
|
||||
"""
|
||||
discount_dict = {
|
||||
3: 2,
|
||||
4: 2,
|
||||
6: 2
|
||||
}
|
||||
if discount:
|
||||
return discount_dict.get(discount, 1)
|
||||
return 1
|
||||
|
||||
def __get_download_url(self, torrent_id: int, downhash: str) -> str:
|
||||
"""
|
||||
获取下载链接,返回base64编码的json字符串及URL
|
||||
"""
|
||||
return f"{self._domain}download.php?id={torrent_id}&downhash={downhash}"
|
||||
@@ -3,8 +3,6 @@ import json
import re
from typing import Tuple, List

from ruamel.yaml import CommentedMap

from app.core.config import settings
from app.db.systemconfig_oper import SystemConfigOper
from app.log import logger
@@ -51,7 +49,7 @@ class MTorrentSpider:
        "7": "DIY 国配 中字"
    }

    def __init__(self, indexer: CommentedMap):
    def __init__(self, indexer: dict):
        self.systemconfig = SystemConfigOper()
        if indexer:
            self._indexerid = indexer.get('id')
@@ -1,8 +1,6 @@
import re
from typing import Tuple, List

from ruamel.yaml import CommentedMap

from app.core.config import settings
from app.log import logger
from app.utils.http import RequestUtils
@@ -23,7 +21,7 @@ class TNodeSpider:
    _downloadurl = "%sapi/torrent/download/%s"
    _pageurl = "%storrent/info/%s"

    def __init__(self, indexer: CommentedMap):
    def __init__(self, indexer: dict):
        if indexer:
            self._indexerid = indexer.get('id')
            self._domain = indexer.get('domain')
@@ -1,8 +1,6 @@
from typing import List, Tuple
from urllib.parse import quote

from ruamel.yaml import CommentedMap

from app.core.config import settings
from app.log import logger
from app.utils.http import RequestUtils
@@ -19,7 +17,7 @@ class TorrentLeech:
    _pageurl = "%storrent/%s"
    _timeout = 15

    def __init__(self, indexer: CommentedMap):
    def __init__(self, indexer: dict):
        self._indexer = indexer
        if indexer.get('proxy'):
            self._proxy = settings.PROXY
@@ -1,7 +1,5 @@
from typing import Tuple, List

from ruamel.yaml import CommentedMap

from app.core.config import settings
from app.db.systemconfig_oper import SystemConfigOper
from app.log import logger
@@ -46,7 +44,7 @@ class YemaSpider:
        "12": "完结",
    }

    def __init__(self, indexer: CommentedMap):
    def __init__(self, indexer: dict):
        self.systemconfig = SystemConfigOper()
        if indexer:
            self._indexerid = indexer.get('id')
@@ -1,6 +1,6 @@
import json
from datetime import datetime
from typing import List, Union, Optional, Dict, Generator, Tuple
from typing import List, Union, Optional, Dict, Generator, Tuple, Any

from requests import Response

@@ -10,6 +10,7 @@ from app.log import logger
from app.schemas import MediaType
from app.utils.http import RequestUtils
from app.utils.url import UrlUtils
from schemas import MediaServerItem


class Jellyfin:
@@ -548,7 +549,7 @@ class Jellyfin:
            logger.error(f"连接Items/Id/Ancestors出错:" + str(e))
            return None

    def refresh_root_library(self) -> bool:
    def refresh_root_library(self) -> Optional[bool]:
        """
        通知Jellyfin刷新整个媒体库
        """
@@ -762,7 +763,7 @@ class Jellyfin:
            return None

    def get_items(self, parent: Union[str, int], start_index: int = 0, limit: Optional[int] = -1) \
            -> Optional[Generator]:
            -> Generator[MediaServerItem | None | Any, Any, None]:
        """
        获取媒体服务器项目列表,支持分页和不分页逻辑,默认不分页获取所有数据
@@ -14,6 +14,7 @@ from app.log import logger
from app.schemas import MediaType
from app.utils.http import RequestUtils
from app.utils.url import UrlUtils
from schemas import MediaServerItem


class Plex:
@@ -367,7 +368,7 @@ class Plex:
            return False
        return self._plex.library.update()

    def refresh_library_by_items(self, items: List[schemas.RefreshMediaItem]) -> bool:
    def refresh_library_by_items(self, items: List[schemas.RefreshMediaItem]) -> Optional[bool]:
        """
        按路径刷新媒体库 item: target_path
        """
@@ -512,7 +513,7 @@ class Plex:
        )

    def get_items(self, parent: Union[str, int], start_index: int = 0, limit: Optional[int] = -1) \
            -> Optional[Generator]:
            -> Generator[MediaServerItem | None, Any, None]:
        """
        获取媒体服务器项目列表,支持分页和不分页逻辑,默认不分页获取所有数据

@@ -855,7 +856,7 @@ class Plex:
        :param kwargs: 其他请求参数,如headers, cookies, proxies等
        """
        if not self._session:
            return
            return None
        try:
            url = UrlUtils.adapt_request_url(host=self._host, endpoint=endpoint)
            kwargs.setdefault("headers", self.__get_request_headers())
@@ -78,7 +78,7 @@ class QbittorrentModule(_ModuleBase, _DownloaderBase[Qbittorrent]):
                 server.reconnect()

     def download(self, content: Union[Path, str], download_dir: Path, cookie: str,
-                 episodes: Set[int] = None, category: str = None,
+                 episodes: Set[int] = None, category: str = None, label: str = None,
                  downloader: str = None) -> Optional[Tuple[Optional[str], Optional[str], Optional[str], str]]:
         """
         根据种子文件,选择并添加下载任务

@@ -87,6 +87,7 @@ class QbittorrentModule(_ModuleBase, _DownloaderBase[Qbittorrent]):
         :param cookie: cookie
         :param episodes: 需要下载的集数
         :param category: 分类
+        :param label: 标签
         :param downloader: 下载器
         :return: 下载器名称、种子Hash、种子文件布局、错误原因
         """

@@ -118,7 +119,9 @@ class QbittorrentModule(_ModuleBase, _DownloaderBase[Qbittorrent]):

         # 生成随机Tag
         tag = StringUtils.generate_random_str(10)
-        if settings.TORRENT_TAG:
+        if label:
+            tags = label.split(',') + [tag]
+        elif settings.TORRENT_TAG:
             tags = [tag, settings.TORRENT_TAG]
         else:
             tags = [tag]

@@ -239,7 +242,9 @@ class QbittorrentModule(_ModuleBase, _DownloaderBase[Qbittorrent]):
                     path=torrent_path,
                     hash=torrent.get('hash'),
                     size=torrent.get('total_size'),
-                    tags=torrent.get('tags')
+                    tags=torrent.get('tags'),
+                    progress=torrent.get('progress') * 100,
+                    state="paused" if torrent.get('state') in ("paused", "pausedDL") else "downloading",
                 ))
             elif status == TorrentStatus.TRANSFER:
                 # 获取已完成且未整理的
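The qBittorrent hunk above merges any caller-supplied label with the randomly generated tracking tag, falling back to the global TORRENT_TAG when no label is given. A minimal sketch of that selection order, as a standalone illustration; settings access and the project's StringUtils helper are replaced here with a local random-string function:

import secrets
import string
from typing import List, Optional


def build_tags(label: Optional[str], torrent_tag: Optional[str]) -> List[str]:
    """Mirror the tag-selection order above: explicit label wins, then the
    configured TORRENT_TAG; the random tracking tag is always kept."""
    random_tag = ''.join(secrets.choice(string.ascii_letters + string.digits) for _ in range(10))
    if label:
        return label.split(',') + [random_tag]
    if torrent_tag:
        return [random_tag, torrent_tag]
    return [random_tag]


if __name__ == "__main__":
    print(build_tags("anime,4k", "MOVIEPILOT"))   # ['anime', '4k', '<random>']
    print(build_tags(None, "MOVIEPILOT"))         # ['<random>', 'MOVIEPILOT']
    print(build_tags(None, None))                 # ['<random>']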
@@ -118,11 +118,12 @@ class TheMovieDbModule(_ModuleBase):

         # 识别匹配
         if not cache_info or not cache:
+            info = None
             # 缓存没有或者强制不使用缓存
             if tmdbid:
                 # 直接查询详情
                 info = self.tmdb.get_info(mtype=mtype, tmdbid=tmdbid)
-            elif meta:
+            if not info and meta:
                 info = {}
                 # 简体名称
                 zh_name = zhconv.convert(meta.cn_name, "zh-hans") if meta.cn_name else None

@@ -172,8 +173,8 @@ class TheMovieDbModule(_ModuleBase):
             if info and not info.get("genres"):
                 info = self.tmdb.get_info(mtype=info.get("media_type"),
                                           tmdbid=info.get("id"))
-            else:
-                logger.error("识别媒体信息时未提供元数据或tmdbid")
+            elif not info:
+                logger.error("识别媒体信息时未提供元数据或唯一且有效的tmdbid")
                 return None

             # 保存到缓存
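The recognition change above stops treating the tmdbid branch and the metadata branch as mutually exclusive: a failed id lookup now falls through to the name-based search, and the error is only logged when neither path produces a result. A tiny self-contained sketch of that control flow; the dict lookups are toy stand-ins for the TMDB calls, not the project's API:

from typing import Optional

# Toy stand-ins for the TMDB lookups (illustration only)
_BY_ID = {550: {"id": 550, "title": "Fight Club"}}
_BY_NAME = {"fight club": {"id": 550, "title": "Fight Club"}}


def recognize(tmdbid: Optional[int], name: Optional[str]) -> Optional[dict]:
    """Try the explicit id first; only when it yields nothing, fall back to the name search."""
    info = None
    if tmdbid:
        info = _BY_ID.get(tmdbid)
    if not info and name:
        info = _BY_NAME.get(name.lower())
    return info or None


if __name__ == "__main__":
    print(recognize(550, None))            # found by id
    print(recognize(999, "Fight Club"))    # id misses, name search rescues it
    print(recognize(None, None))           # nothing to go on -> None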
@@ -578,7 +578,7 @@ class TmdbApi:
                 genre_ids.append(genre.get('id'))
             return genre_ids

-        # 查询TMDB详ngeq
+        # 查询TMDB详情
         if mtype == MediaType.MOVIE:
             tmdb_info = self.__get_movie_detail(tmdbid)
             if tmdb_info:

@@ -588,13 +588,20 @@ class TmdbApi:
             if tmdb_info:
                 tmdb_info['media_type'] = MediaType.TV
         else:
-            tmdb_info = self.__get_tv_detail(tmdbid)
-            if tmdb_info:
-                tmdb_info['media_type'] = MediaType.TV
-            else:
-                tmdb_info = self.__get_movie_detail(tmdbid)
-                if tmdb_info:
-                    tmdb_info['media_type'] = MediaType.MOVIE
+            tmdb_info_tv = self.__get_tv_detail(tmdbid)
+            tmdb_info_movie = self.__get_movie_detail(tmdbid)
+            if tmdb_info_tv and tmdb_info_movie:
+                tmdb_info = None
+                logger.warn(f"无法判断tmdb_id:{tmdbid} 是电影还是电视剧")
+            elif tmdb_info_tv:
+                tmdb_info = tmdb_info_tv
+                tmdb_info['media_type'] = MediaType.TV
+            elif tmdb_info_movie:
+                tmdb_info = tmdb_info_movie
+                tmdb_info['media_type'] = MediaType.MOVIE
+            else:
+                tmdb_info = None
+                logger.warn(f"tmdb_id:{tmdbid} 未查询到媒体信息")

         if tmdb_info:
             # 转换genreid
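When only a TMDB id is supplied, the new branch queries both the TV and the movie endpoints and refuses to guess when both return a record. A small sketch of that decision table; the enum values and the lookups here are illustrative stand-ins, not the TmdbApi internals:

from enum import Enum
from typing import Optional, Tuple


class MediaKind(Enum):
    MOVIE = "movie"
    TV = "tv"


def resolve_media_type(tv_hit: Optional[dict],
                       movie_hit: Optional[dict]) -> Tuple[Optional[dict], Optional[MediaKind]]:
    """Both endpoints answer -> ambiguous, give up; exactly one answers -> use it; none -> not found."""
    if tv_hit and movie_hit:
        return None, None            # caller logs "无法判断是电影还是电视剧"
    if tv_hit:
        return tv_hit, MediaKind.TV
    if movie_hit:
        return movie_hit, MediaKind.MOVIE
    return None, None                # caller logs "未查询到媒体信息"


if __name__ == "__main__":
    print(resolve_media_type({"id": 1399}, None))     # ({'id': 1399}, MediaKind.TV)
    print(resolve_media_type({"id": 1}, {"id": 1}))   # (None, None): ambiguous id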
@@ -79,7 +79,7 @@ class TransmissionModule(_ModuleBase, _DownloaderBase[Transmission]):
                 server.reconnect()

     def download(self, content: Union[Path, str], download_dir: Path, cookie: str,
-                 episodes: Set[int] = None, category: str = None,
+                 episodes: Set[int] = None, category: str = None, label: str = None,
                  downloader: str = None) -> Optional[Tuple[Optional[str], Optional[str], Optional[str], str]]:
         """
         根据种子文件,选择并添加下载任务

@@ -88,6 +88,7 @@ class TransmissionModule(_ModuleBase, _DownloaderBase[Transmission]):
         :param cookie: cookie
         :param episodes: 需要下载的集数
         :param category: 分类,TR中未使用
+        :param label: 标签
         :param downloader: 下载器
         :return: 下载器名称、种子Hash、种子文件布局、错误原因
         """

@@ -118,8 +119,11 @@ class TransmissionModule(_ModuleBase, _DownloaderBase[Transmission]):

         # 如果要选择文件则先暂停
         is_paused = True if episodes else False

         # 标签
-        if settings.TORRENT_TAG:
+        if label:
+            labels = label.split(',')
+        elif settings.TORRENT_TAG:
             labels = [settings.TORRENT_TAG]
         else:
             labels = None

@@ -246,7 +250,9 @@ class TransmissionModule(_ModuleBase, _DownloaderBase[Transmission]):
                     title=torrent.name,
                     path=Path(torrent.download_dir) / torrent.name,
                     hash=torrent.hashString,
-                    tags=",".join(torrent.labels or [])
+                    tags=",".join(torrent.labels or []),
+                    progress=torrent.progress,
+                    state="paused" if torrent.status == "stopped" else "downloading",
                 ))
             elif status == TorrentStatus.DOWNLOADING:
                 # 获取正在下载的任务
@@ -32,13 +32,13 @@ class WeChat:
     _proxy = None

     # 企业微信发送消息URL
-    _send_msg_url = "/cgi-bin/message/send?access_token={access_token}"
+    _send_msg_url = "cgi-bin/message/send?access_token={access_token}"
     # 企业微信获取TokenURL
-    _token_url = "/cgi-bin/gettoken?corpid={corpid}&corpsecret={corpsecret}"
+    _token_url = "cgi-bin/gettoken?corpid={corpid}&corpsecret={corpsecret}"
     # 企业微信创建菜单URL
-    _create_menu_url = "/cgi-bin/menu/create?access_token={access_token}&agentid={agentid}"
+    _create_menu_url = "cgi-bin/menu/create?access_token={access_token}&agentid={agentid}"
    # 企业微信删除菜单URL
-    _delete_menu_url = "/cgi-bin/menu/delete?access_token={access_token}&agentid={agentid}"
+    _delete_menu_url = "cgi-bin/menu/delete?access_token={access_token}&agentid={agentid}"

     def __init__(self, WECHAT_CORPID: str = None, WECHAT_APP_SECRET: str = None,
                  WECHAT_APP_ID: str = None, WECHAT_PROXY: str = None, **kwargs):
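Dropping the leading slash from the WeChat endpoints matters once they are joined onto a base URL: under standard urljoin semantics an absolute path replaces the base path, while a relative one is appended to it. Whether the project's UrlUtils.adapt_request_url follows urljoin exactly is an assumption; the standard-library behaviour itself looks like this:

from urllib.parse import urljoin

base = "https://qyapi.weixin.qq.com/some/prefix/"   # example base URL

# Leading slash: the base path is discarded.
print(urljoin(base, "/cgi-bin/gettoken"))   # https://qyapi.weixin.qq.com/cgi-bin/gettoken

# Relative path: appended to the base path.
print(urljoin(base, "cgi-bin/gettoken"))    # https://qyapi.weixin.qq.com/some/prefix/cgi-bin/gettoken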
726
app/scheduler.py
@@ -7,6 +7,7 @@ import pytz
 from apscheduler.executors.pool import ThreadPoolExecutor
 from apscheduler.jobstores.base import JobLookupError
 from apscheduler.schedulers.background import BackgroundScheduler
+from apscheduler.triggers.cron import CronTrigger

 from app import schemas
 from app.chain import ChainBase

@@ -16,18 +17,22 @@ from app.chain.site import SiteChain
 from app.chain.subscribe import SubscribeChain
 from app.chain.tmdb import TmdbChain
 from app.chain.transfer import TransferChain
+from app.chain.workflow import WorkflowChain
 from app.core.config import settings
 from app.core.event import EventManager
 from app.core.plugin import PluginManager
 from app.db.systemconfig_oper import SystemConfigOper
 from app.helper.sites import SitesHelper
 from app.log import logger
-from app.schemas import Notification, NotificationType
+from app.schemas import Notification, NotificationType, Workflow
 from app.schemas.types import EventType, SystemConfigKey
 from app.utils.singleton import Singleton
 from app.utils.timer import TimerUtils


+lock = threading.Lock()


 class SchedulerChain(ChainBase):
     pass
@@ -54,85 +59,6 @@ class Scheduler(metaclass=Singleton):
|
||||
"""
|
||||
初始化定时服务
|
||||
"""
|
||||
# 各服务的运行状态
|
||||
self._jobs = {
|
||||
"cookiecloud": {
|
||||
"name": "同步CookieCloud站点",
|
||||
"func": SiteChain().sync_cookies,
|
||||
"running": False,
|
||||
},
|
||||
"mediaserver_sync": {
|
||||
"name": "同步媒体服务器",
|
||||
"func": MediaServerChain().sync,
|
||||
"running": False,
|
||||
},
|
||||
"subscribe_tmdb": {
|
||||
"name": "订阅元数据更新",
|
||||
"func": SubscribeChain().check,
|
||||
"running": False,
|
||||
},
|
||||
"subscribe_search": {
|
||||
"name": "订阅搜索补全",
|
||||
"func": SubscribeChain().search,
|
||||
"running": False,
|
||||
"kwargs": {
|
||||
"state": "R"
|
||||
}
|
||||
},
|
||||
"new_subscribe_search": {
|
||||
"name": "新增订阅搜索",
|
||||
"func": SubscribeChain().search,
|
||||
"running": False,
|
||||
"kwargs": {
|
||||
"state": "N"
|
||||
}
|
||||
},
|
||||
"subscribe_refresh": {
|
||||
"name": "订阅刷新",
|
||||
"func": SubscribeChain().refresh,
|
||||
"running": False,
|
||||
},
|
||||
"subscribe_follow": {
|
||||
"name": "关注的订阅分享",
|
||||
"func": SubscribeChain().follow,
|
||||
"running": False,
|
||||
},
|
||||
"transfer": {
|
||||
"name": "下载文件整理",
|
||||
"func": TransferChain().process,
|
||||
"running": False,
|
||||
},
|
||||
"clear_cache": {
|
||||
"name": "缓存清理",
|
||||
"func": self.clear_cache,
|
||||
"running": False,
|
||||
},
|
||||
"user_auth": {
|
||||
"name": "用户认证检查",
|
||||
"func": self.user_auth,
|
||||
"running": False,
|
||||
},
|
||||
"scheduler_job": {
|
||||
"name": "公共定时服务",
|
||||
"func": SchedulerChain().scheduler_job,
|
||||
"running": False,
|
||||
},
|
||||
"random_wallpager": {
|
||||
"name": "壁纸缓存",
|
||||
"func": TmdbChain().get_trending_wallpapers,
|
||||
"running": False,
|
||||
},
|
||||
"sitedata_refresh": {
|
||||
"name": "站点数据刷新",
|
||||
"func": SiteChain().refresh_userdatas,
|
||||
"running": False,
|
||||
},
|
||||
"recommend_refresh": {
|
||||
"name": "推荐缓存",
|
||||
"func": RecommendChain().refresh_recommend,
|
||||
"running": False,
|
||||
}
|
||||
}
|
||||
|
||||
# 停止定时服务
|
||||
self.stop()
|
||||
@@ -141,217 +67,302 @@ class Scheduler(metaclass=Singleton):
|
||||
if settings.DEV:
|
||||
return
|
||||
|
||||
# 创建定时服务
|
||||
self._scheduler = BackgroundScheduler(timezone=settings.TZ,
|
||||
executors={
|
||||
'default': ThreadPoolExecutor(100)
|
||||
})
|
||||
|
||||
# CookieCloud定时同步
|
||||
if settings.COOKIECLOUD_INTERVAL \
|
||||
and str(settings.COOKIECLOUD_INTERVAL).isdigit():
|
||||
self._scheduler.add_job(
|
||||
self.start,
|
||||
"interval",
|
||||
id="cookiecloud",
|
||||
name="同步CookieCloud站点",
|
||||
minutes=int(settings.COOKIECLOUD_INTERVAL),
|
||||
next_run_time=datetime.now(pytz.timezone(settings.TZ)) + timedelta(minutes=1),
|
||||
kwargs={
|
||||
'job_id': 'cookiecloud'
|
||||
with lock:
|
||||
# 各服务的运行状态
|
||||
self._jobs = {
|
||||
"cookiecloud": {
|
||||
"name": "同步CookieCloud站点",
|
||||
"func": SiteChain().sync_cookies,
|
||||
"running": False,
|
||||
},
|
||||
"mediaserver_sync": {
|
||||
"name": "同步媒体服务器",
|
||||
"func": MediaServerChain().sync,
|
||||
"running": False,
|
||||
},
|
||||
"subscribe_tmdb": {
|
||||
"name": "订阅元数据更新",
|
||||
"func": SubscribeChain().check,
|
||||
"running": False,
|
||||
},
|
||||
"subscribe_search": {
|
||||
"name": "订阅搜索补全",
|
||||
"func": SubscribeChain().search,
|
||||
"running": False,
|
||||
"kwargs": {
|
||||
"state": "R"
|
||||
}
|
||||
},
|
||||
"new_subscribe_search": {
|
||||
"name": "新增订阅搜索",
|
||||
"func": SubscribeChain().search,
|
||||
"running": False,
|
||||
"kwargs": {
|
||||
"state": "N"
|
||||
}
|
||||
},
|
||||
"subscribe_refresh": {
|
||||
"name": "订阅刷新",
|
||||
"func": SubscribeChain().refresh,
|
||||
"running": False,
|
||||
},
|
||||
"subscribe_follow": {
|
||||
"name": "关注的订阅分享",
|
||||
"func": SubscribeChain().follow,
|
||||
"running": False,
|
||||
},
|
||||
"transfer": {
|
||||
"name": "下载文件整理",
|
||||
"func": TransferChain().process,
|
||||
"running": False,
|
||||
},
|
||||
"clear_cache": {
|
||||
"name": "缓存清理",
|
||||
"func": self.clear_cache,
|
||||
"running": False,
|
||||
},
|
||||
"user_auth": {
|
||||
"name": "用户认证检查",
|
||||
"func": self.user_auth,
|
||||
"running": False,
|
||||
},
|
||||
"scheduler_job": {
|
||||
"name": "公共定时服务",
|
||||
"func": SchedulerChain().scheduler_job,
|
||||
"running": False,
|
||||
},
|
||||
"random_wallpager": {
|
||||
"name": "壁纸缓存",
|
||||
"func": TmdbChain().get_trending_wallpapers,
|
||||
"running": False,
|
||||
},
|
||||
"sitedata_refresh": {
|
||||
"name": "站点数据刷新",
|
||||
"func": SiteChain().refresh_userdatas,
|
||||
"running": False,
|
||||
},
|
||||
"recommend_refresh": {
|
||||
"name": "推荐缓存",
|
||||
"func": RecommendChain().refresh_recommend,
|
||||
"running": False,
|
||||
}
|
||||
)
|
||||
|
||||
# 媒体服务器同步
|
||||
if settings.MEDIASERVER_SYNC_INTERVAL \
|
||||
and str(settings.MEDIASERVER_SYNC_INTERVAL).isdigit():
|
||||
self._scheduler.add_job(
|
||||
self.start,
|
||||
"interval",
|
||||
id="mediaserver_sync",
|
||||
name="同步媒体服务器",
|
||||
hours=int(settings.MEDIASERVER_SYNC_INTERVAL),
|
||||
next_run_time=datetime.now(pytz.timezone(settings.TZ)) + timedelta(minutes=5),
|
||||
kwargs={
|
||||
'job_id': 'mediaserver_sync'
|
||||
}
|
||||
)
|
||||
|
||||
# 新增订阅时搜索(5分钟检查一次)
|
||||
self._scheduler.add_job(
|
||||
self.start,
|
||||
"interval",
|
||||
id="new_subscribe_search",
|
||||
name="新增订阅搜索",
|
||||
minutes=5,
|
||||
kwargs={
|
||||
'job_id': 'new_subscribe_search'
|
||||
}
|
||||
)
|
||||
|
||||
# 检查更新订阅TMDB数据(每隔6小时)
|
||||
self._scheduler.add_job(
|
||||
self.start,
|
||||
"interval",
|
||||
id="subscribe_tmdb",
|
||||
name="订阅元数据更新",
|
||||
hours=6,
|
||||
kwargs={
|
||||
'job_id': 'subscribe_tmdb'
|
||||
}
|
||||
)
|
||||
# 创建定时服务
|
||||
self._scheduler = BackgroundScheduler(timezone=settings.TZ,
|
||||
executors={
|
||||
'default': ThreadPoolExecutor(100)
|
||||
})
|
||||
|
||||
# 订阅状态每隔24小时搜索一次
|
||||
if settings.SUBSCRIBE_SEARCH:
|
||||
self._scheduler.add_job(
|
||||
self.start,
|
||||
"interval",
|
||||
id="subscribe_search",
|
||||
name="订阅搜索补全",
|
||||
hours=24,
|
||||
kwargs={
|
||||
'job_id': 'subscribe_search'
|
||||
}
|
||||
)
|
||||
|
||||
if settings.SUBSCRIBE_MODE == "spider":
|
||||
# 站点首页种子定时刷新模式
|
||||
triggers = TimerUtils.random_scheduler(num_executions=32)
|
||||
for trigger in triggers:
|
||||
# CookieCloud定时同步
|
||||
if settings.COOKIECLOUD_INTERVAL \
|
||||
and str(settings.COOKIECLOUD_INTERVAL).isdigit():
|
||||
self._scheduler.add_job(
|
||||
self.start,
|
||||
"cron",
|
||||
id=f"subscribe_refresh|{trigger.hour}:{trigger.minute}",
|
||||
name="订阅刷新",
|
||||
hour=trigger.hour,
|
||||
minute=trigger.minute,
|
||||
"interval",
|
||||
id="cookiecloud",
|
||||
name="同步CookieCloud站点",
|
||||
minutes=int(settings.COOKIECLOUD_INTERVAL),
|
||||
next_run_time=datetime.now(pytz.timezone(settings.TZ)) + timedelta(minutes=1),
|
||||
kwargs={
|
||||
'job_id': 'cookiecloud'
|
||||
}
|
||||
)
|
||||
|
||||
# 媒体服务器同步
|
||||
if settings.MEDIASERVER_SYNC_INTERVAL \
|
||||
and str(settings.MEDIASERVER_SYNC_INTERVAL).isdigit():
|
||||
self._scheduler.add_job(
|
||||
self.start,
|
||||
"interval",
|
||||
id="mediaserver_sync",
|
||||
name="同步媒体服务器",
|
||||
hours=int(settings.MEDIASERVER_SYNC_INTERVAL),
|
||||
next_run_time=datetime.now(pytz.timezone(settings.TZ)) + timedelta(minutes=5),
|
||||
kwargs={
|
||||
'job_id': 'mediaserver_sync'
|
||||
}
|
||||
)
|
||||
|
||||
# 新增订阅时搜索(5分钟检查一次)
|
||||
self._scheduler.add_job(
|
||||
self.start,
|
||||
"interval",
|
||||
id="new_subscribe_search",
|
||||
name="新增订阅搜索",
|
||||
minutes=5,
|
||||
kwargs={
|
||||
'job_id': 'new_subscribe_search'
|
||||
}
|
||||
)
|
||||
|
||||
# 检查更新订阅TMDB数据(每隔6小时)
|
||||
self._scheduler.add_job(
|
||||
self.start,
|
||||
"interval",
|
||||
id="subscribe_tmdb",
|
||||
name="订阅元数据更新",
|
||||
hours=6,
|
||||
kwargs={
|
||||
'job_id': 'subscribe_tmdb'
|
||||
}
|
||||
)
|
||||
|
||||
# 订阅状态每隔24小时搜索一次
|
||||
if settings.SUBSCRIBE_SEARCH:
|
||||
self._scheduler.add_job(
|
||||
self.start,
|
||||
"interval",
|
||||
id="subscribe_search",
|
||||
name="订阅搜索补全",
|
||||
hours=24,
|
||||
kwargs={
|
||||
'job_id': 'subscribe_search'
|
||||
}
|
||||
)
|
||||
|
||||
if settings.SUBSCRIBE_MODE == "spider":
|
||||
# 站点首页种子定时刷新模式
|
||||
triggers = TimerUtils.random_scheduler(num_executions=32)
|
||||
for trigger in triggers:
|
||||
self._scheduler.add_job(
|
||||
self.start,
|
||||
"cron",
|
||||
id=f"subscribe_refresh|{trigger.hour}:{trigger.minute}",
|
||||
name="订阅刷新",
|
||||
hour=trigger.hour,
|
||||
minute=trigger.minute,
|
||||
kwargs={
|
||||
'job_id': 'subscribe_refresh'
|
||||
})
|
||||
else:
|
||||
# RSS订阅模式
|
||||
if not settings.SUBSCRIBE_RSS_INTERVAL \
|
||||
or not str(settings.SUBSCRIBE_RSS_INTERVAL).isdigit():
|
||||
settings.SUBSCRIBE_RSS_INTERVAL = 30
|
||||
elif int(settings.SUBSCRIBE_RSS_INTERVAL) < 5:
|
||||
settings.SUBSCRIBE_RSS_INTERVAL = 5
|
||||
self._scheduler.add_job(
|
||||
self.start,
|
||||
"interval",
|
||||
id="subscribe_refresh",
|
||||
name="RSS订阅刷新",
|
||||
minutes=int(settings.SUBSCRIBE_RSS_INTERVAL),
|
||||
kwargs={
|
||||
'job_id': 'subscribe_refresh'
|
||||
})
|
||||
else:
|
||||
# RSS订阅模式
|
||||
if not settings.SUBSCRIBE_RSS_INTERVAL \
|
||||
or not str(settings.SUBSCRIBE_RSS_INTERVAL).isdigit():
|
||||
settings.SUBSCRIBE_RSS_INTERVAL = 30
|
||||
elif int(settings.SUBSCRIBE_RSS_INTERVAL) < 5:
|
||||
settings.SUBSCRIBE_RSS_INTERVAL = 5
|
||||
}
|
||||
)
|
||||
|
||||
# 关注订阅分享(每1小时)
|
||||
self._scheduler.add_job(
|
||||
self.start,
|
||||
"interval",
|
||||
id="subscribe_refresh",
|
||||
name="RSS订阅刷新",
|
||||
minutes=int(settings.SUBSCRIBE_RSS_INTERVAL),
|
||||
id="subscribe_follow",
|
||||
name="关注的订阅分享",
|
||||
hours=1,
|
||||
kwargs={
|
||||
'job_id': 'subscribe_refresh'
|
||||
'job_id': 'subscribe_follow'
|
||||
}
|
||||
)
|
||||
|
||||
# 关注订阅分享(每1小时)
|
||||
self._scheduler.add_job(
|
||||
self.start,
|
||||
"interval",
|
||||
id="subscribe_follow",
|
||||
name="关注的订阅分享",
|
||||
hours=1,
|
||||
kwargs={
|
||||
'job_id': 'subscribe_follow'
|
||||
}
|
||||
)
|
||||
|
||||
# 下载器文件转移(每5分钟)
|
||||
self._scheduler.add_job(
|
||||
self.start,
|
||||
"interval",
|
||||
id="transfer",
|
||||
name="下载文件整理",
|
||||
minutes=5,
|
||||
kwargs={
|
||||
'job_id': 'transfer'
|
||||
}
|
||||
)
|
||||
|
||||
# 后台刷新TMDB壁纸
|
||||
self._scheduler.add_job(
|
||||
self.start,
|
||||
"interval",
|
||||
id="random_wallpager",
|
||||
name="壁纸缓存",
|
||||
minutes=30,
|
||||
next_run_time=datetime.now(pytz.timezone(settings.TZ)) + timedelta(seconds=3),
|
||||
kwargs={
|
||||
'job_id': 'random_wallpager'
|
||||
}
|
||||
)
|
||||
|
||||
# 公共定时服务
|
||||
self._scheduler.add_job(
|
||||
self.start,
|
||||
"interval",
|
||||
id="scheduler_job",
|
||||
name="公共定时服务",
|
||||
minutes=10,
|
||||
kwargs={
|
||||
'job_id': 'scheduler_job'
|
||||
}
|
||||
)
|
||||
|
||||
# 缓存清理服务,每隔24小时
|
||||
self._scheduler.add_job(
|
||||
self.start,
|
||||
"interval",
|
||||
id="clear_cache",
|
||||
name="缓存清理",
|
||||
hours=settings.CACHE_CONF["meta"] / 3600,
|
||||
kwargs={
|
||||
'job_id': 'clear_cache'
|
||||
}
|
||||
)
|
||||
|
||||
# 定时检查用户认证,每隔10分钟
|
||||
self._scheduler.add_job(
|
||||
self.start,
|
||||
"interval",
|
||||
id="user_auth",
|
||||
name="用户认证检查",
|
||||
minutes=10,
|
||||
kwargs={
|
||||
'job_id': 'user_auth'
|
||||
}
|
||||
)
|
||||
|
||||
# 站点数据刷新
|
||||
if settings.SITEDATA_REFRESH_INTERVAL:
|
||||
# 下载器文件转移(每5分钟)
|
||||
self._scheduler.add_job(
|
||||
self.start,
|
||||
"interval",
|
||||
id="sitedata_refresh",
|
||||
name="站点数据刷新",
|
||||
minutes=settings.SITEDATA_REFRESH_INTERVAL * 60,
|
||||
id="transfer",
|
||||
name="下载文件整理",
|
||||
minutes=5,
|
||||
kwargs={
|
||||
'job_id': 'sitedata_refresh'
|
||||
'job_id': 'transfer'
|
||||
}
|
||||
)
|
||||
|
||||
# 推荐缓存
|
||||
self._scheduler.add_job(
|
||||
self.start,
|
||||
"interval",
|
||||
id="recommend_refresh",
|
||||
name="推荐缓存",
|
||||
hours=24,
|
||||
next_run_time=datetime.now(pytz.timezone(settings.TZ)) + timedelta(seconds=3),
|
||||
kwargs={
|
||||
'job_id': 'recommend_refresh'
|
||||
}
|
||||
)
|
||||
# 后台刷新TMDB壁纸
|
||||
self._scheduler.add_job(
|
||||
self.start,
|
||||
"interval",
|
||||
id="random_wallpager",
|
||||
name="壁纸缓存",
|
||||
minutes=30,
|
||||
next_run_time=datetime.now(pytz.timezone(settings.TZ)) + timedelta(seconds=3),
|
||||
kwargs={
|
||||
'job_id': 'random_wallpager'
|
||||
}
|
||||
)
|
||||
|
||||
self.init_plugin_jobs()
|
||||
# 公共定时服务
|
||||
self._scheduler.add_job(
|
||||
self.start,
|
||||
"interval",
|
||||
id="scheduler_job",
|
||||
name="公共定时服务",
|
||||
minutes=10,
|
||||
kwargs={
|
||||
'job_id': 'scheduler_job'
|
||||
}
|
||||
)
|
||||
|
||||
# 打印服务
|
||||
logger.debug(self._scheduler.print_jobs())
|
||||
# 缓存清理服务,每隔24小时
|
||||
self._scheduler.add_job(
|
||||
self.start,
|
||||
"interval",
|
||||
id="clear_cache",
|
||||
name="缓存清理",
|
||||
hours=settings.CACHE_CONF["meta"] / 3600,
|
||||
kwargs={
|
||||
'job_id': 'clear_cache'
|
||||
}
|
||||
)
|
||||
|
||||
# 启动定时服务
|
||||
self._scheduler.start()
|
||||
# 定时检查用户认证,每隔10分钟
|
||||
self._scheduler.add_job(
|
||||
self.start,
|
||||
"interval",
|
||||
id="user_auth",
|
||||
name="用户认证检查",
|
||||
minutes=10,
|
||||
kwargs={
|
||||
'job_id': 'user_auth'
|
||||
}
|
||||
)
|
||||
|
||||
# 站点数据刷新
|
||||
if settings.SITEDATA_REFRESH_INTERVAL:
|
||||
self._scheduler.add_job(
|
||||
self.start,
|
||||
"interval",
|
||||
id="sitedata_refresh",
|
||||
name="站点数据刷新",
|
||||
minutes=settings.SITEDATA_REFRESH_INTERVAL * 60,
|
||||
kwargs={
|
||||
'job_id': 'sitedata_refresh'
|
||||
}
|
||||
)
|
||||
|
||||
# 推荐缓存
|
||||
self._scheduler.add_job(
|
||||
self.start,
|
||||
"interval",
|
||||
id="recommend_refresh",
|
||||
name="推荐缓存",
|
||||
hours=24,
|
||||
next_run_time=datetime.now(pytz.timezone(settings.TZ)) + timedelta(seconds=3),
|
||||
kwargs={
|
||||
'job_id': 'recommend_refresh'
|
||||
}
|
||||
)
|
||||
|
||||
# 初始化工作流服务
|
||||
self.init_workflow_jobs()
|
||||
|
||||
# 初始化插件服务
|
||||
self.init_plugin_jobs()
|
||||
|
||||
# 打印服务
|
||||
logger.debug(self._scheduler.print_jobs())
|
||||
|
||||
# 启动定时服务
|
||||
self._scheduler.start()
|
||||
|
||||
def start(self, job_id: str, *args, **kwargs):
|
||||
"""
|
||||
@@ -401,52 +412,42 @@ class Scheduler(metaclass=Singleton):
|
||||
for pid in PluginManager().get_running_plugin_ids():
|
||||
self.update_plugin_job(pid)
|
||||
|
||||
def update_plugin_job(self, pid: str):
|
||||
def init_workflow_jobs(self):
|
||||
"""
|
||||
更新插件定时服务
|
||||
初始化工作流定时服务
|
||||
"""
|
||||
if not self._scheduler or not pid:
|
||||
for workflow in WorkflowChain().get_workflows() or []:
|
||||
self.update_workflow_job(workflow)
|
||||
|
||||
def remove_workflow_job(self, workflow: Workflow):
|
||||
"""
|
||||
移除工作流服务
|
||||
"""
|
||||
if not self._scheduler:
|
||||
return
|
||||
# 移除该插件的全部服务
|
||||
self.remove_plugin_job(pid)
|
||||
# 获取插件服务列表
|
||||
with self._lock:
|
||||
try:
|
||||
plugin_services = PluginManager().get_plugin_services(pid=pid)
|
||||
except Exception as e:
|
||||
logger.error(f"运行插件 {pid} 服务失败:{str(e)} - {traceback.format_exc()}")
|
||||
job_id = f"workflow-{workflow.id}"
|
||||
service = self._jobs.pop(job_id, None)
|
||||
if not service:
|
||||
return
|
||||
# 获取插件名称
|
||||
plugin_name = PluginManager().get_plugin_attr(pid, "plugin_name")
|
||||
# 开始注册插件服务
|
||||
for service in plugin_services:
|
||||
try:
|
||||
sid = f"{service['id']}"
|
||||
job_id = sid.split("|")[0]
|
||||
self.remove_plugin_job(pid, job_id)
|
||||
self._jobs[job_id] = {
|
||||
"func": service["func"],
|
||||
"name": service["name"],
|
||||
"pid": pid,
|
||||
"plugin_name": plugin_name,
|
||||
"kwargs": service.get("func_kwargs") or {},
|
||||
"running": False,
|
||||
}
|
||||
self._scheduler.add_job(
|
||||
self.start,
|
||||
service["trigger"],
|
||||
id=sid,
|
||||
name=service["name"],
|
||||
**(service.get("kwargs") or {}),
|
||||
kwargs={"job_id": job_id},
|
||||
replace_existing=True
|
||||
)
|
||||
logger.info(f"注册插件{plugin_name}服务:{service['name']} - {service['trigger']}")
|
||||
except Exception as e:
|
||||
logger.error(f"注册插件{plugin_name}服务失败:{str(e)} - {service}")
|
||||
SchedulerChain().messagehelper.put(title=f"插件 {plugin_name} 服务注册失败",
|
||||
message=str(e),
|
||||
role="system")
|
||||
try:
|
||||
# 在调度器中查找并移除对应的 job
|
||||
job_removed = False
|
||||
for job in list(self._scheduler.get_jobs()):
|
||||
if job_id == job.id:
|
||||
try:
|
||||
self._scheduler.remove_job(job.id)
|
||||
job_removed = True
|
||||
except JobLookupError:
|
||||
pass
|
||||
break
|
||||
if job_removed:
|
||||
logger.info(f"移除工作流服务:{service.get('name')}")
|
||||
except Exception as e:
|
||||
logger.error(f"移除工作流服务失败:{str(e)} - {job_id}: {service}")
|
||||
SchedulerChain().messagehelper.put(title=f"工作流 {workflow.name} 服务移除失败",
|
||||
message=str(e),
|
||||
role="system")
|
||||
|
||||
def remove_plugin_job(self, pid: str, job_id: str = None):
|
||||
"""
|
||||
@@ -494,6 +495,86 @@ class Scheduler(metaclass=Singleton):
|
||||
message=str(e),
|
||||
role="system")
|
||||
|
||||
def update_workflow_job(self, workflow: Workflow):
|
||||
"""
|
||||
更新工作流定时服务
|
||||
"""
|
||||
if not self._scheduler:
|
||||
return
|
||||
# 移除该工作流的全部服务
|
||||
self.remove_workflow_job(workflow)
|
||||
# 添加工作流服务
|
||||
with self._lock:
|
||||
try:
|
||||
job_id = f"workflow-{workflow.id}"
|
||||
self._jobs[job_id] = {
|
||||
"func": WorkflowChain().process,
|
||||
"name": workflow.name,
|
||||
"provider_name": "工作流",
|
||||
"running": False,
|
||||
}
|
||||
self._scheduler.add_job(
|
||||
self.start,
|
||||
trigger=CronTrigger.from_crontab(workflow.timer),
|
||||
id=job_id,
|
||||
name=workflow.name,
|
||||
kwargs={"job_id": job_id, "workflow_id": workflow.id},
|
||||
replace_existing=True
|
||||
)
|
||||
logger.info(f"注册工作流服务:{workflow.name} - {workflow.timer}")
|
||||
except Exception as e:
|
||||
logger.error(f"注册工作流服务失败:{workflow.name} - {str(e)}")
|
||||
SchedulerChain().messagehelper.put(title=f"工作流 {workflow.name} 服务注册失败",
|
||||
message=str(e),
|
||||
role="system")
|
||||
|
||||
def update_plugin_job(self, pid: str):
|
||||
"""
|
||||
更新插件定时服务
|
||||
"""
|
||||
if not self._scheduler or not pid:
|
||||
return
|
||||
# 移除该插件的全部服务
|
||||
self.remove_plugin_job(pid)
|
||||
# 获取插件服务列表
|
||||
with self._lock:
|
||||
try:
|
||||
plugin_services = PluginManager().get_plugin_services(pid=pid)
|
||||
except Exception as e:
|
||||
logger.error(f"运行插件 {pid} 服务失败:{str(e)} - {traceback.format_exc()}")
|
||||
return
|
||||
# 获取插件名称
|
||||
plugin_name = PluginManager().get_plugin_attr(pid, "plugin_name")
|
||||
# 开始注册插件服务
|
||||
for service in plugin_services:
|
||||
try:
|
||||
sid = f"{service['id']}"
|
||||
job_id = sid.split("|")[0]
|
||||
self.remove_plugin_job(pid, job_id)
|
||||
self._jobs[job_id] = {
|
||||
"func": service["func"],
|
||||
"name": service["name"],
|
||||
"pid": pid,
|
||||
"provider_name": plugin_name,
|
||||
"kwargs": service.get("func_kwargs") or {},
|
||||
"running": False,
|
||||
}
|
||||
self._scheduler.add_job(
|
||||
self.start,
|
||||
service["trigger"],
|
||||
id=sid,
|
||||
name=service["name"],
|
||||
**(service.get("kwargs") or {}),
|
||||
kwargs={"job_id": job_id},
|
||||
replace_existing=True
|
||||
)
|
||||
logger.info(f"注册插件{plugin_name}服务:{service['name']} - {service['trigger']}")
|
||||
except Exception as e:
|
||||
logger.error(f"注册插件{plugin_name}服务失败:{str(e)} - {service}")
|
||||
SchedulerChain().messagehelper.put(title=f"插件 {plugin_name} 服务注册失败",
|
||||
message=str(e),
|
||||
role="system")
|
||||
|
||||
     def list(self) -> List[schemas.ScheduleInfo]:
         """
         当前所有任务

@@ -511,14 +592,14 @@ class Scheduler(metaclass=Singleton):
         # 将正在运行的任务提取出来 (保障一次性任务正常显示)
         for job_id, service in self._jobs.items():
             name = service.get("name")
-            plugin_name = service.get("plugin_name")
-            if service.get("running") and name and plugin_name:
+            provider_name = service.get("provider_name")
+            if service.get("running") and name and provider_name:
                 if name not in added:
                     added.append(name)
                     schedulers.append(schemas.ScheduleInfo(
                         id=job_id,
                         name=name,
-                        provider=plugin_name,
+                        provider=provider_name,
                         status="正在运行",
                     ))
         # 获取其他待执行任务

@@ -538,7 +619,7 @@ class Scheduler(metaclass=Singleton):
                 schedulers.append(schemas.ScheduleInfo(
                     id=job_id,
                     name=job.name,
-                    provider=service.get("plugin_name", "[系统]"),
+                    provider=service.get("provider_name", "[系统]"),
                     status=status,
                     next_run=next_run
                 ))
@@ -548,17 +629,18 @@ class Scheduler(metaclass=Singleton):
         """
         关闭定时服务
         """
-        try:
-            if self._scheduler:
-                logger.info("正在停止定时任务...")
-                self._event.set()
-                self._scheduler.remove_all_jobs()
-                if self._scheduler.running:
-                    self._scheduler.shutdown()
-                self._scheduler = None
-                logger.info("定时任务停止完成")
-        except Exception as e:
-            logger.error(f"停止定时任务失败::{str(e)} - {traceback.format_exc()}")
+        with lock:
+            try:
+                if self._scheduler:
+                    logger.info("正在停止定时任务...")
+                    self._event.set()
+                    self._scheduler.remove_all_jobs()
+                    if self._scheduler.running:
+                        self._scheduler.shutdown()
+                    self._scheduler = None
+                    logger.info("定时任务停止完成")
+            except Exception as e:
+                logger.error(f"停止定时任务失败::{str(e)} - {traceback.format_exc()}")

     @staticmethod
     def clear_cache():
@@ -19,3 +19,5 @@ from .file import *
 from .exception import *
 from .system import *
 from .event import *
+from .workflow import *
+from .download import *
@@ -1,4 +1,4 @@
-from typing import Optional, Dict, List, Union
+from typing import Optional, Dict, List, Union, Any

 from pydantic import BaseModel, Field

@@ -235,9 +235,9 @@ class Context(BaseModel):
     上下文
     """
     # 元数据
-    meta_info: Optional[MetaInfo] = None
+    meta_info: Optional[Union[MetaInfo, Any]] = None
     # 媒体信息
-    media_info: Optional[MediaInfo] = None
+    media_info: Optional[Union[MediaInfo, Any]] = None
     # 种子信息
     torrent_info: Optional[TorrentInfo] = None
13
app/schemas/download.py
Normal file
@@ -0,0 +1,13 @@
from typing import Optional

from pydantic import BaseModel, Field


class DownloadTask(BaseModel):
    """
    下载任务
    """
    download_id: Optional[str] = Field(default=None, description="任务ID")
    downloader: Optional[str] = Field(default=None, description="下载器")
    path: Optional[str] = Field(default=None, description="下载路径")
    completed: Optional[bool] = Field(default=False, description="是否完成")
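DownloadTask is an ordinary pydantic model, so workflow actions can construct and serialize it directly. A brief usage sketch with invented values (the model is re-declared here only to keep the snippet self-contained):

from typing import Optional

from pydantic import BaseModel, Field


class DownloadTask(BaseModel):
    download_id: Optional[str] = Field(default=None, description="任务ID")
    downloader: Optional[str] = Field(default=None, description="下载器")
    path: Optional[str] = Field(default=None, description="下载路径")
    completed: Optional[bool] = Field(default=False, description="是否完成")


task = DownloadTask(download_id="abc123", downloader="qbittorrent", path="/downloads/show")
print(task.dict())      # {'download_id': 'abc123', 'downloader': 'qbittorrent', 'path': '/downloads/show', 'completed': False}
task.completed = True   # mark the task finished once the downloader reports completion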
@@ -6,6 +6,15 @@ from pydantic import BaseModel, Field, root_validator
 from app.schemas import MessageChannel, FileItem


+class Event(BaseModel):
+    """
+    事件模型
+    """
+    event_type: str = Field(..., description="事件类型")
+    event_data: Optional[dict] = Field(default={}, description="事件数据")
+    priority: Optional[int] = Field(0, description="事件优先级")
+
+
 class BaseEventData(BaseModel):
     """
     事件数据的基类,所有具体事件数据类应继承自此类

@@ -244,6 +253,7 @@ class DiscoverMediaSource(BaseModel):
     api_path: str = Field(..., description="媒体数据源API地址")
     filter_params: Optional[Dict[str, Any]] = Field(default=None, description="过滤参数")
     filter_ui: Optional[List[dict]] = Field(default=[], description="过滤参数UI配置")
+    depends: Optional[Dict[str, list]] = Field(default=None, description="UI依赖关系字典")


 class DiscoverSourceEventData(ChainEventData):
@@ -115,3 +115,9 @@ class SiteUserData(BaseModel):
 class SiteAuth(BaseModel):
     site: Optional[str] = None
     params: Optional[Dict[str, Union[int, str]]] = Field(default_factory=dict)
+
+
+class SiteCategory(BaseModel):
+    id: Optional[int] = None
+    cat: Optional[str] = None
+    desc: Optional[str] = None
@@ -21,6 +21,8 @@ class TransferTorrent(BaseModel):
     tags: Optional[str] = None
     size: Optional[int] = 0
     userid: Optional[str] = None
+    progress: Optional[float] = 0
+    state: Optional[str] = None


 class DownloadingTorrent(BaseModel):
@@ -87,6 +87,8 @@ class ChainEventType(Enum):
     MediaRecognizeConvert = "media.recognize.convert"
     # 推荐数据源
     RecommendSource = "recommend.source"
+    # 工作流执行
+    WorkflowExecution = "workflow.execution"


 # 系统配置Key字典

@@ -145,6 +147,8 @@ class SystemConfigKey(Enum):
     UserSiteAuthParams = "UserSiteAuthParams"
     # Follow订阅分享者
     FollowSubscribers = "FollowSubscribers"
+    # 通知发送时间
+    NotificationSendTime = "NotificationSendTime"


 # 处理进度Key字典
84
app/schemas/workflow.py
Normal file
@@ -0,0 +1,84 @@
from typing import Optional, List

from pydantic import BaseModel, Field

from app.schemas.context import Context, MediaInfo
from app.schemas.download import DownloadTask
from app.schemas.file import FileItem
from app.schemas.site import Site
from app.schemas.subscribe import Subscribe


class Workflow(BaseModel):
    """
    工作流信息
    """
    id: Optional[int] = Field(default=None, description="工作流ID")
    name: Optional[str] = Field(default=None, description="工作流名称")
    description: Optional[str] = Field(default=None, description="工作流描述")
    timer: Optional[str] = Field(default=None, description="定时器")
    state: Optional[str] = Field(default=None, description="状态")
    current_action: Optional[str] = Field(default=None, description="已执行动作")
    result: Optional[str] = Field(default=None, description="任务执行结果")
    run_count: Optional[int] = Field(default=0, description="已执行次数")
    actions: Optional[list] = Field(default=[], description="任务列表")
    flows: Optional[list] = Field(default=[], description="任务流")
    add_time: Optional[str] = Field(default=None, description="创建时间")
    last_time: Optional[str] = Field(default=None, description="最后执行时间")

    class Config:
        orm_mode = True


class ActionParams(BaseModel):
    """
    动作基础参数
    """
    loop: Optional[bool] = Field(default=False, description="是否需要循环")
    loop_interval: Optional[int] = Field(default=0, description="循环间隔 (秒)")


class Action(BaseModel):
    """
    动作信息
    """
    id: Optional[str] = Field(default=None, description="动作ID")
    type: Optional[str] = Field(default=None, description="动作类型 (类名)")
    name: Optional[str] = Field(default=None, description="动作名称")
    description: Optional[str] = Field(default=None, description="动作描述")
    position: Optional[dict] = Field(default={}, description="位置")
    data: Optional[dict] = Field(default={}, description="参数")


class ActionExecution(BaseModel):
    """
    动作执行情况
    """
    action: Optional[str] = Field(default=None, description="当前动作(名称)")
    result: Optional[bool] = Field(default=None, description="执行结果")
    message: Optional[str] = Field(default=None, description="执行消息")


class ActionContext(BaseModel):
    """
    动作基础上下文,各动作通用数据
    """
    content: Optional[str] = Field(default=None, description="文本类内容")
    torrents: Optional[List[Context]] = Field(default=[], description="资源列表")
    medias: Optional[List[MediaInfo]] = Field(default=[], description="媒体列表")
    fileitems: Optional[List[FileItem]] = Field(default=[], description="文件列表")
    downloads: Optional[List[DownloadTask]] = Field(default=[], description="下载任务列表")
    sites: Optional[List[Site]] = Field(default=[], description="站点列表")
    subscribes: Optional[List[Subscribe]] = Field(default=[], description="订阅列表")
    execute_history: Optional[List[ActionExecution]] = Field(default=[], description="执行历史")
    progress: Optional[int] = Field(default=0, description="执行进度(%)")


class ActionFlow(BaseModel):
    """
    工作流流程
    """
    id: Optional[str] = Field(default=None, description="流程ID")
    source: Optional[str] = Field(default=None, description="源动作")
    target: Optional[str] = Field(default=None, description="目标动作")
    animated: Optional[bool] = Field(default=True, description="是否动画流程")
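The Workflow.timer field carries a crontab expression that the scheduler changes above hand to APScheduler's CronTrigger.from_crontab. A small sketch of building such a model and its trigger; the field values are invented and only pydantic and APScheduler are assumed to be installed:

from typing import Optional

from apscheduler.triggers.cron import CronTrigger
from pydantic import BaseModel, Field


class Workflow(BaseModel):
    # Trimmed to the fields used here; the full model is defined above.
    id: Optional[int] = Field(default=None, description="工作流ID")
    name: Optional[str] = Field(default=None, description="工作流名称")
    timer: Optional[str] = Field(default=None, description="定时器")


wf = Workflow(id=1, name="每日清理", timer="0 3 * * *")
trigger = CronTrigger.from_crontab(wf.timer)   # fires every day at 03:00
print(trigger)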
@@ -3,6 +3,7 @@ from contextlib import asynccontextmanager

 from fastapi import FastAPI

+from app.startup.workflow_initializer import init_workflow, stop_workflow
 from app.startup.modules_initializer import shutdown_modules, start_modules
 from app.startup.plugins_initializer import init_plugins_async
 from app.startup.routers_initializer import init_routers

@@ -16,6 +17,8 @@ async def lifespan(app: FastAPI):
     print("Starting up...")
     # 启动模块
     start_modules(app)
+    # 初始化工作流动作
+    init_workflow(app)
     # 初始化路由
     init_routers(app)
     # 初始化插件

@@ -35,3 +38,6 @@ async def lifespan(app: FastAPI):
         print(f"Error during plugin installation shutdown: {e}")
     # 清理模块
     shutdown_modules(app)
+    # 关闭工作流
+    stop_workflow(app)
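For readers less familiar with FastAPI lifespans: everything before the yield runs at startup in the order written (so the workflow manager is created before routers and plugins), and everything after the yield runs at shutdown. A minimal self-contained sketch of that ordering; the steps here are placeholders, not the project's initializers:

from contextlib import asynccontextmanager

from fastapi import FastAPI


@asynccontextmanager
async def lifespan(app: FastAPI):
    print("start modules")     # startup steps run top-down before the app serves requests
    print("init workflow")
    print("init routers")
    yield                      # the application serves requests while suspended here
    print("shutdown modules")  # shutdown steps run once the server stops accepting requests
    print("stop workflow")


app = FastAPI(lifespan=lifespan)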
17
app/startup/workflow_initializer.py
Normal file
@@ -0,0 +1,17 @@
from fastapi import FastAPI

from app.core.workflow import WorkFlowManager


def init_workflow(_: FastAPI):
    """
    初始化动作
    """
    WorkFlowManager()


def stop_workflow(_: FastAPI):
    """
    停止动作
    """
    WorkFlowManager().stop()
@@ -42,7 +42,7 @@ class SecurityUtils:
     @staticmethod
     def is_safe_url(url: str, allowed_domains: Union[Set[str], List[str]], strict: bool = False) -> bool:
         """
-        验证URL是否在允许的域名列表中,包括带有端口的域名。
+        验证URL是否在允许的域名列表中,包括带有端口的域名

         :param url: 需要验证的 URL
         :param allowed_domains: 允许的域名集合,域名可以包含端口

@@ -65,7 +65,6 @@ class SecurityUtils:
         netloc = parsed_url.netloc.lower()
         if not netloc:
             return False
-        netloc_no_port = netloc.split(":")[0]

         # 检查每个允许的域名
         allowed_domains = {d.lower() for d in allowed_domains}

@@ -79,7 +78,7 @@ class SecurityUtils:
                 return True
             else:
                 # 非严格模式下,允许子域名匹配
-                if netloc_no_port == allowed_netloc or netloc_no_port.endswith('.' + allowed_netloc):
+                if netloc == allowed_netloc or netloc.endswith('.' + allowed_netloc):
                     return True

         return False
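The net effect of dropping netloc_no_port is that non-strict matching now compares the full host:port against the allow-list instead of ignoring the port. A small urlparse-based sketch of the new comparison, written as a standalone re-implementation for illustration rather than the project's class:

from typing import Iterable
from urllib.parse import urlparse


def is_allowed(url: str, allowed_domains: Iterable[str]) -> bool:
    """Non-strict check mirroring the updated logic: exact host[:port] match or sub-domain suffix match."""
    netloc = urlparse(url).netloc.lower()
    if not netloc:
        return False
    for allowed in (d.lower() for d in allowed_domains):
        if netloc == allowed or netloc.endswith('.' + allowed):
            return True
    return False


print(is_allowed("https://api.example.com/path", {"example.com"}))        # True: sub-domain match
print(is_allowed("https://example.com:8443/path", {"example.com"}))       # False: the port is now part of the comparison
print(is_allowed("https://example.com:8443/path", {"example.com:8443"}))  # True: allow-list entry carries the port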
@@ -3,7 +3,7 @@ import abc

 class Singleton(abc.ABCMeta, type):
     """
-    类单例模式
+    类单例模式(按参数)
     """

     _instances: dict = {}

@@ -19,3 +19,24 @@ class AbstractSingleton(abc.ABC, metaclass=Singleton):
     """
     抽像类单例模式
     """
     pass
+
+
+class SingletonClass(abc.ABCMeta, type):
+    """
+    类单例模式(按类)
+    """
+
+    _instances: dict = {}
+
+    def __call__(cls, *args, **kwargs):
+        if cls not in cls._instances:
+            cls._instances[cls] = super(SingletonClass, cls).__call__(*args, **kwargs)
+        return cls._instances[cls]
+
+
+class AbstractSingletonClass(abc.ABC, metaclass=SingletonClass):
+    """
+    抽像类单例模式(按类)
+    """
+    pass
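The updated docstrings distinguish the existing metaclass ("按参数", keyed per argument set) from the new SingletonClass ("按类", keyed per class regardless of constructor arguments). A self-contained sketch of the per-class variant and how it behaves; only the metaclass is copied from the diff, the Config class is an invented example:

import abc


class SingletonClass(abc.ABCMeta, type):
    """Per-class singleton: whatever the constructor arguments, the first instance is reused."""
    _instances: dict = {}

    def __call__(cls, *args, **kwargs):
        if cls not in cls._instances:
            cls._instances[cls] = super(SingletonClass, cls).__call__(*args, **kwargs)
        return cls._instances[cls]


class Config(metaclass=SingletonClass):   # invented example class
    def __init__(self, path: str = "default.yaml"):
        self.path = path


a = Config("first.yaml")
b = Config("second.yaml")                 # arguments are ignored: the cached instance is returned
print(a is b, a.path)                     # True first.yaml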
@@ -63,3 +63,5 @@ OCR_HOST=https://movie-pilot.org
 PLUGIN_MARKET=https://github.com/jxxghp/MoviePilot-Plugins,https://github.com/thsrite/MoviePilot-Plugins,https://github.com/InfinityPacer/MoviePilot-Plugins,https://github.com/honue/MoviePilot-Plugins
 # 搜索多个名称,true/false,为true时搜索时会同时搜索中英文及原始名称,搜索结果会更全面,但会增加搜索时间;为false时其中一个名称搜索到结果或全部名称搜索完毕即停止
 SEARCH_MULTIPLE_NAME=true
+# 为指定字幕添加.default后缀设置为默认字幕,支持为'zh-cn','zh-tw','eng'添加默认字幕,未定义或设置为None则不添加
+DEFAULT_SUB=None
24
database/versions/279a949d81b6_2_1_1.py
Normal file
@@ -0,0 +1,24 @@
"""2.1.1

Revision ID: 279a949d81b6
Revises: ca5461f314f2
Create Date: 2025-02-14 19:02:24.989349

"""

from app.chain.torrents import TorrentsChain

# revision identifiers, used by Alembic.
revision = '279a949d81b6'
down_revision = 'ca5461f314f2'
branch_labels = None
depends_on = None


def upgrade() -> None:
    # 清理一次缓存
    TorrentsChain().clear_torrents()


def downgrade() -> None:
    pass
@@ -7,9 +7,8 @@ Create Date: 2024-12-24 13:29:32.225532
 """
 import contextlib

+from alembic import op
 import sqlalchemy as sa
 from sqlalchemy.dialects import sqlite
-from alembic import op

 # revision identifiers, used by Alembic.
 revision = '55390f1f77c1'
29
database/versions/610bb05ddeef_2_1_2.py
Normal file
@@ -0,0 +1,29 @@
"""2.1.2

Revision ID: 610bb05ddeef
Revises: 279a949d81b6
Create Date: 2025-02-24 07:52:00.042837

"""
import contextlib

from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import sqlite

# revision identifiers, used by Alembic.
revision = '610bb05ddeef'
down_revision = '279a949d81b6'
branch_labels = None
depends_on = None


def upgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    with contextlib.suppress(Exception):
        op.add_column('workflow', sa.Column('flows', sa.JSON(), nullable=True))
    # ### end Alembic commands ###


def downgrade() -> None:
    pass
@@ -7,9 +7,8 @@ Create Date: 2025-02-06 18:28:00.644571
 """
 import contextlib

+from alembic import op
 import sqlalchemy as sa
 from sqlalchemy.dialects import sqlite
-from alembic import op

 # revision identifiers, used by Alembic.
 revision = 'ca5461f314f2'
@@ -32,6 +32,7 @@ func_timeout==4.3.5
 bs4~=0.0.1
 beautifulsoup4~=4.12.2
 pillow~=10.4.0
+pillow-avif-plugin~=1.4.6
 pyTelegramBotAPI~=4.12.0
 playwright~=1.37.0
 cf-clearance~=0.31.0
28
setup.py
@@ -1,20 +1,22 @@

 from distutils.core import setup

 from Cython.Build import cythonize


 module_list = ['app/helper/sites.py']

 setup(
-    name="",
-    author="",
-    zip_safe=False,
-    include_package_data=True,
-    ext_modules=cythonize(
-        module_list=module_list,
-        nthreads=0,
-        compiler_directives={"language_level": "3"},
-    ),
-    script_args=["build_ext", "-j", '2', "--inplace"],
-)
+    name="MoviePilot",
+    author="jxxghp",
+    zip_safe=False,
+    include_package_data=True,
+    ext_modules=cythonize(
+        module_list=module_list,
+        nthreads=0,
+        compiler_directives={
+            "language_level": "3",
+            "binding": False,
+            "nonecheck": False
+        },
+    ),
+    script_args=["build_ext", "-j", '2', "--inplace"],
+)
@@ -1,2 +1,2 @@
-APP_VERSION = 'v2.2.8'
-FRONTEND_VERSION = 'v2.2.8'
+APP_VERSION = 'v2.3.4'
+FRONTEND_VERSION = 'v2.3.4'