Compare commits

...

73 Commits

Author SHA1 Message Date
jxxghp
4086ba4763 更新 version.py 2025-02-28 12:30:45 +08:00
jxxghp
6a9cdf71d7 fix AddDownloadAction 2025-02-28 12:12:52 +08:00
jxxghp
a9644c4f86 fix actions 2025-02-28 11:56:26 +08:00
jxxghp
cf62ad5e8e fix actions 2025-02-28 11:15:24 +08:00
jxxghp
f8ed16666c fix actions execute 2025-02-27 20:39:42 +08:00
jxxghp
37926b4c19 fix actions 2025-02-27 18:58:11 +08:00
jxxghp
b080a2003f fix actions 2025-02-27 17:08:38 +08:00
jxxghp
ab0008be86 fix actions 2025-02-27 13:09:01 +08:00
jxxghp
4a42b0d000 fix import 2025-02-26 21:13:41 +08:00
jxxghp
e3d4b19dac fix actionid type 2025-02-26 20:28:10 +08:00
jxxghp
403d600db4 fix workflow edit api 2025-02-26 19:06:30 +08:00
jxxghp
835e6e8891 fix workflow scheduler 2025-02-26 18:32:25 +08:00
jxxghp
eec25113b5 fix workflow scheduler 2025-02-26 18:24:27 +08:00
jxxghp
a7c4161f91 fix workflow executor 2025-02-26 12:57:57 +08:00
jxxghp
799eb9e6ef add workflow executor 2025-02-26 08:37:37 +08:00
jxxghp
88993cb67b fix workflow api 2025-02-25 17:27:21 +08:00
jxxghp
0dc9c98c06 fix workflow api 2025-02-25 13:35:32 +08:00
jxxghp
c1c91cec44 fix workflow api 2025-02-25 13:25:56 +08:00
jxxghp
19b6927320 fix workflow process 2025-02-25 12:42:15 +08:00
jxxghp
0889ebc8b8 fix workflow schema 2025-02-25 08:25:19 +08:00
jxxghp
fb249c0ea5 fix workflow excute 2025-02-25 08:22:02 +08:00
jxxghp
feb22ff0a7 Merge pull request #3922 from WingGao/v2 2025-02-22 17:51:13 +08:00
WingGao
3c95156ce1 fix: alist不应该缓存失败的结果 2025-02-22 15:05:04 +08:00
jxxghp
8b6dca6a46 fix bug 2025-02-22 11:22:21 +08:00
jxxghp
43907eea26 fix 2025-02-22 11:12:14 +08:00
jxxghp
67145a80d0 add workflow apis 2025-02-22 10:35:57 +08:00
jxxghp
0b3138fec6 fix actions 2025-02-22 09:57:32 +08:00
jxxghp
b84896b4f9 Merge pull request #3919 from InfinityPacer/feature/plugin 2025-02-22 07:46:02 +08:00
InfinityPacer
efd046d2f8 fix(plugin): handle None response for online plugins retrieval 2025-02-22 00:34:35 +08:00
jxxghp
06fcf817bb Merge pull request #3917 from gtsicko/v2 2025-02-21 07:29:23 +08:00
gtsicko
16a94d9054 fix: 修复带路径的WECHAT_PROXY不生效 2025-02-20 23:41:14 +08:00
jxxghp
5bf502188d fix 2025-02-20 19:32:58 +08:00
jxxghp
5269b4bc82 fix #3914
feat:搜索支持指定站点
2025-02-20 13:03:12 +08:00
jxxghp
e3f8ed9886 add downloads path 2025-02-20 10:51:22 +08:00
jxxghp
74de554fb0 Merge pull request #3914 from TimoYoung/v2 2025-02-19 18:01:49 +08:00
jxxghp
b41de1a982 fix actions 2025-02-19 17:44:14 +08:00
Timo_Young
25f7d9ccdd Merge branch 'jxxghp:v2' into v2 2025-02-19 17:28:22 +08:00
yangyux
9646745181 fix: mtype为空且tmdbid在movie和tv中都存在时的识别错误问题 2025-02-19 17:27:38 +08:00
jxxghp
1317d9c4f0 fix actions 2025-02-19 16:43:42 +08:00
jxxghp
351029a842 fix AddDownloadAction 2025-02-19 15:24:13 +08:00
jxxghp
15e1fb61ac fix actions 2025-02-19 08:33:15 +08:00
jxxghp
1889a829b5 fix workflow process 2025-02-19 08:16:35 +08:00
jxxghp
53a14fce38 fix workflow process 2025-02-19 08:15:49 +08:00
jxxghp
d9ed7b09c7 v2.3.0
- 站点资源浏览支持关键字和分类搜索,优化了界面,修改了站点卡片点击时的交互行为
- 优化了APP模式下更多菜单、滚动条等多处UI细节
2025-02-18 17:05:24 +08:00
jxxghp
4dcb18f00e fix: site browse api 2025-02-18 16:32:10 +08:00
jxxghp
0a52fe0a7a refactor: site browse api 2025-02-17 19:01:05 +08:00
jxxghp
e5a4d11cf9 fix workflow 2025-02-17 15:08:24 +08:00
jxxghp
6c233f13de fix workflow chain 2025-02-17 12:38:29 +08:00
jxxghp
00aee3496c add workflow oper 2025-02-17 11:54:11 +08:00
jxxghp
77ae40e3d6 fix workflow 2025-02-17 11:40:32 +08:00
jxxghp
68cba44476 fix modules load 2025-02-16 17:24:17 +08:00
jxxghp
b86d06f632 add workflow lifecycle 2025-02-16 16:53:38 +08:00
jxxghp
0b7cf305a0 add action templates 2025-02-16 13:45:15 +08:00
jxxghp
21ae36bc3a add action templates 2025-02-16 12:52:29 +08:00
jxxghp
4e2d9e9165 Merge pull request #3899 from Mister-album/v2-sync 2025-02-15 08:10:15 +08:00
Mister-album
6cee308894 添加为指定字幕添加.default后缀设置为默认字幕功能 2025-02-14 19:58:29 +08:00
jxxghp
b8f4cd5fea v2.2.9
- 资源包升级以提升安全性
- 优化了页面数据刷新机制

注意:本次升级后会默认清理一次种子识别缓存
2025-02-14 19:35:49 +08:00
jxxghp
aa1557ad9e fix setup 2025-02-14 17:37:10 +08:00
jxxghp
f03da6daca fix setup 2025-02-14 17:17:16 +08:00
jxxghp
30eb4385d4 fix sites 2025-02-14 13:44:18 +08:00
jxxghp
4c9afcc1a8 fix 2025-02-14 13:32:20 +08:00
jxxghp
dd47432a45 fix 2025-02-14 12:55:32 +08:00
jxxghp
0ba6974bd6 fix #3843
fix #3829
2025-02-13 08:08:13 +08:00
jxxghp
827d8f6d84 add workflow framework 2025-02-12 17:49:01 +08:00
jxxghp
943a462c69 Merge pull request #3885 from InfinityPacer/feature/security 2025-02-11 17:21:04 +08:00
InfinityPacer
a1bc773fb5 feat(security): add AVIF support 2025-02-11 17:10:50 +08:00
InfinityPacer
ac169b7d22 feat(security): add cache default extension for files without suffix 2025-02-11 17:09:43 +08:00
jxxghp
eecbbfea3a 更新 version.py 2025-02-10 22:28:06 +08:00
jxxghp
635ddb044e add depends for DiscoverMediaSource 2025-02-10 22:05:56 +08:00
jxxghp
1a6123489d 更新 config.py 2025-02-10 07:52:40 +08:00
jxxghp
4e69195a8d Merge pull request #3876 from InfinityPacer/feature/security 2025-02-10 07:11:28 +08:00
InfinityPacer
e48c8ee652 Revert "fix is_safe_url"
This reverts commit 5e2ad34864.
2025-02-10 02:22:53 +08:00
InfinityPacer
7df07b86b9 feat(security): add cmvideo image for http with port 2025-02-10 02:19:08 +08:00
70 changed files with 2234 additions and 912 deletions

63
app/actions/__init__.py Normal file
View File

@@ -0,0 +1,63 @@
from abc import ABC, abstractmethod
from app.chain import ChainBase
from app.schemas import ActionContext, ActionParams
class ActionChain(ChainBase):
    """Processing chain used by workflow actions to reach chain-level services."""
    pass
class BaseAction(ABC):
    """
    Base class for workflow actions.

    Subclasses implement :meth:`execute` and expose their state through the
    ``done`` and ``success`` properties; :meth:`job_done` marks completion.
    """

    # Completion flag; job_done() assigns True on the instance, shadowing
    # this class-level default, so the flag is effectively per-instance.
    _done_flag = False

    # NOTE(review): stacking @classmethod + @property is deprecated since
    # Python 3.11 and no longer works on 3.13+ — confirm the project's
    # supported interpreter range before relying on this pattern.
    @classmethod
    @property
    @abstractmethod
    def name(cls) -> str:
        # Human-readable action name shown in the workflow UI
        pass

    @classmethod
    @property
    @abstractmethod
    def description(cls) -> str:
        # Short description of what the action does
        pass

    @classmethod
    @property
    @abstractmethod
    def data(cls) -> dict:
        # Default parameter payload (the serialized params model)
        pass

    @property
    def done(self) -> bool:
        """
        Whether the action has finished.
        """
        return self._done_flag

    @property
    @abstractmethod
    def success(self) -> bool:
        """
        Whether the action succeeded.
        """
        pass

    def job_done(self):
        """
        Mark the action as finished.
        """
        self._done_flag = True

    @abstractmethod
    def execute(self, params: ActionParams, context: ActionContext) -> ActionContext:
        """
        Run the action and return the (possibly updated) context.
        """
        raise NotImplementedError

103
app/actions/add_download.py Normal file
View File

@@ -0,0 +1,103 @@
from pydantic import Field
from app.actions import BaseAction
from app.chain.download import DownloadChain
from app.chain.media import MediaChain
from app.core.metainfo import MetaInfo
from app.log import logger
from app.schemas import ActionParams, ActionContext, DownloadTask, MediaType
class AddDownloadParams(ActionParams):
    """
    Parameters for adding download tasks.
    """
    downloader: str = Field(None, description="下载器")
    save_path: str = Field(None, description="保存路径")
    only_lack: bool = Field(False, description="仅下载缺失的资源")


class AddDownloadAction(BaseAction):
    """
    Add download tasks for the torrents collected in the context.
    """

    def __init__(self):
        super().__init__()
        # BUG FIX: these were class-level attributes, so all instances shared
        # (and accumulated into) the same list/flag; keep them per-instance.
        self._added_downloads = []
        self._has_error = False
        self.downloadchain = DownloadChain()
        self.mediachain = MediaChain()

    @classmethod
    @property
    def name(cls) -> str:
        return "添加下载"

    @classmethod
    @property
    def description(cls) -> str:
        return "根据资源列表添加下载任务"

    @classmethod
    @property
    def data(cls) -> dict:
        return AddDownloadParams().dict()

    @property
    def success(self) -> bool:
        # Successful only if no torrent failed to resolve or download
        return not self._has_error

    def execute(self, params: dict, context: ActionContext) -> ActionContext:
        """
        Add the torrents from the context to the downloader.

        :param params: raw parameter dict, parsed into AddDownloadParams
        :param context: workflow context; reads ``torrents``, appends to ``downloads``
        """
        params = AddDownloadParams(**params)
        for t in context.torrents:
            # Make sure meta/media info are resolved before deciding anything
            if not t.meta_info:
                t.meta_info = MetaInfo(title=t.title, subtitle=t.description)
            if not t.media_info:
                t.media_info = self.mediachain.recognize_media(meta=t.meta_info)
            if not t.media_info:
                self._has_error = True
                logger.warning(f"{t.title} 未识别到媒体信息,无法下载")
                continue
            if params.only_lack:
                # Skip anything the media library already has
                exists_info = self.downloadchain.media_exists(t.media_info)
                if exists_info:
                    if t.media_info.type == MediaType.MOVIE:
                        # 电影
                        logger.warning(f"{t.title} 媒体库中已存在,跳过")
                        continue
                    else:
                        # 电视剧
                        exists_seasons = exists_info.seasons or {}
                        if len(t.meta_info.season_list) > 1:
                            # 多季不下载
                            logger.warning(f"{t.meta_info.title} 有多季,跳过")
                            continue
                        else:
                            # Only skip when every wanted episode already exists
                            exists_episodes = exists_seasons.get(t.meta_info.begin_season)
                            if exists_episodes:
                                if set(t.meta_info.episode_list).issubset(exists_episodes):
                                    logger.warning(f"{t.meta_info.title}{t.meta_info.begin_season} 季第 {t.meta_info.episode_list} 集已存在,跳过")
                                    continue
            did = self.downloadchain.download_single(context=t,
                                                     downloader=params.downloader,
                                                     save_path=params.save_path)
            if did:
                self._added_downloads.append(did)
            else:
                self._has_error = True
        if self._added_downloads:
            logger.info(f"已添加 {len(self._added_downloads)} 个下载任务")
            context.downloads.extend(
                [DownloadTask(download_id=did, downloader=params.downloader) for did in self._added_downloads]
            )
        self.job_done()
        return context

View File

@@ -0,0 +1,79 @@
from app.actions import BaseAction
from app.chain.subscribe import SubscribeChain
from app.core.config import settings
from app.core.context import MediaInfo
from app.db.subscribe_oper import SubscribeOper
from app.log import logger
from app.schemas import ActionParams, ActionContext
class AddSubscribeParams(ActionParams):
    """
    Parameters for adding subscriptions (currently none).
    """
    pass


class AddSubscribeAction(BaseAction):
    """
    Create subscriptions for the media items collected in the context.
    """

    def __init__(self):
        super().__init__()
        # BUG FIX: previously class-level attributes shared by all instances;
        # subscription ids from one run would leak into the next.
        self._added_subscribes = []
        self._has_error = False
        self.subscribechain = SubscribeChain()
        self.subscribeoper = SubscribeOper()

    @classmethod
    @property
    def name(cls) -> str:
        return "添加订阅"

    @classmethod
    @property
    def description(cls) -> str:
        return "根据媒体列表添加订阅"

    @classmethod
    @property
    def data(cls) -> dict:
        return AddSubscribeParams().dict()

    @property
    def success(self) -> bool:
        return not self._has_error

    def execute(self, params: dict, context: ActionContext) -> ActionContext:
        """
        Subscribe to every media item in ``context.medias`` that is not
        already subscribed, appending created records to ``context.subscribes``.
        """
        for media in context.medias:
            # Rebuild a full MediaInfo from the serialized context entry
            mediainfo = MediaInfo()
            mediainfo.from_dict(media.dict())
            if self.subscribechain.exists(mediainfo):
                logger.info(f"{media.title} 已存在订阅")
                continue
            # 添加订阅
            sid, message = self.subscribechain.add(mtype=mediainfo.type,
                                                   title=mediainfo.title,
                                                   year=mediainfo.year,
                                                   tmdbid=mediainfo.tmdb_id,
                                                   season=mediainfo.season,
                                                   doubanid=mediainfo.douban_id,
                                                   bangumiid=mediainfo.bangumi_id,
                                                   username=settings.SUPERUSER)
            if sid:
                self._added_subscribes.append(sid)
            else:
                self._has_error = True
        if self._added_subscribes:
            logger.info(f"已添加 {len(self._added_subscribes)} 个订阅")
            for sid in self._added_subscribes:
                context.subscribes.append(self.subscribeoper.get(sid))
        self.job_done()
        return context

View File

@@ -0,0 +1,61 @@
from app.actions import BaseAction, ActionChain
from app.schemas import ActionParams, ActionContext
from app.log import logger
class FetchDownloadsParams(ActionParams):
    """
    Parameters for fetching download task status (currently none).
    """
    pass


class FetchDownloadsAction(BaseAction):
    """
    Refresh the status of the download tasks in the context.
    """

    def __init__(self):
        super().__init__()
        self.chain = ActionChain()

    @classmethod
    @property
    def name(cls) -> str:
        return "获取下载任务"

    @classmethod
    @property
    def description(cls) -> str:
        return "获取下载任务,更新任务状态"

    @classmethod
    @property
    def data(cls) -> dict:
        return FetchDownloadsParams().dict()

    @property
    def success(self) -> bool:
        return self.done

    def execute(self, params: dict, context: ActionContext) -> ActionContext:
        """
        Update the completion status of the tasks in ``context.downloads``.

        BUG FIX: the original iterated a class-level ``_downloads`` list that
        was never populated, so no task status was ever refreshed and the
        action always reported done (``all([])`` is True). Per its own
        docstring it should operate on the downloads held in the context.
        It also declared an unused local ``__all_complete``.
        """
        downloads = context.downloads
        for download in downloads:
            logger.info(f"获取下载任务 {download.download_id} 状态 ...")
            torrents = self.chain.list_torrents(hashs=[download.download_id])
            if not torrents:
                # Torrent no longer present in the downloader: treat as finished
                download.completed = True
                continue
            for t in torrents:
                download.path = t.path
                if t.progress >= 100:
                    logger.info(f"下载任务 {download.download_id} 已完成")
                    download.completed = True
        # Only mark the action done once every tracked task has completed
        if all(d.completed for d in downloads):
            self.job_done()
        return context

156
app/actions/fetch_medias.py Normal file
View File

@@ -0,0 +1,156 @@
from typing import List
from pydantic import Field
from app.actions import BaseAction
from app.chain.recommend import RecommendChain
from app.schemas import ActionParams, ActionContext
from app.core.config import settings
from app.core.event import eventmanager
from app.log import logger
from app.schemas import RecommendSourceEventData, MediaInfo
from app.schemas.types import ChainEventType
from app.utils.http import RequestUtils
class FetchMediasParams(ActionParams):
    """
    Parameters for fetching media data.
    """
    sources: List[str] = Field([], description="媒体数据来源")


class FetchMediasAction(BaseAction):
    """
    Fetch media lists (trending/rankings/recommendations) into the context.
    """

    def __init__(self):
        super().__init__()
        # BUG FIX: ``_medias`` was a class-level list shared by all instances,
        # so results accumulated across workflow runs. The class also declared
        # a dead ``_inner_sources`` attribute that was shadowed by the
        # name-mangled ``__inner_sources`` set here.
        self._medias = []
        # Instantiate the chain once instead of once per source entry
        recommendchain = RecommendChain()
        self.__inner_sources = [
            {
                "func": recommendchain.tmdb_trending,
                "name": '流行趋势',
            },
            {
                "func": recommendchain.douban_movie_showing,
                "name": '正在热映',
            },
            {
                "func": recommendchain.bangumi_calendar,
                "name": 'Bangumi每日放送',
            },
            {
                "func": recommendchain.tmdb_movies,
                "name": 'TMDB热门电影',
            },
            {
                "func": recommendchain.tmdb_tvs,
                "name": 'TMDB热门电视剧',
            },
            {
                "func": recommendchain.douban_movie_hot,
                "name": '豆瓣热门电影',
            },
            {
                "func": recommendchain.douban_tv_hot,
                "name": '豆瓣热门电视剧',
            },
            {
                "func": recommendchain.douban_tv_animation,
                "name": '豆瓣热门动漫',
            },
            {
                "func": recommendchain.douban_movies,
                "name": '豆瓣最新电影',
            },
            {
                "func": recommendchain.douban_tvs,
                "name": '豆瓣最新电视剧',
            },
            {
                "func": recommendchain.douban_movie_top250,
                "name": '豆瓣电影TOP250',
            },
            {
                "func": recommendchain.douban_tv_weekly_chinese,
                "name": '豆瓣国产剧集榜',
            },
            {
                "func": recommendchain.douban_tv_weekly_global,
                "name": '豆瓣全球剧集榜',
            }
        ]
        # Broadcast an event asking plugins for extra recommendation sources
        event_data = RecommendSourceEventData()
        event = eventmanager.send_event(ChainEventType.RecommendSource, event_data)
        # Merge any sources contributed via the event
        if event and event.event_data:
            event_data: RecommendSourceEventData = event.event_data
            if event_data.extra_sources:
                self.__inner_sources.extend([s.dict() for s in event_data.extra_sources])

    @classmethod
    @property
    def name(cls) -> str:
        return "获取媒体数据"

    @classmethod
    @property
    def description(cls) -> str:
        return "获取榜单等媒体数据列表"

    @classmethod
    @property
    def data(cls) -> dict:
        return FetchMediasParams().dict()

    @property
    def success(self) -> bool:
        # Successful when at least one media item was fetched
        return bool(self._medias)

    def __get_source(self, source: str):
        """
        Look up a data source entry by its display name; None if unknown.
        """
        for s in self.__inner_sources:
            if s['name'] == source:
                return s
        return None

    def execute(self, params: dict, context: ActionContext) -> ActionContext:
        """
        Fetch media data from the configured sources into ``context.medias``.
        """
        params = FetchMediasParams(**params)
        for name in params.sources:
            source = self.__get_source(name)
            if not source:
                continue
            # BUG FIX: the original logged the whole source dict here
            logger.info(f"获取媒体数据 {name} ...")
            results = []
            if source.get("func"):
                results = source['func']()
            else:
                # Extra (plugin) sources expose an API path instead of a callable
                api_url = f"http://127.0.0.1:{settings.PORT}/api/v1/{source['api_path']}?token={settings.API_TOKEN}"
                res = RequestUtils(timeout=15).post_res(api_url)
                if res:
                    results = res.json()
            if results:
                logger.info(f"{name} 获取到 {len(results)} 条数据")
                self._medias.extend([MediaInfo(**r) for r in results])
            else:
                logger.error(f"{name} 获取数据失败")
        if self._medias:
            context.medias.extend(self._medias)
        self.job_done()
        return context

110
app/actions/fetch_rss.py Normal file
View File

@@ -0,0 +1,110 @@
from typing import Optional
from pydantic import Field
from app.actions import BaseAction, ActionChain
from app.core.config import settings
from app.core.context import Context
from app.core.metainfo import MetaInfo
from app.helper.rss import RssHelper
from app.log import logger
from app.schemas import ActionParams, ActionContext, TorrentInfo
class FetchRssParams(ActionParams):
    """
    Parameters for fetching an RSS feed.
    """
    url: str = Field(None, description="RSS地址")
    proxy: Optional[bool] = Field(False, description="是否使用代理")
    timeout: Optional[int] = Field(15, description="超时时间")
    content_type: Optional[str] = Field(None, description="Content-Type")
    referer: Optional[str] = Field(None, description="Referer")
    ua: Optional[str] = Field(None, description="User-Agent")


class FetchRssAction(BaseAction):
    """
    Fetch an RSS feed and turn its items into torrent contexts.
    """

    def __init__(self):
        super().__init__()
        # BUG FIX: previously class-level attributes shared by all instances
        self._rss_torrents = []
        self._has_error = False
        self.rsshelper = RssHelper()
        self.chain = ActionChain()

    @classmethod
    @property
    def name(cls) -> str:
        return "获取RSS资源"

    @classmethod
    @property
    def description(cls) -> str:
        return "订阅RSS地址获取资源"

    @classmethod
    @property
    def data(cls) -> dict:
        return FetchRssParams().dict()

    @property
    def success(self) -> bool:
        return not self._has_error

    def execute(self, params: dict, context: ActionContext) -> ActionContext:
        """
        Request the RSS URL, parse the feed and append recognized items
        to ``context.torrents``.
        """
        params = FetchRssParams(**params)
        if not params.url:
            # Nothing to do without a feed URL
            return context
        # Optional request headers
        headers = {}
        if params.content_type:
            headers["Content-Type"] = params.content_type
        if params.referer:
            headers["Referer"] = params.referer
        if params.ua:
            headers["User-Agent"] = params.ua
        rss_items = self.rsshelper.parse(url=params.url,
                                         proxy=settings.PROXY if params.proxy else None,
                                         timeout=params.timeout,
                                         headers=headers)
        # None/False signals a request failure; an empty list is "no data"
        if rss_items is None or rss_items is False:
            logger.error(f'RSS地址 {params.url} 请求失败!')
            self._has_error = True
            return context
        if not rss_items:
            logger.error(f'RSS地址 {params.url} 未获取到RSS数据')
            return context
        # Build torrent contexts from the feed items
        for item in rss_items:
            if not item.get("title"):
                continue
            torrentinfo = TorrentInfo(
                title=item.get("title"),
                enclosure=item.get("enclosure"),
                page_url=item.get("link"),
                size=item.get("size"),
                pubdate=item["pubdate"].strftime("%Y-%m-%d %H:%M:%S") if item.get("pubdate") else None,
            )
            meta = MetaInfo(title=torrentinfo.title, subtitle=torrentinfo.description)
            mediainfo = self.chain.recognize_media(meta)
            if not mediainfo:
                logger.warning(f"{torrentinfo.title} 未识别到媒体信息")
                continue
            self._rss_torrents.append(Context(meta_info=meta, media_info=mediainfo, torrent_info=torrentinfo))
        if self._rss_torrents:
            logger.info(f"已获取 {len(self._rss_torrents)} 个RSS资源")
            context.torrents.extend(self._rss_torrents)
        self.job_done()
        return context

View File

@@ -0,0 +1,77 @@
from typing import Optional, List
from pydantic import Field
from app.actions import BaseAction
from app.chain.search import SearchChain
from app.log import logger
from app.schemas import ActionParams, ActionContext, MediaType
class FetchTorrentsParams(ActionParams):
    """
    Parameters for searching site resources.
    """
    name: str = Field(None, description="资源名称")
    year: Optional[str] = Field(None, description="年份")
    type: Optional[str] = Field(None, description="资源类型 (电影/电视剧)")
    season: Optional[int] = Field(None, description="季度")
    sites: Optional[List[int]] = Field([], description="站点列表")


class FetchTorrentsAction(BaseAction):
    """
    Search sites by keyword and collect matching torrents into the context.
    """

    def __init__(self):
        super().__init__()
        # BUG FIX: previously a class-level list shared by all instances
        self._torrents = []
        self.searchchain = SearchChain()

    @classmethod
    @property
    def name(cls) -> str:
        return "搜索站点资源"

    @classmethod
    @property
    def description(cls) -> str:
        return "根据关键字搜索站点种子资源"

    @classmethod
    @property
    def data(cls) -> dict:
        return FetchTorrentsParams().dict()

    @property
    def success(self) -> bool:
        return self.done

    def execute(self, params: dict, context: ActionContext) -> ActionContext:
        """
        Search the sites and extend ``context.torrents`` with recognized results.
        """
        params = FetchTorrentsParams(**params)
        torrents = self.searchchain.search_by_title(title=params.name, sites=params.sites, cache_local=False)
        for torrent in torrents:
            # Filter by the optional year/type/season constraints
            if params.year and torrent.meta_info.year != params.year:
                continue
            if params.type and torrent.media_info and torrent.media_info.type != MediaType(params.type):
                continue
            if params.season and torrent.meta_info.begin_season != params.season:
                continue
            # 识别媒体信息
            torrent.media_info = self.searchchain.recognize_media(torrent.meta_info)
            if not torrent.media_info:
                logger.warning(f"{torrent.torrent_info.title} 未识别到媒体信息")
                continue
            self._torrents.append(torrent)
        if self._torrents:
            context.torrents.extend(self._torrents)
            logger.info(f"搜索到 {len(self._torrents)} 条资源")
        self.job_done()
        return context

View File

@@ -0,0 +1,65 @@
from typing import Optional
from pydantic import Field
from app.actions import BaseAction
from app.schemas import ActionParams, ActionContext
class FilterMediasParams(ActionParams):
    """
    Parameters for filtering media data.
    """
    type: Optional[str] = Field(None, description="媒体类型 (电影/电视剧)")
    category: Optional[str] = Field(None, description="媒体类别 (二级分类)")
    vote: Optional[int] = Field(0, description="评分")
    year: Optional[str] = Field(None, description="年份")


class FilterMediasAction(BaseAction):
    """
    Filter the media list in the context by type/category/vote/year.
    """

    def __init__(self):
        super().__init__()
        # BUG FIX: previously a class-level list shared by all instances,
        # so filter results accumulated across workflow runs
        self._medias = []

    @classmethod
    @property
    def name(cls) -> str:
        return "过滤媒体数据"

    @classmethod
    @property
    def description(cls) -> str:
        return "对媒体数据列表进行过滤"

    @classmethod
    @property
    def data(cls) -> dict:
        return FilterMediasParams().dict()

    @property
    def success(self) -> bool:
        return self.done

    def execute(self, params: dict, context: ActionContext) -> ActionContext:
        """
        Keep only the entries of ``context.medias`` that pass every
        configured filter.
        """
        params = FilterMediasParams(**params)
        for media in context.medias:
            if params.type and media.type != params.type:
                continue
            if params.category and media.category != params.category:
                continue
            if params.vote and media.vote_average < params.vote:
                continue
            if params.year and media.year != params.year:
                continue
            self._medias.append(media)
        if self._medias:
            context.medias = self._medias
        self.job_done()
        return context

View File

@@ -0,0 +1,81 @@
from typing import Optional, List
from pydantic import Field
from app.actions import BaseAction, ActionChain
from app.helper.torrent import TorrentHelper
from app.schemas import ActionParams, ActionContext
class FilterTorrentsParams(ActionParams):
    """
    Parameters for filtering torrent resources.
    """
    rule_groups: Optional[List[str]] = Field([], description="规则组")
    quality: Optional[str] = Field(None, description="资源质量")
    resolution: Optional[str] = Field(None, description="资源分辨率")
    effect: Optional[str] = Field(None, description="特效")
    include: Optional[str] = Field(None, description="包含规则")
    exclude: Optional[str] = Field(None, description="排除规则")
    size: Optional[str] = Field(None, description="资源大小范围MB")


class FilterTorrentsAction(BaseAction):
    """
    Filter the torrents in the context by quality attributes and rule groups.
    """

    def __init__(self):
        super().__init__()
        # BUG FIX: previously a class-level list shared by all instances
        self._torrents = []
        self.torrenthelper = TorrentHelper()
        self.chain = ActionChain()

    @classmethod
    @property
    def name(cls) -> str:
        return "过滤资源"

    @classmethod
    @property
    def description(cls) -> str:
        return "对资源列表数据进行过滤"

    @classmethod
    @property
    def data(cls) -> dict:
        return FilterTorrentsParams().dict()

    @property
    def success(self) -> bool:
        return self.done

    def execute(self, params: dict, context: ActionContext) -> ActionContext:
        """
        Replace ``context.torrents`` with the entries that pass both the
        attribute filter and the configured rule groups.
        """
        params = FilterTorrentsParams(**params)
        for torrent in context.torrents:
            # First gate: quality/resolution/effect/include/exclude/size
            if self.torrenthelper.filter_torrent(
                    torrent_info=torrent.torrent_info,
                    filter_params={
                        "quality": params.quality,
                        "resolution": params.resolution,
                        "effect": params.effect,
                        "include": params.include,
                        "exclude": params.exclude,
                        "size": params.size
                    }
            ):
                # Second gate: user-defined rule groups
                if self.chain.filter_torrents(
                        rule_groups=params.rule_groups,
                        torrent_list=[torrent.torrent_info],
                        mediainfo=torrent.media_info
                ):
                    self._torrents.append(torrent)
        context.torrents = self._torrents
        self.job_done()
        return context

View File

@@ -0,0 +1,69 @@
from pathlib import Path
from app.actions import BaseAction
from app.schemas import ActionParams, ActionContext
from app.chain.media import MediaChain
from app.chain.storage import StorageChain
from app.core.metainfo import MetaInfoPath
from app.log import logger
class ScrapeFileParams(ActionParams):
    """
    Parameters for scraping files (currently none).
    """
    pass


class ScrapeFileAction(BaseAction):
    """
    Scrape metadata and artwork for the files in the context.
    """

    def __init__(self):
        super().__init__()
        # BUG FIX: previously class-level attributes shared by all instances,
        # so files scraped in one run were skipped in every later run
        self._scraped_files = []
        self._has_error = False
        self.storagechain = StorageChain()
        self.mediachain = MediaChain()

    @classmethod
    @property
    def name(cls) -> str:
        return "刮削文件"

    @classmethod
    @property
    def description(cls) -> str:
        return "刮削媒体信息和图片"

    @classmethod
    @property
    def data(cls) -> dict:
        return ScrapeFileParams().dict()

    @property
    def success(self) -> bool:
        return not self._has_error

    def execute(self, params: dict, context: ActionContext) -> ActionContext:
        """
        Scrape metadata for every file item in ``context.fileitems``.
        """
        for fileitem in context.fileitems:
            # Skip files already handled in this run
            if fileitem in self._scraped_files:
                continue
            # Skip files that no longer exist on storage
            if not self.storagechain.exists(fileitem):
                continue
            meta = MetaInfoPath(Path(fileitem.path))
            mediainfo = self.mediachain.recognize_media(meta)
            if not mediainfo:
                self._has_error = True
                logger.info(f"{fileitem.path} 未识别到媒体信息,无法刮削")
                continue
            self.mediachain.scrape_metadata(fileitem=fileitem, meta=meta, mediainfo=mediainfo)
            self._scraped_files.append(fileitem)
        self.job_done()
        return context

51
app/actions/send_event.py Normal file
View File

@@ -0,0 +1,51 @@
import copy
from app.actions import BaseAction
from app.schemas import ActionParams, ActionContext
from app.core.event import eventmanager
class SendEventParams(ActionParams):
    """
    Parameters for sending events (currently none).
    """
    pass


class SendEventAction(BaseAction):
    """
    Send all events queued in the context.
    """

    @classmethod
    @property
    def name(cls) -> str:
        return "发送事件"

    @classmethod
    @property
    def description(cls) -> str:
        return "发送队列中的所有事件"

    @classmethod
    @property
    def data(cls) -> dict:
        return SendEventParams().dict()

    @property
    def success(self) -> bool:
        return self.done

    def execute(self, params: dict, context: ActionContext) -> ActionContext:
        """
        Send every event in ``context.events`` and empty the queue.

        BUG FIX: the original deep-copied each event and then called
        ``context.events.remove(copy)`` — that relies on value equality
        (raising ValueError for identity-compared event objects) and does a
        needless deep copy per event. Snapshot the queue, clear it, then send.
        """
        if context.events:
            # 按优先级排序,优先级高的先发送
            context.events.sort(key=lambda x: x.priority, reverse=True)
            pending = list(context.events)
            context.events.clear()
            for event in pending:
                eventmanager.send_event(etype=event.event_type, data=event.event_data)
        self.job_done()
        return context

View File

@@ -0,0 +1,61 @@
import copy
from typing import List, Optional, Union
from pydantic import Field
from app.actions import BaseAction, ActionChain
from app.schemas import ActionParams, ActionContext
class SendMessageParams(ActionParams):
    """
    Parameters for sending messages.
    """
    client: Optional[List[str]] = Field([], description="消息渠道")
    userid: Optional[Union[str, int]] = Field(None, description="用户ID")


class SendMessageAction(BaseAction):
    """
    Send all messages queued in the context.
    """

    def __init__(self):
        super().__init__()
        self.chain = ActionChain()

    @classmethod
    @property
    def name(cls) -> str:
        return "发送消息"

    @classmethod
    @property
    def description(cls) -> str:
        return "发送队列中的所有消息"

    @classmethod
    @property
    def data(cls) -> dict:
        return SendMessageParams().dict()

    @property
    def success(self) -> bool:
        return self.done

    def execute(self, params: dict, context: ActionContext) -> ActionContext:
        """
        Send every message in ``context.messages`` and empty the queue.
        """
        # BUG FIX: ``params`` arrives as a raw dict; the original accessed
        # ``params.client`` / ``params.userid`` without parsing it into the
        # model first, which raises AttributeError at runtime.
        params = SendMessageParams(**params)
        # Iterate a snapshot; the queue is reset wholesale afterwards
        # (the original deep-copied and removed items one by one, only to
        # assign ``context.messages = []`` anyway).
        for message in list(context.messages):
            if params.client:
                message.source = params.client
            if params.userid:
                message.userid = params.userid
            self.chain.post_message(message)
        context.messages = []
        self.job_done()
        return context

View File

@@ -0,0 +1,74 @@
from pathlib import Path
from app.actions import BaseAction
from app.schemas import ActionParams, ActionContext
from app.chain.storage import StorageChain
from app.chain.transfer import TransferChain
from app.log import logger
class TransferFileParams(ActionParams):
    """
    Parameters for organizing files (currently none).
    """
    pass


class TransferFileAction(BaseAction):
    """
    Organize (transfer) the files of completed downloads in the context.
    """

    def __init__(self):
        super().__init__()
        # BUG FIX: previously class-level attributes shared by all instances
        self._fileitems = []
        self._has_error = False
        self.transferchain = TransferChain()
        self.storagechain = StorageChain()

    @classmethod
    @property
    def name(cls) -> str:
        return "整理文件"

    @classmethod
    @property
    def description(cls) -> str:
        return "整理下载队列中的文件"

    @classmethod
    @property
    def data(cls) -> dict:
        return TransferFileParams().dict()

    @property
    def success(self) -> bool:
        return not self._has_error

    def execute(self, params: dict, context: ActionContext) -> ActionContext:
        """
        Transfer the files of every completed download in
        ``context.downloads``, recording results into ``context.fileitems``.
        """
        for download in context.downloads:
            # Only completed downloads can be organized
            if not download.completed:
                logger.info(f"下载任务 {download.download_id} 未完成")
                continue
            fileitem = self.storagechain.get_file_item(storage="local", path=Path(download.path))
            if not fileitem:
                logger.info(f"文件 {download.path} 不存在")
                continue
            logger.info(f"开始整理文件 {download.path} ...")
            # Run synchronously so the workflow sees the outcome immediately
            state, errmsg = self.transferchain.do_transfer(fileitem, background=False)
            if not state:
                self._has_error = True
                logger.error(f"整理文件 {download.path} 失败: {errmsg}")
                continue
            logger.info(f"整理文件 {download.path} 完成")
            self._fileitems.append(fileitem)
        if self._fileitems:
            context.fileitems.extend(self._fileitems)
        self.job_done()
        return context

View File

@@ -2,7 +2,7 @@ from fastapi import APIRouter
from app.api.endpoints import login, user, site, message, webhook, subscribe, \
media, douban, search, plugin, tmdb, history, system, download, dashboard, \
transfer, mediaserver, bangumi, storage, discover, recommend
transfer, mediaserver, bangumi, storage, discover, recommend, workflow
api_router = APIRouter()
api_router.include_router(login.router, prefix="/login", tags=["login"])
@@ -26,3 +26,4 @@ api_router.include_router(mediaserver.router, prefix="/mediaserver", tags=["medi
api_router.include_router(bangumi.router, prefix="/bangumi", tags=["bangumi"])
api_router.include_router(discover.router, prefix="/discover", tags=["discover"])
api_router.include_router(recommend.router, prefix="/recommend", tags=["recommend"])
api_router.include_router(workflow.router, prefix="/workflow", tags=["workflow"])

View File

@@ -31,6 +31,7 @@ def search_by_id(mediaid: str,
title: str = None,
year: int = None,
season: str = None,
sites: str = None,
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
根据TMDBID/豆瓣ID精确搜索站点资源 tmdb:/douban:/bangumi:
@@ -39,6 +40,10 @@ def search_by_id(mediaid: str,
mtype = MediaType(mtype)
if season:
season = int(season)
if sites:
site_list = [int(site) for site in sites.split(",") if site]
else:
site_list = None
torrents = None
# 根据前缀识别媒体ID
if mediaid.startswith("tmdb:"):
@@ -48,11 +53,13 @@ def search_by_id(mediaid: str,
doubaninfo = MediaChain().get_doubaninfo_by_tmdbid(tmdbid=tmdbid, mtype=mtype)
if doubaninfo:
torrents = SearchChain().search_by_id(doubanid=doubaninfo.get("id"),
mtype=mtype, area=area, season=season)
mtype=mtype, area=area, season=season,
sites=site_list)
else:
return schemas.Response(success=False, message="未识别到豆瓣媒体信息")
else:
torrents = SearchChain().search_by_id(tmdbid=tmdbid, mtype=mtype, area=area, season=season)
torrents = SearchChain().search_by_id(tmdbid=tmdbid, mtype=mtype, area=area, season=season,
sites=site_list)
elif mediaid.startswith("douban:"):
doubanid = mediaid.replace("douban:", "")
if settings.RECOGNIZE_SOURCE == "themoviedb":
@@ -62,11 +69,13 @@ def search_by_id(mediaid: str,
if tmdbinfo.get('season') and not season:
season = tmdbinfo.get('season')
torrents = SearchChain().search_by_id(tmdbid=tmdbinfo.get("id"),
mtype=mtype, area=area, season=season)
mtype=mtype, area=area, season=season,
sites=site_list)
else:
return schemas.Response(success=False, message="未识别到TMDB媒体信息")
else:
torrents = SearchChain().search_by_id(doubanid=doubanid, mtype=mtype, area=area, season=season)
torrents = SearchChain().search_by_id(doubanid=doubanid, mtype=mtype, area=area, season=season,
sites=site_list)
elif mediaid.startswith("bangumi:"):
bangumiid = int(mediaid.replace("bangumi:", ""))
if settings.RECOGNIZE_SOURCE == "themoviedb":
@@ -74,7 +83,8 @@ def search_by_id(mediaid: str,
tmdbinfo = MediaChain().get_tmdbinfo_by_bangumiid(bangumiid=bangumiid)
if tmdbinfo:
torrents = SearchChain().search_by_id(tmdbid=tmdbinfo.get("id"),
mtype=mtype, area=area, season=season)
mtype=mtype, area=area, season=season,
sites=site_list)
else:
return schemas.Response(success=False, message="未识别到TMDB媒体信息")
else:
@@ -82,7 +92,8 @@ def search_by_id(mediaid: str,
doubaninfo = MediaChain().get_doubaninfo_by_bangumiid(bangumiid=bangumiid)
if doubaninfo:
torrents = SearchChain().search_by_id(doubanid=doubaninfo.get("id"),
mtype=mtype, area=area, season=season)
mtype=mtype, area=area, season=season,
sites=site_list)
else:
return schemas.Response(success=False, message="未识别到豆瓣媒体信息")
else:
@@ -133,12 +144,13 @@ def search_by_id(mediaid: str,
@router.get("/title", summary="模糊搜索资源", response_model=schemas.Response)
def search_by_title(keyword: str = None,
page: int = 0,
site: int = None,
sites: str = None,
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
根据名称模糊搜索站点资源,支持分页,关键词为空是返回首页资源
"""
torrents = SearchChain().search_by_title(title=keyword, page=page, site=site)
torrents = SearchChain().search_by_title(title=keyword, page=page,
sites=[int(site) for site in sites.split(",") if site] if sites else None)
if not torrents:
return schemas.Response(success=False, message="未搜索到任何资源")
return schemas.Response(success=True, data=[torrent.to_dict() for torrent in torrents])

View File

@@ -1,4 +1,4 @@
from typing import List, Any
from typing import List, Any, Dict
from fastapi import APIRouter, Depends, HTTPException
from sqlalchemy.orm import Session
@@ -259,8 +259,41 @@ def site_icon(site_id: int,
})
@router.get("/category/{site_id}", summary="站点分类", response_model=List[schemas.SiteCategory])
def site_category(site_id: int,
                  db: Session = Depends(get_db),
                  _: schemas.TokenPayload = Depends(verify_token)) -> Any:
    """
    获取站点分类

    Return the de-duplicated list of categories declared by the site's
    indexer, flattened across media types.
    """
    site = Site.get(db, site_id)
    if not site:
        raise HTTPException(
            status_code=404,
            detail=f"站点 {site_id} 不存在",
        )
    indexer = SitesHelper().get_indexer(site.domain)
    if not indexer:
        raise HTTPException(
            status_code=404,
            detail=f"站点 {site.domain} 不支持",
        )
    # FIX: fall back to {} (not []) to match the declared Dict type; the
    # mismatch was only masked by the early return below.
    category: Dict[str, List[dict]] = indexer.get('category') or {}
    if not category:
        return []
    # Flatten and de-duplicate the per-media-type category lists
    result = []
    for cats in category.values():
        for cat in cats:
            if cat not in result:
                result.append(cat)
    return result
@router.get("/resource/{site_id}", summary="站点资源", response_model=List[schemas.TorrentInfo])
def site_resource(site_id: int,
keyword: str = None,
cat: str = None,
page: int = 0,
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(get_current_active_superuser)) -> Any:
"""
@@ -272,7 +305,7 @@ def site_resource(site_id: int,
status_code=404,
detail=f"站点 {site_id} 不存在",
)
torrents = TorrentsChain().browse(domain=site.domain)
torrents = TorrentsChain().browse(domain=site.domain, keyword=keyword, cat=cat, page=page)
if not torrents:
return []
return [torrent.to_dict() for torrent in torrents]

View File

@@ -8,6 +8,7 @@ from pathlib import Path
from typing import Optional, Union
import aiofiles
import pillow_avif # noqa 用于自动注册AVIF支持
from PIL import Image
from fastapi import APIRouter, Depends, HTTPException, Header, Request, Response
from fastapi.responses import StreamingResponse
@@ -50,7 +51,6 @@ def fetch_image(
"""
处理图片缓存逻辑支持HTTP缓存和磁盘缓存
"""
if not url:
raise HTTPException(status_code=404, detail="URL not provided")
@@ -68,6 +68,10 @@ def fetch_image(
sanitized_path = SecurityUtils.sanitize_url_path(url)
cache_path = settings.CACHE_PATH / "images" / sanitized_path
# 没有文件类型,则添加后缀,在恶意文件类型和实际需求下的折衷选择
if not cache_path.suffix:
cache_path = cache_path.with_suffix(".jpg")
# 确保缓存路径和文件类型合法
if not SecurityUtils.is_safe_path(settings.CACHE_PATH, cache_path, settings.SECURITY_IMAGE_SUFFIXES):
raise HTTPException(status_code=400, detail="Invalid cache path or file type")
@@ -88,7 +92,8 @@ def fetch_image(
# 请求远程图片
referer = "https://movie.douban.com/" if "doubanio.com" in url else None
proxies = settings.PROXY if proxy else None
response = RequestUtils(ua=settings.USER_AGENT, proxies=proxies, referer=referer).get_res(url=url)
response = RequestUtils(ua=settings.USER_AGENT, proxies=proxies, referer=referer,
accept_type="image/avif,image/webp,image/apng,*/*").get_res(url=url)
if not response:
raise HTTPException(status_code=502, detail="Failed to fetch the image from the remote server")

View File

@@ -0,0 +1,131 @@
from datetime import datetime
from typing import List, Any
from fastapi import APIRouter, Depends
from sqlalchemy.orm import Session
from app import schemas
from app.core.workflow import WorkFlowManager
from app.db import get_db
from app.db.models.workflow import Workflow
from app.db.user_oper import get_current_active_user
from app.chain.workflow import WorkflowChain
from app.scheduler import Scheduler
router = APIRouter()
@router.get("/", summary="所有工作流", response_model=List[schemas.Workflow])
def list_workflows(db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(get_current_active_user)) -> Any:
"""
获取工作流列表
"""
return Workflow.list(db)
@router.post("/", summary="创建工作流", response_model=schemas.Response)
def create_workflow(workflow: schemas.Workflow,
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(get_current_active_user)) -> Any:
"""
创建工作流
"""
if Workflow.get_by_name(db, workflow.name):
return schemas.Response(success=False, message="已存在相同名称的工作流")
if not workflow.add_time:
workflow.add_time = datetime.strftime(datetime.now(), "%Y-%m-%d %H:%M:%S")
if not workflow.state:
workflow.state = "P"
Workflow(**workflow.dict()).create(db)
return schemas.Response(success=True, message="创建工作流成功")
@router.get("/actions", summary="所有动作", response_model=List[dict])
def list_actions(_: schemas.TokenPayload = Depends(get_current_active_user)) -> Any:
"""
获取所有动作
"""
return WorkFlowManager().list_actions()
@router.get("/{workflow_id}", summary="工作流详情", response_model=schemas.Workflow)
def get_workflow(workflow_id: int,
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(get_current_active_user)) -> Any:
"""
获取工作流详情
"""
return Workflow.get(db, workflow_id)
@router.put("/{workflow_id}", summary="更新工作流", response_model=schemas.Response)
def update_workflow(workflow: schemas.Workflow,
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(get_current_active_user)) -> Any:
"""
更新工作流
"""
wf = Workflow.get(db, workflow.id)
if not wf:
return schemas.Response(success=False, message="工作流不存在")
wf.update(db, workflow.dict())
return schemas.Response(success=True, message="更新成功")
@router.delete("/{workflow_id}", summary="删除工作流", response_model=schemas.Response)
def delete_workflow(workflow_id: int,
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(get_current_active_user)) -> Any:
"""
删除工作流
"""
workflow = Workflow.get(db, workflow_id)
if not workflow:
return schemas.Response(success=False, message="工作流不存在")
Scheduler().remove_workflow_job(workflow)
Workflow.delete(db, workflow_id)
return schemas.Response(success=True, message="删除成功")
@router.post("/{workflow_id}/run", summary="执行工作流", response_model=schemas.Response)
def run_workflow(workflow_id: int,
from_begin: bool = True,
_: schemas.TokenPayload = Depends(get_current_active_user)) -> Any:
"""
执行工作流
"""
state, errmsg = WorkflowChain().process(workflow_id, from_begin=from_begin)
if not state:
return schemas.Response(success=False, message=errmsg)
return schemas.Response(success=True)
@router.post("/{workflow_id}/start", summary="启用工作流", response_model=schemas.Response)
def start_workflow(workflow_id: int,
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(get_current_active_user)) -> Any:
"""
启用工作流
"""
workflow = Workflow.get(db, workflow_id)
if not workflow:
return schemas.Response(success=False, message="工作流不存在")
Scheduler().update_workflow_job(workflow)
workflow.update_state(db, workflow_id, "W")
return schemas.Response(success=True)
@router.post("/{workflow_id}/pause", summary="停用工作流", response_model=schemas.Response)
def pause_workflow(workflow_id: int,
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(get_current_active_user)) -> Any:
"""
停用工作流
"""
workflow = Workflow.get(db, workflow_id)
if not workflow:
return schemas.Response(success=False, message="工作流不存在")
Scheduler().remove_workflow_job(workflow)
workflow.update_state(db, workflow_id, "P")
return schemas.Response(success=True)

View File

@@ -7,7 +7,6 @@ from pathlib import Path
from typing import Optional, Any, Tuple, List, Set, Union, Dict
from qbittorrentapi import TorrentFilesList
from ruamel.yaml import CommentedMap
from transmission_rpc import File
from app.core.config import settings
@@ -77,7 +76,7 @@ class ChainBase(metaclass=ABCMeta):
"""
cache_path = settings.TEMP_PATH / filename
if cache_path.exists():
Path(cache_path).unlink()
cache_path.unlink()
def run_module(self, method: str, *args, **kwargs) -> Any:
"""
@@ -308,7 +307,7 @@ class ChainBase(metaclass=ABCMeta):
"""
return self.run_module("search_collections", name=name)
def search_torrents(self, site: CommentedMap,
def search_torrents(self, site: dict,
keywords: List[str],
mtype: MediaType = None,
page: int = 0) -> List[TorrentInfo]:
@@ -323,13 +322,16 @@ class ChainBase(metaclass=ABCMeta):
return self.run_module("search_torrents", site=site, keywords=keywords,
mtype=mtype, page=page)
def refresh_torrents(self, site: CommentedMap) -> List[TorrentInfo]:
def refresh_torrents(self, site: dict, keyword: str = None, cat: str = None, page: int = 0) -> List[TorrentInfo]:
"""
获取站点最新一页的种子,多个站点需要多线程处理
:param site: 站点
:param keyword: 标题
:param cat: 分类
:param page: 页码
:return: 种子资源列表
"""
return self.run_module("refresh_torrents", site=site)
return self.run_module("refresh_torrents", site=site, keyword=keyword, cat=cat, page=page)
def filter_torrents(self, rule_groups: List[str],
torrent_list: List[TorrentInfo],

View File

@@ -3,6 +3,7 @@ import tempfile
from pathlib import Path
from typing import List
import pillow_avif # noqa 用于自动注册AVIF支持
from PIL import Image
from app.chain import ChainBase
@@ -116,6 +117,10 @@ class RecommendChain(ChainBase, metaclass=Singleton):
sanitized_path = SecurityUtils.sanitize_url_path(url)
cache_path = settings.CACHE_PATH / "images" / sanitized_path
# 没有文件类型,则添加后缀,在恶意文件类型和实际需求下的折衷选择
if not cache_path.suffix:
cache_path = cache_path.with_suffix(".jpg")
# 确保缓存路径和文件类型合法
if not SecurityUtils.is_safe_path(settings.CACHE_PATH, cache_path, settings.SECURITY_IMAGE_SUFFIXES):
logger.debug(f"Invalid cache path or file type for URL: {url}, sanitized path: {sanitized_path}")

View File

@@ -35,7 +35,8 @@ class SearchChain(ChainBase):
self.torrenthelper = TorrentHelper()
def search_by_id(self, tmdbid: int = None, doubanid: str = None,
mtype: MediaType = None, area: str = "title", season: int = None) -> List[Context]:
mtype: MediaType = None, area: str = "title", season: int = None,
sites: List[int] = None) -> List[Context]:
"""
根据TMDBID/豆瓣ID搜索资源精确匹配不过滤本地存在的资源
:param tmdbid: TMDB ID
@@ -43,6 +44,7 @@ class SearchChain(ChainBase):
:param mtype: 媒体,电影 or 电视剧
:param area: 搜索范围title or imdbid
:param season: 季数
:param sites: 站点ID列表
"""
mediainfo = self.recognize_media(tmdbid=tmdbid, doubanid=doubanid, mtype=mtype)
if not mediainfo:
@@ -55,25 +57,27 @@ class SearchChain(ChainBase):
season: NotExistMediaInfo(episodes=[])
}
}
results = self.process(mediainfo=mediainfo, area=area, no_exists=no_exists)
results = self.process(mediainfo=mediainfo, sites=sites, area=area, no_exists=no_exists)
# 保存到本地文件
bytes_results = pickle.dumps(results)
self.save_cache(bytes_results, self.__result_temp_file)
return results
def search_by_title(self, title: str, page: int = 0, site: int = None) -> List[Context]:
def search_by_title(self, title: str, page: int = 0,
sites: List[int] = None, cache_local: bool = True) -> List[Context]:
"""
根据标题搜索资源,不识别不过滤,直接返回站点内容
:param title: 标题,为空时返回所有站点首页内容
:param page: 页码
:param site: 站点ID
:param sites: 站点ID列表
:param cache_local: 是否缓存到本地
"""
if title:
logger.info(f'开始搜索资源,关键词:{title} ...')
else:
logger.info(f'开始浏览资源,站点:{site} ...')
logger.info(f'开始浏览资源,站点:{sites} ...')
# 搜索
torrents = self.__search_all_sites(keywords=[title], sites=[site] if site else None, page=page) or []
torrents = self.__search_all_sites(keywords=[title], sites=sites, page=page) or []
if not torrents:
logger.warn(f'{title} 未搜索到资源')
return []
@@ -81,8 +85,9 @@ class SearchChain(ChainBase):
contexts = [Context(meta_info=MetaInfo(title=torrent.title, subtitle=torrent.description),
torrent_info=torrent) for torrent in torrents]
# 保存到本地文件
bytes_results = pickle.dumps(contexts)
self.save_cache(bytes_results, self.__result_temp_file)
if cache_local:
bytes_results = pickle.dumps(contexts)
self.save_cache(bytes_results, self.__result_temp_file)
return contexts
def last_search_results(self) -> List[Context]:

View File

@@ -6,7 +6,6 @@ from typing import Optional, Tuple, Union, Dict
from urllib.parse import urljoin
from lxml import etree
from ruamel.yaml import CommentedMap
from app.chain import ChainBase
from app.core.config import global_vars, settings
@@ -55,7 +54,7 @@ class SiteChain(ChainBase):
"yemapt.org": self.__yema_test,
}
def refresh_userdata(self, site: CommentedMap = None) -> Optional[SiteUserData]:
def refresh_userdata(self, site: dict = None) -> Optional[SiteUserData]:
"""
刷新站点的用户数据
:param site: 站点

View File

@@ -84,6 +84,12 @@ class StorageChain(ChainBase):
"""
return self.run_module("rename_file", fileitem=fileitem, name=name)
def exists(self, fileitem: schemas.FileItem) -> Optional[bool]:
"""
判断文件或目录是否存在
"""
return True if self.get_item(fileitem) else False
def get_item(self, fileitem: schemas.FileItem) -> Optional[schemas.FileItem]:
"""
查询目录或文件

View File

@@ -73,17 +73,20 @@ class TorrentsChain(ChainBase, metaclass=Singleton):
logger.info(f'种子缓存数据清理完成')
@cached(cache=TTLCache(maxsize=128, ttl=595))
def browse(self, domain: str) -> List[TorrentInfo]:
def browse(self, domain: str, keyword: str = None, cat: str = None, page: int = 0) -> List[TorrentInfo]:
"""
浏览站点首页内容返回种子清单TTL缓存10分钟
:param domain: 站点域名
:param keyword: 搜索标题
:param cat: 搜索分类
:param page: 页码
"""
logger.info(f'开始获取站点 {domain} 最新种子 ...')
site = self.siteshelper.get_indexer(domain)
if not site:
logger.error(f'站点 {domain} 不存在!')
return []
return self.refresh_torrents(site=site)
return self.refresh_torrents(site=site, keyword=keyword, cat=cat, page=page)
@cached(cache=TTLCache(maxsize=128, ttl=295))
def rss(self, domain: str) -> List[TorrentInfo]:

View File

@@ -670,13 +670,10 @@ class TransferChain(ChainBase, metaclass=Singleton):
self.jobview.add_task(task, state=curr_task.state if curr_task else "waiting")
# 获取集数据
if not task.episodes_info and task.mediainfo.type == MediaType.TV:
if task.meta.begin_season is None:
task.meta.begin_season = 1
task.mediainfo.season = task.mediainfo.season or task.meta.begin_season
if task.mediainfo.type == MediaType.TV and not task.episodes_info:
task.episodes_info = self.tmdbchain.tmdb_episodes(
tmdbid=task.mediainfo.tmdb_id,
season=task.mediainfo.season
season=task.mediainfo.season or task.meta.begin_season or 1
)
# 查询整理目标目录

232
app/chain/workflow.py Normal file
View File

@@ -0,0 +1,232 @@
import base64
import pickle
import threading
from collections import defaultdict, deque
from concurrent.futures import ThreadPoolExecutor
from time import sleep
from typing import List, Tuple
from pydantic.fields import Callable
from app.chain import ChainBase
from app.core.config import global_vars
from app.core.workflow import WorkFlowManager
from app.db.models import Workflow
from app.db.workflow_oper import WorkflowOper
from app.log import logger
from app.schemas import ActionContext, ActionFlow, Action
class WorkflowExecutor:
    """
    Workflow executor: runs a workflow's action DAG with a thread pool.

    Nodes (actions) become ready when their indegree drops to zero; each
    finished node merges its resulting context into the shared one and
    unblocks its successors. Any node failure aborts the whole run.
    """

    def __init__(self, workflow: Workflow, step_callback: Callable = None):
        """
        Initialize the executor.

        :param workflow: workflow ORM object (provides actions/flows/context)
        :param step_callback: called as ``callback(action, context)`` after
                              each successful node, e.g. to persist progress
        """
        # Workflow data
        self.workflow = workflow
        self.step_callback = step_callback
        self.actions = {action['id']: Action(**action) for action in workflow.actions}
        self.flows = [ActionFlow(**flow) for flow in workflow.flows]
        self.success = True
        self.errmsg = ""
        # Runs individual actions
        self.workflowmanager = WorkFlowManager()
        # Ready queue of action ids whose dependencies are satisfied
        self.queue = deque()
        # Guards queue / indegree / running_tasks / context
        self.lock = threading.Lock()
        # Thread pool
        self.executor = ThreadPoolExecutor()
        # Number of claimed-but-unfinished nodes
        self.running_tasks = 0
        # Build adjacency and indegree tables of the DAG
        self.adjacency = defaultdict(list)
        self.indegree = defaultdict(int)
        for flow in self.flows:
            self.adjacency[flow.source].append(flow.target)
            self.indegree[flow.target] += 1
        # Nodes never referenced as a target get indegree 0
        for action_id in self.actions:
            if action_id not in self.indegree:
                self.indegree[action_id] = 0
        # Initial context: resume from the persisted one when present
        if workflow.current_action and workflow.context:
            # Base64-decode, then unpickle. NOTE: this is our own previously
            # persisted data, not untrusted input.
            decoded_data = base64.b64decode(workflow.context["content"])
            self.context = pickle.loads(decoded_data)
        else:
            self.context = ActionContext()
        # Seed the queue with all roots (indegree 0)
        for action_id in self.actions:
            if self.indegree[action_id] == 0:
                self.queue.append(action_id)

    def execute(self):
        """
        Drain the ready queue until every node finished, a node failed,
        or the system is shutting down.
        """
        while True:
            node_id = None
            with self.lock:
                # Exit: nothing queued and nothing running
                if not self.queue and self.running_tasks == 0:
                    break
                # Exit: a node failed
                if not self.success:
                    break
                if self.queue:
                    node_id = self.queue.popleft()
                    # Claim the running slot before releasing the lock
                    self.running_tasks += 1
            if node_id is None:
                # Wait OUTSIDE the lock so completion callbacks can progress
                sleep(1)
                continue
            # System shutdown: release the claimed slot and stop scheduling
            if global_vars.is_system_stopped:
                self._release_node(node_id)
                break
            # Node already executed in a previous (resumed) run: skip it,
            # but still release the slot and unblock its successors —
            # otherwise running_tasks never reaches 0 and execute() hangs.
            if (self.workflow.current_action
                    and str(node_id) in str(self.workflow.current_action).split(',')):
                self._release_node(node_id)
                continue
            # Run the node in the pool
            future = self.executor.submit(
                self.execute_node,
                node_id,
                self.context
            )
            future.add_done_callback(self.on_node_complete)
        # Reclaim pool threads; on failure any in-flight task finishes alone
        self.executor.shutdown(wait=False)

    def _release_node(self, node_id):
        """
        Mark one claimed node as finished: decrement the running counter
        and enqueue successors whose dependencies are now satisfied.
        """
        with self.lock:
            self.running_tasks -= 1
            for succ_id in self.adjacency.get(node_id, []):
                self.indegree[succ_id] -= 1
                if self.indegree[succ_id] == 0:
                    self.queue.append(succ_id)

    def execute_node(self, node_id: int, context: ActionContext) -> Tuple[Action, bool, ActionContext]:
        """
        Execute a single node; returns (action, success flag, result context).
        """
        action = self.actions[node_id]
        state, result_ctx = self.workflowmanager.excute(action, context=context)
        return action, state, result_ctx

    def on_node_complete(self, future):
        """
        Completion callback: merge the node's context, report progress,
        and unblock successors. A failure (returned or raised) stops the
        whole workflow.
        """
        try:
            action, state, result_ctx = future.result()
        except Exception as err:
            # execute_node raised — without this handler the exception would
            # be swallowed and the running counter never decremented,
            # deadlocking execute()
            self.errmsg = str(err)
            self.success = False
            with self.lock:
                self.running_tasks -= 1
            return
        # Node reported failure
        if not state:
            self.success = False
            self.errmsg = f"{action.name} 失败"
            with self.lock:
                self.running_tasks -= 1
            return
        with self.lock:
            # Fold the node's result into the shared context
            self.merge_context(result_ctx)
            # Persist progress via the caller-supplied callback
            if self.step_callback:
                self.step_callback(action, self.context)
        # Unblock successors and release the running slot
        self._release_node(action.id)

    def merge_context(self, context: ActionContext):
        """
        Merge a node context into the shared one: copy each field, but
        never overwrite a field that is already set. Caller holds the lock.
        """
        for key, value in context.dict().items():
            if not getattr(self.context, key, None):
                setattr(self.context, key, value)
class WorkflowChain(ChainBase):
    """
    Workflow chain: loads a workflow, runs it through WorkflowExecutor
    and records the final state in the database.
    """

    def __init__(self):
        super().__init__()
        self.workflowoper = WorkflowOper()

    def process(self, workflow_id: int, from_begin: bool = True) -> Tuple[bool, str]:
        """
        Run one workflow to completion.

        :param workflow_id: workflow id
        :param from_begin: when True, reset any previous progress first
        :return: (success flag, error message)
        """

        def _persist_step(action: Action, context: ActionContext):
            """Serialize the context and record per-step progress in the DB."""
            payload = base64.b64encode(pickle.dumps(context)).decode('utf-8')
            self.workflowoper.step(workflow_id, action_id=action.id, context={
                "content": payload
            })

        if from_begin:
            self.workflowoper.reset(workflow_id)
        # Load and validate the workflow definition
        workflow = self.workflowoper.get(workflow_id)
        if not workflow:
            logger.warn(f"工作流 {workflow_id} 不存在")
            return False, "工作流不存在"
        if not workflow.actions:
            logger.warn(f"工作流 {workflow.name} 无动作")
            return False, "工作流无动作"
        if not workflow.flows:
            logger.warn(f"工作流 {workflow.name} 无流程")
            return False, "工作流无流程"
        logger.info(f"开始处理 {workflow.name},共 {len(workflow.actions)} 个动作 ...")
        self.workflowoper.start(workflow_id)
        # Execute and record the outcome
        runner = WorkflowExecutor(workflow, step_callback=_persist_step)
        runner.execute()
        if runner.success:
            logger.info(f"工作流 {workflow.name} 执行成功")
            self.workflowoper.success(workflow_id)
            return True, ""
        logger.info(f"工作流 {workflow.name} 执行失败:{runner.errmsg}")
        self.workflowoper.fail(workflow_id, result=runner.errmsg)
        return False, runner.errmsg

    def get_workflows(self) -> List[Workflow]:
        """
        Return all enabled workflows.
        """
        return self.workflowoper.list_enabled()

View File

@@ -247,7 +247,7 @@ class ConfigModel(BaseModel):
)
# 允许的图片文件后缀格式
SECURITY_IMAGE_SUFFIXES: List[str] = Field(
default_factory=lambda: [".jpg", ".jpeg", ".png", ".webp", ".gif", ".svg"]
default_factory=lambda: [".jpg", ".jpeg", ".png", ".webp", ".gif", ".svg", ".avif"]
)
# 重命名时支持的S0别名
RENAME_FORMAT_S0_NAMES: List[str] = Field(
@@ -255,6 +255,8 @@ class ConfigModel(BaseModel):
)
# 启用分词搜索
TOKENIZED_SEARCH: bool = False
# 为指定默认字幕添加.default后缀
DEFAULT_SUB: Optional[str] = "zh-cn"
class Settings(BaseSettings, ConfigModel, LogConfigModel):

View File

@@ -793,10 +793,9 @@ class PluginManager(metaclass=Singleton):
# 已安装插件
installed_apps = self.systemconfig.get(SystemConfigKey.UserInstalledPlugins) or []
# 获取在线插件
online_plugins = self.pluginhelper.get_plugins(market, package_version) or {}
if not online_plugins:
if not package_version:
logger.warning(f"获取插件库失败:{market},请检查 GitHub 网络连接")
online_plugins = self.pluginhelper.get_plugins(market, package_version)
if online_plugins is None:
logger.warning(f"获取{package_version if package_version else ''}插件库失败:{market},请检查 GitHub 网络连接")
return []
ret_plugins = []
add_time = len(online_plugins)

106
app/core/workflow.py Normal file
View File

@@ -0,0 +1,106 @@
from time import sleep
from typing import Dict, Any, Tuple, List
from app.helper.module import ModuleHelper
from app.log import logger
from app.schemas import Action, ActionContext
from app.utils.singleton import Singleton
class WorkFlowManager(metaclass=Singleton):
    """
    Workflow manager: discovers action classes under ``app.actions`` and
    runs individual workflow actions against a shared ActionContext.
    """

    # Registry of discovered action classes, keyed by class name
    # (matches ``Action.type`` from the workflow definition).
    _actions: Dict[str, Any] = {}

    def __init__(self):
        self.init()

    def init(self):
        """
        Scan the ``app.actions`` package and register every action class
        that exposes both ``execute`` and ``name``.
        """

        def filter_func(obj: Any):
            """
            Module filter: accept only concrete action classes defined in
            ``app.actions`` (the abstract ``BaseAction`` is excluded).
            """
            if not isinstance(obj, type):
                return False
            if not hasattr(obj, 'execute') or not hasattr(obj, "name"):
                return False
            if obj.__name__ == "BaseAction":
                return False
            return obj.__module__.startswith("app.actions")

        # Rebuild the registry from scratch on every init
        self._actions = {}
        actions = ModuleHelper.load(
            "app.actions",
            filter_func=lambda _, obj: filter_func(obj)
        )
        for action in actions:
            logger.debug(f"加载动作: {action.__name__}")
            try:
                self._actions[action.__name__] = action
            except Exception as err:
                logger.error(f"加载动作失败: {action.__name__} - {err}")

    def stop(self):
        """
        Shut down the manager (currently a no-op).
        """
        pass

    def excute(self, action: Action, context: ActionContext = None) -> Tuple[bool, ActionContext]:
        """
        Execute one workflow action.

        NOTE(review): method name is a typo of "execute"; kept as-is because
        callers (e.g. the workflow executor) invoke it by this name.

        :param action: action definition; ``action.type`` selects the
                       registered class, ``action.data`` is its config
        :param context: shared context passed between actions; a fresh
                        ActionContext is created when None
        :return: (success flag, resulting context)
        """
        if not context:
            context = ActionContext()
        if action.type in self._actions:
            # Instantiate a fresh action object for this execution
            action_obj = self._actions[action.type]()
            # First run
            logger.info(f"执行动作: {action.id} - {action.name}")
            try:
                result_context = action_obj.execute(action.data, context)
            except Exception as err:
                logger.error(f"{action.name} 执行失败: {err}")
                return False, context
            # Optional polling mode: when the action's data carries both
            # "loop" and "loop_interval", re-run the same action object every
            # loop_interval seconds until it reports itself done.
            loop = action.data.get("loop")
            loop_interval = action.data.get("loop_interval")
            if loop and loop_interval:
                while not action_obj.done:
                    # Wait before the next pass
                    logger.info(f"{action.name} 等待 {loop_interval} 秒后继续执行 ...")
                    sleep(loop_interval)
                    # Re-run with the context produced by the previous pass
                    logger.info(f"继续执行动作: {action.id} - {action.name}")
                    result_context = action_obj.execute(action.data, result_context)
            if action_obj.success:
                logger.info(f"{action.name} 执行成功")
            else:
                logger.error(f"{action.name} 执行失败!")
            return action_obj.success, result_context
        else:
            logger.error(f"未找到动作: {action.type} - {action.name}")
            return False, context

    def list_actions(self) -> List[dict]:
        """
        Return the metadata (type, name, description, default data) of every
        registered action, e.g. for the workflow editor UI.
        """
        return [
            {
                "type": key,
                "name": action.name,
                "description": action.description,
                "data": {
                    "label": action.name,
                    **action.data
                }
            } for key, action in self._actions.items()
        ]

View File

@@ -8,3 +8,4 @@ from .systemconfig import SystemConfig
from .transferhistory import TransferHistory
from .user import User
from .userconfig import UserConfig
from .workflow import Workflow

101
app/db/models/workflow.py Normal file
View File

@@ -0,0 +1,101 @@
from datetime import datetime
from sqlalchemy import Column, Integer, JSON, Sequence, String
from app.db import Base, db_query, db_update
class Workflow(Base):
    """
    Workflow table.
    """
    # Primary key
    id = Column(Integer, Sequence('id'), primary_key=True, index=True)
    # Name (unique by convention, enforced in the API layer)
    name = Column(String, index=True, nullable=False)
    # Description
    description = Column(String)
    # Timer (schedule expression)
    timer = Column(String)
    # State: W-waiting R-running P-paused S-success F-failed
    state = Column(String, nullable=False, index=True, default='W')
    # Already-executed action ids (comma separated)
    current_action = Column(String)
    # Last execution result
    result = Column(String)
    # Number of completed runs
    run_count = Column(Integer, default=0)
    # Action definitions (JSON list)
    actions = Column(JSON, default=list)
    # Flow edges between actions (JSON list)
    flows = Column(JSON, default=list)
    # Persisted execution context
    context = Column(JSON, default=dict)
    # Creation time. Must be a callable: a plain strftime() call would be
    # evaluated once at import time and freeze the same timestamp into
    # every row ever inserted.
    add_time = Column(String, default=lambda: datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
    # Last run time
    last_time = Column(String)

    @staticmethod
    @db_query
    def get_enabled_workflows(db):
        """Return all workflows that are not paused."""
        return db.query(Workflow).filter(Workflow.state != 'P').all()

    @staticmethod
    @db_query
    def get_by_name(db, name: str):
        """Return the first workflow with the given name, or None."""
        return db.query(Workflow).filter(Workflow.name == name).first()

    @staticmethod
    @db_update
    def update_state(db, wid: int, state: str):
        """Set the state of workflow `wid`."""
        db.query(Workflow).filter(Workflow.id == wid).update({"state": state})
        return True

    @staticmethod
    @db_update
    def start(db, wid: int):
        """Mark workflow `wid` as running."""
        db.query(Workflow).filter(Workflow.id == wid).update({
            "state": 'R'
        })
        return True

    @staticmethod
    @db_update
    def fail(db, wid: int, result: str):
        """Mark workflow `wid` as failed and record the error result."""
        db.query(Workflow).filter(Workflow.id == wid).update({
            "state": 'F',
            "result": result,
            "last_time": datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        })
        return True

    @staticmethod
    @db_update
    def success(db, wid: int, result: str = None):
        """Mark workflow `wid` as succeeded and bump its run counter."""
        db.query(Workflow).filter(Workflow.id == wid).update({
            "state": 'S',
            "result": result,
            # SQL-side increment, safe under concurrent updates
            "run_count": Workflow.run_count + 1,
            "last_time": datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        })
        return True

    @staticmethod
    @db_update
    def reset(db, wid: int):
        """Reset workflow `wid` to its initial waiting state."""
        db.query(Workflow).filter(Workflow.id == wid).update({
            "state": 'W',
            "result": None,
            "current_action": None,
        })
        return True

    @staticmethod
    @db_update
    def update_current_action(db, wid: int, action_id: str, context: dict):
        """Append `action_id` to the executed list and persist the context."""
        db.query(Workflow).filter(Workflow.id == wid).update({
            # SQL-side string concat keeps the comma-separated history
            "current_action": f"{Workflow.current_action},{action_id}" if Workflow.current_action else action_id,
            "context": context
        })
        return True

68
app/db/workflow_oper.py Normal file
View File

@@ -0,0 +1,68 @@
from typing import List, Tuple
from app.db import DbOper
from app.db.models.workflow import Workflow
class WorkflowOper(DbOper):
    """
    Database operations for workflows (thin wrapper over the Workflow model).
    """

    def add(self, **kwargs) -> Tuple[bool, str]:
        """
        Create a workflow unless one with the same name already exists.
        """
        workflow = Workflow(**kwargs)
        if workflow.get_by_name(self._db, kwargs.get("name")):
            return False, "工作流已存在"
        workflow.create(self._db)
        return True, "新增工作流成功"

    def get(self, wid: int) -> Workflow:
        """
        Fetch a single workflow by id.
        """
        return Workflow.get(self._db, wid)

    def list_enabled(self) -> List[Workflow]:
        """
        Return all workflows that are enabled (not paused).
        """
        return Workflow.get_enabled_workflows(self._db)

    def get_by_name(self, name: str) -> Workflow:
        """
        Fetch a workflow by its name.
        """
        return Workflow.get_by_name(self._db, name)

    def start(self, wid: int) -> bool:
        """
        Mark the workflow as running.
        """
        return Workflow.start(self._db, wid)

    def success(self, wid: int, result: str = None) -> bool:
        """
        Mark the workflow as succeeded.
        """
        return Workflow.success(self._db, wid, result)

    def fail(self, wid: int, result: str) -> bool:
        """
        Mark the workflow as failed with the given error result.
        """
        return Workflow.fail(self._db, wid, result)

    def step(self, wid: int, action_id: str, context: dict) -> bool:
        """
        Record one executed action and persist the current context.
        """
        return Workflow.update_current_action(self._db, wid, action_id, context)

    def reset(self, wid: int) -> bool:
        """
        Reset the workflow back to its initial waiting state.
        """
        return Workflow.reset(self._db, wid)

View File

@@ -23,6 +23,7 @@ class ModuleHelper:
"""
submodules: list = []
loaded_modules = set()
packages = importlib.import_module(package_path)
for importer, package_name, _ in pkgutil.iter_modules(packages.__path__):
try:
@@ -35,6 +36,9 @@ class ModuleHelper:
if name.startswith('_'):
continue
if isinstance(obj, type) and filter_func(name, obj):
if name in loaded_modules:
continue
loaded_modules.add(name)
submodules.append(obj)
except Exception as err:
logger.debug(f'加载模块 {package_name} 失败:{str(err)} - {traceback.format_exc()}')

View File

@@ -63,6 +63,7 @@ class PluginHelper(metaclass=Singleton):
return json.loads(res.text)
except json.JSONDecodeError:
logger.error(f"插件包数据解析失败:{res.text}")
return None
return {}
def get_plugin_package_version(self, pid: str, repo_url: str, package_version: str = None) -> Optional[str]:

View File

@@ -225,27 +225,27 @@ class RssHelper:
}
@staticmethod
def parse(url, proxy: bool = False, timeout: int = 15, headers: dict = None) -> Union[List[dict], None]:
def parse(url, proxy: bool = False, timeout: int = 15, headers: dict = None) -> Union[List[dict], None, bool]:
"""
解析RSS订阅URL获取RSS中的种子信息
:param url: RSS地址
:param proxy: 是否使用代理
:param timeout: 请求超时
:param headers: 自定义请求头
:return: 种子信息列表如为None代表Rss过期
:return: 种子信息列表如为None代表Rss过期如果为False则为错误
"""
# 开始处理
ret_array: list = []
if not url:
return []
return False
try:
ret = RequestUtils(proxies=settings.PROXY if proxy else None,
timeout=timeout, headers=headers).get_res(url)
if not ret:
return []
return False
except Exception as err:
logger.error(f"获取RSS失败{str(err)} - {traceback.format_exc()}")
return []
return False
if ret:
ret_xml = ""
try:
@@ -322,6 +322,7 @@ class RssHelper:
]
if ret_xml in _rss_expired_msg:
return None
return False
return ret_array
def get_rss_link(self, url: str, cookie: str, ua: str, proxy: bool = False) -> Tuple[str, str]:

View File

@@ -33,7 +33,7 @@ class RuleHelper:
return group
return None
def get_rule_group_by_media(self, media: MediaInfo, group_names: list = None) -> List[FilterRuleGroup]:
def get_rule_group_by_media(self, media: MediaInfo = None, group_names: list = None) -> List[FilterRuleGroup]:
"""
根据媒体信息获取规则组
"""
@@ -44,9 +44,9 @@ class RuleHelper:
for group in rule_groups:
if not group.media_type:
ret_groups.append(group)
elif not group.category and group.media_type == media.type.value:
elif media and not group.category and group.media_type == media.type.value:
ret_groups.append(group)
elif group.category == media.category:
elif media and group.category == media.category:
ret_groups.append(group)
return ret_groups

View File

@@ -445,6 +445,27 @@ class TorrentHelper(metaclass=Singleton):
logger.info(f"{torrent_info.title} 不匹配特效规则 {effect}")
return False
# 大小
size_range = filter_params.get("size")
if size_range:
if size_range.find("-") != -1:
# 区间
size_min, size_max = size_range.split("-")
size_min = float(size_min.strip()) * 1024 * 1024
size_max = float(size_max.strip()) * 1024 * 1024
if torrent_info.size < size_min or torrent_info.size > size_max:
return False
elif size_range.startswith(">"):
# 大于
size_min = float(size_range[1:].strip()) * 1024 * 1024
if torrent_info.size < size_min:
return False
elif size_range.startswith("<"):
# 小于
size_max = float(size_range[1:].strip()) * 1024 * 1024
if torrent_info.size > size_max:
return False
return True
@staticmethod

View File

@@ -676,11 +676,15 @@ class FileManagerModule(_ModuleBase):
".zh-tw": ".繁体中文"
}
new_sub_tag_list = [
new_file_type if t == 0 else "%s%s(%s)" % (new_file_type,
new_sub_tag_dict.get(
new_file_type, ""
),
t) for t in range(6)
(".default" + new_file_type if (
(settings.DEFAULT_SUB == "zh-cn" and new_file_type == ".chi.zh-cn") or
(settings.DEFAULT_SUB == "zh-tw" and new_file_type == ".zh-tw") or
(settings.DEFAULT_SUB == "eng" and new_file_type == ".eng")
) else new_file_type) if t == 0 else "%s%s(%s)" % (new_file_type,
new_sub_tag_dict.get(
new_file_type, ""
),
t) for t in range(6)
]
for new_sub_tag in new_sub_tag_list:
new_file: Path = target_file.with_name(target_file.stem + new_sub_tag + file_ext)

View File

@@ -67,14 +67,14 @@ class Alist(StorageBase, metaclass=Singleton):
return self.__generate_token
@property
@cached(maxsize=1, ttl=60 * 60 * 24 * 2 - 60 * 5)
@cached(maxsize=1, ttl=60 * 60 * 24 * 2 - 60 * 5, skip_empty=True)
def __generate_token(self) -> str:
"""
如果设置永久令牌则返回永久令牌,否则使用账号密码生成一个临时 token
缓存2天提前5分钟更新
"""
conf = self.get_conf()
token = conf.get("token")
token = conf.get("token")
if token:
return str(token)
resp: Response = RequestUtils(headers={

View File

@@ -1,17 +1,14 @@
from datetime import datetime
from typing import List, Optional, Tuple, Union
from ruamel.yaml import CommentedMap
from app.core.config import settings
from app.core.context import TorrentInfo
from app.db.site_oper import SiteOper
from app.helper.module import ModuleHelper
from app.helper.sites import SitesHelper
from app.helper.sites import SitesHelper, SiteSpider
from app.log import logger
from app.modules import _ModuleBase
from app.modules.indexer.parser import SiteParserBase
from app.modules.indexer.spider import TorrentSpider
from app.modules.indexer.spider.haidan import HaiDanSpider
from app.modules.indexer.spider.mtorrent import MTorrentSpider
from app.modules.indexer.spider.tnode import TNodeSpider
@@ -76,15 +73,17 @@ class IndexerModule(_ModuleBase):
def init_setting(self) -> Tuple[str, Union[str, bool]]:
pass
def search_torrents(self, site: CommentedMap,
def search_torrents(self, site: dict,
keywords: List[str] = None,
mtype: MediaType = None,
cat: str = None,
page: int = 0) -> List[TorrentInfo]:
"""
搜索一个站点
:param site: 站点
:param keywords: 搜索关键词列表
:param mtype: 媒体类型
:param cat: 分类
:param page: 页码
:return: 资源列表
"""
@@ -159,6 +158,7 @@ class IndexerModule(_ModuleBase):
search_word=search_word,
indexer=site,
mtype=mtype,
cat=cat,
page=page
)
if error_flag:
@@ -204,35 +204,42 @@ class IndexerModule(_ModuleBase):
return __remove_duplicate(torrents)
@staticmethod
def __spider_search(indexer: CommentedMap,
def __spider_search(indexer: dict,
search_word: str = None,
mtype: MediaType = None,
cat: str = None,
page: int = 0) -> Tuple[bool, List[dict]]:
"""
根据关键字搜索单个站点
:param: indexer: 站点配置
:param: search_word: 关键字
:param: cat: 分类
:param: page: 页码
:param: mtype: 媒体类型
:param: timeout: 超时时间
:return: 是否发生错误, 种子列表
"""
_spider = TorrentSpider(indexer=indexer,
mtype=mtype,
keyword=search_word,
page=page)
_spider = SiteSpider(indexer=indexer,
keyword=search_word,
mtype=mtype,
cat=cat,
page=page)
return _spider.is_error, _spider.get_torrents()
def refresh_torrents(self, site: CommentedMap) -> Optional[List[TorrentInfo]]:
def refresh_torrents(self, site: dict,
keyword: str = None, cat: str = None, page: int = 0) -> Optional[List[TorrentInfo]]:
"""
获取站点最新一页的种子,多个站点需要多线程处理
:param site: 站点
:param keyword: 关键字
:param cat: 分类
:param page: 页码
:reutrn: 种子资源列表
"""
return self.search_torrents(site=site)
return self.search_torrents(site=site, keywords=[keyword], cat=cat, page=page)
def refresh_userdata(self, site: CommentedMap) -> Optional[SiteUserData]:
def refresh_userdata(self, site: dict) -> Optional[SiteUserData]:
"""
刷新站点的用户数据
:param site: 站点

View File

@@ -207,13 +207,14 @@ class NexusPhpSiteUserInfo(SiteParserBase):
# 是否存在下页数据
next_page = None
next_page_text = html.xpath('//a[contains(.//text(), "下一页") or contains(.//text(), "下一頁") or contains(.//text(), ">")]/@href')
#防止识别到详情页
next_page_text = html.xpath(
'//a[contains(.//text(), "下一页") or contains(.//text(), "下一頁") or contains(.//text(), ">")]/@href')
# 防止识别到详情页
while next_page_text:
next_page = next_page_text.pop().strip()
if not next_page.startswith('details.php'):
break;
break
next_page = None
# fix up page url

View File

@@ -1,742 +0,0 @@
import copy
import datetime
import re
import traceback
from typing import List, Union
from urllib.parse import parse_qs, quote, urlencode, urlparse

from jinja2 import Template
from pyquery import PyQuery
from ruamel.yaml import CommentedMap

from app.core.config import settings
from app.helper.browser import PlaywrightHelper
from app.log import logger
from app.schemas.types import MediaType
from app.utils.http import RequestUtils
from app.utils.string import StringUtils
class TorrentSpider:
    """
    Generic torrent-site spider driven by a per-site configuration
    (a ``CommentedMap`` loaded from the site's YAML definition).

    The config supplies search paths/params, CSS selectors for the torrent
    list and its fields; :meth:`get_torrents` fetches one results page and
    :meth:`parse` turns it into a list of torrent-info dicts.
    """
    # whether an error occurred during the last fetch/parse
    is_error: bool = False
    # indexer id
    indexerid: int = None
    # indexer name
    indexername: str = None
    # site domain (normalized to end with "/")
    domain: str = None
    # site cookie string
    cookie: str = None
    # site User-Agent
    ua: str = None
    # proxies for requests
    proxies: dict = None
    # proxy for playwright
    proxy_server: dict = None
    # whether to render the page with a browser
    render: bool = False
    # HTTP Referer
    referer: str = None
    # search keyword (a list means batch search)
    keyword: str = None
    # media type
    mtype: MediaType = None
    # search path / method configuration
    search: dict = {}
    # batch-search configuration
    batch: dict = {}
    # browse (listing) configuration
    browse: dict = {}
    # site category configuration
    category: dict = {}
    # torrent-list selector configuration
    list: dict = {}
    # torrent field selector configuration
    fields: dict = {}
    # page number
    page: int = 0
    # max results, default 100
    result_num: int = 100
    # info of the torrent currently being parsed
    torrents_info: dict = {}
    # accumulated torrent list
    torrents_info_array: list = []
    # request timeout in seconds, default 15
    _timeout = 15
def __init__(self,
indexer: CommentedMap,
keyword: [str, list] = None,
page: int = 0,
referer: str = None,
mtype: MediaType = None):
"""
设置查询参数
:param indexer: 索引器
:param keyword: 搜索关键字,如果数组则为批量搜索
:param page: 页码
:param referer: Referer
:param mtype: 媒体类型
"""
if not indexer:
return
self.keyword = keyword
self.mtype = mtype
self.indexerid = indexer.get('id')
self.indexername = indexer.get('name')
self.search = indexer.get('search')
self.batch = indexer.get('batch')
self.browse = indexer.get('browse')
self.category = indexer.get('category')
self.list = indexer.get('torrents').get('list', {})
self.fields = indexer.get('torrents').get('fields')
self.render = indexer.get('render')
self.domain = indexer.get('domain')
self.result_num = int(indexer.get('result_num') or 100)
self._timeout = int(indexer.get('timeout') or 15)
self.page = page
if self.domain and not str(self.domain).endswith("/"):
self.domain = self.domain + "/"
if indexer.get('ua'):
self.ua = indexer.get('ua') or settings.USER_AGENT
else:
self.ua = settings.USER_AGENT
if indexer.get('proxy'):
self.proxies = settings.PROXY
self.proxy_server = settings.PROXY_SERVER
if indexer.get('cookie'):
self.cookie = indexer.get('cookie')
if referer:
self.referer = referer
self.torrents_info_array = []
    def get_torrents(self) -> List[dict]:
        """
        Build the search/browse URL from the site config, fetch the page
        (via Playwright when ``render`` is enabled, plain requests otherwise)
        and return the parsed torrent list.

        :return: list of torrent-info dicts (empty when config is incomplete)
        """
        if not self.search or not self.domain:
            return []
        # relative search path(s) configured for the site
        paths = self.search.get('paths', [])
        torrentspath = ""
        if len(paths) == 1:
            torrentspath = paths[0].get('path', '')
        else:
            # pick the path matching the requested media type
            for path in paths:
                if path.get("type") == "all" and not self.mtype:
                    torrentspath = path.get('path')
                    break
                elif path.get("type") == "movie" and self.mtype == MediaType.MOVIE:
                    torrentspath = path.get('path')
                    break
                elif path.get("type") == "tv" and self.mtype == MediaType.TV:
                    torrentspath = path.get('path')
                    break
        # keyword search
        if self.keyword:
            if isinstance(self.keyword, list):
                # batch search: join keywords with the configured delimiter
                if self.batch:
                    delimiter = self.batch.get('delimiter') or ' '
                    space_replace = self.batch.get('space_replace') or ' '
                    search_word = delimiter.join([str(k).replace(' ',
                                                                 space_replace) for k in self.keyword])
                else:
                    search_word = " ".join(self.keyword)
                # search mode: OR
                search_mode = "1"
            else:
                # single keyword
                search_word = self.keyword
                # search mode: AND
                search_mode = "0"
            # build the search URL
            indexer_params = self.search.get("params", {}).copy()
            if indexer_params:
                search_area = indexer_params.get('search_area')
                # a non-zero search_area means the site supports imdbid search
                if (search_area and
                        (not self.keyword or not self.keyword.startswith('tt'))):
                    # keyword is not an imdbid — drop imdbid-specific search
                    indexer_params.pop('search_area')
                # template variables for param formatting
                inputs_dict = {
                    "keyword": search_word
                }
                # default query params (title search)
                params = {
                    "search_mode": search_mode,
                    "search_area": 0,
                    "page": self.page or 0,
                    "notnewword": 1
                }
                # extra site-specific params, formatted with the keyword
                for key, value in indexer_params.items():
                    params.update({
                        "%s" % key: str(value).format(**inputs_dict)
                    })
                # category filters
                if self.category:
                    if self.mtype == MediaType.TV:
                        cats = self.category.get("tv") or []
                    elif self.mtype == MediaType.MOVIE:
                        cats = self.category.get("movie") or []
                    else:
                        cats = (self.category.get("movie") or []) + (self.category.get("tv") or [])
                    for cat in cats:
                        if self.category.get("field"):
                            # single field holding delimited category ids
                            value = params.get(self.category.get("field"), "")
                            params.update({
                                "%s" % self.category.get("field"): value + self.category.get("delimiter",
                                                                                             ' ') + cat.get("id")
                            })
                        else:
                            # one flag param per category id (NexusPHP style)
                            params.update({
                                "cat%s" % cat.get("id"): 1
                            })
                searchurl = self.domain + torrentspath + "?" + urlencode(params)
            else:
                # no extra params: path itself is a format template
                inputs_dict = {
                    "keyword": quote(search_word),
                    "page": self.page or 0
                }
                searchurl = self.domain + str(torrentspath).format(**inputs_dict)
        # list browsing (no keyword)
        else:
            inputs_dict = {
                "page": self.page or 0,
                "keyword": ""
            }
            # dedicated browse path, optionally with a page offset
            if self.browse:
                torrentspath = self.browse.get("path")
                if self.browse.get("start"):
                    start_page = int(self.browse.get("start")) + int(self.page or 0)
                    inputs_dict.update({
                        "page": start_page
                    })
            elif self.page:
                torrentspath = torrentspath + f"?page={self.page}"
            searchurl = self.domain + str(torrentspath).format(**inputs_dict)
        logger.info(f"开始请求:{searchurl}")
        if self.render:
            # browser emulation for JS-rendered sites
            page_source = PlaywrightHelper().get_page_source(
                url=searchurl,
                cookies=self.cookie,
                ua=self.ua,
                proxies=self.proxy_server,
                timeout=self._timeout
            )
        else:
            # plain HTTP request
            ret = RequestUtils(
                ua=self.ua,
                cookies=self.cookie,
                timeout=self._timeout,
                referer=self.referer,
                proxies=self.proxies
            ).get_res(searchurl, allow_redirects=True)
            page_source = RequestUtils.get_decoded_html_content(ret,
                                                                settings.ENCODING_DETECTION_PERFORMANCE_MODE,
                                                                settings.ENCODING_DETECTION_MIN_CONFIDENCE)
        # parse the fetched page
        return self.parse(page_source)
    def __get_title(self, torrent):
        """
        Extract the torrent title into ``torrents_info['title']``.

        Supports either a direct CSS ``selector`` or a Jinja2 ``text``
        template rendered with ``title_default``/``title_optional`` fields.

        :param torrent: PyQuery node of one torrent row
        """
        if 'title' not in self.fields:
            return
        selector = self.fields.get('title', {})
        if 'selector' in selector:
            title = torrent(selector.get('selector', '')).clone()
            self.__remove(title, selector)
            items = self.__attribute_or_text(title, selector)
            self.torrents_info['title'] = self.__index(items, selector)
        elif 'text' in selector:
            render_dict = {}
            if "title_default" in self.fields:
                title_default_selector = self.fields.get('title_default', {})
                title_default_item = torrent(title_default_selector.get('selector', '')).clone()
                self.__remove(title_default_item, title_default_selector)
                # NOTE(review): passes `selector` (the title config) here rather than
                # `title_default_selector`, unlike the title_optional branch below —
                # looks inconsistent; confirm intended before changing
                items = self.__attribute_or_text(title_default_item, selector)
                title_default = self.__index(items, title_default_selector)
                render_dict.update({'title_default': title_default})
            if "title_optional" in self.fields:
                title_optional_selector = self.fields.get('title_optional', {})
                title_optional_item = torrent(title_optional_selector.get('selector', '')).clone()
                self.__remove(title_optional_item, title_optional_selector)
                items = self.__attribute_or_text(title_optional_item, title_optional_selector)
                title_optional = self.__index(items, title_optional_selector)
                render_dict.update({'title_optional': title_optional})
            self.torrents_info['title'] = Template(selector.get('text')).render(fields=render_dict)
        # post-process with the configured filter pipeline
        self.torrents_info['title'] = self.__filter_text(self.torrents_info.get('title'),
                                                         selector.get('filters'))
    def __get_description(self, torrent):
        """
        Extract the torrent description (subtitle) into
        ``torrents_info['description']``.

        Supports a direct CSS ``selector``/``selectors`` or a Jinja2 ``text``
        template rendered from the ``tags``/``subject``/
        ``description_free_forever``/``description_normal`` fields.

        :param torrent: PyQuery node of one torrent row
        """
        if 'description' not in self.fields:
            return
        selector = self.fields.get('description', {})
        if "selector" in selector \
                or "selectors" in selector:
            description = torrent(selector.get('selector', selector.get('selectors', ''))).clone()
            if description:
                self.__remove(description, selector)
                items = self.__attribute_or_text(description, selector)
                self.torrents_info['description'] = self.__index(items, selector)
        elif "text" in selector:
            # assemble the description from auxiliary fields via template
            render_dict = {}
            if "tags" in self.fields:
                tags_selector = self.fields.get('tags', {})
                tags_item = torrent(tags_selector.get('selector', '')).clone()
                self.__remove(tags_item, tags_selector)
                items = self.__attribute_or_text(tags_item, tags_selector)
                tag = self.__index(items, tags_selector)
                render_dict.update({'tags': tag})
            if "subject" in self.fields:
                subject_selector = self.fields.get('subject', {})
                subject_item = torrent(subject_selector.get('selector', '')).clone()
                self.__remove(subject_item, subject_selector)
                items = self.__attribute_or_text(subject_item, subject_selector)
                subject = self.__index(items, subject_selector)
                render_dict.update({'subject': subject})
            if "description_free_forever" in self.fields:
                description_free_forever_selector = self.fields.get("description_free_forever", {})
                description_free_forever_item = torrent(description_free_forever_selector.get("selector", '')).clone()
                self.__remove(description_free_forever_item, description_free_forever_selector)
                items = self.__attribute_or_text(description_free_forever_item, description_free_forever_selector)
                description_free_forever = self.__index(items, description_free_forever_selector)
                render_dict.update({"description_free_forever": description_free_forever})
            if "description_normal" in self.fields:
                description_normal_selector = self.fields.get("description_normal", {})
                description_normal_item = torrent(description_normal_selector.get("selector", '')).clone()
                self.__remove(description_normal_item, description_normal_selector)
                items = self.__attribute_or_text(description_normal_item, description_normal_selector)
                description_normal = self.__index(items, description_normal_selector)
                render_dict.update({"description_normal": description_normal})
            self.torrents_info['description'] = Template(selector.get('text')).render(fields=render_dict)
        # post-process with the configured filter pipeline
        self.torrents_info['description'] = self.__filter_text(self.torrents_info.get('description'),
                                                               selector.get('filters'))
def __get_detail(self, torrent):
# details page text
if 'details' not in self.fields:
return
selector = self.fields.get('details', {})
details = torrent(selector.get('selector', '')).clone()
self.__remove(details, selector)
items = self.__attribute_or_text(details, selector)
item = self.__index(items, selector)
detail_link = self.__filter_text(item, selector.get('filters'))
if detail_link:
if not detail_link.startswith("http"):
if detail_link.startswith("//"):
self.torrents_info['page_url'] = self.domain.split(":")[0] + ":" + detail_link
elif detail_link.startswith("/"):
self.torrents_info['page_url'] = self.domain + detail_link[1:]
else:
self.torrents_info['page_url'] = self.domain + detail_link
else:
self.torrents_info['page_url'] = detail_link
    def __get_download(self, torrent):
        """
        Extract the download (enclosure) link into
        ``torrents_info['enclosure']``.

        Relative links are resolved against the site domain; links that
        already contain the domain but lack a scheme get the domain's scheme.

        :param torrent: PyQuery node of one torrent row
        """
        if 'download' not in self.fields:
            return
        selector = self.fields.get('download', {})
        download = torrent(selector.get('selector', '')).clone()
        self.__remove(download, selector)
        items = self.__attribute_or_text(download, selector)
        item = self.__index(items, selector)
        download_link = self.__filter_text(item, selector.get('filters'))
        if download_link:
            if not download_link.startswith("http") \
                    and not download_link.startswith("magnet"):
                _scheme, _domain = StringUtils.get_url_netloc(self.domain)
                if _domain in download_link:
                    # link already contains the host — only prepend the scheme
                    if download_link.startswith("/"):
                        self.torrents_info['enclosure'] = f"{_scheme}:{download_link}"
                    else:
                        self.torrents_info['enclosure'] = f"{_scheme}://{download_link}"
                else:
                    # relative path — resolve against the domain (ends with "/")
                    if download_link.startswith("/"):
                        self.torrents_info['enclosure'] = f"{self.domain}{download_link[1:]}"
                    else:
                        self.torrents_info['enclosure'] = f"{self.domain}{download_link}"
            else:
                # absolute http(s) or magnet link
                self.torrents_info['enclosure'] = download_link
def __get_imdbid(self, torrent):
# imdbid
if "imdbid" not in self.fields:
return
selector = self.fields.get('imdbid', {})
imdbid = torrent(selector.get('selector', '')).clone()
self.__remove(imdbid, selector)
items = self.__attribute_or_text(imdbid, selector)
item = self.__index(items, selector)
self.torrents_info['imdbid'] = item
self.torrents_info['imdbid'] = self.__filter_text(self.torrents_info.get('imdbid'),
selector.get('filters'))
def __get_size(self, torrent):
# torrent size int
if 'size' not in self.fields:
return
selector = self.fields.get('size', {})
size = torrent(selector.get('selector', selector.get("selectors", ''))).clone()
self.__remove(size, selector)
items = self.__attribute_or_text(size, selector)
item = self.__index(items, selector)
if item:
size_val = item.replace("\n", "").strip()
size_val = self.__filter_text(size_val,
selector.get('filters'))
self.torrents_info['size'] = StringUtils.num_filesize(size_val)
else:
self.torrents_info['size'] = 0
def __get_leechers(self, torrent):
# torrent leechers int
if 'leechers' not in self.fields:
return
selector = self.fields.get('leechers', {})
leechers = torrent(selector.get('selector', '')).clone()
self.__remove(leechers, selector)
items = self.__attribute_or_text(leechers, selector)
item = self.__index(items, selector)
if item:
peers_val = item.split("/")[0]
peers_val = peers_val.replace(",", "")
peers_val = self.__filter_text(peers_val,
selector.get('filters'))
self.torrents_info['peers'] = int(peers_val) if peers_val and peers_val.isdigit() else 0
else:
self.torrents_info['peers'] = 0
def __get_seeders(self, torrent):
# torrent leechers int
if 'seeders' not in self.fields:
return
selector = self.fields.get('seeders', {})
seeders = torrent(selector.get('selector', '')).clone()
self.__remove(seeders, selector)
items = self.__attribute_or_text(seeders, selector)
item = self.__index(items, selector)
if item:
seeders_val = item.split("/")[0]
seeders_val = seeders_val.replace(",", "")
seeders_val = self.__filter_text(seeders_val,
selector.get('filters'))
self.torrents_info['seeders'] = int(seeders_val) if seeders_val and seeders_val.isdigit() else 0
else:
self.torrents_info['seeders'] = 0
def __get_grabs(self, torrent):
# torrent grabs int
if 'grabs' not in self.fields:
return
selector = self.fields.get('grabs', {})
grabs = torrent(selector.get('selector', '')).clone()
self.__remove(grabs, selector)
items = self.__attribute_or_text(grabs, selector)
item = self.__index(items, selector)
if item:
grabs_val = item.split("/")[0]
grabs_val = grabs_val.replace(",", "")
grabs_val = self.__filter_text(grabs_val,
selector.get('filters'))
self.torrents_info['grabs'] = int(grabs_val) if grabs_val and grabs_val.isdigit() else 0
else:
self.torrents_info['grabs'] = 0
def __get_pubdate(self, torrent):
# torrent pubdate yyyy-mm-dd hh:mm:ss
if 'date_added' not in self.fields:
return
selector = self.fields.get('date_added', {})
pubdate = torrent(selector.get('selector', '')).clone()
self.__remove(pubdate, selector)
items = self.__attribute_or_text(pubdate, selector)
pubdate_str = self.__index(items, selector)
if pubdate_str:
pubdate_str = pubdate_str.replace('\n', ' ').strip()
self.torrents_info['pubdate'] = self.__filter_text(pubdate_str,
selector.get('filters'))
def __get_date_elapsed(self, torrent):
# torrent data elaspsed text
if 'date_elapsed' not in self.fields:
return
selector = self.fields.get('date_elapsed', {})
date_elapsed = torrent(selector.get('selector', '')).clone()
self.__remove(date_elapsed, selector)
items = self.__attribute_or_text(date_elapsed, selector)
self.torrents_info['date_elapsed'] = self.__index(items, selector)
self.torrents_info['date_elapsed'] = self.__filter_text(self.torrents_info.get('date_elapsed'),
selector.get('filters'))
def __get_downloadvolumefactor(self, torrent):
# downloadvolumefactor int
selector = self.fields.get('downloadvolumefactor', {})
if not selector:
return
self.torrents_info['downloadvolumefactor'] = 1
if 'case' in selector:
for downloadvolumefactorselector in list(selector.get('case', {}).keys()):
downloadvolumefactor = torrent(downloadvolumefactorselector)
if len(downloadvolumefactor) > 0:
self.torrents_info['downloadvolumefactor'] = selector.get('case', {}).get(
downloadvolumefactorselector)
break
elif "selector" in selector:
downloadvolume = torrent(selector.get('selector', '')).clone()
self.__remove(downloadvolume, selector)
items = self.__attribute_or_text(downloadvolume, selector)
item = self.__index(items, selector)
if item:
downloadvolumefactor = re.search(r'(\d+\.?\d*)', item)
if downloadvolumefactor:
self.torrents_info['downloadvolumefactor'] = int(downloadvolumefactor.group(1))
def __get_uploadvolumefactor(self, torrent):
# uploadvolumefactor int
selector = self.fields.get('uploadvolumefactor', {})
if not selector:
return
self.torrents_info['uploadvolumefactor'] = 1
if 'case' in selector:
for uploadvolumefactorselector in list(selector.get('case', {}).keys()):
uploadvolumefactor = torrent(uploadvolumefactorselector)
if len(uploadvolumefactor) > 0:
self.torrents_info['uploadvolumefactor'] = selector.get('case', {}).get(
uploadvolumefactorselector)
break
elif "selector" in selector:
uploadvolume = torrent(selector.get('selector', '')).clone()
self.__remove(uploadvolume, selector)
items = self.__attribute_or_text(uploadvolume, selector)
item = self.__index(items, selector)
if item:
uploadvolumefactor = re.search(r'(\d+\.?\d*)', item)
if uploadvolumefactor:
self.torrents_info['uploadvolumefactor'] = int(uploadvolumefactor.group(1))
def __get_labels(self, torrent):
# labels ['label1', 'label2']
if 'labels' not in self.fields:
return
selector = self.fields.get('labels', {})
labels = torrent(selector.get("selector", "")).clone()
self.__remove(labels, selector)
items = self.__attribute_or_text(labels, selector)
if items:
self.torrents_info['labels'] = [item for item in items if item]
else:
self.torrents_info['labels'] = []
def __get_free_date(self, torrent):
# free date yyyy-mm-dd hh:mm:ss
if 'freedate' not in self.fields:
return
selector = self.fields.get('freedate', {})
freedate = torrent(selector.get('selector', '')).clone()
self.__remove(freedate, selector)
items = self.__attribute_or_text(freedate, selector)
self.torrents_info['freedate'] = self.__index(items, selector)
self.torrents_info['freedate'] = self.__filter_text(self.torrents_info.get('freedate'),
selector.get('filters'))
def __get_hit_and_run(self, torrent):
# hitandrun True/False
if 'hr' not in self.fields:
return
selector = self.fields.get('hr', {})
hit_and_run = torrent(selector.get('selector', ''))
if hit_and_run:
self.torrents_info['hit_and_run'] = True
else:
self.torrents_info['hit_and_run'] = False
    def __get_category(self, torrent):
        """
        Map the site's raw category id to a :class:`MediaType` value in
        ``torrents_info['category']``.

        An id listed only under ``tv`` maps to TV; one listed under ``movie``
        maps to MOVIE; anything else maps to UNKNOWN.

        :param torrent: PyQuery node of one torrent row
        """
        if 'category' not in self.fields:
            return
        selector = self.fields.get('category', {})
        category = torrent(selector.get('selector', '')).clone()
        self.__remove(category, selector)
        items = self.__attribute_or_text(category, selector)
        category_value = self.__index(items, selector)
        category_value = self.__filter_text(category_value,
                                            selector.get('filters'))
        if category_value and self.category:
            tv_cats = [str(cat.get("id")) for cat in self.category.get("tv") or []]
            movie_cats = [str(cat.get("id")) for cat in self.category.get("movie") or []]
            # TV only wins when the id is not also a movie category
            if category_value in tv_cats \
                    and category_value not in movie_cats:
                self.torrents_info['category'] = MediaType.TV.value
            elif category_value in movie_cats:
                self.torrents_info['category'] = MediaType.MOVIE.value
            else:
                self.torrents_info['category'] = MediaType.UNKNOWN.value
        else:
            self.torrents_info['category'] = MediaType.UNKNOWN.value
    def get_info(self, torrent) -> dict:
        """
        Parse one torrent row into a fresh ``torrents_info`` dict by running
        every field extractor; any exception aborts the remaining fields and
        is logged, returning whatever was collected so far.

        :param torrent: PyQuery node of one torrent row
        :return: dict of the extracted torrent fields
        """
        self.torrents_info = {}
        try:
            # title
            self.__get_title(torrent)
            # description
            self.__get_description(torrent)
            # details page URL
            self.__get_detail(torrent)
            # download link
            self.__get_download(torrent)
            # completed downloads
            self.__get_grabs(torrent)
            # leechers
            self.__get_leechers(torrent)
            # seeders
            self.__get_seeders(torrent)
            # size
            self.__get_size(torrent)
            # IMDB id
            self.__get_imdbid(torrent)
            # download volume factor
            self.__get_downloadvolumefactor(torrent)
            # upload volume factor
            self.__get_uploadvolumefactor(torrent)
            # publish date
            self.__get_pubdate(torrent)
            # elapsed-time text
            self.__get_date_elapsed(torrent)
            # free-promotion end time
            self.__get_free_date(torrent)
            # labels
            self.__get_labels(torrent)
            # hit & run flag
            self.__get_hit_and_run(torrent)
            # category
            self.__get_category(torrent)
        except Exception as err:
            logger.error("%s 搜索出现错误:%s" % (self.indexername, str(err)))
        return self.torrents_info
@staticmethod
def __filter_text(text: str, filters: list):
"""
对文件进行处理
"""
if not text or not filters or not isinstance(filters, list):
return text
if not isinstance(text, str):
text = str(text)
for filter_item in filters:
if not text:
break
method_name = filter_item.get("name")
try:
args = filter_item.get("args")
if method_name == "re_search" and isinstance(args, list):
rematch = re.search(r"%s" % args[0], text)
if rematch:
text = rematch.group(args[-1])
elif method_name == "split" and isinstance(args, list):
text = text.split(r"%s" % args[0])[args[-1]]
elif method_name == "replace" and isinstance(args, list):
text = text.replace(r"%s" % args[0], r"%s" % args[-1])
elif method_name == "dateparse" and isinstance(args, str):
text = text.replace("\n", " ").strip()
text = datetime.datetime.strptime(text, r"%s" % args)
elif method_name == "strip":
text = text.strip()
elif method_name == "appendleft":
text = f"{args}{text}"
elif method_name == "querystring":
parsed_url = urlparse(str(text))
query_params = parse_qs(parsed_url.query)
param_value = query_params.get(args)
text = param_value[0] if param_value else ''
except Exception as err:
logger.debug(f'过滤器 {method_name} 处理失败:{str(err)} - {traceback.format_exc()}')
return text.strip()
@staticmethod
def __remove(item, selector):
"""
移除元素
"""
if selector and "remove" in selector:
removelist = selector.get('remove', '').split(', ')
for v in removelist:
item.remove(v)
@staticmethod
def __attribute_or_text(item, selector: dict):
if not selector:
return item
if not item:
return []
if 'attribute' in selector:
items = [i.attr(selector.get('attribute')) for i in item.items() if i]
else:
items = [i.text() for i in item.items() if i]
return items
    @staticmethod
    def __index(items: list, selector: dict):
        """
        Pick a single value out of the extracted list per the selector.

        ``contents``: split the first item by newline and take that line;
        ``index``: take the list element at that position. If the result is
        still a list, the first element is returned.

        :param items: values extracted by ``__attribute_or_text``
        :param selector: field selector config (may be falsy)
        :return: the selected value, or None when the list is empty
        """
        if not items:
            return None
        if selector:
            if "contents" in selector \
                    and len(items) > int(selector.get("contents")):
                # NOTE(review): guards on len(items) but indexes the split
                # lines of items[0] — assumes the line count also exceeds
                # the index; confirm against site configs
                items = items[0].split("\n")[selector.get("contents")]
            elif "index" in selector \
                    and len(items) > int(selector.get("index")):
                items = items[int(selector.get("index"))]
        if isinstance(items, list):
            items = items[0]
        return items
def parse(self, html_text: str) -> List[dict]:
"""
解析整个页面
"""
if not html_text:
self.is_error = True
return []
# 清空旧结果
self.torrents_info_array = []
try:
# 解析站点文本对象
html_doc = PyQuery(html_text)
# 种子筛选器
torrents_selector = self.list.get('selector', '')
# 遍历种子html列表
for torn in html_doc(torrents_selector):
self.torrents_info_array.append(copy.deepcopy(self.get_info(PyQuery(torn))))
if len(self.torrents_info_array) >= int(self.result_num):
break
return self.torrents_info_array
except Exception as err:
self.is_error = True
logger.warn(f"错误:{self.indexername} {str(err)}")

View File

@@ -1,8 +1,6 @@
import urllib.parse
from typing import Tuple, List
from ruamel.yaml import CommentedMap
from app.core.config import settings
from app.db.systemconfig_oper import SystemConfigOper
from app.log import logger
@@ -51,7 +49,7 @@ class HaiDanSpider:
"7": 1
}
def __init__(self, indexer: CommentedMap):
def __init__(self, indexer: dict):
self.systemconfig = SystemConfigOper()
if indexer:
self._indexerid = indexer.get('id')

View File

@@ -3,8 +3,6 @@ import json
import re
from typing import Tuple, List
from ruamel.yaml import CommentedMap
from app.core.config import settings
from app.db.systemconfig_oper import SystemConfigOper
from app.log import logger
@@ -51,7 +49,7 @@ class MTorrentSpider:
"7": "DIY 国配 中字"
}
def __init__(self, indexer: CommentedMap):
def __init__(self, indexer: dict):
self.systemconfig = SystemConfigOper()
if indexer:
self._indexerid = indexer.get('id')

View File

@@ -1,8 +1,6 @@
import re
from typing import Tuple, List
from ruamel.yaml import CommentedMap
from app.core.config import settings
from app.log import logger
from app.utils.http import RequestUtils
@@ -23,7 +21,7 @@ class TNodeSpider:
_downloadurl = "%sapi/torrent/download/%s"
_pageurl = "%storrent/info/%s"
def __init__(self, indexer: CommentedMap):
def __init__(self, indexer: dict):
if indexer:
self._indexerid = indexer.get('id')
self._domain = indexer.get('domain')

View File

@@ -1,8 +1,6 @@
from typing import List, Tuple
from urllib.parse import quote
from ruamel.yaml import CommentedMap
from app.core.config import settings
from app.log import logger
from app.utils.http import RequestUtils
@@ -19,7 +17,7 @@ class TorrentLeech:
_pageurl = "%storrent/%s"
_timeout = 15
def __init__(self, indexer: CommentedMap):
def __init__(self, indexer: dict):
self._indexer = indexer
if indexer.get('proxy'):
self._proxy = settings.PROXY

View File

@@ -1,7 +1,5 @@
from typing import Tuple, List
from ruamel.yaml import CommentedMap
from app.core.config import settings
from app.db.systemconfig_oper import SystemConfigOper
from app.log import logger
@@ -46,7 +44,7 @@ class YemaSpider:
"12": "完结",
}
def __init__(self, indexer: CommentedMap):
def __init__(self, indexer: dict):
self.systemconfig = SystemConfigOper()
if indexer:
self._indexerid = indexer.get('id')

View File

@@ -239,7 +239,9 @@ class QbittorrentModule(_ModuleBase, _DownloaderBase[Qbittorrent]):
path=torrent_path,
hash=torrent.get('hash'),
size=torrent.get('total_size'),
tags=torrent.get('tags')
tags=torrent.get('tags'),
progress=torrent.get('progress') * 100,
state="paused" if torrent.get('state') in ("paused", "pausedDL") else "downloading",
))
elif status == TorrentStatus.TRANSFER:
# 获取已完成且未整理的

View File

@@ -118,11 +118,12 @@ class TheMovieDbModule(_ModuleBase):
# 识别匹配
if not cache_info or not cache:
info = None
# 缓存没有或者强制不使用缓存
if tmdbid:
# 直接查询详情
info = self.tmdb.get_info(mtype=mtype, tmdbid=tmdbid)
elif meta:
if not info and meta:
info = {}
# 简体名称
zh_name = zhconv.convert(meta.cn_name, "zh-hans") if meta.cn_name else None
@@ -172,8 +173,8 @@ class TheMovieDbModule(_ModuleBase):
if info and not info.get("genres"):
info = self.tmdb.get_info(mtype=info.get("media_type"),
tmdbid=info.get("id"))
else:
logger.error("识别媒体信息时未提供元数据或tmdbid")
elif not info:
logger.error("识别媒体信息时未提供元数据或唯一且有效的tmdbid")
return None
# 保存到缓存

View File

@@ -578,7 +578,7 @@ class TmdbApi:
genre_ids.append(genre.get('id'))
return genre_ids
# 查询TMDB详ngeq
# 查询TMDB详
if mtype == MediaType.MOVIE:
tmdb_info = self.__get_movie_detail(tmdbid)
if tmdb_info:
@@ -588,13 +588,20 @@ class TmdbApi:
if tmdb_info:
tmdb_info['media_type'] = MediaType.TV
else:
tmdb_info = self.__get_tv_detail(tmdbid)
if tmdb_info:
tmdb_info_tv = self.__get_tv_detail(tmdbid)
tmdb_info_movie = self.__get_movie_detail(tmdbid)
if tmdb_info_tv and tmdb_info_movie:
tmdb_info = None
logger.warn(f"无法判断tmdb_id:{tmdbid} 是电影还是电视剧")
elif tmdb_info_tv:
tmdb_info = tmdb_info_tv
tmdb_info['media_type'] = MediaType.TV
elif tmdb_info_movie:
tmdb_info = tmdb_info_movie
tmdb_info['media_type'] = MediaType.MOVIE
else:
tmdb_info = self.__get_movie_detail(tmdbid)
if tmdb_info:
tmdb_info['media_type'] = MediaType.MOVIE
tmdb_info = None
logger.warn(f"tmdb_id:{tmdbid} 未查询到媒体信息")
if tmdb_info:
# 转换genreid

View File

@@ -246,7 +246,9 @@ class TransmissionModule(_ModuleBase, _DownloaderBase[Transmission]):
title=torrent.name,
path=Path(torrent.download_dir) / torrent.name,
hash=torrent.hashString,
tags=",".join(torrent.labels or [])
tags=",".join(torrent.labels or []),
progress=torrent.progress,
state="paused" if torrent.status == "stopped" else "downloading",
))
elif status == TorrentStatus.DOWNLOADING:
# 获取正在下载的任务

View File

@@ -32,13 +32,13 @@ class WeChat:
_proxy = None
# 企业微信发送消息URL
_send_msg_url = "/cgi-bin/message/send?access_token={access_token}"
_send_msg_url = "cgi-bin/message/send?access_token={access_token}"
# 企业微信获取TokenURL
_token_url = "/cgi-bin/gettoken?corpid={corpid}&corpsecret={corpsecret}"
_token_url = "cgi-bin/gettoken?corpid={corpid}&corpsecret={corpsecret}"
# 企业微信创建菜单URL
_create_menu_url = "/cgi-bin/menu/create?access_token={access_token}&agentid={agentid}"
_create_menu_url = "cgi-bin/menu/create?access_token={access_token}&agentid={agentid}"
# 企业微信删除菜单URL
_delete_menu_url = "/cgi-bin/menu/delete?access_token={access_token}&agentid={agentid}"
_delete_menu_url = "cgi-bin/menu/delete?access_token={access_token}&agentid={agentid}"
def __init__(self, WECHAT_CORPID: str = None, WECHAT_APP_SECRET: str = None,
WECHAT_APP_ID: str = None, WECHAT_PROXY: str = None, **kwargs):

View File

@@ -7,6 +7,7 @@ import pytz
from apscheduler.executors.pool import ThreadPoolExecutor
from apscheduler.jobstores.base import JobLookupError
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
from app import schemas
from app.chain import ChainBase
@@ -16,13 +17,14 @@ from app.chain.site import SiteChain
from app.chain.subscribe import SubscribeChain
from app.chain.tmdb import TmdbChain
from app.chain.transfer import TransferChain
from app.chain.workflow import WorkflowChain
from app.core.config import settings
from app.core.event import EventManager
from app.core.plugin import PluginManager
from app.db.systemconfig_oper import SystemConfigOper
from app.helper.sites import SitesHelper
from app.log import logger
from app.schemas import Notification, NotificationType
from app.schemas import Notification, NotificationType, Workflow
from app.schemas.types import EventType, SystemConfigKey
from app.utils.singleton import Singleton
from app.utils.timer import TimerUtils
@@ -345,6 +347,10 @@ class Scheduler(metaclass=Singleton):
}
)
# 初始化工作流服务
self.init_workflow_jobs()
# 初始化插件服务
self.init_plugin_jobs()
# 打印服务
@@ -401,52 +407,42 @@ class Scheduler(metaclass=Singleton):
for pid in PluginManager().get_running_plugin_ids():
self.update_plugin_job(pid)
def update_plugin_job(self, pid: str):
def init_workflow_jobs(self):
"""
更新插件定时服务
初始化工作流定时服务
"""
if not self._scheduler or not pid:
for workflow in WorkflowChain().get_workflows() or []:
self.update_workflow_job(workflow)
def remove_workflow_job(self, workflow: Workflow):
"""
移除工作流服务
"""
if not self._scheduler:
return
# 移除该插件的全部服务
self.remove_plugin_job(pid)
# 获取插件服务列表
with self._lock:
try:
plugin_services = PluginManager().get_plugin_services(pid=pid)
except Exception as e:
logger.error(f"运行插件 {pid} 服务失败:{str(e)} - {traceback.format_exc()}")
job_id = f"workflow-{workflow.id}"
service = self._jobs.pop(job_id, None)
if not service:
return
# 获取插件名称
plugin_name = PluginManager().get_plugin_attr(pid, "plugin_name")
# 开始注册插件服务
for service in plugin_services:
try:
sid = f"{service['id']}"
job_id = sid.split("|")[0]
self.remove_plugin_job(pid, job_id)
self._jobs[job_id] = {
"func": service["func"],
"name": service["name"],
"pid": pid,
"plugin_name": plugin_name,
"kwargs": service.get("func_kwargs") or {},
"running": False,
}
self._scheduler.add_job(
self.start,
service["trigger"],
id=sid,
name=service["name"],
**(service.get("kwargs") or {}),
kwargs={"job_id": job_id},
replace_existing=True
)
logger.info(f"注册插件{plugin_name}服务:{service['name']} - {service['trigger']}")
except Exception as e:
logger.error(f"注册插件{plugin_name}服务失败:{str(e)} - {service}")
SchedulerChain().messagehelper.put(title=f"插件 {plugin_name} 服务注册失败",
message=str(e),
role="system")
try:
# 在调度器中查找并移除对应的 job
job_removed = False
for job in list(self._scheduler.get_jobs()):
if job_id == job.id:
try:
self._scheduler.remove_job(job.id)
job_removed = True
except JobLookupError:
pass
break
if job_removed:
logger.info(f"移除工作流服务:{service.get('name')}")
except Exception as e:
logger.error(f"移除工作流服务失败:{str(e)} - {job_id}: {service}")
SchedulerChain().messagehelper.put(title=f"工作流 {workflow.name} 服务移除失败",
message=str(e),
role="system")
def remove_plugin_job(self, pid: str, job_id: str = None):
"""
@@ -494,6 +490,87 @@ class Scheduler(metaclass=Singleton):
message=str(e),
role="system")
def update_workflow_job(self, workflow: Workflow):
    """
    Register (or re-register) the scheduled job for a workflow.

    Removes any existing job for this workflow first, then records it in
    ``self._jobs`` and adds a cron-triggered job to the scheduler that fires
    ``self.start``, which in turn runs ``WorkflowChain().process``.

    :param workflow: workflow whose ``timer`` holds a crontab expression
    """
    if not self._scheduler:
        return
    # Remove all existing jobs for this workflow before re-adding
    self.remove_workflow_job(workflow)
    # Register the workflow job under the shared jobs lock
    with self._lock:
        try:
            job_id = f"workflow-{workflow.id}"
            self._jobs[job_id] = {
                "func": WorkflowChain().process,
                "name": workflow.name,
                "provider_name": "工作流",
                "running": False,
            }
            self._scheduler.add_job(
                self.start,
                trigger=CronTrigger.from_crontab(workflow.timer),
                id=job_id,
                name=workflow.name,
                # FIX: forward the workflow's numeric id, not the scheduler
                # job-id string ("workflow-<id>"), so WorkflowChain().process
                # receives the database primary key it expects.
                kwargs={"job_id": job_id, "workflow_id": workflow.id},
                replace_existing=True
            )
            logger.info(f"注册工作流服务:{workflow.name} - {workflow.timer}")
        except Exception as e:
            logger.error(f"注册工作流服务失败:{workflow.name} - {str(e)}")
            SchedulerChain().messagehelper.put(title=f"工作流 {workflow.name} 服务注册失败",
                                               message=str(e),
                                               role="system")
def update_plugin_job(self, pid: str):
    """
    Refresh all scheduled services of a single plugin.

    Removes every existing job registered for the plugin, queries the
    plugin's current service list, and re-registers each service with the
    scheduler. Failures are logged and surfaced via the message helper
    rather than raised.

    :param pid: plugin identifier; no-op when empty or scheduler is absent
    """
    if not self._scheduler or not pid:
        return
    # Drop every job previously registered for this plugin
    self.remove_plugin_job(pid)
    # Query and (re-)register the plugin's services under the jobs lock
    with self._lock:
        try:
            plugin_services = PluginManager().get_plugin_services(pid=pid)
        except Exception as e:
            logger.error(f"运行插件 {pid} 服务失败:{str(e)} - {traceback.format_exc()}")
            return
        # Resolve the plugin's display name for logs and notifications
        plugin_name = PluginManager().get_plugin_attr(pid, "plugin_name")
        # Register each declared service
        for service in plugin_services:
            try:
                # Scheduler id keeps the full service id; the jobs-dict key
                # is only the part before "|" (service ids may be suffixed).
                sid = f"{service['id']}"
                job_id = sid.split("|")[0]
                # Remove a possible stale entry for this specific job id
                self.remove_plugin_job(pid, job_id)
                self._jobs[job_id] = {
                    "func": service["func"],
                    "name": service["name"],
                    "pid": pid,
                    "provider_name": plugin_name,
                    # extra kwargs passed to the service function itself
                    "kwargs": service.get("func_kwargs") or {},
                    "running": False,
                }
                self._scheduler.add_job(
                    self.start,
                    service["trigger"],
                    id=sid,
                    name=service["name"],
                    # trigger-specific kwargs (e.g. cron fields) — spread first
                    **(service.get("kwargs") or {}),
                    kwargs={"job_id": job_id},
                    replace_existing=True
                )
                logger.info(f"注册插件{plugin_name}服务:{service['name']} - {service['trigger']}")
            except Exception as e:
                # One bad service must not prevent the others from registering
                logger.error(f"注册插件{plugin_name}服务失败:{str(e)} - {service}")
                SchedulerChain().messagehelper.put(title=f"插件 {plugin_name} 服务注册失败",
                                                   message=str(e),
                                                   role="system")
def list(self) -> List[schemas.ScheduleInfo]:
"""
当前所有任务
@@ -511,14 +588,14 @@ class Scheduler(metaclass=Singleton):
# 将正在运行的任务提取出来 (保障一次性任务正常显示)
for job_id, service in self._jobs.items():
name = service.get("name")
plugin_name = service.get("plugin_name")
if service.get("running") and name and plugin_name:
provider_name = service.get("provider_name")
if service.get("running") and name and provider_name:
if name not in added:
added.append(name)
schedulers.append(schemas.ScheduleInfo(
id=job_id,
name=name,
provider=plugin_name,
provider=provider_name,
status="正在运行",
))
# 获取其他待执行任务
@@ -538,7 +615,7 @@ class Scheduler(metaclass=Singleton):
schedulers.append(schemas.ScheduleInfo(
id=job_id,
name=job.name,
provider=service.get("plugin_name", "[系统]"),
provider=service.get("provider_name", "[系统]"),
status=status,
next_run=next_run
))

View File

@@ -19,3 +19,5 @@ from .file import *
from .exception import *
from .system import *
from .event import *
from .workflow import *
from .download import *

13
app/schemas/download.py Normal file
View File

@@ -0,0 +1,13 @@
from typing import Optional
from pydantic import BaseModel, Field
class DownloadTask(BaseModel):
    """
    Download task passed between workflow actions.
    """
    # downloader-assigned task identifier — presumably a torrent hash; TODO confirm
    download_id: Optional[str] = Field(None, description="任务ID")
    # name of the downloader handling this task
    downloader: Optional[str] = Field(None, description="下载器")
    # download destination path
    path: Optional[str] = Field(None, description="下载路径")
    # whether the download has finished
    completed: Optional[bool] = Field(False, description="是否完成")

View File

@@ -6,6 +6,15 @@ from pydantic import BaseModel, Field, root_validator
from app.schemas import MessageChannel, FileItem
class Event(BaseModel):
    """
    Event model.
    """
    # event type identifier (required)
    event_type: str = Field(..., description="事件类型")
    # arbitrary payload attached to the event
    event_data: Optional[dict] = Field({}, description="事件数据")
    # event priority — higher/lower ordering semantics not shown here; TODO confirm
    priority: Optional[int] = Field(0, description="事件优先级")
class BaseEventData(BaseModel):
"""
事件数据的基类,所有具体事件数据类应继承自此类
@@ -244,6 +253,7 @@ class DiscoverMediaSource(BaseModel):
api_path: str = Field(..., description="媒体数据源API地址")
filter_params: Optional[Dict[str, Any]] = Field(default=None, description="过滤参数")
filter_ui: Optional[List[dict]] = Field(default=[], description="过滤参数UI配置")
depends: Optional[Dict[str, list]] = Field(default=None, description="UI依赖关系字典")
class DiscoverSourceEventData(ChainEventData):

View File

@@ -115,3 +115,9 @@ class SiteUserData(BaseModel):
class SiteAuth(BaseModel):
site: Optional[str] = None
params: Optional[Dict[str, Union[int, str]]] = Field(default_factory=dict)
class SiteCategory(BaseModel):
    """
    A torrent category exposed by a site.
    """
    # numeric category id
    id: Optional[int] = None
    # category code/name — semantics not shown here; TODO confirm against caller
    cat: Optional[str] = None
    # human-readable description
    desc: Optional[str] = None

View File

@@ -21,6 +21,8 @@ class TransferTorrent(BaseModel):
tags: Optional[str] = None
size: Optional[int] = 0
userid: Optional[str] = None
progress: Optional[float] = 0
state: Optional[str] = None
class DownloadingTorrent(BaseModel):

77
app/schemas/workflow.py Normal file
View File

@@ -0,0 +1,77 @@
from typing import Optional, List
from pydantic import BaseModel, Field
from app.schemas.context import Context, MediaInfo
from app.schemas.file import FileItem
from app.schemas.download import DownloadTask
from app.schemas.site import Site
from app.schemas.subscribe import Subscribe
from app.schemas.message import Notification
from app.schemas.event import Event
class Workflow(BaseModel):
    """
    Workflow information (API schema, built from the ORM model).
    """
    id: Optional[int] = Field(None, description="工作流ID")
    name: Optional[str] = Field(None, description="工作流名称")
    description: Optional[str] = Field(None, description="工作流描述")
    # crontab expression consumed by the scheduler (CronTrigger.from_crontab)
    timer: Optional[str] = Field(None, description="定时器")
    state: Optional[str] = Field(None, description="状态")
    # id of the action currently/last executed
    current_action: Optional[str] = Field(None, description="已执行动作")
    result: Optional[str] = Field(None, description="任务执行结果")
    run_count: Optional[int] = Field(0, description="已执行次数")
    # list of Action definitions
    actions: Optional[list] = Field([], description="任务列表")
    # list of ActionFlow edges connecting actions
    flows: Optional[list] = Field([], description="任务流")
    add_time: Optional[str] = Field(None, description="创建时间")
    last_time: Optional[str] = Field(None, description="最后执行时间")

    class Config:
        # allow building instances directly from ORM objects (pydantic v1)
        orm_mode = True
class ActionParams(BaseModel):
    """
    Base parameters shared by all workflow actions.
    """
    # whether the action should be executed repeatedly
    loop: Optional[bool] = Field(False, description="是否需要循环")
    # interval between loop iterations, in seconds
    loop_interval: Optional[int] = Field(0, description="循环间隔 (秒)")
class Action(BaseModel):
    """
    A single action node in a workflow.
    """
    id: Optional[str] = Field(None, description="动作ID")
    # action type — the implementing class name
    type: Optional[str] = Field(None, description="动作类型 (类名)")
    name: Optional[str] = Field(None, description="动作名称")
    description: Optional[str] = Field(None, description="动作描述")
    # position on the editor canvas — presumably {x, y}; TODO confirm
    position: Optional[dict] = Field({}, description="位置")
    # action-specific parameters (see ActionParams and subclasses)
    data: Optional[dict] = Field({}, description="参数")
class ActionContext(BaseModel):
    """
    Base execution context shared between workflow actions; each field is a
    channel through which one action passes data to the next.
    """
    # free-form text content
    content: Optional[str] = Field(None, description="文本类内容")
    # torrent/resource contexts
    torrents: Optional[List[Context]] = Field([], description="资源列表")
    # recognized media items
    medias: Optional[List[MediaInfo]] = Field([], description="媒体列表")
    # file items
    fileitems: Optional[List[FileItem]] = Field([], description="文件列表")
    # download tasks produced/consumed by actions
    downloads: Optional[List[DownloadTask]] = Field([], description="下载任务列表")
    # sites involved in the run
    sites: Optional[List[Site]] = Field([], description="站点列表")
    # subscriptions involved in the run
    subscribes: Optional[List[Subscribe]] = Field([], description="订阅列表")
    # notifications to send
    messages: Optional[List[Notification]] = Field([], description="消息列表")
    # events to dispatch
    events: Optional[List[Event]] = Field([], description="事件列表")
class ActionFlow(BaseModel):
    """
    A directed edge in the workflow graph, connecting two actions.
    """
    id: Optional[str] = Field(None, description="流程ID")
    # id of the source action
    source: Optional[str] = Field(None, description="源动作")
    # id of the target action
    target: Optional[str] = Field(None, description="目标动作")
    # whether the edge is rendered animated in the UI
    animated: Optional[bool] = Field(True, description="是否动画流程")

View File

@@ -3,6 +3,7 @@ from contextlib import asynccontextmanager
from fastapi import FastAPI
from app.startup.workflow_initializer import init_workflow, stop_workflow
from app.startup.modules_initializer import shutdown_modules, start_modules
from app.startup.plugins_initializer import init_plugins_async
from app.startup.routers_initializer import init_routers
@@ -16,6 +17,8 @@ async def lifespan(app: FastAPI):
print("Starting up...")
# 启动模块
start_modules(app)
# 初始化工作流动作
init_workflow(app)
# 初始化路由
init_routers(app)
# 初始化插件
@@ -35,3 +38,6 @@ async def lifespan(app: FastAPI):
print(f"Error during plugin installation shutdown: {e}")
# 清理模块
shutdown_modules(app)
# 关闭工作流
stop_workflow(app)

View File

@@ -0,0 +1,17 @@
from fastapi import FastAPI
from app.core.workflow import WorkFlowManager
def init_workflow(_: FastAPI):
    """
    Initialize workflow actions by instantiating the WorkFlowManager
    singleton at application startup. The FastAPI app is unused.
    """
    WorkFlowManager()
def stop_workflow(_: FastAPI):
    """
    Stop workflow actions by shutting down the WorkFlowManager at
    application shutdown. The FastAPI app is unused.
    """
    WorkFlowManager().stop()

View File

@@ -42,7 +42,7 @@ class SecurityUtils:
@staticmethod
def is_safe_url(url: str, allowed_domains: Union[Set[str], List[str]], strict: bool = False) -> bool:
"""
验证URL是否在允许的域名列表中包括带有端口的域名
验证URL是否在允许的域名列表中包括带有端口的域名
:param url: 需要验证的 URL
:param allowed_domains: 允许的域名集合,域名可以包含端口
@@ -65,7 +65,6 @@ class SecurityUtils:
netloc = parsed_url.netloc.lower()
if not netloc:
return False
netloc_no_port = netloc.split(":")[0]
# 检查每个允许的域名
allowed_domains = {d.lower() for d in allowed_domains}
@@ -79,7 +78,7 @@ class SecurityUtils:
return True
else:
# 非严格模式下,允许子域名匹配
if netloc_no_port == allowed_netloc or netloc_no_port.endswith('.' + allowed_netloc):
if netloc == allowed_netloc or netloc.endswith('.' + allowed_netloc):
return True
return False

View File

@@ -63,3 +63,5 @@ OCR_HOST=https://movie-pilot.org
PLUGIN_MARKET=https://github.com/jxxghp/MoviePilot-Plugins,https://github.com/thsrite/MoviePilot-Plugins,https://github.com/InfinityPacer/MoviePilot-Plugins,https://github.com/honue/MoviePilot-Plugins
# 搜索多个名称true/false为true时搜索时会同时搜索中英文及原始名称搜索结果会更全面但会增加搜索时间为false时其中一个名称搜索到结果或全部名称搜索完毕即停止
SEARCH_MULTIPLE_NAME=true
# 为指定字幕添加.default后缀设置为默认字幕支持为'zh-cn''zh-tw''eng'添加默认字幕未定义或设置为None则不添加
DEFAULT_SUB=None

View File

@@ -0,0 +1,24 @@
"""2.1.1
Revision ID: 279a949d81b6
Revises: ca5461f314f2
Create Date: 2025-02-14 19:02:24.989349
"""
from app.chain.torrents import TorrentsChain
# revision identifiers, used by Alembic.
revision = '279a949d81b6'
down_revision = 'ca5461f314f2'
branch_labels = None
depends_on = None
def upgrade() -> None:
    """
    Data-only migration for 2.1.1: clear the cached torrents once.
    No schema changes are performed.
    """
    # Flush the torrent cache once during upgrade
    TorrentsChain().clear_torrents()
def downgrade() -> None:
    """No-op: the cache clear in upgrade() cannot be reversed."""
    pass

View File

@@ -7,9 +7,8 @@ Create Date: 2024-12-24 13:29:32.225532
"""
import contextlib
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import sqlite
from alembic import op
# revision identifiers, used by Alembic.
revision = '55390f1f77c1'

View File

@@ -0,0 +1,29 @@
"""2.1.2
Revision ID: 610bb05ddeef
Revises: 279a949d81b6
Create Date: 2025-02-24 07:52:00.042837
"""
import contextlib
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import sqlite
# revision identifiers, used by Alembic.
revision = '610bb05ddeef'
down_revision = '279a949d81b6'
branch_labels = None
depends_on = None
def upgrade() -> None:
    """
    Schema migration for 2.1.2: add the JSON ``flows`` column to the
    ``workflow`` table. Errors are suppressed so re-running against a
    database that already has the column is harmless.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    with contextlib.suppress(Exception):
        op.add_column('workflow', sa.Column('flows', sa.JSON(), nullable=True))
    # ### end Alembic commands ###
def downgrade() -> None:
    """No-op: the added ``flows`` column is intentionally kept on downgrade."""
    pass

View File

@@ -7,9 +7,8 @@ Create Date: 2025-02-06 18:28:00.644571
"""
import contextlib
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import sqlite
from alembic import op
# revision identifiers, used by Alembic.
revision = 'ca5461f314f2'

View File

@@ -32,6 +32,7 @@ func_timeout==4.3.5
bs4~=0.0.1
beautifulsoup4~=4.12.2
pillow~=10.4.0
pillow-avif-plugin~=1.4.6
pyTelegramBotAPI~=4.12.0
playwright~=1.37.0
cf-clearance~=0.31.0

View File

@@ -1,20 +1,22 @@
from distutils.core import setup
from Cython.Build import cythonize
module_list = ['app/helper/sites.py']
setup(
name="",
author="",
zip_safe=False,
include_package_data=True,
ext_modules=cythonize(
module_list=module_list,
nthreads=0,
compiler_directives={"language_level": "3"},
),
script_args=["build_ext", "-j", '2', "--inplace"],
)
name="MoviePilot",
author="jxxghp",
zip_safe=False,
include_package_data=True,
ext_modules=cythonize(
module_list=module_list,
nthreads=0,
compiler_directives={
"language_level": "3",
"binding": False,
"nonecheck": False
},
),
script_args=["build_ext", "-j", '2', "--inplace"],
)

View File

@@ -1,2 +1,2 @@
APP_VERSION = 'v2.2.8'
FRONTEND_VERSION = 'v2.2.8'
APP_VERSION = 'v2.3.1'
FRONTEND_VERSION = 'v2.3.1'