mirror of
https://github.com/jxxghp/MoviePilot.git
synced 2026-05-08 01:03:08 +08:00
Compare commits
240 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
cde4db1a56 | ||
|
|
29ae910953 | ||
|
|
314f90cc40 | ||
|
|
1c22e3d024 | ||
|
|
233d62479f | ||
|
|
6974f2ebd7 | ||
|
|
c030166cf5 | ||
|
|
4c511eaea6 | ||
|
|
6e443a1127 | ||
|
|
896e473c41 | ||
|
|
12f10ebedf | ||
|
|
ba9f85747c | ||
|
|
2954c02a7c | ||
|
|
312e602f12 | ||
|
|
ed37fcbb07 | ||
|
|
6acf8fbf00 | ||
|
|
a1e178c805 | ||
|
|
922e2fc446 | ||
|
|
db4c8cb3f2 | ||
|
|
1c578746fe | ||
|
|
68f88117b6 | ||
|
|
108c0a89f6 | ||
|
|
92dacdf6a2 | ||
|
|
6aa684d6a5 | ||
|
|
efece8cc56 | ||
|
|
383c8ca19a | ||
|
|
0a73681280 | ||
|
|
c1ecda280c | ||
|
|
825fc35134 | ||
|
|
8f543ca602 | ||
|
|
f0ecc1a497 | ||
|
|
71f170a1ad | ||
|
|
3709b65b0e | ||
|
|
9d6eb0f1e1 | ||
|
|
c93306147b | ||
|
|
5e8f924a2f | ||
|
|
54988d6397 | ||
|
|
112761dc4c | ||
|
|
ef20508840 | ||
|
|
589a1765ed | ||
|
|
2c666e24f3 | ||
|
|
168e3c5533 | ||
|
|
cda8b2573a | ||
|
|
4cb4eb23b8 | ||
|
|
f208b65570 | ||
|
|
8a0a530036 | ||
|
|
76643f13ed | ||
|
|
6992284a77 | ||
|
|
9a142799cd | ||
|
|
027d1567c3 | ||
|
|
65af737dfd | ||
|
|
48aa0e3d0b | ||
|
|
b4e31893ff | ||
|
|
4f1b95352a | ||
|
|
ca664cb569 | ||
|
|
fe4ea73286 | ||
|
|
9e9cca6de4 | ||
|
|
2e7e74c803 | ||
|
|
916597047d | ||
|
|
83fc474dbe | ||
|
|
f67bf49e69 | ||
|
|
bf9043f526 | ||
|
|
a98de604a1 | ||
|
|
e160a745a7 | ||
|
|
7f2c6ef167 | ||
|
|
2086651dbe | ||
|
|
132fde2308 | ||
|
|
4e27a1e623 | ||
|
|
a453831deb | ||
|
|
1035ceb4ac | ||
|
|
b7cb917347 | ||
|
|
680ad164dc | ||
|
|
aed68253e9 | ||
|
|
b83c7a5656 | ||
|
|
491456b0a2 | ||
|
|
84465a6536 | ||
|
|
9acbcf4922 | ||
|
|
8dc4290695 | ||
|
|
5c95945691 | ||
|
|
11115d50fb | ||
|
|
7f83d56a7e | ||
|
|
28805e9e17 | ||
|
|
88a098abc1 | ||
|
|
a3cc9830de | ||
|
|
43623efa99 | ||
|
|
ff73b2cb5d | ||
|
|
6cab14366c | ||
|
|
576d215d8c | ||
|
|
a2c10c86bf | ||
|
|
21bede3f00 | ||
|
|
0a39322281 | ||
|
|
be323d3da1 | ||
|
|
fa8860bf62 | ||
|
|
a700958edb | ||
|
|
9349973d16 | ||
|
|
c0d3637d12 | ||
|
|
79473ca229 | ||
|
|
fccbe39547 | ||
|
|
85324acacc | ||
|
|
9dec4d704b | ||
|
|
72732277a1 | ||
|
|
8d737f9e37 | ||
|
|
96b3746caa | ||
|
|
c690ea3c39 | ||
|
|
3282fb88e0 | ||
|
|
b9c2b9a044 | ||
|
|
24b58dc002 | ||
|
|
42c56497c6 | ||
|
|
c7512d1580 | ||
|
|
7d25bf7b48 | ||
|
|
99daa3a95e | ||
|
|
0a923bced9 | ||
|
|
06e3b0def2 | ||
|
|
0feecc3eca | ||
|
|
0afbc58263 | ||
|
|
7c7561029a | ||
|
|
65683999e1 | ||
|
|
f72e26015f | ||
|
|
b4e5c50655 | ||
|
|
f395dc68c3 | ||
|
|
27cf5bb7e6 | ||
|
|
9b573535cd | ||
|
|
cb32305b86 | ||
|
|
f7164450d0 | ||
|
|
344862dbd4 | ||
|
|
f1d0e9d50a | ||
|
|
9ba9e8f41c | ||
|
|
78fc5b7017 | ||
|
|
fe07830b71 | ||
|
|
350f1faf2a | ||
|
|
103cfe0b47 | ||
|
|
0953c1be16 | ||
|
|
c299bf6f7c | ||
|
|
c0eb9d824c | ||
|
|
ebffdebdb2 | ||
|
|
acd9e38477 | ||
|
|
9f4cf530f8 | ||
|
|
84897aa592 | ||
|
|
23c5982f5a | ||
|
|
1849930b72 | ||
|
|
4f1d3a7572 | ||
|
|
824c3ac5d6 | ||
|
|
1cec6ed6d1 | ||
|
|
fff75c7fe2 | ||
|
|
81fecf1e07 | ||
|
|
ad8f687f8e | ||
|
|
a3172d7503 | ||
|
|
8d5e0b26d5 | ||
|
|
b1b980f550 | ||
|
|
8196589cff | ||
|
|
cb9f41cb65 | ||
|
|
cb4981adb3 | ||
|
|
6880b42a84 | ||
|
|
97054adc61 | ||
|
|
de94e5d595 | ||
|
|
a5a734d091 | ||
|
|
efb607d22f | ||
|
|
d0b2787a7c | ||
|
|
d5988ff443 | ||
|
|
96b4f1b575 | ||
|
|
bb6b8439c7 | ||
|
|
9cdce4509d | ||
|
|
3956ab1fe8 | ||
|
|
14686fdb03 | ||
|
|
32892ab747 | ||
|
|
79c637e003 | ||
|
|
d7c260715a | ||
|
|
2dfb089a39 | ||
|
|
e04179525b | ||
|
|
d044364c68 | ||
|
|
a0f912ffbe | ||
|
|
d7c8b08d7a | ||
|
|
f752082e1b | ||
|
|
201ec21adf | ||
|
|
57590323b2 | ||
|
|
4636c7ada7 | ||
|
|
4c86a4da5f | ||
|
|
8dc9acf071 | ||
|
|
abebae3664 | ||
|
|
4f7d8866a0 | ||
|
|
cceb22d729 | ||
|
|
89edbb93f5 | ||
|
|
4ffb406172 | ||
|
|
293e417865 | ||
|
|
510c20dc70 | ||
|
|
8e1810955b | ||
|
|
73f732fe1d | ||
|
|
d6f5160959 | ||
|
|
d64a7086dd | ||
|
|
825d9b768f | ||
|
|
f758a47f4f | ||
|
|
fc69d7e6c1 | ||
|
|
edc30266c8 | ||
|
|
665da9dad3 | ||
|
|
4048acf60e | ||
|
|
f116229ecc | ||
|
|
f6a2efb256 | ||
|
|
af3a50f7ea | ||
|
|
44a0e5b4a7 | ||
|
|
f40a1246ff | ||
|
|
dd890c410c | ||
|
|
8fd7f2c875 | ||
|
|
8c09b3482f | ||
|
|
0066247a2b | ||
|
|
c7926fc575 | ||
|
|
ac5b9fd4e5 | ||
|
|
42dc539df6 | ||
|
|
e60d785a11 | ||
|
|
33558d6197 | ||
|
|
46d2ffeb75 | ||
|
|
8e4bce2f95 | ||
|
|
00f1f06e3d | ||
|
|
fe37bde993 | ||
|
|
6c3bb8893f | ||
|
|
ca4d64819d | ||
|
|
0a53635d35 | ||
|
|
921e24b049 | ||
|
|
24c21ed04e | ||
|
|
777785579e | ||
|
|
8061a06fe4 | ||
|
|
438ce6ee3e | ||
|
|
77e19c3de7 | ||
|
|
49881c9c54 | ||
|
|
5da28f702f | ||
|
|
dfbd9f3b30 | ||
|
|
d6c6ee9b4e | ||
|
|
4b27404ee5 | ||
|
|
3a826b343a | ||
|
|
851aa5f9e2 | ||
|
|
9ef1f56ea1 | ||
|
|
78d51b7621 | ||
|
|
c12e2bdba7 | ||
|
|
fda11f427c | ||
|
|
d809330225 | ||
|
|
ce4a2314d8 | ||
|
|
c19e825e94 | ||
|
|
c45d64b554 | ||
|
|
0689b2e331 | ||
|
|
c8e337440e | ||
|
|
726e7dfbd4 |
45
.github/ISSUE_TEMPLATE/rfc.yml
vendored
Normal file
45
.github/ISSUE_TEMPLATE/rfc.yml
vendored
Normal file
@@ -0,0 +1,45 @@
|
||||
name: 功能提案
|
||||
description: Request for Comments
|
||||
title: "[RFC]"
|
||||
labels: ["RFC"]
|
||||
body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: |
|
||||
一份提案(RFC)定位为 **「在某功能/重构的具体开发前,用于开发者间 review 技术设计/方案的文档」**,
|
||||
目的是让协作的开发者间清晰的知道「要做什么」和「具体会怎么做」,以及所有的开发者都能公开透明的参与讨论;
|
||||
以便评估和讨论产生的影响 (遗漏的考虑、向后兼容性、与现有功能的冲突),
|
||||
因此提案侧重在对解决问题的 **方案、设计、步骤** 的描述上。
|
||||
|
||||
如果仅希望讨论是否添加或改进某功能本身,请使用 -> [Issue: 功能改进](https://github.com/jxxghp/MoviePilot/issues/new?assignees=&labels=feature+request&projects=&template=feature_request.yml&title=%5BFeature+Request%5D%3A+)
|
||||
- type: textarea
|
||||
id: background
|
||||
attributes:
|
||||
label: 背景 or 问题
|
||||
description: 简单描述遇到的什么问题或需要改动什么。可以引用其他 issue、讨论、文档等。
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: goal
|
||||
attributes:
|
||||
label: "目标 & 方案简述"
|
||||
description: 简单描述提案此提案实现后,**预期的目标效果**,以及简单大致描述会采取的方案/步骤,可能会/不会产生什么影响。
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
id: design
|
||||
attributes:
|
||||
label: "方案设计 & 实现步骤"
|
||||
description: |
|
||||
详细描述你设计的具体方案,可以考虑拆分列表或要点,一步步描述具体打算如何实现的步骤和相关细节。
|
||||
这部份不需要一次性写完整,即使在创建完此提案 issue 后,依旧可以再次编辑修改。
|
||||
validations:
|
||||
required: false
|
||||
- type: textarea
|
||||
id: alternative
|
||||
attributes:
|
||||
label: "替代方案 & 对比"
|
||||
description: |
|
||||
[可选] 为来实现目标效果,还考虑过什么其他方案,有什么对比?
|
||||
validations:
|
||||
required: false
|
||||
3
.github/workflows/build.yml
vendored
3
.github/workflows/build.yml
vendored
@@ -5,10 +5,7 @@ on:
|
||||
branches:
|
||||
- v2
|
||||
paths:
|
||||
- '.github/workflows/build.yml'
|
||||
- 'Dockerfile'
|
||||
- 'version.py'
|
||||
- 'requirements.in'
|
||||
|
||||
jobs:
|
||||
Docker-build:
|
||||
|
||||
@@ -10,10 +10,7 @@ ENV LANG="C.UTF-8" \
|
||||
UMASK=000 \
|
||||
PORT=3001 \
|
||||
NGINX_PORT=3000 \
|
||||
PROXY_HOST="" \
|
||||
MOVIEPILOT_AUTO_UPDATE=false \
|
||||
AUTH_SITE="iyuu" \
|
||||
IYUU_SIGN=""
|
||||
MOVIEPILOT_AUTO_UPDATE=release
|
||||
WORKDIR "/app"
|
||||
RUN apt-get update -y \
|
||||
&& apt-get upgrade -y \
|
||||
|
||||
@@ -6,6 +6,7 @@
|
||||

|
||||

|
||||

|
||||

|
||||

|
||||
|
||||
|
||||
|
||||
@@ -9,7 +9,9 @@ from app.core.context import MediaInfo, Context, TorrentInfo
|
||||
from app.core.metainfo import MetaInfo
|
||||
from app.core.security import verify_token
|
||||
from app.db.models.user import User
|
||||
from app.db.systemconfig_oper import SystemConfigOper
|
||||
from app.db.user_oper import get_current_active_user
|
||||
from app.schemas.types import SystemConfigKey
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
@@ -49,7 +51,7 @@ def download(
|
||||
torrent_info=torrentinfo
|
||||
)
|
||||
did = DownloadChain().download_single(context=context, username=current_user.name,
|
||||
downloader=downloader, save_path=save_path)
|
||||
downloader=downloader, save_path=save_path, source="Manual")
|
||||
if not did:
|
||||
return schemas.Response(success=False, message="任务添加失败")
|
||||
return schemas.Response(success=True, data={
|
||||
@@ -82,7 +84,7 @@ def add(
|
||||
torrent_info=torrentinfo
|
||||
)
|
||||
did = DownloadChain().download_single(context=context, username=current_user.name,
|
||||
downloader=downloader, save_path=save_path)
|
||||
downloader=downloader, save_path=save_path, source="Manual")
|
||||
if not did:
|
||||
return schemas.Response(success=False, message="任务添加失败")
|
||||
return schemas.Response(success=True, data={
|
||||
@@ -111,6 +113,17 @@ def stop(hashString: str,
|
||||
return schemas.Response(success=True if ret else False)
|
||||
|
||||
|
||||
@router.get("/clients", summary="查询可用下载器", response_model=List[dict])
|
||||
def clients(_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||
"""
|
||||
查询可用下载器
|
||||
"""
|
||||
downloaders: List[dict] = SystemConfigOper().get(SystemConfigKey.Downloaders)
|
||||
if downloaders:
|
||||
return [{"name": d.get("name"), "type": d.get("type")} for d in downloaders if d.get("enabled")]
|
||||
return []
|
||||
|
||||
|
||||
@router.delete("/{hashString}", summary="删除下载任务", response_model=schemas.Response)
|
||||
def delete(hashString: str,
|
||||
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||
|
||||
@@ -111,14 +111,11 @@ def scrape(fileitem: schemas.FileItem,
|
||||
scrape_path = Path(fileitem.path)
|
||||
meta = MetaInfoPath(scrape_path)
|
||||
mediainfo = chain.recognize_by_meta(meta)
|
||||
if not media_info:
|
||||
if not mediainfo:
|
||||
return schemas.Response(success=False, message="刮削失败,无法识别媒体信息")
|
||||
if storage == "local":
|
||||
if not scrape_path.exists():
|
||||
return schemas.Response(success=False, message="刮削路径不存在")
|
||||
else:
|
||||
if not fileitem.fileid:
|
||||
return schemas.Response(success=False, message="刮削文件ID无效")
|
||||
# 手动刮削
|
||||
chain.scrape_metadata(fileitem=fileitem, meta=meta, mediainfo=mediainfo)
|
||||
return schemas.Response(success=True, message=f"{fileitem.path} 刮削完成")
|
||||
|
||||
@@ -12,8 +12,10 @@ from app.core.security import verify_token
|
||||
from app.db import get_db
|
||||
from app.db.mediaserver_oper import MediaServerOper
|
||||
from app.db.models import MediaServerItem
|
||||
from app.db.systemconfig_oper import SystemConfigOper
|
||||
from app.helper.mediaserver import MediaServerHelper
|
||||
from app.schemas import MediaType, NotExistMediaInfo
|
||||
from app.schemas.types import SystemConfigKey
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
@@ -143,3 +145,14 @@ def library(server: str, hidden: bool = False,
|
||||
获取媒体服务器媒体库列表
|
||||
"""
|
||||
return MediaServerChain().librarys(server=server, username=userinfo.username, hidden=hidden) or []
|
||||
|
||||
|
||||
@router.get("/clients", summary="查询可用媒体服务器", response_model=List[dict])
|
||||
def clients(_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||
"""
|
||||
查询可用媒体服务器
|
||||
"""
|
||||
mediaservers: List[dict] = SystemConfigOper().get(SystemConfigKey.MediaServers)
|
||||
if mediaservers:
|
||||
return [{"name": d.get("name"), "type": d.get("type")} for d in mediaservers if d.get("enabled")]
|
||||
return []
|
||||
|
||||
@@ -8,6 +8,7 @@ from app import schemas
|
||||
from app.chain.site import SiteChain
|
||||
from app.chain.torrents import TorrentsChain
|
||||
from app.core.event import EventManager
|
||||
from app.core.plugin import PluginManager
|
||||
from app.core.security import verify_token
|
||||
from app.db import get_db
|
||||
from app.db.models import User
|
||||
@@ -331,6 +332,31 @@ def read_rss_sites(db: Session = Depends(get_db),
|
||||
return rss_sites
|
||||
|
||||
|
||||
@router.get("/auth", summary="查询认证站点", response_model=dict)
|
||||
def read_auth_sites(_: schemas.TokenPayload = Depends(verify_token)) -> dict:
|
||||
"""
|
||||
获取可认证站点列表
|
||||
"""
|
||||
return SitesHelper().get_authsites()
|
||||
|
||||
|
||||
@router.post("/auth", summary="用户站点认证", response_model=schemas.Response)
|
||||
def auth_site(
|
||||
auth_info: schemas.SiteAuth,
|
||||
_: User = Depends(get_current_active_superuser)
|
||||
) -> Any:
|
||||
"""
|
||||
用户站点认证
|
||||
"""
|
||||
if not auth_info or not auth_info.site or not auth_info.params:
|
||||
return schemas.Response(success=False, message="请输入认证站点和认证参数")
|
||||
status, msg = SitesHelper().check_user(auth_info.site, auth_info.params)
|
||||
SystemConfigOper().set(SystemConfigKey.UserSiteAuthParams, auth_info.dict())
|
||||
PluginManager().init_config()
|
||||
Scheduler().init_plugin_jobs()
|
||||
return schemas.Response(success=status, message=msg)
|
||||
|
||||
|
||||
@router.get("/{site_id}", summary="站点详情", response_model=schemas.Site)
|
||||
def read_site(
|
||||
site_id: int,
|
||||
|
||||
@@ -151,8 +151,6 @@ def rename(fileitem: schemas.FileItem,
|
||||
"""
|
||||
if not new_name:
|
||||
return schemas.Response(success=False, message="新名称为空")
|
||||
if fileitem.storage != 'local' and not fileitem.fileid:
|
||||
return schemas.Response(success=False, message="资源ID获取失败")
|
||||
result = StorageChain().rename_file(fileitem, new_name)
|
||||
if result:
|
||||
if recursive:
|
||||
|
||||
@@ -124,6 +124,27 @@ def update_subscribe(
|
||||
return schemas.Response(success=True)
|
||||
|
||||
|
||||
@router.put("/status/{subid}", summary="更新订阅状态", response_model=schemas.Response)
|
||||
def update_subscribe_status(
|
||||
subid: int,
|
||||
state: str,
|
||||
db: Session = Depends(get_db),
|
||||
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||
"""
|
||||
更新订阅状态
|
||||
"""
|
||||
subscribe = Subscribe.get(db, subid)
|
||||
if not subscribe:
|
||||
return schemas.Response(success=False, message="订阅不存在")
|
||||
valid_states = ["R", "P", "S"]
|
||||
if state not in valid_states:
|
||||
return schemas.Response(success=False, message="无效的订阅状态")
|
||||
subscribe.update(db, {
|
||||
"state": state
|
||||
})
|
||||
return schemas.Response(success=True)
|
||||
|
||||
|
||||
@router.get("/media/{mediaid}", summary="查询订阅", response_model=schemas.Subscribe)
|
||||
def subscribe_mediaid(
|
||||
mediaid: str,
|
||||
|
||||
@@ -16,6 +16,7 @@ from app import schemas
|
||||
from app.chain.search import SearchChain
|
||||
from app.chain.system import SystemChain
|
||||
from app.core.config import global_vars, settings
|
||||
from app.core.metainfo import MetaInfo
|
||||
from app.core.module import ModuleManager
|
||||
from app.core.security import verify_apitoken, verify_resource_token, verify_token
|
||||
from app.db.models import User
|
||||
@@ -159,7 +160,8 @@ def cache_img(
|
||||
本地缓存图片文件,支持 HTTP 缓存,如果启用全局图片缓存,则使用磁盘缓存
|
||||
"""
|
||||
# 如果没有启用全局图片缓存,则不使用磁盘缓存
|
||||
return fetch_image(url=url, proxy=False, use_disk_cache=settings.GLOBAL_IMAGE_CACHE, if_none_match=if_none_match)
|
||||
proxy = "doubanio.com" not in url
|
||||
return fetch_image(url=url, proxy=proxy, use_disk_cache=settings.GLOBAL_IMAGE_CACHE, if_none_match=if_none_match)
|
||||
|
||||
|
||||
@router.get("/global", summary="查询非敏感系统设置", response_model=schemas.Response)
|
||||
@@ -384,9 +386,14 @@ def ruletest(title: str,
|
||||
if not rulegroup:
|
||||
return schemas.Response(success=False, message=f"过滤规则组 {rulegroup_name} 不存在!")
|
||||
|
||||
# 根据标题查询媒体信息
|
||||
media_info = SearchChain().recognize_media(MetaInfo(title=title, subtitle=subtitle))
|
||||
if not media_info:
|
||||
return schemas.Response(success=False, message="未识别到媒体信息!")
|
||||
|
||||
# 过滤
|
||||
result = SearchChain().filter_torrents(rule_groups=[rulegroup.name],
|
||||
torrent_list=[torrent])
|
||||
torrent_list=[torrent], mediainfo=media_info)
|
||||
if not result:
|
||||
return schemas.Response(success=False, message="不符合过滤规则!")
|
||||
return schemas.Response(success=True, data={
|
||||
|
||||
@@ -20,22 +20,42 @@ router = APIRouter()
|
||||
|
||||
|
||||
class ManualTransferItem(BaseModel):
|
||||
fileitem: FileItem = None,
|
||||
logid: Optional[int] = None,
|
||||
target_storage: Optional[str] = None,
|
||||
target_path: Optional[str] = None,
|
||||
tmdbid: Optional[int] = None,
|
||||
doubanid: Optional[str] = None,
|
||||
type_name: Optional[str] = None,
|
||||
season: Optional[int] = None,
|
||||
transfer_type: Optional[str] = None,
|
||||
episode_format: Optional[str] = None,
|
||||
episode_detail: Optional[str] = None,
|
||||
episode_part: Optional[str] = None,
|
||||
episode_offset: Optional[int] = 0,
|
||||
min_filesize: Optional[int] = 0,
|
||||
scrape: bool = False,
|
||||
from_history: bool = False
|
||||
# 文件项
|
||||
fileitem: FileItem = None
|
||||
# 日志ID
|
||||
logid: Optional[int] = None
|
||||
# 目标存储
|
||||
target_storage: Optional[str] = None
|
||||
# 目标路径
|
||||
target_path: Optional[str] = None
|
||||
# TMDB ID
|
||||
tmdbid: Optional[int] = None
|
||||
# 豆瓣ID
|
||||
doubanid: Optional[str] = None
|
||||
# 类型
|
||||
type_name: Optional[str] = None
|
||||
# 季号
|
||||
season: Optional[int] = None
|
||||
# 整理方式
|
||||
transfer_type: Optional[str] = None
|
||||
# 自定义格式
|
||||
episode_format: Optional[str] = None
|
||||
# 指定集数
|
||||
episode_detail: Optional[str] = None
|
||||
# 指定PART
|
||||
episode_part: Optional[str] = None
|
||||
# 集数偏移
|
||||
episode_offset: Optional[str] = None
|
||||
# 最小文件大小
|
||||
min_filesize: Optional[int] = 0
|
||||
# 刮削
|
||||
scrape: bool = False
|
||||
# 媒体库类型子目录
|
||||
library_type_folder: Optional[bool] = None
|
||||
# 媒体库类别子目录
|
||||
library_category_folder: Optional[bool] = None
|
||||
# 复用历史识别信息
|
||||
from_history: Optional[bool] = False
|
||||
|
||||
|
||||
@router.get("/name", summary="查询整理后的名称", response_model=schemas.Response)
|
||||
@@ -148,6 +168,8 @@ def manual_transfer(transer_item: ManualTransferItem,
|
||||
epformat=epformat,
|
||||
min_filesize=transer_item.min_filesize,
|
||||
scrape=transer_item.scrape,
|
||||
library_type_folder=transer_item.library_type_folder,
|
||||
library_category_folder=transer_item.library_category_folder,
|
||||
force=force
|
||||
)
|
||||
# 失败
|
||||
|
||||
@@ -385,6 +385,7 @@ class ChainBase(metaclass=ABCMeta):
|
||||
target_directory: TransferDirectoryConf = None,
|
||||
target_storage: str = None, target_path: Path = None,
|
||||
transfer_type: str = None, scrape: bool = None,
|
||||
library_type_folder: bool = None, library_category_folder: bool = None,
|
||||
episodes_info: List[TmdbEpisode] = None) -> Optional[TransferInfo]:
|
||||
"""
|
||||
文件转移
|
||||
@@ -396,6 +397,8 @@ class ChainBase(metaclass=ABCMeta):
|
||||
:param target_path: 目标路径
|
||||
:param transfer_type: 转移模式
|
||||
:param scrape: 是否刮削元数据
|
||||
:param library_type_folder: 是否按类型创建目录
|
||||
:param library_category_folder: 是否按类别创建目录
|
||||
:param episodes_info: 当前季的全部集信息
|
||||
:return: {path, target_path, message}
|
||||
"""
|
||||
@@ -404,6 +407,8 @@ class ChainBase(metaclass=ABCMeta):
|
||||
target_directory=target_directory,
|
||||
target_path=target_path, target_storage=target_storage,
|
||||
transfer_type=transfer_type, scrape=scrape,
|
||||
library_type_folder=library_type_folder,
|
||||
library_category_folder=library_category_folder,
|
||||
episodes_info=episodes_info)
|
||||
|
||||
def transfer_completed(self, hashs: str, downloader: str = None) -> None:
|
||||
|
||||
@@ -54,6 +54,11 @@ class CommandChain(ChainBase, metaclass=Singleton):
|
||||
"description": "更新站点Cookie",
|
||||
"data": {}
|
||||
},
|
||||
"/site_statistic": {
|
||||
"func": SiteChain().remote_refresh_userdatas,
|
||||
"description": "站点数据统计",
|
||||
"data": {}
|
||||
},
|
||||
"/site_enable": {
|
||||
"func": SiteChain().remote_enable,
|
||||
"description": "启用站点",
|
||||
@@ -402,7 +407,7 @@ class CommandChain(ChainBase, metaclass=Singleton):
|
||||
channel=event_channel, source=event_source, userid=event_user)
|
||||
|
||||
@eventmanager.register(EventType.ModuleReload)
|
||||
def module_reload_event(self, event: ManagerEvent) -> None:
|
||||
def module_reload_event(self, _: ManagerEvent) -> None:
|
||||
"""
|
||||
注册模块重载事件
|
||||
"""
|
||||
|
||||
@@ -20,7 +20,8 @@ from app.helper.message import MessageHelper
|
||||
from app.helper.torrent import TorrentHelper
|
||||
from app.log import logger
|
||||
from app.schemas import ExistMediaInfo, NotExistMediaInfo, DownloadingTorrent, Notification
|
||||
from app.schemas.types import MediaType, TorrentStatus, EventType, MessageChannel, NotificationType
|
||||
from app.schemas.event import ResourceSelectionEventData, ResourceDownloadEventData
|
||||
from app.schemas.types import MediaType, TorrentStatus, EventType, MessageChannel, NotificationType, ChainEventType
|
||||
from app.utils.http import RequestUtils
|
||||
from app.utils.string import StringUtils
|
||||
|
||||
@@ -180,7 +181,7 @@ class DownloadChain(ChainBase):
|
||||
torrent_file, content, download_folder, files, error_msg = self.torrent.download_torrent(
|
||||
url=torrent_url,
|
||||
cookie=site_cookie,
|
||||
ua=torrent.site_ua,
|
||||
ua=torrent.site_ua or settings.USER_AGENT,
|
||||
proxy=torrent.site_proxy)
|
||||
|
||||
if isinstance(content, str):
|
||||
@@ -191,7 +192,7 @@ class DownloadChain(ChainBase):
|
||||
logger.error(f"下载种子文件失败:{torrent.title} - {torrent_url}")
|
||||
self.post_message(Notification(
|
||||
channel=channel,
|
||||
source=source,
|
||||
source=source if channel else None,
|
||||
mtype=NotificationType.Manual,
|
||||
title=f"{torrent.title} 种子下载失败!",
|
||||
text=f"错误信息:{error_msg}\n站点:{torrent.site_name}",
|
||||
@@ -203,11 +204,12 @@ class DownloadChain(ChainBase):
|
||||
|
||||
def download_single(self, context: Context, torrent_file: Path = None,
|
||||
episodes: Set[int] = None,
|
||||
channel: MessageChannel = None, source: str = None,
|
||||
channel: MessageChannel = None,
|
||||
source: str = None,
|
||||
downloader: str = None,
|
||||
save_path: str = None,
|
||||
userid: Union[str, int] = None,
|
||||
username: str = None,
|
||||
downloader: str = None,
|
||||
media_category: str = None) -> Optional[str]:
|
||||
"""
|
||||
下载及发送通知
|
||||
@@ -215,16 +217,42 @@ class DownloadChain(ChainBase):
|
||||
:param torrent_file: 种子文件路径
|
||||
:param episodes: 需要下载的集数
|
||||
:param channel: 通知渠道
|
||||
:param source: 通知来源
|
||||
:param source: 来源(消息通知、Subscribe、Manual等)
|
||||
:param downloader: 下载器
|
||||
:param save_path: 保存路径
|
||||
:param userid: 用户ID
|
||||
:param username: 调用下载的用户名/插件名
|
||||
:param downloader: 下载器
|
||||
:param media_category: 自定义媒体类别
|
||||
"""
|
||||
# 发送资源下载事件,允许外部拦截下载
|
||||
event_data = ResourceDownloadEventData(
|
||||
context=context,
|
||||
episodes=episodes,
|
||||
channel=channel,
|
||||
origin=source,
|
||||
downloader=downloader,
|
||||
options={
|
||||
"save_path": save_path,
|
||||
"userid": userid,
|
||||
"username": username,
|
||||
"media_category": media_category
|
||||
}
|
||||
)
|
||||
# 触发资源下载事件
|
||||
event = eventmanager.send_event(ChainEventType.ResourceDownload, event_data)
|
||||
if event and event.event_data:
|
||||
event_data: ResourceDownloadEventData = event.event_data
|
||||
# 如果事件被取消,跳过资源下载
|
||||
if event_data.cancel:
|
||||
logger.debug(
|
||||
f"Resource download canceled by event: {event_data.source},"
|
||||
f"Reason: {event_data.reason}")
|
||||
return None
|
||||
|
||||
_torrent = context.torrent_info
|
||||
_media = context.media_info
|
||||
_meta = context.meta_info
|
||||
_site_downloader = _torrent.site_downloader
|
||||
|
||||
# 补充完整的media数据
|
||||
if not _media.genre_ids:
|
||||
@@ -251,35 +279,31 @@ class DownloadChain(ChainBase):
|
||||
|
||||
# 下载目录
|
||||
if save_path:
|
||||
# 有自定义下载目录时,尝试匹配目录配置
|
||||
dir_info = self.directoryhelper.get_dir(_media, src_path=Path(save_path), local=True)
|
||||
else:
|
||||
# 根据媒体信息查询下载目录配置
|
||||
dir_info = self.directoryhelper.get_dir(_media, local=True)
|
||||
|
||||
# 拼装子目录
|
||||
if dir_info:
|
||||
# 一级目录
|
||||
if not dir_info.media_type and dir_info.download_type_folder:
|
||||
# 一级自动分类
|
||||
download_dir = Path(dir_info.download_path) / _media.type.value
|
||||
else:
|
||||
# 一级不分类
|
||||
download_dir = Path(dir_info.download_path)
|
||||
|
||||
# 二级目录
|
||||
if not dir_info.media_category and dir_info.download_category_folder and _media and _media.category:
|
||||
# 二级自动分类
|
||||
download_dir = download_dir / _media.category
|
||||
elif save_path:
|
||||
# 自定义下载目录
|
||||
# 下载目录使用自定义的
|
||||
download_dir = Path(save_path)
|
||||
else:
|
||||
# 未找到下载目录,且没有自定义下载目录
|
||||
logger.error(f"未找到下载目录:{_media.type.value} {_media.title_year}")
|
||||
self.messagehelper.put(f"{_media.type.value} {_media.title_year} 未找到下载目录!",
|
||||
title="下载失败", role="system")
|
||||
return None
|
||||
# 根据媒体信息查询下载目录配置
|
||||
dir_info = self.directoryhelper.get_dir(_media, storage="local", include_unsorted=True)
|
||||
# 拼装子目录
|
||||
if dir_info:
|
||||
# 一级目录
|
||||
if not dir_info.media_type and dir_info.download_type_folder:
|
||||
# 一级自动分类
|
||||
download_dir = Path(dir_info.download_path) / _media.type.value
|
||||
else:
|
||||
# 一级不分类
|
||||
download_dir = Path(dir_info.download_path)
|
||||
|
||||
# 二级目录
|
||||
if not dir_info.media_category and dir_info.download_category_folder and _media and _media.category:
|
||||
# 二级自动分类
|
||||
download_dir = download_dir / _media.category
|
||||
else:
|
||||
# 未找到下载目录,且没有自定义下载目录
|
||||
logger.error(f"未找到下载目录:{_media.type.value} {_media.title_year}")
|
||||
self.messagehelper.put(f"{_media.type.value} {_media.title_year} 未找到下载目录!",
|
||||
title="下载失败", role="system")
|
||||
return None
|
||||
|
||||
# 添加下载
|
||||
result: Optional[tuple] = self.download(content=content,
|
||||
@@ -287,7 +311,7 @@ class DownloadChain(ChainBase):
|
||||
episodes=episodes,
|
||||
download_dir=download_dir,
|
||||
category=_media.category,
|
||||
downloader=downloader)
|
||||
downloader=downloader or _site_downloader)
|
||||
if result:
|
||||
_downloader, _hash, error_msg = result
|
||||
else:
|
||||
@@ -335,7 +359,7 @@ class DownloadChain(ChainBase):
|
||||
continue
|
||||
# 只处理视频格式
|
||||
if not Path(file).suffix \
|
||||
or Path(file).suffix not in settings.RMT_MEDIAEXT:
|
||||
or Path(file).suffix.lower() not in settings.RMT_MEDIAEXT:
|
||||
continue
|
||||
files_to_add.append({
|
||||
"download_hash": _hash,
|
||||
@@ -358,7 +382,8 @@ class DownloadChain(ChainBase):
|
||||
"hash": _hash,
|
||||
"context": context,
|
||||
"username": username,
|
||||
"downloader": _downloader
|
||||
"downloader": _downloader,
|
||||
"episodes": episodes
|
||||
})
|
||||
else:
|
||||
# 下载失败
|
||||
@@ -367,7 +392,7 @@ class DownloadChain(ChainBase):
|
||||
# 只发送给对应渠道和用户
|
||||
self.post_message(Notification(
|
||||
channel=channel,
|
||||
source=source,
|
||||
source=source if channel else None,
|
||||
mtype=NotificationType.Manual,
|
||||
title="添加下载任务失败:%s %s"
|
||||
% (_media.title_year, _meta.season_episode),
|
||||
@@ -386,7 +411,8 @@ class DownloadChain(ChainBase):
|
||||
source: str = None,
|
||||
userid: str = None,
|
||||
username: str = None,
|
||||
media_category: str = None
|
||||
media_category: str = None,
|
||||
downloader: str = None
|
||||
) -> Tuple[List[Context], Dict[Union[int, str], Dict[int, NotExistMediaInfo]]]:
|
||||
"""
|
||||
根据缺失数据,自动种子列表中组合择优下载
|
||||
@@ -394,10 +420,11 @@ class DownloadChain(ChainBase):
|
||||
:param no_exists: 缺失的剧集信息
|
||||
:param save_path: 保存路径
|
||||
:param channel: 通知渠道
|
||||
:param source: 通知来源
|
||||
:param source: 来源(消息通知、订阅、手工下载等)
|
||||
:param userid: 用户ID
|
||||
:param username: 调用下载的用户名/插件名
|
||||
:param media_category: 自定义媒体类别
|
||||
:param downloader: 下载器
|
||||
:return: 已经下载的资源列表、剩余未下载到的剧集 no_exists[tmdb_id/douban_id] = {season: NotExistMediaInfo}
|
||||
"""
|
||||
# 已下载的项目
|
||||
@@ -458,6 +485,21 @@ class DownloadChain(ChainBase):
|
||||
return 9999
|
||||
return no_exist[season].total_episode
|
||||
|
||||
# 发送资源选择事件,允许外部修改上下文数据
|
||||
logger.debug(f"Initial contexts: {len(contexts)} items, Downloader: {downloader}")
|
||||
event_data = ResourceSelectionEventData(
|
||||
contexts=contexts,
|
||||
downloader=downloader
|
||||
)
|
||||
event = eventmanager.send_event(ChainEventType.ResourceSelection, event_data)
|
||||
# 如果事件修改了上下文数据,使用更新后的数据
|
||||
if event and event.event_data:
|
||||
event_data: ResourceSelectionEventData = event.event_data
|
||||
if event_data.updated and event_data.updated_contexts is not None:
|
||||
logger.debug(f"Contexts updated by event: "
|
||||
f"{len(event_data.updated_contexts)} items (source: {event_data.source})")
|
||||
contexts = event_data.updated_contexts
|
||||
|
||||
# 分组排序
|
||||
contexts = TorrentHelper().sort_group_torrents(contexts)
|
||||
|
||||
@@ -469,7 +511,7 @@ class DownloadChain(ChainBase):
|
||||
logger.info(f"开始下载电影 {context.torrent_info.title} ...")
|
||||
if self.download_single(context, save_path=save_path, channel=channel,
|
||||
source=source, userid=userid, username=username,
|
||||
media_category=media_category):
|
||||
media_category=media_category, downloader=downloader):
|
||||
# 下载成功
|
||||
logger.info(f"{context.torrent_info.title} 添加下载成功")
|
||||
downloaded_list.append(context)
|
||||
@@ -554,7 +596,8 @@ class DownloadChain(ChainBase):
|
||||
source=source,
|
||||
userid=userid,
|
||||
username=username,
|
||||
media_category=media_category
|
||||
media_category=media_category,
|
||||
downloader=downloader,
|
||||
)
|
||||
else:
|
||||
# 下载
|
||||
@@ -562,7 +605,8 @@ class DownloadChain(ChainBase):
|
||||
download_id = self.download_single(context, save_path=save_path,
|
||||
channel=channel, source=source,
|
||||
userid=userid, username=username,
|
||||
media_category=media_category)
|
||||
media_category=media_category,
|
||||
downloader=downloader)
|
||||
|
||||
if download_id:
|
||||
# 下载成功
|
||||
@@ -633,7 +677,8 @@ class DownloadChain(ChainBase):
|
||||
download_id = self.download_single(context, save_path=save_path,
|
||||
channel=channel, source=source,
|
||||
userid=userid, username=username,
|
||||
media_category=media_category)
|
||||
media_category=media_category,
|
||||
downloader=downloader)
|
||||
if download_id:
|
||||
# 下载成功
|
||||
logger.info(f"{meta.title} 添加下载成功")
|
||||
@@ -722,7 +767,8 @@ class DownloadChain(ChainBase):
|
||||
source=source,
|
||||
userid=userid,
|
||||
username=username,
|
||||
media_category=media_category
|
||||
media_category=media_category,
|
||||
downloader=downloader
|
||||
)
|
||||
if not download_id:
|
||||
continue
|
||||
|
||||
@@ -11,12 +11,15 @@ from app.core.event import eventmanager, Event
|
||||
from app.core.meta import MetaBase
|
||||
from app.core.metainfo import MetaInfo, MetaInfoPath
|
||||
from app.log import logger
|
||||
from app.schemas import FileItem
|
||||
from app.schemas.types import EventType, MediaType, ChainEventType
|
||||
from app.utils.http import RequestUtils
|
||||
from app.utils.singleton import Singleton
|
||||
from app.utils.string import StringUtils
|
||||
|
||||
recognize_lock = Lock()
|
||||
scraping_lock = Lock()
|
||||
scraping_files = []
|
||||
|
||||
|
||||
class MediaChain(ChainBase, metaclass=Singleton):
|
||||
@@ -301,12 +304,23 @@ class MediaChain(ChainBase, metaclass=Singleton):
|
||||
if not event:
|
||||
return
|
||||
event_data = event.event_data or {}
|
||||
fileitem = event_data.get("fileitem")
|
||||
meta = event_data.get("meta")
|
||||
mediainfo = event_data.get("mediainfo")
|
||||
fileitem: FileItem = event_data.get("fileitem")
|
||||
meta: MetaBase = event_data.get("meta")
|
||||
mediainfo: MediaInfo = event_data.get("mediainfo")
|
||||
if not fileitem:
|
||||
return
|
||||
self.scrape_metadata(fileitem=fileitem, meta=meta, mediainfo=mediainfo)
|
||||
# 刮削锁
|
||||
with scraping_lock:
|
||||
if fileitem.path in scraping_files:
|
||||
return
|
||||
scraping_files.append(fileitem.path)
|
||||
try:
|
||||
# 执行刮削
|
||||
self.scrape_metadata(fileitem=fileitem, meta=meta, mediainfo=mediainfo)
|
||||
finally:
|
||||
# 释放锁
|
||||
with scraping_lock:
|
||||
scraping_files.remove(fileitem.path)
|
||||
|
||||
def scrape_metadata(self, fileitem: schemas.FileItem,
|
||||
meta: MetaBase = None, mediainfo: MediaInfo = None,
|
||||
@@ -322,6 +336,20 @@ class MediaChain(ChainBase, metaclass=Singleton):
|
||||
:param overwrite: 是否覆盖已有文件
|
||||
"""
|
||||
|
||||
def is_bluray_folder(_fileitem: schemas.FileItem) -> bool:
|
||||
"""
|
||||
判断是否为原盘目录
|
||||
"""
|
||||
if not _fileitem or _fileitem.type != "dir":
|
||||
return False
|
||||
# 蓝光原盘目录必备的文件或文件夹
|
||||
required_files = ['BDMV', 'CERTIFICATE']
|
||||
# 检查目录下是否存在所需文件或文件夹
|
||||
for item in self.storagechain.list_files(_fileitem):
|
||||
if item.name in required_files:
|
||||
return True
|
||||
return False
|
||||
|
||||
def __list_files(_fileitem: schemas.FileItem):
|
||||
"""
|
||||
列出下级文件
|
||||
@@ -337,13 +365,19 @@ class MediaChain(ChainBase, metaclass=Singleton):
|
||||
"""
|
||||
if not _fileitem or not _content or not _path:
|
||||
return
|
||||
# 保存文件到临时目录
|
||||
tmp_file = settings.TEMP_PATH / _path.name
|
||||
tmp_file.write_bytes(_content)
|
||||
logger.info(f"保存文件:【{_fileitem.storage}】{_path}")
|
||||
_fileitem.path = str(_path.parent)
|
||||
self.storagechain.upload_file(fileitem=_fileitem, path=tmp_file)
|
||||
if tmp_file.exists():
|
||||
tmp_file.unlink()
|
||||
# 获取文件的父目录
|
||||
try:
|
||||
item = self.storagechain.upload_file(fileitem=_fileitem, path=tmp_file, new_name=_path.name)
|
||||
if item:
|
||||
logger.info(f"已保存文件:{item.path}")
|
||||
else:
|
||||
logger.warn(f"文件保存失败:{item.path}")
|
||||
finally:
|
||||
if tmp_file.exists():
|
||||
tmp_file.unlink()
|
||||
|
||||
def __download_image(_url: str) -> Optional[bytes]:
|
||||
"""
|
||||
@@ -376,25 +410,40 @@ class MediaChain(ChainBase, metaclass=Singleton):
|
||||
if mediainfo.type == MediaType.MOVIE:
|
||||
# 电影
|
||||
if fileitem.type == "file":
|
||||
# 是否已存在
|
||||
nfo_path = filepath.with_suffix(".nfo")
|
||||
if not overwrite and self.storagechain.get_file_item(storage=fileitem.storage, path=nfo_path):
|
||||
logger.info(f"已存在nfo文件:{nfo_path}")
|
||||
return
|
||||
# 电影文件
|
||||
logger.info(f"正在生成电影nfo:{mediainfo.title_year} - {filepath.name}")
|
||||
movie_nfo = self.metadata_nfo(meta=meta, mediainfo=mediainfo)
|
||||
if not movie_nfo:
|
||||
logger.warn(f"{filepath.name} nfo文件生成失败!")
|
||||
return
|
||||
# 保存或上传nfo文件到上级目录
|
||||
nfo_path = filepath.with_suffix(".nfo")
|
||||
if not overwrite and self.storagechain.get_file_item(storage=fileitem.storage, path=nfo_path):
|
||||
logger.info(f"已存在nfo文件:{nfo_path}")
|
||||
return
|
||||
__save_file(_fileitem=parent, _path=nfo_path, _content=movie_nfo)
|
||||
else:
|
||||
# 电影目录
|
||||
files = __list_files(_fileitem=fileitem)
|
||||
for file in files:
|
||||
self.scrape_metadata(fileitem=file,
|
||||
meta=meta, mediainfo=mediainfo,
|
||||
init_folder=False, parent=fileitem)
|
||||
if is_bluray_folder(fileitem):
|
||||
# 原盘目录
|
||||
nfo_path = filepath / (filepath.name + ".nfo")
|
||||
if not overwrite and self.storagechain.get_file_item(storage=fileitem.storage, path=nfo_path):
|
||||
logger.info(f"已存在nfo文件:{nfo_path}")
|
||||
return
|
||||
# 生成原盘nfo
|
||||
movie_nfo = self.metadata_nfo(meta=meta, mediainfo=mediainfo)
|
||||
if not movie_nfo:
|
||||
logger.warn(f"{filepath.name} nfo文件生成失败!")
|
||||
return
|
||||
# 保存或上传nfo文件到当前目录
|
||||
__save_file(_fileitem=fileitem, _path=nfo_path, _content=movie_nfo)
|
||||
else:
|
||||
# 处理目录内的文件
|
||||
files = __list_files(_fileitem=fileitem)
|
||||
for file in files:
|
||||
self.scrape_metadata(fileitem=file,
|
||||
meta=meta, mediainfo=mediainfo,
|
||||
init_folder=False, parent=fileitem)
|
||||
# 生成目录内图片文件
|
||||
if init_folder:
|
||||
# 图片
|
||||
@@ -418,12 +467,17 @@ class MediaChain(ChainBase, metaclass=Singleton):
|
||||
else:
|
||||
# 电视剧
|
||||
if fileitem.type == "file":
|
||||
# 当前为集文件,重新识别季集
|
||||
# 是否已存在
|
||||
nfo_path = filepath.with_suffix(".nfo")
|
||||
if not overwrite and self.storagechain.get_file_item(storage=fileitem.storage, path=nfo_path):
|
||||
logger.info(f"已存在nfo文件:{nfo_path}")
|
||||
return
|
||||
# 重新识别季集
|
||||
file_meta = MetaInfoPath(filepath)
|
||||
if not file_meta.begin_episode:
|
||||
logger.warn(f"{filepath.name} 无法识别文件集数!")
|
||||
return
|
||||
file_mediainfo = self.recognize_media(meta=file_meta)
|
||||
file_mediainfo = self.recognize_media(meta=file_meta, tmdbid=mediainfo.tmdb_id)
|
||||
if not file_mediainfo:
|
||||
logger.warn(f"{filepath.name} 无法识别文件媒体信息!")
|
||||
return
|
||||
@@ -434,10 +488,8 @@ class MediaChain(ChainBase, metaclass=Singleton):
|
||||
logger.warn(f"{filepath.name} nfo生成失败!")
|
||||
return
|
||||
# 保存或上传nfo文件到上级目录
|
||||
nfo_path = filepath.with_suffix(".nfo")
|
||||
if not overwrite and self.storagechain.get_file_item(storage=fileitem.storage, path=nfo_path):
|
||||
logger.info(f"已存在nfo文件:{nfo_path}")
|
||||
return
|
||||
if not parent:
|
||||
parent = self.storagechain.get_parent_item(fileitem)
|
||||
__save_file(_fileitem=parent, _path=nfo_path, _content=episode_nfo)
|
||||
# 获取集的图片
|
||||
image_dict = self.metadata_img(mediainfo=file_mediainfo,
|
||||
@@ -452,6 +504,8 @@ class MediaChain(ChainBase, metaclass=Singleton):
|
||||
content = __download_image(image_url)
|
||||
# 保存图片文件到当前目录
|
||||
if content:
|
||||
if not parent:
|
||||
parent = self.storagechain.get_parent_item(fileitem)
|
||||
__save_file(_fileitem=parent, _path=image_path, _content=content)
|
||||
|
||||
else:
|
||||
@@ -466,17 +520,21 @@ class MediaChain(ChainBase, metaclass=Singleton):
|
||||
if init_folder:
|
||||
# 识别文件夹名称
|
||||
season_meta = MetaInfo(filepath.name)
|
||||
if season_meta.begin_season:
|
||||
# 当前文件夹为Specials或者SPs时,设置为S0
|
||||
if filepath.name in settings.RENAME_FORMAT_S0_NAMES:
|
||||
season_meta.begin_season = 0
|
||||
if season_meta.begin_season is not None:
|
||||
# 是否已存在
|
||||
nfo_path = filepath / "season.nfo"
|
||||
if not overwrite and self.storagechain.get_file_item(storage=fileitem.storage, path=nfo_path):
|
||||
logger.info(f"已存在nfo文件:{nfo_path}")
|
||||
return
|
||||
# 当前目录有季号,生成季nfo
|
||||
season_nfo = self.metadata_nfo(meta=meta, mediainfo=mediainfo, season=season_meta.begin_season)
|
||||
if not season_nfo:
|
||||
logger.warn(f"无法生成电视剧季nfo文件:{meta.name}")
|
||||
return
|
||||
# 写入nfo到根目录
|
||||
nfo_path = filepath / "season.nfo"
|
||||
if not overwrite and self.storagechain.get_file_item(storage=fileitem.storage, path=nfo_path):
|
||||
logger.info(f"已存在nfo文件:{nfo_path}")
|
||||
return
|
||||
__save_file(_fileitem=fileitem, _path=nfo_path, _content=season_nfo)
|
||||
# TMDB季poster图片
|
||||
image_dict = self.metadata_img(mediainfo=mediainfo, season=season_meta.begin_season)
|
||||
@@ -489,26 +547,55 @@ class MediaChain(ChainBase, metaclass=Singleton):
|
||||
continue
|
||||
# 下载图片
|
||||
content = __download_image(image_url)
|
||||
# 保存图片文件到当前目录
|
||||
# 保存图片文件到剧集目录
|
||||
if content:
|
||||
__save_file(_fileitem=fileitem, _path=image_path, _content=content)
|
||||
if not parent:
|
||||
parent = self.storagechain.get_parent_item(fileitem)
|
||||
__save_file(_fileitem=parent, _path=image_path, _content=content)
|
||||
# 额外fanart季图片:poster thumb banner
|
||||
image_dict = self.metadata_img(mediainfo=mediainfo)
|
||||
if image_dict:
|
||||
for image_name, image_url in image_dict.items():
|
||||
if image_name.startswith("season"):
|
||||
image_path = filepath.with_name(image_name)
|
||||
# 只下载当前刮削季的图片
|
||||
image_season = "00" if "specials" in image_name else image_name[6:8]
|
||||
if image_season != str(season_meta.begin_season).rjust(2, '0'):
|
||||
logger.info(f"当前刮削季为:{season_meta.begin_season},跳过文件:{image_path}")
|
||||
continue
|
||||
if not overwrite and self.storagechain.get_file_item(storage=fileitem.storage,
|
||||
path=image_path):
|
||||
logger.info(f"已存在图片文件:{image_path}")
|
||||
continue
|
||||
# 下载图片
|
||||
content = __download_image(image_url)
|
||||
# 保存图片文件到当前目录
|
||||
if content:
|
||||
if not parent:
|
||||
parent = self.storagechain.get_parent_item(fileitem)
|
||||
__save_file(_fileitem=parent, _path=image_path, _content=content)
|
||||
|
||||
# 判断当前目录是不是剧集根目录
|
||||
if season_meta.name:
|
||||
if not season_meta.season:
|
||||
# 是否已存在
|
||||
nfo_path = filepath / "tvshow.nfo"
|
||||
if not overwrite and self.storagechain.get_file_item(storage=fileitem.storage, path=nfo_path):
|
||||
logger.info(f"已存在nfo文件:{nfo_path}")
|
||||
return
|
||||
# 当前目录有名称,生成tvshow nfo 和 tv图片
|
||||
tv_nfo = self.metadata_nfo(meta=meta, mediainfo=mediainfo)
|
||||
if not tv_nfo:
|
||||
logger.warn(f"无法生成电视剧nfo文件:{meta.name}")
|
||||
return
|
||||
# 写入tvshow nfo到根目录
|
||||
nfo_path = filepath / "tvshow.nfo"
|
||||
if not overwrite and self.storagechain.get_file_item(storage=fileitem.storage, path=nfo_path):
|
||||
logger.info(f"已存在nfo文件:{nfo_path}")
|
||||
return
|
||||
__save_file(_fileitem=fileitem, _path=nfo_path, _content=tv_nfo)
|
||||
# 生成目录图片
|
||||
image_dict = self.metadata_img(mediainfo=mediainfo)
|
||||
if image_dict:
|
||||
for image_name, image_url in image_dict.items():
|
||||
# 不下载季图片
|
||||
if image_name.startswith("season"):
|
||||
continue
|
||||
image_path = filepath / image_name
|
||||
if not overwrite and self.storagechain.get_file_item(storage=fileitem.storage,
|
||||
path=image_path):
|
||||
|
||||
@@ -3,12 +3,12 @@ from typing import List, Union, Optional, Generator
|
||||
|
||||
from cachetools import cached, TTLCache
|
||||
|
||||
from app import schemas
|
||||
from app.chain import ChainBase
|
||||
from app.core.config import global_vars
|
||||
from app.db.mediaserver_oper import MediaServerOper
|
||||
from app.helper.service import ServiceConfigHelper
|
||||
from app.log import logger
|
||||
from app.schemas import MediaServerLibrary, MediaServerItem, MediaServerSeasonInfo, MediaServerPlayItem
|
||||
|
||||
lock = threading.Lock()
|
||||
|
||||
@@ -22,7 +22,7 @@ class MediaServerChain(ChainBase):
|
||||
super().__init__()
|
||||
self.dboper = MediaServerOper()
|
||||
|
||||
def librarys(self, server: str, username: str = None, hidden: bool = False) -> List[schemas.MediaServerLibrary]:
|
||||
def librarys(self, server: str, username: str = None, hidden: bool = False) -> List[MediaServerLibrary]:
|
||||
"""
|
||||
获取媒体服务器所有媒体库
|
||||
"""
|
||||
@@ -70,25 +70,25 @@ class MediaServerChain(ChainBase):
|
||||
yield from self.run_module("mediaserver_items", server=server, library_id=library_id,
|
||||
start_index=start_index, limit=limit)
|
||||
|
||||
def iteminfo(self, server: str, item_id: Union[str, int]) -> schemas.MediaServerItem:
|
||||
def iteminfo(self, server: str, item_id: Union[str, int]) -> MediaServerItem:
|
||||
"""
|
||||
获取媒体服务器项目信息
|
||||
"""
|
||||
return self.run_module("mediaserver_iteminfo", server=server, item_id=item_id)
|
||||
|
||||
def episodes(self, server: str, item_id: Union[str, int]) -> List[schemas.MediaServerSeasonInfo]:
|
||||
def episodes(self, server: str, item_id: Union[str, int]) -> List[MediaServerSeasonInfo]:
|
||||
"""
|
||||
获取媒体服务器剧集信息
|
||||
"""
|
||||
return self.run_module("mediaserver_tv_episodes", server=server, item_id=item_id)
|
||||
|
||||
def playing(self, server: str, count: int = 20, username: str = None) -> List[schemas.MediaServerPlayItem]:
|
||||
def playing(self, server: str, count: int = 20, username: str = None) -> List[MediaServerPlayItem]:
|
||||
"""
|
||||
获取媒体服务器正在播放信息
|
||||
"""
|
||||
return self.run_module("mediaserver_playing", count=count, server=server, username=username)
|
||||
|
||||
def latest(self, server: str, count: int = 20, username: str = None) -> List[schemas.MediaServerPlayItem]:
|
||||
def latest(self, server: str, count: int = 20, username: str = None) -> List[MediaServerPlayItem]:
|
||||
"""
|
||||
获取媒体服务器最新入库条目
|
||||
"""
|
||||
|
||||
@@ -111,6 +111,8 @@ class MessageChain(ChainBase):
|
||||
info = self.message_parser(source=source, body=body, form=form, args=args)
|
||||
if not info:
|
||||
return
|
||||
# 更新消息来源
|
||||
source = info.source
|
||||
# 渠道
|
||||
channel = info.channel
|
||||
# 用户ID
|
||||
|
||||
@@ -221,6 +221,12 @@ class SearchChain(ChainBase):
|
||||
key=ProgressKey.Search)
|
||||
if not torrent.title:
|
||||
continue
|
||||
|
||||
# 匹配订阅附加参数
|
||||
if filter_params and not self.torrenthelper.filter_torrent(torrent_info=torrent,
|
||||
filter_params=filter_params):
|
||||
continue
|
||||
|
||||
# 识别元数据
|
||||
torrent_meta = MetaInfo(title=torrent.title, subtitle=torrent.description,
|
||||
custom_words=custom_words)
|
||||
@@ -234,11 +240,6 @@ class SearchChain(ChainBase):
|
||||
_match_torrents.append((torrent, torrent_meta))
|
||||
continue
|
||||
|
||||
# 匹配订阅附加参数
|
||||
if filter_params and not self.torrenthelper.filter_torrent(torrent_info=torrent,
|
||||
filter_params=filter_params):
|
||||
continue
|
||||
|
||||
# 比对种子
|
||||
if self.torrenthelper.match_torrent(mediainfo=mediainfo,
|
||||
torrent_meta=torrent_meta,
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import base64
|
||||
import re
|
||||
from datetime import datetime
|
||||
from typing import Optional, Tuple, Union
|
||||
from typing import Optional, Tuple, Union, Dict
|
||||
from urllib.parse import urljoin
|
||||
|
||||
from lxml import etree
|
||||
@@ -86,14 +86,22 @@ class SiteChain(ChainBase):
|
||||
f"{userdata.message_unread} 条新消息,请登陆查看",
|
||||
link=site.get("url")
|
||||
))
|
||||
# 低分享率警告
|
||||
if userdata.ratio and float(userdata.ratio) < 1:
|
||||
self.post_message(Notification(
|
||||
mtype=NotificationType.SiteMessage,
|
||||
title=f"【站点分享率低预警】",
|
||||
text=f"站点 {site.get('name')} 分享率 {userdata.ratio},请注意!"
|
||||
))
|
||||
return userdata
|
||||
|
||||
def refresh_userdatas(self) -> None:
|
||||
def refresh_userdatas(self) -> Dict[str, SiteUserData]:
|
||||
"""
|
||||
刷新所有站点的用户数据
|
||||
"""
|
||||
sites = self.siteshelper.get_indexers()
|
||||
any_site_updated = False
|
||||
result = {}
|
||||
for site in sites:
|
||||
if global_vars.is_system_stopped:
|
||||
return
|
||||
@@ -101,10 +109,12 @@ class SiteChain(ChainBase):
|
||||
userdata = self.refresh_userdata(site)
|
||||
if userdata:
|
||||
any_site_updated = True
|
||||
result[site.get("name")] = userdata
|
||||
if any_site_updated:
|
||||
EventManager().send_event(EventType.SiteRefreshed, {
|
||||
"site_id": "*"
|
||||
})
|
||||
return result
|
||||
|
||||
def is_special_site(self, domain: str) -> bool:
|
||||
"""
|
||||
@@ -705,3 +715,66 @@ class SiteChain(ChainBase):
|
||||
source=source,
|
||||
title=f"【{site_info.name}】 Cookie&UA更新成功",
|
||||
userid=userid))
|
||||
|
||||
def remote_refresh_userdatas(self, channel: MessageChannel,
|
||||
userid: Union[str, int] = None, source: str = None):
|
||||
"""
|
||||
刷新所有站点用户数据
|
||||
"""
|
||||
logger.info("收到命令,开始刷新站点数据 ...")
|
||||
self.post_message(Notification(
|
||||
channel=channel,
|
||||
source=source,
|
||||
title="开始刷新站点数据 ...",
|
||||
userid=userid
|
||||
))
|
||||
# 刷新站点数据
|
||||
site_datas = self.refresh_userdatas()
|
||||
if site_datas:
|
||||
# 发送消息
|
||||
messages = {}
|
||||
# 总上传
|
||||
incUploads = 0
|
||||
# 总下载
|
||||
incDownloads = 0
|
||||
# 今天日期
|
||||
today_date = datetime.now().strftime("%Y-%m-%d")
|
||||
|
||||
for rand, site in enumerate(site_datas.keys()):
|
||||
upload = int(site_datas[site].upload or 0)
|
||||
download = int(site_datas[site].download or 0)
|
||||
updated_date = site_datas[site].updated_day
|
||||
if updated_date and updated_date != today_date:
|
||||
updated_date = f"({updated_date})"
|
||||
else:
|
||||
updated_date = ""
|
||||
|
||||
if upload > 0 or download > 0:
|
||||
incUploads += upload
|
||||
incDownloads += download
|
||||
messages[upload + (rand / 1000)] = (
|
||||
f"【{site}】{updated_date}\n"
|
||||
+ f"上传量:{StringUtils.str_filesize(upload)}\n"
|
||||
+ f"下载量:{StringUtils.str_filesize(download)}\n"
|
||||
+ "————————————"
|
||||
)
|
||||
if incDownloads or incUploads:
|
||||
sorted_messages = [messages[key] for key in sorted(messages.keys(), reverse=True)]
|
||||
sorted_messages.insert(0, f"【汇总】\n"
|
||||
f"总上传:{StringUtils.str_filesize(incUploads)}\n"
|
||||
f"总下载:{StringUtils.str_filesize(incDownloads)}\n"
|
||||
f"————————————")
|
||||
self.post_message(Notification(
|
||||
channel=channel,
|
||||
source=source,
|
||||
title="【站点数据统计】",
|
||||
text="\n".join(sorted_messages),
|
||||
userid=userid
|
||||
))
|
||||
else:
|
||||
self.post_message(Notification(
|
||||
channel=channel,
|
||||
source=source,
|
||||
title="没有刷新到任何站点数据!",
|
||||
userid=userid
|
||||
))
|
||||
|
||||
@@ -4,6 +4,7 @@ from typing import Optional, Tuple, List, Dict
|
||||
from app import schemas
|
||||
from app.chain import ChainBase
|
||||
from app.core.config import settings
|
||||
from app.helper.directory import DirectoryHelper
|
||||
from app.log import logger
|
||||
from app.schemas import MediaType
|
||||
|
||||
@@ -13,6 +14,10 @@ class StorageChain(ChainBase):
|
||||
存储处理链
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self.directoryhelper = DirectoryHelper()
|
||||
|
||||
def save_config(self, storage: str, conf: dict) -> None:
|
||||
"""
|
||||
保存存储配置
|
||||
@@ -57,13 +62,15 @@ class StorageChain(ChainBase):
|
||||
"""
|
||||
return self.run_module("download_file", fileitem=fileitem, path=path)
|
||||
|
||||
def upload_file(self, fileitem: schemas.FileItem, path: Path) -> Optional[bool]:
|
||||
def upload_file(self, fileitem: schemas.FileItem, path: Path,
|
||||
new_name: str = None) -> Optional[schemas.FileItem]:
|
||||
"""
|
||||
上传文件
|
||||
:param fileitem: 保存目录项
|
||||
:param path: 本地文件路径
|
||||
:param new_name: 新文件名
|
||||
"""
|
||||
return self.run_module("upload_file", fileitem=fileitem, path=path)
|
||||
return self.run_module("upload_file", fileitem=fileitem, path=path, new_name=new_name)
|
||||
|
||||
def delete_file(self, fileitem: schemas.FileItem) -> Optional[bool]:
|
||||
"""
|
||||
@@ -107,24 +114,52 @@ class StorageChain(ChainBase):
|
||||
"""
|
||||
return self.run_module("support_transtype", storage=storage)
|
||||
|
||||
def delete_media_file(self, fileitem: schemas.FileItem, mtype: MediaType = None) -> bool:
|
||||
def delete_media_file(self, fileitem: schemas.FileItem,
|
||||
mtype: MediaType = None, delete_self: bool = True) -> bool:
|
||||
"""
|
||||
删除媒体文件,以及不含媒体文件的目录
|
||||
"""
|
||||
state = self.delete_file(fileitem)
|
||||
if not state:
|
||||
logger.warn(f"【{fileitem.storage}】{fileitem.path} 删除失败")
|
||||
media_exts = settings.RMT_MEDIAEXT + settings.DOWNLOAD_TMPEXT
|
||||
if fileitem.path == "/" or len(Path(fileitem.path).parts) <= 2:
|
||||
logger.warn(f"【{fileitem.storage}】{fileitem.path} 根目录或一级目录不允许删除")
|
||||
return False
|
||||
# 上级目录
|
||||
if mtype and mtype == MediaType.TV:
|
||||
dir_path = Path(fileitem.path).parent.parent
|
||||
dir_item = self.get_file_item(storage=fileitem.storage, path=dir_path)
|
||||
if fileitem.type == "dir":
|
||||
# 本身是目录
|
||||
if self.any_files(fileitem, extensions=media_exts) is False:
|
||||
logger.warn(f"【{fileitem.storage}】{fileitem.path} 不存在其它媒体文件,删除空目录")
|
||||
return self.delete_file(fileitem)
|
||||
return False
|
||||
elif delete_self:
|
||||
# 本身是文件
|
||||
logger.warn(f"正在删除【{fileitem.storage}】{fileitem.path}")
|
||||
if not self.delete_file(fileitem):
|
||||
logger.warn(f"【{fileitem.storage}】{fileitem.path} 删除失败")
|
||||
return False
|
||||
if mtype:
|
||||
# 重命名格式
|
||||
rename_format = settings.TV_RENAME_FORMAT \
|
||||
if mtype == MediaType.TV else settings.MOVIE_RENAME_FORMAT
|
||||
# 计算重命名中的文件夹层数
|
||||
rename_format_level = len(rename_format.split("/")) - 1
|
||||
if rename_format_level < 1:
|
||||
return True
|
||||
# 处理上级目录
|
||||
dir_item = self.get_file_item(storage=fileitem.storage,
|
||||
path=Path(fileitem.path).parents[rename_format_level - 1])
|
||||
else:
|
||||
dir_item = self.get_parent_item(fileitem)
|
||||
if dir_item:
|
||||
if dir_item and len(Path(dir_item.path).parts) > 2:
|
||||
# 如何目录是所有下载目录、媒体库目录的上级,则不处理
|
||||
for d in self.directoryhelper.get_dirs():
|
||||
if d.download_path and Path(d.download_path).is_relative_to(Path(dir_item.path)):
|
||||
logger.debug(f"【{dir_item.storage}】{dir_item.path} 是下载目录本级或上级目录,不删除")
|
||||
return True
|
||||
if d.library_path and Path(d.library_path).is_relative_to(Path(dir_item.path)):
|
||||
logger.debug(f"【{dir_item.storage}】{dir_item.path} 是媒体库目录本级或上级目录,不删除")
|
||||
return True
|
||||
# 不存在其他媒体文件,删除空目录
|
||||
if not self.any_files(dir_item, extensions=settings.RMT_MEDIAEXT):
|
||||
if self.any_files(dir_item, extensions=media_exts) is False:
|
||||
logger.warn(f"【{dir_item.storage}】{dir_item.path} 不存在其它媒体文件,删除空目录")
|
||||
return self.delete_file(dir_item)
|
||||
|
||||
# 存在媒体文件,返回文件删除状态
|
||||
return state
|
||||
return True
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
import copy
|
||||
import random
|
||||
import time
|
||||
import threading
|
||||
from datetime import datetime
|
||||
from typing import Dict, List, Optional, Union, Tuple
|
||||
|
||||
@@ -28,15 +29,17 @@ from app.log import logger
|
||||
from app.schemas import NotExistMediaInfo, Notification, SubscrbieInfo, SubscribeEpisodeInfo, SubscribeDownloadFileInfo, \
|
||||
SubscribeLibraryFileInfo
|
||||
from app.schemas.types import MediaType, SystemConfigKey, MessageChannel, NotificationType, EventType
|
||||
from app.utils.singleton import Singleton
|
||||
|
||||
|
||||
class SubscribeChain(ChainBase):
|
||||
class SubscribeChain(ChainBase, metaclass=Singleton):
|
||||
"""
|
||||
订阅管理处理链
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self._rlock = threading.RLock()
|
||||
self.downloadchain = DownloadChain()
|
||||
self.downloadhis = DownloadHistoryOper()
|
||||
self.searchchain = SearchChain()
|
||||
@@ -159,6 +162,8 @@ class SubscribeChain(ChainBase):
|
||||
"search_imdbid") else kwargs.get("search_imdbid"),
|
||||
'sites': self.__get_default_subscribe_config(mediainfo.type, "sites") or None if not kwargs.get(
|
||||
"sites") else kwargs.get("sites"),
|
||||
'downloader': self.__get_default_subscribe_config(mediainfo.type, "downloader") if not kwargs.get(
|
||||
"downloader") else kwargs.get("downloader"),
|
||||
'save_path': self.__get_default_subscribe_config(mediainfo.type, "save_path") if not kwargs.get(
|
||||
"save_path") else kwargs.get("save_path")
|
||||
})
|
||||
@@ -232,181 +237,189 @@ class SubscribeChain(ChainBase):
|
||||
"""
|
||||
订阅搜索
|
||||
:param sid: 订阅ID,有值时只处理该订阅
|
||||
:param state: 订阅状态 N:未搜索 R:已搜索
|
||||
:param state: 订阅状态 N:新建, R:订阅中, P:待定, S:暂停
|
||||
:param manual: 是否手动搜索
|
||||
:return: 更新订阅状态为R或删除订阅
|
||||
"""
|
||||
if sid:
|
||||
subscribes = [self.subscribeoper.get(sid)]
|
||||
else:
|
||||
subscribes = self.subscribeoper.list(state)
|
||||
# 遍历订阅
|
||||
for subscribe in subscribes:
|
||||
if global_vars.is_system_stopped:
|
||||
break
|
||||
mediakey = subscribe.tmdbid or subscribe.doubanid
|
||||
custom_word_list = subscribe.custom_words.split("\n") if subscribe.custom_words else None
|
||||
# 校验当前时间减订阅创建时间是否大于1分钟,否则跳过先,留出编辑订阅的时间
|
||||
if subscribe.date:
|
||||
now = datetime.now()
|
||||
subscribe_time = datetime.strptime(subscribe.date, '%Y-%m-%d %H:%M:%S')
|
||||
if (now - subscribe_time).total_seconds() < 60:
|
||||
logger.debug(f"订阅标题:{subscribe.name} 新增小于1分钟,暂不搜索...")
|
||||
continue
|
||||
# 随机休眠1-5分钟
|
||||
if not sid and state == 'R':
|
||||
sleep_time = random.randint(60, 300)
|
||||
logger.info(f'订阅搜索随机休眠 {sleep_time} 秒 ...')
|
||||
time.sleep(sleep_time)
|
||||
logger.info(f'开始搜索订阅,标题:{subscribe.name} ...')
|
||||
# 如果状态为N则更新为R
|
||||
if subscribe.state == 'N':
|
||||
self.subscribeoper.update(subscribe.id, {'state': 'R'})
|
||||
# 生成元数据
|
||||
meta = MetaInfo(subscribe.name)
|
||||
meta.year = subscribe.year
|
||||
meta.begin_season = subscribe.season or None
|
||||
try:
|
||||
meta.type = MediaType(subscribe.type)
|
||||
except ValueError:
|
||||
logger.error(f'订阅 {subscribe.name} 类型错误:{subscribe.type}')
|
||||
continue
|
||||
# 识别媒体信息
|
||||
mediainfo: MediaInfo = self.recognize_media(meta=meta, mtype=meta.type,
|
||||
tmdbid=subscribe.tmdbid,
|
||||
doubanid=subscribe.doubanid,
|
||||
cache=False)
|
||||
if not mediainfo:
|
||||
logger.warn(
|
||||
f'未识别到媒体信息,标题:{subscribe.name},tmdbid:{subscribe.tmdbid},doubanid:{subscribe.doubanid}')
|
||||
continue
|
||||
|
||||
# 非洗版状态
|
||||
if not subscribe.best_version:
|
||||
# 每季总集数
|
||||
totals = {}
|
||||
if subscribe.season and subscribe.total_episode:
|
||||
totals = {
|
||||
subscribe.season: subscribe.total_episode
|
||||
}
|
||||
# 查询媒体库缺失的媒体信息
|
||||
exist_flag, no_exists = self.downloadchain.get_no_exists_info(
|
||||
meta=meta,
|
||||
mediainfo=mediainfo,
|
||||
totals=totals
|
||||
)
|
||||
else:
|
||||
# 洗版状态
|
||||
exist_flag = False
|
||||
if meta.type == MediaType.TV:
|
||||
no_exists = {
|
||||
mediakey: {
|
||||
subscribe.season: NotExistMediaInfo(
|
||||
season=subscribe.season,
|
||||
episodes=[],
|
||||
total_episode=subscribe.total_episode,
|
||||
start_episode=subscribe.start_episode or 1)
|
||||
}
|
||||
}
|
||||
else:
|
||||
no_exists = {}
|
||||
|
||||
# 已存在
|
||||
if exist_flag:
|
||||
logger.info(f'{mediainfo.title_year} 媒体库中已存在')
|
||||
self.finish_subscribe_or_not(subscribe=subscribe, meta=meta, mediainfo=mediainfo, force=True)
|
||||
continue
|
||||
|
||||
# 电视剧订阅处理缺失集
|
||||
if meta.type == MediaType.TV:
|
||||
# 实际缺失集与订阅开始结束集范围进行整合,同时剔除已下载的集数
|
||||
no_exists = self.__get_subscribe_no_exits(
|
||||
subscribe_name=f'{subscribe.name} {meta.season}',
|
||||
no_exists=no_exists,
|
||||
mediakey=mediakey,
|
||||
begin_season=meta.begin_season,
|
||||
total_episode=subscribe.total_episode,
|
||||
start_episode=subscribe.start_episode,
|
||||
downloaded_episodes=self.__get_downloaded_episodes(subscribe)
|
||||
)
|
||||
|
||||
# 站点范围
|
||||
sites = self.get_sub_sites(subscribe)
|
||||
|
||||
# 优先级过滤规则
|
||||
if subscribe.best_version:
|
||||
rule_groups = subscribe.filter_groups \
|
||||
or self.systemconfig.get(SystemConfigKey.BestVersionFilterRuleGroups)
|
||||
else:
|
||||
rule_groups = subscribe.filter_groups \
|
||||
or self.systemconfig.get(SystemConfigKey.SubscribeFilterRuleGroups)
|
||||
|
||||
# 搜索,同时电视剧会过滤掉不需要的剧集
|
||||
contexts = self.searchchain.process(mediainfo=mediainfo,
|
||||
keyword=subscribe.keyword,
|
||||
no_exists=no_exists,
|
||||
sites=sites,
|
||||
rule_groups=rule_groups,
|
||||
area="imdbid" if subscribe.search_imdbid else "title",
|
||||
custom_words=custom_word_list,
|
||||
filter_params=self.get_params(subscribe))
|
||||
if not contexts:
|
||||
logger.warn(f'订阅 {subscribe.keyword or subscribe.name} 未搜索到资源')
|
||||
self.finish_subscribe_or_not(subscribe=subscribe, meta=meta,
|
||||
mediainfo=mediainfo, lefts=no_exists)
|
||||
continue
|
||||
|
||||
# 过滤搜索结果
|
||||
matched_contexts = []
|
||||
for context in contexts:
|
||||
torrent_meta = context.meta_info
|
||||
torrent_info = context.torrent_info
|
||||
torrent_mediainfo = context.media_info
|
||||
|
||||
# 匹配订阅附加参数
|
||||
if not self.torrenthelper.filter_torrent(torrent_info=torrent_info,
|
||||
filter_params=self.get_params(subscribe)):
|
||||
continue
|
||||
# 洗版
|
||||
if subscribe.best_version:
|
||||
# 洗版时,非整季不要
|
||||
if torrent_mediainfo.type == MediaType.TV:
|
||||
if torrent_meta.episode_list:
|
||||
logger.info(f'{subscribe.name} 正在洗版,{torrent_info.title} 不是整季')
|
||||
continue
|
||||
# 洗版时,优先级小于等于已下载优先级的不要
|
||||
if subscribe.current_priority \
|
||||
and torrent_info.pri_order <= subscribe.current_priority:
|
||||
logger.info(f'{subscribe.name} 正在洗版,{torrent_info.title} 优先级低于或等于已下载优先级')
|
||||
continue
|
||||
matched_contexts.append(context)
|
||||
|
||||
if not matched_contexts:
|
||||
logger.warn(f'订阅 {subscribe.name} 没有符合过滤条件的资源')
|
||||
self.finish_subscribe_or_not(subscribe=subscribe, meta=meta,
|
||||
mediainfo=mediainfo, lefts=no_exists)
|
||||
continue
|
||||
|
||||
# 自动下载
|
||||
downloads, lefts = self.downloadchain.batch_download(
|
||||
contexts=matched_contexts,
|
||||
no_exists=no_exists,
|
||||
userid=subscribe.username,
|
||||
username=subscribe.username,
|
||||
save_path=subscribe.save_path,
|
||||
media_category=subscribe.media_category
|
||||
)
|
||||
|
||||
# 判断是否应完成订阅
|
||||
self.finish_subscribe_or_not(subscribe=subscribe, meta=meta, mediainfo=mediainfo,
|
||||
downloads=downloads, lefts=lefts)
|
||||
|
||||
# 手动触发时发送系统消息
|
||||
if manual:
|
||||
with self._rlock:
|
||||
logger.debug(f"search lock acquired at {datetime.now()}")
|
||||
if sid:
|
||||
self.message.put(f'{subscribes[0].name} 搜索完成!', title="订阅搜索", role="system")
|
||||
subscribe = self.subscribeoper.get(sid)
|
||||
subscribes = [subscribe] if subscribe else []
|
||||
else:
|
||||
self.message.put('所有订阅搜索完成!', title="订阅搜索", role="system")
|
||||
subscribes = self.subscribeoper.list(self.get_states_for_search(state))
|
||||
# 遍历订阅
|
||||
for subscribe in subscribes:
|
||||
if global_vars.is_system_stopped:
|
||||
break
|
||||
mediakey = subscribe.tmdbid or subscribe.doubanid
|
||||
custom_word_list = subscribe.custom_words.split("\n") if subscribe.custom_words else None
|
||||
# 校验当前时间减订阅创建时间是否大于1分钟,否则跳过先,留出编辑订阅的时间
|
||||
if subscribe.date:
|
||||
now = datetime.now()
|
||||
subscribe_time = datetime.strptime(subscribe.date, '%Y-%m-%d %H:%M:%S')
|
||||
if (now - subscribe_time).total_seconds() < 60:
|
||||
logger.debug(f"订阅标题:{subscribe.name} 新增小于1分钟,暂不搜索...")
|
||||
continue
|
||||
# 随机休眠1-5分钟
|
||||
if not sid and state in ['R', 'P']:
|
||||
sleep_time = random.randint(60, 300)
|
||||
logger.info(f'订阅搜索随机休眠 {sleep_time} 秒 ...')
|
||||
time.sleep(sleep_time)
|
||||
try:
|
||||
logger.info(f'开始搜索订阅,标题:{subscribe.name} ...')
|
||||
# 生成元数据
|
||||
meta = MetaInfo(subscribe.name)
|
||||
meta.year = subscribe.year
|
||||
meta.begin_season = subscribe.season or None
|
||||
try:
|
||||
meta.type = MediaType(subscribe.type)
|
||||
except ValueError:
|
||||
logger.error(f'订阅 {subscribe.name} 类型错误:{subscribe.type}')
|
||||
continue
|
||||
# 识别媒体信息
|
||||
mediainfo: MediaInfo = self.recognize_media(meta=meta, mtype=meta.type,
|
||||
tmdbid=subscribe.tmdbid,
|
||||
doubanid=subscribe.doubanid,
|
||||
cache=False)
|
||||
if not mediainfo:
|
||||
logger.warn(
|
||||
f'未识别到媒体信息,标题:{subscribe.name},tmdbid:{subscribe.tmdbid},doubanid:{subscribe.doubanid}')
|
||||
continue
|
||||
|
||||
# 非洗版状态
|
||||
if not subscribe.best_version:
|
||||
# 每季总集数
|
||||
totals = {}
|
||||
if subscribe.season and subscribe.total_episode:
|
||||
totals = {
|
||||
subscribe.season: subscribe.total_episode
|
||||
}
|
||||
# 查询媒体库缺失的媒体信息
|
||||
exist_flag, no_exists = self.downloadchain.get_no_exists_info(
|
||||
meta=meta,
|
||||
mediainfo=mediainfo,
|
||||
totals=totals
|
||||
)
|
||||
else:
|
||||
# 洗版状态
|
||||
exist_flag = False
|
||||
if meta.type == MediaType.TV:
|
||||
no_exists = {
|
||||
mediakey: {
|
||||
subscribe.season: NotExistMediaInfo(
|
||||
season=subscribe.season,
|
||||
episodes=[],
|
||||
total_episode=subscribe.total_episode,
|
||||
start_episode=subscribe.start_episode or 1)
|
||||
}
|
||||
}
|
||||
else:
|
||||
no_exists = {}
|
||||
|
||||
# 已存在
|
||||
if exist_flag:
|
||||
logger.info(f'{mediainfo.title_year} 媒体库中已存在')
|
||||
self.finish_subscribe_or_not(subscribe=subscribe, meta=meta, mediainfo=mediainfo, force=True)
|
||||
continue
|
||||
|
||||
# 电视剧订阅处理缺失集
|
||||
if meta.type == MediaType.TV:
|
||||
# 实际缺失集与订阅开始结束集范围进行整合,同时剔除已下载的集数
|
||||
no_exists = self.__get_subscribe_no_exits(
|
||||
subscribe_name=f'{subscribe.name} {meta.season}',
|
||||
no_exists=no_exists,
|
||||
mediakey=mediakey,
|
||||
begin_season=meta.begin_season,
|
||||
total_episode=subscribe.total_episode,
|
||||
start_episode=subscribe.start_episode,
|
||||
downloaded_episodes=self.__get_downloaded_episodes(subscribe)
|
||||
)
|
||||
|
||||
# 站点范围
|
||||
sites = self.get_sub_sites(subscribe)
|
||||
|
||||
# 优先级过滤规则
|
||||
if subscribe.best_version:
|
||||
rule_groups = subscribe.filter_groups \
|
||||
or self.systemconfig.get(SystemConfigKey.BestVersionFilterRuleGroups) or []
|
||||
else:
|
||||
rule_groups = subscribe.filter_groups \
|
||||
or self.systemconfig.get(SystemConfigKey.SubscribeFilterRuleGroups) or []
|
||||
|
||||
# 搜索,同时电视剧会过滤掉不需要的剧集
|
||||
contexts = self.searchchain.process(mediainfo=mediainfo,
|
||||
keyword=subscribe.keyword,
|
||||
no_exists=no_exists,
|
||||
sites=sites,
|
||||
rule_groups=rule_groups,
|
||||
area="imdbid" if subscribe.search_imdbid else "title",
|
||||
custom_words=custom_word_list,
|
||||
filter_params=self.get_params(subscribe))
|
||||
if not contexts:
|
||||
logger.warn(f'订阅 {subscribe.keyword or subscribe.name} 未搜索到资源')
|
||||
self.finish_subscribe_or_not(subscribe=subscribe, meta=meta,
|
||||
mediainfo=mediainfo, lefts=no_exists)
|
||||
continue
|
||||
|
||||
# 过滤搜索结果
|
||||
matched_contexts = []
|
||||
for context in contexts:
|
||||
torrent_meta = context.meta_info
|
||||
torrent_info = context.torrent_info
|
||||
torrent_mediainfo = context.media_info
|
||||
|
||||
# 洗版
|
||||
if subscribe.best_version:
|
||||
# 洗版时,非整季不要
|
||||
if torrent_mediainfo.type == MediaType.TV:
|
||||
if torrent_meta.episode_list:
|
||||
logger.info(f'{subscribe.name} 正在洗版,{torrent_info.title} 不是整季')
|
||||
continue
|
||||
# 洗版时,优先级小于等于已下载优先级的不要
|
||||
if subscribe.current_priority \
|
||||
and torrent_info.pri_order <= subscribe.current_priority:
|
||||
logger.info(
|
||||
f'{subscribe.name} 正在洗版,{torrent_info.title} 优先级低于或等于已下载优先级')
|
||||
continue
|
||||
matched_contexts.append(context)
|
||||
|
||||
if not matched_contexts:
|
||||
logger.warn(f'订阅 {subscribe.name} 没有符合过滤条件的资源')
|
||||
self.finish_subscribe_or_not(subscribe=subscribe, meta=meta,
|
||||
mediainfo=mediainfo, lefts=no_exists)
|
||||
continue
|
||||
|
||||
# 自动下载
|
||||
downloads, lefts = self.downloadchain.batch_download(
|
||||
contexts=matched_contexts,
|
||||
no_exists=no_exists,
|
||||
userid=subscribe.username,
|
||||
username=subscribe.username,
|
||||
save_path=subscribe.save_path,
|
||||
media_category=subscribe.media_category,
|
||||
downloader=subscribe.downloader,
|
||||
source="Subscribe"
|
||||
)
|
||||
|
||||
# 判断是否应完成订阅
|
||||
self.finish_subscribe_or_not(subscribe=subscribe, meta=meta, mediainfo=mediainfo,
|
||||
downloads=downloads, lefts=lefts)
|
||||
finally:
|
||||
# 如果状态为N则更新为R
|
||||
if subscribe.state == 'N':
|
||||
self.subscribeoper.update(subscribe.id, {'state': 'R'})
|
||||
|
||||
# 手动触发时发送系统消息
|
||||
if manual:
|
||||
if subscribes:
|
||||
if sid:
|
||||
self.message.put(f'{subscribes[0].name} 搜索完成!', title="订阅搜索", role="system")
|
||||
else:
|
||||
self.message.put('所有订阅搜索完成!', title="订阅搜索", role="system")
|
||||
else:
|
||||
self.message.put('没有找到订阅!', title="订阅搜索", role="system")
|
||||
logger.debug(f"search Lock released at {datetime.now()}")
|
||||
|
||||
def update_subscribe_priority(self, subscribe: Subscribe, meta: MetaInfo,
|
||||
mediainfo: MediaInfo, downloads: List[Context]):
|
||||
@@ -509,7 +522,7 @@ class SubscribeChain(ChainBase):
|
||||
:return: 返回[]代表所有站点命中,返回None代表没有订阅
|
||||
"""
|
||||
# 查询所有订阅
|
||||
subscribes = self.subscribeoper.list('R')
|
||||
subscribes = self.subscribeoper.list(self.get_states_for_search('R'))
|
||||
if not subscribes:
|
||||
return None
|
||||
ret_sites = []
|
||||
@@ -534,249 +547,255 @@ class SubscribeChain(ChainBase):
|
||||
# 记录重新识别过的种子
|
||||
_recognize_cached = []
|
||||
|
||||
# 所有订阅
|
||||
subscribes = self.subscribeoper.list('R')
|
||||
# 遍历订阅
|
||||
for subscribe in subscribes:
|
||||
if global_vars.is_system_stopped:
|
||||
break
|
||||
logger.info(f'开始匹配订阅,标题:{subscribe.name} ...')
|
||||
mediakey = subscribe.tmdbid or subscribe.doubanid
|
||||
# 生成元数据
|
||||
meta = MetaInfo(subscribe.name)
|
||||
meta.year = subscribe.year
|
||||
meta.begin_season = subscribe.season or None
|
||||
try:
|
||||
meta.type = MediaType(subscribe.type)
|
||||
except ValueError:
|
||||
logger.error(f'订阅 {subscribe.name} 类型错误:{subscribe.type}')
|
||||
continue
|
||||
# 订阅的站点域名列表
|
||||
domains = []
|
||||
if subscribe.sites:
|
||||
domains = self.siteoper.get_domains_by_ids(subscribe.sites)
|
||||
# 识别媒体信息
|
||||
mediainfo: MediaInfo = self.recognize_media(meta=meta, mtype=meta.type,
|
||||
tmdbid=subscribe.tmdbid,
|
||||
doubanid=subscribe.doubanid,
|
||||
cache=False)
|
||||
if not mediainfo:
|
||||
logger.warn(
|
||||
f'未识别到媒体信息,标题:{subscribe.name},tmdbid:{subscribe.tmdbid},doubanid:{subscribe.doubanid}')
|
||||
continue
|
||||
# 非洗版
|
||||
if not subscribe.best_version:
|
||||
# 每季总集数
|
||||
totals = {}
|
||||
if subscribe.season and subscribe.total_episode:
|
||||
totals = {
|
||||
subscribe.season: subscribe.total_episode
|
||||
}
|
||||
# 查询缺失的媒体信息
|
||||
exist_flag, no_exists = self.downloadchain.get_no_exists_info(
|
||||
meta=meta,
|
||||
mediainfo=mediainfo,
|
||||
totals=totals
|
||||
)
|
||||
else:
|
||||
# 洗版
|
||||
exist_flag = False
|
||||
if meta.type == MediaType.TV:
|
||||
no_exists = {
|
||||
mediakey: {
|
||||
subscribe.season: NotExistMediaInfo(
|
||||
season=subscribe.season,
|
||||
episodes=[],
|
||||
total_episode=subscribe.total_episode,
|
||||
start_episode=subscribe.start_episode or 1)
|
||||
}
|
||||
}
|
||||
else:
|
||||
no_exists = {}
|
||||
|
||||
# 已存在
|
||||
if exist_flag:
|
||||
logger.info(f'{mediainfo.title_year} 媒体库中已存在')
|
||||
self.finish_subscribe_or_not(subscribe=subscribe, meta=meta, mediainfo=mediainfo, force=True)
|
||||
continue
|
||||
|
||||
# 电视剧订阅
|
||||
if meta.type == MediaType.TV:
|
||||
# 整合实际缺失集与订阅开始集结束集,同时剔除已下载的集数
|
||||
no_exists = self.__get_subscribe_no_exits(
|
||||
subscribe_name=f'{subscribe.name} {meta.season}',
|
||||
no_exists=no_exists,
|
||||
mediakey=mediakey,
|
||||
begin_season=meta.begin_season,
|
||||
total_episode=subscribe.total_episode,
|
||||
start_episode=subscribe.start_episode,
|
||||
downloaded_episodes=self.__get_downloaded_episodes(subscribe)
|
||||
)
|
||||
|
||||
# 遍历缓存种子
|
||||
_match_context = []
|
||||
for domain, contexts in torrents.items():
|
||||
with self._rlock:
|
||||
logger.debug(f"match lock acquired at {datetime.now()}")
|
||||
# 所有订阅
|
||||
subscribes = self.subscribeoper.list(self.get_states_for_search('R'))
|
||||
# 遍历订阅
|
||||
for subscribe in subscribes:
|
||||
if global_vars.is_system_stopped:
|
||||
break
|
||||
if domains and domain not in domains:
|
||||
logger.info(f'开始匹配订阅,标题:{subscribe.name} ...')
|
||||
mediakey = subscribe.tmdbid or subscribe.doubanid
|
||||
# 生成元数据
|
||||
meta = MetaInfo(subscribe.name)
|
||||
meta.year = subscribe.year
|
||||
meta.begin_season = subscribe.season or None
|
||||
try:
|
||||
meta.type = MediaType(subscribe.type)
|
||||
except ValueError:
|
||||
logger.error(f'订阅 {subscribe.name} 类型错误:{subscribe.type}')
|
||||
continue
|
||||
logger.debug(f'开始匹配站点:{domain},共缓存了 {len(contexts)} 个种子...')
|
||||
for context in contexts:
|
||||
# 提取信息
|
||||
torrent_meta = copy.deepcopy(context.meta_info)
|
||||
torrent_mediainfo = copy.deepcopy(context.media_info)
|
||||
torrent_info = context.torrent_info
|
||||
|
||||
# 不在订阅站点范围的不处理
|
||||
sub_sites = self.get_sub_sites(subscribe)
|
||||
if sub_sites and torrent_info.site not in sub_sites:
|
||||
logger.debug(f"{torrent_info.site_name} - {torrent_info.title} 不符合订阅站点要求")
|
||||
continue
|
||||
|
||||
# 有自定义识别词时,需要判断是否需要重新识别
|
||||
if subscribe.custom_words:
|
||||
_, apply_words = WordsMatcher().prepare(torrent_info.title,
|
||||
custom_words=subscribe.custom_words.split("\n"))
|
||||
if apply_words:
|
||||
logger.info(
|
||||
f'{torrent_info.site_name} - {torrent_info.title} 因订阅存在自定义识别词,重新识别元数据...')
|
||||
# 重新识别元数据
|
||||
torrent_meta = MetaInfo(title=torrent_info.title, subtitle=torrent_info.description,
|
||||
custom_words=subscribe.custom_words)
|
||||
# 媒体信息需要重新识别
|
||||
torrent_mediainfo = None
|
||||
|
||||
# 先判断是否有没识别的种子,否则重新识别
|
||||
if not torrent_mediainfo \
|
||||
or (not torrent_mediainfo.tmdb_id and not torrent_mediainfo.douban_id):
|
||||
# 避免重复处理
|
||||
_cache_key = f"{torrent_meta.org_string}_{torrent_info.description}"
|
||||
if _cache_key not in _recognize_cached:
|
||||
_recognize_cached.append(_cache_key)
|
||||
# 重新识别媒体信息
|
||||
torrent_mediainfo = self.recognize_media(meta=torrent_meta)
|
||||
if torrent_mediainfo:
|
||||
# 更新种子缓存
|
||||
context.media_info = torrent_mediainfo
|
||||
if not torrent_mediainfo:
|
||||
# 通过标题匹配兜底
|
||||
logger.warn(
|
||||
f'{torrent_info.site_name} - {torrent_info.title} 重新识别失败,尝试通过标题匹配...')
|
||||
if self.torrenthelper.match_torrent(mediainfo=mediainfo,
|
||||
torrent_meta=torrent_meta,
|
||||
torrent=torrent_info):
|
||||
# 匹配成功
|
||||
logger.info(
|
||||
f'{mediainfo.title_year} 通过标题匹配到可选资源:{torrent_info.site_name} - {torrent_info.title}')
|
||||
# 更新种子缓存
|
||||
torrent_mediainfo = mediainfo
|
||||
context.media_info = mediainfo
|
||||
else:
|
||||
continue
|
||||
|
||||
# 直接比对媒体信息
|
||||
if torrent_mediainfo and (torrent_mediainfo.tmdb_id or torrent_mediainfo.douban_id):
|
||||
if torrent_mediainfo.type != mediainfo.type:
|
||||
continue
|
||||
if torrent_mediainfo.tmdb_id \
|
||||
and torrent_mediainfo.tmdb_id != mediainfo.tmdb_id:
|
||||
continue
|
||||
if torrent_mediainfo.douban_id \
|
||||
and torrent_mediainfo.douban_id != mediainfo.douban_id:
|
||||
continue
|
||||
logger.info(
|
||||
f'{mediainfo.title_year} 通过媒体信ID匹配到可选资源:{torrent_info.site_name} - {torrent_info.title}')
|
||||
# 订阅的站点域名列表
|
||||
domains = []
|
||||
if subscribe.sites:
|
||||
domains = self.siteoper.get_domains_by_ids(subscribe.sites)
|
||||
# 识别媒体信息
|
||||
mediainfo: MediaInfo = self.recognize_media(meta=meta, mtype=meta.type,
|
||||
tmdbid=subscribe.tmdbid,
|
||||
doubanid=subscribe.doubanid,
|
||||
cache=False)
|
||||
if not mediainfo:
|
||||
logger.warn(
|
||||
f'未识别到媒体信息,标题:{subscribe.name},tmdbid:{subscribe.tmdbid},doubanid:{subscribe.doubanid}')
|
||||
continue
|
||||
# 非洗版
|
||||
if not subscribe.best_version:
|
||||
# 每季总集数
|
||||
totals = {}
|
||||
if subscribe.season and subscribe.total_episode:
|
||||
totals = {
|
||||
subscribe.season: subscribe.total_episode
|
||||
}
|
||||
# 查询缺失的媒体信息
|
||||
exist_flag, no_exists = self.downloadchain.get_no_exists_info(
|
||||
meta=meta,
|
||||
mediainfo=mediainfo,
|
||||
totals=totals
|
||||
)
|
||||
else:
|
||||
# 洗版
|
||||
exist_flag = False
|
||||
if meta.type == MediaType.TV:
|
||||
no_exists = {
|
||||
mediakey: {
|
||||
subscribe.season: NotExistMediaInfo(
|
||||
season=subscribe.season,
|
||||
episodes=[],
|
||||
total_episode=subscribe.total_episode,
|
||||
start_episode=subscribe.start_episode or 1)
|
||||
}
|
||||
}
|
||||
else:
|
||||
continue
|
||||
no_exists = {}
|
||||
|
||||
# 如果是电视剧
|
||||
if torrent_mediainfo.type == MediaType.TV:
|
||||
# 有多季的不要
|
||||
if len(torrent_meta.season_list) > 1:
|
||||
logger.debug(f'{torrent_info.title} 有多季,不处理')
|
||||
# 已存在
|
||||
if exist_flag:
|
||||
logger.info(f'{mediainfo.title_year} 媒体库中已存在')
|
||||
self.finish_subscribe_or_not(subscribe=subscribe, meta=meta, mediainfo=mediainfo, force=True)
|
||||
continue
|
||||
|
||||
# 电视剧订阅
|
||||
if meta.type == MediaType.TV:
|
||||
# 整合实际缺失集与订阅开始集结束集,同时剔除已下载的集数
|
||||
no_exists = self.__get_subscribe_no_exits(
|
||||
subscribe_name=f'{subscribe.name} {meta.season}',
|
||||
no_exists=no_exists,
|
||||
mediakey=mediakey,
|
||||
begin_season=meta.begin_season,
|
||||
total_episode=subscribe.total_episode,
|
||||
start_episode=subscribe.start_episode,
|
||||
downloaded_episodes=self.__get_downloaded_episodes(subscribe)
|
||||
)
|
||||
|
||||
# 遍历缓存种子
|
||||
_match_context = []
|
||||
for domain, contexts in torrents.items():
|
||||
if global_vars.is_system_stopped:
|
||||
break
|
||||
if domains and domain not in domains:
|
||||
continue
|
||||
logger.debug(f'开始匹配站点:{domain},共缓存了 {len(contexts)} 个种子...')
|
||||
for context in contexts:
|
||||
# 提取信息
|
||||
torrent_meta = copy.deepcopy(context.meta_info)
|
||||
torrent_mediainfo = copy.deepcopy(context.media_info)
|
||||
torrent_info = context.torrent_info
|
||||
|
||||
# 不在订阅站点范围的不处理
|
||||
sub_sites = self.get_sub_sites(subscribe)
|
||||
if sub_sites and torrent_info.site not in sub_sites:
|
||||
logger.debug(f"{torrent_info.site_name} - {torrent_info.title} 不符合订阅站点要求")
|
||||
continue
|
||||
# 比对季
|
||||
if torrent_meta.begin_season:
|
||||
if meta.begin_season != torrent_meta.begin_season:
|
||||
|
||||
# 有自定义识别词时,需要判断是否需要重新识别
|
||||
if subscribe.custom_words:
|
||||
_, apply_words = WordsMatcher().prepare(torrent_info.title,
|
||||
custom_words=subscribe.custom_words.split("\n"))
|
||||
if apply_words:
|
||||
logger.info(
|
||||
f'{torrent_info.site_name} - {torrent_info.title} 因订阅存在自定义识别词,重新识别元数据...')
|
||||
# 重新识别元数据
|
||||
torrent_meta = MetaInfo(title=torrent_info.title, subtitle=torrent_info.description,
|
||||
custom_words=subscribe.custom_words)
|
||||
# 媒体信息需要重新识别
|
||||
torrent_mediainfo = None
|
||||
|
||||
# 先判断是否有没识别的种子,否则重新识别
|
||||
if not torrent_mediainfo \
|
||||
or (not torrent_mediainfo.tmdb_id and not torrent_mediainfo.douban_id):
|
||||
# 避免重复处理
|
||||
_cache_key = f"{torrent_meta.org_string}_{torrent_info.description}"
|
||||
if _cache_key not in _recognize_cached:
|
||||
_recognize_cached.append(_cache_key)
|
||||
# 重新识别媒体信息
|
||||
torrent_mediainfo = self.recognize_media(meta=torrent_meta)
|
||||
if torrent_mediainfo:
|
||||
# 更新种子缓存
|
||||
context.media_info = torrent_mediainfo
|
||||
if not torrent_mediainfo:
|
||||
# 通过标题匹配兜底
|
||||
logger.warn(
|
||||
f'{torrent_info.site_name} - {torrent_info.title} 重新识别失败,尝试通过标题匹配...')
|
||||
if self.torrenthelper.match_torrent(mediainfo=mediainfo,
|
||||
torrent_meta=torrent_meta,
|
||||
torrent=torrent_info):
|
||||
# 匹配成功
|
||||
logger.info(
|
||||
f'{mediainfo.title_year} 通过标题匹配到可选资源:{torrent_info.site_name} - {torrent_info.title}')
|
||||
# 更新种子缓存
|
||||
torrent_mediainfo = mediainfo
|
||||
context.media_info = mediainfo
|
||||
else:
|
||||
continue
|
||||
|
||||
# 直接比对媒体信息
|
||||
if torrent_mediainfo and (torrent_mediainfo.tmdb_id or torrent_mediainfo.douban_id):
|
||||
if torrent_mediainfo.type != mediainfo.type:
|
||||
continue
|
||||
if torrent_mediainfo.tmdb_id \
|
||||
and torrent_mediainfo.tmdb_id != mediainfo.tmdb_id:
|
||||
continue
|
||||
if torrent_mediainfo.douban_id \
|
||||
and torrent_mediainfo.douban_id != mediainfo.douban_id:
|
||||
continue
|
||||
logger.info(
|
||||
f'{mediainfo.title_year} 通过媒体信ID匹配到可选资源:{torrent_info.site_name} - {torrent_info.title}')
|
||||
else:
|
||||
continue
|
||||
|
||||
# 如果是电视剧
|
||||
if torrent_mediainfo.type == MediaType.TV:
|
||||
# 有多季的不要
|
||||
if len(torrent_meta.season_list) > 1:
|
||||
logger.debug(f'{torrent_info.title} 有多季,不处理')
|
||||
continue
|
||||
# 比对季
|
||||
if torrent_meta.begin_season:
|
||||
if meta.begin_season != torrent_meta.begin_season:
|
||||
logger.debug(f'{torrent_info.title} 季不匹配')
|
||||
continue
|
||||
elif meta.begin_season != 1:
|
||||
logger.debug(f'{torrent_info.title} 季不匹配')
|
||||
continue
|
||||
elif meta.begin_season != 1:
|
||||
logger.debug(f'{torrent_info.title} 季不匹配')
|
||||
continue
|
||||
# 非洗版
|
||||
if not subscribe.best_version:
|
||||
# 不是缺失的剧集不要
|
||||
if no_exists and no_exists.get(mediakey):
|
||||
# 缺失集
|
||||
no_exists_info = no_exists.get(mediakey).get(subscribe.season)
|
||||
if no_exists_info:
|
||||
# 是否有交集
|
||||
if no_exists_info.episodes and \
|
||||
torrent_meta.episode_list and \
|
||||
not set(no_exists_info.episodes).intersection(
|
||||
set(torrent_meta.episode_list)
|
||||
):
|
||||
logger.debug(
|
||||
f'{torrent_info.title} 对应剧集 {torrent_meta.episode_list} 未包含缺失的剧集'
|
||||
)
|
||||
# 非洗版
|
||||
if not subscribe.best_version:
|
||||
# 不是缺失的剧集不要
|
||||
if no_exists and no_exists.get(mediakey):
|
||||
# 缺失集
|
||||
no_exists_info = no_exists.get(mediakey).get(subscribe.season)
|
||||
if no_exists_info:
|
||||
# 是否有交集
|
||||
if no_exists_info.episodes and \
|
||||
torrent_meta.episode_list and \
|
||||
not set(no_exists_info.episodes).intersection(
|
||||
set(torrent_meta.episode_list)
|
||||
):
|
||||
logger.debug(
|
||||
f'{torrent_info.title} 对应剧集 {torrent_meta.episode_list} 未包含缺失的剧集'
|
||||
)
|
||||
continue
|
||||
else:
|
||||
# 洗版时,非整季不要
|
||||
if meta.type == MediaType.TV:
|
||||
if torrent_meta.episode_list:
|
||||
logger.debug(f'{subscribe.name} 正在洗版,{torrent_info.title} 不是整季')
|
||||
continue
|
||||
else:
|
||||
# 洗版时,非整季不要
|
||||
if meta.type == MediaType.TV:
|
||||
if torrent_meta.episode_list:
|
||||
logger.debug(f'{subscribe.name} 正在洗版,{torrent_info.title} 不是整季')
|
||||
continue
|
||||
|
||||
# 匹配订阅附加参数
|
||||
if not self.torrenthelper.filter_torrent(torrent_info=torrent_info,
|
||||
filter_params=self.get_params(subscribe)):
|
||||
continue
|
||||
|
||||
# 优先级过滤规则
|
||||
if subscribe.best_version:
|
||||
rule_groups = subscribe.filter_groups \
|
||||
or self.systemconfig.get(SystemConfigKey.BestVersionFilterRuleGroups)
|
||||
else:
|
||||
rule_groups = subscribe.filter_groups \
|
||||
or self.systemconfig.get(SystemConfigKey.SubscribeFilterRuleGroups)
|
||||
result: List[TorrentInfo] = self.filter_torrents(
|
||||
rule_groups=rule_groups,
|
||||
torrent_list=[torrent_info],
|
||||
mediainfo=torrent_mediainfo)
|
||||
if result is not None and not result:
|
||||
# 不符合过滤规则
|
||||
logger.debug(f"{torrent_info.title} 不匹配过滤规则")
|
||||
continue
|
||||
|
||||
# 洗版时,优先级小于已下载优先级的不要
|
||||
if subscribe.best_version:
|
||||
if subscribe.current_priority \
|
||||
and torrent_info.pri_order <= subscribe.current_priority:
|
||||
logger.info(f'{subscribe.name} 正在洗版,{torrent_info.title} 优先级低于或等于已下载优先级')
|
||||
# 匹配订阅附加参数
|
||||
if not self.torrenthelper.filter_torrent(torrent_info=torrent_info,
|
||||
filter_params=self.get_params(subscribe)):
|
||||
continue
|
||||
|
||||
# 匹配成功
|
||||
logger.info(f'{mediainfo.title_year} 匹配成功:{torrent_info.title}')
|
||||
_match_context.append(context)
|
||||
# 优先级过滤规则
|
||||
if subscribe.best_version:
|
||||
rule_groups = subscribe.filter_groups \
|
||||
or self.systemconfig.get(SystemConfigKey.BestVersionFilterRuleGroups)
|
||||
else:
|
||||
rule_groups = subscribe.filter_groups \
|
||||
or self.systemconfig.get(SystemConfigKey.SubscribeFilterRuleGroups)
|
||||
result: List[TorrentInfo] = self.filter_torrents(
|
||||
rule_groups=rule_groups,
|
||||
torrent_list=[torrent_info],
|
||||
mediainfo=torrent_mediainfo)
|
||||
if result is not None and not result:
|
||||
# 不符合过滤规则
|
||||
logger.debug(f"{torrent_info.title} 不匹配过滤规则")
|
||||
continue
|
||||
|
||||
if not _match_context:
|
||||
# 未匹配到资源
|
||||
logger.info(f'{mediainfo.title_year} 未匹配到符合条件的资源')
|
||||
self.finish_subscribe_or_not(subscribe=subscribe, meta=meta,
|
||||
mediainfo=mediainfo, lefts=no_exists)
|
||||
continue
|
||||
# 洗版时,优先级小于已下载优先级的不要
|
||||
if subscribe.best_version:
|
||||
if subscribe.current_priority \
|
||||
and torrent_info.pri_order <= subscribe.current_priority:
|
||||
logger.info(
|
||||
f'{subscribe.name} 正在洗版,{torrent_info.title} 优先级低于或等于已下载优先级')
|
||||
continue
|
||||
|
||||
# 开始批量择优下载
|
||||
logger.info(f'{mediainfo.title_year} 匹配完成,共匹配到{len(_match_context)}个资源')
|
||||
downloads, lefts = self.downloadchain.batch_download(contexts=_match_context,
|
||||
no_exists=no_exists,
|
||||
userid=subscribe.username,
|
||||
username=subscribe.username,
|
||||
save_path=subscribe.save_path,
|
||||
media_category=subscribe.media_category)
|
||||
# 判断是否要完成订阅
|
||||
self.finish_subscribe_or_not(subscribe=subscribe, meta=meta, mediainfo=mediainfo,
|
||||
downloads=downloads, lefts=lefts)
|
||||
# 匹配成功
|
||||
logger.info(f'{mediainfo.title_year} 匹配成功:{torrent_info.title}')
|
||||
_match_context.append(context)
|
||||
|
||||
if not _match_context:
|
||||
# 未匹配到资源
|
||||
logger.info(f'{mediainfo.title_year} 未匹配到符合条件的资源')
|
||||
self.finish_subscribe_or_not(subscribe=subscribe, meta=meta,
|
||||
mediainfo=mediainfo, lefts=no_exists)
|
||||
continue
|
||||
|
||||
# 开始批量择优下载
|
||||
logger.info(f'{mediainfo.title_year} 匹配完成,共匹配到{len(_match_context)}个资源')
|
||||
downloads, lefts = self.downloadchain.batch_download(contexts=_match_context,
|
||||
no_exists=no_exists,
|
||||
userid=subscribe.username,
|
||||
username=subscribe.username,
|
||||
save_path=subscribe.save_path,
|
||||
media_category=subscribe.media_category,
|
||||
downloader=subscribe.downloader,
|
||||
source="Subscribe")
|
||||
# 判断是否要完成订阅
|
||||
self.finish_subscribe_or_not(subscribe=subscribe, meta=meta, mediainfo=mediainfo,
|
||||
downloads=downloads, lefts=lefts)
|
||||
logger.debug(f"match Lock released at {datetime.now()}")
|
||||
|
||||
def check(self):
|
||||
"""
|
||||
@@ -916,6 +935,9 @@ class SubscribeChain(ChainBase):
|
||||
"""
|
||||
完成订阅
|
||||
"""
|
||||
# 如果订阅状态为待定(P),说明订阅信息尚未完全更新,无法完成订阅
|
||||
if subscribe.state == "P":
|
||||
return
|
||||
# 完成订阅
|
||||
msgstr = "订阅"
|
||||
if bestversion:
|
||||
@@ -1241,6 +1263,9 @@ class SubscribeChain(ChainBase):
|
||||
file_path=file.fullpath,
|
||||
)
|
||||
if subscribe.type == MediaType.TV.value:
|
||||
season_number = file_meta.begin_season
|
||||
if season_number and season_number != subscribe.season:
|
||||
continue
|
||||
episode_number = file_meta.begin_episode
|
||||
if episode_number and episodes.get(episode_number):
|
||||
episodes[episode_number].download.append(file_info)
|
||||
@@ -1278,6 +1303,9 @@ class SubscribeChain(ChainBase):
|
||||
file_path=fileitem.path,
|
||||
)
|
||||
if subscribe.type == MediaType.TV.value:
|
||||
season_number = file_meta.begin_season
|
||||
if season_number and season_number != subscribe.season:
|
||||
continue
|
||||
episode_number = file_meta.begin_episode
|
||||
if episode_number and episodes.get(episode_number):
|
||||
episodes[episode_number].library.append(file_info)
|
||||
@@ -1288,3 +1316,19 @@ class SubscribeChain(ChainBase):
|
||||
subscribe_info.subscribe = Subscribe(**subscribe.to_dict())
|
||||
subscribe_info.episodes = episodes
|
||||
return subscribe_info
|
||||
|
||||
@staticmethod
|
||||
def get_states_for_search(state: str) -> str:
|
||||
"""
|
||||
根据给定的状态返回实际需要搜索的状态列表,支持多个状态用逗号分隔
|
||||
:param state: 订阅状态
|
||||
N: New(新建,未处理)
|
||||
R: Resolved(订阅中)
|
||||
P: Pending(待定,信息待进一步更新,允许搜索,不允许完成)
|
||||
S: Suspended(暂停,订阅不参与任何动作,暂时停止处理)
|
||||
:return: 需要查询的状态列表(多个状态用逗号分隔)
|
||||
"""
|
||||
# 如果状态是 R 或 P,则视为一起搜索,返回 R,P 作为查询条件
|
||||
if state in ["R", "P"]:
|
||||
return "R,P"
|
||||
return state
|
||||
|
||||
@@ -120,6 +120,7 @@ class TorrentsChain(ChainBase, metaclass=Singleton):
|
||||
site_ua=site.get("ua") or settings.USER_AGENT,
|
||||
site_proxy=site.get("proxy"),
|
||||
site_order=site.get("pri"),
|
||||
site_downloader=site.get("downloader"),
|
||||
title=item.get("title"),
|
||||
enclosure=item.get("enclosure"),
|
||||
page_url=item.get("link"),
|
||||
@@ -174,7 +175,7 @@ class TorrentsChain(ChainBase, metaclass=Singleton):
|
||||
# 按pubdate降序排列
|
||||
torrents.sort(key=lambda x: x.pubdate or '', reverse=True)
|
||||
# 取前N条
|
||||
torrents = torrents[:settings.CACHE_CONF.get('refresh')]
|
||||
torrents = torrents[:settings.CACHE_CONF["refresh"]]
|
||||
if torrents:
|
||||
# 过滤出没有处理过的种子
|
||||
torrents = [torrent for torrent in torrents
|
||||
@@ -214,8 +215,8 @@ class TorrentsChain(ChainBase, metaclass=Singleton):
|
||||
else:
|
||||
torrents_cache[domain].append(context)
|
||||
# 如果超过了限制条数则移除掉前面的
|
||||
if len(torrents_cache[domain]) > settings.CACHE_CONF.get('torrents'):
|
||||
torrents_cache[domain] = torrents_cache[domain][-settings.CACHE_CONF.get('torrents'):]
|
||||
if len(torrents_cache[domain]) > settings.CACHE_CONF["torrents"]:
|
||||
torrents_cache[domain] = torrents_cache[domain][-settings.CACHE_CONF["torrents"]:]
|
||||
# 回收资源
|
||||
del torrents
|
||||
else:
|
||||
|
||||
@@ -10,7 +10,7 @@ from app.chain.tmdb import TmdbChain
|
||||
from app.core.config import settings, global_vars
|
||||
from app.core.context import MediaInfo
|
||||
from app.core.meta import MetaBase
|
||||
from app.core.metainfo import MetaInfoPath
|
||||
from app.core.metainfo import MetaInfoPath, MetaInfo
|
||||
from app.db.downloadhistory_oper import DownloadHistoryOper
|
||||
from app.db.models.downloadhistory import DownloadHistory
|
||||
from app.db.models.transferhistory import TransferHistory
|
||||
@@ -120,7 +120,7 @@ class TransferChain(ChainBase):
|
||||
# 非MoviePilot下载的任务,按文件识别
|
||||
mediainfo = None
|
||||
|
||||
# 执行整理
|
||||
# 执行整理,匹配源目录
|
||||
state, errmsg = self.__do_transfer(
|
||||
fileitem=FileItem(
|
||||
storage="local",
|
||||
@@ -131,7 +131,9 @@ class TransferChain(ChainBase):
|
||||
extension=file_path.suffix.lstrip('.'),
|
||||
),
|
||||
mediainfo=mediainfo,
|
||||
download_hash=torrent.hash
|
||||
downloader=torrent.downloader,
|
||||
download_hash=torrent.hash,
|
||||
src_match=True
|
||||
)
|
||||
|
||||
# 设置下载任务状态
|
||||
@@ -147,8 +149,10 @@ class TransferChain(ChainBase):
|
||||
target_directory: TransferDirectoryConf = None,
|
||||
target_storage: str = None, target_path: Path = None,
|
||||
transfer_type: str = None, scrape: bool = None,
|
||||
season: int = None, epformat: EpisodeFormat = None,
|
||||
min_filesize: int = 0, download_hash: str = None, force: bool = False) -> Tuple[bool, str]:
|
||||
library_type_folder: bool = None, library_category_folder: bool = None,
|
||||
season: int = None, epformat: EpisodeFormat = None, min_filesize: int = 0,
|
||||
downloader: str = None, download_hash: str = None,
|
||||
force: bool = False, src_match: bool = False) -> Tuple[bool, str]:
|
||||
"""
|
||||
执行一个复杂目录的整理操作
|
||||
:param fileitem: 文件项
|
||||
@@ -159,11 +163,15 @@ class TransferChain(ChainBase):
|
||||
:param target_path: 目标路径
|
||||
:param transfer_type: 整理类型
|
||||
:param scrape: 是否刮削元数据
|
||||
:param library_type_folder: 媒体库类型子目录
|
||||
:param library_category_folder: 媒体库类别子目录
|
||||
:param season: 季
|
||||
:param epformat: 剧集格式
|
||||
:param min_filesize: 最小文件大小(MB)
|
||||
:param downloader: 下载器
|
||||
:param download_hash: 下载记录hash
|
||||
:param force: 是否强制整理
|
||||
:param src_match: 是否源目录匹配
|
||||
返回:成功标识,错误信息
|
||||
"""
|
||||
|
||||
@@ -181,8 +189,6 @@ class TransferChain(ChainBase):
|
||||
|
||||
# 汇总季集清单
|
||||
season_episodes: Dict[Tuple, List[int]] = {}
|
||||
# 汇总元数据
|
||||
metas: Dict[Tuple, MetaBase] = {}
|
||||
# 汇总媒体信息
|
||||
medias: Dict[Tuple, MediaInfo] = {}
|
||||
# 汇总整理信息
|
||||
@@ -202,6 +208,8 @@ class TransferChain(ChainBase):
|
||||
skip_num = 0
|
||||
# 本次整理方式
|
||||
current_transfer_type = transfer_type
|
||||
# 是否全部成功
|
||||
all_success = True
|
||||
|
||||
# 获取待整理路径清单
|
||||
trans_items = self.__get_trans_fileitems(fileitem)
|
||||
@@ -281,6 +289,7 @@ class TransferChain(ChainBase):
|
||||
# 计数
|
||||
processed_num += 1
|
||||
skip_num += 1
|
||||
all_success = False
|
||||
continue
|
||||
|
||||
# 整理成功的不再处理
|
||||
@@ -291,6 +300,7 @@ class TransferChain(ChainBase):
|
||||
# 计数
|
||||
processed_num += 1
|
||||
skip_num += 1
|
||||
all_success = False
|
||||
continue
|
||||
|
||||
# 更新进度
|
||||
@@ -314,6 +324,7 @@ class TransferChain(ChainBase):
|
||||
# 计数
|
||||
processed_num += 1
|
||||
fail_num += 1
|
||||
all_success = False
|
||||
continue
|
||||
|
||||
# 自定义识别
|
||||
@@ -350,6 +361,7 @@ class TransferChain(ChainBase):
|
||||
# 计数
|
||||
processed_num += 1
|
||||
fail_num += 1
|
||||
all_success = False
|
||||
continue
|
||||
|
||||
# 如果未开启新增已入库媒体是否跟随TMDB信息变化则根据tmdbid查询之前的title
|
||||
@@ -379,16 +391,38 @@ class TransferChain(ChainBase):
|
||||
if download_file:
|
||||
download_hash = download_file.download_hash
|
||||
|
||||
# 查询整理目标目录
|
||||
dir_info = None
|
||||
if not target_directory:
|
||||
if src_match:
|
||||
# 按源目录匹配,以便找到更合适的目录配置
|
||||
dir_info = self.directoryhelper.get_dir(media=file_mediainfo,
|
||||
storage=file_item.storage,
|
||||
src_path=file_path,
|
||||
target_storage=target_storage)
|
||||
elif target_path:
|
||||
# 指定目标路径,`手动整理`场景下使用,忽略源目录匹配,使用指定目录匹配
|
||||
dir_info = self.directoryhelper.get_dir(media=file_mediainfo,
|
||||
dest_path=target_path,
|
||||
target_storage=target_storage)
|
||||
else:
|
||||
# 未指定目标路径,根据媒体信息获取目标目录
|
||||
dir_info = self.directoryhelper.get_dir(file_mediainfo,
|
||||
storage=file_item.storage,
|
||||
target_storage=target_storage)
|
||||
|
||||
# 执行整理
|
||||
transferinfo: TransferInfo = self.transfer(fileitem=file_item,
|
||||
meta=file_meta,
|
||||
mediainfo=file_mediainfo,
|
||||
target_directory=target_directory,
|
||||
target_directory=target_directory or dir_info,
|
||||
target_storage=target_storage,
|
||||
target_path=target_path,
|
||||
transfer_type=transfer_type,
|
||||
episodes_info=episodes_info,
|
||||
scrape=scrape)
|
||||
scrape=scrape,
|
||||
library_type_folder=library_type_folder,
|
||||
library_category_folder=library_category_folder)
|
||||
if not transferinfo:
|
||||
logger.error("文件整理模块运行失败")
|
||||
return False, "文件整理模块运行失败"
|
||||
@@ -416,6 +450,7 @@ class TransferChain(ChainBase):
|
||||
# 计数
|
||||
processed_num += 1
|
||||
fail_num += 1
|
||||
all_success = False
|
||||
continue
|
||||
|
||||
# 汇总信息
|
||||
@@ -423,7 +458,6 @@ class TransferChain(ChainBase):
|
||||
mkey = (file_mediainfo.tmdb_id, file_meta.begin_season)
|
||||
if mkey not in medias:
|
||||
# 新增信息
|
||||
metas[mkey] = file_meta
|
||||
medias[mkey] = file_mediainfo
|
||||
season_episodes[mkey] = file_meta.episode_list
|
||||
transfers[mkey] = transferinfo
|
||||
@@ -447,6 +481,15 @@ class TransferChain(ChainBase):
|
||||
transferinfo=transferinfo
|
||||
)
|
||||
|
||||
# 整理完成事件
|
||||
self.eventmanager.send_event(EventType.TransferComplete, {
|
||||
'meta': file_meta,
|
||||
'mediainfo': file_mediainfo,
|
||||
'transferinfo': transferinfo,
|
||||
'downloader': downloader,
|
||||
'download_hash': download_hash,
|
||||
})
|
||||
|
||||
# 更新进度
|
||||
processed_num += 1
|
||||
self.progress.update(value=processed_num / total_num * 100,
|
||||
@@ -459,8 +502,9 @@ class TransferChain(ChainBase):
|
||||
|
||||
# 执行后续处理
|
||||
for mkey, media in medias.items():
|
||||
transfer_meta = metas[mkey]
|
||||
transfer_info = transfers[mkey]
|
||||
transfer_meta = MetaInfo(transfer_info.target_diritem.name)
|
||||
transfer_meta.begin_season = mkey[1]
|
||||
# 发送通知
|
||||
if transfer_info.need_notify:
|
||||
se_str = None
|
||||
@@ -477,30 +521,16 @@ class TransferChain(ChainBase):
|
||||
'mediainfo': media,
|
||||
'fileitem': transfer_info.target_diritem
|
||||
})
|
||||
# 整理完成事件
|
||||
self.eventmanager.send_event(EventType.TransferComplete, {
|
||||
'meta': transfer_meta,
|
||||
'mediainfo': media,
|
||||
'transferinfo': transfer_info,
|
||||
'download_hash': download_hash,
|
||||
})
|
||||
|
||||
# 移动模式处理
|
||||
if current_transfer_type in ["move"]:
|
||||
if all_success and current_transfer_type in ["move"]:
|
||||
# 下载器hash
|
||||
if download_hash:
|
||||
if self.remove_torrents(download_hash):
|
||||
if self.remove_torrents(download_hash, downloader=downloader):
|
||||
logger.info(f"移动模式删除种子成功:{download_hash} ")
|
||||
# 删除残留文件
|
||||
# 删除残留目录
|
||||
if fileitem:
|
||||
logger.warn(f"删除残留文件夹:【{fileitem.storage}】{fileitem.path}")
|
||||
if self.storagechain.delete_file(fileitem):
|
||||
# 删除空的父目录
|
||||
dir_item = self.storagechain.get_parent_item(fileitem)
|
||||
if dir_item:
|
||||
if not self.storagechain.any_files(dir_item, extensions=settings.RMT_MEDIAEXT):
|
||||
logger.warn(f"正在删除空目录:【{dir_item.storage}】{dir_item.path}")
|
||||
return self.storagechain.delete_file(dir_item)
|
||||
self.storagechain.delete_media_file(fileitem, delete_self=False)
|
||||
|
||||
# 结束进度
|
||||
logger.info(f"{fileitem.path} 整理完成,共 {total_num} 个文件,"
|
||||
@@ -664,6 +694,8 @@ class TransferChain(ChainBase):
|
||||
epformat: EpisodeFormat = None,
|
||||
min_filesize: int = 0,
|
||||
scrape: bool = None,
|
||||
library_type_folder: bool = None,
|
||||
library_category_folder: bool = None,
|
||||
force: bool = False) -> Tuple[bool, Union[str, list]]:
|
||||
"""
|
||||
手动整理,支持复杂条件,带进度显示
|
||||
@@ -678,6 +710,8 @@ class TransferChain(ChainBase):
|
||||
:param epformat: 剧集格式
|
||||
:param min_filesize: 最小文件大小(MB)
|
||||
:param scrape: 是否刮削元数据
|
||||
:param library_type_folder: 是否按类型建立目录
|
||||
:param library_category_folder: 是否按类别建立目录
|
||||
:param force: 是否强制整理
|
||||
"""
|
||||
logger.info(f"手动整理:{fileitem.path} ...")
|
||||
@@ -706,6 +740,8 @@ class TransferChain(ChainBase):
|
||||
epformat=epformat,
|
||||
min_filesize=min_filesize,
|
||||
scrape=scrape,
|
||||
library_type_folder=library_type_folder,
|
||||
library_category_folder=library_category_folder,
|
||||
force=force,
|
||||
)
|
||||
if not state:
|
||||
@@ -724,6 +760,8 @@ class TransferChain(ChainBase):
|
||||
epformat=epformat,
|
||||
min_filesize=min_filesize,
|
||||
scrape=scrape,
|
||||
library_type_folder=library_type_folder,
|
||||
library_category_folder=library_category_folder,
|
||||
force=force)
|
||||
return state, errmsg
|
||||
|
||||
|
||||
@@ -8,7 +8,7 @@ from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional, Tuple, Type
|
||||
|
||||
from dotenv import set_key
|
||||
from pydantic import BaseModel, BaseSettings, validator
|
||||
from pydantic import BaseModel, BaseSettings, validator, Field
|
||||
|
||||
from app.log import logger
|
||||
from app.utils.system import SystemUtils
|
||||
@@ -36,7 +36,7 @@ class ConfigModel(BaseModel):
|
||||
# RESOURCE密钥
|
||||
RESOURCE_SECRET_KEY: str = secrets.token_urlsafe(32)
|
||||
# 允许的域名
|
||||
ALLOWED_HOSTS: list = ["*"]
|
||||
ALLOWED_HOSTS: list = Field(default_factory=lambda: ["*"])
|
||||
# TOKEN过期时间
|
||||
ACCESS_TOKEN_EXPIRE_MINUTES: int = 60 * 24 * 8
|
||||
# RESOURCE_TOKEN过期时间
|
||||
@@ -114,29 +114,39 @@ class ConfigModel(BaseModel):
|
||||
# 是否启用DOH解析域名
|
||||
DOH_ENABLE: bool = True
|
||||
# 使用 DOH 解析的域名列表
|
||||
DOH_DOMAINS: str = "api.themoviedb.org,api.tmdb.org,webservice.fanart.tv,api.github.com,github.com,raw.githubusercontent.com,api.telegram.org"
|
||||
DOH_DOMAINS: str = ("api.themoviedb.org,"
|
||||
"api.tmdb.org,"
|
||||
"webservice.fanart.tv,"
|
||||
"api.github.com,"
|
||||
"github.com,"
|
||||
"raw.githubusercontent.com,"
|
||||
"api.telegram.org")
|
||||
# DOH 解析服务器列表
|
||||
DOH_RESOLVERS: str = "1.0.0.1,1.1.1.1,9.9.9.9,149.112.112.112"
|
||||
# 支持的后缀格式
|
||||
RMT_MEDIAEXT: list = ['.mp4', '.mkv', '.ts', '.iso',
|
||||
'.rmvb', '.avi', '.mov', '.mpeg',
|
||||
'.mpg', '.wmv', '.3gp', '.asf',
|
||||
'.m4v', '.flv', '.m2ts', '.strm',
|
||||
'.tp', '.f4v']
|
||||
RMT_MEDIAEXT: list = Field(
|
||||
default_factory=lambda: ['.mp4', '.mkv', '.ts', '.iso',
|
||||
'.rmvb', '.avi', '.mov', '.mpeg',
|
||||
'.mpg', '.wmv', '.3gp', '.asf',
|
||||
'.m4v', '.flv', '.m2ts', '.strm',
|
||||
'.tp', '.f4v']
|
||||
)
|
||||
# 支持的字幕文件后缀格式
|
||||
RMT_SUBEXT: list = ['.srt', '.ass', '.ssa', '.sup']
|
||||
RMT_SUBEXT: list = Field(default_factory=lambda: ['.srt', '.ass', '.ssa', '.sup'])
|
||||
# 支持的音轨文件后缀格式
|
||||
RMT_AUDIO_TRACK_EXT: list = ['.mka']
|
||||
RMT_AUDIO_TRACK_EXT: list = Field(default_factory=lambda: ['.mka'])
|
||||
# 音轨文件后缀格式
|
||||
RMT_AUDIOEXT: list = ['.aac', '.ac3', '.amr', '.caf', '.cda', '.dsf',
|
||||
'.dff', '.kar', '.m4a', '.mp1', '.mp2', '.mp3',
|
||||
'.mid', '.mod', '.mka', '.mpc', '.nsf', '.ogg',
|
||||
'.pcm', '.rmi', '.s3m', '.snd', '.spx', '.tak',
|
||||
'.tta', '.vqf', '.wav', '.wma',
|
||||
'.aifc', '.aiff', '.alac', '.adif', '.adts',
|
||||
'.flac', '.midi', '.opus', '.sfalc']
|
||||
RMT_AUDIOEXT: list = Field(
|
||||
default_factory=lambda: ['.aac', '.ac3', '.amr', '.caf', '.cda', '.dsf',
|
||||
'.dff', '.kar', '.m4a', '.mp1', '.mp2', '.mp3',
|
||||
'.mid', '.mod', '.mka', '.mpc', '.nsf', '.ogg',
|
||||
'.pcm', '.rmi', '.s3m', '.snd', '.spx', '.tak',
|
||||
'.tta', '.vqf', '.wav', '.wma',
|
||||
'.aifc', '.aiff', '.alac', '.adif', '.adts',
|
||||
'.flac', '.midi', '.opus', '.sfalc']
|
||||
)
|
||||
# 下载器临时文件后缀
|
||||
DOWNLOAD_TMPEXT: list = ['.!qB', '.part']
|
||||
DOWNLOAD_TMPEXT: list = Field(default_factory=lambda: ['.!qb', '.part'])
|
||||
# 媒体服务器同步间隔(小时)
|
||||
MEDIASERVER_SYNC_INTERVAL: int = 6
|
||||
# 订阅模式
|
||||
@@ -189,7 +199,10 @@ class ConfigModel(BaseModel):
|
||||
# 服务器地址,对应 https://github.com/jxxghp/MoviePilot-Server 项目
|
||||
MP_SERVER_HOST: str = "https://movie-pilot.org"
|
||||
# 插件市场仓库地址,多个地址使用,分隔,地址以/结尾
|
||||
PLUGIN_MARKET: str = "https://github.com/jxxghp/MoviePilot-Plugins,https://github.com/thsrite/MoviePilot-Plugins,https://github.com/honue/MoviePilot-Plugins,https://github.com/InfinityPacer/MoviePilot-Plugins"
|
||||
PLUGIN_MARKET: str = ("https://github.com/jxxghp/MoviePilot-Plugins,"
|
||||
"https://github.com/thsrite/MoviePilot-Plugins,"
|
||||
"https://github.com/honue/MoviePilot-Plugins,"
|
||||
"https://github.com/InfinityPacer/MoviePilot-Plugins")
|
||||
# 插件安装数据共享
|
||||
PLUGIN_STATISTIC_SHARE: bool = True
|
||||
# 是否开启插件热加载
|
||||
@@ -206,11 +219,27 @@ class ConfigModel(BaseModel):
|
||||
BIG_MEMORY_MODE: bool = False
|
||||
# 全局图片缓存,将媒体图片缓存到本地
|
||||
GLOBAL_IMAGE_CACHE: bool = False
|
||||
# 是否启用编码探测的性能模式
|
||||
ENCODING_DETECTION_PERFORMANCE_MODE: bool = True
|
||||
# 编码探测的最低置信度阈值
|
||||
ENCODING_DETECTION_MIN_CONFIDENCE: float = 0.8
|
||||
# 允许的图片缓存域名
|
||||
SECURITY_IMAGE_DOMAINS: List[str] = ["image.tmdb.org", "static-mdb.v.geilijiasu.com", "doubanio.com", "lain.bgm.tv",
|
||||
"raw.githubusercontent.com", "github.com"]
|
||||
SECURITY_IMAGE_DOMAINS: List[str] = Field(
|
||||
default_factory=lambda: ["image.tmdb.org",
|
||||
"static-mdb.v.geilijiasu.com",
|
||||
"doubanio.com",
|
||||
"lain.bgm.tv",
|
||||
"raw.githubusercontent.com",
|
||||
"github.com"]
|
||||
)
|
||||
# 允许的图片文件后缀格式
|
||||
SECURITY_IMAGE_SUFFIXES: List[str] = [".jpg", ".jpeg", ".png", ".webp", ".gif", ".svg"]
|
||||
SECURITY_IMAGE_SUFFIXES: List[str] = Field(
|
||||
default_factory=lambda: [".jpg", ".jpeg", ".png", ".webp", ".gif", ".svg"]
|
||||
)
|
||||
# 重命名时支持的S0别名
|
||||
RENAME_FORMAT_S0_NAMES: List[str] = Field(
|
||||
default_factory=lambda: ["Specials", "SPs"]
|
||||
)
|
||||
|
||||
|
||||
class Settings(BaseSettings, ConfigModel):
|
||||
@@ -345,10 +374,9 @@ class Settings(BaseSettings, ConfigModel):
|
||||
logger.warning(message)
|
||||
|
||||
if field.name in os.environ:
|
||||
if is_converted:
|
||||
message = f"配置项 '{field.name}' 已在环境变量中设置,请手动更新以保持一致性"
|
||||
logger.warning(message)
|
||||
return False, message
|
||||
message = f"配置项 '{field.name}' 已在环境变量中设置,请手动更新以保持一致性"
|
||||
logger.warning(message)
|
||||
return False, message
|
||||
else:
|
||||
set_key(SystemUtils.get_env_path(), field.name, str(converted_value) if converted_value is not None else "")
|
||||
if is_converted:
|
||||
@@ -372,7 +400,7 @@ class Settings(BaseSettings, ConfigModel):
|
||||
field.default, key)
|
||||
# 如果没有抛出异常,则统一使用 converted_value 进行更新
|
||||
if needs_update or str(value) != str(converted_value):
|
||||
success, message = self.update_env_config(field, original_value, converted_value)
|
||||
success, message = self.update_env_config(field, value, converted_value)
|
||||
# 仅成功更新配置时,才更新内存
|
||||
if success:
|
||||
setattr(self, key, converted_value)
|
||||
@@ -437,22 +465,32 @@ class Settings(BaseSettings, ConfigModel):
|
||||
|
||||
@property
|
||||
def CACHE_CONF(self):
|
||||
"""
|
||||
{
|
||||
"torrents": "缓存种子数量",
|
||||
"refresh": "订阅刷新处理数量",
|
||||
"tmdb": "TMDB请求缓存数量",
|
||||
"douban": "豆瓣请求缓存数量",
|
||||
"fanart": "Fanart请求缓存数量",
|
||||
"meta": "元数据缓存过期时间(秒)"
|
||||
}
|
||||
"""
|
||||
if self.BIG_MEMORY_MODE:
|
||||
return {
|
||||
"torrents": 200,
|
||||
"refresh": 100,
|
||||
"tmdb": 1024,
|
||||
"refresh": 50,
|
||||
"torrents": 100,
|
||||
"douban": 512,
|
||||
"fanart": 512,
|
||||
"meta": (self.META_CACHE_EXPIRE or 168) * 3600
|
||||
"meta": (self.META_CACHE_EXPIRE or 24) * 3600
|
||||
}
|
||||
return {
|
||||
"torrents": 100,
|
||||
"refresh": 50,
|
||||
"tmdb": 256,
|
||||
"refresh": 30,
|
||||
"torrents": 50,
|
||||
"douban": 256,
|
||||
"fanart": 128,
|
||||
"meta": (self.META_CACHE_EXPIRE or 72) * 3600
|
||||
"meta": (self.META_CACHE_EXPIRE or 2) * 3600
|
||||
}
|
||||
|
||||
@property
|
||||
|
||||
@@ -23,6 +23,8 @@ class TorrentInfo:
|
||||
site_proxy: bool = False
|
||||
# 站点优先级
|
||||
site_order: int = 0
|
||||
# 站点下载器
|
||||
site_downloader: str = None
|
||||
# 种子名称
|
||||
title: str = None
|
||||
# 种子副标题
|
||||
|
||||
@@ -233,23 +233,29 @@ class EventManager(metaclass=Singleton):
|
||||
可视化所有事件处理器,包括是否被禁用的状态
|
||||
:return: 处理器列表,包含事件类型、处理器标识符、优先级(如果有)和状态
|
||||
"""
|
||||
|
||||
def parse_handler_data(data):
|
||||
"""
|
||||
解析处理器数据,判断是否包含优先级
|
||||
:param data: 订阅者数据,可能是元组或单一值
|
||||
:return: (priority, handler),若没有优先级则返回 (None, handler)
|
||||
"""
|
||||
if isinstance(data, tuple) and len(data) == 2:
|
||||
return data
|
||||
return None, data
|
||||
|
||||
handler_info = []
|
||||
# 统一处理广播事件和链式事件
|
||||
for event_type, subscribers in {**self.__broadcast_subscribers, **self.__chain_subscribers}.items():
|
||||
for handler_data in subscribers:
|
||||
if isinstance(subscribers, dict):
|
||||
priority, handler = handler_data
|
||||
else:
|
||||
priority = None
|
||||
handler = handler_data
|
||||
# 获取处理器的唯一标识符
|
||||
handler_id = self.__get_handler_identifier(handler)
|
||||
for handler_identifier, handler_data in subscribers.items():
|
||||
# 解析优先级和处理器
|
||||
priority, handler = parse_handler_data(handler_data)
|
||||
# 检查处理器的启用状态
|
||||
status = "enabled" if self.__is_handler_enabled(handler) else "disabled"
|
||||
# 构建处理器信息字典
|
||||
handler_dict = {
|
||||
"event_type": event_type.value,
|
||||
"handler_identifier": handler_id,
|
||||
"handler_identifier": handler_identifier,
|
||||
"status": status
|
||||
}
|
||||
if priority is not None:
|
||||
@@ -341,8 +347,17 @@ class EventManager(metaclass=Singleton):
|
||||
if not handlers:
|
||||
logger.debug(f"No handlers found for chain event: {event}")
|
||||
return False
|
||||
|
||||
# 过滤出启用的处理器
|
||||
enabled_handlers = {handler_id: (priority, handler) for handler_id, (priority, handler) in handlers.items()
|
||||
if self.__is_handler_enabled(handler)}
|
||||
|
||||
if not enabled_handlers:
|
||||
logger.debug(f"No enabled handlers found for chain event: {event}. Skipping execution.")
|
||||
return False
|
||||
|
||||
self.__log_event_lifecycle(event, "Started")
|
||||
for handler_id, (priority, handler) in handlers.items():
|
||||
for handler_id, (priority, handler) in enabled_handlers.items():
|
||||
start_time = time.time()
|
||||
self.__safe_invoke_handler(handler, event)
|
||||
logger.debug(
|
||||
@@ -499,11 +514,18 @@ class EventManager(metaclass=Singleton):
|
||||
def decorator(f: Callable):
|
||||
# 将输入的事件类型统一转换为列表格式
|
||||
if isinstance(etype, list):
|
||||
event_list = etype # 传入的已经是列表,直接使用
|
||||
# 传入的已经是列表,直接使用
|
||||
event_list = etype
|
||||
elif etype is EventType:
|
||||
# 订阅所有事件
|
||||
event_list = []
|
||||
for et in etype:
|
||||
event_list.append(et)
|
||||
else:
|
||||
event_list = [etype] # 不是列表则包裹成单一元素的列表
|
||||
# 不是列表则包裹成单一元素的列表
|
||||
event_list = [etype]
|
||||
|
||||
# 遍历列表,处理每个事件类型
|
||||
# 遍历列表,处理每个事件类型
|
||||
for event in event_list:
|
||||
if isinstance(event, (EventType, ChainEventType)):
|
||||
self.add_event_listener(event, f)
|
||||
|
||||
@@ -30,8 +30,8 @@ class MetaVideo(MetaBase):
|
||||
_episode_re = r"EP?(\d{2,4})$|^EP?(\d{1,4})$|^S\d{1,2}EP?(\d{1,4})$|S\d{2}EP?(\d{2,4})"
|
||||
_part_re = r"(^PART[0-9ABI]{0,2}$|^CD[0-9]{0,2}$|^DVD[0-9]{0,2}$|^DISK[0-9]{0,2}$|^DISC[0-9]{0,2}$)"
|
||||
_roman_numerals = r"^(?=[MDCLXVI])M*(C[MD]|D?C{0,3})(X[CL]|L?X{0,3})(I[XV]|V?I{0,3})$"
|
||||
_source_re = r"^BLURAY$|^HDTV$|^UHDTV$|^HDDVD$|^WEBRIP$|^DVDRIP$|^BDRIP$|^BLU$|^WEB$|^BD$|^HDRip$|^REMUX$|^UHD$|^REPACK$"
|
||||
_effect_re = r"^SDR$|^HDR\d*$|^DOLBY$|^DOVI$|^DV$|^3D$"
|
||||
_source_re = r"^BLURAY$|^HDTV$|^UHDTV$|^HDDVD$|^WEBRIP$|^DVDRIP$|^BDRIP$|^BLU$|^WEB$|^BD$|^HDRip$|^REMUX$|^UHD$"
|
||||
_effect_re = r"^SDR$|^HDR\d*$|^DOLBY$|^DOVI$|^DV$|^3D$|^REPACK$"
|
||||
_resources_type_re = r"%s|%s" % (_source_re, _effect_re)
|
||||
_name_no_begin_re = r"^[\[【].+?[\]】]"
|
||||
_name_no_chinese_re = r".*版|.*字幕"
|
||||
@@ -524,16 +524,7 @@ class MetaVideo(MetaBase):
|
||||
"""
|
||||
if not self.name:
|
||||
return
|
||||
source_res = re.search(r"(%s)" % self._source_re, token, re.IGNORECASE)
|
||||
if source_res:
|
||||
self._last_token_type = "source"
|
||||
self._continue_flag = False
|
||||
self._stop_name_flag = True
|
||||
if not self._source:
|
||||
self._source = source_res.group(1)
|
||||
self._last_token = self._source.upper()
|
||||
return
|
||||
elif token.upper() == "DL" \
|
||||
if token.upper() == "DL" \
|
||||
and self._last_token_type == "source" \
|
||||
and self._last_token == "WEB":
|
||||
self._source = "WEB-DL"
|
||||
@@ -542,13 +533,37 @@ class MetaVideo(MetaBase):
|
||||
elif token.upper() == "RAY" \
|
||||
and self._last_token_type == "source" \
|
||||
and self._last_token == "BLU":
|
||||
self._source = "BluRay"
|
||||
# UHD BluRay组合
|
||||
if self._source == "UHD":
|
||||
self._source = "UHD BluRay"
|
||||
else:
|
||||
self._source = "BluRay"
|
||||
self._continue_flag = False
|
||||
return
|
||||
elif token.upper() == "WEBDL":
|
||||
self._source = "WEB-DL"
|
||||
self._continue_flag = False
|
||||
return
|
||||
# UHD REMUX组合
|
||||
if token.upper() == "REMUX" \
|
||||
and self._source == "BluRay":
|
||||
self._source = "BluRay REMUX"
|
||||
self._continue_flag = False
|
||||
return
|
||||
elif token.upper() == "BLURAY" \
|
||||
and self._source == "UHD":
|
||||
self._source = "UHD BluRay"
|
||||
self._continue_flag = False
|
||||
return
|
||||
source_res = re.search(r"(%s)" % self._source_re, token, re.IGNORECASE)
|
||||
if source_res:
|
||||
self._last_token_type = "source"
|
||||
self._continue_flag = False
|
||||
self._stop_name_flag = True
|
||||
if not self._source:
|
||||
self._source = source_res.group(1)
|
||||
self._last_token = self._source.upper()
|
||||
return
|
||||
effect_res = re.search(r"(%s)" % self._effect_re, token, re.IGNORECASE)
|
||||
if effect_res:
|
||||
self._last_token_type = "effect"
|
||||
|
||||
@@ -1,11 +1,12 @@
|
||||
import traceback
|
||||
from typing import Generator, Optional, Tuple, Any
|
||||
from typing import Generator, Optional, Tuple, Any, Union
|
||||
|
||||
from app.core.config import settings
|
||||
from app.core.event import eventmanager
|
||||
from app.helper.module import ModuleHelper
|
||||
from app.log import logger
|
||||
from app.schemas.types import EventType, ModuleType
|
||||
from app.schemas.types import EventType, ModuleType, DownloaderType, MediaServerType, MessageChannel, StorageSchema, \
|
||||
OtherModulesType
|
||||
from app.utils.object import ObjectUtils
|
||||
from app.utils.singleton import Singleton
|
||||
|
||||
@@ -19,6 +20,8 @@ class ModuleManager(metaclass=Singleton):
|
||||
_modules: dict = {}
|
||||
# 运行态模块列表
|
||||
_running_modules: dict = {}
|
||||
# 子模块类型集合
|
||||
SubType = Union[DownloaderType, MediaServerType, MessageChannel, StorageSchema, OtherModulesType]
|
||||
|
||||
def __init__(self):
|
||||
self.load_modules()
|
||||
@@ -135,6 +138,17 @@ class ModuleManager(metaclass=Singleton):
|
||||
and module.get_type() == module_type:
|
||||
yield module
|
||||
|
||||
def get_running_subtype_module(self, module_subtype: SubType) -> Generator:
|
||||
"""
|
||||
获取指定子类型的模块
|
||||
"""
|
||||
if not self._running_modules:
|
||||
return []
|
||||
for _, module in self._running_modules.items():
|
||||
if hasattr(module, 'get_subtype') \
|
||||
and module.get_subtype() == module_subtype:
|
||||
yield module
|
||||
|
||||
def get_module(self, module_id: str) -> Any:
|
||||
"""
|
||||
根据模块id获取模块
|
||||
|
||||
@@ -526,7 +526,8 @@ class PluginManager(metaclass=Singleton):
|
||||
"name": "服务名称",
|
||||
"trigger": "触发器:cron、interval、date、CronTrigger.from_crontab()",
|
||||
"func": self.xxx,
|
||||
"kwargs": {} # 定时器参数
|
||||
"kwargs": {} # 定时器参数,
|
||||
"func_kwargs": {} # 方法参数
|
||||
}]
|
||||
"""
|
||||
ret_services = []
|
||||
|
||||
@@ -53,7 +53,9 @@ class DownloadHistory(Base):
|
||||
@staticmethod
|
||||
@db_query
|
||||
def get_by_hash(db: Session, download_hash: str):
|
||||
return db.query(DownloadHistory).filter(DownloadHistory.download_hash == download_hash).first()
|
||||
return db.query(DownloadHistory).filter(DownloadHistory.download_hash == download_hash).order_by(
|
||||
DownloadHistory.date.desc()
|
||||
).first()
|
||||
|
||||
@staticmethod
|
||||
@db_query
|
||||
|
||||
@@ -46,11 +46,13 @@ class Site(Base):
|
||||
# 流控间隔
|
||||
limit_seconds = Column(Integer, default=0)
|
||||
# 超时时间
|
||||
timeout = Column(Integer, default=0)
|
||||
timeout = Column(Integer, default=15)
|
||||
# 是否启用
|
||||
is_active = Column(Boolean(), default=True)
|
||||
# 创建时间
|
||||
lst_mod_date = Column(String, default=datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
|
||||
# 下载器
|
||||
downloader = Column(String)
|
||||
|
||||
@staticmethod
|
||||
@db_query
|
||||
|
||||
@@ -81,7 +81,7 @@ class SiteUserData(Base):
|
||||
func.max(SiteUserData.updated_day).label('latest_update_day')
|
||||
)
|
||||
.group_by(SiteUserData.domain)
|
||||
.filter(SiteUserData.err_msg == None)
|
||||
.filter(SiteUserData.err_msg is None)
|
||||
.subquery()
|
||||
)
|
||||
|
||||
|
||||
@@ -54,7 +54,7 @@ class Subscribe(Base):
|
||||
lack_episode = Column(Integer)
|
||||
# 附加信息
|
||||
note = Column(JSON)
|
||||
# 状态:N-新建, R-订阅中
|
||||
# 状态:N-新建 R-订阅中 P-待定 S-暂停
|
||||
state = Column(String, nullable=False, index=True, default='N')
|
||||
# 最后更新时间
|
||||
last_update = Column(String)
|
||||
@@ -64,6 +64,8 @@ class Subscribe(Base):
|
||||
username = Column(String)
|
||||
# 订阅站点
|
||||
sites = Column(JSON, default=list)
|
||||
# 下载器
|
||||
downloader = Column(String)
|
||||
# 是否洗版
|
||||
best_version = Column(Integer, default=0)
|
||||
# 当前优先级
|
||||
@@ -96,7 +98,13 @@ class Subscribe(Base):
|
||||
@staticmethod
|
||||
@db_query
|
||||
def get_by_state(db: Session, state: str):
|
||||
result = db.query(Subscribe).filter(Subscribe.state == state).all()
|
||||
# 如果 state 为空或 None,返回所有订阅
|
||||
if not state:
|
||||
result = db.query(Subscribe).all()
|
||||
else:
|
||||
# 如果传入的状态不为空,拆分成多个状态
|
||||
states = state.split(',')
|
||||
result = db.query(Subscribe).filter(Subscribe.state.in_(states)).all()
|
||||
return list(result)
|
||||
|
||||
@staticmethod
|
||||
|
||||
@@ -83,7 +83,8 @@ class SubscribeOper(DbOper):
|
||||
更新订阅
|
||||
"""
|
||||
subscribe = self.get(sid)
|
||||
subscribe.update(self._db, payload)
|
||||
if subscribe:
|
||||
subscribe.update(self._db, payload)
|
||||
return subscribe
|
||||
|
||||
def list_by_tmdbid(self, tmdbid: int, season: int = None) -> List[Subscribe]:
|
||||
|
||||
@@ -16,7 +16,7 @@ class PlaywrightHelper:
|
||||
"""
|
||||
sync_stealth(page, pure=True)
|
||||
page.goto(url)
|
||||
return sync_cf_retry(page)
|
||||
return sync_cf_retry(page)[0]
|
||||
|
||||
def action(self, url: str,
|
||||
callback: Callable,
|
||||
|
||||
@@ -4,7 +4,8 @@ from typing import List, Optional
|
||||
from app import schemas
|
||||
from app.core.context import MediaInfo
|
||||
from app.db.systemconfig_oper import SystemConfigOper
|
||||
from app.schemas.types import SystemConfigKey, MediaType
|
||||
from app.schemas.types import SystemConfigKey
|
||||
from app.utils.system import SystemUtils
|
||||
|
||||
|
||||
class DirectoryHelper:
|
||||
@@ -48,49 +49,62 @@ class DirectoryHelper:
|
||||
"""
|
||||
return [d for d in self.get_library_dirs() if d.library_storage == "local"]
|
||||
|
||||
def get_dir(self, media: MediaInfo, src_path: Path = None, dest_path: Path = None,
|
||||
fileitem: schemas.FileItem = None, local: bool = False) -> Optional[schemas.TransferDirectoryConf]:
|
||||
def get_dir(self, media: MediaInfo, include_unsorted: bool = False,
|
||||
storage: str = None, src_path: Path = None,
|
||||
target_storage: str = None, dest_path: Path = None
|
||||
) -> Optional[schemas.TransferDirectoryConf]:
|
||||
"""
|
||||
根据媒体信息获取下载目录、媒体库目录配置
|
||||
:param media: 媒体信息
|
||||
:param include_unsorted: 包含不整理目录
|
||||
:param storage: 源存储类型
|
||||
:param target_storage: 目标存储类型
|
||||
:param src_path: 源目录,有值时直接匹配
|
||||
:param dest_path: 目标目录,有值时直接匹配
|
||||
:param fileitem: 文件项,使用文件路径匹配
|
||||
:param local: 是否本地目录
|
||||
"""
|
||||
# 处理类型
|
||||
if media:
|
||||
media_type = media.type.value
|
||||
else:
|
||||
media_type = MediaType.UNKNOWN.value
|
||||
if not media:
|
||||
return None
|
||||
# 电影/电视剧
|
||||
media_type = media.type.value
|
||||
dirs = self.get_dirs()
|
||||
# 已匹配的目录
|
||||
matched_dirs: List[schemas.TransferDirectoryConf] = []
|
||||
# 按照配置顺序查找
|
||||
for d in dirs:
|
||||
# 下载目录
|
||||
download_path = Path(d.download_path)
|
||||
# 媒体库目录
|
||||
library_path = Path(d.library_path)
|
||||
# 下载目录不匹配, 不符合条件, 通常处理`下载`匹配
|
||||
if src_path and download_path != src_path:
|
||||
# 没有启用整理的目录
|
||||
if not d.monitor_type and not include_unsorted:
|
||||
continue
|
||||
# 媒体库目录不匹配, 或监控方式为None(即不自动整理), 不符合条件, 通常处理`整理`匹配
|
||||
if dest_path:
|
||||
if library_path != dest_path or not d.monitor_type:
|
||||
continue
|
||||
# 没有目录配置时起作用, 通常处理`手动整理`未选择`目标目录`的情况
|
||||
if fileitem and not Path(fileitem.path).is_relative_to(download_path):
|
||||
# 源存储类型不匹配
|
||||
if storage and d.storage != storage:
|
||||
continue
|
||||
# 本地目录
|
||||
if local and d.storage != "local":
|
||||
# 目标存储类型不匹配
|
||||
if target_storage and d.library_storage != target_storage:
|
||||
continue
|
||||
# 有源目录时,源目录不匹配下载目录
|
||||
if src_path and not src_path.is_relative_to(d.download_path):
|
||||
continue
|
||||
# 有目标目录时,目标目录不匹配媒体库目录
|
||||
if dest_path and dest_path != Path(d.library_path):
|
||||
continue
|
||||
# 目录类型为全部的,符合条件
|
||||
if not d.media_type:
|
||||
return d
|
||||
matched_dirs.append(d)
|
||||
continue
|
||||
# 目录类型相等,目录类别为全部,符合条件
|
||||
if d.media_type == media_type and not d.media_category:
|
||||
return d
|
||||
matched_dirs.append(d)
|
||||
continue
|
||||
# 目录类型相等,目录类别相等,符合条件
|
||||
if d.media_type == media_type and d.media_category == media.category:
|
||||
return d
|
||||
|
||||
matched_dirs.append(d)
|
||||
continue
|
||||
if matched_dirs:
|
||||
if src_path:
|
||||
# 优先源目录同盘
|
||||
for matched_dir in matched_dirs:
|
||||
matched_path = Path(matched_dir.download_path)
|
||||
if SystemUtils.is_same_disk(matched_path, src_path):
|
||||
return matched_dir
|
||||
return matched_dirs[0]
|
||||
return None
|
||||
|
||||
@@ -20,7 +20,15 @@ class FormatParser(object):
|
||||
self._format = eformat
|
||||
self._start_ep = None
|
||||
self._end_ep = None
|
||||
self.__offset = offset or "EP"
|
||||
if not offset:
|
||||
self.__offset = "EP"
|
||||
elif "EP" in offset:
|
||||
self.__offset = offset
|
||||
else:
|
||||
if offset.startswith("-") or offset.startswith("+"):
|
||||
self.__offset = f"EP{offset}"
|
||||
else:
|
||||
self.__offset = f"EP+{offset}"
|
||||
self._key = key
|
||||
self._part = None
|
||||
if part:
|
||||
@@ -91,7 +99,7 @@ class FormatParser(object):
|
||||
s, e = self.__handle_single(file_name)
|
||||
start_ep = self.__offset.replace("EP", str(s)) if s else None
|
||||
end_ep = self.__offset.replace("EP", str(e)) if e else None
|
||||
return int(eval(start_ep)) if start_ep else None, int(eval(end_ep)) if end_ep else None, self.part
|
||||
return int(eval(start_ep)) if start_ep else None, int(eval(end_ep)) if end_ep else None, self.part
|
||||
|
||||
def __handle_single(self, file: str) -> Tuple[Optional[int], Optional[int]]:
|
||||
"""
|
||||
|
||||
@@ -287,10 +287,10 @@ class TorrentHelper(metaclass=Singleton):
|
||||
if not file:
|
||||
continue
|
||||
file_path = Path(file)
|
||||
if file_path.suffix not in settings.RMT_MEDIAEXT:
|
||||
if not file_path.suffix or file_path.suffix.lower() not in settings.RMT_MEDIAEXT:
|
||||
continue
|
||||
# 只使用文件名识别
|
||||
meta = MetaInfo(file_path.stem)
|
||||
meta = MetaInfo(file_path.name)
|
||||
if not meta.begin_episode:
|
||||
continue
|
||||
episodes = list(set(episodes).union(set(meta.episode_list)))
|
||||
|
||||
@@ -10,7 +10,7 @@ from app.log import logger
|
||||
|
||||
class TwoFactorAuth:
|
||||
def __init__(self, code_or_secret: str):
|
||||
if code_or_secret and len(code_or_secret) > 16:
|
||||
if code_or_secret and len(code_or_secret) >= 16:
|
||||
self.code = None
|
||||
self.secret = code_or_secret
|
||||
else:
|
||||
|
||||
@@ -2,8 +2,9 @@ from abc import abstractmethod, ABCMeta
|
||||
from typing import Generic, Tuple, Union, TypeVar, Type, Dict, Optional, Callable
|
||||
|
||||
from app.helper.service import ServiceConfigHelper
|
||||
from app.schemas import Notification, MessageChannel, NotificationConf, MediaServerConf, DownloaderConf
|
||||
from app.schemas.types import ModuleType
|
||||
from app.schemas import Notification, NotificationConf, MediaServerConf, DownloaderConf
|
||||
from app.schemas.types import ModuleType, DownloaderType, MediaServerType, MessageChannel, StorageSchema, \
|
||||
OtherModulesType
|
||||
|
||||
|
||||
class _ModuleBase(metaclass=ABCMeta):
|
||||
@@ -43,6 +44,14 @@ class _ModuleBase(metaclass=ABCMeta):
|
||||
"""
|
||||
pass
|
||||
|
||||
@staticmethod
|
||||
@abstractmethod
|
||||
def get_subtype() -> Union[DownloaderType, MediaServerType, MessageChannel, StorageSchema, OtherModulesType]:
|
||||
"""
|
||||
获取模块子类型(下载器、媒体服务器、消息通道、存储类型、其他杂项模块类型)
|
||||
"""
|
||||
pass
|
||||
|
||||
@staticmethod
|
||||
@abstractmethod
|
||||
def get_priority() -> int:
|
||||
|
||||
@@ -7,7 +7,7 @@ from app.core.meta import MetaBase
|
||||
from app.log import logger
|
||||
from app.modules import _ModuleBase
|
||||
from app.modules.bangumi.bangumi import BangumiApi
|
||||
from app.schemas.types import ModuleType
|
||||
from app.schemas.types import ModuleType, MediaRecognizeType
|
||||
from app.utils.http import RequestUtils
|
||||
|
||||
|
||||
@@ -44,6 +44,13 @@ class BangumiModule(_ModuleBase):
|
||||
获取模块类型
|
||||
"""
|
||||
return ModuleType.MediaRecognize
|
||||
|
||||
@staticmethod
|
||||
def get_subtype() -> MediaRecognizeType:
|
||||
"""
|
||||
获取模块子类型
|
||||
"""
|
||||
return MediaRecognizeType.Bangumi
|
||||
|
||||
@staticmethod
|
||||
def get_priority() -> int:
|
||||
|
||||
@@ -15,7 +15,7 @@ from app.modules.douban.apiv2 import DoubanApi
|
||||
from app.modules.douban.douban_cache import DoubanCache
|
||||
from app.modules.douban.scraper import DoubanScraper
|
||||
from app.schemas import MediaPerson, APIRateLimitException
|
||||
from app.schemas.types import MediaType, ModuleType
|
||||
from app.schemas.types import MediaType, ModuleType, MediaRecognizeType
|
||||
from app.utils.common import retry
|
||||
from app.utils.http import RequestUtils
|
||||
from app.utils.limit import rate_limit_exponential
|
||||
@@ -59,6 +59,13 @@ class DoubanModule(_ModuleBase):
|
||||
"""
|
||||
return ModuleType.MediaRecognize
|
||||
|
||||
@staticmethod
|
||||
def get_subtype() -> MediaRecognizeType:
|
||||
"""
|
||||
获取模块子类型
|
||||
"""
|
||||
return MediaRecognizeType.Douban
|
||||
|
||||
@staticmethod
|
||||
def get_priority() -> int:
|
||||
"""
|
||||
|
||||
@@ -3,11 +3,11 @@ import base64
|
||||
import hashlib
|
||||
import hmac
|
||||
from datetime import datetime
|
||||
from functools import lru_cache
|
||||
from random import choice
|
||||
from urllib import parse
|
||||
|
||||
import requests
|
||||
from cachetools import TTLCache, cached
|
||||
|
||||
from app.core.config import settings
|
||||
from app.utils.http import RequestUtils
|
||||
@@ -160,12 +160,12 @@ class DoubanApi(metaclass=Singleton):
|
||||
self._session = requests.Session()
|
||||
|
||||
@classmethod
|
||||
def __sign(cls, url: str, ts: int, method='GET') -> str:
|
||||
def __sign(cls, url: str, ts: str, method='GET') -> str:
|
||||
"""
|
||||
签名
|
||||
"""
|
||||
url_path = parse.urlparse(url).path
|
||||
raw_sign = '&'.join([method.upper(), parse.quote(url_path, safe=''), str(ts)])
|
||||
raw_sign = '&'.join([method.upper(), parse.quote(url_path, safe=''), ts])
|
||||
return base64.b64encode(
|
||||
hmac.new(
|
||||
cls._api_secret_key.encode(),
|
||||
@@ -174,7 +174,7 @@ class DoubanApi(metaclass=Singleton):
|
||||
).digest()
|
||||
).decode()
|
||||
|
||||
@lru_cache(maxsize=settings.CACHE_CONF.get('douban'))
|
||||
@cached(cache=TTLCache(maxsize=settings.CACHE_CONF["douban"], ttl=settings.CACHE_CONF["meta"]))
|
||||
def __invoke(self, url: str, **kwargs) -> dict:
|
||||
"""
|
||||
GET请求
|
||||
@@ -203,7 +203,7 @@ class DoubanApi(metaclass=Singleton):
|
||||
return resp.json()
|
||||
return resp.json() if resp else {}
|
||||
|
||||
@lru_cache(maxsize=settings.CACHE_CONF.get('douban'))
|
||||
@cached(cache=TTLCache(maxsize=settings.CACHE_CONF["douban"], ttl=settings.CACHE_CONF["meta"]))
|
||||
def __post(self, url: str, **kwargs) -> dict:
|
||||
"""
|
||||
POST请求
|
||||
|
||||
@@ -16,7 +16,7 @@ from app.schemas.types import MediaType
|
||||
lock = RLock()
|
||||
|
||||
CACHE_EXPIRE_TIMESTAMP_STR = "cache_expire_timestamp"
|
||||
EXPIRE_TIMESTAMP = settings.CACHE_CONF.get('meta')
|
||||
EXPIRE_TIMESTAMP = settings.CACHE_CONF["meta"]
|
||||
|
||||
|
||||
class DoubanCache(metaclass=Singleton):
|
||||
@@ -77,7 +77,7 @@ class DoubanCache(metaclass=Singleton):
|
||||
@return: 被删除的缓存内容
|
||||
"""
|
||||
with lock:
|
||||
return self._meta_data.pop(key, None)
|
||||
return self._meta_data.pop(key, {})
|
||||
|
||||
def delete_by_doubanid(self, doubanid: str) -> None:
|
||||
"""
|
||||
|
||||
@@ -7,7 +7,7 @@ from app.log import logger
|
||||
from app.modules import _MediaServerBase, _ModuleBase
|
||||
from app.modules.emby.emby import Emby
|
||||
from app.schemas.event import AuthCredentials, AuthInterceptCredentials
|
||||
from app.schemas.types import MediaType, ModuleType, ChainEventType
|
||||
from app.schemas.types import MediaType, ModuleType, ChainEventType, MediaServerType
|
||||
|
||||
|
||||
class EmbyModule(_ModuleBase, _MediaServerBase[Emby]):
|
||||
@@ -30,6 +30,13 @@ class EmbyModule(_ModuleBase, _MediaServerBase[Emby]):
|
||||
"""
|
||||
return ModuleType.MediaServer
|
||||
|
||||
@staticmethod
|
||||
def get_subtype() -> MediaServerType:
|
||||
"""
|
||||
获取模块子类型
|
||||
"""
|
||||
return MediaServerType.Emby
|
||||
|
||||
@staticmethod
|
||||
def get_priority() -> int:
|
||||
"""
|
||||
@@ -66,16 +73,26 @@ class EmbyModule(_ModuleBase, _MediaServerBase[Emby]):
|
||||
logger.info(f"Emby服务器 {name} 连接断开,尝试重连 ...")
|
||||
server.reconnect()
|
||||
|
||||
def user_authenticate(self, credentials: AuthCredentials) -> Optional[AuthCredentials]:
|
||||
def user_authenticate(self, credentials: AuthCredentials, service_name: Optional[str] = None) \
|
||||
-> Optional[AuthCredentials]:
|
||||
"""
|
||||
使用Emby用户辅助完成用户认证
|
||||
:param credentials: 认证数据
|
||||
:param service_name: 指定要认证的媒体服务器名称,若为 None 则认证所有服务
|
||||
:return: 认证数据
|
||||
"""
|
||||
# Emby认证
|
||||
if not credentials or credentials.grant_type != "password":
|
||||
return None
|
||||
for name, server in self.get_instances().items():
|
||||
# 确定要认证的服务器列表
|
||||
if service_name:
|
||||
# 如果指定了服务名,获取该服务实例
|
||||
servers = [(service_name, server)] if (server := self.get_instance(service_name)) else []
|
||||
else:
|
||||
# 如果没有指定服务名,遍历所有服务
|
||||
servers = self.get_instances().items()
|
||||
# 遍历要认证的服务器
|
||||
for name, server in servers:
|
||||
# 触发认证拦截事件
|
||||
intercept_event = eventmanager.send_event(
|
||||
etype=ChainEventType.AuthIntercept,
|
||||
|
||||
@@ -1,12 +1,13 @@
|
||||
import re
|
||||
from functools import lru_cache
|
||||
from typing import Optional, Tuple, Union
|
||||
|
||||
from cachetools import TTLCache, cached
|
||||
|
||||
from app.core.context import MediaInfo, settings
|
||||
from app.log import logger
|
||||
from app.modules import _ModuleBase
|
||||
from app.schemas.types import MediaType, ModuleType, OtherModulesType
|
||||
from app.utils.http import RequestUtils
|
||||
from app.schemas.types import MediaType, ModuleType
|
||||
|
||||
|
||||
class FanartModule(_ModuleBase):
|
||||
@@ -342,6 +343,13 @@ class FanartModule(_ModuleBase):
|
||||
"""
|
||||
return ModuleType.Other
|
||||
|
||||
@staticmethod
|
||||
def get_subtype() -> OtherModulesType:
|
||||
"""
|
||||
获取模块子类型
|
||||
"""
|
||||
return OtherModulesType.Fanart
|
||||
|
||||
@staticmethod
|
||||
def get_priority() -> int:
|
||||
"""
|
||||
@@ -376,20 +384,30 @@ class FanartModule(_ModuleBase):
|
||||
continue
|
||||
if not isinstance(images, list):
|
||||
continue
|
||||
# 按欢迎程度倒排
|
||||
images.sort(key=lambda x: int(x.get('likes', 0)), reverse=True)
|
||||
# 取第一张图片
|
||||
image_obj = images[0]
|
||||
|
||||
# 图片属性xx_path
|
||||
image_name = self.__name(name)
|
||||
image_season = image_obj.get('season')
|
||||
# 设置图片
|
||||
if image_name.startswith("season") and image_season:
|
||||
# 季图片格式 seasonxx-poster
|
||||
image_name = f"season{str(image_season).rjust(2, '0')}-{image_name[6:]}"
|
||||
if not mediainfo.get_image(image_name):
|
||||
# 没有图片才设置
|
||||
mediainfo.set_image(image_name, image_obj.get('url'))
|
||||
if image_name.startswith("season"):
|
||||
# 季图片,图片格式seasonxx-xxxx/season-specials-xxxx
|
||||
for image_obj in images:
|
||||
image_season = image_obj.get('season')
|
||||
if image_season is not None:
|
||||
# 包括poster,thumb,banner
|
||||
if image_season == '0':
|
||||
season_image = f"season-specials-{image_name[6:]}"
|
||||
else:
|
||||
season_image = f"season{str(image_season).rjust(2, '0')}-{image_name[6:]}"
|
||||
# 设置图片,没有图片才设置
|
||||
if not mediainfo.get_image(season_image):
|
||||
mediainfo.set_image(season_image, image_obj.get('url'))
|
||||
else:
|
||||
# 其他图片,按欢迎程度倒排
|
||||
images.sort(key=lambda x: int(x.get('likes', 0)), reverse=True)
|
||||
# 取第一张图片
|
||||
image_obj = images[0]
|
||||
# 设置图片,没有图片才设置
|
||||
if not mediainfo.get_image(image_name):
|
||||
mediainfo.set_image(image_name, image_obj.get('url'))
|
||||
|
||||
return mediainfo
|
||||
|
||||
@@ -404,7 +422,7 @@ class FanartModule(_ModuleBase):
|
||||
return result
|
||||
|
||||
@classmethod
|
||||
@lru_cache(maxsize=settings.CACHE_CONF.get('fanart'))
|
||||
@cached(cache=TTLCache(maxsize=settings.CACHE_CONF["fanart"], ttl=settings.CACHE_CONF["meta"]))
|
||||
def __request_fanart(cls, media_type: MediaType, queryid: Union[str, int]) -> Optional[dict]:
|
||||
if media_type == MediaType.MOVIE:
|
||||
image_url = cls._movie_url % queryid
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
import copy
|
||||
import re
|
||||
from pathlib import Path
|
||||
from threading import Lock
|
||||
@@ -8,6 +7,7 @@ from jinja2 import Template
|
||||
|
||||
from app.core.config import settings
|
||||
from app.core.context import MediaInfo
|
||||
from app.core.event import eventmanager
|
||||
from app.core.meta import MetaBase
|
||||
from app.core.metainfo import MetaInfo, MetaInfoPath
|
||||
from app.helper.directory import DirectoryHelper
|
||||
@@ -17,7 +17,8 @@ from app.log import logger
|
||||
from app.modules import _ModuleBase
|
||||
from app.modules.filemanager.storages import StorageBase
|
||||
from app.schemas import TransferInfo, ExistMediaInfo, TmdbEpisode, TransferDirectoryConf, FileItem, StorageUsage
|
||||
from app.schemas.types import MediaType, ModuleType
|
||||
from app.schemas.event import TransferRenameEventData
|
||||
from app.schemas.types import MediaType, ModuleType, ChainEventType, OtherModulesType
|
||||
from app.utils.system import SystemUtils
|
||||
|
||||
lock = Lock()
|
||||
@@ -51,6 +52,13 @@ class FileManagerModule(_ModuleBase):
|
||||
"""
|
||||
return ModuleType.Other
|
||||
|
||||
@staticmethod
|
||||
def get_subtype() -> OtherModulesType:
|
||||
"""
|
||||
获取模块子类型
|
||||
"""
|
||||
return OtherModulesType.FileManager
|
||||
|
||||
@staticmethod
|
||||
def get_priority() -> int:
|
||||
"""
|
||||
@@ -65,9 +73,8 @@ class FileManagerModule(_ModuleBase):
|
||||
"""
|
||||
测试模块连接性
|
||||
"""
|
||||
directoryhelper = DirectoryHelper()
|
||||
# 检查目录
|
||||
dirs = directoryhelper.get_dirs()
|
||||
dirs = self.directoryhelper.get_dirs()
|
||||
if not dirs:
|
||||
return False, "未设置任何目录"
|
||||
for d in dirs:
|
||||
@@ -132,8 +139,6 @@ class FileManagerModule(_ModuleBase):
|
||||
)
|
||||
return str(path)
|
||||
|
||||
pass
|
||||
|
||||
def save_config(self, storage: str, conf: Dict) -> None:
|
||||
"""
|
||||
保存存储配置
|
||||
@@ -144,7 +149,7 @@ class FileManagerModule(_ModuleBase):
|
||||
return
|
||||
storage_oper.set_config(conf)
|
||||
|
||||
def generate_qrcode(self, storage: str) -> Optional[Dict[str, str]]:
|
||||
def generate_qrcode(self, storage: str) -> Optional[Tuple[dict, str]]:
|
||||
"""
|
||||
生成二维码
|
||||
"""
|
||||
@@ -220,7 +225,8 @@ class FileManagerModule(_ModuleBase):
|
||||
and f".{t.extension.lower()}" in extensions):
|
||||
return True
|
||||
elif t.type == "dir":
|
||||
return __any_file(t)
|
||||
if __any_file(t):
|
||||
return True
|
||||
return False
|
||||
|
||||
# 返回结果
|
||||
@@ -269,7 +275,7 @@ class FileManagerModule(_ModuleBase):
|
||||
return None
|
||||
return storage_oper.download(fileitem, path=path)
|
||||
|
||||
def upload_file(self, fileitem: FileItem, path: Path) -> Optional[FileItem]:
|
||||
def upload_file(self, fileitem: FileItem, path: Path, new_name: str = None) -> Optional[FileItem]:
|
||||
"""
|
||||
上传文件
|
||||
"""
|
||||
@@ -277,7 +283,7 @@ class FileManagerModule(_ModuleBase):
|
||||
if not storage_oper:
|
||||
logger.error(f"不支持 {fileitem.storage} 的上传处理")
|
||||
return None
|
||||
return storage_oper.upload(fileitem, path)
|
||||
return storage_oper.upload(fileitem, path, new_name)
|
||||
|
||||
def get_file_item(self, storage: str, path: Path) -> Optional[FileItem]:
|
||||
"""
|
||||
@@ -323,6 +329,7 @@ class FileManagerModule(_ModuleBase):
|
||||
target_directory: TransferDirectoryConf = None,
|
||||
target_storage: str = None, target_path: Path = None,
|
||||
transfer_type: str = None, scrape: bool = None,
|
||||
library_type_folder: bool = None, library_category_folder: bool = None,
|
||||
episodes_info: List[TmdbEpisode] = None) -> TransferInfo:
|
||||
"""
|
||||
文件整理
|
||||
@@ -334,6 +341,8 @@ class FileManagerModule(_ModuleBase):
|
||||
:param target_path: 目标路径
|
||||
:param transfer_type: 转移模式
|
||||
:param scrape: 是否刮削元数据
|
||||
:param library_type_folder: 是否按媒体类型创建目录
|
||||
:param library_category_folder: 是否按媒体类别创建目录
|
||||
:param episodes_info: 当前季的全部集信息
|
||||
:return: {path, target_path, message}
|
||||
"""
|
||||
@@ -349,41 +358,37 @@ class FileManagerModule(_ModuleBase):
|
||||
fileitem=fileitem,
|
||||
message=f"{target_path} 不是有效目录")
|
||||
# 获取目标路径
|
||||
directoryhelper = DirectoryHelper()
|
||||
if not target_directory:
|
||||
# 根据目的路径查找目录配置
|
||||
if target_path:
|
||||
target_directory = directoryhelper.get_dir(mediainfo, dest_path=target_path)
|
||||
else:
|
||||
target_directory = directoryhelper.get_dir(mediainfo, fileitem=fileitem)
|
||||
|
||||
if target_directory:
|
||||
# 拼装媒体库一、二级子目录
|
||||
target_path = self.__get_dest_dir(mediainfo=mediainfo, target_dir=target_directory)
|
||||
# 目标存储类型
|
||||
if not target_storage:
|
||||
target_storage = target_directory.library_storage
|
||||
# 整理方式
|
||||
if not transfer_type:
|
||||
transfer_type = target_directory.transfer_type
|
||||
# 是否需要刮削
|
||||
if scrape is None:
|
||||
need_scrape = target_directory.scraping
|
||||
else:
|
||||
need_scrape = scrape
|
||||
# 是否需要重命名
|
||||
need_rename = target_directory.renaming
|
||||
# 是否需要通知
|
||||
need_notify = target_directory.notify
|
||||
# 覆盖模式
|
||||
overwrite_mode = target_directory.overwrite_mode
|
||||
# 是否需要刮削
|
||||
if scrape is None:
|
||||
need_scrape = target_directory.scraping
|
||||
else:
|
||||
need_scrape = scrape
|
||||
# 目标存储类型
|
||||
if not target_storage:
|
||||
target_storage = target_directory.library_storage
|
||||
# 拼装媒体库一、二级子目录
|
||||
target_path = self.__get_dest_dir(mediainfo=mediainfo, target_dir=target_directory,
|
||||
need_type_folder=library_type_folder,
|
||||
need_category_folder=library_category_folder)
|
||||
elif target_path:
|
||||
# 自定义目标路径,仅适用于手动整理的场景
|
||||
need_scrape = scrape or False
|
||||
need_rename = True
|
||||
need_notify = False
|
||||
overwrite_mode = "never"
|
||||
logger.warn(f"{target_path} 为自定义路径, 通知将不会发送")
|
||||
# 手动整理的场景,有自定义目标路径
|
||||
target_path = self.__get_dest_path(mediainfo=mediainfo, target_path=target_path,
|
||||
need_type_folder=library_type_folder,
|
||||
need_category_folder=library_category_folder)
|
||||
else:
|
||||
# 未找到有效的媒体库目录
|
||||
logger.error(
|
||||
@@ -391,9 +396,14 @@ class FileManagerModule(_ModuleBase):
|
||||
return TransferInfo(success=False,
|
||||
fileitem=fileitem,
|
||||
message="未找到有效的媒体库目录")
|
||||
|
||||
logger.info(f"获取整理目标路径:【{target_storage}】{target_path}")
|
||||
# 整理方式
|
||||
if not transfer_type:
|
||||
logger.error(f"{target_directory.name} 未设置整理方式")
|
||||
return TransferInfo(success=False,
|
||||
fileitem=fileitem,
|
||||
message=f"{target_directory.name} 未设置整理方式")
|
||||
# 整理
|
||||
logger.info(f"获取整理目标路径:【{target_storage}】{target_path}")
|
||||
return self.transfer_media(fileitem=fileitem,
|
||||
in_meta=meta,
|
||||
mediainfo=mediainfo,
|
||||
@@ -467,13 +477,15 @@ class FileManagerModule(_ModuleBase):
|
||||
target_file.parent.mkdir(parents=True)
|
||||
# 本地到本地
|
||||
if transfer_type == "copy":
|
||||
state = source_oper.copy(fileitem, target_file)
|
||||
state = source_oper.copy(fileitem, target_file.parent, target_file.name)
|
||||
elif transfer_type == "move":
|
||||
state = source_oper.move(fileitem, target_file)
|
||||
state = source_oper.move(fileitem, target_file.parent, target_file.name)
|
||||
elif transfer_type == "link":
|
||||
state = source_oper.link(fileitem, target_file)
|
||||
elif transfer_type == "softlink":
|
||||
state = source_oper.softlink(fileitem, target_file)
|
||||
else:
|
||||
return None, f"不支持的整理方式:{transfer_type}"
|
||||
if state:
|
||||
return __get_targetitem(target_file), ""
|
||||
else:
|
||||
@@ -489,38 +501,28 @@ class FileManagerModule(_ModuleBase):
|
||||
target_fileitem = target_oper.get_folder(target_file.parent)
|
||||
if target_fileitem:
|
||||
# 上传文件
|
||||
new_item = target_oper.upload(target_fileitem, filepath)
|
||||
new_item = target_oper.upload(target_fileitem, filepath, target_file.name)
|
||||
if new_item:
|
||||
# 重命名为目标文件名
|
||||
if new_item.name != target_file.name:
|
||||
if target_oper.rename(new_item, target_file.name):
|
||||
new_item.name = target_file.name
|
||||
new_item.path = str(Path(new_item.path).parent / target_file.name)
|
||||
return new_item, ""
|
||||
else:
|
||||
return None, f"{fileitem.path} 上传 {target_storage} 失败"
|
||||
else:
|
||||
return None, f"{target_file.parent} {target_storage} 目录获取失败"
|
||||
return None, f"【{target_storage}】{target_file.parent} 目录获取失败"
|
||||
elif transfer_type == "move":
|
||||
# 移动
|
||||
# 根据目的路径获取文件夹
|
||||
target_fileitem = target_oper.get_folder(target_file.parent)
|
||||
if target_fileitem:
|
||||
# 上传文件
|
||||
new_item = target_oper.upload(target_fileitem, filepath)
|
||||
new_item = target_oper.upload(target_fileitem, filepath, target_file.name)
|
||||
if new_item:
|
||||
# 重命名为目标文件名
|
||||
if new_item.name != target_file.name:
|
||||
if target_oper.rename(new_item, target_file.name):
|
||||
new_item.name = target_file.name
|
||||
new_item.path = str(Path(new_item.path).parent / target_file.name)
|
||||
# 删除源文件
|
||||
source_oper.delete(fileitem)
|
||||
return new_item, ""
|
||||
else:
|
||||
return None, f"{fileitem.path} 上传 {target_storage} 失败"
|
||||
else:
|
||||
return None, f"{target_file.parent} {target_storage} 目录获取失败"
|
||||
return None, f"【{target_storage}】{target_file.parent} 目录获取失败"
|
||||
elif fileitem.storage != "local" and target_storage == "local":
|
||||
# 网盘到本地
|
||||
if target_file.exists():
|
||||
@@ -544,25 +546,28 @@ class FileManagerModule(_ModuleBase):
|
||||
return None, f"{fileitem.path} {fileitem.storage} 下载失败"
|
||||
elif fileitem.storage == target_storage:
|
||||
# 同一网盘
|
||||
# 根据目的路径获取文件夹
|
||||
target_diritem = target_oper.get_folder(target_file.parent)
|
||||
if target_diritem:
|
||||
# 重命名文件
|
||||
if target_oper.rename(fileitem, target_file.name):
|
||||
# 移动文件到新目录
|
||||
if source_oper.move(fileitem, target_diritem):
|
||||
ret_fileitem = copy.deepcopy(fileitem)
|
||||
ret_fileitem.path = target_diritem.path + "/" + target_file.name
|
||||
ret_fileitem.name = target_file.name
|
||||
ret_fileitem.basename = target_file.stem
|
||||
ret_fileitem.parent_fileid = target_diritem.fileid
|
||||
return ret_fileitem, ""
|
||||
if transfer_type == "copy":
|
||||
# 复制文件到新目录
|
||||
target_fileitem = target_oper.get_folder(target_file.parent)
|
||||
if target_fileitem:
|
||||
if source_oper.move(fileitem, Path(target_fileitem.path), target_file.name):
|
||||
return target_oper.get_item(target_file), ""
|
||||
else:
|
||||
return None, f"{fileitem.path} {target_storage} 移动文件失败"
|
||||
return None, f"【{target_storage}】{fileitem.path} 复制文件失败"
|
||||
else:
|
||||
return None, f"{fileitem.path} {target_storage} 重命名文件失败"
|
||||
return None, f"【{target_storage}】{target_file.parent} 目录获取失败"
|
||||
elif transfer_type == "move":
|
||||
# 移动文件到新目录
|
||||
target_fileitem = target_oper.get_folder(target_file.parent)
|
||||
if target_fileitem:
|
||||
if source_oper.move(fileitem, Path(target_fileitem.path), target_file.name):
|
||||
return target_oper.get_item(target_file), ""
|
||||
else:
|
||||
return None, f"【{target_storage}】{fileitem.path} 移动文件失败"
|
||||
else:
|
||||
return None, f"【{target_storage}】{target_file.parent} 目录获取失败"
|
||||
else:
|
||||
return None, f"{target_file.parent} {target_storage} 目录获取失败"
|
||||
return None, f"不支持的整理方式:{transfer_type}"
|
||||
|
||||
return None, "未知错误"
|
||||
|
||||
@@ -625,18 +630,16 @@ class FileManagerModule(_ModuleBase):
|
||||
if not parent_item:
|
||||
return False, f"{org_path} 上级目录获取失败"
|
||||
# 字幕文件列表
|
||||
file_list: List[FileItem] = storage_oper.list(parent_item)
|
||||
file_list: List[FileItem] = storage_oper.list(parent_item) or []
|
||||
file_list = [f for f in file_list if f.type == "file" and f.extension
|
||||
and f".{f.extension.lower()}" in settings.RMT_SUBEXT]
|
||||
if len(file_list) == 0:
|
||||
logger.debug(f"{parent_item.path} 目录下没有找到字幕文件...")
|
||||
logger.info(f"{parent_item.path} 目录下没有找到字幕文件...")
|
||||
else:
|
||||
logger.debug("字幕文件清单:" + str(file_list))
|
||||
logger.info(f"字幕文件清单:{[f.name for f in file_list]}")
|
||||
# 识别文件名
|
||||
metainfo = MetaInfoPath(org_path)
|
||||
for sub_item in file_list:
|
||||
if sub_item.type == "dir" or not sub_item.extension:
|
||||
continue
|
||||
if f".{sub_item.extension.lower()}" not in settings.RMT_SUBEXT:
|
||||
continue
|
||||
# 识别字幕文件名
|
||||
sub_file_name = re.sub(_zhtw_sub_re,
|
||||
".",
|
||||
@@ -699,7 +702,7 @@ class FileManagerModule(_ModuleBase):
|
||||
return False, errmsg
|
||||
except Exception as error:
|
||||
logger.info(f"字幕 {new_file} 出错了,原因: {str(error)}")
|
||||
return False, ""
|
||||
return True, ""
|
||||
|
||||
def __transfer_audio_track_files(self, fileitem: FileItem, target_storage: str, target_file: Path,
|
||||
transfer_type: str) -> Tuple[bool, str]:
|
||||
@@ -723,7 +726,7 @@ class FileManagerModule(_ModuleBase):
|
||||
file_list: List[FileItem] = storage_oper.list(parent_item)
|
||||
# 匹配音轨文件
|
||||
pending_file_list: List[FileItem] = [file for file in file_list
|
||||
if Path(file.name).stem == org_path.name
|
||||
if Path(file.name).stem == org_path.stem
|
||||
and file.type == "file" and file.extension
|
||||
and f".{file.extension.lower()}" in settings.RMT_AUDIOEXT]
|
||||
if len(pending_file_list) == 0:
|
||||
@@ -829,7 +832,8 @@ class FileManagerModule(_ModuleBase):
|
||||
else:
|
||||
logger.info(f"正在删除已存在的文件:{target_file}")
|
||||
target_file.unlink()
|
||||
logger.info(f"正在整理文件:【{fileitem.storage}】{fileitem.path} 到 【{target_storage}】{target_file}")
|
||||
logger.info(f"正在整理文件:【{fileitem.storage}】{fileitem.path} 到 【{target_storage}】{target_file},"
|
||||
f"操作类型:{transfer_type}")
|
||||
new_item, errmsg = self.__transfer_command(fileitem=fileitem,
|
||||
target_storage=target_storage,
|
||||
target_file=target_file,
|
||||
@@ -845,26 +849,43 @@ class FileManagerModule(_ModuleBase):
|
||||
return None, errmsg
|
||||
|
||||
@staticmethod
|
||||
def __get_dest_dir(mediainfo: MediaInfo, target_dir: TransferDirectoryConf) -> Path:
|
||||
def __get_dest_path(mediainfo: MediaInfo, target_path: Path,
|
||||
need_type_folder: bool = False, need_category_folder: bool = False):
|
||||
"""
|
||||
获取目标路径
|
||||
"""
|
||||
if need_type_folder:
|
||||
target_path = target_path / mediainfo.type.value
|
||||
if need_category_folder and mediainfo.category:
|
||||
target_path = target_path / mediainfo.category
|
||||
return target_path
|
||||
|
||||
@staticmethod
|
||||
def __get_dest_dir(mediainfo: MediaInfo, target_dir: TransferDirectoryConf,
|
||||
need_type_folder: bool = None, need_category_folder: bool = None) -> Path:
|
||||
"""
|
||||
根据设置并装媒体库目录
|
||||
:param mediainfo: 媒体信息
|
||||
:target_dir: 媒体库根目录
|
||||
:typename_dir: 是否加上类型目录
|
||||
:need_type_folder: 是否需要按媒体类型创建目录
|
||||
:need_category_folder: 是否需要按媒体类别创建目录
|
||||
"""
|
||||
if not target_dir.media_type and target_dir.library_type_folder:
|
||||
if need_type_folder is None:
|
||||
need_type_folder = target_dir.library_type_folder
|
||||
if need_category_folder is None:
|
||||
need_category_folder = target_dir.library_category_folder
|
||||
if not target_dir.media_type and need_type_folder:
|
||||
# 一级自动分类
|
||||
library_dir = Path(target_dir.library_path) / mediainfo.type.value
|
||||
elif target_dir.media_type and target_dir.library_type_folder:
|
||||
elif target_dir.media_type and need_type_folder:
|
||||
# 一级手动分类
|
||||
library_dir = Path(target_dir.library_path) / target_dir.media_type
|
||||
else:
|
||||
library_dir = Path(target_dir.library_path)
|
||||
|
||||
if not target_dir.media_category and target_dir.library_category_folder and mediainfo.category:
|
||||
if not target_dir.media_category and need_category_folder and mediainfo.category:
|
||||
# 二级自动分类
|
||||
library_dir = library_dir / mediainfo.category
|
||||
elif target_dir.media_category and target_dir.library_category_folder:
|
||||
elif target_dir.media_category and need_category_folder:
|
||||
# 二级手动分类
|
||||
library_dir = library_dir / target_dir.media_category
|
||||
|
||||
@@ -903,6 +924,18 @@ class FileManagerModule(_ModuleBase):
|
||||
rename_format = settings.TV_RENAME_FORMAT \
|
||||
if mediainfo.type == MediaType.TV else settings.MOVIE_RENAME_FORMAT
|
||||
|
||||
# 计算重命名中的文件夹层数
|
||||
rename_format_level = len(rename_format.split("/")) - 1
|
||||
|
||||
if rename_format_level < 1:
|
||||
# 重命名格式不合法
|
||||
logger.error(f"重命名格式不合法:{rename_format}")
|
||||
return TransferInfo(success=False,
|
||||
message=f"重命名格式不合法",
|
||||
fileitem=fileitem,
|
||||
transfer_type=transfer_type,
|
||||
need_notify=need_notify)
|
||||
|
||||
# 判断是否为文件夹
|
||||
if fileitem.type == "dir":
|
||||
# 整理整个目录,一般为蓝光原盘
|
||||
@@ -983,9 +1016,15 @@ class FileManagerModule(_ModuleBase):
|
||||
# 目的操作对象
|
||||
target_oper: StorageBase = self.__get_storage_oper(target_storage)
|
||||
# 目标目录
|
||||
target_diritem = target_oper.get_folder(
|
||||
new_file.parent) if mediainfo.type == MediaType.MOVIE else target_oper.get_folder(
|
||||
new_file.parent.parent)
|
||||
target_diritem = target_oper.get_folder(new_file.parents[rename_format_level - 1])
|
||||
if not target_diritem:
|
||||
logger.error(f"目标目录 {new_file.parents[rename_format_level - 1]} 获取失败")
|
||||
return TransferInfo(success=False,
|
||||
message=f"目标目录 {new_file.parents[rename_format_level - 1]} 获取失败",
|
||||
fileitem=fileitem,
|
||||
fail_list=[fileitem.path],
|
||||
transfer_type=transfer_type,
|
||||
need_notify=need_notify)
|
||||
# 目标文件
|
||||
target_item = target_oper.get_item(new_file)
|
||||
if target_item:
|
||||
@@ -1094,7 +1133,14 @@ class FileManagerModule(_ModuleBase):
|
||||
if episode.episode_number == meta.begin_episode:
|
||||
episode_title = episode.name
|
||||
break
|
||||
|
||||
# 获取集播出日期
|
||||
episode_date = None
|
||||
if meta.begin_episode and episodes_info:
|
||||
for episode in episodes_info:
|
||||
if episode.episode_number == meta.begin_episode:
|
||||
episode_date = episode.air_date
|
||||
break
|
||||
|
||||
return {
|
||||
# 标题
|
||||
"title": __convert_invalid_characters(mediainfo.title),
|
||||
@@ -1144,21 +1190,51 @@ class FileManagerModule(_ModuleBase):
|
||||
"part": meta.part,
|
||||
# 剧集标题
|
||||
"episode_title": __convert_invalid_characters(episode_title),
|
||||
# 剧集日期根据episodes_info值获取
|
||||
"episode_date": episode_date,
|
||||
# 文件后缀
|
||||
"fileExt": file_ext,
|
||||
# 自定义占位符
|
||||
"customization": meta.customization
|
||||
"customization": meta.customization,
|
||||
# 文件元数据
|
||||
"__meta__": meta,
|
||||
# 识别的媒体信息
|
||||
"__mediainfo__": mediainfo,
|
||||
# 当前季的全部集信息
|
||||
"__episodes_info__": episodes_info,
|
||||
}
|
||||
|
||||
@staticmethod
|
||||
def get_rename_path(template_string: str, rename_dict: dict, path: Path = None) -> Path:
|
||||
"""
|
||||
生成重命名后的完整路径
|
||||
生成重命名后的完整路径,支持智能重命名事件
|
||||
:param template_string: Jinja2 模板字符串
|
||||
:param rename_dict: 渲染上下文,用于替换模板中的变量
|
||||
:param path: 可选的基础路径,如果提供,将在其基础上拼接生成的路径
|
||||
:return: 生成的完整路径
|
||||
"""
|
||||
# 创建jinja2模板对象
|
||||
template = Template(template_string)
|
||||
# 渲染生成的字符串
|
||||
render_str = template.render(rename_dict)
|
||||
|
||||
logger.debug(f"Initial render string: {render_str}")
|
||||
# 发送智能重命名事件
|
||||
event_data = TransferRenameEventData(
|
||||
template_string=template_string,
|
||||
rename_dict=rename_dict,
|
||||
render_str=render_str,
|
||||
path=path
|
||||
)
|
||||
event = eventmanager.send_event(ChainEventType.TransferRename, event_data)
|
||||
# 检查事件返回的结果
|
||||
if event and event.event_data:
|
||||
event_data: TransferRenameEventData = event.event_data
|
||||
if event_data.updated and event_data.updated_str:
|
||||
logger.debug(f"Render string updated by event: "
|
||||
f"{render_str} -> {event_data.updated_str} (source: {event_data.source})")
|
||||
render_str = event_data.updated_str
|
||||
|
||||
# 目的路径
|
||||
if path:
|
||||
return path / render_str
|
||||
@@ -1184,17 +1260,19 @@ class FileManagerModule(_ModuleBase):
|
||||
# 重命名格式
|
||||
rename_format = settings.TV_RENAME_FORMAT \
|
||||
if mediainfo.type == MediaType.TV else settings.MOVIE_RENAME_FORMAT
|
||||
# 获取相对路径(重命名路径)
|
||||
rel_path = self.get_rename_path(
|
||||
# 计算重命名中的文件夹层数
|
||||
rename_format_level = len(rename_format.split("/")) - 1
|
||||
if rename_format_level < 1:
|
||||
continue
|
||||
# 获取路径(重命名路径)
|
||||
target_path = self.get_rename_path(
|
||||
path=dir_path,
|
||||
template_string=rename_format,
|
||||
rename_dict=self.__get_naming_dict(meta=MetaInfo(mediainfo.title),
|
||||
mediainfo=mediainfo)
|
||||
)
|
||||
# 取相对路径的第1层目录
|
||||
if rel_path.parts:
|
||||
media_path = dir_path / rel_path.parts[0]
|
||||
else:
|
||||
continue
|
||||
media_path = target_path.parents[rename_format_level - 1]
|
||||
# 检索媒体文件
|
||||
fileitem = storage_oper.get_item(media_path)
|
||||
if not fileitem:
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
from abc import ABCMeta, abstractmethod
|
||||
from pathlib import Path
|
||||
from typing import Optional, List, Union, Dict
|
||||
from typing import Optional, List, Union, Dict, Tuple
|
||||
|
||||
from app import schemas
|
||||
from app.helper.storage import StorageHelper
|
||||
@@ -16,7 +16,14 @@ class StorageBase(metaclass=ABCMeta):
|
||||
def __init__(self):
|
||||
self.storagehelper = StorageHelper()
|
||||
|
||||
def generate_qrcode(self, *args, **kwargs) -> Optional[Dict[str, str]]:
|
||||
@abstractmethod
|
||||
def init_storage(self):
|
||||
"""
|
||||
初始化
|
||||
"""
|
||||
pass
|
||||
|
||||
def generate_qrcode(self, *args, **kwargs) -> Optional[Tuple[dict, str]]:
|
||||
pass
|
||||
|
||||
def check_login(self, *args, **kwargs) -> Optional[Dict[str, str]]:
|
||||
@@ -28,11 +35,19 @@ class StorageBase(metaclass=ABCMeta):
|
||||
"""
|
||||
return self.storagehelper.get_storage(self.schema.value)
|
||||
|
||||
def get_conf(self) -> dict:
|
||||
"""
|
||||
获取配置
|
||||
"""
|
||||
conf = self.get_config()
|
||||
return conf.config if conf else {}
|
||||
|
||||
def set_config(self, conf: dict):
|
||||
"""
|
||||
设置配置
|
||||
"""
|
||||
self.storagehelper.set_storage(self.schema.value, conf)
|
||||
self.init_storage()
|
||||
|
||||
def support_transtype(self) -> dict:
|
||||
"""
|
||||
@@ -64,6 +79,8 @@ class StorageBase(metaclass=ABCMeta):
|
||||
def create_folder(self, fileitem: schemas.FileItem, name: str) -> Optional[schemas.FileItem]:
|
||||
"""
|
||||
创建目录
|
||||
:param fileitem: 父目录
|
||||
:param name: 目录名
|
||||
"""
|
||||
pass
|
||||
|
||||
@@ -107,16 +124,16 @@ class StorageBase(metaclass=ABCMeta):
|
||||
下载文件,保存到本地,返回本地临时文件地址
|
||||
:param fileitem: 文件项
|
||||
:param path: 文件保存路径
|
||||
|
||||
"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def upload(self, fileitem: schemas.FileItem, path: Path) -> Optional[schemas.FileItem]:
|
||||
def upload(self, fileitem: schemas.FileItem, path: Path, new_name: str = None) -> Optional[schemas.FileItem]:
|
||||
"""
|
||||
上传文件
|
||||
:param fileitem: 上传目录项
|
||||
:param path: 本地文件路径
|
||||
:param new_name: 上传后文件名
|
||||
"""
|
||||
pass
|
||||
|
||||
@@ -128,16 +145,22 @@ class StorageBase(metaclass=ABCMeta):
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def copy(self, fileitem: schemas.FileItem, target: Union[schemas.FileItem, Path]) -> bool:
|
||||
def copy(self, fileitem: schemas.FileItem, path: Path, new_name: str) -> bool:
|
||||
"""
|
||||
复制文件
|
||||
:param fileitem: 文件项
|
||||
:param path: 目标目录
|
||||
:param new_name: 新文件名
|
||||
"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def move(self, fileitem: schemas.FileItem, target: Union[schemas.FileItem, Path]) -> bool:
|
||||
def move(self, fileitem: schemas.FileItem, path: Path, new_name: str) -> bool:
|
||||
"""
|
||||
移动文件
|
||||
:param fileitem: 文件项
|
||||
:param path: 目标目录
|
||||
:param new_name: 新文件名
|
||||
"""
|
||||
pass
|
||||
|
||||
|
||||
@@ -16,10 +16,11 @@ from app.schemas.types import StorageSchema
|
||||
from app.utils.http import RequestUtils
|
||||
from aligo import Aligo, BaseFile
|
||||
|
||||
from app.utils.singleton import Singleton
|
||||
from app.utils.string import StringUtils
|
||||
|
||||
|
||||
class AliPan(StorageBase):
|
||||
class AliPan(StorageBase, metaclass=Singleton):
|
||||
"""
|
||||
阿里云相关操作
|
||||
"""
|
||||
@@ -54,17 +55,27 @@ class AliPan(StorageBase):
|
||||
except FileNotFoundError:
|
||||
logger.debug('未发现 aria2c')
|
||||
self._has_aria2c = False
|
||||
self.init_storage()
|
||||
|
||||
self.__init_aligo()
|
||||
|
||||
def __init_aligo(self):
|
||||
def init_storage(self):
|
||||
"""
|
||||
初始化 aligo
|
||||
"""
|
||||
|
||||
def show_qrcode(qr_link: str):
|
||||
"""
|
||||
显示二维码
|
||||
"""
|
||||
logger.info(f"请用阿里云盘 App 扫码登录:{qr_link}")
|
||||
|
||||
refresh_token = self.__auth_params.get("refreshToken")
|
||||
if refresh_token:
|
||||
self.aligo = Aligo(refresh_token=refresh_token, use_aria2=self._has_aria2c,
|
||||
name="MoviePilot V2", level=logging.ERROR)
|
||||
try:
|
||||
self.aligo = Aligo(refresh_token=refresh_token, show=show_qrcode, use_aria2=self._has_aria2c,
|
||||
name="MoviePilot V2", level=logging.ERROR, re_login=False)
|
||||
except Exception as err:
|
||||
logger.error(f"初始化阿里云盘失败:{str(err)}")
|
||||
self.__clear_params()
|
||||
|
||||
@property
|
||||
def __auth_params(self):
|
||||
@@ -160,7 +171,7 @@ class AliPan(StorageBase):
|
||||
})
|
||||
self.__update_params(data)
|
||||
self.__update_drives()
|
||||
self.__init_aligo()
|
||||
self.init_storage()
|
||||
except Exception as e:
|
||||
return {}, f"bizExt 解码失败:{str(e)}"
|
||||
return data, ""
|
||||
@@ -180,12 +191,16 @@ class AliPan(StorageBase):
|
||||
"""
|
||||
获取用户信息(drive_id等)
|
||||
"""
|
||||
if not self.aligo:
|
||||
return {}
|
||||
return self.aligo.get_user()
|
||||
|
||||
def __update_drives(self):
|
||||
"""
|
||||
更新用户存储根目录
|
||||
"""
|
||||
if not self.aligo:
|
||||
return
|
||||
drivers = self.aligo.list_my_drives()
|
||||
for driver in drivers:
|
||||
if driver.category == "resource":
|
||||
@@ -240,28 +255,9 @@ class AliPan(StorageBase):
|
||||
return []
|
||||
# 根目录处理
|
||||
if not fileitem or not fileitem.drive_id:
|
||||
return [
|
||||
schemas.FileItem(
|
||||
storage=self.schema.value,
|
||||
fileid="root",
|
||||
drive_id=self.__auth_params.get("resourceDriveId"),
|
||||
parent_fileid="root",
|
||||
type="dir",
|
||||
path="/资源库/",
|
||||
name="资源库",
|
||||
basename="资源库"
|
||||
),
|
||||
schemas.FileItem(
|
||||
storage=self.schema.value,
|
||||
fileid="root",
|
||||
drive_id=self.__auth_params.get("backDriveId"),
|
||||
parent_fileid="root",
|
||||
type="dir",
|
||||
path="/备份盘/",
|
||||
name="备份盘",
|
||||
basename="备份盘"
|
||||
)
|
||||
]
|
||||
items = self.aligo.get_file_list()
|
||||
if items:
|
||||
return [self.__get_fileitem(item) for item in items]
|
||||
elif fileitem.type == "file":
|
||||
# 文件处理
|
||||
file = self.detail(fileitem)
|
||||
@@ -276,6 +272,8 @@ class AliPan(StorageBase):
|
||||
def create_folder(self, fileitem: schemas.FileItem, name: str) -> Optional[schemas.FileItem]:
|
||||
"""
|
||||
创建目录
|
||||
:param fileitem: 父目录
|
||||
:param name: 目录名
|
||||
"""
|
||||
if not self.aligo:
|
||||
return None
|
||||
@@ -283,21 +281,43 @@ class AliPan(StorageBase):
|
||||
if item:
|
||||
if isinstance(item, CreateFileResponse):
|
||||
item = self.aligo.get_file(file_id=item.file_id, drive_id=item.drive_id)
|
||||
return self.__get_fileitem(item)
|
||||
return self.__get_fileitem(item, parent=fileitem.path)
|
||||
return None
|
||||
|
||||
def get_folder(self, path: Path) -> Optional[schemas.FileItem]:
|
||||
"""
|
||||
根据文件路程获取目录,不存在则创建
|
||||
"""
|
||||
if not self.aligo:
|
||||
|
||||
def __find_dir(_fileitem: schemas.FileItem, _name: str) -> Optional[schemas.FileItem]:
|
||||
"""
|
||||
查找下级目录中匹配名称的目录
|
||||
"""
|
||||
for sub_folder in self.list(_fileitem):
|
||||
if sub_folder.type != "dir":
|
||||
continue
|
||||
if sub_folder.name == _name:
|
||||
return sub_folder
|
||||
return None
|
||||
item = self.aligo.get_folder_by_path(path=str(path), create_folder=True)
|
||||
if item:
|
||||
if isinstance(item, CreateFileResponse):
|
||||
item = self.aligo.get_file(file_id=item.file_id, drive_id=item.drive_id)
|
||||
return self.__get_fileitem(item)
|
||||
return None
|
||||
|
||||
# 是否已存在
|
||||
folder = self.get_item(path)
|
||||
if folder:
|
||||
return folder
|
||||
# 逐级查找和创建目录
|
||||
fileitem = schemas.FileItem(path="/")
|
||||
for part in path.parts:
|
||||
if part == "/":
|
||||
continue
|
||||
dir_file = __find_dir(fileitem, part)
|
||||
if dir_file:
|
||||
fileitem = dir_file
|
||||
else:
|
||||
dir_file = self.create_folder(fileitem, part)
|
||||
if not dir_file:
|
||||
return None
|
||||
fileitem = dir_file
|
||||
return fileitem
|
||||
|
||||
def get_item(self, path: Path) -> Optional[schemas.FileItem]:
|
||||
"""
|
||||
@@ -307,7 +327,7 @@ class AliPan(StorageBase):
|
||||
return None
|
||||
item = self.aligo.get_file_by_path(path=str(path))
|
||||
if item:
|
||||
return self.__get_fileitem(item)
|
||||
return self.__get_fileitem(item, parent=path.parent)
|
||||
return None
|
||||
|
||||
def delete(self, fileitem: schemas.FileItem) -> bool:
|
||||
@@ -328,7 +348,7 @@ class AliPan(StorageBase):
|
||||
return None
|
||||
item = self.aligo.get_file(file_id=fileitem.fileid, drive_id=fileitem.drive_id)
|
||||
if item:
|
||||
return self.__get_fileitem(item)
|
||||
return self.__get_fileitem(item, parent=fileitem.path)
|
||||
return None
|
||||
|
||||
def rename(self, fileitem: schemas.FileItem, name: str) -> bool:
|
||||
@@ -347,41 +367,66 @@ class AliPan(StorageBase):
|
||||
"""
|
||||
if not self.aligo:
|
||||
return None
|
||||
local_path = self.aligo.download_file(file_id=fileitem.fileid, drive_id=fileitem.drive_id,
|
||||
local_path = self.aligo.download_file(file_id=fileitem.fileid, drive_id=fileitem.drive_id, # noqa
|
||||
local_folder=str(path or settings.TEMP_PATH))
|
||||
if local_path:
|
||||
return Path(local_path)
|
||||
return None
|
||||
|
||||
def upload(self, fileitem: schemas.FileItem, path: Path) -> Optional[schemas.FileItem]:
|
||||
def upload(self, fileitem: schemas.FileItem, path: Path, new_name: str = None) -> Optional[schemas.FileItem]:
|
||||
"""
|
||||
上传文件,并标记完成
|
||||
:param fileitem: 上传目录项
|
||||
:param path: 本地文件路径
|
||||
:param new_name: 上传后文件名
|
||||
"""
|
||||
if not self.aligo:
|
||||
return None
|
||||
# 上传文件
|
||||
result = self.aligo.upload_file(file_path=str(path), parent_file_id=fileitem.fileid,
|
||||
drive_id=fileitem.drive_id, name=path.name,
|
||||
drive_id=fileitem.drive_id, name=new_name or path.name,
|
||||
check_name_mode="refuse")
|
||||
if result:
|
||||
item = self.aligo.get_file(file_id=result.file_id, drive_id=result.drive_id)
|
||||
if item:
|
||||
return self.__get_fileitem(item)
|
||||
return self.__get_fileitem(item, parent=fileitem.path)
|
||||
return None
|
||||
|
||||
def move(self, fileitem: schemas.FileItem, target: schemas.FileItem) -> bool:
|
||||
def move(self, fileitem: schemas.FileItem, path: Path, new_name: str) -> bool:
|
||||
"""
|
||||
移动文件
|
||||
:param fileitem: 文件项
|
||||
:param path: 目标目录
|
||||
:param new_name: 新文件名
|
||||
"""
|
||||
if not self.aligo:
|
||||
return False
|
||||
target = self.get_folder(path)
|
||||
if not target:
|
||||
return False
|
||||
if self.aligo.move_file(file_id=fileitem.fileid, drive_id=fileitem.drive_id,
|
||||
to_parent_file_id=target.fileid, to_drive_id=target.drive_id):
|
||||
to_parent_file_id=target.fileid, to_drive_id=target.drive_id,
|
||||
new_name=new_name):
|
||||
return True
|
||||
return False
|
||||
|
||||
def copy(self, fileitem: schemas.FileItem, target: schemas.FileItem) -> bool:
|
||||
pass
|
||||
def copy(self, fileitem: schemas.FileItem, path: Path, new_name: str) -> bool:
|
||||
"""
|
||||
复制文件
|
||||
:param fileitem: 文件项
|
||||
:param path: 目标目录
|
||||
:param new_name: 新文件名
|
||||
"""
|
||||
if not self.aligo:
|
||||
return False
|
||||
target = self.get_folder(path)
|
||||
if not target:
|
||||
return False
|
||||
if self.aligo.copy_file(file_id=fileitem.fileid, drive_id=fileitem.drive_id,
|
||||
to_parent_file_id=target.fileid, to_drive_id=target.drive_id,
|
||||
new_name=new_name):
|
||||
return True
|
||||
return False
|
||||
|
||||
def link(self, fileitem: schemas.FileItem, target_file: Path) -> bool:
|
||||
"""
|
||||
|
||||
741
app/modules/filemanager/storages/alist.py
Normal file
741
app/modules/filemanager/storages/alist.py
Normal file
@@ -0,0 +1,741 @@
|
||||
import json
|
||||
import logging
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from typing import Optional, List, Dict
|
||||
|
||||
from cachetools import cached, TTLCache
|
||||
from requests import Response
|
||||
|
||||
from app import schemas
|
||||
from app.core.config import settings
|
||||
from app.log import logger
|
||||
from app.modules.filemanager.storages import StorageBase
|
||||
from app.schemas.types import StorageSchema
|
||||
from app.utils.http import RequestUtils
|
||||
from app.utils.singleton import Singleton
|
||||
from app.utils.url import UrlUtils
|
||||
|
||||
|
||||
class Alist(StorageBase, metaclass=Singleton):
    """
    Alist 相关操作
    api文档:https://alist.nn.ci/zh/guide/api
    """

    # 存储类型
    schema = StorageSchema.Alist

    # 支持的整理方式
    transtype = {
        "copy": "复制",
        "move": "移动",
    }

    # 临时令牌缓存:2天有效,提前5分钟过期以便刷新。
    # 只缓存“成功获取”的令牌,失败不落缓存,避免一次网络故障导致2天内不可用。
    _token_cache = TTLCache(maxsize=1, ttl=60 * 60 * 24 * 2 - 60 * 5)

    def __init__(self):
        super().__init__()

    def init_storage(self):
        """
        初始化(Alist无需额外初始化)
        """
        pass

    @property
    def __get_base_url(self) -> str:
        """
        获取基础URL,未配置时返回空字符串
        """
        url = self.get_conf().get("url")
        if url is None:
            return ""
        return UrlUtils.standardize_base_url(url)

    def __get_api_url(self, path: str) -> str:
        """
        根据接口路径拼接完整的API URL
        """
        return UrlUtils.adapt_request_url(self.__get_base_url, path)

    @property
    def __get_valuable_token(self) -> str:
        """
        获取一个可用的token
        NOTE(review): 按注释设计应优先返回配置的永久令牌,但原实现未读取该配置,
        目前始终使用账号密码生成临时令牌 —— 待确认配置项名称后补充。
        """
        return self.__generate_token

    @property
    def __generate_token(self) -> str:
        """
        使用账号密码生成一个临时token
        成功结果缓存2天(提前5分钟失效);失败返回空字符串且不缓存
        """
        token = self._token_cache.get("token")
        if token:
            return token
        conf = self.get_conf()
        # 登录接口不能带token请求头,故不走 __post_api 统一入口
        resp: Response = RequestUtils(headers={
            'Content-Type': 'application/json'
        }).post_res(
            self.__get_api_url("/api/auth/login"),
            data=json.dumps({
                "username": conf.get("username"),
                "password": conf.get("password"),
            }),
        )
        if resp is None:
            logger.warning("请求登录失败,无法连接alist服务")
            return ""
        if resp.status_code != 200:
            logger.warning(f"更新令牌请求发送失败,状态码:{resp.status_code}")
            return ""
        result = resp.json()
        if result.get("code") != 200:
            logger.critical(f'更新令牌,错误信息:{result.get("message")}')
            return ""
        logger.debug("AList获取令牌成功")
        token = result["data"]["token"]
        self._token_cache["token"] = token
        return token

    def __get_header_with_token(self) -> dict:
        """
        获取带有token的header
        """
        return {"Authorization": self.__get_valuable_token}

    def __post_api(self, api: str, action: str, silent: bool = False, **kwargs) -> Optional[dict]:
        """
        调用Alist POST接口并统一处理三类错误:连接失败、HTTP状态码异常、业务码异常
        :param api: 接口路径,如 /api/fs/list
        :param action: 操作描述,用于日志,如 "获取文件 /xx"
        :param silent: 业务码非200时仅记录debug日志(用于探测文件是否存在等正常场景)
        :param kwargs: 透传给 RequestUtils.post_res(json=...、data=... 等)
        :return: 成功时返回接口完整JSON,失败返回None
        """
        resp: Response = RequestUtils(
            headers=self.__get_header_with_token()
        ).post_res(self.__get_api_url(api), **kwargs)
        if resp is None:
            logger.warning(f"请求{action}失败,无法连接alist服务")
            return None
        if resp.status_code != 200:
            logger.warning(f"请求{action}失败,状态码:{resp.status_code}")
            return None
        result = resp.json()
        if result.get("code") != 200:
            log = logger.debug if silent else logger.warning
            log(f'{action}失败,错误信息:{result.get("message")}')
            return None
        return result

    def check(self) -> bool:
        """
        检查存储是否可用
        NOTE(review): 原实现未实现此方法(返回None),保持不变,待确认调用方语义
        """
        pass

    def list(
        self,
        fileitem: schemas.FileItem,
        password: str = "",
        page: int = 1,
        per_page: int = 0,
        refresh: bool = False,
    ) -> Optional[List[schemas.FileItem]]:
        """
        浏览文件(/api/fs/list)
        :param fileitem: 文件项,type为file时返回自身信息
        :param password: 路径密码
        :param page: 页码
        :param per_page: 每页数量
        :param refresh: 是否刷新
        :return: 文件项列表,失败返回None
        """
        if fileitem.type == "file":
            item = self.get_item(Path(fileitem.path))
            return [item] if item else None
        result = self.__post_api(
            "/api/fs/list",
            f"获取目录 {fileitem.path} 的文件列表",
            json={
                "path": fileitem.path,
                "password": password,
                "page": page,
                "per_page": per_page,
                "refresh": refresh,
            },
        )
        if result is None:
            return None
        return [
            schemas.FileItem(
                storage=self.schema.value,
                type="dir" if item["is_dir"] else "file",
                # 目录路径统一以 / 结尾
                path=(Path(fileitem.path) / item["name"]).as_posix() + ("/" if item["is_dir"] else ""),
                name=item["name"],
                basename=Path(item["name"]).stem,
                extension=Path(item["name"]).suffix[1:] if not item["is_dir"] else None,
                size=item["size"] if not item["is_dir"] else None,
                modify_time=self.__parse_timestamp(item["modified"]),
                thumbnail=item["thumb"],
            )
            for item in result["data"]["content"] or []
        ]

    def create_folder(
        self, fileitem: schemas.FileItem, name: str
    ) -> Optional[schemas.FileItem]:
        """
        创建目录(/api/fs/mkdir)
        :param fileitem: 父目录
        :param name: 目录名
        :return: 新目录的文件项,失败返回None
        """
        path = Path(fileitem.path) / name
        result = self.__post_api(
            "/api/fs/mkdir",
            f"创建目录 {path}",
            json={"path": path.as_posix()},
        )
        if result is None:
            return None
        return self.get_item(path)

    def get_folder(self, path: Path) -> Optional[schemas.FileItem]:
        """
        获取目录,如目录不存在则创建
        """
        folder = self.get_item(path)
        if folder:
            return folder
        return self.create_folder(schemas.FileItem(
            storage=self.schema.value,
            type="dir",
            path=path.parent.as_posix(),
            name=path.name,
            basename=path.stem
        ), path.name)

    def get_item(
        self,
        path: Path,
        password: str = "",
        page: int = 1,
        per_page: int = 0,
        refresh: bool = False,
    ) -> Optional[schemas.FileItem]:
        """
        获取文件或目录(/api/fs/get),不存在返回None
        :param path: 文件路径
        :param password: 路径密码
        :param page: 页码
        :param per_page: 每页数量
        :param refresh: 是否刷新
        """
        # silent=True:文件不存在属于正常情况,业务码失败仅记录debug日志
        result = self.__post_api(
            "/api/fs/get",
            f"获取文件 {path}",
            silent=True,
            json={
                "path": path.as_posix(),
                "password": password,
                "page": page,
                "per_page": per_page,
                "refresh": refresh,
            },
        )
        if result is None:
            return None
        data = result["data"]
        return schemas.FileItem(
            storage=self.schema.value,
            type="dir" if data["is_dir"] else "file",
            path=path.as_posix() + ("/" if data["is_dir"] else ""),
            name=data["name"],
            basename=Path(data["name"]).stem,
            extension=Path(data["name"]).suffix[1:],
            size=data["size"],
            modify_time=self.__parse_timestamp(data["modified"]),
            thumbnail=data["thumb"],
        )

    def get_parent(self, fileitem: schemas.FileItem) -> Optional[schemas.FileItem]:
        """
        获取父目录
        """
        return self.get_folder(Path(fileitem.path).parent)

    def delete(self, fileitem: schemas.FileItem) -> bool:
        """
        删除文件(/api/fs/remove)
        """
        result = self.__post_api(
            "/api/fs/remove",
            f"删除文件 {fileitem.path}",
            json={
                "dir": Path(fileitem.path).parent.as_posix(),
                "names": [fileitem.name],
            },
        )
        return result is not None

    def rename(self, fileitem: schemas.FileItem, name: str) -> bool:
        """
        重命名文件(/api/fs/rename)
        """
        result = self.__post_api(
            "/api/fs/rename",
            f"重命名文件 {fileitem.path}",
            json={
                "name": name,
                "path": fileitem.path,
            },
        )
        return result is not None

    def download(
        self,
        fileitem: schemas.FileItem,
        path: Path = None,
        password: str = "",
    ) -> Optional[Path]:
        """
        下载文件,保存到本地,返回本地临时文件地址
        :param fileitem: 文件项
        :param path: 文件保存路径,缺省为 settings.TEMP_PATH
        :param password: 文件密码
        """
        result = self.__post_api(
            "/api/fs/get",
            f"获取文件 {fileitem.path}",
            json={
                "path": fileitem.path,
                "password": password,
                "page": 1,
                "per_page": 0,
                "refresh": False,
            },
        )
        if result is None:
            return None
        data = result["data"]
        if data["raw_url"]:
            download_url = data["raw_url"]
        else:
            # 无直链时走 /d 代理下载,带签名
            download_url = UrlUtils.adapt_request_url(self.__get_base_url, f"/d{fileitem.path}")
            if data["sign"]:
                download_url = download_url + "?sign=" + data["sign"]

        resp = RequestUtils(
            headers=self.__get_header_with_token()
        ).get_res(download_url)
        # 修复:下载请求可能失败(None或非200),避免直接访问 resp.content 抛异常
        if resp is None or resp.status_code != 200:
            logger.warning(f"下载文件 {fileitem.path} 失败")
            return None

        new_path = (path or settings.TEMP_PATH) / fileitem.name
        with open(new_path, "wb") as f:
            f.write(resp.content)

        return new_path if new_path.exists() else None

    def upload(
        self, fileitem: schemas.FileItem, path: Path, new_name: str = None, task: bool = False
    ) -> Optional[schemas.FileItem]:
        """
        上传文件(PUT /api/fs/form)
        :param fileitem: 上传目录项
        :param new_name: 上传后文件名
        :param path: 本地文件路径
        :param task: 是否为任务,默认为False避免未完成上传时对文件进行操作
        """
        # NOTE(review): File-Path 使用的是目录路径,Alist文档中该头应为完整目标文件路径,
        # 保持原行为不变,待实测确认
        encoded_path = UrlUtils.quote(fileitem.path)
        headers = self.__get_header_with_token()
        headers.setdefault("Content-Type", "multipart/form-data")
        headers.setdefault("As-Task", str(task).lower())
        headers.setdefault("File-Path", encoded_path)
        with open(path, "rb") as f:
            resp: Response = RequestUtils(headers=headers).put_res(
                self.__get_api_url("/api/fs/form"),
                data={"file": f},
            )

        # 修复:原实现未判断 resp 为 None(连接失败)的情况
        if resp is None:
            logger.warning(f"请求上传文件 {path} 失败,无法连接alist服务")
            return None
        if resp.status_code != 200:
            logger.warning(f"请求上传文件 {path} 失败,状态码:{resp.status_code}")
            return None

        new_item = self.get_item(Path(fileitem.path) / path.name)
        # 修复:new_item 可能为 None,避免传入 rename 抛异常
        if new_item and new_name and new_name != path.name:
            if self.rename(new_item, new_name):
                return self.get_item(Path(new_item.path).with_name(new_name))
        return new_item

    def detail(self, fileitem: schemas.FileItem) -> Optional[schemas.FileItem]:
        """
        获取文件详情
        """
        return self.get_item(Path(fileitem.path))

    def copy(self, fileitem: schemas.FileItem, path: Path, new_name: str) -> bool:
        """
        复制文件(/api/fs/copy)
        :param fileitem: 文件项
        :param path: 目标目录
        :param new_name: 新文件名
        """
        result = self.__post_api(
            "/api/fs/copy",
            f"复制文件 {fileitem.path}",
            json={
                "src_dir": Path(fileitem.path).parent.as_posix(),
                "dst_dir": path.as_posix(),
                "names": [fileitem.name],
            },
        )
        if result is None:
            return False
        # 复制后按需重命名;修复:get_item 可能为 None,避免传入 rename 抛异常
        if fileitem.name != new_name:
            item = self.get_item(path / fileitem.name)
            if item:
                self.rename(item, new_name)
        return True

    def move(self, fileitem: schemas.FileItem, path: Path, new_name: str) -> bool:
        """
        移动文件(/api/fs/move)
        :param fileitem: 文件项
        :param path: 目标目录
        :param new_name: 新文件名
        """
        # 先重命名,再按新名称移动
        if fileitem.name != new_name:
            self.rename(fileitem, new_name)
        result = self.__post_api(
            "/api/fs/move",
            f"移动文件 {fileitem.path}",
            json={
                "src_dir": Path(fileitem.path).parent.as_posix(),
                "dst_dir": path.as_posix(),
                "names": [new_name],
            },
        )
        return result is not None

    def link(self, fileitem: schemas.FileItem, target_file: Path) -> bool:
        """
        硬链接文件(Alist不支持)
        """
        pass

    def softlink(self, fileitem: schemas.FileItem, target_file: Path) -> bool:
        """
        软链接文件(Alist不支持)
        """
        pass

    def usage(self) -> Optional[schemas.StorageUsage]:
        """
        存储使用情况(Alist未实现)
        """
        pass

    def snapshot(self, path: Path) -> Dict[str, float]:
        """
        快照文件系统,输出所有层级文件信息(不含目录)
        :return: {文件路径: 文件大小} 映射,路径不存在返回空字典
        """
        files_info: Dict[str, float] = {}

        def __snapshot_file(_fileitm: schemas.FileItem):
            """
            递归获取文件信息
            """
            if _fileitm.type == "dir":
                # 修复:list() 失败时返回 None,兜底为空列表避免迭代报错
                for sub_file in self.list(_fileitm) or []:
                    __snapshot_file(sub_file)
            else:
                files_info[_fileitm.path] = _fileitm.size

        fileitem = self.get_item(path)
        if not fileitem:
            return {}

        __snapshot_file(fileitem)

        return files_info

    @staticmethod
    def __parse_timestamp(time_str: str) -> float:
        """
        解析 ISO 8601 格式时间为POSIX时间戳
        Alist 可能返回7位小数秒(如 2024-05-17T13:47:55.4174917+08:00),
        Python 3.11 之前 fromisoformat 只接受3/6位小数,失败时截断到6位重试
        """
        try:
            return datetime.fromisoformat(time_str).timestamp()
        except ValueError:
            import re
            normalized = re.sub(r"\.(\d{6})\d+", r".\1", time_str)
            return datetime.fromisoformat(normalized).timestamp()
|
||||
@@ -25,13 +25,19 @@ class LocalStorage(StorageBase):
|
||||
"softlink": "软链接"
|
||||
}
|
||||
|
||||
def init_storage(self):
|
||||
"""
|
||||
初始化
|
||||
"""
|
||||
pass
|
||||
|
||||
def check(self) -> bool:
|
||||
"""
|
||||
检查存储是否可用
|
||||
"""
|
||||
return True
|
||||
|
||||
def __get_fileitem(self, path: Path):
|
||||
def __get_fileitem(self, path: Path) -> schemas.FileItem:
|
||||
"""
|
||||
获取文件项
|
||||
"""
|
||||
@@ -46,7 +52,7 @@ class LocalStorage(StorageBase):
|
||||
modify_time=path.stat().st_mtime,
|
||||
)
|
||||
|
||||
def __get_diritem(self, path: Path):
|
||||
def __get_diritem(self, path: Path) -> schemas.FileItem:
|
||||
"""
|
||||
获取目录项
|
||||
"""
|
||||
@@ -109,6 +115,8 @@ class LocalStorage(StorageBase):
|
||||
def create_folder(self, fileitem: schemas.FileItem, name: str) -> Optional[schemas.FileItem]:
|
||||
"""
|
||||
创建目录
|
||||
:param fileitem: 父目录
|
||||
:param name: 目录名
|
||||
"""
|
||||
if not fileitem.path:
|
||||
return None
|
||||
@@ -183,28 +191,20 @@ class LocalStorage(StorageBase):
|
||||
"""
|
||||
return Path(fileitem.path)
|
||||
|
||||
def upload(self, fileitem: schemas.FileItem, path: Path) -> Optional[schemas.FileItem]:
|
||||
def upload(self, fileitem: schemas.FileItem, path: Path, new_name: str = None) -> Optional[schemas.FileItem]:
|
||||
"""
|
||||
上传文件
|
||||
:param fileitem: 上传目录项
|
||||
:param path: 本地文件路径
|
||||
:param new_name: 上传后文件名
|
||||
"""
|
||||
dir_path = Path(fileitem.path)
|
||||
target_path = dir_path / path.name
|
||||
target_path = dir_path / (new_name or path.name)
|
||||
code, message = SystemUtils.move(path, target_path)
|
||||
if code != 0:
|
||||
logger.error(f"移动文件失败:{message}")
|
||||
return None
|
||||
return self.__get_diritem(target_path)
|
||||
|
||||
def copy(self, fileitem: schemas.FileItem, target_file: Path) -> bool:
|
||||
"""
|
||||
复制文件
|
||||
"""
|
||||
file_path = Path(fileitem.path)
|
||||
code, message = SystemUtils.copy(file_path, target_file)
|
||||
if code != 0:
|
||||
logger.error(f"复制文件失败:{message}")
|
||||
return False
|
||||
return True
|
||||
return self.get_item(target_path)
|
||||
|
||||
def link(self, fileitem: schemas.FileItem, target_file: Path) -> bool:
|
||||
"""
|
||||
@@ -228,12 +228,29 @@ class LocalStorage(StorageBase):
|
||||
return False
|
||||
return True
|
||||
|
||||
def move(self, fileitem: schemas.FileItem, target: Path) -> bool:
|
||||
def copy(self, fileitem: schemas.FileItem, path: Path, new_name: str) -> bool:
|
||||
"""
|
||||
移动文件
|
||||
复制文件
|
||||
:param fileitem: 文件项
|
||||
:param path: 目标目录
|
||||
:param new_name: 新文件名
|
||||
"""
|
||||
file_path = Path(fileitem.path)
|
||||
code, message = SystemUtils.move(file_path, target)
|
||||
code, message = SystemUtils.copy(file_path, path / new_name)
|
||||
if code != 0:
|
||||
logger.error(f"复制文件失败:{message}")
|
||||
return False
|
||||
return True
|
||||
|
||||
def move(self, fileitem: schemas.FileItem, path: Path, new_name: str) -> bool:
|
||||
"""
|
||||
移动文件
|
||||
:param fileitem: 文件项
|
||||
:param path: 目标目录
|
||||
:param new_name: 新文件名
|
||||
"""
|
||||
file_path = Path(fileitem.path)
|
||||
code, message = SystemUtils.move(file_path, path / new_name)
|
||||
if code != 0:
|
||||
logger.error(f"移动文件失败:{message}")
|
||||
return False
|
||||
|
||||
@@ -27,6 +27,12 @@ class Rclone(StorageBase):
|
||||
"copy": "复制"
|
||||
}
|
||||
|
||||
def init_storage(self):
|
||||
"""
|
||||
初始化
|
||||
"""
|
||||
pass
|
||||
|
||||
def set_config(self, conf: dict):
|
||||
"""
|
||||
设置配置
|
||||
@@ -39,7 +45,7 @@ class Rclone(StorageBase):
|
||||
path = Path(filepath)
|
||||
if not path.parent.exists():
|
||||
path.parent.mkdir(parents=True)
|
||||
path.write_text(conf.get('content'))
|
||||
path.write_text(conf.get('content'), encoding='utf-8')
|
||||
|
||||
@staticmethod
|
||||
def __get_hidden_shell():
|
||||
@@ -76,7 +82,7 @@ class Rclone(StorageBase):
|
||||
return schemas.FileItem(
|
||||
storage=self.schema.value,
|
||||
type="dir",
|
||||
path=f"{parent}{item.get('Name')}",
|
||||
path=f"{parent}{item.get('Name')}" + "/",
|
||||
name=item.get("Name"),
|
||||
basename=item.get("Name"),
|
||||
modify_time=StringUtils.str_to_timestamp(item.get("ModTime"))
|
||||
@@ -133,6 +139,8 @@ class Rclone(StorageBase):
|
||||
def create_folder(self, fileitem: schemas.FileItem, name: str) -> Optional[schemas.FileItem]:
|
||||
"""
|
||||
创建目录
|
||||
:param fileitem: 父目录
|
||||
:param name: 目录名
|
||||
"""
|
||||
try:
|
||||
retcode = subprocess.run(
|
||||
@@ -143,10 +151,7 @@ class Rclone(StorageBase):
|
||||
startupinfo=self.__get_hidden_shell()
|
||||
).returncode
|
||||
if retcode == 0:
|
||||
ret_fileitem = copy.deepcopy(fileitem)
|
||||
ret_fileitem.path = f"{fileitem.path}/{name}/"
|
||||
ret_fileitem.name = name
|
||||
return ret_fileitem
|
||||
return self.get_item(Path(f"{fileitem.path}/{name}"))
|
||||
except Exception as err:
|
||||
logger.error(f"rclone创建目录失败:{err}")
|
||||
return None
|
||||
@@ -160,13 +165,17 @@ class Rclone(StorageBase):
|
||||
"""
|
||||
查找下级目录中匹配名称的目录
|
||||
"""
|
||||
for sub_file in self.list(_fileitem):
|
||||
if sub_file.type != "dir":
|
||||
for sub_folder in self.list(_fileitem):
|
||||
if sub_folder.type != "dir":
|
||||
continue
|
||||
if sub_file.name == _name:
|
||||
return sub_file
|
||||
if sub_folder.name == _name:
|
||||
return sub_folder
|
||||
return None
|
||||
|
||||
# 是否已存在
|
||||
folder = self.get_item(path)
|
||||
if folder:
|
||||
return folder
|
||||
# 逐级查找和创建目录
|
||||
fileitem = schemas.FileItem(path="/")
|
||||
for part in path.parts:
|
||||
@@ -260,21 +269,25 @@ class Rclone(StorageBase):
|
||||
logger.error(f"rclone复制文件失败:{err}")
|
||||
return None
|
||||
|
||||
def upload(self, fileitem: schemas.FileItem, path: Path) -> Optional[schemas.FileItem]:
|
||||
def upload(self, fileitem: schemas.FileItem, path: Path, new_name: str = None) -> Optional[schemas.FileItem]:
|
||||
"""
|
||||
上传文件
|
||||
:param fileitem: 上传目录项
|
||||
:param path: 本地文件路径
|
||||
:param new_name: 上传后文件名
|
||||
"""
|
||||
try:
|
||||
new_path = Path(fileitem.path) / (new_name or path.name)
|
||||
retcode = subprocess.run(
|
||||
[
|
||||
'rclone', 'copyto',
|
||||
str(path),
|
||||
f'MP:{Path(fileitem.path) / path.name}'
|
||||
f'MP:{new_path}'
|
||||
],
|
||||
startupinfo=self.__get_hidden_shell()
|
||||
).returncode
|
||||
if retcode == 0:
|
||||
return self.__get_fileitem(path)
|
||||
return self.__get_fileitem(new_path)
|
||||
except Exception as err:
|
||||
logger.error(f"rclone上传文件失败:{err}")
|
||||
return None
|
||||
@@ -299,16 +312,19 @@ class Rclone(StorageBase):
|
||||
logger.error(f"rclone获取文件详情失败:{err}")
|
||||
return None
|
||||
|
||||
def move(self, fileitem: schemas.FileItem, target: Path) -> bool:
|
||||
def move(self, fileitem: schemas.FileItem, path: Path, new_name: str) -> bool:
|
||||
"""
|
||||
移动文件,target_file格式:rclone:path
|
||||
移动文件
|
||||
:param fileitem: 文件项
|
||||
:param path: 目标目录
|
||||
:param new_name: 新文件名
|
||||
"""
|
||||
try:
|
||||
retcode = subprocess.run(
|
||||
[
|
||||
'rclone', 'moveto',
|
||||
f'MP:{fileitem.path}',
|
||||
f'MP:{target}'
|
||||
f'MP:{path / new_name}'
|
||||
],
|
||||
startupinfo=self.__get_hidden_shell()
|
||||
).returncode
|
||||
@@ -318,8 +334,27 @@ class Rclone(StorageBase):
|
||||
logger.error(f"rclone移动文件失败:{err}")
|
||||
return False
|
||||
|
||||
def copy(self, fileitem: schemas.FileItem, target_file: Path) -> bool:
|
||||
pass
|
||||
def copy(self, fileitem: schemas.FileItem, path: Path, new_name: str) -> bool:
|
||||
"""
|
||||
复制文件
|
||||
:param fileitem: 文件项
|
||||
:param path: 目标目录
|
||||
:param new_name: 新文件名
|
||||
"""
|
||||
try:
|
||||
retcode = subprocess.run(
|
||||
[
|
||||
'rclone', 'copyto',
|
||||
f'MP:{fileitem.path}',
|
||||
f'MP:{path / new_name}'
|
||||
],
|
||||
startupinfo=self.__get_hidden_shell()
|
||||
).returncode
|
||||
if retcode == 0:
|
||||
return True
|
||||
except Exception as err:
|
||||
logger.error(f"rclone复制文件失败:{err}")
|
||||
return False
|
||||
|
||||
def link(self, fileitem: schemas.FileItem, target_file: Path) -> bool:
|
||||
pass
|
||||
|
||||
@@ -1,12 +1,7 @@
|
||||
import base64
|
||||
import subprocess
|
||||
from pathlib import Path
|
||||
from typing import Optional, Tuple, List
|
||||
|
||||
import oss2
|
||||
import py115
|
||||
from py115 import Cloud
|
||||
from py115.types import LoginTarget, QrcodeSession, QrcodeStatus, Credential
|
||||
from p115 import P115Client, P115Path
|
||||
|
||||
from app import schemas
|
||||
from app.core.config import settings
|
||||
@@ -27,57 +22,54 @@ class U115Pan(StorageBase, metaclass=Singleton):
|
||||
|
||||
# 支持的整理方式
|
||||
transtype = {
|
||||
"move": "移动"
|
||||
"move": "移动",
|
||||
"copy": "复制"
|
||||
}
|
||||
|
||||
cloud: Optional[Cloud] = None
|
||||
_session: QrcodeSession = None
|
||||
# 115二维码登录地址
|
||||
qrcode_url = "https://qrcodeapi.115.com/api/1.0/web/1.0/token/"
|
||||
# 115登录状态检查
|
||||
login_check_url = "https://qrcodeapi.115.com/get/status/"
|
||||
# 115登录完成 alipaymini
|
||||
login_done_api = f"https://passportapi.115.com/app/1.0/alipaymini/1.0/login/qrcode/"
|
||||
|
||||
# 是否有aria2c
|
||||
_has_aria2c: bool = False
|
||||
client: P115Client = None
|
||||
session_info: dict = None
|
||||
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
try:
|
||||
subprocess.run(['aria2c', '-h'], capture_output=True)
|
||||
self._has_aria2c = True
|
||||
logger.debug('发现 aria2c, 将使用 aria2c 下载文件')
|
||||
except FileNotFoundError:
|
||||
logger.debug('未发现 aria2c')
|
||||
self._has_aria2c = False
|
||||
self.init_storage()
|
||||
|
||||
def __init_cloud(self) -> bool:
|
||||
def init_storage(self):
|
||||
"""
|
||||
初始化Cloud
|
||||
"""
|
||||
credential = self.__credential
|
||||
if not credential:
|
||||
logger.warn("115未登录,请先登录!")
|
||||
return False
|
||||
if not self.__credential:
|
||||
return
|
||||
try:
|
||||
if not self.cloud:
|
||||
self.cloud = py115.connect(credential)
|
||||
self.client = P115Client(self.__credential, app="alipaymini",
|
||||
check_for_relogin=False, console_qrcode=False)
|
||||
except Exception as err:
|
||||
logger.error(f"115连接失败,请重新扫码登录:{str(err)}")
|
||||
logger.error(f"115连接失败,请重新登录:{str(err)}")
|
||||
self.__clear_credential()
|
||||
return False
|
||||
return True
|
||||
|
||||
@property
|
||||
def __credential(self) -> Optional[Credential]:
|
||||
def __credential(self) -> Optional[str]:
|
||||
"""
|
||||
获取已保存的115认证参数
|
||||
获取已保存的115 Cookie
|
||||
"""
|
||||
cookie_dict = self.get_config()
|
||||
if not cookie_dict:
|
||||
conf = self.get_config()
|
||||
if not conf:
|
||||
return None
|
||||
return Credential.from_dict(cookie_dict.dict().get("config"))
|
||||
if not conf.config:
|
||||
return None
|
||||
return conf.config.get("cookie")
|
||||
|
||||
def __save_credential(self, credential: Credential):
|
||||
def __save_credential(self, credential: dict):
|
||||
"""
|
||||
设置115认证参数
|
||||
"""
|
||||
self.set_config(credential.to_dict())
|
||||
self.set_config(credential)
|
||||
|
||||
def __clear_credential(self):
|
||||
"""
|
||||
@@ -89,62 +81,75 @@ class U115Pan(StorageBase, metaclass=Singleton):
|
||||
"""
|
||||
生成二维码
|
||||
"""
|
||||
try:
|
||||
self.cloud = py115.connect()
|
||||
self._session = self.cloud.qrcode_login(LoginTarget.Web)
|
||||
image_bin = self._session.image_data
|
||||
if not image_bin:
|
||||
res = RequestUtils(timeout=10).get_res(self.qrcode_url)
|
||||
if res:
|
||||
self.session_info = res.json().get("data")
|
||||
qrcode_content = self.session_info.pop("qrcode")
|
||||
if not qrcode_content:
|
||||
logger.warn("115生成二维码失败:未获取到二维码数据!")
|
||||
return None
|
||||
# 转换为base64图片格式
|
||||
image_base64 = base64.b64encode(image_bin).decode()
|
||||
return {}, ""
|
||||
return {
|
||||
"codeContent": f"data:image/jpeg;base64,{image_base64}"
|
||||
"codeContent": qrcode_content
|
||||
}, ""
|
||||
except Exception as e:
|
||||
logger.warn(f"115生成二维码失败:{str(e)}")
|
||||
return {}, f"115生成二维码失败:{str(e)}"
|
||||
elif res is not None:
|
||||
return {}, f"115生成二维码失败:{res.status_code} - {res.reason}"
|
||||
return {}, f"115生成二维码失败:无法连接!"
|
||||
|
||||
def check_login(self) -> Optional[Tuple[dict, str]]:
|
||||
"""
|
||||
二维码登录确认
|
||||
"""
|
||||
if not self._session:
|
||||
if not self.session_info:
|
||||
return {}, "请先生成二维码!"
|
||||
try:
|
||||
if not self.cloud:
|
||||
return {}, "请先生成二维码!"
|
||||
status = self.cloud.qrcode_poll(self._session)
|
||||
if status == QrcodeStatus.Done:
|
||||
# 确认完成,保存认证信息
|
||||
self.__save_credential(self.cloud.export_credentail())
|
||||
result = {
|
||||
"status": 1,
|
||||
"tip": "登录成功!"
|
||||
}
|
||||
elif status == QrcodeStatus.Waiting:
|
||||
result = {
|
||||
"status": 0,
|
||||
"tip": "请使用微信或115客户端扫码"
|
||||
}
|
||||
elif status == QrcodeStatus.Expired:
|
||||
result = {
|
||||
"status": -1,
|
||||
"tip": "二维码已过期,请重新刷新!"
|
||||
}
|
||||
self.cloud = None
|
||||
elif status == QrcodeStatus.Failed:
|
||||
result = {
|
||||
"status": -2,
|
||||
"tip": "登录失败,请重试!"
|
||||
}
|
||||
self.cloud = None
|
||||
else:
|
||||
result = {
|
||||
"status": -3,
|
||||
"tip": "未知错误,请重试!"
|
||||
}
|
||||
self.cloud = None
|
||||
resp = RequestUtils(timeout=10).get_res(self.login_check_url, params=self.session_info)
|
||||
if not resp:
|
||||
return {}, "115登录确认失败:无法连接!"
|
||||
result = resp.json()
|
||||
match result["data"].get("status"):
|
||||
case 0:
|
||||
result = {
|
||||
"status": 0,
|
||||
"tip": "请使用微信或115客户端扫码"
|
||||
}
|
||||
case 1:
|
||||
result = {
|
||||
"status": 1,
|
||||
"tip": "已扫码"
|
||||
}
|
||||
case 2:
|
||||
# 确认完成,保存认证信息
|
||||
resp = RequestUtils(timeout=10).post_res(self.login_done_api,
|
||||
data={"account": self.session_info.get("uid")})
|
||||
if not resp:
|
||||
return {}, "115登录确认失败:无法连接!"
|
||||
if resp:
|
||||
# 保存认证信息
|
||||
result = resp.json()
|
||||
cookie_dict = result["data"]["cookie"]
|
||||
cookie_str = "; ".join([f"{k}={v}" for k, v in cookie_dict.items()])
|
||||
cookie_dict.update({"cookie": cookie_str})
|
||||
self.__save_credential(cookie_dict)
|
||||
self.init_storage()
|
||||
result = {
|
||||
"status": 2,
|
||||
"tip": "登录成功!"
|
||||
}
|
||||
case -1:
|
||||
result = {
|
||||
"status": -1,
|
||||
"tip": "二维码已过期,请重新刷新!"
|
||||
}
|
||||
case -2:
|
||||
result = {
|
||||
"status": -2,
|
||||
"tip": "登录失败,请重试!"
|
||||
}
|
||||
case _:
|
||||
result = {
|
||||
"status": -3,
|
||||
"tip": "未知错误,请重试!"
|
||||
}
|
||||
return result, ""
|
||||
except Exception as e:
|
||||
return {}, f"115登录确认失败:{str(e)}"
|
||||
@@ -153,10 +158,12 @@ class U115Pan(StorageBase, metaclass=Singleton):
|
||||
"""
|
||||
获取存储空间
|
||||
"""
|
||||
if not self.__init_cloud():
|
||||
if not self.client:
|
||||
return None
|
||||
try:
|
||||
return self.cloud.storage().space()
|
||||
usage = self.client.fs.space_summury()
|
||||
if usage:
|
||||
return usage['rt_space_info']['all_total']['size'], usage['rt_space_info']['all_remain']['size']
|
||||
except Exception as e:
|
||||
logger.error(f"115获取存储空间失败:{str(e)}")
|
||||
return None
|
||||
@@ -165,31 +172,27 @@ class U115Pan(StorageBase, metaclass=Singleton):
|
||||
"""
|
||||
检查存储是否可用
|
||||
"""
|
||||
return True if self.list(schemas.FileItem(
|
||||
fileid="0"
|
||||
)) else False
|
||||
return True if self.list(schemas.FileItem()) else False
|
||||
|
||||
def list(self, fileitem: schemas.FileItem) -> Optional[List[schemas.FileItem]]:
|
||||
"""
|
||||
浏览文件
|
||||
"""
|
||||
if not self.__init_cloud():
|
||||
if not self.client:
|
||||
return []
|
||||
try:
|
||||
if fileitem.type == "file":
|
||||
return [fileitem]
|
||||
items = self.cloud.storage().list(dir_id=fileitem.fileid)
|
||||
items: List[P115Path] = self.client.fs.list(fileitem.path)
|
||||
return [schemas.FileItem(
|
||||
storage=self.schema.value,
|
||||
fileid=item.file_id,
|
||||
parent_fileid=item.parent_id,
|
||||
type="dir" if item.is_dir else "file",
|
||||
path=f"{fileitem.path}{item.name}" + ("/" if item.is_dir else ""),
|
||||
type="dir" if item.is_dir() else "file",
|
||||
path=item.path + ("/" if item.is_dir() else ""),
|
||||
name=item.name,
|
||||
size=item.size,
|
||||
extension=Path(item.name).suffix[1:],
|
||||
modify_time=item.modified_time.timestamp() if item.modified_time else 0,
|
||||
pickcode=item.pickcode
|
||||
basename=item.stem,
|
||||
size=item.stat().st_size,
|
||||
extension=item.suffix[1:] if not item.is_dir() else None,
|
||||
modify_time=item.stat().st_mtime
|
||||
) for item in items if item]
|
||||
except Exception as e:
|
||||
logger.error(f"115浏览文件失败:{str(e)}")
|
||||
@@ -199,20 +202,18 @@ class U115Pan(StorageBase, metaclass=Singleton):
|
||||
"""
|
||||
创建目录
|
||||
"""
|
||||
if not self.__init_cloud():
|
||||
if not self.client:
|
||||
return None
|
||||
try:
|
||||
result = self.cloud.storage().make_dir(fileitem.fileid, name)
|
||||
result = self.client.fs.makedirs(Path(fileitem.path) / name, exist_ok=True)
|
||||
if result:
|
||||
return schemas.FileItem(
|
||||
storage=self.schema.value,
|
||||
fileid=result.file_id,
|
||||
parent_fileid=result.parent_id,
|
||||
type="dir",
|
||||
path=f"{fileitem.path}{name}/",
|
||||
path=f"{result.path}/",
|
||||
name=name,
|
||||
modify_time=result.modified_time.timestamp() if result.modified_time else 0,
|
||||
pickcode=result.pickcode
|
||||
basename=Path(result.name).stem,
|
||||
modify_time=result.mtime
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"115创建目录失败:{str(e)}")
|
||||
@@ -222,73 +223,86 @@ class U115Pan(StorageBase, metaclass=Singleton):
|
||||
"""
|
||||
根据文件路程获取目录,不存在则创建
|
||||
"""
|
||||
|
||||
def __find_dir(_fileitem: schemas.FileItem, _name: str) -> Optional[schemas.FileItem]:
|
||||
"""
|
||||
查找下级目录中匹配名称的目录
|
||||
"""
|
||||
for sub_file in self.list(_fileitem):
|
||||
if sub_file.type != "dir":
|
||||
continue
|
||||
if sub_file.name == _name:
|
||||
return sub_file
|
||||
if not self.client:
|
||||
return None
|
||||
|
||||
# 逐级查找和创建目录
|
||||
fileitem = schemas.FileItem(fileid="0")
|
||||
for part in path.parts:
|
||||
if part == "/":
|
||||
continue
|
||||
dir_file = __find_dir(fileitem, part)
|
||||
if dir_file:
|
||||
fileitem = dir_file
|
||||
else:
|
||||
dir_file = self.create_folder(fileitem, part)
|
||||
if not dir_file:
|
||||
logger.warn(f"115创建目录 {fileitem.path}{part} 失败!")
|
||||
return None
|
||||
fileitem = dir_file
|
||||
return fileitem if fileitem.fileid != "0" else None
|
||||
folder = self.get_item(path)
|
||||
if folder:
|
||||
return folder
|
||||
try:
|
||||
result = self.client.fs.makedirs(path, exist_ok=True)
|
||||
if result:
|
||||
return schemas.FileItem(
|
||||
storage=self.schema.value,
|
||||
type="dir",
|
||||
path=result.path + "/",
|
||||
name=result.name,
|
||||
basename=Path(result.name).stem,
|
||||
modify_time=result.mtime
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"115获取目录失败:{str(e)}")
|
||||
return None
|
||||
|
||||
def get_item(self, path: Path) -> Optional[schemas.FileItem]:
|
||||
"""
|
||||
获取文件或目录,不存在返回None
|
||||
"""
|
||||
|
||||
def __find_item(_fileitem: schemas.FileItem, _name: str) -> Optional[schemas.FileItem]:
|
||||
"""
|
||||
查找下级目录中匹配名称的目录或文件
|
||||
"""
|
||||
for sub_file in self.list(_fileitem):
|
||||
if sub_file.name == _name:
|
||||
return sub_file
|
||||
if not self.client:
|
||||
return None
|
||||
|
||||
# 逐级查找
|
||||
fileitem = schemas.FileItem(fileid="0")
|
||||
for part in path.parts:
|
||||
if part == "/":
|
||||
continue
|
||||
item = __find_item(fileitem, part)
|
||||
if not item:
|
||||
try:
|
||||
try:
|
||||
item = self.client.fs.attr(path)
|
||||
except FileNotFoundError:
|
||||
return None
|
||||
fileitem = item
|
||||
return fileitem
|
||||
if item:
|
||||
return schemas.FileItem(
|
||||
storage=self.schema.value,
|
||||
type="dir" if item.is_directory else "file",
|
||||
path=item.path + ("/" if item.is_directory else ""),
|
||||
name=item.name,
|
||||
size=item.size,
|
||||
extension=Path(item.name).suffix[1:] if not item.is_directory else None,
|
||||
modify_time=item.mtime,
|
||||
thumbnail=item.get("thumb")
|
||||
)
|
||||
except Exception as e:
|
||||
logger.info(f"115获取文件失败:{str(e)}")
|
||||
return None
|
||||
|
||||
def detail(self, fileitem: schemas.FileItem) -> Optional[schemas.FileItem]:
|
||||
"""
|
||||
获取文件详情
|
||||
"""
|
||||
pass
|
||||
if not self.client:
|
||||
return None
|
||||
try:
|
||||
try:
|
||||
item = self.client.fs.attr(fileitem.path)
|
||||
except FileNotFoundError:
|
||||
return None
|
||||
if item:
|
||||
return schemas.FileItem(
|
||||
storage=self.schema.value,
|
||||
type="dir" if item.is_directory else "file",
|
||||
path=item.path + ("/" if item.is_directory else ""),
|
||||
name=item.name,
|
||||
size=item.size,
|
||||
extension=Path(item.name).suffix[1:] if not item.is_directory else None,
|
||||
modify_time=item.mtime,
|
||||
thumbnail=item.get("thumb")
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"115获取文件详情失败:{str(e)}")
|
||||
return None
|
||||
|
||||
def delete(self, fileitem: schemas.FileItem) -> bool:
|
||||
"""
|
||||
删除文件
|
||||
"""
|
||||
if not self.__init_cloud():
|
||||
if not self.client:
|
||||
return False
|
||||
try:
|
||||
self.cloud.storage().delete(fileitem.fileid)
|
||||
self.client.fs.remove(fileitem.path)
|
||||
return True
|
||||
except Exception as e:
|
||||
logger.error(f"115删除文件失败:{str(e)}")
|
||||
@@ -298,10 +312,10 @@ class U115Pan(StorageBase, metaclass=Singleton):
|
||||
"""
|
||||
重命名文件
|
||||
"""
|
||||
if not self.__init_cloud():
|
||||
if not self.client:
|
||||
return False
|
||||
try:
|
||||
self.cloud.storage().rename(fileitem.fileid, name)
|
||||
self.client.fs.rename(fileitem.path, Path(fileitem.path).with_name(name))
|
||||
return True
|
||||
except Exception as e:
|
||||
logger.error(f"115重命名文件失败:{str(e)}")
|
||||
@@ -311,89 +325,77 @@ class U115Pan(StorageBase, metaclass=Singleton):
|
||||
"""
|
||||
获取下载链接
|
||||
"""
|
||||
if not self.__init_cloud():
|
||||
if not self.client:
|
||||
return None
|
||||
local_file = (path or settings.TEMP_PATH) / fileitem.name
|
||||
try:
|
||||
ticket = self.cloud.storage().request_download(fileitem.pickcode)
|
||||
if ticket:
|
||||
path = (path or settings.TEMP_PATH) / fileitem.name
|
||||
res = RequestUtils(headers=ticket.headers).get_res(ticket.url)
|
||||
if res:
|
||||
with open(path, "wb") as f:
|
||||
f.write(res.content)
|
||||
return path
|
||||
else:
|
||||
logger.warn(f"{fileitem.path} 未获取到下载链接")
|
||||
task = self.client.fs.download(fileitem.path, file=local_file)
|
||||
if task:
|
||||
return local_file
|
||||
except Exception as e:
|
||||
logger.error(f"115下载失败:{str(e)}")
|
||||
logger.error(f"115下载文件失败:{str(e)}")
|
||||
return None
|
||||
|
||||
def upload(self, fileitem: schemas.FileItem, path: Path) -> Optional[schemas.FileItem]:
|
||||
def upload(self, fileitem: schemas.FileItem, path: Path, new_name: str = None) -> Optional[schemas.FileItem]:
|
||||
"""
|
||||
上传文件
|
||||
:param fileitem: 上传目录项
|
||||
:param path: 本地文件路径
|
||||
:param new_name: 上传后文件名
|
||||
"""
|
||||
if not self.__init_cloud():
|
||||
if not self.client:
|
||||
return None
|
||||
try:
|
||||
ticket = self.cloud.storage().request_upload(dir_id=fileitem.fileid, file_path=str(path))
|
||||
if ticket is None:
|
||||
logger.warn(f"115请求上传出错")
|
||||
return None
|
||||
elif ticket.is_done:
|
||||
file_path = Path(fileitem.path) / path.name
|
||||
logger.warn(f"115上传:{file_path} 文件已存在")
|
||||
return self.get_item(file_path)
|
||||
else:
|
||||
auth = oss2.StsAuth(**ticket.oss_token)
|
||||
bucket = oss2.Bucket(
|
||||
auth=auth,
|
||||
endpoint=ticket.oss_endpoint,
|
||||
bucket_name=ticket.bucket_name,
|
||||
)
|
||||
por = bucket.put_object_from_file(
|
||||
key=ticket.object_key,
|
||||
filename=str(path),
|
||||
headers=ticket.headers,
|
||||
)
|
||||
result = por.resp.response.json()
|
||||
new_path = Path(fileitem.path) / (new_name or path.name)
|
||||
with open(path, "rb") as f:
|
||||
result = self.client.fs.upload(f, new_path)
|
||||
if result:
|
||||
result_data = result.get('data')
|
||||
logger.info(f"115上传文件成功:{result_data.get('file_name')}")
|
||||
return schemas.FileItem(
|
||||
storage=self.schema.value,
|
||||
fileid=result_data.get('file_id'),
|
||||
parent_fileid=fileitem.fileid,
|
||||
type="file",
|
||||
name=result_data.get('file_name'),
|
||||
basename=Path(result_data.get('file_name')).stem,
|
||||
path=f"{fileitem.path}{result_data.get('file_name')}",
|
||||
size=result_data.get('file_size'),
|
||||
extension=Path(result_data.get('file_name')).suffix[1:],
|
||||
pickcode=result_data.get('pickcode')
|
||||
path=str(path),
|
||||
name=result.name,
|
||||
basename=Path(result.name).stem,
|
||||
size=result.size,
|
||||
extension=Path(result.name).suffix[1:],
|
||||
modify_time=result.mtime
|
||||
)
|
||||
else:
|
||||
logger.warn(f"115上传文件失败:{por.resp.response.text}")
|
||||
return None
|
||||
except Exception as e:
|
||||
logger.error(f"115上传文件失败:{str(e)}")
|
||||
return None
|
||||
|
||||
def move(self, fileitem: schemas.FileItem, target: schemas.FileItem) -> bool:
|
||||
def copy(self, fileitem: schemas.FileItem, path: Path, new_name: str) -> bool:
|
||||
"""
|
||||
移动文件
|
||||
复制文件
|
||||
:param fileitem: 文件项
|
||||
:param path: 目标目录
|
||||
:param new_name: 新文件名
|
||||
"""
|
||||
if not self.__init_cloud():
|
||||
if not self.client:
|
||||
return False
|
||||
try:
|
||||
self.cloud.storage().move(fileitem.fileid, target.fileid)
|
||||
self.client.fs.copy(fileitem.path, path / new_name)
|
||||
return True
|
||||
except Exception as e:
|
||||
logger.error(f"115复制文件失败:{str(e)}")
|
||||
return False
|
||||
|
||||
def move(self, fileitem: schemas.FileItem, path: Path, new_name: str) -> bool:
|
||||
"""
|
||||
移动文件
|
||||
:param fileitem: 文件项
|
||||
:param path: 目标目录
|
||||
:param new_name: 新文件名
|
||||
"""
|
||||
if not self.client:
|
||||
return False
|
||||
try:
|
||||
self.client.fs.move(fileitem.path, path / new_name)
|
||||
return True
|
||||
except Exception as e:
|
||||
logger.error(f"115移动文件失败:{str(e)}")
|
||||
return False
|
||||
|
||||
def copy(self, fileitem: schemas.FileItem, target_file: Path) -> bool:
|
||||
pass
|
||||
|
||||
def link(self, fileitem: schemas.FileItem, target_file: Path) -> bool:
|
||||
pass
|
||||
|
||||
@@ -406,9 +408,9 @@ class U115Pan(StorageBase, metaclass=Singleton):
|
||||
"""
|
||||
info = self.storage()
|
||||
if info:
|
||||
total, used = info
|
||||
total, free = info
|
||||
return schemas.StorageUsage(
|
||||
total=total,
|
||||
available=total - used
|
||||
available=free
|
||||
)
|
||||
return schemas.StorageUsage()
|
||||
|
||||
@@ -7,7 +7,7 @@ from app.helper.rule import RuleHelper
|
||||
from app.log import logger
|
||||
from app.modules import _ModuleBase
|
||||
from app.modules.filter.RuleParser import RuleParser
|
||||
from app.schemas.types import ModuleType
|
||||
from app.schemas.types import ModuleType, OtherModulesType
|
||||
from app.utils.string import StringUtils
|
||||
|
||||
|
||||
@@ -167,6 +167,13 @@ class FilterModule(_ModuleBase):
|
||||
"""
|
||||
return ModuleType.Other
|
||||
|
||||
@staticmethod
|
||||
def get_subtype() -> OtherModulesType:
|
||||
"""
|
||||
获取模块子类型
|
||||
"""
|
||||
return OtherModulesType.Filter
|
||||
|
||||
@staticmethod
|
||||
def get_priority() -> int:
|
||||
"""
|
||||
|
||||
@@ -18,7 +18,7 @@ from app.modules.indexer.spider.tnode import TNodeSpider
|
||||
from app.modules.indexer.spider.torrentleech import TorrentLeech
|
||||
from app.modules.indexer.spider.yema import YemaSpider
|
||||
from app.schemas import SiteUserData
|
||||
from app.schemas.types import MediaType, ModuleType
|
||||
from app.schemas.types import MediaType, ModuleType, OtherModulesType
|
||||
from app.utils.string import StringUtils
|
||||
|
||||
|
||||
@@ -47,6 +47,13 @@ class IndexerModule(_ModuleBase):
|
||||
"""
|
||||
return ModuleType.Indexer
|
||||
|
||||
@staticmethod
|
||||
def get_subtype() -> OtherModulesType:
|
||||
"""
|
||||
获取模块子类型
|
||||
"""
|
||||
return OtherModulesType.Indexer
|
||||
|
||||
@staticmethod
|
||||
def get_priority() -> int:
|
||||
"""
|
||||
@@ -191,6 +198,7 @@ class IndexerModule(_ModuleBase):
|
||||
site_ua=site.get("ua"),
|
||||
site_proxy=site.get("proxy"),
|
||||
site_order=site.get("pri"),
|
||||
site_downloader=site.get("downloader"),
|
||||
**result) for result in result_array]
|
||||
# 去重
|
||||
return __remove_duplicate(torrents)
|
||||
@@ -199,7 +207,7 @@ class IndexerModule(_ModuleBase):
|
||||
def __spider_search(indexer: CommentedMap,
|
||||
search_word: str = None,
|
||||
mtype: MediaType = None,
|
||||
page: int = 0) -> (bool, List[dict]):
|
||||
page: int = 0) -> Tuple[bool, List[dict]]:
|
||||
"""
|
||||
根据关键字搜索单个站点
|
||||
:param: indexer: 站点配置
|
||||
|
||||
@@ -94,6 +94,7 @@ class SiteParserBase(metaclass=ABCMeta):
|
||||
# 未读消息
|
||||
self.message_unread = 0
|
||||
self.message_unread_contents = []
|
||||
self.message_read_force = False
|
||||
|
||||
# 全局附加请求头
|
||||
self._addition_headers = None
|
||||
@@ -202,7 +203,7 @@ class SiteParserBase(metaclass=ABCMeta):
|
||||
:return:
|
||||
"""
|
||||
unread_msg_links = []
|
||||
if self.message_unread > 0:
|
||||
if self.message_unread > 0 or self.message_read_force:
|
||||
links = {self._user_mail_unread_page, self._sys_mail_unread_page}
|
||||
for link in links:
|
||||
if not link:
|
||||
@@ -226,7 +227,7 @@ class SiteParserBase(metaclass=ABCMeta):
|
||||
)
|
||||
unread_msg_links.extend(msg_links)
|
||||
# 重新更新未读消息数(99999表示有消息但数量未知)
|
||||
if self.message_unread == 99999:
|
||||
if unread_msg_links and not self.message_unread:
|
||||
self.message_unread = len(unread_msg_links)
|
||||
# 解析未读消息内容
|
||||
for msg_link in unread_msg_links:
|
||||
@@ -343,11 +344,9 @@ class SiteParserBase(metaclass=ABCMeta):
|
||||
logger.warn(
|
||||
f"{self._site_name} 检测到Cloudflare,请更新Cookie和UA")
|
||||
return ""
|
||||
if re.search(r"charset=\"?utf-8\"?", res.text, re.IGNORECASE):
|
||||
res.encoding = "utf-8"
|
||||
else:
|
||||
res.encoding = res.apparent_encoding
|
||||
return res.text
|
||||
return RequestUtils.get_decoded_html_content(res,
|
||||
settings.ENCODING_DETECTION_PERFORMANCE_MODE,
|
||||
settings.ENCODING_DETECTION_MIN_CONFIDENCE)
|
||||
|
||||
return ""
|
||||
|
||||
|
||||
@@ -91,9 +91,7 @@ class MTorrentSiteUserInfo(SiteParserBase):
|
||||
self.download = int(user_info.get("memberCount", {}).get("downloaded") or '0')
|
||||
self.ratio = user_info.get("memberCount", {}).get("shareRate") or 0
|
||||
self.bonus = user_info.get("memberCount", {}).get("bonus") or 0
|
||||
# 需要解析消息,但不确定消息条数
|
||||
self.message_unread = 99999
|
||||
|
||||
self.message_read_force = True
|
||||
self._torrent_seeding_params = {
|
||||
"pageNumber": 1,
|
||||
"pageSize": 200,
|
||||
|
||||
@@ -1,21 +1,60 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
import re
|
||||
import json
|
||||
from typing import Optional
|
||||
|
||||
from lxml import etree
|
||||
from urllib.parse import urljoin
|
||||
from app.log import logger
|
||||
from app.modules.indexer.parser import SiteSchema
|
||||
from app.modules.indexer.parser.nexus_php import NexusPhpSiteUserInfo
|
||||
from app.modules.indexer.parser import SiteParserBase
|
||||
from app.utils.string import StringUtils
|
||||
|
||||
|
||||
class NexusRabbitSiteUserInfo(NexusPhpSiteUserInfo):
|
||||
class NexusRabbitSiteUserInfo(SiteParserBase):
|
||||
schema = SiteSchema.NexusRabbit
|
||||
|
||||
def _parse_site_page(self, html_text: str):
|
||||
super()._parse_site_page(html_text)
|
||||
self._torrent_seeding_page = f"getusertorrentlistajax.php?page=1&limit=5000000&type=seeding&uid={self.userid}"
|
||||
self._torrent_seeding_headers = {"Accept": "application/json, text/javascript, */*; q=0.01"}
|
||||
html_text = self._prepare_html_text(html_text)
|
||||
|
||||
def _parse_user_torrent_seeding_info(self, html_text: str, multi_page: bool = False) -> Optional[str]:
|
||||
user_detail = re.search(r"user.php\?id=(\d+)", html_text)
|
||||
|
||||
if not (user_detail and user_detail.group().strip()):
|
||||
return
|
||||
|
||||
self.userid = user_detail.group(1)
|
||||
self._user_detail_page = f"user.php?id={self.userid}"
|
||||
|
||||
self._user_traffic_page = None
|
||||
|
||||
self._torrent_seeding_page = "api/general"
|
||||
self._torrent_seeding_params = {
|
||||
"page": 1,
|
||||
"limit": 5000000,
|
||||
"action": "userTorrentsList",
|
||||
"data": {"type": "seeding", "id": int(self.userid)},
|
||||
}
|
||||
self._torrent_seeding_headers = {
|
||||
"Content-Type": "application/json",
|
||||
"Accept": "application/json, text/plain, */*",
|
||||
"X-Requested-With": "XMLHttpRequest", # 必须要加上这一条,不然返回的是空数据
|
||||
}
|
||||
|
||||
self._user_mail_unread_page = None
|
||||
self._sys_mail_unread_page = "api/general"
|
||||
self._mail_unread_params = {
|
||||
"page": 1,
|
||||
"limit": 5000000,
|
||||
"action": "getMessageIn",
|
||||
}
|
||||
self._mail_unread_headers = {
|
||||
"Content-Type": "application/json",
|
||||
"Accept": "application/json, text/plain, */*",
|
||||
"X-Requested-With": "XMLHttpRequest",
|
||||
}
|
||||
|
||||
def _parse_user_torrent_seeding_info(
|
||||
self, html_text: str, multi_page: bool = False
|
||||
) -> Optional[str]:
|
||||
"""
|
||||
做种相关信息
|
||||
:param html_text:
|
||||
@@ -24,22 +63,112 @@ class NexusRabbitSiteUserInfo(NexusPhpSiteUserInfo):
|
||||
"""
|
||||
|
||||
try:
|
||||
torrents = json.loads(html_text).get('data')
|
||||
torrents = json.loads(html_text).get("data", [])
|
||||
except Exception as e:
|
||||
logger.error(f"解析做种信息失败: {str(e)}")
|
||||
return
|
||||
|
||||
page_seeding_size = 0
|
||||
page_seeding_info = []
|
||||
seeding_size = 0
|
||||
seeding_info = []
|
||||
|
||||
page_seeding = len(torrents)
|
||||
for torrent in torrents:
|
||||
seeders = int(torrent.get('seeders', 0))
|
||||
size = int(torrent.get('size', 0))
|
||||
page_seeding_size += int(torrent.get('size', 0))
|
||||
seeders = int(torrent.get("seeders", 0))
|
||||
size = StringUtils.num_filesize(torrent.get("size"))
|
||||
seeding_size += size
|
||||
seeding_info.append([seeders, size])
|
||||
|
||||
page_seeding_info.append([seeders, size])
|
||||
self.seeding = len(torrents)
|
||||
self.seeding_size = seeding_size
|
||||
self.seeding_info = seeding_info
|
||||
|
||||
self.seeding += page_seeding
|
||||
self.seeding_size += page_seeding_size
|
||||
self.seeding_info.extend(page_seeding_info)
|
||||
def _parse_message_unread_links(
|
||||
self, html_text: str, msg_links: list
|
||||
) -> str | None:
|
||||
unread_ids = []
|
||||
try:
|
||||
messages = json.loads(html_text).get("data", [])
|
||||
except Exception as e:
|
||||
logger.error(f"解析未读消息失败: {e}")
|
||||
return
|
||||
for msg in messages:
|
||||
msg_id, msg_unread = msg.get("id"), msg.get("unread")
|
||||
if not (msg_id and msg_unread) or msg_unread == "no":
|
||||
continue
|
||||
unread_ids.append(msg_id)
|
||||
head, date, content = msg.get("subject"), msg.get("added"), msg.get("msg")
|
||||
if head and date and content:
|
||||
self.message_unread_contents.append((head, date, content))
|
||||
self.message_unread = len(unread_ids)
|
||||
if unread_ids:
|
||||
self._get_page_content(
|
||||
url=urljoin(self._base_url, "api/general?loading=true"),
|
||||
params={"action": "readMessage", "data": {"ids": unread_ids}},
|
||||
headers={
|
||||
"Content-Type": "application/json",
|
||||
"Accept": "application/json, text/plain, */*",
|
||||
"X-Requested-With": "XMLHttpRequest",
|
||||
},
|
||||
)
|
||||
return None
|
||||
|
||||
def _parse_user_base_info(self, html_text: str):
|
||||
"""只有奶糖余额才需要在 base 中获取,其它均可以在详情页拿到"""
|
||||
html = etree.HTML(html_text)
|
||||
if not StringUtils.is_valid_html_element(html):
|
||||
return
|
||||
bonus = html.xpath(
|
||||
'//div[contains(text(), "奶糖余额")]/following-sibling::div[1]/text()'
|
||||
)
|
||||
if bonus:
|
||||
self.bonus = StringUtils.str_float(bonus[0].strip())
|
||||
|
||||
def _parse_user_detail_info(self, html_text: str):
|
||||
html = etree.HTML(html_text)
|
||||
if not StringUtils.is_valid_html_element(html):
|
||||
return
|
||||
# 缩小一下查找范围,所有的信息都在这个 div 里
|
||||
user_info = html.xpath('//div[contains(@class, "layui-hares-user-info-right")]')
|
||||
if not user_info:
|
||||
return
|
||||
user_info = user_info[0]
|
||||
# 用户名
|
||||
if username := user_info.xpath(
|
||||
'.//span[contains(text(), "用户名")]/a/span/text()'
|
||||
):
|
||||
self.username = username[0].strip()
|
||||
# 等级
|
||||
if user_level := user_info.xpath('.//span[contains(text(), "等级")]/b/text()'):
|
||||
self.user_level = user_level[0].strip()
|
||||
# 加入日期
|
||||
if join_date := user_info.xpath('.//span[contains(text(), "注册日期")]/text()'):
|
||||
join_date = join_date[0].strip().split("\r")[0].removeprefix("注册日期:")
|
||||
self.join_at = StringUtils.unify_datetime_str(join_date)
|
||||
# 上传量
|
||||
if upload := user_info.xpath('.//span[contains(text(), "上传量")]/text()'):
|
||||
self.upload = StringUtils.num_filesize(
|
||||
upload[0].strip().removeprefix("上传量:")
|
||||
)
|
||||
# 下载量
|
||||
if download := user_info.xpath('.//span[contains(text(), "下载量")]/text()'):
|
||||
self.download = StringUtils.num_filesize(
|
||||
download[0].strip().removeprefix("下载量:")
|
||||
)
|
||||
# 分享率
|
||||
if ratio := user_info.xpath('.//span[contains(text(), "分享率")]/em/text()'):
|
||||
self.ratio = StringUtils.str_float(ratio[0].strip())
|
||||
|
||||
def _parse_message_content(self, html_text):
|
||||
"""
|
||||
解析短消息内容,已经在 _parse_message_unread_links 内实现,重载防止 abstractmethod 报错
|
||||
:param html_text:
|
||||
:return: head: message, date: time, content: message content
|
||||
"""
|
||||
pass
|
||||
|
||||
def _parse_user_traffic_info(self, html_text: str):
|
||||
"""
|
||||
解析用户的上传,下载,分享率等信息,已经在 _parse_user_detail_info 内实现,重载防止 abstractmethod 报错
|
||||
:param html_text:
|
||||
:return:
|
||||
"""
|
||||
pass
|
||||
|
||||
@@ -36,7 +36,10 @@ class TNodeSiteUserInfo(SiteParserBase):
|
||||
pass
|
||||
|
||||
def _parse_user_detail_info(self, html_text: str):
|
||||
detail = json.loads(html_text)
|
||||
try:
|
||||
detail = json.loads(html_text)
|
||||
except json.JSONDecodeError:
|
||||
return
|
||||
if detail.get("status") != 200:
|
||||
return
|
||||
|
||||
|
||||
@@ -5,7 +5,6 @@ import traceback
|
||||
from typing import List
|
||||
from urllib.parse import quote, urlencode, urlparse, parse_qs
|
||||
|
||||
import chardet
|
||||
from jinja2 import Template
|
||||
from pyquery import PyQuery
|
||||
from ruamel.yaml import CommentedMap
|
||||
@@ -250,27 +249,9 @@ class TorrentSpider:
|
||||
referer=self.referer,
|
||||
proxies=self.proxies
|
||||
).get_res(searchurl, allow_redirects=True)
|
||||
if ret is not None:
|
||||
# 使用chardet检测字符编码
|
||||
raw_data = ret.content
|
||||
if raw_data:
|
||||
try:
|
||||
result = chardet.detect(raw_data)
|
||||
encoding = result['encoding']
|
||||
# 解码为字符串
|
||||
page_source = raw_data.decode(encoding)
|
||||
except Exception as e:
|
||||
logger.debug(f"chardet解码失败:{str(e)}")
|
||||
# 探测utf-8解码
|
||||
if re.search(r"charset=\"?utf-8\"?", ret.text, re.IGNORECASE):
|
||||
ret.encoding = "utf-8"
|
||||
else:
|
||||
ret.encoding = ret.apparent_encoding
|
||||
page_source = ret.text
|
||||
else:
|
||||
page_source = ret.text
|
||||
else:
|
||||
page_source = ""
|
||||
page_source = RequestUtils.get_decoded_html_content(ret,
|
||||
settings.ENCODING_DETECTION_PERFORMANCE_MODE,
|
||||
settings.ENCODING_DETECTION_MIN_CONFIDENCE)
|
||||
|
||||
# 解析
|
||||
return self.parse(page_source)
|
||||
|
||||
@@ -21,14 +21,30 @@ class YemaSpider:
|
||||
_cookie = None
|
||||
_ua = None
|
||||
_size = 40
|
||||
_searchurl = "%sapi/torrent/fetchCategoryOpenTorrentList"
|
||||
_searchurl = "%sapi/torrent/fetchOpenTorrentList"
|
||||
_downloadurl = "%sapi/torrent/download?id=%s"
|
||||
_pageurl = "%s#/torrent/detail/%s/"
|
||||
_timeout = 15
|
||||
|
||||
# 分类
|
||||
_movie_category = 4
|
||||
_tv_category = 5
|
||||
_movie_category = [4]
|
||||
_tv_category = [5, 13, 14, 17, 15, 6, 16]
|
||||
|
||||
# 标签 https://wiki.yemapt.org/developer/constants
|
||||
_labels = {
|
||||
"1": "禁转",
|
||||
"2": "首发",
|
||||
"3": "官方",
|
||||
"4": "自制",
|
||||
"5": "国语",
|
||||
"6": "中字",
|
||||
"7": "粤语",
|
||||
"8": "英字",
|
||||
"9": "HDR10",
|
||||
"10": "杜比视界",
|
||||
"11": "分集",
|
||||
"12": "完结",
|
||||
}
|
||||
|
||||
def __init__(self, indexer: CommentedMap):
|
||||
self.systemconfig = SystemConfigOper()
|
||||
@@ -47,14 +63,7 @@ class YemaSpider:
|
||||
"""
|
||||
搜索
|
||||
"""
|
||||
if not mtype:
|
||||
categoryId = self._movie_category
|
||||
elif mtype == MediaType.TV:
|
||||
categoryId = self._tv_category
|
||||
else:
|
||||
categoryId = self._movie_category
|
||||
params = {
|
||||
"categoryId": categoryId,
|
||||
"pageParam": {
|
||||
"current": page + 1,
|
||||
"pageSize": self._size,
|
||||
@@ -62,6 +71,12 @@ class YemaSpider:
|
||||
},
|
||||
"sorter": {}
|
||||
}
|
||||
# 新接口可不传 categoryId 参数
|
||||
# if mtype == MediaType.MOVIE:
|
||||
# params.update({
|
||||
# "categoryId": self._movie_category,
|
||||
# })
|
||||
# pass
|
||||
if keyword:
|
||||
params.update({
|
||||
"keyword": keyword,
|
||||
@@ -82,17 +97,27 @@ class YemaSpider:
|
||||
results = res.json().get('data', []) or []
|
||||
for result in results:
|
||||
category_value = result.get('categoryId')
|
||||
if category_value == self._tv_category:
|
||||
if category_value in self._tv_category:
|
||||
category = MediaType.TV.value
|
||||
elif category_value == self._movie_category:
|
||||
elif category_value in self._movie_category:
|
||||
category = MediaType.MOVIE.value
|
||||
else:
|
||||
category = MediaType.UNKNOWN.value
|
||||
pass
|
||||
|
||||
torrentLabelIds = result.get('tagList', []) or []
|
||||
torrentLabels = []
|
||||
for labelId in torrentLabelIds:
|
||||
if self._labels.get(labelId) is not None:
|
||||
torrentLabels.append(self._labels.get(labelId))
|
||||
pass
|
||||
pass
|
||||
torrent = {
|
||||
'title': result.get('showName'),
|
||||
'description': result.get('shortDesc'),
|
||||
'enclosure': self.__get_download_url(result.get('id')),
|
||||
'pubdate': StringUtils.unify_datetime_str(result.get('gmtCreate')),
|
||||
# 使用上架时间,而不是用户发布时间,上架时间即其他用户可见时间
|
||||
'pubdate': StringUtils.unify_datetime_str(result.get('listingTime')),
|
||||
'size': result.get('fileSize'),
|
||||
'seeders': result.get('seedNum'),
|
||||
'peers': result.get('leechNum'),
|
||||
@@ -101,7 +126,7 @@ class YemaSpider:
|
||||
'uploadvolumefactor': self.__get_uploadvolumefactor(result.get('uploadPromotion')),
|
||||
'freedate': StringUtils.unify_datetime_str(result.get('downloadPromotionEndTime')),
|
||||
'page_url': self._pageurl % (self._domain, result.get('id')),
|
||||
'labels': [],
|
||||
'labels': torrentLabels,
|
||||
'category': category
|
||||
}
|
||||
torrents.append(torrent)
|
||||
|
||||
@@ -7,7 +7,7 @@ from app.log import logger
|
||||
from app.modules import _MediaServerBase, _ModuleBase
|
||||
from app.modules.jellyfin.jellyfin import Jellyfin
|
||||
from app.schemas.event import AuthCredentials, AuthInterceptCredentials
|
||||
from app.schemas.types import MediaType, ModuleType, ChainEventType
|
||||
from app.schemas.types import MediaType, ModuleType, ChainEventType, MediaServerType
|
||||
|
||||
|
||||
class JellyfinModule(_ModuleBase, _MediaServerBase[Jellyfin]):
|
||||
@@ -30,6 +30,13 @@ class JellyfinModule(_ModuleBase, _MediaServerBase[Jellyfin]):
|
||||
"""
|
||||
return ModuleType.MediaServer
|
||||
|
||||
@staticmethod
|
||||
def get_subtype() -> MediaServerType:
|
||||
"""
|
||||
获取模块子类型
|
||||
"""
|
||||
return MediaServerType.Jellyfin
|
||||
|
||||
@staticmethod
|
||||
def get_priority() -> int:
|
||||
"""
|
||||
@@ -66,16 +73,26 @@ class JellyfinModule(_ModuleBase, _MediaServerBase[Jellyfin]):
|
||||
return False, f"无法连接Jellyfin服务器:{name}"
|
||||
return True, ""
|
||||
|
||||
def user_authenticate(self, credentials: AuthCredentials) -> Optional[AuthCredentials]:
|
||||
def user_authenticate(self, credentials: AuthCredentials, service_name: Optional[str] = None) \
|
||||
-> Optional[AuthCredentials]:
|
||||
"""
|
||||
使用Jellyfin用户辅助完成用户认证
|
||||
:param credentials: 认证数据
|
||||
:param service_name: 指定要认证的媒体服务器名称,若为 None 则认证所有服务
|
||||
:return: 认证数据
|
||||
"""
|
||||
# Jellyfin认证
|
||||
if not credentials or credentials.grant_type != "password":
|
||||
return None
|
||||
for name, server in self.get_instances().items():
|
||||
# 确定要认证的服务器列表
|
||||
if service_name:
|
||||
# 如果指定了服务名,获取该服务实例
|
||||
servers = [(service_name, server)] if (server := self.get_instance(service_name)) else []
|
||||
else:
|
||||
# 如果没有指定服务名,遍历所有服务
|
||||
servers = self.get_instances().items()
|
||||
# 遍历要认证的服务器
|
||||
for name, server in servers:
|
||||
# 触发认证拦截事件
|
||||
intercept_event = eventmanager.send_event(
|
||||
etype=ChainEventType.AuthIntercept,
|
||||
|
||||
@@ -7,7 +7,7 @@ from app.log import logger
|
||||
from app.modules import _ModuleBase, _MediaServerBase
|
||||
from app.modules.plex.plex import Plex
|
||||
from app.schemas.event import AuthCredentials, AuthInterceptCredentials
|
||||
from app.schemas.types import MediaType, ModuleType, ChainEventType
|
||||
from app.schemas.types import MediaType, ModuleType, ChainEventType, MediaServerType
|
||||
|
||||
|
||||
class PlexModule(_ModuleBase, _MediaServerBase[Plex]):
|
||||
@@ -30,6 +30,13 @@ class PlexModule(_ModuleBase, _MediaServerBase[Plex]):
|
||||
"""
|
||||
return ModuleType.MediaServer
|
||||
|
||||
@staticmethod
|
||||
def get_subtype() -> MediaServerType:
|
||||
"""
|
||||
获取模块子类型
|
||||
"""
|
||||
return MediaServerType.Plex
|
||||
|
||||
@staticmethod
|
||||
def get_priority() -> int:
|
||||
"""
|
||||
@@ -66,16 +73,26 @@ class PlexModule(_ModuleBase, _MediaServerBase[Plex]):
|
||||
logger.info(f"Plex {name} 服务器连接断开,尝试重连 ...")
|
||||
server.reconnect()
|
||||
|
||||
def user_authenticate(self, credentials: AuthCredentials) -> Optional[AuthCredentials]:
|
||||
def user_authenticate(self, credentials: AuthCredentials, service_name: Optional[str] = None) \
|
||||
-> Optional[AuthCredentials]:
|
||||
"""
|
||||
使用Plex用户辅助完成用户认证
|
||||
:param credentials: 认证数据
|
||||
:param service_name: 指定要认证的媒体服务器名称,若为 None 则认证所有服务
|
||||
:return: 认证数据
|
||||
"""
|
||||
# Plex认证
|
||||
if not credentials or credentials.grant_type != "password":
|
||||
return None
|
||||
for name, server in self.get_instances().items():
|
||||
# 确定要认证的服务器列表
|
||||
if service_name:
|
||||
# 如果指定了服务名,获取该服务实例
|
||||
servers = [(service_name, server)] if (server := self.get_instance(service_name)) else []
|
||||
else:
|
||||
# 如果没有指定服务名,遍历所有服务
|
||||
servers = self.get_instances().items()
|
||||
# 遍历要认证的服务器
|
||||
for name, server in servers:
|
||||
# 触发认证拦截事件
|
||||
intercept_event = eventmanager.send_event(
|
||||
etype=ChainEventType.AuthIntercept,
|
||||
|
||||
@@ -162,26 +162,26 @@ class Plex:
|
||||
def get_medias_count(self) -> schemas.Statistic:
|
||||
"""
|
||||
获得电影、电视剧、动漫媒体数量
|
||||
:return: MovieCount SeriesCount SongCount
|
||||
:return: movie_count tv_count episode_count
|
||||
"""
|
||||
if not self._plex:
|
||||
return schemas.Statistic()
|
||||
sections = self._plex.library.sections()
|
||||
MovieCount = SeriesCount = EpisodeCount = 0
|
||||
movie_count = tv_count = episode_count = 0
|
||||
# 媒体库白名单
|
||||
allow_library = [lib.id for lib in self.get_librarys(hidden=True)]
|
||||
for sec in sections:
|
||||
if str(sec.key) not in allow_library:
|
||||
if sec.key not in allow_library:
|
||||
continue
|
||||
if sec.type == "movie":
|
||||
MovieCount += sec.totalSize
|
||||
movie_count += sec.totalSize
|
||||
if sec.type == "show":
|
||||
SeriesCount += sec.totalSize
|
||||
EpisodeCount += sec.totalViewSize(libtype='episode')
|
||||
tv_count += sec.totalSize
|
||||
episode_count += sec.totalViewSize(libtype="episode")
|
||||
return schemas.Statistic(
|
||||
movie_count=MovieCount,
|
||||
tv_count=SeriesCount,
|
||||
episode_count=EpisodeCount
|
||||
movie_count=movie_count,
|
||||
tv_count=tv_count,
|
||||
episode_count=episode_count
|
||||
)
|
||||
|
||||
def get_movies(self,
|
||||
@@ -294,7 +294,7 @@ class Plex:
|
||||
return videos.key, season_episodes
|
||||
|
||||
def get_remote_image_by_id(self,
|
||||
item_id: str,
|
||||
item_id: str,
|
||||
image_type: str,
|
||||
depth: int = 0,
|
||||
plex_url: bool = True) -> Optional[str]:
|
||||
@@ -310,12 +310,16 @@ class Plex:
|
||||
return None
|
||||
try:
|
||||
image_url = None
|
||||
ekey = f"/library/metadata/{item_id}"
|
||||
ekey = item_id
|
||||
item = self._plex.fetchItem(ekey=ekey)
|
||||
if not item:
|
||||
return None
|
||||
# 如果配置了外网播放地址以及Token,则默认从Plex媒体服务器获取图片,否则返回有外网地址的图片资源
|
||||
if self._playhost and self._token and plex_url:
|
||||
# Plex外网播放地址这个框里目前可以填两种地址
|
||||
# 1. Plex的官方转发地址https://app.plex.tv, 2. 自己处理的端口转发地址
|
||||
# 如果使用的是1的官方转发地址,那么就不能走这个逻辑,因为官方转发地址无法获取到图片
|
||||
if (self._playhost and "app.plex.tv" not in self._playhost
|
||||
and self._token and plex_url):
|
||||
query = {"X-Plex-Token": self._token}
|
||||
if image_type == "Poster":
|
||||
if item.thumb:
|
||||
@@ -346,8 +350,8 @@ class Plex:
|
||||
image_url = image.key
|
||||
break
|
||||
# 如果最后还是找不到,则递归父级进行查找
|
||||
if not image_url and hasattr(item, "parentRatingKey"):
|
||||
return self.get_remote_image_by_id(item_id=item.parentRatingKey,
|
||||
if not image_url and hasattr(item, "parentKey"):
|
||||
return self.get_remote_image_by_id(item_id=item.parentKey,
|
||||
image_type=image_type,
|
||||
depth=depth + 1)
|
||||
return image_url
|
||||
@@ -665,7 +669,7 @@ class Plex:
|
||||
"S" + str(message.get('Metadata', {}).get('parentIndex')),
|
||||
"E" + str(message.get('Metadata', {}).get('index')),
|
||||
message.get('Metadata', {}).get('title'))
|
||||
eventItem.item_id = message.get('Metadata', {}).get('ratingKey')
|
||||
eventItem.item_id = message.get('Metadata', {}).get('key')
|
||||
eventItem.season_id = message.get('Metadata', {}).get('parentIndex')
|
||||
eventItem.episode_id = message.get('Metadata', {}).get('index')
|
||||
|
||||
@@ -680,7 +684,7 @@ class Plex:
|
||||
eventItem.item_name = "%s %s" % (
|
||||
message.get('Metadata', {}).get('title'),
|
||||
"(" + str(message.get('Metadata', {}).get('year')) + ")")
|
||||
eventItem.item_id = message.get('Metadata', {}).get('ratingKey')
|
||||
eventItem.item_id = message.get('Metadata', {}).get('key')
|
||||
if len(message.get('Metadata', {}).get('summary')) > 100:
|
||||
eventItem.overview = str(message.get('Metadata', {}).get('summary'))[:100] + "..."
|
||||
else:
|
||||
@@ -721,7 +725,7 @@ class Plex:
|
||||
if not self._plex:
|
||||
return []
|
||||
# 媒体库白名单
|
||||
allow_library = ",".join([lib.id for lib in self.get_librarys(hidden=True)])
|
||||
allow_library = ",".join(map(str, (lib.id for lib in self.get_librarys(hidden=True))))
|
||||
params = {"contentDirectoryID": allow_library}
|
||||
items = self._plex.fetchItems("/hubs/continueWatching/items",
|
||||
container_start=0,
|
||||
@@ -757,7 +761,7 @@ class Plex:
|
||||
if not self._plex:
|
||||
return None
|
||||
# 请求参数(除黑名单)
|
||||
allow_library = ",".join([lib.id for lib in self.get_librarys(hidden=True)])
|
||||
allow_library = ",".join(map(str, (lib.id for lib in self.get_librarys(hidden=True))))
|
||||
params = {
|
||||
"contentDirectoryID": allow_library,
|
||||
"count": num,
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
from pathlib import Path
|
||||
from typing import Set, Tuple, Optional, Union, List
|
||||
from typing import Set, Tuple, Optional, Union, List, Dict
|
||||
|
||||
from qbittorrentapi import TorrentFilesList
|
||||
from torrentool.torrent import Torrent
|
||||
@@ -11,7 +11,7 @@ from app.log import logger
|
||||
from app.modules import _ModuleBase, _DownloaderBase
|
||||
from app.modules.qbittorrent.qbittorrent import Qbittorrent
|
||||
from app.schemas import TransferTorrent, DownloadingTorrent
|
||||
from app.schemas.types import TorrentStatus, ModuleType
|
||||
from app.schemas.types import TorrentStatus, ModuleType, DownloaderType
|
||||
from app.utils.string import StringUtils
|
||||
|
||||
|
||||
@@ -35,6 +35,13 @@ class QbittorrentModule(_ModuleBase, _DownloaderBase[Qbittorrent]):
|
||||
"""
|
||||
return ModuleType.Downloader
|
||||
|
||||
@staticmethod
|
||||
def get_subtype() -> DownloaderType:
|
||||
"""
|
||||
获取模块子类型
|
||||
"""
|
||||
return DownloaderType.Qbittorrent
|
||||
|
||||
@staticmethod
|
||||
def get_priority() -> int:
|
||||
"""
|
||||
@@ -124,7 +131,8 @@ class QbittorrentModule(_ModuleBase, _DownloaderBase[Qbittorrent]):
|
||||
is_paused=is_paused,
|
||||
tag=tags,
|
||||
cookie=cookie,
|
||||
category=category
|
||||
category=category,
|
||||
ignore_category_check=False
|
||||
)
|
||||
if not state:
|
||||
# 读取种子的名称
|
||||
@@ -203,66 +211,75 @@ class QbittorrentModule(_ModuleBase, _DownloaderBase[Qbittorrent]):
|
||||
:return: 下载器中符合状态的种子列表
|
||||
"""
|
||||
# 获取下载器
|
||||
server: Qbittorrent = self.get_instance(downloader)
|
||||
if not server:
|
||||
return None
|
||||
|
||||
if downloader:
|
||||
server: Qbittorrent = self.get_instance(downloader)
|
||||
if not server:
|
||||
return None
|
||||
servers = {downloader: server}
|
||||
else:
|
||||
servers: Dict[str, Qbittorrent] = self.get_instances()
|
||||
ret_torrents = []
|
||||
if hashs:
|
||||
# 按Hash获取
|
||||
torrents, _ = server.get_torrents(ids=hashs, tags=settings.TORRENT_TAG)
|
||||
for torrent in torrents or []:
|
||||
content_path = torrent.get("content_path")
|
||||
if content_path:
|
||||
torrent_path = Path(content_path)
|
||||
else:
|
||||
torrent_path = Path(torrent.get('save_path')) / torrent.get('name')
|
||||
ret_torrents.append(TransferTorrent(
|
||||
title=torrent.get('name'),
|
||||
path=torrent_path,
|
||||
hash=torrent.get('hash'),
|
||||
size=torrent.get('total_size'),
|
||||
tags=torrent.get('tags')
|
||||
))
|
||||
for name, server in servers.items():
|
||||
torrents, _ = server.get_torrents(ids=hashs, tags=settings.TORRENT_TAG)
|
||||
for torrent in torrents or []:
|
||||
content_path = torrent.get("content_path")
|
||||
if content_path:
|
||||
torrent_path = Path(content_path)
|
||||
else:
|
||||
torrent_path = Path(torrent.get('save_path')) / torrent.get('name')
|
||||
ret_torrents.append(TransferTorrent(
|
||||
downloader=name,
|
||||
title=torrent.get('name'),
|
||||
path=torrent_path,
|
||||
hash=torrent.get('hash'),
|
||||
size=torrent.get('total_size'),
|
||||
tags=torrent.get('tags')
|
||||
))
|
||||
elif status == TorrentStatus.TRANSFER:
|
||||
# 获取已完成且未整理的
|
||||
torrents = server.get_completed_torrents(tags=settings.TORRENT_TAG)
|
||||
for torrent in torrents or []:
|
||||
tags = torrent.get("tags") or []
|
||||
if "已整理" in tags:
|
||||
continue
|
||||
# 内容路径
|
||||
content_path = torrent.get("content_path")
|
||||
if content_path:
|
||||
torrent_path = Path(content_path)
|
||||
else:
|
||||
torrent_path = torrent.get('save_path') / torrent.get('name')
|
||||
ret_torrents.append(TransferTorrent(
|
||||
title=torrent.get('name'),
|
||||
path=torrent_path,
|
||||
hash=torrent.get('hash'),
|
||||
tags=torrent.get('tags')
|
||||
))
|
||||
for name, server in servers.items():
|
||||
torrents = server.get_completed_torrents(tags=settings.TORRENT_TAG)
|
||||
for torrent in torrents or []:
|
||||
tags = torrent.get("tags") or []
|
||||
if "已整理" in tags:
|
||||
continue
|
||||
# 内容路径
|
||||
content_path = torrent.get("content_path")
|
||||
if content_path:
|
||||
torrent_path = Path(content_path)
|
||||
else:
|
||||
torrent_path = torrent.get('save_path') / torrent.get('name')
|
||||
ret_torrents.append(TransferTorrent(
|
||||
downloader=name,
|
||||
title=torrent.get('name'),
|
||||
path=torrent_path,
|
||||
hash=torrent.get('hash'),
|
||||
tags=torrent.get('tags')
|
||||
))
|
||||
elif status == TorrentStatus.DOWNLOADING:
|
||||
# 获取正在下载的任务
|
||||
torrents = server.get_downloading_torrents(tags=settings.TORRENT_TAG)
|
||||
for torrent in torrents or []:
|
||||
meta = MetaInfo(torrent.get('name'))
|
||||
ret_torrents.append(DownloadingTorrent(
|
||||
hash=torrent.get('hash'),
|
||||
title=torrent.get('name'),
|
||||
name=meta.name,
|
||||
year=meta.year,
|
||||
season_episode=meta.season_episode,
|
||||
progress=torrent.get('progress') * 100,
|
||||
size=torrent.get('total_size'),
|
||||
state="paused" if torrent.get('state') in ("paused", "pausedDL") else "downloading",
|
||||
dlspeed=StringUtils.str_filesize(torrent.get('dlspeed')),
|
||||
upspeed=StringUtils.str_filesize(torrent.get('upspeed')),
|
||||
left_time=StringUtils.str_secends(
|
||||
(torrent.get('total_size') - torrent.get('completed')) / torrent.get('dlspeed')) if torrent.get(
|
||||
'dlspeed') > 0 else ''
|
||||
))
|
||||
for name, server in servers.items():
|
||||
torrents = server.get_downloading_torrents(tags=settings.TORRENT_TAG)
|
||||
for torrent in torrents or []:
|
||||
meta = MetaInfo(torrent.get('name'))
|
||||
ret_torrents.append(DownloadingTorrent(
|
||||
downloader=name,
|
||||
hash=torrent.get('hash'),
|
||||
title=torrent.get('name'),
|
||||
name=meta.name,
|
||||
year=meta.year,
|
||||
season_episode=meta.season_episode,
|
||||
progress=torrent.get('progress') * 100,
|
||||
size=torrent.get('total_size'),
|
||||
state="paused" if torrent.get('state') in ("paused", "pausedDL") else "downloading",
|
||||
dlspeed=StringUtils.str_filesize(torrent.get('dlspeed')),
|
||||
upspeed=StringUtils.str_filesize(torrent.get('upspeed')),
|
||||
left_time=StringUtils.str_secends(
|
||||
(torrent.get('total_size') - torrent.get('completed')) / torrent.get('dlspeed')) if torrent.get(
|
||||
'dlspeed') > 0 else ''
|
||||
))
|
||||
else:
|
||||
return None
|
||||
return ret_torrents
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import time
|
||||
import traceback
|
||||
from typing import Optional, Union, Tuple, List
|
||||
|
||||
import qbittorrentapi
|
||||
@@ -75,8 +76,13 @@ class Qbittorrent:
|
||||
REQUESTS_ARGS={'timeout': (15, 60)})
|
||||
try:
|
||||
qbt.auth_log_in()
|
||||
except qbittorrentapi.LoginFailed as e:
|
||||
logger.error(f"qbittorrent 登录失败:{str(e)}")
|
||||
except (qbittorrentapi.LoginFailed, qbittorrentapi.Forbidden403Error) as e:
|
||||
logger.error(f"qbittorrent 登录失败:{str(e).strip() or '请检查用户名和密码是否正确'}")
|
||||
return None
|
||||
except Exception as e:
|
||||
stack_trace = "".join(traceback.format_exception(None, e, e.__traceback__))[:2000]
|
||||
logger.error(f"qbittorrent 登录失败:{str(e)}\n{stack_trace}")
|
||||
return None
|
||||
return qbt
|
||||
except Exception as err:
|
||||
logger.error(f"qbittorrent 连接出错:{str(err)}")
|
||||
@@ -245,6 +251,7 @@ class Qbittorrent:
|
||||
:param category: 种子分类
|
||||
:param download_dir: 下载路径
|
||||
:param cookie: 站点Cookie用于辅助下载种子
|
||||
:param kwargs: 可选参数,如 ignore_category_check 以及 QB相关参数
|
||||
:return: bool
|
||||
"""
|
||||
if not self.qbc or not content:
|
||||
@@ -270,13 +277,16 @@ class Qbittorrent:
|
||||
else:
|
||||
tags = None
|
||||
|
||||
# 分类自动管理
|
||||
if category and self._category:
|
||||
is_auto = True
|
||||
# 如果忽略分类检查,则直接使用传入的分类值,否则,仅在分类存在且启用了自动管理时才传递参数
|
||||
ignore_category_check = kwargs.pop("ignore_category_check", True)
|
||||
if ignore_category_check:
|
||||
is_auto = self._category
|
||||
else:
|
||||
is_auto = False
|
||||
category = None
|
||||
|
||||
if category and self._category:
|
||||
is_auto = True
|
||||
else:
|
||||
is_auto = False
|
||||
category = None
|
||||
try:
|
||||
# 添加下载
|
||||
qbc_ret = self.qbc.torrents_add(urls=urls,
|
||||
|
||||
@@ -31,6 +31,13 @@ class SlackModule(_ModuleBase, _MessageBase[Slack]):
|
||||
"""
|
||||
return ModuleType.Notification
|
||||
|
||||
@staticmethod
|
||||
def get_subtype() -> MessageChannel:
|
||||
"""
|
||||
获取模块子类型
|
||||
"""
|
||||
return MessageChannel.Slack
|
||||
|
||||
@staticmethod
|
||||
def get_priority() -> int:
|
||||
"""
|
||||
|
||||
@@ -10,7 +10,7 @@ from app.core.context import Context
|
||||
from app.helper.torrent import TorrentHelper
|
||||
from app.log import logger
|
||||
from app.modules import _ModuleBase
|
||||
from app.schemas.types import ModuleType
|
||||
from app.schemas.types import ModuleType, OtherModulesType
|
||||
from app.utils.http import RequestUtils
|
||||
from app.utils.string import StringUtils
|
||||
from app.utils.system import SystemUtils
|
||||
@@ -40,6 +40,13 @@ class SubtitleModule(_ModuleBase):
|
||||
"""
|
||||
return ModuleType.Other
|
||||
|
||||
@staticmethod
|
||||
def get_subtype() -> OtherModulesType:
|
||||
"""
|
||||
获取模块子类型
|
||||
"""
|
||||
return OtherModulesType.Subtitle
|
||||
|
||||
@staticmethod
|
||||
def get_priority() -> int:
|
||||
"""
|
||||
|
||||
@@ -29,6 +29,13 @@ class SynologyChatModule(_ModuleBase, _MessageBase[SynologyChat]):
|
||||
"""
|
||||
return ModuleType.Notification
|
||||
|
||||
@staticmethod
|
||||
def get_subtype() -> MessageChannel:
|
||||
"""
|
||||
获取模块子类型
|
||||
"""
|
||||
return MessageChannel.SynologyChat
|
||||
|
||||
@staticmethod
|
||||
def get_priority() -> int:
|
||||
"""
|
||||
|
||||
@@ -34,6 +34,13 @@ class TelegramModule(_ModuleBase, _MessageBase[Telegram]):
|
||||
"""
|
||||
return ModuleType.Notification
|
||||
|
||||
@staticmethod
|
||||
def get_subtype() -> MessageChannel:
|
||||
"""
|
||||
获取模块子类型
|
||||
"""
|
||||
return MessageChannel.Telegram
|
||||
|
||||
@staticmethod
|
||||
def get_priority() -> int:
|
||||
"""
|
||||
|
||||
@@ -14,7 +14,7 @@ from app.modules.themoviedb.scraper import TmdbScraper
|
||||
from app.modules.themoviedb.tmdb_cache import TmdbCache
|
||||
from app.modules.themoviedb.tmdbapi import TmdbApi
|
||||
from app.schemas import MediaPerson
|
||||
from app.schemas.types import MediaType, MediaImageType, ModuleType
|
||||
from app.schemas.types import MediaType, MediaImageType, ModuleType, MediaRecognizeType
|
||||
from app.utils.http import RequestUtils
|
||||
|
||||
|
||||
@@ -49,6 +49,13 @@ class TheMovieDbModule(_ModuleBase):
|
||||
"""
|
||||
return ModuleType.MediaRecognize
|
||||
|
||||
@staticmethod
|
||||
def get_subtype() -> MediaRecognizeType:
|
||||
"""
|
||||
获取模块子类型
|
||||
"""
|
||||
return MediaRecognizeType.TMDB
|
||||
|
||||
@staticmethod
|
||||
def get_priority() -> int:
|
||||
"""
|
||||
|
||||
@@ -30,9 +30,9 @@ class TmdbScraper:
|
||||
# 电影元数据文件
|
||||
doc = self.__gen_movie_nfo_file(mediainfo=mediainfo)
|
||||
else:
|
||||
if season:
|
||||
if season is not None:
|
||||
# 查询季信息
|
||||
seasoninfo = self.tmdb.get_tv_season_detail(mediainfo.tmdb_id, meta.begin_season)
|
||||
seasoninfo = self.tmdb.get_tv_season_detail(mediainfo.tmdb_id, season)
|
||||
if episode:
|
||||
# 集元数据文件
|
||||
episodeinfo = self.__get_episode_detail(seasoninfo, meta.begin_episode)
|
||||
@@ -57,7 +57,7 @@ class TmdbScraper:
|
||||
:param episode: 集号
|
||||
"""
|
||||
images = {}
|
||||
if season:
|
||||
if season is not None:
|
||||
# 只需要集的图片
|
||||
if episode:
|
||||
# 集的图片
|
||||
@@ -102,8 +102,13 @@ class TmdbScraper:
|
||||
ext = Path(seasoninfo.get('poster_path')).suffix
|
||||
# URL
|
||||
url = f"https://{settings.TMDB_IMAGE_DOMAIN}/t/p/original{seasoninfo.get('poster_path')}"
|
||||
image_name = f"season{sea_seq}-poster{ext}"
|
||||
# S0海报格式不同
|
||||
if season == 0:
|
||||
image_name = f"season-specials-poster{ext}"
|
||||
else:
|
||||
image_name = f"season{sea_seq}-poster{ext}"
|
||||
return image_name, url
|
||||
return "", ""
|
||||
|
||||
@staticmethod
|
||||
def __get_episode_detail(seasoninfo: dict, episode: int) -> dict:
|
||||
@@ -228,7 +233,7 @@ class TmdbScraper:
|
||||
xoutline = DomUtils.add_node(doc, root, "outline")
|
||||
xoutline.appendChild(doc.createCDATASection(seasoninfo.get("overview") or ""))
|
||||
# 标题
|
||||
DomUtils.add_node(doc, root, "title", "季 %s" % season)
|
||||
DomUtils.add_node(doc, root, "title", seasoninfo.get("name") or "季 %s" % season)
|
||||
# 发行日期
|
||||
DomUtils.add_node(doc, root, "premiered", seasoninfo.get("air_date") or "")
|
||||
DomUtils.add_node(doc, root, "releasedate", seasoninfo.get("air_date") or "")
|
||||
|
||||
@@ -15,7 +15,7 @@ from app.schemas.types import MediaType
|
||||
lock = RLock()
|
||||
|
||||
CACHE_EXPIRE_TIMESTAMP_STR = "cache_expire_timestamp"
|
||||
EXPIRE_TIMESTAMP = settings.CACHE_CONF.get('meta')
|
||||
EXPIRE_TIMESTAMP = settings.CACHE_CONF["meta"]
|
||||
|
||||
|
||||
class TmdbCache(metaclass=Singleton):
|
||||
@@ -75,7 +75,7 @@ class TmdbCache(metaclass=Singleton):
|
||||
@return: 被删除的缓存内容
|
||||
"""
|
||||
with lock:
|
||||
return self._meta_data.pop(key, None)
|
||||
return self._meta_data.pop(key, {})
|
||||
|
||||
def delete_by_tmdbid(self, tmdbid: int) -> None:
|
||||
"""
|
||||
@@ -138,14 +138,14 @@ class TmdbCache(metaclass=Singleton):
|
||||
if cache_year:
|
||||
cache_year = cache_year[:4]
|
||||
self._meta_data[self.__get_key(meta)] = {
|
||||
"id": info.get("id"),
|
||||
"type": info.get("media_type"),
|
||||
"year": cache_year,
|
||||
"title": cache_title,
|
||||
"poster_path": info.get("poster_path"),
|
||||
"backdrop_path": info.get("backdrop_path"),
|
||||
CACHE_EXPIRE_TIMESTAMP_STR: int(time.time()) + EXPIRE_TIMESTAMP
|
||||
}
|
||||
"id": info.get("id"),
|
||||
"type": info.get("media_type"),
|
||||
"year": cache_year,
|
||||
"title": cache_title,
|
||||
"poster_path": info.get("poster_path"),
|
||||
"backdrop_path": info.get("backdrop_path"),
|
||||
CACHE_EXPIRE_TIMESTAMP_STR: int(time.time()) + EXPIRE_TIMESTAMP
|
||||
}
|
||||
elif info is not None:
|
||||
# None时不缓存,此时代表网络错误,允许重复请求
|
||||
self._meta_data[self.__get_key(meta)] = {'id': 0}
|
||||
@@ -164,7 +164,7 @@ class TmdbCache(metaclass=Singleton):
|
||||
return
|
||||
|
||||
with open(self._meta_path, 'wb') as f:
|
||||
pickle.dump(new_meta_data, f, pickle.HIGHEST_PROTOCOL)
|
||||
pickle.dump(new_meta_data, f, pickle.HIGHEST_PROTOCOL) # type: ignore
|
||||
|
||||
def _random_sample(self, new_meta_data: dict) -> bool:
|
||||
"""
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
import traceback
|
||||
from functools import lru_cache
|
||||
from typing import Optional, List
|
||||
from urllib.parse import quote
|
||||
|
||||
import zhconv
|
||||
from cachetools import TTLCache, cached
|
||||
from lxml import etree
|
||||
|
||||
from app.core.config import settings
|
||||
@@ -27,8 +27,6 @@ class TmdbApi:
|
||||
self.tmdb.domain = settings.TMDB_API_DOMAIN
|
||||
# 开启缓存
|
||||
self.tmdb.cache = True
|
||||
# 缓存大小
|
||||
self.tmdb.REQUEST_CACHE_MAXSIZE = settings.CACHE_CONF.get('tmdb')
|
||||
# APIKEY
|
||||
self.tmdb.api_key = settings.TMDB_API_KEY
|
||||
# 语种
|
||||
@@ -163,7 +161,7 @@ class TmdbApi:
|
||||
season_number: int = None) -> Optional[dict]:
|
||||
"""
|
||||
搜索tmdb中的媒体信息,匹配返回一条尽可能正确的信息
|
||||
:param name: 剑索的名称
|
||||
:param name: 检索的名称
|
||||
:param mtype: 类型:电影、电视剧
|
||||
:param year: 年份,如要是季集需要是首播年份(first_air_date)
|
||||
:param season_year: 当前季集年份
|
||||
@@ -466,7 +464,7 @@ class TmdbApi:
|
||||
|
||||
return ret_info
|
||||
|
||||
@lru_cache(maxsize=settings.CACHE_CONF.get('tmdb'))
|
||||
@cached(cache=TTLCache(maxsize=settings.CACHE_CONF["tmdb"], ttl=settings.CACHE_CONF["meta"]))
|
||||
def match_web(self, name: str, mtype: MediaType) -> Optional[dict]:
|
||||
"""
|
||||
搜索TMDB网站,直接抓取结果,结果只有一条时才返回
|
||||
@@ -1292,7 +1290,7 @@ class TmdbApi:
|
||||
for group_episode in group_episodes:
|
||||
order = group_episode.get('order')
|
||||
episodes = group_episode.get('episodes')
|
||||
if not episodes or not order:
|
||||
if not episodes:
|
||||
continue
|
||||
# 当前季第一季时间
|
||||
first_date = episodes[0].get("air_date")
|
||||
|
||||
@@ -4,11 +4,12 @@ import logging
|
||||
import os
|
||||
import time
|
||||
from datetime import datetime
|
||||
from functools import lru_cache
|
||||
|
||||
import requests
|
||||
import requests.exceptions
|
||||
from cachetools import TTLCache, cached
|
||||
|
||||
from app.core.config import settings
|
||||
from app.utils.http import RequestUtils
|
||||
from .exceptions import TMDbException
|
||||
|
||||
@@ -24,7 +25,6 @@ class TMDb(object):
|
||||
TMDB_CACHE_ENABLED = "TMDB_CACHE_ENABLED"
|
||||
TMDB_PROXIES = "TMDB_PROXIES"
|
||||
TMDB_DOMAIN = "TMDB_DOMAIN"
|
||||
REQUEST_CACHE_MAXSIZE = None
|
||||
|
||||
_req = None
|
||||
_session = None
|
||||
@@ -137,7 +137,7 @@ class TMDb(object):
|
||||
def cache(self, cache):
|
||||
os.environ[self.TMDB_CACHE_ENABLED] = str(cache)
|
||||
|
||||
@lru_cache(maxsize=REQUEST_CACHE_MAXSIZE)
|
||||
@cached(cache=TTLCache(maxsize=settings.CACHE_CONF["tmdb"], ttl=settings.CACHE_CONF["meta"]))
|
||||
def cached_request(self, method, url, data, json,
|
||||
_ts=datetime.strftime(datetime.now(), '%Y%m%d')):
|
||||
"""
|
||||
|
||||
@@ -4,7 +4,7 @@ from app.core.config import settings
|
||||
from app.log import logger
|
||||
from app.modules import _ModuleBase
|
||||
from app.modules.thetvdb import tvdbapi
|
||||
from app.schemas.types import ModuleType
|
||||
from app.schemas.types import ModuleType, MediaRecognizeType
|
||||
from app.utils.http import RequestUtils
|
||||
|
||||
|
||||
@@ -28,6 +28,13 @@ class TheTvDbModule(_ModuleBase):
|
||||
"""
|
||||
return ModuleType.MediaRecognize
|
||||
|
||||
@staticmethod
|
||||
def get_subtype() -> MediaRecognizeType:
|
||||
"""
|
||||
获取模块子类型
|
||||
"""
|
||||
return MediaRecognizeType.TVDB
|
||||
|
||||
@staticmethod
|
||||
def get_priority() -> int:
|
||||
"""
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
from pathlib import Path
|
||||
from typing import Set, Tuple, Optional, Union, List
|
||||
from typing import Set, Tuple, Optional, Union, List, Dict
|
||||
|
||||
from torrentool.torrent import Torrent
|
||||
from transmission_rpc import File
|
||||
@@ -11,7 +11,7 @@ from app.log import logger
|
||||
from app.modules import _ModuleBase, _DownloaderBase
|
||||
from app.modules.transmission.transmission import Transmission
|
||||
from app.schemas import TransferTorrent, DownloadingTorrent
|
||||
from app.schemas.types import TorrentStatus, ModuleType
|
||||
from app.schemas.types import TorrentStatus, ModuleType, DownloaderType
|
||||
from app.utils.string import StringUtils
|
||||
|
||||
|
||||
@@ -35,6 +35,13 @@ class TransmissionModule(_ModuleBase, _DownloaderBase[Transmission]):
|
||||
"""
|
||||
return ModuleType.Downloader
|
||||
|
||||
@staticmethod
|
||||
def get_subtype() -> DownloaderType:
|
||||
"""
|
||||
获取模块子类型
|
||||
"""
|
||||
return DownloaderType.Transmission
|
||||
|
||||
@staticmethod
|
||||
def get_priority() -> int:
|
||||
"""
|
||||
@@ -196,60 +203,70 @@ class TransmissionModule(_ModuleBase, _DownloaderBase[Transmission]):
|
||||
:return: 下载器中符合状态的种子列表
|
||||
"""
|
||||
# 获取下载器
|
||||
server: Transmission = self.get_instance(downloader)
|
||||
if not server:
|
||||
return None
|
||||
if downloader:
|
||||
server: Transmission = self.get_instance(downloader)
|
||||
if not server:
|
||||
return None
|
||||
servers = {downloader: server}
|
||||
else:
|
||||
servers: Dict[str, Transmission] = self.get_instances()
|
||||
ret_torrents = []
|
||||
if hashs:
|
||||
# 按Hash获取
|
||||
torrents, _ = server.get_torrents(ids=hashs, tags=settings.TORRENT_TAG)
|
||||
for torrent in torrents or []:
|
||||
ret_torrents.append(TransferTorrent(
|
||||
title=torrent.name,
|
||||
path=Path(torrent.download_dir) / torrent.name,
|
||||
hash=torrent.hashString,
|
||||
size=torrent.total_size,
|
||||
tags=",".join(torrent.labels or [])
|
||||
))
|
||||
for name, server in servers.items():
|
||||
torrents, _ = server.get_torrents(ids=hashs, tags=settings.TORRENT_TAG)
|
||||
for torrent in torrents or []:
|
||||
ret_torrents.append(TransferTorrent(
|
||||
downloader=name,
|
||||
title=torrent.name,
|
||||
path=Path(torrent.download_dir) / torrent.name,
|
||||
hash=torrent.hashString,
|
||||
size=torrent.total_size,
|
||||
tags=",".join(torrent.labels or [])
|
||||
))
|
||||
elif status == TorrentStatus.TRANSFER:
|
||||
# 获取已完成且未整理的
|
||||
torrents = server.get_completed_torrents(tags=settings.TORRENT_TAG)
|
||||
for torrent in torrents or []:
|
||||
# 含"已整理"tag的不处理
|
||||
if "已整理" in torrent.labels or []:
|
||||
continue
|
||||
# 下载路径
|
||||
path = torrent.download_dir
|
||||
# 无法获取下载路径的不处理
|
||||
if not path:
|
||||
logger.debug(f"未获取到 {torrent.name} 下载保存路径")
|
||||
continue
|
||||
ret_torrents.append(TransferTorrent(
|
||||
title=torrent.name,
|
||||
path=Path(torrent.download_dir) / torrent.name,
|
||||
hash=torrent.hashString,
|
||||
tags=",".join(torrent.labels or [])
|
||||
))
|
||||
for name, server in servers.items():
|
||||
torrents = server.get_completed_torrents(tags=settings.TORRENT_TAG)
|
||||
for torrent in torrents or []:
|
||||
# 含"已整理"tag的不处理
|
||||
if "已整理" in torrent.labels or []:
|
||||
continue
|
||||
# 下载路径
|
||||
path = torrent.download_dir
|
||||
# 无法获取下载路径的不处理
|
||||
if not path:
|
||||
logger.debug(f"未获取到 {torrent.name} 下载保存路径")
|
||||
continue
|
||||
ret_torrents.append(TransferTorrent(
|
||||
downloader=name,
|
||||
title=torrent.name,
|
||||
path=Path(torrent.download_dir) / torrent.name,
|
||||
hash=torrent.hashString,
|
||||
tags=",".join(torrent.labels or [])
|
||||
))
|
||||
elif status == TorrentStatus.DOWNLOADING:
|
||||
# 获取正在下载的任务
|
||||
torrents = server.get_downloading_torrents(tags=settings.TORRENT_TAG)
|
||||
for torrent in torrents or []:
|
||||
meta = MetaInfo(torrent.name)
|
||||
dlspeed = torrent.rate_download if hasattr(torrent, "rate_download") else torrent.rateDownload
|
||||
upspeed = torrent.rate_upload if hasattr(torrent, "rate_upload") else torrent.rateUpload
|
||||
ret_torrents.append(DownloadingTorrent(
|
||||
hash=torrent.hashString,
|
||||
title=torrent.name,
|
||||
name=meta.name,
|
||||
year=meta.year,
|
||||
season_episode=meta.season_episode,
|
||||
progress=torrent.progress,
|
||||
size=torrent.total_size,
|
||||
state="paused" if torrent.status == "stopped" else "downloading",
|
||||
dlspeed=StringUtils.str_filesize(dlspeed),
|
||||
upspeed=StringUtils.str_filesize(upspeed),
|
||||
left_time=StringUtils.str_secends(torrent.left_until_done / dlspeed) if dlspeed > 0 else ''
|
||||
))
|
||||
for name, server in servers.items():
|
||||
torrents = server.get_downloading_torrents(tags=settings.TORRENT_TAG)
|
||||
for torrent in torrents or []:
|
||||
meta = MetaInfo(torrent.name)
|
||||
dlspeed = torrent.rate_download if hasattr(torrent, "rate_download") else torrent.rateDownload
|
||||
upspeed = torrent.rate_upload if hasattr(torrent, "rate_upload") else torrent.rateUpload
|
||||
ret_torrents.append(DownloadingTorrent(
|
||||
downloader=name,
|
||||
hash=torrent.hashString,
|
||||
title=torrent.name,
|
||||
name=meta.name,
|
||||
year=meta.year,
|
||||
season_episode=meta.season_episode,
|
||||
progress=torrent.progress,
|
||||
size=torrent.total_size,
|
||||
state="paused" if torrent.status == "stopped" else "downloading",
|
||||
dlspeed=StringUtils.str_filesize(dlspeed),
|
||||
upspeed=StringUtils.str_filesize(upspeed),
|
||||
left_time=StringUtils.str_secends(torrent.left_until_done / dlspeed) if dlspeed > 0 else ''
|
||||
))
|
||||
else:
|
||||
return None
|
||||
return ret_torrents
|
||||
|
||||
@@ -30,6 +30,13 @@ class VoceChatModule(_ModuleBase, _MessageBase[VoceChat]):
|
||||
"""
|
||||
return ModuleType.Notification
|
||||
|
||||
@staticmethod
|
||||
def get_subtype() -> MessageChannel:
|
||||
"""
|
||||
获取模块子类型
|
||||
"""
|
||||
return MessageChannel.VoceChat
|
||||
|
||||
@staticmethod
|
||||
def get_priority() -> int:
|
||||
"""
|
||||
|
||||
@@ -30,6 +30,13 @@ class WebPushModule(_ModuleBase, _MessageBase):
|
||||
"""
|
||||
return ModuleType.Notification
|
||||
|
||||
@staticmethod
|
||||
def get_subtype() -> MessageChannel:
|
||||
"""
|
||||
获取模块子类型
|
||||
"""
|
||||
return MessageChannel.WebPush
|
||||
|
||||
@staticmethod
|
||||
def get_priority() -> int:
|
||||
"""
|
||||
|
||||
@@ -36,6 +36,13 @@ class WechatModule(_ModuleBase, _MessageBase[WeChat]):
|
||||
"""
|
||||
return ModuleType.Notification
|
||||
|
||||
@staticmethod
|
||||
def get_subtype() -> MessageChannel:
|
||||
"""
|
||||
获取模块的子类型
|
||||
"""
|
||||
return MessageChannel.Wechat
|
||||
|
||||
@staticmethod
|
||||
def get_priority() -> int:
|
||||
"""
|
||||
|
||||
@@ -263,28 +263,26 @@ class Monitor(metaclass=Singleton):
|
||||
try:
|
||||
item = self._queue.get(timeout=self._transfer_interval)
|
||||
if item:
|
||||
self.__handle_file(storage=item.get("storage"),
|
||||
event_path=item.get("filepath"),
|
||||
mon_path=item.get("mon_path"))
|
||||
self.__handle_file(storage=item.get("storage"), event_path=item.get("filepath"))
|
||||
except queue.Empty:
|
||||
continue
|
||||
except Exception as e:
|
||||
logger.error(f"整理队列处理出现错误:{e}")
|
||||
|
||||
def __handle_file(self, storage: str, event_path: Path, mon_path: Path):
|
||||
def __handle_file(self, storage: str, event_path: Path):
|
||||
"""
|
||||
整理一个文件
|
||||
:param storage: 存储
|
||||
:param event_path: 事件文件路径
|
||||
:param mon_path: 监控目录
|
||||
"""
|
||||
|
||||
def __get_bluray_dir(_path: Path):
|
||||
"""
|
||||
获取BDMV目录的上级目录
|
||||
"""
|
||||
for parent in _path.parents:
|
||||
if parent.name == "BDMV":
|
||||
return parent.parent
|
||||
for p in _path.parents:
|
||||
if p.name == "BDMV":
|
||||
return p.parent
|
||||
return None
|
||||
|
||||
# 全程加锁
|
||||
@@ -386,7 +384,7 @@ class Monitor(metaclass=Singleton):
|
||||
return
|
||||
|
||||
# 查询转移目的目录
|
||||
dir_info = self.directoryhelper.get_dir(mediainfo, src_path=mon_path)
|
||||
dir_info = self.directoryhelper.get_dir(mediainfo, storage=storage, src_path=event_path)
|
||||
if not dir_info:
|
||||
logger.warn(f"{event_path.name} 未找到对应的目标目录")
|
||||
return
|
||||
@@ -480,14 +478,7 @@ class Monitor(metaclass=Singleton):
|
||||
|
||||
# 移动模式删除空目录
|
||||
if transferinfo.transfer_type in ["move"]:
|
||||
logger.info(f"正在删除: {file_item.storage} {file_item.path}")
|
||||
if self.storagechain.delete_file(file_item):
|
||||
# 删除空的父目录
|
||||
dir_item = self.storagechain.get_parent_item(file_item)
|
||||
if dir_item:
|
||||
if not self.storagechain.any_files(dir_item, extensions=settings.RMT_MEDIAEXT):
|
||||
logger.warn(f"正在删除空目录: {dir_item.storage} {dir_item.path}")
|
||||
return self.storagechain.delete_file(dir_item)
|
||||
self.storagechain.delete_media_file(file_item, delete_self=False)
|
||||
|
||||
except Exception as e:
|
||||
logger.error("目录监控发生错误:%s - %s" % (str(e), traceback.format_exc()))
|
||||
|
||||
155
app/scheduler.py
155
app/scheduler.py
@@ -19,10 +19,11 @@ from app.chain.transfer import TransferChain
|
||||
from app.core.config import settings
|
||||
from app.core.event import EventManager
|
||||
from app.core.plugin import PluginManager
|
||||
from app.db.systemconfig_oper import SystemConfigOper
|
||||
from app.helper.sites import SitesHelper
|
||||
from app.log import logger
|
||||
from app.schemas import Notification, NotificationType
|
||||
from app.schemas.types import EventType
|
||||
from app.schemas.types import EventType, SystemConfigKey
|
||||
from app.utils.singleton import Singleton
|
||||
from app.utils.timer import TimerUtils
|
||||
|
||||
@@ -40,7 +41,7 @@ class Scheduler(metaclass=Singleton):
|
||||
# 退出事件
|
||||
_event = threading.Event()
|
||||
# 锁
|
||||
_lock = threading.Lock()
|
||||
_lock = threading.RLock()
|
||||
# 各服务的运行状态
|
||||
_jobs = {}
|
||||
# 用户认证失败次数
|
||||
@@ -53,49 +54,6 @@ class Scheduler(metaclass=Singleton):
|
||||
"""
|
||||
初始化定时服务
|
||||
"""
|
||||
|
||||
def clear_cache():
|
||||
"""
|
||||
清理缓存
|
||||
"""
|
||||
TorrentsChain().clear_cache()
|
||||
SchedulerChain().clear_cache()
|
||||
|
||||
def user_auth():
|
||||
"""
|
||||
用户认证检查
|
||||
"""
|
||||
if SitesHelper().auth_level >= 2:
|
||||
return
|
||||
# 最大重试次数
|
||||
__max_try__ = 30
|
||||
if self._auth_count > __max_try__:
|
||||
SchedulerChain().messagehelper.put(title=f"用户认证失败",
|
||||
message="用户认证失败次数过多,将不再尝试认证!",
|
||||
role="system")
|
||||
return
|
||||
logger.info("用户未认证,正在尝试重新认证...")
|
||||
status, msg = SitesHelper().check_user()
|
||||
if status:
|
||||
self._auth_count = 0
|
||||
logger.info(f"{msg} 用户认证成功")
|
||||
SchedulerChain().post_message(
|
||||
Notification(
|
||||
mtype=NotificationType.Manual,
|
||||
title="MoviePilot用户认证成功",
|
||||
text=f"使用站点:{msg}",
|
||||
link=settings.MP_DOMAIN('#/site')
|
||||
)
|
||||
)
|
||||
PluginManager().init_config()
|
||||
self.init_plugin_jobs()
|
||||
|
||||
else:
|
||||
self._auth_count += 1
|
||||
logger.error(f"用户认证失败:{msg},共失败 {self._auth_count} 次")
|
||||
if self._auth_count >= __max_try__:
|
||||
logger.error("用户认证失败次数过多,将不再尝试认证!")
|
||||
|
||||
# 各服务的运行状态
|
||||
self._jobs = {
|
||||
"cookiecloud": {
|
||||
@@ -141,12 +99,12 @@ class Scheduler(metaclass=Singleton):
|
||||
},
|
||||
"clear_cache": {
|
||||
"name": "缓存清理",
|
||||
"func": clear_cache,
|
||||
"func": self.clear_cache,
|
||||
"running": False,
|
||||
},
|
||||
"user_auth": {
|
||||
"name": "用户认证检查",
|
||||
"func": user_auth,
|
||||
"func": self.user_auth,
|
||||
"running": False,
|
||||
},
|
||||
"scheduler_job": {
|
||||
@@ -321,7 +279,7 @@ class Scheduler(metaclass=Singleton):
|
||||
"interval",
|
||||
id="clear_cache",
|
||||
name="缓存清理",
|
||||
hours=settings.CACHE_CONF.get("meta") / 3600,
|
||||
hours=settings.CACHE_CONF["meta"] / 3600,
|
||||
kwargs={
|
||||
'job_id': 'clear_cache'
|
||||
}
|
||||
@@ -339,17 +297,18 @@ class Scheduler(metaclass=Singleton):
|
||||
}
|
||||
)
|
||||
|
||||
# 站点数据刷新,每隔30分钟
|
||||
self._scheduler.add_job(
|
||||
self.start,
|
||||
"interval",
|
||||
id="sitedata_refresh",
|
||||
name="站点数据刷新",
|
||||
minutes=settings.SITEDATA_REFRESH_INTERVAL * 60,
|
||||
kwargs={
|
||||
'job_id': 'sitedata_refresh'
|
||||
}
|
||||
)
|
||||
# 站点数据刷新
|
||||
if settings.SITEDATA_REFRESH_INTERVAL:
|
||||
self._scheduler.add_job(
|
||||
self.start,
|
||||
"interval",
|
||||
id="sitedata_refresh",
|
||||
name="站点数据刷新",
|
||||
minutes=settings.SITEDATA_REFRESH_INTERVAL * 60,
|
||||
kwargs={
|
||||
'job_id': 'sitedata_refresh'
|
||||
}
|
||||
)
|
||||
|
||||
self.init_plugin_jobs()
|
||||
|
||||
@@ -429,11 +388,13 @@ class Scheduler(metaclass=Singleton):
|
||||
try:
|
||||
sid = f"{service['id']}"
|
||||
job_id = sid.split("|")[0]
|
||||
self.remove_plugin_job(pid, job_id)
|
||||
self._jobs[job_id] = {
|
||||
"func": service["func"],
|
||||
"name": service["name"],
|
||||
"pid": pid,
|
||||
"plugin_name": plugin_name,
|
||||
"kwargs": service.get("func_kwargs") or {},
|
||||
"running": False,
|
||||
}
|
||||
self._scheduler.add_job(
|
||||
@@ -441,7 +402,7 @@ class Scheduler(metaclass=Singleton):
|
||||
service["trigger"],
|
||||
id=sid,
|
||||
name=service["name"],
|
||||
**service["kwargs"],
|
||||
**(service.get("kwargs") or {}),
|
||||
kwargs={"job_id": job_id},
|
||||
replace_existing=True
|
||||
)
|
||||
@@ -452,23 +413,34 @@ class Scheduler(metaclass=Singleton):
|
||||
message=str(e),
|
||||
role="system")
|
||||
|
||||
def remove_plugin_job(self, pid: str):
|
||||
def remove_plugin_job(self, pid: str, job_id: str = None):
|
||||
"""
|
||||
移除插件定时服务
|
||||
移除定时服务,可以是单个服务(包括默认服务)或整个插件的所有服务
|
||||
:param pid: 插件 ID
|
||||
:param job_id: 可选,指定要移除的单个服务的 job_id。如果不提供,则移除该插件的所有服务,当移除单个服务时,默认服务也包含在内
|
||||
"""
|
||||
if not self._scheduler:
|
||||
return
|
||||
with self._lock:
|
||||
# 获取插件名称
|
||||
plugin_name = PluginManager().get_plugin_attr(pid, "plugin_name")
|
||||
# 先从 _jobs 中查找匹配的服务
|
||||
jobs_to_remove = [(job_id, service) for job_id, service in self._jobs.items() if service.get("pid") == pid]
|
||||
if job_id:
|
||||
# 移除单个服务
|
||||
service = self._jobs.pop(job_id, None)
|
||||
if not service:
|
||||
return
|
||||
jobs_to_remove = [(job_id, service)]
|
||||
else:
|
||||
# 移除插件的所有服务
|
||||
jobs_to_remove = [
|
||||
(job_id, service) for job_id, service in self._jobs.items() if service.get("pid") == pid
|
||||
]
|
||||
for job_id, _ in jobs_to_remove:
|
||||
self._jobs.pop(job_id, None)
|
||||
if not jobs_to_remove:
|
||||
return
|
||||
plugin_name = PluginManager().get_plugin_attr(pid, "plugin_name")
|
||||
# 遍历移除任务
|
||||
for job_id, service in jobs_to_remove:
|
||||
try:
|
||||
# 移除服务
|
||||
self._jobs.pop(job_id, None)
|
||||
# 在调度器中查找并移除对应的 job
|
||||
job_removed = False
|
||||
for job in list(self._scheduler.get_jobs()):
|
||||
@@ -552,3 +524,50 @@ class Scheduler(metaclass=Singleton):
|
||||
logger.info("定时任务停止完成")
|
||||
except Exception as e:
|
||||
logger.error(f"停止定时任务失败::{str(e)} - {traceback.format_exc()}")
|
||||
|
||||
@staticmethod
|
||||
def clear_cache():
|
||||
"""
|
||||
清理缓存
|
||||
"""
|
||||
TorrentsChain().clear_cache()
|
||||
SchedulerChain().clear_cache()
|
||||
|
||||
def user_auth(self):
|
||||
"""
|
||||
用户认证检查
|
||||
"""
|
||||
if SitesHelper().auth_level >= 2:
|
||||
return
|
||||
# 最大重试次数
|
||||
__max_try__ = 30
|
||||
if self._auth_count > __max_try__:
|
||||
SchedulerChain().messagehelper.put(title=f"用户认证失败",
|
||||
message="用户认证失败次数过多,将不再尝试认证!",
|
||||
role="system")
|
||||
return
|
||||
logger.info("用户未认证,正在尝试认证...")
|
||||
auth_conf = SystemConfigOper().get(SystemConfigKey.UserSiteAuthParams)
|
||||
if auth_conf:
|
||||
status, msg = SitesHelper().check_user(**auth_conf)
|
||||
else:
|
||||
status, msg = SitesHelper().check_user()
|
||||
if status:
|
||||
self._auth_count = 0
|
||||
logger.info(f"{msg} 用户认证成功")
|
||||
SchedulerChain().post_message(
|
||||
Notification(
|
||||
mtype=NotificationType.Manual,
|
||||
title="MoviePilot用户认证成功",
|
||||
text=f"使用站点:{msg}",
|
||||
link=settings.MP_DOMAIN('#/site')
|
||||
)
|
||||
)
|
||||
PluginManager().init_config()
|
||||
self.init_plugin_jobs()
|
||||
|
||||
else:
|
||||
self._auth_count += 1
|
||||
logger.error(f"用户认证失败:{msg},共失败 {self._auth_count} 次")
|
||||
if self._auth_count >= __max_try__:
|
||||
logger.error("用户认证失败次数过多,将不再尝试认证!")
|
||||
|
||||
@@ -180,6 +180,8 @@ class TorrentInfo(BaseModel):
|
||||
site_proxy: Optional[bool] = False
|
||||
# 站点优先级
|
||||
site_order: Optional[int] = 0
|
||||
# 站点下载器
|
||||
site_downloader: Optional[str] = None
|
||||
# 种子名称
|
||||
title: Optional[str] = None
|
||||
# 种子副标题
|
||||
|
||||
@@ -1,7 +1,11 @@
|
||||
from typing import Optional, Dict
|
||||
from pathlib import Path
|
||||
from typing import Optional, Dict, Any, List, Set
|
||||
|
||||
from pydantic import BaseModel, Field, root_validator
|
||||
|
||||
from app.core.context import Context
|
||||
from app.schemas import MessageChannel
|
||||
|
||||
|
||||
class BaseEventData(BaseModel):
|
||||
"""
|
||||
@@ -114,3 +118,87 @@ class CommandRegisterEventData(ChainEventData):
|
||||
# 输出参数
|
||||
cancel: bool = Field(False, description="是否取消注册")
|
||||
source: str = Field("未知拦截源", description="拦截源")
|
||||
|
||||
|
||||
class TransferRenameEventData(ChainEventData):
|
||||
"""
|
||||
TransferRename 事件的数据模型
|
||||
|
||||
Attributes:
|
||||
# 输入参数
|
||||
template_string (str): Jinja2 模板字符串
|
||||
rename_dict (dict): 渲染上下文
|
||||
render_str (str): 渲染生成的字符串
|
||||
path (Optional[Path]): 当前文件的目标路径
|
||||
|
||||
# 输出参数
|
||||
updated (bool): 是否已更新,默认值为 False
|
||||
updated_str (str): 更新后的字符串
|
||||
source (str): 拦截源,默认值为 "未知拦截源"
|
||||
"""
|
||||
# 输入参数
|
||||
template_string: str = Field(..., description="模板字符串")
|
||||
rename_dict: Dict[str, Any] = Field(..., description="渲染上下文")
|
||||
path: Optional[Path] = Field(None, description="文件的目标路径")
|
||||
render_str: str = Field(..., description="渲染生成的字符串")
|
||||
|
||||
# 输出参数
|
||||
updated: bool = Field(False, description="是否已更新")
|
||||
updated_str: Optional[str] = Field(None, description="更新后的字符串")
|
||||
source: Optional[str] = Field("未知拦截源", description="拦截源")
|
||||
|
||||
|
||||
class ResourceSelectionEventData(BaseModel):
|
||||
"""
|
||||
ResourceSelection 事件的数据模型
|
||||
|
||||
Attributes:
|
||||
# 输入参数
|
||||
contexts (List[Context]): 当前待选择的资源上下文列表
|
||||
source (str): 事件源,指示事件的触发来源
|
||||
|
||||
# 输出参数
|
||||
updated (bool): 是否已更新,默认值为 False
|
||||
updated_contexts (Optional[List[Context]]): 已更新的资源上下文列表,默认值为 None
|
||||
source (str): 更新源,默认值为 "未知更新源"
|
||||
"""
|
||||
# 输入参数
|
||||
contexts: Any = Field(None, description="待选择的资源上下文列表")
|
||||
downloader: Optional[str] = Field(None, description="下载器")
|
||||
|
||||
# 输出参数
|
||||
updated: bool = Field(False, description="是否已更新")
|
||||
updated_contexts: Optional[List[Context]] = Field(None, description="已更新的资源上下文列表")
|
||||
source: Optional[str] = Field("未知拦截源", description="拦截源")
|
||||
|
||||
|
||||
class ResourceDownloadEventData(ChainEventData):
|
||||
"""
|
||||
ResourceDownload 事件的数据模型
|
||||
|
||||
Attributes:
|
||||
# 输入参数
|
||||
context (Context): 当前资源上下文
|
||||
episodes (Set[int]): 需要下载的集数
|
||||
channel (MessageChannel): 通知渠道
|
||||
origin (str): 来源(消息通知、Subscribe、Manual等)
|
||||
downloader (str): 下载器
|
||||
options (dict): 其他参数
|
||||
|
||||
# 输出参数
|
||||
cancel (bool): 是否取消下载,默认值为 False
|
||||
source (str): 拦截源,默认值为 "未知拦截源"
|
||||
reason (str): 拦截原因,描述拦截的具体原因
|
||||
"""
|
||||
# 输入参数
|
||||
context: Any = Field(None, description="当前资源上下文")
|
||||
episodes: Optional[Set[int]] = Field(None, description="需要下载的集数")
|
||||
channel: Optional[MessageChannel] = Field(None, description="通知渠道")
|
||||
origin: Optional[str] = Field(None, description="来源")
|
||||
downloader: Optional[str] = Field(None, description="下载器")
|
||||
options: Optional[dict] = Field(None, description="其他参数")
|
||||
|
||||
# 输出参数
|
||||
cancel: bool = Field(False, description="是否取消下载")
|
||||
source: str = Field("未知拦截源", description="拦截源")
|
||||
reason: str = Field("", description="拦截原因")
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
from typing import Optional, Any
|
||||
from typing import Optional, Any, Union, Dict
|
||||
|
||||
from pydantic import BaseModel
|
||||
|
||||
@@ -35,7 +35,7 @@ class Site(BaseModel):
|
||||
# 备注
|
||||
note: Optional[Any] = None
|
||||
# 超时时间
|
||||
timeout: Optional[int] = 0
|
||||
timeout: Optional[int] = 15
|
||||
# 流控单位周期
|
||||
limit_interval: Optional[int] = None
|
||||
# 流控次数
|
||||
@@ -44,6 +44,8 @@ class Site(BaseModel):
|
||||
limit_seconds: Optional[int] = None
|
||||
# 是否启用
|
||||
is_active: Optional[bool] = True
|
||||
# 下载器
|
||||
downloader: Optional[str] = None
|
||||
|
||||
class Config:
|
||||
orm_mode = True
|
||||
@@ -75,7 +77,7 @@ class SiteUserData(BaseModel):
|
||||
# 用户名
|
||||
username: Optional[str]
|
||||
# 用户ID
|
||||
userid: Optional[int]
|
||||
userid: Optional[Union[int, str]]
|
||||
# 用户等级
|
||||
user_level: Optional[str]
|
||||
# 加入时间
|
||||
@@ -108,3 +110,8 @@ class SiteUserData(BaseModel):
|
||||
updated_day: Optional[str] = None
|
||||
# 更新时间
|
||||
updated_time: Optional[str] = None
|
||||
|
||||
|
||||
class SiteAuth(BaseModel):
|
||||
site: Optional[str] = None
|
||||
params: Optional[Dict[str, Union[int, str]]] = {}
|
||||
|
||||
@@ -54,6 +54,8 @@ class Subscribe(BaseModel):
|
||||
username: Optional[str] = None
|
||||
# 订阅站点
|
||||
sites: Optional[List[int]] = []
|
||||
# 下载器
|
||||
downloader: Optional[str] = None
|
||||
# 是否洗版
|
||||
best_version: Optional[int] = 0
|
||||
# 当前优先级
|
||||
|
||||
@@ -83,7 +83,7 @@ class StorageConf(BaseModel):
|
||||
"""
|
||||
存储配置
|
||||
"""
|
||||
# 类型 local/alipan/u115/rclone
|
||||
# 类型 local/alipan/u115/rclone/alist
|
||||
type: Optional[str] = None
|
||||
# 名称
|
||||
name: Optional[str] = None
|
||||
|
||||
@@ -10,6 +10,7 @@ class TransferTorrent(BaseModel):
|
||||
"""
|
||||
待转移任务信息
|
||||
"""
|
||||
downloader: Optional[str] = None
|
||||
title: Optional[str] = None
|
||||
path: Optional[Path] = None
|
||||
hash: Optional[str] = None
|
||||
@@ -22,6 +23,7 @@ class DownloadingTorrent(BaseModel):
|
||||
"""
|
||||
下载中任务信息
|
||||
"""
|
||||
downloader: Optional[str] = None
|
||||
hash: Optional[str] = None
|
||||
title: Optional[str] = None
|
||||
name: Optional[str] = None
|
||||
|
||||
@@ -60,14 +60,20 @@ class EventType(Enum):
|
||||
|
||||
# 同步链式事件
|
||||
class ChainEventType(Enum):
|
||||
# 名称识别请求
|
||||
# 名称识别
|
||||
NameRecognize = "name.recognize"
|
||||
# 认证验证请求
|
||||
# 认证验证
|
||||
AuthVerification = "auth.verification"
|
||||
# 认证拦截请求
|
||||
# 认证拦截
|
||||
AuthIntercept = "auth.intercept"
|
||||
# 命令注册请求
|
||||
# 命令注册
|
||||
CommandRegister = "command.register"
|
||||
# 整理重命名
|
||||
TransferRename = "transfer.rename"
|
||||
# 资源选择
|
||||
ResourceSelection = "resource.selection"
|
||||
# 资源下载
|
||||
ResourceDownload = "resource.download"
|
||||
|
||||
|
||||
# 系统配置Key字典
|
||||
@@ -122,6 +128,8 @@ class SystemConfigKey(Enum):
|
||||
DefaultMovieSubscribeConfig = "DefaultMovieSubscribeConfig"
|
||||
# 默认电视剧订阅规则
|
||||
DefaultTvSubscribeConfig = "DefaultTvSubscribeConfig"
|
||||
# 用户站点认证参数
|
||||
UserSiteAuthParams = "UserSiteAuthParams"
|
||||
|
||||
|
||||
# 处理进度Key字典
|
||||
@@ -174,6 +182,52 @@ class MessageChannel(Enum):
|
||||
WebPush = "WebPush"
|
||||
|
||||
|
||||
# 下载器类型
|
||||
class DownloaderType(Enum):
|
||||
# Qbittorrent
|
||||
Qbittorrent = "Qbittorrent"
|
||||
# Transmission
|
||||
Transmission = "Transmission"
|
||||
# Aria2
|
||||
# Aria2 = "Aria2"
|
||||
|
||||
|
||||
# 媒体服务器类型
|
||||
class MediaServerType(Enum):
|
||||
# Emby
|
||||
Emby = "Emby"
|
||||
# Jellyfin
|
||||
Jellyfin = "Jellyfin"
|
||||
# Plex
|
||||
Plex = "Plex"
|
||||
|
||||
|
||||
# 识别器类型
|
||||
class MediaRecognizeType(Enum):
|
||||
# 豆瓣
|
||||
Douban = "豆瓣"
|
||||
# TMDB
|
||||
TMDB = "TheMovieDb"
|
||||
# TVDB
|
||||
TVDB = "TheTvDb"
|
||||
# bangumi
|
||||
Bangumi = "Bangumi"
|
||||
|
||||
|
||||
# 其他杂项模块类型
|
||||
class OtherModulesType(Enum):
|
||||
# 字幕
|
||||
Subtitle = "站点字幕"
|
||||
# Fanart
|
||||
Fanart = "Fanart"
|
||||
# 文件整理
|
||||
FileManager = "文件整理"
|
||||
# 过滤器
|
||||
Filter = "过滤器"
|
||||
# 站点索引
|
||||
Indexer = "站点索引"
|
||||
|
||||
|
||||
# 用户配置Key字典
|
||||
class UserConfigKey(Enum):
|
||||
# 监控面板
|
||||
@@ -187,6 +241,7 @@ class StorageSchema(Enum):
|
||||
Alipan = "alipan"
|
||||
U115 = "u115"
|
||||
Rclone = "rclone"
|
||||
Alist = "alist"
|
||||
|
||||
|
||||
# 模块类型
|
||||
|
||||
@@ -23,7 +23,9 @@ from app.helper.message import MessageHelper
|
||||
from app.scheduler import Scheduler
|
||||
from app.monitor import Monitor
|
||||
from app.schemas import Notification, NotificationType
|
||||
from app.schemas.types import SystemConfigKey
|
||||
from app.db import close_database
|
||||
from app.db.systemconfig_oper import SystemConfigOper
|
||||
from app.chain.command import CommandChain
|
||||
|
||||
|
||||
@@ -72,6 +74,19 @@ def clear_temp():
|
||||
SystemUtils.clear(settings.CACHE_PATH / "images", days=7)
|
||||
|
||||
|
||||
def user_auth():
|
||||
"""
|
||||
用户认证检查
|
||||
"""
|
||||
if SitesHelper().auth_level >= 2:
|
||||
return
|
||||
auth_conf = SystemConfigOper().get(SystemConfigKey.UserSiteAuthParams)
|
||||
if auth_conf:
|
||||
SitesHelper().check_user(**auth_conf)
|
||||
else:
|
||||
SitesHelper().check_user()
|
||||
|
||||
|
||||
def check_auth():
|
||||
"""
|
||||
检查认证状态
|
||||
@@ -128,6 +143,8 @@ def start_modules(_: FastAPI):
|
||||
SitesHelper()
|
||||
# 资源包检测
|
||||
ResourceHelper()
|
||||
# 用户认证
|
||||
user_auth()
|
||||
# 加载模块
|
||||
ModuleManager()
|
||||
# 启动事件消费
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
import re
|
||||
from typing import Any, Optional, Union
|
||||
|
||||
import chardet
|
||||
import requests
|
||||
import urllib3
|
||||
from requests import Response, Session
|
||||
@@ -273,3 +275,108 @@ class RequestUtils:
|
||||
cache_headers["Cache-Control"] = f"max-age={max_age}"
|
||||
|
||||
return cache_headers
|
||||
|
||||
@staticmethod
|
||||
def detect_encoding_from_html_response(response: Response,
|
||||
performance_mode: bool = False, confidence_threshold: float = 0.8):
|
||||
"""
|
||||
根据HTML响应内容探测编码信息
|
||||
|
||||
:param response: HTTP 响应对象
|
||||
:param performance_mode: 是否使用性能模式,默认为 False (兼容模式)
|
||||
:param confidence_threshold: chardet 检测置信度阈值,默认为 0.8
|
||||
:return: 解析得到的字符编码
|
||||
"""
|
||||
fallback_encoding = None
|
||||
try:
|
||||
if not performance_mode:
|
||||
# 兼容模式:使用chardet分析后,再处理 BOM 和 meta 信息
|
||||
# 1. 使用 chardet 库进一步分析内容
|
||||
detection = chardet.detect(response.content)
|
||||
if detection["confidence"] > confidence_threshold:
|
||||
return detection.get("encoding")
|
||||
# 保存 chardet 的结果备用
|
||||
fallback_encoding = detection.get("encoding")
|
||||
|
||||
# 2. 检查响应体中的 BOM 标记(例如 UTF-8 BOM)
|
||||
if response.content[:3] == b"\xef\xbb\xbf": # UTF-8 BOM
|
||||
return "utf-8"
|
||||
|
||||
# 3. 如果是 HTML 响应体,检查其中的 <meta charset="..."> 标签
|
||||
if re.search(r"charset=[\"']?utf-8[\"']?", response.text, re.IGNORECASE):
|
||||
return "utf-8"
|
||||
|
||||
# 4. 尝试从 response headers 中获取编码信息
|
||||
content_type = response.headers.get("Content-Type", "")
|
||||
if re.search(r"charset=[\"']?utf-8[\"']?", content_type, re.IGNORECASE):
|
||||
return "utf-8"
|
||||
|
||||
else:
|
||||
# 性能模式:优先从 headers 和 BOM 标记获取,最后使用 chardet 分析
|
||||
# 1. 尝试从 response headers 中获取编码信息
|
||||
content_type = response.headers.get("Content-Type", "")
|
||||
if re.search(r"charset=[\"']?utf-8[\"']?", content_type, re.IGNORECASE):
|
||||
return "utf-8"
|
||||
# 暂不支持直接提取字符集,仅提取UTF8
|
||||
# match = re.search(r"charset=[\"']?([^\"';\s]+)", content_type, re.IGNORECASE)
|
||||
# if match:
|
||||
# return match.group(1)
|
||||
|
||||
# 2. 检查响应体中的 BOM 标记(例如 UTF-8 BOM)
|
||||
if response.content[:3] == b"\xef\xbb\xbf":
|
||||
return "utf-8"
|
||||
|
||||
# 3. 如果是 HTML 响应体,检查其中的 <meta charset="..."> 标签
|
||||
if re.search(r"charset=[\"']?utf-8[\"']?", response.text, re.IGNORECASE):
|
||||
return "utf-8"
|
||||
# 暂不支持直接提取字符集,仅提取UTF8
|
||||
# match = re.search(r"<meta[^>]+charset=[\"']?([^\"'>\s]+)", response.text, re.IGNORECASE)
|
||||
# if match:
|
||||
# return match.group(1)
|
||||
|
||||
# 4. 使用 chardet 库进一步分析内容
|
||||
detection = chardet.detect(response.content)
|
||||
if detection.get("confidence", 0) > confidence_threshold:
|
||||
return detection.get("encoding")
|
||||
# 保存 chardet 的结果备用
|
||||
fallback_encoding = detection.get("encoding")
|
||||
|
||||
# 5. 如果上述方法都无法确定,信任 chardet 的结果(即使置信度较低),否则返回默认字符集
|
||||
return fallback_encoding or "utf-8"
|
||||
except Exception as e:
|
||||
logger.debug(f"Error when detect_encoding_from_response: {str(e)}")
|
||||
return fallback_encoding or "utf-8"
|
||||
|
||||
@staticmethod
|
||||
def get_decoded_html_content(response: Response,
|
||||
performance_mode: bool = False, confidence_threshold: float = 0.8) -> str:
|
||||
"""
|
||||
获取HTML响应的解码文本内容
|
||||
|
||||
:param response: HTTP 响应对象
|
||||
:param performance_mode: 是否使用性能模式,默认为 False (兼容模式)
|
||||
:param confidence_threshold: chardet 检测置信度阈值,默认为 0.8
|
||||
:return: 解码后的响应文本内容
|
||||
"""
|
||||
try:
|
||||
if not response:
|
||||
return ""
|
||||
if response.content:
|
||||
# 1. 获取编码信息
|
||||
encoding = (RequestUtils.detect_encoding_from_html_response(response, performance_mode,
|
||||
confidence_threshold)
|
||||
or response.apparent_encoding)
|
||||
# 2. 根据解析得到的编码进行解码
|
||||
try:
|
||||
# 尝试用推测的编码解码
|
||||
return response.content.decode(encoding)
|
||||
except Exception as e:
|
||||
logger.debug(f"Decoding failed, error message: {str(e)}")
|
||||
# 如果解码失败,尝试 fallback 使用 apparent_encoding
|
||||
response.encoding = response.apparent_encoding
|
||||
return response.text
|
||||
else:
|
||||
return response.text
|
||||
except Exception as e:
|
||||
logger.debug(f"Error when getting decoded content: {str(e)}")
|
||||
return response.text
|
||||
|
||||
@@ -24,7 +24,8 @@ class SiteUtils:
|
||||
' or contains(@data-url, "logout")'
|
||||
' or contains(@href, "mybonus") '
|
||||
' or contains(@onclick, "logout")'
|
||||
' or contains(@href, "usercp")]',
|
||||
' or contains(@href, "usercp")'
|
||||
' or contains(@lay-on, "logout")]',
|
||||
'//form[contains(@action, "logout")]',
|
||||
'//div[@class="user-info-side"]',
|
||||
'//a[@id="myitem"]'
|
||||
|
||||
@@ -275,6 +275,10 @@ class SystemUtils:
|
||||
# 遍历目录
|
||||
for path in directory.iterdir():
|
||||
if path.is_dir():
|
||||
if not SystemUtils.is_windows() and path.name.startswith("."):
|
||||
continue
|
||||
if path.name == "@eaDir":
|
||||
continue
|
||||
dirs.append(path)
|
||||
|
||||
return dirs
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
import mimetypes
|
||||
from pathlib import Path
|
||||
from typing import Optional, Union
|
||||
from urllib import parse
|
||||
from urllib.parse import parse_qs, urlencode, urljoin, urlparse, urlunparse
|
||||
|
||||
from app.log import logger
|
||||
@@ -95,3 +96,14 @@ class UrlUtils:
|
||||
except Exception as e:
|
||||
logger.debug(f"Error get_mime_type: {e}")
|
||||
return default_type
|
||||
|
||||
@staticmethod
|
||||
def quote(s: str) -> str:
|
||||
"""
|
||||
将字符串编码为 URL 安全的格式
|
||||
这将确保路径中的特殊字符(如空格、中文字符等)被正确编码,以便在 URL 中传输
|
||||
|
||||
:param s: 要编码的字符串
|
||||
:return: 编码后的字符串
|
||||
"""
|
||||
return parse.quote(s)
|
||||
|
||||
@@ -64,7 +64,7 @@ def upgrade() -> None:
|
||||
},
|
||||
{
|
||||
"type": "rclone",
|
||||
"name": "Rclone网盘",
|
||||
"name": "RClone",
|
||||
"config": {}
|
||||
}
|
||||
])
|
||||
|
||||
40
database/versions/a295e41830a6_2_0_6.py
Normal file
40
database/versions/a295e41830a6_2_0_6.py
Normal file
@@ -0,0 +1,40 @@
|
||||
"""2.0.6
|
||||
|
||||
Revision ID: a295e41830a6
|
||||
Revises: ecf3c693fdf3
|
||||
Create Date: 2024-11-14 12:49:13.838120
|
||||
|
||||
"""
|
||||
from alembic import op
|
||||
import sqlalchemy as sa
|
||||
from sqlalchemy.dialects import sqlite
|
||||
|
||||
from app.db.systemconfig_oper import SystemConfigOper
|
||||
from app.schemas.types import SystemConfigKey
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
revision = 'a295e41830a6'
|
||||
down_revision = 'ecf3c693fdf3'
|
||||
branch_labels = None
|
||||
depends_on = None
|
||||
|
||||
|
||||
def upgrade() -> None:
|
||||
# ### commands auto generated by Alembic - please adjust! ###
|
||||
# 初始化AList存储
|
||||
_systemconfig = SystemConfigOper()
|
||||
_storages = _systemconfig.get(SystemConfigKey.Storages)
|
||||
if _storages:
|
||||
if "alist" not in [storage["type"] for storage in _storages]:
|
||||
_storages.append({
|
||||
"type": "alist",
|
||||
"name": "AList",
|
||||
"config": {}
|
||||
})
|
||||
_systemconfig.set(SystemConfigKey.Storages, _storages)
|
||||
|
||||
# ### end Alembic commands ###
|
||||
|
||||
|
||||
def downgrade() -> None:
|
||||
pass
|
||||
31
database/versions/eaf9cbc49027_2_0_7.py
Normal file
31
database/versions/eaf9cbc49027_2_0_7.py
Normal file
@@ -0,0 +1,31 @@
|
||||
"""2.0.7
|
||||
|
||||
Revision ID: eaf9cbc49027
|
||||
Revises: a295e41830a6
|
||||
Create Date: 2024-11-16 00:26:09.505188
|
||||
|
||||
"""
|
||||
import contextlib
|
||||
|
||||
from alembic import op
|
||||
import sqlalchemy as sa
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
revision = 'eaf9cbc49027'
|
||||
down_revision = 'a295e41830a6'
|
||||
branch_labels = None
|
||||
depends_on = None
|
||||
|
||||
|
||||
def upgrade() -> None:
|
||||
# ### commands auto generated by Alembic - please adjust! ###
|
||||
# 站点管理、订阅增加下载器选项
|
||||
with contextlib.suppress(Exception):
|
||||
op.add_column('site', sa.Column('downloader', sa.String(), nullable=True))
|
||||
op.add_column('subscribe', sa.Column('downloader', sa.String(), nullable=True))
|
||||
|
||||
|
||||
# ### end Alembic commands ###
|
||||
|
||||
def downgrade() -> None:
|
||||
pass
|
||||
@@ -3,7 +3,8 @@
|
||||
# shellcheck disable=SC2016
|
||||
|
||||
# 使用 `envsubst` 将模板文件中的 ${NGINX_PORT} 替换为实际的环境变量值
|
||||
envsubst '${NGINX_PORT}${PORT}' < /etc/nginx/nginx.template.conf > /etc/nginx/nginx.conf
|
||||
export NGINX_CLIENT_MAX_BODY_SIZE=${NGINX_CLIENT_MAX_BODY_SIZE:-10m}
|
||||
envsubst '${NGINX_PORT}${PORT}${NGINX_CLIENT_MAX_BODY_SIZE}' < /etc/nginx/nginx.template.conf > /etc/nginx/nginx.conf
|
||||
# 自动更新
|
||||
cd /
|
||||
/usr/local/bin/mp_update
|
||||
|
||||
@@ -17,6 +17,8 @@ http {
|
||||
|
||||
keepalive_timeout 3600;
|
||||
|
||||
client_max_body_size ${NGINX_CLIENT_MAX_BODY_SIZE};
|
||||
|
||||
gzip on;
|
||||
gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript;
|
||||
gzip_proxied any;
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user