Compare commits

...

142 Commits

Author SHA1 Message Date
jxxghp
cde4db1a56 v2.1.2 2024-12-06 15:55:56 +08:00
jxxghp
29ae910953 fix build 2024-12-06 12:31:29 +08:00
jxxghp
314f90cc40 upgrade python-115 2024-12-06 12:30:13 +08:00
jxxghp
1c22e3d024 Merge pull request #3337 from InfinityPacer/feature/subscribe
feat(event): add ResourceDownload event for cancel download
2024-12-06 11:17:34 +08:00
InfinityPacer
233d62479f feat(event): add options to ResourceDownloadEventData 2024-12-06 10:47:56 +08:00
jxxghp
6974f2ebd7 Merge pull request #3335 from mackerel-12138/fix_scraper 2024-12-06 06:53:24 +08:00
InfinityPacer
c030166cf5 feat(event): send events for resource download based on source 2024-12-06 02:08:36 +08:00
InfinityPacer
4c511eaea6 chore(event): update ResourceDownloadEventData comment 2024-12-06 02:06:00 +08:00
InfinityPacer
6e443a1127 feat(event): add ResourceDownload event for cancel download 2024-12-06 01:55:44 +08:00
InfinityPacer
896e473c41 fix(event): filter and handle only enabled event handlers 2024-12-06 01:54:51 +08:00
zhanglijun
12f10ebedf fix: 音轨文件重命名整理 2024-12-06 00:40:38 +08:00
jxxghp
ba9f85747c Merge pull request #3330 from InfinityPacer/feature/subscribe 2024-12-05 17:10:47 +08:00
InfinityPacer
2954c02a7c feat(subscribe): add subscription status update API 2024-12-05 16:24:05 +08:00
InfinityPacer
312e602f12 feat(subscribe): add Pending and Suspended subscription states 2024-12-05 16:22:09 +08:00
InfinityPacer
ed37fcbb07 feat(subscribe): update get_by_state to handle multiple states 2024-12-05 16:20:14 +08:00
jxxghp
6acf8fbf00 Merge pull request #3324 from InfinityPacer/feature/subscribe 2024-12-05 06:54:45 +08:00
InfinityPacer
a1e178c805 feat(event): add ResourceSelection event for update resource contexts 2024-12-04 20:21:57 +08:00
jxxghp
922e2fc446 Merge pull request #3323 from Aqr-K/feat-module 2024-12-04 18:19:15 +08:00
jxxghp
db4c8cb3f2 Merge pull request #3322 from InfinityPacer/feature/subscribe 2024-12-04 18:18:32 +08:00
Aqr-K
1c578746fe fix(module): 补全 indexer 缺少 get_subtype 方法
- 补全 `indexer` 缺少 `get_subtype` 方法。
- 增加 `get_running_subtype_module` 方法,可结合 `types` 快速获取单个运行中的 `module` 。
2024-12-04 18:14:56 +08:00
InfinityPacer
68f88117b6 feat(events): add episodes field to DownloadAdded event for unpack 2024-12-04 16:11:35 +08:00
jxxghp
108c0a89f6 Merge pull request #3320 from InfinityPacer/feature/subscribe 2024-12-04 12:18:19 +08:00
InfinityPacer
92dacdf6a2 fix(subscribe): add RLock to prevent duplicate subscription downloads 2024-12-04 11:07:45 +08:00
InfinityPacer
6aa684d6a5 fix(subscribe): handle case when no subscriptions are found 2024-12-04 11:03:32 +08:00
InfinityPacer
efece8cc56 fix(subscribe): add check for None before updating subscription 2024-12-04 10:27:33 +08:00
jxxghp
383c8ca19a Merge pull request #3313 from Aqr-K/feat-module 2024-12-03 18:09:49 +08:00
jxxghp
0a73681280 Merge pull request #3315 from InfinityPacer/feature/scheduler 2024-12-03 18:09:23 +08:00
InfinityPacer
c1ecda280c fix #3312 2024-12-03 17:33:00 +08:00
Aqr-K
825fc35134 feat(modules): 增加子级 type 分类。
- 在 `types` 里,针对各个模块的类型进行子级分类。
- 为每个模块统一添加 `get_subtype` 方法,这样一来,能更精准快速地区分与调用子类的每个模块,又能获取 ModuleType 所规定的分类以及对应存在的子模块类型支持列表,从而有效解决当下调用时需繁琐遍历每个 module 以获取 get_name 或 _channel 的问题。
- 解决因消息渠道前端返回所保存的 type 与后端规定值不一致,而需要频繁调用 _channel 私有方法才能获取分类所可能产生的问题。
2024-12-03 14:57:19 +08:00
jxxghp
8f543ca602 Merge pull request #3309 from yxlimo/tmdbid-for-downloader 2024-12-03 06:55:36 +08:00
yxlimo
f0ecc1a497 fix: return last record when get downloadhistory by hash 2024-12-02 22:55:57 +08:00
jxxghp
71f170a1ad Merge pull request #3293 from wikrin/v2 2024-12-01 10:23:51 +08:00
Attente
3709b65b0e fix(api): correct variable reference in media scraping logic
- Change incorrect reference from media_info to mediainfo
2024-12-01 03:40:30 +08:00
jxxghp
9d6eb0f1e1 Merge pull request #3291 from mackerel-12138/fix_scraper 2024-11-30 16:06:04 +08:00
jxxghp
c93306147b Merge pull request #3290 from mackerel-12138/fix_poster 2024-11-30 16:05:11 +08:00
zhanglijun
5e8f924a2f fix: 修复指定tmdbid刮削时tmdbid丢失问题 2024-11-30 15:57:47 +08:00
zhanglijun
54988d6397 fix: 修复fanart季图片下载缺失/错误的问题 2024-11-30 13:51:30 +08:00
jxxghp
112761dc4c Merge pull request #3287 from InfinityPacer/feature/security 2024-11-30 07:15:52 +08:00
InfinityPacer
ef20508840 feat(auth): handle service instance retrieval with proper null check 2024-11-30 01:14:36 +08:00
InfinityPacer
589a1765ed feat(auth): support specifying service for authentication 2024-11-30 01:04:48 +08:00
jxxghp
2c666e24f3 Merge pull request #3283 from InfinityPacer/feature/subscribe 2024-11-29 21:12:25 +08:00
InfinityPacer
168e3c5533 fix(subscribe): move state update to finally to prevent duplicates 2024-11-29 18:56:19 +08:00
jxxghp
cda8b2573a Merge pull request #3282 from InfinityPacer/feature/subscribe 2024-11-29 16:47:56 +08:00
InfinityPacer
4cb4eb23b8 fix(subscribe): prevent fallback to search rules if not defined 2024-11-29 16:15:37 +08:00
jxxghp
f208b65570 更新 version.py 2024-11-29 08:59:55 +08:00
jxxghp
8a0a530036 Merge pull request #3279 from wikrin/v2 2024-11-29 07:36:34 +08:00
Attente
76643f13ed Update system.py 2024-11-29 07:33:02 +08:00
Attente
6992284a77 fix(api): 修复规则测试未获取到媒体信息导致的过滤失败问题 2024-11-29 07:25:08 +08:00
jxxghp
9a142799cd Merge pull request #3274 from InfinityPacer/feature/encoding 2024-11-28 17:29:22 +08:00
InfinityPacer
027d1567c3 feat(encoding): set PERFORMANCE_MODE to enabled by default 2024-11-28 17:07:14 +08:00
jxxghp
65af737dfd Merge pull request #3272 from wikrin/transfer 2024-11-28 07:23:25 +08:00
jxxghp
48aa0e3d0b Merge pull request #3271 from wikrin/v2 2024-11-28 07:22:16 +08:00
jxxghp
b4e31893ff Merge pull request #3268 from mackerel-12138/fix_scraper 2024-11-28 07:21:43 +08:00
Attente
4f1b95352a 改进手动整理逻辑 关联后端 jxxghp/MoviePilot-Frontend#255 2024-11-28 05:39:26 +08:00
Attente
ca664cb569 fix: 修复批量整理时媒体库目录匹配不正确的问题 2024-11-28 05:19:09 +08:00
zhanglijun
fe4ea73286 修复季nfo刮削错误, 优化季标题取值 2024-11-27 23:27:08 +08:00
jxxghp
9e9cca6de4 Merge pull request #3262 from InfinityPacer/feature/encoding 2024-11-27 16:25:46 +08:00
InfinityPacer
2e7e74c803 feat(encoding): update configuration to performance mode 2024-11-27 13:52:17 +08:00
InfinityPacer
916597047d Merge branch 'v2' of https://github.com/jxxghp/MoviePilot into feature/encoding 2024-11-27 12:52:01 +08:00
InfinityPacer
83fc474dbe feat(encoding): enhance encoding detection with confidence threshold 2024-11-27 12:33:57 +08:00
jxxghp
f67bf49e69 Merge pull request #3255 from InfinityPacer/feature/event 2024-11-27 06:59:54 +08:00
jxxghp
bf9043f526 Merge pull request #3254 from mackerel-12138/v2 2024-11-27 06:58:47 +08:00
InfinityPacer
a98de604a1 refactor(event): rename SmartRename to TransferRename 2024-11-27 00:50:34 +08:00
InfinityPacer
e160a745a7 fix(event): correct visualize_handlers 2024-11-27 00:49:37 +08:00
zhanglijun
7f2c6ef167 fix: 增加入参判断 2024-11-26 22:25:42 +08:00
jxxghp
2086651dbe Merge pull request #3235 from wikrin/fix 2024-11-26 22:17:32 +08:00
zhanglijun
132fde2308 修复季海报下载路径和第0季海报命名 2024-11-26 22:01:00 +08:00
jxxghp
4e27a1e623 fix #3247 2024-11-26 08:25:01 +08:00
Attente
a453831deb get_dir增加入参
- `include_unsorted`用于表示可否`包含`整理方式`为`不整理`的目录配置
2024-11-26 03:11:25 +08:00
jxxghp
1035ceb4ac Merge pull request #3245 from wikrin/v2 2024-11-25 23:04:15 +08:00
Attente
b7cb917347 fix(transfer): add library type and category folder support
- Add library_type_folder and library_category_folder parameters to the transfer function
- This enhances the transfer functionality by allowing sorting files into folders based on library type and category
2024-11-25 23:02:17 +08:00
jxxghp
680ad164dc Merge pull request #3236 from InfinityPacer/feature/scheduler 2024-11-25 17:54:05 +08:00
InfinityPacer
aed68253e9 feat(scheduler): expose internal methods for external invocation 2024-11-25 16:33:17 +08:00
InfinityPacer
b83c7a5656 feat(scheduler): support plugin method arguments via func_kwargs 2024-11-25 16:31:30 +08:00
InfinityPacer
491456b0a2 feat(scheduler): support plugin replacement for system services 2024-11-25 16:30:11 +08:00
Attente
84465a6536 不整理目录的下载路径可以被下载器获取
修改自动匹配源存储器类型入参
2024-11-25 13:51:04 +08:00
jxxghp
9acbcf4922 v2.1.0 2024-11-25 08:05:07 +08:00
jxxghp
8dc4290695 fix scrape bug 2024-11-25 07:58:17 +08:00
jxxghp
5c95945691 Update README.md 2024-11-24 18:16:37 +08:00
jxxghp
11115d50fb fix dockerfile 2024-11-24 18:14:09 +08:00
jxxghp
7f83d56a7e fix alipan 2024-11-24 17:55:08 +08:00
jxxghp
28805e9e17 fix alipan 2024-11-24 17:45:12 +08:00
jxxghp
88a098abc1 fix log 2024-11-24 17:35:04 +08:00
jxxghp
a3cc9830de fix scraping upload 2024-11-24 17:25:42 +08:00
jxxghp
43623efa99 fix log 2024-11-24 17:19:24 +08:00
jxxghp
ff73b2cb5d fix #3203 2024-11-24 17:11:19 +08:00
jxxghp
6cab14366c Merge pull request #3228 from YemaPT/fix-yemapt-taglist-none 2024-11-24 16:24:38 +08:00
yemapt
576d215d8c fix(yemapt): judge tag list none 2024-11-24 16:22:54 +08:00
jxxghp
a2c10c86bf Merge pull request #3226 from YemaPT/feature-yemapt-optimize 2024-11-24 14:08:04 +08:00
yemapt
21bede3f00 feat(yemapt): update search api and enrich torrent content 2024-11-24 13:45:31 +08:00
jxxghp
0a39322281 Merge pull request #3224 from wikrin/v2 2024-11-24 10:32:47 +08:00
Attente
be323d3da1 fix: 减少入参扩大适用范围 2024-11-24 10:22:29 +08:00
jxxghp
fa8860bf62 Merge pull request #3223 from wikrin/v2
fix: 入参错误
2024-11-24 08:56:58 +08:00
Attente
a700958edb fix: 入参错误 2024-11-24 08:54:59 +08:00
jxxghp
9349973d16 Merge pull request #3221 from wikrin/v2 2024-11-24 07:34:42 +08:00
Attente
c0d3637d12 refactor: change library type and category folder parameters to optional 2024-11-24 00:04:08 +08:00
jxxghp
79473ca229 Merge pull request #3196 from wikrin/fix 2024-11-23 23:01:09 +08:00
Attente
fccbe39547 修改target_directory获取逻辑 2024-11-23 22:41:55 +08:00
Attente
85324acacc 下载流程中get_dir()添加storage="local"入参 2024-11-23 22:41:55 +08:00
Attente
9dec4d704b get_dir去除fileitem参数
- 和`src_path & storage`重复, 需要的话直接传入这两项
2024-11-23 22:41:55 +08:00
jxxghp
72732277a1 fix alipan 2024-11-23 21:54:03 +08:00
jxxghp
8d737f9e37 fix alipan && rclone get_folder 2024-11-23 21:43:53 +08:00
jxxghp
96b3746caa fix alist delete 2024-11-23 21:29:08 +08:00
jxxghp
c690ea3c39 fix #3214
fix #3199
2024-11-23 21:26:22 +08:00
jxxghp
3282fb88e0 Merge pull request #3219 from mackerel-12138/s0_fix 2024-11-23 20:25:08 +08:00
zhanglijun
b9c2b9a044 重命名格式支持S0重命名为Specials,SPs 2024-11-23 20:22:37 +08:00
zhanglijun
24b58dc002 修复S0刮削问题
修复某些情况下剧集根目录判断错误的问题
2024-11-23 20:13:01 +08:00
jxxghp
42c56497c6 Merge pull request #3218 from DDS-Derek/issue_rfc 2024-11-23 12:34:52 +08:00
jxxghp
c7512d1580 Merge pull request #3217 from DDS-Derek/fix_tmp 2024-11-23 12:34:39 +08:00
jxxghp
7d25bf7b48 Merge pull request #3215 from mackerel-12138/v2 2024-11-23 12:34:04 +08:00
DDSRem
99daa3a95e chore(issue): add rfc template 2024-11-23 12:31:28 +08:00
jxxghp
0a923bced9 fix storage 2024-11-23 12:29:34 +08:00
DDSRem
06e3b0def2 fix(update): useless tmp directory when not updated 2024-11-23 12:25:46 +08:00
jxxghp
0feecc3eca fix #3204 2024-11-23 11:48:23 +08:00
jxxghp
0afbc58263 fix #3191 自动整理时,优先同盘 2024-11-23 11:31:56 +08:00
jxxghp
7c7561029a fix #3178 手动整理时支持选择一二级分类 2024-11-23 11:19:25 +08:00
zhanglijun
65683999e1 change comment 2024-11-23 11:00:37 +08:00
zhanglijun
f72e26015f delete unused code 2024-11-23 10:58:32 +08:00
zhanglijun
b4e5c50655 修复重命名时S0年份为None的问题
增加重命名配置 剧集日期
2024-11-23 10:55:21 +08:00
jxxghp
f395dc68c3 fix #3209 刮削加锁 2024-11-23 10:48:54 +08:00
jxxghp
27cf5bb7e6 feat:远程交互刷新数据时发送统计消息 2024-11-23 10:36:48 +08:00
jxxghp
9b573535cd Merge pull request #3201 from InfinityPacer/feature/event 2024-11-22 16:25:52 +08:00
jxxghp
cb32305b86 Merge pull request #3200 from cddjr/fix_subscribe_search_filter 2024-11-22 14:04:08 +08:00
景大侠
f7164450d0 fix: 将订阅规则过滤前置,避免因imdbid匹配而跳过 2024-11-22 13:47:18 +08:00
InfinityPacer
344862dbd4 feat(event): support smart rename event 2024-11-22 13:41:14 +08:00
InfinityPacer
f1d0e9d50a Revert "fix #3154 相同事件避免并发处理"
This reverts commit 79c637e003.
2024-11-22 12:41:14 +08:00
jxxghp
9ba9e8f41c v2.0.9 2024-11-22 08:11:07 +08:00
jxxghp
78fc5b7017 Merge pull request #3193 from wikrin/fix_any_files 2024-11-22 08:10:12 +08:00
Attente
fe07830b71 fix: 某些情况下误删媒体文件的问题 2024-11-22 07:45:01 +08:00
jxxghp
350f1faf2a Merge pull request #3189 from InfinityPacer/feature/module 2024-11-21 20:16:06 +08:00
InfinityPacer
103cfe0b47 fix(config): ensure accurate handling of env config updates 2024-11-21 20:08:18 +08:00
jxxghp
0953c1be16 Merge pull request #3187 from InfinityPacer/feature/scheduler 2024-11-21 17:43:29 +08:00
InfinityPacer
c299bf6f7c fix(auth): adjust auth to occur before module init 2024-11-21 17:37:48 +08:00
InfinityPacer
c0eb9d824c Revert "fix(auth): initialize plugin service only during retry auth"
This reverts commit 9f4cf530f8.
2024-11-21 16:41:56 +08:00
jxxghp
ebffdebdb2 refactor: 优化缓存策略 2024-11-21 15:52:08 +08:00
jxxghp
acd9e38477 Merge pull request #3186 from InfinityPacer/feature/scheduler 2024-11-21 14:54:01 +08:00
InfinityPacer
9f4cf530f8 fix(auth): initialize plugin service only during retry auth 2024-11-21 14:49:42 +08:00
jxxghp
84897aa592 fix #3162 2024-11-21 13:50:49 +08:00
jxxghp
23c5982f5a Merge pull request #3185 from InfinityPacer/feature/module 2024-11-21 12:42:05 +08:00
InfinityPacer
1849930b72 feat(qb): add support for ignoring category check via kwargs 2024-11-21 12:35:15 +08:00
jxxghp
4f1d3a7572 fix #3180 2024-11-21 12:13:44 +08:00
jxxghp
824c3ac5d6 fix #3176 2024-11-21 10:25:46 +08:00
80 changed files with 2232 additions and 1129 deletions

45
.github/ISSUE_TEMPLATE/rfc.yml vendored Normal file
View File

@@ -0,0 +1,45 @@
name: 功能提案
description: Request for Comments
title: "[RFC]"
labels: ["RFC"]
body:
- type: markdown
attributes:
value: |
一份提案(RFC)定位为 **「在某功能/重构的具体开发前,用于开发者间 review 技术设计/方案的文档」**
目的是让协作的开发者间清晰的知道「要做什么」和「具体会怎么做」,以及所有的开发者都能公开透明的参与讨论;
以便评估和讨论产生的影响 (遗漏的考虑、向后兼容性、与现有功能的冲突)
因此提案侧重在对解决问题的 **方案、设计、步骤** 的描述上。
如果仅希望讨论是否添加或改进某功能本身,请使用 -> [Issue: 功能改进](https://github.com/jxxghp/MoviePilot/issues/new?assignees=&labels=feature+request&projects=&template=feature_request.yml&title=%5BFeature+Request%5D%3A+)
- type: textarea
id: background
attributes:
label: 背景 or 问题
description: 简单描述遇到的什么问题或需要改动什么。可以引用其他 issue、讨论、文档等。
validations:
required: true
- type: textarea
id: goal
attributes:
label: "目标 & 方案简述"
description: 简单描述此提案实现后,**预期的目标效果**,以及简单大致描述会采取的方案/步骤,可能会/不会产生什么影响。
validations:
required: true
- type: textarea
id: design
attributes:
label: "方案设计 & 实现步骤"
description: |
详细描述你设计的具体方案,可以考虑拆分列表或要点,一步步描述具体打算如何实现的步骤和相关细节。
这部分不需要一次性写完整,即使在创建完此提案 issue 后,依旧可以再次编辑修改。
validations:
required: false
- type: textarea
id: alternative
attributes:
label: "替代方案 & 对比"
description: |
[可选] 为了实现目标效果,还考虑过什么其他方案,有什么对比?
validations:
required: false

View File

@@ -5,10 +5,7 @@ on:
branches:
- v2
paths:
- '.github/workflows/build.yml'
- 'Dockerfile'
- 'version.py'
- 'requirements.in'
jobs:
Docker-build:

View File

@@ -10,10 +10,7 @@ ENV LANG="C.UTF-8" \
UMASK=000 \
PORT=3001 \
NGINX_PORT=3000 \
PROXY_HOST="" \
MOVIEPILOT_AUTO_UPDATE=false \
AUTH_SITE="iyuu" \
IYUU_SIGN=""
MOVIEPILOT_AUTO_UPDATE=release
WORKDIR "/app"
RUN apt-get update -y \
&& apt-get upgrade -y \

View File

@@ -6,6 +6,7 @@
![GitHub repo size](https://img.shields.io/github/repo-size/jxxghp/MoviePilot?style=for-the-badge)
![GitHub issues](https://img.shields.io/github/issues/jxxghp/MoviePilot?style=for-the-badge)
![Docker Pulls](https://img.shields.io/docker/pulls/jxxghp/moviepilot?style=for-the-badge)
![Docker Pulls V2](https://img.shields.io/docker/pulls/jxxghp/moviepilot-v2?style=for-the-badge)
![Platform](https://img.shields.io/badge/platform-Windows%20%7C%20Linux%20%7C%20Synology-blue?style=for-the-badge)

View File

@@ -9,7 +9,9 @@ from app.core.context import MediaInfo, Context, TorrentInfo
from app.core.metainfo import MetaInfo
from app.core.security import verify_token
from app.db.models.user import User
from app.db.systemconfig_oper import SystemConfigOper
from app.db.user_oper import get_current_active_user
from app.schemas.types import SystemConfigKey
router = APIRouter()
@@ -49,7 +51,7 @@ def download(
torrent_info=torrentinfo
)
did = DownloadChain().download_single(context=context, username=current_user.name,
downloader=downloader, save_path=save_path)
downloader=downloader, save_path=save_path, source="Manual")
if not did:
return schemas.Response(success=False, message="任务添加失败")
return schemas.Response(success=True, data={
@@ -82,7 +84,7 @@ def add(
torrent_info=torrentinfo
)
did = DownloadChain().download_single(context=context, username=current_user.name,
downloader=downloader, save_path=save_path)
downloader=downloader, save_path=save_path, source="Manual")
if not did:
return schemas.Response(success=False, message="任务添加失败")
return schemas.Response(success=True, data={
@@ -111,6 +113,17 @@ def stop(hashString: str,
return schemas.Response(success=True if ret else False)
@router.get("/clients", summary="查询可用下载器", response_model=List[dict])
def clients(_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
查询可用下载器
"""
downloaders: List[dict] = SystemConfigOper().get(SystemConfigKey.Downloaders)
if downloaders:
return [{"name": d.get("name"), "type": d.get("type")} for d in downloaders if d.get("enabled")]
return []
@router.delete("/{hashString}", summary="删除下载任务", response_model=schemas.Response)
def delete(hashString: str,
_: schemas.TokenPayload = Depends(verify_token)) -> Any:

View File

@@ -111,7 +111,7 @@ def scrape(fileitem: schemas.FileItem,
scrape_path = Path(fileitem.path)
meta = MetaInfoPath(scrape_path)
mediainfo = chain.recognize_by_meta(meta)
if not media_info:
if not mediainfo:
return schemas.Response(success=False, message="刮削失败,无法识别媒体信息")
if storage == "local":
if not scrape_path.exists():

View File

@@ -12,8 +12,10 @@ from app.core.security import verify_token
from app.db import get_db
from app.db.mediaserver_oper import MediaServerOper
from app.db.models import MediaServerItem
from app.db.systemconfig_oper import SystemConfigOper
from app.helper.mediaserver import MediaServerHelper
from app.schemas import MediaType, NotExistMediaInfo
from app.schemas.types import SystemConfigKey
router = APIRouter()
@@ -143,3 +145,14 @@ def library(server: str, hidden: bool = False,
获取媒体服务器媒体库列表
"""
return MediaServerChain().librarys(server=server, username=userinfo.username, hidden=hidden) or []
@router.get("/clients", summary="查询可用媒体服务器", response_model=List[dict])
def clients(_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
查询可用媒体服务器
"""
mediaservers: List[dict] = SystemConfigOper().get(SystemConfigKey.MediaServers)
if mediaservers:
return [{"name": d.get("name"), "type": d.get("type")} for d in mediaservers if d.get("enabled")]
return []

View File

@@ -8,6 +8,7 @@ from app import schemas
from app.chain.site import SiteChain
from app.chain.torrents import TorrentsChain
from app.core.event import EventManager
from app.core.plugin import PluginManager
from app.core.security import verify_token
from app.db import get_db
from app.db.models import User
@@ -351,6 +352,8 @@ def auth_site(
return schemas.Response(success=False, message="请输入认证站点和认证参数")
status, msg = SitesHelper().check_user(auth_info.site, auth_info.params)
SystemConfigOper().set(SystemConfigKey.UserSiteAuthParams, auth_info.dict())
PluginManager().init_config()
Scheduler().init_plugin_jobs()
return schemas.Response(success=status, message=msg)

View File

@@ -124,6 +124,27 @@ def update_subscribe(
return schemas.Response(success=True)
@router.put("/status/{subid}", summary="更新订阅状态", response_model=schemas.Response)
def update_subscribe_status(
subid: int,
state: str,
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
更新订阅状态
"""
subscribe = Subscribe.get(db, subid)
if not subscribe:
return schemas.Response(success=False, message="订阅不存在")
valid_states = ["R", "P", "S"]
if state not in valid_states:
return schemas.Response(success=False, message="无效的订阅状态")
subscribe.update(db, {
"state": state
})
return schemas.Response(success=True)
@router.get("/media/{mediaid}", summary="查询订阅", response_model=schemas.Subscribe)
def subscribe_mediaid(
mediaid: str,

View File

@@ -16,6 +16,7 @@ from app import schemas
from app.chain.search import SearchChain
from app.chain.system import SystemChain
from app.core.config import global_vars, settings
from app.core.metainfo import MetaInfo
from app.core.module import ModuleManager
from app.core.security import verify_apitoken, verify_resource_token, verify_token
from app.db.models import User
@@ -385,9 +386,14 @@ def ruletest(title: str,
if not rulegroup:
return schemas.Response(success=False, message=f"过滤规则组 {rulegroup_name} 不存在!")
# 根据标题查询媒体信息
media_info = SearchChain().recognize_media(MetaInfo(title=title, subtitle=subtitle))
if not media_info:
return schemas.Response(success=False, message="未识别到媒体信息!")
# 过滤
result = SearchChain().filter_torrents(rule_groups=[rulegroup.name],
torrent_list=[torrent])
torrent_list=[torrent], mediainfo=media_info)
if not result:
return schemas.Response(success=False, message="不符合过滤规则!")
return schemas.Response(success=True, data={

View File

@@ -20,22 +20,42 @@ router = APIRouter()
class ManualTransferItem(BaseModel):
fileitem: FileItem = None,
logid: Optional[int] = None,
target_storage: Optional[str] = None,
target_path: Optional[str] = None,
tmdbid: Optional[int] = None,
doubanid: Optional[str] = None,
type_name: Optional[str] = None,
season: Optional[int] = None,
transfer_type: Optional[str] = None,
episode_format: Optional[str] = None,
episode_detail: Optional[str] = None,
episode_part: Optional[str] = None,
episode_offset: Optional[str] = None,
min_filesize: Optional[int] = 0,
scrape: bool = False,
from_history: bool = False
# 文件项
fileitem: FileItem = None
# 日志ID
logid: Optional[int] = None
# 目标存储
target_storage: Optional[str] = None
# 目标路径
target_path: Optional[str] = None
# TMDB ID
tmdbid: Optional[int] = None
# 豆瓣ID
doubanid: Optional[str] = None
# 类型
type_name: Optional[str] = None
# 季号
season: Optional[int] = None
# 整理方式
transfer_type: Optional[str] = None
# 自定义格式
episode_format: Optional[str] = None
# 指定集数
episode_detail: Optional[str] = None
# 指定PART
episode_part: Optional[str] = None
# 集数偏移
episode_offset: Optional[str] = None
# 最小文件大小
min_filesize: Optional[int] = 0
# 刮削
scrape: bool = False
# 媒体库类型子目录
library_type_folder: Optional[bool] = None
# 媒体库类别子目录
library_category_folder: Optional[bool] = None
# 复用历史识别信息
from_history: Optional[bool] = False
@router.get("/name", summary="查询整理后的名称", response_model=schemas.Response)
@@ -148,6 +168,8 @@ def manual_transfer(transer_item: ManualTransferItem,
epformat=epformat,
min_filesize=transer_item.min_filesize,
scrape=transer_item.scrape,
library_type_folder=transer_item.library_type_folder,
library_category_folder=transer_item.library_category_folder,
force=force
)
# 失败

View File

@@ -385,6 +385,7 @@ class ChainBase(metaclass=ABCMeta):
target_directory: TransferDirectoryConf = None,
target_storage: str = None, target_path: Path = None,
transfer_type: str = None, scrape: bool = None,
library_type_folder: bool = None, library_category_folder: bool = None,
episodes_info: List[TmdbEpisode] = None) -> Optional[TransferInfo]:
"""
文件转移
@@ -396,6 +397,8 @@ class ChainBase(metaclass=ABCMeta):
:param target_path: 目标路径
:param transfer_type: 转移模式
:param scrape: 是否刮削元数据
:param library_type_folder: 是否按类型创建目录
:param library_category_folder: 是否按类别创建目录
:param episodes_info: 当前季的全部集信息
:return: {path, target_path, message}
"""
@@ -404,6 +407,8 @@ class ChainBase(metaclass=ABCMeta):
target_directory=target_directory,
target_path=target_path, target_storage=target_storage,
transfer_type=transfer_type, scrape=scrape,
library_type_folder=library_type_folder,
library_category_folder=library_category_folder,
episodes_info=episodes_info)
def transfer_completed(self, hashs: str, downloader: str = None) -> None:

View File

@@ -54,6 +54,11 @@ class CommandChain(ChainBase, metaclass=Singleton):
"description": "更新站点Cookie",
"data": {}
},
"/site_statistic": {
"func": SiteChain().remote_refresh_userdatas,
"description": "站点数据统计",
"data": {}
},
"/site_enable": {
"func": SiteChain().remote_enable,
"description": "启用站点",
@@ -402,7 +407,7 @@ class CommandChain(ChainBase, metaclass=Singleton):
channel=event_channel, source=event_source, userid=event_user)
@eventmanager.register(EventType.ModuleReload)
def module_reload_event(self, event: ManagerEvent) -> None:
def module_reload_event(self, _: ManagerEvent) -> None:
"""
注册模块重载事件
"""

View File

@@ -20,7 +20,8 @@ from app.helper.message import MessageHelper
from app.helper.torrent import TorrentHelper
from app.log import logger
from app.schemas import ExistMediaInfo, NotExistMediaInfo, DownloadingTorrent, Notification
from app.schemas.types import MediaType, TorrentStatus, EventType, MessageChannel, NotificationType
from app.schemas.event import ResourceSelectionEventData, ResourceDownloadEventData
from app.schemas.types import MediaType, TorrentStatus, EventType, MessageChannel, NotificationType, ChainEventType
from app.utils.http import RequestUtils
from app.utils.string import StringUtils
@@ -191,7 +192,7 @@ class DownloadChain(ChainBase):
logger.error(f"下载种子文件失败:{torrent.title} - {torrent_url}")
self.post_message(Notification(
channel=channel,
source=source,
source=source if channel else None,
mtype=NotificationType.Manual,
title=f"{torrent.title} 种子下载失败!",
text=f"错误信息:{error_msg}\n站点:{torrent.site_name}",
@@ -203,7 +204,8 @@ class DownloadChain(ChainBase):
def download_single(self, context: Context, torrent_file: Path = None,
episodes: Set[int] = None,
channel: MessageChannel = None, source: str = None,
channel: MessageChannel = None,
source: str = None,
downloader: str = None,
save_path: str = None,
userid: Union[str, int] = None,
@@ -215,13 +217,38 @@ class DownloadChain(ChainBase):
:param torrent_file: 种子文件路径
:param episodes: 需要下载的集数
:param channel: 通知渠道
:param source: 通知来源
:param source: 来源消息通知、Subscribe、Manual等
:param downloader: 下载器
:param save_path: 保存路径
:param userid: 用户ID
:param username: 调用下载的用户名/插件名
:param media_category: 自定义媒体类别
"""
# 发送资源下载事件,允许外部拦截下载
event_data = ResourceDownloadEventData(
context=context,
episodes=episodes,
channel=channel,
origin=source,
downloader=downloader,
options={
"save_path": save_path,
"userid": userid,
"username": username,
"media_category": media_category
}
)
# 触发资源下载事件
event = eventmanager.send_event(ChainEventType.ResourceDownload, event_data)
if event and event.event_data:
event_data: ResourceDownloadEventData = event.event_data
# 如果事件被取消,跳过资源下载
if event_data.cancel:
logger.debug(
f"Resource download canceled by event: {event_data.source},"
f"Reason: {event_data.reason}")
return None
_torrent = context.torrent_info
_media = context.media_info
_meta = context.meta_info
@@ -256,7 +283,7 @@ class DownloadChain(ChainBase):
download_dir = Path(save_path)
else:
# 根据媒体信息查询下载目录配置
dir_info = self.directoryhelper.get_dir(_media)
dir_info = self.directoryhelper.get_dir(_media, storage="local", include_unsorted=True)
# 拼装子目录
if dir_info:
# 一级目录
@@ -355,7 +382,8 @@ class DownloadChain(ChainBase):
"hash": _hash,
"context": context,
"username": username,
"downloader": _downloader
"downloader": _downloader,
"episodes": episodes
})
else:
# 下载失败
@@ -364,7 +392,7 @@ class DownloadChain(ChainBase):
# 只发送给对应渠道和用户
self.post_message(Notification(
channel=channel,
source=source,
source=source if channel else None,
mtype=NotificationType.Manual,
title="添加下载任务失败:%s %s"
% (_media.title_year, _meta.season_episode),
@@ -392,7 +420,7 @@ class DownloadChain(ChainBase):
:param no_exists: 缺失的剧集信息
:param save_path: 保存路径
:param channel: 通知渠道
:param source: 通知来源
:param source: 来源(消息通知、订阅、手工下载等)
:param userid: 用户ID
:param username: 调用下载的用户名/插件名
:param media_category: 自定义媒体类别
@@ -457,6 +485,21 @@ class DownloadChain(ChainBase):
return 9999
return no_exist[season].total_episode
# 发送资源选择事件,允许外部修改上下文数据
logger.debug(f"Initial contexts: {len(contexts)} items, Downloader: {downloader}")
event_data = ResourceSelectionEventData(
contexts=contexts,
downloader=downloader
)
event = eventmanager.send_event(ChainEventType.ResourceSelection, event_data)
# 如果事件修改了上下文数据,使用更新后的数据
if event and event.event_data:
event_data: ResourceSelectionEventData = event.event_data
if event_data.updated and event_data.updated_contexts is not None:
logger.debug(f"Contexts updated by event: "
f"{len(event_data.updated_contexts)} items (source: {event_data.source})")
contexts = event_data.updated_contexts
# 分组排序
contexts = TorrentHelper().sort_group_torrents(contexts)

View File

@@ -11,12 +11,15 @@ from app.core.event import eventmanager, Event
from app.core.meta import MetaBase
from app.core.metainfo import MetaInfo, MetaInfoPath
from app.log import logger
from app.schemas import FileItem
from app.schemas.types import EventType, MediaType, ChainEventType
from app.utils.http import RequestUtils
from app.utils.singleton import Singleton
from app.utils.string import StringUtils
recognize_lock = Lock()
scraping_lock = Lock()
scraping_files = []
class MediaChain(ChainBase, metaclass=Singleton):
@@ -301,12 +304,23 @@ class MediaChain(ChainBase, metaclass=Singleton):
if not event:
return
event_data = event.event_data or {}
fileitem = event_data.get("fileitem")
meta = event_data.get("meta")
mediainfo = event_data.get("mediainfo")
fileitem: FileItem = event_data.get("fileitem")
meta: MetaBase = event_data.get("meta")
mediainfo: MediaInfo = event_data.get("mediainfo")
if not fileitem:
return
self.scrape_metadata(fileitem=fileitem, meta=meta, mediainfo=mediainfo)
# 刮削锁
with scraping_lock:
if fileitem.path in scraping_files:
return
scraping_files.append(fileitem.path)
try:
# 执行刮削
self.scrape_metadata(fileitem=fileitem, meta=meta, mediainfo=mediainfo)
finally:
# 释放锁
with scraping_lock:
scraping_files.remove(fileitem.path)
def scrape_metadata(self, fileitem: schemas.FileItem,
meta: MetaBase = None, mediainfo: MediaInfo = None,
@@ -322,6 +336,20 @@ class MediaChain(ChainBase, metaclass=Singleton):
:param overwrite: 是否覆盖已有文件
"""
def is_bluray_folder(_fileitem: schemas.FileItem) -> bool:
"""
判断是否为原盘目录
"""
if not _fileitem or _fileitem.type != "dir":
return False
# 蓝光原盘目录必备的文件或文件夹
required_files = ['BDMV', 'CERTIFICATE']
# 检查目录下是否存在所需文件或文件夹
for item in self.storagechain.list_files(_fileitem):
if item.name in required_files:
return True
return False
def __list_files(_fileitem: schemas.FileItem):
"""
列出下级文件
@@ -337,14 +365,19 @@ class MediaChain(ChainBase, metaclass=Singleton):
"""
if not _fileitem or not _content or not _path:
return
# 保存文件到临时目录
tmp_file = settings.TEMP_PATH / _path.name
tmp_file.write_bytes(_content)
_fileitem.path = str(_path.parent)
item = self.storagechain.upload_file(fileitem=_fileitem, path=tmp_file)
if item:
logger.info(f"已保存文件:{Path(item.path) / item.name}")
if tmp_file.exists():
tmp_file.unlink()
# 获取文件的父目录
try:
item = self.storagechain.upload_file(fileitem=_fileitem, path=tmp_file, new_name=_path.name)
if item:
logger.info(f"已保存文件:{item.path}")
else:
logger.warn(f"文件保存失败:{item.path}")
finally:
if tmp_file.exists():
tmp_file.unlink()
def __download_image(_url: str) -> Optional[bytes]:
"""
@@ -380,25 +413,37 @@ class MediaChain(ChainBase, metaclass=Singleton):
# 是否已存在
nfo_path = filepath.with_suffix(".nfo")
if not overwrite and self.storagechain.get_file_item(storage=fileitem.storage, path=nfo_path):
logger.debug(f"已存在nfo文件{nfo_path}")
logger.info(f"已存在nfo文件{nfo_path}")
return
# 电影文件
logger.info(f"正在生成电影nfo{mediainfo.title_year} - {filepath.name}")
movie_nfo = self.metadata_nfo(meta=meta, mediainfo=mediainfo)
if not movie_nfo:
logger.warn(f"{filepath.name} nfo文件生成失败")
return
# 保存或上传nfo文件到上级目录
if not parent:
parent = self.storagechain.get_parent_item(fileitem)
__save_file(_fileitem=parent, _path=nfo_path, _content=movie_nfo)
else:
# 电影目录
files = __list_files(_fileitem=fileitem)
for file in files:
self.scrape_metadata(fileitem=file,
meta=meta, mediainfo=mediainfo,
init_folder=False, parent=fileitem)
if is_bluray_folder(fileitem):
# 原盘目录
nfo_path = filepath / (filepath.name + ".nfo")
if not overwrite and self.storagechain.get_file_item(storage=fileitem.storage, path=nfo_path):
logger.info(f"已存在nfo文件{nfo_path}")
return
# 生成原盘nfo
movie_nfo = self.metadata_nfo(meta=meta, mediainfo=mediainfo)
if not movie_nfo:
logger.warn(f"{filepath.name} nfo文件生成失败")
return
# 保存或上传nfo文件到当前目录
__save_file(_fileitem=fileitem, _path=nfo_path, _content=movie_nfo)
else:
# 处理目录内的文件
files = __list_files(_fileitem=fileitem)
for file in files:
self.scrape_metadata(fileitem=file,
meta=meta, mediainfo=mediainfo,
init_folder=False, parent=fileitem)
# 生成目录内图片文件
if init_folder:
# 图片
@@ -412,7 +457,7 @@ class MediaChain(ChainBase, metaclass=Singleton):
image_path = filepath / image_name
if not overwrite and self.storagechain.get_file_item(storage=fileitem.storage,
path=image_path):
logger.debug(f"已存在图片文件:{image_path}")
logger.info(f"已存在图片文件:{image_path}")
continue
# 下载图片
content = __download_image(_url=attr_value)
@@ -425,14 +470,14 @@ class MediaChain(ChainBase, metaclass=Singleton):
# 是否已存在
nfo_path = filepath.with_suffix(".nfo")
if not overwrite and self.storagechain.get_file_item(storage=fileitem.storage, path=nfo_path):
logger.debug(f"已存在nfo文件{nfo_path}")
logger.info(f"已存在nfo文件{nfo_path}")
return
# 重新识别季集
file_meta = MetaInfoPath(filepath)
if not file_meta.begin_episode:
logger.warn(f"{filepath.name} 无法识别文件集数!")
return
file_mediainfo = self.recognize_media(meta=file_meta)
file_mediainfo = self.recognize_media(meta=file_meta, tmdbid=mediainfo.tmdb_id)
if not file_mediainfo:
logger.warn(f"{filepath.name} 无法识别文件媒体信息!")
return
@@ -453,7 +498,7 @@ class MediaChain(ChainBase, metaclass=Singleton):
for episode, image_url in image_dict.items():
image_path = filepath.with_suffix(Path(image_url).suffix)
if not overwrite and self.storagechain.get_file_item(storage=fileitem.storage, path=image_path):
logger.debug(f"已存在图片文件:{image_path}")
logger.info(f"已存在图片文件:{image_path}")
continue
# 下载图片
content = __download_image(image_url)
@@ -475,11 +520,14 @@ class MediaChain(ChainBase, metaclass=Singleton):
if init_folder:
# 识别文件夹名称
season_meta = MetaInfo(filepath.name)
if season_meta.begin_season:
# 当前文件夹为Specials或者SPs时设置为S0
if filepath.name in settings.RENAME_FORMAT_S0_NAMES:
season_meta.begin_season = 0
if season_meta.begin_season is not None:
# 是否已存在
nfo_path = filepath / "season.nfo"
if not overwrite and self.storagechain.get_file_item(storage=fileitem.storage, path=nfo_path):
logger.debug(f"已存在nfo文件{nfo_path}")
logger.info(f"已存在nfo文件{nfo_path}")
return
# 当前目录有季号生成季nfo
season_nfo = self.metadata_nfo(meta=meta, mediainfo=mediainfo, season=season_meta.begin_season)
@@ -495,19 +543,44 @@ class MediaChain(ChainBase, metaclass=Singleton):
image_path = filepath.with_name(image_name)
if not overwrite and self.storagechain.get_file_item(storage=fileitem.storage,
path=image_path):
logger.debug(f"已存在图片文件:{image_path}")
logger.info(f"已存在图片文件:{image_path}")
continue
# 下载图片
content = __download_image(image_url)
# 保存图片文件到当前目录
# 保存图片文件到剧集目录
if content:
__save_file(_fileitem=fileitem, _path=image_path, _content=content)
if not parent:
parent = self.storagechain.get_parent_item(fileitem)
__save_file(_fileitem=parent, _path=image_path, _content=content)
# 额外fanart季图片poster thumb banner
image_dict = self.metadata_img(mediainfo=mediainfo)
if image_dict:
for image_name, image_url in image_dict.items():
if image_name.startswith("season"):
image_path = filepath.with_name(image_name)
# 只下载当前刮削季的图片
image_season = "00" if "specials" in image_name else image_name[6:8]
if image_season != str(season_meta.begin_season).rjust(2, '0'):
logger.info(f"当前刮削季为:{season_meta.begin_season},跳过文件:{image_path}")
continue
if not overwrite and self.storagechain.get_file_item(storage=fileitem.storage,
path=image_path):
logger.info(f"已存在图片文件:{image_path}")
continue
# 下载图片
content = __download_image(image_url)
# 保存图片文件到当前目录
if content:
if not parent:
parent = self.storagechain.get_parent_item(fileitem)
__save_file(_fileitem=parent, _path=image_path, _content=content)
# 判断当前目录是不是剧集根目录
if season_meta.name:
if not season_meta.season:
# 是否已存在
nfo_path = filepath / "tvshow.nfo"
if not overwrite and self.storagechain.get_file_item(storage=fileitem.storage, path=nfo_path):
logger.debug(f"已存在nfo文件{nfo_path}")
logger.info(f"已存在nfo文件{nfo_path}")
return
# 当前目录有名称生成tvshow nfo 和 tv图片
tv_nfo = self.metadata_nfo(meta=meta, mediainfo=mediainfo)
@@ -520,10 +593,13 @@ class MediaChain(ChainBase, metaclass=Singleton):
image_dict = self.metadata_img(mediainfo=mediainfo)
if image_dict:
for image_name, image_url in image_dict.items():
# 不下载季图片
if image_name.startswith("season"):
continue
image_path = filepath / image_name
if not overwrite and self.storagechain.get_file_item(storage=fileitem.storage,
path=image_path):
logger.debug(f"已存在图片文件:{image_path}")
logger.info(f"已存在图片文件:{image_path}")
continue
# 下载图片
content = __download_image(image_url)

View File

@@ -111,6 +111,8 @@ class MessageChain(ChainBase):
info = self.message_parser(source=source, body=body, form=form, args=args)
if not info:
return
# 更新消息来源
source = info.source
# 渠道
channel = info.channel
# 用户ID

View File

@@ -221,6 +221,12 @@ class SearchChain(ChainBase):
key=ProgressKey.Search)
if not torrent.title:
continue
# 匹配订阅附加参数
if filter_params and not self.torrenthelper.filter_torrent(torrent_info=torrent,
filter_params=filter_params):
continue
# 识别元数据
torrent_meta = MetaInfo(title=torrent.title, subtitle=torrent.description,
custom_words=custom_words)
@@ -234,11 +240,6 @@ class SearchChain(ChainBase):
_match_torrents.append((torrent, torrent_meta))
continue
# 匹配订阅附加参数
if filter_params and not self.torrenthelper.filter_torrent(torrent_info=torrent,
filter_params=filter_params):
continue
# 比对种子
if self.torrenthelper.match_torrent(mediainfo=mediainfo,
torrent_meta=torrent_meta,

View File

@@ -1,7 +1,7 @@
import base64
import re
from datetime import datetime
from typing import Optional, Tuple, Union
from typing import Optional, Tuple, Union, Dict
from urllib.parse import urljoin
from lxml import etree
@@ -86,14 +86,22 @@ class SiteChain(ChainBase):
f"{userdata.message_unread} 条新消息,请登陆查看",
link=site.get("url")
))
# 低分享率警告
if userdata.ratio and float(userdata.ratio) < 1:
self.post_message(Notification(
mtype=NotificationType.SiteMessage,
title=f"【站点分享率低预警】",
text=f"站点 {site.get('name')} 分享率 {userdata.ratio},请注意!"
))
return userdata
def refresh_userdatas(self) -> None:
def refresh_userdatas(self) -> Dict[str, SiteUserData]:
"""
刷新所有站点的用户数据
"""
sites = self.siteshelper.get_indexers()
any_site_updated = False
result = {}
for site in sites:
if global_vars.is_system_stopped:
return
@@ -101,10 +109,12 @@ class SiteChain(ChainBase):
userdata = self.refresh_userdata(site)
if userdata:
any_site_updated = True
result[site.get("name")] = userdata
if any_site_updated:
EventManager().send_event(EventType.SiteRefreshed, {
"site_id": "*"
})
return result
def is_special_site(self, domain: str) -> bool:
"""
@@ -705,3 +715,66 @@ class SiteChain(ChainBase):
source=source,
title=f"{site_info.name}】 Cookie&UA更新成功",
userid=userid))
def remote_refresh_userdatas(self, channel: MessageChannel,
userid: Union[str, int] = None, source: str = None):
"""
刷新所有站点用户数据
"""
logger.info("收到命令,开始刷新站点数据 ...")
self.post_message(Notification(
channel=channel,
source=source,
title="开始刷新站点数据 ...",
userid=userid
))
# 刷新站点数据
site_datas = self.refresh_userdatas()
if site_datas:
# 发送消息
messages = {}
# 总上传
incUploads = 0
# 总下载
incDownloads = 0
# 今天日期
today_date = datetime.now().strftime("%Y-%m-%d")
for rand, site in enumerate(site_datas.keys()):
upload = int(site_datas[site].upload or 0)
download = int(site_datas[site].download or 0)
updated_date = site_datas[site].updated_day
if updated_date and updated_date != today_date:
updated_date = f"{updated_date}"
else:
updated_date = ""
if upload > 0 or download > 0:
incUploads += upload
incDownloads += download
messages[upload + (rand / 1000)] = (
f"{site}{updated_date}\n"
+ f"上传量:{StringUtils.str_filesize(upload)}\n"
+ f"下载量:{StringUtils.str_filesize(download)}\n"
+ "————————————"
)
if incDownloads or incUploads:
sorted_messages = [messages[key] for key in sorted(messages.keys(), reverse=True)]
sorted_messages.insert(0, f"【汇总】\n"
f"总上传:{StringUtils.str_filesize(incUploads)}\n"
f"总下载:{StringUtils.str_filesize(incDownloads)}\n"
f"————————————")
self.post_message(Notification(
channel=channel,
source=source,
title="【站点数据统计】",
text="\n".join(sorted_messages),
userid=userid
))
else:
self.post_message(Notification(
channel=channel,
source=source,
title="没有刷新到任何站点数据!",
userid=userid
))

View File

@@ -135,9 +135,17 @@ class StorageChain(ChainBase):
if not self.delete_file(fileitem):
logger.warn(f"{fileitem.storage}{fileitem.path} 删除失败")
return False
# 处理上级目录
if mtype and mtype == MediaType.TV:
dir_item = self.get_file_item(storage=fileitem.storage, path=Path(fileitem.path).parent.parent)
if mtype:
# 重命名格式
rename_format = settings.TV_RENAME_FORMAT \
if mtype == MediaType.TV else settings.MOVIE_RENAME_FORMAT
# 计算重命名中的文件夹层数
rename_format_level = len(rename_format.split("/")) - 1
if rename_format_level < 1:
return True
# 处理上级目录
dir_item = self.get_file_item(storage=fileitem.storage,
path=Path(fileitem.path).parents[rename_format_level - 1])
else:
dir_item = self.get_parent_item(fileitem)
if dir_item and len(Path(dir_item.path).parts) > 2:

View File

@@ -1,6 +1,7 @@
import copy
import random
import time
import threading
from datetime import datetime
from typing import Dict, List, Optional, Union, Tuple
@@ -28,15 +29,17 @@ from app.log import logger
from app.schemas import NotExistMediaInfo, Notification, SubscrbieInfo, SubscribeEpisodeInfo, SubscribeDownloadFileInfo, \
SubscribeLibraryFileInfo
from app.schemas.types import MediaType, SystemConfigKey, MessageChannel, NotificationType, EventType
from app.utils.singleton import Singleton
class SubscribeChain(ChainBase):
class SubscribeChain(ChainBase, metaclass=Singleton):
"""
订阅管理处理链
"""
def __init__(self):
super().__init__()
self._rlock = threading.RLock()
self.downloadchain = DownloadChain()
self.downloadhis = DownloadHistoryOper()
self.searchchain = SearchChain()
@@ -234,178 +237,189 @@ class SubscribeChain(ChainBase):
"""
订阅搜索
:param sid: 订阅ID有值时只处理该订阅
:param state: 订阅状态 N:未搜索 R:已搜索
:param state: 订阅状态 N:新建, R:订阅中, P:待定, S:暂停
:param manual: 是否手动搜索
:return: 更新订阅状态为R或删除订阅
"""
if sid:
subscribes = [self.subscribeoper.get(sid)]
else:
subscribes = self.subscribeoper.list(state)
# 遍历订阅
for subscribe in subscribes:
if global_vars.is_system_stopped:
break
mediakey = subscribe.tmdbid or subscribe.doubanid
custom_word_list = subscribe.custom_words.split("\n") if subscribe.custom_words else None
# 校验当前时间减订阅创建时间是否大于1分钟否则跳过先留出编辑订阅的时间
if subscribe.date:
now = datetime.now()
subscribe_time = datetime.strptime(subscribe.date, '%Y-%m-%d %H:%M:%S')
if (now - subscribe_time).total_seconds() < 60:
logger.debug(f"订阅标题:{subscribe.name} 新增小于1分钟暂不搜索...")
continue
# 随机休眠1-5分钟
if not sid and state == 'R':
sleep_time = random.randint(60, 300)
logger.info(f'订阅搜索随机休眠 {sleep_time} 秒 ...')
time.sleep(sleep_time)
logger.info(f'开始搜索订阅,标题:{subscribe.name} ...')
# 如果状态为N则更新为R
if subscribe.state == 'N':
self.subscribeoper.update(subscribe.id, {'state': 'R'})
# 生成元数据
meta = MetaInfo(subscribe.name)
meta.year = subscribe.year
meta.begin_season = subscribe.season or None
try:
meta.type = MediaType(subscribe.type)
except ValueError:
logger.error(f'订阅 {subscribe.name} 类型错误:{subscribe.type}')
continue
# 识别媒体信息
mediainfo: MediaInfo = self.recognize_media(meta=meta, mtype=meta.type,
tmdbid=subscribe.tmdbid,
doubanid=subscribe.doubanid,
cache=False)
if not mediainfo:
logger.warn(
f'未识别到媒体信息,标题:{subscribe.name}tmdbid{subscribe.tmdbid}doubanid{subscribe.doubanid}')
continue
# 非洗版状态
if not subscribe.best_version:
# 每季总集数
totals = {}
if subscribe.season and subscribe.total_episode:
totals = {
subscribe.season: subscribe.total_episode
}
# 查询媒体库缺失的媒体信息
exist_flag, no_exists = self.downloadchain.get_no_exists_info(
meta=meta,
mediainfo=mediainfo,
totals=totals
)
else:
# 洗版状态
exist_flag = False
if meta.type == MediaType.TV:
no_exists = {
mediakey: {
subscribe.season: NotExistMediaInfo(
season=subscribe.season,
episodes=[],
total_episode=subscribe.total_episode,
start_episode=subscribe.start_episode or 1)
}
}
else:
no_exists = {}
# 已存在
if exist_flag:
logger.info(f'{mediainfo.title_year} 媒体库中已存在')
self.finish_subscribe_or_not(subscribe=subscribe, meta=meta, mediainfo=mediainfo, force=True)
continue
# 电视剧订阅处理缺失集
if meta.type == MediaType.TV:
# 实际缺失集与订阅开始结束集范围进行整合,同时剔除已下载的集数
no_exists = self.__get_subscribe_no_exits(
subscribe_name=f'{subscribe.name} {meta.season}',
no_exists=no_exists,
mediakey=mediakey,
begin_season=meta.begin_season,
total_episode=subscribe.total_episode,
start_episode=subscribe.start_episode,
downloaded_episodes=self.__get_downloaded_episodes(subscribe)
)
# 站点范围
sites = self.get_sub_sites(subscribe)
# 优先级过滤规则
if subscribe.best_version:
rule_groups = subscribe.filter_groups \
or self.systemconfig.get(SystemConfigKey.BestVersionFilterRuleGroups)
else:
rule_groups = subscribe.filter_groups \
or self.systemconfig.get(SystemConfigKey.SubscribeFilterRuleGroups)
# 搜索,同时电视剧会过滤掉不需要的剧集
contexts = self.searchchain.process(mediainfo=mediainfo,
keyword=subscribe.keyword,
no_exists=no_exists,
sites=sites,
rule_groups=rule_groups,
area="imdbid" if subscribe.search_imdbid else "title",
custom_words=custom_word_list,
filter_params=self.get_params(subscribe))
if not contexts:
logger.warn(f'订阅 {subscribe.keyword or subscribe.name} 未搜索到资源')
self.finish_subscribe_or_not(subscribe=subscribe, meta=meta,
mediainfo=mediainfo, lefts=no_exists)
continue
# 过滤搜索结果
matched_contexts = []
for context in contexts:
torrent_meta = context.meta_info
torrent_info = context.torrent_info
torrent_mediainfo = context.media_info
# 洗版
if subscribe.best_version:
# 洗版时,非整季不要
if torrent_mediainfo.type == MediaType.TV:
if torrent_meta.episode_list:
logger.info(f'{subscribe.name} 正在洗版,{torrent_info.title} 不是整季')
continue
# 洗版时,优先级小于等于已下载优先级的不要
if subscribe.current_priority \
and torrent_info.pri_order <= subscribe.current_priority:
logger.info(f'{subscribe.name} 正在洗版,{torrent_info.title} 优先级低于或等于已下载优先级')
continue
matched_contexts.append(context)
if not matched_contexts:
logger.warn(f'订阅 {subscribe.name} 没有符合过滤条件的资源')
self.finish_subscribe_or_not(subscribe=subscribe, meta=meta,
mediainfo=mediainfo, lefts=no_exists)
continue
# 自动下载
downloads, lefts = self.downloadchain.batch_download(
contexts=matched_contexts,
no_exists=no_exists,
userid=subscribe.username,
username=subscribe.username,
save_path=subscribe.save_path,
media_category=subscribe.media_category,
downloader=subscribe.downloader,
)
# 判断是否应完成订阅
self.finish_subscribe_or_not(subscribe=subscribe, meta=meta, mediainfo=mediainfo,
downloads=downloads, lefts=lefts)
# 手动触发时发送系统消息
if manual:
with self._rlock:
logger.debug(f"search lock acquired at {datetime.now()}")
if sid:
self.message.put(f'{subscribes[0].name} 搜索完成!', title="订阅搜索", role="system")
subscribe = self.subscribeoper.get(sid)
subscribes = [subscribe] if subscribe else []
else:
self.message.put('所有订阅搜索完成!', title="订阅搜索", role="system")
subscribes = self.subscribeoper.list(self.get_states_for_search(state))
# 遍历订阅
for subscribe in subscribes:
if global_vars.is_system_stopped:
break
mediakey = subscribe.tmdbid or subscribe.doubanid
custom_word_list = subscribe.custom_words.split("\n") if subscribe.custom_words else None
# 校验当前时间减订阅创建时间是否大于1分钟否则跳过先留出编辑订阅的时间
if subscribe.date:
now = datetime.now()
subscribe_time = datetime.strptime(subscribe.date, '%Y-%m-%d %H:%M:%S')
if (now - subscribe_time).total_seconds() < 60:
logger.debug(f"订阅标题:{subscribe.name} 新增小于1分钟暂不搜索...")
continue
# 随机休眠1-5分钟
if not sid and state in ['R', 'P']:
sleep_time = random.randint(60, 300)
logger.info(f'订阅搜索随机休眠 {sleep_time} 秒 ...')
time.sleep(sleep_time)
try:
logger.info(f'开始搜索订阅,标题:{subscribe.name} ...')
# 生成元数据
meta = MetaInfo(subscribe.name)
meta.year = subscribe.year
meta.begin_season = subscribe.season or None
try:
meta.type = MediaType(subscribe.type)
except ValueError:
logger.error(f'订阅 {subscribe.name} 类型错误:{subscribe.type}')
continue
# 识别媒体信息
mediainfo: MediaInfo = self.recognize_media(meta=meta, mtype=meta.type,
tmdbid=subscribe.tmdbid,
doubanid=subscribe.doubanid,
cache=False)
if not mediainfo:
logger.warn(
f'未识别到媒体信息,标题:{subscribe.name}tmdbid{subscribe.tmdbid}doubanid{subscribe.doubanid}')
continue
# 非洗版状态
if not subscribe.best_version:
# 每季总集数
totals = {}
if subscribe.season and subscribe.total_episode:
totals = {
subscribe.season: subscribe.total_episode
}
# 查询媒体库缺失的媒体信息
exist_flag, no_exists = self.downloadchain.get_no_exists_info(
meta=meta,
mediainfo=mediainfo,
totals=totals
)
else:
# 洗版状态
exist_flag = False
if meta.type == MediaType.TV:
no_exists = {
mediakey: {
subscribe.season: NotExistMediaInfo(
season=subscribe.season,
episodes=[],
total_episode=subscribe.total_episode,
start_episode=subscribe.start_episode or 1)
}
}
else:
no_exists = {}
# 已存在
if exist_flag:
logger.info(f'{mediainfo.title_year} 媒体库中已存在')
self.finish_subscribe_or_not(subscribe=subscribe, meta=meta, mediainfo=mediainfo, force=True)
continue
# 电视剧订阅处理缺失集
if meta.type == MediaType.TV:
# 实际缺失集与订阅开始结束集范围进行整合,同时剔除已下载的集数
no_exists = self.__get_subscribe_no_exits(
subscribe_name=f'{subscribe.name} {meta.season}',
no_exists=no_exists,
mediakey=mediakey,
begin_season=meta.begin_season,
total_episode=subscribe.total_episode,
start_episode=subscribe.start_episode,
downloaded_episodes=self.__get_downloaded_episodes(subscribe)
)
# 站点范围
sites = self.get_sub_sites(subscribe)
# 优先级过滤规则
if subscribe.best_version:
rule_groups = subscribe.filter_groups \
or self.systemconfig.get(SystemConfigKey.BestVersionFilterRuleGroups) or []
else:
rule_groups = subscribe.filter_groups \
or self.systemconfig.get(SystemConfigKey.SubscribeFilterRuleGroups) or []
# 搜索,同时电视剧会过滤掉不需要的剧集
contexts = self.searchchain.process(mediainfo=mediainfo,
keyword=subscribe.keyword,
no_exists=no_exists,
sites=sites,
rule_groups=rule_groups,
area="imdbid" if subscribe.search_imdbid else "title",
custom_words=custom_word_list,
filter_params=self.get_params(subscribe))
if not contexts:
logger.warn(f'订阅 {subscribe.keyword or subscribe.name} 未搜索到资源')
self.finish_subscribe_or_not(subscribe=subscribe, meta=meta,
mediainfo=mediainfo, lefts=no_exists)
continue
# 过滤搜索结果
matched_contexts = []
for context in contexts:
torrent_meta = context.meta_info
torrent_info = context.torrent_info
torrent_mediainfo = context.media_info
# 洗版
if subscribe.best_version:
# 洗版时,非整季不要
if torrent_mediainfo.type == MediaType.TV:
if torrent_meta.episode_list:
logger.info(f'{subscribe.name} 正在洗版,{torrent_info.title} 不是整季')
continue
# 洗版时,优先级小于等于已下载优先级的不要
if subscribe.current_priority \
and torrent_info.pri_order <= subscribe.current_priority:
logger.info(
f'{subscribe.name} 正在洗版,{torrent_info.title} 优先级低于或等于已下载优先级')
continue
matched_contexts.append(context)
if not matched_contexts:
logger.warn(f'订阅 {subscribe.name} 没有符合过滤条件的资源')
self.finish_subscribe_or_not(subscribe=subscribe, meta=meta,
mediainfo=mediainfo, lefts=no_exists)
continue
# 自动下载
downloads, lefts = self.downloadchain.batch_download(
contexts=matched_contexts,
no_exists=no_exists,
userid=subscribe.username,
username=subscribe.username,
save_path=subscribe.save_path,
media_category=subscribe.media_category,
downloader=subscribe.downloader,
source="Subscribe"
)
# 判断是否应完成订阅
self.finish_subscribe_or_not(subscribe=subscribe, meta=meta, mediainfo=mediainfo,
downloads=downloads, lefts=lefts)
finally:
# 如果状态为N则更新为R
if subscribe.state == 'N':
self.subscribeoper.update(subscribe.id, {'state': 'R'})
# 手动触发时发送系统消息
if manual:
if subscribes:
if sid:
self.message.put(f'{subscribes[0].name} 搜索完成!', title="订阅搜索", role="system")
else:
self.message.put('所有订阅搜索完成!', title="订阅搜索", role="system")
else:
self.message.put('没有找到订阅!', title="订阅搜索", role="system")
logger.debug(f"search Lock released at {datetime.now()}")
def update_subscribe_priority(self, subscribe: Subscribe, meta: MetaInfo,
mediainfo: MediaInfo, downloads: List[Context]):
@@ -508,7 +522,7 @@ class SubscribeChain(ChainBase):
:return: 返回[]代表所有站点命中返回None代表没有订阅
"""
# 查询所有订阅
subscribes = self.subscribeoper.list('R')
subscribes = self.subscribeoper.list(self.get_states_for_search('R'))
if not subscribes:
return None
ret_sites = []
@@ -533,250 +547,255 @@ class SubscribeChain(ChainBase):
# 记录重新识别过的种子
_recognize_cached = []
# 所有订阅
subscribes = self.subscribeoper.list('R')
# 遍历订阅
for subscribe in subscribes:
if global_vars.is_system_stopped:
break
logger.info(f'开始匹配订阅,标题:{subscribe.name} ...')
mediakey = subscribe.tmdbid or subscribe.doubanid
# 生成元数据
meta = MetaInfo(subscribe.name)
meta.year = subscribe.year
meta.begin_season = subscribe.season or None
try:
meta.type = MediaType(subscribe.type)
except ValueError:
logger.error(f'订阅 {subscribe.name} 类型错误:{subscribe.type}')
continue
# 订阅的站点域名列表
domains = []
if subscribe.sites:
domains = self.siteoper.get_domains_by_ids(subscribe.sites)
# 识别媒体信息
mediainfo: MediaInfo = self.recognize_media(meta=meta, mtype=meta.type,
tmdbid=subscribe.tmdbid,
doubanid=subscribe.doubanid,
cache=False)
if not mediainfo:
logger.warn(
f'未识别到媒体信息,标题:{subscribe.name}tmdbid{subscribe.tmdbid}doubanid{subscribe.doubanid}')
continue
# 非洗版
if not subscribe.best_version:
# 每季总集数
totals = {}
if subscribe.season and subscribe.total_episode:
totals = {
subscribe.season: subscribe.total_episode
}
# 查询缺失的媒体信息
exist_flag, no_exists = self.downloadchain.get_no_exists_info(
meta=meta,
mediainfo=mediainfo,
totals=totals
)
else:
# 洗版
exist_flag = False
if meta.type == MediaType.TV:
no_exists = {
mediakey: {
subscribe.season: NotExistMediaInfo(
season=subscribe.season,
episodes=[],
total_episode=subscribe.total_episode,
start_episode=subscribe.start_episode or 1)
}
}
else:
no_exists = {}
# 已存在
if exist_flag:
logger.info(f'{mediainfo.title_year} 媒体库中已存在')
self.finish_subscribe_or_not(subscribe=subscribe, meta=meta, mediainfo=mediainfo, force=True)
continue
# 电视剧订阅
if meta.type == MediaType.TV:
# 整合实际缺失集与订阅开始集结束集,同时剔除已下载的集数
no_exists = self.__get_subscribe_no_exits(
subscribe_name=f'{subscribe.name} {meta.season}',
no_exists=no_exists,
mediakey=mediakey,
begin_season=meta.begin_season,
total_episode=subscribe.total_episode,
start_episode=subscribe.start_episode,
downloaded_episodes=self.__get_downloaded_episodes(subscribe)
)
# 遍历缓存种子
_match_context = []
for domain, contexts in torrents.items():
with self._rlock:
logger.debug(f"match lock acquired at {datetime.now()}")
# 所有订阅
subscribes = self.subscribeoper.list(self.get_states_for_search('R'))
# 遍历订阅
for subscribe in subscribes:
if global_vars.is_system_stopped:
break
if domains and domain not in domains:
logger.info(f'开始匹配订阅,标题:{subscribe.name} ...')
mediakey = subscribe.tmdbid or subscribe.doubanid
# 生成元数据
meta = MetaInfo(subscribe.name)
meta.year = subscribe.year
meta.begin_season = subscribe.season or None
try:
meta.type = MediaType(subscribe.type)
except ValueError:
logger.error(f'订阅 {subscribe.name} 类型错误:{subscribe.type}')
continue
logger.debug(f'开始匹配站点:{domain},共缓存了 {len(contexts)} 个种子...')
for context in contexts:
# 提取信息
torrent_meta = copy.deepcopy(context.meta_info)
torrent_mediainfo = copy.deepcopy(context.media_info)
torrent_info = context.torrent_info
# 不在订阅站点范围的不处理
sub_sites = self.get_sub_sites(subscribe)
if sub_sites and torrent_info.site not in sub_sites:
logger.debug(f"{torrent_info.site_name} - {torrent_info.title} 不符合订阅站点要求")
continue
# 有自定义识别词时,需要判断是否需要重新识别
if subscribe.custom_words:
_, apply_words = WordsMatcher().prepare(torrent_info.title,
custom_words=subscribe.custom_words.split("\n"))
if apply_words:
logger.info(
f'{torrent_info.site_name} - {torrent_info.title} 因订阅存在自定义识别词,重新识别元数据...')
# 重新识别元数据
torrent_meta = MetaInfo(title=torrent_info.title, subtitle=torrent_info.description,
custom_words=subscribe.custom_words)
# 媒体信息需要重新识别
torrent_mediainfo = None
# 先判断是否有没识别的种子,否则重新识别
if not torrent_mediainfo \
or (not torrent_mediainfo.tmdb_id and not torrent_mediainfo.douban_id):
# 避免重复处理
_cache_key = f"{torrent_meta.org_string}_{torrent_info.description}"
if _cache_key not in _recognize_cached:
_recognize_cached.append(_cache_key)
# 重新识别媒体信息
torrent_mediainfo = self.recognize_media(meta=torrent_meta)
if torrent_mediainfo:
# 更新种子缓存
context.media_info = torrent_mediainfo
if not torrent_mediainfo:
# 通过标题匹配兜底
logger.warn(
f'{torrent_info.site_name} - {torrent_info.title} 重新识别失败,尝试通过标题匹配...')
if self.torrenthelper.match_torrent(mediainfo=mediainfo,
torrent_meta=torrent_meta,
torrent=torrent_info):
# 匹配成功
logger.info(
f'{mediainfo.title_year} 通过标题匹配到可选资源:{torrent_info.site_name} - {torrent_info.title}')
# 更新种子缓存
torrent_mediainfo = mediainfo
context.media_info = mediainfo
else:
continue
# 直接比对媒体信息
if torrent_mediainfo and (torrent_mediainfo.tmdb_id or torrent_mediainfo.douban_id):
if torrent_mediainfo.type != mediainfo.type:
continue
if torrent_mediainfo.tmdb_id \
and torrent_mediainfo.tmdb_id != mediainfo.tmdb_id:
continue
if torrent_mediainfo.douban_id \
and torrent_mediainfo.douban_id != mediainfo.douban_id:
continue
logger.info(
f'{mediainfo.title_year} 通过媒体信ID匹配到可选资源{torrent_info.site_name} - {torrent_info.title}')
# 订阅的站点域名列表
domains = []
if subscribe.sites:
domains = self.siteoper.get_domains_by_ids(subscribe.sites)
# 识别媒体信息
mediainfo: MediaInfo = self.recognize_media(meta=meta, mtype=meta.type,
tmdbid=subscribe.tmdbid,
doubanid=subscribe.doubanid,
cache=False)
if not mediainfo:
logger.warn(
f'未识别到媒体信息,标题:{subscribe.name}tmdbid{subscribe.tmdbid}doubanid{subscribe.doubanid}')
continue
# 非洗版
if not subscribe.best_version:
# 每季总集数
totals = {}
if subscribe.season and subscribe.total_episode:
totals = {
subscribe.season: subscribe.total_episode
}
# 查询缺失的媒体信息
exist_flag, no_exists = self.downloadchain.get_no_exists_info(
meta=meta,
mediainfo=mediainfo,
totals=totals
)
else:
# 洗版
exist_flag = False
if meta.type == MediaType.TV:
no_exists = {
mediakey: {
subscribe.season: NotExistMediaInfo(
season=subscribe.season,
episodes=[],
total_episode=subscribe.total_episode,
start_episode=subscribe.start_episode or 1)
}
}
else:
continue
no_exists = {}
# 如果是电视剧
if torrent_mediainfo.type == MediaType.TV:
# 有多季的不要
if len(torrent_meta.season_list) > 1:
logger.debug(f'{torrent_info.title} 有多季,不处理')
# 已存在
if exist_flag:
logger.info(f'{mediainfo.title_year} 媒体库中已存在')
self.finish_subscribe_or_not(subscribe=subscribe, meta=meta, mediainfo=mediainfo, force=True)
continue
# 电视剧订阅
if meta.type == MediaType.TV:
# 整合实际缺失集与订阅开始集结束集,同时剔除已下载的集数
no_exists = self.__get_subscribe_no_exits(
subscribe_name=f'{subscribe.name} {meta.season}',
no_exists=no_exists,
mediakey=mediakey,
begin_season=meta.begin_season,
total_episode=subscribe.total_episode,
start_episode=subscribe.start_episode,
downloaded_episodes=self.__get_downloaded_episodes(subscribe)
)
# 遍历缓存种子
_match_context = []
for domain, contexts in torrents.items():
if global_vars.is_system_stopped:
break
if domains and domain not in domains:
continue
logger.debug(f'开始匹配站点:{domain},共缓存了 {len(contexts)} 个种子...')
for context in contexts:
# 提取信息
torrent_meta = copy.deepcopy(context.meta_info)
torrent_mediainfo = copy.deepcopy(context.media_info)
torrent_info = context.torrent_info
# 不在订阅站点范围的不处理
sub_sites = self.get_sub_sites(subscribe)
if sub_sites and torrent_info.site not in sub_sites:
logger.debug(f"{torrent_info.site_name} - {torrent_info.title} 不符合订阅站点要求")
continue
# 比对季
if torrent_meta.begin_season:
if meta.begin_season != torrent_meta.begin_season:
# 有自定义识别词时,需要判断是否需要重新识别
if subscribe.custom_words:
_, apply_words = WordsMatcher().prepare(torrent_info.title,
custom_words=subscribe.custom_words.split("\n"))
if apply_words:
logger.info(
f'{torrent_info.site_name} - {torrent_info.title} 因订阅存在自定义识别词,重新识别元数据...')
# 重新识别元数据
torrent_meta = MetaInfo(title=torrent_info.title, subtitle=torrent_info.description,
custom_words=subscribe.custom_words)
# 媒体信息需要重新识别
torrent_mediainfo = None
# 先判断是否有没识别的种子,否则重新识别
if not torrent_mediainfo \
or (not torrent_mediainfo.tmdb_id and not torrent_mediainfo.douban_id):
# 避免重复处理
_cache_key = f"{torrent_meta.org_string}_{torrent_info.description}"
if _cache_key not in _recognize_cached:
_recognize_cached.append(_cache_key)
# 重新识别媒体信息
torrent_mediainfo = self.recognize_media(meta=torrent_meta)
if torrent_mediainfo:
# 更新种子缓存
context.media_info = torrent_mediainfo
if not torrent_mediainfo:
# 通过标题匹配兜底
logger.warn(
f'{torrent_info.site_name} - {torrent_info.title} 重新识别失败,尝试通过标题匹配...')
if self.torrenthelper.match_torrent(mediainfo=mediainfo,
torrent_meta=torrent_meta,
torrent=torrent_info):
# 匹配成功
logger.info(
f'{mediainfo.title_year} 通过标题匹配到可选资源:{torrent_info.site_name} - {torrent_info.title}')
# 更新种子缓存
torrent_mediainfo = mediainfo
context.media_info = mediainfo
else:
continue
# 直接比对媒体信息
if torrent_mediainfo and (torrent_mediainfo.tmdb_id or torrent_mediainfo.douban_id):
if torrent_mediainfo.type != mediainfo.type:
continue
if torrent_mediainfo.tmdb_id \
and torrent_mediainfo.tmdb_id != mediainfo.tmdb_id:
continue
if torrent_mediainfo.douban_id \
and torrent_mediainfo.douban_id != mediainfo.douban_id:
continue
logger.info(
f'{mediainfo.title_year} 通过媒体信ID匹配到可选资源{torrent_info.site_name} - {torrent_info.title}')
else:
continue
# 如果是电视剧
if torrent_mediainfo.type == MediaType.TV:
# 有多季的不要
if len(torrent_meta.season_list) > 1:
logger.debug(f'{torrent_info.title} 有多季,不处理')
continue
# 比对季
if torrent_meta.begin_season:
if meta.begin_season != torrent_meta.begin_season:
logger.debug(f'{torrent_info.title} 季不匹配')
continue
elif meta.begin_season != 1:
logger.debug(f'{torrent_info.title} 季不匹配')
continue
elif meta.begin_season != 1:
logger.debug(f'{torrent_info.title} 季不匹配')
continue
# 非洗版
if not subscribe.best_version:
# 不是缺失的剧集不要
if no_exists and no_exists.get(mediakey):
# 缺失
no_exists_info = no_exists.get(mediakey).get(subscribe.season)
if no_exists_info:
# 是否有交集
if no_exists_info.episodes and \
torrent_meta.episode_list and \
not set(no_exists_info.episodes).intersection(
set(torrent_meta.episode_list)
):
logger.debug(
f'{torrent_info.title} 对应剧集 {torrent_meta.episode_list} 未包含缺失的剧集'
)
# 非洗版
if not subscribe.best_version:
# 不是缺失的剧集不要
if no_exists and no_exists.get(mediakey):
# 缺失集
no_exists_info = no_exists.get(mediakey).get(subscribe.season)
if no_exists_info:
# 是否有交
if no_exists_info.episodes and \
torrent_meta.episode_list and \
not set(no_exists_info.episodes).intersection(
set(torrent_meta.episode_list)
):
logger.debug(
f'{torrent_info.title} 对应剧集 {torrent_meta.episode_list} 未包含缺失的剧集'
)
continue
else:
# 洗版时,非整季不要
if meta.type == MediaType.TV:
if torrent_meta.episode_list:
logger.debug(f'{subscribe.name} 正在洗版,{torrent_info.title} 不是整季')
continue
else:
# 洗版时,非整季不要
if meta.type == MediaType.TV:
if torrent_meta.episode_list:
logger.debug(f'{subscribe.name} 正在洗版,{torrent_info.title} 不是整季')
continue
# 匹配订阅附加参数
if not self.torrenthelper.filter_torrent(torrent_info=torrent_info,
filter_params=self.get_params(subscribe)):
continue
# 优先级过滤规则
if subscribe.best_version:
rule_groups = subscribe.filter_groups \
or self.systemconfig.get(SystemConfigKey.BestVersionFilterRuleGroups)
else:
rule_groups = subscribe.filter_groups \
or self.systemconfig.get(SystemConfigKey.SubscribeFilterRuleGroups)
result: List[TorrentInfo] = self.filter_torrents(
rule_groups=rule_groups,
torrent_list=[torrent_info],
mediainfo=torrent_mediainfo)
if result is not None and not result:
# 不符合过滤规则
logger.debug(f"{torrent_info.title} 不匹配过滤规则")
continue
# 洗版时,优先级小于已下载优先级的不要
if subscribe.best_version:
if subscribe.current_priority \
and torrent_info.pri_order <= subscribe.current_priority:
logger.info(f'{subscribe.name} 正在洗版,{torrent_info.title} 优先级低于或等于已下载优先级')
# 匹配订阅附加参数
if not self.torrenthelper.filter_torrent(torrent_info=torrent_info,
filter_params=self.get_params(subscribe)):
continue
# 匹配成功
logger.info(f'{mediainfo.title_year} 匹配成功:{torrent_info.title}')
_match_context.append(context)
# 优先级过滤规则
if subscribe.best_version:
rule_groups = subscribe.filter_groups \
or self.systemconfig.get(SystemConfigKey.BestVersionFilterRuleGroups)
else:
rule_groups = subscribe.filter_groups \
or self.systemconfig.get(SystemConfigKey.SubscribeFilterRuleGroups)
result: List[TorrentInfo] = self.filter_torrents(
rule_groups=rule_groups,
torrent_list=[torrent_info],
mediainfo=torrent_mediainfo)
if result is not None and not result:
# 不符合过滤规则
logger.debug(f"{torrent_info.title} 不匹配过滤规则")
continue
if not _match_context:
# 未匹配到资源
logger.info(f'{mediainfo.title_year} 未匹配到符合条件的资源')
self.finish_subscribe_or_not(subscribe=subscribe, meta=meta,
mediainfo=mediainfo, lefts=no_exists)
continue
# 洗版时,优先级小于已下载优先级的不要
if subscribe.best_version:
if subscribe.current_priority \
and torrent_info.pri_order <= subscribe.current_priority:
logger.info(
f'{subscribe.name} 正在洗版,{torrent_info.title} 优先级低于或等于已下载优先级')
continue
# 开始批量择优下载
logger.info(f'{mediainfo.title_year} 匹配完成,共匹配到{len(_match_context)}个资源')
downloads, lefts = self.downloadchain.batch_download(contexts=_match_context,
no_exists=no_exists,
userid=subscribe.username,
username=subscribe.username,
save_path=subscribe.save_path,
media_category=subscribe.media_category,
downloader=subscribe.downloader)
# 判断是否要完成订阅
self.finish_subscribe_or_not(subscribe=subscribe, meta=meta, mediainfo=mediainfo,
downloads=downloads, lefts=lefts)
# 匹配成功
logger.info(f'{mediainfo.title_year} 匹配成功:{torrent_info.title}')
_match_context.append(context)
if not _match_context:
# 未匹配到资源
logger.info(f'{mediainfo.title_year} 未匹配到符合条件的资源')
self.finish_subscribe_or_not(subscribe=subscribe, meta=meta,
mediainfo=mediainfo, lefts=no_exists)
continue
# 开始批量择优下载
logger.info(f'{mediainfo.title_year} 匹配完成,共匹配到{len(_match_context)}个资源')
downloads, lefts = self.downloadchain.batch_download(contexts=_match_context,
no_exists=no_exists,
userid=subscribe.username,
username=subscribe.username,
save_path=subscribe.save_path,
media_category=subscribe.media_category,
downloader=subscribe.downloader,
source="Subscribe")
# 判断是否要完成订阅
self.finish_subscribe_or_not(subscribe=subscribe, meta=meta, mediainfo=mediainfo,
downloads=downloads, lefts=lefts)
logger.debug(f"match Lock released at {datetime.now()}")
def check(self):
"""
@@ -916,6 +935,9 @@ class SubscribeChain(ChainBase):
"""
完成订阅
"""
# 如果订阅状态为待定P说明订阅信息尚未完全更新无法完成订阅
if subscribe.state == "P":
return
# 完成订阅
msgstr = "订阅"
if bestversion:
@@ -1294,3 +1316,19 @@ class SubscribeChain(ChainBase):
subscribe_info.subscribe = Subscribe(**subscribe.to_dict())
subscribe_info.episodes = episodes
return subscribe_info
@staticmethod
def get_states_for_search(state: str) -> str:
"""
根据给定的状态返回实际需要搜索的状态列表,支持多个状态用逗号分隔
:param state: 订阅状态
N: New新建未处理
R: Resolved订阅中
P: Pending待定信息待进一步更新允许搜索不允许完成
S: Suspended暂停订阅不参与任何动作暂时停止处理
:return: 需要查询的状态列表(多个状态用逗号分隔)
"""
# 如果状态是 R 或 P则视为一起搜索返回 R,P 作为查询条件
if state in ["R", "P"]:
return "R,P"
return state

View File

@@ -175,7 +175,7 @@ class TorrentsChain(ChainBase, metaclass=Singleton):
# 按pubdate降序排列
torrents.sort(key=lambda x: x.pubdate or '', reverse=True)
# 取前N条
torrents = torrents[:settings.CACHE_CONF.get('refresh')]
torrents = torrents[:settings.CACHE_CONF["refresh"]]
if torrents:
# 过滤出没有处理过的种子
torrents = [torrent for torrent in torrents
@@ -215,8 +215,8 @@ class TorrentsChain(ChainBase, metaclass=Singleton):
else:
torrents_cache[domain].append(context)
# 如果超过了限制条数则移除掉前面的
if len(torrents_cache[domain]) > settings.CACHE_CONF.get('torrents'):
torrents_cache[domain] = torrents_cache[domain][-settings.CACHE_CONF.get('torrents'):]
if len(torrents_cache[domain]) > settings.CACHE_CONF["torrents"]:
torrents_cache[domain] = torrents_cache[domain][-settings.CACHE_CONF["torrents"]:]
# 回收资源
del torrents
else:

View File

@@ -10,7 +10,7 @@ from app.chain.tmdb import TmdbChain
from app.core.config import settings, global_vars
from app.core.context import MediaInfo
from app.core.meta import MetaBase
from app.core.metainfo import MetaInfoPath
from app.core.metainfo import MetaInfoPath, MetaInfo
from app.db.downloadhistory_oper import DownloadHistoryOper
from app.db.models.downloadhistory import DownloadHistory
from app.db.models.transferhistory import TransferHistory
@@ -131,6 +131,7 @@ class TransferChain(ChainBase):
extension=file_path.suffix.lstrip('.'),
),
mediainfo=mediainfo,
downloader=torrent.downloader,
download_hash=torrent.hash,
src_match=True
)
@@ -148,8 +149,9 @@ class TransferChain(ChainBase):
target_directory: TransferDirectoryConf = None,
target_storage: str = None, target_path: Path = None,
transfer_type: str = None, scrape: bool = None,
season: int = None, epformat: EpisodeFormat = None,
min_filesize: int = 0, download_hash: str = None,
library_type_folder: bool = None, library_category_folder: bool = None,
season: int = None, epformat: EpisodeFormat = None, min_filesize: int = 0,
downloader: str = None, download_hash: str = None,
force: bool = False, src_match: bool = False) -> Tuple[bool, str]:
"""
执行一个复杂目录的整理操作
@@ -161,9 +163,12 @@ class TransferChain(ChainBase):
:param target_path: 目标路径
:param transfer_type: 整理类型
:param scrape: 是否刮削元数据
:param library_type_folder: 媒体库类型子目录
:param library_category_folder: 媒体库类别子目录
:param season: 季
:param epformat: 剧集格式
:param min_filesize: 最小文件大小(MB)
:param downloader: 下载器
:param download_hash: 下载记录hash
:param force: 是否强制整理
:param src_match: 是否源目录匹配
@@ -184,8 +189,6 @@ class TransferChain(ChainBase):
# 汇总季集清单
season_episodes: Dict[Tuple, List[int]] = {}
# 汇总元数据
metas: Dict[Tuple, MetaBase] = {}
# 汇总媒体信息
medias: Dict[Tuple, MediaInfo] = {}
# 汇总整理信息
@@ -389,25 +392,37 @@ class TransferChain(ChainBase):
download_hash = download_file.download_hash
# 查询整理目标目录
if not target_directory and not target_path:
dir_info = None
if not target_directory:
if src_match:
# 按源目录匹配,以便找到更合适的目录配置
target_directory = self.directoryhelper.get_dir(file_mediainfo,
storage=file_item.storage, src_path=file_path)
dir_info = self.directoryhelper.get_dir(media=file_mediainfo,
storage=file_item.storage,
src_path=file_path,
target_storage=target_storage)
elif target_path:
# 指定目标路径,`手动整理`场景下使用,忽略源目录匹配,使用指定目录匹配
dir_info = self.directoryhelper.get_dir(media=file_mediainfo,
dest_path=target_path,
target_storage=target_storage)
else:
# 未指定目标路径,根据媒体信息获取目标目录
target_directory = self.directoryhelper.get_dir(file_mediainfo)
dir_info = self.directoryhelper.get_dir(file_mediainfo,
storage=file_item.storage,
target_storage=target_storage)
# 执行整理
transferinfo: TransferInfo = self.transfer(fileitem=file_item,
meta=file_meta,
mediainfo=file_mediainfo,
target_directory=target_directory,
target_directory=target_directory or dir_info,
target_storage=target_storage,
target_path=target_path,
transfer_type=transfer_type,
episodes_info=episodes_info,
scrape=scrape)
scrape=scrape,
library_type_folder=library_type_folder,
library_category_folder=library_category_folder)
if not transferinfo:
logger.error("文件整理模块运行失败")
return False, "文件整理模块运行失败"
@@ -443,7 +458,6 @@ class TransferChain(ChainBase):
mkey = (file_mediainfo.tmdb_id, file_meta.begin_season)
if mkey not in medias:
# 新增信息
metas[mkey] = file_meta
medias[mkey] = file_mediainfo
season_episodes[mkey] = file_meta.episode_list
transfers[mkey] = transferinfo
@@ -467,6 +481,15 @@ class TransferChain(ChainBase):
transferinfo=transferinfo
)
# 整理完成事件
self.eventmanager.send_event(EventType.TransferComplete, {
'meta': file_meta,
'mediainfo': file_mediainfo,
'transferinfo': transferinfo,
'downloader': downloader,
'download_hash': download_hash,
})
# 更新进度
processed_num += 1
self.progress.update(value=processed_num / total_num * 100,
@@ -479,8 +502,9 @@ class TransferChain(ChainBase):
# 执行后续处理
for mkey, media in medias.items():
transfer_meta = metas[mkey]
transfer_info = transfers[mkey]
transfer_meta = MetaInfo(transfer_info.target_diritem.name)
transfer_meta.begin_season = mkey[1]
# 发送通知
if transfer_info.need_notify:
se_str = None
@@ -497,19 +521,12 @@ class TransferChain(ChainBase):
'mediainfo': media,
'fileitem': transfer_info.target_diritem
})
# 整理完成事件
self.eventmanager.send_event(EventType.TransferComplete, {
'meta': transfer_meta,
'mediainfo': media,
'transferinfo': transfer_info,
'download_hash': download_hash,
})
# 移动模式处理
if all_success and current_transfer_type in ["move"]:
# 下载器hash
if download_hash:
if self.remove_torrents(download_hash):
if self.remove_torrents(download_hash, downloader=downloader):
logger.info(f"移动模式删除种子成功:{download_hash} ")
# 删除残留目录
if fileitem:
@@ -677,6 +694,8 @@ class TransferChain(ChainBase):
epformat: EpisodeFormat = None,
min_filesize: int = 0,
scrape: bool = None,
library_type_folder: bool = None,
library_category_folder: bool = None,
force: bool = False) -> Tuple[bool, Union[str, list]]:
"""
手动整理,支持复杂条件,带进度显示
@@ -691,6 +710,8 @@ class TransferChain(ChainBase):
:param epformat: 剧集格式
:param min_filesize: 最小文件大小(MB)
:param scrape: 是否刮削元数据
:param library_type_folder: 是否按类型建立目录
:param library_category_folder: 是否按类别建立目录
:param force: 是否强制整理
"""
logger.info(f"手动整理:{fileitem.path} ...")
@@ -719,6 +740,8 @@ class TransferChain(ChainBase):
epformat=epformat,
min_filesize=min_filesize,
scrape=scrape,
library_type_folder=library_type_folder,
library_category_folder=library_category_folder,
force=force,
)
if not state:
@@ -737,6 +760,8 @@ class TransferChain(ChainBase):
epformat=epformat,
min_filesize=min_filesize,
scrape=scrape,
library_type_folder=library_type_folder,
library_category_folder=library_category_folder,
force=force)
return state, errmsg

View File

@@ -8,7 +8,7 @@ from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Type
from dotenv import set_key
from pydantic import BaseModel, BaseSettings, validator
from pydantic import BaseModel, BaseSettings, validator, Field
from app.log import logger
from app.utils.system import SystemUtils
@@ -36,7 +36,7 @@ class ConfigModel(BaseModel):
# RESOURCE密钥
RESOURCE_SECRET_KEY: str = secrets.token_urlsafe(32)
# 允许的域名
ALLOWED_HOSTS: list = ["*"]
ALLOWED_HOSTS: list = Field(default_factory=lambda: ["*"])
# TOKEN过期时间
ACCESS_TOKEN_EXPIRE_MINUTES: int = 60 * 24 * 8
# RESOURCE_TOKEN过期时间
@@ -114,29 +114,39 @@ class ConfigModel(BaseModel):
# 是否启用DOH解析域名
DOH_ENABLE: bool = True
# 使用 DOH 解析的域名列表
DOH_DOMAINS: str = "api.themoviedb.org,api.tmdb.org,webservice.fanart.tv,api.github.com,github.com,raw.githubusercontent.com,api.telegram.org"
DOH_DOMAINS: str = ("api.themoviedb.org,"
"api.tmdb.org,"
"webservice.fanart.tv,"
"api.github.com,"
"github.com,"
"raw.githubusercontent.com,"
"api.telegram.org")
# DOH 解析服务器列表
DOH_RESOLVERS: str = "1.0.0.1,1.1.1.1,9.9.9.9,149.112.112.112"
# 支持的后缀格式
RMT_MEDIAEXT: list = ['.mp4', '.mkv', '.ts', '.iso',
'.rmvb', '.avi', '.mov', '.mpeg',
'.mpg', '.wmv', '.3gp', '.asf',
'.m4v', '.flv', '.m2ts', '.strm',
'.tp', '.f4v']
RMT_MEDIAEXT: list = Field(
default_factory=lambda: ['.mp4', '.mkv', '.ts', '.iso',
'.rmvb', '.avi', '.mov', '.mpeg',
'.mpg', '.wmv', '.3gp', '.asf',
'.m4v', '.flv', '.m2ts', '.strm',
'.tp', '.f4v']
)
# 支持的字幕文件后缀格式
RMT_SUBEXT: list = ['.srt', '.ass', '.ssa', '.sup']
RMT_SUBEXT: list = Field(default_factory=lambda: ['.srt', '.ass', '.ssa', '.sup'])
# 支持的音轨文件后缀格式
RMT_AUDIO_TRACK_EXT: list = ['.mka']
RMT_AUDIO_TRACK_EXT: list = Field(default_factory=lambda: ['.mka'])
# 音轨文件后缀格式
RMT_AUDIOEXT: list = ['.aac', '.ac3', '.amr', '.caf', '.cda', '.dsf',
'.dff', '.kar', '.m4a', '.mp1', '.mp2', '.mp3',
'.mid', '.mod', '.mka', '.mpc', '.nsf', '.ogg',
'.pcm', '.rmi', '.s3m', '.snd', '.spx', '.tak',
'.tta', '.vqf', '.wav', '.wma',
'.aifc', '.aiff', '.alac', '.adif', '.adts',
'.flac', '.midi', '.opus', '.sfalc']
RMT_AUDIOEXT: list = Field(
default_factory=lambda: ['.aac', '.ac3', '.amr', '.caf', '.cda', '.dsf',
'.dff', '.kar', '.m4a', '.mp1', '.mp2', '.mp3',
'.mid', '.mod', '.mka', '.mpc', '.nsf', '.ogg',
'.pcm', '.rmi', '.s3m', '.snd', '.spx', '.tak',
'.tta', '.vqf', '.wav', '.wma',
'.aifc', '.aiff', '.alac', '.adif', '.adts',
'.flac', '.midi', '.opus', '.sfalc']
)
# 下载器临时文件后缀
DOWNLOAD_TMPEXT: list = ['.!qb', '.part']
DOWNLOAD_TMPEXT: list = Field(default_factory=lambda: ['.!qb', '.part'])
# 媒体服务器同步间隔(小时)
MEDIASERVER_SYNC_INTERVAL: int = 6
# 订阅模式
@@ -189,7 +199,10 @@ class ConfigModel(BaseModel):
# 服务器地址,对应 https://github.com/jxxghp/MoviePilot-Server 项目
MP_SERVER_HOST: str = "https://movie-pilot.org"
# 插件市场仓库地址,多个地址使用,分隔,地址以/结尾
PLUGIN_MARKET: str = "https://github.com/jxxghp/MoviePilot-Plugins,https://github.com/thsrite/MoviePilot-Plugins,https://github.com/honue/MoviePilot-Plugins,https://github.com/InfinityPacer/MoviePilot-Plugins"
PLUGIN_MARKET: str = ("https://github.com/jxxghp/MoviePilot-Plugins,"
"https://github.com/thsrite/MoviePilot-Plugins,"
"https://github.com/honue/MoviePilot-Plugins,"
"https://github.com/InfinityPacer/MoviePilot-Plugins")
# 插件安装数据共享
PLUGIN_STATISTIC_SHARE: bool = True
# 是否开启插件热加载
@@ -206,11 +219,27 @@ class ConfigModel(BaseModel):
BIG_MEMORY_MODE: bool = False
# 全局图片缓存,将媒体图片缓存到本地
GLOBAL_IMAGE_CACHE: bool = False
# 是否启用编码探测的性能模式
ENCODING_DETECTION_PERFORMANCE_MODE: bool = True
# 编码探测的最低置信度阈值
ENCODING_DETECTION_MIN_CONFIDENCE: float = 0.8
# 允许的图片缓存域名
SECURITY_IMAGE_DOMAINS: List[str] = ["image.tmdb.org", "static-mdb.v.geilijiasu.com", "doubanio.com", "lain.bgm.tv",
"raw.githubusercontent.com", "github.com"]
SECURITY_IMAGE_DOMAINS: List[str] = Field(
default_factory=lambda: ["image.tmdb.org",
"static-mdb.v.geilijiasu.com",
"doubanio.com",
"lain.bgm.tv",
"raw.githubusercontent.com",
"github.com"]
)
# 允许的图片文件后缀格式
SECURITY_IMAGE_SUFFIXES: List[str] = [".jpg", ".jpeg", ".png", ".webp", ".gif", ".svg"]
SECURITY_IMAGE_SUFFIXES: List[str] = Field(
default_factory=lambda: [".jpg", ".jpeg", ".png", ".webp", ".gif", ".svg"]
)
# 重命名时支持的S0别名
RENAME_FORMAT_S0_NAMES: List[str] = Field(
default_factory=lambda: ["Specials", "SPs"]
)
class Settings(BaseSettings, ConfigModel):
@@ -345,10 +374,9 @@ class Settings(BaseSettings, ConfigModel):
logger.warning(message)
if field.name in os.environ:
if is_converted:
message = f"配置项 '{field.name}' 已在环境变量中设置,请手动更新以保持一致性"
logger.warning(message)
return False, message
message = f"配置项 '{field.name}' 已在环境变量中设置,请手动更新以保持一致性"
logger.warning(message)
return False, message
else:
set_key(SystemUtils.get_env_path(), field.name, str(converted_value) if converted_value is not None else "")
if is_converted:
@@ -372,7 +400,7 @@ class Settings(BaseSettings, ConfigModel):
field.default, key)
# 如果没有抛出异常,则统一使用 converted_value 进行更新
if needs_update or str(value) != str(converted_value):
success, message = self.update_env_config(field, original_value, converted_value)
success, message = self.update_env_config(field, value, converted_value)
# 仅成功更新配置时,才更新内存
if success:
setattr(self, key, converted_value)
@@ -437,22 +465,32 @@ class Settings(BaseSettings, ConfigModel):
@property
def CACHE_CONF(self):
"""
{
"torrents": "缓存种子数量",
"refresh": "订阅刷新处理数量",
"tmdb": "TMDB请求缓存数量",
"douban": "豆瓣请求缓存数量",
"fanart": "Fanart请求缓存数量",
"meta": "元数据缓存过期时间(秒)"
}
"""
if self.BIG_MEMORY_MODE:
return {
"torrents": 200,
"refresh": 100,
"tmdb": 1024,
"refresh": 50,
"torrents": 100,
"douban": 512,
"fanart": 512,
"meta": (self.META_CACHE_EXPIRE or 168) * 3600
"meta": (self.META_CACHE_EXPIRE or 24) * 3600
}
return {
"torrents": 100,
"refresh": 50,
"tmdb": 256,
"refresh": 30,
"torrents": 50,
"douban": 256,
"fanart": 128,
"meta": (self.META_CACHE_EXPIRE or 72) * 3600
"meta": (self.META_CACHE_EXPIRE or 2) * 3600
}
@property

View File

@@ -84,7 +84,6 @@ class EventManager(metaclass=Singleton):
self.__disabled_handlers = set() # 禁用的事件处理器集合
self.__disabled_classes = set() # 禁用的事件处理器类集合
self.__lock = threading.Lock() # 线程锁
self.__processing_events = {} # 用于记录当前正在处理的事件 {event_hash: event}
def start(self):
"""
@@ -130,14 +129,6 @@ class EventManager(metaclass=Singleton):
for handler in handlers.values()
)
@staticmethod
def __get_event_hash(event: Event) -> str:
    """
    Compute a stable, unique identifier (hash) for an event, derived
    from its type value and its payload's string representation.
    """
    fingerprint = str(event.event_type.value) + str(event.event_data)
    # uuid5 over the DNS namespace yields a deterministic name-based UUID.
    return str(uuid.uuid5(uuid.NAMESPACE_DNS, fingerprint))
def send_event(self, etype: Union[EventType, ChainEventType], data: Optional[Union[Dict, ChainEventData]] = None,
priority: int = DEFAULT_EVENT_PRIORITY) -> Optional[Event]:
"""
@@ -148,12 +139,6 @@ class EventManager(metaclass=Singleton):
:return: 如果是链式事件,返回处理后的事件数据;否则返回 None
"""
event = Event(etype, data, priority)
event_hash = self.__get_event_hash(event)
with self.__lock:
if event_hash in self.__processing_events:
logger.debug(f"Duplicate event ignored: {event}")
return None
self.__processing_events[event_hash] = event
if isinstance(etype, EventType):
self.__trigger_broadcast_event(event)
elif isinstance(etype, ChainEventType):
@@ -248,23 +233,29 @@ class EventManager(metaclass=Singleton):
可视化所有事件处理器,包括是否被禁用的状态
:return: 处理器列表,包含事件类型、处理器标识符、优先级(如果有)和状态
"""
def parse_handler_data(data):
    """
    Normalize subscriber data into a (priority, handler) pair.

    :param data: subscriber entry; either a 2-tuple (priority, handler)
                 or a bare handler value
    :return: (priority, handler); priority is None when absent
    """
    has_priority = isinstance(data, tuple) and len(data) == 2
    return data if has_priority else (None, data)
handler_info = []
# 统一处理广播事件和链式事件
for event_type, subscribers in {**self.__broadcast_subscribers, **self.__chain_subscribers}.items():
for handler_data in subscribers:
if isinstance(subscribers, dict):
priority, handler = handler_data
else:
priority = None
handler = handler_data
# 获取处理器的唯一标识符
handler_id = self.__get_handler_identifier(handler)
for handler_identifier, handler_data in subscribers.items():
# 解析优先级和处理器
priority, handler = parse_handler_data(handler_data)
# 检查处理器的启用状态
status = "enabled" if self.__is_handler_enabled(handler) else "disabled"
# 构建处理器信息字典
handler_dict = {
"event_type": event_type.value,
"handler_identifier": handler_id,
"handler_identifier": handler_identifier,
"status": status
}
if priority is not None:
@@ -335,14 +326,9 @@ class EventManager(metaclass=Singleton):
"""
触发链式事件,按顺序调用订阅的处理器,并记录处理耗时
"""
try:
logger.debug(f"Triggering synchronous chain event: {event}")
dispatch = self.__dispatch_chain_event(event)
return event if dispatch else None
finally:
event_hash = self.__get_event_hash(event)
with self.__lock:
self.__processing_events.pop(event_hash, None)
logger.debug(f"Triggering synchronous chain event: {event}")
dispatch = self.__dispatch_chain_event(event)
return event if dispatch else None
def __trigger_broadcast_event(self, event: Event):
"""
@@ -361,8 +347,17 @@ class EventManager(metaclass=Singleton):
if not handlers:
logger.debug(f"No handlers found for chain event: {event}")
return False
# 过滤出启用的处理器
enabled_handlers = {handler_id: (priority, handler) for handler_id, (priority, handler) in handlers.items()
if self.__is_handler_enabled(handler)}
if not enabled_handlers:
logger.debug(f"No enabled handlers found for chain event: {event}. Skipping execution.")
return False
self.__log_event_lifecycle(event, "Started")
for handler_id, (priority, handler) in handlers.items():
for handler_id, (priority, handler) in enabled_handlers.items():
start_time = time.time()
self.__safe_invoke_handler(handler, event)
logger.debug(
@@ -383,9 +378,6 @@ class EventManager(metaclass=Singleton):
return
for handler_id, handler in handlers.items():
self.__executor.submit(self.__safe_invoke_handler, handler, event)
event_hash = self.__get_event_hash(event)
with self.__lock:
self.__processing_events.pop(event_hash, None)
def __safe_invoke_handler(self, handler: Callable, event: Event):
"""

View File

@@ -1,11 +1,12 @@
import traceback
from typing import Generator, Optional, Tuple, Any
from typing import Generator, Optional, Tuple, Any, Union
from app.core.config import settings
from app.core.event import eventmanager
from app.helper.module import ModuleHelper
from app.log import logger
from app.schemas.types import EventType, ModuleType
from app.schemas.types import EventType, ModuleType, DownloaderType, MediaServerType, MessageChannel, StorageSchema, \
OtherModulesType
from app.utils.object import ObjectUtils
from app.utils.singleton import Singleton
@@ -19,6 +20,8 @@ class ModuleManager(metaclass=Singleton):
_modules: dict = {}
# 运行态模块列表
_running_modules: dict = {}
# 子模块类型集合
SubType = Union[DownloaderType, MediaServerType, MessageChannel, StorageSchema, OtherModulesType]
def __init__(self):
self.load_modules()
@@ -135,6 +138,17 @@ class ModuleManager(metaclass=Singleton):
and module.get_type() == module_type:
yield module
def get_running_subtype_module(self, module_subtype: "SubType") -> Generator:
    """
    Yield running modules whose subtype matches the given one.

    :param module_subtype: subtype to match (downloader, media server,
                           message channel, storage, or other module type)
    :return: generator of matching running module instances
    """
    if not self._running_modules:
        # Bare return ends the generator. Returning a value (the old
        # `return []`) from a generator is silently discarded
        # (stored on StopIteration.value) and only misleads readers.
        return
    for module in self._running_modules.values():
        # Only modules that expose get_subtype() can be matched.
        if hasattr(module, "get_subtype") \
                and module.get_subtype() == module_subtype:
            yield module
def get_module(self, module_id: str) -> Any:
"""
根据模块id获取模块

View File

@@ -526,7 +526,8 @@ class PluginManager(metaclass=Singleton):
"name": "服务名称",
"trigger": "触发器cron、interval、date、CronTrigger.from_crontab()",
"func": self.xxx,
"kwargs": {} # 定时器参数
"kwargs": {} # 定时器参数,
"func_kwargs": {} # 方法参数
}]
"""
ret_services = []

View File

@@ -53,7 +53,9 @@ class DownloadHistory(Base):
@staticmethod
@db_query
def get_by_hash(db: Session, download_hash: str):
    """
    Return the most recent download-history row for a torrent hash.

    Removes the unreachable duplicate `return` (a merge/diff artifact)
    that short-circuited before the `order_by(date.desc())` variant,
    so the latest record by date is actually the one returned.

    :param db: database session
    :param download_hash: downloader torrent hash
    :return: latest matching DownloadHistory row, or None
    """
    return (
        db.query(DownloadHistory)
        .filter(DownloadHistory.download_hash == download_hash)
        .order_by(DownloadHistory.date.desc())
        .first()
    )
@staticmethod
@db_query

View File

@@ -81,7 +81,7 @@ class SiteUserData(Base):
func.max(SiteUserData.updated_day).label('latest_update_day')
)
.group_by(SiteUserData.domain)
.filter(SiteUserData.err_msg == None)
.filter(SiteUserData.err_msg is None)
.subquery()
)

View File

@@ -54,7 +54,7 @@ class Subscribe(Base):
lack_episode = Column(Integer)
# 附加信息
note = Column(JSON)
# 状态N-新建 R-订阅中
# 状态N-新建 R-订阅中 P-待定 S-暂停
state = Column(String, nullable=False, index=True, default='N')
# 最后更新时间
last_update = Column(String)
@@ -98,7 +98,13 @@ class Subscribe(Base):
@staticmethod
@db_query
def get_by_state(db: Session, state: str):
    """
    Query subscriptions by state.

    Drops the initial unconditional query (a diff artifact) whose result
    was always overwritten — it executed a full table scan for nothing.

    :param db: database session
    :param state: subscription state; multiple states may be given
                  comma-separated (e.g. "R,P"). Empty/None returns all.
    :return: list of matching Subscribe rows
    """
    if not state:
        # No state given: return every subscription
        result = db.query(Subscribe).all()
    else:
        # Support several comma-separated states; tolerate stray
        # whitespace and empty segments (e.g. "R, P" or "R,,P")
        states = [s.strip() for s in state.split(',') if s.strip()]
        result = db.query(Subscribe).filter(Subscribe.state.in_(states)).all()
    return list(result)
@staticmethod

View File

@@ -83,7 +83,8 @@ class SubscribeOper(DbOper):
更新订阅
"""
subscribe = self.get(sid)
subscribe.update(self._db, payload)
if subscribe:
subscribe.update(self._db, payload)
return subscribe
def list_by_tmdbid(self, tmdbid: int, season: int = None) -> List[Subscribe]:

View File

@@ -16,7 +16,7 @@ class PlaywrightHelper:
"""
sync_stealth(page, pure=True)
page.goto(url)
return sync_cf_retry(page)
return sync_cf_retry(page)[0]
def action(self, url: str,
callback: Callable,

View File

@@ -5,6 +5,7 @@ from app import schemas
from app.core.context import MediaInfo
from app.db.systemconfig_oper import SystemConfigOper
from app.schemas.types import SystemConfigKey
from app.utils.system import SystemUtils
class DirectoryHelper:
@@ -48,16 +49,18 @@ class DirectoryHelper:
"""
return [d for d in self.get_library_dirs() if d.library_storage == "local"]
def get_dir(self, media: MediaInfo, storage: str = "local",
src_path: Path = None, dest_path: Path = None, fileitem: schemas.FileItem = None
def get_dir(self, media: MediaInfo, include_unsorted: bool = False,
storage: str = None, src_path: Path = None,
target_storage: str = None, dest_path: Path = None
) -> Optional[schemas.TransferDirectoryConf]:
"""
根据媒体信息获取下载目录、媒体库目录配置
:param media: 媒体信息
:param storage: 存储类型
:param include_unsorted: 包含不整理目录
:param storage: 源存储类型
:param target_storage: 目标存储类型
:param src_path: 源目录,有值时直接匹配
:param dest_path: 目标目录,有值时直接匹配
:param fileitem: 文件项,使用文件路径匹配
"""
# 处理类型
if not media:
@@ -65,35 +68,43 @@ class DirectoryHelper:
# 电影/电视剧
media_type = media.type.value
dirs = self.get_dirs()
# 已匹配的目录
matched_dirs: List[schemas.TransferDirectoryConf] = []
# 按照配置顺序查找
for d in dirs:
# 没有启用整理的目录
if not d.monitor_type:
if not d.monitor_type and not include_unsorted:
continue
# 存储类型不匹配
# 存储类型不匹配
if storage and d.storage != storage:
continue
# 下载目录
download_path = Path(d.download_path)
# 媒体库目录
library_path = Path(d.library_path)
# 有源目录时,源目录不匹配下载目录
if src_path and not src_path.is_relative_to(download_path):
# 目标存储类型不匹配
if target_storage and d.library_storage != target_storage:
continue
# 有文件项时,文件项不匹配下载目录
if fileitem and not Path(fileitem.path).is_relative_to(download_path):
# 有源目录时,源目录不匹配下载目录
if src_path and not src_path.is_relative_to(d.download_path):
continue
# 有目标目录时,目标目录不匹配媒体库目录
if dest_path and not dest_path.is_relative_to(library_path):
if dest_path and dest_path != Path(d.library_path):
continue
# 目录类型为全部的,符合条件
if not d.media_type:
return d
matched_dirs.append(d)
continue
# 目录类型相等,目录类别为全部,符合条件
if d.media_type == media_type and not d.media_category:
return d
matched_dirs.append(d)
continue
# 目录类型相等,目录类别相等,符合条件
if d.media_type == media_type and d.media_category == media.category:
return d
matched_dirs.append(d)
continue
if matched_dirs:
if src_path:
# 优先源目录同盘
for matched_dir in matched_dirs:
matched_path = Path(matched_dir.download_path)
if SystemUtils.is_same_disk(matched_path, src_path):
return matched_dir
return matched_dirs[0]
return None

View File

@@ -290,7 +290,7 @@ class TorrentHelper(metaclass=Singleton):
if not file_path.suffix or file_path.suffix.lower() not in settings.RMT_MEDIAEXT:
continue
# 只使用文件名识别
meta = MetaInfo(file_path.stem)
meta = MetaInfo(file_path.name)
if not meta.begin_episode:
continue
episodes = list(set(episodes).union(set(meta.episode_list)))

View File

@@ -2,8 +2,9 @@ from abc import abstractmethod, ABCMeta
from typing import Generic, Tuple, Union, TypeVar, Type, Dict, Optional, Callable
from app.helper.service import ServiceConfigHelper
from app.schemas import Notification, MessageChannel, NotificationConf, MediaServerConf, DownloaderConf
from app.schemas.types import ModuleType
from app.schemas import Notification, NotificationConf, MediaServerConf, DownloaderConf
from app.schemas.types import ModuleType, DownloaderType, MediaServerType, MessageChannel, StorageSchema, \
OtherModulesType
class _ModuleBase(metaclass=ABCMeta):
@@ -43,6 +44,14 @@ class _ModuleBase(metaclass=ABCMeta):
"""
pass
@staticmethod
@abstractmethod
def get_subtype() -> Union[DownloaderType, MediaServerType, MessageChannel, StorageSchema, OtherModulesType]:
    """
    Get the module subtype (downloader, media server, message channel,
    storage type, or other miscellaneous module type).
    """
    pass
@staticmethod
@abstractmethod
def get_priority() -> int:

View File

@@ -7,7 +7,7 @@ from app.core.meta import MetaBase
from app.log import logger
from app.modules import _ModuleBase
from app.modules.bangumi.bangumi import BangumiApi
from app.schemas.types import ModuleType
from app.schemas.types import ModuleType, MediaRecognizeType
from app.utils.http import RequestUtils
@@ -44,6 +44,13 @@ class BangumiModule(_ModuleBase):
获取模块类型
"""
return ModuleType.MediaRecognize
@staticmethod
def get_subtype() -> MediaRecognizeType:
    """
    Get the module subtype: the Bangumi media recognizer.
    """
    return MediaRecognizeType.Bangumi
@staticmethod
def get_priority() -> int:

View File

@@ -15,7 +15,7 @@ from app.modules.douban.apiv2 import DoubanApi
from app.modules.douban.douban_cache import DoubanCache
from app.modules.douban.scraper import DoubanScraper
from app.schemas import MediaPerson, APIRateLimitException
from app.schemas.types import MediaType, ModuleType
from app.schemas.types import MediaType, ModuleType, MediaRecognizeType
from app.utils.common import retry
from app.utils.http import RequestUtils
from app.utils.limit import rate_limit_exponential
@@ -59,6 +59,13 @@ class DoubanModule(_ModuleBase):
"""
return ModuleType.MediaRecognize
@staticmethod
def get_subtype() -> MediaRecognizeType:
    """
    Get the module subtype: the Douban media recognizer.
    """
    return MediaRecognizeType.Douban
@staticmethod
def get_priority() -> int:
"""

View File

@@ -3,11 +3,11 @@ import base64
import hashlib
import hmac
from datetime import datetime
from functools import lru_cache
from random import choice
from urllib import parse
import requests
from cachetools import TTLCache, cached
from app.core.config import settings
from app.utils.http import RequestUtils
@@ -160,12 +160,12 @@ class DoubanApi(metaclass=Singleton):
self._session = requests.Session()
@classmethod
def __sign(cls, url: str, ts: int, method='GET') -> str:
def __sign(cls, url: str, ts: str, method='GET') -> str:
"""
签名
"""
url_path = parse.urlparse(url).path
raw_sign = '&'.join([method.upper(), parse.quote(url_path, safe=''), str(ts)])
raw_sign = '&'.join([method.upper(), parse.quote(url_path, safe=''), ts])
return base64.b64encode(
hmac.new(
cls._api_secret_key.encode(),
@@ -174,7 +174,7 @@ class DoubanApi(metaclass=Singleton):
).digest()
).decode()
@lru_cache(maxsize=settings.CACHE_CONF.get('douban'))
@cached(cache=TTLCache(maxsize=settings.CACHE_CONF["douban"], ttl=settings.CACHE_CONF["meta"]))
def __invoke(self, url: str, **kwargs) -> dict:
"""
GET请求
@@ -203,7 +203,7 @@ class DoubanApi(metaclass=Singleton):
return resp.json()
return resp.json() if resp else {}
@lru_cache(maxsize=settings.CACHE_CONF.get('douban'))
@cached(cache=TTLCache(maxsize=settings.CACHE_CONF["douban"], ttl=settings.CACHE_CONF["meta"]))
def __post(self, url: str, **kwargs) -> dict:
"""
POST请求

View File

@@ -16,7 +16,7 @@ from app.schemas.types import MediaType
lock = RLock()
CACHE_EXPIRE_TIMESTAMP_STR = "cache_expire_timestamp"
EXPIRE_TIMESTAMP = settings.CACHE_CONF.get('meta')
EXPIRE_TIMESTAMP = settings.CACHE_CONF["meta"]
class DoubanCache(metaclass=Singleton):
@@ -77,7 +77,7 @@ class DoubanCache(metaclass=Singleton):
@return: 被删除的缓存内容
"""
with lock:
return self._meta_data.pop(key, None)
return self._meta_data.pop(key, {})
def delete_by_doubanid(self, doubanid: str) -> None:
"""

View File

@@ -7,7 +7,7 @@ from app.log import logger
from app.modules import _MediaServerBase, _ModuleBase
from app.modules.emby.emby import Emby
from app.schemas.event import AuthCredentials, AuthInterceptCredentials
from app.schemas.types import MediaType, ModuleType, ChainEventType
from app.schemas.types import MediaType, ModuleType, ChainEventType, MediaServerType
class EmbyModule(_ModuleBase, _MediaServerBase[Emby]):
@@ -30,6 +30,13 @@ class EmbyModule(_ModuleBase, _MediaServerBase[Emby]):
"""
return ModuleType.MediaServer
@staticmethod
def get_subtype() -> MediaServerType:
    """
    Get the module subtype: the Emby media server.
    """
    return MediaServerType.Emby
@staticmethod
def get_priority() -> int:
"""
@@ -66,16 +73,26 @@ class EmbyModule(_ModuleBase, _MediaServerBase[Emby]):
logger.info(f"Emby服务器 {name} 连接断开,尝试重连 ...")
server.reconnect()
def user_authenticate(self, credentials: AuthCredentials) -> Optional[AuthCredentials]:
def user_authenticate(self, credentials: AuthCredentials, service_name: Optional[str] = None) \
-> Optional[AuthCredentials]:
"""
使用Emby用户辅助完成用户认证
:param credentials: 认证数据
:param service_name: 指定要认证的媒体服务器名称,若为 None 则认证所有服务
:return: 认证数据
"""
# Emby认证
if not credentials or credentials.grant_type != "password":
return None
for name, server in self.get_instances().items():
# 确定要认证的服务器列表
if service_name:
# 如果指定了服务名,获取该服务实例
servers = [(service_name, server)] if (server := self.get_instance(service_name)) else []
else:
# 如果没有指定服务名,遍历所有服务
servers = self.get_instances().items()
# 遍历要认证的服务器
for name, server in servers:
# 触发认证拦截事件
intercept_event = eventmanager.send_event(
etype=ChainEventType.AuthIntercept,

View File

@@ -1,12 +1,13 @@
import re
from functools import lru_cache
from typing import Optional, Tuple, Union
from cachetools import TTLCache, cached
from app.core.context import MediaInfo, settings
from app.log import logger
from app.modules import _ModuleBase
from app.schemas.types import MediaType, ModuleType, OtherModulesType
from app.utils.http import RequestUtils
from app.schemas.types import MediaType, ModuleType
class FanartModule(_ModuleBase):
@@ -342,6 +343,13 @@ class FanartModule(_ModuleBase):
"""
return ModuleType.Other
@staticmethod
def get_subtype() -> OtherModulesType:
    """
    Get the module subtype: the Fanart artwork provider.
    """
    return OtherModulesType.Fanart
@staticmethod
def get_priority() -> int:
"""
@@ -376,20 +384,30 @@ class FanartModule(_ModuleBase):
continue
if not isinstance(images, list):
continue
# 按欢迎程度倒排
images.sort(key=lambda x: int(x.get('likes', 0)), reverse=True)
# 取第一张图片
image_obj = images[0]
# 图片属性xx_path
image_name = self.__name(name)
image_season = image_obj.get('season')
# 设置图片
if image_name.startswith("season") and image_season:
# 季图片格式 seasonxx-poster
image_name = f"season{str(image_season).rjust(2, '0')}-{image_name[6:]}"
if not mediainfo.get_image(image_name):
# 没有图片才设置
mediainfo.set_image(image_name, image_obj.get('url'))
if image_name.startswith("season"):
# 季图片图片格式seasonxx-xxxx/season-specials-xxxx
for image_obj in images:
image_season = image_obj.get('season')
if image_season is not None:
# 包括poster,thumb,banner
if image_season == '0':
season_image = f"season-specials-{image_name[6:]}"
else:
season_image = f"season{str(image_season).rjust(2, '0')}-{image_name[6:]}"
# 设置图片,没有图片才设置
if not mediainfo.get_image(season_image):
mediainfo.set_image(season_image, image_obj.get('url'))
else:
# 其他图片,按欢迎程度倒排
images.sort(key=lambda x: int(x.get('likes', 0)), reverse=True)
# 取第一张图片
image_obj = images[0]
# 设置图片,没有图片才设置
if not mediainfo.get_image(image_name):
mediainfo.set_image(image_name, image_obj.get('url'))
return mediainfo
@@ -404,7 +422,7 @@ class FanartModule(_ModuleBase):
return result
@classmethod
@lru_cache(maxsize=settings.CACHE_CONF.get('fanart'))
@cached(cache=TTLCache(maxsize=settings.CACHE_CONF["fanart"], ttl=settings.CACHE_CONF["meta"]))
def __request_fanart(cls, media_type: MediaType, queryid: Union[str, int]) -> Optional[dict]:
if media_type == MediaType.MOVIE:
image_url = cls._movie_url % queryid

View File

@@ -1,4 +1,3 @@
import copy
import re
from pathlib import Path
from threading import Lock
@@ -8,6 +7,7 @@ from jinja2 import Template
from app.core.config import settings
from app.core.context import MediaInfo
from app.core.event import eventmanager
from app.core.meta import MetaBase
from app.core.metainfo import MetaInfo, MetaInfoPath
from app.helper.directory import DirectoryHelper
@@ -17,7 +17,8 @@ from app.log import logger
from app.modules import _ModuleBase
from app.modules.filemanager.storages import StorageBase
from app.schemas import TransferInfo, ExistMediaInfo, TmdbEpisode, TransferDirectoryConf, FileItem, StorageUsage
from app.schemas.types import MediaType, ModuleType
from app.schemas.event import TransferRenameEventData
from app.schemas.types import MediaType, ModuleType, ChainEventType, OtherModulesType
from app.utils.system import SystemUtils
lock = Lock()
@@ -51,6 +52,13 @@ class FileManagerModule(_ModuleBase):
"""
return ModuleType.Other
@staticmethod
def get_subtype() -> OtherModulesType:
"""
获取模块子类型
"""
return OtherModulesType.FileManager
@staticmethod
def get_priority() -> int:
"""
@@ -131,8 +139,6 @@ class FileManagerModule(_ModuleBase):
)
return str(path)
pass
def save_config(self, storage: str, conf: Dict) -> None:
"""
保存存储配置
@@ -219,7 +225,8 @@ class FileManagerModule(_ModuleBase):
and f".{t.extension.lower()}" in extensions):
return True
elif t.type == "dir":
return __any_file(t)
if __any_file(t):
return True
return False
# 返回结果
@@ -322,6 +329,7 @@ class FileManagerModule(_ModuleBase):
target_directory: TransferDirectoryConf = None,
target_storage: str = None, target_path: Path = None,
transfer_type: str = None, scrape: bool = None,
library_type_folder: bool = None, library_category_folder: bool = None,
episodes_info: List[TmdbEpisode] = None) -> TransferInfo:
"""
文件整理
@@ -333,6 +341,8 @@ class FileManagerModule(_ModuleBase):
:param target_path: 目标路径
:param transfer_type: 转移模式
:param scrape: 是否刮削元数据
:param library_type_folder: 是否按媒体类型创建目录
:param library_category_folder: 是否按媒体类别创建目录
:param episodes_info: 当前季的全部集信息
:return: {path, target_path, message}
"""
@@ -349,37 +359,36 @@ class FileManagerModule(_ModuleBase):
message=f"{target_path} 不是有效目录")
# 获取目标路径
if target_directory:
# 拼装媒体库一、二级子目录
target_path = self.__get_dest_dir(mediainfo=mediainfo, target_dir=target_directory)
# 目标存储类型
if not target_storage:
target_storage = target_directory.library_storage
# 整理方式
if not transfer_type:
transfer_type = target_directory.transfer_type
if not transfer_type:
logger.error(f"{target_directory.name} 未设置整理方式")
return TransferInfo(success=False,
fileitem=fileitem,
message=f"{target_directory.name} 未设置整理方式")
# 是否需要刮削
if scrape is None:
need_scrape = target_directory.scraping
else:
need_scrape = scrape
# 是否需要重命名
need_rename = target_directory.renaming
# 是否需要通知
need_notify = target_directory.notify
# 覆盖模式
overwrite_mode = target_directory.overwrite_mode
# 是否需要刮削
if scrape is None:
need_scrape = target_directory.scraping
else:
need_scrape = scrape
# 目标存储类型
if not target_storage:
target_storage = target_directory.library_storage
# 拼装媒体库一、二级子目录
target_path = self.__get_dest_dir(mediainfo=mediainfo, target_dir=target_directory,
need_type_folder=library_type_folder,
need_category_folder=library_category_folder)
elif target_path:
# 手动整理的场景,有自定义目标路径
need_scrape = scrape or False
need_rename = True
need_notify = False
overwrite_mode = "never"
logger.warn(f"{target_path} 为自定义路径, 通知将不会发送")
# 手动整理的场景,有自定义目标路径
target_path = self.__get_dest_path(mediainfo=mediainfo, target_path=target_path,
need_type_folder=library_type_folder,
need_category_folder=library_category_folder)
else:
# 未找到有效的媒体库目录
logger.error(
@@ -387,9 +396,14 @@ class FileManagerModule(_ModuleBase):
return TransferInfo(success=False,
fileitem=fileitem,
message="未找到有效的媒体库目录")
logger.info(f"获取整理目标路径:【{target_storage}{target_path}")
# 整理方式
if not transfer_type:
logger.error(f"{target_directory.name} 未设置整理方式")
return TransferInfo(success=False,
fileitem=fileitem,
message=f"{target_directory.name} 未设置整理方式")
# 整理
logger.info(f"获取整理目标路径:【{target_storage}{target_path}")
return self.transfer_media(fileitem=fileitem,
in_meta=meta,
mediainfo=mediainfo,
@@ -463,9 +477,9 @@ class FileManagerModule(_ModuleBase):
target_file.parent.mkdir(parents=True)
# 本地到本地
if transfer_type == "copy":
state = source_oper.copy(fileitem, target_file)
state = source_oper.copy(fileitem, target_file.parent, target_file.name)
elif transfer_type == "move":
state = source_oper.move(fileitem, target_file)
state = source_oper.move(fileitem, target_file.parent, target_file.name)
elif transfer_type == "link":
state = source_oper.link(fileitem, target_file)
elif transfer_type == "softlink":
@@ -493,7 +507,7 @@ class FileManagerModule(_ModuleBase):
else:
return None, f"{fileitem.path} 上传 {target_storage} 失败"
else:
return None, f"{target_file.parent} {target_storage} 目录获取失败"
return None, f"{target_storage}{target_file.parent} 目录获取失败"
elif transfer_type == "move":
# 移动
# 根据目的路径获取文件夹
@@ -508,7 +522,7 @@ class FileManagerModule(_ModuleBase):
else:
return None, f"{fileitem.path} 上传 {target_storage} 失败"
else:
return None, f"{target_file.parent} {target_storage} 目录获取失败"
return None, f"{target_storage}{target_file.parent} 目录获取失败"
elif fileitem.storage != "local" and target_storage == "local":
# 网盘到本地
if target_file.exists():
@@ -532,25 +546,28 @@ class FileManagerModule(_ModuleBase):
return None, f"{fileitem.path} {fileitem.storage} 下载失败"
elif fileitem.storage == target_storage:
# 同一网盘
# 根据目的路径获取文件夹
target_diritem = target_oper.get_folder(target_file.parent)
if target_diritem:
# 重命名文件
if target_oper.rename(fileitem, target_file.name):
# 移动文件到新目录
if source_oper.move(fileitem, target_diritem):
ret_fileitem = copy.deepcopy(fileitem)
ret_fileitem.path = target_diritem.path + "/" + target_file.name
ret_fileitem.name = target_file.name
ret_fileitem.basename = target_file.stem
ret_fileitem.parent_fileid = target_diritem.fileid
return ret_fileitem, ""
if transfer_type == "copy":
# 复制文件到新目录
target_fileitem = target_oper.get_folder(target_file.parent)
if target_fileitem:
if source_oper.move(fileitem, Path(target_fileitem.path), target_file.name):
return target_oper.get_item(target_file), ""
else:
return None, f"{fileitem.path} {target_storage} 移动文件失败"
return None, f"{target_storage}{fileitem.path} 复制文件失败"
else:
return None, f"{fileitem.path} {target_storage} 重命名文件失败"
return None, f"{target_storage}{target_file.parent} 目录获取失败"
elif transfer_type == "move":
# 移动文件到新目录
target_fileitem = target_oper.get_folder(target_file.parent)
if target_fileitem:
if source_oper.move(fileitem, Path(target_fileitem.path), target_file.name):
return target_oper.get_item(target_file), ""
else:
return None, f"{target_storage}{fileitem.path} 移动文件失败"
else:
return None, f"{target_storage}{target_file.parent} 目录获取失败"
else:
return None, f"{target_file.parent} {target_storage} 目录获取失败"
return None, f"不支持的整理方式:{transfer_type}"
return None, "未知错误"
@@ -685,7 +702,7 @@ class FileManagerModule(_ModuleBase):
return False, errmsg
except Exception as error:
logger.info(f"字幕 {new_file} 出错了,原因: {str(error)}")
return False, ""
return True, ""
def __transfer_audio_track_files(self, fileitem: FileItem, target_storage: str, target_file: Path,
transfer_type: str) -> Tuple[bool, str]:
@@ -709,7 +726,7 @@ class FileManagerModule(_ModuleBase):
file_list: List[FileItem] = storage_oper.list(parent_item)
# 匹配音轨文件
pending_file_list: List[FileItem] = [file for file in file_list
if Path(file.name).stem == org_path.name
if Path(file.name).stem == org_path.stem
and file.type == "file" and file.extension
and f".{file.extension.lower()}" in settings.RMT_AUDIOEXT]
if len(pending_file_list) == 0:
@@ -815,7 +832,8 @@ class FileManagerModule(_ModuleBase):
else:
logger.info(f"正在删除已存在的文件:{target_file}")
target_file.unlink()
logger.info(f"正在整理文件:【{fileitem.storage}{fileitem.path} 到 【{target_storage}{target_file}")
logger.info(f"正在整理文件:【{fileitem.storage}{fileitem.path} 到 【{target_storage}{target_file}"
f"操作类型:{transfer_type}")
new_item, errmsg = self.__transfer_command(fileitem=fileitem,
target_storage=target_storage,
target_file=target_file,
@@ -831,26 +849,43 @@ class FileManagerModule(_ModuleBase):
return None, errmsg
@staticmethod
def __get_dest_dir(mediainfo: MediaInfo, target_dir: TransferDirectoryConf) -> Path:
def __get_dest_path(mediainfo: MediaInfo, target_path: Path,
need_type_folder: bool = False, need_category_folder: bool = False):
"""
获取目标路径
"""
if need_type_folder:
target_path = target_path / mediainfo.type.value
if need_category_folder and mediainfo.category:
target_path = target_path / mediainfo.category
return target_path
@staticmethod
def __get_dest_dir(mediainfo: MediaInfo, target_dir: TransferDirectoryConf,
need_type_folder: bool = None, need_category_folder: bool = None) -> Path:
"""
根据设置并装媒体库目录
:param mediainfo: 媒体信息
:target_dir: 媒体库根目录
:typename_dir: 是否加上类型目录
:need_type_folder: 是否需要按媒体类型创建目录
:need_category_folder: 是否需要按媒体类别创建目录
"""
if not target_dir.media_type and target_dir.library_type_folder:
if need_type_folder is None:
need_type_folder = target_dir.library_type_folder
if need_category_folder is None:
need_category_folder = target_dir.library_category_folder
if not target_dir.media_type and need_type_folder:
# 一级自动分类
library_dir = Path(target_dir.library_path) / mediainfo.type.value
elif target_dir.media_type and target_dir.library_type_folder:
elif target_dir.media_type and need_type_folder:
# 一级手动分类
library_dir = Path(target_dir.library_path) / target_dir.media_type
else:
library_dir = Path(target_dir.library_path)
if not target_dir.media_category and target_dir.library_category_folder and mediainfo.category:
if not target_dir.media_category and need_category_folder and mediainfo.category:
# 二级自动分类
library_dir = library_dir / mediainfo.category
elif target_dir.media_category and target_dir.library_category_folder:
elif target_dir.media_category and need_category_folder:
# 二级手动分类
library_dir = library_dir / target_dir.media_category
@@ -889,6 +924,18 @@ class FileManagerModule(_ModuleBase):
rename_format = settings.TV_RENAME_FORMAT \
if mediainfo.type == MediaType.TV else settings.MOVIE_RENAME_FORMAT
# 计算重命名中的文件夹层数
rename_format_level = len(rename_format.split("/")) - 1
if rename_format_level < 1:
# 重命名格式不合法
logger.error(f"重命名格式不合法:{rename_format}")
return TransferInfo(success=False,
message=f"重命名格式不合法",
fileitem=fileitem,
transfer_type=transfer_type,
need_notify=need_notify)
# 判断是否为文件夹
if fileitem.type == "dir":
# 整理整个目录,一般为蓝光原盘
@@ -969,9 +1016,15 @@ class FileManagerModule(_ModuleBase):
# 目的操作对象
target_oper: StorageBase = self.__get_storage_oper(target_storage)
# 目标目录
target_diritem = target_oper.get_folder(
new_file.parent) if mediainfo.type == MediaType.MOVIE else target_oper.get_folder(
new_file.parent.parent)
target_diritem = target_oper.get_folder(new_file.parents[rename_format_level - 1])
if not target_diritem:
logger.error(f"目标目录 {new_file.parents[rename_format_level - 1]} 获取失败")
return TransferInfo(success=False,
message=f"目标目录 {new_file.parents[rename_format_level - 1]} 获取失败",
fileitem=fileitem,
fail_list=[fileitem.path],
transfer_type=transfer_type,
need_notify=need_notify)
# 目标文件
target_item = target_oper.get_item(new_file)
if target_item:
@@ -1080,7 +1133,14 @@ class FileManagerModule(_ModuleBase):
if episode.episode_number == meta.begin_episode:
episode_title = episode.name
break
# 获取集播出日期
episode_date = None
if meta.begin_episode and episodes_info:
for episode in episodes_info:
if episode.episode_number == meta.begin_episode:
episode_date = episode.air_date
break
return {
# 标题
"title": __convert_invalid_characters(mediainfo.title),
@@ -1130,21 +1190,51 @@ class FileManagerModule(_ModuleBase):
"part": meta.part,
# 剧集标题
"episode_title": __convert_invalid_characters(episode_title),
# 剧集日期根据episodes_info值获取
"episode_date": episode_date,
# 文件后缀
"fileExt": file_ext,
# 自定义占位符
"customization": meta.customization
"customization": meta.customization,
# 文件元数据
"__meta__": meta,
# 识别的媒体信息
"__mediainfo__": mediainfo,
# 当前季的全部集信息
"__episodes_info__": episodes_info,
}
@staticmethod
def get_rename_path(template_string: str, rename_dict: dict, path: Path = None) -> Path:
"""
生成重命名后的完整路径
生成重命名后的完整路径,支持智能重命名事件
:param template_string: Jinja2 模板字符串
:param rename_dict: 渲染上下文,用于替换模板中的变量
:param path: 可选的基础路径,如果提供,将在其基础上拼接生成的路径
:return: 生成的完整路径
"""
# 创建jinja2模板对象
template = Template(template_string)
# 渲染生成的字符串
render_str = template.render(rename_dict)
logger.debug(f"Initial render string: {render_str}")
# 发送智能重命名事件
event_data = TransferRenameEventData(
template_string=template_string,
rename_dict=rename_dict,
render_str=render_str,
path=path
)
event = eventmanager.send_event(ChainEventType.TransferRename, event_data)
# 检查事件返回的结果
if event and event.event_data:
event_data: TransferRenameEventData = event.event_data
if event_data.updated and event_data.updated_str:
logger.debug(f"Render string updated by event: "
f"{render_str} -> {event_data.updated_str} (source: {event_data.source})")
render_str = event_data.updated_str
# 目的路径
if path:
return path / render_str
@@ -1170,17 +1260,19 @@ class FileManagerModule(_ModuleBase):
# 重命名格式
rename_format = settings.TV_RENAME_FORMAT \
if mediainfo.type == MediaType.TV else settings.MOVIE_RENAME_FORMAT
# 获取相对路径(重命名路径)
rel_path = self.get_rename_path(
# 计算重命名中的文件夹层数
rename_format_level = len(rename_format.split("/")) - 1
if rename_format_level < 1:
continue
# 获取路径(重命名路径)
target_path = self.get_rename_path(
path=dir_path,
template_string=rename_format,
rename_dict=self.__get_naming_dict(meta=MetaInfo(mediainfo.title),
mediainfo=mediainfo)
)
# 取相对路径的第1层目录
if rel_path.parts:
media_path = dir_path / rel_path.parts[0]
else:
continue
media_path = target_path.parents[rename_format_level - 1]
# 检索媒体文件
fileitem = storage_oper.get_item(media_path)
if not fileitem:

View File

@@ -79,6 +79,8 @@ class StorageBase(metaclass=ABCMeta):
def create_folder(self, fileitem: schemas.FileItem, name: str) -> Optional[schemas.FileItem]:
"""
创建目录
:param fileitem: 父目录
:param name: 目录名
"""
pass
@@ -122,7 +124,6 @@ class StorageBase(metaclass=ABCMeta):
下载文件,保存到本地,返回本地临时文件地址
:param fileitem: 文件项
:param path: 文件保存路径
"""
pass
@@ -144,16 +145,22 @@ class StorageBase(metaclass=ABCMeta):
pass
@abstractmethod
def copy(self, fileitem: schemas.FileItem, target: Union[schemas.FileItem, Path]) -> bool:
def copy(self, fileitem: schemas.FileItem, path: Path, new_name: str) -> bool:
"""
复制文件
:param fileitem: 文件项
:param path: 目标目录
:param new_name: 新文件名
"""
pass
@abstractmethod
def move(self, fileitem: schemas.FileItem, target: Union[schemas.FileItem, Path]) -> bool:
def move(self, fileitem: schemas.FileItem, path: Path, new_name: str) -> bool:
"""
移动文件
:param fileitem: 文件项
:param path: 目标目录
:param new_name: 新文件名
"""
pass

View File

@@ -61,6 +61,7 @@ class AliPan(StorageBase, metaclass=Singleton):
"""
初始化 aligo
"""
def show_qrcode(qr_link: str):
"""
显示二维码
@@ -254,28 +255,9 @@ class AliPan(StorageBase, metaclass=Singleton):
return []
# 根目录处理
if not fileitem or not fileitem.drive_id:
return [
schemas.FileItem(
storage=self.schema.value,
fileid="root",
drive_id=self.__auth_params.get("resourceDriveId"),
parent_fileid="root",
type="dir",
path="/资源库/",
name="资源库",
basename="资源库"
),
schemas.FileItem(
storage=self.schema.value,
fileid="root",
drive_id=self.__auth_params.get("backDriveId"),
parent_fileid="root",
type="dir",
path="/备份盘/",
name="备份盘",
basename="备份盘"
)
]
items = self.aligo.get_file_list()
if items:
return [self.__get_fileitem(item) for item in items]
elif fileitem.type == "file":
# 文件处理
file = self.detail(fileitem)
@@ -290,6 +272,8 @@ class AliPan(StorageBase, metaclass=Singleton):
def create_folder(self, fileitem: schemas.FileItem, name: str) -> Optional[schemas.FileItem]:
"""
创建目录
:param fileitem: 父目录
:param name: 目录名
"""
if not self.aligo:
return None
@@ -297,21 +281,43 @@ class AliPan(StorageBase, metaclass=Singleton):
if item:
if isinstance(item, CreateFileResponse):
item = self.aligo.get_file(file_id=item.file_id, drive_id=item.drive_id)
return self.__get_fileitem(item)
return self.__get_fileitem(item, parent=fileitem.path)
return None
def get_folder(self, path: Path) -> Optional[schemas.FileItem]:
"""
根据文件路程获取目录,不存在则创建
"""
if not self.aligo:
def __find_dir(_fileitem: schemas.FileItem, _name: str) -> Optional[schemas.FileItem]:
"""
查找下级目录中匹配名称的目录
"""
for sub_folder in self.list(_fileitem):
if sub_folder.type != "dir":
continue
if sub_folder.name == _name:
return sub_folder
return None
item = self.aligo.get_folder_by_path(path=str(path), create_folder=True)
if item:
if isinstance(item, CreateFileResponse):
item = self.aligo.get_file(file_id=item.file_id, drive_id=item.drive_id)
return self.__get_fileitem(item)
return None
# 是否已存在
folder = self.get_item(path)
if folder:
return folder
# 逐级查找和创建目录
fileitem = schemas.FileItem(path="/")
for part in path.parts:
if part == "/":
continue
dir_file = __find_dir(fileitem, part)
if dir_file:
fileitem = dir_file
else:
dir_file = self.create_folder(fileitem, part)
if not dir_file:
return None
fileitem = dir_file
return fileitem
def get_item(self, path: Path) -> Optional[schemas.FileItem]:
"""
@@ -321,7 +327,7 @@ class AliPan(StorageBase, metaclass=Singleton):
return None
item = self.aligo.get_file_by_path(path=str(path))
if item:
return self.__get_fileitem(item)
return self.__get_fileitem(item, parent=path.parent)
return None
def delete(self, fileitem: schemas.FileItem) -> bool:
@@ -342,7 +348,7 @@ class AliPan(StorageBase, metaclass=Singleton):
return None
item = self.aligo.get_file(file_id=fileitem.fileid, drive_id=fileitem.drive_id)
if item:
return self.__get_fileitem(item)
return self.__get_fileitem(item, parent=fileitem.path)
return None
def rename(self, fileitem: schemas.FileItem, name: str) -> bool:
@@ -370,6 +376,9 @@ class AliPan(StorageBase, metaclass=Singleton):
def upload(self, fileitem: schemas.FileItem, path: Path, new_name: str = None) -> Optional[schemas.FileItem]:
"""
上传文件,并标记完成
:param fileitem: 上传目录项
:param path: 本地文件路径
:param new_name: 上传后文件名
"""
if not self.aligo:
return None
@@ -380,22 +389,44 @@ class AliPan(StorageBase, metaclass=Singleton):
if result:
item = self.aligo.get_file(file_id=result.file_id, drive_id=result.drive_id)
if item:
return self.__get_fileitem(item)
return self.__get_fileitem(item, parent=fileitem.path)
return None
def move(self, fileitem: schemas.FileItem, target: schemas.FileItem) -> bool:
def move(self, fileitem: schemas.FileItem, path: Path, new_name: str) -> bool:
"""
移动文件
:param fileitem: 文件项
:param path: 目标目录
:param new_name: 新文件名
"""
if not self.aligo:
return False
target = self.get_folder(path)
if not target:
return False
if self.aligo.move_file(file_id=fileitem.fileid, drive_id=fileitem.drive_id,
to_parent_file_id=target.fileid, to_drive_id=target.drive_id):
to_parent_file_id=target.fileid, to_drive_id=target.drive_id,
new_name=new_name):
return True
return False
def copy(self, fileitem: schemas.FileItem, target: schemas.FileItem) -> bool:
pass
def copy(self, fileitem: schemas.FileItem, path: Path, new_name: str) -> bool:
"""
复制文件
:param fileitem: 文件项
:param path: 目标目录
:param new_name: 新文件名
"""
if not self.aligo:
return False
target = self.get_folder(path)
if not target:
return False
if self.aligo.copy_file(file_id=fileitem.fileid, drive_id=fileitem.drive_id,
to_parent_file_id=target.fileid, to_drive_id=target.drive_id,
new_name=new_name):
return True
return False
def link(self, fileitem: schemas.FileItem, target_file: Path) -> bool:
"""

View File

@@ -2,10 +2,10 @@ import json
import logging
from datetime import datetime
from pathlib import Path
from typing import Optional, Tuple, List, Dict, Union
from typing import Optional, List, Dict
from requests import Response
from cachetools import cached, TTLCache
from requests import Response
from app import schemas
from app.core.config import settings
@@ -13,10 +13,11 @@ from app.log import logger
from app.modules.filemanager.storages import StorageBase
from app.schemas.types import StorageSchema
from app.utils.http import RequestUtils
from app.utils.singleton import Singleton
from app.utils.url import UrlUtils
class Alist(StorageBase):
class Alist(StorageBase, metaclass=Singleton):
"""
Alist相关操作
api文档https://alist.nn.ci/zh/guide/api
@@ -232,6 +233,8 @@ class Alist(StorageBase):
) -> Optional[schemas.FileItem]:
"""
创建目录
:param fileitem: 父目录
:param name: 目录名
"""
path = Path(fileitem.path) / name
resp: Response = RequestUtils(
@@ -270,14 +273,16 @@ class Alist(StorageBase):
获取目录,如目录不存在则创建
"""
folder = self.get_item(path)
if folder:
return folder
if not folder:
folder = self.create_folder(self.get_parent(schemas.FileItem(
folder = self.create_folder(schemas.FileItem(
storage=self.schema.value,
type="dir",
path=path.as_posix() + "/",
path=path.parent.as_posix(),
name=path.name,
basename=path.stem
)), path.name)
), path.name)
return folder
def get_item(
@@ -348,7 +353,7 @@ class Alist(StorageBase):
result = resp.json()
if result["code"] != 200:
logging.warning(f'获取文件 {path} 失败,错误信息:{result["message"]}')
logging.debug(f'获取文件 {path} 失败,错误信息:{result["message"]}')
return
return schemas.FileItem(
@@ -376,7 +381,7 @@ class Alist(StorageBase):
resp: Response = RequestUtils(
headers=self.__get_header_with_token()
).post_res(
self.__get_api_url("/api/fs/delete"),
self.__get_api_url("/api/fs/remove"),
json={
"dir": Path(fileitem.path).parent.as_posix(),
"names": [fileitem.name],
@@ -576,51 +581,21 @@ class Alist(StorageBase):
"""
return self.get_item(Path(fileitem.path))
@staticmethod
def __get_copy_and_move_data(
fileitem: schemas.FileItem, target: Union[schemas.FileItem, Path]
) -> Tuple[str, str, List[str], bool]:
"""
获取复制或移动文件需要的数据
:param fileitem: 文件项
:param target: 目标文件项或目标路径
:return: 源目录,目标目录,文件名列表,是否有效
"""
name = Path(target).name
if fileitem.name != name:
return "", "", [], False
src_dir = Path(fileitem.path).parent.as_posix()
if isinstance(target, schemas.FileItem):
traget_dir = Path(target.path).parent.as_posix()
else:
traget_dir = target.parent.as_posix()
return src_dir, traget_dir, [name], True
def copy(
self, fileitem: schemas.FileItem, target: Union[schemas.FileItem, Path]
) -> bool:
def copy(self, fileitem: schemas.FileItem, path: Path, new_name: str) -> bool:
"""
复制文件
源文件名和目标文件名必须相同
:param fileitem: 文件项
:param path: 目标目录
:param new_name: 新文件名
"""
src_dir, dst_dir, names, is_valid = self.__get_copy_and_move_data(
fileitem, target
)
if not is_valid:
return False
resp: Response = RequestUtils(
headers=self.__get_header_with_token()
).post_res(
self.__get_api_url("/api/fs/copy"),
json={
"src_dir": src_dir,
"dst_dir": dst_dir,
"names": names,
"src_dir": Path(fileitem.path).parent.as_posix(),
"dst_dir": path.as_posix(),
"names": [fileitem.name],
},
)
"""
@@ -655,28 +630,31 @@ class Alist(StorageBase):
f'复制文件 {fileitem.path} 失败,错误信息:{result["message"]}'
)
return False
# 重命名
if fileitem.name != new_name:
self.rename(
self.get_item(path / fileitem.name), new_name
)
return True
def move(
self, fileitem: schemas.FileItem, target: Union[schemas.FileItem, Path]
) -> bool:
def move(self, fileitem: schemas.FileItem, path: Path, new_name: str) -> bool:
"""
移动文件
:param fileitem: 文件项
:param path: 目标目录
:param new_name: 新文件名
"""
src_dir, dst_dir, names, is_valid = self.__get_copy_and_move_data(
fileitem, target
)
if not is_valid:
return False
# 先重命名
if fileitem.name != new_name:
self.rename(fileitem, new_name)
resp: Response = RequestUtils(
headers=self.__get_header_with_token()
).post_res(
self.__get_api_url("/api/fs/move"),
json={
"src_dir": src_dir,
"dst_dir": dst_dir,
"names": names,
"src_dir": Path(fileitem.path).parent.as_posix(),
"dst_dir": path.as_posix(),
"names": [new_name],
},
)
"""
@@ -757,15 +735,7 @@ class Alist(StorageBase):
@staticmethod
def __parse_timestamp(time_str: str) -> float:
# try:
# # 尝试解析带微秒的时间格式
# dt = datetime.strptime(time_str[:26], '%Y-%m-%dT%H:%M:%S.%f')
# except ValueError:
# # 如果失败,尝试解析不带微秒的时间格式
# dt = datetime.strptime(time_str, '%Y-%m-%dT%H:%M:%SZ')
# 直接使用 ISO 8601 格式解析时间
dt = datetime.fromisoformat(time_str)
# 返回时间戳
return dt.timestamp()
"""
直接使用 ISO 8601 格式解析时间
"""
return datetime.fromisoformat(time_str).timestamp()

View File

@@ -37,7 +37,7 @@ class LocalStorage(StorageBase):
"""
return True
def __get_fileitem(self, path: Path):
def __get_fileitem(self, path: Path) -> schemas.FileItem:
"""
获取文件项
"""
@@ -52,7 +52,7 @@ class LocalStorage(StorageBase):
modify_time=path.stat().st_mtime,
)
def __get_diritem(self, path: Path):
def __get_diritem(self, path: Path) -> schemas.FileItem:
"""
获取目录项
"""
@@ -115,6 +115,8 @@ class LocalStorage(StorageBase):
def create_folder(self, fileitem: schemas.FileItem, name: str) -> Optional[schemas.FileItem]:
"""
创建目录
:param fileitem: 父目录
:param name: 目录名
"""
if not fileitem.path:
return None
@@ -192,6 +194,9 @@ class LocalStorage(StorageBase):
def upload(self, fileitem: schemas.FileItem, path: Path, new_name: str = None) -> Optional[schemas.FileItem]:
"""
上传文件
:param fileitem: 上传目录项
:param path: 本地文件路径
:param new_name: 上传后文件名
"""
dir_path = Path(fileitem.path)
target_path = dir_path / (new_name or path.name)
@@ -201,17 +206,6 @@ class LocalStorage(StorageBase):
return None
return self.get_item(target_path)
def copy(self, fileitem: schemas.FileItem, target_file: Path) -> bool:
"""
复制文件
"""
file_path = Path(fileitem.path)
code, message = SystemUtils.copy(file_path, target_file)
if code != 0:
logger.error(f"复制文件失败:{message}")
return False
return True
def link(self, fileitem: schemas.FileItem, target_file: Path) -> bool:
"""
硬链接文件
@@ -234,12 +228,29 @@ class LocalStorage(StorageBase):
return False
return True
def move(self, fileitem: schemas.FileItem, target: Path) -> bool:
def copy(self, fileitem: schemas.FileItem, path: Path, new_name: str) -> bool:
"""
移动文件
复制文件
:param fileitem: 文件项
:param path: 目标目录
:param new_name: 新文件名
"""
file_path = Path(fileitem.path)
code, message = SystemUtils.move(file_path, target)
code, message = SystemUtils.copy(file_path, path / new_name)
if code != 0:
logger.error(f"复制文件失败:{message}")
return False
return True
def move(self, fileitem: schemas.FileItem, path: Path, new_name: str) -> bool:
"""
移动文件
:param fileitem: 文件项
:param path: 目标目录
:param new_name: 新文件名
"""
file_path = Path(fileitem.path)
code, message = SystemUtils.move(file_path, path / new_name)
if code != 0:
logger.error(f"移动文件失败:{message}")
return False

View File

@@ -139,6 +139,8 @@ class Rclone(StorageBase):
def create_folder(self, fileitem: schemas.FileItem, name: str) -> Optional[schemas.FileItem]:
"""
创建目录
:param fileitem: 父目录
:param name: 目录名
"""
try:
retcode = subprocess.run(
@@ -149,10 +151,7 @@ class Rclone(StorageBase):
startupinfo=self.__get_hidden_shell()
).returncode
if retcode == 0:
ret_fileitem = copy.deepcopy(fileitem)
ret_fileitem.path = f"{fileitem.path}/{name}/"
ret_fileitem.name = name
return ret_fileitem
return self.get_item(Path(f"{fileitem.path}/{name}"))
except Exception as err:
logger.error(f"rclone创建目录失败{err}")
return None
@@ -166,13 +165,17 @@ class Rclone(StorageBase):
"""
查找下级目录中匹配名称的目录
"""
for sub_file in self.list(_fileitem):
if sub_file.type != "dir":
for sub_folder in self.list(_fileitem):
if sub_folder.type != "dir":
continue
if sub_file.name == _name:
return sub_file
if sub_folder.name == _name:
return sub_folder
return None
# 是否已存在
folder = self.get_item(path)
if folder:
return folder
# 逐级查找和创建目录
fileitem = schemas.FileItem(path="/")
for part in path.parts:
@@ -269,6 +272,9 @@ class Rclone(StorageBase):
def upload(self, fileitem: schemas.FileItem, path: Path, new_name: str = None) -> Optional[schemas.FileItem]:
"""
上传文件
:param fileitem: 上传目录项
:param path: 本地文件路径
:param new_name: 上传后文件名
"""
try:
new_path = Path(fileitem.path) / (new_name or path.name)
@@ -306,16 +312,19 @@ class Rclone(StorageBase):
logger.error(f"rclone获取文件详情失败{err}")
return None
def move(self, fileitem: schemas.FileItem, target: Path) -> bool:
def move(self, fileitem: schemas.FileItem, path: Path, new_name: str) -> bool:
"""
移动文件target_file格式rclone:path
移动文件
:param fileitem: 文件项
:param path: 目标目录
:param new_name: 新文件名
"""
try:
retcode = subprocess.run(
[
'rclone', 'moveto',
f'MP:{fileitem.path}',
f'MP:{target}'
f'MP:{path / new_name}'
],
startupinfo=self.__get_hidden_shell()
).returncode
@@ -325,8 +334,27 @@ class Rclone(StorageBase):
logger.error(f"rclone移动文件失败{err}")
return False
def copy(self, fileitem: schemas.FileItem, target_file: Path) -> bool:
pass
def copy(self, fileitem: schemas.FileItem, path: Path, new_name: str) -> bool:
"""
复制文件
:param fileitem: 文件项
:param path: 目标目录
:param new_name: 新文件名
"""
try:
retcode = subprocess.run(
[
'rclone', 'copyto',
f'MP:{fileitem.path}',
f'MP:{path / new_name}'
],
startupinfo=self.__get_hidden_shell()
).returncode
if retcode == 0:
return True
except Exception as err:
logger.error(f"rclone复制文件失败{err}")
return False
def link(self, fileitem: schemas.FileItem, target_file: Path) -> bool:
pass

View File

@@ -225,6 +225,9 @@ class U115Pan(StorageBase, metaclass=Singleton):
"""
if not self.client:
return None
folder = self.get_item(path)
if folder:
return folder
try:
result = self.client.fs.makedirs(path, exist_ok=True)
if result:
@@ -336,6 +339,9 @@ class U115Pan(StorageBase, metaclass=Singleton):
def upload(self, fileitem: schemas.FileItem, path: Path, new_name: str = None) -> Optional[schemas.FileItem]:
"""
上传文件
:param fileitem: 上传目录项
:param path: 本地文件路径
:param new_name: 上传后文件名
"""
if not self.client:
return None
@@ -358,32 +364,38 @@ class U115Pan(StorageBase, metaclass=Singleton):
logger.error(f"115上传文件失败{str(e)}")
return None
def move(self, fileitem: schemas.FileItem, target: schemas.FileItem) -> bool:
"""
移动文件
"""
if not self.client:
return False
try:
self.client.fs.move(fileitem.path, target.path)
return True
except Exception as e:
logger.error(f"115移动文件失败{str(e)}")
return False
def copy(self, fileitem: schemas.FileItem, target_file: Path) -> bool:
def copy(self, fileitem: schemas.FileItem, path: Path, new_name: str) -> bool:
"""
复制文件
:param fileitem: 文件项
:param path: 目标目录
:param new_name: 新文件名
"""
if not self.client:
return False
try:
self.client.fs.copy(fileitem.path, target_file)
self.client.fs.copy(fileitem.path, path / new_name)
return True
except Exception as e:
logger.error(f"115复制文件失败{str(e)}")
return False
def move(self, fileitem: schemas.FileItem, path: Path, new_name: str) -> bool:
"""
移动文件
:param fileitem: 文件项
:param path: 目标目录
:param new_name: 新文件名
"""
if not self.client:
return False
try:
self.client.fs.move(fileitem.path, path / new_name)
return True
except Exception as e:
logger.error(f"115移动文件失败{str(e)}")
return False
def link(self, fileitem: schemas.FileItem, target_file: Path) -> bool:
pass

View File

@@ -7,7 +7,7 @@ from app.helper.rule import RuleHelper
from app.log import logger
from app.modules import _ModuleBase
from app.modules.filter.RuleParser import RuleParser
from app.schemas.types import ModuleType
from app.schemas.types import ModuleType, OtherModulesType
from app.utils.string import StringUtils
@@ -167,6 +167,13 @@ class FilterModule(_ModuleBase):
"""
return ModuleType.Other
@staticmethod
def get_subtype() -> OtherModulesType:
"""
获取模块子类型
"""
return OtherModulesType.Filter
@staticmethod
def get_priority() -> int:
"""

View File

@@ -18,7 +18,7 @@ from app.modules.indexer.spider.tnode import TNodeSpider
from app.modules.indexer.spider.torrentleech import TorrentLeech
from app.modules.indexer.spider.yema import YemaSpider
from app.schemas import SiteUserData
from app.schemas.types import MediaType, ModuleType
from app.schemas.types import MediaType, ModuleType, OtherModulesType
from app.utils.string import StringUtils
@@ -47,6 +47,13 @@ class IndexerModule(_ModuleBase):
"""
return ModuleType.Indexer
@staticmethod
def get_subtype() -> OtherModulesType:
"""
获取模块子类型
"""
return OtherModulesType.Indexer
@staticmethod
def get_priority() -> int:
"""

View File

@@ -344,11 +344,9 @@ class SiteParserBase(metaclass=ABCMeta):
logger.warn(
f"{self._site_name} 检测到Cloudflare请更新Cookie和UA")
return ""
if re.search(r"charset=\"?utf-8\"?", res.text, re.IGNORECASE):
res.encoding = "utf-8"
else:
res.encoding = res.apparent_encoding
return res.text
return RequestUtils.get_decoded_html_content(res,
settings.ENCODING_DETECTION_PERFORMANCE_MODE,
settings.ENCODING_DETECTION_MIN_CONFIDENCE)
return ""

View File

@@ -5,7 +5,6 @@ import traceback
from typing import List
from urllib.parse import quote, urlencode, urlparse, parse_qs
import chardet
from jinja2 import Template
from pyquery import PyQuery
from ruamel.yaml import CommentedMap
@@ -250,27 +249,9 @@ class TorrentSpider:
referer=self.referer,
proxies=self.proxies
).get_res(searchurl, allow_redirects=True)
if ret is not None:
# 使用chardet检测字符编码
raw_data = ret.content
if raw_data:
try:
result = chardet.detect(raw_data)
encoding = result['encoding']
# 解码为字符串
page_source = raw_data.decode(encoding)
except Exception as e:
logger.debug(f"chardet解码失败{str(e)}")
# 探测utf-8解码
if re.search(r"charset=\"?utf-8\"?", ret.text, re.IGNORECASE):
ret.encoding = "utf-8"
else:
ret.encoding = ret.apparent_encoding
page_source = ret.text
else:
page_source = ret.text
else:
page_source = ""
page_source = RequestUtils.get_decoded_html_content(ret,
settings.ENCODING_DETECTION_PERFORMANCE_MODE,
settings.ENCODING_DETECTION_MIN_CONFIDENCE)
# 解析
return self.parse(page_source)

View File

@@ -21,14 +21,30 @@ class YemaSpider:
_cookie = None
_ua = None
_size = 40
_searchurl = "%sapi/torrent/fetchCategoryOpenTorrentList"
_searchurl = "%sapi/torrent/fetchOpenTorrentList"
_downloadurl = "%sapi/torrent/download?id=%s"
_pageurl = "%s#/torrent/detail/%s/"
_timeout = 15
# 分类
_movie_category = 4
_tv_category = 5
_movie_category = [4]
_tv_category = [5, 13, 14, 17, 15, 6, 16]
# 标签 https://wiki.yemapt.org/developer/constants
_labels = {
"1": "禁转",
"2": "首发",
"3": "官方",
"4": "自制",
"5": "国语",
"6": "中字",
"7": "粤语",
"8": "英字",
"9": "HDR10",
"10": "杜比视界",
"11": "分集",
"12": "完结",
}
def __init__(self, indexer: CommentedMap):
self.systemconfig = SystemConfigOper()
@@ -47,14 +63,7 @@ class YemaSpider:
"""
搜索
"""
if not mtype:
categoryId = self._movie_category
elif mtype == MediaType.TV:
categoryId = self._tv_category
else:
categoryId = self._movie_category
params = {
"categoryId": categoryId,
"pageParam": {
"current": page + 1,
"pageSize": self._size,
@@ -62,6 +71,12 @@ class YemaSpider:
},
"sorter": {}
}
# 新接口可不传 categoryId 参数
# if mtype == MediaType.MOVIE:
# params.update({
# "categoryId": self._movie_category,
# })
# pass
if keyword:
params.update({
"keyword": keyword,
@@ -82,17 +97,27 @@ class YemaSpider:
results = res.json().get('data', []) or []
for result in results:
category_value = result.get('categoryId')
if category_value == self._tv_category:
if category_value in self._tv_category:
category = MediaType.TV.value
elif category_value == self._movie_category:
elif category_value in self._movie_category:
category = MediaType.MOVIE.value
else:
category = MediaType.UNKNOWN.value
pass
torrentLabelIds = result.get('tagList', []) or []
torrentLabels = []
for labelId in torrentLabelIds:
if self._labels.get(labelId) is not None:
torrentLabels.append(self._labels.get(labelId))
pass
pass
torrent = {
'title': result.get('showName'),
'description': result.get('shortDesc'),
'enclosure': self.__get_download_url(result.get('id')),
'pubdate': StringUtils.unify_datetime_str(result.get('gmtCreate')),
# 使用上架时间,而不是用户发布时间,上架时间即其他用户可见时间
'pubdate': StringUtils.unify_datetime_str(result.get('listingTime')),
'size': result.get('fileSize'),
'seeders': result.get('seedNum'),
'peers': result.get('leechNum'),
@@ -101,7 +126,7 @@ class YemaSpider:
'uploadvolumefactor': self.__get_uploadvolumefactor(result.get('uploadPromotion')),
'freedate': StringUtils.unify_datetime_str(result.get('downloadPromotionEndTime')),
'page_url': self._pageurl % (self._domain, result.get('id')),
'labels': [],
'labels': torrentLabels,
'category': category
}
torrents.append(torrent)

View File

@@ -7,7 +7,7 @@ from app.log import logger
from app.modules import _MediaServerBase, _ModuleBase
from app.modules.jellyfin.jellyfin import Jellyfin
from app.schemas.event import AuthCredentials, AuthInterceptCredentials
from app.schemas.types import MediaType, ModuleType, ChainEventType
from app.schemas.types import MediaType, ModuleType, ChainEventType, MediaServerType
class JellyfinModule(_ModuleBase, _MediaServerBase[Jellyfin]):
@@ -30,6 +30,13 @@ class JellyfinModule(_ModuleBase, _MediaServerBase[Jellyfin]):
"""
return ModuleType.MediaServer
@staticmethod
def get_subtype() -> MediaServerType:
"""
获取模块子类型
"""
return MediaServerType.Jellyfin
@staticmethod
def get_priority() -> int:
"""
@@ -66,16 +73,26 @@ class JellyfinModule(_ModuleBase, _MediaServerBase[Jellyfin]):
return False, f"无法连接Jellyfin服务器{name}"
return True, ""
def user_authenticate(self, credentials: AuthCredentials) -> Optional[AuthCredentials]:
def user_authenticate(self, credentials: AuthCredentials, service_name: Optional[str] = None) \
-> Optional[AuthCredentials]:
"""
使用Jellyfin用户辅助完成用户认证
:param credentials: 认证数据
:param service_name: 指定要认证的媒体服务器名称,若为 None 则认证所有服务
:return: 认证数据
"""
# Jellyfin认证
if not credentials or credentials.grant_type != "password":
return None
for name, server in self.get_instances().items():
# 确定要认证的服务器列表
if service_name:
# 如果指定了服务名,获取该服务实例
servers = [(service_name, server)] if (server := self.get_instance(service_name)) else []
else:
# 如果没有指定服务名,遍历所有服务
servers = self.get_instances().items()
# 遍历要认证的服务器
for name, server in servers:
# 触发认证拦截事件
intercept_event = eventmanager.send_event(
etype=ChainEventType.AuthIntercept,

View File

@@ -7,7 +7,7 @@ from app.log import logger
from app.modules import _ModuleBase, _MediaServerBase
from app.modules.plex.plex import Plex
from app.schemas.event import AuthCredentials, AuthInterceptCredentials
from app.schemas.types import MediaType, ModuleType, ChainEventType
from app.schemas.types import MediaType, ModuleType, ChainEventType, MediaServerType
class PlexModule(_ModuleBase, _MediaServerBase[Plex]):
@@ -30,6 +30,13 @@ class PlexModule(_ModuleBase, _MediaServerBase[Plex]):
"""
return ModuleType.MediaServer
@staticmethod
def get_subtype() -> MediaServerType:
"""
获取模块子类型
"""
return MediaServerType.Plex
@staticmethod
def get_priority() -> int:
"""
@@ -66,16 +73,26 @@ class PlexModule(_ModuleBase, _MediaServerBase[Plex]):
logger.info(f"Plex {name} 服务器连接断开,尝试重连 ...")
server.reconnect()
def user_authenticate(self, credentials: AuthCredentials) -> Optional[AuthCredentials]:
def user_authenticate(self, credentials: AuthCredentials, service_name: Optional[str] = None) \
-> Optional[AuthCredentials]:
"""
使用Plex用户辅助完成用户认证
:param credentials: 认证数据
:param service_name: 指定要认证的媒体服务器名称,若为 None 则认证所有服务
:return: 认证数据
"""
# Plex认证
if not credentials or credentials.grant_type != "password":
return None
for name, server in self.get_instances().items():
# 确定要认证的服务器列表
if service_name:
# 如果指定了服务名,获取该服务实例
servers = [(service_name, server)] if (server := self.get_instance(service_name)) else []
else:
# 如果没有指定服务名,遍历所有服务
servers = self.get_instances().items()
# 遍历要认证的服务器
for name, server in servers:
# 触发认证拦截事件
intercept_event = eventmanager.send_event(
etype=ChainEventType.AuthIntercept,

View File

@@ -1,5 +1,5 @@
from pathlib import Path
from typing import Set, Tuple, Optional, Union, List
from typing import Set, Tuple, Optional, Union, List, Dict
from qbittorrentapi import TorrentFilesList
from torrentool.torrent import Torrent
@@ -11,7 +11,7 @@ from app.log import logger
from app.modules import _ModuleBase, _DownloaderBase
from app.modules.qbittorrent.qbittorrent import Qbittorrent
from app.schemas import TransferTorrent, DownloadingTorrent
from app.schemas.types import TorrentStatus, ModuleType
from app.schemas.types import TorrentStatus, ModuleType, DownloaderType
from app.utils.string import StringUtils
@@ -35,6 +35,13 @@ class QbittorrentModule(_ModuleBase, _DownloaderBase[Qbittorrent]):
"""
return ModuleType.Downloader
@staticmethod
def get_subtype() -> DownloaderType:
"""
获取模块子类型
"""
return DownloaderType.Qbittorrent
@staticmethod
def get_priority() -> int:
"""
@@ -124,7 +131,8 @@ class QbittorrentModule(_ModuleBase, _DownloaderBase[Qbittorrent]):
is_paused=is_paused,
tag=tags,
cookie=cookie,
category=category
category=category,
ignore_category_check=False
)
if not state:
# 读取种子的名称
@@ -203,66 +211,75 @@ class QbittorrentModule(_ModuleBase, _DownloaderBase[Qbittorrent]):
:return: 下载器中符合状态的种子列表
"""
# 获取下载器
server: Qbittorrent = self.get_instance(downloader)
if not server:
return None
if downloader:
server: Qbittorrent = self.get_instance(downloader)
if not server:
return None
servers = {downloader: server}
else:
servers: Dict[str, Qbittorrent] = self.get_instances()
ret_torrents = []
if hashs:
# 按Hash获取
torrents, _ = server.get_torrents(ids=hashs, tags=settings.TORRENT_TAG)
for torrent in torrents or []:
content_path = torrent.get("content_path")
if content_path:
torrent_path = Path(content_path)
else:
torrent_path = Path(torrent.get('save_path')) / torrent.get('name')
ret_torrents.append(TransferTorrent(
title=torrent.get('name'),
path=torrent_path,
hash=torrent.get('hash'),
size=torrent.get('total_size'),
tags=torrent.get('tags')
))
for name, server in servers.items():
torrents, _ = server.get_torrents(ids=hashs, tags=settings.TORRENT_TAG)
for torrent in torrents or []:
content_path = torrent.get("content_path")
if content_path:
torrent_path = Path(content_path)
else:
torrent_path = Path(torrent.get('save_path')) / torrent.get('name')
ret_torrents.append(TransferTorrent(
downloader=name,
title=torrent.get('name'),
path=torrent_path,
hash=torrent.get('hash'),
size=torrent.get('total_size'),
tags=torrent.get('tags')
))
elif status == TorrentStatus.TRANSFER:
# 获取已完成且未整理的
torrents = server.get_completed_torrents(tags=settings.TORRENT_TAG)
for torrent in torrents or []:
tags = torrent.get("tags") or []
if "已整理" in tags:
continue
# 内容路径
content_path = torrent.get("content_path")
if content_path:
torrent_path = Path(content_path)
else:
torrent_path = torrent.get('save_path') / torrent.get('name')
ret_torrents.append(TransferTorrent(
title=torrent.get('name'),
path=torrent_path,
hash=torrent.get('hash'),
tags=torrent.get('tags')
))
for name, server in servers.items():
torrents = server.get_completed_torrents(tags=settings.TORRENT_TAG)
for torrent in torrents or []:
tags = torrent.get("tags") or []
if "已整理" in tags:
continue
# 内容路径
content_path = torrent.get("content_path")
if content_path:
torrent_path = Path(content_path)
else:
torrent_path = torrent.get('save_path') / torrent.get('name')
ret_torrents.append(TransferTorrent(
downloader=name,
title=torrent.get('name'),
path=torrent_path,
hash=torrent.get('hash'),
tags=torrent.get('tags')
))
elif status == TorrentStatus.DOWNLOADING:
# 获取正在下载的任务
torrents = server.get_downloading_torrents(tags=settings.TORRENT_TAG)
for torrent in torrents or []:
meta = MetaInfo(torrent.get('name'))
ret_torrents.append(DownloadingTorrent(
hash=torrent.get('hash'),
title=torrent.get('name'),
name=meta.name,
year=meta.year,
season_episode=meta.season_episode,
progress=torrent.get('progress') * 100,
size=torrent.get('total_size'),
state="paused" if torrent.get('state') in ("paused", "pausedDL") else "downloading",
dlspeed=StringUtils.str_filesize(torrent.get('dlspeed')),
upspeed=StringUtils.str_filesize(torrent.get('upspeed')),
left_time=StringUtils.str_secends(
(torrent.get('total_size') - torrent.get('completed')) / torrent.get('dlspeed')) if torrent.get(
'dlspeed') > 0 else ''
))
for name, server in servers.items():
torrents = server.get_downloading_torrents(tags=settings.TORRENT_TAG)
for torrent in torrents or []:
meta = MetaInfo(torrent.get('name'))
ret_torrents.append(DownloadingTorrent(
downloader=name,
hash=torrent.get('hash'),
title=torrent.get('name'),
name=meta.name,
year=meta.year,
season_episode=meta.season_episode,
progress=torrent.get('progress') * 100,
size=torrent.get('total_size'),
state="paused" if torrent.get('state') in ("paused", "pausedDL") else "downloading",
dlspeed=StringUtils.str_filesize(torrent.get('dlspeed')),
upspeed=StringUtils.str_filesize(torrent.get('upspeed')),
left_time=StringUtils.str_secends(
(torrent.get('total_size') - torrent.get('completed')) / torrent.get('dlspeed')) if torrent.get(
'dlspeed') > 0 else ''
))
else:
return None
return ret_torrents

View File

@@ -251,6 +251,7 @@ class Qbittorrent:
:param category: 种子分类
:param download_dir: 下载路径
:param cookie: 站点Cookie用于辅助下载种子
:param kwargs: 可选参数,如 ignore_category_check 以及 QB相关参数
:return: bool
"""
if not self.qbc or not content:
@@ -276,13 +277,16 @@ class Qbittorrent:
else:
tags = None
# 分类自动管理
if category and self._category:
is_auto = True
# 如果忽略分类检查,则直接使用传入的分类值,否则,仅在分类存在且启用了自动管理时才传递参数
ignore_category_check = kwargs.pop("ignore_category_check", True)
if ignore_category_check:
is_auto = self._category
else:
is_auto = False
category = None
if category and self._category:
is_auto = True
else:
is_auto = False
category = None
try:
# 添加下载
qbc_ret = self.qbc.torrents_add(urls=urls,

View File

@@ -31,6 +31,13 @@ class SlackModule(_ModuleBase, _MessageBase[Slack]):
"""
return ModuleType.Notification
@staticmethod
def get_subtype() -> MessageChannel:
"""
获取模块子类型
"""
return MessageChannel.Slack
@staticmethod
def get_priority() -> int:
"""

View File

@@ -10,7 +10,7 @@ from app.core.context import Context
from app.helper.torrent import TorrentHelper
from app.log import logger
from app.modules import _ModuleBase
from app.schemas.types import ModuleType
from app.schemas.types import ModuleType, OtherModulesType
from app.utils.http import RequestUtils
from app.utils.string import StringUtils
from app.utils.system import SystemUtils
@@ -40,6 +40,13 @@ class SubtitleModule(_ModuleBase):
"""
return ModuleType.Other
@staticmethod
def get_subtype() -> OtherModulesType:
"""
获取模块子类型
"""
return OtherModulesType.Subtitle
@staticmethod
def get_priority() -> int:
"""

View File

@@ -29,6 +29,13 @@ class SynologyChatModule(_ModuleBase, _MessageBase[SynologyChat]):
"""
return ModuleType.Notification
@staticmethod
def get_subtype() -> MessageChannel:
"""
获取模块子类型
"""
return MessageChannel.SynologyChat
@staticmethod
def get_priority() -> int:
"""

View File

@@ -34,6 +34,13 @@ class TelegramModule(_ModuleBase, _MessageBase[Telegram]):
"""
return ModuleType.Notification
@staticmethod
def get_subtype() -> MessageChannel:
"""
获取模块子类型
"""
return MessageChannel.Telegram
@staticmethod
def get_priority() -> int:
"""

View File

@@ -14,7 +14,7 @@ from app.modules.themoviedb.scraper import TmdbScraper
from app.modules.themoviedb.tmdb_cache import TmdbCache
from app.modules.themoviedb.tmdbapi import TmdbApi
from app.schemas import MediaPerson
from app.schemas.types import MediaType, MediaImageType, ModuleType
from app.schemas.types import MediaType, MediaImageType, ModuleType, MediaRecognizeType
from app.utils.http import RequestUtils
@@ -49,6 +49,13 @@ class TheMovieDbModule(_ModuleBase):
"""
return ModuleType.MediaRecognize
@staticmethod
def get_subtype() -> MediaRecognizeType:
"""
获取模块子类型
"""
return MediaRecognizeType.TMDB
@staticmethod
def get_priority() -> int:
"""

View File

@@ -30,9 +30,9 @@ class TmdbScraper:
# 电影元数据文件
doc = self.__gen_movie_nfo_file(mediainfo=mediainfo)
else:
if season:
if season is not None:
# 查询季信息
seasoninfo = self.tmdb.get_tv_season_detail(mediainfo.tmdb_id, meta.begin_season)
seasoninfo = self.tmdb.get_tv_season_detail(mediainfo.tmdb_id, season)
if episode:
# 集元数据文件
episodeinfo = self.__get_episode_detail(seasoninfo, meta.begin_episode)
@@ -57,7 +57,7 @@ class TmdbScraper:
:param episode: 集号
"""
images = {}
if season:
if season is not None:
# 只需要集的图片
if episode:
# 集的图片
@@ -102,8 +102,13 @@ class TmdbScraper:
ext = Path(seasoninfo.get('poster_path')).suffix
# URL
url = f"https://{settings.TMDB_IMAGE_DOMAIN}/t/p/original{seasoninfo.get('poster_path')}"
image_name = f"season{sea_seq}-poster{ext}"
# S0海报格式不同
if season == 0:
image_name = f"season-specials-poster{ext}"
else:
image_name = f"season{sea_seq}-poster{ext}"
return image_name, url
return "", ""
@staticmethod
def __get_episode_detail(seasoninfo: dict, episode: int) -> dict:
@@ -228,7 +233,7 @@ class TmdbScraper:
xoutline = DomUtils.add_node(doc, root, "outline")
xoutline.appendChild(doc.createCDATASection(seasoninfo.get("overview") or ""))
# 标题
DomUtils.add_node(doc, root, "title", "%s" % season)
DomUtils.add_node(doc, root, "title", seasoninfo.get("name") or "%s" % season)
# 发行日期
DomUtils.add_node(doc, root, "premiered", seasoninfo.get("air_date") or "")
DomUtils.add_node(doc, root, "releasedate", seasoninfo.get("air_date") or "")

View File

@@ -15,7 +15,7 @@ from app.schemas.types import MediaType
lock = RLock()
CACHE_EXPIRE_TIMESTAMP_STR = "cache_expire_timestamp"
EXPIRE_TIMESTAMP = settings.CACHE_CONF.get('meta')
EXPIRE_TIMESTAMP = settings.CACHE_CONF["meta"]
class TmdbCache(metaclass=Singleton):
@@ -75,7 +75,7 @@ class TmdbCache(metaclass=Singleton):
@return: 被删除的缓存内容
"""
with lock:
return self._meta_data.pop(key, None)
return self._meta_data.pop(key, {})
def delete_by_tmdbid(self, tmdbid: int) -> None:
"""
@@ -138,14 +138,14 @@ class TmdbCache(metaclass=Singleton):
if cache_year:
cache_year = cache_year[:4]
self._meta_data[self.__get_key(meta)] = {
"id": info.get("id"),
"type": info.get("media_type"),
"year": cache_year,
"title": cache_title,
"poster_path": info.get("poster_path"),
"backdrop_path": info.get("backdrop_path"),
CACHE_EXPIRE_TIMESTAMP_STR: int(time.time()) + EXPIRE_TIMESTAMP
}
"id": info.get("id"),
"type": info.get("media_type"),
"year": cache_year,
"title": cache_title,
"poster_path": info.get("poster_path"),
"backdrop_path": info.get("backdrop_path"),
CACHE_EXPIRE_TIMESTAMP_STR: int(time.time()) + EXPIRE_TIMESTAMP
}
elif info is not None:
# None时不缓存此时代表网络错误允许重复请求
self._meta_data[self.__get_key(meta)] = {'id': 0}
@@ -164,7 +164,7 @@ class TmdbCache(metaclass=Singleton):
return
with open(self._meta_path, 'wb') as f:
pickle.dump(new_meta_data, f, pickle.HIGHEST_PROTOCOL)
pickle.dump(new_meta_data, f, pickle.HIGHEST_PROTOCOL) # type: ignore
def _random_sample(self, new_meta_data: dict) -> bool:
"""

View File

@@ -1,9 +1,9 @@
import traceback
from functools import lru_cache
from typing import Optional, List
from urllib.parse import quote
import zhconv
from cachetools import TTLCache, cached
from lxml import etree
from app.core.config import settings
@@ -27,8 +27,6 @@ class TmdbApi:
self.tmdb.domain = settings.TMDB_API_DOMAIN
# 开启缓存
self.tmdb.cache = True
# 缓存大小
self.tmdb.REQUEST_CACHE_MAXSIZE = settings.CACHE_CONF.get('tmdb')
# APIKEY
self.tmdb.api_key = settings.TMDB_API_KEY
# 语种
@@ -163,7 +161,7 @@ class TmdbApi:
season_number: int = None) -> Optional[dict]:
"""
搜索tmdb中的媒体信息匹配返回一条尽可能正确的信息
:param name: 索的名称
:param name: 索的名称
:param mtype: 类型:电影、电视剧
:param year: 年份,如要是季集需要是首播年份(first_air_date)
:param season_year: 当前季集年份
@@ -466,7 +464,7 @@ class TmdbApi:
return ret_info
@lru_cache(maxsize=settings.CACHE_CONF.get('tmdb'))
@cached(cache=TTLCache(maxsize=settings.CACHE_CONF["tmdb"], ttl=settings.CACHE_CONF["meta"]))
def match_web(self, name: str, mtype: MediaType) -> Optional[dict]:
"""
搜索TMDB网站直接抓取结果结果只有一条时才返回
@@ -1292,7 +1290,7 @@ class TmdbApi:
for group_episode in group_episodes:
order = group_episode.get('order')
episodes = group_episode.get('episodes')
if not episodes or not order:
if not episodes:
continue
# 当前季第一季时间
first_date = episodes[0].get("air_date")

View File

@@ -4,11 +4,12 @@ import logging
import os
import time
from datetime import datetime
from functools import lru_cache
import requests
import requests.exceptions
from cachetools import TTLCache, cached
from app.core.config import settings
from app.utils.http import RequestUtils
from .exceptions import TMDbException
@@ -24,7 +25,6 @@ class TMDb(object):
TMDB_CACHE_ENABLED = "TMDB_CACHE_ENABLED"
TMDB_PROXIES = "TMDB_PROXIES"
TMDB_DOMAIN = "TMDB_DOMAIN"
REQUEST_CACHE_MAXSIZE = None
_req = None
_session = None
@@ -137,7 +137,7 @@ class TMDb(object):
def cache(self, cache):
os.environ[self.TMDB_CACHE_ENABLED] = str(cache)
@lru_cache(maxsize=REQUEST_CACHE_MAXSIZE)
@cached(cache=TTLCache(maxsize=settings.CACHE_CONF["tmdb"], ttl=settings.CACHE_CONF["meta"]))
def cached_request(self, method, url, data, json,
_ts=datetime.strftime(datetime.now(), '%Y%m%d')):
"""

View File

@@ -4,7 +4,7 @@ from app.core.config import settings
from app.log import logger
from app.modules import _ModuleBase
from app.modules.thetvdb import tvdbapi
from app.schemas.types import ModuleType
from app.schemas.types import ModuleType, MediaRecognizeType
from app.utils.http import RequestUtils
@@ -28,6 +28,13 @@ class TheTvDbModule(_ModuleBase):
"""
return ModuleType.MediaRecognize
@staticmethod
def get_subtype() -> MediaRecognizeType:
"""
获取模块子类型
"""
return MediaRecognizeType.TVDB
@staticmethod
def get_priority() -> int:
"""

View File

@@ -1,5 +1,5 @@
from pathlib import Path
from typing import Set, Tuple, Optional, Union, List
from typing import Set, Tuple, Optional, Union, List, Dict
from torrentool.torrent import Torrent
from transmission_rpc import File
@@ -11,7 +11,7 @@ from app.log import logger
from app.modules import _ModuleBase, _DownloaderBase
from app.modules.transmission.transmission import Transmission
from app.schemas import TransferTorrent, DownloadingTorrent
from app.schemas.types import TorrentStatus, ModuleType
from app.schemas.types import TorrentStatus, ModuleType, DownloaderType
from app.utils.string import StringUtils
@@ -35,6 +35,13 @@ class TransmissionModule(_ModuleBase, _DownloaderBase[Transmission]):
"""
return ModuleType.Downloader
@staticmethod
def get_subtype() -> DownloaderType:
"""
获取模块子类型
"""
return DownloaderType.Transmission
@staticmethod
def get_priority() -> int:
"""
@@ -196,60 +203,70 @@ class TransmissionModule(_ModuleBase, _DownloaderBase[Transmission]):
:return: 下载器中符合状态的种子列表
"""
# 获取下载器
server: Transmission = self.get_instance(downloader)
if not server:
return None
if downloader:
server: Transmission = self.get_instance(downloader)
if not server:
return None
servers = {downloader: server}
else:
servers: Dict[str, Transmission] = self.get_instances()
ret_torrents = []
if hashs:
# 按Hash获取
torrents, _ = server.get_torrents(ids=hashs, tags=settings.TORRENT_TAG)
for torrent in torrents or []:
ret_torrents.append(TransferTorrent(
title=torrent.name,
path=Path(torrent.download_dir) / torrent.name,
hash=torrent.hashString,
size=torrent.total_size,
tags=",".join(torrent.labels or [])
))
for name, server in servers.items():
torrents, _ = server.get_torrents(ids=hashs, tags=settings.TORRENT_TAG)
for torrent in torrents or []:
ret_torrents.append(TransferTorrent(
downloader=name,
title=torrent.name,
path=Path(torrent.download_dir) / torrent.name,
hash=torrent.hashString,
size=torrent.total_size,
tags=",".join(torrent.labels or [])
))
elif status == TorrentStatus.TRANSFER:
# 获取已完成且未整理的
torrents = server.get_completed_torrents(tags=settings.TORRENT_TAG)
for torrent in torrents or []:
# 含"已整理"tag的不处理
if "已整理" in torrent.labels or []:
continue
# 下载路径
path = torrent.download_dir
# 无法获取下载路径的不处理
if not path:
logger.debug(f"未获取到 {torrent.name} 下载保存路径")
continue
ret_torrents.append(TransferTorrent(
title=torrent.name,
path=Path(torrent.download_dir) / torrent.name,
hash=torrent.hashString,
tags=",".join(torrent.labels or [])
))
for name, server in servers.items():
torrents = server.get_completed_torrents(tags=settings.TORRENT_TAG)
for torrent in torrents or []:
# 含"已整理"tag的不处理
if "已整理" in torrent.labels or []:
continue
# 下载路径
path = torrent.download_dir
# 无法获取下载路径的不处理
if not path:
logger.debug(f"未获取到 {torrent.name} 下载保存路径")
continue
ret_torrents.append(TransferTorrent(
downloader=name,
title=torrent.name,
path=Path(torrent.download_dir) / torrent.name,
hash=torrent.hashString,
tags=",".join(torrent.labels or [])
))
elif status == TorrentStatus.DOWNLOADING:
# 获取正在下载的任务
torrents = server.get_downloading_torrents(tags=settings.TORRENT_TAG)
for torrent in torrents or []:
meta = MetaInfo(torrent.name)
dlspeed = torrent.rate_download if hasattr(torrent, "rate_download") else torrent.rateDownload
upspeed = torrent.rate_upload if hasattr(torrent, "rate_upload") else torrent.rateUpload
ret_torrents.append(DownloadingTorrent(
hash=torrent.hashString,
title=torrent.name,
name=meta.name,
year=meta.year,
season_episode=meta.season_episode,
progress=torrent.progress,
size=torrent.total_size,
state="paused" if torrent.status == "stopped" else "downloading",
dlspeed=StringUtils.str_filesize(dlspeed),
upspeed=StringUtils.str_filesize(upspeed),
left_time=StringUtils.str_secends(torrent.left_until_done / dlspeed) if dlspeed > 0 else ''
))
for name, server in servers.items():
torrents = server.get_downloading_torrents(tags=settings.TORRENT_TAG)
for torrent in torrents or []:
meta = MetaInfo(torrent.name)
dlspeed = torrent.rate_download if hasattr(torrent, "rate_download") else torrent.rateDownload
upspeed = torrent.rate_upload if hasattr(torrent, "rate_upload") else torrent.rateUpload
ret_torrents.append(DownloadingTorrent(
downloader=name,
hash=torrent.hashString,
title=torrent.name,
name=meta.name,
year=meta.year,
season_episode=meta.season_episode,
progress=torrent.progress,
size=torrent.total_size,
state="paused" if torrent.status == "stopped" else "downloading",
dlspeed=StringUtils.str_filesize(dlspeed),
upspeed=StringUtils.str_filesize(upspeed),
left_time=StringUtils.str_secends(torrent.left_until_done / dlspeed) if dlspeed > 0 else ''
))
else:
return None
return ret_torrents

View File

@@ -30,6 +30,13 @@ class VoceChatModule(_ModuleBase, _MessageBase[VoceChat]):
"""
return ModuleType.Notification
@staticmethod
def get_subtype() -> MessageChannel:
"""
获取模块子类型
"""
return MessageChannel.VoceChat
@staticmethod
def get_priority() -> int:
"""

View File

@@ -30,6 +30,13 @@ class WebPushModule(_ModuleBase, _MessageBase):
"""
return ModuleType.Notification
@staticmethod
def get_subtype() -> MessageChannel:
"""
获取模块子类型
"""
return MessageChannel.WebPush
@staticmethod
def get_priority() -> int:
"""

View File

@@ -36,6 +36,13 @@ class WechatModule(_ModuleBase, _MessageBase[WeChat]):
"""
return ModuleType.Notification
@staticmethod
def get_subtype() -> MessageChannel:
"""
获取模块的子类型
"""
return MessageChannel.Wechat
@staticmethod
def get_priority() -> int:
"""

View File

@@ -280,9 +280,9 @@ class Monitor(metaclass=Singleton):
"""
获取BDMV目录的上级目录
"""
for parent in _path.parents:
if parent.name == "BDMV":
return parent.parent
for p in _path.parents:
if p.name == "BDMV":
return p.parent
return None
# 全程加锁

View File

@@ -41,7 +41,7 @@ class Scheduler(metaclass=Singleton):
# 退出事件
_event = threading.Event()
# 锁
_lock = threading.Lock()
_lock = threading.RLock()
# 各服务的运行状态
_jobs = {}
# 用户认证失败次数
@@ -54,53 +54,6 @@ class Scheduler(metaclass=Singleton):
"""
初始化定时服务
"""
def clear_cache():
"""
清理缓存
"""
TorrentsChain().clear_cache()
SchedulerChain().clear_cache()
def user_auth():
"""
用户认证检查
"""
if SitesHelper().auth_level >= 2:
return
# 最大重试次数
__max_try__ = 30
if self._auth_count > __max_try__:
SchedulerChain().messagehelper.put(title=f"用户认证失败",
message="用户认证失败次数过多,将不再尝试认证!",
role="system")
return
logger.info("用户未认证,正在尝试认证...")
auth_conf = SystemConfigOper().get(SystemConfigKey.UserSiteAuthParams)
if auth_conf:
status, msg = SitesHelper().check_user(**auth_conf)
else:
status, msg = SitesHelper().check_user()
if status:
self._auth_count = 0
logger.info(f"{msg} 用户认证成功")
SchedulerChain().post_message(
Notification(
mtype=NotificationType.Manual,
title="MoviePilot用户认证成功",
text=f"使用站点:{msg}",
link=settings.MP_DOMAIN('#/site')
)
)
PluginManager().init_config()
self.init_plugin_jobs()
else:
self._auth_count += 1
logger.error(f"用户认证失败:{msg},共失败 {self._auth_count}")
if self._auth_count >= __max_try__:
logger.error("用户认证失败次数过多,将不再尝试认证!")
# 各服务的运行状态
self._jobs = {
"cookiecloud": {
@@ -146,12 +99,12 @@ class Scheduler(metaclass=Singleton):
},
"clear_cache": {
"name": "缓存清理",
"func": clear_cache,
"func": self.clear_cache,
"running": False,
},
"user_auth": {
"name": "用户认证检查",
"func": user_auth,
"func": self.user_auth,
"running": False,
},
"scheduler_job": {
@@ -174,9 +127,6 @@ class Scheduler(metaclass=Singleton):
# 停止定时服务
self.stop()
# 用户认证立即执行一次
user_auth()
# 调试模式不启动定时服务
if settings.DEV:
return
@@ -329,7 +279,7 @@ class Scheduler(metaclass=Singleton):
"interval",
id="clear_cache",
name="缓存清理",
hours=settings.CACHE_CONF.get("meta") / 3600,
hours=settings.CACHE_CONF["meta"] / 3600,
kwargs={
'job_id': 'clear_cache'
}
@@ -347,17 +297,18 @@ class Scheduler(metaclass=Singleton):
}
)
# 站点数据刷新每隔30分钟
self._scheduler.add_job(
self.start,
"interval",
id="sitedata_refresh",
name="站点数据刷新",
minutes=settings.SITEDATA_REFRESH_INTERVAL * 60,
kwargs={
'job_id': 'sitedata_refresh'
}
)
# 站点数据刷新
if settings.SITEDATA_REFRESH_INTERVAL:
self._scheduler.add_job(
self.start,
"interval",
id="sitedata_refresh",
name="站点数据刷新",
minutes=settings.SITEDATA_REFRESH_INTERVAL * 60,
kwargs={
'job_id': 'sitedata_refresh'
}
)
self.init_plugin_jobs()
@@ -437,11 +388,13 @@ class Scheduler(metaclass=Singleton):
try:
sid = f"{service['id']}"
job_id = sid.split("|")[0]
self.remove_plugin_job(pid, job_id)
self._jobs[job_id] = {
"func": service["func"],
"name": service["name"],
"pid": pid,
"plugin_name": plugin_name,
"kwargs": service.get("func_kwargs") or {},
"running": False,
}
self._scheduler.add_job(
@@ -449,7 +402,7 @@ class Scheduler(metaclass=Singleton):
service["trigger"],
id=sid,
name=service["name"],
**service["kwargs"],
**(service.get("kwargs") or {}),
kwargs={"job_id": job_id},
replace_existing=True
)
@@ -460,23 +413,34 @@ class Scheduler(metaclass=Singleton):
message=str(e),
role="system")
def remove_plugin_job(self, pid: str):
def remove_plugin_job(self, pid: str, job_id: str = None):
"""
移除插件定时服务
移除定时服务,可以是单个服务(包括默认服务)或整个插件的所有服务
:param pid: 插件 ID
:param job_id: 可选,指定要移除的单个服务的 job_id。如果不提供则移除该插件的所有服务当移除单个服务时默认服务也包含在内
"""
if not self._scheduler:
return
with self._lock:
# 获取插件名称
plugin_name = PluginManager().get_plugin_attr(pid, "plugin_name")
# 先从 _jobs 中查找匹配的服务
jobs_to_remove = [(job_id, service) for job_id, service in self._jobs.items() if service.get("pid") == pid]
if job_id:
# 移除单个服务
service = self._jobs.pop(job_id, None)
if not service:
return
jobs_to_remove = [(job_id, service)]
else:
# 移除插件的所有服务
jobs_to_remove = [
(job_id, service) for job_id, service in self._jobs.items() if service.get("pid") == pid
]
for job_id, _ in jobs_to_remove:
self._jobs.pop(job_id, None)
if not jobs_to_remove:
return
plugin_name = PluginManager().get_plugin_attr(pid, "plugin_name")
# 遍历移除任务
for job_id, service in jobs_to_remove:
try:
# 移除服务
self._jobs.pop(job_id, None)
# 在调度器中查找并移除对应的 job
job_removed = False
for job in list(self._scheduler.get_jobs()):
@@ -560,3 +524,50 @@ class Scheduler(metaclass=Singleton):
logger.info("定时任务停止完成")
except Exception as e:
logger.error(f"停止定时任务失败::{str(e)} - {traceback.format_exc()}")
@staticmethod
def clear_cache():
"""
清理缓存
"""
TorrentsChain().clear_cache()
SchedulerChain().clear_cache()
def user_auth(self):
"""
用户认证检查
"""
if SitesHelper().auth_level >= 2:
return
# 最大重试次数
__max_try__ = 30
if self._auth_count > __max_try__:
SchedulerChain().messagehelper.put(title=f"用户认证失败",
message="用户认证失败次数过多,将不再尝试认证!",
role="system")
return
logger.info("用户未认证,正在尝试认证...")
auth_conf = SystemConfigOper().get(SystemConfigKey.UserSiteAuthParams)
if auth_conf:
status, msg = SitesHelper().check_user(**auth_conf)
else:
status, msg = SitesHelper().check_user()
if status:
self._auth_count = 0
logger.info(f"{msg} 用户认证成功")
SchedulerChain().post_message(
Notification(
mtype=NotificationType.Manual,
title="MoviePilot用户认证成功",
text=f"使用站点:{msg}",
link=settings.MP_DOMAIN('#/site')
)
)
PluginManager().init_config()
self.init_plugin_jobs()
else:
self._auth_count += 1
logger.error(f"用户认证失败:{msg},共失败 {self._auth_count}")
if self._auth_count >= __max_try__:
logger.error("用户认证失败次数过多,将不再尝试认证!")

View File

@@ -1,7 +1,11 @@
from typing import Optional, Dict
from pathlib import Path
from typing import Optional, Dict, Any, List, Set
from pydantic import BaseModel, Field, root_validator
from app.core.context import Context
from app.schemas import MessageChannel
class BaseEventData(BaseModel):
"""
@@ -114,3 +118,87 @@ class CommandRegisterEventData(ChainEventData):
# 输出参数
cancel: bool = Field(False, description="是否取消注册")
source: str = Field("未知拦截源", description="拦截源")
class TransferRenameEventData(ChainEventData):
    """
    Data model for the TransferRename chain event.

    Attributes:
        # Input
        template_string (str): Jinja2 template string
        rename_dict (dict): rendering context
        render_str (str): string produced by the default rendering
        path (Optional[Path]): target path of the current file

        # Output
        updated (bool): whether the result was updated, defaults to False
        updated_str (str): the updated string
        source (str): interceptor source, defaults to "未知拦截源"
    """
    # Input parameters
    template_string: str = Field(..., description="模板字符串")
    rename_dict: Dict[str, Any] = Field(..., description="渲染上下文")
    path: Optional[Path] = Field(None, description="文件的目标路径")
    render_str: str = Field(..., description="渲染生成的字符串")

    # Output parameters
    updated: bool = Field(False, description="是否已更新")
    updated_str: Optional[str] = Field(None, description="更新后的字符串")
    source: Optional[str] = Field("未知拦截源", description="拦截源")
class ResourceSelectionEventData(ChainEventData):
    """
    Data model for the ResourceSelection chain event.

    FIX: inherit ChainEventData instead of plain BaseModel — every other
    chain-event data model in this module (CommandRegisterEventData,
    TransferRenameEventData, ResourceDownloadEventData) derives from
    ChainEventData, and ChainEventType.ResourceSelection is dispatched as
    a chain event.

    Attributes:
        # Input
        contexts (List[Context]): resource contexts awaiting selection
            (typed Any to avoid import cycles — same pattern as
            ResourceDownloadEventData.context)
        downloader (Optional[str]): downloader name

        # Output
        updated (bool): whether the context list was updated, defaults to False
        updated_contexts (Optional[List[Context]]): the updated resource contexts
        source (str): interceptor source, defaults to "未知拦截源"
    """
    # Input parameters
    contexts: Any = Field(None, description="待选择的资源上下文列表")
    downloader: Optional[str] = Field(None, description="下载器")

    # Output parameters
    updated: bool = Field(False, description="是否已更新")
    updated_contexts: Optional[List[Context]] = Field(None, description="已更新的资源上下文列表")
    source: Optional[str] = Field("未知拦截源", description="拦截源")
class ResourceDownloadEventData(ChainEventData):
    """
    Data model for the ResourceDownload chain event.

    Attributes:
        # Input
        context (Context): current resource context
        episodes (Set[int]): episode numbers to download
        channel (MessageChannel): notification channel
        origin (str): trigger origin — message notification, Subscribe, Manual, etc.
        downloader (str): downloader name
        options (dict): extra parameters

        # Output
        cancel (bool): whether to cancel the download, defaults to False
        source (str): interceptor source, defaults to "未知拦截源"
        reason (str): reason describing why the download was intercepted
    """
    # Input parameters
    context: Any = Field(None, description="当前资源上下文")
    episodes: Optional[Set[int]] = Field(None, description="需要下载的集数")
    channel: Optional[MessageChannel] = Field(None, description="通知渠道")
    origin: Optional[str] = Field(None, description="来源")
    downloader: Optional[str] = Field(None, description="下载器")
    options: Optional[dict] = Field(None, description="其他参数")

    # Output parameters
    cancel: bool = Field(False, description="是否取消下载")
    source: str = Field("未知拦截源", description="拦截源")
    reason: str = Field("", description="拦截原因")

View File

@@ -10,6 +10,7 @@ class TransferTorrent(BaseModel):
"""
待转移任务信息
"""
downloader: Optional[str] = None
title: Optional[str] = None
path: Optional[Path] = None
hash: Optional[str] = None
@@ -22,6 +23,7 @@ class DownloadingTorrent(BaseModel):
"""
下载中任务信息
"""
downloader: Optional[str] = None
hash: Optional[str] = None
title: Optional[str] = None
name: Optional[str] = None

View File

@@ -60,14 +60,20 @@ class EventType(Enum):
# Synchronous chain events
class ChainEventType(Enum):
    # Name recognition
    NameRecognize = "name.recognize"
    # Auth verification
    AuthVerification = "auth.verification"
    # Auth interception
    AuthIntercept = "auth.intercept"
    # Command registration
    CommandRegister = "command.register"
    # Transfer rename
    TransferRename = "transfer.rename"
    # Resource selection
    ResourceSelection = "resource.selection"
    # Resource download
    ResourceDownload = "resource.download"
# 系统配置Key字典
@@ -176,6 +182,52 @@ class MessageChannel(Enum):
WebPush = "WebPush"
# Downloader types
class DownloaderType(Enum):
    # Qbittorrent
    Qbittorrent = "Qbittorrent"
    # Transmission
    Transmission = "Transmission"
    # Aria2 — intentionally disabled, not yet supported
    # Aria2 = "Aria2"
# Media server types
class MediaServerType(Enum):
    # Emby
    Emby = "Emby"
    # Jellyfin
    Jellyfin = "Jellyfin"
    # Plex
    Plex = "Plex"
# Media recognizer types
class MediaRecognizeType(Enum):
    # Douban
    Douban = "豆瓣"
    # TMDB
    TMDB = "TheMovieDb"
    # TVDB
    TVDB = "TheTvDb"
    # Bangumi
    Bangumi = "Bangumi"
# Miscellaneous module types
class OtherModulesType(Enum):
    # Site subtitles
    Subtitle = "站点字幕"
    # Fanart artwork provider
    Fanart = "Fanart"
    # File organizing
    FileManager = "文件整理"
    # Filter
    Filter = "过滤器"
    # Site indexer
    Indexer = "站点索引"
# 用户配置Key字典
class UserConfigKey(Enum):
# 监控面板

View File

@@ -23,7 +23,9 @@ from app.helper.message import MessageHelper
from app.scheduler import Scheduler
from app.monitor import Monitor
from app.schemas import Notification, NotificationType
from app.schemas.types import SystemConfigKey
from app.db import close_database
from app.db.systemconfig_oper import SystemConfigOper
from app.chain.command import CommandChain
@@ -72,6 +74,19 @@ def clear_temp():
SystemUtils.clear(settings.CACHE_PATH / "images", days=7)
def user_auth():
    """
    User authentication check at startup: if the site helper is not yet
    authenticated, attempt authentication with the stored parameters.
    """
    if SitesHelper().auth_level >= 2:
        # Already authenticated — skip
        return
    auth_conf = SystemConfigOper().get(SystemConfigKey.UserSiteAuthParams)
    # Pass the stored auth params when present, otherwise call without arguments
    SitesHelper().check_user(**(auth_conf or {}))
def check_auth():
"""
检查认证状态
@@ -128,6 +143,8 @@ def start_modules(_: FastAPI):
SitesHelper()
# 资源包检测
ResourceHelper()
# 用户认证
user_auth()
# 加载模块
ModuleManager()
# 启动事件消费

View File

@@ -1,5 +1,7 @@
import re
from typing import Any, Optional, Union
import chardet
import requests
import urllib3
from requests import Response, Session
@@ -273,3 +275,108 @@ class RequestUtils:
cache_headers["Cache-Control"] = f"max-age={max_age}"
return cache_headers
@staticmethod
def detect_encoding_from_html_response(response: Response,
                                       performance_mode: bool = False, confidence_threshold: float = 0.8):
    """
    Detect the character encoding of an HTML response.

    :param response: HTTP response object
    :param performance_mode: use performance mode, defaults to False (compatibility mode)
    :param confidence_threshold: chardet confidence threshold, defaults to 0.8
    :return: the detected character encoding

    Compatibility mode runs chardet first, then BOM / <meta> / header checks;
    performance mode runs the cheap checks first and chardet last. If nothing
    is conclusive, chardet's low-confidence guess is used, else "utf-8".

    FIX: the compatibility branch used detection["confidence"] (KeyError risk
    and inconsistent with the performance branch's .get("confidence", 0));
    both branches now share the same probes instead of duplicating them.
    """
    fallback_encoding = None
    try:
        def _probe_chardet() -> Optional[str]:
            # chardet may report encoding=None / confidence=0.0 on undecidable input
            nonlocal fallback_encoding
            detection = chardet.detect(response.content)
            if (detection.get("confidence") or 0) > confidence_threshold:
                return detection.get("encoding")
            # Keep the low-confidence guess as a last-resort fallback
            fallback_encoding = detection.get("encoding")
            return None

        def _probe_bom() -> Optional[str]:
            # UTF-8 BOM marker at the start of the body
            return "utf-8" if response.content[:3] == b"\xef\xbb\xbf" else None

        def _probe_meta() -> Optional[str]:
            # <meta charset="utf-8"> in the HTML body
            # (only UTF-8 is recognized for now; generic charset extraction is not supported)
            if re.search(r"charset=[\"']?utf-8[\"']?", response.text, re.IGNORECASE):
                return "utf-8"
            return None

        def _probe_headers() -> Optional[str]:
            # charset in the Content-Type response header (UTF-8 only, see above)
            content_type = response.headers.get("Content-Type", "")
            if re.search(r"charset=[\"']?utf-8[\"']?", content_type, re.IGNORECASE):
                return "utf-8"
            return None

        if performance_mode:
            # Performance mode: headers and BOM first, chardet analysis last
            probes = (_probe_headers, _probe_bom, _probe_meta, _probe_chardet)
        else:
            # Compatibility mode: chardet analysis first, then BOM / meta / headers
            probes = (_probe_chardet, _probe_bom, _probe_meta, _probe_headers)
        for probe in probes:
            encoding = probe()
            if encoding:
                return encoding
        # Nothing conclusive: trust chardet's low-confidence result, else default charset
        return fallback_encoding or "utf-8"
    except Exception as e:
        logger.debug(f"Error when detect_encoding_from_response: {str(e)}")
        return fallback_encoding or "utf-8"
@staticmethod
def get_decoded_html_content(response: Response,
                             performance_mode: bool = False, confidence_threshold: float = 0.8) -> str:
    """
    Get the decoded text content of an HTML response.

    :param response: HTTP response object
    :param performance_mode: use performance mode, defaults to False (compatibility mode)
    :param confidence_threshold: chardet confidence threshold, defaults to 0.8
    :return: decoded response text
    """
    try:
        if not response:
            return ""
        if response.content:
            # 1. Work out the encoding: detector first, requests'
            #    apparent_encoding as a backup
            encoding = (RequestUtils.detect_encoding_from_html_response(response, performance_mode,
                                                                        confidence_threshold)
                        or response.apparent_encoding)
            # 2. Decode with the detected encoding
            try:
                return response.content.decode(encoding)
            except Exception as e:
                logger.debug(f"Decoding failed, error message: {str(e)}")
                # Decoding failed — fall back to apparent_encoding via response.text
                response.encoding = response.apparent_encoding
                return response.text
        else:
            return response.text
    except Exception as e:
        logger.debug(f"Error when getting decoded content: {str(e)}")
        return response.text

View File

@@ -58,6 +58,6 @@ pystray~=0.19.5
pyotp~=2.9.0
Pinyin2Hanzi~=0.1.1
pywebpush~=2.0.0
python-115~=0.0.9.8.7
python-115~=0.0.9.8.8.2
aligo~=6.2.4
aiofiles~=24.1.0

22
update
View File

@@ -20,16 +20,6 @@ function WARN() {
echo -e "${WARN} ${1}"
}
TMP_PATH=$(mktemp -d)
if [ ! -d "${TMP_PATH}" ]; then
# 如果自动生成 tmp 文件夹失败则手动指定,避免出现数据丢失等情况
TMP_PATH=/tmp/mp_update_path
if [ -d /tmp/mp_update_path ]; then
rm -rf /tmp/mp_update_path
fi
mkdir -p /tmp/mp_update_path
fi
# 下载及解压
function download_and_unzip() {
local retries=0
@@ -275,6 +265,15 @@ function get_priority() {
}
if [[ "${MOVIEPILOT_AUTO_UPDATE}" = "true" ]] || [[ "${MOVIEPILOT_AUTO_UPDATE}" = "release" ]] || [[ "${MOVIEPILOT_AUTO_UPDATE}" = "dev" ]]; then
TMP_PATH=$(mktemp -d)
if [ ! -d "${TMP_PATH}" ]; then
# 如果自动生成 tmp 文件夹失败则手动指定,避免出现数据丢失等情况
TMP_PATH=/tmp/mp_update_path
if [ -d /tmp/mp_update_path ]; then
rm -rf /tmp/mp_update_path
fi
mkdir -p /tmp/mp_update_path
fi
# 优先级:镜像站 > 全局 > 不代理
# pip
retries=0
@@ -324,6 +323,9 @@ if [[ "${MOVIEPILOT_AUTO_UPDATE}" = "true" ]] || [[ "${MOVIEPILOT_AUTO_UPDATE}"
WARN "当前版本号获取失败,继续启动..."
fi
fi
if [ -d "${TMP_PATH}" ]; then
rm -rf "${TMP_PATH}"
fi
elif [[ "${MOVIEPILOT_AUTO_UPDATE}" = "false" ]]; then
INFO "程序自动升级已关闭如需自动升级请在创建容器时设置环境变量MOVIEPILOT_AUTO_UPDATE=release"
else

View File

@@ -1,2 +1,2 @@
APP_VERSION = 'v2.0.8'
FRONTEND_VERSION = 'v2.0.8'
APP_VERSION = 'v2.1.2'
FRONTEND_VERSION = 'v2.1.2'