Compare commits

..

199 Commits

Author SHA1 Message Date
jxxghp
ba62ca3d18 - 文件整理支持仅保留最新版本文件(转移覆盖模式设为:latest
- 重启时会识别用户已安装过但本地不存在的插件并自动下载安装,避免升级重置后第三方插件消失的问题
2023-11-13 12:11:43 +08:00
jxxghp
612271bf0c fix 缺失状态识判 2023-11-13 12:00:53 +08:00
jxxghp
3b99fb5c96 fix #1109 2023-11-12 19:10:46 +08:00
jxxghp
bb61f8197c fix #1110 2023-11-12 19:01:25 +08:00
jxxghp
b54f04a35b fix #1110 2023-11-12 18:51:24 +08:00
jxxghp
d47639bada fix #1110 2023-11-12 18:51:08 +08:00
jxxghp
ae9bab2981 Merge pull request #1110 from thsrite/update
fix 重启或重置后三方插件丢失问题
2023-11-12 18:44:45 +08:00
thsrite
2116b094ad fix 三方插件丢失问题 2023-11-11 23:40:39 -06:00
jxxghp
288883a13b feat 文件整理支持仅保留最新版本 2023-11-12 08:03:15 +08:00
jxxghp
07c988abae fix transfer message 2023-11-12 07:21:57 +08:00
jxxghp
fd4a3b5671 fix bug 2023-11-11 15:23:10 +08:00
jxxghp
71adfad94d fix bug 2023-11-11 14:57:38 +08:00
jxxghp
7faaaf3dcd fix bug 2023-11-11 14:14:09 +08:00
jxxghp
25e7db5ac9 fix seerr api 2023-11-11 12:20:27 +08:00
jxxghp
07bd5f1926 fix seerr api 2023-11-11 12:16:45 +08:00
jxxghp
9439d02351 fix TMDB缓存None的问题 2023-11-11 10:37:33 +08:00
jxxghp
cbea7ccdf6 fix TMDB缓存None的问题 2023-11-11 10:34:49 +08:00
jxxghp
93661dfde4 v1.4.1 2023-11-10 22:23:13 +08:00
jxxghp
b98f5351cf fix #1098 2023-11-10 21:56:38 +08:00
jxxghp
83a7261fcd fix #1101 2023-11-10 21:54:45 +08:00
jxxghp
daa2b7a8cd feat 部分API支持api token访问 2023-11-10 21:40:24 +08:00
jxxghp
d245fedb3f fix api token 2023-11-10 20:51:09 +08:00
jxxghp
b0fee2cb3c add token verify 2023-11-10 17:31:19 +08:00
jxxghp
9a102056d8 fix douban rank 2023-11-10 12:23:03 +08:00
jxxghp
3905463940 fix douban mode 2023-11-09 23:44:08 +08:00
jxxghp
746fde592d fix douban mode 2023-11-09 23:22:38 +08:00
jxxghp
3e5f5554da fix bug 2023-11-09 21:47:22 +08:00
jxxghp
01fb6e8772 fix bug 2023-11-09 21:15:36 +08:00
jxxghp
b7448232e6 fix bug 2023-11-09 19:56:56 +08:00
jxxghp
05f1a24199 feat 支持豆瓣做为识别源 2023-11-09 17:32:26 +08:00
jxxghp
4072799c13 fix #1064 2023-11-06 11:29:43 +08:00
jxxghp
9744032f93 Merge remote-tracking branch 'origin/main' 2023-11-06 08:14:13 +08:00
jxxghp
eb9a92d76d v1.4.0 2023-11-06 08:06:57 +08:00
jxxghp
89a4932823 fix plugin get_state 2023-11-05 21:20:27 +08:00
jxxghp
cef06a8894 fix message event 2023-11-05 08:41:45 +08:00
jxxghp
c741edffb0 fix 2023-11-04 07:52:48 +08:00
jxxghp
e7c543fcb9 feat 第三方插件支持依赖 2023-11-04 07:31:14 +08:00
jxxghp
2a61720b0a Merge pull request #1052 from DDS-Derek/main 2023-11-03 19:00:06 +08:00
DDSRem
73484647ba feat: optimize restart update 2023-11-03 18:33:53 +08:00
jxxghp
c9d461f8c8 更新 update 2023-11-03 12:46:29 +08:00
jxxghp
9bdc056359 fix time 2023-11-03 12:29:38 +08:00
jxxghp
6a8a1e799d fix 同步用户提权Bug 2023-11-03 12:19:05 +08:00
jxxghp
c3c55f3a13 - 修复插件事件重复执行的问题 2023-11-03 09:13:37 +08:00
jxxghp
6f881a80d6 - 修复插件事件重复执行的问题 2023-11-03 09:13:18 +08:00
jxxghp
a75c4110a8 fix #1047 2023-11-03 07:51:42 +08:00
jxxghp
3e031c6191 fix #1047 2023-11-03 07:28:34 +08:00
jxxghp
a4b7ca824e v1.3.9-1 2023-11-02 17:50:31 +08:00
jxxghp
ecab2b63c9 fix #1044 2023-11-02 17:42:52 +08:00
jxxghp
620e3d55d1 fix 插件更新不立即生效的问题 2023-11-02 13:38:50 +08:00
jxxghp
3716d7fd47 fix bug 2023-11-02 12:35:25 +08:00
jxxghp
60764d198a Merge remote-tracking branch 'origin/main' 2023-11-02 12:27:36 +08:00
jxxghp
3178d9da88 fix update 2023-11-02 12:27:30 +08:00
jxxghp
7264313c9c 更新 README.md 2023-11-02 11:52:51 +08:00
jxxghp
71c36881fb fix update 2023-11-02 11:47:24 +08:00
jxxghp
5a0f7ae838 fix 2023-11-02 11:29:48 +08:00
jxxghp
10fb61bd57 fix repo cache 2023-11-02 11:20:11 +08:00
jxxghp
6d4b4c6ba7 fix bug 2023-11-02 11:06:37 +08:00
jxxghp
798a737f06 fix 在线插件去重(以后面的为准) 2023-11-02 11:00:49 +08:00
jxxghp
0a9e125f89 feat 支持ptvicomo认证 2023-11-02 10:18:32 +08:00
jxxghp
21b3525f23 v1.3.9 2023-11-02 08:29:23 +08:00
jxxghp
8e842c385d fix bug 2023-11-02 08:19:41 +08:00
jxxghp
8e8a587bca fix bug 2023-11-02 08:12:36 +08:00
jxxghp
155aa2580b fix build 2023-11-02 07:51:17 +08:00
jxxghp
01aa381848 fix timeout 2023-11-02 00:06:10 +08:00
jxxghp
6c13fa02c1 fix update 2023-11-02 00:04:46 +08:00
jxxghp
c50576b508 fix update 2023-11-01 22:48:52 +08:00
jxxghp
7bc4a6906a fix README.md 2023-11-01 22:17:58 +08:00
jxxghp
c80318f442 fix py修改不生效问题 2023-11-01 22:08:43 +08:00
jxxghp
cd4229a915 fix bug 2023-11-01 22:05:48 +08:00
jxxghp
fbe306ba90 feat 在线仓库插件安装 2023-11-01 20:56:38 +08:00
jxxghp
0dac3f1b1d fix gitignore 2023-11-01 17:58:48 +08:00
jxxghp
7a90e6c1a7 fix update 2023-11-01 17:49:52 +08:00
jxxghp
42a4a8639d fix README.md 2023-11-01 17:45:46 +08:00
jxxghp
a687642a6a fix update 2023-11-01 17:33:00 +08:00
jxxghp
dbba7fc92a feat 拆分资源包 2023-11-01 17:27:17 +08:00
jxxghp
a0afd86b6a fix update 2023-11-01 16:56:57 +08:00
jxxghp
22b76f9919 feat 拆分插件仓库 2023-11-01 16:50:53 +08:00
jxxghp
c7a869b750 feat 媒体库刷新移植为插件 2023-11-01 16:20:15 +08:00
jxxghp
00052efbbc feat 媒体服务器通知插件 2023-11-01 15:53:45 +08:00
jxxghp
a36332581a add helper 2023-11-01 15:25:30 +08:00
jxxghp
8c81e6ae02 add platform 2023-11-01 14:13:41 +08:00
jxxghp
a7285f2b1a add versions 2023-11-01 12:23:24 +08:00
jxxghp
271b33ecdc Merge pull request #1034 from thsrite/main 2023-11-01 12:08:53 +08:00
thsrite
b4d07cf6ab fix 2023-11-01 12:06:05 +08:00
thsrite
382035768e fix 兼容emby webhook删除season没有tmdbid 2023-11-01 11:51:07 +08:00
jxxghp
fcb825c1e2 Merge pull request #1033 from honue/main 2023-11-01 11:43:09 +08:00
honue
abfeea63f7 fix README.md 2023-11-01 11:39:21 +08:00
jxxghp
b0ce7e6531 fix resources 2023-11-01 11:37:23 +08:00
honue
303aa9b580 fix rss报文设置编码问题 2023-11-01 11:30:26 +08:00
jxxghp
d016d239e3 fix README.md 2023-11-01 10:08:53 +08:00
jxxghp
e35838c326 Merge remote-tracking branch 'origin/main' 2023-11-01 10:08:47 +08:00
jxxghp
de15f9b56e fix README.md 2023-11-01 10:08:40 +08:00
jxxghp
560773a11a Merge pull request #1030 from Shurelol/main 2023-11-01 08:35:37 +08:00
Ma
dd6df471dc fix 2023-11-01 07:43:49 +08:00
Ma
eea5c056f3 fix 2023-11-01 07:34:48 +08:00
Ma
a2d503b2f5 fix #1029 2023-11-01 07:29:53 +08:00
jxxghp
cf13e4a4fa Merge pull request #1028 from thsrite/main 2023-10-31 16:55:53 +08:00
thsrite
45fb8e86bf fix webhook 2023-10-31 16:39:20 +08:00
thsrite
099dcda185 fix webhook 2023-10-31 16:33:48 +08:00
thsrite
c07e12cc5a fix webhook 2023-10-31 15:37:39 +08:00
jxxghp
b23f78e94d v1.3.8 2023-10-31 11:50:18 +08:00
jxxghp
812a9a55d0 fix Plugin Alerts UI 2023-10-31 11:48:36 +08:00
jxxghp
2e289e80d1 fix 2023-10-31 10:37:36 +08:00
jxxghp
0d3dfdcbda feat 服务增加清理缓存 2023-10-31 10:32:56 +08:00
jxxghp
87eae72f51 fix #1024 2023-10-31 07:08:28 +08:00
jxxghp
17fa7101bd fix so 2023-10-30 13:35:19 +08:00
jxxghp
312bd53079 fix #1012 2023-10-29 16:08:34 +08:00
jxxghp
4bc7d47576 Merge pull request #1012 from Shurelol/main 2023-10-29 15:56:34 +08:00
Shurelol
71445b56f1 feat: 名称识别支持tmdbid等标记 2023-10-29 13:49:08 +08:00
jxxghp
9ce9e0a4ef fix #1006 2023-10-28 20:27:48 +08:00
jxxghp
ae196f1aeb Merge pull request #1006 from thsrite/main
fix bug
2023-10-28 20:27:25 +08:00
thsrite
38e09b894d fix bug 2023-10-28 20:24:16 +08:00
jxxghp
247d5ff255 Merge pull request #999 from honue/main
enhance 定期清理插件
2023-10-28 20:20:28 +08:00
jxxghp
0091e462fa Merge pull request #1004 from thsrite/main
fix 下载进度推送username
2023-10-28 20:19:12 +08:00
thsrite
7b314970b5 fix 下载进度推送username 2023-10-28 19:47:27 +08:00
jxxghp
7ac881e3e3 Merge pull request #1003 from thsrite/main 2023-10-28 19:31:29 +08:00
thsrite
8874723632 fix 认证失败后插件站点缺失bug 2023-10-28 19:14:08 +08:00
thsrite
262bda94c4 fix 目录监控入库消息延迟支持自定义 2023-10-28 18:52:46 +08:00
honue
d6e2cab5ef 兼容1.3.7版本清理插件配置 2023-10-28 18:40:27 +08:00
Summer⛱
6d3e33a05d Merge branch 'main' into main 2023-10-28 18:30:01 +08:00
jxxghp
f2d0bec0ac fix README 2023-10-28 17:46:57 +08:00
jxxghp
dea78f4bfd fix 2023-10-28 17:45:28 +08:00
jxxghp
f85f4b1342 Merge pull request #1000 from WithdewHua/qb
feat: qb 支持强制继续
2023-10-28 17:45:03 +08:00
WithdewHua
d03771f8ab feat: qb 支持强制继续 2023-10-28 17:41:31 +08:00
jxxghp
4b655dfac4 fix #957
fix #982
2023-10-28 17:41:22 +08:00
honue
cdfcdd80bf fix 2023-10-28 17:16:59 +08:00
honue
64d3942ba9 enhance 定期清理插件 2023-10-28 17:11:48 +08:00
jxxghp
16cce73f82 Merge pull request #996 from honue/main 2023-10-28 13:15:36 +08:00
honue
846edff84a fix 豆瓣榜单插件 2023-10-28 13:13:06 +08:00
jxxghp
d038bf31d3 Merge pull request #995 from honue/main 2023-10-28 12:41:50 +08:00
honue
376a69af5c fix 豆瓣榜单插件 2023-10-28 12:36:06 +08:00
jxxghp
380bb9bb3d Merge pull request #994 from thsrite/main 2023-10-28 12:26:27 +08:00
thsrite
f59e10ae1d fix qb按顺序下载支持变量配置 2023-10-28 12:24:24 +08:00
jxxghp
c8d2d80cc5 feat 支持配置多个认证站点 2023-10-28 11:50:50 +08:00
jxxghp
f0bb9ddfca Merge pull request #993 from thsrite/main
fix 目录监控消息电影不用等待直接发送入库消息
2023-10-28 11:46:23 +08:00
thsrite
9ab86e4a85 fix 目录监控消息电影不用等待直接发送入库消息 2023-10-28 11:41:44 +08:00
jxxghp
e33f1a3ffc Merge pull request #992 from thsrite/main
fix plugin api
2023-10-28 11:28:36 +08:00
jxxghp
e2213e1ef6 fix 远程搜索选择序号问题 2023-10-28 11:25:24 +08:00
thsrite
bbc4a1bfa5 fix plugin api 2023-10-28 11:22:37 +08:00
jxxghp
61e7ec9a36 Merge pull request #991 from honue/main 2023-10-28 11:06:25 +08:00
jxxghp
534ad0bad6 Merge pull request #987 from thsrite/main 2023-10-28 11:04:21 +08:00
thsrite
db3040a50e fix 2023-10-28 11:02:34 +08:00
honue
8dd74e7dd8 fix 完善页面download传参username 2023-10-28 11:02:26 +08:00
jxxghp
206cdb2663 Merge pull request #988 from khalid586/main 2023-10-28 10:58:24 +08:00
jxxghp
ca334813b7 更新 __init__.py 2023-10-28 10:55:02 +08:00
jxxghp
5fc93ee8e6 Merge pull request #986 from WithdewHua/fix-mediaserver 2023-10-28 10:52:53 +08:00
Khalid Abdullah
9cef7b2615 Update __init__.py(typos fixed) 2023-10-27 23:56:37 +06:00
thsrite
a3916207ae fix division by zero 2023-10-27 23:13:56 +08:00
thsrite
b6e1702051 fix add plugins api 2023-10-27 21:28:37 +08:00
WithdewHua
2cfc8b1ec7 fix: 重连判断 2023-10-27 20:31:52 +08:00
jxxghp
2f7570eec1 Merge pull request #983 from thsrite/main 2023-10-27 17:06:36 +08:00
thsrite
070481cab0 fix 正在下载显示剩余下载时间 2023-10-27 16:52:47 +08:00
jxxghp
26cd2c6cfe Merge pull request #982 from honue/main 2023-10-27 16:25:34 +08:00
honue
1ff571eb46 fix 定时清理媒体库,增加username字段 2023-10-27 15:12:10 +08:00
jxxghp
d8fcb4d240 Merge pull request #980 from thsrite/main 2023-10-27 10:42:45 +08:00
thsrite
778f97c1f3 fix log友好提示 2023-10-27 10:41:31 +08:00
jxxghp
1d6d9aa96d v1.3.7 2023-10-26 17:14:43 +08:00
jxxghp
3bdd96a8ee fix #951 不缓存网络错误导致的TMDB信息None 2023-10-26 17:07:01 +08:00
jxxghp
935ad73d32 fix #955 2023-10-26 16:45:09 +08:00
jxxghp
a85d55f3a8 fix 2023-10-26 16:17:29 +08:00
jxxghp
d7c659b736 fix 2023-10-26 16:15:31 +08:00
jxxghp
e5cedab873 Merge pull request #975 from Shurelol/main 2023-10-26 16:03:22 +08:00
jxxghp
3653d73f4f Merge pull request #974 from thsrite/main 2023-10-26 16:02:21 +08:00
Shurelol
4af57ed861 feat: 增加转移覆盖模式配置 2023-10-26 15:55:09 +08:00
Shurelol
10445c6f56 feat: 增加转移覆盖模式配置 2023-10-26 15:51:17 +08:00
Shurelol
dc6051f0b0 feat: 增加转移覆盖模式配置 2023-10-26 15:49:33 +08:00
Shurelol
2a524eaf22 feat: 增加转移覆盖模式配置 2023-10-26 15:28:41 +08:00
Shurelol
9a810f440d feat: 增加转移覆盖模式配置 2023-10-26 15:24:39 +08:00
thsrite
27ba8db4ea fix images 2023-10-26 15:07:39 +08:00
thsrite
7130194d5f fix 2023-10-26 14:55:08 +08:00
thsrite
d70afc36c9 fix 2023-10-26 14:48:46 +08:00
thsrite
78017b8a0e fix del image 2023-10-26 14:40:57 +08:00
thsrite
e87fdc896c fix tmdbinfo images 2023-10-26 14:34:58 +08:00
thsrite
7bb6d448ed feat 云盘文件删除插件 2023-10-26 13:38:48 +08:00
jxxghp
6415fd9286 Merge pull request #966 from thsrite/main 2023-10-26 13:13:16 +08:00
thsrite
2dd4395698 fix #935 2023-10-26 11:28:07 +08:00
thsrite
68b6e67a93 fix #970 2023-10-26 11:07:11 +08:00
thsrite
71b35e39ab fix 18262f98 2023-10-26 09:10:59 +08:00
thsrite
9ff6015fec fix Cloudflare IP优选插件描述… 2023-10-25 16:53:51 +08:00
thsrite
124817b733 fix 自定义hosts插件描述… 2023-10-25 16:52:19 +08:00
thsrite
8f8f3af7cd fix 药丸签到定时任务描述 2023-10-25 16:37:02 +08:00
thsrite
882fe6cd00 fix 关于路径映射描述…… 2023-10-25 16:33:09 +08:00
thsrite
18262f98f7 fix 同步删除通知图片 2023-10-25 10:52:33 +08:00
jxxghp
fe5a90ac2f Merge pull request #964 from thsrite/main 2023-10-25 10:20:39 +08:00
thsrite
22869b7932 fix #962 2023-10-25 10:10:17 +08:00
jxxghp
e702c16a74 Merge pull request #959 from thsrite/main 2023-10-24 11:32:13 +08:00
thsrite
408690c0ae fix 4aaf5997 2023-10-24 11:22:47 +08:00
thsrite
4aaf5997df fix 登录页海报支持自定义tmdb/bing 2023-10-24 11:17:32 +08:00
jxxghp
f50104bc86 Merge pull request #953 from thsrite/main 2023-10-23 12:01:02 +08:00
thsrite
ee10fc18a7 fix #952 2023-10-23 09:12:53 +08:00
jxxghp
818ef63aec fix #948 2023-10-22 08:23:04 +08:00
jxxghp
4af374f86d Merge remote-tracking branch 'origin/main' 2023-10-22 08:20:10 +08:00
jxxghp
277b252ad8 fix #949 2023-10-22 08:20:00 +08:00
jxxghp
cc7671efd0 Merge pull request #950 from LWLLR/bugfix/rss-proxy 2023-10-22 08:04:10 +08:00
LWLLR
419276eb85 fix: RSS订阅插件下载没有启用代理问题 2023-10-22 06:42:59 +08:00
jxxghp
7d97b9142a Merge pull request #945 from DDS-Derek/main 2023-10-21 15:08:08 +08:00
DDSDerek
c3c041f675 fix: docker buildx cache 2023-10-21 14:40:45 +08:00
jxxghp
d790e6b731 Merge pull request #944 from DDS-Derek/main 2023-10-21 14:33:24 +08:00
DDSRem
8b714a4710 feat: emphasis mark 2023-10-21 14:30:42 +08:00
145 changed files with 2057 additions and 23887 deletions

View File

@@ -9,9 +9,9 @@ body:
请确认以下信息:
1. 请按此模板提交issues不按模板提交的问题将直接关闭。
2. 如果你的问题可以直接在以往 issue 或者 Telegram频道 中找到,那么你的 issue 将会被直接关闭。
3. 提交问题务必描述清楚、附上日志,描述不清导致无法理解和分析的问题会被直接关闭。
3. **$\color{red}{提交问题务必描述清楚、附上日志}$**,描述不清导致无法理解和分析的问题会被直接关闭。
4. 此仓库为后端仓库,如果是前端 WebUI 问题请在[前端仓库](https://github.com/jxxghp/MoviePilot-Frontend)提 issue。
5. 不要通过issues来寻求解决你的环境问题、配置安装类问题、咨询类问题,否则直接关闭并加入用户黑名单!实在没有精力陪一波又一波的伸手党玩。
5. **$\color{red}{不要通过issues来寻求解决你的环境问题、配置安装类问题、咨询类问题}$**,否则直接关闭并加入用户 $\color{red}{黑名单}$ !实在没有精力陪一波又一波的伸手党玩。
- type: checkboxes
id: ensure
attributes:

View File

@@ -55,8 +55,8 @@ jobs:
MOVIEPILOT_VERSION=${{ env.app_version }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
cache-from: type=gha, scope=${{ github.workflow }}
cache-to: type=gha, scope=${{ github.workflow }}
cache-from: type=gha, scope=${{ github.workflow }}-docker
cache-to: type=gha, scope=${{ github.workflow }}-docker
Windows-build:
runs-on: windows-latest
@@ -96,6 +96,16 @@ jobs:
New-Item -Path "nginx/temp/__keep__.txt" -ItemType File -Force
New-Item -Path "nginx/logs" -ItemType Directory -Force
New-Item -Path "nginx/logs/__keep__.txt" -ItemType File -Force
Invoke-WebRequest -Uri "https://github.com/jxxghp/MoviePilot-Plugins/archive/refs/heads/main.zip" -OutFile "MoviePilot-Plugins-main.zip"
Expand-Archive -Path "MoviePilot-Plugins-main.zip" -DestinationPath "MoviePilot-Plugins-main"
Move-Item -Path "MoviePilot-Plugins-main/MoviePilot-Plugins-main/plugins/*" -Destination "app/plugins/" -Force
Remove-Item -Path "MoviePilot-Plugins-main.zip"
Remove-Item -Path "MoviePilot-Plugins-main" -Recurse -Force
Invoke-WebRequest -Uri "https://github.com/jxxghp/MoviePilot-Resources/archive/refs/heads/main.zip" -OutFile "MoviePilot-Resources-main.zip"
Expand-Archive -Path "MoviePilot-Resources-main.zip" -DestinationPath "MoviePilot-Resources-main"
Move-Item -Path "MoviePilot-Resources-main/MoviePilot-Resources-main/resources/*" -Destination "app/helper/" -Force
Remove-Item -Path "MoviePilot-Resources-main.zip"
Remove-Item -Path "MoviePilot-Resources-main" -Recurse -Force
shell: pwsh
- name: Pyinstaller

5
.gitignore vendored
View File

@@ -5,6 +5,11 @@ dist/
nginx/
test.py
app/helper/sites.py
app/helper/*.so
app/helper/*.pyd
app/helper/*.bin
app/plugins/**
config/user.db
config/sites/**
*.pyc
*.log

View File

@@ -11,8 +11,7 @@ ENV LANG="C.UTF-8" \
PORT=3001 \
NGINX_PORT=3000 \
PROXY_HOST="" \
MOVIEPILOT_AUTO_UPDATE=true \
MOVIEPILOT_AUTO_UPDATE_DEV=false \
MOVIEPILOT_AUTO_UPDATE=release \
AUTH_SITE="iyuu" \
IYUU_SIGN=""
WORKDIR "/app"
@@ -32,6 +31,7 @@ RUN apt-get update -y \
jq \
haproxy \
fuse3 \
rsync \
&& \
if [ "$(uname -m)" = "x86_64" ]; \
then ln -s /usr/lib/x86_64-linux-musl/libc.so /lib/libc.musl-x86_64.so.1; \
@@ -76,7 +76,12 @@ RUN cp -f /app/nginx.conf /etc/nginx/nginx.template.conf \
&& locale-gen zh_CN.UTF-8 \
&& FRONTEND_VERSION=$(curl -sL "https://api.github.com/repos/jxxghp/MoviePilot-Frontend/releases/latest" | jq -r .tag_name) \
&& curl -sL "https://github.com/jxxghp/MoviePilot-Frontend/releases/download/${FRONTEND_VERSION}/dist.zip" | busybox unzip -d / - \
&& mv /dist /public
&& mv /dist /public \
&& curl -sL "https://github.com/jxxghp/MoviePilot-Plugins/archive/refs/heads/main.zip" | busybox unzip -d /tmp - \
&& mv -f /tmp/MoviePilot-Plugins-main/plugins/* /app/app/plugins/ \
&& curl -sL "https://github.com/jxxghp/MoviePilot-Resources/archive/refs/heads/main.zip" | busybox unzip -d /tmp - \
&& mv -f /tmp/MoviePilot-Resources-main/resources/* /app/app/helper/ \
&& rm -rf /tmp/*
EXPOSE 3000
VOLUME [ "/config" ]
ENTRYPOINT [ "/entrypoint" ]

View File

@@ -39,14 +39,23 @@ MoviePilot需要配套下载器和媒体服务器配合使用。
docker pull jxxghp/moviepilot:latest
```
- Windows
下载 [MoviePilot.exe](https://github.com/jxxghp/MoviePilot/releases),双击运行后自动生成配置文件目录。
- 本地运行
1) 将工程 [MoviePilot-Plugins](https://github.com/jxxghp/MoviePilot-Plugins) plugins目录下的所有文件复制到`app/plugins`目录
2) 将工程 [MoviePilot-Resources](https://github.com/jxxghp/MoviePilot-Resources) resources目录下的所有文件复制到`app/helper`目录
3) 执行命令:`pip install -r requirements.txt` 安装依赖
4) 执行命令:`python app/main.py` 启动服务
## 配置
项目的所有配置均通过环境变量进行设置,支持两种配置方式:
- 在Docker环境变量部分或Wdinows系统环境变量中进行参数配置如未自动显示配置项则需要手动增加对应环境变量。
- 在Docker环境变量部分或Windows系统环境变量中进行参数配置如未自动显示配置项则需要手动增加对应环境变量。
- 下载 [app.env](https://github.com/jxxghp/MoviePilot/raw/main/config/app.env) 配置文件,修改好配置后放置到配置文件映射路径根目录,配置项可根据说明自主增减。
配置文件映射路径:`/config`,配置项生效优先级:环境变量 > env文件 > 默认值,**部分参数如路径映射、站点认证、权限端口、时区等必须通过环境变量进行配置**。
@@ -61,20 +70,22 @@ MoviePilot需要配套下载器和媒体服务器配合使用。
- **PGID**:运行程序用户的`gid`,默认`0`(仅支持环境变量配置)
- **UMASK**:掩码权限,默认`000`,可以考虑设置为`022`(仅支持环境变量配置)
- **PROXY_HOST** 网络代理访问themoviedb或者重启更新需要使用代理访问格式为`http(s)://ip:port`、`socks5://user:pass@host:port`(仅支持环境变量配置)
- **MOVIEPILOT_AUTO_UPDATE**:重启更新,`true`/`false`,默认`true` **注意:如果出现网络问题可以配置`PROXY_HOST`**(仅支持环境变量配置)
- **MOVIEPILOT_AUTO_UPDATE_DEV**:重启时更新到未发布的开发版本代码,`true`/`false`,默认`false`(仅支持环境变量配置)
- **MOVIEPILOT_AUTO_UPDATE**:重启更新,`true`/`release`/`dev`/`false`,默认`release` **注意:如果出现网络问题可以配置`PROXY_HOST`**(仅支持环境变量配置)
---
- **❗SUPERUSER** 超级管理员用户名,默认`admin`,安装后使用该用户登录后台管理界面
- **❗SUPERUSER_PASSWORD** 超级管理员初始密码,默认`password`,建议修改为复杂密码
- **❗API_TOKEN** API密钥默认`moviepilot`在媒体服务器Webhook、微信回调等地址配置中需要加上`?token=`该值,建议修改为复杂字符串
- **TMDB_API_DOMAIN** TMDB API地址默认`api.themoviedb.org`,也可配置为`api.tmdb.org`或其它中转代理服务地址,能连通即可
- **TMDB_IMAGE_DOMAIN** TMDB图片地址默认`image.tmdb.org`可配置为其它中转代理以加速TMDB图片显示`static-mdb.v.geilijiasu.com`
- **WALLPAPER** 登录首页电影海报,`tmdb`/`bing`,默认`tmdb`
- **RECOGNIZE_SOURCE** 媒体信息识别来源,`themoviedb`/`douban`,默认`themoviedb`,使用`douban`时不支持二级分类
- **SCRAP_SOURCE** 刮削元数据及图片使用的数据源,`themoviedb`/`douban`,默认`themoviedb`
---
- **SCRAP_METADATA** 刮削入库的媒体文件,`true`/`false`,默认`true`
- **SCRAP_SOURCE** 刮削元数据及图片使用的数据源,`themoviedb`/`douban`,默认`themoviedb`
- **SCRAP_FOLLOW_TMDB** 新增已入库媒体是否跟随TMDB信息变化`true`/`false`,默认`true`
- **SCRAP_FOLLOW_TMDB** 新增已入库媒体是否跟随TMDB信息变化`true`/`false`,默认`true`,为`false`时即使TMDB信息变化了也会仍然按历史记录中已入库的信息进行刮削
---
- **❗TRANSFER_TYPE** 整理转移方式,支持`link`/`copy`/`move`/`softlink`/`rclone_copy`/`rclone_move` **注意:在`link`和`softlink`转移方式下,转移后的文件会继承源文件的权限掩码,不受`UMASK`影响rclone需要自行映射rclone配置目录到容器中或在容器内完成rclone配置节点名称必须为`MP`**
- **❗OVERWRITE_MODE** 转移覆盖模式,默认为`size`,支持`nerver`/`size`/`always`/`latest`,分别表示`不覆盖同名文件`/`同名文件根据文件大小覆盖(大覆盖小)`/`总是覆盖同名文件`/`仅保留最新版本,删除旧版本文件(包括非同名文件)`
- **❗LIBRARY_PATH** 媒体库目录,多个目录使用`,`分隔
- **LIBRARY_MOVIE_NAME** 电影媒体库目录名称(不是完整路径),默认`电影`
- **LIBRARY_TV_NAME** 电视剧媒体库目录称(不是完整路径),默认`电视剧`
@@ -86,14 +97,15 @@ MoviePilot需要配套下载器和媒体服务器配合使用。
- **❗COOKIECLOUD_PASSWORD** CookieCloud端对端加密密码
- **❗COOKIECLOUD_INTERVAL** CookieCloud同步间隔分钟
- **❗USER_AGENT** CookieCloud保存Cookie对应的浏览器UA建议配置设置后可增加连接站点的成功率同步站点后可以在管理界面中修改
- **OCR_HOST** OCR识别服务器地址格式`http(s)://ip:port`用于识别站点验证码实现自动登录获取Cookie等不配置默认使用内建服务器`https://movie-pilot.org`,可使用 [这个镜像](https://hub.docker.com/r/jxxghp/moviepilot-ocr) 自行搭建。
---
- **SUBSCRIBE_MODE** 订阅模式,`rss`/`spider`,默认`spider``rss`模式通过定时刷新RSS来匹配订阅RSS地址会自动获取也可手动维护对站点压力小同时可设置订阅刷新周期24小时运行但订阅和下载通知不能过滤和显示免费推荐使用rss模式。
- **SUBSCRIBE_RSS_INTERVAL** RSS订阅模式刷新时间间隔分钟默认`30`分钟不能小于5分钟。
- **SUBSCRIBE_SEARCH** 订阅搜索,`true`/`false`,默认`false`开启后会每隔24小时对所有订阅进行全量搜索以补齐缺失剧集一般情况下正常订阅即可订阅搜索只做为兜底会增加站点压力不建议开启
- **SEARCH_SOURCE** 媒体信息搜索来源,`themoviedb`/`douban`,默认`themoviedb`
- **AUTO_DOWNLOAD_USER** 远程交互搜索时自动择优下载的用户ID消息通知渠道的用户ID多个用户使用,分割,未设置需要选择资源或者回复`0`
---
- **OCR_HOST** OCR识别服务器地址格式`http(s)://ip:port`用于识别站点验证码实现自动登录获取Cookie等不配置默认使用内建服务器`https://movie-pilot.org`,可使用 [这个镜像](https://hub.docker.com/r/jxxghp/moviepilot-ocr) 自行搭建。
- **PLUGIN_MARKET** 插件市场仓库地址,多个地址使用`,`分隔,保留最后的/,默认为官方插件仓库:`https://raw.githubusercontent.com/jxxghp/MoviePilot-Plugins/main/`。
---
- **AUTO_DOWNLOAD_USER** 远程交互搜索时自动择优下载的用户ID多个用户使用,分割,未设置需要选择资源或者回复`0`
- **❗MESSAGER** 消息通知渠道,支持 `telegram`/`wechat`/`slack`/`synologychat`,开启多个渠道时使用`,`分隔。同时还需要配置对应渠道的环境变量,非对应渠道的变量可删除,推荐使用`telegram`
- `wechat`设置项:
@@ -141,6 +153,8 @@ MoviePilot需要配套下载器和媒体服务器配合使用。
- **QB_USER** qbittorrent用户名
- **QB_PASSWORD** qbittorrent密码
- **QB_CATEGORY** qbittorrent分类自动管理`true`/`false`,默认`false`,开启后会将下载二级分类传递到下载器,由下载器管理下载目录,需要同步开启`DOWNLOAD_CATEGORY`
- **QB_SEQUENTIAL** qbittorrent按顺序下载`true`/`false`,默认`true`
- **QB_FORCE_RESUME** qbittorrent忽略队列限制强制继续`true`/`false`,默认 `false`
- `transmission`设置项:
@@ -149,7 +163,6 @@ MoviePilot需要配套下载器和媒体服务器配合使用。
- **TR_PASSWORD** transmission密码
---
- **REFRESH_MEDIASERVER** 入库后是否刷新媒体服务器,`true`/`false`,默认`true`
- **❗MEDIASERVER** 媒体服务器,支持`emby`/`jellyfin`/`plex`,同时开启多个使用`,`分隔。还需要配置对应媒体服务器的环境变量,非对应媒体服务器的变量可删除,推荐使用`emby`
- `emby`设置项:
@@ -175,7 +188,9 @@ MoviePilot需要配套下载器和媒体服务器配合使用。
`MoviePilot`需要认证后才能使用,配置`AUTH_SITE`后,需要根据下表配置对应站点的认证参数(**仅能通过环境变量配置**
- **❗AUTH_SITE** 认证站点,支持`iyuu`/`hhclub`/`audiences`/`hddolby`/`zmpt`/`freefarm`/`hdfans`/`wintersakura`/`leaves`/`1ptba`/`icc2022`/`ptlsp`/`xingtan`
`AUTH_SITE`支持配置多个认证站点,使用`,`分隔,如:`iyuu,hhclub`,会依次执行认证操作,直到有一个站点认证成功。
- **❗AUTH_SITE** 认证站点,认证资源`v1.0.1`支持`iyuu`/`hhclub`/`audiences`/`hddolby`/`zmpt`/`freefarm`/`hdfans`/`wintersakura`/`leaves`/`1ptba`/`icc2022`/`ptlsp`/`xingtan`/`ptvicomo`
| 站点 | 参数 |
|:------------:|:-----------------------------------------------------:|
@@ -192,11 +207,12 @@ MoviePilot需要配套下载器和媒体服务器配合使用。
| icc2022 | `ICC2022_UID`用户ID<br/>`ICC2022_PASSKEY`:密钥 |
| ptlsp | `PTLSP_UID`用户ID<br/>`PTLSP_PASSKEY`:密钥 |
| xingtan | `XINGTAN_UID`用户ID<br/>`XINGTAN_PASSKEY`:密钥 |
| ptvicomo | `PTVICOMO_UID`用户ID<br/>`PTVICOMO_PASSKEY`:密钥 |
### 2. **进阶配置**
- **BIG_MEMORY_MODE** 大内存模式,默认为`false`,开启后会占用更多的内存,但响应速度会更快
- **BIG_MEMORY_MODE** 大内存模式,默认为`false`,开启后会增加缓存数量,占用更多的内存,但响应速度会更快
- **MOVIE_RENAME_FORMAT** 电影重命名格式
@@ -219,6 +235,9 @@ MoviePilot需要配套下载器和媒体服务器配合使用。
> `imdbid` IMDBID
> `part`:段/节
> `fileExt`:文件扩展名
> `tmdbid`TMDB ID
> `imdbid`IMDB ID
> `customization`:自定义占位符
`MOVIE_RENAME_FORMAT`默认配置格式:

View File

@@ -6,7 +6,7 @@ from sqlalchemy.orm import Session
from app import schemas
from app.chain.dashboard import DashboardChain
from app.core.config import settings
from app.core.security import verify_token
from app.core.security import verify_token, verify_uri_token
from app.db import get_db
from app.db.models.transferhistory import TransferHistory
from app.scheduler import Scheduler
@@ -34,6 +34,14 @@ def statistic(_: schemas.TokenPayload = Depends(verify_token)) -> Any:
return schemas.Statistic()
@router.get("/statistic2", summary="媒体数量统计API_TOKEN", response_model=schemas.Statistic)
def statistic2(_: str = Depends(verify_uri_token)) -> Any:
"""
查询媒体数量统计信息 API_TOKEN认证?token=xxx
"""
return statistic()
@router.get("/storage", summary="存储空间", response_model=schemas.Storage)
def storage(_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
@@ -46,6 +54,14 @@ def storage(_: schemas.TokenPayload = Depends(verify_token)) -> Any:
)
@router.get("/storage2", summary="存储空间API_TOKEN", response_model=schemas.Storage)
def storage2(_: str = Depends(verify_uri_token)) -> Any:
"""
查询存储空间信息 API_TOKEN认证?token=xxx
"""
return storage()
@router.get("/processes", summary="进程信息", response_model=List[schemas.ProcessInfo])
def processes(_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
@@ -73,6 +89,14 @@ def downloader(_: schemas.TokenPayload = Depends(verify_token)) -> Any:
return schemas.DownloaderInfo()
@router.get("/downloader2", summary="下载器信息API_TOKEN", response_model=schemas.DownloaderInfo)
def downloader2(_: str = Depends(verify_uri_token)) -> Any:
"""
查询下载器信息 API_TOKEN认证?token=xxx
"""
return downloader()
@router.get("/schedule", summary="后台服务", response_model=List[schemas.ScheduleInfo])
def schedule(_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
@@ -81,6 +105,14 @@ def schedule(_: schemas.TokenPayload = Depends(verify_token)) -> Any:
return Scheduler().list()
@router.get("/schedule2", summary="后台服务API_TOKEN", response_model=List[schemas.ScheduleInfo])
def schedule2(_: str = Depends(verify_uri_token)) -> Any:
"""
查询下载器信息 API_TOKEN认证?token=xxx
"""
return schedule()
@router.get("/transfer", summary="文件整理统计", response_model=List[int])
def transfer(days: int = 7, db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
@@ -99,9 +131,25 @@ def cpu(_: schemas.TokenPayload = Depends(verify_token)) -> Any:
return SystemUtils.cpu_usage()
@router.get("/cpu2", summary="获取当前CPU使用率API_TOKEN", response_model=int)
def cpu2(_: str = Depends(verify_uri_token)) -> Any:
"""
获取当前CPU使用率 API_TOKEN认证?token=xxx
"""
return cpu()
@router.get("/memory", summary="获取当前内存使用量和使用率", response_model=List[int])
def memory(_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
获取当前内存使用率
"""
return SystemUtils.memory_usage()
@router.get("/memory2", summary="获取当前内存使用量和使用率API_TOKEN", response_model=List[int])
def memory2(_: str = Depends(verify_uri_token)) -> Any:
"""
获取当前内存使用率 API_TOKEN认证?token=xxx
"""
return memory()

View File

@@ -28,20 +28,6 @@ def douban_img(imgurl: str) -> Any:
return None
@router.get("/recognize/{doubanid}", summary="豆瓣ID识别", response_model=schemas.Context)
def recognize_doubanid(doubanid: str,
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
根据豆瓣ID识别媒体信息
"""
# 识别媒体信息
context = DoubanChain().recognize_by_doubanid(doubanid=doubanid)
if context:
return context.to_dict()
else:
return schemas.Context()
@router.get("/showing", summary="豆瓣正在热映", response_model=List[schemas.MediaInfo])
def movie_showing(page: int = 1,
count: int = 30,
@@ -141,6 +127,28 @@ def tv_animation(page: int = 1,
return [MediaInfo(douban_info=tv).to_dict() for tv in tvs]
@router.get("/movie_hot", summary="豆瓣热门电影", response_model=List[schemas.MediaInfo])
def movie_hot(page: int = 1,
count: int = 30,
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
热门电影
"""
movies = DoubanChain().movie_hot(page=page, count=count)
return [MediaInfo(douban_info=movie).to_dict() for movie in movies]
@router.get("/tv_hot", summary="豆瓣热门电视剧", response_model=List[schemas.MediaInfo])
def tv_hot(page: int = 1,
count: int = 30,
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
热门电视剧
"""
tvs = DoubanChain().tv_hot(page=page, count=count)
return [MediaInfo(douban_info=tv).to_dict() for tv in tvs]
@router.get("/{doubanid}", summary="查询豆瓣详情", response_model=schemas.MediaInfo)
def douban_info(doubanid: str,
_: schemas.TokenPayload = Depends(verify_token)) -> Any:

View File

@@ -3,12 +3,13 @@ from typing import Any, List
from fastapi import APIRouter, Depends, HTTPException
from app import schemas
from app.chain.douban import DoubanChain
from app.chain.download import DownloadChain
from app.chain.media import MediaChain
from app.core.context import MediaInfo, Context, TorrentInfo
from app.core.metainfo import MetaInfo
from app.core.security import verify_token
from app.db.models.user import User
from app.db.userauth import get_current_active_user
from app.schemas import NotExistMediaInfo, MediaType
router = APIRouter()
@@ -27,6 +28,7 @@ def read_downloading(
def add_downloading(
media_in: schemas.MediaInfo,
torrent_in: schemas.TorrentInfo,
current_user: User = Depends(get_current_active_user),
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
添加下载任务
@@ -45,7 +47,7 @@ def add_downloading(
media_info=mediainfo,
torrent_info=torrentinfo
)
did = DownloadChain().download_single(context=context)
did = DownloadChain().download_single(context=context, username=current_user.name)
return schemas.Response(success=True if did else False, data={
"download_id": did
})
@@ -58,30 +60,31 @@ def exists(media_in: schemas.MediaInfo,
查询缺失媒体信息
"""
# 媒体信息
mediainfo = MediaInfo()
meta = MetaInfo(title=media_in.title)
if media_in.tmdb_id:
mediainfo.from_dict(media_in.dict())
elif media_in.douban_id:
context = DoubanChain().recognize_by_doubanid(doubanid=media_in.douban_id)
if context:
mediainfo = context.media_info
meta = context.meta_info
mtype = MediaType(media_in.type) if media_in.type else None
if mtype:
meta.type = mtype
if media_in.season:
meta.begin_season = media_in.season
meta.type = MediaType.TV
if media_in.year:
meta.year = media_in.year
if media_in.tmdb_id or media_in.douban_id:
mediainfo = MediaChain().recognize_media(meta=meta, mtype=mtype,
tmdbid=media_in.tmdb_id, doubanid=media_in.douban_id)
else:
context = MediaChain().recognize_by_title(title=f"{media_in.title} {media_in.year}")
if context:
mediainfo = context.media_info
meta = context.meta_info
mediainfo = MediaChain().recognize_by_meta(metainfo=meta)
# 查询缺失信息
if not mediainfo or not mediainfo.tmdb_id:
if not mediainfo:
raise HTTPException(status_code=404, detail="媒体信息不存在")
mediakey = mediainfo.tmdb_id or mediainfo.douban_id
exist_flag, no_exists = DownloadChain().get_no_exists_info(meta=meta, mediainfo=mediainfo)
if mediainfo.type == MediaType.MOVIE:
# 电影已存在时返回空列表,存在时返回空对像列表
return [] if exist_flag else [NotExistMediaInfo()]
elif no_exists and no_exists.get(mediainfo.tmdb_id):
elif no_exists and no_exists.get(mediakey):
# 电视剧返回缺失的剧集
return list(no_exists.get(mediainfo.tmdb_id).values())
return list(no_exists.get(mediakey).values())
return []

View File

@@ -75,10 +75,14 @@ def delete_transfer_history(history_in: schemas.TransferHistory,
return schemas.Response(success=False, msg="记录不存在")
# 册除媒体库文件
if deletedest and history.dest:
TransferChain().delete_files(Path(history.dest))
state, msg = TransferChain().delete_files(Path(history.dest))
if not state:
return schemas.Response(success=False, msg=msg)
# 删除源文件
if deletesrc and history.src:
TransferChain().delete_files(Path(history.src))
state, msg = TransferChain().delete_files(Path(history.src))
if not state:
return schemas.Response(success=False, msg=msg)
# 发送事件
eventmanager.send_event(
EventType.DownloadFileDeleted,

View File

@@ -37,18 +37,23 @@ async def login_access_token(
logger.warn("登录用户本地不匹配,尝试辅助认证 ...")
token = UserChain().user_authenticate(form_data.username, form_data.password)
if not token:
logger.warn(f"用户 {form_data.username} 登录失败!")
raise HTTPException(status_code=401, detail="用户名或密码不正确")
else:
logger.info(f"辅助认证成功,用户信息: {token}")
logger.info(f"用户 {form_data.username} 辅助认证成功,用户信息: {token}")
# 加入用户信息表
user = User.get_by_name(db=db, name=form_data.username)
if not user:
logger.info(f"用户不存在,创建用户: {form_data.username}")
logger.info(f"用户不存在,创建普通用户: {form_data.username}")
user = User(name=form_data.username, is_active=True,
is_superuser=False, hashed_password=get_password_hash(token))
user.create(db)
else:
# 普通用户权限
user.is_superuser = False
elif not user.is_active:
raise HTTPException(status_code=403, detail="用户未启用")
logger.info(f"用户 {user.name} 登录成功!")
return schemas.Token(
access_token=security.create_access_token(
user.id,
@@ -61,6 +66,18 @@ async def login_access_token(
)
@router.get("/wallpaper", summary="登录页面电影海报", response_model=schemas.Response)
def wallpaper() -> Any:
"""
获取登录页面电影海报
"""
if settings.WALLPAPER == "tmdb":
return tmdb_wallpaper()
elif settings.WALLPAPER == "bing":
return bing_wallpaper()
return schemas.Response(success=False)
@router.get("/bing", summary="Bing每日壁纸", response_model=schemas.Response)
def bing_wallpaper() -> Any:
"""
@@ -68,8 +85,10 @@ def bing_wallpaper() -> Any:
"""
url = WebUtils.get_bing_wallpaper()
if url:
return schemas.Response(success=False,
message=url)
return schemas.Response(
success=True,
message=url
)
return schemas.Response(success=False)

View File

@@ -4,12 +4,11 @@ from fastapi import APIRouter, Depends
from sqlalchemy.orm import Session
from app import schemas
from app.chain.douban import DoubanChain
from app.chain.media import MediaChain
from app.chain.tmdb import TmdbChain
from app.core.context import MediaInfo
from app.core.config import settings
from app.core.context import Context
from app.core.metainfo import MetaInfo
from app.core.security import verify_token
from app.core.security import verify_token, verify_uri_token
from app.db import get_db
from app.db.mediaserver_oper import MediaServerOper
from app.schemas import MediaType
@@ -25,15 +24,27 @@ def recognize(title: str,
根据标题、副标题识别媒体信息
"""
# 识别媒体信息
context = MediaChain().recognize_by_title(title=title, subtitle=subtitle)
if context:
return context.to_dict()
metainfo = MetaInfo(title, subtitle)
mediainfo = MediaChain().recognize_by_meta(metainfo)
if mediainfo:
return Context(meta_info=metainfo, media_info=mediainfo).to_dict()
return schemas.Context()
@router.get("/recognize2", summary="识别种子媒体信息API_TOKEN", response_model=schemas.Context)
def recognize2(title: str,
               subtitle: str = None,
               _: str = Depends(verify_uri_token)) -> Any:
    """
    Recognize media info from a title/subtitle, authenticated with the
    URI API token (?token=xxx) instead of a JWT bearer token.

    :param title: torrent/media title to recognize
    :param subtitle: optional subtitle/description text
    :return: recognized media context (delegates to the JWT-protected endpoint)
    """
    # Delegate to the JWT-protected handler; only the auth scheme differs.
    return recognize(title, subtitle)
@router.get("/recognize_file", summary="识别媒体信息(文件)", response_model=schemas.Context)
def recognize(path: str,
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
def recognize_file(path: str,
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
根据文件路径识别媒体信息
"""
@@ -44,6 +55,16 @@ def recognize(path: str,
return schemas.Context()
@router.get("/recognize_file2", summary="识别文件媒体信息API_TOKEN", response_model=schemas.Context)
def recognize_file2(path: str,
                    _: str = Depends(verify_uri_token)) -> Any:
    """
    Recognize media info from a file path, authenticated with the
    URI API token (?token=xxx) instead of a JWT bearer token.

    :param path: file path to recognize
    :return: recognized media context (delegates to the JWT-protected endpoint)
    """
    # Delegate to the JWT-protected handler; only the auth scheme differs.
    return recognize_file(path)
@router.get("/search", summary="搜索媒体信息", response_model=List[schemas.MediaInfo])
def search_by_title(title: str,
page: int = 1,
@@ -81,26 +102,34 @@ def exists(title: str = None,
@router.get("/{mediaid}", summary="查询媒体详情", response_model=schemas.MediaInfo)
def tmdb_info(mediaid: str, type_name: str,
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
def media_info(mediaid: str, type_name: str,
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
根据媒体ID查询themoviedb或豆瓣媒体信息type_name: 电影/电视剧
"""
mtype = MediaType(type_name)
tmdbid, doubanid = None, None
if mediaid.startswith("tmdb:"):
result = TmdbChain().tmdb_info(int(mediaid[5:]), mtype)
return MediaInfo(tmdb_info=result).to_dict()
tmdbid = int(mediaid[5:])
elif mediaid.startswith("douban:"):
# 查询豆瓣信息
doubaninfo = DoubanChain().douban_info(doubanid=mediaid[7:])
if not doubaninfo:
return schemas.MediaInfo()
result = DoubanChain().recognize_by_doubaninfo(doubaninfo)
if result:
# TMDB
return result.media_info.to_dict()
else:
# 豆瓣
return MediaInfo(douban_info=doubaninfo).to_dict()
else:
doubanid = mediaid[7:]
if not tmdbid and not doubanid:
return schemas.MediaInfo()
if settings.RECOGNIZE_SOURCE == "themoviedb":
if not tmdbid and doubanid:
tmdbinfo = MediaChain().get_tmdbinfo_by_doubanid(doubanid=doubanid, mtype=mtype)
if tmdbinfo:
tmdbid = tmdbinfo.get("id")
else:
return schemas.MediaInfo()
else:
if not doubanid and tmdbid:
doubaninfo = MediaChain().get_doubaninfo_by_tmdbid(tmdbid=tmdbid, mtype=mtype)
if doubaninfo:
doubanid = doubaninfo.get("id")
else:
return schemas.MediaInfo()
mediainfo = MediaChain().recognize_media(tmdbid=tmdbid, doubanid=doubanid, mtype=mtype)
if mediainfo:
return mediainfo.to_dict()
return schemas.MediaInfo()

View File

@@ -6,6 +6,7 @@ from app import schemas
from app.core.plugin import PluginManager
from app.core.security import verify_token
from app.db.systemconfig_oper import SystemConfigOper
from app.helper.plugin import PluginHelper
from app.schemas.types import SystemConfigKey
router = APIRouter()
@@ -14,9 +15,27 @@ router = APIRouter()
@router.get("/", summary="所有插件", response_model=List[schemas.Plugin])
def all_plugins(_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
查询所有插件清单
查询所有插件清单,包括本地插件和在线插件
"""
return PluginManager().get_plugin_apps()
plugins = []
# 本地插件
local_plugins = PluginManager().get_local_plugins()
# 在线插件
online_plugins = PluginManager().get_online_plugins()
if not online_plugins:
return local_plugins
# 已安装插件IDS
installed_ids = [plugin["id"] for plugin in local_plugins if plugin.get("installed")]
# 已经安装的本地
plugins.extend([plugin for plugin in local_plugins if plugin.get("installed")])
# 未安装的线上插件或者有更新的插件
for plugin in online_plugins:
if plugin["id"] not in installed_ids:
plugins.append(plugin)
elif plugin.get("has_update"):
plugin["installed"] = False
plugins.append(plugin)
return plugins
@router.get("/installed", summary="已安装插件", response_model=List[str])
@@ -29,19 +48,28 @@ def installed_plugins(_: schemas.TokenPayload = Depends(verify_token)) -> Any:
@router.get("/install/{plugin_id}", summary="安装插件", response_model=schemas.Response)
def install_plugin(plugin_id: str,
repo_url: str = "",
force: bool = False,
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
安装插件
"""
# 已安装插件
install_plugins = SystemConfigOper().get(SystemConfigKey.UserInstalledPlugins) or []
# 如果是非本地插件,或者强制安装时,则需要下载安装
if repo_url and (force or plugin_id not in PluginManager().get_plugin_ids()):
# 下载安装
state, msg = PluginHelper().install(pid=plugin_id, repo_url=repo_url)
if not state:
# 安装失败
return schemas.Response(success=False, msg=msg)
# 安装插件
if plugin_id not in install_plugins:
install_plugins.append(plugin_id)
# 保存设置
SystemConfigOper().set(SystemConfigKey.UserInstalledPlugins, install_plugins)
# 重载插件管理器
PluginManager().init_config()
# 重载插件管理器
PluginManager().init_config()
return schemas.Response(success=True)

View File

@@ -3,8 +3,9 @@ from typing import List, Any
from fastapi import APIRouter, Depends
from app import schemas
from app.chain.douban import DoubanChain
from app.chain.media import MediaChain
from app.chain.search import SearchChain
from app.core.config import settings
from app.core.security import verify_token
from app.schemas.types import MediaType
@@ -21,27 +22,36 @@ async def search_latest(_: schemas.TokenPayload = Depends(verify_token)) -> Any:
@router.get("/media/{mediaid}", summary="精确搜索资源", response_model=List[schemas.Context])
def search_by_tmdbid(mediaid: str,
mtype: str = None,
area: str = "title",
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
def search_by_id(mediaid: str,
mtype: str = None,
area: str = "title",
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
根据TMDBID/豆瓣ID精确搜索站点资源 tmdb:/douban:/
"""
torrents = []
if mtype:
mtype = MediaType(mtype)
if mediaid.startswith("tmdb:"):
tmdbid = int(mediaid.replace("tmdb:", ""))
if mtype:
mtype = MediaType(mtype)
torrents = SearchChain().search_by_tmdbid(tmdbid=tmdbid, mtype=mtype, area=area)
if settings.RECOGNIZE_SOURCE == "douban":
# 通过TMDBID识别豆瓣ID
doubaninfo = MediaChain().get_doubaninfo_by_tmdbid(tmdbid=tmdbid, mtype=mtype)
if doubaninfo:
torrents = SearchChain().search_by_id(doubanid=doubaninfo.get("id"),
mtype=mtype, area=area)
else:
torrents = SearchChain().search_by_id(tmdbid=tmdbid, mtype=mtype, area=area)
elif mediaid.startswith("douban:"):
doubanid = mediaid.replace("douban:", "")
# 识别豆瓣信息
context = DoubanChain().recognize_by_doubanid(doubanid)
if not context or not context.media_info or not context.media_info.tmdb_id:
return []
torrents = SearchChain().search_by_tmdbid(tmdbid=context.media_info.tmdb_id,
mtype=context.media_info.type,
area=area)
if settings.RECOGNIZE_SOURCE == "themoviedb":
# 通过豆瓣ID识别TMDBID
tmdbinfo = MediaChain().get_tmdbinfo_by_doubanid(doubanid=doubanid, mtype=mtype)
if tmdbinfo:
torrents = SearchChain().search_by_id(tmdbid=tmdbinfo.get("id"),
mtype=mtype, area=area)
else:
torrents = SearchChain().search_by_id(doubanid=doubanid, mtype=mtype, area=area)
else:
return []
return [torrent.to_dict() for torrent in torrents]

View File

@@ -7,7 +7,8 @@ from sqlalchemy.orm import Session
from app import schemas
from app.chain.subscribe import SubscribeChain
from app.core.config import settings
from app.core.security import verify_token
from app.core.metainfo import MetaInfo
from app.core.security import verify_token, verify_uri_token
from app.db import get_db
from app.db.models.subscribe import Subscribe
from app.db.models.user import User
@@ -27,7 +28,7 @@ def start_subscribe_add(title: str, year: str,
mtype=mtype, tmdbid=tmdbid, season=season, username=username)
@router.get("/", summary="所有订阅", response_model=List[schemas.Subscribe])
@router.get("/", summary="查询所有订阅", response_model=List[schemas.Subscribe])
def read_subscribes(
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
@@ -41,6 +42,14 @@ def read_subscribes(
return subscribes
@router.get("/list", summary="查询所有订阅API_TOKEN", response_model=List[schemas.Subscribe])
def list_subscribes(_: str = Depends(verify_uri_token)) -> Any:
    """
    List all subscriptions, authenticated with the URI API token (?token=xxx).

    :return: all subscription records (delegates to read_subscribes)
    """
    # NOTE(review): read_subscribes() is called without arguments, so its
    # `db` parameter receives the raw Depends(...) default rather than a
    # Session resolved by FastAPI's DI — verify this actually works here.
    return read_subscribes()
@router.post("/", summary="新增订阅", response_model=schemas.Response)
def create_subscribe(
*,
@@ -55,6 +64,11 @@ def create_subscribe(
mtype = MediaType(subscribe_in.type)
else:
mtype = None
# 豆瓣标题处理
if subscribe_in.doubanid:
meta = MetaInfo(subscribe_in.name)
subscribe_in.name = meta.name
subscribe_in.season = meta.begin_season
# 标题转换
if subscribe_in.name:
title = subscribe_in.name
@@ -117,7 +131,7 @@ def subscribe_mediaid(
tmdbid = mediaid[5:]
if not tmdbid or not str(tmdbid).isdigit():
return Subscribe()
result = Subscribe.exists(db, int(tmdbid), season)
result = Subscribe.exists(db, tmdbid=int(tmdbid), season=season)
elif mediaid.startswith("douban:"):
doubanid = mediaid[7:]
if not doubanid:

View File

@@ -14,6 +14,7 @@ from app.core.security import verify_token
from app.db.systemconfig_oper import SystemConfigOper
from app.helper.message import MessageHelper
from app.helper.progress import ProgressHelper
from app.helper.sites import SitesHelper
from app.scheduler import Scheduler
from app.schemas.types import SystemConfigKey
from app.utils.http import RequestUtils
@@ -32,7 +33,9 @@ def get_env_setting(_: schemas.TokenPayload = Depends(verify_token)):
exclude={"SECRET_KEY", "SUPERUSER_PASSWORD", "API_TOKEN"}
)
info.update({
"VERSION": APP_VERSION
"VERSION": APP_VERSION,
"AUTH_VERSION": SitesHelper().auth_version,
"INDEXER_VERSION": SitesHelper().indexer_version,
})
return schemas.Response(success=True,
data=info)
@@ -219,8 +222,5 @@ def execute_command(jobid: str,
"""
if not jobid:
return schemas.Response(success=False, message="命令不能为空!")
if jobid == "subscribe_search":
Scheduler().start(jobid, state='R')
else:
Scheduler().start(jobid)
Scheduler().start(jobid)
return schemas.Response(success=True)

View File

@@ -1,10 +1,11 @@
from typing import Any
from fastapi import APIRouter, BackgroundTasks, Request
from fastapi import APIRouter, BackgroundTasks, Request, Depends
from app import schemas
from app.chain.webhook import WebhookChain
from app.core.config import settings
from app.core.security import verify_uri_token
router = APIRouter()
@@ -18,13 +19,12 @@ def start_webhook_chain(body: Any, form: Any, args: Any):
@router.post("/", summary="Webhook消息响应", response_model=schemas.Response)
async def webhook_message(background_tasks: BackgroundTasks,
token: str, request: Request,
request: Request,
_: str = Depends(verify_uri_token)
) -> Any:
"""
Webhook响应
"""
if token != settings.API_TOKEN:
return schemas.Response(success=False, message="token认证不通过")
body = await request.body()
form = await request.form()
args = request.query_params
@@ -34,12 +34,10 @@ async def webhook_message(background_tasks: BackgroundTasks,
@router.get("/", summary="Webhook消息响应", response_model=schemas.Response)
async def webhook_message(background_tasks: BackgroundTasks,
token: str, request: Request) -> Any:
request: Request, _: str = Depends(verify_uri_token)) -> Any:
"""
Webhook响应
"""
if token != settings.API_TOKEN:
return schemas.Response(success=False, message="token认证不通过")
args = request.query_params
background_tasks.add_task(start_webhook_chain, None, None, args)
return schemas.Response(success=True)

View File

@@ -8,6 +8,7 @@ from app.chain.media import MediaChain
from app.chain.subscribe import SubscribeChain
from app.core.config import settings
from app.core.metainfo import MetaInfo
from app.core.security import verify_uri_apikey
from app.db import get_db
from app.db.models.subscribe import Subscribe
from app.schemas import RadarrMovie, SonarrSeries
@@ -18,15 +19,10 @@ arr_router = APIRouter(tags=['servarr'])
@arr_router.get("/system/status", summary="系统状态")
def arr_system_status(apikey: str) -> Any:
def arr_system_status(_: str = Depends(verify_uri_apikey)) -> Any:
"""
模拟Radarr、Sonarr系统状态
"""
if not apikey or apikey != settings.API_TOKEN:
raise HTTPException(
status_code=403,
detail="认证失败!",
)
return {
"appName": "MoviePilot",
"instanceName": "moviepilot",
@@ -77,15 +73,10 @@ def arr_system_status(apikey: str) -> Any:
@arr_router.get("/qualityProfile", summary="质量配置")
def arr_qualityProfile(apikey: str) -> Any:
def arr_qualityProfile(_: str = Depends(verify_uri_apikey)) -> Any:
"""
模拟Radarr、Sonarr质量配置
"""
if not apikey or apikey != settings.API_TOKEN:
raise HTTPException(
status_code=403,
detail="认证失败!",
)
return [
{
"id": 1,
@@ -123,15 +114,10 @@ def arr_qualityProfile(apikey: str) -> Any:
@arr_router.get("/rootfolder", summary="根目录")
def arr_rootfolder(apikey: str) -> Any:
def arr_rootfolder(_: str = Depends(verify_uri_apikey)) -> Any:
"""
模拟Radarr、Sonarr根目录
"""
if not apikey or apikey != settings.API_TOKEN:
raise HTTPException(
status_code=403,
detail="认证失败!",
)
return [
{
"id": 1,
@@ -144,15 +130,10 @@ def arr_rootfolder(apikey: str) -> Any:
@arr_router.get("/tag", summary="标签")
def arr_tag(apikey: str) -> Any:
def arr_tag(_: str = Depends(verify_uri_apikey)) -> Any:
"""
模拟Radarr、Sonarr标签
"""
if not apikey or apikey != settings.API_TOKEN:
raise HTTPException(
status_code=403,
detail="认证失败!",
)
return [
{
"id": 1,
@@ -162,15 +143,10 @@ def arr_tag(apikey: str) -> Any:
@arr_router.get("/languageprofile", summary="语言")
def arr_languageprofile(apikey: str) -> Any:
def arr_languageprofile(_: str = Depends(verify_uri_apikey)) -> Any:
"""
模拟Radarr、Sonarr语言
"""
if not apikey or apikey != settings.API_TOKEN:
raise HTTPException(
status_code=403,
detail="认证失败!",
)
return [{
"id": 1,
"name": "默认",
@@ -193,7 +169,7 @@ def arr_languageprofile(apikey: str) -> Any:
@arr_router.get("/movie", summary="所有订阅电影", response_model=List[schemas.RadarrMovie])
def arr_movies(apikey: str, db: Session = Depends(get_db)) -> Any:
def arr_movies(_: str = Depends(verify_uri_apikey), db: Session = Depends(get_db)) -> Any:
"""
查询Radarr电影
"""
@@ -262,11 +238,6 @@ def arr_movies(apikey: str, db: Session = Depends(get_db)) -> Any:
}
]
"""
if not apikey or apikey != settings.API_TOKEN:
raise HTTPException(
status_code=403,
detail="认证失败!",
)
# 查询所有电影订阅
result = []
subscribes = Subscribe.list(db)
@@ -289,16 +260,11 @@ def arr_movies(apikey: str, db: Session = Depends(get_db)) -> Any:
@arr_router.get("/movie/lookup", summary="查询电影", response_model=List[schemas.RadarrMovie])
def arr_movie_lookup(apikey: str, term: str, db: Session = Depends(get_db)) -> Any:
def arr_movie_lookup(term: str, db: Session = Depends(get_db), _: str = Depends(verify_uri_apikey)) -> Any:
"""
查询Radarr电影 term: `tmdb:${id}`
存在和不存在均不能返回错误
"""
if not apikey or apikey != settings.API_TOKEN:
raise HTTPException(
status_code=403,
detail="认证失败!",
)
tmdbid = term.replace("tmdb:", "")
# 查询媒体信息
mediainfo = MediaChain().recognize_media(mtype=MediaType.MOVIE, tmdbid=int(tmdbid))
@@ -340,15 +306,10 @@ def arr_movie_lookup(apikey: str, term: str, db: Session = Depends(get_db)) -> A
@arr_router.get("/movie/{mid}", summary="电影订阅详情", response_model=schemas.RadarrMovie)
def arr_movie(apikey: str, mid: int, db: Session = Depends(get_db)) -> Any:
def arr_movie(mid: int, db: Session = Depends(get_db), _: str = Depends(verify_uri_apikey)) -> Any:
"""
查询Radarr电影订阅
"""
if not apikey or apikey != settings.API_TOKEN:
raise HTTPException(
status_code=403,
detail="认证失败!",
)
subscribe = Subscribe.get(db, mid)
if subscribe:
return RadarrMovie(
@@ -371,18 +332,13 @@ def arr_movie(apikey: str, mid: int, db: Session = Depends(get_db)) -> Any:
@arr_router.post("/movie", summary="新增电影订阅")
def arr_add_movie(apikey: str,
movie: RadarrMovie,
def arr_add_movie(movie: RadarrMovie,
db: Session = Depends(get_db),
_: str = Depends(verify_uri_apikey)
) -> Any:
"""
新增Radarr电影订阅
"""
if not apikey or apikey != settings.API_TOKEN:
raise HTTPException(
status_code=403,
detail="认证失败!",
)
# 检查订阅是否已存在
subscribe = Subscribe.get_by_tmdbid(db, movie.tmdbId)
if subscribe:
@@ -394,7 +350,7 @@ def arr_add_movie(apikey: str,
year=movie.year,
mtype=MediaType.MOVIE,
tmdbid=movie.tmdbId,
userid="Seerr")
username="Seerr")
if sid:
return {
"id": sid
@@ -407,15 +363,10 @@ def arr_add_movie(apikey: str,
@arr_router.delete("/movie/{mid}", summary="删除电影订阅", response_model=schemas.Response)
def arr_remove_movie(apikey: str, mid: int, db: Session = Depends(get_db)) -> Any:
def arr_remove_movie(mid: int, db: Session = Depends(get_db), _: str = Depends(verify_uri_apikey)) -> Any:
"""
删除Rardar电影订阅
"""
if not apikey or apikey != settings.API_TOKEN:
raise HTTPException(
status_code=403,
detail="认证失败!",
)
subscribe = Subscribe.get(db, mid)
if subscribe:
subscribe.delete(db, mid)
@@ -428,7 +379,7 @@ def arr_remove_movie(apikey: str, mid: int, db: Session = Depends(get_db)) -> An
@arr_router.get("/series", summary="所有剧集", response_model=List[schemas.SonarrSeries])
def arr_series(apikey: str, db: Session = Depends(get_db)) -> Any:
def arr_series(_: str = Depends(verify_uri_apikey), db: Session = Depends(get_db)) -> Any:
"""
查询Sonarr剧集
"""
@@ -534,11 +485,6 @@ def arr_series(apikey: str, db: Session = Depends(get_db)) -> Any:
}
]
"""
if not apikey or apikey != settings.API_TOKEN:
raise HTTPException(
status_code=403,
detail="认证失败!",
)
# 查询所有电视剧订阅
result = []
subscribes = Subscribe.list(db)
@@ -569,16 +515,10 @@ def arr_series(apikey: str, db: Session = Depends(get_db)) -> Any:
@arr_router.get("/series/lookup", summary="查询剧集")
def arr_series_lookup(apikey: str, term: str, db: Session = Depends(get_db)) -> Any:
def arr_series_lookup(term: str, db: Session = Depends(get_db), _: str = Depends(verify_uri_apikey)) -> Any:
"""
查询Sonarr剧集 term: `tvdb:${id}` title
"""
if not apikey or apikey != settings.API_TOKEN:
raise HTTPException(
status_code=403,
detail="认证失败!",
)
# 获取TVDBID
if not term.startswith("tvdb:"):
mediainfo = MediaChain().recognize_media(meta=MetaInfo(term),
@@ -664,15 +604,10 @@ def arr_series_lookup(apikey: str, term: str, db: Session = Depends(get_db)) ->
@arr_router.get("/series/{tid}", summary="剧集详情")
def arr_serie(apikey: str, tid: int, db: Session = Depends(get_db)) -> Any:
def arr_serie(tid: int, db: Session = Depends(get_db), _: str = Depends(verify_uri_apikey)) -> Any:
"""
查询Sonarr剧集
"""
if not apikey or apikey != settings.API_TOKEN:
raise HTTPException(
status_code=403,
detail="认证失败!",
)
subscribe = Subscribe.get(db, tid)
if subscribe:
return SonarrSeries(
@@ -703,16 +638,12 @@ def arr_serie(apikey: str, tid: int, db: Session = Depends(get_db)) -> Any:
@arr_router.post("/series", summary="新增剧集订阅")
def arr_add_series(apikey: str, tv: schemas.SonarrSeries,
db: Session = Depends(get_db)) -> Any:
def arr_add_series(tv: schemas.SonarrSeries,
db: Session = Depends(get_db),
_: str = Depends(verify_uri_apikey)) -> Any:
"""
新增Sonarr剧集订阅
"""
if not apikey or apikey != settings.API_TOKEN:
raise HTTPException(
status_code=403,
detail="认证失败!",
)
# 检查订阅是否存在
left_seasons = []
for season in tv.seasons:
@@ -737,7 +668,7 @@ def arr_add_series(apikey: str, tv: schemas.SonarrSeries,
season=season.get("seasonNumber"),
tmdbid=tv.tmdbId,
mtype=MediaType.TV,
userid="Seerr")
username="Seerr")
if sid:
return {
@@ -751,15 +682,10 @@ def arr_add_series(apikey: str, tv: schemas.SonarrSeries,
@arr_router.delete("/series/{tid}", summary="删除剧集订阅")
def arr_remove_series(apikey: str, tid: int, db: Session = Depends(get_db)) -> Any:
def arr_remove_series(tid: int, db: Session = Depends(get_db), _: str = Depends(verify_uri_apikey)) -> Any:
"""
删除Sonarr剧集订阅
"""
if not apikey or apikey != settings.API_TOKEN:
raise HTTPException(
status_code=403,
detail="认证失败!",
)
subscribe = Subscribe.get(db, tid)
if subscribe:
subscribe.delete(db, tid)

View File

@@ -107,23 +107,34 @@ class ChainBase(metaclass=ABCMeta):
# 中止继续执行
break
except Exception as err:
logger.error(f"运行模块 {method} 出错:{module.__class__.__name__} - {str(err)}\n{traceback.print_exc()}")
logger.error(
f"运行模块 {method} 出错:{module.__class__.__name__} - {str(err)}\n{traceback.print_exc()}")
return result
def recognize_media(self, meta: MetaBase = None,
mtype: MediaType = None,
tmdbid: int = None) -> Optional[MediaInfo]:
tmdbid: int = None,
doubanid: str = None) -> Optional[MediaInfo]:
"""
识别媒体信息
:param meta: 识别的元数据
:param mtype: 识别的媒体类型与tmdbid配套
:param tmdbid: tmdbid
:param doubanid: 豆瓣ID
:return: 识别的媒体信息,包括剧集信息
"""
return self.run_module("recognize_media", meta=meta, mtype=mtype, tmdbid=tmdbid)
# 识别用名中含指定信息情形
if not mtype and meta and meta.type in [MediaType.TV, MediaType.MOVIE]:
mtype = meta.type
if not tmdbid and hasattr(meta, "tmdbid"):
tmdbid = meta.tmdbid
if not doubanid and hasattr(meta, "doubanid"):
doubanid = meta.doubanid
return self.run_module("recognize_media", meta=meta, mtype=mtype,
tmdbid=tmdbid, doubanid=doubanid)
def match_doubaninfo(self, name: str, imdbid: str = None,
mtype: str = None, year: str = None, season: int = None) -> Optional[dict]:
mtype: MediaType = None, year: str = None, season: int = None) -> Optional[dict]:
"""
搜索和匹配豆瓣信息
:param name: 标题
@@ -135,6 +146,18 @@ class ChainBase(metaclass=ABCMeta):
return self.run_module("match_doubaninfo", name=name, imdbid=imdbid,
mtype=mtype, year=year, season=season)
def match_tmdbinfo(self, name: str, mtype: MediaType = None,
                   year: str = None, season: int = None) -> Optional[dict]:
    """
    Search TMDB and match media info by title.

    :param name: title to search for
    :param mtype: media type (movie/TV)
    :param year: release year
    :param season: season number
    :return: matched TMDB info dict from the first module that answers,
             or None if no module produced a match
    """
    # Fan out to registered modules via the chain runner.
    return self.run_module("match_tmdbinfo", name=name,
                           mtype=mtype, year=year, season=season)
def obtain_images(self, mediainfo: MediaInfo) -> Optional[MediaInfo]:
"""
补充抓取媒体信息图片
@@ -159,13 +182,14 @@ class ChainBase(metaclass=ABCMeta):
image_prefix=image_prefix, image_type=image_type,
season=season, episode=episode)
def douban_info(self, doubanid: str) -> Optional[dict]:
def douban_info(self, doubanid: str, mtype: MediaType = None) -> Optional[dict]:
"""
获取豆瓣信息
:param doubanid: 豆瓣ID
:param mtype: 媒体类型
:return: 豆瓣信息
"""
return self.run_module("douban_info", doubanid=doubanid)
return self.run_module("douban_info", doubanid=doubanid, mtype=mtype)
def tvdb_info(self, tvdbid: int) -> Optional[dict]:
"""
@@ -357,15 +381,6 @@ class ChainBase(metaclass=ABCMeta):
"""
return self.run_module("media_exists", mediainfo=mediainfo, itemid=itemid)
def refresh_mediaserver(self, mediainfo: MediaInfo, file_path: Path) -> None:
"""
刷新媒体库
:param mediainfo: 识别的媒体信息
:param file_path: 文件路径
:return: 成功或失败
"""
self.run_module("refresh_mediaserver", mediainfo=mediainfo, file_path=file_path)
def post_message(self, message: Notification) -> None:
"""
发送消息
@@ -376,6 +391,7 @@ class ChainBase(metaclass=ABCMeta):
self.eventmanager.send_event(etype=EventType.NoticeMessage,
data={
"channel": message.channel,
"type": message.mtype,
"title": message.title,
"text": message.text,
"image": message.image,

View File

@@ -1,10 +1,7 @@
from typing import Optional, List
from app.chain import ChainBase
from app.core.context import Context
from app.core.context import MediaInfo
from app.core.metainfo import MetaInfo
from app.log import logger
from app.core.config import settings
from app.schemas import MediaType
from app.utils.singleton import Singleton
@@ -14,53 +11,7 @@ class DoubanChain(ChainBase, metaclass=Singleton):
豆瓣处理链,单例运行
"""
def recognize_by_doubanid(self, doubanid: str) -> Optional[Context]:
"""
根据豆瓣ID识别媒体信息
"""
logger.info(f'开始识别媒体信息豆瓣ID{doubanid} ...')
# 查询豆瓣信息
doubaninfo = self.douban_info(doubanid=doubanid)
if not doubaninfo:
logger.warn(f'未查询到豆瓣信息豆瓣ID{doubanid}')
return None
return self.recognize_by_doubaninfo(doubaninfo)
def recognize_by_doubaninfo(self, doubaninfo: dict) -> Optional[Context]:
"""
根据豆瓣信息识别媒体信息
"""
# 优先使用原标题匹配
season_meta = None
if doubaninfo.get("original_title"):
meta = MetaInfo(title=doubaninfo.get("original_title"))
season_meta = MetaInfo(title=doubaninfo.get("title"))
# 合并季
meta.begin_season = season_meta.begin_season
else:
meta = MetaInfo(title=doubaninfo.get("title"))
# 年份
if doubaninfo.get("year"):
meta.year = doubaninfo.get("year")
# 处理类型
if isinstance(doubaninfo.get('media_type'), MediaType):
meta.type = doubaninfo.get('media_type')
else:
meta.type = MediaType.MOVIE if doubaninfo.get("type") == "movie" else MediaType.TV
# 使用原标题识别媒体信息
mediainfo = self.recognize_media(meta=meta, mtype=meta.type)
if not mediainfo:
if season_meta and season_meta.name != meta.name:
# 使用主标题识别媒体信息
mediainfo = self.recognize_media(meta=season_meta, mtype=season_meta.type)
if not mediainfo:
logger.warn(f'{meta.name} 未识别到TMDB媒体信息')
return Context(meta_info=meta, media_info=MediaInfo(douban_info=doubaninfo))
logger.info(f'识别到媒体信息:{mediainfo.type.value} {mediainfo.title_year} {meta.season}')
mediainfo.set_douban_info(doubaninfo)
return Context(meta_info=meta, media_info=mediainfo)
def movie_top250(self, page: int = 1, count: int = 30) -> List[dict]:
def movie_top250(self, page: int = 1, count: int = 30) -> Optional[List[dict]]:
"""
获取豆瓣电影TOP250
:param page: 页码
@@ -68,19 +19,19 @@ class DoubanChain(ChainBase, metaclass=Singleton):
"""
return self.run_module("movie_top250", page=page, count=count)
def movie_showing(self, page: int = 1, count: int = 30) -> List[dict]:
def movie_showing(self, page: int = 1, count: int = 30) -> Optional[List[dict]]:
"""
获取正在上映的电影
"""
return self.run_module("movie_showing", page=page, count=count)
def tv_weekly_chinese(self, page: int = 1, count: int = 30) -> List[dict]:
def tv_weekly_chinese(self, page: int = 1, count: int = 30) -> Optional[List[dict]]:
"""
获取本周中国剧集榜
"""
return self.run_module("tv_weekly_chinese", page=page, count=count)
def tv_weekly_global(self, page: int = 1, count: int = 30) -> List[dict]:
def tv_weekly_global(self, page: int = 1, count: int = 30) -> Optional[List[dict]]:
"""
获取本周全球剧集榜
"""
@@ -100,8 +51,24 @@ class DoubanChain(ChainBase, metaclass=Singleton):
return self.run_module("douban_discover", mtype=mtype, sort=sort, tags=tags,
page=page, count=count)
def tv_animation(self, page: int = 1, count: int = 30) -> List[dict]:
def tv_animation(self, page: int = 1, count: int = 30) -> Optional[List[dict]]:
"""
获取动画剧集
"""
return self.run_module("tv_animation", page=page, count=count)
def movie_hot(self, page: int = 1, count: int = 30) -> Optional[List[dict]]:
    """
    Fetch the Douban hot-movies list.

    :param page: page number (1-based)
    :param count: items per page
    :return: list of movie dicts, or None when Douban is not the active
             recognition source
    """
    # Hot lists are only served when Douban drives recognition.
    if settings.RECOGNIZE_SOURCE == "douban":
        return self.run_module("movie_hot", page=page, count=count)
    return None
def tv_hot(self, page: int = 1, count: int = 30) -> Optional[List[dict]]:
    """
    Fetch the Douban hot-TV list.

    :param page: page number (1-based)
    :param count: items per page
    :return: list of TV dicts, or None when Douban is not the active
             recognition source
    """
    # Hot lists are only served when Douban drives recognition.
    if settings.RECOGNIZE_SOURCE == "douban":
        return self.run_module("tv_hot", page=page, count=count)
    return None

View File

@@ -170,7 +170,8 @@ class DownloadChain(ChainBase):
episodes: Set[int] = None,
channel: MessageChannel = None,
save_path: str = None,
userid: Union[str, int] = None) -> Optional[str]:
userid: Union[str, int] = None,
username: str = None) -> Optional[str]:
"""
下载及发送通知
:param context: 资源上下文
@@ -179,6 +180,7 @@ class DownloadChain(ChainBase):
:param channel: 通知渠道
:param save_path: 保存路径
:param userid: 用户ID
:param username: 调用下载的用户名/插件名
"""
_torrent = context.torrent_info
_media = context.media_info
@@ -267,6 +269,7 @@ class DownloadChain(ChainBase):
torrent_description=_torrent.description,
torrent_site=_torrent.site_name,
userid=userid,
username=username,
channel=channel.value if channel else None,
date=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
)
@@ -318,10 +321,11 @@ class DownloadChain(ChainBase):
def batch_download(self,
contexts: List[Context],
no_exists: Dict[int, Dict[int, NotExistMediaInfo]] = None,
no_exists: Dict[Union[int, str], Dict[int, NotExistMediaInfo]] = None,
save_path: str = None,
channel: MessageChannel = None,
userid: str = None) -> Tuple[List[Context], Dict[int, Dict[int, NotExistMediaInfo]]]:
userid: str = None,
username: str = None) -> Tuple[List[Context], Dict[int, Dict[int, NotExistMediaInfo]]]:
"""
根据缺失数据,自动种子列表中组合择优下载
:param contexts: 资源上下文列表
@@ -329,33 +333,35 @@ class DownloadChain(ChainBase):
:param save_path: 保存路径
:param channel: 通知渠道
:param userid: 用户ID
:return: 已经下载的资源列表、剩余未下载到的剧集 no_exists[tmdb_id] = {season: NotExistMediaInfo}
:param username: 调用下载的用户名/插件名
:return: 已经下载的资源列表、剩余未下载到的剧集 no_exists[tmdb_id/douban_id] = {season: NotExistMediaInfo}
"""
# 已下载的项目
downloaded_list: List[Context] = []
def __update_seasons(_tmdbid: int, _need: list, _current: list) -> list:
def __update_seasons(_mid: Union[int, str], _need: list, _current: list) -> list:
"""
更新need_tvs季数返回剩余季数
:param _tmdbid: TMDBID
:param _mid: TMDBID
:param _need: 需要下载的季数
:param _current: 已经下载的季数
"""
# 剩余季数
need = list(set(_need).difference(set(_current)))
# 清除已下载的季信息
seas = copy.deepcopy(no_exists.get(_tmdbid))
seas = copy.deepcopy(no_exists.get(_mid))
for _sea in list(seas):
if _sea not in need:
no_exists[_tmdbid].pop(_sea)
if not no_exists.get(_tmdbid) and no_exists.get(_tmdbid) is not None:
no_exists.pop(_tmdbid)
no_exists[_mid].pop(_sea)
if not no_exists.get(_mid) and no_exists.get(_mid) is not None:
no_exists.pop(_mid)
break
return need
def __update_episodes(_tmdbid: int, _sea: int, _need: list, _current: set) -> list:
def __update_episodes(_mid: Union[int, str], _sea: int, _need: list, _current: set) -> list:
"""
更新need_tvs集数返回剩余集数
:param _tmdbid: TMDBID
:param _mid: TMDBID
:param _sea: 季数
:param _need: 需要下载的集数
:param _current: 已经下载的集数
@@ -363,26 +369,26 @@ class DownloadChain(ChainBase):
# 剩余集数
need = list(set(_need).difference(set(_current)))
if need:
not_exist = no_exists[_tmdbid][_sea]
no_exists[_tmdbid][_sea] = NotExistMediaInfo(
not_exist = no_exists[_mid][_sea]
no_exists[_mid][_sea] = NotExistMediaInfo(
season=not_exist.season,
episodes=need,
total_episode=not_exist.total_episode,
start_episode=not_exist.start_episode
)
else:
no_exists[_tmdbid].pop(_sea)
if not no_exists.get(_tmdbid) and no_exists.get(_tmdbid) is not None:
no_exists.pop(_tmdbid)
no_exists[_mid].pop(_sea)
if not no_exists.get(_mid) and no_exists.get(_mid) is not None:
no_exists.pop(_mid)
return need
def __get_season_episodes(tmdbid: int, season: int) -> int:
def __get_season_episodes(_mid: Union[int, str], season: int) -> int:
"""
获取需要的季的集数
"""
if not no_exists.get(tmdbid):
if not no_exists.get(_mid):
return 9999
no_exist = no_exists.get(tmdbid)
no_exist = no_exists.get(_mid)
if not no_exist.get(season):
return 9999
return no_exist[season].total_episode
@@ -394,7 +400,7 @@ class DownloadChain(ChainBase):
for context in contexts:
if context.media_info.type == MediaType.MOVIE:
if self.download_single(context, save_path=save_path,
channel=channel, userid=userid):
channel=channel, userid=userid, username=username):
# 下载成功
downloaded_list.append(context)
@@ -402,17 +408,17 @@ class DownloadChain(ChainBase):
if no_exists:
# 先把整季缺失的拿出来,看是否刚好有所有季都满足的种子 {tmdbid: [seasons]}
need_seasons: Dict[int, list] = {}
for need_tmdbid, need_tv in no_exists.items():
for need_mid, need_tv in no_exists.items():
for tv in need_tv.values():
if not tv:
continue
# 季列表为空的,代表全季缺失
if not tv.episodes:
if not need_seasons.get(need_tmdbid):
need_seasons[need_tmdbid] = []
need_seasons[need_tmdbid].append(tv.season or 1)
if not need_seasons.get(need_mid):
need_seasons[need_mid] = []
need_seasons[need_mid].append(tv.season or 1)
# 查找整季包含的种子,只处理整季没集的种子或者是集数超过季的种子
for need_tmdbid, need_season in need_seasons.items():
for need_mid, need_season in need_seasons.items():
# 循环种子
for context in contexts:
# 媒体信息
@@ -430,7 +436,7 @@ class DownloadChain(ChainBase):
if meta.episode_list:
continue
# 匹配TMDBID
if need_tmdbid == media.tmdb_id:
if need_mid == media.tmdb_id or need_mid == media.douban_id:
# 种子季是需要季或者子集
if set(torrent_season).issubset(set(need_season)):
if len(torrent_season) == 1:
@@ -450,7 +456,7 @@ class DownloadChain(ChainBase):
end_ep = max(torrent_episodes)
meta.set_episodes(begin=begin_ep, end=end_ep)
# 需要总集数
need_total = __get_season_episodes(need_tmdbid, torrent_season[0])
need_total = __get_season_episodes(need_mid, torrent_season[0])
if len(torrent_episodes) < need_total:
logger.info(
f"{meta.org_string} 解析文件集数发现不是完整合集")
@@ -462,31 +468,33 @@ class DownloadChain(ChainBase):
torrent_file=content if isinstance(content, Path) else None,
save_path=save_path,
channel=channel,
userid=userid
userid=userid,
username=username
)
else:
# 下载
download_id = self.download_single(context, save_path=save_path,
channel=channel, userid=userid)
channel=channel, userid=userid, username=username)
if download_id:
# 下载成功
downloaded_list.append(context)
# 更新仍需季集
need_season = __update_seasons(_tmdbid=need_tmdbid,
need_season = __update_seasons(_mid=need_mid,
_need=need_season,
_current=torrent_season)
# 电视剧季内的集匹配
if no_exists:
# TMDBID列表
need_tv_list = list(no_exists)
for need_tmdbid in need_tv_list:
for need_mid in need_tv_list:
# dict[season, [NotExistMediaInfo]]
need_tv = no_exists.get(need_tmdbid)
need_tv = no_exists.get(need_mid)
if not need_tv:
continue
need_tv_copy = copy.deepcopy(no_exists.get(need_mid))
# 循环每一季
for sea, tv in need_tv.items():
for sea, tv in need_tv_copy.items():
# 当前需要季
need_season = sea
# 当前需要集
@@ -508,7 +516,7 @@ class DownloadChain(ChainBase):
if media.type != MediaType.TV:
continue
# 匹配TMDB
if media.tmdb_id == need_tmdbid:
if media.tmdb_id == need_mid or media.douban_id == need_mid:
# 不重复添加
if context in downloaded_list:
continue
@@ -526,12 +534,12 @@ class DownloadChain(ChainBase):
if torrent_episodes.issubset(set(need_episodes)):
# 下载
download_id = self.download_single(context, save_path=save_path,
channel=channel, userid=userid)
channel=channel, userid=userid, username=username)
if download_id:
# 下载成功
downloaded_list.append(context)
# 更新仍需集数
need_episodes = __update_episodes(_tmdbid=need_tmdbid,
need_episodes = __update_episodes(_mid=need_mid,
_need=need_episodes,
_sea=need_season,
_current=torrent_episodes)
@@ -540,9 +548,9 @@ class DownloadChain(ChainBase):
if no_exists:
# TMDBID列表
no_exists_list = list(no_exists)
for need_tmdbid in no_exists_list:
for need_mid in no_exists_list:
# dict[season, [NotExistMediaInfo]]
need_tv = no_exists.get(need_tmdbid)
need_tv = no_exists.get(need_mid)
if not need_tv:
continue
# 需要季列表
@@ -576,7 +584,7 @@ class DownloadChain(ChainBase):
if not need_episodes:
break
# 选中一个单季整季的或单季包括需要的所有集的
if media.tmdb_id == need_tmdbid \
if (media.tmdb_id == need_mid or media.douban_id == need_mid) \
and (not meta.episode_list
or set(meta.episode_list).intersection(set(need_episodes))) \
and len(meta.season_list) == 1 \
@@ -604,7 +612,8 @@ class DownloadChain(ChainBase):
episodes=selected_episodes,
save_path=save_path,
channel=channel,
userid=userid
userid=userid,
username=username
)
if not download_id:
continue
@@ -615,7 +624,7 @@ class DownloadChain(ChainBase):
end_ep = max(torrent_episodes)
meta.set_episodes(begin=begin_ep, end=end_ep)
# 更新仍需集数
need_episodes = __update_episodes(_tmdbid=need_tmdbid,
need_episodes = __update_episodes(_mid=need_mid,
_need=need_episodes,
_sea=need_season,
_current=selected_episodes)
@@ -647,8 +656,9 @@ class DownloadChain(ChainBase):
"start_episode": int
]}
"""
if not no_exists.get(mediainfo.tmdb_id):
no_exists[mediainfo.tmdb_id] = {
mediakey = mediainfo.tmdb_id or mediainfo.douban_id
if not no_exists.get(mediakey):
no_exists[mediakey] = {
_season: NotExistMediaInfo(
season=_season,
episodes=_episodes,
@@ -657,7 +667,7 @@ class DownloadChain(ChainBase):
)
}
else:
no_exists[mediainfo.tmdb_id][_season] = NotExistMediaInfo(
no_exists[mediakey][_season] = NotExistMediaInfo(
season=_season,
episodes=_episodes,
total_episode=_total,
@@ -673,6 +683,7 @@ class DownloadChain(ChainBase):
if mediainfo.type == MediaType.MOVIE:
# 电影
itemid = self.mediaserver.get_item_id(mtype=mediainfo.type.value,
title=mediainfo.title,
tmdbid=mediainfo.tmdb_id)
exists_movies: Optional[ExistMediaInfo] = self.media_exists(mediainfo=mediainfo, itemid=itemid)
if exists_movies:
@@ -683,7 +694,8 @@ class DownloadChain(ChainBase):
if not mediainfo.seasons:
# 补充媒体信息
mediainfo: MediaInfo = self.recognize_media(mtype=mediainfo.type,
tmdbid=mediainfo.tmdb_id)
tmdbid=mediainfo.tmdb_id,
doubanid=mediainfo.douban_id)
if not mediainfo:
logger.error(f"媒体信息识别失败!")
return False, {}
@@ -692,6 +704,7 @@ class DownloadChain(ChainBase):
return False, {}
# 电视剧
itemid = self.mediaserver.get_item_id(mtype=mediainfo.type.value,
title=mediainfo.title,
tmdbid=mediainfo.tmdb_id,
season=mediainfo.season)
# 媒体库已存在的剧集
@@ -702,7 +715,7 @@ class DownloadChain(ChainBase):
if not episodes:
continue
# 全季不存在
if meta.season_list \
if meta.sea \
and season not in meta.season_list:
continue
# 总集数
@@ -713,7 +726,7 @@ class DownloadChain(ChainBase):
else:
# 存在一些,检查每季缺失的季集
for season, episodes in mediainfo.seasons.items():
if meta.begin_season \
if meta.sea \
and season not in meta.season_list:
continue
if not episodes:

View File

@@ -14,7 +14,6 @@ from app.schemas.types import EventType, MediaType
from app.utils.singleton import Singleton
from app.utils.string import StringUtils
recognize_lock = Lock()
@@ -27,13 +26,11 @@ class MediaChain(ChainBase, metaclass=Singleton):
# 临时识别结果 {title, name, year, season, episode}
recognize_temp: Optional[dict] = None
def recognize_by_title(self, title: str, subtitle: str = None) -> Optional[Context]:
def recognize_by_meta(self, metainfo: MetaBase) -> Optional[MediaInfo]:
"""
根据主副标题识别媒体信息
"""
logger.info(f'开始识别媒体信息,标题:{title},副标题:{subtitle} ...')
# 识别元数据
metainfo = MetaInfo(title, subtitle)
title = metainfo.title
# 识别媒体信息
mediainfo: MediaInfo = self.recognize_media(meta=metainfo)
if not mediainfo:
@@ -43,13 +40,13 @@ class MediaChain(ChainBase, metaclass=Singleton):
mediainfo = self.recognize_help(title=title, org_meta=metainfo)
if not mediainfo:
logger.warn(f'{title} 未识别到媒体信息')
return Context(meta_info=metainfo)
return None
# 识别成功
logger.info(f'{title} 识别到媒体信息:{mediainfo.type.value} {mediainfo.title_year}')
# 更新媒体图片
self.obtain_images(mediainfo=mediainfo)
# 返回上下文
return Context(meta_info=metainfo, media_info=mediainfo)
return mediainfo
def recognize_help(self, title: str, org_meta: MetaBase) -> Optional[MediaInfo]:
"""
@@ -69,7 +66,7 @@ class MediaChain(ChainBase, metaclass=Singleton):
}
)
# 每0.5秒循环一次等待结果直到10秒后超时
for i in range(10):
for i in range(20):
if self.recognize_temp is not None:
break
time.sleep(0.5)
@@ -170,8 +167,7 @@ class MediaChain(ChainBase, metaclass=Singleton):
# 识别
meta = MetaInfo(content)
if not meta.name:
logger.warn(f'{title} 未识别到元数据!')
return meta, []
meta.cn_name = content
# 合并信息
if mtype:
meta.type = mtype
@@ -190,3 +186,78 @@ class MediaChain(ChainBase, metaclass=Singleton):
logger.info(f"{content} 搜索到 {len(medias)} 条相关媒体信息")
# 识别的元数据,媒体信息列表
return meta, medias
def get_tmdbinfo_by_doubanid(self, doubanid: str, mtype: MediaType = None) -> Optional[dict]:
"""
根据豆瓣ID获取TMDB信息
"""
tmdbinfo = None
doubaninfo = self.douban_info(doubanid=doubanid, mtype=mtype)
if doubaninfo:
# 优先使用原标题匹配
season_meta = None
if doubaninfo.get("original_title"):
meta = MetaInfo(title=doubaninfo.get("original_title"))
season_meta = MetaInfo(title=doubaninfo.get("title"))
# 合并季
meta.begin_season = season_meta.begin_season
else:
meta = MetaInfo(title=doubaninfo.get("title"))
# 年份
if doubaninfo.get("year"):
meta.year = doubaninfo.get("year")
# 处理类型
if isinstance(doubaninfo.get('media_type'), MediaType):
meta.type = doubaninfo.get('media_type')
else:
meta.type = MediaType.MOVIE if doubaninfo.get("type") == "movie" else MediaType.TV
# 使用原标题识别TMDB媒体信息
tmdbinfo = self.match_tmdbinfo(
name=meta.name,
year=meta.year,
mtype=mtype or meta.type,
season=meta.begin_season
)
if not tmdbinfo:
if season_meta and season_meta.name != meta.name:
# 使用主标题识别媒体信息
tmdbinfo = self.match_tmdbinfo(
name=season_meta.name,
year=meta.year,
mtype=mtype or meta.type,
season=meta.begin_season
)
return tmdbinfo
def get_doubaninfo_by_tmdbid(self, tmdbid: int,
mtype: MediaType = None, season: int = None) -> Optional[dict]:
"""
根据TMDBID获取豆瓣信息
"""
tmdbinfo = self.tmdb_info(tmdbid=tmdbid, mtype=mtype)
if tmdbinfo:
# 名称
name = tmdbinfo.get("title") or tmdbinfo.get("name")
# 年份
year = None
if tmdbinfo.get('release_date'):
year = tmdbinfo['release_date'][:4]
elif tmdbinfo.get('seasons') and season:
for seainfo in tmdbinfo['seasons']:
# 季
season_number = seainfo.get("season_number")
if not season_number:
continue
air_date = seainfo.get("air_date")
if air_date and season_number == season:
year = air_date[:4]
break
# IMDBID
imdbid = tmdbinfo.get("external_ids", {}).get("imdb_id")
return self.match_doubaninfo(
name=name,
year=year,
mtype=mtype,
imdbid=imdbid
)
return None

View File

@@ -1,4 +1,3 @@
import copy
from typing import Any
from app.chain.download import *
@@ -87,13 +86,15 @@ class MessageChain(ChainBase):
# 发送消息
self.post_message(Notification(channel=channel, title="输入有误!", userid=userid))
return
# 选择的序号
_choice = int(text) + _current_page * self._page_size - 1
# 缓存类型
cache_type: str = cache_data.get('type')
# 缓存列表
cache_list: list = cache_data.get('items')
cache_list: list = copy.deepcopy(cache_data.get('items'))
# 选择
if cache_type == "Search":
mediainfo: MediaInfo = cache_list[int(text) + _current_page * self._page_size - 1]
mediainfo: MediaInfo = cache_list[_choice]
_current_media = mediainfo
# 查询缺失的媒体信息
exist_flag, no_exists = self.downloadchain.get_no_exists_info(meta=_current_meta,
@@ -108,9 +109,10 @@ class MessageChain(ChainBase):
# 发送缺失的媒体信息
if no_exists:
# 发送消息
mediakey = mediainfo.tmdb_id or mediainfo.douban_id
messages = [
f"{sea} 季缺失 {StringUtils.str_series(no_exist.episodes) if no_exist.episodes else no_exist.total_episode}"
for sea, no_exist in no_exists.get(mediainfo.tmdb_id).items()]
for sea, no_exist in no_exists.get(mediakey).items()]
self.post_message(Notification(channel=channel,
title=f"{mediainfo.title_year}\n" + "\n".join(messages),
userid=userid))
@@ -158,7 +160,7 @@ class MessageChain(ChainBase):
elif cache_type == "Subscribe":
# 订阅媒体
mediainfo: MediaInfo = cache_list[int(text) - 1]
mediainfo: MediaInfo = cache_list[_choice]
# 查询缺失的媒体信息
exist_flag, _ = self.downloadchain.get_no_exists_info(meta=_current_meta,
mediainfo=mediainfo)
@@ -187,9 +189,9 @@ class MessageChain(ChainBase):
username=username)
else:
# 下载种子
context: Context = cache_list[int(text) - 1]
context: Context = cache_list[_choice]
# 下载
self.downloadchain.download_single(context, userid=userid, channel=channel)
self.downloadchain.download_single(context, userid=userid, channel=channel, username=username)
elif text.lower() == "p":
# 上一页
@@ -217,11 +219,6 @@ class MessageChain(ChainBase):
start = _current_page * self._page_size
end = start + self._page_size
if cache_type == "Torrent":
# 更新缓存
user_cache[userid] = {
"type": "Torrent",
"items": cache_list[start:end]
}
# 发送种子数据
self.__post_torrents_message(channel=channel,
title=_current_media.title,
@@ -260,11 +257,6 @@ class MessageChain(ChainBase):
# 加一页
_current_page += 1
if cache_type == "Torrent":
# 更新缓存
user_cache[userid] = {
"type": "Torrent",
"items": cache_list
}
# 发送种子数据
self.__post_torrents_message(channel=channel,
title=_current_media.title,
@@ -353,7 +345,8 @@ class MessageChain(ChainBase):
downloads, lefts = self.downloadchain.batch_download(contexts=cache_list,
no_exists=no_exists,
channel=channel,
userid=userid)
userid=userid,
username=username)
if downloads and not lefts:
# 全部下载完成
logger.info(f'{_current_media.title_year} 下载完成')

View File

@@ -31,14 +31,16 @@ class SearchChain(ChainBase):
self.systemconfig = SystemConfigOper()
self.torrenthelper = TorrentHelper()
def search_by_tmdbid(self, tmdbid: int, mtype: MediaType = None, area: str = "title") -> List[Context]:
def search_by_id(self, tmdbid: int = None, doubanid: str = None,
mtype: MediaType = None, area: str = "title") -> List[Context]:
"""
根据TMDB ID搜索资源精确匹配但不不过滤本地存在的资源
根据TMDBID/豆瓣ID搜索资源精确匹配但不不过滤本地存在的资源
:param tmdbid: TMDB ID
:param doubanid: 豆瓣 ID
:param mtype: 媒体,电影 or 电视剧
:param area: 搜索范围title or imdbid
"""
mediainfo = self.recognize_media(tmdbid=tmdbid, mtype=mtype)
mediainfo = self.recognize_media(tmdbid=tmdbid, doubanid=doubanid, mtype=mtype)
if not mediainfo:
logger.error(f'{tmdbid} 媒体信息识别失败!')
return []
@@ -92,19 +94,29 @@ class SearchChain(ChainBase):
:param filter_rule: 过滤规则,为空是使用默认过滤规则
:param area: 搜索范围title or imdbid
"""
# 豆瓣标题处理
if not mediainfo.tmdb_id:
meta = MetaInfo(title=mediainfo.title)
mediainfo.title = meta.name
mediainfo.season = meta.begin_season
logger.info(f'开始搜索资源,关键词:{keyword or mediainfo.title} ...')
# 补充媒体信息
if not mediainfo.names:
mediainfo: MediaInfo = self.recognize_media(mtype=mediainfo.type,
tmdbid=mediainfo.tmdb_id)
tmdbid=mediainfo.tmdb_id,
doubanid=mediainfo.douban_id)
if not mediainfo:
logger.error(f'媒体信息识别失败!')
return []
# 缺失的季集
if no_exists and no_exists.get(mediainfo.tmdb_id):
mediakey = mediainfo.tmdb_id or mediainfo.douban_id
if no_exists and no_exists.get(mediakey):
# 过滤剧集
season_episodes = {sea: info.episodes
for sea, info in no_exists[mediainfo.tmdb_id].items()}
elif mediainfo.season:
# 豆瓣只搜索当前季
season_episodes = {mediainfo.season: []}
else:
season_episodes = None
# 搜索关键词
@@ -154,6 +166,7 @@ class SearchChain(ChainBase):
if mediainfo:
self.progress.start(ProgressKey.Search)
logger.info(f'开始匹配,总 {_total} 个资源 ...')
logger.info(f"标题:{mediainfo.title},原标题:{mediainfo.original_title},别名:{mediainfo.names}")
self.progress.update(value=0, text=f'开始匹配,总 {_total} 个资源 ...', key=ProgressKey.Search)
for torrent in torrents:
_count += 1

View File

@@ -6,10 +6,11 @@ from datetime import datetime
from typing import Dict, List, Optional, Union, Tuple
from app.chain import ChainBase
from app.chain.douban import DoubanChain
from app.chain.download import DownloadChain
from app.chain.media import MediaChain
from app.chain.search import SearchChain
from app.chain.torrents import TorrentsChain
from app.core.config import settings
from app.core.context import TorrentInfo, Context, MediaInfo
from app.core.meta import MetaBase
from app.core.metainfo import MetaInfo
@@ -33,6 +34,7 @@ class SubscribeChain(ChainBase):
self.searchchain = SearchChain()
self.subscribeoper = SubscribeOper()
self.torrentschain = TorrentsChain()
self.mediachain = MediaChain()
self.message = MessageHelper()
self.systemconfig = SystemConfigOper()
@@ -51,32 +53,39 @@ class SubscribeChain(ChainBase):
识别媒体信息并添加订阅
"""
logger.info(f'开始添加订阅,标题:{title} ...')
metainfo = None
mediainfo = None
if not tmdbid and doubanid:
# 将豆瓣信息转换为TMDB信息
context = DoubanChain().recognize_by_doubanid(doubanid)
if context:
metainfo = context.meta_info
mediainfo = context.media_info
metainfo = MetaInfo(title)
if year:
metainfo.year = year
if mtype:
metainfo.type = mtype
if season:
metainfo.type = MediaType.TV
metainfo.begin_season = season
# 识别媒体信息
if settings.RECOGNIZE_SOURCE == "themoviedb":
# TMDB识别模式
if not tmdbid and doubanid:
# 将豆瓣信息转换为TMDB信息
tmdbinfo = self.mediachain.get_tmdbinfo_by_doubanid(doubanid=doubanid, mtype=mtype)
if tmdbinfo:
mediainfo = MediaInfo(tmdb_info=tmdbinfo)
else:
# 识别TMDB信息
mediainfo = self.recognize_media(meta=metainfo, mtype=mtype, tmdbid=tmdbid)
else:
# 识别元数据
metainfo = MetaInfo(title)
if year:
metainfo.year = year
if mtype:
metainfo.type = mtype
if season:
metainfo.type = MediaType.TV
metainfo.begin_season = season
# 识别媒体信息
mediainfo = self.recognize_media(meta=metainfo, mtype=mtype, tmdbid=tmdbid)
# 豆瓣识别模式
mediainfo = self.recognize_media(meta=metainfo, mtype=mtype, doubanid=doubanid)
if mediainfo:
# 豆瓣标题处理
meta = MetaInfo(mediainfo.title)
mediainfo.title = meta.name
if not season:
season = meta.begin_season
# 识别失败
if not mediainfo or not metainfo or not mediainfo.tmdb_id:
logger.warn(f'未识别到媒体信息,标题:{title}tmdbid{tmdbid}')
if not mediainfo:
logger.warn(f'未识别到媒体信息,标题:{title}tmdbid{tmdbid}doubanid{doubanid}')
return None, "未识别到媒体信息"
# 更新媒体图片
self.obtain_images(mediainfo=mediainfo)
# 总集数
if mediainfo.type == MediaType.TV:
if not season:
@@ -86,16 +95,17 @@ class SubscribeChain(ChainBase):
if not mediainfo.seasons:
# 补充媒体信息
mediainfo = self.recognize_media(mtype=mediainfo.type,
tmdbid=mediainfo.tmdb_id)
tmdbid=mediainfo.tmdb_id,
doubanid=mediainfo.douban_id)
if not mediainfo:
logger.error(f"媒体信息识别失败!")
return None, "媒体信息识别失败"
if not mediainfo.seasons:
logger.error(f"媒体信息中没有季集信息,标题:{title}tmdbid{tmdbid}")
logger.error(f"媒体信息中没有季集信息,标题:{title}tmdbid{tmdbid}doubanid{doubanid}")
return None, "媒体信息中没有季集信息"
total_episode = len(mediainfo.seasons.get(season) or [])
if not total_episode:
logger.error(f'未获取到总集数,标题:{title}tmdbid{tmdbid}')
logger.error(f'未获取到总集数,标题:{title}tmdbid{tmdbid}, doubanid{doubanid}')
return None, f"未获取到第 {season} 季的总集数"
kwargs.update({
'total_episode': total_episode
@@ -105,9 +115,13 @@ class SubscribeChain(ChainBase):
kwargs.update({
'lack_episode': kwargs.get('total_episode')
})
# 更新媒体图片
self.obtain_images(mediainfo=mediainfo)
# 合并信息
if doubanid:
mediainfo.douban_id = doubanid
# 添加订阅
sid, err_msg = self.subscribeoper.add(mediainfo, doubanid=doubanid,
season=season, username=username, **kwargs)
sid, err_msg = self.subscribeoper.add(mediainfo, season=season, username=username, **kwargs)
if not sid:
logger.error(f'{mediainfo.title_year} {err_msg}')
if not exist_ok and message:
@@ -139,6 +153,7 @@ class SubscribeChain(ChainBase):
判断订阅是否已存在
"""
if self.subscribeoper.exists(tmdbid=mediainfo.tmdb_id,
doubanid=mediainfo.douban_id,
season=meta.begin_season if meta else None):
return True
return False
@@ -157,6 +172,7 @@ class SubscribeChain(ChainBase):
subscribes = self.subscribeoper.list(state)
# 遍历订阅
for subscribe in subscribes:
mediakey = subscribe.tmdbid or subscribe.doubanid
# 校验当前时间减订阅创建时间是否大于1分钟否则跳过先留出编辑订阅的时间
if subscribe.date:
now = datetime.now()
@@ -179,9 +195,11 @@ class SubscribeChain(ChainBase):
meta.begin_season = subscribe.season or None
meta.type = MediaType(subscribe.type)
# 识别媒体信息
mediainfo: MediaInfo = self.recognize_media(meta=meta, mtype=meta.type, tmdbid=subscribe.tmdbid)
mediainfo: MediaInfo = self.recognize_media(meta=meta, mtype=meta.type,
tmdbid=subscribe.tmdbid,
doubanid=subscribe.doubanid)
if not mediainfo:
logger.warn(f'未识别到媒体信息,标题:{subscribe.name}tmdbid{subscribe.tmdbid}')
logger.warn(f'未识别到媒体信息,标题:{subscribe.name}tmdbid{subscribe.tmdbid}doubanid{subscribe.doubanid}')
continue
# 非洗版状态
@@ -203,7 +221,7 @@ class SubscribeChain(ChainBase):
exist_flag = False
if meta.type == MediaType.TV:
no_exists = {
subscribe.tmdbid: {
mediakey: {
subscribe.season: NotExistMediaInfo(
season=subscribe.season,
episodes=[],
@@ -225,15 +243,15 @@ class SubscribeChain(ChainBase):
# 使用订阅的总集数和开始集数替换no_exists
no_exists = self.__get_subscribe_no_exits(
no_exists=no_exists,
tmdb_id=mediainfo.tmdb_id,
mediakey=mediakey,
begin_season=meta.begin_season,
total_episode=subscribe.total_episode,
start_episode=subscribe.start_episode,
)
# 打印缺失集信息
if no_exists and no_exists.get(subscribe.tmdbid):
no_exists_info = no_exists.get(subscribe.tmdbid).get(subscribe.season)
if no_exists and no_exists.get(mediakey):
no_exists_info = no_exists.get(mediakey).get(subscribe.season)
if no_exists_info:
logger.info(f'订阅 {mediainfo.title_year} {meta.season} 缺失集:{no_exists_info.episodes}')
@@ -302,7 +320,7 @@ class SubscribeChain(ChainBase):
# 自动下载
downloads, lefts = self.downloadchain.batch_download(contexts=matched_contexts,
no_exists=no_exists)
no_exists=no_exists, username=subscribe.username)
# 更新已经下载的集数
if downloads \
and meta.type == MediaType.TV \
@@ -415,7 +433,7 @@ class SubscribeChain(ChainBase):
}
# 订阅默认过滤规则
return self.systemconfig.get(SystemConfigKey.DefaultFilterRules) or {}
@staticmethod
def check_filter_rule(torrent_info: TorrentInfo, filter_rule: Dict[str, str]) -> bool:
"""
@@ -469,15 +487,17 @@ class SubscribeChain(ChainBase):
# 遍历订阅
for subscribe in subscribes:
logger.info(f'开始匹配订阅,标题:{subscribe.name} ...')
mediakey = subscribe.tmdbid or subscribe.doubanid
# 生成元数据
meta = MetaInfo(subscribe.name)
meta.year = subscribe.year
meta.begin_season = subscribe.season or None
meta.type = MediaType(subscribe.type)
# 识别媒体信息
mediainfo: MediaInfo = self.recognize_media(meta=meta, mtype=meta.type, tmdbid=subscribe.tmdbid)
mediainfo: MediaInfo = self.recognize_media(meta=meta, mtype=meta.type,
tmdbid=subscribe.tmdbid, doubanid=subscribe.doubanid)
if not mediainfo:
logger.warn(f'未识别到媒体信息,标题:{subscribe.name}tmdbid{subscribe.tmdbid}')
logger.warn(f'未识别到媒体信息,标题:{subscribe.name}tmdbid{subscribe.tmdbid}doubanid{subscribe.doubanid}')
continue
# 非洗版
if not subscribe.best_version:
@@ -498,7 +518,7 @@ class SubscribeChain(ChainBase):
exist_flag = False
if meta.type == MediaType.TV:
no_exists = {
subscribe.tmdbid: {
mediakey: {
subscribe.season: NotExistMediaInfo(
season=subscribe.season,
episodes=[],
@@ -520,15 +540,15 @@ class SubscribeChain(ChainBase):
# 使用订阅的总集数和开始集数替换no_exists
no_exists = self.__get_subscribe_no_exits(
no_exists=no_exists,
tmdb_id=mediainfo.tmdb_id,
mediakey=mediakey,
begin_season=meta.begin_season,
total_episode=subscribe.total_episode,
start_episode=subscribe.start_episode,
)
# 打印缺失集信息
if no_exists and no_exists.get(subscribe.tmdbid):
no_exists_info = no_exists.get(subscribe.tmdbid).get(subscribe.season)
if no_exists and no_exists.get(mediakey):
no_exists_info = no_exists.get(mediakey).get(subscribe.season)
if no_exists_info:
logger.info(f'订阅 {mediainfo.title_year} {meta.season} 缺失集:{no_exists_info.episodes}')
@@ -583,9 +603,9 @@ class SubscribeChain(ChainBase):
# 非洗版
if not subscribe.best_version:
# 不是缺失的剧集不要
if no_exists and no_exists.get(subscribe.tmdbid):
if no_exists and no_exists.get(mediakey):
# 缺失集
no_exists_info = no_exists.get(subscribe.tmdbid).get(subscribe.season)
no_exists_info = no_exists.get(mediakey).get(subscribe.season)
if no_exists_info:
# 是否有交集
if no_exists_info.episodes and \
@@ -621,7 +641,8 @@ class SubscribeChain(ChainBase):
logger.info(f'{mediainfo.title_year} 匹配完成,共匹配到{len(_match_context)}个资源')
if _match_context:
# 批量择优下载
downloads, lefts = self.downloadchain.batch_download(contexts=_match_context, no_exists=no_exists)
downloads, lefts = self.downloadchain.batch_download(contexts=_match_context, no_exists=no_exists,
username=subscribe.username)
# 更新已经下载的集数
if downloads and meta.type == MediaType.TV:
self.__update_subscribe_note(subscribe=subscribe, downloads=downloads)
@@ -660,9 +681,10 @@ class SubscribeChain(ChainBase):
meta.begin_season = subscribe.season or None
meta.type = MediaType(subscribe.type)
# 识别媒体信息
mediainfo: MediaInfo = self.recognize_media(meta=meta, mtype=meta.type, tmdbid=subscribe.tmdbid)
mediainfo: MediaInfo = self.recognize_media(meta=meta, mtype=meta.type,
tmdbid=subscribe.tmdbid, doubanid=subscribe.doubanid)
if not mediainfo:
logger.warn(f'未识别到媒体信息,标题:{subscribe.name}tmdbid{subscribe.tmdbid}')
logger.warn(f'未识别到媒体信息,标题:{subscribe.name}tmdbid{subscribe.tmdbid}doubanid{subscribe.doubanid}')
continue
# 对于电视剧,获取当前季的总集数
episodes = mediainfo.seasons.get(subscribe.season) or []
@@ -704,7 +726,11 @@ class SubscribeChain(ChainBase):
mediainfo = context.media_info
if mediainfo.type != MediaType.TV:
continue
if mediainfo.tmdb_id != subscribe.tmdbid:
if subscribe.tmdbid and mediainfo.tmdb_id \
and mediainfo.tmdb_id != subscribe.tmdbid:
continue
if subscribe.doubanid and mediainfo.douban_id \
and mediainfo.douban_id != subscribe.doubanid:
continue
episodes = meta.episode_list
if not episodes:
@@ -738,7 +764,8 @@ class SubscribeChain(ChainBase):
"""
更新订阅剩余集数
"""
left_seasons = lefts.get(mediainfo.tmdb_id)
mediakey = subscribe.tmdbid or subscribe.doubanid
left_seasons = lefts.get(mediakey)
if left_seasons:
for season_info in left_seasons.values():
season = season_info.season
@@ -779,11 +806,17 @@ class SubscribeChain(ChainBase):
messages = []
for subscribe in subscribes:
if subscribe.type == MediaType.MOVIE.value:
tmdb_link = f"https://www.themoviedb.org/movie/{subscribe.tmdbid}"
messages.append(f"{subscribe.id}. [{subscribe.name}{subscribe.year}]({tmdb_link})")
if subscribe.tmdbid:
link = f"https://www.themoviedb.org/movie/{subscribe.tmdbid}"
else:
link = f"https://movie.douban.com/subject/{subscribe.doubanid}"
messages.append(f"{subscribe.id}. [{subscribe.name}{subscribe.year}]({link})")
else:
tmdb_link = f"https://www.themoviedb.org/tv/{subscribe.tmdbid}"
messages.append(f"{subscribe.id}. [{subscribe.name}{subscribe.year}]({tmdb_link}) "
if subscribe.tmdbid:
link = f"https://www.themoviedb.org/tv/{subscribe.tmdbid}"
else:
link = f"https://movie.douban.com/subject/{subscribe.doubanid}"
messages.append(f"{subscribe.id}. [{subscribe.name}{subscribe.year}]({link}) "
f"{subscribe.season}"
f"_{subscribe.total_episode - (subscribe.lack_episode or subscribe.total_episode)}"
f"/{subscribe.total_episode}_")
@@ -818,24 +851,24 @@ class SubscribeChain(ChainBase):
@staticmethod
def __get_subscribe_no_exits(no_exists: Dict[int, Dict[int, NotExistMediaInfo]],
tmdb_id: int,
mediakey: Union[str, int],
begin_season: int,
total_episode: int,
start_episode: int):
"""
根据订阅开始集数和总集数结合TMDB信息计算当前订阅的缺失集数
:param no_exists: 缺失季集列表
:param tmdb_id: TMDB ID
:param mediakey: TMDB ID或豆瓣ID
:param begin_season: 开始季
:param total_episode: 订阅设定总集数
:param start_episode: 订阅设定开始集数
"""
# 使用订阅的总集数和开始集数替换no_exists
if no_exists \
and no_exists.get(tmdb_id) \
and no_exists.get(mediakey) \
and (total_episode or start_episode):
# 该季原缺失信息
no_exist_season = no_exists.get(tmdb_id).get(begin_season)
no_exist_season = no_exists.get(mediakey).get(begin_season)
if no_exist_season:
# 原集列表
episode_list = no_exist_season.episodes
@@ -867,7 +900,7 @@ class SubscribeChain(ChainBase):
# 与原集列表取交集
episodes = list(set(episode_list).intersection(set(new_episodes)))
# 更新集合
no_exists[tmdb_id][begin_season] = NotExistMediaInfo(
no_exists[mediakey][begin_season] = NotExistMediaInfo(
season=begin_season,
episodes=episodes,
total_episode=total_episode,

View File

@@ -25,17 +25,21 @@ class TmdbChain(ChainBase, metaclass=Singleton):
:param page: 页码
:return: 媒体信息列表
"""
if settings.RECOGNIZE_SOURCE != "themoviedb":
return None
return self.run_module("tmdb_discover", mtype=mtype,
sort_by=sort_by, with_genres=with_genres,
with_original_language=with_original_language,
page=page)
def tmdb_trending(self, page: int = 1) -> List[dict]:
def tmdb_trending(self, page: int = 1) -> Optional[List[dict]]:
"""
TMDB流行趋势
:param page: 第几页
:return: TMDB信息列表
"""
if settings.RECOGNIZE_SOURCE != "themoviedb":
return None
return self.run_module("tmdb_trending", page=page)
def tmdb_seasons(self, tmdbid: int) -> List[schemas.TmdbSeason]:

View File

@@ -4,6 +4,7 @@ from typing import Dict, List, Union
from cachetools import cached, TTLCache
from app.chain import ChainBase
from app.chain.media import MediaChain
from app.core.config import settings
from app.core.context import TorrentInfo, Context, MediaInfo
from app.core.metainfo import MetaInfo
@@ -32,6 +33,7 @@ class TorrentsChain(ChainBase, metaclass=Singleton):
self.siteoper = SiteOper()
self.rsshelper = RssHelper()
self.systemconfig = SystemConfigOper()
self.mediachain = MediaChain()
def remote_refresh(self, channel: MessageChannel, userid: Union[str, int] = None):
"""
@@ -166,7 +168,7 @@ class TorrentsChain(ChainBase, metaclass=Singleton):
# 识别
meta = MetaInfo(title=torrent.title, subtitle=torrent.description)
# 识别媒体信息
mediainfo: MediaInfo = self.recognize_media(meta=meta)
mediainfo: MediaInfo = self.mediachain.recognize_by_meta(meta)
if not mediainfo:
logger.warn(f'未识别到媒体信息,标题:{torrent.title}')
# 存储空的媒体信息

View File

@@ -66,7 +66,8 @@ class TransferChain(ChainBase):
mtype = MediaType(downloadhis.type)
# 按TMDBID识别
mediainfo = self.recognize_media(mtype=mtype,
tmdbid=downloadhis.tmdbid)
tmdbid=downloadhis.tmdbid,
doubanid=downloadhis.doubanid)
else:
# 非MoviePilot下载的任务按文件识别
mediainfo = None
@@ -243,7 +244,7 @@ class TransferChain(ChainBase):
if not mediainfo:
# 识别媒体信息
file_mediainfo = self.recognize_media(meta=file_meta)
file_mediainfo = self.mediachain.recognize_by_meta(file_meta)
else:
file_mediainfo = mediainfo
@@ -275,9 +276,6 @@ class TransferChain(ChainBase):
logger.info(f"{file_path.name} 识别为:{file_mediainfo.type.value} {file_mediainfo.title_year}")
# 更新媒体图片
self.obtain_images(mediainfo=file_mediainfo)
# 获取集数据
if file_mediainfo.type == MediaType.TV:
episodes_info = self.tmdbchain.tmdb_episodes(tmdbid=file_mediainfo.tmdb_id,
@@ -375,9 +373,6 @@ class TransferChain(ChainBase):
# 媒体目录
if transfer_info.target_path.is_file():
transfer_info.target_path = transfer_info.target_path.parent
# 刷新媒体库,根目录或季目录
if settings.REFRESH_MEDIASERVER:
self.refresh_mediaserver(mediainfo=media, file_path=transfer_info.target_path)
# 发送通知
se_str = None
if media.type == MediaType.TV:
@@ -452,7 +447,7 @@ class TransferChain(ChainBase):
def args_error():
self.post_message(Notification(channel=channel,
title="请输入正确的命令格式:/redo [id] [tmdbid]|[类型]"
title="请输入正确的命令格式:/redo [id] [tmdbid/豆瓣id]|[类型]"
"[id]历史记录编号", userid=userid))
if not arg_str:
@@ -467,31 +462,32 @@ class TransferChain(ChainBase):
if not logid.isdigit():
args_error()
return
# TMDB ID
tmdb_strs = arg_strs[1].split('|')
tmdbid = tmdb_strs[0]
# TMDBID/豆瓣ID
id_strs = arg_strs[1].split('|')
media_id = id_strs[0]
if not logid.isdigit():
args_error()
return
# 类型
type_str = tmdb_strs[1] if len(tmdb_strs) > 1 else None
type_str = id_strs[1] if len(id_strs) > 1 else None
if not type_str or type_str not in [MediaType.MOVIE.value, MediaType.TV.value]:
args_error()
return
state, errmsg = self.re_transfer(logid=int(logid),
mtype=MediaType(type_str), tmdbid=int(tmdbid))
mtype=MediaType(type_str),
mediaid=media_id)
if not state:
self.post_message(Notification(channel=channel, title="手动整理失败",
text=errmsg, userid=userid))
return
def re_transfer(self, logid: int,
mtype: MediaType = None, tmdbid: int = None) -> Tuple[bool, str]:
def re_transfer(self, logid: int, mtype: MediaType = None,
mediaid: str = None) -> Tuple[bool, str]:
"""
根据历史记录,重新识别转移,只支持简单条件
:param logid: 历史记录ID
:param mtype: 媒体类型
:param tmdbid: TMDB ID
:param mediaid: TMDB ID/豆瓣ID
"""
# 查询历史记录
history: TransferHistory = self.transferhis.get(logid)
@@ -504,17 +500,18 @@ class TransferChain(ChainBase):
return False, f"源目录不存在:{src_path}"
dest_path = Path(history.dest) if history.dest else None
# 查询媒体信息
if mtype and tmdbid:
mediainfo = self.recognize_media(mtype=mtype, tmdbid=tmdbid)
if mtype and mediaid:
mediainfo = self.recognize_media(mtype=mtype, tmdbid=int(mediaid) if str(mediaid).isdigit() else None,
doubanid=mediaid)
if mediainfo:
# 更新媒体图片
self.obtain_images(mediainfo=mediainfo)
else:
meta = MetaInfoPath(src_path)
mediainfo = self.recognize_media(meta=meta)
mediainfo = self.mediachain.recognize_by_path(str(src_path))
if not mediainfo:
return False, f"未识别到媒体信息,类型:{mtype.value}tmdbid{tmdbid}"
return False, f"未识别到媒体信息,类型:{mtype.value}id{mediaid}"
# 重新执行转移
logger.info(f"{src_path.name} 识别为:{mediainfo.title_year}")
# 更新媒体图片
self.obtain_images(mediainfo=mediainfo)
# 删除旧的已整理文件
if history.dest:
@@ -617,14 +614,15 @@ class TransferChain(ChainBase):
title=msg_title, text=msg_str, image=mediainfo.get_message_image()))
@staticmethod
def delete_files(path: Path):
def delete_files(path: Path) -> Tuple[bool, str]:
"""
删除转移后的文件以及空目录
:param path: 文件路径
:return: 成功标识,错误信息
"""
logger.info(f"开始删除文件以及空目录:{path} ...")
if not path.exists():
return
return True, f"文件或目录不存在:{path}"
if path.is_file():
# 删除文件、nfo、jpg等同名文件
pattern = path.stem.replace('[', '?').replace(']', '?')
@@ -636,7 +634,7 @@ class TransferChain(ChainBase):
elif str(path.parent) == str(path.root):
# 根目录,不删除
logger.warn(f"根目录 {path} 不能删除!")
return
return False, f"根目录 {path} 不能删除!"
else:
# 非根目录,才删除目录
shutil.rmtree(path)
@@ -662,5 +660,10 @@ class TransferChain(ChainBase):
# 父目录非根目录,才删除父目录
if not SystemUtils.exits_files(parent_path, settings.RMT_MEDIAEXT):
# 当前路径下没有媒体文件则删除
shutil.rmtree(parent_path)
try:
shutil.rmtree(parent_path)
except Exception as e:
logger.error(f"删除目录 {parent_path} 失败:{str(e)}")
return False, f"删除目录 {parent_path} 失败:{str(e)}"
logger.warn(f"目录 {parent_path} 已删除")
return True, ""

View File

@@ -1,11 +1,8 @@
import time
from typing import Any
from app.chain import ChainBase
from app.schemas import Notification
from app.schemas.types import EventType, MediaImageType, MediaType, NotificationType
from app.schemas.types import EventType
from app.utils.singleton import Singleton
from app.utils.web import WebUtils
class WebhookChain(ChainBase, metaclass=Singleton):
@@ -15,7 +12,7 @@ class WebhookChain(ChainBase, metaclass=Singleton):
def message(self, body: Any, form: Any, args: Any) -> None:
"""
处理Webhook报文并发送消息
处理Webhook报文并发送事件
"""
# 获取主体内容
event_info = self.webhook_parser(body=body, form=form, args=args)
@@ -23,76 +20,3 @@ class WebhookChain(ChainBase, metaclass=Singleton):
return
# 广播事件
self.eventmanager.send_event(EventType.WebhookMessage, event_info)
# 拼装消息内容
_webhook_actions = {
"library.new": "新入库",
"system.webhooktest": "测试",
"playback.start": "开始播放",
"playback.stop": "停止播放",
"user.authenticated": "登录成功",
"user.authenticationfailed": "登录失败",
"media.play": "开始播放",
"media.stop": "停止播放",
"PlaybackStart": "开始播放",
"PlaybackStop": "停止播放",
"item.rate": "标记了"
}
_webhook_images = {
"emby": "https://emby.media/notificationicon.png",
"plex": "https://www.plex.tv/wp-content/uploads/2022/04/new-logo-process-lines-gray.png",
"jellyfin": "https://play-lh.googleusercontent.com/SCsUK3hCCRqkJbmLDctNYCfehLxsS4ggD1ZPHIFrrAN1Tn9yhjmGMPep2D9lMaaa9eQi"
}
if not _webhook_actions.get(event_info.event):
return
# 消息标题
if event_info.item_type in ["TV", "SHOW"]:
message_title = f"{_webhook_actions.get(event_info.event)}剧集 {event_info.item_name}"
elif event_info.item_type == "MOV":
message_title = f"{_webhook_actions.get(event_info.event)}电影 {event_info.item_name}"
elif event_info.item_type == "AUD":
message_title = f"{_webhook_actions.get(event_info.event)}有声书 {event_info.item_name}"
else:
message_title = f"{_webhook_actions.get(event_info.event)}"
# 消息内容
message_texts = []
if event_info.user_name:
message_texts.append(f"用户:{event_info.user_name}")
if event_info.device_name:
message_texts.append(f"设备:{event_info.client} {event_info.device_name}")
if event_info.ip:
message_texts.append(f"IP地址{event_info.ip} {WebUtils.get_location(event_info.ip)}")
if event_info.percentage:
percentage = round(float(event_info.percentage), 2)
message_texts.append(f"进度:{percentage}%")
if event_info.overview:
message_texts.append(f"剧情:{event_info.overview}")
message_texts.append(f"时间:{time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))}")
# 消息内容
message_content = "\n".join(message_texts)
# 消息图片
image_url = event_info.image_url
# 查询剧集图片
if (event_info.tmdb_id
and event_info.season_id
and event_info.episode_id):
specific_image = self.obtain_specific_image(
mediaid=event_info.tmdb_id,
mtype=MediaType.TV,
image_type=MediaImageType.Backdrop,
season=event_info.season_id,
episode=event_info.episode_id
)
if specific_image:
image_url = specific_image
# 使用默认图片
if not image_url:
image_url = _webhook_images.get(event_info.channel)
# 发送消息
self.post_message(Notification(mtype=NotificationType.MediaServer,
title=message_title, text=message_content, image=image_url))

View File

@@ -39,16 +39,18 @@ class Settings(BaseSettings):
SUPERUSER_PASSWORD: str = "password"
# API密钥需要更换
API_TOKEN: str = "moviepilot"
# 登录页面电影海报,tmdb/bing
WALLPAPER: str = "tmdb"
# 网络代理 IP:PORT
PROXY_HOST: str = None
# 媒体信息搜索来源
SEARCH_SOURCE: str = "themoviedb"
# 媒体识别来源 themoviedb/douban
RECOGNIZE_SOURCE: str = "themoviedb"
# 刮削来源 themoviedb/douban
SCRAP_SOURCE: str = "themoviedb"
# 刮削入库的媒体文件
SCRAP_METADATA: bool = True
# 新增已入库媒体是否跟随TMDB信息变化
SCRAP_FOLLOW_TMDB: bool = True
# 刮削来源
SCRAP_SOURCE: str = "themoviedb"
# TMDB图片地址
TMDB_IMAGE_DOMAIN: str = "image.tmdb.org"
# TMDB API地址
@@ -127,6 +129,10 @@ class Settings(BaseSettings):
QB_PASSWORD: str = None
# Qbittorrent分类自动管理
QB_CATEGORY: bool = False
# Qbittorrent按顺序下载
QB_SEQUENTIAL: bool = True
# Qbittorrent忽略队列限制强制继续
QB_FORCE_RESUME: bool = False
# Transmission地址IP:PORT
TR_HOST: str = None
# Transmission用户名
@@ -149,8 +155,6 @@ class Settings(BaseSettings):
DOWNLOAD_SUBTITLE: bool = True
# 媒体服务器 emby/jellyfin/plex多个媒体服务器,分割
MEDIASERVER: str = "emby"
# 入库刷新媒体库
REFRESH_MEDIASERVER: bool = True
# 媒体服务器同步间隔(小时)
MEDIASERVER_SYNC_INTERVAL: int = 6
# 媒体服务器同步黑名单,多个媒体库名称,分割
@@ -202,8 +206,12 @@ class Settings(BaseSettings):
"/Season {{season}}" \
"/{{title}} - {{season_episode}}{% if part %}-{{part}}{% endif %}{% if episode %} - 第 {{episode}} 集{% endif %}" \
"{{fileExt}}"
# 转移时覆盖模式
OVERWRITE_MODE: str = "size"
# 大内存模式
BIG_MEMORY_MODE: bool = False
# 插件市场仓库地址,多个地址使用,分隔,地址以/结尾
PLUGIN_MARKET: str = "https://raw.githubusercontent.com/jxxghp/MoviePilot-Plugins/main/"
@property
def INNER_CONFIG_PATH(self):
@@ -327,12 +335,6 @@ class Settings(BaseSettings):
with self.LOG_PATH as p:
if not p.exists():
p.mkdir(parents=True, exist_ok=True)
with self.SAVE_PATH as p:
if not p.exists():
p.mkdir(parents=True, exist_ok=True)
for path in self.LIBRARY_PATHS:
if not path.exists():
path.mkdir(parents=True, exist_ok=True)
class Config:
case_sensitive = True

View File

@@ -414,24 +414,31 @@ class MediaInfo:
# 豆瓣ID
self.douban_id = str(info.get("id"))
# 类型
if not self.type:
if isinstance(info.get('media_type'), MediaType):
self.type = info.get('media_type')
else:
elif info.get("type"):
self.type = MediaType.MOVIE if info.get("type") == "movie" else MediaType.TV
elif info.get("type_name"):
self.type = MediaType(info.get("type_name"))
# 标题
if not self.title:
self.title = info.get("title")
# 识别标题中的季
meta = MetaInfo(self.title)
self.season = meta.begin_season
# 原语种标题
if not self.original_title:
self.original_title = info.get("original_title")
# 年份
if not self.year:
self.year = info.get("year")[:4] if info.get("year") else None
# 识别标题中的季
meta = MetaInfo(info.get("title"))
# 季
if not self.season:
self.season = meta.begin_season
if self.season:
self.type = MediaType.TV
elif not self.type:
self.type = MediaType.MOVIE
# 评分
if not self.vote_average:
rating = info.get("rating")
@@ -472,14 +479,22 @@ class MediaInfo:
self.actors = info.get("actors") or []
# 别名
if not self.names:
self.names = info.get("aka") or []
akas = info.get("aka")
if akas:
self.names = [re.sub(r'\([港台豆友译名]+\)', "", aka) for aka in akas]
# 剧集
if self.type == MediaType.TV and not self.seasons:
meta = MetaInfo(info.get("title"))
if meta.begin_season:
episodes_count = info.get("episodes_count")
if episodes_count:
self.seasons[meta.begin_season] = list(range(1, episodes_count + 1))
season = meta.begin_season or 1
episodes_count = info.get("episodes_count")
if episodes_count:
self.seasons[season] = list(range(1, episodes_count + 1))
# 季年份
if self.type == MediaType.TV and not self.season_years:
season = self.season or 1
self.season_years = {
season: self.year
}
# 剩余属性赋值
for key, value in info.items():
if not hasattr(self, key):

View File

@@ -1,4 +1,5 @@
from queue import Queue, Empty
from typing import Dict, Any
from app.log import logger
from app.utils.singleton import Singleton
@@ -14,7 +15,7 @@ class EventManager(metaclass=Singleton):
# 事件队列
self._eventQueue = Queue()
# 事件响应函数字典
self._handlers = {}
self._handlers: Dict[str, Dict[str, Any]] = {}
# 已禁用的事件响应
self._disabled_handlers = []
@@ -24,12 +25,13 @@ class EventManager(metaclass=Singleton):
"""
try:
event = self._eventQueue.get(block=True, timeout=1)
handlerList = self._handlers.get(event.event_type) or []
if handlerList:
handlers = self._handlers.get(event.event_type) or {}
if handlers:
# 去除掉被禁用的事件响应
handlerList = [handler for handler in handlerList
handlerList = [handler for handler in handlers.values()
if handler.__qualname__.split(".")[0] not in self._disabled_handlers]
return event, handlerList
return event, handlerList
return event, []
except Empty:
return None, []
@@ -44,13 +46,15 @@ class EventManager(metaclass=Singleton):
注册事件处理
"""
try:
handlerList = self._handlers[etype.value]
handlers = self._handlers[etype.value]
except KeyError:
handlerList = []
self._handlers[etype.value] = handlerList
if handler not in handlerList:
handlerList.append(handler)
logger.debug(f"Event Registed{etype.value} - {handler}")
handlers = {}
self._handlers[etype.value] = handlers
if handler.__qualname__ in handlers:
handlers.pop(handler.__qualname__)
else:
logger.debug(f"Event Registed{etype.value} - {handler.__qualname__}")
handlers[handler.__qualname__] = handler
def disable_events_hander(self, class_name: str):
"""

View File

@@ -59,6 +59,9 @@ class MetaBase(object):
audio_encode: Optional[str] = None
# 应用的识别词信息
apply_words: Optional[List[str]] = None
# 附加信息
tmdbid: int = None
doubanid: str = None
# 副标题解析
_subtitle_flag = False

View File

@@ -1,10 +1,12 @@
from pathlib import Path
from typing import Tuple
import regex as re
from app.core.config import settings
from app.core.meta import MetaAnime, MetaVideo, MetaBase
from app.core.meta.words import WordsMatcher
from app.schemas.types import MediaType
def MetaInfo(title: str, subtitle: str = None) -> MetaBase:
@@ -18,6 +20,8 @@ def MetaInfo(title: str, subtitle: str = None) -> MetaBase:
org_title = title
# 预处理标题
title, apply_words = WordsMatcher().prepare(title)
# 获取标题中媒体信息
title, metainfo = find_metainfo(title)
# 判断是否处理文件
if title and Path(title).suffix.lower() in settings.RMT_MEDIAEXT:
isfile = True
@@ -29,7 +33,25 @@ def MetaInfo(title: str, subtitle: str = None) -> MetaBase:
meta.title = org_title
# 记录使用的识别词
meta.apply_words = apply_words or []
# 修正媒体信息
if metainfo.get('tmdbid'):
meta.tmdbid = metainfo['tmdbid']
if metainfo.get('doubanid'):
meta.tmdbid = metainfo['doubanid']
if metainfo.get('type'):
meta.type = metainfo['type']
if metainfo.get('begin_season'):
meta.begin_season = metainfo['begin_season']
if metainfo.get('end_season'):
meta.end_season = metainfo['end_season']
if metainfo.get('total_season'):
meta.total_season = metainfo['total_season']
if metainfo.get('begin_episode'):
meta.begin_episode = metainfo['begin_episode']
if metainfo.get('end_episode'):
meta.end_episode = metainfo['end_episode']
if metainfo.get('total_episode'):
meta.total_episode = metainfo['total_episode']
return meta
@@ -65,3 +87,76 @@ def is_anime(name: str) -> bool:
if re.search(r'\[[+0-9XVPI-]+]\s*\[', name, re.IGNORECASE):
return True
return False
def find_metainfo(title: str) -> Tuple[str, dict]:
"""
从标题中提取媒体信息
"""
metainfo = {
'tmdbid': None,
'doubanid': None,
'type': None,
'begin_season': None,
'end_season': None,
'total_season': None,
'begin_episode': None,
'end_episode': None,
'total_episode': None,
}
if not title:
return title, metainfo
# 从标题中提取媒体信息 格式为{[tmdbid=xxx;type=xxx;s=xxx;e=xxx]}
results = re.findall(r'(?<={\[)[\W\w]+(?=]})', title)
if not results:
return title, metainfo
for result in results:
# 查找tmdbid信息
tmdbid = re.findall(r'(?<=tmdbid=)\d+', result)
if tmdbid and tmdbid[0].isdigit():
metainfo['tmdbid'] = tmdbid[0]
# 查找豆瓣id信息
doubanid = re.findall(r'(?<=doubanid=)\d+', result)
if doubanid and doubanid[0].isdigit():
metainfo['doubanid'] = doubanid[0]
# 查找媒体类型
mtype = re.findall(r'(?<=type=)\d+', result)
if mtype:
match mtype[0]:
case "movie":
metainfo['type'] = MediaType.MOVIE
case "tv":
metainfo['type'] = MediaType.TV
case _:
pass
# 查找季信息
begin_season = re.findall(r'(?<=s=)\d+', result)
if begin_season and begin_season[0].isdigit():
metainfo['begin_season'] = int(begin_season[0])
end_season = re.findall(r'(?<=s=\d+-)\d+', result)
if end_season and end_season[0].isdigit():
metainfo['end_season'] = int(end_season[0])
# 查找集信息
begin_episode = re.findall(r'(?<=e=)\d+', result)
if begin_episode and begin_episode[0].isdigit():
metainfo['begin_episode'] = int(begin_episode[0])
end_episode = re.findall(r'(?<=e=\d+-)\d+', result)
if end_episode and end_episode[0].isdigit():
metainfo['end_episode'] = int(end_episode[0])
# 去除title中该部分
if tmdbid or mtype or begin_season or end_season or begin_episode or end_episode:
title = title.replace(f"{{[{result}]}}", '')
# 计算季集总数
if metainfo.get('begin_season') and metainfo.get('end_season'):
if metainfo['begin_season'] > metainfo['end_season']:
metainfo['begin_season'], metainfo['end_season'] = metainfo['end_season'], metainfo['begin_season']
metainfo['total_season'] = metainfo['end_season'] - metainfo['begin_season'] + 1
elif metainfo.get('begin_season') and not metainfo.get('end_season'):
metainfo['total_season'] = 1
if metainfo.get('begin_episode') and metainfo.get('end_episode'):
if metainfo['begin_episode'] > metainfo['end_episode']:
metainfo['begin_episode'], metainfo['end_episode'] = metainfo['end_episode'], metainfo['begin_episode']
metainfo['total_episode'] = metainfo['end_episode'] - metainfo['begin_episode'] + 1
elif metainfo.get('begin_episode') and not metainfo.get('end_episode'):
metainfo['total_episode'] = 1
return title, metainfo

View File

@@ -1,14 +1,17 @@
import traceback
from typing import List, Any, Dict, Tuple
from app.core.config import settings
from app.core.event import eventmanager
from app.db.systemconfig_oper import SystemConfigOper
from app.helper.module import ModuleHelper
from app.helper.plugin import PluginHelper
from app.helper.sites import SitesHelper
from app.log import logger
from app.schemas.types import SystemConfigKey
from app.utils.object import ObjectUtils
from app.utils.singleton import Singleton
from app.utils.string import StringUtils
class PluginManager(metaclass=Singleton):
@@ -26,11 +29,12 @@ class PluginManager(metaclass=Singleton):
def __init__(self):
self.siteshelper = SitesHelper()
self.pluginhelper = PluginHelper()
self.systemconfig = SystemConfigOper()
self.install_online_plugin()
self.init_config()
def init_config(self):
# 配置管理
self.systemconfig = SystemConfigOper()
# 停止已有插件
self.stop()
# 启动插件
@@ -40,7 +44,6 @@ class PluginManager(metaclass=Singleton):
"""
启动加载插件
"""
# 扫描插件目录
plugins = ModuleHelper.load(
"app.plugins",
@@ -98,6 +101,33 @@ class PluginManager(metaclass=Singleton):
self._plugins = {}
self._running_plugins = {}
def install_online_plugin(self):
"""
安装本地不存在的在线插件
"""
logger.info("开始安装在线插件...")
# 已安装插件
install_plugins = self.systemconfig.get(SystemConfigKey.UserInstalledPlugins) or []
# 在线插件
online_plugins = self.get_online_plugins()
if not online_plugins:
logger.error("未获取到在线插件")
return
# 支持更新的插件自动更新
for plugin in online_plugins:
# 只处理已安装的插件
if plugin.get("id") in install_plugins and not self.is_plugin_exists(plugin.get("id")):
# 下载安装
state, msg = self.pluginhelper.install(pid=plugin.get("id"),
repo_url=plugin.get("repo_url"))
# 安装失败
if not state:
logger.error(
f"插件 {plugin.get('plugin_name')} v{plugin.get('plugin_version')} 安装失败:{msg}")
continue
logger.info(f"插件 {plugin.get('plugin_name')} 安装成功,版本:{plugin.get('plugin_version')}")
logger.info("在线插件安装完成")
def get_plugin_config(self, pid: str) -> dict:
"""
获取插件配置
@@ -188,9 +218,93 @@ class PluginManager(metaclass=Singleton):
"""
return list(self._plugins.keys())
def get_plugin_apps(self) -> List[dict]:
def get_online_plugins(self) -> List[dict]:
"""
获取所有插件信息
获取所有在线插件信息
"""
# 返回值
all_confs = []
if not settings.PLUGIN_MARKET:
return all_confs
# 已安装插件
installed_apps = self.systemconfig.get(SystemConfigKey.UserInstalledPlugins) or []
# 线上插件列表
markets = settings.PLUGIN_MARKET.split(",")
for market in markets:
online_plugins = self.pluginhelper.get_plugins(market) or {}
for pid, plugin in online_plugins.items():
# 运行状插件
plugin_obj = self._running_plugins.get(pid)
# 非运行态插件
plugin_static = self._plugins.get(pid)
# 基本属性
conf = {}
# ID
conf.update({"id": pid})
# 安装状态
if pid in installed_apps and plugin_static:
conf.update({"installed": True})
else:
conf.update({"installed": False})
# 是否有新版本
conf.update({"has_update": False})
if plugin_static:
installed_version = getattr(plugin_static, "plugin_version")
if StringUtils.compare_version(installed_version, plugin.get("version")) < 0:
# 需要更新
conf.update({"has_update": True})
# 运行状态
if plugin_obj and hasattr(plugin_obj, "get_state"):
try:
state = plugin_obj.get_state()
except Exception as e:
logger.error(f"获取插件 {pid} 状态出错:{str(e)}")
state = False
conf.update({"state": state})
else:
conf.update({"state": False})
# 是否有详情页面
conf.update({"has_page": False})
if plugin_obj and hasattr(plugin_obj, "get_page"):
if ObjectUtils.check_method(plugin_obj.get_page):
conf.update({"has_page": True})
# 权限
if plugin.get("level"):
conf.update({"auth_level": plugin.get("level")})
if self.siteshelper.auth_level < plugin.get("level"):
continue
# 名称
if plugin.get("name"):
conf.update({"plugin_name": plugin.get("name")})
# 描述
if plugin.get("description"):
conf.update({"plugin_desc": plugin.get("description")})
# 版本
if plugin.get("version"):
conf.update({"plugin_version": plugin.get("version")})
# 图标
if plugin.get("icon"):
conf.update({"plugin_icon": plugin.get("icon")})
# 主题色
if plugin.get("color"):
conf.update({"plugin_color": plugin.get("color")})
# 作者
if plugin.get("author"):
conf.update({"plugin_author": plugin.get("author")})
# 仓库链接
conf.update({"repo_url": market})
# 本地标志
conf.update({"is_local": False})
# 汇总
all_confs.append(conf)
# 按插件ID去重
if all_confs:
all_confs = list({v["id"]: v for v in all_confs}.values())
return all_confs
def get_local_plugins(self) -> List[dict]:
"""
获取所有本地已下载的插件信息
"""
# 返回值
all_confs = []
@@ -209,8 +323,13 @@ class PluginManager(metaclass=Singleton):
else:
conf.update({"installed": False})
# 运行状态
if plugin_obj and hasattr(plugin, "get_state"):
conf.update({"state": plugin_obj.get_state()})
if plugin_obj and hasattr(plugin_obj, "get_state"):
try:
state = plugin_obj.get_state()
except Exception as e:
logger.error(f"获取插件 {pid} 状态出错:{str(e)}")
state = False
conf.update({"state": state})
else:
conf.update({"state": False})
# 是否有详情页面
@@ -221,6 +340,7 @@ class PluginManager(metaclass=Singleton):
conf.update({"has_page": False})
# 权限
if hasattr(plugin, "auth_level"):
conf.update({"auth_level": plugin.auth_level})
if self.siteshelper.auth_level < plugin.auth_level:
continue
# 名称
@@ -244,6 +364,20 @@ class PluginManager(metaclass=Singleton):
# 作者链接
if hasattr(plugin, "author_url"):
conf.update({"author_url": plugin.author_url})
# 是否需要更新
conf.update({"has_update": False})
# 本地标志
conf.update({"is_local": True})
# 汇总
all_confs.append(conf)
return all_confs
@staticmethod
def is_plugin_exists(pid: str) -> bool:
"""
判断插件是否存在
"""
if not pid:
return False
plugin_dir = settings.ROOT_PATH / "app" / "plugins" / pid.lower()
return plugin_dir.exists()

View File

@@ -52,6 +52,44 @@ def verify_token(token: str = Depends(reusable_oauth2)) -> schemas.TokenPayload:
)
def get_token(token: str = None) -> str:
"""
从请求URL中获取token
"""
return token
def get_apikey(apikey: str = None) -> str:
"""
从请求URL中获取apikey
"""
return apikey
def verify_uri_token(token: str = Depends(get_token)) -> str:
"""
通过依赖项使用token进行身份认证
"""
if token != settings.API_TOKEN:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="token校验不通过"
)
return token
def verify_uri_apikey(apikey: str = Depends(get_apikey)) -> str:
"""
通过依赖项使用apikey进行身份认证
"""
if apikey != settings.API_TOKEN:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="apikey校验不通过"
)
return apikey
def verify_password(plain_password: str, hashed_password: str) -> bool:
return pwd_context.verify(plain_password, hashed_password)

View File

@@ -108,10 +108,10 @@ class DownloadHistoryOper(DbOper):
episode=episode,
tmdbid=tmdbid)
def list_by_user_date(self, date: str, userid: str = None) -> List[DownloadHistory]:
def list_by_user_date(self, date: str, username: str = None) -> List[DownloadHistory]:
"""
查询某用户某时间之的下载历史
查询某用户某时间之的下载历史
"""
return DownloadHistory.list_by_user_date(db=self._db,
date=date,
userid=userid)
username=username)

View File

@@ -39,10 +39,12 @@ class MediaServerOper(DbOper):
# 优先按TMDBID查
item = MediaServerItem.exist_by_tmdbid(self._db, tmdbid=kwargs.get("tmdbid"),
mtype=kwargs.get("mtype"))
else:
elif kwargs.get("title"):
# 按标题、类型、年份查
item = MediaServerItem.exists_by_title(self._db, title=kwargs.get("title"),
mtype=kwargs.get("mtype"), year=kwargs.get("year"))
else:
return None
if not item:
return None

View File

@@ -38,6 +38,8 @@ class DownloadHistory(Base):
torrent_site = Column(String)
# 下载用户
userid = Column(String)
# 下载用户名/插件名
username = Column(String)
# 下载渠道
channel = Column(String)
# 创建时间
@@ -108,13 +110,13 @@ class DownloadHistory(Base):
@staticmethod
@db_query
def list_by_user_date(db: Session, date: str, userid: str = None):
def list_by_user_date(db: Session, date: str, username: str = None):
"""
查询某用户某时间之后的下载历史
"""
if userid:
if username:
result = db.query(DownloadHistory).filter(DownloadHistory.date < date,
DownloadHistory.userid == userid).order_by(
DownloadHistory.username == username).order_by(
DownloadHistory.id.desc()).all()
else:
result = db.query(DownloadHistory).filter(DownloadHistory.date < date).order_by(
@@ -165,7 +167,6 @@ class DownloadFiles(Base):
result = db.query(DownloadFiles).filter(DownloadFiles.savepath == savepath).all()
return list(result)
@staticmethod
@db_update
def delete_by_fullpath(db: Session, fullpath: str):
db.query(DownloadFiles).filter(DownloadFiles.fullpath == fullpath,

View File

@@ -69,11 +69,15 @@ class Subscribe(Base):
@staticmethod
@db_query
def exists(db: Session, tmdbid: int, season: int = None):
if season:
return db.query(Subscribe).filter(Subscribe.tmdbid == tmdbid,
Subscribe.season == season).first()
return db.query(Subscribe).filter(Subscribe.tmdbid == tmdbid).first()
def exists(db: Session, tmdbid: int = None, doubanid: str = None, season: int = None):
if tmdbid:
if season:
return db.query(Subscribe).filter(Subscribe.tmdbid == tmdbid,
Subscribe.season == season).first()
return db.query(Subscribe).filter(Subscribe.tmdbid == tmdbid).first()
elif doubanid:
return db.query(Subscribe).filter(Subscribe.doubanid == doubanid).first()
return None
@staticmethod
@db_query

View File

@@ -158,6 +158,13 @@ class TransferHistory(Base):
# 电视剧所有季集
result = db.query(TransferHistory).filter(TransferHistory.title == title,
TransferHistory.year == year).all()
# 类型 + 转移路径emby webhook season无tmdbid场景
elif mtype and season and dest:
# 电视剧某季
result = db.query(TransferHistory).filter(TransferHistory.type == mtype,
TransferHistory.seasons == season,
TransferHistory.dest.like(f"{dest}%")).all()
if result:
return list(result)
return []

View File

@@ -15,7 +15,10 @@ class SubscribeOper(DbOper):
"""
新增订阅
"""
subscribe = Subscribe.exists(self._db, tmdbid=mediainfo.tmdb_id, season=kwargs.get('season'))
subscribe = Subscribe.exists(self._db,
tmdbid=mediainfo.tmdb_id,
doubanid=mediainfo.douban_id,
season=kwargs.get('season'))
if not subscribe:
subscribe = Subscribe(name=mediainfo.title,
year=mediainfo.year,
@@ -23,6 +26,7 @@ class SubscribeOper(DbOper):
tmdbid=mediainfo.tmdb_id,
imdbid=mediainfo.imdb_id,
tvdbid=mediainfo.tvdb_id,
doubanid=mediainfo.douban_id,
poster=mediainfo.get_poster_image(),
backdrop=mediainfo.get_backdrop_image(),
vote=mediainfo.vote_average,
@@ -31,19 +35,26 @@ class SubscribeOper(DbOper):
**kwargs)
subscribe.create(self._db)
# 查询订阅
subscribe = Subscribe.exists(self._db, tmdbid=mediainfo.tmdb_id, season=kwargs.get('season'))
subscribe = Subscribe.exists(self._db,
tmdbid=mediainfo.tmdb_id,
doubanid=mediainfo.douban_id,
season=kwargs.get('season'))
return subscribe.id, "新增订阅成功"
else:
return subscribe.id, "订阅已存在"
def exists(self, tmdbid: int, season: int) -> bool:
def exists(self, tmdbid: int = None, doubanid: str = None, season: int = None) -> bool:
"""
判断是否存在
"""
if season:
return True if Subscribe.exists(self._db, tmdbid=tmdbid, season=season) else False
else:
return True if Subscribe.exists(self._db, tmdbid=tmdbid) else False
if tmdbid:
if season:
return True if Subscribe.exists(self._db, tmdbid=tmdbid, season=season) else False
else:
return True if Subscribe.exists(self._db, tmdbid=tmdbid) else False
elif doubanid:
return True if Subscribe.exists(self._db, doubanid=doubanid) else False
return False
def get(self, sid: int) -> Subscribe:
"""

View File

@@ -20,14 +20,18 @@ class ModuleHelper:
submodules: list = []
packages = importlib.import_module(package_path)
for importer, package_name, _ in pkgutil.iter_modules(packages.__path__):
if package_name.startswith('_'):
continue
full_package_name = f'{package_path}.{package_name}'
module = importlib.import_module(full_package_name)
for name, obj in module.__dict__.items():
if name.startswith('_'):
try:
if package_name.startswith('_'):
continue
if isinstance(obj, type) and filter_func(name, obj):
submodules.append(obj)
full_package_name = f'{package_path}.{package_name}'
module = importlib.import_module(full_package_name)
importlib.reload(module)
for name, obj in module.__dict__.items():
if name.startswith('_'):
continue
if isinstance(obj, type) and filter_func(name, obj):
submodules.append(obj)
except Exception as err:
print(f'加载模块 {package_name} 失败:{err}')
return submodules

127
app/helper/plugin.py Normal file
View File

@@ -0,0 +1,127 @@
import json
import shutil
from pathlib import Path
from typing import Dict, Tuple, Optional, List
from cachetools import TTLCache, cached
from app.core.config import settings
from app.utils.http import RequestUtils
from app.utils.singleton import Singleton
from app.utils.system import SystemUtils
class PluginHelper(metaclass=Singleton):
"""
插件市场管理,下载安装插件到本地
"""
@cached(cache=TTLCache(maxsize=10, ttl=1800))
def get_plugins(self, repo_url: str) -> Dict[str, dict]:
"""
获取Github所有最新插件列表
:param repo_url: Github仓库地址
"""
if not repo_url:
return {}
res = RequestUtils(proxies=settings.PROXY, timeout=10).get_res(f"{repo_url}package.json")
if res:
return json.loads(res.text)
return {}
@staticmethod
def install(pid: str, repo_url: str) -> Tuple[bool, str]:
"""
安装插件
"""
# 从Github的repo_url获取用户和项目名
try:
user, repo = repo_url.split("/")[-4:-2]
except Exception as e:
return False, f"不支持的插件仓库地址格式:{str(e)}"
if not user or not repo:
return False, "不支持的插件仓库地址格式"
if SystemUtils.is_frozen():
return False, "可执行文件模式下,只能安装本地插件"
def __get_filelist(_p: str) -> Tuple[Optional[list], Optional[str]]:
"""
获取插件的文件列表
"""
file_api = f"https://api.github.com/repos/{user}/{repo}/contents/plugins/{_p.lower()}"
r = RequestUtils(proxies=settings.PROXY).get_res(file_api)
if not r or r.status_code != 200:
return None, f"连接仓库失败:{r.status_code} - {r.reason}"
ret = r.json()
if ret and ret[0].get("message") == "Not Found":
return None, "插件在仓库中不存在"
return ret, ""
def __download_files(_p: str, _l: List[dict]) -> Tuple[bool, str]:
"""
下载插件文件
"""
if not _l:
return False, "文件列表为空"
for item in _l:
if item.get("download_url"):
# 下载插件文件
res = RequestUtils(proxies=settings.PROXY).get_res(item["download_url"])
if not res:
return False, f"文件 {item.get('name')} 下载失败!"
elif res.status_code != 200:
return False, f"下载文件 {item.get('name')} 失败:{res.status_code} - {res.reason}"
# 创建插件文件夹
file_path = Path(settings.ROOT_PATH) / "app" / item.get("path")
if not file_path.parent.exists():
file_path.parent.mkdir(parents=True, exist_ok=True)
with open(file_path, "w", encoding="utf-8") as f:
f.write(res.text)
else:
# 递归下载子目录
p = f"{_p}/{item.get('name')}"
l, m = __get_filelist(p)
if not l:
return False, m
return __download_files(p, l)
return True, ""
if not pid or not repo_url:
return False, "参数错误"
# 获取插件的文件列表
"""
[
{
"name": "__init__.py",
"path": "plugins/autobackup/__init__.py",
"sha": "cd10eba3f0355d61adeb35561cb26a0a36c15a6c",
"size": 12385,
"url": "https://api.github.com/repos/jxxghp/MoviePilot-Plugins/contents/plugins/autobackup/__init__.py?ref=main",
"html_url": "https://github.com/jxxghp/MoviePilot-Plugins/blob/main/plugins/autobackup/__init__.py",
"git_url": "https://api.github.com/repos/jxxghp/MoviePilot-Plugins/git/blobs/cd10eba3f0355d61adeb35561cb26a0a36c15a6c",
"download_url": "https://raw.githubusercontent.com/jxxghp/MoviePilot-Plugins/main/plugins/autobackup/__init__.py",
"type": "file",
"_links": {
"self": "https://api.github.com/repos/jxxghp/MoviePilot-Plugins/contents/plugins/autobackup/__init__.py?ref=main",
"git": "https://api.github.com/repos/jxxghp/MoviePilot-Plugins/git/blobs/cd10eba3f0355d61adeb35561cb26a0a36c15a6c",
"html": "https://github.com/jxxghp/MoviePilot-Plugins/blob/main/plugins/autobackup/__init__.py"
}
}
]
"""
# 获取第一级文件列表
file_list, msg = __get_filelist(pid.lower())
if not file_list:
return False, msg
# 本地存在时先删除
plugin_dir = Path(settings.ROOT_PATH) / "app" / "plugins" / pid.lower()
if plugin_dir.exists():
shutil.rmtree(plugin_dir, ignore_errors=True)
# 下载所有文件
__download_files(pid.lower(), file_list)
# 插件目录下如有requirements.txt则安装依赖
requirements_file = plugin_dir / "requirements.txt"
if requirements_file.exists():
SystemUtils.execute(f"pip install -r {requirements_file}")
return True, ""

View File

@@ -236,7 +236,6 @@ class RssHelper:
ret = RequestUtils(proxies=settings.PROXY if proxy else None).get_res(url)
if not ret:
return []
ret.encoding = ret.apparent_encoding
except Exception as err:
print(str(err))
return []

Binary file not shown.

View File

@@ -9,6 +9,7 @@ from app.core.metainfo import MetaInfo
from app.log import logger
from app.modules import _ModuleBase
from app.modules.douban.apiv2 import DoubanApi
from app.modules.douban.douban_cache import DoubanCache
from app.modules.douban.scraper import DoubanScraper
from app.schemas.types import MediaType
from app.utils.common import retry
@@ -18,10 +19,12 @@ from app.utils.system import SystemUtils
class DoubanModule(_ModuleBase):
doubanapi: DoubanApi = None
scraper: DoubanScraper = None
cache: DoubanCache = None
def init_module(self) -> None:
self.doubanapi = DoubanApi()
self.scraper = DoubanScraper()
self.cache = DoubanCache()
def stop(self):
pass
@@ -29,10 +32,87 @@ class DoubanModule(_ModuleBase):
def init_setting(self) -> Tuple[str, Union[str, bool]]:
pass
def douban_info(self, doubanid: str) -> Optional[dict]:
def recognize_media(self, meta: MetaBase = None,
mtype: MediaType = None,
doubanid: str = None,
**kwargs) -> Optional[MediaInfo]:
"""
识别媒体信息
:param meta: 识别的元数据
:param mtype: 识别的媒体类型与doubanid配套
:param doubanid: 豆瓣ID
:return: 识别的媒体信息,包括剧集信息
"""
if settings.RECOGNIZE_SOURCE != "douban":
return None
if not meta:
cache_info = {}
else:
if mtype:
meta.type = mtype
cache_info = self.cache.get(meta)
if not cache_info:
# 缓存没有或者强制不使用缓存
if doubanid:
# 直接查询详情
info = self.douban_info(doubanid=doubanid, mtype=mtype or meta.type)
elif meta:
if meta.begin_season:
logger.info(f"正在识别 {meta.name}{meta.begin_season}季 ...")
else:
logger.info(f"正在识别 {meta.name} ...")
# 匹配豆瓣信息
match_info = self.match_doubaninfo(name=meta.name,
mtype=mtype or meta.type,
year=meta.year,
season=meta.begin_season)
if match_info:
# 匹配到豆瓣信息
info = self.douban_info(
doubanid=match_info.get("id"),
mtype=mtype or meta.type
)
else:
logger.info(f"{meta.name if meta else doubanid} 未匹配到豆瓣媒体信息")
return None
else:
logger.error("识别媒体信息时未提供元数据或豆瓣ID")
return None
# 保存到缓存
if meta:
self.cache.update(meta, info)
else:
# 使用缓存信息
if cache_info.get("title"):
logger.info(f"{meta.name} 使用豆瓣识别缓存:{cache_info.get('title')}")
info = self.douban_info(mtype=cache_info.get("type"),
doubanid=cache_info.get("id"))
else:
logger.info(f"{meta.name} 使用豆瓣识别缓存:无法识别")
info = None
if info:
# 赋值TMDB信息并返回
mediainfo = MediaInfo(douban_info=info)
if meta:
logger.info(f"{meta.name} 豆瓣识别结果:{mediainfo.type.value} "
f"{mediainfo.title_year} "
f"{mediainfo.douban_id}")
else:
logger.info(f"{doubanid} 豆瓣识别结果:{mediainfo.type.value} "
f"{mediainfo.title_year}")
return mediainfo
else:
logger.info(f"{meta.name if meta else doubanid} 未匹配到豆瓣媒体信息")
return None
def douban_info(self, doubanid: str, mtype: MediaType = None) -> Optional[dict]:
"""
获取豆瓣信息
:param doubanid: 豆瓣ID
:param mtype: 媒体类型
:return: 豆瓣信息
"""
"""
@@ -300,22 +380,40 @@ class DoubanModule(_ModuleBase):
"interest_cmt_earlier_tip_desc": "该短评的发布时间早于公开上映时间,作者可能通过其他渠道提前观看,请谨慎参考。其评分将不计入总评分。"
}
"""
def __douban_tv():
"""
获取豆瓣剧集信息
"""
info = self.doubanapi.tv_detail(doubanid)
if info:
celebrities = self.doubanapi.tv_celebrities(doubanid)
if celebrities:
info["directors"] = celebrities.get("directors")
info["actors"] = celebrities.get("actors")
return info
def __douban_movie():
"""
获取豆瓣电影信息
"""
info = self.doubanapi.movie_detail(doubanid)
if info:
celebrities = self.doubanapi.movie_celebrities(doubanid)
if celebrities:
info["directors"] = celebrities.get("directors")
info["actors"] = celebrities.get("actors")
return info
if not doubanid:
return None
logger.info(f"开始获取豆瓣信息:{doubanid} ...")
douban_info = self.doubanapi.movie_detail(doubanid)
if douban_info:
celebrities = self.doubanapi.movie_celebrities(doubanid)
if celebrities:
douban_info["directors"] = celebrities.get("directors")
douban_info["actors"] = celebrities.get("actors")
if mtype == MediaType.TV:
return __douban_tv()
elif mtype == MediaType.MOVIE:
return __douban_movie()
else:
douban_info = self.doubanapi.tv_detail(doubanid)
celebrities = self.doubanapi.tv_celebrities(doubanid)
if douban_info and celebrities:
douban_info["directors"] = celebrities.get("directors")
douban_info["actors"] = celebrities.get("actors")
return douban_info
return __douban_movie() or __douban_tv()
def douban_discover(self, mtype: MediaType, sort: str, tags: str,
page: int = 1, count: int = 30) -> Optional[List[dict]]:
@@ -379,6 +477,26 @@ class DoubanModule(_ModuleBase):
return []
return infos.get("subject_collection_items")
def movie_hot(self, page: int = 1, count: int = 30) -> List[dict]:
"""
获取豆瓣热门电影
"""
infos = self.doubanapi.movie_hot_gaia(start=(page - 1) * count,
count=count)
if not infos:
return []
return infos.get("subject_collection_items")
def tv_hot(self, page: int = 1, count: int = 30) -> List[dict]:
"""
获取豆瓣热门剧集
"""
infos = self.doubanapi.tv_hot(start=(page - 1) * count,
count=count)
if not infos:
return []
return infos.get("subject_collection_items")
def search_medias(self, meta: MetaBase) -> Optional[List[MediaInfo]]:
"""
搜索媒体信息
@@ -386,7 +504,7 @@ class DoubanModule(_ModuleBase):
:reutrn: 媒体信息
"""
# 未启用豆瓣搜索时返回None
if settings.SEARCH_SOURCE != "douban":
if settings.RECOGNIZE_SOURCE != "douban":
return None
if not meta.name:
@@ -397,7 +515,7 @@ class DoubanModule(_ModuleBase):
# 返回数据
ret_medias = []
for item_obj in result.get("items"):
if meta.type and meta.type.value != item_obj.get("type_name"):
if meta.type and meta.type != MediaType.UNKNOWN and meta.type.value != item_obj.get("type_name"):
continue
if item_obj.get("type_name") not in (MediaType.TV.value, MediaType.MOVIE.value):
continue
@@ -407,12 +525,12 @@ class DoubanModule(_ModuleBase):
@retry(Exception, 5, 3, 3, logger=logger)
def match_doubaninfo(self, name: str, imdbid: str = None,
mtype: str = None, year: str = None, season: int = None) -> dict:
mtype: MediaType = None, year: str = None, season: int = None) -> dict:
"""
搜索和匹配豆瓣信息
:param name: 名称
:param imdbid: IMDB ID
:param mtype: 类型 电影/电视剧
:param mtype: 类型
:param year: 年份
:param season: 季号
"""
@@ -441,7 +559,7 @@ class DoubanModule(_ModuleBase):
type_name = item_obj.get("type_name")
if type_name not in [MediaType.TV.value, MediaType.MOVIE.value]:
continue
if mtype and mtype != type_name:
if mtype and mtype.value != type_name:
continue
if mtype == MediaType.TV and not season:
season = 1
@@ -486,17 +604,20 @@ class DoubanModule(_ModuleBase):
meta = MetaInfo(path.stem)
if not meta.name:
return
# 根据名称查询豆瓣数据
doubaninfo = self.match_doubaninfo(name=mediainfo.title,
imdbid=mediainfo.imdb_id,
mtype=mediainfo.type.value,
year=mediainfo.year,
season=meta.begin_season)
if not doubaninfo:
logger.warn(f"未找到 {mediainfo.title} 的豆瓣信息")
return
# 查询豆瓣详情
doubaninfo = self.douban_info(doubaninfo.get("id"))
if not mediainfo.douban_id:
# 根据名称查询豆瓣数据
doubaninfo = self.match_doubaninfo(name=mediainfo.title,
imdbid=mediainfo.imdb_id,
mtype=mediainfo.type,
year=mediainfo.year)
if not doubaninfo:
logger.warn(f"未找到 {mediainfo.title} 的豆瓣信息")
return
doubaninfo = self.douban_info(doubanid=doubaninfo.get("id"), mtype=mediainfo.type)
else:
doubaninfo = self.douban_info(doubanid=mediainfo.douban_id,
mtype=mediainfo.type)
# 刮削路径
scrape_path = path / path.name
self.scraper.gen_scraper_files(meta=meta,
@@ -513,17 +634,21 @@ class DoubanModule(_ModuleBase):
meta = MetaInfo(file.stem)
if not meta.name:
continue
# 根据名称查询豆瓣数据
doubaninfo = self.match_doubaninfo(name=mediainfo.title,
imdbid=mediainfo.imdb_id,
mtype=mediainfo.type.value,
year=mediainfo.year,
season=meta.begin_season)
if not doubaninfo:
logger.warn(f"未找到 {mediainfo.title} 的豆瓣信息")
break
# 查询豆瓣详情
doubaninfo = self.douban_info(doubaninfo.get("id"))
if not mediainfo.douban_id:
# 根据名称查询豆瓣数据
doubaninfo = self.match_doubaninfo(name=mediainfo.title,
imdbid=mediainfo.imdb_id,
mtype=mediainfo.type,
year=mediainfo.year,
season=meta.begin_season)
if not doubaninfo:
logger.warn(f"未找到 {mediainfo.title} 的豆瓣信息")
break
# 查询豆瓣详情
doubaninfo = self.douban_info(doubanid=doubaninfo.get("id"), mtype=mediainfo.type)
else:
doubaninfo = self.douban_info(doubanid=mediainfo.douban_id,
mtype=mediainfo.type)
# 刮削
self.scraper.gen_scraper_files(meta=meta,
mediainfo=MediaInfo(douban_info=doubaninfo),
@@ -532,3 +657,10 @@ class DoubanModule(_ModuleBase):
except Exception as e:
logger.error(f"刮削文件 {file} 失败,原因:{str(e)}")
logger.info(f"{path} 刮削完成")
def clear_cache(self):
"""
清除缓存
"""
self.doubanapi.clear_cache()
self.cache.clear()

View File

@@ -427,6 +427,12 @@ class DoubanApi(metaclass=Singleton):
return self.__invoke(self._urls["doulist_items"] % subject_id,
start=start, count=count, _ts=ts)
def clear_cache(self):
"""
清空LRU缓存
"""
self.__invoke.cache_clear()
def __del__(self):
if self._session:
self._session.close()

View File

@@ -0,0 +1,232 @@
import pickle
import random
import time
from pathlib import Path
from threading import RLock
from typing import Optional
from app.core.config import settings
from app.core.meta import MetaBase
from app.core.metainfo import MetaInfo
from app.utils.singleton import Singleton
from app.schemas.types import MediaType
lock = RLock()
CACHE_EXPIRE_TIMESTAMP_STR = "cache_expire_timestamp"
EXPIRE_TIMESTAMP = settings.CACHE_CONF.get('meta')
class DoubanCache(metaclass=Singleton):
"""
豆瓣缓存数据
{
"id": '',
"title": '',
"year": '',
"type": MediaType
}
"""
_meta_data: dict = {}
# 缓存文件路径
_meta_path: Path = None
# TMDB缓存过期
_tmdb_cache_expire: bool = True
def __init__(self):
self._meta_path = settings.TEMP_PATH / "__douban_cache__"
self._meta_data = self.__load(self._meta_path)
def clear(self):
"""
清空所有TMDB缓存
"""
with lock:
self._meta_data = {}
@staticmethod
def __get_key(meta: MetaBase) -> str:
"""
获取缓存KEY
"""
return f"[{meta.type.value if meta.type else '未知'}]{meta.name}-{meta.year}-{meta.begin_season}"
def get(self, meta: MetaBase):
"""
根据KEY值获取缓存值
"""
key = self.__get_key(meta)
with lock:
info: dict = self._meta_data.get(key)
if info:
expire = info.get(CACHE_EXPIRE_TIMESTAMP_STR)
if not expire or int(time.time()) < expire:
info[CACHE_EXPIRE_TIMESTAMP_STR] = int(time.time()) + EXPIRE_TIMESTAMP
self._meta_data[key] = info
elif expire and self._tmdb_cache_expire:
self.delete(key)
return info or {}
def delete(self, key: str) -> dict:
"""
删除缓存信息
@param key: 缓存key
@return: 被删除的缓存内容
"""
with lock:
return self._meta_data.pop(key, None)
def delete_by_doubanid(self, doubanid: str) -> None:
"""
清空对应豆瓣ID的所有缓存记录以强制更新TMDB中最新的数据
"""
for key in list(self._meta_data):
if self._meta_data.get(key, {}).get("id") == doubanid:
with lock:
self._meta_data.pop(key)
def delete_unknown(self) -> None:
"""
清除未识别的缓存记录以便重新搜索TMDB
"""
for key in list(self._meta_data):
if self._meta_data.get(key, {}).get("id") == "0":
with lock:
self._meta_data.pop(key)
def modify(self, key: str, title: str) -> dict:
"""
删除缓存信息
@param key: 缓存key
@param title: 标题
@return: 被修改后缓存内容
"""
with lock:
if self._meta_data.get(key):
self._meta_data[key]['title'] = title
self._meta_data[key][CACHE_EXPIRE_TIMESTAMP_STR] = int(time.time()) + EXPIRE_TIMESTAMP
return self._meta_data.get(key)
@staticmethod
def __load(path: Path) -> dict:
"""
从文件中加载缓存
"""
try:
if path.exists():
with open(path, 'rb') as f:
data = pickle.load(f)
return data
return {}
except Exception as e:
print(str(e))
return {}
def update(self, meta: MetaBase, info: dict) -> None:
"""
新增或更新缓存条目
"""
with lock:
if info:
# 缓存标题
cache_title = info.get("title")
# 缓存年份
cache_year = info.get('year')
# 类型
if isinstance(info.get('media_type'), MediaType):
mtype = info.get('media_type')
elif info.get("type"):
mtype = MediaType.MOVIE if info.get("type") == "movie" else MediaType.TV
else:
meta = MetaInfo(cache_title)
if meta.begin_season:
mtype = MediaType.TV
else:
mtype = MediaType.MOVIE
# 海报
poster_path = info.get("pic", {}).get("large")
if not poster_path and info.get("cover_url"):
poster_path = info.get("cover_url")
if not poster_path and info.get("cover"):
poster_path = info.get("cover").get("url")
self._meta_data[self.__get_key(meta)] = {
"id": info.get("id"),
"type": mtype,
"year": cache_year,
"title": cache_title,
"poster_path": poster_path,
CACHE_EXPIRE_TIMESTAMP_STR: int(time.time()) + EXPIRE_TIMESTAMP
}
elif info is not None:
# None时不缓存此时代表网络错误允许重复请求
self._meta_data[self.__get_key(meta)] = {'id': "0"}
def save(self, force: bool = False) -> None:
"""
保存缓存数据到文件
"""
meta_data = self.__load(self._meta_path)
new_meta_data = {k: v for k, v in self._meta_data.items() if v.get("id")}
if not force \
and not self._random_sample(new_meta_data) \
and meta_data.keys() == new_meta_data.keys():
return
with open(self._meta_path, 'wb') as f:
pickle.dump(new_meta_data, f, pickle.HIGHEST_PROTOCOL)
def _random_sample(self, new_meta_data: dict) -> bool:
"""
采样分析是否需要保存
"""
ret = False
if len(new_meta_data) < 25:
keys = list(new_meta_data.keys())
for k in keys:
info = new_meta_data.get(k)
expire = info.get(CACHE_EXPIRE_TIMESTAMP_STR)
if not expire:
ret = True
info[CACHE_EXPIRE_TIMESTAMP_STR] = int(time.time()) + EXPIRE_TIMESTAMP
elif int(time.time()) >= expire:
ret = True
if self._tmdb_cache_expire:
new_meta_data.pop(k)
else:
count = 0
keys = random.sample(sorted(new_meta_data.keys()), 25)
for k in keys:
info = new_meta_data.get(k)
expire = info.get(CACHE_EXPIRE_TIMESTAMP_STR)
if not expire:
ret = True
info[CACHE_EXPIRE_TIMESTAMP_STR] = int(time.time()) + EXPIRE_TIMESTAMP
elif int(time.time()) >= expire:
ret = True
if self._tmdb_cache_expire:
new_meta_data.pop(k)
count += 1
if count >= 5:
ret |= self._random_sample(new_meta_data)
return ret
def get_title(self, key: str) -> Optional[str]:
"""
获取缓存的标题
"""
cache_media_info = self._meta_data.get(key)
if not cache_media_info or not cache_media_info.get("id"):
return None
return cache_media_info.get("title")
def set_title(self, key: str, cn_title: str) -> None:
"""
重新设置缓存标题
"""
cache_media_info = self._meta_data.get(key)
if not cache_media_info:
return
self._meta_data[key]['title'] = cn_title

View File

@@ -1,4 +1,3 @@
from pathlib import Path
from typing import Optional, Tuple, Union, Any, List, Generator
from app import schemas
@@ -26,7 +25,7 @@ class EmbyModule(_ModuleBase):
定时任务每10分钟调用一次
"""
# 定时重连
if not self.emby.is_inactive():
if self.emby.is_inactive():
self.emby.reconnect()
def user_authenticate(self, name: str, password: str) -> Optional[str]:
@@ -96,24 +95,6 @@ class EmbyModule(_ModuleBase):
itemid=itemid
)
def refresh_mediaserver(self, mediainfo: MediaInfo, file_path: Path) -> None:
"""
刷新媒体库
:param mediainfo: 识别的媒体信息
:param file_path: 文件路径
:return: 成功或失败
"""
items = [
schemas.RefreshMediaItem(
title=mediainfo.title,
year=mediainfo.year,
type=mediainfo.type,
category=mediainfo.category,
target_path=file_path
)
]
self.emby.refresh_library_by_items(items)
def media_statistic(self) -> List[schemas.Statistic]:
"""
媒体数量统计

View File

@@ -244,7 +244,7 @@ class Emby(metaclass=Singleton):
"&Limit=10"
"&IncludeSearchTypes=false"
"&api_key=%s") % (
self._host, name, self._apikey)
self._host, name, self._apikey)
try:
res = RequestUtils().get_res(req_url)
if res:
@@ -803,7 +803,10 @@ class Emby(metaclass=Singleton):
logger.debug(f"接收到emby webhook{message}")
eventItem = schemas.WebhookEventInfo(event=eventType, channel="emby")
if message.get('Item'):
if message.get('Item', {}).get('Type') == 'Episode':
eventItem.media_type = message.get('Item', {}).get('Type')
if message.get('Item', {}).get('Type') == 'Episode' \
or message.get('Item', {}).get('Type') == 'Series' \
or message.get('Item', {}).get('Type') == 'Season':
eventItem.item_type = "TV"
if message.get('Item', {}).get('SeriesName') \
and message.get('Item', {}).get('ParentIndexNumber') \
@@ -813,6 +816,10 @@ class Emby(metaclass=Singleton):
"S" + str(message.get('Item', {}).get('ParentIndexNumber')),
"E" + str(message.get('Item', {}).get('IndexNumber')),
message.get('Item', {}).get('Name'))
elif message.get('Item', {}).get('SeriesName'):
eventItem.item_name = "%s %s" % (
message.get('Item', {}).get('SeriesName'),
message.get('Item', {}).get('Name'))
else:
eventItem.item_name = message.get('Item', {}).get('Name')
eventItem.item_id = message.get('Item', {}).get('SeriesId')

View File

@@ -326,17 +326,19 @@ class FanartModule(_ModuleBase):
:param mediainfo: 识别的媒体信息
:return: 更新后的媒体信息
"""
if not mediainfo.tmdb_id and not mediainfo.tvdb_id:
return None
if mediainfo.type == MediaType.MOVIE:
result = self.__request_fanart(mediainfo.type, mediainfo.tmdb_id)
else:
if mediainfo.tvdb_id:
result = self.__request_fanart(mediainfo.type, mediainfo.tvdb_id)
else:
logger.info(f"{mediainfo.title_year} 没有tvdbid无法获取Fanart图片")
return
logger.info(f"{mediainfo.title_year} 没有tvdbid无法获取fanart图片")
return None
if not result or result.get('status') == 'error':
logger.warn(f"没有获取到 {mediainfo.title_year}Fanart图片数据")
return
logger.warn(f"没有获取到 {mediainfo.title_year}fanart图片数据")
return None
# 获取所有图片
for name, images in result.items():
if not images:

View File

@@ -8,11 +8,12 @@ from jinja2 import Template
from app.core.config import settings
from app.core.context import MediaInfo
from app.core.meta import MetaBase
from app.core.metainfo import MetaInfo
from app.core.metainfo import MetaInfo, MetaInfoPath
from app.log import logger
from app.modules import _ModuleBase
from app.schemas import TransferInfo, ExistMediaInfo, TmdbEpisode
from app.schemas.types import MediaType
from app.utils.string import StringUtils
from app.utils.system import SystemUtils
lock = Lock()
@@ -53,6 +54,8 @@ class FileTransferModule(_ModuleBase):
return TransferInfo(success=False,
path=path,
message="未找到媒体库目录")
else:
logger.info(f"获取转移目标路径:{target}")
# 转移
return self.transfer_media(in_path=path,
in_meta=meta,
@@ -413,6 +416,14 @@ class FileTransferModule(_ModuleBase):
rename_dict=self.__get_naming_dict(meta=in_meta,
mediainfo=mediainfo)
).parent
# 目录已存在时不处理
if new_path.exists():
logger.warn(f"目标目录已存在:{new_path}")
return TransferInfo(success=False,
message=f"目标目录已存在:{new_path}",
path=in_path,
target_path=new_path,
is_bluray=bluray_flag)
# 转移蓝光原盘
retcode = self.__transfer_dir(file_path=in_path,
new_path=new_path,
@@ -468,9 +479,36 @@ class FileTransferModule(_ModuleBase):
# 判断是否要覆盖
overflag = False
if new_file.exists():
if new_file.stat().st_size < in_path.stat().st_size:
logger.info(f"目标文件已存在,但文件大小更小,将覆盖:{new_file}")
overflag = True
# 目标文件已存在
logger.info(f"目标文件已存在,转移覆盖模式:{settings.OVERWRITE_MODE}")
match settings.OVERWRITE_MODE:
case 'always':
# 总是覆盖同名文件
overflag = True
case 'size':
# 存在时大覆盖小
if new_file.stat().st_size < in_path.stat().st_size:
logger.info(f"目标文件文件大小更小,将被覆盖:{new_file}")
overflag = True
else:
return TransferInfo(success=False,
message=f"媒体库中已存在,且质量更好",
path=in_path,
target_path=new_file,
fail_list=[str(in_path)])
case 'never':
# 存在不覆盖
return TransferInfo(success=False,
message=f"媒体库中已存在,当前设置为不覆盖",
path=in_path,
target_path=new_file,
fail_list=[str(in_path)])
case 'latest':
# 仅保留最新版本
self.delete_all_version_files(new_file)
overflag = True
case _:
pass
# 原文件大小
file_size = in_path.stat().st_size
# 转移文件
@@ -613,7 +651,8 @@ class FileTransferModule(_ModuleBase):
if in_path:
for path in dest_paths:
try:
relative = in_path.relative_to(path).as_posix()
# 计算in_path和path的公共字符串长度
relative = StringUtils.find_common_prefix(str(in_path), str(path))
if len(relative) > max_length:
max_length = len(relative)
target_path = path
@@ -696,3 +735,34 @@ class FileTransferModule(_ModuleBase):
return ExistMediaInfo(type=MediaType.TV, seasons=seasons)
# 不存在
return None
@staticmethod
def delete_all_version_files(path: Path) -> bool:
"""
删除目录下的所有版本文件
:param path: 目录路径
"""
if not path.exists():
return False
# 识别文件中的季集信息
meta = MetaInfoPath(path)
season = meta.season
episode = meta.episode
# 检索媒体文件
logger.warn(f"正在删除目标目录中其它版本的文件:{path.parent}")
media_files = SystemUtils.list_files(directory=path.parent, extensions=settings.RMT_MEDIAEXT)
if not media_files:
logger.info(f"目录中没有媒体文件:{path.parent}")
return False
# 删除文件
for media_file in media_files:
if media_file == path:
continue
# 识别文件中的季集信息
filemeta = MetaInfoPath(media_file)
# 相同季集的文件才删除
if filemeta.season != season or filemeta.episode != episode:
continue
logger.info(f"正在删除文件:{media_file}")
media_file.unlink()
return True

View File

@@ -1,4 +1,3 @@
from pathlib import Path
from typing import Optional, Tuple, Union, Any, List, Generator
from app import schemas
@@ -23,7 +22,7 @@ class JellyfinModule(_ModuleBase):
定时任务每10分钟调用一次
"""
# 定时重连
if not self.jellyfin.is_inactive():
if self.jellyfin.is_inactive():
self.jellyfin.reconnect()
def stop(self):
@@ -94,15 +93,6 @@ class JellyfinModule(_ModuleBase):
itemid=itemid
)
def refresh_mediaserver(self, mediainfo: MediaInfo, file_path: Path) -> None:
"""
刷新媒体库
:param mediainfo: 识别的媒体信息
:param file_path: 文件路径
:return: 成功或失败
"""
self.jellyfin.refresh_root_library()
def media_statistic(self) -> List[schemas.Statistic]:
"""
媒体数量统计

View File

@@ -212,7 +212,7 @@ class Jellyfin(metaclass=Singleton):
return None
req_url = ("%sUsers/%s/Items?"
"api_key=%s&searchTerm=%s&IncludeItemTypes=Series&Limit=10&Recursive=true") % (
self._host, self.user, self._apikey, name)
self._host, self.user, self._apikey, name)
try:
res = RequestUtils().get_res(req_url)
if res:
@@ -227,8 +227,8 @@ class Jellyfin(metaclass=Singleton):
return None
return ""
def get_movies(self,
title: str,
def get_movies(self,
title: str,
year: str = None,
tmdb_id: int = None) -> Optional[List[schemas.MediaServerItem]]:
"""
@@ -242,7 +242,7 @@ class Jellyfin(metaclass=Singleton):
return None
req_url = ("%sUsers/%s/Items?"
"api_key=%s&searchTerm=%s&IncludeItemTypes=Movie&Limit=10&Recursive=true") % (
self._host, self.user, self._apikey, title)
self._host, self.user, self._apikey, title)
try:
res = RequestUtils().get_res(req_url)
if res:
@@ -466,7 +466,10 @@ class Jellyfin(metaclass=Singleton):
eventItem.device_name = message.get('DeviceName')
eventItem.user_name = message.get('NotificationUsername')
eventItem.client = message.get('ClientName')
if message.get("ItemType") == "Episode":
eventItem.media_type = message.get('ItemType')
if message.get("ItemType") == "Episode" \
or message.get("ItemType") == "Series" \
or message.get("ItemType") == "Season":
# 剧集
eventItem.item_type = "TV"
eventItem.season_id = message.get('SeasonNumber')

View File

@@ -1,4 +1,3 @@
from pathlib import Path
from typing import Optional, Tuple, Union, Any, List, Generator
from app import schemas
@@ -26,7 +25,7 @@ class PlexModule(_ModuleBase):
定时任务每10分钟调用一次
"""
# 定时重连
if not self.plex.is_inactive():
if self.plex.is_inactive():
self.plex.reconnect()
def webhook_parser(self, body: Any, form: Any, args: Any) -> Optional[schemas.WebhookEventInfo]:
@@ -88,24 +87,6 @@ class PlexModule(_ModuleBase):
itemid=item_id
)
def refresh_mediaserver(self, mediainfo: MediaInfo, file_path: Path) -> None:
"""
刷新媒体库
:param mediainfo: 识别的媒体信息
:param file_path: 文件路径
:return: 成功或失败
"""
items = [
schemas.RefreshMediaItem(
title=mediainfo.title,
year=mediainfo.year,
type=mediainfo.type,
category=mediainfo.category,
target_path=file_path
)
]
self.plex.refresh_library_by_items(items)
def media_statistic(self) -> List[schemas.Statistic]:
"""
媒体数量统计

View File

@@ -101,9 +101,15 @@ class QbittorrentModule(_ModuleBase):
# 选择文件
self.qbittorrent.set_files(torrent_hash=torrent_hash, file_ids=file_ids, priority=0)
# 开始任务
self.qbittorrent.start_torrents(torrent_hash)
if settings.QB_FORCE_RESUME:
# 强制继续
self.qbittorrent.torrents_set_force_start(torrent_hash)
else:
self.qbittorrent.start_torrents(torrent_hash)
return torrent_hash, f"添加下载成功,已选择集数:{sucess_epidised}"
else:
if settings.QB_FORCE_RESUME:
self.qbittorrent.torrents_set_force_start(torrent_hash)
return torrent_hash, "添加下载成功"
def list_torrents(self, status: TorrentStatus = None,
@@ -165,6 +171,9 @@ class QbittorrentModule(_ModuleBase):
state="paused" if torrent.get('state') == "paused" else "downloading",
dlspeed=StringUtils.str_filesize(torrent.get('dlspeed')),
upspeed=StringUtils.str_filesize(torrent.get('upspeed')),
left_time=StringUtils.str_secends(
(torrent.get('total_size') - torrent.get('completed')) / torrent.get('dlspeed')) if torrent.get(
'dlspeed') > 0 else ''
))
else:
return None

View File

@@ -243,7 +243,7 @@ class Qbittorrent(metaclass=Singleton):
is_paused=is_paused,
tags=tags,
use_auto_torrent_management=is_auto,
is_sequential_download=True,
is_sequential_download=settings.QB_SEQUENTIAL,
cookie=cookie,
category=category,
**kwargs)

View File

@@ -43,7 +43,8 @@ class TheMovieDbModule(_ModuleBase):
def recognize_media(self, meta: MetaBase = None,
mtype: MediaType = None,
tmdbid: int = None) -> Optional[MediaInfo]:
tmdbid: int = None,
**kwargs) -> Optional[MediaInfo]:
"""
识别媒体信息
:param meta: 识别的元数据
@@ -51,6 +52,9 @@ class TheMovieDbModule(_ModuleBase):
:param tmdbid: tmdbid
:return: 识别的媒体信息,包括剧集信息
"""
if settings.RECOGNIZE_SOURCE != "themoviedb":
return None
if not meta:
cache_info = {}
else:
@@ -112,11 +116,11 @@ class TheMovieDbModule(_ModuleBase):
else:
# 使用缓存信息
if cache_info.get("title"):
logger.info(f"{meta.name} 使用识别缓存:{cache_info.get('title')}")
logger.info(f"{meta.name} 使用TMDB识别缓存:{cache_info.get('title')}")
info = self.tmdb.get_info(mtype=cache_info.get("type"),
tmdbid=cache_info.get("id"))
else:
logger.info(f"{meta.name} 使用识别缓存:无法识别")
logger.info(f"{meta.name} 使用TMDB识别缓存:无法识别")
info = None
if info:
@@ -129,11 +133,11 @@ class TheMovieDbModule(_ModuleBase):
mediainfo = MediaInfo(tmdb_info=info)
mediainfo.set_category(cat)
if meta:
logger.info(f"{meta.name} 识别结果:{mediainfo.type.value} "
logger.info(f"{meta.name} TMDB识别结果:{mediainfo.type.value} "
f"{mediainfo.title_year} "
f"{mediainfo.tmdb_id}")
else:
logger.info(f"{tmdbid} 识别结果:{mediainfo.type.value} "
logger.info(f"{tmdbid} TMDB识别结果:{mediainfo.type.value} "
f"{mediainfo.title_year}")
# 补充剧集年份
@@ -143,10 +147,31 @@ class TheMovieDbModule(_ModuleBase):
mediainfo.season_years = episode_years
return mediainfo
else:
logger.info(f"{meta.name if meta else tmdbid} 未匹配到媒体信息")
logger.info(f"{meta.name if meta else tmdbid} 未匹配到TMDB媒体信息")
return None
def match_tmdbinfo(self, name: str, mtype: MediaType = None,
year: str = None, season: int = None) -> dict:
"""
搜索和匹配TMDB信息
:param name: 名称
:param mtype: 类型
:param year: 年份
:param season: 季号
"""
# 搜索
logger.info(f"开始使用 名称:{name}、年份:{year} 匹配TMDB信息 ...")
info = self.tmdb.match(name=name,
year=year,
mtype=mtype,
season_year=year,
season_number=season)
if info and not info.get("genres"):
info = self.tmdb.get_info(mtype=info.get("media_type"),
tmdbid=info.get("id"))
return info
def tmdb_info(self, tmdbid: int, mtype: MediaType) -> Optional[dict]:
"""
获取TMDB信息
@@ -163,7 +188,7 @@ class TheMovieDbModule(_ModuleBase):
:reutrn: 媒体信息列表
"""
# 未启用时返回None
if settings.SEARCH_SOURCE != "themoviedb":
if settings.RECOGNIZE_SOURCE != "themoviedb":
return None
if not meta.name:

View File

@@ -144,7 +144,8 @@ class TmdbCache(metaclass=Singleton):
"backdrop_path": info.get("backdrop_path"),
CACHE_EXPIRE_TIMESTAMP_STR: int(time.time()) + EXPIRE_TIMESTAMP
}
else:
elif info is not None:
# None时不缓存此时代表网络错误允许重复请求
self._meta_data[self.__get_key(meta)] = {'id': 0}
def save(self, force: bool = False) -> None:

View File

@@ -141,15 +141,18 @@ class TMDb(object):
def cached_request(self, method, url, data, json,
_ts=datetime.strftime(datetime.now(), '%Y%m%d')):
"""
缓存请求时间默认1天
缓存请求时间默认1天None不缓存
"""
return self.request(method, url, data, json)
def request(self, method, url, data, json):
if method == "GET":
return self._req.get_res(url, params=data, json=json)
req = self._req.get_res(url, params=data, json=json)
else:
return self._req.post_res(url, data=data, json=json)
req = self._req.post_res(url, data=data, json=json)
if req is None:
raise TMDbException("无法连接TheMovieDb请检查网络连接")
return req
def cache_clear(self):
return self.cached_request.cache_clear()
@@ -157,7 +160,7 @@ class TMDb(object):
def _request_obj(self, action, params="", call_cached=True,
method="GET", data=None, json=None, key=None):
if self.api_key is None or self.api_key == "":
raise TMDbException("No API key found.")
raise TMDbException("TheMovieDb API Key 未设置!")
url = "https://%s/3%s?api_key=%s&%s&language=%s" % (
self.domain,
@@ -173,7 +176,7 @@ class TMDb(object):
req = self.request(method, url, data, json)
if req is None:
raise TMDbException("Failed to establish a new connection: no response from the server.")
return None
headers = req.headers
@@ -188,11 +191,11 @@ class TMDb(object):
sleep_time = self._reset - current_time
if self.wait_on_rate_limit:
logger.warning("Rate limit reached. Sleeping for: %d" % sleep_time)
logger.warning("达到请求频率限制,休眠:%d 秒..." % sleep_time)
time.sleep(abs(sleep_time))
return self._request_obj(action, params, call_cached, method, data, json, key)
else:
raise TMDbException("Rate limit reached. Try again in %d seconds." % sleep_time)
raise TMDbException("达到请求频率限制,将在 %d 秒后重试..." % sleep_time)
json = req.json()

View File

@@ -33,7 +33,7 @@ class TransmissionModule(_ModuleBase):
定时任务每10分钟调用一次
"""
# 定时重连
if not self.transmission.is_inactive():
if self.transmission.is_inactive():
self.transmission.reconnect()
def download(self, content: Union[Path, str], download_dir: Path, cookie: str,
@@ -151,6 +151,7 @@ class TransmissionModule(_ModuleBase):
state="paused" if torrent.status == "stopped" else "downloading",
dlspeed=StringUtils.str_filesize(dlspeed),
upspeed=StringUtils.str_filesize(upspeed),
left_time=StringUtils.str_secends(torrent.left_until_done / dlspeed) if dlspeed > 0 else ''
))
else:
return None

View File

@@ -86,6 +86,7 @@ class _PluginBase(metaclass=ABCMeta):
def get_form(self) -> Tuple[List[dict], Dict[str, Any]]:
"""
拼装插件配置页面需要返回两块数据1、页面配置2、数据结构
插件配置页面使用Vuetify组件拼装参考https://vuetifyjs.com/
"""
pass
@@ -93,6 +94,7 @@ class _PluginBase(metaclass=ABCMeta):
def get_page(self) -> List[dict]:
"""
拼装插件详情页面,需要返回页面配置,同时附带数据
插件详情页面使用Vuetify组件拼装参考https://vuetifyjs.com/
"""
pass

View File

@@ -1,317 +0,0 @@
import glob
import os
import shutil
import time
from datetime import datetime, timedelta
from pathlib import Path
import pytz
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
from app.core.config import settings
from app.plugins import _PluginBase
from typing import Any, List, Dict, Tuple, Optional
from app.log import logger
from app.schemas import NotificationType
class AutoBackup(_PluginBase):
# 插件名称
plugin_name = "自动备份"
# 插件描述
plugin_desc = "自动备份数据和配置文件。"
# 插件图标
plugin_icon = "backup.png"
# 主题色
plugin_color = "#4FB647"
# 插件版本
plugin_version = "1.0"
# 插件作者
plugin_author = "thsrite"
# 作者主页
author_url = "https://github.com/thsrite"
# 插件配置项ID前缀
plugin_config_prefix = "autobackup_"
# 加载顺序
plugin_order = 17
# 可使用的用户级别
auth_level = 1
# 私有属性
_enabled = False
# 任务执行间隔
_cron = None
_cnt = None
_onlyonce = False
_notify = False
# 定时器
_scheduler: Optional[BackgroundScheduler] = None
def init_plugin(self, config: dict = None):
# 停止现有任务
self.stop_service()
if config:
self._enabled = config.get("enabled")
self._cron = config.get("cron")
self._cnt = config.get("cnt")
self._notify = config.get("notify")
self._onlyonce = config.get("onlyonce")
# 加载模块
if self._enabled:
# 定时服务
self._scheduler = BackgroundScheduler(timezone=settings.TZ)
if self._cron:
try:
self._scheduler.add_job(func=self.__backup,
trigger=CronTrigger.from_crontab(self._cron),
name="自动备份")
except Exception as err:
logger.error(f"定时任务配置错误:{str(err)}")
if self._onlyonce:
logger.info(f"自动备份服务启动,立即运行一次")
self._scheduler.add_job(func=self.__backup, trigger='date',
run_date=datetime.now(tz=pytz.timezone(settings.TZ)) + timedelta(seconds=3),
name="自动备份")
# 关闭一次性开关
self._onlyonce = False
self.update_config({
"onlyonce": False,
"cron": self._cron,
"enabled": self._enabled,
"cnt": self._cnt,
"notify": self._notify,
})
# 启动任务
if self._scheduler.get_jobs():
self._scheduler.print_jobs()
self._scheduler.start()
def __backup(self):
"""
自动备份、删除备份
"""
logger.info(f"当前时间 {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))} 开始备份")
# docker用默认路径
bk_path = self.get_data_path()
# 备份
zip_file = self.backup(bk_path=bk_path)
if zip_file:
logger.info(f"备份完成 备份文件 {zip_file} ")
else:
logger.error("创建备份失败")
# 清理备份
bk_cnt = 0
del_cnt = 0
if self._cnt:
# 获取指定路径下所有以"bk"开头的文件,按照创建时间从旧到新排序
files = sorted(glob.glob(f"{bk_path}/bk**"), key=os.path.getctime)
bk_cnt = len(files)
# 计算需要删除的文件数
del_cnt = bk_cnt - int(self._cnt)
if del_cnt > 0:
logger.info(
f"获取到 {bk_path} 路径下备份文件数量 {bk_cnt} 保留数量 {int(self._cnt)} 需要删除备份文件数量 {del_cnt}")
# 遍历并删除最旧的几个备份
for i in range(del_cnt):
os.remove(files[i])
logger.debug(f"删除备份文件 {files[i]} 成功")
else:
logger.info(
f"获取到 {bk_path} 路径下备份文件数量 {bk_cnt} 保留数量 {int(self._cnt)} 无需删除")
# 发送通知
if self._notify:
self.post_message(
mtype=NotificationType.SiteMessage,
title="【自动备份任务完成】",
text=f"创建备份{'成功' if zip_file else '失败'}\n"
f"清理备份数量 {del_cnt}\n"
f"剩余备份数量 {bk_cnt - del_cnt}")
@staticmethod
def backup(bk_path: Path = None):
"""
@param bk_path 自定义备份路径
"""
try:
# 创建备份文件夹
config_path = Path(settings.CONFIG_PATH)
backup_file = f"bk_{time.strftime('%Y%m%d%H%M%S')}"
backup_path = bk_path / backup_file
backup_path.mkdir(parents=True)
# 把现有的相关文件进行copy备份
if settings.LIBRARY_CATEGORY:
shutil.copy(f'{config_path}/category.yaml', backup_path)
shutil.copy(f'{config_path}/user.db', backup_path)
zip_file = str(backup_path) + '.zip'
if os.path.exists(zip_file):
zip_file = str(backup_path) + '.zip'
shutil.make_archive(str(backup_path), 'zip', str(backup_path))
shutil.rmtree(str(backup_path))
return zip_file
except IOError:
return None
def get_state(self) -> bool:
return self._enabled
@staticmethod
def get_command() -> List[Dict[str, Any]]:
pass
def get_api(self) -> List[Dict[str, Any]]:
pass
def get_form(self) -> Tuple[List[dict], Dict[str, Any]]:
"""
拼装插件配置页面需要返回两块数据1、页面配置2、数据结构
"""
return [
{
'component': 'VForm',
'content': [
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 4
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'enabled',
'label': '启用插件',
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 4
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'notify',
'label': '开启通知',
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 4
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'onlyonce',
'label': '立即运行一次',
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'cron',
'label': '备份周期'
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'cnt',
'label': '最大保留备份数'
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
},
'content': [
{
'component': 'VAlert',
'props': {
'text': '备份文件路径默认为本地映射的config/plugins/AutoBackup。'
}
}
]
}
]
}
]
}
], {
"enabled": False,
"request_method": "POST",
"webhook_url": ""
}
def get_page(self) -> List[dict]:
pass
def stop_service(self):
"""
退出插件
"""
try:
if self._scheduler:
self._scheduler.remove_all_jobs()
if self._scheduler.running:
self._scheduler.shutdown()
self._scheduler = None
except Exception as e:
logger.error("退出插件失败:%s" % str(e))

View File

@@ -1,551 +0,0 @@
import time
from collections import defaultdict
from datetime import datetime, timedelta
from pathlib import Path
import pytz
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
from app.chain.transfer import TransferChain
from app.core.config import settings
from app.core.event import eventmanager
from app.db.downloadhistory_oper import DownloadHistoryOper
from app.db.transferhistory_oper import TransferHistoryOper
from app.plugins import _PluginBase
from typing import Any, List, Dict, Tuple, Optional
from app.log import logger
from app.schemas import NotificationType, DownloadHistory
from app.schemas.types import EventType
class AutoClean(_PluginBase):
    """Scheduled cleanup of downloaded torrents, source files and media-library files."""
    # Plugin display name
    plugin_name = "定时清理媒体库"
    # Plugin description
    plugin_desc = "定时清理用户下载的种子、源文件、媒体库文件。"
    # Plugin icon
    plugin_icon = "clean.png"
    # Theme color
    plugin_color = "#3377ed"
    # Plugin version
    plugin_version = "1.0"
    # Plugin author
    plugin_author = "thsrite"
    # Author homepage
    author_url = "https://github.com/thsrite"
    # Prefix for this plugin's configuration keys
    plugin_config_prefix = "autoclean_"
    # Load order among plugins
    plugin_order = 23
    # Minimum user auth level allowed to use the plugin
    auth_level = 2
    # -- private state, populated by init_plugin() --
    _enabled = False
    # Cron expression for the scheduled cleanup job
    _cron = None
    _type = None
    _onlyonce = False
    _notify = False
    # Which files to clean: "dest" / "src" / "all"
    _cleantype = None
    # Comma-separated user ids to restrict cleanup to (empty = all users)
    _cleanuser = None
    # Clean records newer than this many days
    _cleandate = None
    # DB helpers (set when the plugin is enabled)
    _downloadhis = None
    _transferhis = None
    # Background scheduler instance
    _scheduler: Optional[BackgroundScheduler] = None
def init_plugin(self, config: dict = None):
    """
    Initialise the plugin from its saved configuration and (re)build the
    scheduler. Called on every load / configuration change.

    :param config: saved plugin configuration, or None on first load
    """
    # Stop any previously running jobs before re-reading the config
    self.stop_service()
    if config:
        self._enabled = config.get("enabled")
        self._cron = config.get("cron")
        self._onlyonce = config.get("onlyonce")
        self._notify = config.get("notify")
        self._cleantype = config.get("cleantype")
        self._cleanuser = config.get("cleanuser")
        self._cleandate = config.get("cleandate")
    # Only set up DB helpers and the scheduler when the plugin is enabled
    if self._enabled:
        self._downloadhis = DownloadHistoryOper()
        self._transferhis = TransferHistoryOper()
        # Scheduled service
        self._scheduler = BackgroundScheduler(timezone=settings.TZ)
        if self._cron:
            try:
                self._scheduler.add_job(func=self.__clean,
                                        trigger=CronTrigger.from_crontab(self._cron),
                                        name="定时清理媒体库")
            except Exception as err:
                # Invalid crontab expression - log and continue without the job
                logger.error(f"定时任务配置错误:{str(err)}")
        if self._onlyonce:
            logger.info(f"定时清理媒体库服务启动,立即运行一次")
            # Fire a one-shot run ~3 seconds from now
            self._scheduler.add_job(func=self.__clean, trigger='date',
                                    run_date=datetime.now(tz=pytz.timezone(settings.TZ)) + timedelta(seconds=3),
                                    name="定时清理媒体库")
            # Reset the one-shot switch so it does not fire again on next load
            self._onlyonce = False
            self.update_config({
                "onlyonce": False,
                "cron": self._cron,
                "cleantype": self._cleantype,
                "enabled": self._enabled,
                "cleanuser": self._cleanuser,
                "cleandate": self._cleandate,
                "notify": self._notify,
            })
        # Start the scheduler only if at least one job was registered
        if self._scheduler.get_jobs():
            self._scheduler.print_jobs()
            self._scheduler.start()
def __clean(self):
    """
    Scheduled cleanup entry point: compute the cut-off date and clean the
    matching download history, either for all users or per configured user.
    """
    if not self._cleandate:
        logger.error("未配置清理媒体库时间,停止运行")
        return
    # Cut-off date: records after this date will be cleaned
    current_time = datetime.now()
    days_ago = current_time - timedelta(days=int(self._cleandate))
    clean_date = days_ago.strftime("%Y-%m-%d")
    # Fetch download history newer than the cut-off, optionally per user
    if not self._cleanuser:
        downloadhis_list = self._downloadhis.list_by_user_date(date=clean_date)
        logger.info(f'获取到日期 {clean_date} 之后的下载历史 {len(downloadhis_list)}')
        self.__clean_history(date=clean_date, downloadhis_list=downloadhis_list)
    else:
        # Multiple user ids are comma-separated
        for userid in str(self._cleanuser).split(","):
            downloadhis_list = self._downloadhis.list_by_user_date(date=clean_date,
                                                                   userid=userid)
            logger.info(
                f'获取到用户 {userid} 日期 {clean_date} 之后的下载历史 {len(downloadhis_list)}')
            self.__clean_history(date=clean_date, downloadhis_list=downloadhis_list, userid=userid)
def __clean_history(self, date: str, downloadhis_list: List[DownloadHistory], userid: str = None):
    """
    Clean transfer records (and their files) for the given download history.

    :param date: cut-off date, used only for logging
    :param downloadhis_list: download-history rows to process
    :param userid: optional user the records belong to (used in the notification)
    """
    if not downloadhis_list:
        logger.warn(f"未获取到日期 {date} 之后的下载记录,停止运行")
        return
    # Previously saved cleanup history shown on the plugin detail page
    history = self.get_data('history') or []
    # Group download records by (media type, tmdbid)
    downloadhis_grouped_dict: Dict[tuple, List[DownloadHistory]] = defaultdict(list)
    for downloadhis in downloadhis_list:
        downloadhis_grouped_dict[(downloadhis.type, downloadhis.tmdbid)].append(downloadhis)
    # Clean one media group at a time
    for key, group in downloadhis_grouped_dict.items():
        logger.info(f"开始清理 {key}")
        del_transferhis_cnt = 0
        del_media_name = group[0].title
        del_media_user = group[0].userid
        del_media_type = group[0].type
        del_media_year = group[0].year
        del_media_season = group[0].seasons
        del_media_episode = group[0].episodes
        del_image = group[0].image
        for downloadhis in group:
            if not downloadhis.download_hash:
                logger.debug(f'下载历史 {downloadhis.id} {downloadhis.title} 未获取到download_hash跳过处理')
                continue
            # Transfer records created from this torrent
            transferhis_list = self._transferhis.list_by_hash(download_hash=downloadhis.download_hash)
            if not transferhis_list:
                logger.warn(f"下载历史 {downloadhis.download_hash} 未查询到转移记录,跳过处理")
                continue
            # NOTE: the loop variable is deliberately NOT named "history";
            # the original shadowed the history list above, which made the
            # later history.append(...) fail on a TransferHistory row.
            for transferhis in transferhis_list:
                # Delete media-library (destination) files and the record.
                # Fixed: the original compared str(x == "dest"), which is
                # always truthy ("True"/"False"), so every clean type
                # deleted both destination and source files.
                if self._cleantype in ("dest", "all"):
                    TransferChain().delete_files(Path(transferhis.dest))
                    # Remove the transfer record itself
                    self._transferhis.delete(transferhis.id)
                # Delete source files
                if self._cleantype in ("src", "all"):
                    TransferChain().delete_files(Path(transferhis.src))
                    # Notify downstream components that the source file is gone
                    eventmanager.send_event(
                        EventType.DownloadFileDeleted,
                        {
                            "src": transferhis.src
                        }
                    )
            # Accumulate the number of deleted records
            del_transferhis_cnt += len(transferhis_list)
        # Notify the user once per media group
        if self._notify:
            self.post_message(
                mtype=NotificationType.MediaServer,
                title="【定时清理媒体库任务完成】",
                text=f"清理媒体名称 {del_media_name}\n"
                     f"下载媒体用户 {del_media_user}\n"
                     f"删除历史记录 {del_transferhis_cnt}",
                userid=userid)
        # Record the cleanup so it shows on the plugin detail page
        history.append({
            "type": del_media_type,
            "title": del_media_name,
            "year": del_media_year,
            "season": del_media_season,
            "episode": del_media_episode,
            "image": del_image,
            "del_time": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
        })
    # Persist the cleanup history
    self.save_data("history", history)
def get_state(self) -> bool:
    """Return whether the plugin is enabled (value loaded from config)."""
    return self._enabled
@staticmethod
def get_command() -> List[Dict[str, Any]]:
    """Register remote commands. This plugin registers none."""
    pass
def get_api(self) -> List[Dict[str, Any]]:
    """Register plugin API endpoints. This plugin exposes none."""
    pass
def get_form(self) -> Tuple[List[dict], Dict[str, Any]]:
    """
    Build the plugin configuration page.

    Returns two pieces of data: 1) the page layout (a Vuetify component
    tree) and 2) the default values for every form model it references.
    """
    return [
        {
            'component': 'VForm',
            'content': [
                # Row 1: switches - enable / run once / notify
                {
                    'component': 'VRow',
                    'content': [
                        {
                            'component': 'VCol',
                            'props': {
                                'cols': 12,
                                'md': 4
                            },
                            'content': [
                                {
                                    'component': 'VSwitch',
                                    'props': {
                                        'model': 'enabled',
                                        'label': '启用插件',
                                    }
                                }
                            ]
                        },
                        {
                            'component': 'VCol',
                            'props': {
                                'cols': 12,
                                'md': 4
                            },
                            'content': [
                                {
                                    'component': 'VSwitch',
                                    'props': {
                                        'model': 'onlyonce',
                                        'label': '立即运行一次',
                                    }
                                }
                            ]
                        },
                        {
                            'component': 'VCol',
                            'props': {
                                'cols': 12,
                                'md': 4
                            },
                            'content': [
                                {
                                    'component': 'VSwitch',
                                    'props': {
                                        'model': 'notify',
                                        'label': '开启通知',
                                    }
                                }
                            ]
                        }
                    ]
                },
                # Row 2: schedule, clean type and age threshold
                {
                    'component': 'VRow',
                    'content': [
                        {
                            'component': 'VCol',
                            'props': {
                                'cols': 12,
                                'md': 4
                            },
                            'content': [
                                {
                                    'component': 'VTextField',
                                    'props': {
                                        'model': 'cron',
                                        'label': '执行周期',
                                        'placeholder': '0 0 ? ? ?'
                                    }
                                }
                            ]
                        },
                        {
                            'component': 'VCol',
                            'props': {
                                'cols': 12,
                                'md': 4
                            },
                            'content': [
                                {
                                    'component': 'VSelect',
                                    'props': {
                                        'model': 'cleantype',
                                        'label': '清理方式',
                                        'items': [
                                            {'title': '媒体库文件', 'value': 'dest'},
                                            {'title': '源文件', 'value': 'src'},
                                            {'title': '所有文件', 'value': 'all'},
                                        ]
                                    }
                                }
                            ]
                        },
                        {
                            'component': 'VCol',
                            'props': {
                                'cols': 12,
                                'md': 4
                            },
                            'content': [
                                {
                                    'component': 'VTextField',
                                    'props': {
                                        'model': 'cleandate',
                                        'label': '清理媒体日期',
                                        'placeholder': '清理多少天之前的下载记录(天)'
                                    }
                                }
                            ]
                        }
                    ]
                },
                # Row 3: optional filter on the downloading user(s)
                {
                    'component': 'VRow',
                    'content': [
                        {
                            'component': 'VCol',
                            'props': {
                                'cols': 12,
                            },
                            'content': [
                                {
                                    'component': 'VTextField',
                                    'props': {
                                        'model': 'cleanuser',
                                        'label': '清理下载用户',
                                        'placeholder': '多个用户,分割'
                                    }
                                }
                            ]
                        }
                    ]
                }
            ]
        }
    ], {
        "enabled": False,
        "onlyonce": False,
        "notify": False,
        "cleantype": "dest",
        "cron": "",
        "cleanuser": "",
        "cleandate": 30
    }
def get_page(self) -> List[dict]:
    """
    Build the plugin detail page: one card per recorded cleanup, newest
    first. Returns the page component tree with the data embedded.
    """
    # Saved cleanup records written by __clean_history
    historys = self.get_data('history')
    if not historys:
        return [
            {
                'component': 'div',
                'text': '暂无数据',
                'props': {
                    'class': 'text-center',
                }
            }
        ]
    # Newest deletions first
    historys = sorted(historys, key=lambda x: x.get('del_time'), reverse=True)
    contents = []
    for history in historys:
        season = history.get("season")
        # Card rows; season/episode rows are only shown for TV items.
        # (The original duplicated the whole VCardText list for the two
        # cases - this builds the same structure from one field list.)
        fields = [
            ('类型', history.get("type")),
            ('标题', history.get("title")),
            ('年份', history.get("year")),
        ]
        if season:
            fields.append(('季', season))
            fields.append(('集', history.get("episode")))
        fields.append(('时间', history.get("del_time")))
        sub_contents = [
            {
                'component': 'VCardText',
                'props': {
                    'class': 'pa-0 px-2'
                },
                'text': f'{label}:{value}'
            } for label, value in fields
        ]
        contents.append(
            {
                'component': 'VCard',
                'content': [
                    {
                        'component': 'div',
                        'props': {
                            'class': 'd-flex justify-space-start flex-nowrap flex-row',
                        },
                        'content': [
                            {
                                'component': 'div',
                                'content': [
                                    {
                                        'component': 'VImg',
                                        'props': {
                                            'src': history.get("image"),
                                            'height': 120,
                                            'width': 80,
                                            'aspect-ratio': '2/3',
                                            'class': 'object-cover shadow ring-gray-500',
                                            'cover': True
                                        }
                                    }
                                ]
                            },
                            {
                                'component': 'div',
                                'content': sub_contents
                            }
                        ]
                    }
                ]
            }
        )
    return [
        {
            'component': 'div',
            'props': {
                'class': 'grid gap-3 grid-info-card',
            },
            'content': contents
        }
    ]
def stop_service(self):
    """Tear down the plugin's scheduler, if one exists."""
    try:
        scheduler = self._scheduler
        if scheduler:
            scheduler.remove_all_jobs()
            # Shut down only when the scheduler actually started
            if scheduler.running:
                scheduler.shutdown()
            self._scheduler = None
    except Exception as e:
        logger.error("退出插件失败:%s" % str(e))

File diff suppressed because it is too large Load Diff

View File

@@ -1,147 +0,0 @@
import random
import re
from typing import Tuple
from lxml import etree
from app.core.config import settings
from app.log import logger
from app.plugins.autosignin.sites import _ISiteSigninHandler
from app.utils.http import RequestUtils
from app.utils.string import StringUtils
class Pt52(_ISiteSigninHandler):
    """
    Signin handler for 52pt.
    If an OpenAI key were configured the quiz answer could come from
    ChatGPT; as implemented the answer is picked at random.
    """
    # Site url this handler matches; every implementation sets its own
    site_url = "52pt.site"
    # Regex: already signed in today
    _sign_regex = ['今天已经签过到了']
    # Regex: signin succeeded (list may need extending)
    _success_regex = ['\\d+点魔力值']

    @classmethod
    def match(cls, url: str) -> bool:
        """
        Decide from *url* whether this handler is responsible for the site.
        :param url: site url
        :return: True when matched; signin() will then be invoked
        """
        return True if StringUtils.url_equal(url, cls.site_url) else False

    def signin(self, site_info: dict) -> Tuple[bool, str]:
        """
        Perform the signin (answer the site's daily quiz).
        :param site_info: site info (url, cookie, UA, proxy, render)
        :return: (success, result message)
        """
        site = site_info.get("name")
        site_cookie = site_info.get("cookie")
        ua = site_info.get("ua")
        render = site_info.get("render")
        proxy = site_info.get("proxy")
        # Load the quiz page to check whether we already signed in today
        html_text = self.get_page_source(url='https://52pt.site/bakatest.php',
                                         cookie=site_cookie,
                                         ua=ua,
                                         proxy=proxy,
                                         render=render)
        if not html_text:
            logger.error(f"{site} 签到失败,请检查站点连通性")
            return False, '签到失败,请检查站点连通性'
        if "login.php" in html_text:
            logger.error(f"{site} 签到失败Cookie已失效")
            return False, '签到失败Cookie已失效'
        sign_status = self.sign_in_result(html_res=html_text,
                                          regexs=self._sign_regex)
        if sign_status:
            logger.info(f"今日已签到")
            return True, '今日已签到'
        # Not signed in yet - parse the quiz page
        html = etree.HTML(html_text)
        if not html:
            return False, '签到失败'
        # Question id, candidate answer ids and the question text
        questionid = html.xpath("//input[@name='questionid']/@value")[0]
        option_ids = html.xpath("//input[@name='choice[]']/@value")
        question_str = html.xpath("//td[@class='text' and contains(text(),'请问:')]/text()")[0]
        # Extract the question text with a regex
        match = re.search(r'请问:(.+)', question_str)
        if match:
            question_str = match.group(1)
            logger.debug(f"获取到签到问题 {question_str}")
        else:
            logger.error(f"未获取到签到问题")
            return False, f"{site}】签到失败,未获取到签到问题"
        # Default to a random answer (a GPT answer would be submitted instead if available)
        choice = [option_ids[random.randint(0, len(option_ids) - 1)]]
        # Submit the answer
        return self.__signin(questionid=questionid,
                             choice=choice,
                             site_cookie=site_cookie,
                             ua=ua,
                             proxy=proxy,
                             site=site)

    def __signin(self, questionid: str,
                 choice: list,
                 site: str,
                 site_cookie: str,
                 ua: str,
                 proxy: bool) -> Tuple[bool, str]:
        """
        Post the quiz answer.
        Form fields:
        questionid: 450
        choice[]: 8
        choice[]: 4
        usercomment: mood text
        submit: submit
        Multiple-choice questions repeat choice[].
        """
        data = {
            'questionid': questionid,
            'choice[]': choice[0] if len(choice) == 1 else choice,
            'usercomment': '太难了!',
            'wantskip': '不会'
        }
        logger.debug(f"签到请求参数 {data}")
        sign_res = RequestUtils(cookies=site_cookie,
                                ua=ua,
                                proxies=settings.PROXY if proxy else None
                                ).post_res(url='https://52pt.site/bakatest.php', data=data)
        if not sign_res or sign_res.status_code != 200:
            logger.error(f"{site} 签到失败,签到接口请求失败")
            return False, '签到失败,签到接口请求失败'
        # Check for success first, then for "already signed in"
        sign_status = self.sign_in_result(html_res=sign_res.text,
                                          regexs=self._success_regex)
        if sign_status:
            logger.info(f"{site} 签到成功")
            return True, '签到成功'
        else:
            sign_status = self.sign_in_result(html_res=sign_res.text,
                                              regexs=self._sign_regex)
            if sign_status:
                logger.info(f"{site} 今日已签到")
                return True, '今日已签到'
            logger.error(f"{site} 签到失败,请到页面查看")
            return False, '签到失败,请到页面查看'

View File

@@ -1,88 +0,0 @@
# -*- coding: utf-8 -*-
import re
from abc import ABCMeta, abstractmethod
from typing import Tuple
import chardet
from ruamel.yaml import CommentedMap
from app.core.config import settings
from app.helper.browser import PlaywrightHelper
from app.log import logger
from app.utils.http import RequestUtils
from app.utils.string import StringUtils
class _ISiteSigninHandler(metaclass=ABCMeta):
    """
    Base class for per-site signin implementations. Every site signin
    class must inherit from it and implement match() and signin().
    Implementations placed in the sitesignin directory are auto-loaded.
    """
    # Site url a subclass matches; each implementation sets its own
    site_url = ""

    @abstractmethod
    def match(self, url: str) -> bool:
        """
        Decide from *url* whether this handler is responsible for the site.
        The default implementation (url equality) is usually sufficient.
        :param url: site url
        :return: True when matched; signin() will then be invoked
        """
        return True if StringUtils.url_equal(url, self.site_url) else False

    @abstractmethod
    def signin(self, site_info: CommentedMap) -> Tuple[bool, str]:
        """
        Perform the signin.
        :param site_info: site info (url, cookie, UA, ...)
        :return: (True|False, result message)
        """
        pass

    @staticmethod
    def get_page_source(url: str, cookie: str, ua: str, proxy: bool, render: bool) -> str:
        """
        Fetch the page source for *url*.
        :param url: target url
        :param cookie: cookie string
        :param ua: user agent
        :param proxy: whether to use the configured proxy
        :param render: whether to render with a headless browser
        :return: page source, or "" on failure
        """
        if render:
            # Browser-rendering path for JS-heavy sites
            return PlaywrightHelper().get_page_source(url=url,
                                                      cookies=cookie,
                                                      ua=ua,
                                                      proxies=settings.PROXY_SERVER if proxy else None)
        else:
            res = RequestUtils(cookies=cookie,
                               ua=ua,
                               proxies=settings.PROXY if proxy else None
                               ).get_res(url=url)
            if res is not None:
                # Detect the character encoding with chardet before decoding
                raw_data = res.content
                if raw_data:
                    try:
                        result = chardet.detect(raw_data)
                        encoding = result['encoding']
                        # Decode with the detected charset
                        return raw_data.decode(encoding)
                    except Exception as e:
                        # Fall back to requests' own decoding on any failure
                        logger.error(f"chardet解码失败{str(e)}")
                        return res.text
                else:
                    return res.text
            return ""

    @staticmethod
    def sign_in_result(html_res: str, regexs: list) -> bool:
        """
        Return True when any of *regexs* matches the page. Pixel sizes and
        color codes are stripped first to avoid false positives on digits.
        """
        html_text = re.sub(r"#\d+", "", re.sub(r"\d+px", "", html_res))
        for regex in regexs:
            if re.search(str(regex), html_text):
                return True
        return False

View File

@@ -1,75 +0,0 @@
from typing import Tuple
from ruamel.yaml import CommentedMap
from app.log import logger
from app.plugins.autosignin.sites import _ISiteSigninHandler
from app.utils.string import StringUtils
class BTSchool(_ISiteSigninHandler):
    """
    Signin handler for pt.btschool.club.
    """
    # Site url this handler matches
    site_url = "pt.btschool.club"
    # Text present on the page only while signin is still pending
    _sign_text = '每日签到'

    @classmethod
    def match(cls, url) -> bool:
        """
        Decide from *url* whether this handler is responsible for the site.
        :param url: site url
        :return: True when matched; signin() will then be invoked
        """
        return True if StringUtils.url_equal(url, cls.site_url) else False

    def signin(self, site_info: CommentedMap) -> Tuple[bool, str]:
        """
        Perform the signin.
        :param site_info: site info (url, cookie, UA, proxy, render)
        :return: (success, result message)
        """
        site = site_info.get("name")
        site_cookie = site_info.get("cookie")
        ua = site_info.get("ua")
        render = site_info.get("render")
        proxy = site_info.get("proxy")
        logger.info(f"{site} 开始签到")
        # Load the home page first to validate the cookie and check state
        html_text = self.get_page_source(url='https://pt.btschool.club',
                                         cookie=site_cookie,
                                         ua=ua,
                                         proxy=proxy,
                                         render=render)
        if not html_text:
            logger.error(f"{site} 签到失败,请检查站点连通性")
            return False, '签到失败,请检查站点连通性'
        if "login.php" in html_text:
            logger.error(f"{site} 签到失败Cookie已失效")
            return False, '签到失败Cookie已失效'
        # The "每日签到" link disappears once signed in
        if self._sign_text not in html_text:
            logger.info(f"{site} 今日已签到")
            return True, '今日已签到'
        # Trigger the signin action
        html_text = self.get_page_source(url='https://pt.btschool.club/index.php?action=addbonus',
                                         cookie=site_cookie,
                                         ua=ua,
                                         proxy=proxy,
                                         render=render)
        if not html_text:
            logger.error(f"{site} 签到失败,签到接口请求失败")
            return False, '签到失败,签到接口请求失败'
        # Sign text gone after the request means the signin took effect
        if self._sign_text not in html_text:
            logger.info(f"{site} 签到成功")
            return True, '签到成功'
        # Fixed: the original fell off the end here and implicitly returned
        # None, breaking the Tuple[bool, str] contract for callers.
        logger.error(f"{site} 签到失败,请到页面查看")
        return False, '签到失败,请到页面查看'

View File

@@ -1,148 +0,0 @@
import random
import re
from typing import Tuple
from lxml import etree
from ruamel.yaml import CommentedMap
from app.core.config import settings
from app.log import logger
from app.plugins.autosignin.sites import _ISiteSigninHandler
from app.utils.http import RequestUtils
from app.utils.string import StringUtils
class CHDBits(_ISiteSigninHandler):
    """
    Signin handler for CHDBits.
    If an OpenAI key were configured the quiz answer could come from
    ChatGPT; as implemented the answer is picked at random.
    """
    # Site url this handler matches
    site_url = "ptchdbits.co"
    # Regex: already signed in today
    _sign_regex = ['今天已经签过到了']
    # Regex: signin succeeded (list may need extending)
    _success_regex = ['\\d+点魔力值']

    @classmethod
    def match(cls, url: str) -> bool:
        """
        Decide from *url* whether this handler is responsible for the site.
        :param url: site url
        :return: True when matched; signin() will then be invoked
        """
        return True if StringUtils.url_equal(url, cls.site_url) else False

    def signin(self, site_info: CommentedMap) -> Tuple[bool, str]:
        """
        Perform the signin (answer the site's daily quiz).
        :param site_info: site info (url, cookie, UA, proxy, render)
        :return: (success, result message)
        """
        site = site_info.get("name")
        site_cookie = site_info.get("cookie")
        ua = site_info.get("ua")
        proxy = site_info.get("proxy")
        render = site_info.get("render")
        # Load the quiz page to check whether we already signed in today
        html_text = self.get_page_source(url='https://ptchdbits.co/bakatest.php',
                                         cookie=site_cookie,
                                         ua=ua,
                                         proxy=proxy,
                                         render=render)
        if not html_text:
            logger.error(f"{site} 签到失败,请检查站点连通性")
            return False, '签到失败,请检查站点连通性'
        if "login.php" in html_text:
            logger.error(f"{site} 签到失败Cookie已失效")
            return False, '签到失败Cookie已失效'
        sign_status = self.sign_in_result(html_res=html_text,
                                          regexs=self._sign_regex)
        if sign_status:
            logger.info(f"{site} 今日已签到")
            return True, '今日已签到'
        # Not signed in yet - parse the quiz page
        html = etree.HTML(html_text)
        if not html:
            return False, '签到失败'
        # Question id, candidate answer ids and the question text
        questionid = html.xpath("//input[@name='questionid']/@value")[0]
        option_ids = html.xpath("//input[@name='choice[]']/@value")
        question_str = html.xpath("//td[@class='text' and contains(text(),'请问:')]/text()")[0]
        # Extract the question text with a regex
        match = re.search(r'请问:(.+)', question_str)
        if match:
            question_str = match.group(1)
            logger.debug(f"获取到签到问题 {question_str}")
        else:
            logger.error(f"未获取到签到问题")
            return False, f"{site}】签到失败,未获取到签到问题"
        # Default to a random answer (a GPT answer would be submitted instead if available)
        choice = [option_ids[random.randint(0, len(option_ids) - 1)]]
        # Submit the answer
        return self.__signin(questionid=questionid,
                             choice=choice,
                             site_cookie=site_cookie,
                             ua=ua,
                             proxy=proxy,
                             site=site)

    def __signin(self, questionid: str,
                 choice: list,
                 site: str,
                 site_cookie: str,
                 ua: str,
                 proxy: bool) -> Tuple[bool, str]:
        """
        Post the quiz answer.
        Form fields:
        questionid: 450
        choice[]: 8
        choice[]: 4
        usercomment: mood text
        submit: submit
        Multiple-choice questions repeat choice[].
        """
        data = {
            'questionid': questionid,
            'choice[]': choice[0] if len(choice) == 1 else choice,
            'usercomment': '太难了!',
            'wantskip': '不会'
        }
        logger.debug(f"签到请求参数 {data}")
        sign_res = RequestUtils(cookies=site_cookie,
                                ua=ua,
                                proxies=settings.PROXY if proxy else None
                                ).post_res(url='https://ptchdbits.co/bakatest.php', data=data)
        if not sign_res or sign_res.status_code != 200:
            logger.error(f"{site} 签到失败,签到接口请求失败")
            return False, '签到失败,签到接口请求失败'
        # Check for success first, then for "already signed in"
        sign_status = self.sign_in_result(html_res=sign_res.text,
                                          regexs=self._success_regex)
        if sign_status:
            logger.info(f"{site} 签到成功")
            return True, '签到成功'
        else:
            sign_status = self.sign_in_result(html_res=sign_res.text,
                                              regexs=self._sign_regex)
            if sign_status:
                logger.info(f"{site} 今日已签到")
                return True, '今日已签到'
            logger.error(f"{site} 签到失败,请到页面查看")
            return False, '签到失败,请到页面查看'

View File

@@ -1,62 +0,0 @@
from typing import Tuple
from ruamel.yaml import CommentedMap
from app.log import logger
from app.plugins.autosignin.sites import _ISiteSigninHandler
from app.utils.string import StringUtils
class HaiDan(_ISiteSigninHandler):
    """
    Signin handler for haidan.video.
    """
    # Site url this handler matches
    site_url = "haidan.video"
    # Regex indicating the signin took effect (or was already done)
    _succeed_regex = ['(?<=value=")已经打卡(?=")']

    @classmethod
    def match(cls, url: str) -> bool:
        """
        Decide whether *url* belongs to this site; when it does the
        framework calls this class's signin().
        :param url: site url
        :return: True when matched
        """
        return bool(StringUtils.url_equal(url, cls.site_url))

    def signin(self, site_info: CommentedMap) -> Tuple[bool, str]:
        """
        Perform the signin; visiting the signin page performs it directly.
        :param site_info: site info (url, cookie, UA, proxy, render)
        :return: (success, result message)
        """
        site_name = site_info.get("name")
        page = self.get_page_source(url='https://www.haidan.video/signin.php',
                                    cookie=site_info.get("cookie"),
                                    ua=site_info.get("ua"),
                                    proxy=site_info.get("proxy"),
                                    render=site_info.get("render"))
        if not page:
            logger.error(f"{site_name} 签到失败,请检查站点连通性")
            return False, '签到失败,请检查站点连通性'
        if "login.php" in page:
            logger.error(f"{site_name} 签到失败Cookie已失效")
            return False, '签到失败Cookie已失效'
        if self.sign_in_result(html_res=page, regexs=self._succeed_regex):
            logger.info(f"{site_name} 签到成功")
            return True, '签到成功'
        logger.error(f"{site_name} 签到失败,签到接口返回 {page}")
        return False, '签到失败'

View File

@@ -1,83 +0,0 @@
import json
from typing import Tuple
from ruamel.yaml import CommentedMap
from app.core.config import settings
from app.log import logger
from app.plugins.autosignin.sites import _ISiteSigninHandler
from app.utils.http import RequestUtils
from app.utils.string import StringUtils
class Hares(_ISiteSigninHandler):
    """
    Signin handler for club.hares.top.
    """
    # Site url this handler matches
    site_url = "club.hares.top"
    # Marker text: already signed in
    _sign_text = '已签到'

    @classmethod
    def match(cls, url: str) -> bool:
        """
        Decide from *url* whether this handler is responsible for the site.
        :param url: site url
        :return: True when matched; signin() will then be invoked
        """
        return True if StringUtils.url_equal(url, cls.site_url) else False

    def signin(self, site_info: CommentedMap) -> Tuple[bool, str]:
        """
        Perform the signin via the site's JSON attendance endpoint.
        :param site_info: site info (url, cookie, UA, proxy, render)
        :return: (success, result message)
        """
        site = site_info.get("name")
        site_cookie = site_info.get("cookie")
        ua = site_info.get("ua")
        proxy = site_info.get("proxy")
        render = site_info.get("render")
        # Load the home page first to validate the cookie
        html_text = self.get_page_source(url='https://club.hares.top',
                                         cookie=site_cookie,
                                         ua=ua,
                                         proxy=proxy,
                                         render=render)
        if not html_text:
            logger.error(f"{site} 模拟访问失败,请检查站点连通性")
            return False, '模拟访问失败,请检查站点连通性'
        if "login.php" in html_text:
            logger.error(f"{site} 模拟访问失败Cookie已失效")
            return False, '模拟访问失败Cookie已失效'
        # if self._sign_text in html_res.text:
        #     logger.info(f"今日已签到")
        #     return True, '今日已签到'
        # The attendance endpoint returns JSON, so request it with an Accept header
        headers = {
            'Accept': 'application/json',
            "User-Agent": ua
        }
        sign_res = RequestUtils(cookies=site_cookie,
                                headers=headers,
                                proxies=settings.PROXY if proxy else None
                                ).get_res(url="https://club.hares.top/attendance.php?action=sign")
        if not sign_res or sign_res.status_code != 200:
            logger.error(f"{site} 签到失败,签到接口请求失败")
            return False, '签到失败,签到接口请求失败'
        # {"code":1,"msg":"您今天已经签到过了"}
        # {"code":0,"msg":"签到成功"}
        sign_dict = json.loads(sign_res.text)
        if sign_dict['code'] == 0:
            logger.info(f"{site} 签到成功")
            return True, '签到成功'
        else:
            # Any non-zero code is treated as "already signed in today"
            logger.info(f"{site} 今日已签到")
            return True, '今日已签到'

View File

@@ -1,81 +0,0 @@
from typing import Tuple
from ruamel.yaml import CommentedMap
from app.core.config import settings
from app.log import logger
from app.plugins.autosignin.sites import _ISiteSigninHandler
from app.utils.http import RequestUtils
from app.utils.string import StringUtils
class HD4fans(_ISiteSigninHandler):
    """
    Signin handler for pt.hd4fans.org.
    """
    # Site url this handler matches
    site_url = "pt.hd4fans.org"
    # Marker html: already signed in today
    _repeat_text = '<span id="checkedin">[签到成功]</span>'
    # NOTE(review): _success_text is never referenced below
    _success_text = "签到成功"

    @classmethod
    def match(cls, url: str) -> bool:
        """
        Decide from *url* whether this handler is responsible for the site.
        :param url: site url
        :return: True when matched; signin() will then be invoked
        """
        return True if StringUtils.url_equal(url, cls.site_url) else False

    def signin(self, site_info: CommentedMap) -> Tuple[bool, str]:
        """
        Perform the signin.
        :param site_info: site info (url, cookie, UA, proxy, render)
        :return: (success, result message)
        """
        site = site_info.get("name")
        site_cookie = site_info.get("cookie")
        ua = site_info.get("ua")
        proxy = site_info.get("proxy")
        render = site_info.get("render")
        # Load the index page to validate the cookie and check state
        html_text = self.get_page_source(url='https://pt.hd4fans.org/index.php',
                                         cookie=site_cookie,
                                         ua=ua,
                                         proxy=proxy,
                                         render=render)
        if not html_text:
            logger.error(f"{site} 签到失败,请检查站点连通性")
            return False, '签到失败,请检查站点连通性'
        if "login.php" in html_text:
            logger.error(f"{site} 签到失败Cookie已失效")
            return False, '签到失败Cookie已失效'
        # Already signed in today?
        if self._repeat_text in html_text:
            logger.info(f"{site} 今日已签到")
            return True, '今日已签到'
        # Signin POST
        data = {
            'action': 'checkin'
        }
        sign_res = RequestUtils(cookies=site_cookie,
                                ua=ua,
                                proxies=settings.PROXY if proxy else None
                                ).post_res(url="https://pt.hd4fans.org/checkin.php", data=data)
        if not sign_res or sign_res.status_code != 200:
            logger.error(f"{site} 签到失败,请检查站点连通性")
            return False, '签到失败,请检查站点连通性'
        # On success the endpoint returns the earned bonus as plain digits
        if sign_res.text and sign_res.text.isdigit():
            logger.info(f"{site} 签到成功")
            return True, '签到成功'
        logger.error(f"{site} 签到失败,签到接口返回 {sign_res.text}")
        return False, '签到失败'

View File

@@ -1,69 +0,0 @@
from typing import Tuple
from ruamel.yaml import CommentedMap
from app.core.config import settings
from app.log import logger
from app.plugins.autosignin.sites import _ISiteSigninHandler
from app.utils.http import RequestUtils
from app.utils.string import StringUtils
class HDArea(_ISiteSigninHandler):
    """
    Signin handler for hdarea.club.
    """
    # Site url this handler matches
    site_url = "hdarea.club"
    # Marker text: signin just succeeded
    _success_text = "此次签到您获得"
    # Marker text: already signed in today
    _repeat_text = "请不要重复签到哦"

    @classmethod
    def match(cls, url: str) -> bool:
        """
        Decide from *url* whether this handler is responsible for the site.
        :param url: site url
        :return: True when matched; signin() will then be invoked
        """
        return True if StringUtils.url_equal(url, cls.site_url) else False

    def signin(self, site_info: CommentedMap) -> Tuple[bool, str]:
        """
        Perform the signin; the site signs in via a direct POST and the
        response body tells the outcome.
        :param site_info: site info (url, cookie, UA, proxy)
        :return: (success, result message)
        """
        site = site_info.get("name")
        site_cookie = site_info.get("cookie")
        ua = site_info.get("ua")
        proxies = settings.PROXY if site_info.get("proxy") else None
        # Signin POST
        data = {
            'action': 'sign_in'
        }
        html_res = RequestUtils(cookies=site_cookie,
                                ua=ua,
                                proxies=proxies
                                ).post_res(url="https://www.hdarea.club/sign_in.php", data=data)
        if not html_res or html_res.status_code != 200:
            logger.error(f"{site} 签到失败,请检查站点连通性")
            return False, '签到失败,请检查站点连通性'
        if "login.php" in html_res.text:
            logger.error(f"{site} 签到失败Cookie已失效")
            return False, '签到失败Cookie已失效'
        # Success response looks like '已连续签到278天此次签到您获得了100魔力值奖励!'
        if self._success_text in html_res.text:
            logger.info(f"{site} 签到成功")
            return True, '签到成功'
        if self._repeat_text in html_res.text:
            logger.info(f"{site} 今日已签到")
            return True, '今日已签到'
        logger.error(f"{site} 签到失败,签到接口返回 {html_res.text}")
        return False, '签到失败'

View File

@@ -1,117 +0,0 @@
import json
from typing import Tuple
from lxml import etree
from ruamel.yaml import CommentedMap
from app.core.config import settings
from app.log import logger
from app.plugins.autosignin.sites import _ISiteSigninHandler
from app.utils.http import RequestUtils
from app.utils.string import StringUtils
class HDChina(_ISiteSigninHandler):
    """
    Signin handler for hdchina.org.
    """
    # Site url this handler matches
    site_url = "hdchina.org"
    # Marker html: already signed in
    _sign_regex = ['<a class="label label-default" href="#">已签到</a>']

    @classmethod
    def match(cls, url: str) -> bool:
        """
        Decide from *url* whether this handler is responsible for the site.
        :param url: site url
        :return: True when matched; signin() will then be invoked
        """
        return True if StringUtils.url_equal(url, cls.site_url) else False

    def signin(self, site_info: CommentedMap) -> Tuple[bool, str]:
        """
        Perform the signin (CSRF-token protected POST).
        :param site_info: site info (url, cookie, UA, proxy)
        :return: (success, result message)
        """
        site = site_info.get("name")
        site_cookie = site_info.get("cookie")
        ua = site_info.get("ua")
        proxies = settings.PROXY if site_info.get("proxy") else None
        # The hdchina cookie expires after every signin; keep only the
        # "hdchina=" part and rely on fresh cookies from the response.
        cookie = ""
        sub_strs = site_cookie.split(";")
        for sub_str in sub_strs:
            if "hdchina=" in sub_str:
                cookie += sub_str + ";"
        if "hdchina=" not in cookie:
            logger.error(f"{site} 签到失败Cookie已失效")
            return False, '签到失败Cookie已失效'
        site_cookie = cookie
        # Load the index page
        html_res = RequestUtils(cookies=site_cookie,
                                ua=ua,
                                proxies=proxies
                                ).get_res(url="https://hdchina.org/index.php")
        if not html_res or html_res.status_code != 200:
            logger.error(f"{site} 签到失败,请检查站点连通性")
            return False, '签到失败,请检查站点连通性'
        if "login.php" in html_res.text or "阻断页面" in html_res.text:
            logger.error(f"{site} 签到失败Cookie失效")
            return False, '签到失败Cookie失效'
        # Use the refreshed cookies returned by the index request for signing
        site_cookie = ';'.join(['{}={}'.format(k, v) for k, v in html_res.cookies.get_dict().items()])
        # Already signed in?
        html_res.encoding = "utf-8"
        sign_status = self.sign_in_result(html_res=html_res.text,
                                          regexs=self._sign_regex)
        if sign_status:
            logger.info(f"{site} 今日已签到")
            return True, '今日已签到'
        # Not signed in yet - parse the page for the CSRF token
        html = etree.HTML(html_res.text)
        if not html:
            return False, '签到失败'
        # Fixed: the original indexed xpath(...)[0] directly, which raised
        # IndexError when the meta tag was absent and made the "token not
        # found" branch below unreachable.
        x_csrf_list = html.xpath("//meta[@name='x-csrf']/@content")
        if not x_csrf_list or not x_csrf_list[0]:
            # Fixed: the original message lacked the f-prefix and logged the
            # literal text "{site}".
            logger.error(f"{site} 签到失败获取x-csrf失败")
            return False, '签到失败'
        x_csrf = x_csrf_list[0]
        logger.debug(f"获取到x-csrf {x_csrf}")
        # Post the signin with the CSRF token
        data = {
            'csrf': x_csrf
        }
        sign_res = RequestUtils(cookies=site_cookie,
                                ua=ua,
                                proxies=proxies
                                ).post_res(url="https://hdchina.org/plugin_sign-in.php?cmd=signin", data=data)
        if not sign_res or sign_res.status_code != 200:
            logger.error(f"{site} 签到失败,签到接口请求失败")
            return False, '签到失败,签到接口请求失败'
        sign_dict = json.loads(sign_res.text)
        logger.debug(f"签到返回结果 {sign_dict}")
        if sign_dict['state']:
            # {'state': 'success', 'signindays': 10, 'integral': 20}
            logger.info(f"{site} 签到成功")
            return True, '签到成功'
        else:
            # {'state': False, 'msg': '不正确的CSRF / Incorrect CSRF token'}
            logger.error(f"{site} 签到失败不正确的CSRF / Incorrect CSRF token")
            return False, '签到失败'

View File

@@ -1,66 +0,0 @@
from typing import Tuple
from ruamel.yaml import CommentedMap
from app.log import logger
from app.plugins.autosignin.sites import _ISiteSigninHandler
from app.utils.string import StringUtils
class HDCity(_ISiteSigninHandler):
    """
    Signin handler for hdcity.city.
    """
    # Site url this handler matches
    site_url = "hdcity.city"
    # Marker text: signin just succeeded
    _success_text = '本次签到获得魅力'
    # Marker text: already signed in today
    _repeat_text = '已签到'

    @classmethod
    def match(cls, url: str) -> bool:
        """
        Decide whether *url* belongs to this site; when it does the
        framework calls this class's signin().
        :param url: site url
        :return: True when matched
        """
        return bool(StringUtils.url_equal(url, cls.site_url))

    def signin(self, site_info: CommentedMap) -> Tuple[bool, str]:
        """
        Perform the signin; visiting the sign page performs it directly.
        :param site_info: site info (url, cookie, UA, proxy, render)
        :return: (success, result message)
        """
        site_name = site_info.get("name")
        page = self.get_page_source(url='https://hdcity.city/sign',
                                    cookie=site_info.get("cookie"),
                                    ua=site_info.get("ua"),
                                    proxy=site_info.get("proxy"),
                                    render=site_info.get("render"))
        if not page:
            logger.error(f"{site_name} 签到失败,请检查站点连通性")
            return False, '签到失败,请检查站点连通性'
        if "login" in page:
            logger.error(f"{site_name} 签到失败Cookie已失效")
            return False, '签到失败Cookie已失效'
        if self._success_text in page:
            logger.info(f"{site_name} 签到成功")
            return True, '签到成功'
        if self._repeat_text in page:
            logger.info(f"{site_name} 今日已签到")
            return True, '今日已签到'
        logger.error(f"{site_name} 签到失败,签到接口返回 {page}")
        return False, '签到失败'

View File

@@ -1,133 +0,0 @@
import json
import time
from typing import Tuple
from ruamel.yaml import CommentedMap
from app.core.config import settings
from app.helper.ocr import OcrHelper
from app.log import logger
from app.plugins.autosignin.sites import _ISiteSigninHandler
from app.utils.http import RequestUtils
from app.utils.string import StringUtils
class HDSky(_ISiteSigninHandler):
    """
    HDSky signin handler: solves the signin captcha through the OCR helper.
    """
    # Site url used by match(); every handler class must set its own.
    site_url = "hdsky.me"
    # Fragments meaning "already signed in today"
    _sign_regex = ['已签到']

    @classmethod
    def match(cls, url: str) -> bool:
        """
        Decide whether this handler is responsible for the given site url.
        :param url: site url
        :return: True when matched; signin() will then be invoked
        """
        return bool(StringUtils.url_equal(url, cls.site_url))

    def signin(self, site_info: CommentedMap) -> Tuple[bool, str]:
        """
        Perform the signin.
        :param site_info: site info (url, cookie, UA, proxy, render flag, ...)
        :return: (success, message)
        """
        site = site_info.get("name")
        site_cookie = site_info.get("cookie")
        ua = site_info.get("ua")
        proxy = site_info.get("proxy")
        render = site_info.get("render")
        # Check whether today's signin was already done
        html_text = self.get_page_source(url='https://hdsky.me',
                                         cookie=site_cookie,
                                         ua=ua,
                                         proxy=proxy,
                                         render=render)
        if not html_text:
            logger.error(f"{site} 签到失败,请检查站点连通性")
            return False, '签到失败,请检查站点连通性'
        if "login.php" in html_text:
            logger.error(f"{site} 签到失败Cookie已失效")
            return False, '签到失败Cookie已失效'
        sign_status = self.sign_in_result(html_res=html_text,
                                          regexs=self._sign_regex)
        if sign_status:
            logger.info(f"{site} 今日已签到")
            return True, '今日已签到'
        # Request a captcha hash; retry a few times to ride out network hiccups
        res_times = 0
        img_hash = None
        while not img_hash and res_times <= 3:
            image_res = RequestUtils(cookies=site_cookie,
                                     ua=ua,
                                     proxies=settings.PROXY if proxy else None
                                     ).post_res(url='https://hdsky.me/image_code_ajax.php',
                                                data={'action': 'new'})
            if image_res and image_res.status_code == 200:
                image_json = json.loads(image_res.text)
                # .get avoids a KeyError on an unexpected payload
                if image_json.get("success"):
                    img_hash = image_json.get("code")
                    break
            res_times += 1
            logger.debug(f"获取{site}验证码失败,正在进行重试,目前重试次数 {res_times}")
            time.sleep(1)
        if img_hash:
            # Full captcha image url
            img_get_url = 'https://hdsky.me/image.php?action=regimage&imagehash=%s' % img_hash
            logger.debug(f"获取到{site}验证码链接 {img_get_url}")
            # Run OCR up to several times until a 6-character code is recognised
            times = 0
            ocr_result = None
            while times <= 3:
                ocr_result = OcrHelper().get_captcha_text(image_url=img_get_url,
                                                          cookie=site_cookie,
                                                          ua=ua)
                logger.debug(f"ocr识别{site}验证码 {ocr_result}")
                if ocr_result:
                    if len(ocr_result) == 6:
                        logger.info(f"ocr识别{site}验证码成功 {ocr_result}")
                        break
                times += 1
                logger.debug(f"ocr识别{site}验证码失败,正在进行重试,目前重试次数 {times}")
                time.sleep(1)
            if ocr_result:
                # Assemble the signin request
                data = {
                    'action': 'showup',
                    'imagehash': img_hash,
                    'imagestring': ocr_result
                }
                res = RequestUtils(cookies=site_cookie,
                                   ua=ua,
                                   proxies=settings.PROXY if proxy else None
                                   ).post_res(url='https://hdsky.me/showup.php', data=data)
                if res and res.status_code == 200:
                    # Parse the response once instead of three separate times
                    sign_json = json.loads(res.text)
                    if sign_json.get("success"):
                        logger.info(f"{site} 签到成功")
                        return True, '签到成功'
                    elif str(sign_json.get("message")) == "date_unmatch":
                        # Duplicate signin
                        logger.warn(f"{site} 重复成功")
                        return True, '今日已签到'
                    elif str(sign_json.get("message")) == "invalid_imagehash":
                        # Wrong captcha
                        logger.warn(f"{site} 签到失败:验证码错误")
                        return False, '签到失败:验证码错误'
        logger.error(f'{site} 签到失败:未获取到验证码')
        return False, '签到失败:未获取到验证码'

View File

@@ -1,82 +0,0 @@
import re
from typing import Tuple
from ruamel.yaml import CommentedMap
from app.log import logger
from app.plugins.autosignin.sites import _ISiteSigninHandler
from app.utils.string import StringUtils
class HDUpt(_ISiteSigninHandler):
    """
    HDU signin handler.
    """
    # Site url used by match(); every handler class must set its own.
    site_url = "pt.hdupt.com"
    # Fragment meaning "already signed in today"
    _sign_regex = ['<span id="yiqiandao">']
    # Fragment meaning the signin succeeded
    _success_text = '本次签到获得魅力'

    @classmethod
    def match(cls, url: str) -> bool:
        """
        Decide whether this handler is responsible for the given site url.
        :param url: site url
        :return: True when matched; signin() will then be invoked
        """
        return bool(StringUtils.url_equal(url, cls.site_url))

    def signin(self, site_info: CommentedMap) -> Tuple[bool, str]:
        """
        Perform the signin.
        :param site_info: site info (url, cookie, UA, proxy, render flag, ...)
        :return: (success, message)
        """
        site = site_info.get("name")
        site_cookie = site_info.get("cookie")
        ua = site_info.get("ua")
        proxy = site_info.get("proxy")
        render = site_info.get("render")
        # First check the homepage for the "already signed" marker
        html_text = self.get_page_source(url='https://pt.hdupt.com',
                                         cookie=site_cookie,
                                         ua=ua,
                                         proxy=proxy,
                                         render=render)
        if not html_text:
            logger.error(f"{site} 签到失败,请检查站点连通性")
            return False, '签到失败,请检查站点连通性'
        if "login.php" in html_text:
            logger.error(f"{site} 签到失败Cookie已失效")
            return False, '签到失败Cookie已失效'
        sign_status = self.sign_in_result(html_res=html_text,
                                          regexs=self._sign_regex)
        if sign_status:
            logger.info(f"{site} 今日已签到")
            return True, '今日已签到'
        # Perform the signin
        html_text = self.get_page_source(url='https://pt.hdupt.com/added.php?action=qiandao',
                                         cookie=site_cookie,
                                         ua=ua,
                                         proxy=proxy,
                                         render=render)
        if not html_text:
            logger.error(f"{site} 签到失败,请检查站点连通性")
            return False, '签到失败,请检查站点连通性'
        logger.debug(f"{site} 签到接口返回 {html_text}")
        # The endpoint answers with a numeric fragment (e.g. ".23") on success;
        # a single regex search replaces the original findall/map/list/len chain
        if re.search(r"\d+", html_text):
            logger.info(f"{site} 签到成功")
            return True, '签到成功'
        logger.error(f"{site} 签到失败,签到接口返回 {html_text}")
        return False, '签到失败'

View File

@@ -1,132 +0,0 @@
import json
import time
from typing import Tuple
from lxml import etree
from ruamel.yaml import CommentedMap
from app.core.config import settings
from app.helper.ocr import OcrHelper
from app.log import logger
from app.plugins.autosignin.sites import _ISiteSigninHandler
from app.utils.http import RequestUtils
from app.utils.string import StringUtils
class Opencd(_ISiteSigninHandler):
    """
    OpenCD signin handler: solves the signin captcha through the OCR helper.
    """
    # Site url used by match(); every handler class must set its own.
    site_url = "open.cd"
    # Fragment meaning "already signed in today"
    _repeat_text = "/plugin_sign-in.php?cmd=show-log"

    @classmethod
    def match(cls, url: str) -> bool:
        """
        Decide whether this handler is responsible for the given site url.
        :param url: site url
        :return: True when matched; signin() will then be invoked
        """
        return bool(StringUtils.url_equal(url, cls.site_url))

    def signin(self, site_info: CommentedMap) -> Tuple[bool, str]:
        """
        Perform the signin.
        :param site_info: site info (url, cookie, UA, proxy, render flag, ...)
        :return: (success, message)
        """
        site = site_info.get("name")
        site_cookie = site_info.get("cookie")
        ua = site_info.get("ua")
        proxy = site_info.get("proxy")
        render = site_info.get("render")
        # Check whether today's signin was already done
        html_text = self.get_page_source(url='https://www.open.cd',
                                         cookie=site_cookie,
                                         ua=ua,
                                         proxy=proxy,
                                         render=render)
        if not html_text:
            logger.error(f"{site} 签到失败,请检查站点连通性")
            return False, '签到失败,请检查站点连通性'
        if "login.php" in html_text:
            logger.error(f"{site} 签到失败Cookie已失效")
            return False, '签到失败Cookie已失效'
        if self._repeat_text in html_text:
            logger.info(f"{site} 今日已签到")
            return True, '今日已签到'
        # Fetch the signin form with the captcha parameters
        html_text = self.get_page_source(url='https://www.open.cd/plugin_sign-in.php',
                                         cookie=site_cookie,
                                         ua=ua,
                                         proxy=proxy,
                                         render=render)
        if not html_text:
            logger.error(f"{site} 签到失败,请检查站点连通性")
            return False, '签到失败,请检查站点连通性'
        # Parse the form html
        html = etree.HTML(html_text)
        if not html:
            return False, '签到失败'
        # Guard the xpath results before indexing: the original indexed [0]
        # unconditionally, which raised IndexError when the form was missing
        img_urls = html.xpath('//form[@id="frmSignin"]//img/@src')
        img_hashes = html.xpath('//form[@id="frmSignin"]//input[@name="imagehash"]/@value')
        if not img_urls or not img_hashes:
            logger.error(f"{site} 签到失败,获取签到参数失败")
            return False, '签到失败,获取签到参数失败'
        img_url = img_urls[0]
        img_hash = img_hashes[0]
        # Full captcha image url
        img_get_url = 'https://www.open.cd/%s' % img_url
        logger.debug(f"{site} 获取到{site}验证码链接 {img_get_url}")
        # Run OCR up to several times until a 6-character code is recognised
        times = 0
        ocr_result = None
        while times <= 3:
            ocr_result = OcrHelper().get_captcha_text(image_url=img_get_url,
                                                      cookie=site_cookie,
                                                      ua=ua)
            logger.debug(f"ocr识别{site}验证码 {ocr_result}")
            if ocr_result:
                if len(ocr_result) == 6:
                    logger.info(f"ocr识别{site}验证码成功 {ocr_result}")
                    break
            times += 1
            logger.debug(f"ocr识别{site}验证码失败,正在进行重试,目前重试次数 {times}")
            time.sleep(1)
        if ocr_result:
            # Assemble the signin request
            data = {
                'imagehash': img_hash,
                'imagestring': ocr_result
            }
            sign_res = RequestUtils(cookies=site_cookie,
                                    ua=ua,
                                    proxies=settings.PROXY if proxy else None
                                    ).post_res(url='https://www.open.cd/plugin_sign-in.php?cmd=signin', data=data)
            if sign_res and sign_res.status_code == 200:
                logger.debug(f"sign_res返回 {sign_res.text}")
                # sign_res.text = '{"state":"success","signindays":"0","integral":"10"}'
                sign_dict = json.loads(sign_res.text)
                if sign_dict.get('state'):
                    logger.info(f"{site} 签到成功")
                    return True, '签到成功'
                else:
                    logger.error(f"{site} 签到失败,签到接口返回 {sign_dict}")
                    return False, '签到失败'
        logger.error(f'{site} 签到失败:未获取到验证码')
        return False, '签到失败:未获取到验证码'

View File

@@ -1,65 +0,0 @@
import json
from typing import Tuple
from ruamel.yaml import CommentedMap
from app.log import logger
from app.plugins.autosignin.sites import _ISiteSigninHandler
from app.utils.string import StringUtils
class PTerClub(_ISiteSigninHandler):
    """
    PTerClub signin handler (JSON attendance endpoint).
    """
    # Site url used by match(); every handler class must set its own.
    site_url = "pterclub.com"

    @classmethod
    def match(cls, url: str) -> bool:
        """
        Decide whether this handler is responsible for the given site url.
        :param url: site url
        :return: True when matched; signin() will then be invoked
        """
        return bool(StringUtils.url_equal(url, cls.site_url))

    def signin(self, site_info: CommentedMap) -> Tuple[bool, str]:
        """
        Perform the signin.
        :param site_info: site info (url, cookie, UA, proxy, render flag, ...)
        :return: (success, message)
        """
        site = site_info.get("name")
        site_cookie = site_info.get("cookie")
        ua = site_info.get("ua")
        proxy = site_info.get("proxy")
        render = site_info.get("render")
        # The attendance endpoint both performs the signin and reports status
        html_text = self.get_page_source(url='https://pterclub.com/attendance-ajax.php',
                                         cookie=site_cookie,
                                         ua=ua,
                                         proxy=proxy,
                                         render=render)
        if not html_text:
            logger.error(f"{site} 签到失败,请检查站点连通性")
            return False, '签到失败,请检查站点连通性'
        if "login.php" in html_text:
            logger.error(f"{site} 签到失败Cookie已失效")
            return False, '签到失败Cookie已失效'
        try:
            sign_dict = json.loads(html_text)
        except Exception as e:
            logger.error(f"{site} 签到失败,签到接口返回数据异常,错误信息:{str(e)}")
            return False, '签到失败,签到接口返回数据异常'
        # .get avoids a KeyError on a payload without a "status" field
        if sign_dict.get('status') == '1':
            # {"status":"1","data":" (签到已成功300)","message":"<p>这是您的第<b>237</b>次签到,
            # 已连续签到<b>237</b>天。</p><p>本次签到获得<b>300</b>克猫粮。</p>"}
            logger.info(f"{site} 签到成功")
            return True, '签到成功'
        else:
            # {"status":"0","data":"抱歉","message":"您今天已经签到过了,请勿重复刷新。"}
            logger.info(f"{site} 今日已签到")
            return True, '今日已签到'

View File

@@ -1,274 +0,0 @@
import json
import os
import time
from io import BytesIO
from typing import Tuple
from PIL import Image
from lxml import etree
from ruamel.yaml import CommentedMap
from app.core.config import settings
from app.log import logger
from app.plugins.autosignin.sites import _ISiteSigninHandler
from app.utils.http import RequestUtils
from app.utils.string import StringUtils
class Tjupt(_ISiteSigninHandler):
    """
    TJUPT signin handler: answers the poster-guessing captcha by comparing a
    perceptual hash of the captcha image with Douban poster images, caching
    confirmed answers locally for reuse.
    """
    # Site url used by match(); every handler class must set its own.
    site_url = "tjupt.org"
    # Signin endpoint
    _sign_in_url = 'https://www.tjupt.org/attendance.php'
    # Fragment meaning "already signed in today"
    _sign_regex = ['<a href="attendance.php">今日已签到</a>']
    # Fragments meaning the signin request succeeded
    _succeed_regex = ['这是您的首次签到,本次签到获得\\d+个魔力值。',
                      '签到成功,这是您的第\\d+次签到,已连续签到\\d+天,本次签到获得\\d+个魔力值。',
                      '重新签到成功,本次签到获得\\d+个魔力值']
    # Local cache of confirmed answers (captcha image hash -> answer text)
    _answer_path = settings.TEMP_PATH / "signin/"
    _answer_file = _answer_path / "tjupt.json"

    @classmethod
    def match(cls, url: str) -> bool:
        """
        Decide whether this handler is responsible for the given site url.
        :param url: site url
        :return: True when matched; signin() will then be invoked
        """
        return bool(StringUtils.url_equal(url, cls.site_url))

    def signin(self, site_info: CommentedMap) -> Tuple[bool, str]:
        """
        Perform the signin.
        :param site_info: site info (url, cookie, UA, proxy, render flag, ...)
        :return: (success, message)
        """
        site = site_info.get("name")
        site_cookie = site_info.get("cookie")
        ua = site_info.get("ua")
        proxy = site_info.get("proxy")
        render = site_info.get("render")
        # Ensure the answer-cache directory exists (exist_ok avoids a
        # check-then-create race the original exists()/makedirs pair had)
        os.makedirs(os.path.dirname(self._answer_file), exist_ok=True)
        # Fetch the signin page
        html_text = self.get_page_source(url=self._sign_in_url,
                                         cookie=site_cookie,
                                         ua=ua,
                                         proxy=proxy,
                                         render=render)
        if not html_text:
            logger.error(f"{site} 签到失败,请检查站点连通性")
            return False, '签到失败,请检查站点连通性'
        if "login.php" in html_text:
            logger.error(f"{site} 签到失败Cookie已失效")
            return False, '签到失败Cookie已失效'
        sign_status = self.sign_in_result(html_res=html_text,
                                          regexs=self._sign_regex)
        if sign_status:
            logger.info(f"{site} 今日已签到")
            return True, '今日已签到'
        # Not signed yet: parse the captcha form
        html = etree.HTML(html_text)
        if not html:
            return False, '签到失败'
        # Guard the xpath result: the original indexed [0] unconditionally,
        # which raised IndexError when the captcha block was missing
        img_src = html.xpath('//table[@class="captcha"]//img/@src')
        if not img_src:
            logger.error(f"{site} 签到失败,未获取到签到图片")
            return False, '签到失败,未获取到签到图片'
        # Captcha image url
        img_url = "https://www.tjupt.org" + img_src[0]
        logger.info(f"获取到签到图片 {img_url}")
        # Download the captcha and compute its perceptual hash
        captcha_img_res = RequestUtils(cookies=site_cookie,
                                       ua=ua,
                                       proxies=settings.PROXY if proxy else None
                                       ).get_res(url=img_url)
        if not captcha_img_res or captcha_img_res.status_code != 200:
            logger.error(f"{site} 签到图片 {img_url} 请求失败")
            return False, '签到失败,未获取到签到图片'
        captcha_img = Image.open(BytesIO(captcha_img_res.content))
        captcha_img_hash = self._tohash(captcha_img)
        logger.debug(f"签到图片hash {captcha_img_hash}")
        # Answer options: submit values plus their display text
        values = html.xpath("//input[@name='answer']/@value")
        options = html.xpath("//input[@name='answer']/following-sibling::text()")
        if not values or not options:
            logger.error(f"{site} 签到失败,未获取到答案选项")
            return False, '签到失败,未获取到答案选项'
        # (value, option text) pairs
        answers = list(zip(values, options))
        logger.debug(f"获取到所有签到选项 {answers}")
        # First try the local cache of confirmed answers
        exits_answers = {}
        try:
            with open(self._answer_file, 'r') as f:
                json_str = f.read()
            exits_answers = json.loads(json_str)
            # .get avoids the uncaught KeyError the original raised for every
            # captcha hash that had not been seen before
            captcha_answer = exits_answers.get(captcha_img_hash)
            # A cached answer exists for this hash: find the matching option
            if captcha_answer:
                for value, answer in answers:
                    if str(captcha_answer) == str(answer):
                        return self.__signin(answer=value,
                                             site_cookie=site_cookie,
                                             ua=ua,
                                             proxy=proxy,
                                             site=site)
        except (FileNotFoundError, IOError, OSError, json.JSONDecodeError) as e:
            logger.debug(f"查询本地已知答案失败:{str(e)},继续请求豆瓣查询")
        # No local answer: match each option against Douban poster images
        for value, answer in answers:
            if answer:
                # Douban title search
                db_res = RequestUtils().get_res(url=f'https://movie.douban.com/j/subject_suggest?q={answer}')
                if not db_res or db_res.status_code != 200:
                    logger.debug(f"签到选项 {answer} 未查询到豆瓣数据")
                    continue
                db_answers = json.loads(db_res.text)
                if not isinstance(db_answers, list):
                    db_answers = [db_answers]
                if len(db_answers) == 0:
                    logger.debug(f"签到选项 {answer} 查询到豆瓣数据为空")
                for db_answer in db_answers:
                    answer_img_url = db_answer['img']
                    # Hash the candidate poster image
                    answer_img_res = RequestUtils(referer="https://movie.douban.com").get_res(url=answer_img_url)
                    if not answer_img_res or answer_img_res.status_code != 200:
                        logger.debug(f"签到答案 {answer} {answer_img_url} 请求失败")
                        continue
                    answer_img = Image.open(BytesIO(answer_img_res.content))
                    answer_img_hash = self._tohash(answer_img)
                    logger.debug(f"签到答案图片hash {answer} {answer_img_hash}")
                    # Similarity above 0.9 is treated as the correct answer
                    score = self._comparehash(captcha_img_hash, answer_img_hash)
                    logger.info(f"签到图片与选项 {answer} 豆瓣图片相似度 {score}")
                    if score > 0.9:
                        return self.__signin(answer=value,
                                             site_cookie=site_cookie,
                                             ua=ua,
                                             proxy=proxy,
                                             site=site,
                                             exits_answers=exits_answers,
                                             captcha_img_hash=captcha_img_hash)
                # Throttle Douban requests to avoid an IP ban
                time.sleep(5)
        logger.error(f"豆瓣图片匹配,未获取到匹配答案")
        # No option matched: report failure
        return False, '签到失败,未获取到匹配答案'

    def __signin(self, answer, site_cookie, ua, proxy, site, exits_answers=None, captcha_img_hash=None):
        """
        Submit the chosen answer and verify the result page.
        """
        data = {
            'answer': answer,
            'submit': '提交'
        }
        logger.debug(f"提交data {data}")
        sign_in_res = RequestUtils(cookies=site_cookie,
                                   ua=ua,
                                   proxies=settings.PROXY if proxy else None
                                   ).post_res(url=self._sign_in_url, data=data)
        if not sign_in_res or sign_in_res.status_code != 200:
            logger.error(f"{site} 签到失败,签到接口请求失败")
            return False, '签到失败,签到接口请求失败'
        # Check the returned page for a success marker
        sign_status = self.sign_in_result(html_res=sign_in_res.text,
                                          regexs=self._succeed_regex)
        if sign_status:
            logger.info(f"签到成功")
            # Bug fix: the original tested `if exits_answers`, which is falsy
            # for an empty dict, so the very first answer was never cached
            if exits_answers is not None and captcha_img_hash:
                self.__write_local_answer(exits_answers=exits_answers,
                                          captcha_img_hash=captcha_img_hash,
                                          answer=answer)
            return True, '签到成功'
        else:
            logger.error(f"{site} 签到失败,请到页面查看")
            return False, '签到失败,请到页面查看'

    def __write_local_answer(self, exits_answers, captcha_img_hash, answer):
        """
        Persist a confirmed captcha answer to the local cache file.
        """
        try:
            exits_answers[captcha_img_hash] = answer
            formatted_data = json.dumps(exits_answers, indent=4)
            with open(self._answer_file, 'w') as f:
                f.write(formatted_data)
        except (FileNotFoundError, IOError, OSError) as e:
            logger.debug(f"签到成功写入本地文件失败:{str(e)}")

    @staticmethod
    def _tohash(img, shape=(10, 10)):
        """
        Average-hash of an image: resize, grayscale, then emit one bit per
        pixel depending on whether it is above the mean brightness.
        """
        img = img.resize(shape)
        gray = img.convert('L')
        s = 0
        hash_str = ''
        for i in range(shape[1]):
            for j in range(shape[0]):
                s = s + gray.getpixel((j, i))
        avg = s / (shape[0] * shape[1])
        for i in range(shape[1]):
            for j in range(shape[0]):
                if gray.getpixel((j, i)) > avg:
                    hash_str = hash_str + '1'
                else:
                    hash_str = hash_str + '0'
        return hash_str

    @staticmethod
    def _comparehash(hash1, hash2, shape=(10, 10)):
        """
        Return the fraction of matching bits between two hashes,
        or -1 when the lengths differ.
        """
        n = 0
        if len(hash1) != len(hash2):
            return -1
        for i in range(len(hash1)):
            if hash1[i] == hash2[i]:
                n = n + 1
        return n / (shape[0] * shape[1])

View File

@@ -1,97 +0,0 @@
import re
from typing import Tuple
from ruamel.yaml import CommentedMap
from app.core.config import settings
from app.log import logger
from app.plugins.autosignin.sites import _ISiteSigninHandler
from app.utils.http import RequestUtils
from app.utils.string import StringUtils
class TTG(_ISiteSigninHandler):
    """
    TTG (ToTheGlory) signin handler.
    """
    # Site url used by match(); every handler class must set its own.
    site_url = "totheglory.im"
    # Fragment meaning "already signed in today"
    _sign_regex = ['<b style="color:green;">已签到</b>']
    _sign_text = '亲,您今天已签到过,不要太贪哦'
    # Fragment meaning the signin succeeded
    _success_text = '您已连续签到'

    @classmethod
    def match(cls, url: str) -> bool:
        """
        Decide whether this handler is responsible for the given site url.
        :param url: site url
        :return: True when matched; signin() will then be invoked
        """
        return bool(StringUtils.url_equal(url, cls.site_url))

    def signin(self, site_info: CommentedMap) -> Tuple[bool, str]:
        """
        Perform the signin.
        :param site_info: site info (url, cookie, UA, proxy, render flag, ...)
        :return: (success, message)
        """
        site = site_info.get("name")
        site_cookie = site_info.get("cookie")
        ua = site_info.get("ua")
        proxy = site_info.get("proxy")
        render = site_info.get("render")
        # Fetch the homepage, which embeds the signin parameters
        html_text = self.get_page_source(url="https://totheglory.im",
                                         cookie=site_cookie,
                                         ua=ua,
                                         proxy=proxy,
                                         render=render)
        if not html_text:
            logger.error(f"{site} 签到失败,请检查站点连通性")
            return False, '签到失败,请检查站点连通性'
        if "login.php" in html_text:
            logger.error(f"{site} 签到失败Cookie已失效")
            return False, '签到失败Cookie已失效'
        # Already signed in today?
        sign_status = self.sign_in_result(html_res=html_text,
                                          regexs=self._sign_regex)
        if sign_status:
            logger.info(f"{site} 今日已签到")
            return True, '今日已签到'
        # Extract the signin parameters; guard against a missing match (the
        # original called .group() directly and raised AttributeError when
        # the page layout changed)
        timestamp_match = re.search('(?<=signed_timestamp: ")\\d{10}', html_text)
        token_match = re.search('(?<=signed_token: ").*(?=")', html_text)
        if not timestamp_match or not token_match:
            logger.error(f"{site} 签到失败,未获取到签到参数")
            return False, '签到失败,未获取到签到参数'
        signed_timestamp = timestamp_match.group()
        signed_token = token_match.group()
        logger.debug(f"signed_timestamp={signed_timestamp} signed_token={signed_token}")
        data = {
            'signed_timestamp': signed_timestamp,
            'signed_token': signed_token
        }
        # Submit the signin
        sign_res = RequestUtils(cookies=site_cookie,
                                ua=ua,
                                proxies=settings.PROXY if proxy else None
                                ).post_res(url="https://totheglory.im/signed.php",
                                           data=data)
        if not sign_res or sign_res.status_code != 200:
            logger.error(f"{site} 签到失败,签到接口请求失败")
            return False, '签到失败,签到接口请求失败'
        sign_res.encoding = "utf-8"
        if self._success_text in sign_res.text:
            logger.info(f"{site} 签到成功")
            return True, '签到成功'
        if self._sign_text in sign_res.text:
            logger.info(f"{site} 今日已签到")
            return True, '今日已签到'
        logger.error(f"{site} 签到失败,未知原因")
        return False, '签到失败,未知原因'

View File

@@ -1,123 +0,0 @@
import datetime
import random
import re
from typing import Tuple
from lxml import etree
from ruamel.yaml import CommentedMap
from app.core.config import settings
from app.log import logger
from app.plugins.autosignin.sites import _ISiteSigninHandler
from app.utils.http import RequestUtils
from app.utils.string import StringUtils
class U2(_ISiteSigninHandler):
    """
    U2 signin handler: submits the signin form with a randomly chosen answer.
    """
    # Site url used by match(); every handler class must set its own.
    site_url = "u2.dmhy.org"
    # Fragments meaning "already signed in today" (several locales)
    _sign_regex = ['<a href="showup.php">已签到</a>',
                   '<a href="showup.php">Show Up</a>',
                   '<a href="showup.php">Показать</a>',
                   '<a href="showup.php">已簽到</a>',
                   '<a href="showup.php">已簽到</a>']
    # Fragment meaning the signin succeeded
    _success_text = "window.location.href = 'showup.php';</script>"

    @classmethod
    def match(cls, url: str) -> bool:
        """
        Decide whether this handler is responsible for the given site url.
        :param url: site url
        :return: True when matched; signin() will then be invoked
        """
        return bool(StringUtils.url_equal(url, cls.site_url))

    def signin(self, site_info: CommentedMap) -> Tuple[bool, str]:
        """
        Perform the signin.
        :param site_info: site info (url, cookie, UA, proxy, render flag, ...)
        :return: (success, message)
        """
        site = site_info.get("name")
        site_cookie = site_info.get("cookie")
        ua = site_info.get("ua")
        proxy = site_info.get("proxy")
        render = site_info.get("render")
        now = datetime.datetime.now()
        # Site rule: no signin before 9 o'clock
        if now.hour < 9:
            logger.error(f"{site} 签到失败9点前不签到")
            return False, '签到失败9点前不签到'
        # Fetch the signin form
        html_text = self.get_page_source(url="https://u2.dmhy.org/showup.php",
                                         cookie=site_cookie,
                                         ua=ua,
                                         proxy=proxy,
                                         render=render)
        if not html_text:
            logger.error(f"{site} 签到失败,请检查站点连通性")
            return False, '签到失败,请检查站点连通性'
        if "login.php" in html_text:
            logger.error(f"{site} 签到失败Cookie已失效")
            return False, '签到失败Cookie已失效'
        # Already signed in today?
        sign_status = self.sign_in_result(html_res=html_text,
                                          regexs=self._sign_regex)
        if sign_status:
            logger.info(f"{site} 今日已签到")
            return True, '今日已签到'
        # Parse the signin form
        html = etree.HTML(html_text)
        if not html:
            return False, '签到失败'
        # Collect the form fields as lists so emptiness can be checked before
        # indexing. Bug fixes versus the original: (1) it tested `not re`
        # (the regex module, always truthy) instead of the req value, and
        # (2) it indexed [0] before any validation, risking IndexError.
        req_list = html.xpath("//form//td/input[@name='req']/@value")
        hash_list = html.xpath("//form//td/input[@name='hash']/@value")
        form_list = html.xpath("//form//td/input[@name='form']/@value")
        submit_name = html.xpath("//form//td/input[@type='submit']/@name")
        submit_value = html.xpath("//form//td/input[@type='submit']/@value")
        if not req_list or not hash_list or not form_list or not submit_name or not submit_value:
            # Bug fix: the f-prefix was missing, so "{site}" was logged literally
            logger.error(f"{site} 签到失败,未获取到相关签到参数")
            return False, '签到失败'
        req = req_list[0]
        hash_str = hash_list[0]
        form = form_list[0]
        # Pick one of the submit options at random
        answer_num = random.randint(0, 3)
        data = {
            'req': req,
            'hash': hash_str,
            'form': form,
            'message': '一切随缘~',
            submit_name[answer_num]: submit_value[answer_num]
        }
        # Submit the signin
        sign_res = RequestUtils(cookies=site_cookie,
                                ua=ua,
                                proxies=settings.PROXY if proxy else None
                                ).post_res(url="https://u2.dmhy.org/showup.php?action=show",
                                           data=data)
        if not sign_res or sign_res.status_code != 200:
            logger.error(f"{site} 签到失败,签到接口请求失败")
            return False, '签到失败,签到接口请求失败'
        # Success is signalled by a redirect script:
        # "<script type="text/javascript">window.location.href = 'showup.php';</script>"
        if self._success_text in sign_res.text:
            logger.info(f"{site} 签到成功")
            return True, '签到成功'
        else:
            logger.error(f"{site} 签到失败,未知原因")
            return False, '签到失败,未知原因'

View File

@@ -1,88 +0,0 @@
import json
from typing import Tuple
from lxml import etree
from ruamel.yaml import CommentedMap
from app.core.config import settings
from app.log import logger
from app.plugins.autosignin.sites import _ISiteSigninHandler
from app.utils.http import RequestUtils
from app.utils.string import StringUtils
class ZhuQue(_ISiteSigninHandler):
    """
    ZHUQUE handler: simulates a login and fires the character skill for bonus.
    """
    # Site url used by match(); every handler class must set its own.
    site_url = "zhuque.in"

    @classmethod
    def match(cls, url: str) -> bool:
        """
        Decide whether this handler is responsible for the given site url.
        :param url: site url
        :return: True when matched; signin() will then be invoked
        """
        return bool(StringUtils.url_equal(url, cls.site_url))

    def signin(self, site_info: CommentedMap) -> Tuple[bool, str]:
        """
        Perform the simulated login.
        :param site_info: site info (url, cookie, UA, proxy, render flag, ...)
        :return: (success, message)
        """
        site = site_info.get("name")
        site_cookie = site_info.get("cookie")
        ua = site_info.get("ua")
        proxy = site_info.get("proxy")
        render = site_info.get("render")
        # Fetch the homepage (also carries the CSRF token)
        html_text = self.get_page_source(url="https://zhuque.in",
                                         cookie=site_cookie,
                                         ua=ua,
                                         proxy=proxy,
                                         render=render)
        if not html_text:
            logger.error(f"{site} 模拟登录失败,请检查站点连通性")
            return False, '模拟登录失败,请检查站点连通性'
        if "login.php" in html_text:
            logger.error(f"{site} 模拟登录失败Cookie已失效")
            return False, '模拟登录失败Cookie已失效'
        html = etree.HTML(html_text)
        if not html:
            return False, '模拟登录失败'
        # Fire the character skill for bonus
        msg = '失败'
        # Guard the xpath result: the original indexed [0] unconditionally,
        # which raised IndexError when the meta tag was missing
        tokens = html.xpath("//meta[@name='x-csrf-token']/@content")
        x_csrf_token = tokens[0] if tokens else None
        if x_csrf_token:
            data = {
                "all": 1,
                "resetModal": "true"
            }
            headers = {
                "x-csrf-token": str(x_csrf_token),
                "Content-Type": "application/json; charset=utf-8",
                "User-Agent": ua
            }
            skill_res = RequestUtils(cookies=site_cookie,
                                     headers=headers,
                                     proxies=settings.PROXY if proxy else None
                                     ).post_res(url="https://zhuque.in/api/gaming/fireGenshinCharacterMagic", json=data)
            if not skill_res or skill_res.status_code != 200:
                # Bug fix: the original only logged here and then crashed on
                # json.loads(None); abort explicitly instead
                logger.error(f"模拟登录失败,释放技能失败")
                return False, '模拟登录失败,释放技能失败'
            # '{"status":200,"data":{"code":"FIRE_GENSHIN_CHARACTER_MAGIC_SUCCESS","bonus":0}}'
            skill_dict = json.loads(skill_res.text)
            if skill_dict['status'] == 200:
                bonus = int(skill_dict['data']['bonus'])
                msg = f'成功,获得{bonus}魔力'
        logger.info(f'{site}】模拟登录成功,技能释放{msg}')
        return True, f'模拟登录成功,技能释放{msg}'

View File

@@ -1,692 +0,0 @@
from datetime import datetime, timedelta
from functools import reduce
from pathlib import Path
from threading import RLock
from typing import Optional, Any, List, Dict, Tuple
from xml.dom.minidom import parseString
import pytz
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
from requests import Response
from app.chain.subscribe import SubscribeChain
from app.core.config import settings
from app.core.context import MediaInfo
from app.core.event import eventmanager
from app.log import logger
from app.modules.emby import Emby
from app.modules.jellyfin import Jellyfin
from app.modules.plex import Plex
from app.plugins import _PluginBase
from app.schemas import WebhookEventInfo
from app.schemas.types import MediaType, EventType
from app.utils.http import RequestUtils
lock = RLock()
class BestFilmVersion(_PluginBase):
# 插件名称
plugin_name = "收藏洗版"
# 插件描述
plugin_desc = "Jellyfin/Emby/Plex点击收藏电影后自动订阅洗版。"
# 插件图标
plugin_icon = "like.jpg"
# 主题色
plugin_color = "#E4003F"
# 插件版本
plugin_version = "2.0"
# 插件作者
plugin_author = "wlj"
# 作者主页
author_url = "https://github.com/developer-wlj"
# 插件配置项ID前缀
plugin_config_prefix = "bestfilmversion_"
# 加载顺序
plugin_order = 13
# 可使用的用户级别
auth_level = 2
# 私有变量
_scheduler: Optional[BackgroundScheduler] = None
_cache_path: Optional[Path] = None
subscribechain = None
# 配置属性
_enabled: bool = False
_cron: str = ""
_notify: bool = False
_webhook_enabled: bool = False
_only_once: bool = False
def init_plugin(self, config: dict = None):
    """
    Initialise the plugin from its saved configuration and (re)build the scheduler.
    :param config: saved plugin configuration dict (may be None on first run)
    """
    self._cache_path = settings.TEMP_PATH / "__best_film_version_cache__"
    self.subscribechain = SubscribeChain()
    # Stop any scheduler left over from a previous initialisation
    self.stop_service()
    # Load configuration values
    if config:
        self._enabled = config.get("enabled")
        self._cron = config.get("cron")
        self._notify = config.get("notify")
        self._webhook_enabled = config.get("webhook_enabled")
        self._only_once = config.get("only_once")
    if self._enabled:
        self._scheduler = BackgroundScheduler(timezone=settings.TZ)
        # Periodic polling only applies when webhook mode is off
        if not self._webhook_enabled:
            if self._cron:
                try:
                    self._scheduler.add_job(func=self.sync,
                                            trigger=CronTrigger.from_crontab(self._cron),
                                            name="收藏洗版")
                except Exception as err:
                    logger.error(f"定时任务配置错误:{str(err)}")
                    # Push a realtime message about the bad cron expression
                    self.systemmessage.put(f"执行周期配置错误:{str(err)}")
            else:
                # No cron configured: fall back to a 30-minute interval
                self._scheduler.add_job(self.sync, "interval", minutes=30, name="收藏洗版")
        if self._only_once:
            # "Run once now": clear the flag, persist it, then schedule an immediate run
            self._only_once = False
            self.update_config({
                "enabled": self._enabled,
                "cron": self._cron,
                "notify": self._notify,
                "webhook_enabled": self._webhook_enabled,
                "only_once": self._only_once
            })
            self._scheduler.add_job(self.sync, 'date',
                                    run_date=datetime.now(tz=pytz.timezone(settings.TZ)) + timedelta(seconds=3),
                                    name="立即运行收藏洗版")
        # Start the scheduler only when at least one job was registered
        if self._scheduler.get_jobs():
            self._scheduler.print_jobs()
            self._scheduler.start()
def get_state(self) -> bool:
    # The plugin is considered running exactly when it is enabled in config.
    return self._enabled
@staticmethod
def get_command() -> List[Dict[str, Any]]:
    # This plugin registers no remote commands.
    pass
def get_api(self) -> List[Dict[str, Any]]:
    """
    Get the plugin's API definitions, in the shape:
    [{
        "path": "/xx",
        "endpoint": self.xxx,
        "methods": ["GET", "POST"],
        "summary": "API description"
    }]
    This plugin exposes no API endpoints.
    """
    pass
def get_form(self) -> Tuple[List[dict], Dict[str, Any]]:
    """
    Build the plugin's configuration page.
    :return: (page component tree, default form values)
    """
    return [
        {
            'component': 'VForm',
            'content': [
                {
                    # Row 1: four feature switches
                    'component': 'VRow',
                    'content': [
                        {
                            'component': 'VCol',
                            'props': {
                                'cols': 12,
                                'md': 3
                            },
                            'content': [
                                {
                                    'component': 'VSwitch',
                                    'props': {
                                        'model': 'enabled',
                                        'label': '启用插件',
                                    }
                                }
                            ]
                        },
                        {
                            'component': 'VCol',
                            'props': {
                                'cols': 12,
                                'md': 3
                            },
                            'content': [
                                {
                                    'component': 'VSwitch',
                                    'props': {
                                        'model': 'notify',
                                        'label': '发送通知',
                                    }
                                }
                            ]
                        },
                        {
                            'component': 'VCol',
                            'props': {
                                'cols': 12,
                                'md': 3
                            },
                            'content': [
                                {
                                    'component': 'VSwitch',
                                    'props': {
                                        'model': 'only_once',
                                        'label': '立即运行一次',
                                    }
                                }
                            ]
                        },
                        {
                            'component': 'VCol',
                            'props': {
                                'cols': 12,
                                'md': 3
                            },
                            'content': [
                                {
                                    'component': 'VSwitch',
                                    'props': {
                                        'model': 'webhook_enabled',
                                        'label': 'Webhook',
                                    }
                                }
                            ]
                        }
                    ]
                },
                {
                    # Row 2: cron expression for the polling schedule
                    'component': 'VRow',
                    'content': [
                        {
                            'component': 'VCol',
                            'props': {
                                'cols': 12,
                            },
                            'content': [
                                {
                                    'component': 'VTextField',
                                    'props': {
                                        'model': 'cron',
                                        'label': '执行周期',
                                        'placeholder': '5位cron表达式留空自动'
                                    }
                                }
                            ]
                        }
                    ]
                },
                {
                    # Row 3: usage notes shown to the user
                    'component': 'VRow',
                    'content': [
                        {
                            'component': 'VCol',
                            'props': {
                                'cols': 12,
                            },
                            'content': [
                                {
                                    'component': 'VAlert',
                                    'props': {
                                        'text': '支持主动定时获取媒体库数据和Webhook实时触发两种方式两者只能选其一'
                                                'Webhook需要在媒体服务器设置发送Webhook报文。'
                                                'Plex使用主动获取时建议执行周期设置大于1小时'
                                                '收藏Api调用Plex官网接口有频率限制。'
                                    }
                                }
                            ]
                        }
                    ]
                }
            ]
        }
    ], {
        # Default form values
        "enabled": False,
        "notify": False,
        "cron": "*/30 * * * *",
        "webhook_enabled": False,
        "only_once": False
    }
def get_page(self) -> List[dict]:
    """
    Build the plugin's detail page: a grid of cards, one per history entry.
    :return: page component tree with the data already embedded
    """
    # Load the stored sync history
    historys = self.get_data('history')
    if not historys:
        return [
            {
                'component': 'div',
                'text': '暂无数据',
                'props': {
                    'class': 'text-center',
                }
            }
        ]
    # Newest entries first
    historys = sorted(historys, key=lambda x: x.get('time'), reverse=True)
    # Build one card per history entry
    contents = []
    for history in historys:
        title = history.get("title")
        poster = history.get("poster")
        mtype = history.get("type")
        time_str = history.get("time")
        tmdbid = history.get("tmdbid")
        contents.append(
            {
                'component': 'VCard',
                'content': [
                    {
                        'component': 'div',
                        'props': {
                            'class': 'd-flex justify-space-start flex-nowrap flex-row',
                        },
                        'content': [
                            {
                                # Poster image
                                'component': 'div',
                                'content': [
                                    {
                                        'component': 'VImg',
                                        'props': {
                                            'src': poster,
                                            'height': 120,
                                            'width': 80,
                                            'aspect-ratio': '2/3',
                                            'class': 'object-cover shadow ring-gray-500',
                                            'cover': True
                                        }
                                    }
                                ]
                            },
                            {
                                # Title (links to TMDB), type and timestamp
                                'component': 'div',
                                'content': [
                                    {
                                        'component': 'VCardSubtitle',
                                        'props': {
                                            'class': 'pa-2 font-bold break-words whitespace-break-spaces'
                                        },
                                        'content': [
                                            {
                                                'component': 'a',
                                                'props': {
                                                    'href': f"https://www.themoviedb.org/movie/{tmdbid}",
                                                    'target': '_blank'
                                                },
                                                'text': title
                                            }
                                        ]
                                    },
                                    {
                                        'component': 'VCardText',
                                        'props': {
                                            'class': 'pa-0 px-2'
                                        },
                                        'text': f'类型:{mtype}'
                                    },
                                    {
                                        'component': 'VCardText',
                                        'props': {
                                            'class': 'pa-0 px-2'
                                        },
                                        'text': f'时间:{time_str}'
                                    }
                                ]
                            }
                        ]
                    }
                ]
            }
        )
    return [
        {
            'component': 'div',
            'props': {
                'class': 'grid gap-3 grid-info-card',
            },
            'content': contents
        }
    ]
def stop_service(self):
    """
    Tear down the plugin: drop all scheduled jobs and stop the scheduler.
    """
    try:
        scheduler = self._scheduler
        if not scheduler:
            return
        scheduler.remove_all_jobs()
        if scheduler.running:
            scheduler.shutdown()
        self._scheduler = None
    except Exception as e:
        logger.error("退出插件失败:%s" % str(e))
def sync(self):
    """
    Read favourites from the configured media servers and subscribe each
    favourited movie for "best version" upgrading.
    """
    # Serialise concurrent runs; give up after 60s rather than queueing forever
    _is_lock: bool = lock.acquire(timeout=60)
    if not _is_lock:
        return
    try:
        # Names already processed in previous runs (one per line in the cache file)
        caches = self._cache_path.read_text().split("\n") if self._cache_path.exists() else []
        # Stored subscription history
        history = self.get_data('history') or []
        # Media server types, comma separated (e.g. "emby,plex")
        if not settings.MEDIASERVER:
            return
        media_servers = settings.MEDIASERVER.split(',')
        # Collect favourites per server
        all_items = {}
        for media_server in media_servers:
            if media_server == 'jellyfin':
                all_items['jellyfin'] = self.jellyfin_get_items()
            elif media_server == 'emby':
                all_items['emby'] = self.emby_get_items()
            else:
                all_items['plex'] = self.plex_get_watchlist()

        # reduce() helper: append x to y only if no item with the same
        # 'Name' is present yet (order-preserving dedup by movie name)
        def function(y, x):
            return y if (x['Name'] in [i['Name'] for i in y]) else (lambda z, u: (z.append(u), z))(y, x)[1]

        # Process every server's result list
        for server, all_item in all_items.items():
            # Deduplicate items by movie name
            result = reduce(function, all_item, [])
            for data in result:
                # Skip names already handled in a previous run
                if data.get('Name') in caches:
                    continue
                # Fetch item details from the owning server
                if server == 'jellyfin':
                    item_info_resp = Jellyfin().get_iteminfo(itemid=data.get('Id'))
                elif server == 'emby':
                    item_info_resp = Emby().get_iteminfo(itemid=data.get('Id'))
                else:
                    item_info_resp = self.plex_get_iteminfo(itemid=data.get('Id'))
                logger.debug(f'BestFilmVersion插件 item打印 {item_info_resp}')
                if not item_info_resp:
                    continue
                # Only movies are eligible for best-version subscriptions
                if data.get('Type') != 'Movie':
                    continue
                # A TMDB id is required to identify the media
                tmdb_id = item_info_resp.tmdbid
                if not tmdb_id:
                    continue
                # Recognise full media info from the TMDB id
                mediainfo: MediaInfo = self.chain.recognize_media(tmdbid=tmdb_id, mtype=MediaType.MOVIE)
                if not mediainfo:
                    logger.warn(f'未识别到媒体信息,标题:{data.get("Name")}tmdbid{tmdb_id}')
                    continue
                # Add the best-version subscription (exist_ok tolerates duplicates)
                self.subscribechain.add(mtype=MediaType.MOVIE,
                                        title=mediainfo.title,
                                        year=mediainfo.year,
                                        tmdbid=mediainfo.tmdb_id,
                                        best_version=True,
                                        username="收藏洗版",
                                        exist_ok=True)
                # Remember this name so the next run skips it
                caches.append(data.get('Name'))
                # Record in history unless the tmdb id is already present
                if mediainfo.tmdb_id not in [h.get("tmdbid") for h in history]:
                    history.append({
                        "title": mediainfo.title,
                        "type": mediainfo.type.value,
                        "year": mediainfo.year,
                        "poster": mediainfo.get_poster_image(),
                        "overview": mediainfo.overview,
                        "tmdbid": mediainfo.tmdb_id,
                        "time": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                    })
        # Persist history and cache
        self.save_data('history', history)
        self._cache_path.write_text("\n".join(caches))
    finally:
        lock.release()
def jellyfin_get_items(self) -> List[dict]:
    """Collect favourite items from every Jellyfin user via the REST API."""
    # Favourites are stored per user, so resolve all user ids first.
    users_url = "[HOST]Users?&apikey=[APIKEY]"
    users = self.get_users(Jellyfin().get_data(users_url))
    if not users:
        logger.info(f"bestfilmversion/users_url: {users_url}")
        return []
    favourites: List[dict] = []
    for user_id in users:
        # Newest favourites first: sorted by creation date, descending.
        items_url = (
            "[HOST]Users/" + user_id + "/Items?SortBy=DateCreated%2CSortName"
            "&SortOrder=Descending"
            "&Filters=IsFavorite"
            "&Recursive=true"
            "&Fields=PrimaryImageAspectRatio%2CBasicSyncInfo"
            "&CollapseBoxSetItems=false"
            "&ExcludeLocationTypes=Virtual"
            "&EnableTotalRecordCount=false"
            "&Limit=20"
            "&apikey=[APIKEY]"
        )
        page = self.get_items(Jellyfin().get_data(items_url))
        if page:
            favourites.extend(page)
    return favourites
def emby_get_items(self) -> List[dict]:
    """Collect favourite items from every Emby user via the REST API."""
    # Favourites are stored per user, so resolve all user ids first.
    get_users_url = "[HOST]Users?&api_key=[APIKEY]"
    users = self.get_users(Emby().get_data(get_users_url))
    if not users:
        return []
    favourites: List[dict] = []
    for user_id in users:
        # Newest favourites first: sorted by creation date, descending.
        items_url = (
            "[HOST]emby/Users/" + user_id + "/Items?SortBy=DateCreated%2CSortName"
            "&SortOrder=Descending"
            "&Filters=IsFavorite"
            "&Recursive=true"
            "&Fields=PrimaryImageAspectRatio%2CBasicSyncInfo"
            "&CollapseBoxSetItems=false"
            "&ExcludeLocationTypes=Virtual"
            "&EnableTotalRecordCount=false"
            "&Limit=20&api_key=[APIKEY]"
        )
        page = self.get_items(Emby().get_data(items_url))
        if page:
            favourites.extend(page)
    return favourites
@staticmethod
def get_items(resp: Response):
try:
if resp:
return resp.json().get("Items") or []
else:
return []
except Exception as e:
print(str(e))
return []
@staticmethod
def get_users(resp: Response):
try:
if resp:
return [data['Id'] for data in resp.json()]
else:
logger.error(f"BestFilmVersion/Users 未获取到返回数据")
return []
except Exception as e:
logger.error(f"连接BestFilmVersion/Users 出错:" + str(e))
return []
@staticmethod
def plex_get_watchlist() -> List[dict]:
    """Fetch the Plex online watchlist (first 50 movies, newest added first)."""
    url = f"https://metadata.provider.plex.tv/library/sections/watchlist/all?type=1&sort=addedAt%3Adesc" \
          f"&X-Plex-Container-Start=0&X-Plex-Container-Size=50" \
          f"&X-Plex-Token={settings.PLEX_TOKEN}"
    watchlist: List[dict] = []
    try:
        resp = RequestUtils().get_res(url=url)
        if not resp:
            logger.error(f"Plex/Watchlist 未获取到返回数据")
            return []
        # The watchlist endpoint returns XML; each entry is a <Video> element
        document = parseString(resp.text)
        videos = document.documentElement.getElementsByTagName('Video')
        for video in videos:
            attrs = video.attributes
            watchlist.append({
                'Id': attrs['ratingKey'].nodeValue,
                'Name': attrs['title'].nodeValue,
                # Normalise the type tag; anything non-movie becomes ""
                'Type': "Movie" if attrs['type'].nodeValue == "movie" else "",
            })
        return watchlist
    except Exception as e:
        logger.error(f"连接Plex/Watchlist 出错:" + str(e))
        return []
@staticmethod
def plex_get_iteminfo(itemid):
    """Query Plex online metadata for one item and collect its TMDB guid entries.

    :param itemid: Plex ratingKey of the item
    :return: ``{'ExternalUrls': [...]}`` when a TMDB guid was found, else ``[]``.
        NOTE(review): success returns a dict while failure returns a list —
        callers appear to probe attributes, but the contract is worth unifying.
    """
    url = f"https://metadata.provider.plex.tv/library/metadata/{itemid}" \
          f"?X-Plex-Token={settings.PLEX_TOKEN}"
    ids = []
    try:
        resp = RequestUtils(accept_type="application/json, text/plain, */*").get_res(url=url)
        if resp:
            metadata = resp.json().get('MediaContainer').get('Metadata')
            for item in metadata:
                _guid = item.get('Guid')
                if not _guid:
                    continue
                # Guard against guid entries whose 'id' is missing/None before
                # the substring test (original called .__contains__ on None)
                id_list = [h.get('id') for h in _guid
                           if h.get('id') and "tmdb" in h.get('id')]
                if not id_list:
                    continue
                ids.append({'Name': 'TheMovieDb', 'Url': id_list[0]})
            if not ids:
                return []
            return {'ExternalUrls': ids}
        else:
            logger.error(f"Plex/Items 未获取到返回数据")
            return []
    except Exception as e:
        logger.error(f"连接Plex/Items 出错:" + str(e))
        return []
@eventmanager.register(EventType.WebhookMessage)
def webhook_message_action(self, event):
    """React to media-server webhook favourite/rating events.

    Resolves the favourited item to a TMDB movie, adds a best-version
    subscription for it, and records it in the plugin cache and history.
    """
    if not self._enabled:
        return
    if not self._webhook_enabled:
        return
    data: WebhookEventInfo = event.event_data
    # Exclude webhook calls that are not favourite/rating actions
    if data.channel not in ['jellyfin', 'emby', 'plex']:
        return
    if data.channel in ['emby', 'plex'] and data.event != 'item.rate':
        return
    if data.channel == 'jellyfin' and data.save_reason != 'UpdateUserRating':
        return
    logger.info(f'BestFilmVersion/webhook_message_action WebhookEventInfo打印{data}')
    # Acquire the shared lock (same one as the scheduled sync) so cache and
    # history writes don't race; give up after 60s rather than block forever
    _is_lock: bool = lock.acquire(timeout=60)
    if not _is_lock:
        return
    try:
        if not data.tmdb_id:
            info = None
            # Jellyfin only reports a rating update; additionally require the
            # item to actually be marked favourite
            if (data.channel == 'jellyfin'
                    and data.save_reason == 'UpdateUserRating'
                    and data.item_favorite):
                info = Jellyfin().get_iteminfo(itemid=data.item_id)
            elif data.channel == 'emby' and data.event == 'item.rate':
                info = Emby().get_iteminfo(itemid=data.item_id)
            elif data.channel == 'plex' and data.event == 'item.rate':
                info = Plex().get_iteminfo(itemid=data.item_id)
            logger.debug(f'BestFilmVersion/webhook_message_action item打印{info}')
            if not info:
                return
            # Only movies are eligible for best-version subscriptions
            if info.item_type not in ['Movie', 'MOV', 'movie']:
                return
            # Get the TMDB id from the fetched item details
            tmdb_id = info.tmdbid
        else:
            tmdb_id = data.tmdb_id
            if (data.channel == 'jellyfin'
                    and (data.save_reason != 'UpdateUserRating' or not data.item_favorite)):
                return
            if data.item_type not in ['Movie', 'MOV', 'movie']:
                return
        # Recognise full media info from the TMDB id
        mediainfo = self.chain.recognize_media(tmdbid=tmdb_id, mtype=MediaType.MOVIE)
        if not mediainfo:
            logger.warn(f'未识别到媒体信息,标题:{data.item_name}tmdbID{tmdb_id}')
            return
        # Read the name cache (one title per line)
        caches = self._cache_path.read_text().split("\n") if self._cache_path.exists() else []
        # Skip titles already processed
        if data.item_name in caches:
            return
        # Read history
        history = self.get_data('history') or []
        # Add a best-version subscription
        self.subscribechain.add(mtype=MediaType.MOVIE,
                                title=mediainfo.title,
                                year=mediainfo.year,
                                tmdbid=mediainfo.tmdb_id,
                                best_version=True,
                                username="收藏洗版",
                                exist_ok=True)
        # Add to cache
        caches.append(data.item_name)
        # Store history, deduplicated by tmdbid
        if mediainfo.tmdb_id not in [h.get("tmdbid") for h in history]:
            history.append({
                "title": mediainfo.title,
                "type": mediainfo.type.value,
                "year": mediainfo.year,
                "poster": mediainfo.get_poster_image(),
                "overview": mediainfo.overview,
                "tmdbid": mediainfo.tmdb_id,
                "time": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            })
        # Save history
        self.save_data('history', history)
        # Save cache
        self._cache_path.write_text("\n".join(caches))
    finally:
        lock.release()

File diff suppressed because it is too large Load Diff

View File

@@ -1,231 +0,0 @@
from typing import Any, List, Dict, Tuple
from app.core.config import settings
from app.core.event import eventmanager, Event
from app.log import logger
from app.plugins import _PluginBase
from app.plugins.chatgpt.openai import OpenAi
from app.schemas.types import EventType
class ChatGPT(_PluginBase):
    """Message-interaction plugin that relays chat to ChatGPT.

    Listens for user messages and replies with ChatGPT output; optionally
    assists media-name recognition via the NameRecognize event.
    """

    # Plugin display name
    plugin_name = "ChatGPT"
    # Plugin description
    plugin_desc = "消息交互支持与ChatGPT对话。"
    # Plugin icon
    plugin_icon = "chatgpt.png"
    # Theme colour
    plugin_color = "#74AA9C"
    # Plugin version
    plugin_version = "1.0"
    # Plugin author
    plugin_author = "jxxghp"
    # Author homepage
    author_url = "https://github.com/jxxghp"
    # Config item id prefix
    plugin_config_prefix = "chatgpt_"
    # Load order
    plugin_order = 15
    # Required user auth level
    auth_level = 1

    # Private state
    # openai: OpenAi client instance, (re)built in init_plugin
    openai = None
    _enabled = False
    _proxy = False
    _recognize = False
    _openai_url = None
    _openai_key = None

    def init_plugin(self, config: dict = None):
        """Load configuration and (re)build the OpenAi client."""
        if config:
            self._enabled = config.get("enabled")
            self._proxy = config.get("proxy")
            self._recognize = config.get("recognize")
            self._openai_url = config.get("openai_url")
            self._openai_key = config.get("openai_key")
            self.openai = OpenAi(api_key=self._openai_key, api_url=self._openai_url,
                                 proxy=settings.PROXY if self._proxy else None)

    def get_state(self) -> bool:
        """Return whether the plugin is enabled."""
        return self._enabled

    @staticmethod
    def get_command() -> List[Dict[str, Any]]:
        """No remote commands are exposed."""
        pass

    def get_api(self) -> List[Dict[str, Any]]:
        """No HTTP APIs are exposed."""
        pass

    def get_form(self) -> Tuple[List[dict], Dict[str, Any]]:
        """
        Assemble the plugin config page; returns (page layout, default values).
        """
        return [
            {
                'component': 'VForm',
                'content': [
                    {
                        'component': 'VRow',
                        'content': [
                            {
                                'component': 'VCol',
                                'props': {
                                    'cols': 12,
                                    'md': 4
                                },
                                'content': [
                                    {
                                        'component': 'VSwitch',
                                        'props': {
                                            'model': 'enabled',
                                            'label': '启用插件',
                                        }
                                    }
                                ]
                            },
                            {
                                'component': 'VCol',
                                'props': {
                                    'cols': 12,
                                    'md': 4
                                },
                                'content': [
                                    {
                                        'component': 'VSwitch',
                                        'props': {
                                            'model': 'proxy',
                                            'label': '使用代理',
                                        }
                                    }
                                ]
                            },
                            {
                                'component': 'VCol',
                                'props': {
                                    'cols': 12,
                                    'md': 4
                                },
                                'content': [
                                    {
                                        'component': 'VSwitch',
                                        'props': {
                                            'model': 'recognize',
                                            'label': '辅助识别',
                                        }
                                    }
                                ]
                            }
                        ]
                    },
                    {
                        'component': 'VRow',
                        'content': [
                            {
                                'component': 'VCol',
                                'props': {
                                    'cols': 12,
                                    'md': 6
                                },
                                'content': [
                                    {
                                        'component': 'VTextField',
                                        'props': {
                                            'model': 'openai_url',
                                            'label': 'OpenAI API Url',
                                            'placeholder': 'https://api.openai.com',
                                        }
                                    }
                                ]
                            },
                            {
                                'component': 'VCol',
                                'props': {
                                    'cols': 12,
                                    'md': 6
                                },
                                'content': [
                                    {
                                        'component': 'VTextField',
                                        'props': {
                                            'model': 'openai_key',
                                            'label': 'sk-xxx'
                                        }
                                    }
                                ]
                            }
                        ]
                    },
                ]
            }
        ], {
            "enabled": False,
            "proxy": False,
            "recognize": False,
            "openai_url": "https://api.openai.com",
            "openai_key": ""
        }

    def get_page(self) -> List[dict]:
        """No detail page."""
        pass

    @eventmanager.register(EventType.UserMessage)
    def talk(self, event: Event):
        """
        Listen for user messages and reply with ChatGPT output.
        """
        if not self._enabled:
            return
        if not self.openai:
            return
        text = event.event_data.get("text")
        userid = event.event_data.get("userid")
        channel = event.event_data.get("channel")
        if not text:
            return
        response = self.openai.get_response(text=text, userid=userid)
        if response:
            self.post_message(channel=channel, title=response, userid=userid)

    @eventmanager.register(EventType.NameRecognize)
    def recognize(self, event: Event):
        """
        Listen for recognition events and use ChatGPT to assist name parsing.
        """
        if not event.event_data:
            return
        title = event.event_data.get("title")
        if not title:
            return
        # Must answer the event promptly so the main program isn't left waiting;
        # when disabled, echo the title back unmodified
        if not self._enabled \
                or not self.openai \
                or not self._recognize:
            eventmanager.send_event(
                EventType.NameRecognizeResult,
                {
                    'title': title
                }
            )
            return
        # Call ChatGPT
        response = self.openai.get_media_name(filename=title)
        logger.info(f"ChatGPT辅助识别结果{response}")
        if response:
            eventmanager.send_event(
                EventType.NameRecognizeResult,
                {
                    'title': title,
                    'name': response.get("title"),
                    'year': response.get("year"),
                    'season': response.get("season"),
                    'episode': response.get("episode")
                }
            )

    def stop_service(self):
        """
        Plugin shutdown: nothing to clean up.
        """
        pass

View File

@@ -1,204 +0,0 @@
import json
import time
from typing import List, Union
import openai
from cacheout import Cache
OpenAISessionCache = Cache(maxsize=100, ttl=3600, timer=time.time, default=None)
class OpenAi:
    """Thin wrapper around the OpenAI ChatCompletion API with per-user sessions.

    Session context is kept in the module-level ``OpenAISessionCache`` keyed by
    user id, so follow-up questions retain conversation history.
    """

    _api_key: str = None
    _api_url: str = None

    def __init__(self, api_key: str = None, api_url: str = None, proxy: dict = None):
        """Configure the global openai client with key, base url and proxy."""
        self._api_key = api_key
        self._api_url = api_url
        openai.api_base = self._api_url + "/v1"
        openai.api_key = self._api_key
        if proxy and proxy.get("https"):
            openai.proxy = proxy.get("https")

    def get_state(self) -> bool:
        """Return True when an API key is configured."""
        return True if self._api_key else False

    @staticmethod
    def __save_session(session_id: str, message: str):
        """
        Append the assistant's reply to an existing session.
        :param session_id: session id (user id)
        :param message: assistant reply content
        :return:
        """
        session = OpenAISessionCache.get(session_id)
        if session:
            session.append({
                "role": "assistant",
                "content": message
            })
            OpenAISessionCache.set(session_id, session)

    @staticmethod
    def __get_session(session_id: str, message: str) -> List[dict]:
        """
        Get the session context, appending the new user message.
        :param session_id: session id (user id)
        :param message: new user message
        :return: session context (message list)
        """
        session = OpenAISessionCache.get(session_id)
        if session:
            session.append({
                "role": "user",
                "content": message
            })
        else:
            session = [
                {
                    "role": "system",
                    "content": "请在接下来的对话中请使用中文回复,并且内容尽可能详细。"
                },
                {
                    "role": "user",
                    "content": message
                }]
        OpenAISessionCache.set(session_id, session)
        return session

    @staticmethod
    def __get_model(message: Union[str, List[dict]],
                    prompt: str = None,
                    user: str = "MoviePilot",
                    **kwargs):
        """
        Build the message list (if needed) and invoke the chat model.
        :param message: either a plain user string or a prepared message list
        :param prompt: optional system prompt used when message is a string
        :param user: end-user identifier passed through to the API
        :return: the raw ChatCompletion response
        """
        if not isinstance(message, list):
            if prompt:
                message = [
                    {
                        "role": "system",
                        "content": prompt
                    },
                    {
                        "role": "user",
                        "content": message
                    }
                ]
            else:
                message = [
                    {
                        "role": "user",
                        "content": message
                    }
                ]
        return openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            user=user,
            messages=message,
            **kwargs
        )

    @staticmethod
    def __clear_session(session_id: str):
        """
        Drop the stored session context.
        :param session_id: session id (user id)
        :return:
        """
        if OpenAISessionCache.get(session_id):
            OpenAISessionCache.delete(session_id)

    def get_media_name(self, filename: str):
        """
        Extract media-name elements from a file name.
        :param filename: the file name to parse
        :return: parsed Json dict ({} on failure, None when unconfigured)
        """
        if not self.get_state():
            return None
        result = ""
        try:
            _filename_prompt = "I will give you a movie/tvshow file name.You need to return a Json." \
                               "\nPay attention to the correct identification of the film name." \
                               "\n{\"title\":string,\"version\":string,\"part\":string,\"year\":string,\"resolution\":string,\"season\":number|null,\"episode\":number|null}"
            completion = self.__get_model(prompt=_filename_prompt, message=filename)
            result = completion.choices[0].message.content
            return json.loads(result)
        except Exception as e:
            print(f"{str(e)}{result}")
            return {}

    def get_response(self, text: str, userid: str):
        """
        Chat with the model and return the answer, preserving per-user context.
        :param text: user input text
        :param userid: user id (session key)
        :return: answer text or an error description
        """
        if not self.get_state():
            return ""
        try:
            if not userid:
                return "用户信息错误"
            else:
                userid = str(userid)
            if text == "#清除":
                self.__clear_session(userid)
                return "会话已清除"
            # Fetch historical context (also appends the new user message)
            messages = self.__get_session(userid, text)
            completion = self.__get_model(message=messages, user=userid)
            result = completion.choices[0].message.content
            if result:
                # FIX: save the assistant's REPLY, not the user's text — the
                # original stored `text` under the assistant role, corrupting
                # the session history for follow-up questions
                self.__save_session(userid, result)
            return result
        except openai.error.RateLimitError as e:
            return f"请求被ChatGPT拒绝了{str(e)}"
        except openai.error.APIConnectionError as e:
            return f"ChatGPT网络连接失败{str(e)}"
        except openai.error.Timeout as e:
            return f"没有接收到ChatGPT的返回消息{str(e)}"
        except Exception as e:
            return f"请求ChatGPT出现错误{str(e)}"

    def translate_to_zh(self, text: str):
        """
        Translate text to Simplified Chinese.
        :param text: input text
        :return: (success flag, translated text or error string)
        """
        if not self.get_state():
            return False, None
        system_prompt = "You are a translation engine that can only translate text and cannot interpret it."
        user_prompt = f"translate to zh-CN:\n\n{text}"
        result = ""
        try:
            completion = self.__get_model(prompt=system_prompt,
                                          message=user_prompt,
                                          temperature=0,
                                          top_p=1,
                                          frequency_penalty=0,
                                          presence_penalty=0)
            result = completion.choices[0].message.content.strip()
            return True, result
        except Exception as e:
            print(f"{str(e)}{result}")
            return False, str(e)

    def get_question_answer(self, question: str):
        """
        Pick the correct answer for a multiple-choice question.
        :param question: the question plus its options
        :return: answer index string ({} on failure, None when unconfigured)
        """
        if not self.get_state():
            return None
        result = ""
        try:
            _question_prompt = "下面我们来玩一个游戏,你是老师,我是学生,你需要回答我的问题,我会给你一个题目和几个选项,你的回复必须是给定选项中正确答案对应的序号,请直接回复数字"
            completion = self.__get_model(prompt=_question_prompt, message=question)
            result = completion.choices[0].message.content
            return result
        except Exception as e:
            print(f"{str(e)}{result}")
            return {}

View File

@@ -1,256 +0,0 @@
from functools import lru_cache
from pathlib import Path
from typing import List, Tuple, Dict, Any
from app.core.config import settings
from app.core.context import MediaInfo
from app.core.event import eventmanager, Event
from app.log import logger
from app.plugins import _PluginBase
from app.schemas import TransferInfo
from app.schemas.types import EventType, MediaType
from app.utils.http import RequestUtils
class ChineseSubFinder(_PluginBase):
    """Notify a ChineseSubFinder server to download subtitles after a transfer completes."""

    # Plugin display name
    plugin_name = "ChineseSubFinder"
    # Plugin description
    plugin_desc = "整理入库时通知ChineseSubFinder下载字幕。"
    # Plugin icon
    plugin_icon = "chinesesubfinder.png"
    # Theme colour
    plugin_color = "#83BE39"
    # Plugin version
    plugin_version = "1.0"
    # Plugin author
    plugin_author = "jxxghp"
    # Author homepage
    author_url = "https://github.com/jxxghp"
    # Config item id prefix
    plugin_config_prefix = "chinesesubfinder_"
    # Load order
    plugin_order = 5
    # Required user auth level
    auth_level = 1

    # Private state
    _save_tmp_path = None
    _enabled = False
    # _host: CSF server base url, normalised to the "http://.../" form
    _host = None
    _api_key = None
    # _remote_path / _local_path: path prefix mapping between this host and
    # the path the CSF container sees
    _remote_path = None
    _local_path = None

    def init_plugin(self, config: dict = None):
        """Load configuration and normalise the server address."""
        self._save_tmp_path = settings.TEMP_PATH
        if config:
            self._enabled = config.get("enabled")
            self._api_key = config.get("api_key")
            self._host = config.get('host')
            if self._host:
                # Guarantee scheme and trailing slash so URL concatenation works
                if not self._host.startswith('http'):
                    self._host = "http://" + self._host
                if not self._host.endswith('/'):
                    self._host = self._host + "/"
            self._local_path = config.get("local_path")
            self._remote_path = config.get("remote_path")

    @staticmethod
    def get_command() -> List[Dict[str, Any]]:
        """No remote commands are exposed."""
        pass

    def get_api(self) -> List[Dict[str, Any]]:
        """No HTTP APIs are exposed."""
        pass

    def get_form(self) -> Tuple[List[dict], Dict[str, Any]]:
        """Assemble the plugin config page; returns (page layout, default values)."""
        return [
            {
                'component': 'VForm',
                'content': [
                    {
                        'component': 'VRow',
                        'content': [
                            {
                                'component': 'VCol',
                                'props': {
                                    'cols': 12,
                                    'md': 6
                                },
                                'content': [
                                    {
                                        'component': 'VSwitch',
                                        'props': {
                                            'model': 'enabled',
                                            'label': '启用插件',
                                        }
                                    }
                                ]
                            }
                        ]
                    },
                    {
                        'component': 'VRow',
                        'content': [
                            {
                                'component': 'VCol',
                                'props': {
                                    'cols': 12,
                                    'md': 6
                                },
                                'content': [
                                    {
                                        'component': 'VTextField',
                                        'props': {
                                            'model': 'host',
                                            'label': '服务器'
                                        }
                                    }
                                ]
                            },
                            {
                                'component': 'VCol',
                                'props': {
                                    'cols': 12,
                                    'md': 6
                                },
                                'content': [
                                    {
                                        'component': 'VTextField',
                                        'props': {
                                            'model': 'api_key',
                                            'label': 'API密钥'
                                        }
                                    }
                                ]
                            }
                        ]
                    },
                    {
                        'component': 'VRow',
                        'content': [
                            {
                                'component': 'VCol',
                                'props': {
                                    'cols': 12,
                                    'md': 6
                                },
                                'content': [
                                    {
                                        'component': 'VTextField',
                                        'props': {
                                            'model': 'local_path',
                                            'label': '本地路径'
                                        }
                                    }
                                ]
                            },
                            {
                                'component': 'VCol',
                                'props': {
                                    'cols': 12,
                                    'md': 6
                                },
                                'content': [
                                    {
                                        'component': 'VTextField',
                                        'props': {
                                            'model': 'remote_path',
                                            'label': '远端路径'
                                        }
                                    }
                                ]
                            }
                        ]
                    }
                ]
            }
        ], {
            "enabled": False,
            "host": "",
            "api_key": "",
            "local_path": "",
            "remote_path": ""
        }

    def get_state(self) -> bool:
        """Return whether the plugin is enabled."""
        return self._enabled

    def get_page(self) -> List[dict]:
        """No detail page."""
        pass

    def stop_service(self):
        """Plugin shutdown: nothing to clean up."""
        pass

    @eventmanager.register(EventType.TransferComplete)
    def download(self, event: Event):
        """
        Ask ChineseSubFinder to download subtitles for the just-transferred files.
        """
        if not self._enabled or not self._host or not self._api_key:
            return
        item = event.event_data
        if not item:
            return
        # Request address
        req_url = "%sapi/v1/add-job" % self._host
        # Media info
        item_media: MediaInfo = item.get("mediainfo")
        # Transfer info
        item_transfer: TransferInfo = item.get("transferinfo")
        # Media type
        item_type = item_media.type
        # Destination path
        item_dest: Path = item_transfer.target_path
        # Whether this is a BluRay original disc
        item_bluray = item_transfer.is_bluray
        # File list
        item_file_list = item_transfer.file_list_new
        if item_bluray:
            # A BluRay disc has no single video file: synthesise a virtual one.
            # FIX: `%` and `/` share precedence (left-to-right), so the original
            # `"%s.mp4" % item_dest / item_dest.name` evaluated the format first
            # and then did `str / str`, raising TypeError. Join the path first.
            item_file_list = ["%s.mp4" % (item_dest / item_dest.name)]
        for file_path in item_file_list:
            # Map the local path prefix to the path the CSF container sees
            if self._local_path and self._remote_path and file_path.startswith(self._local_path):
                file_path = file_path.replace(self._local_path, self._remote_path).replace('\\', '/')
            # item_media.type may arrive as a MediaType enum or its raw value
            # depending on the caller — accept both so movies are typed 0
            # (NOTE(review): confirm which form callers actually send)
            self.__request_csf(req_url=req_url,
                               file_path=file_path,
                               item_type=0 if item_type in (MediaType.MOVIE, MediaType.MOVIE.value) else 1,
                               item_bluray=item_bluray)

    @lru_cache(maxsize=128)
    def __request_csf(self, req_url, file_path, item_type, item_bluray):
        """
        POST one add-job request to ChineseSubFinder.

        lru_cache deduplicates identical requests (one job per file name).
        NOTE: caching a bound method pins the instance in memory, which is
        acceptable here since the plugin is a long-lived singleton.
        """
        logger.info("通知ChineseSubFinder下载字幕: %s" % file_path)
        params = {
            "video_type": item_type,
            "physical_video_file_full_path": file_path,
            "task_priority_level": 3,
            "media_server_inside_video_id": "",
            "is_bluray": item_bluray
        }
        try:
            res = RequestUtils(headers={
                "Authorization": "Bearer %s" % self._api_key
            }).post(req_url, json=params)
            if not res or res.status_code != 200:
                logger.error("调用ChineseSubFinder API失败")
            else:
                # When the media directory has no recognised nfo metadata this
                # endpoint may return control characters (a CSF quirk; emby
                # metadata refresh is asynchronous), hence the res.text guard.
                # (Removed the original `elif res.status_code != 200` arm: it
                # was unreachable — this branch only runs when status is 200.)
                if res.text:
                    job_id = res.json().get("job_id")
                    message = res.json().get("message")
                    if not job_id:
                        logger.warn("ChineseSubFinder下载字幕出错%s" % message)
                    else:
                        logger.info("ChineseSubFinder任务添加成功%s" % job_id)
        except Exception as e:
            logger.error("连接ChineseSubFinder出错" + str(e))

View File

@@ -1,777 +0,0 @@
import os
import subprocess
import time
import zipfile
from datetime import datetime, timedelta
from pathlib import Path
from typing import List, Tuple, Dict, Any
import pytz
import requests
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
from python_hosts import Hosts, HostsEntry
from requests import Response
from app.core.config import settings
from app.core.event import eventmanager, Event
from app.log import logger
from app.plugins import _PluginBase
from app.schemas.types import EventType, NotificationType
from app.utils.http import RequestUtils
from app.utils.ip import IpUtils
from app.utils.system import SystemUtils
class CloudflareSpeedTest(_PluginBase):
# 插件名称
plugin_name = "Cloudflare IP优选"
# 插件描述
plugin_desc = "🌩 测试 Cloudflare CDN 延迟和速度自动优选IP。"
# 插件图标
plugin_icon = "cloudflare.jpg"
# 主题色
plugin_color = "#F6821F"
# 插件版本
plugin_version = "1.0"
# 插件作者
plugin_author = "thsrite"
# 作者主页
author_url = "https://github.com/thsrite"
# 插件配置项ID前缀
plugin_config_prefix = "cloudflarespeedtest_"
# 加载顺序
plugin_order = 12
# 可使用的用户级别
auth_level = 1
# 私有属性
_customhosts = False
_cf_ip = None
_scheduler = None
_cron = None
_onlyonce = False
_ipv4 = False
_ipv6 = False
_version = None
_additional_args = None
_re_install = False
_notify = False
_check = False
_cf_path = None
_cf_ipv4 = None
_cf_ipv6 = None
_result_file = None
_release_prefix = 'https://github.com/XIU2/CloudflareSpeedTest/releases/download'
_binary_name = 'CloudflareST'
def init_plugin(self, config: dict = None):
# 停止现有任务
self.stop_service()
# 读取配置
if config:
self._onlyonce = config.get("onlyonce")
self._cron = config.get("cron")
self._cf_ip = config.get("cf_ip")
self._version = config.get("version")
self._ipv4 = config.get("ipv4")
self._ipv6 = config.get("ipv6")
self._re_install = config.get("re_install")
self._additional_args = config.get("additional_args")
self._notify = config.get("notify")
self._check = config.get("check")
if self.get_state() or self._onlyonce:
self._scheduler = BackgroundScheduler(timezone=settings.TZ)
try:
if self.get_state() and self._cron:
logger.info(f"Cloudflare CDN优选服务启动周期{self._cron}")
self._scheduler.add_job(func=self.__cloudflareSpeedTest,
trigger=CronTrigger.from_crontab(self._cron),
name="Cloudflare优选")
if self._onlyonce:
logger.info(f"Cloudflare CDN优选服务启动立即运行一次")
self._scheduler.add_job(func=self.__cloudflareSpeedTest, trigger='date',
run_date=datetime.now(tz=pytz.timezone(settings.TZ)) + timedelta(seconds=3),
name="Cloudflare优选")
# 关闭一次性开关
self._onlyonce = False
self.__update_config()
except Exception as err:
logger.error(f"Cloudflare CDN优选服务出错{str(err)}")
self.systemmessage.put(f"Cloudflare CDN优选服务出错{str(err)}")
return
# 启动任务
if self._scheduler.get_jobs():
self._scheduler.print_jobs()
self._scheduler.start()
@eventmanager.register(EventType.CloudFlareSpeedTest)
def __cloudflareSpeedTest(self, event: Event = None):
"""
CloudflareSpeedTest优选
"""
self._cf_path = self.get_data_path()
self._cf_ipv4 = os.path.join(self._cf_path, "ip.txt")
self._cf_ipv6 = os.path.join(self._cf_path, "ipv6.txt")
self._result_file = os.path.join(self._cf_path, "result_hosts.txt")
# 获取自定义Hosts插件若无设置则停止
customHosts = self.get_config("CustomHosts")
self._customhosts = customHosts and customHosts.get("enabled")
if self._cf_ip and not customHosts or not customHosts.get("hosts"):
logger.error(f"Cloudflare CDN优选依赖于自定义Hosts请先维护hosts")
return
if not self._cf_ip:
logger.error("CloudflareSpeedTest加载成功首次运行需要配置优选ip")
return
if event and event.event_data:
logger.info("收到命令开始Cloudflare IP优选 ...")
self.post_message(channel=event.event_data.get("channel"),
title="开始Cloudflare IP优选 ...",
userid=event.event_data.get("user"))
# ipv4和ipv6必须其一
if not self._ipv4 and not self._ipv6:
self._ipv4 = True
self.__update_config()
logger.warn(f"Cloudflare CDN优选未指定ip类型默认ipv4")
err_flag, release_version = self.__check_envirment()
if err_flag and release_version:
# 更新版本
self._version = release_version
self.__update_config()
hosts = customHosts.get("hosts")
if isinstance(hosts, str):
hosts = str(hosts).split('\n')
# 校正优选ip
if self._check:
self.__check_cf_ip(hosts=hosts)
# 开始优选
if err_flag:
logger.info("正在进行CLoudflare CDN优选请耐心等待")
# 执行优选命令,-dd不测速
if SystemUtils.is_windows():
cf_command = f'cd \"{self._cf_path}\" && CloudflareST {self._additional_args} -o \"{self._result_file}\"' + (
f' -f \"{self._cf_ipv4}\"' if self._ipv4 else '') + (
f' -f \"{self._cf_ipv6}\"' if self._ipv6 else '')
else:
cf_command = f'cd {self._cf_path} && chmod a+x {self._binary_name} && ./{self._binary_name} {self._additional_args} -o {self._result_file}' + (
f' -f {self._cf_ipv4}' if self._ipv4 else '') + (f' -f {self._cf_ipv6}' if self._ipv6 else '')
logger.info(f'正在执行优选命令 {cf_command}')
if SystemUtils.is_windows():
process = subprocess.Popen(cf_command, shell=True)
# 执行命令后无法退出 采用异步和设置超时方案
# 设置超时时间为120秒
if cf_command.__contains__("-dd"):
time.sleep(120)
else:
time.sleep(600)
# 如果没有在120秒内完成任务那么杀死该进程
if process.poll() is None:
os.system('taskkill /F /IM CloudflareST.exe')
else:
os.system(cf_command)
# 获取优选后最优ip
if SystemUtils.is_windows():
powershell_command = f"powershell.exe -Command \"Get-Content \'{self._result_file}\' | Select-Object -Skip 1 -First 1 | Write-Output\""
logger.info(f'正在执行powershell命令 {powershell_command}')
best_ip = SystemUtils.execute(powershell_command)
best_ip = best_ip.split(',')[0]
else:
best_ip = SystemUtils.execute("sed -n '2,1p' " + self._result_file + " | awk -F, '{print $1}'")
logger.info(f"\n获取到最优ip==>[{best_ip}]")
# 替换自定义Hosts插件数据库hosts
if IpUtils.is_ipv4(best_ip) or IpUtils.is_ipv6(best_ip):
if best_ip == self._cf_ip:
logger.info(f"CloudflareSpeedTest CDN优选ip未变不做处理")
else:
# 替换优选ip
err_hosts = customHosts.get("err_hosts")
# 处理ip
new_hosts = []
for host in hosts:
if host and host != '\n':
host_arr = str(host).split()
if host_arr[0] == self._cf_ip:
new_hosts.append(host.replace(self._cf_ip, best_ip).replace("\n", "") + "\n")
else:
new_hosts.append(host.replace("\n", "") + "\n")
# 更新自定义Hosts
self.update_config(
{
"hosts": ''.join(new_hosts),
"err_hosts": err_hosts,
"enabled": True
}, "CustomHosts"
)
# 更新优选ip
old_ip = self._cf_ip
self._cf_ip = best_ip
self.__update_config()
logger.info(f"Cloudflare CDN优选ip [{best_ip}] 已替换自定义Hosts插件")
# 解发自定义hosts插件重载
logger.info("通知CustomHosts插件重载 ...")
self.eventmanager.send_event(EventType.PluginReload,
{
"plugin_id": "CustomHosts"
})
if self._notify:
self.post_message(
mtype=NotificationType.SiteMessage,
title="【Cloudflare优选任务完成】",
text=f"原ip{old_ip}\n"
f"新ip{best_ip}"
)
else:
logger.error("获取到最优ip格式错误请重试")
self._onlyonce = False
self.__update_config()
self.stop_service()
def __check_cf_ip(self, hosts):
"""
校正cf优选ip
防止特殊情况下cf优选ip和自定义hosts插件中ip不一致
"""
# 统计每个IP地址出现的次数
ip_count = {}
for host in hosts:
if host:
ip = host.split()[0]
if ip in ip_count:
ip_count[ip] += 1
else:
ip_count[ip] = 1
# 找出出现次数最多的IP地址
max_ips = [] # 保存最多出现的IP地址
max_count = 0
for ip, count in ip_count.items():
if count > max_count:
max_ips = [ip] # 更新最多的IP地址
max_count = count
elif count == max_count:
max_ips.append(ip)
# 如果出现次数最多的ip不止一个则不做兼容处理
if len(max_ips) != 1:
return
if max_ips[0] != self._cf_ip:
self._cf_ip = max_ips[0]
logger.info(f"获取到自定义hosts插件中ip {max_ips[0]} 出现次数最多已自动校正优选ip")
def __check_envirment(self):
"""
环境检查
"""
# 是否安装标识
install_flag = False
# 是否重新安装
if self._re_install:
install_flag = True
if SystemUtils.is_windows():
os.system(f'rd /s /q \"{self._cf_path}\"')
else:
os.system(f'rm -rf {self._cf_path}')
logger.info(f'删除CloudflareSpeedTest目录 {self._cf_path},开始重新安装')
# 判断目录是否存在
cf_path = Path(self._cf_path)
if not cf_path.exists():
os.mkdir(self._cf_path)
# 获取CloudflareSpeedTest最新版本
release_version = self.__get_release_version()
if not release_version:
# 如果升级失败但是有可执行文件CloudflareST则可继续运行反之停止
if Path(f'{self._cf_path}/{self._binary_name}').exists():
logger.warn(f"获取CloudflareSpeedTest版本失败存在可执行版本继续运行")
return True, None
elif self._version:
logger.error(f"获取CloudflareSpeedTest版本失败获取上次运行版本{self._version},开始安装")
install_flag = True
else:
release_version = "v2.2.2"
self._version = release_version
logger.error(f"获取CloudflareSpeedTest版本失败获取默认版本{release_version},开始安装")
install_flag = True
# 有更新
if not install_flag and release_version != self._version:
logger.info(f"检测到CloudflareSpeedTest有版本[{release_version}]更新,开始安装")
install_flag = True
# 重装后数据库有版本数据,但是本地没有则重装
if not install_flag and release_version == self._version and not Path(
f'{self._cf_path}/{self._binary_name}').exists() and not Path(
f'{self._cf_path}/CloudflareST.exe').exists():
logger.warn(f"未检测到CloudflareSpeedTest本地版本重新安装")
install_flag = True
if not install_flag:
logger.info(f"CloudflareSpeedTest无新版本存在可执行版本继续运行")
return True, None
# 检查环境、安装
if SystemUtils.is_windows():
# windows
cf_file_name = 'CloudflareST_windows_amd64.zip'
download_url = f'{self._release_prefix}/{release_version}/{cf_file_name}'
return self.__os_install(download_url, cf_file_name, release_version,
f"ditto -V -x -k --sequesterRsrc {self._cf_path}/{cf_file_name} {self._cf_path}")
elif SystemUtils.is_macos():
# mac
uname = SystemUtils.execute('uname -m')
arch = 'amd64' if uname == 'x86_64' else 'arm64'
cf_file_name = f'CloudflareST_darwin_{arch}.zip'
download_url = f'{self._release_prefix}/{release_version}/{cf_file_name}'
return self.__os_install(download_url, cf_file_name, release_version,
f"ditto -V -x -k --sequesterRsrc {self._cf_path}/{cf_file_name} {self._cf_path}")
else:
# docker
uname = SystemUtils.execute('uname -m')
arch = 'amd64' if uname == 'x86_64' else 'arm64'
cf_file_name = f'CloudflareST_linux_{arch}.tar.gz'
download_url = f'{self._release_prefix}/{release_version}/{cf_file_name}'
return self.__os_install(download_url, cf_file_name, release_version,
f"tar -zxf {self._cf_path}/{cf_file_name} -C {self._cf_path}")
def __os_install(self, download_url, cf_file_name, release_version, unzip_command):
"""
macos docker安装cloudflare
"""
# 手动下载安装包后,无需在此下载
if not Path(f'{self._cf_path}/{cf_file_name}').exists():
# 首次下载或下载新版压缩包
proxies = settings.PROXY
https_proxy = proxies.get("https") if proxies and proxies.get("https") else None
if https_proxy:
if SystemUtils.is_windows():
self.__get_windows_cloudflarest(download_url, proxies)
else:
os.system(
f'wget -P {self._cf_path} --no-check-certificate -e use_proxy=yes -e https_proxy={https_proxy} {download_url}')
else:
if SystemUtils.is_windows():
self.__get_windows_cloudflarest(download_url, proxies)
else:
os.system(f'wget -P {self._cf_path} https://ghproxy.com/{download_url}')
# 判断是否下载好安装包
if Path(f'{self._cf_path}/{cf_file_name}').exists():
try:
if SystemUtils.is_windows():
with zipfile.ZipFile(f'{self._cf_path}/{cf_file_name}', 'r') as zip_ref:
# 解压ZIP文件中的所有文件到指定目录
zip_ref.extractall(self._cf_path)
if Path(f'{self._cf_path}\\CloudflareST.exe').exists():
logger.info(f"CloudflareSpeedTest安装成功当前版本{release_version}")
return True, release_version
else:
logger.error(f"CloudflareSpeedTest安装失败请检查")
os.system(f'rd /s /q \"{self._cf_path}\"')
return False, None
# 解压
os.system(f'{unzip_command}')
# 删除压缩包
os.system(f'rm -rf {self._cf_path}/{cf_file_name}')
if Path(f'{self._cf_path}/{self._binary_name}').exists():
logger.info(f"CloudflareSpeedTest安装成功当前版本{release_version}")
return True, release_version
else:
logger.error(f"CloudflareSpeedTest安装失败请检查")
os.removedirs(self._cf_path)
return False, None
except Exception as err:
# 如果升级失败但是有可执行文件CloudflareST则可继续运行反之停止
if Path(f'{self._cf_path}/{self._binary_name}').exists() or \
Path(f'{self._cf_path}\\CloudflareST.exe').exists():
logger.error(f"CloudflareSpeedTest安装失败{str(err)},继续使用现版本运行")
return True, None
else:
logger.error(f"CloudflareSpeedTest安装失败{str(err)},无可用版本,停止运行")
if SystemUtils.is_windows():
os.system(f'rd /s /q \"{self._cf_path}\"')
else:
os.removedirs(self._cf_path)
return False, None
else:
# 如果升级失败但是有可执行文件CloudflareST则可继续运行反之停止
if Path(f'{self._cf_path}/{self._binary_name}').exists() or \
Path(f'{self._cf_path}\\CloudflareST.exe').exists():
logger.warn(f"CloudflareSpeedTest安装失败存在可执行版本继续运行")
return True, None
else:
logger.error(f"CloudflareSpeedTest安装失败无可用版本停止运行")
if SystemUtils.is_windows():
os.system(f'rd /s /q \"{self._cf_path}\"')
else:
os.removedirs(self._cf_path)
return False, None
def __get_windows_cloudflarest(self, download_url, proxies):
response = Response()
try:
response = requests.get(download_url, stream=True, proxies=proxies if proxies else None)
except requests.exceptions.RequestException as e:
logger.error(f"CloudflareSpeedTest下载失败{str(e)}")
if response.status_code == 200:
with open(f'{self._cf_path}\\CloudflareST_windows_amd64.zip', 'wb') as file:
for chunk in response.iter_content(chunk_size=8192):
file.write(chunk)
@staticmethod
def __get_release_version():
"""
获取CloudflareSpeedTest最新版本
"""
version_res = RequestUtils().get_res(
"https://api.github.com/repos/XIU2/CloudflareSpeedTest/releases/latest")
if not version_res:
version_res = RequestUtils(proxies=settings.PROXY).get_res(
"https://api.github.com/repos/XIU2/CloudflareSpeedTest/releases/latest")
if version_res:
ver_json = version_res.json()
version = f"{ver_json['tag_name']}"
return version
else:
return None
def __update_config(self):
"""
更新优选插件配置
"""
self.update_config({
"onlyonce": False,
"cron": self._cron,
"cf_ip": self._cf_ip,
"version": self._version,
"ipv4": self._ipv4,
"ipv6": self._ipv6,
"re_install": self._re_install,
"additional_args": self._additional_args,
"notify": self._notify,
"check": self._check
})
def get_state(self) -> bool:
    """The plugin counts as active only when both a preferred IP and a cron schedule are configured."""
    return bool(self._cf_ip and self._cron)
@staticmethod
def get_command() -> List[Dict[str, Any]]:
    """
    Declare the remote-control commands offered by this plugin.

    :return: list of command descriptors (keyword, event, description, payload)
    """
    speedtest_cmd = {
        "cmd": "/cloudflare_speedtest",
        "event": EventType.CloudFlareSpeedTest,
        "desc": "Cloudflare IP优选",
        "data": {},
    }
    return [speedtest_cmd]
def get_api(self) -> List[Dict[str, Any]]:
    """This plugin exposes no HTTP APIs."""
    return None
def get_form(self) -> Tuple[List[dict], Dict[str, Any]]:
    """
    Build the plugin settings page.

    :return: (Vuetify form layout, default values for the data model)
    """

    def _text(model: str, label: str, placeholder: str = None, readonly: bool = False) -> dict:
        # One VTextField; key order mirrors the UI schema but is not significant.
        props = {'model': model}
        if readonly:
            props['readonly'] = True
        props['label'] = label
        if placeholder is not None:
            props['placeholder'] = placeholder
        return {'component': 'VTextField', 'props': props}

    def _switch(model: str, label: str) -> dict:
        return {'component': 'VSwitch', 'props': {'model': model, 'label': label}}

    def _col(inner: dict, md: int = None) -> dict:
        # A 12-wide column, optionally constrained to `md` on medium screens.
        props = {'cols': 12}
        if md is not None:
            props['md'] = md
        return {'component': 'VCol', 'props': props, 'content': [inner]}

    def _row(*cols: dict) -> dict:
        return {'component': 'VRow', 'content': list(cols)}

    layout = [
        {
            'component': 'VForm',
            'content': [
                _row(
                    _col(_text('cf_ip', '优选IP', '121.121.121.121'), md=4),
                    _col(_text('cron', '优选周期', '0 0 0 ? *'), md=4),
                    _col(_text('version', 'CloudflareSpeedTest版本', '暂未安装', readonly=True), md=4),
                ),
                _row(
                    _col(_switch('ipv4', 'IPv4'), md=4),
                    _col(_switch('ipv6', 'IPv6'), md=4),
                    _col(_switch('check', '自动校准'), md=4),
                ),
                _row(
                    _col(_switch('onlyonce', '立即运行一次'), md=4),
                    _col(_switch('re_install', '重装后运行'), md=4),
                    _col(_switch('notify', '运行时通知'), md=4),
                ),
                _row(
                    _col(_text('additional_args', '高级参数', '-dd')),
                ),
            ],
        }
    ]
    defaults = {
        "cf_ip": "",
        "cron": "",
        "version": "",
        "ipv4": True,
        "ipv6": False,
        "check": False,
        "onlyonce": False,
        "re_install": False,
        "notify": True,
        "additional_args": ""
    }
    return layout, defaults
def get_page(self) -> List[dict]:
    """This plugin has no detail page."""
    return None
@staticmethod
def __read_system_hosts():
    """
    Load the system hosts file as a python_hosts.Hosts object.
    """
    # Windows and POSIX keep the hosts file in different locations.
    if SystemUtils.is_windows():
        path = r"c:\windows\system32\drivers\etc\hosts"
    else:
        path = '/etc/hosts'
    return Hosts(path=path)
def __add_hosts_to_system(self, hosts):
    """
    Write the given hosts entries into the system hosts file.

    :param hosts: list of "ip name1 name2 ..." lines
    :return: (err_flag, err_hosts) — err_flag is True when writing the file
             failed; err_hosts lists the lines that could not be parsed
    """
    # System hosts object
    system_hosts = self.__read_system_hosts()
    # Strip entries previously added by this plugin: everything from the marker
    # comment onward is ours, so keep only what precedes it.
    orgin_entries = []
    for entry in system_hosts.entries:
        if entry.entry_type == "comment" and entry.comment == "# CustomHostsPlugin":
            break
        orgin_entries.append(entry)
    system_hosts.entries = orgin_entries
    # Newly parsed, valid host entries
    new_entrys = []
    # Lines that failed to parse
    err_hosts = []
    err_flag = False
    for host in hosts:
        if not host:
            continue
        host_arr = str(host).split()
        try:
            # Entry type is chosen by whether the address parses as IPv4.
            host_entry = HostsEntry(entry_type='ipv4' if IpUtils.is_ipv4(str(host_arr[0])) else 'ipv6',
                                    address=host_arr[0],
                                    names=host_arr[1:])
            new_entrys.append(host_entry)
        except Exception as err:
            err_hosts.append(host + "\n")
            logger.error(f"[HOST] 格式转换错误:{str(err)}")
            # Push a realtime message to the UI
            self.systemmessage.put(f"[HOST] 格式转换错误:{str(err)}")
    # Write back to the system hosts file
    if new_entrys:
        try:
            # Marker separating plugin-managed entries from the originals
            system_hosts.add([HostsEntry(entry_type='comment', comment="# CustomHostsPlugin")])
            # Append the new entries
            system_hosts.add(new_entrys)
            system_hosts.write()
            logger.info("更新系统hosts文件成功")
        except Exception as err:
            err_flag = True
            logger.error(f"更新系统hosts文件失败:{str(err) or '请检查权限'}")
            # Push a realtime message to the UI
            self.systemmessage.put(f"更新系统hosts文件失败:{str(err) or '请检查权限'}")
    return err_flag, err_hosts
def stop_service(self):
    """
    Shut down the background scheduler (if any) and drop the reference.
    """
    try:
        scheduler = self._scheduler
        if scheduler:
            scheduler.remove_all_jobs()
            # Only a running scheduler needs an explicit shutdown.
            if scheduler.running:
                scheduler.shutdown()
            self._scheduler = None
    except Exception as e:
        logger.error("退出插件失败:%s" % str(e))

View File

@@ -1,238 +0,0 @@
from typing import List, Tuple, Dict, Any
from python_hosts import Hosts, HostsEntry
from app.core.event import eventmanager
from app.log import logger
from app.plugins import _PluginBase
from app.schemas.types import EventType
from app.utils.ip import IpUtils
from app.utils.system import SystemUtils
class CustomHosts(_PluginBase):
    """
    Plugin that writes user-supplied hosts entries into the system hosts file
    to speed up network access.
    """
    # Plugin name
    plugin_name = "自定义Hosts"
    # Plugin description
    plugin_desc = "修改系统hosts文件,加速网络访问。"
    # Plugin icon
    plugin_icon = "hosts.png"
    # Theme color
    plugin_color = "#02C4E0"
    # Plugin version
    plugin_version = "1.0"
    # Plugin author
    plugin_author = "thsrite"
    # Author homepage
    author_url = "https://github.com/thsrite"
    # Prefix for this plugin's config keys
    plugin_config_prefix = "customhosts_"
    # Load order
    plugin_order = 10
    # Minimum user auth level allowed to use the plugin
    auth_level = 1

    # Private state
    # hosts lines entered by the user ("ip name1 name2 ..." per line)
    _hosts = []
    _enabled = False

    def init_plugin(self, config: dict = None):
        """
        Load config and, when enabled, push the configured hosts into the
        system hosts file; lines that fail to parse are saved to err_hosts.
        """
        # Read configuration
        if config:
            self._enabled = config.get("enabled")
            self._hosts = config.get("hosts")
            if isinstance(self._hosts, str):
                self._hosts = str(self._hosts).split('\n')
            if self._enabled and self._hosts:
                # Drop empty lines and normalize each entry to end with a newline
                new_hosts = []
                for host in self._hosts:
                    if host and host != '\n':
                        new_hosts.append(host.replace("\n", "") + "\n")
                self._hosts = new_hosts

                # Apply to the system hosts file
                error_flag, error_hosts = self.__add_hosts_to_system(self._hosts)
                # Disable the plugin if the write failed
                self._enabled = self._enabled and not error_flag

                # Persist the lines that failed so the UI can display them
                self.update_config({
                    "hosts": ''.join(self._hosts),
                    "err_hosts": error_hosts,
                    "enabled": self._enabled
                })

    def get_state(self) -> bool:
        """Whether the plugin is currently enabled."""
        return self._enabled

    @staticmethod
    def get_command() -> List[Dict[str, Any]]:
        """No remote-control commands."""
        pass

    def get_api(self) -> List[Dict[str, Any]]:
        """No plugin APIs."""
        pass

    def get_form(self) -> Tuple[List[dict], Dict[str, Any]]:
        """
        Build the settings page.

        :return: (form layout, default values for the data model)
        """
        return [
            {
                'component': 'VForm',
                'content': [
                    {
                        'component': 'VRow',
                        'content': [
                            {
                                'component': 'VCol',
                                'props': {
                                    'cols': 12,
                                    'md': 6
                                },
                                'content': [
                                    {
                                        'component': 'VSwitch',
                                        'props': {
                                            'model': 'enabled',
                                            'label': '启用插件',
                                        }
                                    }
                                ]
                            }
                        ]
                    },
                    {
                        'component': 'VRow',
                        'content': [
                            {
                                'component': 'VCol',
                                'props': {
                                    'cols': 12
                                },
                                'content': [
                                    {
                                        'component': 'VTextarea',
                                        'props': {
                                            'model': 'hosts',
                                            'label': '自定义hosts',
                                            'rows': 10,
                                            'placeholder': '每行一个配置,格式为:ip host1 host2 ...'
                                        }
                                    }
                                ]
                            }
                        ]
                    },
                    {
                        'component': 'VRow',
                        'content': [
                            {
                                'component': 'VCol',
                                'props': {
                                    'cols': 12
                                },
                                'content': [
                                    {
                                        'component': 'VTextarea',
                                        'props': {
                                            'model': 'err_hosts',
                                            'readonly': True,
                                            'label': '错误hosts',
                                            'rows': 2,
                                            'placeholder': '错误的hosts配置会展示在此处,请修改上方hosts重新提交(错误的hosts不会写入系统hosts文件)'
                                        }
                                    }
                                ]
                            }
                        ]
                    }
                ]
            }
        ], {
            "enabled": False,
            "hosts": "",
            "err_hosts": ""
        }

    def get_page(self) -> List[dict]:
        """No detail page."""
        pass

    @staticmethod
    def __read_system_hosts():
        """
        Load the system hosts file as a python_hosts.Hosts object.
        """
        # Hosts file location differs between Windows and POSIX
        if SystemUtils.is_windows():
            hosts_path = r"c:\windows\system32\drivers\etc\hosts"
        else:
            hosts_path = '/etc/hosts'
        # Read the system hosts
        return Hosts(path=hosts_path)

    def __add_hosts_to_system(self, hosts):
        """
        Write the given hosts entries into the system hosts file.

        :param hosts: list of "ip name1 name2 ..." lines
        :return: (err_flag, err_hosts) — err_flag is True when writing failed;
                 err_hosts lists lines that could not be parsed
        """
        # System hosts object
        system_hosts = self.__read_system_hosts()
        # Strip entries previously added by this plugin: everything from the
        # marker comment onward is ours, keep only what precedes it.
        orgin_entries = []
        for entry in system_hosts.entries:
            if entry.entry_type == "comment" and entry.comment == "# CustomHostsPlugin":
                break
            orgin_entries.append(entry)
        system_hosts.entries = orgin_entries
        # Newly parsed, valid host entries
        new_entrys = []
        # Lines that failed to parse
        err_hosts = []
        err_flag = False
        for host in hosts:
            if not host:
                continue
            host_arr = str(host).split()
            try:
                host_entry = HostsEntry(entry_type='ipv4' if IpUtils.is_ipv4(str(host_arr[0])) else 'ipv6',
                                        address=host_arr[0],
                                        names=host_arr[1:])
                new_entrys.append(host_entry)
            except Exception as err:
                err_hosts.append(host + "\n")
                logger.error(f"[HOST] 格式转换错误:{str(err)}")
                # Push a realtime message to the UI
                self.systemmessage.put(f"[HOST] 格式转换错误:{str(err)}")
        # Write back to the system hosts file
        if new_entrys:
            try:
                # Marker separating plugin-managed entries from the originals
                system_hosts.add([HostsEntry(entry_type='comment', comment="# CustomHostsPlugin")])
                # Append the new entries
                system_hosts.add(new_entrys)
                system_hosts.write()
                logger.info("更新系统hosts文件成功,容器运行则更新容器hosts")
            except Exception as err:
                err_flag = True
                logger.error(f"更新系统hosts文件失败:{str(err) or '请检查权限'}")
                # Push a realtime message to the UI
                self.systemmessage.put(f"更新系统hosts文件失败:{str(err) or '请检查权限'}")
        return err_flag, err_hosts

    def stop_service(self):
        """
        Nothing to clean up on shutdown.
        """
        pass

    @eventmanager.register(EventType.PluginReload)
    def reload(self, event):
        """
        Re-run init_plugin when a PluginReload event targets this plugin.
        """
        plugin_id = event.event_data.get("plugin_id")
        if not plugin_id:
            return
        if plugin_id != self.__class__.__name__:
            return
        return self.init_plugin(self.get_config())

View File

@@ -1,250 +0,0 @@
from typing import Any, List, Dict, Tuple
from urllib.parse import urlparse
from app.core.config import settings
from app.core.event import EventManager
from app.helper.cookiecloud import CookieCloudHelper
from app.log import logger
from app.plugins import _PluginBase
from app.schemas.types import EventType
class CustomSites(_PluginBase):
    """
    Plugin that registers user-defined sites for the sign-in and statistics
    features; site cookies are synced from CookieCloud.
    """
    # Plugin name
    plugin_name = "自定义站点"
    # Plugin description
    plugin_desc = "增加自定义站点为签到和统计使用。"
    # Plugin icon
    plugin_icon = "world.png"
    # Theme color
    plugin_color = "#9AC16C"
    # Plugin version
    plugin_version = "0.1"
    # Plugin author
    plugin_author = "lightolly"
    # Author homepage
    author_url = "https://github.com/lightolly"
    # Prefix for this plugin's config keys
    plugin_config_prefix = "customsites_"
    # Load order
    plugin_order = 0
    # Minimum user auth level
    auth_level = 2

    # First id available for custom sites
    site_id_base = 60000
    # Next id to hand out (recomputed in init_plugin)
    site_id_alloc = site_id_base

    # Private state
    cookie_cloud: CookieCloudHelper = None

    # Config attributes
    _enabled: bool = False
    """
    {
        "id": "站点ID",
        "name": "站点名称",
        "url": "站点地址",
        "cookie": "站点Cookie",
        "ua": "User-Agent",
        "proxy": "是否使用代理",
        "render": "是否仿真",
    }
    """
    _sites: list[Dict] = []
    """
    格式
    站点名称|url|是否仿真
    """
    _site_urls: str = ""

    def init_plugin(self, config: dict = None):
        """
        Parse the configured site list: drop sites removed from the textarea,
        allocate ids for new ones, then sync cookies and persist the result.
        """
        self.cookie_cloud = CookieCloudHelper(
            server=settings.COOKIECLOUD_HOST,
            key=settings.COOKIECLOUD_KEY,
            password=settings.COOKIECLOUD_PASSWORD
        )
        del_sites = []
        sites = []
        new_site_urls = []
        # Configuration
        if config:
            self._enabled = config.get("enabled", False)
            self._sites = config.get("sites", [])
            self._site_urls = config.get("site_urls", "")
            if not self._enabled:
                return
            site_urls = self._site_urls.splitlines()
            # Keep only the stored sites whose url is still listed in site_urls.
            # NOTE(review): each line is assumed to be "name|url|render(Y/N)" —
            # a blank or malformed line raises IndexError/ValueError here;
            # confirm the UI guarantees the format.
            urls = [site_url.split('|')[1] for site_url in site_urls]
            for site in self._sites:
                if site.get("url") not in urls:
                    del_sites.append(site)
                else:
                    sites.append(site)
            for item in site_urls:
                _, url, _ = item.split("|")
                if url in [site.get("url") for site in self._sites]:
                    continue
                else:
                    new_site_urls.append(item)
            # Highest id already in use (falling back to the base id)
            alloc_ids = [site.get("id") for site in self._sites if site.get("id")] + [self.site_id_base]
            self.site_id_alloc = max(alloc_ids) + 1
            # Assign ids to the newly added sites
            for item in new_site_urls:
                site_name, item, site_render = item.split("|")
                sites.append({
                    "id": self.site_id_alloc,
                    "name": site_name,
                    "url": item,
                    "render": True if site_render.upper() == 'Y' else False,
                    "cookie": "",
                })
                self.site_id_alloc += 1
            self._sites = sites
            # Save configuration
            self.sync_cookie()
            self.__update_config()
            # Notify listeners about deleted sites
            for site in del_sites:
                self.delete_site(site.get("id"))
                logger.info(f"删除站点 {site.get('name')}")

    def get_state(self) -> bool:
        """Whether the plugin is currently enabled."""
        return self._enabled

    def __update_config(self):
        """Persist the current site list back to the plugin config store."""
        self.update_config(
            {
                "enabled": self._enabled,
                "sites": self._sites,
                "site_urls": self._site_urls
            }
        )

    def __get_site_by_domain(self, domain):
        """
        Find the configured site whose url's host ends with the given domain.

        :return: the site dict, or None when no site matches
        """
        for site in self._sites:
            site_domain = urlparse(site.get("url")).netloc
            if site_domain.endswith(domain):
                return site
        return None

    def sync_cookie(self):
        """
        Pull cookies from CookieCloud and update the matching custom sites.
        """
        logger.info("开始同步CookieCloud站点 ...")
        cookies, msg = self.cookie_cloud.download()
        if not cookies:
            logger.error(f"CookieCloud同步失败:{msg}")
            return
        # Update cookies for sites whose domain matches a CookieCloud entry
        _update_count = 0
        for domain, cookie in cookies.items():
            # Look up the site by domain
            site_info = self.__get_site_by_domain(domain)
            if site_info:
                # Update the site cookie in place
                logger.info(f"更新站点 {domain} Cookie ...")
                site_info.update({"cookie": cookie})
                _update_count += 1
        # Done
        ret_msg = f"更新了{_update_count}个站点,总{len(self._sites)}个站点"
        logger.info(f"自定义站点 Cookie同步成功:{ret_msg}")

    @staticmethod
    def get_command() -> List[Dict[str, Any]]:
        """No remote-control commands."""
        pass

    def get_api(self) -> List[Dict[str, Any]]:
        """No plugin APIs."""
        pass

    def get_form(self) -> Tuple[List[dict], Dict[str, Any]]:
        """
        Build the settings page.

        :return: (form layout, default values for the data model)
        """
        return [
            {
                'component': 'VForm',
                'content': [
                    {
                        'component': 'VRow',
                        'content': [
                            {
                                'component': 'VCol',
                                'props': {
                                    'cols': 12,
                                    'md': 6
                                },
                                'content': [
                                    {
                                        'component': 'VSwitch',
                                        'props': {
                                            'model': 'enabled',
                                            'label': '启用插件',
                                        }
                                    }
                                ]
                            }
                        ]
                    },
                    {
                        'component': 'VRow',
                        'content': [
                            {
                                'component': 'VCol',
                                'props': {
                                    'cols': 12
                                },
                                'content': [
                                    {
                                        'component': 'VTextarea',
                                        'props': {
                                            'model': 'site_urls',
                                            'label': '站点列表',
                                            'rows': 5,
                                            'placeholder': '每一行一个站点,配置方式:\n'
                                                           '站点名称|站点地址|是否仿真(Y/N)\n'
                                        }
                                    }
                                ]
                            }
                        ]
                    }
                ]
            }
        ], {
            "enabled": False,
            "site_urls": [],
            "sites": self._sites
        }

    def get_page(self) -> List[dict]:
        """No detail page."""
        pass

    def stop_service(self):
        """
        Nothing to clean up on shutdown.
        """
        pass

    @staticmethod
    def delete_site(site_id):
        """
        Broadcast a SiteDeleted event so other components drop the site.
        """
        # Notify about the plugin-site deletion
        EventManager().send_event(EventType.SiteDeleted,
                                  {
                                      "site_id": site_id
                                  })

View File

@@ -1,793 +0,0 @@
import datetime
import re
import shutil
import threading
import traceback
from pathlib import Path
from typing import List, Tuple, Dict, Any, Optional
import pytz
from apscheduler.schedulers.background import BackgroundScheduler
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer
from watchdog.observers.polling import PollingObserver
from app.chain.tmdb import TmdbChain
from app.chain.transfer import TransferChain
from app.core.config import settings
from app.core.context import MediaInfo
from app.core.event import eventmanager, Event
from app.core.metainfo import MetaInfoPath
from app.db.downloadhistory_oper import DownloadHistoryOper
from app.db.transferhistory_oper import TransferHistoryOper
from app.log import logger
from app.plugins import _PluginBase
from app.schemas import Notification, NotificationType, TransferInfo
from app.schemas.types import EventType, MediaType, SystemConfigKey
from app.utils.string import StringUtils
from app.utils.system import SystemUtils
# Serializes file handling between watchdog callbacks and the full-sync job.
lock = threading.Lock()
class FileMonitorHandler(FileSystemEventHandler):
    """
    Bridges watchdog filesystem events to the DirMonitor plugin.
    """

    def __init__(self, monpath: str, sync: Any, **kwargs):
        super().__init__(**kwargs)
        # Directory this handler watches; reported back with every event.
        self._watch_path = monpath
        # The DirMonitor instance that actually processes the files.
        self.sync = sync

    def on_created(self, event):
        # New file/directory appeared under the watched path.
        self.sync.event_handler(event=event, text="创建",
                                mon_path=self._watch_path, event_path=event.src_path)

    def on_moved(self, event):
        # Something was moved; the destination path is what we care about.
        self.sync.event_handler(event=event, text="移动",
                                mon_path=self._watch_path, event_path=event.dest_path)
class DirMonitor(_PluginBase):
    """
    Watches configured directories and transfers newly appearing media files
    into the media library in real time.
    """
    # Plugin name
    plugin_name = "目录监控"
    # Plugin description
    plugin_desc = "监控目录文件发生变化时实时整理到媒体库。"
    # Plugin icon
    plugin_icon = "directory.png"
    # Theme color
    plugin_color = "#E0995E"
    # Plugin version
    plugin_version = "1.0"
    # Plugin author
    plugin_author = "jxxghp"
    # Author homepage
    author_url = "https://github.com/jxxghp"
    # Prefix for this plugin's config keys
    plugin_config_prefix = "dirmonitor_"
    # Load order
    plugin_order = 4
    # Minimum user auth level
    auth_level = 1

    # Private state
    _scheduler = None
    transferhis = None
    downloadhis = None
    transferchian = None
    tmdbchain = None
    # Active watchdog observers, one per monitored directory
    _observer = []
    _enabled = False
    _notify = False
    _onlyonce = False
    # Watch mode: compatibility (polling) / fast (native events)
    _mode = "fast"
    # Default transfer method
    _transfer_type = settings.TRANSFER_TYPE
    _monitor_dirs = ""
    _exclude_keywords = ""
    # Maps source directory -> destination directory (None = auto)
    _dirconf: Dict[str, Optional[Path]] = {}
    # Maps source directory -> transfer method
    _transferconf: Dict[str, Optional[str]] = {}
    # Pending per-title media batches awaiting the summary notification
    _medias = {}
    # Exit event
    _event = threading.Event()

    def init_plugin(self, config: dict = None):
        """
        Read config, (re)start directory observers and the message scheduler.
        """
        self.transferhis = TransferHistoryOper()
        self.downloadhis = DownloadHistoryOper()
        self.transferchian = TransferChain()
        self.tmdbchain = TmdbChain()
        # Reset directory mappings
        self._dirconf = {}
        self._transferconf = {}

        # Read configuration
        if config:
            self._enabled = config.get("enabled")
            self._notify = config.get("notify")
            self._onlyonce = config.get("onlyonce")
            self._mode = config.get("mode")
            self._transfer_type = config.get("transfer_type")
            self._monitor_dirs = config.get("monitor_dirs") or ""
            self._exclude_keywords = config.get("exclude_keywords") or ""

        # Stop any running observers/jobs first
        self.stop_service()

        if self._enabled or self._onlyonce:
            # Scheduler for periodic jobs
            self._scheduler = BackgroundScheduler(timezone=settings.TZ)
            # Periodic job that flushes the aggregated library-import messages
            self._scheduler.add_job(self.send_msg, trigger='interval', seconds=15)

            # Parse the directory configuration
            monitor_dirs = self._monitor_dirs.split("\n")
            if not monitor_dirs:
                return
            for mon_path in monitor_dirs:
                # Line format: source[:destination][#transfer_type]
                if not mon_path:
                    continue

                # Per-directory transfer method override
                _transfer_type = self._transfer_type
                if mon_path.count("#") == 1:
                    _transfer_type = mon_path.split("#")[1]
                    mon_path = mon_path.split("#")[0]

                # Split source/destination; on Windows the drive-letter colon
                # must not be treated as the separator.
                if SystemUtils.is_windows():
                    if mon_path.count(":") > 1:
                        paths = [mon_path.split(":")[0] + ":" + mon_path.split(":")[1],
                                 mon_path.split(":")[2] + ":" + mon_path.split(":")[3]]
                    else:
                        paths = [mon_path]
                else:
                    paths = mon_path.split(":")

                # Destination directory (None = let the transfer chain decide)
                target_path = None
                if len(paths) > 1:
                    mon_path = paths[0]
                    target_path = Path(paths[1])
                    self._dirconf[mon_path] = target_path
                else:
                    self._dirconf[mon_path] = None

                # Transfer method for this source
                self._transferconf[mon_path] = _transfer_type

                # Start watching the directory
                if self._enabled:
                    # Refuse to watch when the library dir nests inside the download dir
                    try:
                        if target_path and target_path.is_relative_to(Path(mon_path)):
                            logger.warn(f"{target_path} 是下载目录 {mon_path} 的子目录,无法监控")
                            self.systemmessage.put(f"{target_path} 是下载目录 {mon_path} 的子目录,无法监控")
                            continue
                    except Exception as e:
                        logger.debug(str(e))
                        pass
                    try:
                        if self._mode == "compatibility":
                            # Compatibility mode: polling — slower and keeps NAS awake,
                            # but works on mounted remote shares such as SMB.
                            observer = PollingObserver(timeout=10)
                        else:
                            # Let watchdog pick the best native backend for the OS.
                            observer = Observer(timeout=10)
                        self._observer.append(observer)
                        observer.schedule(FileMonitorHandler(mon_path, self), path=mon_path, recursive=True)
                        observer.daemon = True
                        observer.start()
                        logger.info(f"{mon_path} 的目录监控服务启动")
                    except Exception as e:
                        err_msg = str(e)
                        if "inotify" in err_msg and "reached" in err_msg:
                            logger.warn(
                                f"目录监控服务启动出现异常:{err_msg},请在宿主机上(不是docker容器内)执行以下命令并重启:"
                                + """
                                     echo fs.inotify.max_user_watches=524288 | sudo tee -a /etc/sysctl.conf
                                     echo fs.inotify.max_user_instances=524288 | sudo tee -a /etc/sysctl.conf
                                     sudo sysctl -p
                                     """)
                        else:
                            logger.error(f"{mon_path} 启动目录监控失败:{err_msg}")
                        self.systemmessage.put(f"{mon_path} 启动目录监控失败:{err_msg}")

            # One-shot full sync
            if self._onlyonce:
                logger.info("目录监控服务启动,立即运行一次")
                self._scheduler.add_job(func=self.sync_all, trigger='date',
                                        run_date=datetime.datetime.now(
                                            tz=pytz.timezone(settings.TZ)) + datetime.timedelta(seconds=3)
                                        )
                # Clear the one-shot switch
                self._onlyonce = False
                # Save configuration
                self.__update_config()

            # Start the scheduler when it has jobs
            if self._scheduler.get_jobs():
                self._scheduler.print_jobs()
                self._scheduler.start()

    def __update_config(self):
        """
        Persist the current options back to the plugin config store.
        """
        self.update_config({
            "enabled": self._enabled,
            "notify": self._notify,
            "onlyonce": self._onlyonce,
            "mode": self._mode,
            "transfer_type": self._transfer_type,
            "monitor_dirs": self._monitor_dirs,
            "exclude_keywords": self._exclude_keywords
        })

    @eventmanager.register(EventType.DirectorySync)
    def remote_sync(self, event: Event):
        """
        Trigger a full sync from a remote command and report progress back to the user.
        """
        if event:
            self.post_message(channel=event.event_data.get("channel"),
                              title="开始同步监控目录 ...",
                              userid=event.event_data.get("user"))
        self.sync_all()
        if event:
            self.post_message(channel=event.event_data.get("channel"),
                              title="监控目录同步完成!", userid=event.event_data.get("user"))

    def sync_all(self):
        """
        Run once immediately: process every media file under every monitored directory.
        """
        logger.info("开始全量同步监控目录 ...")
        # Walk every monitored directory
        for mon_path in self._dirconf.keys():
            # Every media file under the directory
            for file_path in SystemUtils.list_files(Path(mon_path), settings.RMT_MEDIAEXT):
                self.__handle_file(event_path=str(file_path), mon_path=mon_path)
        logger.info("全量同步监控目录完成!")

    def event_handler(self, event, mon_path: str, text: str, event_path: str):
        """
        Handle a filesystem change event.

        :param event: watchdog event
        :param mon_path: monitored directory the event belongs to
        :param text: human-readable event description
        :param event_path: path of the affected file
        """
        if not event.is_directory:
            # A file changed
            logger.debug("文件%s:%s" % (text, event_path))
            self.__handle_file(event_path=event_path, mon_path=mon_path)

    def __handle_file(self, event_path: str, mon_path: str):
        """
        Process a single file: filter, identify the media, transfer it into the
        library, record history, and queue the summary notification.

        :param event_path: path of the file to process
        :param mon_path: monitored directory the file belongs to
        """
        file_path = Path(event_path)
        try:
            if not file_path.exists():
                return
            # Whole pipeline runs under the global lock
            with lock:
                transfer_history = self.transferhis.get_by_src(event_path)
                if transfer_history:
                    logger.debug("文件已处理过:%s" % event_path)
                    return

                # Skip recycle-bin and hidden files
                if event_path.find('/@Recycle/') != -1 \
                        or event_path.find('/#recycle/') != -1 \
                        or event_path.find('/.') != -1 \
                        or event_path.find('/@eaDir') != -1:
                    logger.debug(f"{event_path} 是回收站或隐藏的文件")
                    return

                # Skip paths matching the user's exclude keywords
                if self._exclude_keywords:
                    for keyword in self._exclude_keywords.split("\n"):
                        if keyword and re.findall(keyword, event_path):
                            logger.info(f"{event_path} 命中过滤关键字 {keyword},不处理")
                            return

                # Skip paths matching the global transfer exclude words
                transfer_exclude_words = self.systemconfig.get(SystemConfigKey.TransferExcludeWords)
                if transfer_exclude_words:
                    for keyword in transfer_exclude_words:
                        if not keyword:
                            continue
                        if keyword and re.search(r"%s" % keyword, event_path, re.IGNORECASE):
                            logger.info(f"{event_path} 命中整理屏蔽词 {keyword},不处理")
                            return

                # Only media files are processed
                if file_path.suffix not in settings.RMT_MEDIAEXT:
                    logger.debug(f"{event_path} 不是媒体文件")
                    return

                # Blu-ray folder: operate on the directory above BDMV
                if re.search(r"BDMV[/\\]STREAM", event_path, re.IGNORECASE):
                    # Cut off everything from BDMV onward
                    event_path = event_path[:event_path.find("BDMV")]
                    file_path = Path(event_path)

                # Re-check history (path may have changed for Blu-ray dirs)
                if self.transferhis.get_by_src(event_path):
                    logger.info(f"{event_path} 已整理过")
                    return

                # Metadata from the file path
                file_meta = MetaInfoPath(file_path)
                if not file_meta.name:
                    logger.error(f"{file_path.name} 无法识别有效信息")
                    return

                # Destination directory for this source
                target: Path = self._dirconf.get(mon_path)

                # Transfer method for this source
                transfer_type = self._transferconf.get(mon_path)

                # Download history for the parent directory (provides tmdbid)
                download_history = self.downloadhis.get_by_path(Path(event_path).parent)

                # Recognize the media
                mediainfo: MediaInfo = self.chain.recognize_media(meta=file_meta,
                                                                  tmdbid=download_history.tmdbid if download_history else None)
                if not mediainfo:
                    logger.warn(f'未识别到媒体信息,标题:{file_meta.name}')
                    # Record the failure so it can be redone manually
                    his = self.transferhis.add_fail(
                        src_path=file_path,
                        mode=transfer_type,
                        meta=file_meta
                    )
                    if self._notify:
                        self.chain.post_message(Notification(
                            mtype=NotificationType.Manual,
                            title=f"{file_path.name} 未识别到媒体信息,无法入库!\n"
                                  f"回复:```\n/redo {his.id} [tmdbid]|[类型]\n``` 手动识别转移。"
                        ))
                    return

                # When library titles should not follow TMDB changes, reuse the
                # title from the earlier transfer of the same tmdbid.
                if not settings.SCRAP_FOLLOW_TMDB:
                    transfer_history = self.transferhis.get_by_type_tmdbid(tmdbid=mediainfo.tmdb_id,
                                                                           mtype=mediainfo.type.value)
                    if transfer_history:
                        mediainfo.title = transfer_history.title
                logger.info(f"{file_path.name} 识别为:{mediainfo.type.value} {mediainfo.title_year}")

                # Refresh media images
                self.chain.obtain_images(mediainfo=mediainfo)

                # Episode data for TV shows
                if mediainfo.type == MediaType.TV:
                    episodes_info = self.tmdbchain.tmdb_episodes(tmdbid=mediainfo.tmdb_id,
                                                                 season=file_meta.begin_season or 1)
                else:
                    episodes_info = None

                # download_hash from the DB (avoids querying the downloader)
                download_hash = self.get_download_hash(src=str(file_path))

                # Transfer the file into the library
                transferinfo: TransferInfo = self.chain.transfer(mediainfo=mediainfo,
                                                                 path=file_path,
                                                                 transfer_type=transfer_type,
                                                                 target=target,
                                                                 meta=file_meta,
                                                                 episodes_info=episodes_info)

                if not transferinfo:
                    logger.error("文件转移模块运行失败")
                    return
                if not transferinfo.success:
                    # Transfer failed
                    logger.warn(f"{file_path.name} 入库失败:{transferinfo.message}")
                    # Record the failure
                    self.transferhis.add_fail(
                        src_path=file_path,
                        mode=transfer_type,
                        download_hash=download_hash,
                        meta=file_meta,
                        mediainfo=mediainfo,
                        transferinfo=transferinfo
                    )
                    if self._notify:
                        self.chain.post_message(Notification(
                            mtype=NotificationType.Manual,
                            title=f"{mediainfo.title_year}{file_meta.season_episode} 入库失败!",
                            text=f"原因:{transferinfo.message or '未知'}",
                            image=mediainfo.get_message_image()
                        ))
                    return

                # Record the success
                self.transferhis.add_success(
                    src_path=file_path,
                    mode=transfer_type,
                    download_hash=download_hash,
                    meta=file_meta,
                    mediainfo=mediainfo,
                    transferinfo=transferinfo
                )

                # Scrape metadata for this file
                if settings.SCRAP_METADATA:
                    self.chain.scrape_metadata(path=transferinfo.target_path,
                                               mediainfo=mediainfo,
                                               transfer_type=transfer_type)

                """
                {
                    "title_year season": {
                        "files": [
                            {
                                "path":,
                                "mediainfo":,
                                "file_meta":,
                                "transferinfo":
                            }
                        ],
                        "time": "2023-08-24 23:23:23.332"
                    }
                }
                """
                # Queue this file into the per-title batch for the summary message
                media_list = self._medias.get(mediainfo.title_year + " " + file_meta.season) or {}
                if media_list:
                    media_files = media_list.get("files") or []
                    if media_files:
                        file_exists = False
                        for file in media_files:
                            if str(event_path) == file.get("path"):
                                file_exists = True
                                break
                        if not file_exists:
                            media_files.append({
                                "path": event_path,
                                "mediainfo": mediainfo,
                                "file_meta": file_meta,
                                "transferinfo": transferinfo
                            })
                    else:
                        media_files = [
                            {
                                "path": event_path,
                                "mediainfo": mediainfo,
                                "file_meta": file_meta,
                                "transferinfo": transferinfo
                            }
                        ]
                    media_list = {
                        "files": media_files,
                        "time": datetime.datetime.now()
                    }
                else:
                    media_list = {
                        "files": [
                            {
                                "path": event_path,
                                "mediainfo": mediainfo,
                                "file_meta": file_meta,
                                "transferinfo": transferinfo
                            }
                        ],
                        "time": datetime.datetime.now()
                    }
                self._medias[mediainfo.title_year + " " + file_meta.season] = media_list

                # Refresh the media server
                if settings.REFRESH_MEDIASERVER:
                    self.chain.refresh_mediaserver(mediainfo=mediainfo, file_path=transferinfo.target_path)
                # Broadcast the transfer-complete event
                self.eventmanager.send_event(EventType.TransferComplete, {
                    'meta': file_meta,
                    'mediainfo': mediainfo,
                    'transferinfo': transferinfo
                })

                # In move mode, clean up directories left empty, up to mon_path
                if transfer_type == "move":
                    for file_dir in file_path.parents:
                        if len(str(file_dir)) <= len(str(Path(mon_path))):
                            # Important: never delete the monitored directory itself
                            break
                        files = SystemUtils.list_files(file_dir, settings.RMT_MEDIAEXT)
                        if not files:
                            logger.warn(f"移动模式,删除空目录:{file_dir}")
                            shutil.rmtree(file_dir, ignore_errors=True)

        except Exception as e:
            logger.error("目录监控发生错误:%s - %s" % (str(e), traceback.format_exc()))

    def send_msg(self):
        """
        Periodic job: once a title's batch has been quiet for 10 seconds,
        send one aggregated library-import notification and drop the batch.
        """
        if not self._medias or not self._medias.keys():
            return
        # Check each pending batch
        for medis_title_year_season in list(self._medias.keys()):
            media_list = self._medias.get(medis_title_year_season)
            logger.info(f"开始处理媒体 {medis_title_year_season} 消息")
            if not media_list:
                continue
            # Last time a file was added to the batch
            last_update_time = media_list.get("time")
            media_files = media_list.get("files")
            if not last_update_time or not media_files:
                continue
            transferinfo = media_files[0].get("transferinfo")
            file_meta = media_files[0].get("file_meta")
            mediainfo = media_files[0].get("mediainfo")
            # Send once the batch has been idle for more than 10 seconds
            if (datetime.datetime.now() - last_update_time).total_seconds() > 10:
                # Notify
                if self._notify:
                    # Total size of all files in the batch
                    total_size = 0
                    file_count = 0
                    # Collected episode numbers
                    episodes = []
                    for file in media_files:
                        transferinfo = file.get("transferinfo")
                        total_size += transferinfo.total_size
                        file_count += 1
                        file_meta = file.get("file_meta")
                        if file_meta and file_meta.begin_episode:
                            episodes.append(file_meta.begin_episode)
                    transferinfo.total_size = total_size
                    # Number of files handled
                    transferinfo.file_count = file_count
                    # Season/episode text, e.g. S01 E01-E04 or S01 E01、E02、E04
                    season_episode = None
                    # For TV shows, report at season granularity
                    if mediainfo.type == MediaType.TV:
                        # Season/episode text
                        season_episode = f"{file_meta.season} {StringUtils.format_ep(episodes)}"
                    # Send the aggregated message
                    self.transferchian.send_transfer_message(meta=file_meta,
                                                             mediainfo=mediainfo,
                                                             transferinfo=transferinfo,
                                                             season_episode=season_episode)
                # Remove the batch after sending
                del self._medias[medis_title_year_season]
                continue

    def get_download_hash(self, src: str):
        """
        Look up download_hash in the DB to avoid querying the downloader.

        :param src: full path of the downloaded file
        :return: the hash string, or None when unknown
        """
        download_file = self.downloadhis.get_file_by_fullpath(src)
        if download_file:
            return download_file.download_hash
        return None

    def get_state(self) -> bool:
        """Whether the plugin is currently enabled."""
        return self._enabled

    @staticmethod
    def get_command() -> List[Dict[str, Any]]:
        """
        Declare the remote-control commands offered by this plugin.

        :return: command keyword, event, description and payload data
        """
        return [{
            "cmd": "/directory_sync",
            "event": EventType.DirectorySync,
            "desc": "目录监控同步",
            "category": "管理",
            "data": {}
        }]

    def get_api(self) -> List[Dict[str, Any]]:
        """No plugin APIs."""
        pass

    def get_form(self) -> Tuple[List[dict], Dict[str, Any]]:
        """
        Build the settings page.

        :return: (form layout, default values for the data model)
        """
        return [
            {
                'component': 'VForm',
                'content': [
                    {
                        'component': 'VRow',
                        'content': [
                            {
                                'component': 'VCol',
                                'props': {
                                    'cols': 12,
                                    'md': 4
                                },
                                'content': [
                                    {
                                        'component': 'VSwitch',
                                        'props': {
                                            'model': 'enabled',
                                            'label': '启用插件',
                                        }
                                    }
                                ]
                            },
                            {
                                'component': 'VCol',
                                'props': {
                                    'cols': 12,
                                    'md': 4
                                },
                                'content': [
                                    {
                                        'component': 'VSwitch',
                                        'props': {
                                            'model': 'notify',
                                            'label': '发送通知',
                                        }
                                    }
                                ]
                            },
                            {
                                'component': 'VCol',
                                'props': {
                                    'cols': 12,
                                    'md': 4
                                },
                                'content': [
                                    {
                                        'component': 'VSwitch',
                                        'props': {
                                            'model': 'onlyonce',
                                            'label': '立即运行一次',
                                        }
                                    }
                                ]
                            }
                        ]
                    },
                    {
                        'component': 'VRow',
                        'content': [
                            {
                                'component': 'VCol',
                                'props': {
                                    'cols': 12,
                                    'md': 6
                                },
                                'content': [
                                    {
                                        'component': 'VSelect',
                                        'props': {
                                            'model': 'mode',
                                            'label': '监控模式',
                                            'items': [
                                                {'title': '兼容模式', 'value': 'compatibility'},
                                                {'title': '性能模式', 'value': 'fast'}
                                            ]
                                        }
                                    }
                                ]
                            },
                            {
                                'component': 'VCol',
                                'props': {
                                    'cols': 12,
                                    'md': 6
                                },
                                'content': [
                                    {
                                        'component': 'VSelect',
                                        'props': {
                                            'model': 'transfer_type',
                                            'label': '转移方式',
                                            'items': [
                                                {'title': '移动', 'value': 'move'},
                                                {'title': '复制', 'value': 'copy'},
                                                {'title': '硬链接', 'value': 'link'},
                                                {'title': '软链接', 'value': 'softlink'},
                                                {'title': 'Rclone复制', 'value': 'rclone_copy'},
                                                {'title': 'Rclone移动', 'value': 'rclone_move'}
                                            ]
                                        }
                                    }
                                ]
                            }
                        ]
                    },
                    {
                        'component': 'VRow',
                        'content': [
                            {
                                'component': 'VCol',
                                'props': {
                                    'cols': 12
                                },
                                'content': [
                                    {
                                        'component': 'VTextarea',
                                        'props': {
                                            'model': 'monitor_dirs',
                                            'label': '监控目录',
                                            'rows': 5,
                                            'placeholder': '每一行一个目录,支持以下几种配置方式,转移方式支持 move、copy、link、softlink、rclone_copy、rclone_move:\n'
                                                           '监控目录\n'
                                                           '监控目录#转移方式\n'
                                                           '监控目录:转移目的目录\n'
                                                           '监控目录:转移目的目录#转移方式'
                                        }
                                    }
                                ]
                            }
                        ]
                    },
                    {
                        'component': 'VRow',
                        'content': [
                            {
                                'component': 'VCol',
                                'props': {
                                    'cols': 12
                                },
                                'content': [
                                    {
                                        'component': 'VTextarea',
                                        'props': {
                                            'model': 'exclude_keywords',
                                            'label': '排除关键词',
                                            'rows': 2,
                                            'placeholder': '每一行一个关键词'
                                        }
                                    }
                                ]
                            }
                        ]
                    }
                ]
            }
        ], {
            "enabled": False,
            "notify": False,
            "onlyonce": False,
            "mode": "fast",
            "transfer_type": settings.TRANSFER_TYPE,
            "monitor_dirs": "",
            "exclude_keywords": ""
        }

    def get_page(self) -> List[dict]:
        """No detail page."""
        pass

    def stop_service(self):
        """
        Stop all directory observers and shut down the scheduler.
        """
        if self._observer:
            for observer in self._observer:
                try:
                    observer.stop()
                    observer.join()
                except Exception as e:
                    print(str(e))
        self._observer = []
        if self._scheduler:
            self._scheduler.remove_all_jobs()
            if self._scheduler.running:
                self._event.set()
                self._scheduler.shutdown()
                self._event.clear()
            self._scheduler = None

View File

@@ -1,571 +0,0 @@
import datetime
import re
import xml.dom.minidom
from threading import Event
from typing import Tuple, List, Dict, Any
import pytz
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
from app.chain.douban import DoubanChain
from app.chain.download import DownloadChain
from app.chain.subscribe import SubscribeChain
from app.core.config import settings
from app.core.context import MediaInfo
from app.core.metainfo import MetaInfo
from app.log import logger
from app.plugins import _PluginBase
from app.utils.dom import DomUtils
from app.utils.http import RequestUtils
class DoubanRank(_PluginBase):
# 插件名称
plugin_name = "豆瓣榜单订阅"
# 插件描述
plugin_desc = "监控豆瓣热门榜单,自动添加订阅。"
# 插件图标
plugin_icon = "movie.jpg"
# 主题色
plugin_color = "#01B3E3"
# 插件版本
plugin_version = "1.0"
# 插件作者
plugin_author = "jxxghp"
# 作者主页
author_url = "https://github.com/jxxghp"
# 插件配置项ID前缀
plugin_config_prefix = "doubanrank_"
# 加载顺序
plugin_order = 6
# 可使用的用户级别
auth_level = 2
# 退出事件
_event = Event()
# 私有属性
downloadchain: DownloadChain = None
subscribechain: SubscribeChain = None
doubanchain: DoubanChain = None
_scheduler = None
_douban_address = {
'movie-ustop': 'https://rsshub.app/douban/movie/ustop',
'movie-weekly': 'https://rsshub.app/douban/movie/weekly',
'movie-real-time': 'https://rsshub.app/douban/movie/weekly/subject_real_time_hotest',
'show-domestic': 'https://rsshub.app/douban/movie/weekly/show_domestic',
'movie-hot-gaia': 'https://rsshub.app/douban/movie/weekly/movie_hot_gaia',
'tv-hot': 'https://rsshub.app/douban/movie/weekly/tv_hot',
'movie-top250': 'https://rsshub.app/douban/movie/weekly/movie_top250',
}
_enabled = False
_cron = ""
_onlyonce = False
_rss_addrs = []
_ranks = []
_vote = 0
_clear = False
_clearflag = False
def init_plugin(self, config: dict = None):
self.downloadchain = DownloadChain()
self.subscribechain = SubscribeChain()
self.doubanchain = DoubanChain()
if config:
self._enabled = config.get("enabled")
self._cron = config.get("cron")
self._onlyonce = config.get("onlyonce")
self._vote = float(config.get("vote")) if config.get("vote") else 0
rss_addrs = config.get("rss_addrs")
if rss_addrs:
if isinstance(rss_addrs, str):
self._rss_addrs = rss_addrs.split('\n')
else:
self._rss_addrs = rss_addrs
else:
self._rss_addrs = []
self._ranks = config.get("ranks") or []
self._clear = config.get("clear")
# 停止现有任务
self.stop_service()
# 启动服务
if self._enabled or self._onlyonce:
self._scheduler = BackgroundScheduler(timezone=settings.TZ)
if self._cron:
logger.info(f"豆瓣榜单订阅服务启动,周期:{self._cron}")
try:
self._scheduler.add_job(func=self.__refresh_rss,
trigger=CronTrigger.from_crontab(self._cron),
name="豆瓣榜单订阅")
except Exception as e:
logger.error(f"豆瓣榜单订阅服务启动失败,错误信息:{str(e)}")
self.systemmessage.put(f"豆瓣榜单订阅服务启动失败,错误信息:{str(e)}")
else:
self._scheduler.add_job(func=self.__refresh_rss, trigger='date',
run_date=datetime.datetime.now(
tz=pytz.timezone(settings.TZ)) + datetime.timedelta(seconds=3)
)
logger.info("豆瓣榜单订阅服务启动,周期:每天 08:00")
if self._onlyonce:
logger.info("豆瓣榜单订阅服务启动,立即运行一次")
self._scheduler.add_job(func=self.__refresh_rss, trigger='date',
run_date=datetime.datetime.now(
tz=pytz.timezone(settings.TZ)) + datetime.timedelta(seconds=3)
)
if self._onlyonce or self._clear:
# 关闭一次性开关
self._onlyonce = False
# 记录缓存清理标志
self._clearflag = self._clear
# 关闭清理缓存
self._clear = False
# 保存配置
self.__update_config()
if self._scheduler.get_jobs():
# 启动服务
self._scheduler.print_jobs()
self._scheduler.start()
def get_state(self) -> bool:
    """Return True when the plugin is enabled."""
    return self._enabled

@staticmethod
def get_command() -> List[Dict[str, Any]]:
    """This plugin registers no remote commands."""
    pass

def get_api(self) -> List[Dict[str, Any]]:
    """This plugin exposes no extra APIs."""
    pass
def get_form(self) -> Tuple[List[dict], Dict[str, Any]]:
return [
{
'component': 'VForm',
'content': [
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'enabled',
'label': '启用插件',
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'onlyonce',
'label': '立即运行一次',
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'cron',
'label': '执行周期',
'placeholder': '5位cron表达式留空自动'
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'vote',
'label': '评分',
'placeholder': '评分大于等于该值才订阅'
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'content': [
{
'component': 'VSelect',
'props': {
'chips': True,
'multiple': True,
'model': 'ranks',
'label': '热门榜单',
'items': [
{'title': '电影北美票房榜', 'value': 'movie-ustop'},
{'title': '一周口碑电影榜', 'value': 'movie-weekly'},
{'title': '实时热门电影', 'value': 'movie-real-time'},
{'title': '热门综艺', 'value': 'show-domestic'},
{'title': '热门电影', 'value': 'movie-hot-gaia'},
{'title': '热门电视剧', 'value': 'tv-hot'},
{'title': '电影TOP10', 'value': 'movie-top250'},
]
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'content': [
{
'component': 'VTextarea',
'props': {
'model': 'rss_addrs',
'label': '自定义榜单地址',
'placeholder': '每行一个地址https://rsshub.app/douban/movie/ustop'
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'clear',
'label': '清理历史记录',
}
}
]
}
]
}
]
}
], {
"enabled": False,
"cron": "",
"onlyonce": False,
"vote": "",
"ranks": [],
"rss_addrs": "",
"clear": False
}
def get_page(self) -> List[dict]:
"""
拼装插件详情页面,需要返回页面配置,同时附带数据
"""
# 查询历史记录
historys = self.get_data('history')
if not historys:
return [
{
'component': 'div',
'text': '暂无数据',
'props': {
'class': 'text-center',
}
}
]
# 数据按时间降序排序
historys = sorted(historys, key=lambda x: x.get('time'), reverse=True)
# 拼装页面
contents = []
for history in historys:
title = history.get("title")
poster = history.get("poster")
mtype = history.get("type")
time_str = history.get("time")
doubanid = history.get("doubanid")
contents.append(
{
'component': 'VCard',
'content': [
{
'component': 'div',
'props': {
'class': 'd-flex justify-space-start flex-nowrap flex-row',
},
'content': [
{
'component': 'div',
'content': [
{
'component': 'VImg',
'props': {
'src': poster,
'height': 120,
'width': 80,
'aspect-ratio': '2/3',
'class': 'object-cover shadow ring-gray-500',
'cover': True
}
}
]
},
{
'component': 'div',
'content': [
{
'component': 'VCardSubtitle',
'props': {
'class': 'pa-2 font-bold break-words whitespace-break-spaces'
},
'content': [
{
'component': 'a',
'props': {
'href': f"https://movie.douban.com/subject/{doubanid}",
'target': '_blank'
},
'text': title
}
]
},
{
'component': 'VCardText',
'props': {
'class': 'pa-0 px-2'
},
'text': f'类型:{mtype}'
},
{
'component': 'VCardText',
'props': {
'class': 'pa-0 px-2'
},
'text': f'时间:{time_str}'
}
]
}
]
}
]
}
)
return [
{
'component': 'div',
'props': {
'class': 'grid gap-3 grid-info-card',
},
'content': contents
}
]
def stop_service(self):
    """
    Stop the plugin service: cancel all scheduled jobs, signal any in-flight
    refresh loop to exit, and shut down the scheduler.
    """
    try:
        if self._scheduler:
            self._scheduler.remove_all_jobs()
            if self._scheduler.running:
                # Wake the refresh loop so it returns before shutdown
                self._event.set()
                self._scheduler.shutdown()
                self._event.clear()
            self._scheduler = None
    except Exception as e:
        # Fixed: was print(str(e)); use the module logger like every other
        # plugin's stop_service so failures reach the log
        logger.error("退出插件失败:%s" % str(e))
def __update_config(self):
    """
    Persist the current plugin settings back to the stored configuration.
    """
    self.update_config({
        "enabled": self._enabled,
        "cron": self._cron,
        "onlyonce": self._onlyonce,
        "vote": self._vote,
        "ranks": self._ranks,
        "rss_addrs": self._rss_addrs,
        "clear": self._clear
    })
def __refresh_rss(self):
    """
    Refresh every configured Douban rank RSS feed and subscribe to new media.

    Each feed entry is de-duplicated against stored history, recognized into
    media info (via its Douban ID when present, otherwise by title), checked
    against the library, and subscribed when missing.
    """
    logger.info(f"开始刷新豆瓣榜单 ...")
    addr_list = self._rss_addrs + [self._douban_address.get(rank) for rank in self._ranks]
    if not addr_list:
        logger.info(f"未设置榜单RSS地址")
        return
    else:
        logger.info(f"{len(addr_list)} 个榜单RSS地址需要刷新")
    # Load history (or start fresh when a clear was requested)
    if self._clearflag:
        history = []
    else:
        history: List[dict] = self.get_data('history') or []
    # Hoist de-dup keys into a set: the original rebuilt the full list of
    # history flags for every single RSS entry (quadratic in history size)
    seen_flags = {h.get("unique") for h in history}
    for addr in addr_list:
        if not addr:
            continue
        try:
            logger.info(f"获取RSS{addr} ...")
            rss_infos = self.__get_rss_info(addr)
            if not rss_infos:
                logger.error(f"RSS地址{addr} ,未查询到数据")
                continue
            else:
                logger.info(f"RSS地址{addr} ,共 {len(rss_infos)} 条数据")
            for rss_info in rss_infos:
                if self._event.is_set():
                    logger.info(f"订阅服务停止")
                    return
                title = rss_info.get('title')
                douban_id = rss_info.get('doubanid')
                unique_flag = f"doubanrank: {title} (DB:{douban_id})"
                # Skip entries already handled in a previous run
                if unique_flag in seen_flags:
                    continue
                # Meta info parsed from the title
                meta = MetaInfo(title)
                # Recognize media info
                if douban_id:
                    # Recognize via the Douban ID
                    context = self.doubanchain.recognize_by_doubanid(douban_id)
                    # Fixed: the original dereferenced context.media_info and
                    # raised AttributeError when the chain returned None
                    mediainfo = context.media_info if context else None
                    if not mediainfo or not mediainfo.tmdb_id:
                        logger.warn(f'未识别到媒体信息,标题:{title}豆瓣ID{douban_id}')
                        continue
                else:
                    # Recognize by title only
                    mediainfo: MediaInfo = self.chain.recognize_media(meta=meta)
                    if not mediainfo:
                        logger.warn(f'未识别到媒体信息,标题:{title}豆瓣ID{douban_id}')
                        continue
                # Check whether the media is already complete in the library
                exist_flag, _ = self.downloadchain.get_no_exists_info(meta=meta, mediainfo=mediainfo)
                if exist_flag:
                    logger.info(f'{mediainfo.title_year} 媒体库中已存在')
                    continue
                # Add a subscription
                self.subscribechain.add(title=mediainfo.title,
                                        year=mediainfo.year,
                                        mtype=mediainfo.type,
                                        tmdbid=mediainfo.tmdb_id,
                                        season=meta.begin_season,
                                        exist_ok=True,
                                        username="豆瓣榜单")
                # Record the entry in history
                history.append({
                    "title": title,
                    "type": mediainfo.type.value,
                    "year": mediainfo.year,
                    "poster": mediainfo.get_poster_image(),
                    "overview": mediainfo.overview,
                    "tmdbid": mediainfo.tmdb_id,
                    "doubanid": douban_id,
                    "time": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
                    "unique": unique_flag
                })
                seen_flags.add(unique_flag)
        except Exception as e:
            logger.error(str(e))
    # Persist history
    self.save_data('history', history)
    # The clear flag applies to a single refresh only
    self._clearflag = False
    logger.info(f"所有榜单RSS刷新完成")
@staticmethod
def __get_rss_info(addr) -> List[dict]:
    """
    Fetch an RSS feed and parse it into a list of entries.

    :param addr: RSS feed URL
    :return: list of dicts with keys ``title``, ``link`` and ``doubanid``;
             ``doubanid`` is an empty string when no ID can be parsed
             from the link (the original leaked an empty list here)
    """
    try:
        ret = RequestUtils().get_res(addr)
        if not ret:
            return []
        # Let requests detect the charset from the payload, not just headers
        ret.encoding = ret.apparent_encoding
        ret_xml = ret.text
        ret_array = []
        # Parse the XML payload
        dom_tree = xml.dom.minidom.parseString(ret_xml)
        root_node = dom_tree.documentElement
        items = root_node.getElementsByTagName("item")
        for item in items:
            try:
                # Entry title
                title = DomUtils.tag_value(item, "title", default="")
                # Entry link
                link = DomUtils.tag_value(item, "link", default="")
                if not title and not link:
                    logger.warn(f"条目标题和链接均为空,无法处理")
                    continue
                # Fixed: when re.findall matched nothing the original kept
                # doubanid as [] (a list) and stored that downstream;
                # normalize to a string
                matches = re.findall(r"/(\d+)/", link)
                doubanid = matches[0] if matches else ""
                if doubanid and not str(doubanid).isdigit():
                    logger.warn(f"解析的豆瓣ID格式不正确{doubanid}")
                    continue
                # Collected entry
                ret_array.append({
                    'title': title,
                    'link': link,
                    'doubanid': doubanid
                })
            except Exception as e1:
                logger.error("解析RSS条目失败" + str(e1))
                continue
        return ret_array
    except Exception as e:
        logger.error("获取RSS失败" + str(e))
        return []

View File

@@ -1,560 +0,0 @@
import datetime
from pathlib import Path
from threading import Lock
from typing import Optional, Any, List, Dict, Tuple
import pytz
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
from app.chain.douban import DoubanChain
from app.chain.download import DownloadChain
from app.chain.search import SearchChain
from app.chain.subscribe import SubscribeChain
from app.core.config import settings
from app.core.event import Event
from app.core.event import eventmanager
from app.core.metainfo import MetaInfo
from app.helper.rss import RssHelper
from app.log import logger
from app.plugins import _PluginBase
from app.schemas.types import EventType
lock = Lock()
class DoubanSync(_PluginBase):
# 插件名称
plugin_name = "豆瓣想看"
# 插件描述
plugin_desc = "同步豆瓣想看数据,自动添加订阅。"
# 插件图标
plugin_icon = "douban.png"
# 主题色
plugin_color = "#05B711"
# 插件版本
plugin_version = "1.0"
# 插件作者
plugin_author = "jxxghp"
# 作者主页
author_url = "https://github.com/jxxghp"
# 插件配置项ID前缀
plugin_config_prefix = "doubansync_"
# 加载顺序
plugin_order = 3
# 可使用的用户级别
auth_level = 2
# 私有变量
_interests_url: str = "https://www.douban.com/feed/people/%s/interests"
_scheduler: Optional[BackgroundScheduler] = None
_cache_path: Optional[Path] = None
rsshelper = None
downloadchain = None
searchchain = None
subscribechain = None
doubanchain = None
# 配置属性
_enabled: bool = False
_onlyonce: bool = False
_cron: str = ""
_notify: bool = False
_days: int = 7
_users: str = ""
_clear: bool = False
_clearflag: bool = False
def init_plugin(self, config: dict = None):
self.rsshelper = RssHelper()
self.downloadchain = DownloadChain()
self.searchchain = SearchChain()
self.subscribechain = SubscribeChain()
self.doubanchain = DoubanChain()
# 停止现有任务
self.stop_service()
# 配置
if config:
self._enabled = config.get("enabled")
self._cron = config.get("cron")
self._notify = config.get("notify")
self._days = config.get("days")
self._users = config.get("users")
self._onlyonce = config.get("onlyonce")
self._clear = config.get("clear")
if self._enabled or self._onlyonce:
self._scheduler = BackgroundScheduler(timezone=settings.TZ)
if self._cron:
try:
self._scheduler.add_job(func=self.sync,
trigger=CronTrigger.from_crontab(self._cron),
name="豆瓣想看")
except Exception as err:
logger.error(f"定时任务配置错误:{str(err)}")
# 推送实时消息
self.systemmessage.put(f"执行周期配置错误:{str(err)}")
else:
self._scheduler.add_job(self.sync, "interval", minutes=30, name="豆瓣想看")
if self._onlyonce:
logger.info(f"豆瓣想看服务启动,立即运行一次")
self._scheduler.add_job(func=self.sync, trigger='date',
run_date=datetime.datetime.now(
tz=pytz.timezone(settings.TZ)) + datetime.timedelta(seconds=3)
)
if self._onlyonce or self._clear:
# 关闭一次性开关
self._onlyonce = False
# 记录缓存清理标志
self._clearflag = self._clear
# 关闭清理缓存
self._clear = False
# 保存配置
self.__update_config()
# 启动任务
if self._scheduler.get_jobs():
self._scheduler.print_jobs()
self._scheduler.start()
def get_state(self) -> bool:
return self._enabled
@staticmethod
def get_command() -> List[Dict[str, Any]]:
"""
定义远程控制命令
:return: 命令关键字、事件、描述、附带数据
"""
return [{
"cmd": "/douban_sync",
"event": EventType.DoubanSync,
"desc": "同步豆瓣想看",
"category": "订阅",
"data": {}
}]
def get_api(self) -> List[Dict[str, Any]]:
"""
获取插件API
[{
"path": "/xx",
"endpoint": self.xxx,
"methods": ["GET", "POST"],
"summary": "API说明"
}]
"""
pass
def get_form(self) -> Tuple[List[dict], Dict[str, Any]]:
"""
拼装插件配置页面需要返回两块数据1、页面配置2、数据结构
"""
return [
{
'component': 'VForm',
'content': [
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 4
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'enabled',
'label': '启用插件',
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 4
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'notify',
'label': '发送通知',
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 4
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'onlyonce',
'label': '立即运行一次',
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'cron',
'label': '执行周期',
'placeholder': '5位cron表达式留空自动'
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'days',
'label': '同步天数'
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'content': [
{
'component': 'VTextField',
'props': {
'model': 'users',
'label': '用户列表',
'placeholder': '豆瓣用户ID多个用英文逗号分隔'
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'clear',
'label': '清理历史记录',
}
}
]
}
]
}
]
}
], {
"enabled": False,
"notify": True,
"onlyonce": False,
"cron": "*/30 * * * *",
"days": 7,
"users": "",
"clear": False
}
def get_page(self) -> List[dict]:
"""
拼装插件详情页面,需要返回页面配置,同时附带数据
"""
# 查询同步详情
historys = self.get_data('history')
if not historys:
return [
{
'component': 'div',
'text': '暂无数据',
'props': {
'class': 'text-center',
}
}
]
# 数据按时间降序排序
historys = sorted(historys, key=lambda x: x.get('time'), reverse=True)
# 拼装页面
contents = []
for history in historys:
title = history.get("title")
poster = history.get("poster")
mtype = history.get("type")
time_str = history.get("time")
doubanid = history.get("doubanid")
contents.append(
{
'component': 'VCard',
'content': [
{
'component': 'div',
'props': {
'class': 'd-flex justify-space-start flex-nowrap flex-row',
},
'content': [
{
'component': 'div',
'content': [
{
'component': 'VImg',
'props': {
'src': poster,
'height': 120,
'width': 80,
'aspect-ratio': '2/3',
'class': 'object-cover shadow ring-gray-500',
'cover': True
}
}
]
},
{
'component': 'div',
'content': [
{
'component': 'VCardSubtitle',
'props': {
'class': 'pa-2 font-bold break-words whitespace-break-spaces'
},
'content': [
{
'component': 'a',
'props': {
'href': f"https://movie.douban.com/subject/{doubanid}",
'target': '_blank'
},
'text': title
}
]
},
{
'component': 'VCardText',
'props': {
'class': 'pa-0 px-2'
},
'text': f'类型:{mtype}'
},
{
'component': 'VCardText',
'props': {
'class': 'pa-0 px-2'
},
'text': f'时间:{time_str}'
}
]
}
]
}
]
}
)
return [
{
'component': 'div',
'props': {
'class': 'grid gap-3 grid-info-card',
},
'content': contents
}
]
def __update_config(self):
"""
更新配置
"""
self.update_config({
"enabled": self._enabled,
"notify": self._notify,
"onlyonce": self._onlyonce,
"cron": self._cron,
"days": self._days,
"users": self._users,
"clear": self._clear
})
def stop_service(self):
    """
    Tear down the plugin: drop all scheduled jobs and stop the scheduler.
    """
    scheduler = self._scheduler
    if not scheduler:
        return
    try:
        scheduler.remove_all_jobs()
        if scheduler.running:
            scheduler.shutdown()
        self._scheduler = None
    except Exception as e:
        logger.error("退出插件失败:%s" % str(e))
def sync(self):
    """
    Sync every configured user's Douban "wish to watch" RSS feed.

    New entries within the configured day window are recognized, then either
    found in the library, auto-downloaded, or subscribed; each outcome is
    appended to the plugin history.
    """
    if not self._users:
        return
    # Load history (or start fresh when a clear was requested)
    if self._clearflag:
        history = []
    else:
        history: List[dict] = self.get_data('history') or []
    # De-dup set instead of rebuilding a list per entry (quadratic before)
    seen_ids = {h.get("doubanid") for h in history}
    for user_id in self._users.split(","):
        # Sync each user's Douban feed
        if not user_id:
            continue
        logger.info(f"开始同步用户 {user_id} 的豆瓣想看数据 ...")
        url = self._interests_url % user_id
        results = self.rsshelper.parse(url)
        if not results:
            logger.warn(f"未获取到用户 {user_id} 豆瓣RSS数据{url}")
            continue
        else:
            logger.info(f"获取到用户 {user_id} 豆瓣RSS数据{len(results)}")
        # Process entries
        for result in results:
            try:
                dtype = result.get("title", "")[:2]
                title = result.get("title", "")[2:]
                if dtype not in ["想看", "在看"]:
                    logger.info(f'标题:{title},非想看/在看数据,跳过')
                    continue
                if not result.get("link"):
                    logger.warn(f'标题:{title},未获取到链接,跳过')
                    continue
                # Enforce the day window
                pubdate: Optional[datetime.datetime] = result.get("pubdate")
                if pubdate:
                    if (datetime.datetime.now(datetime.timezone.utc) - pubdate).days > float(self._days):
                        logger.info(f'已超过同步天数,标题:{title},发布时间:{pubdate}')
                        continue
                douban_id = result.get("link", "").split("/")[-2]
                # Skip entries already handled
                if not douban_id or douban_id in seen_ids:
                    logger.info(f'标题:{title}豆瓣ID{douban_id} 已处理过')
                    continue
                # Recognize media info
                meta = MetaInfo(title=title)
                context = self.doubanchain.recognize_by_doubanid(douban_id)
                # Fixed: the original dereferenced context.media_info and
                # raised AttributeError when the chain returned None
                mediainfo = context.media_info if context else None
                if not mediainfo or not mediainfo.tmdb_id:
                    logger.warn(f'未识别到媒体信息,标题:{title}豆瓣ID{douban_id}')
                    continue
                # Check what is missing from the library
                exist_flag, no_exists = self.downloadchain.get_no_exists_info(meta=meta, mediainfo=mediainfo)
                if exist_flag:
                    logger.info(f'{mediainfo.title_year} 媒体库中已存在')
                    action = "exist"
                else:
                    logger.info(f'{mediainfo.title_year} 媒体库中不存在,开始搜索 ...')
                    # Search for resources
                    contexts = self.searchchain.process(mediainfo=mediainfo,
                                                        no_exists=no_exists)
                    if not contexts:
                        logger.warn(f'{mediainfo.title_year} 未搜索到资源')
                        # Nothing found: fall back to a subscription
                        self.subscribechain.add(title=mediainfo.title,
                                                year=mediainfo.year,
                                                mtype=mediainfo.type,
                                                tmdbid=mediainfo.tmdb_id,
                                                season=meta.begin_season,
                                                exist_ok=True,
                                                username="豆瓣想看")
                        action = "subscribe"
                    else:
                        # Auto-download the found resources
                        downloads, lefts = self.downloadchain.batch_download(contexts=contexts, no_exists=no_exists)
                        if downloads and not lefts:
                            # Everything downloaded
                            logger.info(f'{mediainfo.title_year} 下载完成')
                            action = "download"
                        else:
                            # Incomplete download: subscribe for the rest
                            logger.info(f'{mediainfo.title_year} 未下载未完整,添加订阅 ...')
                            self.subscribechain.add(title=mediainfo.title,
                                                    year=mediainfo.year,
                                                    mtype=mediainfo.type,
                                                    tmdbid=mediainfo.tmdb_id,
                                                    season=meta.begin_season,
                                                    exist_ok=True,
                                                    username="豆瓣想看")
                            action = "subscribe"
                # Record the entry in history
                history.append({
                    "action": action,
                    "title": title,
                    "type": mediainfo.type.value,
                    "year": mediainfo.year,
                    "poster": mediainfo.get_poster_image(),
                    "overview": mediainfo.overview,
                    "tmdbid": mediainfo.tmdb_id,
                    "doubanid": douban_id,
                    "time": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                })
                seen_ids.add(douban_id)
            except Exception as err:
                logger.error(f'同步用户 {user_id} 豆瓣想看数据出错:{str(err)}')
        logger.info(f"用户 {user_id} 豆瓣想看同步完成")
    # Persist history
    self.save_data('history', history)
    # The clear flag applies to a single sync only
    self._clearflag = False
@eventmanager.register(EventType.DoubanSync)
def remote_sync(self, event: Event):
    """
    Handle the remote /douban_sync command: run a full sync and report back.

    :param event: triggering event; when present, progress messages are sent
                  back to the originating channel/user
    """
    if event:
        logger.info("收到命令,开始执行豆瓣想看同步 ...")
        self.post_message(channel=event.event_data.get("channel"),
                          title="开始同步豆瓣想看 ...",
                          userid=event.event_data.get("user"))
    self.sync()
    if event:
        self.post_message(channel=event.event_data.get("channel"),
                          title="同步豆瓣想看数据完成!", userid=event.event_data.get("user"))

View File

@@ -1,324 +0,0 @@
from apscheduler.schedulers.background import BackgroundScheduler
from app.chain.download import DownloadChain
from app.chain.media import MediaChain
from app.core.config import settings
from app.db.downloadhistory_oper import DownloadHistoryOper
from app.plugins import _PluginBase
from typing import Any, List, Dict, Tuple, Optional, Union
from app.log import logger
from app.schemas import NotificationType, TransferTorrent, DownloadingTorrent
from app.schemas.types import TorrentStatus, MessageChannel
from app.utils.string import StringUtils
class DownloadingMsg(_PluginBase):
# 插件名称
plugin_name = "下载进度推送"
# 插件描述
plugin_desc = "定时推送正在下载进度。"
# 插件图标
plugin_icon = "downloadmsg.png"
# 主题色
plugin_color = "#3DE75D"
# 插件版本
plugin_version = "1.0"
# 插件作者
plugin_author = "thsrite"
# 作者主页
author_url = "https://github.com/thsrite"
# 插件配置项ID前缀
plugin_config_prefix = "downloading_"
# 加载顺序
plugin_order = 22
# 可使用的用户级别
auth_level = 2
# 私有属性
_enabled = False
# 任务执行间隔
_seconds = None
_type = None
_adminuser = None
_downloadhis = None
# 定时器
_scheduler: Optional[BackgroundScheduler] = None
def init_plugin(self, config: dict = None):
# 停止现有任务
self.stop_service()
if config:
self._enabled = config.get("enabled")
self._seconds = config.get("seconds") or 300
self._type = config.get("type") or 'admin'
self._adminuser = config.get("adminuser")
# 加载模块
if self._enabled:
self._downloadhis = DownloadHistoryOper()
# 定时服务
self._scheduler = BackgroundScheduler(timezone=settings.TZ)
if self._seconds:
try:
self._scheduler.add_job(func=self.__downloading,
trigger='interval',
seconds=int(self._seconds),
name="下载进度推送")
except Exception as err:
logger.error(f"定时任务配置错误:{str(err)}")
# 启动任务
if self._scheduler.get_jobs():
self._scheduler.print_jobs()
self._scheduler.start()
def __downloading(self):
    """
    Periodic job: push progress of all currently-downloading torrents.

    Depending on the configured push type, messages go to the admin users,
    to the users who started each download (looked up via download history),
    or to everyone.
    """
    # Torrents currently downloading
    torrents = DownloadChain().list_torrents(status=TorrentStatus.DOWNLOADING)
    if not torrents:
        logger.info("当前没有正在下载的任务!")
        return
    # Push to admin users
    if self._type == "admin" or self._type == "both":
        if not self._adminuser:
            logger.error("未配置管理员用户")
            return
        for userid in str(self._adminuser).split(","):
            self.__send_msg(torrents=torrents, userid=userid)
    # Push to the users who started each download
    if self._type == "user" or self._type == "both":
        user_torrents = {}
        # Group torrents by originating user via the download history
        for torrent in torrents:
            downloadhis = self._downloadhis.get_by_hash(download_hash=torrent.hash)
            if not downloadhis:
                logger.warn(f"种子 {torrent.hash} 未获取到MoviePilot下载历史无法推送下载进度")
                continue
            if not downloadhis.userid:
                logger.debug(f"种子 {torrent.hash} 未获取到下载用户记录,无法推送下载进度")
                continue
            user_torrent = user_torrents.get(downloadhis.userid) or []
            user_torrent.append(torrent)
            user_torrents[downloadhis.userid] = user_torrent
        if not user_torrents or not user_torrents.keys():
            logger.warn("未获取到用户下载记录,无法推送下载进度")
            return
        # Send each user their own progress list
        for userid in list(user_torrents.keys()):
            if not userid:
                continue
            # Admins were already notified above; skip duplicates
            if (self._type == "admin" or self._type == "both") and self._adminuser and userid in str(
                    self._adminuser).split(","):
                logger.debug("管理员已推送")
                continue
            user_torrent = user_torrents.get(userid)
            if not user_torrent:
                logger.warn(f"未获取到用户 {userid} 下载任务")
                continue
            self.__send_msg(torrents=user_torrent,
                            userid=userid)
    # Broadcast to everyone
    if self._type == "all":
        self.__send_msg(torrents=torrents)
def __send_msg(self, torrents: Optional[List[Union[TransferTorrent, DownloadingTorrent]]], userid: str = None):
    """
    Send a progress message for the given downloading torrents.

    :param torrents: torrents currently downloading
    :param userid: target user; admins get a detailed per-torrent line,
                   plain users get a condensed one
    """
    title = f"{len(torrents)} 个任务正在下载:"
    messages = []
    index = 1
    channel_value = None
    for torrent in torrents:
        year = None
        name = None
        se = None
        ep = None
        # Prefer the download history record; fall back to title recognition
        downloadhis = self._downloadhis.get_by_hash(download_hash=torrent.hash)
        if downloadhis:
            name = downloadhis.title
            year = downloadhis.year
            se = downloadhis.seasons
            ep = downloadhis.episodes
            if not channel_value:
                channel_value = downloadhis.channel
        else:
            try:
                context = MediaChain().recognize_by_title(title=torrent.title)
                if not context or not context.media_info:
                    continue
                media_info = context.media_info
                year = media_info.year
                name = media_info.title
                if media_info.number_of_seasons:
                    se = f"S{str(media_info.number_of_seasons).rjust(2, '0')}"
                if media_info.number_of_episodes:
                    ep = f"E{str(media_info.number_of_episodes).rjust(2, '0')}"
            except Exception as e:
                # Fixed: was print(str(e)); log recognition failures instead
                logger.error(str(e))
        # Fixed: default missing season/episode parts to "" — the original
        # interpolated None and produced titles like "Name (2023) NoneNone"
        se = se or ""
        ep = ep or ""
        # Build the display title
        if year:
            media_name = "%s (%s) %s%s" % (name, year, se, ep)
        elif name:
            media_name = "%s %s%s" % (name, se, ep)
        else:
            media_name = torrent.title
        if not self._adminuser or userid not in str(self._adminuser).split(","):
            # Plain download users get a condensed message
            messages.append(f"{index}. {media_name} {round(torrent.progress, 1)}%")
        else:
            messages.append(f"{index}. {media_name}\n"
                            f"{torrent.title} "
                            f"{StringUtils.str_filesize(torrent.size)} "
                            f"{round(torrent.progress, 1)}%")
        index += 1
    # Resolve the user's message channel from the recorded value
    if channel_value:
        channel = next(
            (channel for channel in MessageChannel.__members__.values() if channel.value == channel_value), None)
    else:
        channel = None
    self.post_message(mtype=NotificationType.Download,
                      channel=channel,
                      title=title,
                      text="\n".join(messages),
                      userid=userid)
def get_state(self) -> bool:
return self._enabled
@staticmethod
def get_command() -> List[Dict[str, Any]]:
pass
def get_api(self) -> List[Dict[str, Any]]:
pass
def get_form(self) -> Tuple[List[dict], Dict[str, Any]]:
"""
拼装插件配置页面需要返回两块数据1、页面配置2、数据结构
"""
return [
{
'component': 'VForm',
'content': [
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'enabled',
'label': '启用插件',
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 4
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'seconds',
'label': '执行间隔',
'placeholder': '单位(秒)'
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 4
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'adminuser',
'label': '管理员用户',
'placeholder': '多个用户,分割'
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 4
},
'content': [
{
'component': 'VSelect',
'props': {
'model': 'type',
'label': '推送类型',
'items': [
{'title': '管理员', 'value': 'admin'},
{'title': '下载用户', 'value': 'user'},
{'title': '管理员和下载用户', 'value': 'both'},
{'title': '所有用户', 'value': 'all'}
]
}
}
]
}
]
}
]
}
], {
"enabled": False,
"seconds": 300,
"adminuser": "",
"type": "admin"
}
def get_page(self) -> List[dict]:
pass
def stop_service(self):
    """Shut down the background scheduler and clear all pending jobs."""
    scheduler = self._scheduler
    if not scheduler:
        return
    try:
        scheduler.remove_all_jobs()
        if scheduler.running:
            scheduler.shutdown()
        self._scheduler = None
    except Exception as e:
        logger.error("退出插件失败:%s" % str(e))

View File

@@ -1,292 +0,0 @@
import json
import re
from datetime import datetime, timedelta
import pytz
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
from app.core.config import settings
from app.plugins import _PluginBase
from typing import Any, List, Dict, Tuple, Optional
from app.log import logger
from app.schemas import NotificationType
from app.utils.http import RequestUtils
class InvitesSignin(_PluginBase):
# 插件名称
plugin_name = "药丸签到"
# 插件描述
plugin_desc = "药丸论坛签到。"
# 插件图标
plugin_icon = "invites.png"
# 主题色
plugin_color = "#FFFFFF"
# 插件版本
plugin_version = "1.0"
# 插件作者
plugin_author = "thsrite"
# 作者主页
author_url = "https://github.com/thsrite"
# 插件配置项ID前缀
plugin_config_prefix = "invitessignin_"
# 加载顺序
plugin_order = 24
# 可使用的用户级别
auth_level = 2
# 私有属性
_enabled = False
# 任务执行间隔
_cron = None
_cookie = None
_onlyonce = False
_notify = False
# 定时器
_scheduler: Optional[BackgroundScheduler] = None
def init_plugin(self, config: dict = None):
# 停止现有任务
self.stop_service()
if config:
self._enabled = config.get("enabled")
self._cron = config.get("cron")
self._cookie = config.get("cookie")
self._notify = config.get("notify")
self._onlyonce = config.get("onlyonce")
# 加载模块
if self._enabled:
# 定时服务
self._scheduler = BackgroundScheduler(timezone=settings.TZ)
if self._cron:
try:
self._scheduler.add_job(func=self.__signin,
trigger=CronTrigger.from_crontab(self._cron),
name="药丸签到")
except Exception as err:
logger.error(f"定时任务配置错误:{str(err)}")
if self._onlyonce:
logger.info(f"药丸签到服务启动,立即运行一次")
self._scheduler.add_job(func=self.__signin, trigger='date',
run_date=datetime.now(tz=pytz.timezone(settings.TZ)) + timedelta(seconds=3),
name="药丸签到")
# 关闭一次性开关
self._onlyonce = False
self.update_config({
"onlyonce": False,
"cron": self._cron,
"enabled": self._enabled,
"cookie": self._cookie,
"notify": self._notify,
})
# 启动任务
if self._scheduler.get_jobs():
self._scheduler.print_jobs()
self._scheduler.start()
def __signin(self):
    """
    Perform the invites.fun daily check-in.

    Loads the site page to extract csrfToken and userId, posts the check-in
    request, and optionally notifies the result.
    """
    res = RequestUtils(cookies=self._cookie).get_res(url="https://invites.fun")
    if not res or res.status_code != 200:
        logger.error("请求药丸错误")
        return
    # Extract the CSRF token embedded in the page
    pattern = r'"csrfToken":"(.*?)"'
    csrf_tokens = re.findall(pattern, res.text)
    if not csrf_tokens:
        logger.error("请求csrfToken失败")
        return
    csrf_token = csrf_tokens[0]
    logger.info(f"获取csrfToken成功 {csrf_token}")
    # Extract the numeric user id
    pattern = r'"userId":(\d+)'
    match = re.search(pattern, res.text)
    if match:
        user_id = match.group(1)
        logger.info(f"获取userid成功 {user_id}")
    else:
        logger.error("未找到userId")
        return
    headers = {
        "X-Csrf-Token": csrf_token,
        "X-Http-Method-Override": "PATCH",
        "Cookie": self._cookie
    }
    data = {
        "data": {
            "type": "users",
            "attributes": {
                "canCheckin": False,
                "totalContinuousCheckIn": 2
            },
            "id": user_id
        }
    }
    # Submit the check-in
    res = RequestUtils(headers=headers).post_res(url=f"https://invites.fun/api/users/{user_id}", json=data)
    if not res or res.status_code != 200:
        logger.error("药丸签到失败")
        return
    # Fixed: parse the response defensively — a malformed/unexpected payload
    # previously raised an uncaught KeyError/JSONDecodeError out of the job
    try:
        attributes = json.loads(res.text)["data"]["attributes"]
        money = attributes["money"]
        totalContinuousCheckIn = attributes["totalContinuousCheckIn"]
    except (ValueError, KeyError, TypeError) as e:
        logger.error(f"解析药丸签到结果失败:{str(e)}")
        return
    # Notify the result
    if self._notify:
        self.post_message(
            mtype=NotificationType.SiteMessage,
            title="【药丸签到任务完成】",
            text=f"累计签到 {totalContinuousCheckIn} \n"
                 f"剩余药丸 {money}")
def get_state(self) -> bool:
return self._enabled
@staticmethod
def get_command() -> List[Dict[str, Any]]:
pass
def get_api(self) -> List[Dict[str, Any]]:
pass
def get_form(self) -> Tuple[List[dict], Dict[str, Any]]:
"""
拼装插件配置页面需要返回两块数据1、页面配置2、数据结构
"""
return [
{
'component': 'VForm',
'content': [
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 4
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'enabled',
'label': '启用插件',
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 4
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'notify',
'label': '开启通知',
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 4
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'onlyonce',
'label': '立即运行一次',
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'cron',
'label': '签到周期'
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'cookie',
'label': '药丸cookie'
}
}
]
}
]
}
]
}
], {
"enabled": False,
"onlyonce": False,
"notify": False,
"cookie": "",
"cron": "0 9 * * *"
}
def get_page(self) -> List[dict]:
pass
def stop_service(self):
    """Stop the plugin's scheduled jobs and release the scheduler."""
    if not self._scheduler:
        return
    try:
        self._scheduler.remove_all_jobs()
        if self._scheduler.running:
            self._scheduler.shutdown()
        self._scheduler = None
    except Exception as e:
        logger.error("退出插件失败:%s" % str(e))

File diff suppressed because it is too large Load Diff

Some files were not shown because too many files have changed in this diff Show More