Compare commits

...

848 Commits
v1.0.3 ... v

Author SHA1 Message Date
jxxghp
c37e02009f fix build 2023-10-09 19:39:19 +08:00
jxxghp
a96b8a4e07 fix build 2023-10-09 19:37:49 +08:00
jxxghp
79b4d5fb8e fix build 2023-10-09 19:33:05 +08:00
jxxghp
de128f5e6a fix 2023-10-09 15:04:54 +08:00
jxxghp
ef8ddcde07 fix 2023-10-09 14:46:23 +08:00
jxxghp
eaff557d70 windows package 2023-10-09 14:11:03 +08:00
jxxghp
38f7a31200 windows package 2023-10-09 13:40:09 +08:00
jxxghp
97f16289c9 windows package 2023-10-09 12:57:52 +08:00
jxxghp
e15f5ab93e Merge pull request #767 from thsrite/main 2023-10-09 11:50:18 +08:00
thsrite
15fd312765 fix #766 2023-10-09 11:41:59 +08:00
jxxghp
eea316865f fix #753 2023-10-09 11:05:53 +08:00
jxxghp
05bbfbbd54 Merge pull request #765 from thsrite/main
fix #701
2023-10-09 10:09:46 +08:00
thsrite
6039a9d0d5 fix 2023-10-09 10:06:04 +08:00
thsrite
0159b02916 fix 8bbd4dc9 2023-10-09 09:50:30 +08:00
thsrite
8bbd4dc913 fix #701 2023-10-09 09:37:16 +08:00
jxxghp
9e3ded6ad5 Merge pull request #764 from thsrite/main
fix 下载消息发送所有
2023-10-09 09:27:40 +08:00
jxxghp
fe63275a6b fix bug 2023-10-09 09:09:59 +08:00
jxxghp
81ed465607 fix #759 2023-10-09 09:05:48 +08:00
thsrite
d9aa281ce1 fix 下载消息发送所有 2023-10-09 09:02:01 +08:00
jxxghp
56648d664e fix README.md 2023-10-08 17:03:20 +08:00
jxxghp
da49d5577a fix app.env 2023-10-08 16:41:53 +08:00
jxxghp
f3dbdefdb1 fix README.md 2023-10-08 16:26:22 +08:00
jxxghp
d4302759e6 fix README.md 2023-10-08 16:25:27 +08:00
jxxghp
914f192fb2 test 2023-10-08 16:24:40 +08:00
jxxghp
522b554e36 fix README.md 2023-10-08 16:12:27 +08:00
jxxghp
4c54ab5319 fix README.md 2023-10-08 15:58:42 +08:00
jxxghp
d7f4ed069c Merge pull request #757 from lightolly/dev/20231008 2023-10-08 14:04:00 +08:00
olly
7ea0c5ee4c fix:演职员刮削优化
1.豆瓣查询增加速率限制后重试
2.全中文演职员跳过处理
2023-10-08 14:00:55 +08:00
jxxghp
e773a9d9d4 Merge pull request #755 from thsrite/customization 2023-10-08 12:22:56 +08:00
thsrite
b570542fab fix 2023-10-08 12:16:45 +08:00
thsrite
09716e98ba feat 自定义占位符 2023-10-08 11:59:52 +08:00
jxxghp
9236b361e2 Merge remote-tracking branch 'origin/main' 2023-10-08 06:56:57 +08:00
jxxghp
f281d8c068 fix #749 2023-10-08 06:56:45 +08:00
jxxghp
83ed17d5c1 Merge pull request #752 from thsrite/main
feat 药丸论坛签到
2023-10-07 20:54:25 +08:00
jxxghp
e2671dd4ed fix dockerfile 2023-10-07 05:52:43 -07:00
thsrite
4c4d640331 feat 药丸论坛签到 2023-10-07 20:51:32 +08:00
jxxghp
6c4307c918 fix #750 2023-10-07 05:29:23 -07:00
jxxghp
5a7062c699 fix 2023-10-07 05:03:19 -07:00
jxxghp
7da01f7404 fix 2023-10-07 05:03:06 -07:00
jxxghp
2b695cb8c6 fix #748 2023-10-07 04:59:07 -07:00
jxxghp
599817eec7 test 2023-10-07 04:44:06 -07:00
jxxghp
11fa33be0a test 2023-10-07 04:33:52 -07:00
jxxghp
b5ac9d4ce4 fix app.env 2023-10-07 04:08:19 -07:00
jxxghp
78f0ac0042 fix README.md 2023-10-07 04:01:21 -07:00
jxxghp
00ecd7adc5 更新 app.env 2023-10-07 18:24:02 +08:00
jxxghp
c39cb3bffc 更新 app.env 2023-10-07 18:22:32 +08:00
jxxghp
2fa902bfff Merge pull request #747 from thsrite/main 2023-10-07 18:09:25 +08:00
thsrite
f8bcd351ae fix 依赖 2023-10-07 18:08:33 +08:00
jxxghp
6013d99bf6 v1.2.9 2023-10-07 17:21:08 +08:00
jxxghp
e7c3977f7b fix README.md 2023-10-07 12:26:16 +08:00
jxxghp
47e1218fe0 fix #732 2023-10-07 10:31:33 +08:00
jxxghp
a71a95892f fix 2023-10-05 23:23:33 -07:00
jxxghp
b5f53e309f fix 2023-10-05 23:12:46 -07:00
jxxghp
3164ba2d98 fix #734 2023-10-05 17:57:47 -07:00
jxxghp
89854d188d fix actor thumb 2023-10-05 17:49:31 -07:00
jxxghp
79c7475435 fix tmdb lru cache 2023-10-05 17:41:02 -07:00
jxxghp
2ee477c35e fix requests session stream 2023-10-05 17:32:23 -07:00
jxxghp
5bcd90c569 fix requests session 2023-10-05 17:21:59 -07:00
jxxghp
1a49c7c59e try fix 2023-10-05 07:44:21 +08:00
jxxghp
d995932a1c fix personmeta 2023-10-04 14:34:42 +08:00
jxxghp
1b0bbbbbfd fix webhook plugin 2023-10-04 08:01:30 +08:00
jxxghp
2aa93fa341 fix webhook plugin 2023-10-04 08:01:02 +08:00
jxxghp
a970f90c6f Merge remote-tracking branch 'origin/main' 2023-10-04 07:33:38 +08:00
jxxghp
44f612fed5 v1.2.8 2023-10-04 07:33:31 +08:00
jxxghp
564a48dd8f fix 2023-10-03 16:24:27 -07:00
jxxghp
9d029de56a fix 2023-10-03 16:23:05 -07:00
jxxghp
2dd3fc5d8c fix #722 2023-10-03 16:19:43 -07:00
jxxghp
9c335dbdfb fix #724 2023-10-03 16:17:19 -07:00
jxxghp
0e30ea92f1 fix #726 2023-10-03 16:14:04 -07:00
jxxghp
a0ced4e43c 认证站点支持xingtan.one 2023-10-03 16:05:50 -07:00
jxxghp
cfaaf65edc support xingtan 2023-10-04 07:03:13 +08:00
jxxghp
35be18bb1a fix 2023-10-01 21:55:49 +08:00
jxxghp
02296e1758 fix 2023-10-01 21:46:09 +08:00
jxxghp
0b84b05cdd fix #705 2023-10-01 21:36:33 +08:00
jxxghp
99e3d5acca fix #707 2023-10-01 21:33:58 +08:00
jxxghp
8001511484 fix #690 2023-10-01 21:23:41 +08:00
jxxghp
8420b2ea85 fix personmeta 2023-10-01 21:08:16 +08:00
jxxghp
9af883acbb fix personmeta 2023-10-01 18:27:26 +08:00
jxxghp
e21ba5ad51 fix personmeta 2023-10-01 18:11:01 +08:00
jxxghp
1293fafd34 fix 2023-10-01 16:47:47 +08:00
jxxghp
4bcc6bd733 fix bug 2023-10-01 14:18:56 +08:00
jxxghp
53a514feb6 fix personmeta支持豆瓣 2023-10-01 14:16:36 +08:00
jxxghp
e697889aad fix 2023-10-01 12:37:18 +08:00
jxxghp
8b0fba054e Merge remote-tracking branch 'origin/main' 2023-10-01 12:28:46 +08:00
jxxghp
32ff385444 fix personmeta 2023-10-01 12:28:41 +08:00
jxxghp
8456c7f4a3 Merge pull request #718 from DDS-Derek/main
功能改进增加选择类型
2023-10-01 11:55:56 +08:00
jxxghp
fcbfb63645 fix personmeta 2023-10-01 11:52:25 +08:00
DDSDerek
1fa7d15982 fix: issue 2023-10-01 10:07:51 +08:00
DDSDerek
a173978f6b feat: optimize issue 2023-10-01 10:06:11 +08:00
jxxghp
2f069afc77 fix personmeta 2023-10-01 08:15:19 +08:00
jxxghp
ea998b4e41 fix personmeta 2023-10-01 07:53:50 +08:00
jxxghp
ba27d02854 fix 2023-09-30 20:40:48 +08:00
jxxghp
f78df58906 fix 2023-09-30 20:36:51 +08:00
jxxghp
308683a7e9 fix scraper 2023-09-30 20:27:48 +08:00
jxxghp
b3f4a6f251 fix mediaserver 2023-09-30 15:27:01 +08:00
jxxghp
d1841d8f15 fix mediaserver 2023-09-30 15:16:53 +08:00
jxxghp
c8d6de3e9b Merge pull request #706 from song-zhou/main 2023-09-29 22:04:22 +08:00
Elsie Weber
938f5c8cea Merge branch 'jxxghp:main' into main 2023-09-29 21:57:50 +08:00
songzhou
d166930b0a 修复手动执行订阅搜索服务无效bug 2023-09-29 21:57:41 +08:00
jxxghp
e1ac3c0d15 fix personmeta 2023-09-29 12:01:00 +08:00
jxxghp
59da489e05 Merge pull request #704 from developer-wlj/wlj0909 2023-09-29 10:30:16 +08:00
developer-wlj
be12c736fb Merge branch 'jxxghp:main' into wlj0909 2023-09-29 10:14:36 +08:00
jxxghp
71c52aae7b Merge pull request #703 from DDS-Derek/main 2023-09-29 10:12:32 +08:00
mayun110
dbfe2af53c fix PersonMeta插件jellyfin无法显示头像问题 2023-09-29 10:11:18 +08:00
DDSRem
cca898f5b6 feat: docker build use cache 2023-09-29 09:31:47 +08:00
jxxghp
9abd780aa2 fix PersonMeta 2023-09-29 08:34:45 +08:00
jxxghp
2e89eeca2c fix #694 按站点多次检索 2023-09-29 08:20:55 +08:00
jxxghp
dbb3bead6b fix #696 2023-09-28 22:38:11 +08:00
jxxghp
d0b88ec7f6 fix #696 2023-09-28 22:36:35 +08:00
jxxghp
5898bc7eb1 - 修复v1.2.7版本中的问题 2023-09-28 22:19:13 +08:00
jxxghp
cfe113f6c3 fix bug 2023-09-28 22:16:21 +08:00
jxxghp
83500128c9 Merge pull request #698 from song-zhou/main
修复通知emby时libraryId错误bug
2023-09-28 22:09:32 +08:00
songzhou
2bff3a80da 修复通知emby时libraryId错误bug 2023-09-28 22:05:43 +08:00
jxxghp
3dd7b33f3e fix bug 2023-09-28 21:37:57 +08:00
jxxghp
8de487b0bf fix bug 2023-09-28 21:27:39 +08:00
jxxghp
ce88a6818f fix #693 2023-09-28 21:18:40 +08:00
jxxghp
6172832f41 fix 图片下载重试 2023-09-28 21:13:40 +08:00
jxxghp
a0ed228f4b fix 演员头像&中文名 2023-09-28 21:11:08 +08:00
jxxghp
01fd56a019 feat 演职人员优先使用TMDB中的中文名 2023-09-28 20:24:47 +08:00
jxxghp
087fcd340a fix #692 2023-09-28 20:06:03 +08:00
jxxghp
b3b09f3c03 Merge pull request #692 from DDS-Derek/main 2023-09-28 20:04:30 +08:00
DDSRem
11d17bf21a fix: https://github.com/jxxghp/MoviePilot/pull/654 2023-09-28 19:57:28 +08:00
jxxghp
b1ee80edee fix themoivedb timeout 2023-09-28 19:08:34 +08:00
jxxghp
107d496adb v1.2.7 2023-09-28 17:43:34 +08:00
jxxghp
9f1112b58d fix 2023-09-28 17:41:48 +08:00
jxxghp
989d6e3fe7 fix 2023-09-28 17:29:21 +08:00
jxxghp
3999c64853 add PersonMeta 2023-09-28 17:11:55 +08:00
jxxghp
760e3d6de0 更新 __init__.py 2023-09-28 16:32:56 +08:00
jxxghp
02111a3b9f fix #684 2023-09-28 16:23:10 +08:00
jxxghp
e6af2c0f34 fix 2023-09-28 16:14:52 +08:00
jxxghp
bd4c639761 Merge pull request #688 from thsrite/main
feat 定时清理媒体库插件
2023-09-28 15:46:13 +08:00
thsrite
d39b7ec021 fix 2023-09-28 15:40:13 +08:00
thsrite
63ca5f5017 fix 下载进度推送逻辑 2023-09-28 15:32:07 +08:00
thsrite
2202cf457b fix 2023-09-28 15:25:04 +08:00
thsrite
5d04b7abd6 feat 定时清理媒体库插件 2023-09-28 15:21:01 +08:00
jxxghp
0588d5d5f3 fix get_location 2023-09-28 14:49:54 +08:00
jxxghp
5a59e443d7 fix 2023-09-28 14:43:08 +08:00
jxxghp
470f4df979 fix #669 2023-09-28 14:32:34 +08:00
jxxghp
84bda71330 fix #657 2023-09-28 14:16:27 +08:00
jxxghp
ea883255cb fix #685 添加resourceType资源类型 2023-09-28 13:45:06 +08:00
jxxghp
e9abb69fb5 fix 2023-09-28 12:52:32 +08:00
jxxghp
ff63390794 Merge pull request #686 from thsrite/main 2023-09-28 12:39:12 +08:00
jxxghp
78b3135276 feat 媒体文件同步删除插件:支持手动删除源文件同步处理下载任务 2023-09-28 12:35:41 +08:00
thsrite
15bd2c09ed fix 2023-09-28 12:28:24 +08:00
thsrite
34d44857e4 fix messageforward 2023-09-28 12:11:39 +08:00
thsrite
dccded2d3e fix 下载消息增加用户 2023-09-28 12:03:18 +08:00
thsrite
295cafc060 fix 2023-09-28 11:56:13 +08:00
thsrite
c792e97f67 fix 下载进度增加识别名 2023-09-28 11:41:30 +08:00
thsrite
d30a02987d feat 正在下载进度推送插件 2023-09-28 11:10:34 +08:00
jxxghp
84d4c9cf73 feat 重命名支持episode_title集标题 2023-09-28 10:58:31 +08:00
jxxghp
21ecd1f708 fix #673 2023-09-28 08:34:34 +08:00
jxxghp
248b9a8e8c fix #663 2023-09-28 08:24:39 +08:00
jxxghp
3c7abfada6 fix #677 2023-09-28 08:14:22 +08:00
jxxghp
f363656e0a Merge remote-tracking branch 'origin/main' 2023-09-28 08:09:01 +08:00
jxxghp
e9ee9dbce1 fix #676 2023-09-28 08:08:55 +08:00
jxxghp
ab0b8653ab Merge pull request #674 from developer-wlj/wlj0909 2023-09-27 18:12:10 +08:00
developer-wlj
20711e17fb Merge branch 'jxxghp:main' into wlj0909 2023-09-27 18:06:51 +08:00
mayun110
a89bd8b816 Merge remote-tracking branch 'origin/wlj0909' into wlj0909 2023-09-27 18:05:46 +08:00
mayun110
3692cfea64 fix 无法匹配国语标签的bug 2023-09-27 15:38:35 +08:00
jxxghp
81d9d39029 fix bug 2023-09-27 14:12:11 +08:00
jxxghp
f5a61ceff1 fix bug 2023-09-27 13:40:35 +08:00
jxxghp
404a7b8337 fix bug 2023-09-27 11:14:56 +08:00
jxxghp
71ce3a2920 v1.2.6 2023-09-27 10:19:37 +08:00
jxxghp
3a27656769 fix #557 2023-09-27 10:18:40 +08:00
jxxghp
27b1e0ffd5 fix #668 rollback #654 2023-09-27 09:47:56 +08:00
jxxghp
1401ea74dd fix #667 硬链接支持极空间 2023-09-27 08:22:32 +08:00
jxxghp
cb93a63970 feat 历史记录支持重新识别 2023-09-27 08:16:26 +08:00
jxxghp
da4ff99570 fix #655 2023-09-25 08:40:19 +08:00
jxxghp
b3c0dc813b fix #662 2023-09-25 07:12:36 +08:00
jxxghp
a7b51d9fcc fix bug 2023-09-24 19:48:03 +08:00
jxxghp
76f1de42a8 v1.2.5 2023-09-24 19:33:25 +08:00
jxxghp
bad016b2b4 rollback mteam 2023-09-24 19:29:24 +08:00
jxxghp
5cd48d5447 fix 优化定时服务调度 2023-09-24 12:41:59 +08:00
jxxghp
41ff5363ea Merge remote-tracking branch 'origin/main' 2023-09-24 11:14:00 +08:00
jxxghp
85014f4acb feat 服务手动触发 2023-09-24 11:13:49 +08:00
jxxghp
d9a68daddd Merge pull request #658 from WithdewHua/fix-torrentremover 2023-09-24 08:02:45 +08:00
WithdewHua
141e78f274 fix: 种子分类为空时被删除 2023-09-24 02:58:24 +08:00
jxxghp
de98ccd33c fix mteam、zhuque登录判定 2023-09-23 21:42:21 +08:00
jxxghp
d490dadfdd fix mteam 2023-09-23 16:35:27 +08:00
jxxghp
f46bbf73ba Merge pull request #654 from DDS-Derek/main
fix: container id retrieval error
2023-09-23 16:21:05 +08:00
jxxghp
17eba86f7a fix mteam 2023-09-23 16:20:08 +08:00
DDSRem
fdf25b8c66 fix: container id retrieval error 2023-09-23 16:04:25 +08:00
jxxghp
516cb443b9 fix mteam 2023-09-23 15:58:42 +08:00
jxxghp
7c4c3b3f9a feat 支持新版本mteam 2023-09-23 12:30:19 +08:00
jxxghp
e298a1a8a0 feat 支持新版本mteam 2023-09-23 12:02:04 +08:00
jxxghp
fd9eef2089 feat 支持多媒体服务器同时使用 2023-09-23 09:20:51 +08:00
jxxghp
78dab04c96 fix #650 2023-09-23 08:33:49 +08:00
jxxghp
c34475653f Merge pull request #652 from WithdewHua/fix-torrentremover 2023-09-22 22:45:17 +08:00
WithdewHua
eb6a6eee0a fix: 种子分类为空时被删除 2023-09-22 21:27:48 +08:00
jxxghp
48f6a45194 v1.2.4 2023-09-22 16:06:00 +08:00
jxxghp
c8ae6bcc78 fix message format 2023-09-22 16:04:04 +08:00
jxxghp
7f6beb2a78 feat SynologyChat 2023-09-22 15:40:23 +08:00
jxxghp
ea160afd90 fix CronTrigger.from_crontab异常捕捉 2023-09-22 14:42:11 +08:00
jxxghp
29df0813fd fix 屏蔽telebot的trackback日志 2023-09-22 14:37:10 +08:00
jxxghp
b014c4a4e5 fix #646 2023-09-22 14:26:46 +08:00
jxxghp
f173c21695 更新 telegram.py 2023-09-22 13:04:20 +08:00
jxxghp
dc41f4946a fix bug 2023-09-22 12:52:40 +08:00
jxxghp
fed754f03a fix memory 2023-09-22 11:42:34 +08:00
jxxghp
382d9ed525 Merge remote-tracking branch 'origin/main' 2023-09-22 11:33:32 +08:00
jxxghp
e3707f39bb fix wallpaper 2023-09-22 11:33:25 +08:00
jxxghp
9df8d3d360 fix bug 2023-09-22 11:20:12 +08:00
jxxghp
5b3c310cda Merge pull request #643 from thsrite/main 2023-09-22 11:01:16 +08:00
jxxghp
79d692771e Merge remote-tracking branch 'origin/main' 2023-09-22 10:59:28 +08:00
jxxghp
f74ffed3ae fix #628 2023-09-22 10:59:19 +08:00
thsrite
0325d7f4f1 fix 优化删除代码 2023-09-22 10:30:04 +08:00
jxxghp
3926298907 Merge pull request #642 from developer-wlj/wlj0909 2023-09-22 09:46:27 +08:00
mayun110
d98376b490 filter_torrents_by_default_rule方法 添加参数和返回值声明 2023-09-22 09:45:23 +08:00
mayun110
219690afc0 fix 在搜索模式中 默认过滤规则无效问题 2023-09-22 09:10:58 +08:00
jxxghp
bcb1fc1600 fix memory 2023-09-21 23:12:06 +08:00
jxxghp
923be7e1e9 feat 历史记录删除支持删除源文件 2023-09-21 19:59:29 +08:00
jxxghp
951353ee0b Merge pull request #634 from thsrite/main 2023-09-21 12:34:17 +08:00
thsrite
52bdfa7f9a feat 媒体服务器同步黑名单 2023-09-21 12:08:09 +08:00
jxxghp
4af29aa76d Merge pull request #632 from Sowevo/main 2023-09-21 10:04:48 +08:00
Sowevo
8efa6a742b Merge branch 'jxxghp:main' into main 2023-09-20 21:02:31 -05:00
sowevo
ada5e1cca5 feat: plex更精准的媒体库刷新 2023-09-21 10:01:48 +08:00
jxxghp
859191203f Merge pull request #630 from thsrite/main 2023-09-21 09:09:36 +08:00
thsrite
cab4055315 fix #629 2023-09-21 09:08:53 +08:00
jxxghp
cacee7abfe - 修复删除媒体库文件时范围过大的问题,v1.2.3版本需要升级! 2023-09-20 16:26:46 +08:00
jxxghp
61694f4c2b Merge pull request #626 from thsrite/main 2023-09-20 16:14:38 +08:00
thsrite
9c328e3d1c fix #625 2023-09-20 16:11:53 +08:00
jxxghp
b2fe86c744 v1.2.3
- 优先级规则现可以按订阅和搜索分别设置
- 中文字幕过滤规则只针对原语种为非中文生效
2023-09-20 06:52:31 +08:00
jxxghp
600e32d3e4 更新 __init__.py 2023-09-19 23:29:35 +08:00
jxxghp
3ad733bab4 Merge remote-tracking branch 'origin/main' 2023-09-19 21:40:52 +08:00
jxxghp
1799b63abb feat 优先级规则按订阅和搜索拆分 2023-09-19 21:40:36 +08:00
jxxghp
d71dc13e32 Merge pull request #621 from developer-wlj/wlj0909 2023-09-19 18:21:47 +08:00
mayun110
f4633788e9 Merge remote-tracking branch 'origin/wlj0909' into wlj0909 2023-09-19 18:14:47 +08:00
jxxghp
2250e7db39 Merge remote-tracking branch 'origin/main' 2023-09-19 17:15:26 +08:00
jxxghp
b1bb0ced7a fix #608 2023-09-19 17:15:16 +08:00
jxxghp
28aecd79c6 Merge pull request #612 from thsrite/main
fix #553 修复unraid删除资源慢的问题
2023-09-19 17:08:24 +08:00
thsrite
d097ef45eb fix 当前路径下没有媒体文件则删除 2023-09-19 16:44:20 +08:00
thsrite
dac718edc8 fix 7a5d2101 2023-09-19 16:15:05 +08:00
mayun110
598ab23a2c 优化Windows下Cloudflare IP优选插件 2023-09-19 13:39:41 +08:00
jxxghp
8be6e28933 feat 中文字幕过滤规则只针对原语种为非中文 2023-09-19 12:42:10 +08:00
mayun110
bd6805be58 优化Windows下Cloudflare IP优选插件 2023-09-19 11:45:06 +08:00
thsrite
c147d36cb2 fix 资源下载msg增加下载用户 2023-09-19 11:15:14 +08:00
thsrite
7a5d210167 fix #553 修复unraid删除资源慢的问题 2023-09-19 09:17:48 +08:00
mayun110
ef335f2b8e Cloudflare IP优选新增windows支持 2023-09-19 00:02:59 +08:00
jxxghp
19eca11d17 Merge pull request #616 from thsrite/fix 2023-09-18 18:33:42 +08:00
thsrite
ab99bd356a fix iyuuautoseed 2023-09-18 18:32:19 +08:00
jxxghp
70f2d72532 Merge pull request #615 from thsrite/fix 2023-09-18 18:29:43 +08:00
thsrite
0ca995da0f fix #613 2023-09-18 18:25:52 +08:00
jxxghp
2a67abe62d v1.2.2
- 修复了RSS模式指定订阅站点时不刷新订阅的问题
- 推荐页面后退时会记住浏览位置
- 订阅及搜索支持设置全局包含和排除规则
2023-09-18 17:13:51 +08:00
jxxghp
03a07ac7bf fix RSS模式指定订阅站点时不刷新订阅的问题 2023-09-18 17:05:08 +08:00
jxxghp
f104c903ec Merge pull request #611 from thsrite/main 2023-09-18 11:38:00 +08:00
thsrite
6b74a8e266 fix 插件站点排序、删除 2023-09-18 10:30:28 +08:00
thsrite
cadd885dbf fix #592 2023-09-18 10:29:27 +08:00
jxxghp
7e0cad8491 fix 2023-09-17 19:49:21 +08:00
jxxghp
4c05e9fb2b Merge pull request #609 from WithdewHua/subscribe 2023-09-17 18:59:42 +08:00
WithdewHua
42311f0118 feat: 订阅搜索支持默认包含与排除规则 2023-09-17 18:35:31 +08:00
WithdewHua
951be74a21 fix: 函数命名 2023-09-17 18:35:31 +08:00
jxxghp
c86a21d11d Merge pull request #604 from WithdewHua/subscribe 2023-09-16 20:31:42 +08:00
WithdewHua
3fb02f6490 feat: 增加更新订阅 tmdb 信息 API 2023-09-16 19:36:49 +08:00
WithdewHua
ca2c0392bb fix: 调整 API 顺序,避免错误匹配 2023-09-16 18:43:33 +08:00
WithdewHua
b8663ee735 fix: 同时更新电影订阅信息;修复 typo 2023-09-16 16:16:39 +08:00
WithdewHua
4ab60423c1 feat: 根据原标题查询媒体服务器(plex) 2023-09-16 15:48:22 +08:00
jxxghp
1ea80e6870 更新 README.md 2023-09-16 10:58:33 +08:00
jxxghp
6f1d4754be Merge pull request #600 from DDS-Derek/main 2023-09-16 08:28:56 +08:00
DDSRem
52288d98c0 bump: action jobs version
docker/metadata-action@v5
docker/setup-qemu-action@v3
docker/setup-buildx-action@v3
docker/login-action@v3
docker/build-push-action@v5

Co-Authored-By: DDSDerek <108336573+DDSDerek@users.noreply.github.com>
Co-Authored-By: DDSTomo <142158217+ddstomo@users.noreply.github.com>
2023-09-15 20:18:28 +08:00
jxxghp
d1368c4f84 fix bug 2023-09-15 17:28:35 +08:00
jxxghp
4367c53bb0 fix bug 2023-09-15 17:24:22 +08:00
jxxghp
d87f69da35 fix azusa 2023-09-15 16:07:01 +08:00
jxxghp
5ece44090e fix 2023-09-15 15:38:30 +08:00
jxxghp
01be4f9549 need test 2023-09-15 15:37:05 +08:00
jxxghp
94077917f3 Merge remote-tracking branch 'origin/main' 2023-09-15 15:22:19 +08:00
jxxghp
8af981738c fix README.md 2023-09-15 15:22:11 +08:00
jxxghp
4d7982803e Merge pull request #596 from thsrite/main
fix 辅种插件增加不辅种路径
2023-09-15 15:15:55 +08:00
thsrite
a1bba6da4a fix 辅种插件增加不辅种路径 2023-09-15 15:08:15 +08:00
jxxghp
4eb3e16b37 v1.2.1
- 修复了IOS下菜单栏需要点击两次的问题
- 修复了电影洗版重复下载的问题
- 站点新增支持ptlsp、azusa
- 认证站点新增支持ptlsp
- 仿真签到增加判断签到状态
2023-09-15 15:04:18 +08:00
jxxghp
1f0b40fe05 support ptlsp 2023-09-15 14:29:15 +08:00
jxxghp
29e92a17e7 support azusa 2023-09-15 14:01:12 +08:00
jxxghp
8cc4469282 fix #591 2023-09-15 10:59:46 +08:00
jxxghp
a5e66071ba support PTLSP 2023-09-15 10:46:54 +08:00
jxxghp
fb4e817993 fix #594 2023-09-15 10:38:15 +08:00
jxxghp
8f26110e65 Merge pull request #590 from thsrite/main 2023-09-14 16:19:46 +08:00
thsrite
9f65a088c0 fix 插件交互命令增加channel字段 2023-09-14 16:09:56 +08:00
jxxghp
15c15388b6 Merge pull request #589 from thsrite/main 2023-09-14 15:34:49 +08:00
thsrite
950a43e001 fix 每日签到记录存储bug 2023-09-14 15:28:06 +08:00
jxxghp
9a28f8c365 Merge pull request #588 from thsrite/main 2023-09-14 15:18:43 +08:00
thsrite
32cb96fc44 fix 仿真签到判断是否已签 2023-09-14 15:17:30 +08:00
jxxghp
f7982e3e43 fix build 2023-09-14 11:36:37 +08:00
jxxghp
d13602827c fix build 2023-09-14 11:30:22 +08:00
jxxghp
182adc77b6 v1.2.0
- 修复了 QB4.5+ 转种到 TR3.0 丢失tracker的问题
- 站点新增支持byr、hdcity、okpt
- RSS订阅模式时自动检测是否失效并更新链接地址
- 自定义订阅插件支持磁力链接下载
- 增加了自定义识别词支持的配置格式:被替换词 => 替换词 && 前定位词 <> 后定位词 >> 集偏移量
- 媒体库同步删除插件支持多版本文件处理
2023-09-14 11:17:10 +08:00
jxxghp
ef4cdb41c8 fix release 2023-09-14 10:07:20 +08:00
jxxghp
9a60121914 fix #579 修改转种使用的模块 2023-09-14 09:46:51 +08:00
jxxghp
6fb0c92183 fix message content 2023-09-14 09:18:11 +08:00
jxxghp
96c4e0ba2f Merge remote-tracking branch 'origin/main' 2023-09-14 09:09:43 +08:00
jxxghp
7afe82480c fix brush 2023-09-14 09:08:57 +08:00
jxxghp
c37c8e7318 Merge pull request #583 from thsrite/main 2023-09-13 21:41:48 +08:00
thsrite
3d10ca4c8b fix 签到数量 2023-09-13 20:19:06 +08:00
jxxghp
4e515ec442 fix #516 支持磁力链下载 2023-09-13 17:56:57 +08:00
jxxghp
5eb37b5d28 fix sites 2023-09-13 16:58:05 +08:00
jxxghp
7f95bab0d5 fix #578 2023-09-13 16:12:57 +08:00
jxxghp
3fc267bcfa Merge pull request #578 from thsrite/main
fix 订阅刷新只处理订阅选中的站点(没选刷新所有设定的订阅站点)
2023-09-13 15:58:04 +08:00
jxxghp
648f0b6ec1 add byr、hdcity、okpt 2023-09-13 15:53:58 +08:00
thsrite
be3c3ef37f fix 订阅刷新站点 2023-09-13 15:52:32 +08:00
jxxghp
a47f382c21 fix download message 2023-09-13 15:18:23 +08:00
jxxghp
61c59b4405 fix #572 2023-09-13 14:58:33 +08:00
jxxghp
8ee391688d Merge pull request #575 from thsrite/main 2023-09-13 13:26:09 +08:00
thsrite
68c7bf0a96 Revert "fix"
This reverts commit 7c3c6ee999.
2023-09-13 13:10:11 +08:00
thsrite
6dd517a490 fix 自定义识别词空格 2023-09-13 12:45:44 +08:00
jxxghp
9baa5e1d35 Merge pull request #574 from thsrite/main 2023-09-13 12:36:13 +08:00
thsrite
e675e4358a fix 同步删除插件 2023-09-13 12:35:09 +08:00
jxxghp
c9a6081a57 fix log 2023-09-13 12:30:45 +08:00
jxxghp
2de20f601b fix 开关位置 2023-09-13 12:23:32 +08:00
jxxghp
79c708c30e Merge pull request #564 from thsrite/main 2023-09-13 12:04:09 +08:00
thsrite
f38defb515 Revert "fix 卸载插件时删除插件配置"
This reverts commit dd7803c90a.
2023-09-13 11:58:37 +08:00
thsrite
ac11d4eb30 Revert "fix dd7803c9"
This reverts commit 08560fc7c3.
2023-09-13 11:58:30 +08:00
thsrite
221c31f481 fix 自定义识别词增加规则:被替换词 => 替换词 && 偏移前 <> 偏移后 >> 集偏移 2023-09-13 10:37:13 +08:00
thsrite
7c3c6ee999 fix 2023-09-13 09:52:05 +08:00
thsrite
08560fc7c3 fix dd7803c9 2023-09-13 09:29:52 +08:00
thsrite
4659e7367f fix d8afa339 函数参数名 2023-09-13 09:24:16 +08:00
thsrite
2fa11a4796 Merge remote-tracking branch 'origin/main' 2023-09-13 09:22:29 +08:00
thsrite
01a153902e Revert "fix 自定义订阅插件增加识别按钮"
This reverts commit 1b2f09b95f.
2023-09-13 09:21:56 +08:00
jxxghp
5eb65046f0 Merge pull request #571 from WithdewHua/media_exists 2023-09-13 06:36:22 +08:00
WithdewHua
bb64e57f7c fix: 检查媒体文件是否存在时验证 TMDB ID 2023-09-12 23:03:15 +08:00
thsrite
0cb75d689c fix 根据type和tmdbid查询转移记录 2023-09-12 15:28:21 +08:00
thsrite
d7310ade86 fix 同步删除插件兼容多分辨率 2023-09-12 15:21:34 +08:00
thsrite
dd7803c90a fix 卸载插件时删除插件配置 2023-09-12 14:57:31 +08:00
thsrite
d8afa339de fix 媒体库刮削插件开启强制刮削时忽略SCRAP_METADATA变量 2023-09-12 13:29:12 +08:00
thsrite
1b2f09b95f fix 自定义订阅插件增加识别按钮 2023-09-12 12:45:39 +08:00
jxxghp
0414854832 Merge pull request #562 from thsrite/main 2023-09-12 11:47:57 +08:00
thsrite
9e6a7be5b1 fix #537 天空辅种失败问题 2023-09-12 11:45:57 +08:00
thsrite
e3c1407b62 fix 憨憨用户等级 2023-09-12 11:26:45 +08:00
thsrite
7a9ee954c5 fix sub正则 2023-09-12 11:11:36 +08:00
thsrite
99a06dcba0 fix rss过期,尝试保留原配置生成新的rss地址 2023-09-12 10:09:17 +08:00
jxxghp
bb8fc14bc6 v1.1.9
- 修复了部分情况下媒体识别错误的问题
- 站点新增支持dajiao、ptcafe
- 支持RSS订阅模式,RSS模式会自动获取RSS链接(也可手动维护),订阅刷新对站点压力小,同时可设置订阅刷新周期,24小时运行,可通过开关切换。
- 移除了自定义订阅功能,可使用RSS订阅模式或使用自定义订阅插件替代。
- 手动整理时支持通过名称搜索TMDBID。
2023-09-12 08:07:03 +08:00
jxxghp
50d9dcf17b fix #556 2023-09-12 07:36:29 +08:00
jxxghp
141b99d134 fix #556 2023-09-12 07:22:06 +08:00
jxxghp
18457a4de7 fix #555 2023-09-11 21:43:30 +08:00
jxxghp
a343d736ae fix #550 2023-09-11 21:25:56 +08:00
jxxghp
df5c364185 fix #550 2023-09-11 21:14:49 +08:00
jxxghp
edcec114ae fix bug 2023-09-11 19:54:38 +08:00
jxxghp
605a7486b3 fix log 2023-09-11 19:10:51 +08:00
jxxghp
efe89f59b9 feat 支持dajiao、ptcafe 2023-09-11 18:10:50 +08:00
jxxghp
fdd4aef3d3 feat 整合RSS订阅模式 2023-09-11 17:47:51 +08:00
jxxghp
08aef1f47f fix rsslink helper 2023-09-11 17:13:26 +08:00
jxxghp
c45f5e6ac4 Merge pull request #549 from thsrite/main
feat 自动生成站点默认rss地址
2023-09-11 16:35:36 +08:00
thsrite
f239cede07 fix speedlimit 未开启时return 2023-09-11 16:14:15 +08:00
thsrite
b2eb952cd0 fix 自动获取rss使用代理 2023-09-11 13:15:15 +08:00
thsrite
3a2fba0422 fix 自动获取rss data 2023-09-11 12:43:27 +08:00
thsrite
1034caa9fd fix ttg、zhuque等自动获取rss 2023-09-11 12:26:23 +08:00
thsrite
8b243e23ab feat 自动生成默认rss地址 2023-09-11 11:39:39 +08:00
jxxghp
1f76dc1e2a Merge pull request #540 from thsrite/main 2023-09-10 20:22:25 +08:00
thsrite
ea5c2fb4cf fix 限速插件每次重启完发送取消限速消息 2023-09-10 20:08:09 +08:00
thsrite
e50b56d542 fix 交互命令翻页下载 2023-09-10 19:51:20 +08:00
jxxghp
2206fafda9 Merge pull request #539 from thsrite/main 2023-09-10 18:45:33 +08:00
thsrite
345b74d881 fix #538 2023-09-10 18:41:04 +08:00
jxxghp
d231d75446 v1.1.8
- 修复了Jellyfin/Plex的webhook通知消息
- 修复了手动整理时屏蔽词不生效的问题
- 优化了剧集的年份匹配
- 优化了站点种子的索引频率控制
- 增加了站点分享率低时的信息提醒
- 增加了重启系统的远程交互命令
2023-09-10 17:43:04 +08:00
jxxghp
afb5874350 fix #536 2023-09-10 17:35:58 +08:00
jxxghp
1bd7b5c77e fix jellyfin webhook 2023-09-10 17:07:24 +08:00
jxxghp
ba41de61cb fix plex webhook 2023-09-10 12:57:51 +08:00
jxxghp
ae40d32115 fix bug 2023-09-10 09:15:12 +08:00
jxxghp
3fe4c9467e fix 2023-09-10 09:06:00 +08:00
jxxghp
b89512cc33 fix #526 2023-09-10 09:02:46 +08:00
jxxghp
f3b12bed20 feat 分享率低通知预警 2023-09-10 08:54:33 +08:00
jxxghp
08c7fff5ab fix README.md 2023-09-10 08:32:52 +08:00
jxxghp
9c20d1a270 Merge pull request #530 from thsrite/main
feat 补充剧集全部季年份
2023-09-09 22:06:58 +08:00
thsrite
b7b1aee878 fix 2023-09-09 22:03:51 +08:00
jxxghp
f998b39152 fix 删除种子数无法计算 2023-09-09 21:58:49 +08:00
jxxghp
ca01db31a9 fix LIBRARY_PATH 2023-09-09 21:41:55 +08:00
thsrite
a0b8cc6719 feat 补充剧集全部季年份 2023-09-09 21:24:07 +08:00
jxxghp
66b91abe90 fix sites.cpython-311-darwin 2023-09-09 20:58:52 +08:00
jxxghp
9b17d55ac0 fix db session 2023-09-09 20:56:37 +08:00
jxxghp
a7a0889867 Merge pull request #528 from thsrite/main 2023-09-09 20:17:09 +08:00
thsrite
af6cf306c8 fix 交互命令重启 2023-09-09 20:01:43 +08:00
jxxghp
20f35854f9 fix update 2023-09-09 19:43:02 +08:00
jxxghp
e5165c8fea fix plugin db session 2023-09-09 19:41:06 +08:00
jxxghp
0e36d003c0 fix db session 2023-09-09 19:26:56 +08:00
jxxghp
ccc249f29d Merge pull request #527 from developer-wlj/wlj0909 2023-09-09 18:31:27 +08:00
mayun110
f4edb32886 fix Windows目录监控下获取目录问题 2023-09-09 18:11:50 +08:00
jxxghp
475a84bfa6 Merge pull request #525 from thsrite/main 2023-09-09 17:53:29 +08:00
mayun110
3914ff4dd6 fix Windows下获取目录问题 2023-09-09 17:49:40 +08:00
jxxghp
5bcbacf3a5 feat torrents全局缓存共享 2023-09-09 17:42:31 +08:00
jxxghp
27238ac467 fix brushflow plugin 2023-09-09 16:49:15 +08:00
thsrite
019d40c17a fix 辅种插件排除已删除站点 2023-09-09 16:40:09 +08:00
jxxghp
fa5b92214f fix ssd 2023-09-09 16:24:53 +08:00
jxxghp
32a5f67e72 Merge pull request #524 from thsrite/main 2023-09-09 15:56:02 +08:00
thsrite
d6e9c14183 fix qb删种 2023-09-09 15:47:51 +08:00
jxxghp
87325d5bbd Merge pull request #523 from thsrite/main 2023-09-09 15:07:53 +08:00
thsrite
67ead871c1 fix 删除清除缓存按钮 2023-09-09 15:06:44 +08:00
jxxghp
691beb1186 Merge pull request #522 from DDS-Derek/main 2023-09-09 14:57:02 +08:00
jxxghp
b30d3c7dac Merge pull request #521 from WithdewHua/rsssubscribe 2023-09-09 14:55:41 +08:00
DDSRem
5e048f0150 feat: 优化容器id获取 2023-09-09 14:18:10 +08:00
WithdewHua
cb2cfe9d85 fix: 关闭清理缓存开关 2023-09-09 14:13:17 +08:00
jxxghp
482fca9b8c Merge pull request #520 from DDS-Derek/main
fix: failed to obtain container id
2023-09-09 12:08:50 +08:00
DDSRem
42511b95d8 fix: failed to obtain container id 2023-09-09 12:03:48 +08:00
jxxghp
b18e901fbd fix plugin ui 2023-09-09 11:37:34 +08:00
jxxghp
a30e3f49a3 v1.1.7
- 修复了文件转移无法覆盖的问题
- 修复了过滤规则只能从尾部开始删除的问题
- 优化了内建重启,支持非root权限环境(需要重拉镜像)
- 文件管理功能支持排序
- 优化了插件页面交互,优先展示插件数据
2023-09-09 11:08:45 +08:00
jxxghp
65d202e636 fix README.md 2023-09-09 10:51:59 +08:00
jxxghp
4373c0596b Merge pull request #518 from DDS-Derek/main
fix: port conflict
2023-09-09 10:45:54 +08:00
DDSRem
0136d9fe06 fix: port conflict 2023-09-09 10:44:05 +08:00
jxxghp
933c6d838c fix #497 2023-09-09 08:27:40 +08:00
jxxghp
7ce656148f fix #508 2023-09-09 08:19:17 +08:00
jxxghp
c05ffed6df fix #514 文件管理支持排序 2023-09-09 08:00:17 +08:00
jxxghp
6770ba3a35 feat 文件管理API排序 2023-09-08 22:48:53 +08:00
jxxghp
3b73dfcdc6 fix 文件转移时无法覆盖 2023-09-08 22:36:31 +08:00
jxxghp
100ff97017 Merge pull request #515 from thsrite/main 2023-09-08 21:49:49 +08:00
thsrite
4fe96178ee fix 2023-09-08 21:44:32 +08:00
thsrite
86d484fac0 fix 2023-09-08 21:41:30 +08:00
thsrite
db23b62fd1 fix 2023-09-08 21:31:36 +08:00
jxxghp
b84c8fd7f1 Merge pull request #512 from thsrite/main 2023-09-08 21:29:46 +08:00
jxxghp
c9f6c75069 Merge pull request #510 from DDS-Derek/main 2023-09-08 21:26:00 +08:00
thsrite
846459c244 fix wechat token 2023-09-08 21:21:43 +08:00
DDSRem
c4898d04aa docs: update 2023-09-08 20:38:07 +08:00
DDSRem
c8bc6a4618 fix: 重启更新 2023-09-08 20:33:23 +08:00
DDSRem
55dce26cb8 test: restart 2023-09-08 19:55:03 +08:00
DDSRem
ae3b73a73f feat: 优化重启 2023-09-08 19:49:10 +08:00
jxxghp
091df01b7c fix plugin 2023-09-08 16:48:13 +08:00
jxxghp
20c4c7d6e6 add 发布时间 2023-09-08 16:36:02 +08:00
jxxghp
eb1e045d8f Merge remote-tracking branch 'origin/main' 2023-09-08 15:38:38 +08:00
jxxghp
678638e9f1 feat 插件API 2023-09-08 15:38:32 +08:00
jxxghp
d8b78d3051 Merge pull request #505 from thsrite/main
fix 消息转发插件清理缓存按钮
2023-09-08 13:15:45 +08:00
thsrite
eaf0d17118 fix 消息转发插件清理缓存按钮 2023-09-08 13:13:11 +08:00
jxxghp
81bcfef6ec Merge pull request #504 from thsrite/main 2023-09-08 13:11:13 +08:00
thsrite
0997691b23 fix time format 2023-09-08 13:06:40 +08:00
jxxghp
d1f9647a63 Merge pull request #503 from thsrite/main
feat 签到插件支持分别配置签到、登录站点
2023-09-08 12:26:15 +08:00
thsrite
64a04ba8ed fix 2023-09-08 12:24:27 +08:00
jxxghp
726c130f1f Merge remote-tracking branch 'origin/main' 2023-09-08 12:23:29 +08:00
jxxghp
215b56b9f2 feat 打印jellyfin/plex webhook报文 2023-09-08 12:23:19 +08:00
thsrite
516bd8bc30 Merge remote-tracking branch 'origin/main' 2023-09-08 12:21:38 +08:00
thsrite
8bc6e04665 feat 签到插件支持分别配置签到、登录站点 2023-09-08 12:21:29 +08:00
jxxghp
94057cd5f1 Merge pull request #499 from thsrite/main 2023-09-08 11:24:49 +08:00
thsrite
2e80586436 Merge branch 'jxxghp:main' into main 2023-09-08 11:22:42 +08:00
thsrite
faa6d7dadd fix bug 2023-09-08 11:20:25 +08:00
jxxghp
071c81d52c v1.1.6
- 修复了一个未设置媒体服务器时订阅日志报错的问题
- 媒体库刮削插件支持覆盖已有元数据和图片
- 新增了一个沿用已有刮削名称的开关(默认开),避免TMDB信息变化时导致整理后名称不一致
- 刮削时季的海报优先使用TMDB的图片
- 增加了内建重启失败时的提示
2023-09-08 11:01:37 +08:00
jxxghp
52d4feb583 Update README.md 2023-09-08 10:45:55 +08:00
jxxghp
584e05e63e fix ui 2023-09-08 10:34:05 +08:00
jxxghp
061ff322ab fix bug 2023-09-08 10:26:00 +08:00
jxxghp
a2bcf8df9a Merge remote-tracking branch 'origin/main' 2023-09-08 10:03:23 +08:00
jxxghp
6c85040eb6 fix plugin 2023-09-08 10:03:14 +08:00
jxxghp
2e5d892120 fix plugin 2023-09-08 09:44:51 +08:00
jxxghp
43d108aea9 Merge pull request #498 from thsrite/main 2023-09-08 09:44:07 +08:00
thsrite
c46b1dd116 fix 消息转发插件bug 2023-09-08 09:22:59 +08:00
jxxghp
d3fac56e9a fix 2023-09-08 08:05:54 +08:00
jxxghp
b3f5b87b02 fix 2023-09-08 08:04:09 +08:00
jxxghp
03abdf9cb4 fix 2023-09-08 07:52:05 +08:00
jxxghp
42bc354e06 fix 2023-09-08 07:39:08 +08:00
jxxghp
02e81a79b2 fix 2023-09-07 23:11:08 +08:00
jxxghp
9fa4b8dfbe fix 2023-09-07 23:04:35 +08:00
jxxghp
366f59623a fix 2023-09-07 23:00:51 +08:00
jxxghp
d4c28500b7 fix plugin 2023-09-07 22:04:07 +08:00
jxxghp
5780344c43 fix 2023-09-07 20:19:03 +08:00
jxxghp
18970efc1a add index 2023-09-07 18:23:43 +08:00
jxxghp
5725584176 add index 2023-09-07 18:23:30 +08:00
jxxghp
4e26168ab5 fix plugin 2023-09-07 17:40:09 +08:00
jxxghp
f694dee71d fix 2023-09-07 16:16:04 +08:00
jxxghp
a9db0f6bbf fix 2023-09-07 16:05:48 +08:00
jxxghp
7efcde89b9 fix 2023-09-07 14:59:32 +08:00
jxxghp
1c07b306c3 Merge pull request #489 from thsrite/main
fix 目录监控已处理逻辑 && feat 新增已入库媒体是否跟随TMDB信息变化开关,关闭则延用媒体库名称
2023-09-07 14:29:08 +08:00
jxxghp
6c59a5ebb0 Merge branch 'main' into main 2023-09-07 14:29:01 +08:00
thsrite
4c7321a738 fix 2023-09-07 13:57:27 +08:00
jxxghp
f42fd023bb fix #490 2023-09-07 13:39:08 +08:00
jxxghp
9b8a4ebdd4 fix 2023-09-07 12:56:39 +08:00
jxxghp
443e2d8104 fix 减少刮削识别次数 2023-09-07 12:51:49 +08:00
jxxghp
2c61d439ca feat 媒体库刮削支持覆盖
fix 类型声明
2023-09-07 12:35:35 +08:00
thsrite
e01268222c fix 2023-09-07 12:27:04 +08:00
thsrite
27ff77b504 fix type 2023-09-07 12:25:01 +08:00
thsrite
bf8893d71b fix 文件所在文件夹重新刮削bug 2023-09-07 11:16:11 +08:00
thsrite
54b09a17c2 fix 2023-09-07 11:12:16 +08:00
thsrite
b01621049b feat 新增已入库媒体是否跟随TMDB信息变化开关,关闭则延用媒体库名称 2023-09-07 10:55:01 +08:00
thsrite
e5dc40e3c1 fix token过期后重新获取、重新发送请求 2023-09-07 10:24:21 +08:00
thsrite
44d4bcdd19 fix 目录监控已处理逻辑 2023-09-07 10:16:44 +08:00
jxxghp
b899b23d04 fix 2023-09-07 08:37:57 +08:00
jxxghp
fa23012adb fix #486 季图片优先使用TMDB的 2023-09-07 08:03:05 +08:00
jxxghp
d836b385ae fix 2023-09-07 07:20:10 +08:00
jxxghp
15a0bc6c12 fix 重启失败提示 2023-09-06 21:48:09 +08:00
jxxghp
22791e361d 更新 README.md 2023-09-06 21:41:23 +08:00
jxxghp
47b7dade5d Merge pull request #484 from thsrite/main 2023-09-06 21:23:32 +08:00
thsrite
c57d13afcc fix 优化同步删除插件msg 2023-09-06 21:21:00 +08:00
jxxghp
8db1c2952c Merge remote-tracking branch 'origin/main' 2023-09-06 21:15:21 +08:00
jxxghp
28c19bc4e3 fix 优化文件整理进度提示 2023-09-06 21:15:10 +08:00
jxxghp
fbef1735b0 Merge pull request #482 from thsrite/main
fix bug
2023-09-06 20:19:44 +08:00
thsrite
9869af992b fix bug 2023-09-06 20:18:43 +08:00
jxxghp
b6cb241b8a Merge pull request #480 from WPF0414/main
fix:限速通知速率展示问题
2023-09-06 20:09:02 +08:00
jxxghp
7edf8e7c30 Merge pull request #481 from thsrite/main
fix 删除辅种bug
2023-09-06 20:07:42 +08:00
thsrite
452161f1b8 fix 删除辅种bug 2023-09-06 20:05:29 +08:00
jxxghp
f75abb27b6 v1.1.5
- 修复了批量整理时只刮削第一个文件的问题
- 修复了多下载任务同一下载目录时会重复处理文件的问题
- 支持在WEB页面操作重启(需要映射`/var/run/docker.sock`文件到容器)
2023-09-06 19:54:23 +08:00
wangpengfei
30311e8e56 fix:限速通知
修复限速时通知错误问题
2023-09-06 19:50:18 +08:00
jxxghp
adff3b22e9 Merge pull request #476 from thsrite/main
fix 媒体库同步删除插件优化
2023-09-06 16:55:10 +08:00
thsrite
013c0dea3b fix NAStool同步插件不处理download_hash 2023-09-06 16:22:32 +08:00
jxxghp
c593c3ba16 fix #461 已转移成功的文件不重复处理 2023-09-06 16:12:40 +08:00
jxxghp
61b74735de fix #464 2023-09-06 16:00:42 +08:00
thsrite
952cae50e2 fix 同步删除插件删种逻辑 2023-09-06 15:57:46 +08:00
thsrite
7a9f89e86c fix 删除同步删除插件交互命令 2023-09-06 15:34:35 +08:00
jxxghp
f14d8bec1b fix api 2023-09-06 15:29:52 +08:00
thsrite
697d5a815b fix 标题不一致时防误删 2023-09-06 15:07:06 +08:00
thsrite
cfeaa2674d fix 媒体库同步删除插件优化 2023-09-06 14:26:24 +08:00
jxxghp
08f046f059 fix #465 批量转移时只刮削一个文件的问题 2023-09-06 13:04:18 +08:00
jxxghp
a66912f41a fix #465 批量转移时只刮削一个文件的问题 2023-09-06 13:01:13 +08:00
jxxghp
f244728a96 Merge remote-tracking branch 'origin/main' 2023-09-06 12:56:04 +08:00
jxxghp
576ac08a05 feat 内建重启 2023-09-06 12:55:48 +08:00
jxxghp
e874b3f294 Merge pull request #474 from thsrite/main
fix NAStool记录同步增加进度…
2023-09-06 11:34:13 +08:00
thsrite
90ff0fc793 fix NAStool记录同步增加进度… 2023-09-06 11:32:34 +08:00
jxxghp
259e8fc2e1 fix #463 2023-09-06 11:29:47 +08:00
jxxghp
5c0be93913 Merge pull request #471 from thsrite/main 2023-09-06 10:47:21 +08:00
thsrite
e84a5c74f6 fix 同步删除插件防重复消费 2023-09-06 09:37:02 +08:00
jxxghp
5145527d0e fix #456 2023-09-06 08:34:04 +08:00
jxxghp
e3f7f873c0 Merge pull request #462 from WPF0414/main 2023-09-05 22:38:21 +08:00
wangpengfei
84a2db2247 Update __init__.py
修复按比例的bug
2023-09-05 22:35:39 +08:00
jxxghp
4902d5ebed feat 本地文件系统判重 2023-09-05 20:32:38 +08:00
jxxghp
243391ee30 fix release 2023-09-05 19:57:24 +08:00
jxxghp
c424de65b3 - 修复了站点数据统计某些情况下不发消息的问题
- 修复了播放限速TR不生效的问题
- 优化了下载器文件同步插件
- 优化了数据库异常处理
- 媒体库同步删除插件支持Emby Webhook方式。
- 微信现在会自动添加交互操作菜单了
- 新增了一套UI主题配色
2023-09-05 19:52:46 +08:00
jxxghp
2077eede8c Merge pull request #459 from thsrite/main 2023-09-05 19:48:27 +08:00
thsrite
876d1e01b4 fix 签到插件strip 2023-09-05 19:33:27 +08:00
thsrite
dec022fd89 fix 同步删除插件 2023-09-05 19:30:46 +08:00
jxxghp
83829cbe27 Merge pull request #458 from thsrite/main 2023-09-05 19:08:41 +08:00
thsrite
8249f9356f fix 同步删除插件适配emby webhook方式! 2023-09-05 19:07:07 +08:00
jxxghp
b5fc6cdd1e fix 统一处理db事务回滚 2023-09-05 18:19:02 +08:00
jxxghp
51b959cff8 Merge remote-tracking branch 'origin/main' 2023-09-05 17:11:09 +08:00
jxxghp
36880a8b7d fix 下载文件记录只登记选中的文件 2023-09-05 17:11:02 +08:00
jxxghp
380cc7552f Merge pull request #453 from thsrite/main
fix 同步插件路径替换
2023-09-05 16:57:04 +08:00
thsrite
0f1c8cb226 Merge branch 'jxxghp:main' into main 2023-09-05 16:55:08 +08:00
thsrite
7435fb0c10 fix tr文件同步过滤掉未下载的文件 2023-09-05 16:52:02 +08:00
jxxghp
1a03981463 fix #193 2023-09-05 16:43:34 +08:00
jxxghp
4cb7a488a9 fix #193 2023-09-05 16:43:02 +08:00
jxxghp
c69762d4c9 fix #448 TR限速不生效的问题 2023-09-05 16:35:15 +08:00
thsrite
03d9bf6d05 fix 路径替换 2023-09-05 16:20:42 +08:00
jxxghp
6a08b4ba7f fix 提高DB连接等待时间,避免database locked报错。 2023-09-05 16:18:04 +08:00
jxxghp
99218515ea fix 部分数据库操作没有Commit 2023-09-05 16:12:43 +08:00
jxxghp
c3a0a839c3 Merge pull request #450 from thsrite/main
fix 交互命令消息原路返回
2023-09-05 13:39:14 +08:00
thsrite
351513bcbc fix 交互命令消息原路返回 2023-09-05 13:19:25 +08:00
jxxghp
ed5dec1b0f feat 种子刷新频率控制 2023-09-05 12:39:01 +08:00
jxxghp
c62b29edc4 fix 微信菜单 2023-09-05 11:54:16 +08:00
jxxghp
c224a7c07b fix bug 2023-09-05 11:52:46 +08:00
jxxghp
a7b244a4b4 fix README.md 2023-09-05 11:48:36 +08:00
jxxghp
b564f70c63 feat 微信自动注册菜单 2023-09-05 11:33:42 +08:00
jxxghp
551f32491d fix 微信菜单长度 2023-09-05 11:23:21 +08:00
jxxghp
2826b9411d fix bug 2023-09-05 11:20:06 +08:00
jxxghp
4bf9045784 fix bug 2023-09-05 11:01:12 +08:00
jxxghp
114788e3ed feat 微信自动注册菜单 2023-09-05 10:58:19 +08:00
jxxghp
bb729bf976 fix #442 2023-09-05 08:39:23 +08:00
jxxghp
bedc885232 Merge pull request #440 from amtoaer/memory_percent 2023-09-04 23:18:46 +08:00
amtoaer
21e39611bc feat: 内存占用图使用百分比 2023-09-04 23:07:39 +08:00
jxxghp
73e7e547ea Merge pull request #437 from thsrite/main 2023-09-04 22:22:40 +08:00
thsrite
bc25d71b88 fix #407 2023-09-04 22:21:03 +08:00
jxxghp
ff8a9dc8c7 v1.1.3
- 修复了历史记录重新整理记录缺失的问题
- 优化了数据库会话处理
- 优化了普通用户的菜单权限
- 优化了文件管理UI细节
- 调整了仪表仪显示内容
- 捷径新增了过滤规则测试功能
- 图片刮削下载失败时支持重试
- 播放限速插件支持手动配置不限速地址范围
2023-09-04 21:21:04 +08:00
jxxghp
4ee7daa673 Merge remote-tracking branch 'origin/main' 2023-09-04 20:40:28 +08:00
jxxghp
aca1673ee3 fix db session 2023-09-04 20:40:17 +08:00
jxxghp
87ece98471 Merge pull request #435 from thsrite/main 2023-09-04 20:24:39 +08:00
thsrite
4c16cd7bfb fix b7d2168f 2023-09-04 20:20:42 +08:00
jxxghp
712af24a72 fix 2023-09-04 20:13:16 +08:00
jxxghp
b7d2168f8e fix #434 2023-09-04 19:30:06 +08:00
jxxghp
65ad7123f9 fix #419 2023-09-04 18:08:11 +08:00
jxxghp
ce42e48b37 fix login api 2023-09-04 17:48:44 +08:00
jxxghp
45b53da056 Merge pull request #428 from thsrite/main 2023-09-04 11:47:52 +08:00
thsrite
70f93e02e4 fix #365 限速插件增加不限速地址范围,不设置默认不限速内网ip 2023-09-04 11:40:19 +08:00
jxxghp
e4b63eacae add system apis 2023-09-04 11:07:30 +08:00
jxxghp
96f17e2bc2 fix #426 刮削下载图片重试 2023-09-04 10:14:05 +08:00
jxxghp
7eb77875f1 fix 重连机制 2023-09-03 21:59:18 +08:00
jxxghp
bbc27bbe19 更新 README.md 2023-09-03 21:39:47 +08:00
jxxghp
3691b2a10b add 过滤规则测试API 2023-09-03 18:36:06 +08:00
jxxghp
08a3d02daf fix 调整重新整理的删除顺序 2023-09-03 17:37:06 +08:00
jxxghp
57abc7816b Merge pull request #420 from thsrite/main 2023-09-03 16:30:01 +08:00
thsrite
69c277777e fix 签到周期重启bug 2023-09-03 16:23:38 +08:00
jxxghp
5f88fe81e3 fix 手动整理时剧集处理 2023-09-03 14:38:24 +08:00
jxxghp
d043dbd89e v1.1.2 2023-09-03 14:22:26 +08:00
jxxghp
53a2887717 fix 蓝光原盘刮削 2023-09-03 14:14:41 +08:00
jxxghp
28d181db44 fix #403 修复蓝光原盘转移失败 2023-09-03 13:40:39 +08:00
jxxghp
7d3f43e488 fix 媒体库同步使用独立数据库会话 2023-09-03 13:11:42 +08:00
jxxghp
62df3f7c84 add 文件识别API 2023-09-03 13:04:08 +08:00
jxxghp
1338a061c4 更新 __init__.py 2023-09-03 11:14:36 +08:00
jxxghp
4f26f0607a 更新 transfer.py 2023-09-03 11:13:42 +08:00
jxxghp
b72aa314b6 emby/jellyfin异常数据兼容 2023-09-03 09:50:05 +08:00
jxxghp
082ec8d718 fix #340 前端已调整日志位置
fix #239 增加转移屏蔽词设置
2023-09-03 09:29:38 +08:00
jxxghp
e785f20c5a fix #352 历史记录重新整理时删除原已整理的文件 2023-09-03 08:40:26 +08:00
jxxghp
0050a96faf fix #406 支持QB分类自动管理模式 2023-09-03 07:56:20 +08:00
jxxghp
31b460f89f Merge pull request #408 from amtoaer/fix_subscribe_lack 2023-09-03 07:16:58 +08:00
jxxghp
89cd2bbadc Merge pull request #405 from thsrite/main 2023-09-03 07:15:16 +08:00
amtoaer
7d19467b6c fix: 修复自定义开始集导致的订阅集数不刷新问题 2023-09-03 01:53:47 +08:00
thsrite
97667249d5 Merge branch 'jxxghp:main' into main 2023-09-02 23:49:23 +08:00
thsrite
2e2472a387 fix 目录监控汇总消息适当增加处理时间 2023-09-02 23:48:48 +08:00
jxxghp
4b10028690 fix update 2023-09-02 22:27:09 +08:00
jxxghp
e0a492d8ab v1.1.1 2023-09-02 22:05:41 +08:00
jxxghp
52e89747b7 feat 电视剧无法识别集时发送消息 2023-09-02 21:38:01 +08:00
jxxghp
59b947fa65 fix 目录监控登录转移方式错误 2023-09-02 21:22:03 +08:00
jxxghp
212e2f1287 Merge pull request #399 from thsrite/main 2023-09-02 18:31:46 +08:00
thsrite
685be88c46 fix 目录监控增加失败历史记录 2023-09-02 18:28:12 +08:00
jxxghp
8297b3e199 更新 scheduler.py 2023-09-02 18:08:34 +08:00
jxxghp
75c5844d64 Merge pull request #397 from DDS-Derek/main 2023-09-02 17:53:24 +08:00
DDSRem
ad5ca69bbb feat: 前端下载前判断版本号是否获取成功 2023-09-02 17:49:14 +08:00
jxxghp
6befa35a26 Merge pull request #395 from WithdewHua/fix-torrentremover 2023-09-02 16:29:41 +08:00
WithdewHua
4fec6aede4 fix: 删除自动删种插件通知消息中多余的文件单位 2023-09-02 16:24:09 +08:00
jxxghp
68a3bc8732 Merge pull request #394 from amtoaer/main 2023-09-02 16:06:36 +08:00
amtoaer
ba2745266a fix: 修复消息中百分比多乘了 100 的问题 2023-09-02 16:03:28 +08:00
jxxghp
2fcf5039ff Merge pull request #392 from DDS-Derek/main 2023-09-02 14:44:43 +08:00
DDSRem
b37dc4471e fix: update env 2023-09-02 14:43:33 +08:00
jxxghp
ffc5c48830 更新 __init__.py 2023-09-02 13:34:17 +08:00
jxxghp
dbe3701032 Merge pull request #385 from DDS-Derek/main 2023-09-02 11:16:38 +08:00
DDSRem
751d405aac fix: update curl 2023-09-02 11:15:44 +08:00
jxxghp
9224169f31 Merge pull request #384 from DDS-Derek/main 2023-09-02 10:54:44 +08:00
DDSRem
62c1a924e8 feat: dev update 2023-09-02 10:52:19 +08:00
jxxghp
9fdd838b7a Merge pull request #368 from DDS-Derek/main 2023-09-02 08:40:57 +08:00
DDSDerek
510911b7a3 feat: add discussions 2023-09-02 08:39:52 +08:00
DDSDerek
36e68f44dc fix: delete discussion 2023-09-02 08:38:39 +08:00
jxxghp
374e633ca7 fix 调整数据库会话 #330 2023-09-02 08:18:01 +08:00
jxxghp
ec8c9c996a fix #356 猫站数据统计问题 2023-09-02 07:57:44 +08:00
jxxghp
3c753686c6 fix #359 定期自动刷新订阅的TMDB数据 2023-09-02 07:33:27 +08:00
jxxghp
5f4580282e fix #362 恢复动漫独立目录二级分类 2023-09-02 07:11:21 +08:00
jxxghp
5d9e0b699c fix 转移历史记录没有时间 2023-09-02 07:09:38 +08:00
jxxghp
5debfca89a fix #361
fix #357
2023-09-01 22:42:58 +08:00
jxxghp
3eeb9e299a Merge pull request #360 from thsrite/main 2023-09-01 21:16:28 +08:00
thsrite
9c4aba10bf Update downloadhistory_oper.py 2023-09-01 21:12:32 +08:00
jxxghp
7b37d86527 fix #358 2023-09-01 18:28:05 +08:00
jxxghp
55c061176d fix #358 2023-09-01 18:24:43 +08:00
jxxghp
5dc11b07e3 fix #342 2023-09-01 17:30:21 +08:00
jxxghp
0bb67824bd Merge remote-tracking branch 'origin/main' 2023-09-01 15:00:37 +08:00
jxxghp
ac1dcbed3c fix 偿试减少会话使用 2023-09-01 15:00:27 +08:00
jxxghp
d0a586a46b 更新 transfer.py 2023-09-01 12:07:37 +08:00
jxxghp
fa8dcea7da 更新 system.py 2023-09-01 12:07:04 +08:00
jxxghp
76a94a80ef Merge pull request #354 from thsrite/main 2023-09-01 11:51:06 +08:00
thsrite
9139c1297e fix 订阅创建一分钟内不自动搜索,留出编辑订阅的时间 2023-09-01 11:48:31 +08:00
jxxghp
4dba739d54 fix bug 2023-09-01 11:35:46 +08:00
jxxghp
fe80f86518 fix 2023-09-01 11:05:17 +08:00
jxxghp
7307105dcd - 站点新增支持Rousi、蝴蝶、OpenCD
- 电影搜索增加了纪录片类型
- 支持设置自建OCR识别服务地址
- 下载器监控、手动整理按文件登记历史记录
- 新增了下载器文件同步插件,可将非MoviePilot添加下载的任务文件导入数据库,以便删除文件时联动删除下载任务
- 整理历史记录支持批量操作
- 播放限速插件支持智能限速
- 刮削海报优先使用TMDB图片
- 修复了憨憨站点数据统计
- 修复了过滤规则无法清空的问题
- 修复了自定义订阅已处理状态计算的问题
- 修复了Slack消息过长导致发送失败的问题
- 修复了动漫独立目录时出现两级目录的问题
- 调整了暗黑主题的UI配色
2023-09-01 11:01:13 +08:00
jxxghp
1c7715d94c 更新 transfer.py 2023-09-01 07:35:27 +08:00
jxxghp
4dd2d6d307 更新 __init__.py 2023-09-01 07:34:12 +08:00
jxxghp
7cfd05a7a5 fix 通知标题计算方法 2023-09-01 07:29:49 +08:00
jxxghp
8eab38c91e fix 优化目录监控通知标题计算方法 2023-09-01 07:16:39 +08:00
jxxghp
6ad78fa875 add 剧集格式化方法 2023-08-31 21:29:28 +08:00
jxxghp
781cffb255 fix bug 2023-08-31 20:12:38 +08:00
jxxghp
2a7fc7bbe6 Merge pull request #350 from thsrite/main
feat 播放限速插件支持智能限速、不限速地址
2023-08-31 19:48:48 +08:00
thsrite
f65da9b202 fix 删除不限速地址配置 2023-08-31 19:48:00 +08:00
thsrite
0cf11db76a fix 自动限速 2023-08-31 19:33:16 +08:00
thsrite
37bada89ef Merge branch 'main' of https://github.com/thsrite/MoviePilot into main 2023-08-31 19:26:24 +08:00
thsrite
38d6467740 fix 播放限速 2023-08-31 19:26:18 +08:00
thsrite
3bc639bcab fix 播放限速 2023-08-31 19:08:50 +08:00
thsrite
7baa07474c Update __init__.py 2023-08-31 19:01:14 +08:00
jxxghp
8e304f77b4 fix ui 2023-08-31 19:01:10 +08:00
thsrite
93ec8df713 Merge branch 'jxxghp:main' into main 2023-08-31 17:07:06 +08:00
thsrite
8854acf908 Merge remote-tracking branch 'origin/main' 2023-08-31 17:05:55 +08:00
thsrite
143ffd18b7 feat 限速插件支持智能限速 2023-08-31 17:05:47 +08:00
jxxghp
212f9c250f fix #343 2023-08-31 16:38:46 +08:00
jxxghp
fa62943679 fix ui 2023-08-31 16:28:18 +08:00
jxxghp
3f95962ced Merge pull request #347 from thsrite/main
fix 下载器种子排除辅种、防止mp下载任务重复处理
2023-08-31 16:23:49 +08:00
jxxghp
e68aab423e Merge branch 'main' into main 2023-08-31 16:23:42 +08:00
jxxghp
49d51ca13e fix 2023-08-31 16:20:44 +08:00
jxxghp
f6b5994fe5 fix plugin manager 2023-08-31 16:13:19 +08:00
thsrite
8ad75e93a9 fix 下载器任务同步插件支持周期运行 2023-08-31 15:51:39 +08:00
jxxghp
796133e26f fix SyncDownloadFiles 2023-08-31 15:50:46 +08:00
thsrite
8414c5df0a fix 下载器种子排除辅种、防止mp下载任务重复处理 2023-08-31 15:31:35 +08:00
jxxghp
1fcdf633ba Merge pull request #345 from thsrite/main
feat 下载器种子同步插件 && fix 同步删除插件
2023-08-31 15:11:58 +08:00
jxxghp
b503dee631 add opencd 2023-08-31 15:08:18 +08:00
thsrite
0837950334 fix 下载器文件同步插件友情提示 2023-08-31 15:07:30 +08:00
thsrite
95787f6ef6 fix last_sync_time按照下载器设置 2023-08-31 15:02:05 +08:00
thsrite
3943a7a793 fix NAStool数据同步插件 2023-08-31 14:47:21 +08:00
thsrite
9f0bd2b933 fix 签到插件 2023-08-31 14:43:41 +08:00
thsrite
053c89bf9f fix 同步删除插件 2023-08-31 14:37:10 +08:00
thsrite
8739a67679 feat 下载器种子同步插件 2023-08-31 14:33:39 +08:00
jxxghp
cb41086fa3 fix 目录监控从表中查询download_hash 2023-08-31 13:56:51 +08:00
jxxghp
84cbeaada2 fix bug 2023-08-31 13:52:48 +08:00
jxxghp
344742871c fix bug 2023-08-31 12:45:33 +08:00
jxxghp
95df1c4c1c fix bug 2023-08-31 12:28:30 +08:00
jxxghp
593211c037 feat 下载时记录文件清单 2023-08-31 08:37:00 +08:00
jxxghp
f80e5739ca feat 媒体服务器/下载器定时检查重连 2023-08-31 08:15:43 +08:00
jxxghp
17fcd77b8e fix 2023-08-31 07:14:57 +08:00
jxxghp
f0666986f0 fix 2023-08-30 23:59:27 +08:00
jxxghp
854fafd880 fix 2023-08-30 23:07:48 +08:00
jxxghp
bdd45304c8 fix 2023-08-30 22:50:38 +08:00
jxxghp
c372d0451e fix 2023-08-30 22:40:36 +08:00
jxxghp
38eff64c95 need fix 2023-08-30 22:01:07 +08:00
jxxghp
9326676bb6 - 新增了正在热映推荐
- 站点新增支持Rousi、蝴蝶
- 电影搜索增加了纪录片类型
- 修复了憨憨站点数据统计
- 修复了过滤规则无法清空的问题
- 修复了自定义订阅已处理状态计算的问题
- 修复了Slack消息过长导致发送失败的问题
2023-08-30 19:39:36 +08:00
jxxghp
7df1d807bb fix README 2023-08-30 19:15:46 +08:00
jxxghp
cce543274e fix Ocr Host 2023-08-30 19:00:48 +08:00
jxxghp
3b7c1fed74 fix #283 2023-08-30 17:32:59 +08:00
jxxghp
e0dfbc213a fix #283 2023-08-30 17:09:49 +08:00
jxxghp
d76fa9bb00 fix #324 2023-08-30 16:56:49 +08:00
jxxghp
e59a498826 fix #271 2023-08-30 16:38:41 +08:00
jxxghp
e6452d68bb fix #326 2023-08-30 16:14:21 +08:00
jxxghp
0d830b237b fix #336 2023-08-30 15:51:01 +08:00
jxxghp
470ebb7b79 Merge remote-tracking branch 'origin/main' 2023-08-30 15:46:13 +08:00
jxxghp
a6819c08bf fix #286 2023-08-30 15:46:04 +08:00
jxxghp
16ba4587e1 Merge pull request #338 from thsrite/main 2023-08-30 15:37:00 +08:00
jxxghp
911651a5f7 Merge remote-tracking branch 'origin/main' 2023-08-30 15:31:21 +08:00
jxxghp
3f94f5f709 fix 站点数据统计UI 2023-08-30 15:31:11 +08:00
jxxghp
16289d86b6 fix hhanclub数据统计 2023-08-30 14:51:55 +08:00
thsrite
17450c7c70 fix 优选插件get_state 2023-08-30 14:00:20 +08:00
jxxghp
eac9fc02fa Merge pull request #333 from thsrite/main 2023-08-30 12:09:16 +08:00
thsrite
1a026ffb12 fix plugins 2023-08-30 12:03:52 +08:00
thsrite
85477a4bd3 fix #184 2023-08-30 10:26:24 +08:00
jxxghp
f8221bb526 Merge remote-tracking branch 'origin/main' 2023-08-30 08:29:00 +08:00
jxxghp
85a581f0cd feat 推荐新增正在热映
fix 豆瓣搜索API
2023-08-30 08:28:37 +08:00
jxxghp
ae7b48ad9f Merge pull request #325 from DDS-Derek/main 2023-08-29 22:40:58 +08:00
jxxghp
59907af4f4 Create LICENSE 2023-08-29 22:35:46 +08:00
DDSRem
e63f52bee5 feat: optimize image size 2023-08-29 22:20:18 +08:00
jxxghp
b9b8b86019 fix build 2023-08-29 19:47:42 +08:00
jxxghp
bfca8a52d6 fix build 2023-08-29 19:44:40 +08:00
jxxghp
99ccbfef22 Merge pull request #320 from thsrite/main 2023-08-29 19:15:31 +08:00
thsrite
5e2f4b413d fix 2b462a1b 2023-08-29 18:53:31 +08:00
jxxghp
a0ec38a6a9 Merge remote-tracking branch 'origin/main' 2023-08-29 17:14:56 +08:00
jxxghp
eae89b2d36 fix #318 2023-08-29 17:14:45 +08:00
jxxghp
e5926a489d Merge pull request #316 from thsrite/main 2023-08-29 15:49:41 +08:00
thsrite
8acfde7906 fix 签到插件 2023-08-29 15:46:20 +08:00
jxxghp
24a164f47e v1.0.9 2023-08-29 15:15:24 +08:00
jxxghp
72fbbffa02 Merge pull request #315 from thsrite/main
fix 站点签到插件支持仅模拟登陆
2023-08-29 14:04:57 +08:00
thsrite
95a87f3e33 feat 站点签到插件支持仅模拟登陆 2023-08-29 13:49:38 +08:00
jxxghp
55206ea092 fix #299 搜索时去掉特殊字符 2023-08-29 12:29:18 +08:00
jxxghp
c138cda735 fix #300 2023-08-29 12:22:14 +08:00
jxxghp
d0a92531ac fix #301
fix #303
2023-08-29 12:11:25 +08:00
jxxghp
96fc32efd0 fix #308 缺失集计算错误问题 2023-08-29 11:41:30 +08:00
jxxghp
a9a0acc091 fix #312 无年份无季集时优先匹配电影 2023-08-29 11:12:55 +08:00
jxxghp
fa6f2c01e0 fix #313 检查本地存在时未应用订阅总集数的问题 2023-08-29 10:48:27 +08:00
jxxghp
05a0026ea4 fix #306 2023-08-29 08:18:34 +08:00
jxxghp
8f352c23c8 更新 __init__.py 2023-08-28 22:58:09 +08:00
jxxghp
8bc883b621 fix 2023-08-28 19:04:37 +08:00
jxxghp
6a34c7196c Merge pull request #307 from thsrite/main 2023-08-28 14:53:26 +08:00
thsrite
58ded2ef5e feat 订阅站点单独配置 2023-08-28 13:23:56 +08:00
jxxghp
2b462a1b9c fix #305 2023-08-28 13:04:18 +08:00
jxxghp
a6d0504900 Merge pull request #305 from thsrite/main
feat 动漫一级分类 && fix bugs
2023-08-28 12:54:55 +08:00
thsrite
7717afab69 fix 动漫一级分类判断条件 2023-08-28 12:50:47 +08:00
jxxghp
683ba4cfad feat 手动整理支持自动识别批量处理,增加进度显示 2023-08-28 12:50:21 +08:00
jxxghp
921783d6bb fix #304 增加订阅搜索开关且默认关闭 2023-08-28 11:43:55 +08:00
thsrite
b7e9e8ee21 feat 动漫一级分类 2023-08-28 10:01:14 +08:00
thsrite
dadad74085 fix 剧集文件命名没有季,默认1 2023-08-28 10:00:54 +08:00
thsrite
e405c98bae fix qb下载按文件循序下载 2023-08-28 10:00:29 +08:00
jxxghp
9d4bec7d81 fix bug 2023-08-28 08:30:39 +08:00
jxxghp
d6a73d6017 Merge pull request #298 from thsrite/main 2023-08-27 20:40:15 +08:00
thsrite
b4a780aba7 fix #292 2023-08-27 20:30:54 +08:00
thsrite
f15f98fcfc fix 签到每天首次全量签到后续签到命中错误关键词 2023-08-27 20:20:42 +08:00
jxxghp
4bb8b01301 Merge pull request #296 from lightolly/dev/20230827 2023-08-27 18:47:25 +08:00
olly
aa8cb889f8 fix:修复tr下载显示速率问题 2023-08-27 18:22:34 +08:00
jxxghp
9e31c53fa5 Merge pull request #291 from DDS-Derek/main 2023-08-27 13:02:11 +08:00
DDSRem
4b23f3f076 fix: repeat install pysocks 2023-08-27 13:01:18 +08:00
DDSRem
52fac09021 fix: 更新成功提示 2023-08-27 12:38:03 +08:00
DDSRem
bb67e902c5 feat: 优化重启更新逻辑
先安装依赖,再替换文件,防止依赖安装失败导致无法正常启动
2023-08-27 12:36:48 +08:00
DDSRem
6206c5f4a3 fix: 优化代码 2023-08-27 12:29:55 +08:00
DDSRem
de3d3de411 feat: 依赖安装添加代理 2023-08-27 12:21:12 +08:00
jxxghp
91896946d8 fix 文件管理列表图标 2023-08-27 10:31:06 +08:00
jxxghp
cc545490cd fix 自动更新时重新安装依赖 2023-08-27 09:49:42 +08:00
jxxghp
4cfa051dfc v1.0.8 2023-08-27 09:44:15 +08:00
jxxghp
41a45b1a8d add dev最新代码一键升级脚本 2023-08-27 09:12:28 +08:00
jxxghp
66c7ca0b96 fix #272 支持使用Socks5代理 2023-08-27 08:42:52 +08:00
jxxghp
214a766d7d fix #284 #273 修复自定义总集数无效的问题 2023-08-27 08:34:02 +08:00
jxxghp
310dd7c229 fix API 2023-08-27 08:18:31 +08:00
jxxghp
4b91510695 fix #267 电影年份匹配上下浮动1年 2023-08-27 07:48:16 +08:00
jxxghp
f52deb3ff2 fix #285 2023-08-27 07:44:40 +08:00
jxxghp
9be9006013 fix 手动整理API 2023-08-26 23:51:48 +08:00
jxxghp
fc2312a045 feat 手动整理API 2023-08-26 22:47:41 +08:00
jxxghp
c593f6423c fix 重命名接口 2023-08-26 19:58:05 +08:00
jxxghp
200e5ff027 feat 文件下载、图片读取等Api 2023-08-26 19:27:01 +08:00
jxxghp
d7f2bbb121 Merge pull request #281 from thsrite/main 2023-08-26 15:01:29 +08:00
jxxghp
f4a1f420c5 feat 文件管理API 2023-08-26 14:31:05 +08:00
thsrite
ed8e02bb38 fix 2023-08-26 11:20:41 +08:00
thsrite
4049468444 Merge remote-tracking branch 'origin/main' into main 2023-08-26 10:51:26 +08:00
thsrite
f8d5e3f438 fix 目录监控消息通知剧集集数错误 2023-08-26 10:51:12 +08:00
jxxghp
fc50540ab1 Merge pull request #274 from thsrite/main 2023-08-25 21:32:09 +08:00
thsrite
624365542c fix 目录监控判断 2023-08-25 21:04:18 +08:00
jxxghp
bb93919707 Merge remote-tracking branch 'origin/main' 2023-08-25 17:05:47 +08:00
jxxghp
3acb2b254c fix #270 设置了开始集数时总集数比对出错的问题 2023-08-25 17:05:31 +08:00
jxxghp
ff900c5d01 Merge pull request #266 from thsrite/main 2023-08-25 16:06:04 +08:00
thsrite
8171124503 feat 可配置交互搜索自动下载用户 2023-08-25 14:27:19 +08:00
jxxghp
dbd858b27d fix requirements 2023-08-25 13:42:36 +08:00
jxxghp
df5337947c v1.0.7 2023-08-25 12:48:01 +08:00
jxxghp
ddf6f5c0b6 feat 种子缓存拆分为独立的模块 2023-08-25 12:44:59 +08:00
jxxghp
d879e54bb7 fix hhanclub 2023-08-25 12:24:25 +08:00
jxxghp
7666fa6db3 Merge pull request #260 from developer-wlj/wlj0807 2023-08-25 11:56:40 +08:00
jxxghp
cef33d370a fix #68 修改TVDB模块以支持代理 2023-08-25 11:34:41 +08:00
jxxghp
76cd4048e3 fix #241 2023-08-24 21:11:13 +08:00
jxxghp
6505aa9efb fix 蓝光原盘过滤 2023-08-24 20:43:51 +08:00
mayun110
81a29d3604 fix #233 2023-08-24 20:31:32 +08:00
jxxghp
86d7dceb84 fix #258 2023-08-24 20:12:14 +08:00
jxxghp
5775accd35 fix 蓝光原盘转移 2023-08-24 17:25:25 +08:00
jxxghp
fda8e3fdb6 fix 适当延长监控消息发送周期 2023-08-24 17:18:42 +08:00
jxxghp
3f72f89b15 fix README.md 2023-08-24 17:01:22 +08:00
jxxghp
6727b65ed4 fix service address 2023-08-24 16:46:26 +08:00
jxxghp
583a04167a Merge pull request #254 from thsrite/main
fix 目录监控转移消息统一发送
2023-08-24 13:54:23 +08:00
jxxghp
6fc9bd4ea0 fix hhanclub
add hudbt
2023-08-24 13:45:34 +08:00
thsrite
1361ed1a16 fix 目录监控target_path 2023-08-24 13:29:04 +08:00
thsrite
2781ed2ae1 fix 合并消息episode 2023-08-24 13:11:46 +08:00
thsrite
dd9258dc42 fix 目录监控转移消息统一发送 2023-08-24 13:07:31 +08:00
jxxghp
7c39a99e60 fix #178 蓝光原盘转移 2023-08-24 11:40:28 +08:00
jxxghp
96a30e8e24 fix 2023-08-24 11:12:09 +08:00
jxxghp
004047b6bb 回滚PR #233 2023-08-24 11:01:36 +08:00
jxxghp
10ee8d33fa fix 文件转移Bug 2023-08-24 10:34:38 +08:00
jxxghp
1bbb92d92b fix log 2023-08-24 10:19:52 +08:00
jxxghp
c246c036c9 fix api bug 2023-08-24 10:12:45 +08:00
jxxghp
b435b84782 fix bug 2023-08-24 09:11:02 +08:00
jxxghp
9607c398ff v1.0.6 2023-08-24 08:35:15 +08:00
jxxghp
2e2ce32c54 feat 支持IMDBID搜索 2023-08-24 08:34:29 +08:00
jxxghp
4298e36d74 feat 同名时优先匹配新片 2023-08-23 22:01:19 +08:00
jxxghp
e3a29178b6 feat 同名时优先匹配新片 2023-08-23 21:52:03 +08:00
jxxghp
613a4220d7 fix logging 2023-08-23 21:21:56 +08:00
jxxghp
91b3fe5b1d fix bug 2023-08-23 19:51:22 +08:00
jxxghp
8bb4db227a color logging 2023-08-23 19:09:15 +08:00
jxxghp
b82f232642 color logging 2023-08-23 18:59:33 +08:00
jxxghp
62c92820f0 Merge remote-tracking branch 'origin/main' 2023-08-23 18:57:23 +08:00
jxxghp
80bb49776a color logging 2023-08-23 18:57:10 +08:00
jxxghp
cad7687de6 更新 servarr.py 2023-08-23 18:22:08 +08:00
jxxghp
f0a680abc6 fix logging 2023-08-23 15:49:02 +08:00
jxxghp
318ba9816b fix log level 2023-08-23 13:46:19 +08:00
jxxghp
89ff7a4603 fix log level 2023-08-23 13:42:09 +08:00
jxxghp
4586a0c1fe fix bug 2023-08-23 12:54:38 +08:00
jxxghp
2682a80815 fix 转移历史记录 2023-08-23 12:50:08 +08:00
jxxghp
6f159958a1 fix bugs 2023-08-23 12:27:54 +08:00
jxxghp
d59ed1e160 Merge pull request #233 from developer-wlj/wlj0807 2023-08-23 11:38:45 +08:00
jxxghp
66a1f25465 feat 下载器监控支持转移合集 2023-08-23 08:47:03 +08:00
jxxghp
e5e33d4486 fix 自动更新 2023-08-23 07:10:43 +08:00
jxxghp
b77c17a999 fix RSS订阅插件 2023-08-23 06:55:14 +08:00
mayun110
e698e30826 fix #231 临时目录问题或重复通知文件转移失败问题 2023-08-22 23:48:08 +08:00
jxxghp
e448cafb21 fix 插件重复启动的问题 2023-08-22 21:02:35 +08:00
jxxghp
45faf0cf18 fix text 2023-08-22 20:15:58 +08:00
jxxghp
91e3788b73 v1.0.5 2023-08-22 18:03:45 +08:00
jxxghp
a890b4f01d fix trackers 2023-08-22 17:57:52 +08:00
jxxghp
c958e0e458 fix TorrentTransfer,通过下载器API补充Tracker 2023-08-22 17:30:54 +08:00
jxxghp
b831d71bf7 fix 多通知Bug 2023-08-22 13:43:10 +08:00
jxxghp
0cc104ef11 更新 __init__.py 2023-08-22 13:20:36 +08:00
jxxghp
b9c441108a 更新 __init__.py 2023-08-22 13:18:21 +08:00
jxxghp
4bdacf7ac1 Merge pull request #224 from yubanmeiqin9048/main-1 2023-08-22 12:34:21 +08:00
jxxghp
7435b7c702 feat 新增清理TMDB缓存命令 2023-08-22 12:32:48 +08:00
yubanmeiqin9048
42c7371d16 fix build 2023-08-22 12:21:31 +08:00
jxxghp
afe5ee9abb fix 通知消息不会多渠道发送问题 2023-08-22 11:39:03 +08:00
jxxghp
14c0063e7c fix hhanclub 2023-08-22 10:50:47 +08:00
jxxghp
064cf4c5c3 fix build 前端下载最新Release而不是同版本号Release 2023-08-22 10:44:09 +08:00
jxxghp
c9452d29c1 v1.0.4 2023-08-22 08:33:47 +08:00
jxxghp
781de29591 fix 数据库连接复用 2023-08-22 08:13:44 +08:00
jxxghp
a202b5efdd fix #215 2023-08-22 07:00:00 +08:00
jxxghp
f02ac2eaef fix 2023-08-21 18:05:17 +08:00
jxxghp
c82ab161d0 fix TorrentTransfer 2023-08-21 17:58:27 +08:00
jxxghp
538c20ee56 fix hhanclub
fix #206 目录监控移动模式删除空目录
2023-08-21 17:48:29 +08:00
jxxghp
995a672bf3 fix hhanclub 2023-08-21 17:35:22 +08:00
jxxghp
7acbd0904b fix tmdbapi 2023-08-21 16:44:42 +08:00
jxxghp
3b95453363 Merge pull request #210 from thsrite/main 2023-08-21 13:51:02 +08:00
thsrite
bd91ea5c50 格式化代码 2023-08-21 13:24:05 +08:00
thsrite
f387846732 fix download_hash补充逻辑 2023-08-21 13:18:42 +08:00
thsrite
7b0ba6112e fix 2023-08-21 13:12:28 +08:00
thsrite
6f927be081 fix 2023-08-21 13:11:19 +08:00
thsrite
1e7f5bf04e fix 尝试补充mp之外下载的download_hash 2023-08-21 13:09:17 +08:00
jxxghp
6ee934a745 Merge remote-tracking branch 'origin/main' 2023-08-21 12:33:00 +08:00
jxxghp
0d626ad4b8 fix H265|HEVC H254|AVC 2023-08-21 12:32:48 +08:00
jxxghp
3379a68476 Merge pull request #208 from thsrite/main 2023-08-21 12:02:34 +08:00
thsrite
6afdfa3b97 fix 2023-08-21 11:55:32 +08:00
thsrite
6337a72b0f fix #204 2023-08-21 11:02:38 +08:00
thsrite
4135df693c fix #202 2023-08-21 10:41:25 +08:00
jxxghp
75bd4d4b77 fix 不优先下载整季的Bug 2023-08-21 08:21:42 +08:00
jxxghp
5d9b45a2f8 Merge pull request #201 from thsrite/main 2023-08-21 07:51:24 +08:00
thsrite
2c4ef1f3a9 fix 2023-08-20 22:24:37 +08:00
thsrite
1ad39faf24 fix cookiecloud 2023-08-20 21:54:02 +08:00
thsrite
dc88fb74fd fix 暂停种子 2023-08-20 21:45:07 +08:00
thsrite
062e9e467d fix 2023-08-20 21:24:40 +08:00
thsrite
8b8473b92c fix 匹配download_hash 2023-08-20 21:08:30 +08:00
thsrite
dd76909d45 Merge remote-tracking branch 'origin/main' into main 2023-08-20 20:54:14 +08:00
thsrite
ebbd48dcf6 fix 同步删除逻辑 2023-08-20 20:54:06 +08:00
thsrite
aa27af811f fix 目录监控获取真实download_hash 2023-08-20 20:53:54 +08:00
jxxghp
81d6fcbe3f fix README.md 2023-08-20 19:16:11 +08:00
jxxghp
8a00a9c389 更新 README.md 2023-08-20 19:09:18 +08:00
jxxghp
3c96f1c687 Merge pull request #198 from thsrite/main 2023-08-20 18:48:26 +08:00
thsrite
e0497f590a fix 2023-08-20 18:38:54 +08:00
thsrite
40cf80406e fix 数据同步还原同步开关 2023-08-20 18:37:04 +08:00
thsrite
a469136049 fix 目录监控增加转移时间 2023-08-20 18:36:47 +08:00
194 changed files with 17859 additions and 4997 deletions

3
.dockerignore Normal file
View File

@@ -0,0 +1,3 @@
# Ignore git
.github
.git

View File

@@ -1,5 +1,8 @@
blank_issues_enabled: false
contact_links:
- name: 项目讨论
url: https://github.com/jxxghp/MoviePilot/discussions/new/choose
about: discussion
- name: Telegram 频道
url: https://t.me/moviepilot_channel
about: 更新日志

View File

@@ -1,17 +0,0 @@
name: 项目讨论
description: discussion
title: "[Discussion]: "
labels: ["discussion"]
body:
- type: markdown
attributes:
value: |
[BUG](https://github.com/jxxghp/MoviePilot/issues/new?assignees=&labels=bug&template=bug_report.yml&title=%5BBUG%5D%3A) 与 [Feature Request](https://github.com/jxxghp/MoviePilot/issues/new?assignees=&labels=feature+request&template=feature_request.yml&title=%5BFeature+Request%5D%3A+) 请转到对应位置提交。
- type: textarea
id: discussion
attributes:
label: 项目讨论
description: 请详细描述需要讨论的内容。
placeholder: "项目讨论"
validations:
required: true

View File

@@ -14,6 +14,18 @@ body:
description: 目前使用的程序版本
validations:
required: true
- type: dropdown
id: type
attributes:
label: 功能改进类型
description: 你需要在下面哪个方面改进功能
options:
- 主程序
- 插件
- Docker
- 其他
validations:
required: true
- type: textarea
id: feature-request
attributes:

View File

@@ -1,75 +0,0 @@
name: MoviePilot Docker
on:
workflow_dispatch:
push:
branches:
- main
paths:
- version.py
jobs:
build:
runs-on: ubuntu-latest
name: Build Docker Image
steps:
-
name: Checkout
uses: actions/checkout@v3
- name: Docker meta
id: meta
uses: docker/metadata-action@v4
with:
images: ${{ secrets.DOCKER_USERNAME }}/moviepilot
-
name: Release version
id: release_version
run: |
app_version=$(cat version.py |sed -ne "s/APP_VERSION\s=\s'v\(.*\)'/\1/gp")
echo "app_version=$app_version" >> $GITHUB_ENV
-
name: Set Up QEMU
uses: docker/setup-qemu-action@v2
-
name: Set Up Buildx
uses: docker/setup-buildx-action@v2
-
name: Login DockerHub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
-
name: Build Image
uses: docker/build-push-action@v4
with:
context: .
file: Dockerfile
platforms: |
linux/amd64
linux/arm64
push: true
build-args: |
MOVIEPILOT_FRONTEND_VERSION=${{ env.app_version }}
tags: |
${{ secrets.DOCKER_USERNAME }}/moviepilot:latest
${{ secrets.DOCKER_USERNAME }}/moviepilot:${{ env.app_version }}
labels: ${{ steps.meta.outputs.labels }}
-
name: Create Release
id: create_release
uses: actions/create-release@latest
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
tag_name: v${{ env.app_version }}
release_name: v${{ env.app_version }}
body: ${{ github.event.commits[0].message }}
draft: false
prerelease: false

65
.github/workflows/build-windows.yml vendored Normal file
View File

@@ -0,0 +1,65 @@
name: MoviePilot Windows Builder
on:
workflow_dispatch:
push:
branches:
- main
paths:
- version.py
jobs:
Windows-build:
runs-on: windows-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Release Version
id: release_version
run: |
$app_version = Select-String -Path "version.py" -Pattern "APP_VERSION\s=\s'v(.*)'" | ForEach-Object { $_.Matches.Groups[1].Value }
$env:GITHUB_ENV += "app_version=$app_version"
- name: Init Python 3.11.4
uses: actions/setup-python@v4
with:
python-version: '3.11.4'
- name: Install Dependent Packages
run: |
python -m pip install --upgrade pip
pip install wheel pyinstaller
pip install -r requirements.txt
shell: pwsh
- name: Pyinstaller
run: |
pyinstaller windows.spec
shell: pwsh
- name: Upload Windows File
uses: actions/upload-artifact@v3
with:
name: windows
path: dist/MoviePilot.exe
- name: Generate Release
id: generate_release
uses: actions/create-release@latest
with:
tag_name: v${{ env.app_version }}
release_name: v${{ env.app_version }}
body: ${{ github.event.commits[0].message }}
draft: false
prerelease: false
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Upload Release Asset
uses: dwenegar/upload-release-assets@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
release_id: ${{ steps.generate_release.outputs.id }}
assets_path: |
dist/MoviePilot.exe

59
.github/workflows/build.yml vendored Normal file
View File

@@ -0,0 +1,59 @@
name: MoviePilot Docker Builder
on:
workflow_dispatch:
push:
branches:
- main
paths:
- version.py
jobs:
Docker-build:
runs-on: ubuntu-latest
name: Build Docker Image
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Release version
id: release_version
run: |
app_version=$(cat version.py |sed -ne "s/APP_VERSION\s=\s'v\(.*\)'/\1/gp")
echo "app_version=$app_version" >> $GITHUB_ENV
- name: Docker meta
id: meta
uses: docker/metadata-action@v5
with:
images: ${{ secrets.DOCKER_USERNAME }}/moviepilot
tags: |
type=raw,value=${{ env.app_version }}
type=raw,value=latest
- name: Set Up QEMU
uses: docker/setup-qemu-action@v3
- name: Set Up Buildx
uses: docker/setup-buildx-action@v3
- name: Login DockerHub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Build Image
uses: docker/build-push-action@v5
with:
context: .
file: Dockerfile
platforms: |
linux/amd64
linux/arm64/v8
push: true
build-args: |
MOVIEPILOT_VERSION=${{ env.app_version }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
cache-from: type=gha, scope=${{ github.workflow }}
cache-to: type=gha, scope=${{ github.workflow }}

View File

@@ -1,39 +1,18 @@
FROM python:3.11.4-slim-bullseye
ARG MOVIEPILOT_FRONTEND_VERSION
ARG MOVIEPILOT_VERSION
ENV LANG="C.UTF-8" \
HOME="/moviepilot" \
TERM="xterm" \
TZ="Asia/Shanghai" \
PUID=0 \
PGID=0 \
UMASK=000 \
MOVIEPILOT_AUTO_UPDATE=true \
PORT=3001 \
NGINX_PORT=3000 \
CONFIG_DIR="/config" \
API_TOKEN="moviepilot" \
AUTH_SITE="iyuu" \
DOWNLOAD_PATH="/downloads" \
DOWNLOAD_CATEGORY="false" \
TORRENT_TAG="MOVIEPILOT" \
LIBRARY_PATH="" \
LIBRARY_CATEGORY="false" \
TRANSFER_TYPE="copy" \
COOKIECLOUD_HOST="https://nastool.org/cookiecloud" \
COOKIECLOUD_KEY="" \
COOKIECLOUD_PASSWORD="" \
MESSAGER="telegram" \
TELEGRAM_TOKEN="" \
TELEGRAM_CHAT_ID="" \
DOWNLOADER="qbittorrent" \
QB_HOST="127.0.0.1:8080" \
QB_USER="admin" \
QB_PASSWORD="adminadmin" \
MEDIASERVER="emby" \
EMBY_HOST="http://127.0.0.1:8096" \
EMBY_API_KEY=""
MOVIEPILOT_AUTO_UPDATE=true \
MOVIEPILOT_AUTO_UPDATE_DEV=false \
CONFIG_DIR="/config"
WORKDIR "/app"
COPY . .
RUN apt-get update \
RUN apt-get update -y \
&& apt-get -y install \
musl-dev \
nginx \
@@ -47,30 +26,27 @@ RUN apt-get update \
busybox \
dumb-init \
jq \
haproxy \
&& \
if [ "$(uname -m)" = "x86_64" ]; \
then ln -s /usr/lib/x86_64-linux-musl/libc.so /lib/libc.musl-x86_64.so.1; \
elif [ "$(uname -m)" = "aarch64" ]; \
then ln -s /usr/lib/aarch64-linux-musl/libc.so /lib/libc.musl-aarch64.so.1; \
fi \
&& cp -f /app/nginx.conf /etc/nginx/nginx.template.conf \
&& cp -f /app/update /usr/local/bin/mp_update \
&& cp -f /app/entrypoint /entrypoint \
&& chmod +x /entrypoint /usr/local/bin/mp_update \
&& mkdir -p ${HOME} \
&& groupadd -r moviepilot -g 911 \
&& useradd -r moviepilot -g moviepilot -d ${HOME} -s /bin/bash -u 911 \
&& apt-get autoremove -y \
&& apt-get clean -y \
&& rm -rf \
/tmp/* \
/moviepilot/.cache \
/var/lib/apt/lists/* \
/var/tmp/*
COPY requirements.txt requirements.txt
RUN apt-get update -y \
&& apt-get install -y build-essential \
&& pip install --upgrade pip \
&& pip install Cython \
&& pip install -r requirements.txt \
&& playwright install-deps chromium \
&& python_ver=$(python3 -V | awk '{print $2}') \
&& echo "/app/" > /usr/local/lib/python${python_ver%.*}/site-packages/app.pth \
&& echo 'fs.inotify.max_user_watches=5242880' >> /etc/sysctl.conf \
&& echo 'fs.inotify.max_user_instances=5242880' >> /etc/sysctl.conf \
&& locale-gen zh_CN.UTF-8 \
&& curl -sL "https://github.com/jxxghp/MoviePilot-Frontend/releases/download/v${MOVIEPILOT_FRONTEND_VERSION}/dist.zip" | busybox unzip -d / - \
&& mv /dist /public \
&& apt-get remove -y build-essential \
&& apt-get autoremove -y \
&& apt-get clean -y \
@@ -79,6 +55,22 @@ RUN apt-get update \
/moviepilot/.cache \
/var/lib/apt/lists/* \
/var/tmp/*
COPY . .
RUN cp -f /app/nginx.conf /etc/nginx/nginx.template.conf \
&& cp -f /app/update /usr/local/bin/mp_update \
&& cp -f /app/entrypoint /entrypoint \
&& chmod +x /entrypoint /usr/local/bin/mp_update \
&& mkdir -p ${HOME} /var/lib/haproxy/server-state \
&& groupadd -r moviepilot -g 911 \
&& useradd -r moviepilot -g moviepilot -d ${HOME} -s /bin/bash -u 911 \
&& python_ver=$(python3 -V | awk '{print $2}') \
&& echo "/app/" > /usr/local/lib/python${python_ver%.*}/site-packages/app.pth \
&& echo 'fs.inotify.max_user_watches=5242880' >> /etc/sysctl.conf \
&& echo 'fs.inotify.max_user_instances=5242880' >> /etc/sysctl.conf \
&& locale-gen zh_CN.UTF-8 \
&& FRONTEND_VERSION=$(curl -sL "https://api.github.com/repos/jxxghp/MoviePilot-Frontend/releases/latest" | jq -r .tag_name) \
&& curl -sL "https://github.com/jxxghp/MoviePilot-Frontend/releases/download/${FRONTEND_VERSION}/dist.zip" | busybox unzip -d / - \
&& mv /dist /public
EXPOSE 3000
VOLUME ["/config"]
VOLUME [ "/config" ]
ENTRYPOINT [ "/entrypoint" ]

674
LICENSE Normal file
View File

@@ -0,0 +1,674 @@
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<https://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<https://www.gnu.org/licenses/why-not-lgpl.html>.

201
README.md
View File

@@ -15,26 +15,25 @@ Dockerhttps://hub.docker.com/r/jxxghp/moviepilot
## 安装
1. **安装CookieCloud插件**
### 1. **安装CookieCloud插件**
站点信息需要通过CookieCloud同步获取因此需要安装CookieCloud插件将浏览器中的站点Cookie数据同步到云端后再同步到MoviePilot使用。 插件下载地址请点击 [这里](https://github.com/easychen/CookieCloud/releases)。
2. **安装CookieCloud服务端可选**
### 2. **安装CookieCloud服务端可选**
MoviePilot内置了公共CookieCloud服务器如果需要自建服务可参考 [CookieCloud](https://github.com/easychen/CookieCloud) 项目进行安装
```shell
docker pull easychen/cookiecloud:latest
```
MoviePilot内置了公共CookieCloud服务器如果需要自建服务可参考 [CookieCloud](https://github.com/easychen/CookieCloud) 项目进行搭建docker镜像请点击 [这里](https://hub.docker.com/r/easychen/cookiecloud)
3. **安装配套管理软件**
**声明:** 本项目不会收集用户敏感数据Cookie同步也是基于CookieCloud项目实现非本项目提供的能力。技术角度上CookieCloud采用端到端加密在个人不泄露`用户KEY``端对端加密密码`的情况下第三方无法窃取任何用户信息(包括服务器持有者)。如果你不放心,可以不使用公共服务或者不使用本项目,但如果使用后发生了任何信息泄露与本项目无关!
MoviePilot跟NAStool一样需要配套下载器和媒体服务器使用。
### 3. **安装配套管理软件**
MoviePilot需要配套下载器和媒体服务器配合使用。
- 下载器支持qBittorrent、TransmissionQB版本号要求>= 4.3.9TR版本号要求>= 3.0推荐使用QB。
- 媒体服务器支持Jellyfin、Emby、Plex推荐使用Emby。
4. **安装MoviePilot**
### 4. **安装MoviePilot**
目前仅提供docker镜像后续可能会提供更多安装方式。
目前仅提供docker镜像点击 [这里](https://hub.docker.com/r/jxxghp/moviepilot) 或执行命令:
```shell
docker pull jxxghp/moviepilot:latest
@@ -42,41 +41,56 @@ docker pull jxxghp/moviepilot:latest
## 配置
项目的所有配置均通过环境变量进行设置,部分环境建立容器后会自动显示待配置项,如未自动显示配置项则需要手动增加对应环境变量。
项目的所有配置均通过环境变量进行设置,支持两种配置方式:
- 在docker环境变量部分进行参数配置部分环境建立容器后会自动显示待配置项如未自动显示配置项则需要手动增加对应环境变量。
- 下载 [app.env](https://github.com/jxxghp/MoviePilot/raw/main/config/app.env) 文件,修改好配置后放置到配置文件映射路径根目录,配置项可根据说明自主增减。
配置文件映射路径:`/config`
配置文件映射路径:`/config`,配置项生效优先级:环境变量 > env文件 > 默认值,部分参数如路径映射、站点认证、权限端口等必须通过环境变量进行配置。
> $\color{red}{*}$ 号标识的为必填项,其它为可选项,可选项可删除配置变量从而使用默认值。
### 1. **基础设置**
- **PUID**:运行程序用户的`uid`,默认`0`
- **PGID**:运行程序用户的`gid`,默认`0`
- **UMASK**:掩码权限,默认`000`,可以考虑设置为`022`
- **MOVIEPILOT_AUTO_UPDATE**:重启更新,`true`/`false`,默认`true` **注意:如果出现网络问题可以配置`PROXY_HOST`,具体看下方`PROXY_HOST`解释**
- **NGINX_PORT** WEB服务端口,默认`3000`,可自行修改,但不能为`3001`
- **SUPERUSER** 超级管理员用户名,默认`admin`,安装后使用该用户登录后台管理界面
- **SUPERUSER_PASSWORD** 超级管理员初始密码,默认`password`,建议修改为复杂密码
- **API_TOKEN** API密钥默认`moviepilot`在媒体服务器Webhook、微信回调等地址配置中需要加上`?token=`该值,建议修改为复杂字符串
- **PROXY_HOST** 网络代理可选访问themoviedb或者重启更新需要使用代理访问格式为`http(s)://ip:port`
- **NGINX_PORT $\color{red}{*}$ ** WEB服务端口默认`3000`可自行修改不能与API服务端口冲突仅支持环境变量配置
- **PORT $\color{red}{*}$ ** API服务端口默认`3001`可自行修改不能与WEB服务端口冲突仅支持环境变量配置
- **PUID**:运行程序用户的`uid`,默认`0`(仅支持环境变量配置)
- **PGID**:运行程序用户的`gid`,默认`0`(仅支持环境变量配置)
- **UMASK**:掩码权限,默认`000`,可以考虑设置为`022`(仅支持环境变量配置)
- **MOVIEPILOT_AUTO_UPDATE**:重启更新,`true`/`false`,默认`true` **注意:如果出现网络问题可以配置`PROXY_HOST`,具体看下方`PROXY_HOST`解释**(仅支持环境变量配置)
- **MOVIEPILOT_AUTO_UPDATE_DEV**:重启时更新到未发布的开发版本代码,`true`/`false`,默认`false`(仅支持环境变量配置)
---
- **SUPERUSER $\color{red}{*}$ ** 超级管理员用户名,默认`admin`,安装后使用该用户登录后台管理界面
- **SUPERUSER_PASSWORD $\color{red}{*}$ ** 超级管理员初始密码,默认`password`,建议修改为复杂密码
- **API_TOKEN $\color{red}{*}$ ** API密钥默认`moviepilot`在媒体服务器Webhook、微信回调等地址配置中需要加上`?token=`该值,建议修改为复杂字符串
- **PROXY_HOST** 网络代理访问themoviedb或者重启更新需要使用代理访问格式为`http(s)://ip:port``socks5://user:pass@host:port`(可选)
- **TMDB_API_DOMAIN** TMDB API地址默认`api.themoviedb.org`,也可配置为`api.tmdb.org`或其它中转代理服务地址,能连通即可
- **DOWNLOAD_PATH** 下载保存目录,**注意:需要将`moviepilot``下载器`的映射路径保持一致**,否则会导致下载文件无法转移
- **DOWNLOAD_MOVIE_PATH** 电影下载保存目录,**必须是`DOWNLOAD_PATH`的下级路径**,不设置则下载到`DOWNLOAD_PATH`
- **DOWNLOAD_TV_PATH** 电视剧下载保存目录,**必须是`DOWNLOAD_PATH`的下级路径**,不设置则下载到`DOWNLOAD_PATH`
- **DOWNLOAD_CATEGORY** 下载二级分类开关,`true`/`false`,默认`false`,开启后会根据配置`category.yaml`自动在下载目录下建立二级目录分类
- **DOWNLOAD_SUBTITLE** 下载站点字幕,`true`/`false`,默认`true`
- **REFRESH_MEDIASERVER** 入库刷新媒体库,`true`/`false`,默认`true`
- **TMDB_IMAGE_DOMAIN** TMDB图片地址默认`image.tmdb.org`可配置为其它中转代理以加速TMDB图片显示`static-mdb.v.geilijiasu.com`
---
- **SCRAP_METADATA** 刮削入库的媒体文件,`true`/`false`,默认`true`
- **TORRENT_TAG** 种子标签,默认为`MOVIEPILOT`设置后只有MoviePilot添加的下载才会处理留空所有下载器中的任务均会处理
- **LIBRARY_PATH** 媒体库目录,多个目录使用`,`分隔
- **LIBRARY_MOVIE_NAME** 电影媒体库目录名,默认`电影`
- **LIBRARY_TV_NAME** 电视剧媒体库目录名,默认`电视剧`
- **LIBRARY_CATEGORY** 媒体库二级分类开关,`true`/`false`,默认`false`,开启后会根据配置`category.yaml`自动在媒体库目录下建立二级目录分类
- **TRANSFER_TYPE** 转移方式,支持`link`/`copy`/`move`/`softlink` **注意:在`link`和`softlink`转移方式下,转移后的文件会继承源文件的权限掩码,不受`UMASK`影响**
- **COOKIECLOUD_HOST** CookieCloud服务器地址格式`http://ip:port`,必须配置,否则无法添加站点
- **COOKIECLOUD_KEY** CookieCloud用户KEY
- **COOKIECLOUD_PASSWORD** CookieCloud端对端加密密码
- **COOKIECLOUD_INTERVAL** CookieCloud同步间隔分钟
- **USER_AGENT** CookieCloud对应的浏览器UA可选设置后可增加连接站点的成功率同步站点后可以在管理界面中修改
- **MESSAGER** 消息通知渠道,支持 `telegram`/`wechat`/`slack`,开启多个渠道时使用`,`分隔。同时还需要配置对应渠道的环境变量,非对应渠道的变量可删除,推荐使用`telegram`
- **SCRAP_SOURCE** 刮削元数据及图片使用的数据源,`themoviedb`/`douban`,默认`themoviedb`
- **SCRAP_FOLLOW_TMDB** 新增已入库媒体是否跟随TMDB信息变化`true`/`false`,默认`true`
---
- **TRANSFER_TYPE $\color{red}{*}$ ** 整理转移方式,支持`link`/`copy`/`move`/`softlink` **注意:在`link`和`softlink`转移方式下,转移后的文件会继承源文件的权限掩码,不受`UMASK`影响**
- **LIBRARY_PATH $\color{red}{*}$ ** 媒体库目录,多个目录使用`,`分隔
- **LIBRARY_MOVIE_NAME** 电影媒体库目录名称(不是完整路径),默认`电影`
- **LIBRARY_TV_NAME** 电视剧媒体库目录名称(不是完整路径),默认`电视剧`
- **LIBRARY_ANIME_NAME** 动漫媒体库目录名称(不是完整路径),默认`电视剧/动漫`
- **LIBRARY_CATEGORY** 媒体库二级分类开关,`true`/`false`,默认`false`,开启后会根据配置 [category.yaml](https://github.com/jxxghp/MoviePilot/raw/main/config/category.yaml) 自动在媒体库目录下建立二级目录分类
---
- **COOKIECLOUD_HOST $\color{red}{*}$ ** CookieCloud服务器地址格式`http(s)://ip:port`,不配置默认使用内建服务器`https://movie-pilot.org/cookiecloud`
- **COOKIECLOUD_KEY $\color{red}{*}$ ** CookieCloud用户KEY
- **COOKIECLOUD_PASSWORD $\color{red}{*}$ ** CookieCloud端对端加密密码
- **COOKIECLOUD_INTERVAL $\color{red}{*}$ ** CookieCloud同步间隔分钟
- **USER_AGENT $\color{red}{*}$ ** CookieCloud保存Cookie对应的浏览器UA建议配置设置后可增加连接站点的成功率同步站点后可以在管理界面中修改
- **OCR_HOST** OCR识别服务器地址格式`http(s)://ip:port`用于识别站点验证码实现自动登录获取Cookie等不配置默认使用内建服务器`https://movie-pilot.org`,可使用 [这个镜像](https://hub.docker.com/r/jxxghp/moviepilot-ocr) 自行搭建。
---
- **SUBSCRIBE_MODE** 订阅模式,`rss`/`spider`,默认`spider``rss`模式通过定时刷新RSS来匹配订阅RSS地址会自动获取也可手动维护对站点压力小同时可设置订阅刷新周期24小时运行但订阅和下载通知不能过滤和显示免费推荐使用rss模式。
- **SUBSCRIBE_RSS_INTERVAL** RSS订阅模式刷新时间间隔分钟默认`30`分钟不能小于5分钟。
- **SUBSCRIBE_SEARCH** 订阅搜索,`true`/`false`,默认`false`开启后会每隔24小时对所有订阅进行全量搜索以补齐缺失剧集一般情况下正常订阅即可订阅搜索只做为兜底会增加站点压力不建议开启
- **SEARCH_SOURCE** 媒体信息搜索来源,`themoviedb`/`douban`,默认`themoviedb`
---
- **AUTO_DOWNLOAD_USER** 远程交互搜索时自动择优下载的用户ID多个用户使用,分割,未设置需要选择资源或者回复`0`
- **MESSAGER $\color{red}{*}$ ** 消息通知渠道,支持 `telegram`/`wechat`/`slack`/`synologychat`,开启多个渠道时使用`,`分隔。同时还需要配置对应渠道的环境变量,非对应渠道的变量可删除,推荐使用`telegram`
- `wechat`设置项:
@@ -93,22 +107,36 @@ docker pull jxxghp/moviepilot:latest
- **TELEGRAM_TOKEN** Telegram Bot Token
- **TELEGRAM_CHAT_ID** Telegram Chat ID
- **TELEGRAM_USERS** Telegram 用户ID多个使用,分隔只有用户ID在列表中才可以使用Bot如未设置则均可以使用Bot
- **TELEGRAM_ADMINS** Telegram 管理员ID多个使用,分隔只有管理员才可以操作Bot菜单如未设置则均可以操作菜单
- **TELEGRAM_ADMINS** Telegram 管理员ID多个使用,分隔只有管理员才可以操作Bot菜单如未设置则均可以操作菜单(可选)
- `slack`设置项:
- **SLACK_OAUTH_TOKEN** Slack Bot User OAuth Token
- **SLACK_APP_TOKEN** Slack App-Level Token
- **SLACK_CHANNEL** Slack 频道名称,默认`全体`
- **SLACK_CHANNEL** Slack 频道名称,默认`全体`(可选)
- `synologychat`设置项:
- **SYNOLOGYCHAT_WEBHOOK** 在Synology Chat中创建机器人获取机器人`传入URL`
- **SYNOLOGYCHAT_TOKEN** SynologyChat机器人`令牌`
- **DOWNLOADER** 下载器,支持`qbittorrent`/`transmission`QB版本号要求>= 4.3.9TR版本号要求>= 3.0,同时还需要配置对应渠道的环境变量,非对应渠道的变量可删除,推荐使用`qbittorrent`
---
- **DOWNLOAD_PATH $\color{red}{*}$ ** 下载保存目录,**注意:需要将`moviepilot``下载器`的映射路径保持一致**,否则会导致下载文件无法转移
- **DOWNLOAD_MOVIE_PATH** 电影下载保存目录路径,不设置则下载到`DOWNLOAD_PATH`
- **DOWNLOAD_TV_PATH** 电视剧下载保存目录路径,不设置则下载到`DOWNLOAD_PATH`
- **DOWNLOAD_ANIME_PATH** 动漫下载保存目录路径,不设置则下载到`DOWNLOAD_PATH`
- **DOWNLOAD_CATEGORY** 下载二级分类开关,`true`/`false`,默认`false`,开启后会根据配置 [category.yaml](https://github.com/jxxghp/MoviePilot/raw/main/config/category.yaml) 自动在下载目录下建立二级目录分类
- **DOWNLOAD_SUBTITLE** 下载站点字幕,`true`/`false`,默认`true`
- **DOWNLOADER_MONITOR** 下载器监控,`true`/`false`,默认为`true`,开启后下载完成时才会自动整理入库
- **TORRENT_TAG** 下载器种子标签,默认为`MOVIEPILOT`设置后只有MoviePilot添加的下载才会处理留空所有下载器中的任务均会处理
- **DOWNLOADER $\color{red}{*}$ ** 下载器,支持`qbittorrent`/`transmission`QB版本号要求>= 4.3.9TR版本号要求>= 3.0,同时还需要配置对应渠道的环境变量,非对应渠道的变量可删除,推荐使用`qbittorrent`
- `qbittorrent`设置项:
- **QB_HOST** qbittorrent地址格式`ip:port`https需要添加`https://`前缀
- **QB_USER** qbittorrent用户名
- **QB_PASSWORD** qbittorrent密码
- **QB_CATEGORY** qbittorrent分类自动管理`true`/`false`,默认`false`,开启后会将下载二级分类传递到下载器,由下载器管理下载目录,需要同步开启`DOWNLOAD_CATEGORY`
- `transmission`设置项:
@@ -116,9 +144,9 @@ docker pull jxxghp/moviepilot:latest
- **TR_USER** transmission用户名
- **TR_PASSWORD** transmission密码
- **DOWNLOADER_MONITOR** 下载器监控,`true`/`false`,默认为`true`,开启后下载完成时才会自动整理入库
- **MEDIASERVER** 媒体服务器,支持`emby`/`jellyfin`/`plex`,同时还需要配置对应媒体服务器的环境变量,非对应媒体服务器的变量可删除,推荐使用`emby`
---
- **REFRESH_MEDIASERVER** 入库后是否刷新媒体服务器,`true`/`false`,默认`true`
- **MEDIASERVER $\color{red}{*}$ ** 媒体服务器,支持`emby`/`jellyfin`/`plex`,同时开启多个使用`,`分隔。还需要配置对应媒体服务器的环境变量,非对应媒体服务器的变量可删除,推荐使用`emby`
- `emby`设置项:
@@ -136,27 +164,30 @@ docker pull jxxghp/moviepilot:latest
- **PLEX_TOKEN** Plex网页Url中的`X-Plex-Token`通过浏览器F12->网络从请求URL中获取
- **MEDIASERVER_SYNC_INTERVAL:** 媒体服务器同步间隔(小时),默认`6`,留空则不同步
- **MEDIASERVER_SYNC_BLACKLIST:** 媒体服务器同步黑名单,多个媒体库名称使用,分割
### 2. **用户认证**
- **AUTH_SITE** 认证站点,支持`hhclub`/`audiences`/`hddolby`/`zmpt`/`freefarm`/`hdfans`/`wintersakura`/`leaves`/`1ptba`/`icc2022`/`iyuu`
`MoviePilot`需要认证后才能使用,配置`AUTH_SITE`后,需要根据下表配置对应站点的认证参数(**仅能通过docker环境变量配置**
`MoviePilot`需要认证后才能使用,配置`AUTH_SITE`后,需要根据下表配置对应站点的认证参数。
- **AUTH_SITE $\color{red}{*}$ ** 认证站点,支持`iyuu`/`hhclub`/`audiences`/`hddolby`/`zmpt`/`freefarm`/`hdfans`/`wintersakura`/`leaves`/`1ptba`/`icc2022`/`ptlsp`/`xingtan`
| 站点 | 参数 |
|:--:|:-----------------------------------------------------:|
| iyuu | `IYUU_SIGN`IYUU登录令牌 |
| hhclub | `HHCLUB_USERNAME`:用户名<br/>`HHCLUB_PASSKEY`:密钥 |
| audiences | `AUDIENCES_UID`用户ID<br/>`AUDIENCES_PASSKEY`:密钥 |
| hddolby | `HDDOLBY_ID`用户ID<br/>`HDDOLBY_PASSKEY`:密钥 |
| zmpt | `ZMPT_UID`用户ID<br/>`ZMPT_PASSKEY`:密钥 |
| freefarm | `FREEFARM_UID`用户ID<br/>`FREEFARM_PASSKEY`:密钥 |
| hdfans | `HDFANS_UID`用户ID<br/>`HDFANS_PASSKEY`:密钥 |
| 站点 | 参数 |
|:------------:|:-----------------------------------------------------:|
| iyuu | `IYUU_SIGN`IYUU登录令牌 |
| hhclub | `HHCLUB_USERNAME`:用户名<br/>`HHCLUB_PASSKEY`:密钥 |
| audiences | `AUDIENCES_UID`用户ID<br/>`AUDIENCES_PASSKEY`:密钥 |
| hddolby | `HDDOLBY_ID`用户ID<br/>`HDDOLBY_PASSKEY`:密钥 |
| zmpt | `ZMPT_UID`用户ID<br/>`ZMPT_PASSKEY`:密钥 |
| freefarm | `FREEFARM_UID`用户ID<br/>`FREEFARM_PASSKEY`:密钥 |
| hdfans | `HDFANS_UID`用户ID<br/>`HDFANS_PASSKEY`:密钥 |
| wintersakura | `WINTERSAKURA_UID`用户ID<br/>`WINTERSAKURA_PASSKEY`:密钥 |
| leaves | `LEAVES_UID`用户ID<br/>`LEAVES_PASSKEY`:密钥 |
| 1ptba | `1PTBA_UID`用户ID<br/>`1PTBA_PASSKEY`:密钥 |
| icc2022 | `ICC2022_UID`用户ID<br/>`ICC2022_PASSKEY`:密钥 |
| leaves | `LEAVES_UID`用户ID<br/>`LEAVES_PASSKEY`:密钥 |
| 1ptba | `1PTBA_UID`用户ID<br/>`1PTBA_PASSKEY`:密钥 |
| icc2022 | `ICC2022_UID`用户ID<br/>`ICC2022_PASSKEY`:密钥 |
| ptlsp | `PTLSP_UID`用户ID<br/>`PTLSP_PASSKEY`:密钥 |
| xingtan | `XINGTAN_UID`用户ID<br/>`XINGTAN_PASSKEY`:密钥 |
### 2. **进阶配置**
@@ -172,10 +203,12 @@ docker pull jxxghp/moviepilot:latest
> `original_title` 原语种标题
> `name` 识别名称
> `year` 年份
> `edition` 版本
> `resourceType`:资源类型
> `effect`:特效
> `edition` 版本(资源类型+特效)
> `videoFormat` 分辨率
> `releaseGroup` 制作组/字幕组
> `effect` 特效
> `customization` 自定义占位符
> `videoCodec` 视频编码
> `audioCodec` 音频编码
> `tmdbid` TMDBID
@@ -196,6 +229,7 @@ docker pull jxxghp/moviepilot:latest
> `season` 季号
> `episode` 集号
> `season_episode` 季集 SxxExx
> `episode_title` 集标题
`TV_RENAME_FORMAT`默认配置格式:
@@ -204,9 +238,7 @@ docker pull jxxghp/moviepilot:latest
```
### 3. **过滤规则**
`设定`-`规则`中设定,规则说明:
### 3. **优先级规则**
- 仅支持使用内置规则进行排列组合,内置规则有:`蓝光原盘``4K``1080P``中文字幕``特效字幕``H265``H264``杜比``HDR``REMUX``WEB-DL``免费``国语配音`
- 符合任一层级规则的资源将被标识选中,匹配成功的层级做为该资源的优先级,排越前面优先级越高
@@ -215,17 +247,17 @@ docker pull jxxghp/moviepilot:latest
## 使用
- 通过CookieCloud同步快速同步站点不需要使用的站点可在WEB管理界面中禁用。
- 通过下载器监控实现自动整理入库刮削
- 通过微信/Telegram/Slack远程管理其中Telegram将会自动添加操作菜单。微信回调相对路径为`/api/v1/message/`
- 通过WEB进行管理将WEB添加到手机桌面获得类App使用效果管理界面端口`3000`
- 设置媒体服务器Webhook通过MoviePilot发送播放通知等。Webhook回调相对路径为`/api/v1/webhook?token=moviepilot`,其中`moviepilot`为设置的`API_TOKEN`
- 将MoviePilot做为Radarr或Sonarr服务器添加到Overseerr或Jellyseerr可使用Overseerr/Jellyseerr浏览订阅。
- 通过CookieCloud同步快速同步站点不需要使用的站点可在WEB管理界面中禁用,无法同步的站点可手动新增
- 通过WEB进行管理将WEB添加到手机桌面获得类App使用效果管理界面端口`3000`后台API端口`3001`
- 通过下载器监控或使用目录监控插件实现自动整理入库刮削(二选一)
- 通过微信/Telegram/Slack/SynologyChat远程管理其中微信/Telegram将会自动添加操作菜单微信菜单条数有限制部分菜单不显示微信需要在官方页面设置回调地址SynologyChat需要设置机器人传入地址地址相对路径为`/api/v1/message/`
- 设置媒体服务器Webhook通过MoviePilot发送播放通知等。Webhook回调相对路径为`/api/v1/webhook?token=moviepilot``3001`端口),其中`moviepilot`为设置的`API_TOKEN`
- 将MoviePilot做为Radarr或Sonarr服务器添加到Overseerr或Jellyseerr`API服务端口`可使用Overseerr/Jellyseerr浏览订阅。
- 映射宿主机docker.sock文件到容器`/var/run/docker.sock`,以支持内建重启操作。实例:`-v /var/run/docker.sock:/var/run/docker.sock:ro`
**注意**
1) 容器首次启动需要下载浏览器内核,根据网络情况可能需要较长时间,此时无法登录。可映射`/moviepilot`目录避免容器重置后重新触发浏览器内核下载。
2) 使用反向代理时,需要添加以下配置,否则可能会导致部分功能无法访问(`ip:port`修改为实际值):
### **注意**
- 容器首次启动需要下载浏览器内核,根据网络情况可能需要较长时间,此时无法登录。可映射`/moviepilot`目录避免容器重置后重新触发浏览器内核下载。
- 使用反向代理时,需要添加以下配置,否则可能会导致部分功能无法访问(`ip:port`修改为实际值):
```nginx configuration
location / {
proxy_pass http://ip:port;
@@ -235,11 +267,24 @@ location / {
proxy_set_header X-Forwarded-Proto $scheme;
}
```
- 新建的企业微信应用需要固定公网IP的代理才能收到消息代理添加以下代码
```nginx configuration
location /cgi-bin/gettoken {
proxy_pass https://qyapi.weixin.qq.com;
}
location /cgi-bin/message/send {
proxy_pass https://qyapi.weixin.qq.com;
}
location /cgi-bin/menu/create {
proxy_pass https://qyapi.weixin.qq.com;
}
```
![image](https://github.com/jxxghp/MoviePilot/assets/51039935/b8f0238d-847f-4f9d-b210-e905837362b9)
![image](https://github.com/jxxghp/MoviePilot/assets/51039935/f2654b09-26f3-464f-a0af-1de3f97832ee)
![image](https://github.com/jxxghp/MoviePilot/assets/51039935/28219233-ec7d-479b-b184-9a901c947dd1)
![image](https://github.com/jxxghp/MoviePilot/assets/51039935/fcb87529-56dd-43df-8337-6e34b8582819)
![image](https://github.com/jxxghp/MoviePilot/assets/51039935/f7df0806-668d-4c8b-ad41-133bf8f0bf73)
![image](https://github.com/jxxghp/MoviePilot/assets/51039935/bfa77c71-510a-46a6-9c1e-cf98cb101e3a)
![image](https://github.com/jxxghp/MoviePilot/assets/51039935/51cafd09-e38c-47f9-ae62-1e83ab8bf89b)
![image](https://github.com/jxxghp/MoviePilot/assets/51039935/f7ea77cd-0362-4c35-967c-7f1b22dbef05)

BIN
app.ico Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 174 KiB

View File

@@ -1,7 +1,7 @@
from fastapi import APIRouter
from app.api.endpoints import login, user, site, message, webhook, subscribe, \
media, douban, search, plugin, tmdb, history, system, download, dashboard, rss
media, douban, search, plugin, tmdb, history, system, download, dashboard, filebrowser, transfer
api_router = APIRouter()
api_router.include_router(login.router, prefix="/login", tags=["login"])
@@ -19,4 +19,5 @@ api_router.include_router(system.router, prefix="/system", tags=["system"])
api_router.include_router(plugin.router, prefix="/plugin", tags=["plugin"])
api_router.include_router(download.router, prefix="/download", tags=["download"])
api_router.include_router(dashboard.router, prefix="/dashboard", tags=["dashboard"])
api_router.include_router(rss.router, prefix="/rss", tags=["rss"])
api_router.include_router(filebrowser.router, prefix="/filebrowser", tags=["filebrowser"])
api_router.include_router(transfer.router, prefix="/transfer", tags=["transfer"])

View File

@@ -1,8 +1,8 @@
from pathlib import Path
from typing import Any, List
from typing import Any, List, Optional
from fastapi import APIRouter, Depends
from requests import Session
from sqlalchemy.orm import Session
from app import schemas
from app.chain.dashboard import DashboardChain
@@ -11,9 +11,7 @@ from app.core.security import verify_token
from app.db import get_db
from app.db.models.transferhistory import TransferHistory
from app.scheduler import Scheduler
from app.utils.string import StringUtils
from app.utils.system import SystemUtils
from app.utils.timer import TimerUtils
router = APIRouter()
@@ -24,14 +22,16 @@ def statistic(db: Session = Depends(get_db),
"""
查询媒体数量统计信息
"""
media_statistic = DashboardChain(db).media_statistic()
if media_statistic:
return schemas.Statistic(
movie_count=media_statistic.movie_count,
tv_count=media_statistic.tv_count,
episode_count=media_statistic.episode_count,
user_count=media_statistic.user_count
)
media_statistics: Optional[List[schemas.Statistic]] = DashboardChain(db).media_statistic()
if media_statistics:
# 汇总各媒体库统计信息
ret_statistic = schemas.Statistic()
for media_statistic in media_statistics:
ret_statistic.movie_count += media_statistic.movie_count
ret_statistic.tv_count += media_statistic.tv_count
ret_statistic.episode_count += media_statistic.episode_count
ret_statistic.user_count += media_statistic.user_count
return ret_statistic
else:
return schemas.Statistic()
@@ -41,12 +41,7 @@ def storage(_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
查询存储空间信息
"""
if settings.LIBRARY_PATH:
total_storage, free_storage = SystemUtils.space_usage(
[Path(path) for path in settings.LIBRARY_PATH.split(",")]
)
else:
total_storage, free_storage = 0, 0
total_storage, free_storage = SystemUtils.space_usage(settings.LIBRARY_PATHS)
return schemas.Storage(
total_storage=total_storage,
used_storage=total_storage - free_storage
@@ -69,13 +64,16 @@ def downloader(db: Session = Depends(get_db),
"""
transfer_info = DashboardChain(db).downloader_info()
free_space = SystemUtils.free_space(Path(settings.DOWNLOAD_PATH))
return schemas.DownloaderInfo(
download_speed=transfer_info.download_speed,
upload_speed=transfer_info.upload_speed,
download_size=transfer_info.download_size,
upload_size=transfer_info.upload_size,
free_space=free_space
)
if transfer_info:
return schemas.DownloaderInfo(
download_speed=transfer_info.download_speed,
upload_speed=transfer_info.upload_speed,
download_size=transfer_info.download_size,
upload_size=transfer_info.upload_size,
free_space=free_space
)
else:
return schemas.DownloaderInfo()
@router.get("/schedule", summary="后台服务", response_model=List[schemas.ScheduleInfo])
@@ -83,37 +81,7 @@ def schedule(_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
查询后台服务信息
"""
# 返回计时任务
schedulers = []
# 去重
added = []
jobs = Scheduler().list()
# 按照下次运行时间排序
jobs.sort(key=lambda x: x.next_run_time)
for job in jobs:
if job.name not in added:
added.append(job.name)
else:
continue
if not StringUtils.is_chinese(job.name):
continue
if not job.next_run_time:
status = "已停止"
next_run = ""
else:
next_run = TimerUtils.time_difference(job.next_run_time)
if not next_run:
status = "正在运行"
else:
status = "阻塞" if job.pending else "等待"
schedulers.append(schemas.ScheduleInfo(
id=job.id,
name=job.name,
status=status,
next_run=next_run
))
return schedulers
return Scheduler().list()
@router.get("/transfer", summary="文件整理统计", response_model=List[int])
@@ -124,3 +92,19 @@ def transfer(days: int = 7, db: Session = Depends(get_db),
"""
transfer_stat = TransferHistory.statistic(db, days)
return [stat[1] for stat in transfer_stat]
@router.get("/cpu", summary="获取当前CPU使用率", response_model=int)
def cpu(_: schemas.TokenPayload = Depends(verify_token)) -> Any:
    """
    Return the current system-wide CPU usage percentage as an integer.
    """
    usage = SystemUtils.cpu_usage()
    return usage
@router.get("/memory", summary="获取当前内存使用量和使用率", response_model=List[int])
def memory(_: schemas.TokenPayload = Depends(verify_token)) -> Any:
    """
    Return the current memory usage as a pair: [used amount, usage percent].
    """
    stats = SystemUtils.memory_usage()
    return stats

View File

@@ -45,6 +45,21 @@ def recognize_doubanid(doubanid: str,
return schemas.Context()
@router.get("/showing", summary="豆瓣正在热映", response_model=List[schemas.MediaInfo])
def movie_showing(page: int = 1,
                  count: int = 30,
                  db: Session = Depends(get_db),
                  _: schemas.TokenPayload = Depends(verify_token)) -> Any:
    """
    Browse movies currently showing in theatres via Douban.

    :param page: page number to fetch (1-based)
    :param count: number of entries per page
    :param db: database session (injected)
    :param _: token payload (authentication only)
    :return: list of media-info dicts; empty when Douban returns nothing
    """
    showing = DoubanChain(db).movie_showing(page=page, count=count) or []
    # Wrap each raw Douban record in MediaInfo before serialising.
    return [MediaInfo(douban_info=item).to_dict() for item in showing]
@router.get("/movies", summary="豆瓣电影", response_model=List[schemas.MediaInfo])
def douban_movies(sort: str = "R",
tags: str = "",

View File

@@ -11,8 +11,6 @@ from app.core.context import MediaInfo, Context, TorrentInfo
from app.core.metainfo import MetaInfo
from app.core.security import verify_token
from app.db import get_db
from app.db.models.user import User
from app.db.userauth import get_current_active_superuser
from app.schemas import NotExistMediaInfo, MediaType
router = APIRouter()

View File

@@ -0,0 +1,189 @@
import shutil
from pathlib import Path
from typing import Any, List
from fastapi import APIRouter, Depends
from starlette.responses import FileResponse, Response
from app import schemas
from app.core.config import settings
from app.core.security import verify_token
from app.log import logger
from app.utils.system import SystemUtils
router = APIRouter()
IMAGE_TYPES = [".jpg", ".png", ".gif", ".bmp", ".jpeg", ".webp"]
@router.get("/list", summary="所有目录和文件", response_model=List[schemas.FileItem])
def list_path(path: str,
              sort: str = 'time',
              _: schemas.TokenPayload = Depends(verify_token)) -> Any:
    """
    List every directory and file directly under *path*.

    :param path: directory (or file) path to inspect; empty or "/" means
                 the filesystem root (drive letters on Windows)
    :param sort: ordering key — 'name' for alphabetical, anything else
                 ('time', the default) for most-recently-modified first
    :param _: token payload (authentication only)
    :return: list of FileItem entries; empty when the path does not exist
    """
    items: List[schemas.FileItem] = []
    # Normalise the requested path; the filesystem root is special-cased.
    if not path or path == "/":
        if SystemUtils.is_windows():
            # On Windows the "root" is the set of drive letters.
            for drive in SystemUtils.get_windows_drives() or ["C:/"]:
                items.append(schemas.FileItem(
                    type="dir",
                    path=drive + "/",
                    name=drive,
                    basename=drive
                ))
            return items
        path = "/"
    elif not SystemUtils.is_windows() and not path.startswith("/"):
        path = "/" + path
    target = Path(path)
    if not target.exists():
        logger.error(f"目录不存在:{path}")
        return []
    # A plain file is returned as a single-entry listing.
    if target.is_file():
        return [schemas.FileItem(
            type="file",
            path=str(target).replace("\\", "/"),
            name=target.name,
            basename=target.stem,
            extension=target.suffix[1:],
            size=target.stat().st_size,
            modify_time=target.stat().st_mtime,
        )]
    # Immediate sub-directories first ...
    for entry in SystemUtils.list_sub_directory(target):
        items.append(schemas.FileItem(
            type="dir",
            path=str(entry).replace("\\", "/") + "/",
            name=entry.name,
            basename=entry.stem,
            modify_time=entry.stat().st_mtime,
        ))
    # ... then files with a recognised extension (media, subtitles, images, nfo).
    allowed_exts = (settings.RMT_MEDIAEXT
                    + settings.RMT_SUBEXT
                    + IMAGE_TYPES
                    + [".nfo"])
    for entry in SystemUtils.list_sub_files(target, allowed_exts):
        items.append(schemas.FileItem(
            type="file",
            path=str(entry).replace("\\", "/"),
            name=entry.name,
            basename=entry.stem,
            extension=entry.suffix[1:],
            size=entry.stat().st_size,
            modify_time=entry.stat().st_mtime,
        ))
    # Order the combined listing.
    if sort == 'time':
        items.sort(key=lambda it: it.modify_time, reverse=True)
    else:
        items.sort(key=lambda it: it.name)
    return items
@router.get("/mkdir", summary="创建目录", response_model=schemas.Response)
def mkdir(path: str, _: schemas.TokenPayload = Depends(verify_token)) -> Any:
    """
    Create a directory (missing parents included).

    Fails when no path is given or when the path already exists.
    """
    target = Path(path) if path else None
    if target is None or target.exists():
        return schemas.Response(success=False)
    target.mkdir(parents=True, exist_ok=True)
    return schemas.Response(success=True)
@router.get("/delete", summary="删除文件或目录", response_model=schemas.Response)
def delete(path: str, _: schemas.TokenPayload = Depends(verify_token)) -> Any:
    """
    Delete a file or a whole directory tree.

    Deleting a path that no longer exists counts as success (idempotent).
    """
    if not path:
        return schemas.Response(success=False)
    target = Path(path)
    if target.exists():
        if target.is_file():
            target.unlink()
        else:
            # Remove the directory recursively; errors are ignored as in
            # the original best-effort behaviour.
            shutil.rmtree(target, ignore_errors=True)
    return schemas.Response(success=True)
@router.get("/download", summary="下载文件或目录")
def download(path: str, token: str) -> Any:
    """
    Download a file directly, or a directory as a zip archive.

    :param path: file or directory path to download
    :param token: API token passed as a query parameter (this endpoint is
                  used as a plain link target, so no header auth)
    :return: FileResponse for files, zip Response for directories,
             None when the token is invalid
    """
    if not path:
        return schemas.Response(success=False)
    # 认证token
    if not verify_token(token):
        return None
    path_obj = Path(path)
    if not path_obj.exists():
        return schemas.Response(success=False)
    if path_obj.is_file():
        # 做为文件流式下载
        return FileResponse(path_obj)
    # 做为压缩包下载make_archive returns the path of the zip it created.
    # BUGFIX: the response body must come from the created archive, not from
    # path_obj itself — read_bytes() on a directory raises IsADirectoryError.
    archive = Path(shutil.make_archive(base_name=path_obj.stem,
                                       format="zip",
                                       root_dir=path_obj))
    response = Response(content=archive.read_bytes(), media_type="application/zip")
    # 删除压缩包
    archive.unlink(missing_ok=True)
    return response
@router.get("/rename", summary="重命名文件或目录", response_model=schemas.Response)
def rename(path: str, new_name: str, _: schemas.TokenPayload = Depends(verify_token)) -> Any:
    """
    Rename a file or directory in place (it stays in the same parent dir).
    """
    if not (path and new_name):
        return schemas.Response(success=False)
    source = Path(path)
    if not source.exists():
        return schemas.Response(success=False)
    source.rename(source.parent / new_name)
    return schemas.Response(success=True)
@router.get("/image", summary="读取图片")
def image(path: str, token: str) -> Any:
    """
    Serve an image file from disk.

    :param path: path of the image file
    :param token: API token passed as a query parameter (endpoint is used
                  as an <img> src, so no header auth)
    :return: image Response, or None when the path/token is invalid or the
             file is not a recognised image type
    """
    import mimetypes

    if not path:
        return None
    # 认证token
    if not verify_token(token):
        return None
    path_obj = Path(path)
    if not path_obj.exists():
        return None
    if not path_obj.is_file():
        return None
    # 判断是否图片文件
    if path_obj.suffix.lower() not in IMAGE_TYPES:
        return None
    # BUGFIX: report the file's real MIME type; previously every image was
    # labelled image/jpeg, mislabelling png/gif/bmp/webp content.
    media_type = mimetypes.guess_type(path_obj.name)[0] or "image/jpeg"
    return Response(content=path_obj.read_bytes(), media_type=media_type)

View File

@@ -6,11 +6,13 @@ from sqlalchemy.orm import Session
from app import schemas
from app.chain.transfer import TransferChain
from app.core.event import eventmanager
from app.core.security import verify_token
from app.db import get_db
from app.db.models.downloadhistory import DownloadHistory
from app.db.models.transferhistory import TransferHistory
from app.schemas import MediaType
from app.schemas.types import EventType
router = APIRouter()
@@ -62,19 +64,29 @@ def transfer_history(title: str = None,
@router.delete("/transfer", summary="删除转移历史记录", response_model=schemas.Response)
def delete_transfer_history(history_in: schemas.TransferHistory,
delete_file: bool = False,
deletesrc: bool = False,
deletedest: bool = False,
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
删除转移历史记录
"""
# 触发删除事件
if delete_file:
history = TransferHistory.get(db, history_in.id)
if not history:
return schemas.Response(success=False, msg="记录不存在")
# 册除文件
history = TransferHistory.get(db, history_in.id)
if not history:
return schemas.Response(success=False, msg="记录不存在")
# 册除媒体库文件
if deletedest and history.dest:
TransferChain(db).delete_files(Path(history.dest))
# 删除源文件
if deletesrc and history.src:
TransferChain(db).delete_files(Path(history.src))
# 发送事件
eventmanager.send_event(
EventType.DownloadFileDeleted,
{
"src": history.src
}
)
# 删除记录
TransferHistory.delete(db, history_in.id)
return schemas.Response(success=True)
@@ -82,15 +94,18 @@ def delete_transfer_history(history_in: schemas.TransferHistory,
@router.post("/transfer", summary="历史记录重新转移", response_model=schemas.Response)
def redo_transfer_history(history_in: schemas.TransferHistory,
mtype: str,
new_tmdbid: int,
mtype: str = None,
new_tmdbid: int = None,
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
历史记录重新转移
历史记录重新转移,不输入 mtype 和 new_tmdbid 时,自动使用文件名重新识别
"""
state, errmsg = TransferChain(db).re_transfer(logid=history_in.id,
mtype=MediaType(mtype), tmdbid=new_tmdbid)
if mtype and new_tmdbid:
state, errmsg = TransferChain(db).re_transfer(logid=history_in.id,
mtype=MediaType(mtype), tmdbid=new_tmdbid)
else:
state, errmsg = TransferChain(db).re_transfer(logid=history_in.id)
if state:
return schemas.Response(success=True)
else:

View File

@@ -1,4 +1,3 @@
import random
from datetime import timedelta
from typing import Any
@@ -15,7 +14,7 @@ from app.core.security import get_password_hash
from app.db import get_db
from app.db.models.user import User
from app.log import logger
from app.utils.http import RequestUtils
from app.utils.web import WebUtils
router = APIRouter()
@@ -56,6 +55,9 @@ async def login_access_token(
user.id, expires_delta=access_token_expires
),
token_type="bearer",
super_user=user.is_superuser,
user_name=user.name,
avatar=user.avatar
)
@@ -64,21 +66,10 @@ def bing_wallpaper() -> Any:
"""
获取Bing每日壁纸
"""
url = "https://cn.bing.com/HPImageArchive.aspx?format=js&idx=0&n=1"
try:
resp = RequestUtils(timeout=5).get_res(url)
except Exception as err:
print(str(err))
return schemas.Response(success=False)
if resp and resp.status_code == 200:
try:
result = resp.json()
if isinstance(result, dict):
for image in result.get('images') or []:
return schemas.Response(success=False,
message=f"https://cn.bing.com{image.get('url')}" if 'url' in image else '')
except Exception as err:
print(str(err))
url = WebUtils.get_bing_wallpaper()
if url:
return schemas.Response(success=False,
message=url)
return schemas.Response(success=False)
@@ -87,14 +78,10 @@ def tmdb_wallpaper(db: Session = Depends(get_db)) -> Any:
"""
获取TMDB电影海报
"""
infos = TmdbChain(db).tmdb_trending()
if infos:
# 随机一个电影
while True:
info = random.choice(infos)
if info and info.get("backdrop_path"):
return schemas.Response(
success=True,
message=f"https://image.tmdb.org/t/p/original{info.get('backdrop_path')}"
)
wallpager = TmdbChain(db).get_random_wallpager()
if wallpager:
return schemas.Response(
success=True,
message=wallpager
)
return schemas.Response(success=False)

View File

@@ -17,7 +17,7 @@ from app.schemas import MediaType
router = APIRouter()
@router.get("/recognize", summary="识别媒体信息", response_model=schemas.Context)
@router.get("/recognize", summary="识别媒体信息(种子)", response_model=schemas.Context)
def recognize(title: str,
subtitle: str = None,
db: Session = Depends(get_db),
@@ -32,6 +32,20 @@ def recognize(title: str,
return schemas.Context()
@router.get("/recognize_file", summary="识别媒体信息(文件)", response_model=schemas.Context)
def recognize(path: str,
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
根据文件路径识别媒体信息
"""
# 识别媒体信息
context = MediaChain(db).recognize_by_path(path)
if context:
return context.to_dict()
return schemas.Context()
@router.get("/search", summary="搜索媒体信息", response_model=List[schemas.MediaInfo])
def search_by_title(title: str,
page: int = 1,

View File

@@ -64,17 +64,18 @@ def wechat_verify(echostr: str, msg_signature: str,
@router.get("/switchs", summary="查询通知消息渠道开关", response_model=List[NotificationSwitch])
def read_switchs(db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
def read_switchs(_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
查询通知消息渠道开关
"""
return_list = []
# 读取数据库
switchs = SystemConfigOper(db).get(SystemConfigKey.NotificationChannels)
switchs = SystemConfigOper().get(SystemConfigKey.NotificationChannels)
if not switchs:
for noti in NotificationType:
return_list.append(NotificationSwitch(mtype=noti.value, wechat=True, telegram=True, slack=True))
return_list.append(NotificationSwitch(mtype=noti.value, wechat=True,
telegram=True, slack=True,
synologychat=True))
else:
for switch in switchs:
return_list.append(NotificationSwitch(**switch))
@@ -83,7 +84,6 @@ def read_switchs(db: Session = Depends(get_db),
@router.post("/switchs", summary="设置通知消息渠道开关", response_model=schemas.Response)
def set_switchs(switchs: List[NotificationSwitch],
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
查询通知消息渠道开关
@@ -92,6 +92,6 @@ def set_switchs(switchs: List[NotificationSwitch],
for switch in switchs:
switch_list.append(switch.dict())
# 存入数据库
SystemConfigOper(db).set(SystemConfigKey.NotificationChannels, switch_list)
SystemConfigOper().set(SystemConfigKey.NotificationChannels, switch_list)
return schemas.Response(success=True)

View File

@@ -1,12 +1,10 @@
from typing import Any, List
from fastapi import APIRouter, Depends
from sqlalchemy.orm import Session
from app import schemas
from app.core.plugin import PluginManager
from app.core.security import verify_token
from app.db import get_db
from app.db.systemconfig_oper import SystemConfigOper
from app.schemas.types import SystemConfigKey
@@ -22,28 +20,26 @@ def all_plugins(_: schemas.TokenPayload = Depends(verify_token)) -> Any:
@router.get("/installed", summary="已安装插件", response_model=List[str])
def installed_plugins(db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
def installed_plugins(_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
查询用户已安装插件清单
"""
return SystemConfigOper(db).get(SystemConfigKey.UserInstalledPlugins) or []
return SystemConfigOper().get(SystemConfigKey.UserInstalledPlugins) or []
@router.get("/install/{plugin_id}", summary="安装插件", response_model=schemas.Response)
def install_plugin(plugin_id: str,
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
安装插件
"""
# 已安装插件
install_plugins = SystemConfigOper(db).get(SystemConfigKey.UserInstalledPlugins) or []
install_plugins = SystemConfigOper().get(SystemConfigKey.UserInstalledPlugins) or []
# 安装插件
if plugin_id not in install_plugins:
install_plugins.append(plugin_id)
# 保存设置
SystemConfigOper(db).set(SystemConfigKey.UserInstalledPlugins, install_plugins)
SystemConfigOper().set(SystemConfigKey.UserInstalledPlugins, install_plugins)
# 重载插件管理器
PluginManager().init_config()
return schemas.Response(success=True)
@@ -93,19 +89,18 @@ def set_plugin_config(plugin_id: str, conf: dict,
@router.delete("/{plugin_id}", summary="卸载插件", response_model=schemas.Response)
def uninstall_plugin(plugin_id: str,
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
卸载插件
"""
# 删除已安装信息
install_plugins = SystemConfigOper(db).get(SystemConfigKey.UserInstalledPlugins) or []
install_plugins = SystemConfigOper().get(SystemConfigKey.UserInstalledPlugins) or []
for plugin in install_plugins:
if plugin == plugin_id:
install_plugins.remove(plugin)
break
# 保存
SystemConfigOper(db).set(SystemConfigKey.UserInstalledPlugins, install_plugins)
SystemConfigOper().set(SystemConfigKey.UserInstalledPlugins, install_plugins)
# 重载插件管理器
PluginManager().init_config()
return schemas.Response(success=True)

View File

@@ -1,135 +0,0 @@
from typing import List, Any
from fastapi import APIRouter, Depends
from sqlalchemy.orm import Session
from starlette.background import BackgroundTasks
from app import schemas
from app.chain.rss import RssChain
from app.core.security import verify_token
from app.db import get_db
from app.db.models.rss import Rss
from app.helper.rss import RssHelper
from app.schemas import MediaType
router = APIRouter()
def start_rss_refresh(db: Session, rssid: int = None):
"""
启动自定义订阅刷新
"""
RssChain(db).refresh(rssid=rssid, manual=True)
@router.get("/", summary="所有自定义订阅", response_model=List[schemas.Rss])
def read_rsses(
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
查询所有自定义订阅
"""
return Rss.list(db)
@router.post("/", summary="新增自定义订阅", response_model=schemas.Response)
def create_rss(
*,
rss_in: schemas.Rss,
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)
) -> Any:
"""
新增自定义订阅
"""
if rss_in.type:
mtype = MediaType(rss_in.type)
else:
mtype = None
rssid, errormsg = RssChain(db).add(
mtype=mtype,
**rss_in.dict()
)
if not rssid:
return schemas.Response(success=False, message=errormsg)
return schemas.Response(success=True, data={
"id": rssid
})
@router.put("/", summary="更新自定义订阅", response_model=schemas.Response)
def update_rss(
*,
rss_in: schemas.Rss,
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)
) -> Any:
"""
更新自定义订阅信息
"""
rss = Rss.get(db, rss_in.id)
if not rss:
return schemas.Response(success=False, message="自定义订阅不存在")
rss.update(db, rss_in.dict())
return schemas.Response(success=True)
@router.get("/preview/{rssid}", summary="预览自定义订阅", response_model=List[schemas.TorrentInfo])
def preview_rss(
rssid: int,
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
根据ID查询自定义订阅RSS报文
"""
rssinfo: Rss = Rss.get(db, rssid)
if not rssinfo:
return []
torrents = RssHelper.parse(rssinfo.url, proxy=True if rssinfo.proxy else False) or []
return [schemas.TorrentInfo(
title=t.get("title"),
description=t.get("description"),
enclosure=t.get("enclosure"),
size=t.get("size"),
page_url=t.get("link"),
pubdate=t["pubdate"].strftime("%Y-%m-%d %H:%M:%S") if t.get("pubdate") else None,
) for t in torrents]
@router.get("/refresh/{rssid}", summary="刷新自定义订阅", response_model=schemas.Response)
def refresh_rss(
rssid: int,
background_tasks: BackgroundTasks,
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
根据ID刷新自定义订阅
"""
background_tasks.add_task(start_rss_refresh,
db=db,
rssid=rssid)
return schemas.Response(success=True)
@router.get("/{rssid}", summary="查询自定义订阅详情", response_model=schemas.Rss)
def read_rss(
rssid: int,
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
根据ID查询自定义订阅详情
"""
return Rss.get(db, rssid)
@router.delete("/{rssid}", summary="删除自定义订阅", response_model=schemas.Response)
def read_rss(
rssid: int,
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
根据ID删除自定义订阅
"""
Rss.delete(db, rssid)
return schemas.Response(success=True)

View File

@@ -1,6 +1,6 @@
from typing import List, Any
from fastapi import APIRouter, Depends, HTTPException
from fastapi import APIRouter, Depends
from sqlalchemy.orm import Session
from app import schemas
@@ -26,6 +26,7 @@ async def search_latest(db: Session = Depends(get_db),
@router.get("/media/{mediaid}", summary="精确搜索资源", response_model=List[schemas.Context])
def search_by_tmdbid(mediaid: str,
mtype: str = None,
area: str = "title",
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
@@ -35,15 +36,16 @@ def search_by_tmdbid(mediaid: str,
tmdbid = int(mediaid.replace("tmdb:", ""))
if mtype:
mtype = MediaType(mtype)
torrents = SearchChain(db).search_by_tmdbid(tmdbid=tmdbid, mtype=mtype)
torrents = SearchChain(db).search_by_tmdbid(tmdbid=tmdbid, mtype=mtype, area=area)
elif mediaid.startswith("douban:"):
doubanid = mediaid.replace("douban:", "")
# 识别豆瓣信息
context = DoubanChain(db).recognize_by_doubanid(doubanid)
if not context or not context.media_info or not context.media_info.tmdb_id:
raise HTTPException(status_code=404, detail="无法识别TMDB媒体信息")
return []
torrents = SearchChain(db).search_by_tmdbid(tmdbid=context.media_info.tmdb_id,
mtype=context.media_info.type)
mtype=context.media_info.type,
area=area)
else:
return []
return [torrent.to_dict() for torrent in torrents]

View File

@@ -5,28 +5,22 @@ from sqlalchemy.orm import Session
from starlette.background import BackgroundTasks
from app import schemas
from app.chain.cookiecloud import CookieCloudChain
from app.chain.search import SearchChain
from app.chain.site import SiteChain
from app.chain.torrents import TorrentsChain
from app.core.event import EventManager
from app.core.security import verify_token
from app.db import get_db
from app.db.models.site import Site
from app.db.models.siteicon import SiteIcon
from app.db.systemconfig_oper import SystemConfigOper
from app.helper.sites import SitesHelper
from app.schemas.types import SystemConfigKey
from app.scheduler import Scheduler
from app.schemas.types import SystemConfigKey, EventType
from app.utils.string import StringUtils
router = APIRouter()
def start_cookiecloud_sync(db: Session):
"""
后台启动CookieCloud站点同步
"""
CookieCloudChain(db).process(manual=True)
@router.get("/", summary="所有站点", response_model=List[schemas.Site])
def read_sites(db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)) -> List[dict]:
@@ -37,7 +31,7 @@ def read_sites(db: Session = Depends(get_db),
@router.post("/", summary="新增站点", response_model=schemas.Response)
def update_site(
def add_site(
*,
db: Session = Depends(get_db),
site_in: schemas.Site,
@@ -90,17 +84,21 @@ def delete_site(
删除站点
"""
Site.delete(db, site_id)
# 插件站点删除
EventManager().send_event(EventType.SiteDeleted,
{
"site_id": site_id
})
return schemas.Response(success=True)
@router.get("/cookiecloud", summary="CookieCloud同步", response_model=schemas.Response)
def cookie_cloud_sync(background_tasks: BackgroundTasks,
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
运行CookieCloud同步站点信息
"""
background_tasks.add_task(start_cookiecloud_sync, db)
background_tasks.add_task(Scheduler().start, job_id="cookiecloud")
return schemas.Response(success=True, message="CookieCloud同步任务已启动")
@@ -111,8 +109,15 @@ def cookie_cloud_sync(db: Session = Depends(get_db),
清空所有站点数据并重新同步CookieCloud站点信息
"""
Site.reset(db)
SystemConfigOper(db).set(SystemConfigKey.IndexerSites, [])
CookieCloudChain(db).process(manual=True)
SystemConfigOper().set(SystemConfigKey.IndexerSites, [])
SystemConfigOper().set(SystemConfigKey.RssSites, [])
# 启动定时服务
Scheduler().start("cookiecloud", manual=True)
# 插件站点删除
EventManager().send_event(EventType.SiteDeleted,
{
"site_id": None
})
return schemas.Response(success=True, message="站点已重置!")
@@ -179,7 +184,7 @@ def site_icon(site_id: int,
@router.get("/resource/{site_id}", summary="站点资源", response_model=List[schemas.TorrentInfo])
def site_resource(site_id: int, keyword: str = None,
def site_resource(site_id: int,
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
@@ -191,7 +196,7 @@ def site_resource(site_id: int, keyword: str = None,
status_code=404,
detail=f"站点 {site_id} 不存在",
)
torrents = SearchChain(db).browse(site.domain, keyword)
torrents = TorrentsChain().browse(domain=site.domain)
if not torrents:
return []
return [torrent.to_dict() for torrent in torrents]
@@ -216,6 +221,23 @@ def read_site_by_domain(
return site
@router.get("/rss", summary="所有订阅站点", response_model=List[schemas.Site])
def read_rss_sites(db: Session = Depends(get_db)) -> List[dict]:
"""
获取站点列表
"""
# 选中的rss站点
selected_sites = SystemConfigOper().get(SystemConfigKey.RssSites) or []
# 所有站点
all_site = Site.list_order_by_pri(db)
if not selected_sites or not all_site:
return []
# 选中的rss站点
rss_sites = [site for site in all_site if site and site.id in selected_sites]
return rss_sites
@router.get("/{site_id}", summary="站点详情", response_model=schemas.Site)
def read_site(
site_id: int,

View File

@@ -1,5 +1,5 @@
import json
from typing import List, Any, Optional
from typing import List, Any
from fastapi import APIRouter, Request, BackgroundTasks, Depends, HTTPException, Header
from sqlalchemy.orm import Session
@@ -12,6 +12,7 @@ from app.db import get_db
from app.db.models.subscribe import Subscribe
from app.db.models.user import User
from app.db.userauth import get_current_active_user
from app.scheduler import Scheduler
from app.schemas.types import MediaType
router = APIRouter()
@@ -26,13 +27,6 @@ def start_subscribe_add(db: Session, title: str, year: str,
mtype=mtype, tmdbid=tmdbid, season=season, username=username)
def start_subscribe_search(db: Session, sid: Optional[int], state: Optional[str]):
"""
启动订阅搜索任务
"""
SubscribeChain(db).search(sid=sid, state=state, manual=True)
@router.get("/", summary="所有订阅", response_model=List[schemas.Subscribe])
def read_subscribes(
db: Session = Depends(get_db),
@@ -94,7 +88,7 @@ def update_subscribe(
subscribe = Subscribe.get(db, subscribe_in.id)
if not subscribe:
return schemas.Response(success=False, message="订阅不存在")
if subscribe_in.sites:
if subscribe_in.sites is not None:
subscribe_in.sites = json.dumps(subscribe_in.sites)
# 避免更新缺失集数
subscribe_dict = subscribe_in.dict()
@@ -121,9 +115,15 @@ def subscribe_mediaid(
根据TMDBID或豆瓣ID查询订阅 tmdb:/douban:
"""
if mediaid.startswith("tmdb:"):
result = Subscribe.exists(db, int(mediaid[5:]), season)
tmdbid = mediaid[5:]
if not tmdbid or not str(tmdbid).isdigit():
return Subscribe()
result = Subscribe.exists(db, int(tmdbid), season)
elif mediaid.startswith("douban:"):
result = Subscribe.get_by_doubanid(db, mediaid[7:])
doubanid = mediaid[7:]
if not doubanid:
return Subscribe()
result = Subscribe.get_by_doubanid(db, doubanid)
else:
result = None
if result and result.sites:
@@ -132,6 +132,61 @@ def subscribe_mediaid(
return result if result else Subscribe()
@router.get("/refresh", summary="刷新订阅", response_model=schemas.Response)
def refresh_subscribes(
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
刷新所有订阅
"""
Scheduler().start("subscribe_refresh")
return schemas.Response(success=True)
@router.get("/check", summary="刷新订阅 TMDB 信息", response_model=schemas.Response)
def check_subscribes(
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
刷新订阅 TMDB 信息
"""
Scheduler().start("subscribe_tmdb")
return schemas.Response(success=True)
@router.get("/search", summary="搜索所有订阅", response_model=schemas.Response)
def search_subscribes(
background_tasks: BackgroundTasks,
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
搜索所有订阅
"""
background_tasks.add_task(
Scheduler().start,
job_id="subscribe_search",
sid=None,
state='R',
manual=True
)
return schemas.Response(success=True)
@router.get("/search/{subscribe_id}", summary="搜索订阅", response_model=schemas.Response)
def search_subscribe(
subscribe_id: int,
background_tasks: BackgroundTasks,
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
根据订阅编号搜索订阅
"""
background_tasks.add_task(
Scheduler().start,
job_id="subscribe_search",
sid=subscribe_id,
state=None,
manual=True
)
return schemas.Response(success=True)
@router.get("/{subscribe_id}", summary="订阅详情", response_model=schemas.Subscribe)
def read_subscribe(
subscribe_id: int,
@@ -157,9 +212,15 @@ def delete_subscribe_by_mediaid(
根据TMDBID或豆瓣ID删除订阅 tmdb:/douban:
"""
if mediaid.startswith("tmdb:"):
Subscribe().delete_by_tmdbid(db, int(mediaid[5:]), season)
tmdbid = mediaid[5:]
if not tmdbid or not str(tmdbid).isdigit():
return schemas.Response(success=False)
Subscribe().delete_by_tmdbid(db, int(tmdbid), season)
elif mediaid.startswith("douban:"):
Subscribe().delete_by_doubanid(db, mediaid[7:])
doubanid = mediaid[7:]
if not doubanid:
return schemas.Response(success=False)
Subscribe().delete_by_doubanid(db, doubanid)
return schemas.Response(success=True)
@@ -231,39 +292,3 @@ async def seerr_subscribe(request: Request, background_tasks: BackgroundTasks,
username=user_name)
return schemas.Response(success=True)
@router.get("/refresh", summary="刷新订阅", response_model=schemas.Response)
def refresh_subscribes(
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
刷新所有订阅
"""
SubscribeChain(db).refresh()
return schemas.Response(success=True)
@router.get("/search/{subscribe_id}", summary="搜索订阅", response_model=schemas.Response)
def search_subscribe(
subscribe_id: int,
background_tasks: BackgroundTasks,
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
搜索所有订阅
"""
background_tasks.add_task(start_subscribe_search, db=db, sid=subscribe_id, state=None)
return schemas.Response(success=True)
@router.get("/search", summary="搜索所有订阅", response_model=schemas.Response)
def search_subscribes(
background_tasks: BackgroundTasks,
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
搜索所有订阅
"""
background_tasks.add_task(start_subscribe_search, db=db, sid=None, state='R')
return schemas.Response(success=True)

View File

@@ -1,28 +1,32 @@
import json
import time
import tailer
from datetime import datetime
from typing import Union
import tailer
from fastapi import APIRouter, HTTPException, Depends
from fastapi.responses import StreamingResponse
from sqlalchemy.orm import Session
from app import schemas
from app.chain.search import SearchChain
from app.core.config import settings
from app.core.security import verify_token
from app.db import get_db
from app.db.systemconfig_oper import SystemConfigOper
from app.helper.message import MessageHelper
from app.helper.progress import ProgressHelper
from app.scheduler import Scheduler
from app.schemas.types import SystemConfigKey
from app.utils.http import RequestUtils
from app.utils.system import SystemUtils
from version import APP_VERSION
router = APIRouter()
@router.get("/env", summary="查询系统环境变量", response_model=schemas.Response)
def get_setting(_: schemas.TokenPayload = Depends(verify_token)):
def get_env_setting(_: schemas.TokenPayload = Depends(verify_token)):
"""
查询系统环境变量,包括当前版本号
"""
@@ -60,29 +64,27 @@ def get_progress(process_type: str, token: str):
@router.get("/setting/{key}", summary="查询系统设置", response_model=schemas.Response)
def get_setting(key: str,
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)):
"""
查询系统设置
"""
return schemas.Response(success=True, data={
"value": SystemConfigOper(db).get(key)
"value": SystemConfigOper().get(key)
})
@router.post("/setting/{key}", summary="更新系统设置", response_model=schemas.Response)
def set_setting(key: str, value: Union[list, dict, str, int] = None,
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)):
"""
更新系统设置
"""
SystemConfigOper(db).set(key, value)
SystemConfigOper().set(key, value)
return schemas.Response(success=True)
@router.get("/message", summary="实时消息")
def get_progress(token: str):
def get_message(token: str):
"""
实时获取系统消息返回格式为SSE
"""
@@ -166,3 +168,62 @@ def latest_version(_: schemas.TokenPayload = Depends(verify_token)):
if ver_json:
return schemas.Response(success=True, data=ver_json)
return schemas.Response(success=False)
@router.get("/ruletest", summary="优先级规则测试", response_model=schemas.Response)
def ruletest(title: str,
subtitle: str = None,
ruletype: str = None,
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)):
"""
过滤规则测试,规则类型 1-订阅2-洗版3-搜索
"""
torrent = schemas.TorrentInfo(
title=title,
description=subtitle,
)
if ruletype == "2":
rule_string = SystemConfigOper().get(SystemConfigKey.BestVersionFilterRules)
elif ruletype == "3":
rule_string = SystemConfigOper().get(SystemConfigKey.SearchFilterRules)
else:
rule_string = SystemConfigOper().get(SystemConfigKey.SubscribeFilterRules)
if not rule_string:
return schemas.Response(success=False, message="优先级规则未设置!")
# 过滤
result = SearchChain(db).filter_torrents(rule_string=rule_string,
torrent_list=[torrent])
if not result:
return schemas.Response(success=False, message="不符合优先级规则!")
return schemas.Response(success=True, data={
"priority": 100 - result[0].pri_order + 1
})
@router.get("/restart", summary="重启系统", response_model=schemas.Response)
def restart_system(_: schemas.TokenPayload = Depends(verify_token)):
"""
重启系统
"""
if not SystemUtils.can_restart():
return schemas.Response(success=False, message="当前运行环境不支持重启操作!")
# 执行重启
ret, msg = SystemUtils.restart()
return schemas.Response(success=ret, message=msg)
@router.get("/runscheduler", summary="运行服务", response_model=schemas.Response)
def execute_command(jobid: str,
_: schemas.TokenPayload = Depends(verify_token)):
"""
执行命令
"""
if not jobid:
return schemas.Response(success=False, message="命令不能为空!")
if jobid == "subscribe_search":
Scheduler().start(jobid, state = 'R')
else:
Scheduler().start(jobid)
return schemas.Response(success=True)

View File

@@ -0,0 +1,79 @@
from pathlib import Path
from typing import Any
from fastapi import APIRouter, Depends
from sqlalchemy.orm import Session
from app import schemas
from app.chain.transfer import TransferChain
from app.core.security import verify_token
from app.db import get_db
from app.schemas import MediaType
router = APIRouter()
@router.post("/manual", summary="手动转移", response_model=schemas.Response)
def manual_transfer(path: str,
target: str = None,
tmdbid: int = None,
type_name: str = None,
season: int = None,
transfer_type: str = None,
episode_format: str = None,
episode_detail: str = None,
episode_part: str = None,
episode_offset: int = 0,
min_filesize: int = 0,
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
手动转移,支持自定义剧集识别格式
:param path: 转移路径或文件
:param target: 目标路径
:param type_name: 媒体类型、电影/电视剧
:param tmdbid: tmdbid
:param season: 剧集季号
:param transfer_type: 转移类型move/copy
:param episode_format: 剧集识别格式
:param episode_detail: 剧集识别详细信息
:param episode_part: 剧集识别分集信息
:param episode_offset: 剧集识别偏移量
:param min_filesize: 最小文件大小(MB)
:param db: 数据库
:param _: Token校验
"""
in_path = Path(path)
if target:
target = Path(target)
if not target.exists():
return schemas.Response(success=False, message=f"目标路径不存在")
# 类型
mtype = MediaType(type_name) if type_name else None
# 自定义格式
epformat = None
if episode_offset or episode_part or episode_detail or episode_format:
epformat = schemas.EpisodeFormat(
format=episode_format,
detail=episode_detail,
part=episode_part,
offset=episode_offset,
)
# 开始转移
state, errormsg = TransferChain(db).manual_transfer(
in_path=in_path,
target=target,
tmdbid=tmdbid,
mtype=mtype,
season=season,
transfer_type=transfer_type,
epformat=epformat,
min_filesize=min_filesize
)
# 失败
if not state:
if isinstance(errormsg, list):
errormsg = f"整理完成,{len(errormsg)} 个文件转移失败!"
return schemas.Response(success=False, message=errormsg)
# 成功
return schemas.Response(success=True)

View File

@@ -1,7 +1,7 @@
from typing import Any, List
from fastapi import APIRouter, HTTPException, Depends
from requests import Session
from sqlalchemy.orm import Session
from app import schemas
from app.chain.media import MediaChain
@@ -76,7 +76,7 @@ def arr_system_status(apikey: str) -> Any:
}
@arr_router.get("/qualityprofile", summary="质量配置")
@arr_router.get("/qualityProfile", summary="质量配置")
def arr_qualityProfile(apikey: str) -> Any:
"""
模拟Radarr、Sonarr质量配置
@@ -132,13 +132,10 @@ def arr_rootfolder(apikey: str) -> Any:
status_code=403,
detail="认证失败!",
)
library_path = "/"
if settings.LIBRARY_PATH:
library_path = settings.LIBRARY_PATH.split(",")[0]
return [
{
"id": 1,
"path": library_path,
"path": "/" if not settings.LIBRARY_PATHS else str(settings.LIBRARY_PATHS[0]),
"accessible": True,
"freeSpace": 0,
"unmappedFolders": []
@@ -687,7 +684,7 @@ def arr_serie(apikey: str, tid: int, db: Session = Depends(get_db)) -> Any:
"monitored": True,
}],
year=subscribe.year,
remotePoster=subscribe.image,
remotePoster=subscribe.poster,
tmdbId=subscribe.tmdbid,
tvdbId=subscribe.tvdbid,
imdbId=subscribe.imdbid,

View File

@@ -18,7 +18,7 @@ from app.core.meta import MetaBase
from app.core.module import ModuleManager
from app.log import logger
from app.schemas import TransferInfo, TransferTorrent, ExistMediaInfo, DownloadingTorrent, CommingMessage, Notification, \
WebhookEventInfo
WebhookEventInfo, TmdbEpisode
from app.schemas.types import TorrentStatus, MediaType, MediaImageType, EventType
from app.utils.object import ObjectUtils
@@ -97,8 +97,8 @@ class ChainBase(metaclass=ABCMeta):
if isinstance(temp, list):
result.extend(temp)
else:
# 返回结果非列表也非空,则继续执行下一模块
continue
# 中止继续执行
break
except Exception as err:
logger.error(f"运行模块 {method} 出错:{module.__class__.__name__} - {err}\n{traceback.print_exc()}")
return result
@@ -115,6 +115,17 @@ class ChainBase(metaclass=ABCMeta):
"""
return self.run_module("recognize_media", meta=meta, mtype=mtype, tmdbid=tmdbid)
def match_doubaninfo(self, name: str, mtype: str = None,
year: str = None, season: int = None) -> Optional[dict]:
"""
搜索和匹配豆瓣信息
:param name: 标题
:param mtype: 类型
:param year: 年份
:param season: 季
"""
return self.run_module("match_doubaninfo", name=name, mtype=mtype, year=year, season=season)
def obtain_images(self, mediainfo: MediaInfo) -> Optional[MediaInfo]:
"""
补充抓取媒体信息图片
@@ -197,19 +208,19 @@ class ChainBase(metaclass=ABCMeta):
return self.run_module("search_medias", meta=meta)
def search_torrents(self, site: CommentedMap,
mediainfo: Optional[MediaInfo] = None,
keyword: str = None,
keywords: List[str],
mtype: MediaType = None,
page: int = 0) -> List[TorrentInfo]:
"""
搜索一个站点的种子资源
:param site: 站点
:param mediainfo: 识别的媒体信息
:param keyword: 搜索关键词,如有按关键词搜索,否则按媒体信息名称搜索
:param keywords: 搜索关键词列表
:param mtype: 媒体类型
:param page: 页码
:reutrn: 资源列表
"""
return self.run_module("search_torrents", mediainfo=mediainfo, site=site,
keyword=keyword, page=page)
return self.run_module("search_torrents", site=site, keywords=keywords,
mtype=mtype, page=page)
def refresh_torrents(self, site: CommentedMap) -> List[TorrentInfo]:
"""
@@ -221,43 +232,45 @@ class ChainBase(metaclass=ABCMeta):
def filter_torrents(self, rule_string: str,
torrent_list: List[TorrentInfo],
season_episodes: Dict[int, list] = None) -> List[TorrentInfo]:
season_episodes: Dict[int, list] = None,
mediainfo: MediaInfo = None) -> List[TorrentInfo]:
"""
过滤种子资源
:param rule_string: 过滤规则
:param torrent_list: 资源列表
:param season_episodes: 季集数过滤 {season:[episodes]}
:param mediainfo: 识别的媒体信息
:return: 过滤后的资源列表,添加资源优先级
"""
return self.run_module("filter_torrents", rule_string=rule_string,
torrent_list=torrent_list, season_episodes=season_episodes)
torrent_list=torrent_list, season_episodes=season_episodes,
mediainfo=mediainfo)
def download(self, torrent_path: Path, download_dir: Path, cookie: str,
episodes: Set[int] = None,
def download(self, content: Union[Path, str], download_dir: Path, cookie: str,
episodes: Set[int] = None, category: str = None
) -> Optional[Tuple[Optional[str], str]]:
"""
根据种子文件,选择并添加下载任务
:param torrent_path: 种子文件地址
:param content: 种子文件地址或者磁力链接
:param download_dir: 下载目录
:param cookie: cookie
:param episodes: 需要下载的集数
:param category: 种子分类
:return: 种子Hash错误信息
"""
return self.run_module("download", torrent_path=torrent_path, download_dir=download_dir,
cookie=cookie, episodes=episodes, )
return self.run_module("download", content=content, download_dir=download_dir,
cookie=cookie, episodes=episodes, category=category)
def download_added(self, context: Context, torrent_path: Path, download_dir: Path) -> None:
def download_added(self, context: Context, download_dir: Path, torrent_path: Path = None) -> None:
"""
添加下载任务成功后,从站点下载字幕,保存到下载目录
:param context: 上下文,包括识别信息、媒体信息、种子信息
:param torrent_path: 种子文件地址
:param download_dir: 下载目录
:param torrent_path: 种子文件地址
:return: None该方法可被多个模块同时处理
"""
if settings.DOWNLOAD_SUBTITLE:
return self.run_module("download_added", context=context, torrent_path=torrent_path,
download_dir=download_dir)
return None
return self.run_module("download_added", context=context, torrent_path=torrent_path,
download_dir=download_dir)
def list_torrents(self, status: TorrentStatus = None,
hashs: Union[list, str] = None) -> Optional[List[Union[TransferTorrent, DownloadingTorrent]]]:
@@ -269,29 +282,30 @@ class ChainBase(metaclass=ABCMeta):
"""
return self.run_module("list_torrents", status=status, hashs=hashs)
def transfer(self, path: Path, mediainfo: MediaInfo,
transfer_type: str,
target: Path = None,
meta: MetaBase = None) -> Optional[TransferInfo]:
def transfer(self, path: Path, meta: MetaBase, mediainfo: MediaInfo,
transfer_type: str, target: Path = None,
episodes_info: List[TmdbEpisode] = None) -> Optional[TransferInfo]:
"""
文件转移
:param path: 文件路径
:param meta: 预识别的元数据
:param mediainfo: 识别的媒体信息
:param transfer_type: 转移模式
:param target: 转移目标路径
:param meta: 预识别的元数据,仅单文件转移时传递
:param episodes_info: 当前季的全部集信息
:return: {path, target_path, message}
"""
return self.run_module("transfer", path=path, mediainfo=mediainfo,
transfer_type=transfer_type, target=target, meta=meta)
return self.run_module("transfer", path=path, meta=meta, mediainfo=mediainfo,
transfer_type=transfer_type, target=target,
episodes_info=episodes_info)
def transfer_completed(self, hashs: Union[str, list], transinfo: TransferInfo) -> None:
def transfer_completed(self, hashs: Union[str, list], path: Path = None) -> None:
"""
转移完成后的处理
:param hashs: 种子Hash
:param transinfo: 转移信息
:param path: 源目录
"""
return self.run_module("transfer_completed", hashs=hashs, transinfo=transinfo)
return self.run_module("transfer_completed", hashs=hashs, path=path)
def remove_torrents(self, hashs: Union[str, list]) -> bool:
"""
@@ -319,7 +333,7 @@ class ChainBase(metaclass=ABCMeta):
def torrent_files(self, tid: str) -> Optional[Union[TorrentFilesList, List[File]]]:
"""
根据种子文件,选择并添加下载任务
获取种子文件
:param tid: 种子Hash
:return: 种子文件
"""
@@ -334,18 +348,16 @@ class ChainBase(metaclass=ABCMeta):
"""
return self.run_module("media_exists", mediainfo=mediainfo, itemid=itemid)
def refresh_mediaserver(self, mediainfo: MediaInfo, file_path: Path) -> Optional[bool]:
def refresh_mediaserver(self, mediainfo: MediaInfo, file_path: Path) -> None:
"""
刷新媒体库
:param mediainfo: 识别的媒体信息
:param file_path: 文件路径
:return: 成功或失败
"""
if settings.REFRESH_MEDIASERVER:
return self.run_module("refresh_mediaserver", mediainfo=mediainfo, file_path=file_path)
return None
self.run_module("refresh_mediaserver", mediainfo=mediainfo, file_path=file_path)
def post_message(self, message: Notification) -> Optional[bool]:
def post_message(self, message: Notification) -> None:
"""
发送消息
:param message: 消息体
@@ -364,7 +376,7 @@ class ChainBase(metaclass=ABCMeta):
f"title={message.title}, "
f"text={message.text}"
f"userid={message.userid}")
return self.run_module("post_message", message=message)
self.run_module("post_message", message=message)
def post_medias_message(self, message: Notification, medias: List[MediaInfo]) -> Optional[bool]:
"""
@@ -391,18 +403,22 @@ class ChainBase(metaclass=ABCMeta):
:param mediainfo: 识别的媒体信息
:return: 成功或失败
"""
if settings.SCRAP_METADATA:
return self.run_module("scrape_metadata", path=path, mediainfo=mediainfo)
return None
self.run_module("scrape_metadata", path=path, mediainfo=mediainfo)
def register_commands(self, commands: dict) -> None:
def register_commands(self, commands: Dict[str, dict]) -> None:
"""
注册菜单命令
"""
return self.run_module("register_commands", commands=commands)
self.run_module("register_commands", commands=commands)
def scheduler_job(self) -> None:
"""
定时任务每10分钟调用一次模块实现该接口以实现定时服务
"""
return self.run_module("scheduler_job")
self.run_module("scheduler_job")
def clear_cache(self) -> None:
"""
清理缓存,模块实现该接口响应清理缓存事件
"""
self.run_module("clear_cache")

View File

@@ -1,5 +1,5 @@
import base64
from typing import Tuple, Optional, Union
from typing import Tuple, Optional
from urllib.parse import urljoin
from lxml import etree
@@ -8,15 +8,14 @@ from sqlalchemy.orm import Session
from app.chain import ChainBase
from app.chain.site import SiteChain
from app.core.config import settings
from app.db.siteicon_oper import SiteIconOper
from app.db.site_oper import SiteOper
from app.helper.browser import PlaywrightHelper
from app.db.siteicon_oper import SiteIconOper
from app.helper.cloudflare import under_challenge
from app.helper.cookiecloud import CookieCloudHelper
from app.helper.message import MessageHelper
from app.helper.rss import RssHelper
from app.helper.sites import SitesHelper
from app.log import logger
from app.schemas import Notification, NotificationType, MessageChannel
from app.utils.http import RequestUtils
from app.utils.site import SiteUtils
@@ -31,6 +30,7 @@ class CookieCloudChain(ChainBase):
self.siteoper = SiteOper(self._db)
self.siteiconoper = SiteIconOper(self._db)
self.siteshelper = SitesHelper()
self.rsshelper = RssHelper()
self.sitechain = SiteChain(self._db)
self.message = MessageHelper()
self.cookiecloud = CookieCloudHelper(
@@ -39,21 +39,6 @@ class CookieCloudChain(ChainBase):
password=settings.COOKIECLOUD_PASSWORD
)
def remote_sync(self, channel: MessageChannel, userid: Union[int, str]):
"""
远程触发同步站点,发送消息
"""
self.post_message(Notification(channel=channel, mtype=NotificationType.SiteMessage,
title="开始同步CookieCloud站点 ...", userid=userid))
# 开始同步
success, msg = self.process()
if success:
self.post_message(Notification(channel=channel, mtype=NotificationType.SiteMessage,
title=f"同步站点成功,{msg}", userid=userid))
else:
self.post_message(Notification(channel=channel, mtype=NotificationType.SiteMessage,
title=f"同步站点失败:{msg}", userid=userid))
def process(self, manual=False) -> Tuple[bool, str]:
"""
通过CookieCloud同步站点Cookie
@@ -72,14 +57,30 @@ class CookieCloudChain(ChainBase):
for domain, cookie in cookies.items():
# 获取站点信息
indexer = self.siteshelper.get_indexer(domain)
if self.siteoper.exists(domain):
site_info = self.siteoper.get_by_domain(domain)
if site_info:
# 检查站点连通性
status, msg = self.sitechain.test(domain)
# 更新站点Cookie
if status:
logger.info(f"站点【{indexer.get('name')}】连通性正常不同步CookieCloud数据")
logger.info(f"站点【{site_info.name}】连通性正常不同步CookieCloud数据")
# 更新站点rss地址
if not site_info.public and not site_info.rss:
# 自动生成rss地址
rss_url, errmsg = self.rsshelper.get_rss_link(
url=site_info.url,
cookie=cookie,
ua=settings.USER_AGENT,
proxy=True if site_info.proxy else False
)
if rss_url:
logger.info(f"更新站点 {domain} RSS地址 ...")
self.siteoper.update_rss(domain=domain, rss=rss_url)
else:
logger.warn(errmsg)
continue
# 更新站点Cookie
logger.info(f"更新站点 {domain} Cookie ...")
self.siteoper.update_cookie(domain=domain, cookies=cookie)
_update_count += 1
elif indexer:
@@ -104,12 +105,25 @@ class CookieCloudChain(ChainBase):
_fail_count += 1
logger.warn(f"站点 {indexer.get('name')} 连接失败,无法添加站点")
continue
# 获取rss地址
rss_url = None
if not indexer.get("public") and indexer.get("domain"):
# 自动生成rss地址
rss_url, errmsg = self.rsshelper.get_rss_link(url=indexer.get("domain"),
cookie=cookie,
ua=settings.USER_AGENT)
if errmsg:
logger.warn(errmsg)
# 插入数据库
logger.info(f"新增站点 {indexer.get('name')} ...")
self.siteoper.add(name=indexer.get("name"),
url=indexer.get("domain"),
domain=domain,
cookie=cookie,
rss=rss_url,
public=1 if indexer.get("public") else 0)
_add_count += 1
# 保存站点图标
if indexer:
site_icon = self.siteiconoper.get_by_domain(domain)

View File

@@ -1,3 +1,5 @@
from typing import Optional, List
from app import schemas
from app.chain import ChainBase
@@ -6,7 +8,7 @@ class DashboardChain(ChainBase):
"""
各类仪表板统计处理链
"""
def media_statistic(self) -> schemas.Statistic:
def media_statistic(self) -> Optional[List[schemas.Statistic]]:
"""
媒体数量统计
"""

View File

@@ -41,7 +41,7 @@ class DoubanChain(ChainBase):
if not mediainfo:
logger.warn(f'{meta.name} 未识别到TMDB媒体信息')
return Context(meta_info=meta, media_info=MediaInfo(douban_info=doubaninfo))
logger.info(f'识别到媒体信息:{mediainfo.type.value} {mediainfo.title_year}{meta.season}')
logger.info(f'识别到媒体信息:{mediainfo.type.value} {mediainfo.title_year} {meta.season}')
mediainfo.set_douban_info(doubaninfo)
return Context(meta_info=meta, media_info=mediainfo)

View File

@@ -1,4 +1,7 @@
import base64
import json
import re
import time
from pathlib import Path
from typing import List, Optional, Tuple, Set, Dict, Union
@@ -8,12 +11,14 @@ from app.chain import ChainBase
from app.core.config import settings
from app.core.context import MediaInfo, TorrentInfo, Context
from app.core.meta import MetaBase
from app.core.metainfo import MetaInfo
from app.db.downloadhistory_oper import DownloadHistoryOper
from app.db.mediaserver_oper import MediaServerOper
from app.helper.torrent import TorrentHelper
from app.log import logger
from app.schemas import ExistMediaInfo, NotExistMediaInfo, DownloadingTorrent, Notification
from app.schemas.types import MediaType, TorrentStatus, EventType, MessageChannel, NotificationType
from app.utils.http import RequestUtils
from app.utils.string import StringUtils
@@ -35,8 +40,10 @@ class DownloadChain(ChainBase):
发送添加下载的消息
"""
msg_text = ""
if userid:
msg_text = f"用户:{userid}"
if torrent.site_name:
msg_text = f"站点:{torrent.site_name}"
msg_text = f"{msg_text}\n站点:{torrent.site_name}"
if meta.resource_term:
msg_text = f"{msg_text}\n质量:{meta.resource_term}"
if torrent.size:
@@ -47,9 +54,12 @@ class DownloadChain(ChainBase):
msg_text = f"{msg_text}\n大小:{size}"
if torrent.title:
msg_text = f"{msg_text}\n种子:{torrent.title}"
if torrent.pubdate:
msg_text = f"{msg_text}\n发布时间:{torrent.pubdate}"
if torrent.seeders:
msg_text = f"{msg_text}\n做种数:{torrent.seeders}"
msg_text = f"{msg_text}\n促销:{torrent.volume_factor}"
if torrent.uploadvolumefactor and torrent.downloadvolumefactor:
msg_text = f"{msg_text}\n促销:{torrent.volume_factor}"
if torrent.hit_and_run:
msg_text = f"{msg_text}\nHit&Run"
if torrent.description:
@@ -64,30 +74,97 @@ class DownloadChain(ChainBase):
title=f"{mediainfo.title_year} "
f"{meta.season_episode} 开始下载",
text=msg_text,
image=mediainfo.get_message_image(),
userid=userid))
image=mediainfo.get_message_image()))
def download_torrent(self, torrent: TorrentInfo,
channel: MessageChannel = None,
userid: Union[str, int] = None) -> Tuple[Optional[Path], str, list]:
userid: Union[str, int] = None
) -> Tuple[Optional[Union[Path, str]], str, list]:
"""
下载种子文件
下载种子文件,如果是磁力链,会返回磁力链接本身
:return: 种子路径,种子目录名,种子文件清单
"""
torrent_file, _, download_folder, files, error_msg = self.torrent.download_torrent(
url=torrent.enclosure,
def __get_redict_url(url: str, ua: str = None, cookie: str = None) -> Optional[str]:
"""
获取下载链接, url格式[base64]url
"""
# 获取[]中的内容
m = re.search(r"\[(.*)](.*)", url)
if m:
# 参数
base64_str = m.group(1)
# URL
url = m.group(2)
if not base64_str:
return url
# 解码参数
req_str = base64.b64decode(base64_str.encode('utf-8')).decode('utf-8')
req_params: Dict[str, dict] = json.loads(req_str)
if req_params.get('method') == 'get':
# GET请求
res = RequestUtils(
ua=ua,
cookies=cookie
).get_res(url, params=req_params.get('params'))
else:
# POST请求
res = RequestUtils(
ua=ua,
cookies=cookie
).post_res(url, params=req_params.get('params'))
if not res:
return None
if not req_params.get('result'):
return res.text
else:
data = res.json()
for key in str(req_params.get('result')).split("."):
data = data.get(key)
if not data:
return None
logger.info(f"获取到下载地址:{data}")
return data
return None
# 获取下载链接
if not torrent.enclosure:
return None, "", []
if torrent.enclosure.startswith("magnet:"):
return torrent.enclosure, "", []
if torrent.enclosure.startswith("["):
# 需要解码获取下载地址
torrent_url = __get_redict_url(url=torrent.enclosure,
ua=torrent.site_ua,
cookie=torrent.site_cookie)
else:
torrent_url = torrent.enclosure
if not torrent_url:
logger.error(f"{torrent.title} 无法获取下载地址:{torrent.enclosure}")
return None, "", []
# 下载种子文件
torrent_file, content, download_folder, files, error_msg = self.torrent.download_torrent(
url=torrent_url,
cookie=torrent.site_cookie,
ua=torrent.site_ua,
proxy=torrent.site_proxy)
if isinstance(content, str):
# 磁力链
return content, "", []
if not torrent_file:
logger.error(f"下载种子文件失败:{torrent.title} - {torrent.enclosure}")
logger.error(f"下载种子文件失败:{torrent.title} - {torrent_url}")
self.post_message(Notification(
channel=channel,
mtype=NotificationType.Manual,
title=f"{torrent.title} 种子下载失败!",
text=f"错误信息:{error_msg}\n种子链接{torrent.enclosure}",
text=f"错误信息:{error_msg}\n站点{torrent.site_name}",
userid=userid))
return None, "", []
# 返回 种子文件路径,种子目录名,种子文件清单
return torrent_file, download_folder, files
def download_single(self, context: Context, torrent_file: Path = None,
@@ -97,46 +174,87 @@ class DownloadChain(ChainBase):
userid: Union[str, int] = None) -> Optional[str]:
"""
下载及发送通知
:param context: 资源上下文
:param torrent_file: 种子文件路径
:param episodes: 需要下载的集数
:param channel: 通知渠道
:param save_path: 保存路径
:param userid: 用户ID
"""
_torrent = context.torrent_info
_media = context.media_info
_meta = context.meta_info
_folder_name = ""
if not torrent_file:
# 下载种子文件
torrent_file, _folder_name, _ = self.download_torrent(_torrent, userid=userid)
if not torrent_file:
# 下载种子文件,得到的可能是文件也可能是磁力链
content, _folder_name, _file_list = self.download_torrent(_torrent,
channel=channel,
userid=userid)
if not content:
return
else:
content = torrent_file
# 获取种子文件的文件夹名和文件清单
_folder_name, _file_list = self.torrent.get_torrent_info(torrent_file)
# 下载目录
if not save_path:
if settings.DOWNLOAD_CATEGORY and _media and _media.category:
# 开启下载二级目录
if _media.type == MediaType.MOVIE:
# 电影
download_dir = Path(settings.DOWNLOAD_MOVIE_PATH or settings.DOWNLOAD_PATH) / _media.category
else:
download_dir = Path(settings.DOWNLOAD_TV_PATH or settings.DOWNLOAD_PATH) / _media.category
if settings.DOWNLOAD_ANIME_PATH \
and _media.genre_ids \
and set(_media.genre_ids).intersection(set(settings.ANIME_GENREIDS)):
# 动漫
download_dir = Path(settings.DOWNLOAD_ANIME_PATH)
else:
# 电视剧
download_dir = Path(settings.DOWNLOAD_TV_PATH or settings.DOWNLOAD_PATH) / _media.category
elif _media:
# 未开启下载二级目录
if _media.type == MediaType.MOVIE:
# 电影
download_dir = Path(settings.DOWNLOAD_MOVIE_PATH or settings.DOWNLOAD_PATH)
else:
download_dir = Path(settings.DOWNLOAD_TV_PATH or settings.DOWNLOAD_PATH)
if settings.DOWNLOAD_ANIME_PATH \
and _media.genre_ids \
and set(_media.genre_ids).intersection(set(settings.ANIME_GENREIDS)):
# 动漫
download_dir = Path(settings.DOWNLOAD_ANIME_PATH)
else:
# 电视剧
download_dir = Path(settings.DOWNLOAD_TV_PATH or settings.DOWNLOAD_PATH)
else:
# 未识别
download_dir = Path(settings.DOWNLOAD_PATH)
else:
# 自定义下载目录
download_dir = Path(save_path)
# 添加下载
result: Optional[tuple] = self.download(torrent_path=torrent_file,
result: Optional[tuple] = self.download(content=content,
cookie=_torrent.site_cookie,
episodes=episodes,
download_dir=download_dir)
download_dir=download_dir,
category=_media.category)
if result:
_hash, error_msg = result
else:
_hash, error_msg = None, "未知错误"
if _hash:
# 下载文件路径
if _folder_name:
download_path = download_dir / _folder_name
else:
download_path = download_dir / _file_list[0] if _file_list else download_dir
# 登记下载记录
self.downloadhis.add(
path=_folder_name or _torrent.title,
path=str(download_path),
type=_media.type.value,
title=_media.title,
year=_media.year,
@@ -150,16 +268,39 @@ class DownloadChain(ChainBase):
download_hash=_hash,
torrent_name=_torrent.title,
torrent_description=_torrent.description,
torrent_site=_torrent.site_name
torrent_site=_torrent.site_name,
userid=userid,
channel=channel.value if channel else None,
date=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
)
# 登记下载文件
files_to_add = []
for file in _file_list:
if episodes:
# 识别文件集
file_meta = MetaInfo(Path(file).stem)
if not file_meta.begin_episode \
or file_meta.begin_episode not in episodes:
continue
files_to_add.append({
"download_hash": _hash,
"downloader": settings.DOWNLOADER,
"fullpath": str(download_dir / _folder_name / file),
"savepath": str(download_dir / _folder_name),
"filepath": file,
"torrentname": _meta.org_string,
})
if files_to_add:
self.downloadhis.add_files(files_to_add)
# 发送消息
self.post_download_message(meta=_meta, mediainfo=_media, torrent=_torrent, channel=channel)
self.post_download_message(meta=_meta, mediainfo=_media, torrent=_torrent, channel=channel, userid=userid)
# 下载成功后处理
self.download_added(context=context, torrent_path=torrent_file, download_dir=download_dir)
self.download_added(context=context, download_dir=download_dir, torrent_path=torrent_file)
# 广播事件
self.eventmanager.send_event(EventType.DownloadAdded, {
"hash": _hash,
"torrent_file": torrent_file,
"context": context
})
else:
@@ -173,7 +314,6 @@ class DownloadChain(ChainBase):
% (_media.title_year, _meta.season_episode),
text=f"站点:{_torrent.site_name}\n"
f"种子名称:{_meta.org_string}\n"
f"种子链接:{_torrent.enclosure}\n"
f"错误信息:{error_msg}",
image=_media.get_message_image(),
userid=userid))
@@ -183,12 +323,14 @@ class DownloadChain(ChainBase):
contexts: List[Context],
no_exists: Dict[int, Dict[int, NotExistMediaInfo]] = None,
save_path: str = None,
channel: MessageChannel = None,
userid: str = None) -> Tuple[List[Context], Dict[int, Dict[int, NotExistMediaInfo]]]:
"""
根据缺失数据,自动种子列表中组合择优下载
:param contexts: 资源上下文列表
:param no_exists: 缺失的剧集信息
:param save_path: 保存路径
:param channel: 通知渠道
:param userid: 用户ID
:return: 已经下载的资源列表、剩余未下载到的剧集 no_exists[tmdb_id] = {season: NotExistMediaInfo}
"""
@@ -241,10 +383,10 @@ class DownloadChain(ChainBase):
获取需要的季的集数
"""
if not no_exists.get(tmdbid):
return 0
return 9999
no_exist = no_exists.get(tmdbid)
if not no_exist.get(season):
return 0
return 9999
return no_exist[season].total_episode
# 分组排序
@@ -253,7 +395,8 @@ class DownloadChain(ChainBase):
# 如果是电影,直接下载
for context in contexts:
if context.media_info.type == MediaType.MOVIE:
if self.download_single(context, save_path=save_path, userid=userid):
if self.download_single(context, save_path=save_path,
channel=channel, userid=userid):
# 下载成功
downloaded_list.append(context)
@@ -294,25 +437,39 @@ class DownloadChain(ChainBase):
if set(torrent_season).issubset(set(need_season)):
if len(torrent_season) == 1:
# 只有一季的可能是命名错误,需要打开种子鉴别,只有实际集数大于等于总集数才下载
torrent_path, _, torrent_files = self.download_torrent(torrent)
if not torrent_path:
content, _, torrent_files = self.download_torrent(torrent)
if not content:
continue
if isinstance(content, str):
logger.warn(f"{meta.org_string} 下载地址是磁力链,无法确定种子文件集数")
continue
torrent_episodes = self.torrent.get_torrent_episodes(torrent_files)
if not torrent_episodes \
or len(torrent_episodes) >= __get_season_episodes(need_tmdbid,
torrent_season[0]):
# 下载
download_id = self.download_single(context=context,
torrent_file=torrent_path,
save_path=save_path,
userid=userid)
else:
logger.info(
f"{meta.org_string} 解析文件集数为 {len(torrent_episodes)},未含所需集数")
logger.info(f"{meta.org_string} 解析文件集数为 {torrent_episodes}")
if not torrent_episodes:
continue
# 总集数
need_total = __get_season_episodes(need_tmdbid, torrent_season[0])
if len(torrent_episodes) < need_total:
# 更新集数范围
begin_ep = min(torrent_episodes)
end_ep = max(torrent_episodes)
meta.set_episodes(begin=begin_ep, end=end_ep)
logger.info(
f"{meta.org_string} 解析文件集数发现不是完整合集")
continue
else:
# 下载
download_id = self.download_single(
context=context,
torrent_file=content if isinstance(content, Path) else None,
save_path=save_path,
channel=channel,
userid=userid
)
else:
# 下载
download_id = self.download_single(context, save_path=save_path, userid=userid)
download_id = self.download_single(context, save_path=save_path,
channel=channel, userid=userid)
if download_id:
# 下载成功
@@ -370,7 +527,8 @@ class DownloadChain(ChainBase):
# 为需要集的子集则下载
if torrent_episodes.issubset(set(need_episodes)):
# 下载
download_id = self.download_single(context, save_path=save_path, userid=userid)
download_id = self.download_single(context, save_path=save_path,
channel=channel, userid=userid)
if download_id:
# 下载成功
downloaded_list.append(context)
@@ -426,22 +584,30 @@ class DownloadChain(ChainBase):
and len(meta.season_list) == 1 \
and meta.season_list[0] == need_season:
# 检查种子看是否有需要的集
torrent_path, _, torrent_files = self.download_torrent(torrent, userid=userid)
if not torrent_path:
content, _, torrent_files = self.download_torrent(torrent)
if not content:
continue
if isinstance(content, str):
logger.warn(f"{meta.org_string} 下载地址是磁力链,无法解析种子文件集数")
continue
# 种子全部集
torrent_episodes = self.torrent.get_torrent_episodes(torrent_files)
logger.info(f"{torrent.site_name} - {meta.org_string} 解析文件集数:{torrent_episodes}")
# 选中的集
selected_episodes = set(torrent_episodes).intersection(set(need_episodes))
if not selected_episodes:
logger.info(f"{torrent.site_name} - {torrent.title} 没有需要的集,跳过...")
continue
logger.info(f"{torrent.site_name} - {torrent.title} 选中集数:{selected_episodes}")
# 添加下载
download_id = self.download_single(context=context,
torrent_file=torrent_path,
episodes=selected_episodes,
save_path=save_path,
userid=userid)
download_id = self.download_single(
context=context,
torrent_file=content if isinstance(content, Path) else None,
episodes=selected_episodes,
save_path=save_path,
channel=channel,
userid=userid
)
if not download_id:
continue
# 把识别的集更新到上下文
@@ -460,13 +626,15 @@ class DownloadChain(ChainBase):
def get_no_exists_info(self, meta: MetaBase,
mediainfo: MediaInfo,
no_exists: Dict[int, Dict[int, NotExistMediaInfo]] = None
no_exists: Dict[int, Dict[int, NotExistMediaInfo]] = None,
totals: Dict[int, int] = None
) -> Tuple[bool, Dict[int, Dict[int, NotExistMediaInfo]]]:
"""
检查媒体库,查询是否存在,对于剧集同时返回不存在的季集信息
:param meta: 元数据
:param mediainfo: 已识别的媒体信息
:param no_exists: 在调用该方法前已经存储的不存在的季集信息,有传入时该函数搜索的内容将会叠加后输出
:param totals: 电视剧每季的总集数
:return: 当前媒体是否缺失,各标题总的季集和缺失的季集
"""
@@ -499,6 +667,10 @@ class DownloadChain(ChainBase):
if not no_exists:
no_exists = {}
if not totals:
totals = {}
if mediainfo.type == MediaType.MOVIE:
# 电影
itemid = self.mediaserver.get_item_id(mtype=mediainfo.type.value,
@@ -523,40 +695,54 @@ class DownloadChain(ChainBase):
itemid = self.mediaserver.get_item_id(mtype=mediainfo.type.value,
tmdbid=mediainfo.tmdb_id,
season=mediainfo.season)
# 媒体库已存在的剧集
exists_tvs: Optional[ExistMediaInfo] = self.media_exists(mediainfo=mediainfo, itemid=itemid)
if not exists_tvs:
# 所有集均缺失
# 所有集均缺失
for season, episodes in mediainfo.seasons.items():
if not episodes:
continue
# 全季不存在
if meta.begin_season \
if meta.season_list \
and season not in meta.season_list:
continue
__append_no_exists(_season=season, _episodes=[], _total=len(episodes), _start=min(episodes))
# 总集数
total_ep = totals.get(season) or len(episodes)
__append_no_exists(_season=season, _episodes=[],
_total=total_ep, _start=min(episodes))
return False, no_exists
else:
# 存在一些,检查缺失的季集
# 存在一些,检查每季缺失的季集
for season, episodes in mediainfo.seasons.items():
if meta.begin_season \
and season not in meta.season_list:
continue
if not episodes:
continue
exist_seasons = exists_tvs.seasons
if exist_seasons.get(season):
# 取差
lack_episodes = list(set(episodes).difference(set(exist_seasons[season])))
# 该季总集数
season_total = totals.get(season) or len(episodes)
# 该季已存在的
exist_episodes = exists_tvs.seasons.get(season)
if exist_episodes:
# 已存在取差集
if totals.get(season):
# 按总集数计算缺失集开始集为TMDB中的最小集
lack_episodes = list(set(range(min(episodes),
season_total + min(episodes))
).difference(set(exist_episodes)))
else:
# 按TMDB集数计算缺失集
lack_episodes = list(set(episodes).difference(set(exist_episodes)))
if not lack_episodes:
# 全部集存在
continue
# 添加不存在的季集信息
__append_no_exists(_season=season, _episodes=lack_episodes,
_total=len(episodes), _start=min(episodes))
_total=season_total, _start=min(lack_episodes))
else:
# 全季不存在
__append_no_exists(_season=season, _episodes=[],
_total=len(episodes), _start=min(episodes))
_total=season_total, _start=min(episodes))
# 存在不完整的剧集
if no_exists:
logger.debug(f"媒体库中已存在部分剧集,缺失:{no_exists}")
@@ -573,7 +759,8 @@ class DownloadChain(ChainBase):
self.post_message(Notification(
channel=channel,
mtype=NotificationType.Download,
title="没有正在下载的任务!"))
title="没有正在下载的任务!",
userid=userid))
return
# 发送消息
title = f"{len(torrents)} 个任务正在下载:"
@@ -582,7 +769,7 @@ class DownloadChain(ChainBase):
for torrent in torrents:
messages.append(f"{index}. {torrent.title} "
f"{StringUtils.str_filesize(torrent.size)} "
f"{round(torrent.progress * 100, 1)}%")
f"{round(torrent.progress, 1)}%")
index += 1
self.post_message(Notification(
channel=channel, mtype=NotificationType.Download,

View File

@@ -1,9 +1,10 @@
from pathlib import Path
from typing import Optional, List, Tuple
from app.chain import ChainBase
from app.core.context import Context, MediaInfo
from app.core.meta import MetaBase
from app.core.metainfo import MetaInfo
from app.core.metainfo import MetaInfo, MetaInfoPath
from app.log import logger
from app.utils.string import StringUtils
@@ -31,6 +32,25 @@ class MediaChain(ChainBase):
# 返回上下文
return Context(meta_info=metainfo, media_info=mediainfo)
def recognize_by_path(self, path: str) -> Optional[Context]:
"""
根据文件路径识别媒体信息
"""
logger.info(f'开始识别媒体信息,文件:{path} ...')
file_path = Path(path)
# 元数据
file_meta = MetaInfoPath(file_path)
# 识别媒体信息
mediainfo = self.recognize_media(meta=file_meta)
if not mediainfo:
logger.warn(f'{path} 未识别到媒体信息')
return Context(meta_info=file_meta)
logger.info(f'{path} 识别到媒体信息:{mediainfo.type.value} {mediainfo.title_year}')
# 更新媒体图片
self.obtain_images(mediainfo=mediainfo)
# 返回上下文
return Context(meta_info=file_meta, media_info=mediainfo)
def search(self, title: str) -> Tuple[MetaBase, List[MediaInfo]]:
"""
搜索媒体信息

View File

@@ -7,9 +7,9 @@ from sqlalchemy.orm import Session
from app import schemas
from app.chain import ChainBase
from app.core.config import settings
from app.db import SessionFactory
from app.db.mediaserver_oper import MediaServerOper
from app.log import logger
from app.schemas import MessageChannel, Notification
lock = threading.Lock()
@@ -21,70 +21,83 @@ class MediaServerChain(ChainBase):
def __init__(self, db: Session = None):
super().__init__(db)
self.mediaserverdb = MediaServerOper(db)
def librarys(self) -> List[schemas.MediaServerLibrary]:
def librarys(self, server: str) -> List[schemas.MediaServerLibrary]:
"""
获取媒体服务器所有媒体库
"""
return self.run_module("mediaserver_librarys")
return self.run_module("mediaserver_librarys", server=server)
def items(self, library_id: Union[str, int]) -> Generator:
def items(self, server: str, library_id: Union[str, int]) -> List[schemas.MediaServerItem]:
"""
获取媒体服务器所有项目
"""
return self.run_module("mediaserver_items", library_id=library_id)
return self.run_module("mediaserver_items", server=server, library_id=library_id)
def episodes(self, item_id: Union[str, int]) -> List[schemas.MediaServerSeasonInfo]:
def iteminfo(self, server: str, item_id: Union[str, int]) -> schemas.MediaServerItem:
"""
获取媒体服务器项目信息
"""
return self.run_module("mediaserver_iteminfo", server=server, item_id=item_id)
def episodes(self, server: str, item_id: Union[str, int]) -> List[schemas.MediaServerSeasonInfo]:
"""
获取媒体服务器剧集信息
"""
return self.run_module("mediaserver_tv_episodes", item_id=item_id)
def remote_sync(self, channel: MessageChannel, userid: Union[int, str]):
"""
同步豆瓣想看数据,发送消息
"""
self.post_message(Notification(channel=channel,
title="开始媒体服务器 ...", userid=userid))
self.sync()
self.post_message(Notification(channel=channel,
title="同步媒体服务器完成!", userid=userid))
return self.run_module("mediaserver_tv_episodes", server=server, item_id=item_id)
def sync(self):
"""
同步媒体库所有数据到本地数据库
"""
with lock:
logger.info("开始同步媒体库数据 ...")
# 媒体服务器同步使用独立的会话
_db = SessionFactory()
_dbOper = MediaServerOper(_db)
# 汇总统计
total_count = 0
# 清空登记薄
self.mediaserverdb.empty(server=settings.MEDIASERVER)
for library in self.librarys():
logger.info(f"正在同步媒体库 {library.name} ...")
library_count = 0
for item in self.items(library.id):
if not item:
_dbOper.empty(server=settings.MEDIASERVER)
# 同步黑名单
sync_blacklist = settings.MEDIASERVER_SYNC_BLACKLIST.split(
",") if settings.MEDIASERVER_SYNC_BLACKLIST else []
# 设置的媒体服务器
if not settings.MEDIASERVER:
return
mediaservers = settings.MEDIASERVER.split(",")
# 遍历媒体服务器
for mediaserver in mediaservers:
logger.info(f"开始同步媒体库 {mediaserver} 的数据 ...")
for library in self.librarys(mediaserver):
# 同步黑名单 跳过
if library.name in sync_blacklist:
continue
if not item.item_id:
continue
# 计数
library_count += 1
seasoninfo = {}
# 类型
item_type = "电视剧" if item.item_type in ['Series', 'show'] else "电影"
if item_type == "电视剧":
# 查询剧集信息
espisodes_info = self.episodes(item.item_id) or []
for episode in espisodes_info:
seasoninfo[episode.season] = episode.episodes
# 插入数据
item_dict = item.dict()
item_dict['seasoninfo'] = json.dumps(seasoninfo)
item_dict['item_type'] = item_type
self.mediaserverdb.add(**item_dict)
logger.info(f"媒体库 {library.name} 同步完成,共同步数量:{library_count}")
# 总数累加
total_count += library_count
logger.info(f"正在同步 {mediaserver} 媒体库 {library.name} ...")
library_count = 0
for item in self.items(mediaserver, library.id):
if not item:
continue
if not item.item_id:
continue
# 计数
library_count += 1
seasoninfo = {}
# 类型
item_type = "电视剧" if item.item_type in ['Series', 'show'] else "电影"
if item_type == "电视剧":
# 查询剧集信息
espisodes_info = self.episodes(mediaserver, item.item_id) or []
for episode in espisodes_info:
seasoninfo[episode.season] = episode.episodes
# 插入数据
item_dict = item.dict()
item_dict['seasoninfo'] = json.dumps(seasoninfo)
item_dict['item_type'] = item_type
_dbOper.add(**item_dict)
logger.info(f"{mediaserver} 媒体库 {library.name} 同步完成,共同步数量:{library_count}")
# 总数累加
total_count += library_count
# 关闭数据库连接
if _db:
_db.close()
logger.info("【MediaServer】媒体库数据同步完成同步数量%s" % total_count)

View File

@@ -130,19 +130,29 @@ class MessageChain(ChainBase):
return
# 搜索结果排序
contexts = self.torrenthelper.sort_torrents(contexts)
# 更新缓存
user_cache[userid] = {
"type": "Torrent",
"items": contexts
}
_current_page = 0
# 发送种子数据
logger.info(f"搜索到 {len(contexts)} 条数据,开始发送选择消息 ...")
self.__post_torrents_message(channel=channel,
title=mediainfo.title,
items=contexts[:self._page_size],
userid=userid,
total=len(contexts))
# 判断是否设置自动下载
auto_download_user = settings.AUTO_DOWNLOAD_USER
# 匹配到自动下载用户
if auto_download_user and any(userid == user for user in auto_download_user.split(",")):
logger.info(f"用户 {userid} 在自动下载用户中,开始自动择优下载")
# 自动选择下载
self.__auto_download(channel=channel,
cache_list=contexts,
userid=userid,
username=username)
else:
# 更新缓存
user_cache[userid] = {
"type": "Torrent",
"items": contexts
}
# 发送种子数据
logger.info(f"搜索到 {len(contexts)} 条数据,开始发送选择消息 ...")
self.__post_torrents_message(channel=channel,
title=mediainfo.title,
items=contexts[:self._page_size],
userid=userid,
total=len(contexts))
elif cache_type == "Subscribe":
# 订阅媒体
@@ -169,41 +179,15 @@ class MessageChain(ChainBase):
elif cache_type == "Torrent":
if int(text) == 0:
# 自动选择下载
# 查询缺失的媒体信息
exist_flag, no_exists = self.downloadchain.get_no_exists_info(meta=_current_meta,
mediainfo=_current_media)
if exist_flag:
self.post_message(Notification(
channel=channel,
title=f"{_current_media.title_year}"
f"{_current_meta.sea} 媒体库中已存在",
userid=userid))
return
# 批量下载
downloads, lefts = self.downloadchain.batch_download(contexts=cache_list,
no_exists=no_exists,
userid=userid)
if downloads and not lefts:
# 全部下载完成
logger.info(f'{_current_media.title_year} 下载完成')
else:
# 未完成下载
logger.info(f'{_current_media.title_year} 未下载未完整,添加订阅 ...')
# 添加订阅状态为R
self.subscribechain.add(title=_current_media.title,
year=_current_media.year,
mtype=_current_media.type,
tmdbid=_current_media.tmdb_id,
season=_current_meta.begin_season,
channel=channel,
userid=userid,
username=username,
state="R")
self.__auto_download(channel=channel,
cache_list=cache_list,
userid=userid,
username=username)
else:
# 下载种子
context: Context = cache_list[int(text) - 1]
# 下载
self.downloadchain.download_single(context, userid=userid)
self.downloadchain.download_single(context, userid=userid, channel=channel)
elif text.lower() == "p":
# 上一页
@@ -230,6 +214,11 @@ class MessageChain(ChainBase):
start = _current_page * self._page_size
end = start + self._page_size
if cache_type == "Torrent":
# 更新缓存
user_cache[userid] = {
"type": "Torrent",
"items": cache_list[start:end]
}
# 发送种子数据
self.__post_torrents_message(channel=channel,
title=_current_media.title,
@@ -267,6 +256,11 @@ class MessageChain(ChainBase):
# 加一页
_current_page += 1
if cache_type == "Torrent":
# 更新缓存
user_cache[userid] = {
"type": "Torrent",
"items": cache_list
}
# 发送种子数据
self.__post_torrents_message(channel=channel,
title=_current_media.title,
@@ -337,6 +331,42 @@ class MessageChain(ChainBase):
# 保存缓存
self.save_cache(user_cache, self._cache_file)
def __auto_download(self, channel, cache_list, userid, username):
"""
自动择优下载
"""
# 查询缺失的媒体信息
exist_flag, no_exists = self.downloadchain.get_no_exists_info(meta=_current_meta,
mediainfo=_current_media)
if exist_flag:
self.post_message(Notification(
channel=channel,
title=f"{_current_media.title_year}"
f"{_current_meta.sea} 媒体库中已存在",
userid=userid))
return
# 批量下载
downloads, lefts = self.downloadchain.batch_download(contexts=cache_list,
no_exists=no_exists,
channel=channel,
userid=userid)
if downloads and not lefts:
# 全部下载完成
logger.info(f'{_current_media.title_year} 下载完成')
else:
# 未完成下载
logger.info(f'{_current_media.title_year} 未下载未完整,添加订阅 ...')
# 添加订阅状态为R
self.subscribechain.add(title=_current_media.title,
year=_current_media.year,
mtype=_current_media.type,
tmdbid=_current_media.tmdb_id,
season=_current_meta.begin_season,
channel=channel,
userid=userid,
username=username,
state="R")
def __post_medias_message(self, channel: MessageChannel,
title: str, items: list, userid: str, total: int):
"""

View File

@@ -1,280 +0,0 @@
import json
import re
import time
from datetime import datetime
from typing import Tuple, Optional
from sqlalchemy.orm import Session
from app.chain import ChainBase
from app.chain.download import DownloadChain
from app.core.config import settings
from app.core.context import Context, TorrentInfo, MediaInfo
from app.core.metainfo import MetaInfo
from app.db.rss_oper import RssOper
from app.db.systemconfig_oper import SystemConfigOper
from app.helper.message import MessageHelper
from app.helper.rss import RssHelper
from app.helper.sites import SitesHelper
from app.log import logger
from app.schemas import Notification, NotExistMediaInfo
from app.schemas.types import SystemConfigKey, MediaType, NotificationType
from app.utils.string import StringUtils
class RssChain(ChainBase):
    """
    Processing chain for user-defined (custom) RSS subscriptions:
    creates subscriptions from a raw title and periodically refreshes the
    configured feeds, downloading matched torrents and closing finished
    subscriptions.
    """

    def __init__(self, db: Session = None):
        super().__init__(db)
        # Persistence and helper services used throughout this chain
        self.rssoper = RssOper(self._db)
        self.sites = SitesHelper()
        self.systemconfig = SystemConfigOper(self._db)
        self.downloadchain = DownloadChain(self._db)
        self.message = MessageHelper()

    def add(self, title: str, year: str,
            mtype: MediaType = None,
            season: int = None,
            **kwargs) -> Tuple[Optional[int], str]:
        """
        Recognize the media behind *title* and create a custom RSS subscription.

        :param title: raw title to recognize
        :param year: release year; refines recognition when given
        :param mtype: media type; forced to TV when *season* is given
        :param season: season number for TV subscriptions
        :param kwargs: extra columns passed through to the DB record
                       (name, url, total_episode, include/exclude, ...)
        :return: (subscription id, "") on success, (None, error message) on failure
        """
        logger.info(f'开始添加自定义订阅,标题:{title} ...')
        # Recognize metadata from the bare title
        metainfo = MetaInfo(title)
        if year:
            metainfo.year = year
        if mtype:
            metainfo.type = mtype
        if season:
            # A season number implies a TV show
            metainfo.type = MediaType.TV
            metainfo.begin_season = season
        # Recognize the media (TMDB) information
        mediainfo: MediaInfo = self.recognize_media(meta=metainfo)
        if not mediainfo:
            logger.warn(f'{title} 未识别到媒体信息')
            return None, "未识别到媒体信息"
        # Refresh media artwork
        self.obtain_images(mediainfo=mediainfo)
        # Determine the total episode count for TV shows
        if mediainfo.type == MediaType.TV:
            if not season:
                season = 1
            # Total episode count not supplied by the caller — derive it
            if not kwargs.get('total_episode'):
                if not mediainfo.seasons:
                    # Season data missing — re-query by TMDB id to fill it in
                    mediainfo: MediaInfo = self.recognize_media(mtype=mediainfo.type,
                                                                tmdbid=mediainfo.tmdb_id)
                    if not mediainfo:
                        logger.error(f"媒体信息识别失败!")
                        return None, "媒体信息识别失败"
                    if not mediainfo.seasons:
                        logger.error(f"{title} 媒体信息中没有季集信息")
                        return None, "媒体信息中没有季集信息"
                total_episode = len(mediainfo.seasons.get(season) or [])
                if not total_episode:
                    logger.error(f'{title} 未获取到总集数')
                    return None, "未获取到总集数"
                kwargs.update({
                    'total_episode': total_episode
                })
        # Refuse duplicates for the same TMDB id + season
        if self.rssoper.exists(tmdbid=mediainfo.tmdb_id, season=season):
            logger.warn(f'{mediainfo.title} 已存在')
            return None, f'{mediainfo.title} 自定义订阅已存在'
        if not kwargs.get("name"):
            kwargs.update({
                "name": mediainfo.title
            })
        # Enrich the record with recognized media details
        kwargs.update({
            "tmdbid": mediainfo.tmdb_id,
            "poster": mediainfo.get_poster_image(),
            "backdrop": mediainfo.get_backdrop_image(),
            "vote": mediainfo.vote_average,
            "description": mediainfo.overview,
        })
        # Persist the subscription
        sid = self.rssoper.add(title=title, year=year, season=season, **kwargs)
        if not sid:
            logger.error(f'{mediainfo.title_year} 添加自定义订阅失败')
            return None, "添加自定义订阅失败"
        else:
            logger.info(f'{mediainfo.title_year}{metainfo.season} 添加订阅成功')
        # Success: return the new subscription id
        return sid, ""

    def refresh(self, rssid: int = None, manual: bool = False):
        """
        Refresh custom RSS subscription data: download each feed, match
        entries against the subscription, download matched torrents and
        close subscriptions that become complete.

        :param rssid: refresh a single subscription by id; None refreshes all
        :param manual: when True, push a completion message to the UI
        """
        logger.info("开始刷新RSS订阅数据 ...")
        rss_tasks = self.rssoper.list(rssid) or []
        for rss_task in rss_tasks:
            if not rss_task:
                continue
            if not rss_task.url:
                continue
            # Download the RSS feed (optionally through the configured proxy)
            items = RssHelper.parse(rss_task.url, True if rss_task.proxy else False)
            if not items:
                # NOTE(review): no `continue` here — the len(items) below would
                # raise if parse() returned None; confirm intended behavior.
                logger.error(f"RSS未下载到数据{rss_task.url}")
            logger.info(f"{rss_task.name} RSS下载到数据{len(items)}")
            # Resolve the indexer site this feed belongs to
            domain = StringUtils.get_url_domain(rss_task.url)
            site_info = self.sites.get_indexer(domain) or {}
            # Pick the rule set ("best version" tasks use the second rule set)
            if rss_task.best_version:
                filter_rule = self.systemconfig.get(SystemConfigKey.FilterRules2)
            else:
                filter_rule = self.systemconfig.get(SystemConfigKey.FilterRules)
            # Contexts matched in this pass
            matched_contexts = []
            # Already-processed titles / season-episodes (persisted in `note`)
            processed_data = json.loads(rss_task.note) if rss_task.note else {
                "titles": [],
                "season_episodes": []
            }
            for item in items:
                if not item.get("title"):
                    continue
                # Skip entries whose title was handled in a previous pass
                if item.get("title") in processed_data.get('titles'):
                    logger.info(f"{item.get('title')} 已处理过")
                    continue
                # Include / exclude keyword matching on the raw title
                if rss_task.include \
                        and not re.search(r"%s" % rss_task.include, item.get("title")):
                    logger.info(f"{item.get('title')} 未包含 {rss_task.include}")
                    continue
                if rss_task.exclude \
                        and re.search(r"%s" % rss_task.exclude, item.get("title")):
                    logger.info(f"{item.get('title')} 包含 {rss_task.exclude}")
                    continue
                # Recognize metadata and media info for the entry
                meta = MetaInfo(title=item.get("title"), subtitle=item.get("description"))
                if not meta.name:
                    logger.error(f"{item.get('title')} 未识别到有效信息")
                    continue
                mediainfo = self.recognize_media(meta=meta)
                if not mediainfo:
                    logger.error(f"{item.get('title')} 未识别到TMDB媒体信息")
                    continue
                # Only entries for the subscribed TMDB id are kept
                if mediainfo.tmdb_id != rss_task.tmdbid:
                    logger.error(f"{item.get('title')} 不匹配")
                    continue
                # Skip season/episode combinations handled before
                if meta.season_episode in processed_data.get('season_episodes'):
                    logger.info(f"{meta.season_episode} 已处理过")
                    continue
                # Build the torrent record from the feed entry + site settings
                torrentinfo = TorrentInfo(
                    site=site_info.get("id"),
                    site_name=site_info.get("name"),
                    site_cookie=site_info.get("cookie"),
                    # NOTE(review): reads "cookie" for the UA — looks like it
                    # should be site_info.get("ua"); confirm against SitesHelper.
                    site_ua=site_info.get("cookie") or settings.USER_AGENT,
                    site_proxy=site_info.get("proxy") or rss_task.proxy,
                    site_order=site_info.get("pri"),
                    title=item.get("title"),
                    description=item.get("description"),
                    enclosure=item.get("enclosure"),
                    page_url=item.get("link"),
                    size=item.get("size"),
                    pubdate=time.strftime("%Y-%m-%d %H:%M:%S", item.get("pubdate")) if item.get("pubdate") else None,
                )
                # Apply the configured filter rules when enabled for this task
                if rss_task.filter:
                    result = self.filter_torrents(
                        rule_string=filter_rule,
                        torrent_list=[torrentinfo]
                    )
                    if not result:
                        logger.info(f"{rss_task.name} 不匹配过滤规则")
                        continue
                # Remember this entry as processed
                processed_data['titles'].append(item.get("title"))
                processed_data['season_episodes'].append(meta.season_episode)
                # Drop bulky fields before keeping the media info around
                mediainfo.clear()
                # Record the matched context
                matched_contexts.append(Context(
                    meta_info=meta,
                    media_info=mediainfo,
                    torrent_info=torrentinfo
                ))
            # Persist the processed-entries bookkeeping
            self.rssoper.update(rssid=rss_task.id, note=json.dumps(processed_data))
            if not matched_contexts:
                logger.info(f"{rss_task.name} 未匹配到数据")
                continue
            logger.info(f"{rss_task.name} 匹配到 {len(matched_contexts)} 条数据")
            # Check what is still missing locally (skipped for "best version")
            if not rss_task.best_version:
                # Rebuild meta/media from the stored subscription fields
                rss_meta = MetaInfo(title=rss_task.title)
                rss_meta.year = rss_task.year
                rss_meta.begin_season = rss_task.season
                rss_meta.type = MediaType(rss_task.type)
                exist_flag, no_exists = self.downloadchain.get_no_exists_info(
                    meta=rss_meta,
                    mediainfo=MediaInfo(
                        title=rss_task.title,
                        year=rss_task.year,
                        tmdb_id=rss_task.tmdbid,
                        season=rss_task.season
                    ),
                )
                if exist_flag:
                    # Everything exists in the library — subscription finished
                    logger.info(f'{rss_task.name} 媒体库中已存在,完成订阅')
                    self.rssoper.delete(rss_task.id)
                    # Notify about completion
                    self.post_message(Notification(mtype=NotificationType.Subscribe,
                                                   title=f'自定义订阅 {rss_task.name} 已完成',
                                                   image=rss_task.backdrop))
                    continue
                elif rss_meta.type == MediaType.TV.value:
                    # NOTE(review): compares a MediaType enum with its .value
                    # string — likely always False; confirm intent.
                    # Log the still-missing episodes
                    if no_exists and no_exists.get(rss_task.tmdbid):
                        no_exists_info = no_exists.get(rss_task.tmdbid).get(rss_task.season)
                        if no_exists_info:
                            logger.info(f'订阅 {rss_task.name} 缺失集:{no_exists_info.episodes}')
            else:
                # "Best version" mode: treat the whole season as missing
                if rss_task.type == MediaType.TV.value:
                    no_exists = {
                        rss_task.season: NotExistMediaInfo(
                            season=rss_task.season,
                            episodes=[],
                            total_episode=rss_task.total_episode,
                            start_episode=1)
                    }
                else:
                    no_exists = {}
            # Download the matched torrents
            downloads, lefts = self.downloadchain.batch_download(contexts=matched_contexts,
                                                                 no_exists=no_exists,
                                                                 save_path=rss_task.save_path)
            if downloads and not lefts:
                # Everything downloaded — finish non-best-version subscriptions
                if not rss_task.best_version:
                    self.rssoper.delete(rss_task.id)
                    # Notify about completion
                    self.post_message(Notification(mtype=NotificationType.Subscribe,
                                                   title=f'自定义订阅 {rss_task.name} 已完成',
                                                   image=rss_task.backdrop))
            # NOTE(review): logged unconditionally, even when the download is
            # complete — presumably meant to sit in an else branch; confirm.
            logger.info(f'{rss_task.name} 未下载未完整,继续订阅 ...')
            if downloads:
                # Record progress and the last-update timestamp
                self.rssoper.update(rssid=rss_task.id,
                                    processed=(rss_task.processed or 0) + len(downloads),
                                    last_update=datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
        logger.info("刷新RSS订阅数据完成")
        if manual:
            # Manual trigger: push a completion message to the UI
            if len(rss_tasks) == 1:
                self.message.put(f"{rss_tasks[0].name} 自定义订阅刷新完成")
            else:
                self.message.put(f"自定义订阅刷新完成")

View File

@@ -1,4 +1,5 @@
import pickle
import re
from concurrent.futures import ThreadPoolExecutor, as_completed
from datetime import datetime
from typing import Dict
@@ -29,20 +30,21 @@ class SearchChain(ChainBase):
super().__init__(db)
self.siteshelper = SitesHelper()
self.progress = ProgressHelper()
self.systemconfig = SystemConfigOper(self._db)
self.systemconfig = SystemConfigOper()
self.torrenthelper = TorrentHelper()
def search_by_tmdbid(self, tmdbid: int, mtype: MediaType = None) -> List[Context]:
def search_by_tmdbid(self, tmdbid: int, mtype: MediaType = None, area: str = "title") -> List[Context]:
"""
根据TMDB ID搜索资源精确匹配但不不过滤本地存在的资源
:param tmdbid: TMDB ID
:param mtype: 媒体,电影 or 电视剧
:param area: 搜索范围title or imdbid
"""
mediainfo = self.recognize_media(tmdbid=tmdbid, mtype=mtype)
if not mediainfo:
logger.error(f'{tmdbid} 媒体信息识别失败!')
return []
results = self.process(mediainfo=mediainfo)
results = self.process(mediainfo=mediainfo, area=area)
# 保存眲结果
bytes_results = pickle.dumps(results)
self.systemconfig.set(SystemConfigKey.SearchResults, bytes_results)
@@ -60,7 +62,7 @@ class SearchChain(ChainBase):
else:
logger.info(f'开始浏览资源,站点:{site} ...')
# 搜索
return self.__search_all_sites(keyword=title, sites=[site] if site else None, page=page) or []
return self.__search_all_sites(keywords=[title], sites=[site] if site else None, page=page) or []
def last_search_results(self) -> List[Context]:
"""
@@ -75,34 +77,22 @@ class SearchChain(ChainBase):
print(str(e))
return []
def browse(self, domain: str, keyword: str = None) -> List[TorrentInfo]:
"""
浏览站点首页内容
:param domain: 站点域名
:param keyword: 关键词,有值时为搜索
"""
if not keyword:
logger.info(f'开始浏览站点首页内容,站点:{domain} ...')
else:
logger.info(f'开始搜索资源,关键词:{keyword},站点:{domain} ...')
site = self.siteshelper.get_indexer(domain)
if not site:
logger.error(f'站点 {domain} 不存在!')
return []
return self.search_torrents(site=site, keyword=keyword)
def process(self, mediainfo: MediaInfo,
keyword: str = None,
no_exists: Dict[int, Dict[int, NotExistMediaInfo]] = None,
sites: List[int] = None,
filter_rule: str = None) -> List[Context]:
priority_rule: str = None,
filter_rule: Dict[str, str] = None,
area: str = "title") -> List[Context]:
"""
根据媒体信息搜索种子资源精确匹配应用过滤规则同时根据no_exists过滤本地已存在的资源
:param mediainfo: 媒体信息
:param keyword: 搜索关键词
:param no_exists: 缺失的媒体信息
:param sites: 站点ID列表为空时搜索所有站点
:param priority_rule: 优先级规则,为空时使用搜索优先级规则
:param filter_rule: 过滤规则,为空是使用默认过滤规则
:param area: 搜索范围title or imdbid
"""
logger.info(f'开始搜索资源,关键词:{keyword or mediainfo.title} ...')
# 补充媒体信息
@@ -127,32 +117,36 @@ class SearchChain(ChainBase):
else:
keywords = [mediainfo.title]
# 执行搜索
torrents: List[TorrentInfo] = []
for keyword in keywords:
torrents = self.__search_all_sites(
mediainfo=mediainfo,
keyword=keyword,
sites=sites
)
if torrents:
break
torrents: List[TorrentInfo] = self.__search_all_sites(
mediainfo=mediainfo,
keywords=keywords,
sites=sites,
area=area
)
if not torrents:
logger.warn(f'{keyword or mediainfo.title} 未搜索到资源')
return []
# 过滤种子
if filter_rule is None:
# 取默认过滤规则
filter_rule = self.systemconfig.get(SystemConfigKey.FilterRules)
if filter_rule:
logger.info(f'开始过滤资源,当前规则:{filter_rule} ...')
result: List[TorrentInfo] = self.filter_torrents(rule_string=filter_rule,
if priority_rule is None:
# 取搜索优先级规则
priority_rule = self.systemconfig.get(SystemConfigKey.SearchFilterRules)
if priority_rule:
logger.info(f'开始过滤资源,当前规则:{priority_rule} ...')
result: List[TorrentInfo] = self.filter_torrents(rule_string=priority_rule,
torrent_list=torrents,
season_episodes=season_episodes)
season_episodes=season_episodes,
mediainfo=mediainfo)
if result is not None:
torrents = result
if not torrents:
logger.warn(f'{keyword or mediainfo.title} 没有符合过滤条件的资源')
logger.warn(f'{keyword or mediainfo.title} 没有符合优先级规则的资源')
return []
# 使用默认过滤规则再次过滤
torrents = self.filter_torrents_by_rule(torrents=torrents,
filter_rule=filter_rule)
if not torrents:
logger.warn(f'{keyword or mediainfo.title} 没有符合过滤规则的资源')
return []
# 匹配的资源
_match_torrents = []
# 总数
@@ -185,29 +179,42 @@ class SearchChain(ChainBase):
# 比对年份
if mediainfo.year:
if mediainfo.type == MediaType.TV:
# 需要剧集
# 剧集年份,每季的年份可能不同
if torrent_meta.year and torrent_meta.year not in [year for year in
mediainfo.season_years.values()]:
logger.warn(f'{torrent.site_name} - {torrent.title} 年份不匹配')
continue
else:
# 需要电影
if torrent_meta.year != mediainfo.year:
# 电影年份上下浮动1年
if torrent_meta.year not in [str(int(mediainfo.year) - 1),
mediainfo.year,
str(int(mediainfo.year) + 1)]:
logger.warn(f'{torrent.site_name} - {torrent.title} 年份不匹配')
continue
# 比对标题
# 比对标题和原语种标题
meta_name = StringUtils.clear_upper(torrent_meta.name)
if meta_name in [
StringUtils.clear_upper(mediainfo.title),
StringUtils.clear_upper(mediainfo.original_title)
]:
logger.info(f'{mediainfo.title} 匹配到资源:{torrent.site_name} - {torrent.title}')
logger.info(f'{mediainfo.title} 通过标题匹配到资源:{torrent.site_name} - {torrent.title}')
_match_torrents.append(torrent)
continue
# 在副标题中判断是否存在标题与原语种标题
if torrent.description:
subtitle = torrent.description.split()
if (StringUtils.is_chinese(mediainfo.title)
and str(mediainfo.title) in subtitle) \
or (StringUtils.is_chinese(mediainfo.original_title)
and str(mediainfo.original_title) in subtitle):
logger.info(f'{mediainfo.title} 通过副标题匹配到资源:{torrent.site_name} - {torrent.title}'
f'副标题:{torrent.description}')
_match_torrents.append(torrent)
continue
# 比对别名和译名
for name in mediainfo.names:
if StringUtils.clear_upper(name) == meta_name:
logger.info(f'{mediainfo.title} 匹配到资源:{torrent.site_name} - {torrent.title}')
logger.info(f'{mediainfo.title} 通过别名或译名匹配到资源:{torrent.site_name} - {torrent.title}')
_match_torrents.append(torrent)
break
else:
@@ -230,28 +237,30 @@ class SearchChain(ChainBase):
# 返回
return contexts
def __search_all_sites(self, mediainfo: Optional[MediaInfo] = None,
keyword: str = None,
def __search_all_sites(self, keywords: List[str],
mediainfo: Optional[MediaInfo] = None,
sites: List[int] = None,
page: int = 0) -> Optional[List[TorrentInfo]]:
page: int = 0,
area: str = "title") -> Optional[List[TorrentInfo]]:
"""
多线程搜索多个站点
:param mediainfo: 识别的媒体信息
:param keyword: 搜索关键词,如有按关键词搜索,否则按媒体信息名称搜索
:param keywords: 搜索关键词列表
:param sites: 指定站点ID列表如有则只搜索指定站点否则搜索所有站点
:param page: 搜索页码
:param area: 搜索区域 title or imdbid
:reutrn: 资源列表
"""
# 未开启的站点不搜索
indexer_sites = []
# 配置的索引站点
if sites:
config_indexers = [str(sid) for sid in sites]
else:
config_indexers = [str(sid) for sid in self.systemconfig.get(SystemConfigKey.IndexerSites) or []]
if not sites:
sites = self.systemconfig.get(SystemConfigKey.IndexerSites) or []
for indexer in self.siteshelper.get_indexers():
# 检查站点索引开关
if not config_indexers or str(indexer.get("id")) in config_indexers:
if not sites or indexer.get("id") in sites:
# 站点流控
state, msg = self.siteshelper.check(indexer.get("domain"))
if state:
@@ -261,6 +270,7 @@ class SearchChain(ChainBase):
if not indexer_sites:
logger.warn('未开启任何有效站点,无法搜索资源')
return []
# 开始进度
self.progress.start(ProgressKey.Search)
# 开始计时
@@ -277,8 +287,18 @@ class SearchChain(ChainBase):
executor = ThreadPoolExecutor(max_workers=len(indexer_sites))
all_task = []
for site in indexer_sites:
task = executor.submit(self.search_torrents, mediainfo=mediainfo,
site=site, keyword=keyword, page=page)
if area == "imdbid":
# 搜索IMDBID
task = executor.submit(self.search_torrents, site=site,
keywords=[mediainfo.imdb_id] if mediainfo else None,
mtype=mediainfo.type if mediainfo else None,
page=page)
else:
# 搜索标题
task = executor.submit(self.search_torrents, site=site,
keywords=keywords,
mtype=mediainfo.type if mediainfo else None,
page=page)
all_task.append(task)
# 结果集
results = []
@@ -289,7 +309,7 @@ class SearchChain(ChainBase):
results.extend(result)
logger.info(f"站点搜索进度:{finish_count} / {total_num}")
self.progress.update(value=finish_count / total_num * 100,
text=f"正在搜索{keyword or ''},已完成 {finish_count} / {total_num} 个站点 ...",
text=f"正在搜索{keywords or ''},已完成 {finish_count} / {total_num} 个站点 ...",
key=ProgressKey.Search)
# 计算耗时
end_time = datetime.now()
@@ -302,3 +322,44 @@ class SearchChain(ChainBase):
self.progress.end(ProgressKey.Search)
# 返回
return results
def filter_torrents_by_rule(self,
torrents: List[TorrentInfo],
filter_rule: Dict[str, str] = None
) -> List[TorrentInfo]:
"""
使用过滤规则过滤种子
:param torrents: 种子列表
:param filter_rule: 过滤规则
"""
# 取默认过滤规则
if not filter_rule:
filter_rule = self.systemconfig.get(SystemConfigKey.DefaultFilterRules)
if not filter_rule:
return torrents
# 包含
include = filter_rule.get("include")
# 排除
exclude = filter_rule.get("exclude")
def __filter_torrent(t: TorrentInfo) -> bool:
"""
过滤种子
"""
# 包含
if include:
if not re.search(r"%s" % include,
f"{t.title} {t.description}", re.I):
logger.info(f"{t.title} 不匹配包含规则 {include}")
return False
# 排除
if exclude:
if re.search(r"%s" % exclude,
f"{t.title} {t.description}", re.I):
logger.info(f"{t.title} 匹配排除规则 {exclude}")
return False
return True
# 使用默认过滤规则再次过滤
return list(filter(lambda t: __filter_torrent(t), torrents))

View File

@@ -1,3 +1,4 @@
import re
from typing import Union, Tuple
from sqlalchemy.orm import Session
@@ -28,6 +29,66 @@ class SiteChain(ChainBase):
self.cookiehelper = CookieHelper()
self.message = MessageHelper()
# 特殊站点登录验证
self.special_site_test = {
"zhuque.in": self.__zhuque_test,
# "m-team.io": self.__mteam_test,
}
@staticmethod
def __zhuque_test(site: Site) -> Tuple[bool, str]:
"""
判断站点是否已经登陆zhuique
"""
# 获取token
token = None
res = RequestUtils(
ua=site.ua,
cookies=site.cookie,
proxies=settings.PROXY if site.proxy else None,
timeout=15
).get_res(url=site.url)
if res and res.status_code == 200:
csrf_token = re.search(r'<meta name="x-csrf-token" content="(.+?)">', res.text)
if csrf_token:
token = csrf_token.group(1)
if not token:
return False, "无法获取Token"
# 调用查询用户信息接口
user_res = RequestUtils(
headers={
'X-CSRF-TOKEN': token,
"Content-Type": "application/json; charset=utf-8",
"User-Agent": f"{site.ua}"
},
cookies=site.cookie,
proxies=settings.PROXY if site.proxy else None,
timeout=15
).get_res(url=f"{site.url}api/user/getInfo")
if user_res and user_res.status_code == 200:
user_info = user_res.json()
if user_info and user_info.get("data"):
return True, "连接成功"
return False, "Cookie已失效"
@staticmethod
def __mteam_test(site: Site) -> Tuple[bool, str]:
"""
判断站点是否已经登陆m-team
"""
url = f"{site.url}api/member/profile"
res = RequestUtils(
ua=site.ua,
cookies=site.cookie,
proxies=settings.PROXY if site.proxy else None,
timeout=15
).post_res(url=url)
if res and res.status_code == 200:
user_info = res.json()
if user_info and user_info.get("data"):
return True, "连接成功"
return False, "Cookie已失效"
def test(self, url: str) -> Tuple[bool, str]:
"""
测试站点是否可用
@@ -39,6 +100,12 @@ class SiteChain(ChainBase):
site_info = self.siteoper.get_by_domain(domain)
if not site_info:
return False, f"站点【{url}】不存在"
# 特殊站点测试
if self.special_site_test.get(domain):
return self.special_site_test[domain](site_info)
# 通用站点测试
site_url = site_info.url
site_cookie = site_info.cookie
ua = site_info.ua
@@ -91,7 +158,8 @@ class SiteChain(ChainBase):
if not site_list:
self.post_message(Notification(
channel=channel,
title="没有维护任何站点信息!"))
title="没有维护任何站点信息!",
userid=userid))
title = f"共有 {len(site_list)} 个站点,回复对应指令操作:" \
f"\n- 禁用站点:/site_disable [id]" \
f"\n- 启用站点:/site_enable [id]" \
@@ -221,8 +289,8 @@ class SiteChain(ChainBase):
title=f"站点编号 {site_id} 不存在!", userid=userid))
return
self.post_message(Notification(
channel=channel,
title=f"开始更新【{site_info.name}】Cookie&UA ...", userid=userid))
channel=channel,
title=f"开始更新【{site_info.name}】Cookie&UA ...", userid=userid))
# 用户名
username = args[1]
# 密码

View File

@@ -3,12 +3,12 @@ import re
from datetime import datetime
from typing import Dict, List, Optional, Union, Tuple
from requests import Session
from sqlalchemy.orm import Session
from app.chain import ChainBase
from app.chain.download import DownloadChain
from app.chain.search import SearchChain
from app.core.config import settings
from app.chain.torrents import TorrentsChain
from app.core.context import TorrentInfo, Context, MediaInfo
from app.core.meta import MetaBase
from app.core.metainfo import MetaInfo
@@ -16,11 +16,9 @@ from app.db.models.subscribe import Subscribe
from app.db.subscribe_oper import SubscribeOper
from app.db.systemconfig_oper import SystemConfigOper
from app.helper.message import MessageHelper
from app.helper.sites import SitesHelper
from app.log import logger
from app.schemas import NotExistMediaInfo, Notification
from app.schemas.types import MediaType, SystemConfigKey, MessageChannel, NotificationType
from app.utils.string import StringUtils
class SubscribeChain(ChainBase):
@@ -28,16 +26,14 @@ class SubscribeChain(ChainBase):
订阅管理处理链
"""
_cache_file = "__torrents_cache__"
def __init__(self, db: Session = None):
super().__init__(db)
self.downloadchain = DownloadChain(self._db)
self.searchchain = SearchChain(self._db)
self.subscribeoper = SubscribeOper(self._db)
self.siteshelper = SitesHelper()
self.torrentschain = TorrentsChain()
self.message = MessageHelper()
self.systemconfig = SystemConfigOper(self._db)
self.systemconfig = SystemConfigOper()
def add(self, title: str, year: str,
mtype: MediaType = None,
@@ -107,13 +103,13 @@ class SubscribeChain(ChainBase):
# 发回原用户
self.post_message(Notification(channel=channel,
mtype=NotificationType.Subscribe,
title=f"{mediainfo.title_year}{metainfo.season} "
title=f"{mediainfo.title_year} {metainfo.season} "
f"添加订阅失败!",
text=f"{err_msg}",
image=mediainfo.get_message_image(),
userid=userid))
elif message:
logger.info(f'{mediainfo.title_year}{metainfo.season} 添加订阅成功')
logger.info(f'{mediainfo.title_year} {metainfo.season} 添加订阅成功')
if username or userid:
text = f"评分:{mediainfo.vote_average},来自用户:{username or userid}"
else:
@@ -121,7 +117,7 @@ class SubscribeChain(ChainBase):
# 广而告之
self.post_message(Notification(channel=channel,
mtype=NotificationType.Subscribe,
title=f"{mediainfo.title_year}{metainfo.season} 已添加订阅",
title=f"{mediainfo.title_year} {metainfo.season} 已添加订阅",
text=text,
image=mediainfo.get_message_image()))
# 返回结果
@@ -136,45 +132,6 @@ class SubscribeChain(ChainBase):
return True
return False
def remote_refresh(self, channel: MessageChannel, userid: Union[str, int] = None):
"""
远程刷新订阅,发送消息
"""
self.post_message(Notification(channel=channel,
title=f"开始刷新订阅 ...", userid=userid))
self.refresh()
self.post_message(Notification(channel=channel,
title=f"订阅刷新完成!", userid=userid))
def remote_search(self, arg_str: str, channel: MessageChannel, userid: Union[str, int] = None):
"""
远程搜索订阅,发送消息
"""
if arg_str and not str(arg_str).isdigit():
self.post_message(Notification(channel=channel,
title="请输入正确的命令格式:/subscribe_search [id]"
"[id]为订阅编号,不输入订阅编号时搜索所有订阅", userid=userid))
return
if arg_str:
sid = int(arg_str)
subscribe = self.subscribeoper.get(sid)
if not subscribe:
self.post_message(Notification(channel=channel,
title=f"订阅编号 {sid} 不存在!", userid=userid))
return
self.post_message(Notification(channel=channel,
title=f"开始搜索 {subscribe.name} ...", userid=userid))
# 搜索订阅
self.search(sid=int(arg_str))
self.post_message(Notification(channel=channel,
title=f"{subscribe.name} 搜索完成!", userid=userid))
else:
self.post_message(Notification(channel=channel,
title=f"开始搜索所有订阅 ...", userid=userid))
self.search(state='R')
self.post_message(Notification(channel=channel,
title=f"订阅搜索完成!", userid=userid))
def search(self, sid: int = None, state: str = 'N', manual: bool = False):
"""
订阅搜索
@@ -189,6 +146,13 @@ class SubscribeChain(ChainBase):
subscribes = self.subscribeoper.list(state)
# 遍历订阅
for subscribe in subscribes:
# 校验当前时间减订阅创建时间是否大于1分钟否则跳过先留出编辑订阅的时间
if subscribe.date:
now = datetime.now()
subscribe_time = datetime.strptime(subscribe.date, '%Y-%m-%d %H:%M:%S')
if (now - subscribe_time).total_seconds() < 60:
logger.debug(f"订阅标题:{subscribe.name} 新增小于1分钟暂不搜索...")
continue
logger.info(f'开始搜索订阅,标题:{subscribe.name} ...')
# 如果状态为N则更新为R
if subscribe.state == 'N':
@@ -206,14 +170,24 @@ class SubscribeChain(ChainBase):
# 非洗版状态
if not subscribe.best_version:
# 每季总集数
totals = {}
if subscribe.season and subscribe.total_episode:
totals = {
subscribe.season: subscribe.total_episode
}
# 查询缺失的媒体信息
exist_flag, no_exists = self.downloadchain.get_no_exists_info(meta=meta, mediainfo=mediainfo)
exist_flag, no_exists = self.downloadchain.get_no_exists_info(
meta=meta,
mediainfo=mediainfo,
totals=totals
)
if exist_flag:
logger.info(f'{mediainfo.title_year} 媒体库中已存在,完成订阅')
self.subscribeoper.delete(subscribe.id)
# 发送通知
self.post_message(Notification(mtype=NotificationType.Subscribe,
title=f'{mediainfo.title_year}{meta.season} 已完成订阅',
title=f'{mediainfo.title_year} {meta.season} 已完成订阅',
image=mediainfo.get_message_image()))
continue
# 电视剧订阅
@@ -231,7 +205,7 @@ class SubscribeChain(ChainBase):
if no_exists and no_exists.get(subscribe.tmdbid):
no_exists_info = no_exists.get(subscribe.tmdbid).get(subscribe.season)
if no_exists_info:
logger.info(f'订阅 {mediainfo.title_year}{meta.season} 缺失集:{no_exists_info.episodes}')
logger.info(f'订阅 {mediainfo.title_year} {meta.season} 缺失集:{no_exists_info.episodes}')
else:
# 洗版状态
if meta.type == MediaType.TV:
@@ -249,22 +223,31 @@ class SubscribeChain(ChainBase):
sites = json.loads(subscribe.sites)
else:
sites = None
# 过滤规则
# 优先级过滤规则
if subscribe.best_version:
filter_rule = self.systemconfig.get(SystemConfigKey.FilterRules2)
priority_rule = self.systemconfig.get(SystemConfigKey.BestVersionFilterRules)
else:
filter_rule = self.systemconfig.get(SystemConfigKey.FilterRules)
priority_rule = self.systemconfig.get(SystemConfigKey.SubscribeFilterRules)
# 默认过滤规则
if subscribe.include or subscribe.exclude:
filter_rule = {
"include": subscribe.include,
"exclude": subscribe.exclude
}
else:
filter_rule = self.systemconfig.get(SystemConfigKey.DefaultFilterRules)
# 搜索,同时电视剧会过滤掉不需要的剧集
contexts = self.searchchain.process(mediainfo=mediainfo,
keyword=subscribe.keyword,
no_exists=no_exists,
sites=sites,
priority_rule=priority_rule,
filter_rule=filter_rule)
if not contexts:
logger.warn(f'订阅 {subscribe.keyword or subscribe.name} 未搜索到资源')
if meta.type == MediaType.TV:
# 未搜索到资源,但本地缺失可能有变化,更新订阅剩余集数
self.__upate_lack_episodes(lefts=no_exists, subscribe=subscribe, mediainfo=mediainfo)
self.__update_lack_episodes(lefts=no_exists, subscribe=subscribe, mediainfo=mediainfo)
continue
# 过滤
matched_contexts = []
@@ -272,22 +255,12 @@ class SubscribeChain(ChainBase):
torrent_meta = context.meta_info
torrent_info = context.torrent_info
torrent_mediainfo = context.media_info
# 包含
if subscribe.include:
if not re.search(r"%s" % subscribe.include,
f"{torrent_info.title} {torrent_info.description}", re.I):
continue
# 排除
if subscribe.exclude:
if re.search(r"%s" % subscribe.exclude,
f"{torrent_info.title} {torrent_info.description}", re.I):
continue
# 非洗版
if not subscribe.best_version:
# 如果是电视剧过滤掉已经下载的集数
if torrent_mediainfo.type == MediaType.TV:
if self.__check_subscribe_note(subscribe, torrent_meta.episode_list):
logger.info(f'{torrent_info.title} 对应剧集 {torrent_meta.episodes} 已下载过')
logger.info(f'{torrent_info.title} 对应剧集 {torrent_meta.episode_list} 已下载过')
continue
else:
# 洗版时,非整季不要
@@ -295,12 +268,17 @@ class SubscribeChain(ChainBase):
if torrent_meta.episode_list:
logger.info(f'{subscribe.name} 正在洗版,{torrent_info.title} 不是整季')
continue
# 优先级小于已下载优先级的不要
if subscribe.current_priority \
and torrent_info.pri_order < subscribe.current_priority:
logger.info(f'{subscribe.name} 正在洗版,{torrent_info.title} 优先级低于已下载优先级')
continue
matched_contexts.append(context)
if not matched_contexts:
logger.warn(f'订阅 {subscribe.name} 没有符合过滤条件的资源')
# 非洗版未搜索到资源,但本地缺失可能有变化,更新订阅剩余集数
if meta.type == MediaType.TV and not subscribe.best_version:
self.__upate_lack_episodes(lefts=no_exists, subscribe=subscribe, mediainfo=mediainfo)
self.__update_lack_episodes(lefts=no_exists, subscribe=subscribe, mediainfo=mediainfo)
continue
# 自动下载
downloads, lefts = self.downloadchain.batch_download(contexts=matched_contexts,
@@ -317,18 +295,18 @@ class SubscribeChain(ChainBase):
mediainfo=mediainfo, downloads=downloads)
else:
# 未完成下载
logger.info(f'{mediainfo.title_year} 未下载完整,继续订阅 ...')
logger.info(f'{mediainfo.title_year} 未下载完整,继续订阅 ...')
if meta.type == MediaType.TV and not subscribe.best_version:
# 更新订阅剩余集数和时间
update_date = True if downloads else False
self.__upate_lack_episodes(lefts=lefts, subscribe=subscribe,
mediainfo=mediainfo, update_date=update_date)
self.__update_lack_episodes(lefts=lefts, subscribe=subscribe,
mediainfo=mediainfo, update_date=update_date)
# 手动触发时发送系统消息
if manual:
if sid:
self.message.put(f'订阅 {subscribes[0].name} 搜索完成!')
else:
self.message.put(f'所有订阅搜索完成!')
self.message.put('所有订阅搜索完成!')
def finish_subscribe_or_not(self, subscribe: Subscribe, meta: MetaInfo,
mediainfo: MediaInfo, downloads: List[Context]):
@@ -341,7 +319,7 @@ class SubscribeChain(ChainBase):
self.subscribeoper.delete(subscribe.id)
# 发送通知
self.post_message(Notification(mtype=NotificationType.Subscribe,
title=f'{mediainfo.title_year}{meta.season} 已完成订阅',
title=f'{mediainfo.title_year} {meta.season} 已完成订阅',
image=mediainfo.get_message_image()))
else:
# 当前下载资源的优先级
@@ -351,7 +329,7 @@ class SubscribeChain(ChainBase):
self.subscribeoper.delete(subscribe.id)
# 发送通知
self.post_message(Notification(mtype=NotificationType.Subscribe,
title=f'{mediainfo.title_year}{meta.season} 已洗版完成',
title=f'{mediainfo.title_year} {meta.season} 已洗版完成',
image=mediainfo.get_message_image()))
else:
# 正在洗版,更新资源优先级
@@ -362,73 +340,40 @@ class SubscribeChain(ChainBase):
def refresh(self):
"""
刷新站点最新资源
订阅刷新
"""
# 所有订阅
# 触发刷新站点资源,从缓存中匹配订阅
sites = self.get_subscribed_sites()
if sites is None:
return
self.match(
self.torrentschain.refresh(sites=sites)
)
def get_subscribed_sites(self) -> Optional[List[int]]:
"""
获取订阅中涉及的所有站点清单(节约资源)
:return: 返回[]代表所有站点命中返回None代表没有订阅
"""
# 查询所有订阅
subscribes = self.subscribeoper.list('R')
if not subscribes:
# 没有订阅不运行
return
# 读取缓存
torrents_cache: Dict[str, List[Context]] = self.load_cache(self._cache_file) or {}
return None
ret_sites = []
# 刷新订阅选中的Rss站点
for subscribe in subscribes:
# 如果有一个订阅没有选择站点,则刷新所有订阅站点
if not subscribe.sites:
return []
# 刷新选中的站点
sub_sites = json.loads(subscribe.sites)
if sub_sites:
ret_sites.extend(sub_sites)
# 去重
if ret_sites:
ret_sites = list(set(ret_sites))
# 所有站点索引
indexers = self.siteshelper.get_indexers()
# 配置的索引站点
config_indexers = [str(sid) for sid in self.systemconfig.get(SystemConfigKey.IndexerSites) or []]
# 遍历站点缓存资源
for indexer in indexers:
# 未开启的站点不搜索
if config_indexers and str(indexer.get("id")) not in config_indexers:
continue
logger.info(f'开始刷新 {indexer.get("name")} 最新种子 ...')
domain = StringUtils.get_url_domain(indexer.get("domain"))
torrents: List[TorrentInfo] = self.refresh_torrents(site=indexer)
# 按pubdate降序排列
torrents.sort(key=lambda x: x.pubdate or '', reverse=True)
# 取前N条
torrents = torrents[:settings.CACHE_CONF.get('refresh')]
if torrents:
# 过滤出没有处理过的种子
torrents = [torrent for torrent in torrents
if f'{torrent.title}{torrent.description}'
not in [f'{t.torrent_info.title}{t.torrent_info.description}'
for t in torrents_cache.get(domain) or []]]
if torrents:
logger.info(f'{indexer.get("name")}{len(torrents)} 个新种子')
else:
logger.info(f'{indexer.get("name")} 没有新种子')
continue
for torrent in torrents:
logger.info(f'处理资源:{torrent.title} ...')
# 识别
meta = MetaInfo(title=torrent.title, subtitle=torrent.description)
# 识别媒体信息
mediainfo: MediaInfo = self.recognize_media(meta=meta)
if not mediainfo:
logger.warn(f'未识别到媒体信息,标题:{torrent.title}')
# 存储空的媒体信息
mediainfo = MediaInfo()
# 清理多余数据
mediainfo.clear()
# 上下文
context = Context(meta_info=meta, media_info=mediainfo, torrent_info=torrent)
# 添加到缓存
if not torrents_cache.get(domain):
torrents_cache[domain] = [context]
else:
torrents_cache[domain].append(context)
# 如果超过了限制条数则移除掉前面的
if len(torrents_cache[domain]) > settings.CACHE_CONF.get('torrents'):
torrents_cache[domain] = torrents_cache[domain][-settings.CACHE_CONF.get('torrents'):]
# 回收资源
del torrents
else:
logger.info(f'{indexer.get("name")} 获取到种子')
# 从缓存中匹配订阅
self.match(torrents_cache)
# 保存缓存到本地
self.save_cache(torrents_cache, self._cache_file)
return ret_sites
def match(self, torrents: Dict[str, List[Context]]):
"""
@@ -454,14 +399,24 @@ class SubscribeChain(ChainBase):
continue
# 非洗版
if not subscribe.best_version:
# 每季总集数
totals = {}
if subscribe.season and subscribe.total_episode:
totals = {
subscribe.season: subscribe.total_episode
}
# 查询缺失的媒体信息
exist_flag, no_exists = self.downloadchain.get_no_exists_info(meta=meta, mediainfo=mediainfo)
exist_flag, no_exists = self.downloadchain.get_no_exists_info(
meta=meta,
mediainfo=mediainfo,
totals=totals
)
if exist_flag:
logger.info(f'{mediainfo.title_year} 媒体库中已存在,完成订阅')
self.subscribeoper.delete(subscribe.id)
# 发送通知
self.post_message(Notification(mtype=NotificationType.Subscribe,
title=f'{mediainfo.title_year}{meta.season} 已完成订阅',
title=f'{mediainfo.title_year} {meta.season} 已完成订阅',
image=mediainfo.get_message_image()))
continue
# 电视剧订阅
@@ -479,7 +434,7 @@ class SubscribeChain(ChainBase):
if no_exists and no_exists.get(subscribe.tmdbid):
no_exists_info = no_exists.get(subscribe.tmdbid).get(subscribe.season)
if no_exists_info:
logger.info(f'订阅 {mediainfo.title_year}{meta.season} 缺失集:{no_exists_info.episodes}')
logger.info(f'订阅 {mediainfo.title_year} {meta.season} 缺失集:{no_exists_info.episodes}')
else:
# 洗版
if meta.type == MediaType.TV:
@@ -492,6 +447,10 @@ class SubscribeChain(ChainBase):
}
else:
no_exists = {}
# 默认过滤规则
default_filter = self.systemconfig.get(SystemConfigKey.DefaultFilterRules) or {}
include = subscribe.include or default_filter.get("include")
exclude = subscribe.exclude or default_filter.get("exclude")
# 遍历缓存种子
_match_context = []
for domain, contexts in torrents.items():
@@ -504,21 +463,24 @@ class SubscribeChain(ChainBase):
if torrent_mediainfo.tmdb_id != mediainfo.tmdb_id \
or torrent_mediainfo.type != mediainfo.type:
continue
# 过滤规则
# 优先级过滤规则
if subscribe.best_version:
filter_rule = self.systemconfig.get(SystemConfigKey.FilterRules2)
filter_rule = self.systemconfig.get(SystemConfigKey.BestVersionFilterRules)
else:
filter_rule = self.systemconfig.get(SystemConfigKey.FilterRules)
filter_rule = self.systemconfig.get(SystemConfigKey.SubscribeFilterRules)
result: List[TorrentInfo] = self.filter_torrents(
rule_string=filter_rule,
torrent_list=[torrent_info])
torrent_list=[torrent_info],
mediainfo=torrent_mediainfo)
if result is not None and not result:
# 不符合过滤规则
logger.info(f"{torrent_info.title} 不匹配当前过滤规则")
continue
# 不在订阅站点范围的不处理
if subscribe.sites:
sub_sites = json.loads(subscribe.sites)
if sub_sites and torrent_info.site not in sub_sites:
logger.info(f"{torrent_info.title} 不符合 {torrent_mediainfo.title_year} 订阅站点要求")
continue
# 如果是电视剧
if torrent_mediainfo.type == MediaType.TV:
@@ -548,11 +510,11 @@ class SubscribeChain(ChainBase):
set(torrent_meta.episode_list)
):
logger.info(
f'{torrent_info.title} 对应剧集 {torrent_meta.episodes} 未包含缺失的剧集')
f'{torrent_info.title} 对应剧集 {torrent_meta.episode_list} 未包含缺失的剧集')
continue
# 过滤掉已经下载的集数
if self.__check_subscribe_note(subscribe, torrent_meta.episode_list):
logger.info(f'{torrent_info.title} 对应剧集 {torrent_meta.episodes} 已下载过')
logger.info(f'{torrent_info.title} 对应剧集 {torrent_meta.episode_list} 已下载过')
continue
else:
# 洗版时,非整季不要
@@ -561,14 +523,16 @@ class SubscribeChain(ChainBase):
logger.info(f'{subscribe.name} 正在洗版,{torrent_info.title} 不是整季')
continue
# 包含
if subscribe.include:
if not re.search(r"%s" % subscribe.include,
if include:
if not re.search(r"%s" % include,
f"{torrent_info.title} {torrent_info.description}", re.I):
logger.info(f"{torrent_info.title} 不匹配包含规则 {include}")
continue
# 排除
if subscribe.exclude:
if re.search(r"%s" % subscribe.exclude,
if exclude:
if re.search(r"%s" % exclude,
f"{torrent_info.title} {torrent_info.description}", re.I):
logger.info(f"{torrent_info.title} 匹配排除规则 {exclude}")
continue
# 匹配成功
logger.info(f'{mediainfo.title_year} 匹配成功:{torrent_info.title}')
@@ -590,12 +554,59 @@ class SubscribeChain(ChainBase):
if meta.type == MediaType.TV and not subscribe.best_version:
update_date = True if downloads else False
# 未完成下载,计算剩余集数
self.__upate_lack_episodes(lefts=lefts, subscribe=subscribe,
mediainfo=mediainfo, update_date=update_date)
self.__update_lack_episodes(lefts=lefts, subscribe=subscribe,
mediainfo=mediainfo, update_date=update_date)
else:
if meta.type == MediaType.TV:
# 未搜索到资源,但本地缺失可能有变化,更新订阅剩余集数
self.__upate_lack_episodes(lefts=no_exists, subscribe=subscribe, mediainfo=mediainfo)
self.__update_lack_episodes(lefts=no_exists, subscribe=subscribe, mediainfo=mediainfo)
def check(self):
"""
定时检查订阅,更新订阅信息
"""
# 查询所有订阅
subscribes = self.subscribeoper.list()
if not subscribes:
# 没有订阅不运行
return
# 遍历订阅
for subscribe in subscribes:
logger.info(f'开始检查订阅:{subscribe.name} ...')
# 生成元数据
meta = MetaInfo(subscribe.name)
meta.year = subscribe.year
meta.begin_season = subscribe.season or None
meta.type = MediaType(subscribe.type)
# 识别媒体信息
mediainfo: MediaInfo = self.recognize_media(meta=meta, mtype=meta.type, tmdbid=subscribe.tmdbid)
if not mediainfo:
logger.warn(f'未识别到媒体信息,标题:{subscribe.name}tmdbid{subscribe.tmdbid}')
continue
# 对于电视剧,获取当前季的总集数
episodes = mediainfo.seasons.get(subscribe.season) or []
if len(episodes) > (subscribe.total_episode or 0):
total_episode = len(episodes)
lack_episode = subscribe.lack_episode + (total_episode - subscribe.total_episode)
logger.info(
f'订阅 {subscribe.name} 总集数变化,更新总集数为{total_episode},缺失集数为{lack_episode} ...')
else:
total_episode = subscribe.total_episode
lack_episode = subscribe.lack_episode
# 更新TMDB信息
self.subscribeoper.update(subscribe.id, {
"name": mediainfo.title,
"year": mediainfo.year,
"vote": mediainfo.vote_average,
"poster": mediainfo.get_poster_image(),
"backdrop": mediainfo.get_backdrop_image(),
"description": mediainfo.overview,
"imdbid": mediainfo.imdb_id,
"tvdbid": mediainfo.tvdb_id,
"total_episode": total_episode,
"lack_episode": lack_episode
})
logger.info(f'订阅 {subscribe.name} 更新完成')
def __update_subscribe_note(self, subscribe: Subscribe, downloads: List[Context]):
"""
@@ -638,10 +649,10 @@ class SubscribeChain(ChainBase):
return True
return False
def __upate_lack_episodes(self, lefts: Dict[int, Dict[int, NotExistMediaInfo]],
subscribe: Subscribe,
mediainfo: MediaInfo,
update_date: bool = False):
def __update_lack_episodes(self, lefts: Dict[int, Dict[int, NotExistMediaInfo]],
subscribe: Subscribe,
mediainfo: MediaInfo,
update_date: bool = False):
"""
更新订阅剩余集数
"""
@@ -650,16 +661,20 @@ class SubscribeChain(ChainBase):
season = season_info.season
if season == subscribe.season:
left_episodes = season_info.episodes
logger.info(f'{mediainfo.title_year}{season} 更新缺失集数为{len(left_episodes)} ...')
if not left_episodes:
lack_episode = season_info.total_episode
else:
lack_episode = len(left_episodes)
logger.info(f'{mediainfo.title_year}{season} 更新缺失集数为{lack_episode} ...')
if update_date:
# 同时更新最后时间
self.subscribeoper.update(subscribe.id, {
"lack_episode": len(left_episodes),
"lack_episode": lack_episode,
"last_update": datetime.now().strftime('%Y-%m-%d %H:%M:%S')
})
else:
self.subscribeoper.update(subscribe.id, {
"lack_episode": len(left_episodes)
"lack_episode": lack_episode
})
def remote_list(self, channel: MessageChannel, userid: Union[str, int] = None):
@@ -722,40 +737,50 @@ class SubscribeChain(ChainBase):
total_episode: int,
start_episode: int):
"""
根据订阅开始集数和总结合TMDB信息计算当前订阅的缺失集数
根据订阅开始集数和总结合TMDB信息计算当前订阅的缺失集数
:param no_exists: 缺失季集列表
:param tmdb_id: TMDB ID
:param begin_season: 开始季
:param total_episode: 总集数
:param start_episode: 开始集数
:param total_episode: 订阅设定总集数
:param start_episode: 订阅设定开始集数
"""
# 使用订阅的总集数和开始集数替换no_exists
if no_exists \
and no_exists.get(tmdb_id) \
and (total_episode or start_episode):
# 该季原缺失信息
no_exist_season = no_exists.get(tmdb_id).get(begin_season)
if no_exist_season:
# 原集列表
# 原集列表
episode_list = no_exist_season.episodes
# 原总集数
total = no_exist_season.total_episode
if total_episode and start_episode:
# 有开始集和总集数
episodes = list(range(start_episode, total_episode + 1))
elif not start_episode:
# 有总集数没有开始集
episodes = list(range(min(episode_list or [1]), total_episode + 1))
start_episode = min(episode_list or [1])
elif not total_episode:
# 有开始集没有总集数
episodes = list(range(start_episode, max(episode_list or [total]) + 1))
total_episode = max(episode_list or [total])
# 原开始集数
start = no_exist_season.start_episode
# 更新剧集列表、开始集数、总集数
if not episode_list:
# 整季缺失
episodes = []
start_episode = start_episode or start
total_episode = total_episode or total
else:
return no_exists
# 与原有集取交集
if episode_list:
episodes = list(set(episodes).intersection(set(episode_list)))
# 处理集合
# 部分缺失
if not start_episode \
and not total_episode:
# 无需调整
return no_exists
if not start_episode:
# 没有自定义开始集
start_episode = start
if not total_episode:
# 没有自定义总集数
total_episode = total
# 新的集列表
new_episodes = list(range(max(start_episode, start), total_episode + 1))
# 与原集列表取交集
episodes = list(set(episode_list).intersection(set(new_episodes)))
# 更新集合
no_exists[tmdb_id][begin_season] = NotExistMediaInfo(
season=begin_season,
episodes=episodes,

17
app/chain/system.py Normal file
View File

@@ -0,0 +1,17 @@
from typing import Union
from app.chain import ChainBase
from app.schemas import Notification, MessageChannel
class SystemChain(ChainBase):
    """
    System-level processing chain.
    """

    def remote_clear_cache(self, channel: MessageChannel, userid: Union[int, str]):
        """
        Clear the system cache on a remote request and notify the user.

        :param channel: message channel the request came from
        :param userid: id of the requesting user, echoed back in the reply
        """
        self.clear_cache()
        # plain string: the former f-string had no placeholders (F541)
        self.post_message(Notification(channel=channel,
                                       title="缓存清理完成!", userid=userid))

View File

@@ -1,11 +1,16 @@
import random
from typing import Optional, List
from cachetools import cached, TTLCache
from app import schemas
from app.chain import ChainBase
from app.core.config import settings
from app.schemas import MediaType
from app.utils.singleton import Singleton
class TmdbChain(ChainBase):
class TmdbChain(ChainBase, metaclass=Singleton):
"""
TheMovieDB处理链
"""
@@ -106,3 +111,17 @@ class TmdbChain(ChainBase):
:param page: 页码
"""
return self.run_module("person_credits", person_id=person_id, page=page)
@cached(cache=TTLCache(maxsize=1, ttl=3600))
def get_random_wallpager(self):
"""
获取随机壁纸缓存1个小时
"""
infos = self.tmdb_trending()
if infos:
# 随机一个电影
while True:
info = random.choice(infos)
if info and info.get("backdrop_path"):
return f"https://{settings.TMDB_IMAGE_DOMAIN}/t/p/original{info.get('backdrop_path')}"
return None

234
app/chain/torrents.py Normal file
View File

@@ -0,0 +1,234 @@
import re
from typing import Dict, List, Union
from cachetools import cached, TTLCache
from app.chain import ChainBase
from app.core.config import settings
from app.core.context import TorrentInfo, Context, MediaInfo
from app.core.metainfo import MetaInfo
from app.db import SessionFactory
from app.db.site_oper import SiteOper
from app.db.systemconfig_oper import SystemConfigOper
from app.helper.rss import RssHelper
from app.helper.sites import SitesHelper
from app.log import logger
from app.schemas import Notification
from app.schemas.types import SystemConfigKey, MessageChannel, NotificationType
from app.utils.singleton import Singleton
from app.utils.string import StringUtils
class TorrentsChain(ChainBase, metaclass=Singleton):
    """
    Torrent chain for site home pages and RSS feeds; serves subscriptions,
    brushing and similar features.
    """

    # local cache file names for spider (home page) / rss torrent contexts
    _spider_file = "__torrents_cache__"
    _rss_file = "__rss_cache__"

    def __init__(self):
        self._db = SessionFactory()
        super().__init__(self._db)
        self.siteshelper = SitesHelper()
        self.siteoper = SiteOper(self._db)
        self.rsshelper = RssHelper()
        self.systemconfig = SystemConfigOper()

    def remote_refresh(self, channel: MessageChannel, userid: Union[str, int] = None):
        """
        Refresh torrents on a remote request, sending progress messages back.

        :param channel: message channel the request came from
        :param userid: id of the requesting user
        """
        # plain strings: the former f-strings had no placeholders (F541)
        self.post_message(Notification(channel=channel,
                                       title="开始刷新种子 ...", userid=userid))
        self.refresh()
        self.post_message(Notification(channel=channel,
                                       title="种子刷新完成!", userid=userid))

    def get_torrents(self, stype: str = None) -> Dict[str, List[Context]]:
        """
        Return the currently cached torrent contexts keyed by site domain.

        :param stype: force a cache type; spider = home-page cache, rss = RSS cache
        """
        if not stype:
            stype = settings.SUBSCRIBE_MODE
        # read the matching local cache file
        if stype == 'spider':
            return self.load_cache(self._spider_file) or {}
        else:
            return self.load_cache(self._rss_file) or {}

    @cached(cache=TTLCache(maxsize=128 if settings.BIG_MEMORY_MODE else 1, ttl=600))
    def browse(self, domain: str) -> List[TorrentInfo]:
        """
        Browse a site's home page and return its torrent list (TTL-cached 10 min).

        :param domain: site domain
        """
        logger.info(f'开始获取站点 {domain} 最新种子 ...')
        site = self.siteshelper.get_indexer(domain)
        if not site:
            logger.error(f'站点 {domain} 不存在!')
            return []
        return self.refresh_torrents(site=site)

    @cached(cache=TTLCache(maxsize=128 if settings.BIG_MEMORY_MODE else 1, ttl=300))
    def rss(self, domain: str) -> List[TorrentInfo]:
        """
        Fetch a site's RSS feed and return its torrent list (TTL-cached 5 min).

        :param domain: site domain
        """
        logger.info(f'开始获取站点 {domain} RSS ...')
        site = self.siteshelper.get_indexer(domain)
        if not site:
            logger.error(f'站点 {domain} 不存在!')
            return []
        if not site.get("rss"):
            logger.error(f'站点 {domain} 未配置RSS地址')
            return []
        rss_items = self.rsshelper.parse(site.get("rss"), bool(site.get("proxy")))
        if rss_items is None:
            # the rss link expired; try to regenerate it keeping the old config
            self.__renew_rss_url(domain=domain, site=site)
            return []
        if not rss_items:
            logger.error(f'站点 {domain} 未获取到RSS数据')
            return []
        # assemble torrent infos from the parsed items
        ret_torrents: List[TorrentInfo] = []
        for item in rss_items:
            if not item.get("title"):
                continue
            torrentinfo = TorrentInfo(
                site=site.get("id"),
                site_name=site.get("name"),
                site_cookie=site.get("cookie"),
                site_ua=site.get("ua") or settings.USER_AGENT,
                site_proxy=site.get("proxy"),
                site_order=site.get("pri"),
                title=item.get("title"),
                enclosure=item.get("enclosure"),
                page_url=item.get("link"),
                size=item.get("size"),
                pubdate=item["pubdate"].strftime("%Y-%m-%d %H:%M:%S") if item.get("pubdate") else None,
            )
            ret_torrents.append(torrentinfo)
        return ret_torrents

    def refresh(self, stype: str = None, sites: List[int] = None) -> Dict[str, List[Context]]:
        """
        Refresh the latest torrents of the configured sites, recognize the
        media and cache the resulting contexts.

        :param stype: force a cache type; spider = home-page cache, rss = RSS cache
        :param sites: force a list of site ids; when empty, use the configured RSS sites
        """
        # refresh type
        if not stype:
            stype = settings.SUBSCRIBE_MODE
        # sites to refresh
        if not sites:
            sites = self.systemconfig.get(SystemConfigKey.RssSites) or []
        # currently cached contexts
        torrents_cache = self.get_torrents()
        # all site indexers
        indexers = self.siteshelper.get_indexers()
        for indexer in indexers:
            # skip sites that are not enabled
            if sites and indexer.get("id") not in sites:
                continue
            domain = StringUtils.get_url_domain(indexer.get("domain"))
            if stype == "spider":
                # home page torrents
                torrents: List[TorrentInfo] = self.browse(domain=domain)
            else:
                # rss torrents
                torrents: List[TorrentInfo] = self.rss(domain=domain)
            # sort by pubdate descending on a COPY: browse()/rss() return the
            # TTL-cached list object, which an in-place .sort() would mutate
            torrents = sorted(torrents, key=lambda x: x.pubdate or '', reverse=True)
            # keep only the newest N
            torrents = torrents[:settings.CACHE_CONF.get('refresh')]
            if torrents:
                # drop torrents already present in the cache
                torrents = [torrent for torrent in torrents
                            if f'{torrent.title}{torrent.description}'
                            not in [f'{t.torrent_info.title}{t.torrent_info.description}'
                                    for t in torrents_cache.get(domain) or []]]
                if torrents:
                    logger.info(f'{indexer.get("name")}{len(torrents)} 个新种子')
                else:
                    logger.info(f'{indexer.get("name")} 没有新种子')
                    continue
                for torrent in torrents:
                    logger.info(f'处理资源:{torrent.title} ...')
                    # metadata from title/subtitle
                    meta = MetaInfo(title=torrent.title, subtitle=torrent.description)
                    # recognize media info
                    mediainfo: MediaInfo = self.recognize_media(meta=meta)
                    if not mediainfo:
                        logger.warn(f'未识别到媒体信息,标题:{torrent.title}')
                        # store an empty media info so the torrent is still cached
                        mediainfo = MediaInfo()
                    # strip redundant data before caching
                    mediainfo.clear()
                    # assemble the context
                    context = Context(meta_info=meta, media_info=mediainfo, torrent_info=torrent)
                    # append to the per-domain cache
                    if not torrents_cache.get(domain):
                        torrents_cache[domain] = [context]
                    else:
                        torrents_cache[domain].append(context)
                    # trim the oldest entries when over the configured limit
                    if len(torrents_cache[domain]) > settings.CACHE_CONF.get('torrents'):
                        torrents_cache[domain] = torrents_cache[domain][-settings.CACHE_CONF.get('torrents'):]
                # release the working list
                del torrents
            else:
                logger.info(f'{indexer.get("name")} 没有获取到种子')
        # persist the cache to the matching file
        if stype == "spider":
            self.save_cache(torrents_cache, self._spider_file)
        else:
            self.save_cache(torrents_cache, self._rss_file)
        # return the refreshed cache
        return torrents_cache

    def __renew_rss_url(self, domain: str, site: dict):
        """
        Regenerate an expired RSS url for a site, keeping the old configuration
        (only the passkey is substituted into the stored rss address).
        """
        try:
            # the RSS link has expired
            logger.error(f"站点 {domain} RSS链接已过期正在尝试自动获取")
            # generate a fresh rss address from the site credentials
            rss_url, errmsg = self.rsshelper.get_rss_link(
                url=site.get("url"),
                cookie=site.get("cookie"),
                ua=site.get("ua") or settings.USER_AGENT,
                proxy=True if site.get("proxy") else False
            )
            if rss_url:
                # extract the new passkey
                match = re.search(r'passkey=([a-zA-Z0-9]+)', rss_url)
                if match:
                    new_passkey = match.group(1)
                    # substitute the new passkey into the expired rss url
                    new_rss = re.sub(r'&passkey=([a-zA-Z0-9]+)', f'&passkey={new_passkey}', site.get("rss"))
                    logger.info(f"更新站点 {domain} RSS地址 ...")
                    self.siteoper.update_rss(domain=domain, rss=new_rss)
                else:
                    # no passkey found: notify the user instead
                    self.post_message(
                        Notification(mtype=NotificationType.SiteMessage, title=f"站点 {domain} RSS链接已过期"))
            else:
                self.post_message(
                    Notification(mtype=NotificationType.SiteMessage, title=f"站点 {domain} RSS链接已过期"))
        except Exception as e:
            # log through the project logger instead of a bare print()
            logger.error(f"更新站点 {domain} RSS地址失败:{str(e)}")
            self.post_message(Notification(mtype=NotificationType.SiteMessage, title=f"站点 {domain} RSS链接已过期"))

View File

@@ -1,24 +1,30 @@
import json
import glob
import re
import shutil
import threading
from pathlib import Path
from typing import List, Optional, Tuple, Union
from typing import List, Optional, Tuple, Union, Dict
from sqlalchemy.orm import Session
from app.chain import ChainBase
from app.chain.media import MediaChain
from app.chain.tmdb import TmdbChain
from app.core.config import settings
from app.core.context import MediaInfo
from app.core.meta import MetaBase
from app.core.metainfo import MetaInfo
from app.core.metainfo import MetaInfoPath
from app.db.downloadhistory_oper import DownloadHistoryOper
from app.db.models.downloadhistory import DownloadHistory
from app.db.models.transferhistory import TransferHistory
from app.db.systemconfig_oper import SystemConfigOper
from app.db.transferhistory_oper import TransferHistoryOper
from app.helper.format import FormatParser
from app.helper.progress import ProgressHelper
from app.log import logger
from app.schemas import TransferInfo, TransferTorrent, Notification
from app.schemas.types import TorrentStatus, EventType, MediaType, ProgressKey, NotificationType, MessageChannel
from app.schemas import TransferInfo, TransferTorrent, Notification, EpisodeFormat
from app.schemas.types import TorrentStatus, EventType, MediaType, ProgressKey, NotificationType, MessageChannel, \
SystemConfigKey
from app.utils.string import StringUtils
from app.utils.system import SystemUtils
@@ -35,6 +41,9 @@ class TransferChain(ChainBase):
self.downloadhis = DownloadHistoryOper(self._db)
self.transferhis = TransferHistoryOper(self._db)
self.progress = ProgressHelper()
self.mediachain = MediaChain(self._db)
self.tmdbchain = TmdbChain(self._db)
self.systemconfig = SystemConfigOper()
def process(self) -> bool:
"""
@@ -51,128 +60,397 @@ class TransferChain(ChainBase):
return False
logger.info(f"获取到 {len(torrents)} 个已完成的下载任务")
# 开始进度
self.progress.start(ProgressKey.FileTransfer)
# 总数
total_num = len(torrents)
# 已处理数量
processed_num = 0
self.progress.update(value=0,
text=f"开始转移下载任务文件,共 {total_num} 个任务 ...",
key=ProgressKey.FileTransfer)
for torrent in torrents:
# 更新进度
self.progress.update(value=processed_num / total_num * 100,
text=f"正在转移 {torrent.title} ...",
key=ProgressKey.FileTransfer)
# 识别元数据
meta: MetaBase = MetaInfo(title=torrent.title)
if not meta.name:
logger.error(f'未识别到元数据,标题:{torrent.title}')
continue
for torrent in torrents:
# 查询下载记录识别情况
downloadhis: DownloadHistory = self.downloadhis.get_by_hash(torrent.hash)
if downloadhis:
# 类型
mtype = MediaType(downloadhis.type)
# 补充剧集信息
if mtype == MediaType.TV \
and ((not meta.season_list and downloadhis.seasons)
or (not meta.episode_list and downloadhis.episodes)):
meta = MetaInfo(f"{torrent.title} {downloadhis.seasons} {downloadhis.episodes}")
# 按TMDBID识别
mediainfo = self.recognize_media(mtype=mtype,
tmdbid=downloadhis.tmdbid)
else:
mediainfo = self.recognize_media(meta=meta)
# 非MoviePilot下载的任务按文件识别
mediainfo = None
# 执行转移
self.do_transfer(path=torrent.path, mediainfo=mediainfo,
download_hash=torrent.hash)
# 设置下载任务状态
self.transfer_completed(hashs=torrent.hash, path=torrent.path)
# 结束
logger.info("下载器文件转移执行完成")
return True
def do_transfer(self, path: Path, meta: MetaBase = None,
mediainfo: MediaInfo = None, download_hash: str = None,
target: Path = None, transfer_type: str = None,
season: int = None, epformat: EpisodeFormat = None,
min_filesize: int = 0, force: bool = False) -> Tuple[bool, str]:
"""
执行一个复杂目录的转移操作
:param path: 待转移目录或文件
:param meta: 元数据
:param mediainfo: 媒体信息
:param download_hash: 下载记录hash
:param target: 目标路径
:param transfer_type: 转移类型
:param season: 季
:param epformat: 剧集格式
:param min_filesize: 最小文件大小(MB)
:param force: 是否强制转移
返回:成功标识,错误信息
"""
if not transfer_type:
transfer_type = settings.TRANSFER_TYPE
# 获取待转移路径清单
trans_paths = self.__get_trans_paths(path)
if not trans_paths:
logger.warn(f"{path.name} 没有找到可转移的媒体文件")
return False, f"{path.name} 没有找到可转移的媒体文件"
# 有集自定义格式
formaterHandler = FormatParser(eformat=epformat.format,
details=epformat.detail,
part=epformat.part,
offset=epformat.offset) if epformat else None
# 开始进度
self.progress.start(ProgressKey.FileTransfer)
# 目录所有文件清单
transfer_files = SystemUtils.list_files(directory=path,
extensions=settings.RMT_MEDIAEXT,
min_filesize=min_filesize)
if formaterHandler:
# 有集自定义格式,过滤文件
transfer_files = [f for f in transfer_files if formaterHandler.match(f.name)]
# 汇总错误信息
err_msgs: List[str] = []
# 总文件数
total_num = len(transfer_files)
# 已处理数量
processed_num = 0
# 失败数量
fail_num = 0
# 跳过数量
skip_num = 0
self.progress.update(value=0,
text=f"开始转移 {path},共 {total_num} 个文件 ...",
key=ProgressKey.FileTransfer)
# 整理屏蔽词
transfer_exclude_words = self.systemconfig.get(SystemConfigKey.TransferExcludeWords)
# 处理所有待转移目录或文件,默认一个转移路径或文件只有一个媒体信息
for trans_path in trans_paths:
# 汇总季集清单
season_episodes: Dict[Tuple, List[int]] = {}
# 汇总元数据
metas: Dict[Tuple, MetaBase] = {}
# 汇总媒体信息
medias: Dict[Tuple, MediaInfo] = {}
# 汇总转移信息
transfers: Dict[Tuple, TransferInfo] = {}
# 如果是目录且不是⼀蓝光原盘,获取所有文件并转移
if (not trans_path.is_file()
and not SystemUtils.is_bluray_dir(trans_path)):
# 遍历获取下载目录所有文件
file_paths = SystemUtils.list_files(directory=trans_path,
extensions=settings.RMT_MEDIAEXT,
min_filesize=min_filesize)
else:
file_paths = [trans_path]
if formaterHandler:
# 有集自定义格式,过滤文件
file_paths = [f for f in file_paths if formaterHandler.match(f.name)]
# 转移所有文件
for file_path in file_paths:
# 回收站及隐藏的文件不处理
file_path_str = str(file_path)
if file_path_str.find('/@Recycle/') != -1 \
or file_path_str.find('/#recycle/') != -1 \
or file_path_str.find('/.') != -1 \
or file_path_str.find('/@eaDir') != -1:
logger.debug(f"{file_path_str} 是回收站或隐藏的文件")
# 计数
processed_num += 1
skip_num += 1
continue
# 整理屏蔽词不处理
is_blocked = False
if transfer_exclude_words:
for keyword in transfer_exclude_words:
if not keyword:
continue
if keyword and re.search(r"%s" % keyword, file_path_str, re.IGNORECASE):
logger.info(f"{file_path} 命中整理屏蔽词 {keyword},不处理")
is_blocked = True
break
if is_blocked:
err_msgs.append(f"{file_path.name} 命中整理屏蔽词")
# 计数
processed_num += 1
skip_num += 1
continue
# 转移成功的不再处理
if not force:
transferd = self.transferhis.get_by_src(file_path_str)
if transferd and transferd.status:
logger.info(f"{file_path} 已成功转移过,如需重新处理,请删除历史记录。")
# 计数
processed_num += 1
skip_num += 1
continue
# 更新进度
self.progress.update(value=processed_num / total_num * 100,
text=f"正在转移 {processed_num + 1}/{total_num}{file_path.name} ...",
key=ProgressKey.FileTransfer)
if not meta:
# 文件元数据
file_meta = MetaInfoPath(file_path)
else:
file_meta = meta
# 合并季
if season is not None:
file_meta.begin_season = season
if not file_meta:
logger.error(f"{file_path} 无法识别有效信息")
err_msgs.append(f"{file_path} 无法识别有效信息")
# 计数
processed_num += 1
fail_num += 1
continue
# 自定义识别
if formaterHandler:
# 开始集、结束集、PART
begin_ep, end_ep, part = formaterHandler.split_episode(file_path.stem)
if begin_ep is not None:
file_meta.begin_episode = begin_ep
file_meta.part = part
if end_ep is not None:
file_meta.end_episode = end_ep
if not mediainfo:
logger.warn(f'识别媒体信息,标题:{torrent.title}')
# 识别媒体信息
file_mediainfo = self.recognize_media(meta=file_meta)
else:
file_mediainfo = mediainfo
if not file_mediainfo:
logger.warn(f'{file_path} 未识别到媒体信息')
# 新增转移失败历史记录
his = self.__insert_fail_history(
src_path=torrent.path,
download_hash=torrent.hash,
meta=meta
his = self.transferhis.add_fail(
src_path=file_path,
mode=transfer_type,
meta=file_meta,
download_hash=download_hash
)
self.post_message(Notification(
mtype=NotificationType.Manual,
title=f"{torrent.title} 未识别到媒体信息,无法入库!\n"
title=f"{file_path.name} 未识别到媒体信息,无法入库!\n"
f"回复:```\n/redo {his.id} [tmdbid]|[类型]\n``` 手动识别转移。"
))
# 设置种子状态,避免一直报错
self.transfer_completed(hashs=torrent.hash, transinfo=transferinfo)
# 计数
processed_num += 1
fail_num += 1
continue
logger.info(f"{torrent.title} 识别为:{mediainfo.type.value} {mediainfo.title_year}")
# 更新媒体图片
self.obtain_images(mediainfo=mediainfo)
# 如果未开启新增已入库媒体是否跟随TMDB信息变化则根据tmdbid查询之前的title
if not settings.SCRAP_FOLLOW_TMDB:
transfer_history = self.transferhis.get_by_type_tmdbid(tmdbid=file_mediainfo.tmdb_id,
mtype=file_mediainfo.type.value)
if transfer_history:
file_mediainfo.title = transfer_history.title
# 转移
transferinfo: TransferInfo = self.transfer(mediainfo=mediainfo,
path=torrent.path,
transfer_type=settings.TRANSFER_TYPE)
logger.info(f"{file_path.name} 识别为:{file_mediainfo.type.value} {file_mediainfo.title_year}")
# 更新媒体图片
self.obtain_images(mediainfo=file_mediainfo)
# 获取集数据
if file_mediainfo.type == MediaType.TV:
episodes_info = self.tmdbchain.tmdb_episodes(tmdbid=file_mediainfo.tmdb_id,
season=file_meta.begin_season or 1)
else:
episodes_info = None
# 获取下载hash
if not download_hash:
download_file = self.downloadhis.get_file_by_fullpath(file_path_str)
if download_file:
download_hash = download_file.download_hash
# 执行转移
transferinfo: TransferInfo = self.transfer(meta=file_meta,
mediainfo=file_mediainfo,
path=file_path,
transfer_type=transfer_type,
target=target,
episodes_info=episodes_info)
if not transferinfo:
logger.error("文件转移模块运行失败")
continue
if not transferinfo.target_path:
return False, "文件转移模块运行失败"
if not transferinfo.success:
# 转移失败
logger.warn(f"{torrent.title} 入库失败:{transferinfo.message}")
logger.warn(f"{file_path.name} 入库失败:{transferinfo.message}")
err_msgs.append(f"{file_path.name} {transferinfo.message}")
# 新增转移失败历史记录
self.__insert_fail_history(
src_path=torrent.path,
download_hash=torrent.hash,
meta=meta,
mediainfo=mediainfo,
self.transferhis.add_fail(
src_path=file_path,
mode=transfer_type,
download_hash=download_hash,
meta=file_meta,
mediainfo=file_mediainfo,
transferinfo=transferinfo
)
# 发送消息
self.post_message(Notification(
title=f"{mediainfo.title_year} {meta.season_episode} 入库失败!",
mtype=NotificationType.Manual,
title=f"{file_mediainfo.title_year} {file_meta.season_episode} 入库失败!",
text=f"原因:{transferinfo.message or '未知'}",
image=mediainfo.get_message_image()
image=file_mediainfo.get_message_image()
))
# 设置种子状态,避免一直报错
self.transfer_completed(hashs=torrent.hash, transinfo=transferinfo)
# 计数
processed_num += 1
fail_num += 1
continue
# 汇总信息
mkey = (file_mediainfo.tmdb_id, file_meta.begin_season)
if mkey not in medias:
# 新增信息
metas[mkey] = file_meta
medias[mkey] = file_mediainfo
season_episodes[mkey] = file_meta.episode_list
transfers[mkey] = transferinfo
else:
# 合并季集清单
season_episodes[mkey] = list(set(season_episodes[mkey] + file_meta.episode_list))
# 合并转移数据
transfers[mkey].file_count += transferinfo.file_count
transfers[mkey].total_size += transferinfo.total_size
transfers[mkey].file_list.extend(transferinfo.file_list)
transfers[mkey].file_list_new.extend(transferinfo.file_list_new)
transfers[mkey].fail_list.extend(transferinfo.fail_list)
# 新增转移成功历史记录
self.__insert_sucess_history(
src_path=torrent.path,
download_hash=torrent.hash,
meta=meta,
mediainfo=mediainfo,
self.transferhis.add_success(
src_path=file_path,
mode=transfer_type,
download_hash=download_hash,
meta=file_meta,
mediainfo=file_mediainfo,
transferinfo=transferinfo
)
# 转移完成
self.transfer_completed(hashs=torrent.hash, transinfo=transferinfo)
# 刮削元数据
self.scrape_metadata(path=transferinfo.target_path, mediainfo=mediainfo)
# 刷新媒体库
self.refresh_mediaserver(mediainfo=mediainfo, file_path=transferinfo.target_path)
# 刮削单个文件
if settings.SCRAP_METADATA:
self.scrape_metadata(path=transferinfo.target_path, mediainfo=file_mediainfo)
# 更新进度
processed_num += 1
self.progress.update(value=processed_num / total_num * 100,
text=f"{file_path.name} 转移完成",
key=ProgressKey.FileTransfer)
# 目录或文件转移完成
self.progress.update(text=f"{trans_path} 转移完成,正在执行后续处理 ...",
key=ProgressKey.FileTransfer)
# 执行后续处理
for mkey, media in medias.items():
transfer_meta = metas[mkey]
transfer_info = transfers[mkey]
# 媒体目录
if transfer_info.target_path.is_file():
transfer_info.target_path = transfer_info.target_path.parent
# 刷新媒体库,根目录或季目录
if settings.REFRESH_MEDIASERVER:
self.refresh_mediaserver(mediainfo=media, file_path=transfer_info.target_path)
# 发送通知
self.send_transfer_message(meta=meta, mediainfo=mediainfo, transferinfo=transferinfo)
se_str = None
if media.type == MediaType.TV:
se_str = f"{transfer_meta.season} {StringUtils.format_ep(season_episodes[mkey])}"
self.send_transfer_message(meta=transfer_meta,
mediainfo=media,
transferinfo=transfer_info,
season_episode=se_str)
# 广播事件
self.eventmanager.send_event(EventType.TransferComplete, {
'meta': meta,
'mediainfo': mediainfo,
'transferinfo': transferinfo
'meta': transfer_meta,
'mediainfo': media,
'transferinfo': transfer_info
})
# 计数
processed_num += 1
# 更新进度
self.progress.update(value=processed_num / total_num * 100,
text=f"{torrent.title} 转移完成",
key=ProgressKey.FileTransfer)
# 结束进度
self.progress.end(ProgressKey.FileTransfer)
logger.info("下载器文件转移执行完成")
return True
# 结束进度
logger.info(f"{path} 转移完成,共 {total_num} 个文件,"
f"失败 {fail_num} 个,跳过 {skip_num}")
self.progress.update(value=100,
text=f"{path} 转移完成,共 {total_num} 个文件,"
f"失败 {fail_num} 个,跳过 {skip_num}",
key=ProgressKey.FileTransfer)
self.progress.end(ProgressKey.FileTransfer)
return True, "\n".join(err_msgs)
@staticmethod
def __get_trans_paths(directory: Path):
"""
获取转移目录列表
"""
if not directory.exists():
logger.warn(f"目录不存在:{directory}")
return []
# 单文件
if directory.is_file():
return [directory]
# 蓝光原盘
if SystemUtils.is_bluray_dir(directory):
return [directory]
# 需要转移的路径列表
trans_paths = []
# 先检查当前目录的下级目录,以支持合集的情况
for sub_dir in SystemUtils.list_sub_directory(directory):
# 如果是蓝光原盘
if SystemUtils.is_bluray_dir(sub_dir):
trans_paths.append(sub_dir)
# 没有媒体文件的目录跳过
elif SystemUtils.list_files(sub_dir, extensions=settings.RMT_MEDIAEXT):
trans_paths.append(sub_dir)
if not trans_paths:
# 没有有效子目录,直接转移当前目录
trans_paths.append(directory)
else:
# 有子目录时,把当前目录的文件添加到转移任务中
trans_paths.extend(
SystemUtils.list_sub_files(directory, extensions=settings.RMT_MEDIAEXT)
)
return trans_paths
def remote_transfer(self, arg_str: str, channel: MessageChannel, userid: Union[str, int] = None):
"""
远程重新转移,参数 历史记录ID TMDBID|类型
"""
def args_error():
self.post_message(Notification(channel=channel,
title="请输入正确的命令格式:/redo [id] [tmdbid]|[类型]"
@@ -208,9 +486,10 @@ class TransferChain(ChainBase):
text=errmsg, userid=userid))
return
def re_transfer(self, logid: int, mtype: MediaType, tmdbid: int) -> Tuple[bool, str]:
def re_transfer(self, logid: int,
mtype: MediaType = None, tmdbid: int = None) -> Tuple[bool, str]:
"""
根据历史记录,重新识别转移
根据历史记录,重新识别转移只处理对应的src目录
:param logid: 历史记录ID
:param mtype: 媒体类型
:param tmdbid: TMDB ID
@@ -220,143 +499,102 @@ class TransferChain(ChainBase):
if not history:
logger.error(f"历史记录不存在ID{logid}")
return False, "历史记录不存在"
if history.download_hash:
# 有下载记录,按下载记录重新转移
torrents: Optional[List[TransferTorrent]] = self.list_torrents(hashs=history.download_hash)
if not torrents:
return False, f"没有获取到种子hash{history.download_hash}"
# 源目录
src_path = Path(torrents[0].path)
else:
# 没有下载记录,按源目录路径重新转移
src_path = Path(history.src)
if not src_path.exists():
return False, f"源目录不存在:{src_path}"
# 识别元数据
meta = MetaInfo(title=src_path.stem)
if not meta.name:
return False, f"未识别到元数据,标题:{src_path.stem}"
# 没有下载记录,按源目录路径重新转移
src_path = Path(history.src)
if not src_path.exists():
return False, f"源目录不存在:{src_path}"
dest_path = Path(history.dest) if history.dest else None
# 查询媒体信息
mediainfo = self.recognize_media(mtype=mtype, tmdbid=tmdbid)
if mtype and tmdbid:
mediainfo = self.recognize_media(mtype=mtype, tmdbid=tmdbid)
else:
meta = MetaInfoPath(src_path)
mediainfo = self.recognize_media(meta=meta)
if not mediainfo:
return False, f"未识别到媒体信息,类型:{mtype.value}tmdbid{tmdbid}"
# 重新执行转移
logger.info(f"{mtype.value} {tmdbid} 识别为:{mediainfo.title_year}")
logger.info(f"{src_path.name} 识别为:{mediainfo.title_year}")
# 更新媒体图片
self.obtain_images(mediainfo=mediainfo)
# 转移
transferinfo: TransferInfo = self.transfer(mediainfo=mediainfo,
path=src_path,
transfer_type=settings.TRANSFER_TYPE)
if not transferinfo:
logger.error("文件转移模块运行失败")
return False, "文件转移模块运行失败"
if not transferinfo.target_path:
# 转移失败
logger.warn(f"{src_path.name} 入库失败:{transferinfo.message}")
# 新增转移失败历史记录
self.__insert_fail_history(
src_path=src_path,
download_hash=history.download_hash,
meta=meta,
mediainfo=mediainfo,
transferinfo=transferinfo
)
return False, transferinfo.message
# 删除旧的已整理文件
if history.dest:
self.delete_files(Path(history.dest))
# 强制转移
state, errmsg = self.do_transfer(path=src_path,
mediainfo=mediainfo,
download_hash=history.download_hash,
target=dest_path,
force=True)
if not state:
return False, errmsg
# 新增转移成功历史记录
self.__insert_sucess_history(
src_path=src_path,
download_hash=history.download_hash,
meta=meta,
mediainfo=mediainfo,
transferinfo=transferinfo
)
# 删除旧历史记录
self.transferhis.delete(logid)
# 刮削元数据
self.scrape_metadata(path=transferinfo.target_path, mediainfo=mediainfo)
# 刷新媒体库
self.refresh_mediaserver(mediainfo=mediainfo, file_path=transferinfo.target_path)
# 发送通知
self.send_transfer_message(meta=meta, mediainfo=mediainfo, transferinfo=transferinfo)
# 广播事件
self.eventmanager.send_event(EventType.TransferComplete, {
'meta': meta,
'mediainfo': mediainfo,
'transferinfo': transferinfo
})
return True, ""
def __insert_sucess_history(self, src_path: Path, download_hash: str, meta: MetaBase,
mediainfo: MediaInfo, transferinfo: TransferInfo):
def manual_transfer(self, in_path: Path,
target: Path = None,
tmdbid: int = None,
mtype: MediaType = None,
season: int = None,
transfer_type: str = None,
epformat: EpisodeFormat = None,
min_filesize: int = 0) -> Tuple[bool, Union[str, list]]:
"""
新增转移成功历史记录
手动转移
:param in_path: 源文件路径
:param target: 目标路径
:param tmdbid: TMDB ID
:param mtype: 媒体类型
:param season: 季度
:param transfer_type: 转移类型
:param epformat: 剧集格式
:param min_filesize: 最小文件大小(MB)
"""
self.transferhis.add(
src=str(src_path),
dest=str(transferinfo.target_path),
mode=settings.TRANSFER_TYPE,
type=mediainfo.type.value,
category=mediainfo.category,
title=mediainfo.title,
year=mediainfo.year,
tmdbid=mediainfo.tmdb_id,
imdbid=mediainfo.imdb_id,
tvdbid=mediainfo.tvdb_id,
doubanid=mediainfo.douban_id,
seasons=meta.season,
episodes=meta.episode,
image=mediainfo.get_poster_image(),
download_hash=download_hash,
status=1,
files=json.dumps(transferinfo.file_list)
)
logger.info(f"手动转移:{in_path} ...")
def __insert_fail_history(self, src_path: Path, download_hash: str, meta: MetaBase,
transferinfo: TransferInfo = None, mediainfo: MediaInfo = None):
"""
新增转移失败历史记录
"""
if mediainfo and transferinfo:
his = self.transferhis.add(
src=str(src_path),
dest=str(transferinfo.target_path),
mode=settings.TRANSFER_TYPE,
type=mediainfo.type.value,
category=mediainfo.category,
title=mediainfo.title,
year=mediainfo.year,
tmdbid=mediainfo.tmdb_id,
imdbid=mediainfo.imdb_id,
tvdbid=mediainfo.tvdb_id,
doubanid=mediainfo.douban_id,
seasons=meta.season,
episodes=meta.episode,
image=mediainfo.get_poster_image(),
download_hash=download_hash,
status=0,
errmsg=transferinfo.message or '未知错误',
files=json.dumps(transferinfo.file_list)
if tmdbid:
# 有输入TMDBID时单个识别
# 识别媒体信息
mediainfo: MediaInfo = self.mediachain.recognize_media(tmdbid=tmdbid, mtype=mtype)
if not mediainfo:
return False, f"媒体信息识别失败tmdbid: {tmdbid}, type: {mtype.value}"
# 开始进度
self.progress.start(ProgressKey.FileTransfer)
self.progress.update(value=0,
text=f"开始转移 {in_path} ...",
key=ProgressKey.FileTransfer)
# 开始转移
state, errmsg = self.do_transfer(
path=in_path,
mediainfo=mediainfo,
target=target,
season=season,
epformat=epformat,
min_filesize=min_filesize
)
if not state:
return False, errmsg
self.progress.end(ProgressKey.FileTransfer)
logger.info(f"{in_path} 转移完成")
return True, ""
else:
his = self.transferhis.add(
src=str(src_path),
mode=settings.TRANSFER_TYPE,
seasons=meta.season,
episodes=meta.episode,
download_hash=download_hash,
status=0,
errmsg="未识别到媒体信息"
)
return his
# 没有输入TMDBID时按文件识别
state, errmsg = self.do_transfer(path=in_path,
target=target,
transfer_type=transfer_type,
season=season,
epformat=epformat,
min_filesize=min_filesize)
return state, errmsg
def send_transfer_message(self, meta: MetaBase, mediainfo: MediaInfo, transferinfo: TransferInfo):
def send_transfer_message(self, meta: MetaBase, mediainfo: MediaInfo,
transferinfo: TransferInfo, season_episode: str = None):
"""
发送入库成功的消息
"""
msg_title = f"{mediainfo.title_year} {meta.season_episode} 已入库"
msg_title = f"{mediainfo.title_year} {meta.season_episode if not season_episode else season_episode} 已入库"
if mediainfo.vote_average:
msg_str = f"评分:{mediainfo.vote_average},类型:{mediainfo.type.value}"
else:
@@ -378,25 +616,47 @@ class TransferChain(ChainBase):
def delete_files(path: Path):
"""
删除转移后的文件以及空目录
:param path: 文件路径
"""
logger.info(f"开始删除文件以及空目录:{path} ...")
if not path.exists():
logger.error(f"{path} 不存在")
return
elif path.is_file():
# 删除文件
path.unlink()
if path.is_file():
# 删除文件、nfo、jpg等同名文件
pattern = path.stem.replace('[', '?').replace(']', '?')
files = path.parent.glob(f"{pattern}.*")
for file in files:
Path(file).unlink()
logger.warn(f"文件 {path} 已删除")
# 判断目录是否为空, 为空则删除
if str(path.parent.parent) != str(path.root):
# 父父目录非根目录,删除父目录
files = SystemUtils.list_files_with_extensions(path.parent, settings.RMT_MEDIAEXT)
if not files:
shutil.rmtree(path.parent)
logger.warn(f"目录 {path.parent} 已删除")
# 需要删除父目录
elif str(path.parent) == str(path.root):
# 根目录,删除
logger.warn(f"根目录 {path} 不能删除!")
return
else:
if str(path.parent) != str(path.root):
# 父目录非根目录,才删除目录
shutil.rmtree(path)
# 删除目录
logger.warn(f"目录 {path} 已删除")
# 非根目录,才删除目录
shutil.rmtree(path)
# 删除目录
logger.warn(f"目录 {path} 已删除")
# 需要删除父目录
# 判断当前媒体父路径下是否有媒体文件,如有则无需遍历父级
if not SystemUtils.exits_files(path.parent, settings.RMT_MEDIAEXT):
# 媒体库二级分类根路径
library_root_names = [
settings.LIBRARY_MOVIE_NAME or '电影',
settings.LIBRARY_TV_NAME or '电视剧',
settings.LIBRARY_ANIME_NAME or '动漫',
]
# 判断父目录是否为空, 为空则删除
for parent_path in path.parents:
# 遍历父目录到媒体库二级分类根路径
if str(parent_path.name) in library_root_names:
break
if str(parent_path.parent) != str(path.root):
# 父目录非根目录,才删除父目录
if not SystemUtils.exits_files(parent_path, settings.RMT_MEDIAEXT):
# 当前路径下没有媒体文件则删除
shutil.rmtree(parent_path)
logger.warn(f"目录 {parent_path} 已删除")

View File

@@ -4,7 +4,7 @@ from typing import Any
from app.chain import ChainBase
from app.schemas import Notification
from app.schemas.types import EventType, MediaImageType, MediaType, NotificationType
from app.utils.http import WebUtils
from app.utils.web import WebUtils
class WebhookChain(ChainBase):

View File

@@ -1,21 +1,24 @@
import traceback
from threading import Thread, Event
from typing import Any, Union
from typing import Any, Union, Dict
from app.chain import ChainBase
from app.chain.cookiecloud import CookieCloudChain
from app.chain.download import DownloadChain
from app.chain.mediaserver import MediaServerChain
from app.chain.site import SiteChain
from app.chain.subscribe import SubscribeChain
from app.chain.system import SystemChain
from app.chain.transfer import TransferChain
from app.core.event import Event as ManagerEvent
from app.core.event import eventmanager, EventManager
from app.core.plugin import PluginManager
from app.db import SessionFactory
from app.log import logger
from app.scheduler import Scheduler
from app.schemas import Notification
from app.schemas.types import EventType, MessageChannel
from app.utils.object import ObjectUtils
from app.utils.singleton import Singleton
from app.utils.system import SystemUtils
class CommandChian(ChainBase):
@@ -38,76 +41,107 @@ class Command(metaclass=Singleton):
_event = Event()
def __init__(self):
# 数据库连接
self._db = SessionFactory()
# 事件管理器
self.eventmanager = EventManager()
# 插件管理器
self.pluginmanager = PluginManager()
# 处理链
self.chain = CommandChian(self._db)
# 定时服务管理
self.scheduler = Scheduler()
# 内置命令
self._commands = {
"/cookiecloud": {
"func": CookieCloudChain().remote_sync,
"id": "cookiecloud",
"type": "scheduler",
"description": "同步站点",
"data": {}
"category": "站点"
},
"/sites": {
"func": SiteChain().remote_list,
"func": SiteChain(self._db).remote_list,
"description": "查询站点",
"category": "站点",
"data": {}
},
"/site_cookie": {
"func": SiteChain().remote_cookie,
"func": SiteChain(self._db).remote_cookie,
"description": "更新站点Cookie",
"data": {}
},
"/site_enable": {
"func": SiteChain().remote_enable,
"func": SiteChain(self._db).remote_enable,
"description": "启用站点",
"data": {}
},
"/site_disable": {
"func": SiteChain().remote_disable,
"func": SiteChain(self._db).remote_disable,
"description": "禁用站点",
"data": {}
},
"/mediaserver_sync": {
"func": MediaServerChain().remote_sync,
"id": "mediaserver_sync",
"type": "scheduler",
"description": "同步媒体服务器",
"data": {}
"category": "管理"
},
"/subscribes": {
"func": SubscribeChain().remote_list,
"func": SubscribeChain(self._db).remote_list,
"description": "查询订阅",
"category": "订阅",
"data": {}
},
"/subscribe_refresh": {
"func": SubscribeChain().remote_refresh,
"id": "subscribe_refresh",
"type": "scheduler",
"description": "刷新订阅",
"data": {}
"category": "订阅"
},
"/subscribe_search": {
"func": SubscribeChain().remote_search,
"id": "subscribe_search",
"type": "scheduler",
"description": "搜索订阅",
"data": {}
"category": "订阅"
},
"/subscribe_delete": {
"func": SubscribeChain().remote_delete,
"func": SubscribeChain(self._db).remote_delete,
"description": "删除订阅",
"data": {}
},
"/subscribe_tmdb": {
"id": "subscribe_tmdb",
"type": "scheduler",
"description": "订阅元数据更新"
},
"/downloading": {
"func": DownloadChain().remote_downloading,
"func": DownloadChain(self._db).remote_downloading,
"description": "正在下载",
"category": "管理",
"data": {}
},
"/transfer": {
"func": TransferChain().process,
"id": "transfer",
"type": "scheduler",
"description": "下载文件整理",
"data": {}
"category": "管理"
},
"/redo": {
"func": TransferChain().remote_transfer,
"func": TransferChain(self._db).remote_transfer,
"description": "手动整理",
"data": {}
},
"/clear_cache": {
"func": SystemChain(self._db).remote_clear_cache,
"description": "清理缓存",
"category": "管理",
"data": {}
},
"/restart": {
"func": SystemUtils.restart,
"description": "重启系统",
"category": "管理",
"data": {}
}
}
# 汇总插件命令
@@ -117,13 +151,12 @@ class Command(metaclass=Singleton):
cmd=command.get('cmd'),
func=Command.send_plugin_event,
desc=command.get('desc'),
category=command.get('category'),
data={
'etype': command.get('event'),
'data': command.get('data')
}
)
# 处理链
self.chain = CommandChian()
# 广播注册命令菜单
self.chain.register_commands(commands=self.get_commands())
# 消息处理线程
@@ -149,12 +182,64 @@ class Command(metaclass=Singleton):
except Exception as e:
logger.error(f"事件处理出错:{str(e)} - {traceback.format_exc()}")
def __run_command(self, command: Dict[str, any],
data_str: str = "",
channel: MessageChannel = None, userid: Union[str, int] = None):
"""
运行定时服务
"""
if command.get("type") == "scheduler":
# 定时服务
if userid:
self.chain.post_message(
Notification(
channel=channel,
title=f"开始执行 {command.get('description')} ...",
userid=userid
)
)
# 执行定时任务
self.scheduler.start(job_id=command.get("id"))
if userid:
self.chain.post_message(
Notification(
channel=channel,
title=f"{command.get('description')} 执行完成",
userid=userid
)
)
else:
# 命令
cmd_data = command['data'] if command.get('data') else {}
args_num = ObjectUtils.arguments(command['func'])
if args_num > 0:
if cmd_data:
# 有内置参数直接使用内置参数
data = cmd_data.get("data") or {}
data['channel'] = channel
data['user'] = userid
cmd_data['data'] = data
command['func'](**cmd_data)
elif args_num == 2:
# 没有输入参数只输入渠道和用户ID
command['func'](channel, userid)
elif args_num > 2:
# 多个输入参数用户输入、用户ID
command['func'](data_str, channel, userid)
else:
# 没有参数
command['func']()
def stop(self):
"""
停止事件处理线程
"""
self._event.set()
self._thread.join()
if self._db:
self._db.close()
def get_commands(self):
"""
@@ -162,13 +247,15 @@ class Command(metaclass=Singleton):
"""
return self._commands
def register(self, cmd: str, func: Any, data: dict = None, desc: str = None) -> None:
def register(self, cmd: str, func: Any, data: dict = None,
desc: str = None, category: str = None) -> None:
"""
注册命令
"""
self._commands[cmd] = {
"func": func,
"description": desc,
"category": category,
"data": data or {}
}
@@ -186,23 +273,19 @@ class Command(metaclass=Singleton):
command = self.get(cmd)
if command:
try:
logger.info(f"用户 {userid} 开始执行:{command.get('description')} ...")
cmd_data = command['data'] if command.get('data') else {}
args_num = ObjectUtils.arguments(command['func'])
if args_num > 0:
if cmd_data:
# 有内置参数直接使用内置参数
command['func'](**cmd_data)
elif args_num == 2:
# 没有输入参数只输入渠道和用户ID
command['func'](channel, userid)
elif args_num > 2:
# 多个输入参数用户输入、用户ID
command['func'](data_str, channel, userid)
if userid:
logger.info(f"用户 {userid} 开始执行:{command.get('description')} ...")
else:
# 没有参数
command['func']()
logger.info(f"用户 {userid} {command.get('description')} 执行完成")
logger.info(f"开始执行:{command.get('description')} ...")
# 执行命令
self.__run_command(command, data_str=data_str,
channel=channel, userid=userid)
if userid:
logger.info(f"用户 {userid} {command.get('description')} 执行完成")
else:
logger.info(f"{command.get('description')} 执行完成")
except Exception as err:
logger.error(f"执行命令 {cmd} 出错:{str(err)}")
traceback.print_exc()

View File

@@ -1,8 +1,13 @@
import os
import secrets
import sys
from pathlib import Path
from typing import List
from pydantic import BaseSettings
from app.utils.system import SystemUtils
class Settings(BaseSettings):
# 项目名称
@@ -39,6 +44,8 @@ class Settings(BaseSettings):
SEARCH_SOURCE: str = "themoviedb"
# 刮削入库的媒体文件
SCRAP_METADATA: bool = True
# 新增已入库媒体是否跟随TMDB信息变化
SCRAP_FOLLOW_TMDB: bool = True
# 刮削来源
SCRAP_SOURCE: str = "themoviedb"
# TMDB图片地址
@@ -63,9 +70,17 @@ class Settings(BaseSettings):
RMT_AUDIO_TRACK_EXT: list = ['.mka']
# 索引器
INDEXER: str = "builtin"
# 用户认证站点 hhclub/audiences/hddolby/zmpt/freefarm/hdfans/wintersakura/leaves/1ptba/icc2022/iyuu
# 订阅模式
SUBSCRIBE_MODE: str = "spider"
# RSS订阅模式刷新时间间隔分钟
SUBSCRIBE_RSS_INTERVAL: int = 30
# 订阅搜索开关
SUBSCRIBE_SEARCH: bool = False
# 用户认证站点
AUTH_SITE: str = ""
# 消息通知渠道 telegram/wechat/slack
# 交互搜索自动下载用户ID使用,分割
AUTO_DOWNLOAD_USER: str = None
# 消息通知渠道 telegram/wechat/slack多个通知渠道用,分隔
MESSAGER: str = "telegram"
# WeChat企业ID
WECHAT_CORPID: str = None
@@ -95,6 +110,10 @@ class Settings(BaseSettings):
SLACK_APP_TOKEN: str = ""
# Slack 频道名称
SLACK_CHANNEL: str = ""
# SynologyChat Webhook
SYNOLOGYCHAT_WEBHOOK: str = ""
# SynologyChat Token
SYNOLOGYCHAT_TOKEN: str = ""
# 下载器 qbittorrent/transmission
DOWNLOADER: str = "qbittorrent"
# 下载器监控开关
@@ -105,6 +124,8 @@ class Settings(BaseSettings):
QB_USER: str = None
# Qbittorrent密码
QB_PASSWORD: str = None
# Qbittorrent分类自动管理
QB_CATEGORY: bool = False
# Transmission地址IP:PORT
TR_HOST: str = None
# Transmission用户名
@@ -119,16 +140,20 @@ class Settings(BaseSettings):
DOWNLOAD_MOVIE_PATH: str = None
# 电视剧下载保存目录,容器内映射路径需要一致
DOWNLOAD_TV_PATH: str = None
# 动漫下载保存目录,容器内映射路径需要一致
DOWNLOAD_ANIME_PATH: str = None
# 下载目录二级分类
DOWNLOAD_CATEGORY: bool = False
# 下载站点字幕
DOWNLOAD_SUBTITLE: bool = True
# 媒体服务器 emby/jellyfin/plex
# 媒体服务器 emby/jellyfin/plex,多个媒体服务器,分割
MEDIASERVER: str = "emby"
# 入库刷新媒体库
REFRESH_MEDIASERVER: bool = True
# 媒体服务器同步间隔(小时)
MEDIASERVER_SYNC_INTERVAL: int = 6
# 媒体服务器同步黑名单,多个媒体库名称,分割
MEDIASERVER_SYNC_BLACKLIST: str = None
# EMBY服务器地址IP:PORT
EMBY_HOST: str = None
# EMBY Api Key
@@ -144,23 +169,29 @@ class Settings(BaseSettings):
# 转移方式 link/copy/move/softlink
TRANSFER_TYPE: str = "copy"
# CookieCloud服务器地址
COOKIECLOUD_HOST: str = "https://nastool.org/cookiecloud"
COOKIECLOUD_HOST: str = "https://movie-pilot.org/cookiecloud"
# CookieCloud用户KEY
COOKIECLOUD_KEY: str = None
# CookieCloud端对端加密密码
COOKIECLOUD_PASSWORD: str = None
# CookieCloud同步间隔分钟
COOKIECLOUD_INTERVAL: int = 60 * 24
# OCR服务器地址
OCR_HOST: str = "https://movie-pilot.org"
# CookieCloud对应的浏览器UA
USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36 Edg/113.0.1774.57"
# 媒体库目录
# 媒体库目录,多个目录使用,分隔
LIBRARY_PATH: str = None
# 电影媒体库目录名,默认"电影"
LIBRARY_MOVIE_NAME: str = None
# 电视剧媒体库目录名,默认"电视剧"
LIBRARY_TV_NAME: str = None
# 动漫媒体库目录名,默认"电视剧/动漫"
LIBRARY_ANIME_NAME: str = None
# 二级分类
LIBRARY_CATEGORY: bool = True
# 电视剧动漫的分类genre_ids
ANIME_GENREIDS = [16]
# 电影重命名格式
MOVIE_RENAME_FORMAT: str = "{{title}}{% if year %} ({{year}}){% endif %}" \
"/{{title}}{% if year %} ({{year}}){% endif %}{% if part %}-{{part}}{% endif %}{% if videoFormat %} - {{videoFormat}}{% endif %}" \
@@ -181,7 +212,11 @@ class Settings(BaseSettings):
def CONFIG_PATH(self):
if self.CONFIG_DIR:
return Path(self.CONFIG_DIR)
return self.INNER_CONFIG_PATH
elif SystemUtils.is_docker():
return Path("/config")
elif SystemUtils.is_frozen():
return Path(sys.executable).parent / "config"
return self.ROOT_PATH / "config"
@property
def TEMP_PATH(self):
@@ -235,11 +270,20 @@ class Settings(BaseSettings):
"server": self.PROXY_HOST
}
def __init__(self):
super().__init__()
@property
def LIBRARY_PATHS(self) -> List[Path]:
if self.LIBRARY_PATH:
return [Path(path) for path in self.LIBRARY_PATH.split(",")]
return []
def __init__(self, **kwargs):
super().__init__(**kwargs)
with self.CONFIG_PATH as p:
if not p.exists():
p.mkdir(parents=True, exist_ok=True)
if SystemUtils.is_frozen():
if not (p / "app.env").exists():
SystemUtils.copy(self.INNER_CONFIG_PATH / "app.env", p / "app.env")
with self.TEMP_PATH as p:
if not p.exists():
p.mkdir(parents=True, exist_ok=True)
@@ -251,4 +295,7 @@ class Settings(BaseSettings):
case_sensitive = True
settings = Settings()
settings = Settings(
_env_file=Settings().CONFIG_PATH / "app.env",
_env_file_encoding="utf-8"
)

View File

@@ -1,6 +1,6 @@
import re
from dataclasses import dataclass, field, asdict
from typing import List, Dict, Any
from typing import List, Dict, Any, Tuple
from app.core.config import settings
from app.core.meta import MetaBase
@@ -148,6 +148,8 @@ class MediaInfo:
vote_average: int = 0
# 描述
overview: str = None
# 风格ID
genre_ids: list = field(default_factory=list)
# 所有别名和译名
names: list = field(default_factory=list)
# 各季的剧集清单信息
@@ -250,6 +252,15 @@ class MediaInfo:
"""
setattr(self, f"{name}_path", image)
def get_image(self, name: str):
"""
获取图片地址
"""
try:
return getattr(self, f"{name}_path")
except AttributeError:
return None
def set_category(self, cat: str):
"""
设置二级分类
@@ -261,7 +272,7 @@ class MediaInfo:
初始化媒信息
"""
def __directors_actors(tmdbinfo: dict):
def __directors_actors(tmdbinfo: dict) -> Tuple[List[dict], List[dict]]:
"""
查询导演和演员
:param tmdbinfo: TMDB元数据
@@ -338,6 +349,8 @@ class MediaInfo:
self.vote_average = round(float(info.get('vote_average')), 1) if info.get('vote_average') else 0
# 描述
self.overview = info.get('overview')
# 风格
self.genre_ids = info.get('genre_ids') or []
# 原语种
self.original_language = info.get('original_language')
if self.type == MediaType.MOVIE:
@@ -442,6 +455,8 @@ class MediaInfo:
self.poster_path = info.get("pic", {}).get("large")
if not self.poster_path and info.get("cover_url"):
self.poster_path = info.get("cover_url")
if not self.poster_path and info.get("cover"):
self.poster_path = info.get("cover").get("url")
# 简介
if not self.overview:
self.overview = info.get("intro") or info.get("card_subtitle") or ""
@@ -549,7 +564,6 @@ class MediaInfo:
dicts["type"] = self.type.value if self.type else None
dicts["detail_link"] = self.detail_link
dicts["title_year"] = self.title_year
dicts["tmdb_info"]["media_type"] = self.type.value if self.type else None
return dicts
def clear(self):

View File

@@ -0,0 +1,47 @@
import regex as re
from app.db.systemconfig_oper import SystemConfigOper
from app.schemas.types import SystemConfigKey
from app.utils.singleton import Singleton
class CustomizationMatcher(metaclass=Singleton):
"""
识别自定义占位符
"""
customization = None
custom_separator = None
def __init__(self):
self.systemconfig = SystemConfigOper()
self.customization = None
self.custom_separator = None
def match(self, title=None):
"""
:param title: 资源标题或文件名
:return: 匹配结果
"""
if not title:
return ""
if not self.customization:
# 自定义占位符
customization = self.systemconfig.get(SystemConfigKey.Customization)
if not customization:
return ""
if isinstance(customization, str):
customization = customization.replace("\n", ";").replace("|", ";").strip(";").split(";")
self.customization = "|".join([f"({item})" for item in customization])
customization_re = re.compile(r"%s" % self.customization)
# 处理重复多次的情况,保留先后顺序(按添加自定义占位符的顺序)
unique_customization = {}
for item in re.findall(customization_re, title):
if not isinstance(item, tuple):
item = (item,)
for i in range(len(item)):
if item[i] and unique_customization.get(item[i]) is None:
unique_customization[item[i]] = i
unique_customization = list(dict(sorted(unique_customization.items(), key=lambda x: x[1])).keys())
separator = self.custom_separator or "@"
return separator.join(unique_customization)

View File

@@ -1,6 +1,7 @@
import re
import zhconv
import anitopy
from app.core.meta.customization import CustomizationMatcher
from app.core.meta.metabase import MetaBase
from app.core.meta.releasegroup import ReleaseGroupsMatcher
from app.utils.string import StringUtils
@@ -144,6 +145,8 @@ class MetaAnime(MetaBase):
self.resource_team = \
ReleaseGroupsMatcher().match(title=original_title) or \
anitopy_info_origin.get("release_group") or None
# 自定义占位符
self.customization = CustomizationMatcher().match(title=original_title) or None
# 视频编码
self.video_encode = anitopy_info.get("video_term")
if isinstance(self.video_encode, list):

View File

@@ -15,9 +15,9 @@ class MetaBase(object):
"""
# 是否处理的文件
isfile: bool = False
# 原标题字符串
# 原标题字符串(未经过识别词处理)
title: str = ""
# 识别用字符串
# 识别用字符串(经过识别词处理后)
org_string: Optional[str] = None
# 副标题
subtitle: Optional[str] = None
@@ -51,6 +51,8 @@ class MetaBase(object):
resource_pix: Optional[str] = None
# 识别的制作组/字幕组
resource_team: Optional[str] = None
# 识别的自定义占位符
customization: Optional[str] = None
# 视频编码
video_encode: Optional[str] = None
# 音频编码
@@ -243,7 +245,7 @@ class MetaBase(object):
else:
return [self.begin_season]
@ property
@property
def episode(self) -> str:
"""
返回开始集、结束集字符串
@@ -440,9 +442,21 @@ class MetaBase(object):
elif len(ep) > 1 and str(ep[0]).isdigit() and str(ep[-1]).isdigit():
self.begin_episode = int(ep[0])
self.end_episode = int(ep[-1])
self.total_episode = (self.end_episode - self.begin_episode) + 1
elif str(ep).isdigit():
self.begin_episode = int(ep)
self.end_episode = None
def set_episodes(self, begin: int, end: int):
"""
设置开始集结束集
"""
if begin:
self.begin_episode = begin
if end:
self.end_episode = end
if self.begin_episode and self.end_episode:
self.total_episode = (self.end_episode - self.begin_episode) + 1
def merge(self, meta: Self):
"""
@@ -480,6 +494,9 @@ class MetaBase(object):
# 制作组/字幕组
if not self.resource_team:
self.resource_team = meta.resource_team
# 自定义占位符
if not self.customization:
self.customization = meta.customization
# 特效
if not self.resource_effect:
self.resource_effect = meta.resource_effect

View File

@@ -2,6 +2,7 @@ import re
from pathlib import Path
from app.core.config import settings
from app.core.meta.customization import CustomizationMatcher
from app.core.meta.metabase import MetaBase
from app.core.meta.releasegroup import ReleaseGroupsMatcher
from app.utils.string import StringUtils
@@ -130,6 +131,8 @@ class MetaVideo(MetaBase):
self.part = None
# 制作组/字幕组
self.resource_team = ReleaseGroupsMatcher().match(title=original_title) or None
# 自定义占位符
self.customization = CustomizationMatcher().match(title=original_title) or None
def __fix_name(self, name: str):
if not name:
@@ -371,6 +374,8 @@ class MetaVideo(MetaBase):
self.type = MediaType.TV
elif token.upper() == "SEASON" and self.begin_season is None:
self._last_token_type = "SEASON"
elif self.type == MediaType.TV and self.begin_season is None:
self.begin_season = 1
def __init_episode(self, token: str):
re_res = re.findall(r"%s" % self._episode_re, token, re.IGNORECASE)

View File

@@ -28,7 +28,23 @@ class WordsMatcher(metaclass=Singleton):
if not word:
continue
try:
if word.count(" => "):
if word.count(" => ") and word.count(" && ") and word.count(" >> ") and word.count(" <> "):
# 替换词
thc = str(re.findall(r'(.*?)\s*=>', word)[0]).strip()
# 被替换词
bthc = str(re.findall(r'=>\s*(.*?)\s*&&', word)[0]).strip()
# 集偏移前字段
pyq = str(re.findall(r'&&\s*(.*?)\s*<>', word)[0]).strip()
# 集偏移后字段
pyh = str(re.findall(r'<>(.*?)\s*>>', word)[0]).strip()
# 集偏移
offsets = str(re.findall(r'>>\s*(.*?)$', word)[0]).strip()
# 替换词
title, message, state = self.__replace_regex(title, thc, bthc)
if state:
# 替换词成功再进行集偏移
title, message, state = self.__episode_offset(title, pyq, pyh, offsets)
elif word.count(" => "):
# 替换词
strings = word.split(" => ")
title, message, state = self.__replace_regex(title, strings[0], strings[1])

View File

@@ -9,7 +9,7 @@ from app.core.meta.words import WordsMatcher
def MetaInfo(title: str, subtitle: str = None) -> MetaBase:
"""
媒体整理入口,根据名称和副标题,判断是哪种类型的识别,返回对应对象
根据标题和副标题识别元数据
:param title: 标题、种子名、文件名
:param subtitle: 副标题、描述
:return: MetaAnime、MetaVideo
@@ -33,6 +33,20 @@ def MetaInfo(title: str, subtitle: str = None) -> MetaBase:
return meta
def MetaInfoPath(path: Path) -> MetaBase:
"""
根据路径识别元数据
:param path: 路径
"""
# 上级目录元数据
dir_meta = MetaInfo(title=path.parent.name)
# 文件元数据,不包含后缀
file_meta = MetaInfo(title=path.stem)
# 合并元数据
file_meta.merge(dir_meta)
return file_meta
def is_anime(name: str) -> bool:
"""
判断是否为动漫

View File

@@ -3,6 +3,7 @@ from typing import List, Any, Dict, Tuple
from app.db.systemconfig_oper import SystemConfigOper
from app.helper.module import ModuleHelper
from app.helper.sites import SitesHelper
from app.log import logger
from app.schemas.types import SystemConfigKey
from app.utils.object import ObjectUtils
@@ -23,6 +24,7 @@ class PluginManager(metaclass=Singleton):
_config_key: str = "plugin.%s"
def __init__(self):
self.siteshelper = SitesHelper()
self.init_config()
def init_config(self):
@@ -37,6 +39,7 @@ class PluginManager(metaclass=Singleton):
"""
启动加载插件
"""
# 扫描插件目录
plugins = ModuleHelper.load(
"app.plugins",
@@ -80,8 +83,15 @@ class PluginManager(metaclass=Singleton):
"""
# 停止所有插件
for plugin in self._running_plugins.values():
if hasattr(plugin, "stop"):
plugin.stop()
# 关闭数据库
if hasattr(plugin, "close"):
plugin.close()
# 关闭插件
if hasattr(plugin, "stop_service"):
plugin.stop_service()
# 清空对像
self._plugins = {}
self._running_plugins = {}
def get_plugin_config(self, pid: str) -> dict:
"""
@@ -176,6 +186,8 @@ class PluginManager(metaclass=Singleton):
# 已安装插件
installed_apps = self.systemconfig.get(SystemConfigKey.UserInstalledPlugins) or []
for pid, plugin in self._plugins.items():
# 运行状插件
plugin_obj = self._running_plugins.get(pid)
# 基本属性
conf = {}
# ID
@@ -186,11 +198,20 @@ class PluginManager(metaclass=Singleton):
else:
conf.update({"installed": False})
# 运行状态
if pid in self._running_plugins.keys() and hasattr(plugin, "get_state"):
plugin_obj = self._running_plugins.get(pid)
if plugin_obj and hasattr(plugin, "get_state"):
conf.update({"state": plugin_obj.get_state()})
else:
conf.update({"state": False})
# 是否有详情页面
if hasattr(plugin, "get_page"):
if ObjectUtils.check_method(plugin.get_page):
conf.update({"has_page": True})
else:
conf.update({"has_page": False})
# 权限
if hasattr(plugin, "auth_level"):
if self.siteshelper.auth_level < plugin.auth_level:
continue
# 名称
if hasattr(plugin, "plugin_name"):
conf.update({"plugin_name": plugin.plugin_name})

View File

@@ -1,5 +1,5 @@
from sqlalchemy import create_engine, QueuePool
from sqlalchemy.orm import sessionmaker, Session
from sqlalchemy.orm import sessionmaker, Session, scoped_session
from app.core.config import settings
@@ -8,11 +8,16 @@ Engine = create_engine(f"sqlite:///{settings.CONFIG_PATH}/user.db",
pool_pre_ping=True,
echo=False,
poolclass=QueuePool,
pool_size=1000,
pool_recycle=60 * 10,
max_overflow=0)
# 数据库会话
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=Engine)
pool_size=1024,
pool_recycle=600,
pool_timeout=180,
max_overflow=0,
connect_args={"timeout": 60})
# 会话工厂
SessionFactory = sessionmaker(autocommit=False, autoflush=False, bind=Engine)
# 多线程全局使用的数据库会话
ScopedSession = scoped_session(SessionFactory)
def get_db():
@@ -22,7 +27,7 @@ def get_db():
"""
db = None
try:
db = SessionLocal()
db = SessionFactory()
yield db
finally:
if db:
@@ -30,15 +35,10 @@ def get_db():
class DbOper:
_db: Session = None
def __init__(self, db: Session = None):
if db:
self._db = db
else:
self._db = SessionLocal()
def __del__(self):
if self._db:
self._db.close()
self._db = ScopedSession()

View File

@@ -1,8 +1,8 @@
from pathlib import Path
from typing import Any
from typing import List
from app.db import DbOper
from app.db.models.downloadhistory import DownloadHistory
from app.db.models.downloadhistory import DownloadHistory, DownloadFiles
class DownloadHistoryOper(DbOper):
@@ -10,28 +10,81 @@ class DownloadHistoryOper(DbOper):
下载历史管理
"""
def get_by_path(self, path: Path) -> Any:
def get_by_path(self, path: Path) -> DownloadHistory:
"""
按路径查询下载记录
:param path: 数据key
"""
return DownloadHistory.get_by_path(self._db, str(path))
def get_by_hash(self, download_hash: str) -> Any:
def get_by_hash(self, download_hash: str) -> DownloadHistory:
"""
按Hash查询下载记录
:param download_hash: 数据key
"""
return DownloadHistory.get_by_hash(self._db, download_hash)
def add(self, **kwargs):
def add(self, **kwargs) -> DownloadHistory:
"""
新增下载历史
"""
downloadhistory = DownloadHistory(**kwargs)
return downloadhistory.create(self._db)
def list_by_page(self, page: int = 1, count: int = 30):
def add_files(self, file_items: List[dict]):
"""
新增下载历史文件
"""
for file_item in file_items:
downloadfile = DownloadFiles(**file_item)
downloadfile.create(self._db)
def truncate_files(self):
"""
清空下载历史文件记录
"""
DownloadFiles.truncate(self._db)
def get_files_by_hash(self, download_hash: str, state: int = None) -> List[DownloadFiles]:
"""
按Hash查询下载文件记录
:param download_hash: 数据key
:param state: 删除状态
"""
return DownloadFiles.get_by_hash(self._db, download_hash, state)
def get_file_by_fullpath(self, fullpath: str) -> DownloadFiles:
"""
按fullpath查询下载文件记录
:param fullpath: 数据key
"""
return DownloadFiles.get_by_fullpath(self._db, fullpath)
def get_files_by_savepath(self, fullpath: str) -> List[DownloadFiles]:
"""
按savepath查询下载文件记录
:param fullpath: 数据key
"""
return DownloadFiles.get_by_savepath(self._db, fullpath)
def delete_file_by_fullpath(self, fullpath: str):
"""
按fullpath删除下载文件记录
:param fullpath: 数据key
"""
DownloadFiles.delete_by_fullpath(self._db, fullpath)
def get_hash_by_fullpath(self, fullpath: str) -> str:
"""
按fullpath查询下载文件记录hash
:param fullpath: 数据key
"""
fileinfo: DownloadFiles = DownloadFiles.get_by_fullpath(self._db, fullpath)
if fileinfo:
return fileinfo.download_hash
return ""
def list_by_page(self, page: int = 1, count: int = 30) -> List[DownloadHistory]:
"""
分页查询下载历史
"""
@@ -44,7 +97,7 @@ class DownloadHistoryOper(DbOper):
DownloadHistory.truncate(self._db)
def get_last_by(self, mtype=None, title: str = None, year: str = None,
season: str = None, episode: str = None, tmdbid=None) -> DownloadHistory:
season: str = None, episode: str = None, tmdbid=None) -> List[DownloadHistory]:
"""
按类型、标题、年份、季集查询下载记录
"""
@@ -55,3 +108,11 @@ class DownloadHistoryOper(DbOper):
season=season,
episode=episode,
tmdbid=tmdbid)
def list_by_user_date(self, date: str, userid: str = None) -> List[DownloadHistory]:
"""
查询某用户某时间之后的下载历史
"""
return DownloadHistory.list_by_user_date(db=self._db,
date=date,
userid=userid)

View File

@@ -6,7 +6,7 @@ from alembic.config import Config
from app.core.config import settings
from app.core.security import get_password_hash
from app.db import Engine, SessionLocal
from app.db import Engine, SessionFactory
from app.db.models import Base
from app.db.models.user import User
from app.log import logger
@@ -22,15 +22,16 @@ def init_db():
# 全量建表
Base.metadata.create_all(bind=Engine)
# 初始化超级管理员
_db = SessionLocal()
user = User.get_by_name(db=_db, name=settings.SUPERUSER)
db = SessionFactory()
user = User.get_by_name(db=db, name=settings.SUPERUSER)
if not user:
user = User(
name=settings.SUPERUSER,
hashed_password=get_password_hash(settings.SUPERUSER_PASSWORD),
is_superuser=True,
)
user.create(_db)
user.create(db)
db.close()
def update_db():
@@ -38,7 +39,7 @@ def update_db():
更新数据库
"""
db_location = settings.CONFIG_PATH / 'user.db'
script_location = settings.ROOT_PATH / 'alembic'
script_location = settings.ROOT_PATH / 'database'
try:
alembic_cfg = Config()
alembic_cfg.set_main_option('script_location', str(script_location))

View File

@@ -1,6 +1,6 @@
from typing import Any
from typing import Any, Self, List
from sqlalchemy.orm import as_declarative, declared_attr
from sqlalchemy.orm import as_declarative, declared_attr, Session
@as_declarative()
@@ -8,33 +8,41 @@ class Base:
id: Any
__name__: str
def create(self, db):
@staticmethod
def commit(db: Session):
try:
db.commit()
except Exception as err:
db.rollback()
raise err
def create(self, db: Session) -> Self:
db.add(self)
db.commit()
self.commit(db)
return self
@classmethod
def get(cls, db, rid: int):
def get(cls, db: Session, rid: int) -> Self:
return db.query(cls).filter(cls.id == rid).first()
def update(self, db, payload: dict):
def update(self, db: Session, payload: dict):
payload = {k: v for k, v in payload.items() if v is not None}
for key, value in payload.items():
setattr(self, key, value)
db.commit()
Base.commit(db)
@classmethod
def delete(cls, db, rid):
def delete(cls, db: Session, rid):
db.query(cls).filter(cls.id == rid).delete()
db.commit()
Base.commit(db)
@classmethod
def truncate(cls, db):
def truncate(cls, db: Session):
db.query(cls).delete()
db.commit()
Base.commit(db)
@classmethod
def list(cls, db):
def list(cls, db: Session) -> List[Self]:
return db.query(cls).all()
def to_dict(self):

View File

@@ -35,6 +35,12 @@ class DownloadHistory(Base):
torrent_description = Column(String)
# 种子站点
torrent_site = Column(String)
# 下载用户
userid = Column(String)
# 下载渠道
channel = Column(String)
# 创建时间
date = Column(String)
# 附加信息
note = Column(String)
@@ -52,35 +58,35 @@ class DownloadHistory(Base):
@staticmethod
def get_last_by(db: Session, mtype: str = None, title: str = None, year: int = None, season: str = None,
episode: str = None, tmdbid: str = None):
episode: str = None, tmdbid: int = None):
"""
据tmdbid、season、season_episode查询转移记录
"""
if tmdbid and not season and not episode:
return db.query(DownloadHistory).filter(DownloadHistory.tmdbid == tmdbid).order_by(
DownloadHistory.id.desc()).first()
DownloadHistory.id.desc()).all()
if tmdbid and season and not episode:
return db.query(DownloadHistory).filter(DownloadHistory.tmdbid == tmdbid,
DownloadHistory.seasons == season).order_by(
DownloadHistory.id.desc()).first()
DownloadHistory.id.desc()).all()
if tmdbid and season and episode:
return db.query(DownloadHistory).filter(DownloadHistory.tmdbid == tmdbid,
DownloadHistory.seasons == season,
DownloadHistory.episodes == episode).order_by(
DownloadHistory.id.desc()).first()
DownloadHistory.id.desc()).all()
# 电视剧所有季集|电影
if not season and not episode:
return db.query(DownloadHistory).filter(DownloadHistory.type == mtype,
DownloadHistory.title == title,
DownloadHistory.year == year).order_by(
DownloadHistory.id.desc()).first()
DownloadHistory.id.desc()).all()
# 电视剧某季
if season and not episode:
return db.query(DownloadHistory).filter(DownloadHistory.type == mtype,
DownloadHistory.title == title,
DownloadHistory.year == year,
DownloadHistory.seasons == season).order_by(
DownloadHistory.id.desc()).first()
DownloadHistory.id.desc()).all()
# 电视剧某季某集
if season and episode:
return db.query(DownloadHistory).filter(DownloadHistory.type == mtype,
@@ -88,4 +94,65 @@ class DownloadHistory(Base):
DownloadHistory.year == year,
DownloadHistory.seasons == season,
DownloadHistory.episodes == episode).order_by(
DownloadHistory.id.desc()).first()
DownloadHistory.id.desc()).all()
@staticmethod
def list_by_user_date(db: Session, date: str, userid: str = None):
    """
    List download history created before the given date, newest first,
    optionally restricted to a single user.
    :param db: database session
    :param date: upper bound (exclusive) on the record date
    :param userid: optional user id filter
    """
    query = db.query(DownloadHistory).filter(DownloadHistory.date < date)
    if userid:
        query = query.filter(DownloadHistory.userid == userid)
    return query.order_by(DownloadHistory.id.desc()).all()
class DownloadFiles(Base):
    """
    Files belonging to a download task.
    """
    id = Column(Integer, Sequence('id'), primary_key=True, index=True)
    # hash of the download task
    download_hash = Column(String, index=True)
    # downloader client
    downloader = Column(String)
    # full file path
    fullpath = Column(String, index=True)
    # save path
    savepath = Column(String, index=True)
    # file path/name relative to the save path
    filepath = Column(String)
    # torrent name
    torrentname = Column(String)
    # state: 0 - deleted, 1 - normal
    state = Column(Integer, nullable=False, default=1)

    @staticmethod
    def get_by_hash(db: Session, download_hash: str, state: int = None):
        """
        List the files of a download task, optionally filtered by state.
        """
        # BUGFIX: state == 0 (deleted) is a meaningful filter value but is
        # falsy, so test against None instead of truthiness — otherwise a
        # request for deleted files silently returns all files.
        if state is not None:
            return db.query(DownloadFiles).filter(DownloadFiles.download_hash == download_hash,
                                                  DownloadFiles.state == state).all()
        return db.query(DownloadFiles).filter(DownloadFiles.download_hash == download_hash).all()

    @staticmethod
    def get_by_fullpath(db: Session, fullpath: str):
        """
        Return the most recent file record matching a full path.
        """
        return db.query(DownloadFiles).filter(DownloadFiles.fullpath == fullpath).order_by(
            DownloadFiles.id.desc()).first()

    @staticmethod
    def get_by_savepath(db: Session, savepath: str):
        """
        List file records stored under a save path.
        """
        return db.query(DownloadFiles).filter(DownloadFiles.savepath == savepath).all()

    @staticmethod
    def delete_by_fullpath(db: Session, fullpath: str):
        """
        Soft-delete (set state to 0) all normal records for a full path.
        """
        db.query(DownloadFiles).filter(DownloadFiles.fullpath == fullpath,
                                       DownloadFiles.state == 1).update(
            {
                "state": 0
            }
        )
        Base.commit(db)

View File

@@ -47,7 +47,7 @@ class MediaServerItem(Base):
@staticmethod
def empty(db: Session, server: str):
db.query(MediaServerItem).filter(MediaServerItem.server == server).delete()
db.commit()
Base.commit(db)
@staticmethod
def exist_by_tmdbid(db: Session, tmdbid: int, mtype: str):

View File

@@ -23,7 +23,8 @@ class PluginData(Base):
@staticmethod
def del_plugin_data_by_key(db: Session, plugin_id: str, key: str):
return db.query(PluginData).filter(PluginData.plugin_id == plugin_id, PluginData.key == key).delete()
db.query(PluginData).filter(PluginData.plugin_id == plugin_id, PluginData.key == key).delete()
Base.commit(db)
@staticmethod
def get_plugin_data_by_plugin_id(db: Session, plugin_id: str):

View File

@@ -1,66 +0,0 @@
from sqlalchemy import Column, Integer, String, Sequence
from sqlalchemy.orm import Session
from app.db.models import Base
class Rss(Base):
    """
    RSS subscription.
    """
    id = Column(Integer, Sequence('id'), primary_key=True, index=True)
    # subscription name
    name = Column(String, nullable=False)
    # RSS feed address
    url = Column(String, nullable=False)
    # media type
    type = Column(String)
    # title
    title = Column(String)
    # year
    year = Column(String)
    # TMDB id
    tmdbid = Column(Integer, index=True)
    # season number
    season = Column(Integer)
    # poster image
    poster = Column(String)
    # backdrop image
    backdrop = Column(String)
    # rating
    vote = Column(Integer)
    # overview
    description = Column(String)
    # total episode count
    total_episode = Column(Integer)
    # include filter
    include = Column(String)
    # exclude filter
    exclude = Column(String)
    # best-version (upgrade) flag
    best_version = Column(Integer)
    # whether to use the proxy server
    proxy = Column(Integer)
    # whether to apply filter rules
    filter = Column(Integer)
    # save path
    save_path = Column(String)
    # number of processed items
    processed = Column(Integer)
    # extra info / processed data
    note = Column(String)
    # last update time
    last_update = Column(String)
    # state: 0 - disabled, 1 - enabled
    state = Column(Integer, default=1)

    @staticmethod
    def get_by_tmdbid(db: Session, tmdbid: int, season: int = None):
        """
        Fetch subscriptions by TMDB id, optionally narrowed to one season.
        """
        query = db.query(Rss).filter(Rss.tmdbid == tmdbid)
        if season:
            query = query.filter(Rss.season == season)
        return query.all()

    @staticmethod
    def get_by_title(db: Session, title: str):
        """
        Fetch the first subscription with the given title.
        """
        return db.query(Rss).filter(Rss.title == title).first()

View File

@@ -61,4 +61,4 @@ class Site(Base):
@staticmethod
def reset(db: Session):
db.query(Site).delete()
db.commit()
Base.commit(db)

View File

@@ -49,6 +49,8 @@ class Subscribe(Base):
state = Column(String, nullable=False, index=True, default='N')
# 最后更新时间
last_update = Column(String)
# 创建时间
date = Column(String)
# 订阅用户
username = Column(String)
# 订阅站点

View File

@@ -25,7 +25,7 @@ class TransferHistory(Base):
title = Column(String, index=True)
# 年份
year = Column(String)
tmdbid = Column(Integer)
tmdbid = Column(Integer, index=True)
imdbid = Column(String)
tvdbid = Column(Integer)
doubanid = Column(String)
@@ -65,6 +65,10 @@ class TransferHistory(Base):
def get_by_src(db: Session, src: str):
return db.query(TransferHistory).filter(TransferHistory.src == src).first()
@staticmethod
def list_by_hash(db: Session, download_hash: str):
return db.query(TransferHistory).filter(TransferHistory.download_hash == download_hash).all()
@staticmethod
def statistic(db: Session, days: int = 7):
"""
@@ -85,35 +89,75 @@ class TransferHistory(Base):
return db.query(func.count(TransferHistory.id)).filter(TransferHistory.title.like(f'%{title}%')).first()[0]
@staticmethod
def list_by(db: Session, mtype: str = None, title: str = None, year: int = None, season: str = None,
episode: str = None, tmdbid: str = None):
def list_by(db: Session, mtype: str = None, title: str = None, year: str = None, season: str = None,
episode: str = None, tmdbid: int = None, dest: str = None):
"""
据tmdbid、season、season_episode查询转移记录
tmdbid + mtype 或 title + year 必输
"""
if tmdbid and not season and not episode:
return db.query(TransferHistory).filter(TransferHistory.tmdbid == tmdbid).all()
if tmdbid and season and not episode:
return db.query(TransferHistory).filter(TransferHistory.tmdbid == tmdbid,
TransferHistory.seasons == season).all()
if tmdbid and season and episode:
return db.query(TransferHistory).filter(TransferHistory.tmdbid == tmdbid,
TransferHistory.seasons == season,
TransferHistory.episodes == episode).all()
# 电视剧所有季集|电影
if not season and not episode:
return db.query(TransferHistory).filter(TransferHistory.type == mtype,
TransferHistory.title == title,
TransferHistory.year == year).all()
# 电视剧某季
if season and not episode:
return db.query(TransferHistory).filter(TransferHistory.type == mtype,
TransferHistory.title == title,
TransferHistory.year == year,
TransferHistory.seasons == season).all()
# 电视剧某季某集
if season and episode:
return db.query(TransferHistory).filter(TransferHistory.type == mtype,
TransferHistory.title == title,
TransferHistory.year == year,
TransferHistory.seasons == season,
TransferHistory.episodes == episode).all()
# TMDBID + 类型
if tmdbid and mtype:
# 电视剧某季某集
if season and episode:
return db.query(TransferHistory).filter(TransferHistory.tmdbid == tmdbid,
TransferHistory.type == mtype,
TransferHistory.seasons == season,
TransferHistory.episodes == episode,
TransferHistory.dest == dest).all()
# 电视剧某季
elif season:
return db.query(TransferHistory).filter(TransferHistory.tmdbid == tmdbid,
TransferHistory.type == mtype,
TransferHistory.seasons == season).all()
else:
if dest:
# 电影
return db.query(TransferHistory).filter(TransferHistory.tmdbid == tmdbid,
TransferHistory.type == mtype,
TransferHistory.dest == dest).all()
else:
# 电视剧所有季集
return db.query(TransferHistory).filter(TransferHistory.tmdbid == tmdbid,
TransferHistory.type == mtype).all()
# 标题 + 年份
elif title and year:
# 电视剧某季某集
if season and episode:
return db.query(TransferHistory).filter(TransferHistory.title == title,
TransferHistory.year == year,
TransferHistory.seasons == season,
TransferHistory.episodes == episode,
TransferHistory.dest == dest).all()
# 电视剧某季
elif season:
return db.query(TransferHistory).filter(TransferHistory.title == title,
TransferHistory.year == year,
TransferHistory.seasons == season).all()
else:
if dest:
# 电影
return db.query(TransferHistory).filter(TransferHistory.title == title,
TransferHistory.year == year,
TransferHistory.dest == dest).all()
else:
# 电视剧所有季集
return db.query(TransferHistory).filter(TransferHistory.title == title,
TransferHistory.year == year).all()
return []
@staticmethod
def get_by_type_tmdbid(db: Session, mtype: str = None, tmdbid: int = None):
"""
据tmdbid、type查询转移记录
"""
return db.query(TransferHistory).filter(TransferHistory.tmdbid == tmdbid,
TransferHistory.type == mtype).first()
@staticmethod
def update_download_hash(db: Session, historyid: int = None, download_hash: str = None):
db.query(TransferHistory).filter(TransferHistory.id == historyid).update(
{
"download_hash": download_hash
}
)
Base.commit(db)

View File

@@ -2,7 +2,6 @@ import json
from typing import Any
from app.db import DbOper
from app.db.models import Base
from app.db.models.plugin import PluginData
from app.utils.object import ObjectUtils
@@ -12,7 +11,7 @@ class PluginDataOper(DbOper):
插件数据管理
"""
def save(self, plugin_id: str, key: str, value: Any) -> Base:
def save(self, plugin_id: str, key: str, value: Any) -> PluginData:
"""
保存插件数据
:param plugin_id: 插件id

View File

@@ -1,57 +0,0 @@
from typing import List
from sqlalchemy.orm import Session
from app.db import DbOper
from app.db.models.rss import Rss
class RssOper(DbOper):
    """
    Data-access layer for RSS subscriptions.
    """

    def __init__(self, db: Session = None):
        super().__init__(db)

    def add(self, **kwargs) -> bool:
        """
        Create a new RSS subscription from keyword fields.
        """
        Rss(**kwargs).create(self._db)
        return True

    def exists(self, tmdbid: int, season: int = None):
        """
        Return the matching subscriptions (truthy when at least one exists).
        """
        return Rss.get_by_tmdbid(self._db, tmdbid, season)

    def list(self, rssid: int = None) -> List[Rss]:
        """
        List all subscriptions, or only the one identified by rssid.
        """
        return [Rss.get(self._db, rssid)] if rssid else Rss.list(self._db)

    def delete(self, rssid: int) -> bool:
        """
        Delete a subscription; return True when it existed.
        """
        item = Rss.get(self._db, rssid)
        if not item:
            return False
        item.delete(self._db)
        return True

    def update(self, rssid: int, **kwargs) -> bool:
        """
        Update a subscription; return True when it existed.
        """
        item = Rss.get(self._db, rssid)
        if not item:
            return False
        item.update(self._db, kwargs)
        return True

View File

@@ -19,7 +19,7 @@ class SiteOper(DbOper):
return True, "新增站点成功"
return False, "站点已存在"
def get(self, sid: int):
def get(self, sid: int) -> Site:
"""
查询单个站点
"""
@@ -31,7 +31,7 @@ class SiteOper(DbOper):
"""
return Site.list(self._db)
def list_active(self):
def list_active(self) -> List[Site]:
"""
按状态获取站点列表
"""
@@ -41,9 +41,9 @@ class SiteOper(DbOper):
"""
删除站点
"""
return Site.delete(self._db, sid)
Site.delete(self._db, sid)
def update(self, sid: int, payload: dict):
def update(self, sid: int, payload: dict) -> Site:
"""
更新站点
"""
@@ -74,3 +74,15 @@ class SiteOper(DbOper):
"cookie": cookies
})
return True, "更新站点Cookie成功"
def update_rss(self, domain: str, rss: str) -> Tuple[bool, str]:
    """
    Update the RSS address of a site.
    :param domain: site domain
    :param rss: new RSS address
    :return: (success flag, message)
    """
    site = Site.get_by_domain(self._db, domain)
    if site:
        site.update(self._db, {"rss": rss})
        return True, "更新站点RSS地址成功"
    return False, "站点不存在"

View File

@@ -1,3 +1,4 @@
import time
from typing import Tuple, List
from app.core.context import MediaInfo
@@ -26,13 +27,14 @@ class SubscribeOper(DbOper):
backdrop=mediainfo.get_backdrop_image(),
vote=mediainfo.vote_average,
description=mediainfo.overview,
date=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
**kwargs)
subscribe.create(self._db)
return subscribe.id, "新增订阅成功"
else:
return subscribe.id, "订阅已存在"
def exists(self, tmdbid: int, season: int):
def exists(self, tmdbid: int, season: int) -> bool:
"""
判断是否存在
"""
@@ -61,7 +63,7 @@ class SubscribeOper(DbOper):
"""
Subscribe.delete(self._db, rid=sid)
def update(self, sid: int, payload: dict):
def update(self, sid: int, payload: dict) -> Subscribe:
"""
更新订阅
"""

View File

@@ -1,9 +1,7 @@
import json
from typing import Any, Union
from sqlalchemy.orm import Session
from app.db import DbOper
from app.db import DbOper, SessionFactory
from app.db.models.systemconfig import SystemConfig
from app.schemas.types import SystemConfigKey
from app.utils.object import ObjectUtils
@@ -14,11 +12,12 @@ class SystemConfigOper(DbOper, metaclass=Singleton):
# 配置对象
__SYSTEMCONF: dict = {}
def __init__(self, db: Session = None):
def __init__(self):
"""
加载配置到内存
"""
super().__init__(db)
self._db = SessionFactory()
super().__init__(self._db)
for item in SystemConfig.list(self._db):
if ObjectUtils.is_obj(item.value):
self.__SYSTEMCONF[item.key] = json.loads(item.value)
@@ -35,18 +34,20 @@ class SystemConfigOper(DbOper, metaclass=Singleton):
self.__SYSTEMCONF[key] = value
# 写入数据库
if ObjectUtils.is_obj(value):
if value is not None:
value = json.dumps(value)
else:
value = ''
value = json.dumps(value)
elif value is None:
value = ''
conf = SystemConfig.get_by_key(self._db, key)
if conf:
conf.update(self._db, {"value": value})
if value:
conf.update(self._db, {"value": value})
else:
conf.delete(self._db, conf.id)
else:
conf = SystemConfig(key=key, value=value)
conf.create(self._db)
def get(self, key: Union[str, SystemConfigKey] = None):
def get(self, key: Union[str, SystemConfigKey] = None) -> Any:
"""
获取系统设置
"""
@@ -55,3 +56,7 @@ class SystemConfigOper(DbOper, metaclass=Singleton):
if not key:
return self.__SYSTEMCONF
return self.__SYSTEMCONF.get(key)
def __del__(self):
if self._db:
self._db.close()

View File

@@ -1,8 +1,13 @@
import json
import time
from typing import Any
from pathlib import Path
from typing import Any, List
from app.core.context import MediaInfo
from app.core.meta import MetaBase
from app.db import DbOper
from app.db.models.transferhistory import TransferHistory
from app.schemas import TransferInfo
class TransferHistoryOper(DbOper):
@@ -10,59 +15,71 @@ class TransferHistoryOper(DbOper):
转移历史管理
"""
def get(self, historyid: int) -> Any:
def get(self, historyid: int) -> TransferHistory:
"""
获取转移历史
:param historyid: 转移历史id
"""
return TransferHistory.get(self._db, historyid)
def get_by_title(self, title: str) -> Any:
def get_by_title(self, title: str) -> List[TransferHistory]:
"""
按标题查询转移记录
:param title: 数据key
"""
return TransferHistory.list_by_title(self._db, title)
def get_by_src(self, src: str) -> Any:
def get_by_src(self, src: str) -> TransferHistory:
"""
按源查询转移记录
:param src: 数据key
"""
return TransferHistory.get_by_src(self._db, src)
def add(self, **kwargs):
def list_by_hash(self, download_hash: str) -> List[TransferHistory]:
    """
    Query all transfer records belonging to a torrent.
    :param download_hash: torrent hash
    :return: matching transfer-history records
    """
    return TransferHistory.list_by_hash(
        self._db,
        download_hash,
    )
def add(self, **kwargs) -> TransferHistory:
    """
    Add a transfer-history record; any existing record with the same
    download hash is removed first so the hash stays unique.
    """
    download_hash = kwargs.get("download_hash")
    if download_hash:
        previous = TransferHistory.get_by_hash(self._db, download_hash)
        if previous:
            previous.delete(self._db, previous.id)
    # stamp the record with the current local time
    kwargs["date"] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    return TransferHistory(**kwargs).create(self._db)
def statistic(self, days: int = 7):
def statistic(self, days: int = 7) -> List[Any]:
"""
统计最近days天的下载历史数量
"""
return TransferHistory.statistic(self._db, days)
def get_by(self, mtype: str = None, title: str = None, year: str = None,
season: str = None, episode: str = None, tmdbid: str = None) -> Any:
def get_by(self, title: str = None, year: str = None, mtype: str = None,
season: str = None, episode: str = None, tmdbid: int = None, dest: str = None) -> List[TransferHistory]:
"""
按类型、标题、年份、季集查询转移记录
"""
return TransferHistory.list_by(db=self._db,
mtype=mtype,
title=title,
dest=dest,
year=year,
season=season,
episode=episode,
tmdbid=tmdbid)
def get_by_type_tmdbid(self, mtype: str = None, tmdbid: int = None) -> TransferHistory:
"""
按类型、tmdb查询转移记录
"""
return TransferHistory.get_by_type_tmdbid(db=self._db,
mtype=mtype,
tmdbid=tmdbid)
def delete(self, historyid):
"""
删除转移记录
@@ -75,8 +92,87 @@ class TransferHistoryOper(DbOper):
"""
TransferHistory.truncate(self._db)
def add_force(self, **kwargs):
def add_force(self, **kwargs) -> TransferHistory:
"""
新增转移历史
新增转移历史,相同源目录的记录会被删除
"""
return TransferHistory(**kwargs).create(self._db)
if kwargs.get("src"):
transferhistory = TransferHistory.get_by_src(self._db, kwargs.get("src"))
if transferhistory:
transferhistory.delete(self._db, transferhistory.id)
kwargs.update({
"date": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
})
return TransferHistory(**kwargs).create(self._db)
def update_download_hash(self, historyid, download_hash):
"""
补充转移记录download_hash
"""
TransferHistory.update_download_hash(self._db, historyid, download_hash)
def add_success(self, src_path: Path, mode: str, meta: MetaBase,
                mediainfo: MediaInfo, transferinfo: TransferInfo,
                download_hash: str = None):
    """
    Record a successful transfer in the history.
    """
    record = dict(
        src=str(src_path),
        dest=str(transferinfo.target_path),
        mode=mode,
        type=mediainfo.type.value,
        category=mediainfo.category,
        title=mediainfo.title,
        year=mediainfo.year,
        tmdbid=mediainfo.tmdb_id,
        imdbid=mediainfo.imdb_id,
        tvdbid=mediainfo.tvdb_id,
        doubanid=mediainfo.douban_id,
        seasons=meta.season,
        episodes=meta.episode,
        image=mediainfo.get_poster_image(),
        download_hash=download_hash,
        # status 1 marks a successful transfer
        status=1,
        files=json.dumps(transferinfo.file_list),
    )
    self.add_force(**record)
def add_fail(self, src_path: Path, mode: str, meta: MetaBase, mediainfo: MediaInfo = None,
             transferinfo: TransferInfo = None, download_hash: str = None):
    """
    Record a failed transfer in the history.
    """
    if not (mediainfo and transferinfo):
        # media could not be recognised: store a minimal failure record
        return self.add_force(
            title=meta.name,
            year=meta.year,
            src=str(src_path),
            mode=mode,
            seasons=meta.season,
            episodes=meta.episode,
            download_hash=download_hash,
            status=0,
            errmsg="未识别到媒体信息"
        )
    # media was recognised but the transfer itself failed
    return self.add_force(
        src=str(src_path),
        dest=str(transferinfo.target_path),
        mode=mode,
        type=mediainfo.type.value,
        category=mediainfo.category,
        title=mediainfo.title or meta.name,
        year=mediainfo.year or meta.year,
        tmdbid=mediainfo.tmdb_id,
        imdbid=mediainfo.imdb_id,
        tvdbid=mediainfo.tvdb_id,
        doubanid=mediainfo.douban_id,
        seasons=meta.season,
        episodes=meta.episode,
        image=mediainfo.get_poster_image(),
        download_hash=download_hash,
        status=0,
        errmsg=transferinfo.message or '未知错误',
        files=json.dumps(transferinfo.file_list)
    )

View File

@@ -23,14 +23,17 @@ class CookieHelper:
"password": [
'//input[@name="password"]',
'//input[@id="form_item_password"]',
'//input[@id="password"]'
'//input[@id="password"]',
'//input[@type="password"]'
],
"captcha": [
'//input[@name="imagestring"]',
'//input[@name="captcha"]',
'//input[@id="form_item_captcha"]'
'//input[@id="form_item_captcha"]',
'//input[@placeholder="驗證碼"]'
],
"captcha_img": [
'//img[@alt="captcha"]/@src',
'//img[@alt="CAPTCHA"]/@src',
'//img[@alt="SECURITY CODE"]/@src',
'//img[@id="LAY-user-get-vercode"]/@src',

View File

@@ -2,12 +2,15 @@ from pyvirtualdisplay import Display
from app.log import logger
from app.utils.singleton import Singleton
from app.utils.system import SystemUtils
class DisplayHelper(metaclass=Singleton):
_display: Display = None
def __init__(self):
if not SystemUtils.is_docker():
return
try:
self._display = Display(visible=False, size=(1024, 768))
self._display.start()

108
app/helper/format.py Normal file
View File

@@ -0,0 +1,108 @@
import re
from typing import Tuple, Optional
import parse
class FormatParser(object):
    """
    Extract episode numbers from file names using a ``parse`` format string,
    with optional episode-range filtering, Part information and a numeric
    episode offset.
    """
    # name of the field extracted from the parse result
    _key = ""
    # separators that may appear between two episode numbers in a range.
    # BUGFIX: the original pattern contained an empty alternative ("||"),
    # which makes re.split() match a zero-width string at every position and
    # split the input into single characters; the empty branch is removed.
    _split_chars = r"\.|\s+|\(|\)|\[|]|-|\+|【|】|/|;|&|\||#|_|「|」|~"

    def __init__(self, eformat: str, details: str = None, part: str = None,
                 offset: int = None, key: str = "ep"):
        """
        :param eformat: format string understood by ``parse`` (e.g. "{ep}")
        :param details: episode selection — "start-end", "a,b" or a single "n"
        :param part: Part information attached to every result
        :param offset: value added to every parsed episode number
        :param key: name of the episode field inside ``eformat``
        """
        self._format = eformat
        self._start_ep = None
        self._end_ep = None
        self._part = None
        if part:
            self._part = part
        if details:
            if re.compile("\\d{1,4}-\\d{1,4}").match(details):
                # keep the raw "start-end" string; it is expanded on use
                self._start_ep = details
                self._end_ep = details
            else:
                tmp = details.split(",")
                if len(tmp) > 1:
                    self._start_ep = int(tmp[0])
                    # the larger of the two values becomes the end episode
                    self._end_ep = int(tmp[0]) if int(tmp[0]) > int(tmp[1]) else int(tmp[1])
                else:
                    self._start_ep = self._end_ep = int(tmp[0])
        self.__offset = int(offset) if offset else 0
        self._key = key

    @property
    def format(self):
        return self._format

    @property
    def start_ep(self):
        return self._start_ep

    @property
    def end_ep(self):
        return self._end_ep

    @property
    def part(self):
        return self._part

    @property
    def offset(self):
        return self.__offset

    def match(self, file: str) -> bool:
        """
        Return True when ``file`` matches the format and its episode falls
        inside the configured range (if any).
        """
        if not self._format:
            return True
        s, e = self.__handle_single(file)
        if not s:
            return False
        if self._start_ep is None:
            return True
        # BUGFIX: when details was given as "start-end" both bounds are held
        # as the raw string; convert to integers before comparing, otherwise
        # the str <= int comparison raises TypeError.
        start_ep, end_ep = self._start_ep, self._end_ep
        if isinstance(start_ep, str):
            low, high = start_ep.split("-")
            start_ep, end_ep = int(low), int(high)
        if start_ep <= s <= end_ep:
            return True
        return False

    def split_episode(self, file_name: str) -> Tuple[Optional[int], Optional[int], Optional[str]]:
        """
        Split out episode numbers; returns (start episode, end episode, Part).
        """
        # a concrete episode (or range) was specified: return it directly
        if self._start_ep is not None and self._start_ep == self._end_ep:
            if isinstance(self._start_ep, str):
                s, e = self._start_ep.split("-")
                if int(s) == int(e):
                    return int(s) + self.__offset, None, self.part
                return int(s) + self.__offset, int(e) + self.__offset, self.part
            return self._start_ep + self.__offset, None, self.part
        if not self._format:
            return None, None, None
        s, e = self.__handle_single(file_name)
        return s + self.__offset if s is not None else None, \
            e + self.__offset if e is not None else None, self.part

    def __handle_single(self, file: str) -> Tuple[Optional[int], Optional[int]]:
        """
        Parse one file name; returns the (start, end) episode numbers, or
        (None, None) when the name does not match the format.
        """
        if not self._format:
            return None, None
        ret = parse.parse(self._format, file)
        if not ret or not ret.__contains__(self._key):
            return None, None
        episodes = ret.__getitem__(self._key)
        # accept "EP01", "01", "EP01-EP02", "01-02" style values only
        if not re.compile(r"^(EP)?(\d{1,4})(-(EP)?(\d{1,4}))?$", re.IGNORECASE).match(episodes):
            return None, None
        episode_splits = list(filter(lambda x: re.compile(r'[a-zA-Z]*\d{1,4}', re.IGNORECASE).match(x),
                                     re.split(r'%s' % self._split_chars, episodes)))
        if len(episode_splits) == 1:
            return int(re.compile(r'[a-zA-Z]*', re.IGNORECASE).sub("", episode_splits[0])), None
        else:
            return int(re.compile(r'[a-zA-Z]*', re.IGNORECASE).sub("", episode_splits[0])), int(
                re.compile(r'[a-zA-Z]*', re.IGNORECASE).sub("", episode_splits[1]))

View File

@@ -1,5 +1,6 @@
import xml.etree.ElementTree as ET
from pathlib import Path
from typing import List, Optional
class NfoReader:
@@ -8,6 +9,9 @@ class NfoReader:
self.tree = ET.parse(xml_file_path)
self.root = self.tree.getroot()
def get_element_value(self, element_path):
def get_element_value(self, element_path) -> Optional[str]:
element = self.root.find(element_path)
return element.text if element is not None else None
def get_elements(self, element_path) -> List[ET.Element]:
return self.root.findall(element_path)

View File

@@ -1,11 +1,12 @@
import base64
from app.core.config import settings
from app.utils.http import RequestUtils
class OcrHelper:
_ocr_b64_url = "https://nastool.org/captcha/base64"
_ocr_b64_url = f"{settings.OCR_HOST}/captcha/base64"
def get_captcha_text(self, image_url=None, image_b64=None, cookie=None, ua=None):
"""

View File

@@ -1,16 +1,227 @@
import xml.dom.minidom
from typing import List
from typing import List, Tuple, Union
from urllib.parse import urljoin
from lxml import etree
from app.core.config import settings
from app.helper.browser import PlaywrightHelper
from app.utils.dom import DomUtils
from app.utils.http import RequestUtils
from app.utils.string import StringUtils
class RssHelper:
"""
RSS帮助类解析RSS报文、获取RSS地址等
"""
# 各站点RSS链接获取配置
rss_link_conf = {
"default": {
"xpath": "//a[@class='faqlink']/@href",
"url": "getrss.php",
"params": {
"inclbookmarked": 0,
"itemsmalldescr": 1,
"showrows": 50,
"search_mode": 1,
}
},
"hares.top": {
"xpath": "//*[@id='layui-layer100001']/div[2]/div/p[4]/a/@href",
"url": "getrss.php",
"params": {
"inclbookmarked": 0,
"itemsmalldescr": 1,
"showrows": 50,
"search_mode": 1,
}
},
"et8.org": {
"xpath": "//*[@id='outer']/table/tbody/tr/td/table/tbody/tr/td/a[2]/@href",
"url": "getrss.php",
"params": {
"inclbookmarked": 0,
"itemsmalldescr": 1,
"showrows": 50,
"search_mode": 1,
}
},
"pttime.org": {
"xpath": "//*[@id='outer']/table/tbody/tr/td/table/tbody/tr/td/text()[5]",
"url": "getrss.php",
"params": {
"showrows": 10,
"inclbookmarked": 0,
"itemsmalldescr": 1
}
},
"ourbits.club": {
"xpath": "//a[@class='gen_rsslink']/@href",
"url": "getrss.php",
"params": {
"inclbookmarked": 0,
"itemsmalldescr": 1,
"showrows": 50,
"search_mode": 1,
}
},
"totheglory.im": {
"xpath": "//textarea/text()",
"url": "rsstools.php?c51=51&c52=52&c53=53&c54=54&c108=108&c109=109&c62=62&c63=63&c67=67&c69=69&c70=70&c73=73&c76=76&c75=75&c74=74&c87=87&c88=88&c99=99&c90=90&c58=58&c103=103&c101=101&c60=60",
"params": {
"inclbookmarked": 0,
"itemsmalldescr": 1,
"showrows": 50,
"search_mode": 1,
}
},
"monikadesign.uk": {
"xpath": "//a/@href",
"url": "rss",
"params": {
"inclbookmarked": 0,
"itemsmalldescr": 1,
"showrows": 50,
"search_mode": 1,
}
},
"zhuque.in": {
"xpath": "//a/@href",
"url": "user/rss",
"render": True,
"params": {
"inclbookmarked": 0,
"itemsmalldescr": 1,
"showrows": 50,
"search_mode": 1,
}
},
"hdchina.org": {
"xpath": "//a[@class='faqlink']/@href",
"url": "getrss.php",
"params": {
"inclbookmarked": 0,
"itemsmalldescr": 1,
"showrows": 50,
"search_mode": 1,
"rsscart": 0
}
},
"audiences.me": {
"xpath": "//a[@class='faqlink']/@href",
"url": "getrss.php",
"params": {
"inclbookmarked": 0,
"itemsmalldescr": 1,
"showrows": 50,
"search_mode": 1,
"torrent_type": 1,
"exp": 180
}
},
"shadowflow.org": {
"xpath": "//a[@class='faqlink']/@href",
"url": "getrss.php",
"params": {
"inclbookmarked": 0,
"itemsmalldescr": 1,
"paid": 0,
"search_mode": 0,
"showrows": 30
}
},
"hddolby.com": {
"xpath": "//a[@class='faqlink']/@href",
"url": "getrss.php",
"params": {
"inclbookmarked": 0,
"itemsmalldescr": 1,
"showrows": 50,
"search_mode": 1,
"exp": 180
}
},
"hdhome.org": {
"xpath": "//a[@class='faqlink']/@href",
"url": "getrss.php",
"params": {
"inclbookmarked": 0,
"itemsmalldescr": 1,
"showrows": 50,
"search_mode": 1,
"exp": 180
}
},
"pthome.net": {
"xpath": "//a[@class='faqlink']/@href",
"url": "getrss.php",
"params": {
"inclbookmarked": 0,
"itemsmalldescr": 1,
"showrows": 50,
"search_mode": 1,
"exp": 180
}
},
"ptsbao.club": {
"xpath": "//a[@class='faqlink']/@href",
"url": "getrss.php",
"params": {
"inclbookmarked": 0,
"itemsmalldescr": 1,
"showrows": 50,
"search_mode": 1,
"size": 0
}
},
"leaves.red": {
"xpath": "//a[@class='faqlink']/@href",
"url": "getrss.php",
"params": {
"inclbookmarked": 0,
"itemsmalldescr": 1,
"showrows": 50,
"search_mode": 0,
"paid": 2
}
},
"hdtime.org": {
"xpath": "//a[@class='faqlink']/@href",
"url": "getrss.php",
"params": {
"inclbookmarked": 0,
"itemsmalldescr": 1,
"showrows": 50,
"search_mode": 0,
}
},
"m-team.io": {
"xpath": "//a[@class='faqlink']/@href",
"url": "getrss.php",
"params": {
"showrows": 50,
"inclbookmarked": 0,
"itemsmalldescr": 1,
"https": 1
}
},
"u2.dmhy.org": {
"xpath": "//a[@class='faqlink']/@href",
"url": "getrss.php",
"params": {
"inclbookmarked": 0,
"itemsmalldescr": 1,
"showrows": 50,
"search_mode": 1,
"inclautochecked": 1,
"trackerssl": 1
}
},
}
@staticmethod
def parse(url, proxy: bool = False) -> List[dict]:
def parse(url, proxy: bool = False) -> Union[List[dict], None]:
"""
解析RSS订阅URL获取RSS中的种子信息
:param url: RSS地址
@@ -77,4 +288,61 @@ class RssHelper:
continue
except Exception as e2:
print(str(e2))
# RSS过期 观众RSS 链接已过期,您需要获得一个新的! pthome RSS Link has expired, You need to get a new one!
_rss_expired_msg = [
"RSS 链接已过期, 您需要获得一个新的!",
"RSS Link has expired, You need to get a new one!",
"RSS Link has expired, You need to get new!"
]
if ret_xml in _rss_expired_msg:
return None
return ret_array
def get_rss_link(self, url: str, cookie: str, ua: str, proxy: bool = False) -> Tuple[str, str]:
    """
    Obtain the RSS feed address of a site.
    :param url: site address
    :param cookie: site cookie
    :param ua: site user-agent
    :param proxy: whether to use the proxy server
    :return: (rss address, error message) — exactly one of the two is non-empty
    """
    try:
        # site domain, used to pick the per-site configuration
        domain = StringUtils.get_url_domain(url)
        # per-site RSS-link configuration, falling back to the default entry
        site_conf = self.rss_link_conf.get(domain) or self.rss_link_conf.get("default")
        # page that exposes the RSS link
        rss_url = urljoin(url, site_conf.get("url"))
        # request parameters for that page
        rss_params = site_conf.get("params")
        # fetch the page: browser rendering for sites that require it,
        # otherwise a plain POST request
        if site_conf.get("render"):
            # NOTE(review): if rendering fails this may be None and the
            # XPath step below will then find nothing — confirm intended
            html_text = PlaywrightHelper().get_page_source(
                url=rss_url,
                cookies=cookie,
                ua=ua,
                proxies=settings.PROXY if proxy else None
            )
        else:
            res = RequestUtils(
                cookies=cookie,
                timeout=60,
                ua=ua,
                proxies=settings.PROXY if proxy else None
            ).post_res(url=rss_url, data=rss_params)
            if res:
                html_text = res.text
            elif res is not None:
                # got a response object but a failure status code
                return "", f"获取 {url} RSS链接失败错误码{res.status_code},错误原因:{res.reason}"
            else:
                # no response at all: connection failure
                return "", f"获取RSS链接失败无法连接 {url} "
        # parse the HTML and extract the RSS link via the configured XPath;
        # the last match is used
        html = etree.HTML(html_text)
        if html:
            rss_link = html.xpath(site_conf.get("xpath"))
            if rss_link:
                return str(rss_link[-1]), ""
        return "", f"获取RSS链接失败{url}"
    except Exception as e:
        return "", f"获取 {url} RSS链接失败{str(e)}"

Binary file not shown.

View File

@@ -130,21 +130,34 @@ class TorrentHelper:
"""
获取种子文件的文件夹名和文件清单
:param torrent_path: 种子文件路径
:return: 文件夹名、文件清单
:return: 文件夹名、文件清单,单文件种子返回空文件夹名
"""
if not torrent_path or not torrent_path.exists():
return "", []
try:
torrentinfo = Torrent.from_file(torrent_path)
# 获取目录名
folder_name = torrentinfo.name
# 获取文件清单
if len(torrentinfo.files) <= 1:
if (not torrentinfo.files
or (len(torrentinfo.files) == 1
and torrentinfo.files[0].name == torrentinfo.name)):
# 单文件种子目录名返回空
folder_name = ""
# 单文件种子
file_list = [torrentinfo.name]
else:
file_list = [fileinfo.name for fileinfo in torrentinfo.files]
logger.debug(f"{torrent_path.stem} -> 目录:{folder_name},文件清单:{file_list}")
# 目录名
folder_name = torrentinfo.name
# 文件清单,如果一级目录与种子名相同则去掉
file_list = []
for fileinfo in torrentinfo.files:
file_path = Path(fileinfo.name)
# 根路径
root_path = file_path.parts[0]
if root_path == folder_name:
file_list.append(str(file_path.relative_to(root_path)))
else:
file_list.append(fileinfo.name)
logger.info(f"解析种子:{torrent_path.name} => 目录:{folder_name},文件清单:{file_list}")
return folder_name, file_list
except Exception as err:
logger.error(f"种子文件解析失败:{err}")
@@ -188,7 +201,12 @@ class TorrentHelper:
# 季数
_season_len = str(len(_meta.season_list)).rjust(2, '0')
# 集数
_episode_len = str(9999 - len(_meta.episode_list)).rjust(4, '0')
if not _meta.episode_list:
# 无集数的排最前面
_episode_len = "9999"
else:
# 集数越多的排越前面
_episode_len = str(len(_meta.episode_list)).rjust(4, '0')
# 优先规则
priority = self.system_config.get(SystemConfigKey.TorrentsPriority)
if priority != "site":
@@ -249,9 +267,11 @@ class TorrentHelper:
for file in files:
if not file:
continue
if Path(file).suffix not in settings.RMT_MEDIAEXT:
file_path = Path(file)
if file_path.suffix not in settings.RMT_MEDIAEXT:
continue
meta = MetaInfo(file)
# 只使用文件名识别
meta = MetaInfo(file_path.stem)
if not meta.begin_episode:
continue
episodes = list(set(episodes).union(set(meta.episode_list)))

View File

@@ -1,6 +1,8 @@
import logging
from logging.handlers import RotatingFileHandler
import click
from app.core.config import settings
# logger
@@ -21,12 +23,31 @@ file_handler = RotatingFileHandler(filename=settings.LOG_PATH / 'moviepilot.log'
backupCount=3,
encoding='utf-8')
file_handler.setLevel(logging.INFO)
level_name_colors = {
logging.DEBUG: lambda level_name: click.style(str(level_name), fg="cyan"),
logging.INFO: lambda level_name: click.style(str(level_name), fg="green"),
logging.WARNING: lambda level_name: click.style(str(level_name), fg="yellow"),
logging.ERROR: lambda level_name: click.style(str(level_name), fg="red"),
logging.CRITICAL: lambda level_name: click.style(
str(level_name), fg="bright_red"
),
}
# 定义日志输出格式
formatter = logging.Formatter("%(asctime)s - %(filename)s -【%(levelname)s%(message)s")
console_handler.setFormatter(formatter)
file_handler.setFormatter(formatter)
class CustomFormatter(logging.Formatter):
def format(self, record):
seperator = " " * (8 - len(record.levelname))
record.leveltext = level_name_colors[record.levelno](record.levelname + ":") + seperator
return super().format(record)
# 将Handler添加到Logger
# 终端日志
console_formatter = CustomFormatter("%(leveltext)s%(filename)s - %(message)s")
console_handler.setFormatter(console_formatter)
logger.addHandler(console_handler)
# 文件日志
file_formater = CustomFormatter("%(levelname)s%(asctime)s - %(filename)s - %(message)s")
file_handler.setFormatter(file_formater)
logger.addHandler(file_handler)

View File

@@ -58,6 +58,8 @@ def checkMessage(channel_type: MessageChannel):
return None
if channel_type == MessageChannel.Slack and not switch.get("slack"):
return None
if channel_type == MessageChannel.SynologyChat and not switch.get("synologychat"):
return None
return func(self, message, *args, **kwargs)
return wrapper

View File

@@ -1,3 +1,4 @@
from datetime import datetime
from pathlib import Path
from typing import List, Optional, Tuple, Union
@@ -10,11 +11,11 @@ from app.modules import _ModuleBase
from app.modules.douban.apiv2 import DoubanApi
from app.modules.douban.scraper import DoubanScraper
from app.schemas.types import MediaType
from app.utils.common import retry
from app.utils.system import SystemUtils
class DoubanModule(_ModuleBase):
doubanapi: DoubanApi = None
scraper: DoubanScraper = None
@@ -34,6 +35,271 @@ class DoubanModule(_ModuleBase):
:param doubanid: 豆瓣ID
:return: 豆瓣信息
"""
"""
{
"rating": {
"count": 287365,
"max": 10,
"star_count": 3.5,
"value": 6.6
},
"lineticket_url": "",
"controversy_reason": "",
"pubdate": [
"2021-10-29(中国大陆)"
],
"last_episode_number": null,
"interest_control_info": null,
"pic": {
"large": "https://img9.doubanio.com/view/photo/m_ratio_poster/public/p2707553644.webp",
"normal": "https://img9.doubanio.com/view/photo/s_ratio_poster/public/p2707553644.webp"
},
"vendor_count": 6,
"body_bg_color": "f4f5f9",
"is_tv": false,
"head_info": null,
"album_no_interact": false,
"ticket_price_info": "",
"webisode_count": 0,
"year": "2021",
"card_subtitle": "2021 / 英国 美国 / 动作 惊悚 冒险 / 凯瑞·福永 / 丹尼尔·克雷格 蕾雅·赛杜",
"forum_info": null,
"webisode": null,
"id": "20276229",
"gallery_topic_count": 0,
"languages": [
"英语",
"法语",
"意大利语",
"俄语",
"西班牙语"
],
"genres": [
"动作",
"惊悚",
"冒险"
],
"review_count": 926,
"title": "007无暇赴死",
"intro": "世界局势波诡云谲,再度出山的邦德(丹尼尔·克雷格 饰面临有史以来空前的危机传奇特工007的故事在本片中达到高潮。新老角色集结亮相蕾雅·赛杜回归二度饰演邦女郎玛德琳。系列最恐怖反派萨芬拉米·马雷克 饰重磅登场毫不留情地展示了自己狠辣的一面不仅揭开了玛德琳身上隐藏的秘密还酝酿着危及数百万人性命的阴谋幽灵党的身影也似乎再次浮出水面。半路杀出的新00号特工拉什纳·林奇 饰)与神秘女子(安娜·德·阿玛斯 饰)看似与邦德同阵作战,但其真实目的依然成谜。关乎邦德生死的新仇旧怨接踵而至,暗潮汹涌之下他能否拯救世界?",
"interest_cmt_earlier_tip_title": "发布于上映前",
"has_linewatch": true,
"ugc_tabs": [
{
"source": "reviews",
"type": "review",
"title": "影评"
},
{
"source": "forum_topics",
"type": "forum",
"title": "讨论"
}
],
"forum_topic_count": 857,
"ticket_promo_text": "",
"webview_info": {},
"is_released": true,
"actors": [
{
"name": "丹尼尔·克雷格",
"roles": [
"演员",
"制片人",
"配音"
],
"title": "丹尼尔·克雷格(同名)英国,英格兰,柴郡,切斯特影视演员",
"url": "https://movie.douban.com/celebrity/1025175/",
"user": null,
"character": "饰 詹姆斯·邦德 James Bond 007",
"uri": "douban://douban.com/celebrity/1025175?subject_id=27230907",
"avatar": {
"large": "https://qnmob3.doubanio.com/view/celebrity/raw/public/p42588.jpg?imageView2/2/q/80/w/600/h/3000/format/webp",
"normal": "https://qnmob3.doubanio.com/view/celebrity/raw/public/p42588.jpg?imageView2/2/q/80/w/200/h/300/format/webp"
},
"sharing_url": "https://www.douban.com/doubanapp/dispatch?uri=/celebrity/1025175/",
"type": "celebrity",
"id": "1025175",
"latin_name": "Daniel Craig"
}
],
"interest": null,
"vendor_icons": [
"https://img9.doubanio.com/f/frodo/fbc90f355fc45d5d2056e0d88c697f9414b56b44/pics/vendors/tencent.png",
"https://img2.doubanio.com/f/frodo/8286b9b5240f35c7e59e1b1768cd2ccf0467cde5/pics/vendors/migu_video.png",
"https://img9.doubanio.com/f/frodo/88a62f5e0cf9981c910e60f4421c3e66aac2c9bc/pics/vendors/bilibili.png"
],
"episodes_count": 0,
"color_scheme": {
"is_dark": true,
"primary_color_light": "868ca5",
"_base_color": [
0.6333333333333333,
0.18867924528301885,
0.20784313725490197
],
"secondary_color": "f4f5f9",
"_avg_color": [
0.059523809523809625,
0.09790209790209795,
0.5607843137254902
],
"primary_color_dark": "676c7f"
},
"type": "movie",
"null_rating_reason": "",
"linewatches": [
{
"url": "http://v.youku.com/v_show/id_XNTIwMzM2NDg5Mg==.html?tpa=dW5pb25faWQ9MzAwMDA4XzEwMDAwMl8wMl8wMQ&refer=esfhz_operation.xuka.xj_00003036_000000_FNZfau_19010900",
"source": {
"literal": "youku",
"pic": "https://img1.doubanio.com/img/files/file-1432869267.png",
"name": "优酷视频"
},
"source_uri": "youku://play?vid=XNTIwMzM2NDg5Mg==&source=douban&refer=esfhz_operation.xuka.xj_00003036_000000_FNZfau_19010900",
"free": false
},
],
"info_url": "https://www.douban.com/doubanapp//h5/movie/20276229/desc",
"tags": [],
"durations": [
"163分钟"
],
"comment_count": 97204,
"cover": {
"description": "",
"author": {
"loc": {
"id": "108288",
"name": "北京",
"uid": "beijing"
},
"kind": "user",
"name": "雨落下",
"reg_time": "2020-08-11 16:22:48",
"url": "https://www.douban.com/people/221011676/",
"uri": "douban://douban.com/user/221011676",
"id": "221011676",
"avatar_side_icon_type": 3,
"avatar_side_icon_id": "234",
"avatar": "https://img2.doubanio.com/icon/up221011676-2.jpg",
"is_club": false,
"type": "user",
"avatar_side_icon": "https://img2.doubanio.com/view/files/raw/file-1683625971.png",
"uid": "221011676"
},
"url": "https://movie.douban.com/photos/photo/2707553644/",
"image": {
"large": {
"url": "https://img9.doubanio.com/view/photo/l/public/p2707553644.webp",
"width": 1082,
"height": 1600,
"size": 0
},
"raw": null,
"small": {
"url": "https://img9.doubanio.com/view/photo/s/public/p2707553644.webp",
"width": 405,
"height": 600,
"size": 0
},
"normal": {
"url": "https://img9.doubanio.com/view/photo/m/public/p2707553644.webp",
"width": 405,
"height": 600,
"size": 0
},
"is_animated": false
},
"uri": "douban://douban.com/photo/2707553644",
"create_time": "2021-10-26 15:05:01",
"position": 0,
"owner_uri": "douban://douban.com/movie/20276229",
"type": "photo",
"id": "2707553644",
"sharing_url": "https://www.douban.com/doubanapp/dispatch?uri=/photo/2707553644/"
},
"cover_url": "https://img9.doubanio.com/view/photo/m_ratio_poster/public/p2707553644.webp",
"restrictive_icon_url": "",
"header_bg_color": "676c7f",
"is_douban_intro": false,
"ticket_vendor_icons": [
"https://img9.doubanio.com/view/dale-online/dale_ad/public/0589a62f2f2d7c2.jpg"
],
"honor_infos": [],
"sharing_url": "https://movie.douban.com/subject/20276229/",
"subject_collections": [],
"wechat_timeline_share": "screenshot",
"countries": [
"英国",
"美国"
],
"url": "https://movie.douban.com/subject/20276229/",
"release_date": null,
"original_title": "No Time to Die",
"uri": "douban://douban.com/movie/20276229",
"pre_playable_date": null,
"episodes_info": "",
"subtype": "movie",
"directors": [
{
"name": "凯瑞·福永",
"roles": [
"导演",
"制片人",
"编剧",
"摄影",
"演员"
],
"title": "凯瑞·福永(同名)美国,加利福尼亚州,奥克兰影视演员",
"url": "https://movie.douban.com/celebrity/1009531/",
"user": null,
"character": "导演",
"uri": "douban://douban.com/celebrity/1009531?subject_id=27215222",
"avatar": {
"large": "https://qnmob3.doubanio.com/view/celebrity/raw/public/p1392285899.57.jpg?imageView2/2/q/80/w/600/h/3000/format/webp",
"normal": "https://qnmob3.doubanio.com/view/celebrity/raw/public/p1392285899.57.jpg?imageView2/2/q/80/w/200/h/300/format/webp"
},
"sharing_url": "https://www.douban.com/doubanapp/dispatch?uri=/celebrity/1009531/",
"type": "celebrity",
"id": "1009531",
"latin_name": "Cary Fukunaga"
}
],
"is_show": false,
"in_blacklist": false,
"pre_release_desc": "",
"video": null,
"aka": [
"007生死有时(港)",
"007生死交战(台)",
"007间不容死",
"邦德25",
"007没空去死(豆友译名)",
"James Bond 25",
"Never Dream of Dying",
"Shatterhand"
],
"is_restrictive": false,
"trailer": {
"sharing_url": "https://www.douban.com/doubanapp/dispatch?uri=/movie/20276229/trailer%3Ftrailer_id%3D282585%26trailer_type%3DA",
"video_url": "https://vt1.doubanio.com/202310011325/3b1f5827e91dde7826dc20930380dfc2/view/movie/M/402820585.mp4",
"title": "中国预告片:终极决战版 (中文字幕)",
"uri": "douban://douban.com/movie/20276229/trailer?trailer_id=282585&trailer_type=A",
"cover_url": "https://img1.doubanio.com/img/trailer/medium/2712944408.jpg",
"term_num": 0,
"n_comments": 21,
"create_time": "2021-11-01",
"subject_title": "007无暇赴死",
"file_size": 10520074,
"runtime": "00:42",
"type": "A",
"id": "282585",
"desc": ""
},
"interest_cmt_earlier_tip_desc": "该短评的发布时间早于公开上映时间,作者可能通过其他渠道提前观看,请谨慎参考。其评分将不计入总评分。"
}
"""
if not doubanid:
return None
logger.info(f"开始获取豆瓣信息:{doubanid} ...")
@@ -129,22 +395,45 @@ class DoubanModule(_ModuleBase):
return ret_medias
def __match(self, name: str, year: str, season: int = None) -> dict:
@retry(Exception, 5, 3, 3, logger=logger)
def match_doubaninfo(self, name: str, mtype: str = None,
year: str = None, season: int = None) -> dict:
"""
搜索和匹配豆瓣信息
:param name: 名称
:param mtype: 类型 电影/电视剧
:param year: 年份
:param season: 季号
"""
result = self.doubanapi.search(f"{name} {year or ''}")
result = self.doubanapi.search(f"{name} {year or ''}".strip(),
ts=datetime.strftime(datetime.now(), '%Y%m%d%H%M%S'))
if not result:
logger.warn(f"未找到 {name} 的豆瓣信息")
return {}
# 触发rate limit
if "search_access_rate_limit" in result.values():
logger.warn(f"触发豆瓣API速率限制 错误信息 {result} ...")
raise Exception("触发豆瓣API速率限制")
for item_obj in result.get("items"):
if item_obj.get("type_name") not in (MediaType.TV.value, MediaType.MOVIE.value):
type_name = item_obj.get("type_name")
if type_name not in [MediaType.TV.value, MediaType.MOVIE.value]:
continue
title = item_obj.get("title")
if mtype and mtype != type_name:
continue
if mtype == MediaType.TV and not season:
season = 1
item = item_obj.get("target")
title = item.get("title")
if not title:
continue
meta = MetaInfo(title)
if meta.name == name and (not season or meta.begin_season == season):
return item_obj
if type_name == MediaType.TV.value:
meta.type = MediaType.TV
meta.begin_season = meta.begin_season or 1
if meta.name == name \
and ((not season and not meta.begin_season) or meta.begin_season == season) \
and (not year or item.get('year') == year):
return item
return {}
def movie_top250(self, page: int = 1, count: int = 30) -> List[dict]:
@@ -166,22 +455,46 @@ class DoubanModule(_ModuleBase):
"""
if settings.SCRAP_SOURCE != "douban":
return None
# 目录下的所有文件
for file in SystemUtils.list_files_with_extensions(path, settings.RMT_MEDIAEXT):
if not file:
continue
logger.info(f"开始刮削媒体库文件:{file} ...")
try:
meta = MetaInfo(file.stem)
if not meta.name:
if SystemUtils.is_bluray_dir(path):
# 蓝光原盘
logger.info(f"开始刮削蓝光原盘:{path} ...")
meta = MetaInfo(path.stem)
if not meta.name:
return
# 根据名称查询豆瓣数据
doubaninfo = self.match_doubaninfo(name=mediainfo.title,
mtype=mediainfo.type.value,
year=mediainfo.year,
season=meta.begin_season)
if not doubaninfo:
logger.warn(f"未找到 {mediainfo.title} 的豆瓣信息")
return
scrape_path = path / path.name
self.scraper.gen_scraper_files(meta=meta,
mediainfo=MediaInfo(douban_info=doubaninfo),
file_path=scrape_path)
else:
# 目录下的所有文件
for file in SystemUtils.list_files(path, settings.RMT_MEDIAEXT):
if not file:
continue
# 根据名称查询豆瓣数据
doubaninfo = self.__match(name=mediainfo.title, year=mediainfo.year, season=meta.begin_season)
if not doubaninfo:
logger.warn(f"未找到 {mediainfo.title} 的豆瓣信息")
break
# 刮削
self.scraper.gen_scraper_files(meta, MediaInfo(douban_info=doubaninfo), file)
except Exception as e:
logger.error(f"刮削文件 {file} 失败,原因:{e}")
logger.info(f"{file} 刮削完成")
logger.info(f"开始刮削媒体库文件:{file} ...")
try:
meta = MetaInfo(file.stem)
if not meta.name:
continue
# 根据名称查询豆瓣数据
doubaninfo = self.match_doubaninfo(name=mediainfo.title,
mtype=mediainfo.type.value,
year=mediainfo.year,
season=meta.begin_season)
if not doubaninfo:
logger.warn(f"未找到 {mediainfo.title} 的豆瓣信息")
break
# 刮削
self.scraper.gen_scraper_files(meta=meta,
mediainfo=MediaInfo(douban_info=doubaninfo),
file_path=file)
except Exception as e:
logger.error(f"刮削文件 {file} 失败,原因:{e}")
logger.info(f"{path} 刮削完成")

View File

@@ -146,72 +146,113 @@ class DoubanApi(metaclass=Singleton):
_api_secret_key = "bf7dddc7c9cfe6f7"
_api_key = "0dad551ec0f84ed02907ff5c42e8ec70"
_base_url = "https://frodo.douban.com/api/v2"
_session = requests.Session()
_session = None
def __init__(self):
pass
self._session = requests.Session()
@classmethod
def __sign(cls, url: str, ts: int, method='GET') -> str:
url_path = parse.urlparse(url).path
raw_sign = '&'.join([method.upper(), parse.quote(url_path, safe=''), str(ts)])
return base64.b64encode(hmac.new(cls._api_secret_key.encode(), raw_sign.encode(), hashlib.sha1).digest()
).decode()
return base64.b64encode(
hmac.new(
cls._api_secret_key.encode(),
raw_sign.encode(),
hashlib.sha1
).digest()
).decode()
@classmethod
@lru_cache(maxsize=settings.CACHE_CONF.get('douban'))
def __invoke(cls, url, **kwargs):
req_url = cls._base_url + url
def __invoke(self, url, **kwargs):
req_url = self._base_url + url
params = {'apiKey': cls._api_key}
params = {'apiKey': self._api_key}
if kwargs:
params.update(kwargs)
ts = params.pop('_ts', int(datetime.strftime(datetime.now(), '%Y%m%d')))
params.update({'os_rom': 'android', 'apiKey': cls._api_key, '_ts': ts, '_sig': cls.__sign(url=req_url, ts=ts)})
resp = RequestUtils(ua=choice(cls._user_agents), session=cls._session).get_res(url=req_url, params=params)
ts = params.pop(
'_ts',
datetime.strftime(datetime.now(), '%Y%m%d')
)
params.update({
'os_rom': 'android',
'apiKey': self._api_key,
'_ts': ts,
'_sig': self.__sign(url=req_url, ts=ts)
})
resp = RequestUtils(
ua=choice(self._user_agents),
session=self._session
).get_res(url=req_url, params=params)
if resp.status_code == 400 and "rate_limit" in resp.text:
return resp.json()
return resp.json() if resp else {}
def search(self, keyword, start=0, count=20, ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["search"], q=keyword, start=start, count=count, _ts=ts)
def search(self, keyword, start=0, count=20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["search"], q=keyword,
start=start, count=count, _ts=ts)
def movie_search(self, keyword, start=0, count=20, ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["movie_search"], q=keyword, start=start, count=count, _ts=ts)
def movie_search(self, keyword, start=0, count=20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["movie_search"], q=keyword,
start=start, count=count, _ts=ts)
def tv_search(self, keyword, start=0, count=20, ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["tv_search"], q=keyword, start=start, count=count, _ts=ts)
def tv_search(self, keyword, start=0, count=20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["tv_search"], q=keyword,
start=start, count=count, _ts=ts)
def book_search(self, keyword, start=0, count=20, ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["book_search"], q=keyword, start=start, count=count, _ts=ts)
def book_search(self, keyword, start=0, count=20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["book_search"], q=keyword,
start=start, count=count, _ts=ts)
def group_search(self, keyword, start=0, count=20, ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["group_search"], q=keyword, start=start, count=count, _ts=ts)
def group_search(self, keyword, start=0, count=20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["group_search"], q=keyword,
start=start, count=count, _ts=ts)
def movie_showing(self, start=0, count=20, ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["movie_showing"], start=start, count=count, _ts=ts)
def movie_showing(self, start=0, count=20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["movie_showing"],
start=start, count=count, _ts=ts)
def movie_soon(self, start=0, count=20, ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["movie_soon"], start=start, count=count, _ts=ts)
def movie_soon(self, start=0, count=20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["movie_soon"],
start=start, count=count, _ts=ts)
def movie_hot_gaia(self, start=0, count=20, ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["movie_hot_gaia"], start=start, count=count, _ts=ts)
def movie_hot_gaia(self, start=0, count=20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["movie_hot_gaia"],
start=start, count=count, _ts=ts)
def tv_hot(self, start=0, count=20, ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["tv_hot"], start=start, count=count, _ts=ts)
def tv_hot(self, start=0, count=20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["tv_hot"],
start=start, count=count, _ts=ts)
def tv_animation(self, start=0, count=20, ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["tv_animation"], start=start, count=count, _ts=ts)
def tv_animation(self, start=0, count=20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["tv_animation"],
start=start, count=count, _ts=ts)
def tv_variety_show(self, start=0, count=20, ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["tv_variety_show"], start=start, count=count, _ts=ts)
def tv_variety_show(self, start=0, count=20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["tv_variety_show"],
start=start, count=count, _ts=ts)
def tv_rank_list(self, start=0, count=20, ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["tv_rank_list"], start=start, count=count, _ts=ts)
def tv_rank_list(self, start=0, count=20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["tv_rank_list"],
start=start, count=count, _ts=ts)
def show_hot(self, start=0, count=20, ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["show_hot"], start=start, count=count, _ts=ts)
def show_hot(self, start=0, count=20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["show_hot"],
start=start, count=count, _ts=ts)
def movie_detail(self, subject_id):
return self.__invoke(self._urls["movie_detail"] + subject_id)
@@ -228,20 +269,30 @@ class DoubanApi(metaclass=Singleton):
def book_detail(self, subject_id):
return self.__invoke(self._urls["book_detail"] + subject_id)
def movie_top250(self, start=0, count=20, ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["movie_top250"], start=start, count=count, _ts=ts)
def movie_top250(self, start=0, count=20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["movie_top250"],
start=start, count=count, _ts=ts)
def movie_recommend(self, tags='', sort='R', start=0, count=20, ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["movie_recommend"], tags=tags, sort=sort, start=start, count=count, _ts=ts)
def movie_recommend(self, tags='', sort='R', start=0, count=20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["movie_recommend"], tags=tags, sort=sort,
start=start, count=count, _ts=ts)
def tv_recommend(self, tags='', sort='R', start=0, count=20, ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["tv_recommend"], tags=tags, sort=sort, start=start, count=count, _ts=ts)
def tv_recommend(self, tags='', sort='R', start=0, count=20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["tv_recommend"], tags=tags, sort=sort,
start=start, count=count, _ts=ts)
def tv_chinese_best_weekly(self, start=0, count=20, ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["tv_chinese_best_weekly"], start=start, count=count, _ts=ts)
def tv_chinese_best_weekly(self, start=0, count=20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["tv_chinese_best_weekly"],
start=start, count=count, _ts=ts)
def tv_global_best_weekly(self, start=0, count=20, ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["tv_global_best_weekly"], start=start, count=count, _ts=ts)
def tv_global_best_weekly(self, start=0, count=20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["tv_global_best_weekly"],
start=start, count=count, _ts=ts)
def doulist_detail(self, subject_id):
"""
@@ -250,7 +301,8 @@ class DoubanApi(metaclass=Singleton):
"""
return self.__invoke(self._urls["doulist"] + subject_id)
def doulist_items(self, subject_id, start=0, count=20, ts=datetime.strftime(datetime.now(), '%Y%m%d')):
def doulist_items(self, subject_id, start=0, count=20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
"""
豆列列表
:param subject_id: 豆列id
@@ -258,4 +310,9 @@ class DoubanApi(metaclass=Singleton):
:param count: 数量
:param ts: 时间戳
"""
return self.__invoke(self._urls["doulist_items"] % subject_id, start=start, count=count, _ts=ts)
return self.__invoke(self._urls["doulist_items"] % subject_id,
start=start, count=count, _ts=ts)
def __del__(self):
if self._session:
self._session.close()

View File

@@ -17,7 +17,7 @@ class DoubanScraper:
生成刮削文件
:param meta: 元数据
:param mediainfo: 媒体信息
:param file_path: 文件路径
:param file_path: 文件路径或者目录路径
"""
try:

View File

@@ -1,4 +1,3 @@
import json
from pathlib import Path
from typing import Optional, Tuple, Union, Any, List, Generator
@@ -7,7 +6,6 @@ from app.core.context import MediaInfo
from app.log import logger
from app.modules import _ModuleBase
from app.modules.emby.emby import Emby
from app.schemas import ExistMediaInfo, RefreshMediaItem, WebhookEventInfo
from app.schemas.types import MediaType
@@ -23,6 +21,14 @@ class EmbyModule(_ModuleBase):
def init_setting(self) -> Tuple[str, Union[str, bool]]:
return "MEDIASERVER", "emby"
def scheduler_job(self) -> None:
"""
定时任务每10分钟调用一次
"""
# 定时重连
if not self.emby.is_inactive():
self.emby.reconnect()
def user_authenticate(self, name: str, password: str) -> Optional[str]:
"""
使用Emby用户辅助完成用户认证
@@ -33,7 +39,7 @@ class EmbyModule(_ModuleBase):
# Emby认证
return self.emby.authenticate(name, password)
def webhook_parser(self, body: Any, form: Any, args: Any) -> WebhookEventInfo:
def webhook_parser(self, body: Any, form: Any, args: Any) -> Optional[schemas.WebhookEventInfo]:
"""
解析Webhook报文体
:param body: 请求体
@@ -41,13 +47,9 @@ class EmbyModule(_ModuleBase):
:param args: 请求参数
:return: 字典解析为消息时需要包含title、text、image
"""
if form and form.get("data"):
result = form.get("data")
else:
result = json.dumps(dict(args))
return self.emby.get_webhook_message(result)
return self.emby.get_webhook_message(form, args)
def media_exists(self, mediainfo: MediaInfo, itemid: str = None) -> Optional[ExistMediaInfo]:
def media_exists(self, mediainfo: MediaInfo, itemid: str = None) -> Optional[schemas.ExistMediaInfo]:
"""
判断媒体文件是否存在
:param mediainfo: 识别的媒体信息
@@ -59,27 +61,42 @@ class EmbyModule(_ModuleBase):
movie = self.emby.get_iteminfo(itemid)
if movie:
logger.info(f"媒体库中已存在:{movie}")
return ExistMediaInfo(type=MediaType.MOVIE)
movies = self.emby.get_movies(title=mediainfo.title, year=mediainfo.year)
return schemas.ExistMediaInfo(
type=MediaType.MOVIE,
server="emby",
itemid=movie.item_id
)
movies = self.emby.get_movies(title=mediainfo.title,
year=mediainfo.year,
tmdb_id=mediainfo.tmdb_id)
if not movies:
logger.info(f"{mediainfo.title_year} 在媒体库中不存在")
return None
else:
logger.info(f"媒体库中已存在:{movies}")
return ExistMediaInfo(type=MediaType.MOVIE)
return schemas.ExistMediaInfo(
type=MediaType.MOVIE,
server="emby",
itemid=movies[0].item_id
)
else:
tvs = self.emby.get_tv_episodes(title=mediainfo.title,
year=mediainfo.year,
tmdb_id=mediainfo.tmdb_id,
item_id=itemid)
itemid, tvs = self.emby.get_tv_episodes(title=mediainfo.title,
year=mediainfo.year,
tmdb_id=mediainfo.tmdb_id,
item_id=itemid)
if not tvs:
logger.info(f"{mediainfo.title_year} 在媒体库中不存在")
return None
else:
logger.info(f"{mediainfo.title_year} 媒体库中已存在:{tvs}")
return ExistMediaInfo(type=MediaType.TV, seasons=tvs)
return schemas.ExistMediaInfo(
type=MediaType.TV,
seasons=tvs,
server="emby",
itemid=itemid
)
def refresh_mediaserver(self, mediainfo: MediaInfo, file_path: Path) -> Optional[bool]:
def refresh_mediaserver(self, mediainfo: MediaInfo, file_path: Path) -> None:
"""
刷新媒体库
:param mediainfo: 识别的媒体信息
@@ -87,7 +104,7 @@ class EmbyModule(_ModuleBase):
:return: 成功或失败
"""
items = [
RefreshMediaItem(
schemas.RefreshMediaItem(
title=mediainfo.title,
year=mediainfo.year,
type=mediainfo.type,
@@ -95,61 +112,48 @@ class EmbyModule(_ModuleBase):
target_path=file_path
)
]
return self.emby.refresh_library_by_items(items)
self.emby.refresh_library_by_items(items)
def media_statistic(self) -> schemas.Statistic:
def media_statistic(self) -> List[schemas.Statistic]:
"""
媒体数量统计
"""
media_statistic = self.emby.get_medias_count()
user_count = self.emby.get_user_count()
return schemas.Statistic(
movie_count=media_statistic.get("MovieCount") or 0,
tv_count=media_statistic.get("SeriesCount") or 0,
episode_count=media_statistic.get("EpisodeCount") or 0,
user_count=user_count or 0
)
media_statistic.user_count = self.emby.get_user_count()
return [media_statistic]
def mediaserver_librarys(self) -> List[schemas.MediaServerLibrary]:
def mediaserver_librarys(self, server: str) -> Optional[List[schemas.MediaServerLibrary]]:
"""
媒体库列表
"""
librarys = self.emby.get_librarys()
if not librarys:
return []
return [schemas.MediaServerLibrary(
server="emby",
id=library.get("id"),
name=library.get("name"),
type=library.get("type"),
path=library.get("path")
) for library in librarys]
if server != "emby":
return None
return self.emby.get_librarys()
def mediaserver_items(self, library_id: str) -> Generator:
def mediaserver_items(self, server: str, library_id: str) -> Optional[Generator]:
"""
媒体库项目列表
"""
items = self.emby.get_items(library_id)
for item in items:
yield schemas.MediaServerItem(
server="emby",
library=item.get("library"),
item_id=item.get("id"),
item_type=item.get("type"),
title=item.get("title"),
original_title=item.get("original_title"),
year=item.get("year"),
tmdbid=int(item.get("tmdbid")) if item.get("tmdbid") else None,
imdbid=item.get("imdbid"),
tvdbid=item.get("tvdbid"),
path=item.get("path"),
)
if server != "emby":
return None
return self.emby.get_items(library_id)
def mediaserver_tv_episodes(self, item_id: Union[str, int]) -> List[schemas.MediaServerSeasonInfo]:
def mediaserver_iteminfo(self, server: str, item_id: str) -> Optional[schemas.MediaServerItem]:
"""
媒体库项目详情
"""
if server != "emby":
return None
return self.emby.get_iteminfo(item_id)
def mediaserver_tv_episodes(self, server: str,
item_id: Union[str, int]) -> Optional[List[schemas.MediaServerSeasonInfo]]:
"""
获取剧集信息
"""
seasoninfo = self.emby.get_tv_episodes(item_id=item_id)
if server != "emby":
return None
_, seasoninfo = self.emby.get_tv_episodes(item_id=item_id)
if not seasoninfo:
return []
return [schemas.MediaServerSeasonInfo(

View File

@@ -1,17 +1,16 @@
import json
import re
from pathlib import Path
from typing import List, Optional, Union, Dict, Generator
from typing import List, Optional, Union, Dict, Generator, Tuple
from requests import Response
from app import schemas
from app.core.config import settings
from app.log import logger
from app.schemas import RefreshMediaItem, WebhookEventInfo
from app.schemas.types import MediaType
from app.utils.http import RequestUtils
from app.utils.singleton import Singleton
from app.utils.string import StringUtils
class Emby(metaclass=Singleton):
@@ -24,8 +23,23 @@ class Emby(metaclass=Singleton):
if not self._host.startswith("http"):
self._host = "http://" + self._host
self._apikey = settings.EMBY_API_KEY
self._user = self.get_user()
self._folders = self.get_emby_folders()
self.user = self.get_user()
self.folders = self.get_emby_folders()
def is_inactive(self) -> bool:
"""
判断是否需要重连
"""
if not self._host or not self._apikey:
return False
return True if not self.user else False
def reconnect(self):
"""
重连
"""
self.user = self.get_user()
self.folders = self.get_emby_folders()
def get_emby_folders(self) -> List[dict]:
"""
@@ -51,7 +65,7 @@ class Emby(metaclass=Singleton):
"""
if not self._host or not self._apikey:
return []
req_url = f"{self._host}emby/Users/{self._user}/Views?api_key={self._apikey}"
req_url = f"{self._host}emby/Users/{self.user}/Views?api_key={self._apikey}"
try:
res = RequestUtils().get_res(req_url)
if res:
@@ -63,7 +77,7 @@ class Emby(metaclass=Singleton):
logger.error(f"连接User/Views 出错:" + str(e))
return []
def get_librarys(self):
def get_librarys(self) -> List[schemas.MediaServerLibrary]:
"""
获取媒体服务器所有媒体库列表
"""
@@ -78,12 +92,15 @@ class Emby(metaclass=Singleton):
library_type = MediaType.TV.value
case _:
continue
libraries.append({
"id": library.get("Id"),
"name": library.get("Name"),
"path": library.get("Path"),
"type": library_type
})
libraries.append(
schemas.MediaServerLibrary(
server="emby",
id=library.get("Id"),
name=library.get("Name"),
path=library.get("Path"),
type=library_type
)
)
return libraries
def get_user(self, user_name: str = None) -> Optional[Union[str, int]]:
@@ -185,59 +202,29 @@ class Emby(metaclass=Singleton):
logger.error(f"连接Users/Query出错" + str(e))
return 0
def get_activity_log(self, num: int = 30) -> List[dict]:
"""
获取Emby活动记录
"""
if not self._host or not self._apikey:
return []
req_url = "%semby/System/ActivityLog/Entries?api_key=%s&" % (self._host, self._apikey)
ret_array = []
try:
res = RequestUtils().get_res(req_url)
if res:
ret_json = res.json()
items = ret_json.get('Items')
for item in items:
if item.get("Type") == "AuthenticationSucceeded":
event_type = "LG"
event_date = StringUtils.get_time(item.get("Date"))
event_str = "%s, %s" % (item.get("Name"), item.get("ShortOverview"))
activity = {"type": event_type, "event": event_str, "date": event_date}
ret_array.append(activity)
if item.get("Type") in ["VideoPlayback", "VideoPlaybackStopped"]:
event_type = "PL"
event_date = StringUtils.get_time(item.get("Date"))
event_str = item.get("Name")
activity = {"type": event_type, "event": event_str, "date": event_date}
ret_array.append(activity)
else:
logger.error(f"System/ActivityLog/Entries 未获取到返回数据")
return []
except Exception as e:
logger.error(f"连接System/ActivityLog/Entries出错" + str(e))
return []
return ret_array[:num]
def get_medias_count(self) -> dict:
def get_medias_count(self) -> schemas.Statistic:
"""
获得电影、电视剧、动漫媒体数量
:return: MovieCount SeriesCount SongCount
"""
if not self._host or not self._apikey:
return {}
return schemas.Statistic()
req_url = "%semby/Items/Counts?api_key=%s" % (self._host, self._apikey)
try:
res = RequestUtils().get_res(req_url)
if res:
return res.json()
result = res.json()
return schemas.Statistic(
movie_count=result.get("MovieCount") or 0,
tv_count=result.get("SeriesCount") or 0,
episode_count=result.get("EpisodeCount") or 0
)
else:
logger.error(f"Items/Counts 未获取到返回数据")
return {}
return schemas.Statistic()
except Exception as e:
logger.error(f"连接Items/Counts出错" + str(e))
return {}
return schemas.Statistic()
def __get_emby_series_id_by_name(self, name: str, year: str) -> Optional[str]:
"""
@@ -248,7 +235,15 @@ class Emby(metaclass=Singleton):
"""
if not self._host or not self._apikey:
return None
req_url = "%semby/Items?IncludeItemTypes=Series&Fields=ProductionYear&StartIndex=0&Recursive=true&SearchTerm=%s&Limit=10&IncludeSearchTypes=false&api_key=%s" % (
req_url = ("%semby/Items?"
"IncludeItemTypes=Series"
"&Fields=ProductionYear"
"&StartIndex=0"
"&Recursive=true"
"&SearchTerm=%s"
"&Limit=10"
"&IncludeSearchTypes=false"
"&api_key=%s") % (
self._host, name, self._apikey)
try:
res = RequestUtils().get_res(req_url)
@@ -264,11 +259,15 @@ class Emby(metaclass=Singleton):
return None
return ""
def get_movies(self, title: str, year: str = None) -> Optional[List[dict]]:
def get_movies(self,
title: str,
year: str = None,
tmdb_id: int = None) -> Optional[List[schemas.MediaServerItem]]:
"""
根据标题和年份检查电影是否在Emby中存在存在则返回列表
:param title: 标题
:param year: 年份,可以为空,为空时不按年份过滤
:param tmdb_id: TMDB ID
:return: 含title、year属性的字典列表
"""
if not self._host or not self._apikey:
@@ -283,11 +282,30 @@ class Emby(metaclass=Singleton):
if res_items:
ret_movies = []
for res_item in res_items:
if res_item.get('Name') == title and (
not year or str(res_item.get('ProductionYear')) == str(year)):
ret_movies.append(
{'title': res_item.get('Name'), 'year': str(res_item.get('ProductionYear'))})
return ret_movies
item_tmdbid = res_item.get("ProviderIds", {}).get("Tmdb")
mediaserver_item = schemas.MediaServerItem(
server="emby",
library=res_item.get("ParentId"),
item_id=res_item.get("Id"),
item_type=res_item.get("Type"),
title=res_item.get("Name"),
original_title=res_item.get("OriginalTitle"),
year=res_item.get("ProductionYear"),
tmdbid=int(item_tmdbid) if item_tmdbid else None,
imdbid=res_item.get("ProviderIds", {}).get("Imdb"),
tvdbid=res_item.get("ProviderIds", {}).get("Tvdb"),
path=res_item.get("Path")
)
if tmdb_id and item_tmdbid:
if str(item_tmdbid) != str(tmdb_id):
continue
else:
ret_movies.append(mediaserver_item)
continue
if (mediaserver_item.title == title
and (not year or str(mediaserver_item.year) == str(year))):
ret_movies.append(mediaserver_item)
return ret_movies
except Exception as e:
logger.error(f"连接Items出错" + str(e))
return None
@@ -298,7 +316,8 @@ class Emby(metaclass=Singleton):
title: str = None,
year: str = None,
tmdb_id: int = None,
season: int = None) -> Optional[Dict[int, list]]:
season: int = None
) -> Tuple[Optional[str], Optional[Dict[int, List[Dict[int, list]]]]]:
"""
根据标题和年份和季返回Emby中的剧集列表
:param item_id: Emby中的ID
@@ -309,20 +328,21 @@ class Emby(metaclass=Singleton):
:return: 每一季的已有集数
"""
if not self._host or not self._apikey:
return None
return None, None
# 电视剧
if not item_id:
item_id = self.__get_emby_series_id_by_name(title, year)
if item_id is None:
return None
return None, None
if not item_id:
return {}
return None, {}
# 验证tmdbid是否相同
item_tmdbid = self.get_iteminfo(item_id).get("ProviderIds", {}).get("Tmdb")
if tmdb_id and item_tmdbid:
if str(tmdb_id) != str(item_tmdbid):
return {}
# /Shows/Id/Episodes 查集的信息
item_info = self.get_iteminfo(item_id)
if item_info:
if tmdb_id and item_info.tmdbid:
if str(tmdb_id) != str(item_info.tmdbid):
return None, {}
# 查集的信息
if not season:
season = ""
try:
@@ -330,7 +350,8 @@ class Emby(metaclass=Singleton):
self._host, item_id, season, self._apikey)
res_json = RequestUtils().get_res(req_url)
if res_json:
res_items = res_json.json().get("Items")
tv_item = res_json.json()
res_items = tv_item.get("Items")
season_episodes = {}
for res_item in res_items:
season_index = res_item.get("ParentIndexNumber")
@@ -345,11 +366,11 @@ class Emby(metaclass=Singleton):
season_episodes[season_index] = []
season_episodes[season_index].append(episode_index)
# 返回
return season_episodes
return tv_item.get("Id"), season_episodes
except Exception as e:
logger.error(f"连接Shows/Id/Episodes出错" + str(e))
return None
return {}
return None, None
return None, {}
def get_remote_image_by_id(self, item_id: str, image_type: str) -> Optional[str]:
"""
@@ -412,7 +433,7 @@ class Emby(metaclass=Singleton):
return False
return False
def refresh_library_by_items(self, items: List[RefreshMediaItem]) -> bool:
def refresh_library_by_items(self, items: List[schemas.RefreshMediaItem]) -> bool:
"""
按类型、名称、年份来刷新媒体库
:param items: 已识别的需要刷新媒体库的媒体信息列表
@@ -434,7 +455,7 @@ class Emby(metaclass=Singleton):
return self.__refresh_emby_library_by_id(library_id)
logger.info(f"Emby媒体库刷新完成")
def __get_emby_library_id_by_item(self, item: RefreshMediaItem) -> Optional[str]:
def __get_emby_library_id_by_item(self, item: schemas.RefreshMediaItem) -> Optional[str]:
"""
根据媒体信息查询在哪个媒体库返回要刷新的位置的ID
:param item: {title, year, type, category, target_path}
@@ -452,33 +473,18 @@ class Emby(metaclass=Singleton):
return None
# 查找需要刷新的媒体库ID
item_path = Path(item.target_path)
for folder in self._folders:
# 找同级路径最多的媒体库(要求容器内映射路径与实际一致)
max_comm_path = ""
match_num = 0
match_id = None
# 匹配子目录
# 匹配子目录
for folder in self.folders:
for subfolder in folder.get("SubFolders"):
try:
# 查询最大公共路径
# 匹配子目录
subfolder_path = Path(subfolder.get("Path"))
item_path_parents = list(item_path.parents)
subfolder_path_parents = list(subfolder_path.parents)
common_path = next(p1 for p1, p2 in zip(reversed(item_path_parents),
reversed(subfolder_path_parents)
) if p1 == p2)
if len(common_path) > len(max_comm_path):
max_comm_path = common_path
match_id = subfolder.get("Id")
match_num += 1
except StopIteration:
continue
if item_path.is_relative_to(subfolder_path):
return folder.get("Id")
except Exception as err:
print(str(err))
# 检查匹配情况
if match_id:
return match_id if match_num == 1 else folder.get("Id")
# 如果找不到,只要路径中有分类目录名就命中
# 如果找不到,只要路径中有分类目录名就命中
for folder in self.folders:
for subfolder in folder.get("SubFolders"):
if subfolder.get("Path") and re.search(r"[/\\]%s" % item.category,
subfolder.get("Path")):
@@ -486,32 +492,46 @@ class Emby(metaclass=Singleton):
# 刷新根目录
return "/"
def get_iteminfo(self, itemid: str) -> dict:
def get_iteminfo(self, itemid: str) -> Optional[schemas.MediaServerItem]:
"""
获取单个项目详情
"""
if not itemid:
return {}
return None
if not self._host or not self._apikey:
return {}
req_url = "%semby/Users/%s/Items/%s?api_key=%s" % (self._host, self._user, itemid, self._apikey)
return None
req_url = "%semby/Users/%s/Items/%s?api_key=%s" % (self._host, self.user, itemid, self._apikey)
try:
res = RequestUtils().get_res(req_url)
if res and res.status_code == 200:
return res.json()
item = res.json()
tmdbid = item.get("ProviderIds", {}).get("Tmdb")
return schemas.MediaServerItem(
server="emby",
library=item.get("ParentId"),
item_id=item.get("Id"),
item_type=item.get("Type"),
title=item.get("Name"),
original_title=item.get("OriginalTitle"),
year=item.get("ProductionYear"),
tmdbid=int(tmdbid) if tmdbid else None,
imdbid=item.get("ProviderIds", {}).get("Imdb"),
tvdbid=item.get("ProviderIds", {}).get("Tvdb"),
path=item.get("Path")
)
except Exception as e:
logger.error(f"连接Items/Id出错" + str(e))
return {}
return None
def get_items(self, parent: str) -> Generator:
"""
获取媒体服务器所有媒体库列表
"""
if not parent:
yield {}
yield None
if not self._host or not self._apikey:
yield {}
req_url = "%semby/Users/%s/Items?ParentId=%s&api_key=%s" % (self._host, self._user, parent, self._apikey)
yield None
req_url = "%semby/Users/%s/Items?ParentId=%s&api_key=%s" % (self._host, self.user, parent, self._apikey)
try:
res = RequestUtils().get_res(req_url)
if res and res.status_code == 200:
@@ -520,26 +540,15 @@ class Emby(metaclass=Singleton):
if not result:
continue
if result.get("Type") in ["Movie", "Series"]:
item_info = self.get_iteminfo(result.get("Id"))
yield {"id": result.get("Id"),
"library": item_info.get("ParentId"),
"type": item_info.get("Type"),
"title": item_info.get("Name"),
"original_title": item_info.get("OriginalTitle"),
"year": item_info.get("ProductionYear"),
"tmdbid": item_info.get("ProviderIds", {}).get("Tmdb"),
"imdbid": item_info.get("ProviderIds", {}).get("Imdb"),
"tvdbid": item_info.get("ProviderIds", {}).get("Tvdb"),
"path": item_info.get("Path"),
"json": str(item_info)}
yield self.get_iteminfo(result.get("Id"))
elif "Folder" in result.get("Type"):
for item in self.get_items(parent=result.get('Id')):
yield item
except Exception as e:
logger.error(f"连接Users/Items出错" + str(e))
yield {}
yield None
def get_webhook_message(self, message_str: str) -> WebhookEventInfo:
def get_webhook_message(self, form: any, args: dict) -> Optional[schemas.WebhookEventInfo]:
"""
解析Emby Webhook报文
电影:
@@ -777,8 +786,22 @@ class Emby(metaclass=Singleton):
}
}
"""
message = json.loads(message_str)
eventItem = WebhookEventInfo(event=message.get('Event', ''), channel="emby")
if not form and not args:
return None
try:
if form and form.get("data"):
result = form.get("data")
else:
result = json.dumps(dict(args))
message = json.loads(result)
except Exception as e:
logger.debug(f"解析emby webhook报文出错" + str(e))
return None
eventType = message.get('Event')
if not eventType:
return None
logger.info(f"接收到emby webhook{message}")
eventItem = schemas.WebhookEventInfo(event=eventType, channel="emby")
if message.get('Item'):
if message.get('Item', {}).get('Type') == 'Episode':
eventItem.item_type = "TV"
@@ -806,9 +829,9 @@ class Emby(metaclass=Singleton):
eventItem.item_type = "MOV"
eventItem.item_name = "%s %s" % (
message.get('Item', {}).get('Name'), "(" + str(message.get('Item', {}).get('ProductionYear')) + ")")
eventItem.item_path = message.get('Item', {}).get('Path')
eventItem.item_id = message.get('Item', {}).get('Id')
eventItem.item_path = message.get('Item', {}).get('Path')
eventItem.tmdb_id = message.get('Item', {}).get('ProviderIds', {}).get('Tmdb')
if message.get('Item', {}).get('Overview') and len(message.get('Item', {}).get('Overview')) > 100:
eventItem.overview = str(message.get('Item', {}).get('Overview'))[:100] + "..."
@@ -844,16 +867,36 @@ class Emby(metaclass=Singleton):
def get_data(self, url: str) -> Optional[Response]:
"""
自定义URL从媒体服务器获取数据其中{HOST}{APIKEY}{USER}会被替换成实际的值
自定义URL从媒体服务器获取数据其中[HOST]、[APIKEY]、[USER]会被替换成实际的值
:param url: 请求地址
"""
if not self._host or not self._apikey:
return None
url = url.replace("{HOST}", self._host)\
.replace("{APIKEY}", self._apikey)\
.replace("{USER}", self._user)
url = url.replace("[HOST]", self._host) \
.replace("[APIKEY]", self._apikey) \
.replace("[USER]", self.user)
try:
return RequestUtils().get_res(url=url)
return RequestUtils(content_type="application/json").get_res(url=url)
except Exception as e:
logger.error(f"连接Emby出错" + str(e))
return None
def post_data(self, url: str, data: str = None, headers: dict = None) -> Optional[Response]:
    """
    POST data to the media server using a custom URL template.

    The placeholders ``[HOST]``, ``[APIKEY]`` and ``[USER]`` inside *url*
    are substituted with the configured server values before the request
    is issued.

    :param url: request URL template containing the placeholders
    :param data: request payload
    :param headers: request headers
    :return: the HTTP response, or None when unconfigured or on error
    """
    # Cannot talk to the server without host and API key configured.
    if not self._host or not self._apikey:
        return None
    # Substitute the template placeholders in a fixed order.
    for placeholder, actual in (("[HOST]", self._host),
                                ("[APIKEY]", self._apikey),
                                ("[USER]", self.user)):
        url = url.replace(placeholder, actual)
    try:
        return RequestUtils(
            headers=headers,
        ).post_res(url=url, data=data)
    except Exception as e:
        logger.error(f"连接Emby出错" + str(e))
        return None

View File

@@ -11,6 +11,299 @@ from app.schemas.types import MediaType
class FanartModule(_ModuleBase):
"""
{
"name": "The Wheel of Time",
"thetvdb_id": "355730",
"tvposter": [
{
"id": "174068",
"url": "http://assets.fanart.tv/fanart/tv/355730/tvposter/the-wheel-of-time-64b009de9548d.jpg",
"lang": "en",
"likes": "3"
},
{
"id": "176424",
"url": "http://assets.fanart.tv/fanart/tv/355730/tvposter/the-wheel-of-time-64de44fe42073.jpg",
"lang": "00",
"likes": "3"
},
{
"id": "176407",
"url": "http://assets.fanart.tv/fanart/tv/355730/tvposter/the-wheel-of-time-64dde63c7c941.jpg",
"lang": "en",
"likes": "0"
},
{
"id": "177321",
"url": "http://assets.fanart.tv/fanart/tv/355730/tvposter/the-wheel-of-time-64eda10599c3d.jpg",
"lang": "cz",
"likes": "0"
},
{
"id": "155050",
"url": "http://assets.fanart.tv/fanart/tv/355730/tvposter/the-wheel-of-time-6313adbd1fd58.jpg",
"lang": "pl",
"likes": "0"
},
{
"id": "140198",
"url": "http://assets.fanart.tv/fanart/tv/355730/tvposter/the-wheel-of-time-61a0d7b11952e.jpg",
"lang": "en",
"likes": "0"
},
{
"id": "140034",
"url": "http://assets.fanart.tv/fanart/tv/355730/tvposter/the-wheel-of-time-619e65b73871d.jpg",
"lang": "en",
"likes": "0"
}
],
"hdtvlogo": [
{
"id": "139835",
"url": "http://assets.fanart.tv/fanart/tv/355730/hdtvlogo/the-wheel-of-time-6197d9392faba.png",
"lang": "en",
"likes": "3"
},
{
"id": "140039",
"url": "http://assets.fanart.tv/fanart/tv/355730/hdtvlogo/the-wheel-of-time-619e87941a128.png",
"lang": "pt",
"likes": "3"
},
{
"id": "140092",
"url": "http://assets.fanart.tv/fanart/tv/355730/hdtvlogo/the-wheel-of-time-619fa2347bada.png",
"lang": "en",
"likes": "3"
},
{
"id": "164312",
"url": "http://assets.fanart.tv/fanart/tv/355730/hdtvlogo/the-wheel-of-time-63c8185cb8824.png",
"lang": "hu",
"likes": "1"
},
{
"id": "139827",
"url": "http://assets.fanart.tv/fanart/tv/355730/hdtvlogo/the-wheel-of-time-6197539658a9e.png",
"lang": "en",
"likes": "1"
},
{
"id": "177214",
"url": "http://assets.fanart.tv/fanart/tv/355730/hdtvlogo/the-wheel-of-time-64ebae44c23a6.png",
"lang": "cz",
"likes": "0"
},
{
"id": "177215",
"url": "http://assets.fanart.tv/fanart/tv/355730/hdtvlogo/the-wheel-of-time-64ebae472deef.png",
"lang": "cz",
"likes": "0"
},
{
"id": "156163",
"url": "http://assets.fanart.tv/fanart/tv/355730/hdtvlogo/the-wheel-of-time-63316bef1ff9d.png",
"lang": "cz",
"likes": "0"
},
{
"id": "155051",
"url": "http://assets.fanart.tv/fanart/tv/355730/hdtvlogo/the-wheel-of-time-6313add04ca92.png",
"lang": "pl",
"likes": "0"
},
{
"id": "152668",
"url": "http://assets.fanart.tv/fanart/tv/355730/hdtvlogo/the-wheel-of-time-62ced3775a40a.png",
"lang": "pl",
"likes": "0"
},
{
"id": "142266",
"url": "http://assets.fanart.tv/fanart/tv/355730/hdtvlogo/the-wheel-of-time-61ccd93eeac2b.png",
"lang": "de",
"likes": "0"
}
],
"hdclearart": [
{
"id": "164313",
"url": "http://assets.fanart.tv/fanart/tv/355730/hdclearart/the-wheel-of-time-63c81871c982c.png",
"lang": "en",
"likes": "3"
},
{
"id": "140284",
"url": "http://assets.fanart.tv/fanart/tv/355730/hdclearart/the-wheel-of-time-61a2128ed1df2.png",
"lang": "pt",
"likes": "3"
},
{
"id": "139828",
"url": "http://assets.fanart.tv/fanart/tv/355730/hdclearart/the-wheel-of-time-61975401e894c.png",
"lang": "en",
"likes": "1"
},
{
"id": "164314",
"url": "http://assets.fanart.tv/fanart/tv/355730/hdclearart/the-wheel-of-time-63c8188488a5f.png",
"lang": "hu",
"likes": "1"
},
{
"id": "177322",
"url": "http://assets.fanart.tv/fanart/tv/355730/hdclearart/the-wheel-of-time-64eda135933b6.png",
"lang": "cz",
"likes": "0"
},
{
"id": "142267",
"url": "http://assets.fanart.tv/fanart/tv/355730/hdclearart/the-wheel-of-time-61ccda9918c5c.png",
"lang": "de",
"likes": "0"
}
],
"seasonposter": [
{
"id": "140199",
"url": "http://assets.fanart.tv/fanart/tv/355730/seasonposter/the-wheel-of-time-61a0d7c2976de.jpg",
"lang": "en",
"likes": "1",
"season": "1"
},
{
"id": "176395",
"url": "http://assets.fanart.tv/fanart/tv/355730/seasonposter/the-wheel-of-time-64dd80b3d79a9.jpg",
"lang": "en",
"likes": "0",
"season": "1"
},
{
"id": "140035",
"url": "http://assets.fanart.tv/fanart/tv/355730/seasonposter/the-wheel-of-time-619e65c4d5357.jpg",
"lang": "en",
"likes": "0",
"season": "1"
}
],
"tvthumb": [
{
"id": "140242",
"url": "http://assets.fanart.tv/fanart/tv/355730/tvthumb/the-wheel-of-time-61a1813035506.jpg",
"lang": "en",
"likes": "1"
},
{
"id": "177323",
"url": "http://assets.fanart.tv/fanart/tv/355730/tvthumb/the-wheel-of-time-64eda15b6dce6.jpg",
"lang": "cz",
"likes": "0"
},
{
"id": "176399",
"url": "http://assets.fanart.tv/fanart/tv/355730/tvthumb/the-wheel-of-time-64dd85c9b618c.jpg",
"lang": "en",
"likes": "0"
},
{
"id": "152669",
"url": "http://assets.fanart.tv/fanart/tv/355730/tvthumb/the-wheel-of-time-62ced53d16574.jpg",
"lang": "pl",
"likes": "0"
},
{
"id": "141983",
"url": "http://assets.fanart.tv/fanart/tv/355730/tvthumb/the-wheel-of-time-61c6d04a6d701.jpg",
"lang": "en",
"likes": "0"
}
],
"showbackground": [
{
"id": "177324",
"url": "http://assets.fanart.tv/fanart/tv/355730/showbackground/the-wheel-of-time-64eda1833ccb1.jpg",
"lang": "",
"likes": "0",
"season": "all"
},
{
"id": "141986",
"url": "http://assets.fanart.tv/fanart/tv/355730/showbackground/the-wheel-of-time-61c6d08f7c7e2.jpg",
"lang": "",
"likes": "0",
"season": "all"
},
{
"id": "139868",
"url": "http://assets.fanart.tv/fanart/tv/355730/showbackground/the-wheel-of-time-6198ce358b98a.jpg",
"lang": "",
"likes": "0",
"season": "all"
}
],
"seasonthumb": [
{
"id": "176396",
"url": "http://assets.fanart.tv/fanart/tv/355730/seasonthumb/the-wheel-of-time-64dd80c8593f9.jpg",
"lang": "en",
"likes": "0",
"season": "1"
},
{
"id": "176400",
"url": "http://assets.fanart.tv/fanart/tv/355730/seasonthumb/the-wheel-of-time-64dd85da7c5e9.jpg",
"lang": "en",
"likes": "0",
"season": "0"
}
],
"tvbanner": [
{
"id": "176397",
"url": "http://assets.fanart.tv/fanart/tv/355730/tvbanner/the-wheel-of-time-64dd80da9a255.jpg",
"lang": "en",
"likes": "0"
},
{
"id": "176401",
"url": "http://assets.fanart.tv/fanart/tv/355730/tvbanner/the-wheel-of-time-64dd85e8904ea.jpg",
"lang": "en",
"likes": "0"
},
{
"id": "141988",
"url": "http://assets.fanart.tv/fanart/tv/355730/tvbanner/the-wheel-of-time-61c6d34bceb5f.jpg",
"lang": "en",
"likes": "0"
},
{
"id": "141984",
"url": "http://assets.fanart.tv/fanart/tv/355730/tvbanner/the-wheel-of-time-61c6d06c1c21c.jpg",
"lang": "en",
"likes": "0"
}
],
"seasonbanner": [
{
"id": "176398",
"url": "http://assets.fanart.tv/fanart/tv/355730/seasonbanner/the-wheel-of-time-64dd80e7dbd9f.jpg",
"lang": "en",
"likes": "0",
"season": "1"
},
{
"id": "176402",
"url": "http://assets.fanart.tv/fanart/tv/355730/seasonbanner/the-wheel-of-time-64dd85fb4f1b1.jpg",
"lang": "en",
"likes": "0",
"season": "0"
}
]
}
"""
# 代理
_proxies: dict = settings.PROXY
@@ -36,10 +329,15 @@ class FanartModule(_ModuleBase):
if mediainfo.type == MediaType.MOVIE:
result = self.__request_fanart(mediainfo.type, mediainfo.tmdb_id)
else:
result = self.__request_fanart(mediainfo.type, mediainfo.tvdb_id)
if mediainfo.tvdb_id:
result = self.__request_fanart(mediainfo.type, mediainfo.tvdb_id)
else:
logger.info(f"{mediainfo.title_year} 没有tvdbid无法获取Fanart图片")
return
if not result or result.get('status') == 'error':
logger.warn(f"没有获取到 {mediainfo.title_year} 的Fanart图片数据")
return
# 获取所有图片
for name, images in result.items():
if not images:
continue
@@ -47,7 +345,18 @@ class FanartModule(_ModuleBase):
continue
# 按欢迎程度倒排
images.sort(key=lambda x: int(x.get('likes', 0)), reverse=True)
mediainfo.set_image(self.__name(name), images[0].get('url'))
# 取第一张图片
image_obj = images[0]
# 图片属性xx_path
image_name = self.__name(name)
image_season = image_obj.get('season')
# 设置图片
if image_name.startswith("season") and image_season:
# 季图片格式 seasonxx-poster
image_name = f"season{str(image_season).rjust(2, '0')}-{image_name[6:]}"
if not mediainfo.get_image(image_name):
# 没有图片才设置
mediainfo.set_image(image_name, image_obj.get('url'))
return mediainfo

View File

@@ -1,19 +1,19 @@
import re
from pathlib import Path
from threading import Lock
from typing import Optional, List, Tuple, Union
from typing import Optional, List, Tuple, Union, Dict
from jinja2 import Template
from app.core.context import MediaInfo
from app.core.metainfo import MetaInfo
from app.core.config import settings
from app.core.context import MediaInfo
from app.core.meta import MetaBase
from app.core.metainfo import MetaInfo
from app.log import logger
from app.modules import _ModuleBase
from app.schemas import TransferInfo
from app.utils.system import SystemUtils
from app.schemas import TransferInfo, ExistMediaInfo, TmdbEpisode
from app.schemas.types import MediaType
from app.utils.system import SystemUtils
lock = Lock()
@@ -29,29 +29,36 @@ class FileTransferModule(_ModuleBase):
def init_setting(self) -> Tuple[str, Union[str, bool]]:
pass
def transfer(self, path: Path, mediainfo: MediaInfo,
transfer_type: str, target: Path = None, meta: MetaBase = None) -> TransferInfo:
def transfer(self, path: Path, meta: MetaBase, mediainfo: MediaInfo,
transfer_type: str, target: Path = None,
episodes_info: List[TmdbEpisode] = None) -> TransferInfo:
"""
文件转移
:param path: 文件路径
:param meta: 预识别的元数据,仅单文件转移时传递
:param mediainfo: 识别的媒体信息
:param transfer_type: 转移方式
:param target: 目标路径
:param meta: 预识别的元数据,仅单文件转移时传递
:param episodes_info: 当前季的全部集信息
:return: {path, target_path, message}
"""
# 获取目标路径
if not target:
target = self.get_target_path(in_path=path)
else:
target = self.get_library_path(target)
if not target:
logger.error("未找到媒体库目录,无法转移文件")
return TransferInfo(message="未找到媒体库目录,无法转移文件")
return TransferInfo(success=False,
path=path,
message="未找到媒体库目录")
# 转移
return self.transfer_media(in_path=path,
in_meta=meta,
mediainfo=mediainfo,
transfer_type=transfer_type,
target_dir=target,
in_meta=meta)
episodes_info=episodes_info)
@staticmethod
def __transfer_command(file_item: Path, target_file: Path, transfer_type: str) -> int:
@@ -121,7 +128,7 @@ class FileTransferModule(_ModuleBase):
# 比对文件名并转移字幕
org_dir: Path = org_path.parent
file_list: List[Path] = SystemUtils.list_files_with_extensions(org_dir, settings.RMT_SUBEXT)
file_list: List[Path] = SystemUtils.list_files(org_dir, settings.RMT_SUBEXT)
if len(file_list) == 0:
logger.debug(f"{org_dir} 目录下没有找到字幕文件...")
else:
@@ -207,7 +214,7 @@ class FileTransferModule(_ModuleBase):
"""
dir_name = org_path.parent
file_name = org_path.name
file_list: List[Path] = SystemUtils.list_files_with_extensions(dir_name, ['.mka'])
file_list: List[Path] = SystemUtils.list_files(dir_name, ['.mka'])
pending_file_list: List[Path] = [file for file in file_list if org_path.stem == file.stem]
if len(pending_file_list) == 0:
logger.debug(f"{dir_name} 目录下没有找到匹配的音轨文件")
@@ -236,9 +243,9 @@ class FileTransferModule(_ModuleBase):
logger.error(f"音轨文件 {file_name} {transfer_type}失败:{reason}")
return 0
def __transfer_bluray_dir(self, file_path: Path, new_path: Path, transfer_type: str) -> int:
def __transfer_dir(self, file_path: Path, new_path: Path, transfer_type: str) -> int:
"""
转移蓝光文件夹
转移整个文件夹
:param file_path: 原路径
:param new_path: 新路径
:param transfer_type: RmtMode转移方式
@@ -257,14 +264,18 @@ class FileTransferModule(_ModuleBase):
def __transfer_dir_files(self, src_dir: Path, target_dir: Path, transfer_type: str) -> int:
"""
按目录结构转移所有文件
按目录结构转移目录下所有文件
:param src_dir: 原路径
:param target_dir: 新路径
:param transfer_type: RmtMode转移方式
"""
retcode = 0
for file in src_dir.glob("**/*"):
new_file = target_dir.with_name(src_dir.name)
# 过滤掉目录
if file.is_dir():
continue
# 使用target_dir的父目录作为新的父目录
new_file = target_dir.joinpath(file.relative_to(src_dir))
if new_file.exists():
logger.warn(f"{new_file} 文件已存在")
continue
@@ -279,7 +290,7 @@ class FileTransferModule(_ModuleBase):
return retcode
def __transfer_file(self, file_item: Path, new_file: Path, transfer_type: str,
over_flag: bool = False, old_file: Path = None) -> int:
over_flag: bool = False) -> int:
"""
转移一个文件,同时处理其他相关文件
:param file_item: 原文件路径
@@ -287,12 +298,13 @@ class FileTransferModule(_ModuleBase):
:param transfer_type: RmtMode转移方式
:param over_flag: 是否覆盖为True时会先删除再转移
"""
if not over_flag and new_file.exists():
logger.warn(f"文件已存在:{new_file}")
return 0
if over_flag and old_file and old_file.exists():
logger.info(f"正在删除已存在的文件:{old_file}")
old_file.unlink()
if new_file.exists():
if not over_flag:
logger.warn(f"文件已存在:{new_file}")
return 0
else:
logger.info(f"正在删除已存在的文件:{new_file}")
new_file.unlink()
logger.info(f"正在转移文件:{file_item}{new_file}")
# 创建父目录
new_file.parent.mkdir(parents=True, exist_ok=True)
@@ -311,42 +323,14 @@ class FileTransferModule(_ModuleBase):
over_flag=over_flag)
@staticmethod
def __is_bluray_dir(dir_path: Path) -> bool:
def __get_dest_dir(mediainfo: MediaInfo, target_dir: Path) -> Path:
"""
判断是否为蓝光原盘目录
"""
# 蓝光原盘目录必备的文件或文件夹
required_files = ['BDMV', 'CERTIFICATE']
# 检查目录下是否存在所需文件或文件夹
for item in required_files:
if (dir_path / item).exists():
return True
return False
def transfer_media(self,
in_path: Path,
mediainfo: MediaInfo,
transfer_type: str,
target_dir: Path = None,
in_meta: MetaBase = None
) -> TransferInfo:
"""
识别并转移一个文件、多个文件或者目录
:param in_path: 转移的路径,可能是一个文件也可以是一个目录
根据设置并装媒体库目录
:param mediainfo: 媒体信息
:param target_dir: 目的文件夹,非空的转移到该文件夹,为空时则按类型转移到配置文件中的媒体库文件夹
:param transfer_type: 文件转移方式
:param in_meta预识别元数为空则重新识别
:return: TransferInfo、错误信息
:target_dir: 媒体库根目录
"""
# 检查目录路径
if not in_path.exists():
return TransferInfo(message=f"{in_path} 路径不存在")
if not target_dir.exists():
return TransferInfo(message=f"{target_dir} 目标路径不存在")
if mediainfo.type == MediaType.MOVIE:
# 电影
if settings.LIBRARY_MOVIE_NAME:
target_dir = target_dir / settings.LIBRARY_MOVIE_NAME / mediainfo.category
else:
@@ -354,172 +338,191 @@ class FileTransferModule(_ModuleBase):
target_dir = target_dir / mediainfo.type.value / mediainfo.category
if mediainfo.type == MediaType.TV:
if settings.LIBRARY_TV_NAME:
# 电视剧
if settings.LIBRARY_ANIME_NAME \
and mediainfo.genre_ids \
and set(mediainfo.genre_ids).intersection(set(settings.ANIME_GENREIDS)):
# 动漫
target_dir = target_dir / settings.LIBRARY_ANIME_NAME / mediainfo.category
elif settings.LIBRARY_TV_NAME:
# 电视剧
target_dir = target_dir / settings.LIBRARY_TV_NAME / mediainfo.category
else:
# 目的目录加上类型和二级分类
target_dir = target_dir / mediainfo.type.value / mediainfo.category
return target_dir
def transfer_media(self,
in_path: Path,
in_meta: MetaBase,
mediainfo: MediaInfo,
transfer_type: str,
target_dir: Path,
episodes_info: List[TmdbEpisode] = None
) -> TransferInfo:
"""
识别并转移一个文件或者一个目录下的所有文件
:param in_path: 转移的路径,可能是一个文件也可以是一个目录
:param in_meta预识别元数据
:param mediainfo: 媒体信息
:param target_dir: 媒体库根目录
:param transfer_type: 文件转移方式
:param episodes_info: 当前季的全部集信息
:return: TransferInfo、错误信息
"""
# 检查目录路径
if not in_path.exists():
return TransferInfo(success=False,
path=in_path,
message=f"{in_path} 路径不存在")
if not target_dir.exists():
return TransferInfo(success=False,
path=in_path,
message=f"{target_dir} 目标路径不存在")
# 媒体库目的目录
target_dir = self.__get_dest_dir(mediainfo=mediainfo, target_dir=target_dir)
# 重命名格式
rename_format = settings.TV_RENAME_FORMAT \
if mediainfo.type == MediaType.TV else settings.MOVIE_RENAME_FORMAT
# 总大小
total_filesize = 0
# 处理文件清单
file_list = []
# 目标文件清单
file_list_new = []
# 失败文件清单
fail_list = []
# 错误信息
err_msgs = []
# 判断是否为蓝光原盘
bluray_flag = self.__is_bluray_dir(in_path)
if bluray_flag:
# 识别目录名称,不包括后缀
meta = MetaInfo(in_path.stem)
# 判断是否为文件夹
if in_path.is_dir():
# 转移整个目录
# 是否蓝光原盘
bluray_flag = SystemUtils.is_bluray_dir(in_path)
if bluray_flag:
logger.info(f"{in_path} 是蓝光原盘文件夹")
# 目的路径
new_path = self.get_rename_path(
path=target_dir,
template_string=rename_format,
rename_dict=self.__get_naming_dict(meta=meta,
rename_dict=self.__get_naming_dict(meta=in_meta,
mediainfo=mediainfo)
).parent
# 转移蓝光原盘
retcode = self.__transfer_bluray_dir(file_path=in_path,
new_path=new_path,
transfer_type=transfer_type)
retcode = self.__transfer_dir(file_path=in_path,
new_path=new_path,
transfer_type=transfer_type)
if retcode != 0:
return TransferInfo(message=f"{retcode},蓝光原盘转移失败")
else:
# 计算大小
total_filesize += in_path.stat().st_size
# 返回转移后的路径
return TransferInfo(path=in_path,
logger.error(f"文件夹 {in_path} 转移失败,错误码:{retcode}")
return TransferInfo(success=False,
message=f"错误码:{retcode}",
path=in_path,
target_path=new_path,
total_size=total_filesize,
is_bluray=bluray_flag,
file_list=[],
file_list_new=[])
else:
# 获取文件清单
transfer_files: List[Path] = SystemUtils.list_files_with_extensions(in_path, settings.RMT_MEDIAEXT)
if len(transfer_files) == 0:
return TransferInfo(message=f"{in_path} 目录下没有找到可转移的文件")
if not in_meta:
# 识别目录名称,不包括后缀
meta = MetaInfo(in_path.stem)
else:
meta = in_meta
# 目的路径
new_path = target_dir / (self.get_rename_path(
template_string=rename_format,
rename_dict=self.__get_naming_dict(meta=meta,
mediainfo=mediainfo)).parents[-2].name)
# 转移所有文件
for transfer_file in transfer_files:
try:
if not in_meta:
# 识别文件元数据,不包含后缀
file_meta = MetaInfo(transfer_file.stem)
# 合并元数据
file_meta.merge(meta)
else:
file_meta = in_meta
is_bluray=bluray_flag)
# 文件结束季为空
file_meta.end_season = None
# 文件总季数为1
if file_meta.total_season:
file_meta.total_season = 1
# 文件不可能有多集
if file_meta.total_episode > 2:
file_meta.total_episode = 1
file_meta.end_episode = None
# 目的文件名
new_file = self.get_rename_path(
path=target_dir,
template_string=rename_format,
rename_dict=self.__get_naming_dict(meta=file_meta,
mediainfo=mediainfo,
file_ext=transfer_file.suffix)
)
# 判断是否要覆盖
overflag = False
if new_file.exists():
if new_file.stat().st_size < transfer_file.stat().st_size:
logger.info(f"目标文件已存在,但文件大小更小,将覆盖:{new_file}")
overflag = True
# 转移文件
retcode = self.__transfer_file(file_item=transfer_file,
new_file=new_file,
transfer_type=transfer_type,
over_flag=overflag)
if retcode != 0:
logger.error(f"{transfer_file} 转移文件失败,错误码:{retcode}")
err_msgs.append(f"{transfer_file.name}:错误码 {retcode}")
fail_list.append(transfer_file)
continue
# 源文件清单
file_list.append(str(transfer_file))
# 目的文件清单
file_list_new.append(str(new_file))
# 计算总大小
total_filesize += transfer_file.stat().st_size
except Exception as err:
err_msgs.append(f"{transfer_file.name}{err}")
logger.error(f"{transfer_file}转移失败:{err}")
fail_list.append(transfer_file)
if not file_list:
# 没有成功的
return TransferInfo(message="\n".join(err_msgs))
return TransferInfo(path=in_path,
logger.info(f"文件夹 {in_path} 转移成功")
# 返回转移后的路径
return TransferInfo(success=True,
path=in_path,
target_path=new_path,
message="\n".join(err_msgs),
file_count=len(file_list),
total_size=total_filesize,
fail_list=fail_list,
is_bluray=bluray_flag,
file_list=file_list,
file_list_new=file_list_new)
total_size=new_path.stat().st_size,
is_bluray=bluray_flag)
else:
# 转移单个文件
if mediainfo.type == MediaType.TV:
# 电视剧
if in_meta.begin_episode is None:
logger.warn(f"文件 {in_path} 转移失败:未识别到文件集数")
return TransferInfo(success=False,
message=f"未识别到文件集数",
path=in_path,
fail_list=[str(in_path)])
# 文件结束季为空
in_meta.end_season = None
# 文件总季数为1
if in_meta.total_season:
in_meta.total_season = 1
# 文件不可能超过2集
if in_meta.total_episode > 2:
in_meta.total_episode = 1
in_meta.end_episode = None
# 目的文件名
new_file = self.get_rename_path(
path=target_dir,
template_string=rename_format,
rename_dict=self.__get_naming_dict(
meta=in_meta,
mediainfo=mediainfo,
episodes_info=episodes_info,
file_ext=in_path.suffix
)
)
# 判断是否要覆盖
overflag = False
if new_file.exists():
if new_file.stat().st_size < in_path.stat().st_size:
logger.info(f"目标文件已存在,但文件大小更小,将覆盖:{new_file}")
overflag = True
# 转移文件
retcode = self.__transfer_file(file_item=in_path,
new_file=new_file,
transfer_type=transfer_type,
over_flag=overflag)
if retcode != 0:
logger.error(f"文件 {in_path} 转移失败,错误码:{retcode}")
return TransferInfo(success=False,
message=f"错误码:{retcode}",
path=in_path,
target_path=new_file,
fail_list=[str(in_path)])
logger.info(f"文件 {in_path} 转移成功")
return TransferInfo(success=True,
path=in_path,
target_path=new_file,
file_count=1,
total_size=new_file.stat().st_size,
is_bluray=False,
file_list=[str(in_path)],
file_list_new=[str(new_file)])
@staticmethod
def __get_naming_dict(meta: MetaBase, mediainfo: MediaInfo, file_ext: str = None) -> dict:
def __get_naming_dict(meta: MetaBase, mediainfo: MediaInfo, file_ext: str = None,
episodes_info: List[TmdbEpisode] = None) -> dict:
"""
根据媒体信息返回Format字典
:param meta: 文件元数据
:param mediainfo: 识别的媒体信息
:param file_ext: 文件扩展名
:param episodes_info: 当前季的全部集信息
"""
# 获取集标题
episode_title = None
if meta.begin_episode and episodes_info:
for episode in episodes_info:
if episode.episode_number == meta.begin_episode:
episode_title = episode.name
break
return {
# 标题
"title": mediainfo.title,
# 原文件名
"original_name": meta.org_string,
"original_name": f"{meta.org_string}{file_ext}",
# 原语种标题
"original_title": mediainfo.original_title,
# 识别名称
"name": meta.name,
# 年份
"year": mediainfo.year or meta.year,
# 资源类型
"resourceType": meta.resource_type,
# 特效
"effect": meta.resource_effect,
# 版本
"edition": meta.edition,
# 分辨率
"videoFormat": meta.resource_pix,
# 制作组/字幕组
"releaseGroup": meta.resource_team,
# 特效
"effect": meta.resource_effect,
# 视频编码
"videoCodec": meta.video_encode,
# 音频编码
@@ -536,8 +539,12 @@ class FileTransferModule(_ModuleBase):
"season_episode": "%s%s" % (meta.season, meta.episodes),
# 段/节
"part": meta.part,
# 剧集标题
"episode_title": episode_title,
# 文件后缀
"fileExt": file_ext
"fileExt": file_ext,
# 自定义占位符
"customization": meta.customization
}
@staticmethod
@@ -555,38 +562,125 @@ class FileTransferModule(_ModuleBase):
else:
return Path(render_str)
@staticmethod
def get_library_path(path: Path):
"""
根据目录查询其所在的媒体库目录,查询不到的返回输入目录
"""
if not path:
return None
if not settings.LIBRARY_PATHS:
return path
# 目的路径,多路径以,分隔
dest_paths = settings.LIBRARY_PATHS
for libpath in dest_paths:
try:
if path.is_relative_to(libpath):
return libpath
except Exception as e:
logger.debug(f"计算媒体库路径时出错:{e}")
continue
return path
@staticmethod
def get_target_path(in_path: Path = None) -> Optional[Path]:
"""
计算一个最好的目的目录有in_path时找与in_path同路径的没有in_path时顺序查找1个符合大小要求的没有in_path和size时返回第1个
:param in_path: 源目录
"""
if not settings.LIBRARY_PATH:
if not settings.LIBRARY_PATHS:
return None
# 目的路径,多路径以,分隔
dest_paths = str(settings.LIBRARY_PATH).split(",")
dest_paths = settings.LIBRARY_PATHS
# 只有一个路径,直接返回
if len(dest_paths) == 1:
return Path(dest_paths[0])
return dest_paths[0]
# 匹配有最长共同上级路径的目录
max_length = 0
target_path = None
if in_path:
for path in dest_paths:
try:
relative = Path(in_path).relative_to(path).as_posix()
relative = in_path.relative_to(path).as_posix()
if len(relative) > max_length:
max_length = len(relative)
target_path = path
except Exception as e:
logger.debug(f"计算目标路径时出错:{e}")
continue
if target_path:
return Path(target_path)
return target_path
# 顺序匹配第1个满足空间存储要求的目录
if in_path.exists():
file_size = in_path.stat().st_size
for path in dest_paths:
if SystemUtils.free_space(Path(path)) > file_size:
return Path(path)
if SystemUtils.free_space(path) > file_size:
return path
# 默认返回第1个
return Path(dest_paths[0])
return dest_paths[0]
def media_exists(self, mediainfo: MediaInfo, itemid: str = None) -> Optional[ExistMediaInfo]:
    """
    Check whether the media already exists on the local file system.

    :param mediainfo: identified media information
    :param itemid: media-server item id (unused by the file-system check
                   itself; kept for interface compatibility)
    :return: None when nothing is found; otherwise an ExistMediaInfo which,
             for TV shows, carries every episode already present per season
             ({type: movie/tv, seasons: {season: [episodes]}})
    """
    if not settings.LIBRARY_PATHS:
        return None
    # Configured library root directories
    dest_paths = settings.LIBRARY_PATHS
    # Probe each library directory in turn; first hit wins.
    for dest_path in dest_paths:
        # Resolve the library root for this candidate path
        target_dir = self.get_target_path(dest_path)
        if not target_dir:
            continue
        # Append the media-category sub-directory (e.g. movie/tv grouping)
        target_dir = self.__get_dest_dir(mediainfo=mediainfo, target_dir=target_dir)
        # Pick the rename template matching the media type
        rename_format = settings.TV_RENAME_FORMAT \
            if mediainfo.type == MediaType.TV else settings.MOVIE_RENAME_FORMAT
        # Render the relative path the media would have been filed under,
        # using meta information derived from the title alone
        meta = MetaInfo(mediainfo.title)
        rel_path = self.get_rename_path(
            template_string=rename_format,
            rename_dict=self.__get_naming_dict(meta=meta,
                                               mediainfo=mediainfo)
        )
        # Only the first component of the rendered path (the media folder)
        # is needed to test for existence
        if rel_path.parts:
            media_path = target_dir / rel_path.parts[0]
        else:
            continue
        # Media folder absent in this library — try the next one
        if not media_path.exists():
            continue
        # Collect media files under the folder (filtered by extension)
        media_files = SystemUtils.list_files(directory=media_path, extensions=settings.RMT_MEDIAEXT)
        if not media_files:
            continue
        if mediainfo.type == MediaType.MOVIE:
            # For movies, any media file counts as "exists"
            logger.info(f"文件系统已存在:{mediainfo.title_year}")
            return ExistMediaInfo(type=MediaType.MOVIE)
        else:
            # For TV shows, gather which episodes exist per season by
            # parsing season/episode numbers out of each file name
            seasons: Dict[int, list] = {}
            for media_file in media_files:
                file_meta = MetaInfo(media_file.stem)
                # Files without an explicit season default to season 1
                season_index = file_meta.begin_season or 1
                episode_index = file_meta.begin_episode
                if not episode_index:
                    continue
                if season_index not in seasons:
                    seasons[season_index] = []
                seasons[season_index].append(episode_index)
            # Report the per-season episode inventory
            logger.info(f"{mediainfo.title_year} 文件系统已存在:{seasons}")
            return ExistMediaInfo(type=MediaType.TV, seasons=seasons)
    # Not found in any library directory
    return None

View File

@@ -1,7 +1,7 @@
import re
from typing import List, Tuple, Union, Dict, Optional
from app.core.context import TorrentInfo
from app.core.context import TorrentInfo, MediaInfo
from app.core.metainfo import MetaInfo
from app.log import logger
from app.modules import _ModuleBase
@@ -9,16 +9,17 @@ from app.modules.filter.RuleParser import RuleParser
class FilterModule(_ModuleBase):
# 规则解析器
parser: RuleParser = None
# 媒体信息
media: MediaInfo = None
# 内置规则集
rule_set: Dict[str, dict] = {
# 蓝光
# 蓝光原盘
"BLU": {
"include": [r'Blu-?Ray.+VC-?1|Blu-?Ray.+AVC|UHD.+blu-?ray.+HEVC|MiniBD'],
"exclude": []
"exclude": [r'[Hx].?264|[Hx].?265|WEB-?DL|WEB-?RIP|REMUX']
},
# 4K
"4K": {
@@ -37,8 +38,12 @@ class FilterModule(_ModuleBase):
},
# 中字
"CNSUB": {
"include": [r'[中国國繁简](/|\s|\\|\|)?[繁简英粤]|[英简繁](/|\s|\\|\|)?[中繁简]|繁體|简体|[中国國][字配]|国语|國語|中文|中字'],
"exclude": []
"include": [
r'[中国國繁简](/|\s|\\|\|)?[繁简英粤]|[英简繁](/|\s|\\|\|)?[中繁简]|繁體|简体|[中国國][字配]|国语|國語|中文|中字'],
"exclude": [],
"tmdb": {
"original_language": "zh,cn"
}
},
# 特效字幕
"SPECSUB": {
@@ -57,17 +62,17 @@ class FilterModule(_ModuleBase):
},
# H265
"H265": {
"include": [r'[Hx].?265'],
"include": [r'[Hx].?265|HEVC'],
"exclude": []
},
# H264
"H264": {
"include": [r'[Hx].?264'],
"include": [r'[Hx].?264|AVC'],
"exclude": []
},
# 杜比
"DOLBY": {
"include": [r"DOLBY|DOVI|[\s.]+DV[\s.]+|杜比"],
"include": [r"Dolby[\s.]+Vision|DOVI|[\s.]+DV[\s.]+|杜比视界"],
"exclude": []
},
# HDR
@@ -91,7 +96,7 @@ class FilterModule(_ModuleBase):
},
# 国语配音
"CNVOI": {
"include": [r'[国國][语語]配音|[国國]配'],
"include": [r'[国國][语語]配音|[国國]配|[国國][语語]'],
"exclude": []
}
}
@@ -107,16 +112,19 @@ class FilterModule(_ModuleBase):
def filter_torrents(self, rule_string: str,
torrent_list: List[TorrentInfo],
season_episodes: Dict[int, list] = None) -> List[TorrentInfo]:
season_episodes: Dict[int, list] = None,
mediainfo: MediaInfo = None) -> List[TorrentInfo]:
"""
过滤种子资源
:param rule_string: 过滤规则
:param torrent_list: 资源列表
:param season_episodes: 季集数过滤 {season:[episodes]}
:param mediainfo: 媒体信息
:return: 过滤后的资源列表,添加资源优先级
"""
if not rule_string:
return torrent_list
self.media = mediainfo
# 返回种子列表
ret_torrents = []
for torrent in torrent_list:
@@ -215,6 +223,11 @@ class FilterModule(_ModuleBase):
if not self.rule_set.get(rule_name):
# 规则不存在
return False
# TMDB规则
tmdb = self.rule_set[rule_name].get("tmdb")
# 符合TMDB规则的直接返回True即不过滤
if tmdb and self.__match_tmdb(tmdb):
return True
# 包含规则项
includes = self.rule_set[rule_name].get("include") or []
# 排除规则项
@@ -236,3 +249,44 @@ class FilterModule(_ModuleBase):
# FREE规则不匹配
return False
return True
def __match_tmdb(self, tmdb: dict) -> bool:
    """
    Check whether the current media info (self.media) satisfies a rule's
    TMDB constraints.

    :param tmdb: mapping of MediaInfo attribute name -> acceptable value(s),
                 each value a string or a comma-separated list of strings
    :return: True when every non-empty constraint intersects the media's
             value(s); False otherwise, or when no media info is set
    """

    def __get_media_value(key: str):
        # Bug fix: getattr raises AttributeError for a missing attribute,
        # not ValueError — catching ValueError let the error escape and
        # the empty-value fallback never triggered.
        try:
            return getattr(self.media, key)
        except AttributeError:
            return ""

    if not self.media:
        return False
    for attr, value in tmdb.items():
        if not value:
            continue
        # Value of this attribute on the media info object
        info_value = __get_media_value(attr)
        if not info_value:
            # Attribute missing or empty: the constraint cannot match
            return False
        elif attr == "production_countries":
            # Country entries are dicts; compare by ISO 3166-1 code
            info_values = [str(val.get("iso_3166_1")).upper() for val in info_value]
        else:
            # Normalise the media value(s) to an upper-cased list
            if isinstance(info_value, list):
                info_values = [str(val).upper() for val in info_value]
            else:
                info_values = [str(info_value).upper()]
        # Normalise the constraint to an upper-cased list
        if value.find(",") != -1:
            values = [str(val).upper() for val in value.split(",")]
        else:
            values = [str(value).upper()]
        # An empty intersection means this constraint is not satisfied
        if not set(values).intersection(set(info_values)):
            return False
    return True

View File

@@ -3,9 +3,10 @@ from typing import List, Optional, Tuple, Union
from ruamel.yaml import CommentedMap
from app.core.context import MediaInfo, TorrentInfo
from app.core.context import TorrentInfo
from app.log import logger
from app.modules import _ModuleBase
from app.modules.indexer.mtorrent import MTorrentSpider
from app.modules.indexer.spider import TorrentSpider
from app.modules.indexer.tnode import TNodeSpider
from app.modules.indexer.torrentleech import TorrentLeech
@@ -27,57 +28,71 @@ class IndexerModule(_ModuleBase):
def init_setting(self) -> Tuple[str, Union[str, bool]]:
return "INDEXER", "builtin"
def search_torrents(self, site: CommentedMap, mediainfo: MediaInfo = None,
keyword: str = None, page: int = 0) -> List[TorrentInfo]:
def search_torrents(self, site: CommentedMap,
keywords: List[str] = None,
mtype: MediaType = None,
page: int = 0) -> List[TorrentInfo]:
"""
搜索一个站点
:param mediainfo: 识别的媒体信息
:param site: 站点
:param keyword: 搜索关键词,如有按关键词搜索,否则按媒体信息名称搜索
:param keywords: 搜索关键词列表
:param mtype: 媒体类型
:param page: 页码
:return: 资源列表
"""
# 确认搜索的名字
if keyword:
search_word = keyword
elif mediainfo:
search_word = mediainfo.title
else:
search_word = None
if search_word \
and site.get('language') == "en" \
and StringUtils.is_chinese(search_word):
# 不支持中文
logger.warn(f"{site.get('name')} 不支持中文搜索")
return []
if not keywords:
# 浏览种子页
keywords = [None]
# 开始索引
result_array = []
# 开始计时
start_time = datetime.now()
try:
if site.get('parser') == "TNodeSpider":
error_flag, result_array = TNodeSpider(site).search(
keyword=search_word,
# imdbid=mediainfo.imdb_id if mediainfo else None,
page=page
)
elif site.get('parser') == "TorrentLeech":
error_flag, result_array = TorrentLeech(site).search(
keyword=search_word,
page=page
)
else:
error_flag, result_array = self.__spider_search(
keyword=search_word,
# imdbid=mediainfo.imdb_id if mediainfo else None,
indexer=site,
mtype=mediainfo.type if mediainfo else None,
page=page
)
except Exception as err:
logger.error(f"{site.get('name')} 搜索出错:{err}")
# 搜索多个关键字
for search_word in keywords:
# 可能为关键字或ttxxxx
if search_word \
and site.get('language') == "en" \
and StringUtils.is_chinese(search_word):
# 不支持中文
logger.warn(f"{site.get('name')} 不支持中文搜索")
continue
# 去除搜索关键字中的特殊字符
if search_word:
search_word = StringUtils.clear(search_word, replace_word=" ", allow_space=True)
try:
if site.get('parser') == "TNodeSpider":
error_flag, result_array = TNodeSpider(site).search(
keyword=search_word,
page=page
)
elif site.get('parser') == "TorrentLeech":
error_flag, result_array = TorrentLeech(site).search(
keyword=search_word,
page=page
)
elif site.get('parser') == "mTorrent":
error_flag, result_array = MTorrentSpider(site).search(
keyword=search_word,
mtype=mtype,
page=page
)
else:
error_flag, result_array = self.__spider_search(
search_word=search_word,
indexer=site,
mtype=mtype,
page=page
)
# 有结果后停止
if result_array:
break
except Exception as err:
logger.error(f"{site.get('name')} 搜索出错:{err}")
# 索引花费的时间
seconds = round((datetime.now() - start_time).seconds, 1)
@@ -99,15 +114,13 @@ class IndexerModule(_ModuleBase):
@staticmethod
def __spider_search(indexer: CommentedMap,
keyword: str = None,
imdbid: str = None,
search_word: str = None,
mtype: MediaType = None,
page: int = 0) -> (bool, List[dict]):
"""
根据关键字搜索单个站点
:param: indexer: 站点配置
:param: keyword: 关键字
:param: imdbid: imdbid
:param: search_word: 关键字
:param: page: 页码
:param: mtype: 媒体类型
:param: timeout: 超时时间
@@ -115,8 +128,7 @@ class IndexerModule(_ModuleBase):
"""
_spider = TorrentSpider(indexer=indexer,
mtype=mtype,
keyword=keyword,
imdbid=imdbid,
keyword=search_word,
page=page)
return _spider.is_error, _spider.get_torrents()

View File

@@ -0,0 +1,144 @@
import base64
import json
import re
from typing import Tuple, List
from ruamel.yaml import CommentedMap
from app.core.config import settings
from app.log import logger
from app.schemas import MediaType
from app.utils.http import RequestUtils
from app.utils.string import StringUtils
class MTorrentSpider:
    """
    Search spider for the M-Team (mTorrent) private-tracker JSON API.

    Queries the site's torrent-search endpoint and normalises each result
    into the plain dict shape the indexer module expects.
    """
    # Indexer id from the site configuration
    _indexerid = None
    # Site root URL (expected to end with '/'), e.g. "https://example.com/"
    _domain = None
    # Display name of the site
    _name = ""
    # Proxy settings; only populated when the site config enables a proxy
    _proxy = None
    # Session cookie string for authenticated requests
    _cookie = None
    # User-Agent header value
    _ua = None
    # Page size requested from the API
    _size = 100
    # API endpoints / page templates, completed with the domain in __init__
    _searchurl = "%sapi/torrent/search"
    _downloadurl = "%sapi/torrent/genDlToken"
    _pageurl = "%sdetail/%s"
    # Movie category ids accepted by the API
    _movie_category = ['401', '419', '420', '421', '439', '405', '404']
    # TV category ids accepted by the API
    _tv_category = ['403', '402', '435', '438', '404', '405']
    # Label code -> human-readable label
    # (codes not listed here map to None — TODO confirm full code set)
    _labels = {
        0: "",
        4: "中字",
        6: "国配",
    }

    def __init__(self, indexer: CommentedMap):
        """
        :param indexer: site configuration mapping
                        (id / domain / name / proxy / cookie / ua)
        """
        if indexer:
            self._indexerid = indexer.get('id')
            self._domain = indexer.get('domain')
            self._searchurl = self._searchurl % self._domain
            self._name = indexer.get('name')
            if indexer.get('proxy'):
                self._proxy = settings.PROXY
            self._cookie = indexer.get('cookie')
            self._ua = indexer.get('ua')

    def search(self, keyword: str, mtype: MediaType = None, page: int = 0) -> Tuple[bool, List[dict]]:
        """
        Search the site for torrents.

        :param keyword: search keyword (may be empty to browse)
        :param mtype: media type used to pick category filters; None
                      searches all categories
        :param page: zero-based page index (API itself is one-based)
        :return: (error_flag, torrents) — error_flag is True on failure
        """
        # Map the media type onto the site's category filter
        if not mtype:
            categories = []
        elif mtype == MediaType.TV:
            categories = self._tv_category
        else:
            categories = self._movie_category
        params = {
            "keyword": keyword,
            "categories": categories,
            # API pages are one-based
            "pageNumber": int(page) + 1,
            "pageSize": self._size,
            "visible": 1
        }
        res = RequestUtils(
            headers={
                "Content-Type": "application/json",
                "User-Agent": f"{self._ua}"
            },
            cookies=self._cookie,
            proxies=self._proxy,
            referer=f"{self._domain}browse",
            timeout=30
        ).post_res(url=self._searchurl, json=params)
        torrents = []
        if res and res.status_code == 200:
            # Payload shape: {"data": {"data": [ ...results... ]}}
            results = res.json().get('data', {}).get("data") or []
            for result in results:
                torrent = {
                    'title': result.get('name'),
                    'description': result.get('smallDescr'),
                    'enclosure': self.__get_download_url(result.get('id')),
                    'pubdate': StringUtils.format_timestamp(result.get('createdDate')),
                    'size': result.get('size'),
                    'seeders': result.get('status', {}).get("seeders"),
                    'peers': result.get('status', {}).get("leechers"),
                    'grabs': result.get('status', {}).get("timesCompleted"),
                    'downloadvolumefactor': self.__get_downloadvolumefactor(result.get('status', {}).get("discount")),
                    'uploadvolumefactor': self.__get_uploadvolumefactor(result.get('status', {}).get("discount")),
                    'page_url': self._pageurl % (self._domain, result.get('id')),
                    'imdbid': self.__find_imdbid(result.get('imdb')),
                    'labels': [self._labels.get(result.get('labels') or 0)] if result.get('labels') else []
                }
                torrents.append(torrent)
        elif res is not None:
            # Reached the server but got a non-200 status
            logger.warn(f"{self._name} 搜索失败,错误码:{res.status_code}")
            return True, []
        else:
            # Could not connect at all
            logger.warn(f"{self._name} 搜索失败,无法连接 {self._domain}")
            return True, []
        return False, torrents

    @staticmethod
    def __find_imdbid(imdb: str) -> str:
        """Extract a ttXXXXXXX IMDB id from a string/URL, or return ''."""
        if imdb:
            m = re.search(r"tt\d+", imdb)
            if m:
                return m.group(0)
        return ""

    @staticmethod
    def __get_downloadvolumefactor(discount: str) -> float:
        """Map the API discount code to a download volume factor (default 1)."""
        # e.g. PERCENT_70 = 70% off, so only 30% of traffic is counted
        discount_dict = {
            "FREE": 0,
            "PERCENT_50": 0.5,
            "PERCENT_70": 0.3,
            "_2X_FREE": 0,
            "_2X_PERCENT_50": 0.5
        }
        if discount:
            return discount_dict.get(discount, 1)
        return 1

    @staticmethod
    def __get_uploadvolumefactor(discount: str) -> float:
        """Map the API discount code to an upload volume factor (default 1)."""
        uploadvolumefactor_dict = {
            "_2X": 2.0,
            "_2X_FREE": 2.0,
            "_2X_PERCENT_50": 2.0
        }
        if discount:
            return uploadvolumefactor_dict.get(discount, 1)
        return 1

    def __get_download_url(self, torrent_id: str) -> str:
        """
        Build the download 'enclosure' for a torrent.

        The request spec (method/params/result path) is base64-encoded and
        prefixed in brackets before the genDlToken URL — presumably decoded
        by the downloader side; verify against the consumer.
        """
        url = self._downloadurl % self._domain
        params = {
            'method': 'post',
            'params': {
                'id': torrent_id
            },
            'result': 'data'
        }
        # base64-encode the JSON request spec
        base64_str = base64.b64encode(json.dumps(params).encode('utf-8')).decode('utf-8')
        return f"[{base64_str}]{url}"

View File

@@ -40,8 +40,6 @@ class TorrentSpider:
referer: str = None
# 搜索关键字
keyword: str = None
# 搜索IMDBID
imdbid: str = None
# 媒体类型
mtype: MediaType = None
# 搜索路径、方式配置
@@ -68,7 +66,6 @@ class TorrentSpider:
def __init__(self,
indexer: CommentedMap,
keyword: [str, list] = None,
imdbid: str = None,
page: int = 0,
referer: str = None,
mtype: MediaType = None):
@@ -76,7 +73,6 @@ class TorrentSpider:
设置查询参数
:param indexer: 索引器
:param keyword: 搜索关键字,如果数组则为批量搜索
:param imdbid: IMDB ID
:param page: 页码
:param referer: Referer
:param mtype: 媒体类型
@@ -84,7 +80,6 @@ class TorrentSpider:
if not indexer:
return
self.keyword = keyword
self.imdbid = imdbid
self.mtype = mtype
self.indexerid = indexer.get('id')
self.indexername = indexer.get('name')
@@ -159,20 +154,17 @@ class TorrentSpider:
# 搜索URL
indexer_params = self.search.get("params") or {}
if indexer_params:
# 支持IMDBID时优先使用IMDBID搜索
search_area = indexer_params.get("search_area") or 0
if self.imdbid and search_area:
search_word = self.imdbid
else:
search_word = self.keyword
# 不启用IMDBID搜索时需要将search_area移除
if search_area:
indexer_params.pop('search_area')
search_area = indexer_params.get('search_area')
# search_area非0表示支持imdbid搜索
if (search_area and
(not self.keyword or not self.keyword.startswith('tt'))):
# 支持imdbid搜索但关键字不是imdbid时不启用imdbid搜索
indexer_params.pop('search_area')
# 变量字典
inputs_dict = {
"keyword": search_word
}
# 查询参数
# 查询参数,默认查询标题
params = {
"search_mode": search_mode,
"search_area": 0,
@@ -262,7 +254,12 @@ class TorrentSpider:
# 解码为字符串
page_source = raw_data.decode(encoding)
except Exception as e:
logger.error(f"chardet解码失败{e}")
logger.debug(f"chardet解码失败{e}")
# 探测utf-8解码
if re.search(r"charset=\"?utf-8\"?", ret.text, re.IGNORECASE):
ret.encoding = "utf-8"
else:
ret.encoding = ret.apparent_encoding
page_source = ret.text
else:
page_source = ret.text

View File

@@ -49,16 +49,16 @@ class TNodeSpider:
if csrf_token:
self._token = csrf_token.group(1)
def search(self, keyword: str, imdbid: str = None, page: int = 0) -> Tuple[bool, List[dict]]:
def search(self, keyword: str, page: int = 0) -> Tuple[bool, List[dict]]:
if not self._token:
logger.warn(f"{self._name} 未获取到token无法搜索")
return True, []
search_type = "imdbid" if imdbid else "title"
search_type = "imdbid" if (keyword and keyword.startswith('tt')) else "title"
params = {
"page": int(page) + 1,
"size": self._size,
"type": search_type,
"keyword": imdbid or keyword or "",
"keyword": keyword or "",
"sorter": "id",
"order": "desc",
"tags": [],

View File

@@ -1,4 +1,3 @@
import json
from pathlib import Path
from typing import Optional, Tuple, Union, Any, List, Generator
@@ -7,7 +6,6 @@ from app.core.context import MediaInfo
from app.log import logger
from app.modules import _ModuleBase
from app.modules.jellyfin.jellyfin import Jellyfin
from app.schemas import ExistMediaInfo, WebhookEventInfo
from app.schemas.types import MediaType
@@ -17,12 +15,20 @@ class JellyfinModule(_ModuleBase):
def init_module(self) -> None:
self.jellyfin = Jellyfin()
def stop(self):
pass
def init_setting(self) -> Tuple[str, Union[str, bool]]:
return "MEDIASERVER", "jellyfin"
def scheduler_job(self) -> None:
"""
定时任务每10分钟调用一次
"""
# 定时重连
if not self.jellyfin.is_inactive():
self.jellyfin.reconnect()
def stop(self):
pass
def user_authenticate(self, name: str, password: str) -> Optional[str]:
"""
使用Emby用户辅助完成用户认证
@@ -33,7 +39,7 @@ class JellyfinModule(_ModuleBase):
# Jellyfin认证
return self.jellyfin.authenticate(name, password)
def webhook_parser(self, body: Any, form: Any, args: Any) -> WebhookEventInfo:
def webhook_parser(self, body: Any, form: Any, args: Any) -> Optional[schemas.WebhookEventInfo]:
"""
解析Webhook报文体
:param body: 请求体
@@ -41,9 +47,9 @@ class JellyfinModule(_ModuleBase):
:param args: 请求参数
:return: 字典解析为消息时需要包含title、text、image
"""
return self.jellyfin.get_webhook_message(json.loads(body))
return self.jellyfin.get_webhook_message(body)
def media_exists(self, mediainfo: MediaInfo, itemid: str = None) -> Optional[ExistMediaInfo]:
def media_exists(self, mediainfo: MediaInfo, itemid: str = None) -> Optional[schemas.ExistMediaInfo]:
"""
判断媒体文件是否存在
:param mediainfo: 识别的媒体信息
@@ -55,88 +61,88 @@ class JellyfinModule(_ModuleBase):
movie = self.jellyfin.get_iteminfo(itemid)
if movie:
logger.info(f"媒体库中已存在:{movie}")
return ExistMediaInfo(type=MediaType.MOVIE)
movies = self.jellyfin.get_movies(title=mediainfo.title, year=mediainfo.year)
return schemas.ExistMediaInfo(
type=MediaType.MOVIE,
server="jellyfin",
itemid=movie.item_id
)
movies = self.jellyfin.get_movies(title=mediainfo.title, year=mediainfo.year, tmdb_id=mediainfo.tmdb_id)
if not movies:
logger.info(f"{mediainfo.title_year} 在媒体库中不存在")
return None
else:
logger.info(f"媒体库中已存在:{movies}")
return ExistMediaInfo(type=MediaType.MOVIE)
return schemas.ExistMediaInfo(
type=MediaType.MOVIE,
server="jellyfin",
itemid=movies[0].item_id
)
else:
tvs = self.jellyfin.get_tv_episodes(title=mediainfo.title,
year=mediainfo.year,
tmdb_id=mediainfo.tmdb_id,
item_id=itemid)
itemid, tvs = self.jellyfin.get_tv_episodes(title=mediainfo.title,
year=mediainfo.year,
tmdb_id=mediainfo.tmdb_id,
item_id=itemid)
if not tvs:
logger.info(f"{mediainfo.title_year} 在媒体库中不存在")
return None
else:
logger.info(f"{mediainfo.title_year} 媒体库中已存在:{tvs}")
return ExistMediaInfo(type=MediaType.TV, seasons=tvs)
return schemas.ExistMediaInfo(
type=MediaType.TV,
seasons=tvs,
server="jellyfin",
itemid=itemid
)
def refresh_mediaserver(self, mediainfo: MediaInfo, file_path: Path) -> Optional[bool]:
def refresh_mediaserver(self, mediainfo: MediaInfo, file_path: Path) -> None:
"""
刷新媒体库
:param mediainfo: 识别的媒体信息
:param file_path: 文件路径
:return: 成功或失败
"""
return self.jellyfin.refresh_root_library()
self.jellyfin.refresh_root_library()
def media_statistic(self) -> schemas.Statistic:
def media_statistic(self) -> List[schemas.Statistic]:
"""
媒体数量统计
"""
media_statistic = self.jellyfin.get_medias_count()
user_count = self.jellyfin.get_user_count()
return schemas.Statistic(
movie_count=media_statistic.get("MovieCount") or 0,
tv_count=media_statistic.get("SeriesCount") or 0,
episode_count=media_statistic.get("EpisodeCount") or 0,
user_count=user_count or 0
)
media_statistic.user_count = self.jellyfin.get_user_count()
return [media_statistic]
def mediaserver_librarys(self) -> List[schemas.MediaServerLibrary]:
def mediaserver_librarys(self, server: str) -> Optional[List[schemas.MediaServerLibrary]]:
"""
媒体库列表
"""
librarys = self.jellyfin.get_librarys()
if not librarys:
return []
return [schemas.MediaServerLibrary(
server="jellyfin",
id=library.get("id"),
name=library.get("name"),
type=library.get("type"),
path=library.get("path")
) for library in librarys]
if server != "jellyfin":
return None
return self.jellyfin.get_librarys()
def mediaserver_items(self, library_id: str) -> Generator:
def mediaserver_items(self, server: str, library_id: str) -> Optional[Generator]:
"""
媒体库项目列表
"""
items = self.jellyfin.get_items(library_id)
for item in items:
yield schemas.MediaServerItem(
server="jellyfin",
library=item.get("library"),
item_id=item.get("id"),
item_type=item.get("type"),
title=item.get("title"),
original_title=item.get("original_title"),
year=item.get("year"),
tmdbid=item.get("tmdbid"),
imdbid=item.get("imdbid"),
tvdbid=item.get("tvdbid"),
path=item.get("path"),
)
if server != "jellyfin":
return None
return self.jellyfin.get_items(library_id)
def mediaserver_tv_episodes(self, item_id: Union[str, int]) -> List[schemas.MediaServerSeasonInfo]:
def mediaserver_iteminfo(self, server: str, item_id: str) -> Optional[schemas.MediaServerItem]:
"""
媒体库项目详情
"""
if server != "jellyfin":
return None
return self.jellyfin.get_iteminfo(item_id)
def mediaserver_tv_episodes(self, server: str,
item_id: Union[str, int]) -> Optional[List[schemas.MediaServerSeasonInfo]]:
"""
获取剧集信息
"""
seasoninfo = self.jellyfin.get_tv_episodes(item_id=item_id)
if server != "jellyfin":
return None
_, seasoninfo = self.jellyfin.get_tv_episodes(item_id=item_id)
if not seasoninfo:
return []
return [schemas.MediaServerSeasonInfo(

View File

@@ -1,15 +1,14 @@
import json
import re
from typing import List, Union, Optional, Dict, Generator
from typing import List, Union, Optional, Dict, Generator, Tuple
from requests import Response
from app import schemas
from app.core.config import settings
from app.log import logger
from app.schemas import MediaType, WebhookEventInfo
from app.schemas import MediaType
from app.utils.http import RequestUtils
from app.utils.singleton import Singleton
from app.utils.string import StringUtils
class Jellyfin(metaclass=Singleton):
@@ -22,8 +21,23 @@ class Jellyfin(metaclass=Singleton):
if not self._host.startswith("http"):
self._host = "http://" + self._host
self._apikey = settings.JELLYFIN_API_KEY
self._user = self.get_user()
self._serverid = self.get_server_id()
self.user = self.get_user()
self.serverid = self.get_server_id()
def is_inactive(self) -> bool:
    """
    Report whether the connection needs to be re-established.

    :return: True when the server is configured (host and API key present)
             but no cached user is held; False otherwise.
    """
    if not self._host or not self._apikey:
        return False
    # Idiom fix: 'True if not self.user else False' is just the negation
    return not self.user
def reconnect(self):
    """
    Re-establish the session by refreshing the cached user id and
    server id from the Jellyfin API.
    """
    self.user = self.get_user()
    self.serverid = self.get_server_id()
def __get_jellyfin_librarys(self) -> List[dict]:
"""
@@ -31,7 +45,7 @@ class Jellyfin(metaclass=Singleton):
"""
if not self._host or not self._apikey:
return []
req_url = f"{self._host}Users/{self._user}/Views?api_key={self._apikey}"
req_url = f"{self._host}Users/{self.user}/Views?api_key={self._apikey}"
try:
res = RequestUtils().get_res(req_url)
if res:
@@ -58,12 +72,14 @@ class Jellyfin(metaclass=Singleton):
library_type = MediaType.TV.value
case _:
continue
libraries.append({
"id": library.get("Id"),
"name": library.get("Name"),
"path": library.get("Path"),
"type": library_type
})
libraries.append(
schemas.MediaServerLibrary(
server="jellyfin",
id=library.get("Id"),
name=library.get("Name"),
path=library.get("Path"),
type=library_type
))
return libraries
def get_user_count(self) -> int:
@@ -164,68 +180,39 @@ class Jellyfin(metaclass=Singleton):
logger.error(f"连接System/Info出错" + str(e))
return None
def get_activity_log(self, num: int = 30) -> List[dict]:
"""
获取Jellyfin活动记录
"""
if not self._host or not self._apikey:
return []
req_url = "%sSystem/ActivityLog/Entries?api_key=%s&Limit=%s" % (self._host, self._apikey, num)
ret_array = []
try:
res = RequestUtils().get_res(req_url)
if res:
ret_json = res.json()
items = ret_json.get('Items')
for item in items:
if item.get("Type") == "SessionStarted":
event_type = "LG"
event_date = re.sub(r'\dZ', 'Z', item.get("Date"))
event_str = "%s, %s" % (item.get("Name"), item.get("ShortOverview"))
activity = {"type": event_type, "event": event_str,
"date": StringUtils.get_time(event_date)}
ret_array.append(activity)
if item.get("Type") in ["VideoPlayback", "VideoPlaybackStopped"]:
event_type = "PL"
event_date = re.sub(r'\dZ', 'Z', item.get("Date"))
activity = {"type": event_type, "event": item.get("Name"),
"date": StringUtils.get_time(event_date)}
ret_array.append(activity)
else:
logger.error(f"System/ActivityLog/Entries 未获取到返回数据")
return []
except Exception as e:
logger.error(f"连接System/ActivityLog/Entries出错" + str(e))
return []
return ret_array
def get_medias_count(self) -> Optional[dict]:
def get_medias_count(self) -> schemas.Statistic:
"""
获得电影、电视剧、动漫媒体数量
:return: MovieCount SeriesCount SongCount
"""
if not self._host or not self._apikey:
return None
return schemas.Statistic()
req_url = "%sItems/Counts?api_key=%s" % (self._host, self._apikey)
try:
res = RequestUtils().get_res(req_url)
if res:
return res.json()
result = res.json()
return schemas.Statistic(
movie_count=result.get("MovieCount") or 0,
tv_count=result.get("SeriesCount") or 0,
episode_count=result.get("EpisodeCount") or 0
)
else:
logger.error(f"Items/Counts 未获取到返回数据")
return {}
return schemas.Statistic()
except Exception as e:
logger.error(f"连接Items/Counts出错" + str(e))
return {}
return schemas.Statistic()
def __get_jellyfin_series_id_by_name(self, name: str, year: str) -> Optional[str]:
"""
根据名称查询Jellyfin中剧集的SeriesId
"""
if not self._host or not self._apikey or not self._user:
if not self._host or not self._apikey or not self.user:
return None
req_url = "%sUsers/%s/Items?api_key=%s&searchTerm=%s&IncludeItemTypes=Series&Limit=10&Recursive=true" % (
self._host, self._user, self._apikey, name)
req_url = ("%sUsers/%s/Items?"
"api_key=%s&searchTerm=%s&IncludeItemTypes=Series&Limit=10&Recursive=true") % (
self._host, self.user, self._apikey, name)
try:
res = RequestUtils().get_res(req_url)
if res:
@@ -240,29 +227,53 @@ class Jellyfin(metaclass=Singleton):
return None
return ""
def get_movies(self, title: str, year: str = None) -> Optional[List[dict]]:
def get_movies(self,
title: str,
year: str = None,
tmdb_id: int = None) -> Optional[List[schemas.MediaServerItem]]:
"""
根据标题和年份检查电影是否在Jellyfin中存在存在则返回列表
:param title: 标题
:param year: 年份,为空则不过滤
:param tmdb_id: TMDB ID
:return: 含title、year属性的字典列表
"""
if not self._host or not self._apikey or not self._user:
if not self._host or not self._apikey or not self.user:
return None
req_url = "%sUsers/%s/Items?api_key=%s&searchTerm=%s&IncludeItemTypes=Movie&Limit=10&Recursive=true" % (
self._host, self._user, self._apikey, title)
req_url = ("%sUsers/%s/Items?"
"api_key=%s&searchTerm=%s&IncludeItemTypes=Movie&Limit=10&Recursive=true") % (
self._host, self.user, self._apikey, title)
try:
res = RequestUtils().get_res(req_url)
if res:
res_items = res.json().get("Items")
if res_items:
ret_movies = []
for res_item in res_items:
if res_item.get('Name') == title and (
not year or str(res_item.get('ProductionYear')) == str(year)):
ret_movies.append(
{'title': res_item.get('Name'), 'year': str(res_item.get('ProductionYear'))})
return ret_movies
for item in res_items:
item_tmdbid = item.get("ProviderIds", {}).get("Tmdb")
mediaserver_item = schemas.MediaServerItem(
server="jellyfin",
library=item.get("ParentId"),
item_id=item.get("Id"),
item_type=item.get("Type"),
title=item.get("Name"),
original_title=item.get("OriginalTitle"),
year=item.get("ProductionYear"),
tmdbid=int(item_tmdbid) if item_tmdbid else None,
imdbid=item.get("ProviderIds", {}).get("Imdb"),
tvdbid=item.get("ProviderIds", {}).get("Tvdb"),
path=item.get("Path")
)
if tmdb_id and item_tmdbid:
if str(item_tmdbid) != str(tmdb_id):
continue
else:
ret_movies.append(mediaserver_item)
continue
if mediaserver_item.title == title and (
not year or str(mediaserver_item.year) == str(year)):
ret_movies.append(mediaserver_item)
return ret_movies
except Exception as e:
logger.error(f"连接Items出错" + str(e))
return None
@@ -273,7 +284,7 @@ class Jellyfin(metaclass=Singleton):
title: str = None,
year: str = None,
tmdb_id: int = None,
season: int = None) -> Optional[Dict[int, list]]:
season: int = None) -> Tuple[Optional[str], Optional[Dict[int, list]]]:
"""
根据标题和年份和季返回Jellyfin中的剧集列表
:param item_id: Jellyfin中的Id
@@ -283,28 +294,30 @@ class Jellyfin(metaclass=Singleton):
:param season: 季
:return: 集号的列表
"""
if not self._host or not self._apikey or not self._user:
return None
if not self._host or not self._apikey or not self.user:
return None, None
# 查TVID
if not item_id:
item_id = self.__get_jellyfin_series_id_by_name(title, year)
if item_id is None:
return None
return None, None
if not item_id:
return {}
return None, {}
# 验证tmdbid是否相同
item_tmdbid = self.get_iteminfo(item_id).get("ProviderIds", {}).get("Tmdb")
if tmdb_id and item_tmdbid:
if str(tmdb_id) != str(item_tmdbid):
return {}
item_info = self.get_iteminfo(item_id)
if item_info:
if tmdb_id and item_info.tmdbid:
if str(tmdb_id) != str(item_info.tmdbid):
return None, {}
if not season:
season = ""
try:
req_url = "%sShows/%s/Episodes?season=%s&&userId=%s&isMissing=false&api_key=%s" % (
self._host, item_id, season, self._user, self._apikey)
self._host, item_id, season, self.user, self._apikey)
res_json = RequestUtils().get_res(req_url)
if res_json:
res_items = res_json.json().get("Items")
tv_info = res_json.json()
res_items = tv_info.get("Items")
# 返回的季集信息
season_episodes = {}
for res_item in res_items:
@@ -319,11 +332,11 @@ class Jellyfin(metaclass=Singleton):
if not season_episodes.get(season_index):
season_episodes[season_index] = []
season_episodes[season_index].append(episode_index)
return season_episodes
return tv_info.get('Id'), season_episodes
except Exception as e:
logger.error(f"连接Shows/Id/Episodes出错" + str(e))
return None
return {}
return None, None
return None, {}
def get_remote_image_by_id(self, item_id: str, image_type: str) -> Optional[str]:
"""
@@ -367,57 +380,159 @@ class Jellyfin(metaclass=Singleton):
logger.error(f"连接Library/Refresh出错" + str(e))
return False
def get_webhook_message(self, message: dict) -> WebhookEventInfo:
def get_webhook_message(self, body: any) -> Optional[schemas.WebhookEventInfo]:
"""
解析Jellyfin报文
{
"ServerId": "d79d3a6261614419a114595a585xxxxx",
"ServerName": "nyanmisaka-jellyfin1",
"ServerVersion": "10.8.10",
"ServerUrl": "http://xxxxxxxx:8098",
"NotificationType": "PlaybackStart",
"Timestamp": "2023-09-10T08:35:25.3996506+00:00",
"UtcTimestamp": "2023-09-10T08:35:25.3996527Z",
"Name": "慕灼华逃婚离开",
"Overview": "慕灼华假装在读书,她害怕大娘子说她不务正业。",
"Tagline": "",
"ItemId": "4b92551344f53b560fb55cd6700xxxxx",
"ItemType": "Episode",
"RunTimeTicks": 27074985984,
"RunTime": "00:45:07",
"Year": 2023,
"SeriesName": "灼灼风流",
"SeasonNumber": 1,
"SeasonNumber00": "01",
"SeasonNumber000": "001",
"EpisodeNumber": 1,
"EpisodeNumber00": "01",
"EpisodeNumber000": "001",
"Provider_tmdb": "229210",
"Video_0_Title": "4K HEVC SDR",
"Video_0_Type": "Video",
"Video_0_Codec": "hevc",
"Video_0_Profile": "Main",
"Video_0_Level": 150,
"Video_0_Height": 2160,
"Video_0_Width": 3840,
"Video_0_AspectRatio": "16:9",
"Video_0_Interlaced": false,
"Video_0_FrameRate": 25,
"Video_0_VideoRange": "SDR",
"Video_0_ColorSpace": "bt709",
"Video_0_ColorTransfer": "bt709",
"Video_0_ColorPrimaries": "bt709",
"Video_0_PixelFormat": "yuv420p",
"Video_0_RefFrames": 1,
"Audio_0_Title": "AAC - Stereo - Default",
"Audio_0_Type": "Audio",
"Audio_0_Language": "und",
"Audio_0_Codec": "aac",
"Audio_0_Channels": 2,
"Audio_0_Bitrate": 125360,
"Audio_0_SampleRate": 48000,
"Audio_0_Default": true,
"PlaybackPositionTicks": 1000000,
"PlaybackPosition": "00:00:00",
"MediaSourceId": "4b92551344f53b560fb55cd6700ebc86",
"IsPaused": false,
"IsAutomated": false,
"DeviceId": "TW96aWxsxxxxxjA",
"DeviceName": "Edge Chromium",
"ClientName": "Jellyfin Web",
"NotificationUsername": "Jeaven",
"UserId": "9783d2432b0d40a8a716b6aa46xxxxx"
}
"""
eventItem = WebhookEventInfo(
event=message.get('NotificationType', ''),
item_id=message.get('ItemId'),
item_name=message.get('Name'),
item_type=message.get('ItemType'),
item_favorite=message.get('Favorite'),
save_reason=message.get('SaveReason'),
tmdb_id=message.get('Provider_tmdb'),
user_name=message.get('NotificationUsername'),
if not body:
return None
try:
message = json.loads(body)
except Exception as e:
logger.debug(f"解析Jellyfin Webhook报文出错" + str(e))
return None
if not message:
return None
logger.info(f"接收到jellyfin webhook{message}")
eventType = message.get('NotificationType')
if not eventType:
return None
eventItem = schemas.WebhookEventInfo(
event=eventType,
channel="jellyfin"
)
eventItem.item_id = message.get('ItemId')
eventItem.tmdb_id = message.get('Provider_tmdb')
eventItem.overview = message.get('Overview')
eventItem.device_name = message.get('DeviceName')
eventItem.user_name = message.get('NotificationUsername')
eventItem.client = message.get('ClientName')
if message.get("ItemType") == "Episode":
# 剧集
eventItem.item_type = "TV"
eventItem.season_id = message.get('SeasonNumber')
eventItem.episode_id = message.get('EpisodeNumber')
eventItem.item_name = "%s %s%s %s" % (
message.get('SeriesName'),
"S" + str(eventItem.season_id),
"E" + str(eventItem.episode_id),
message.get('Name'))
else:
# 电影
eventItem.item_type = "MOV"
eventItem.item_name = "%s %s" % (
message.get('Name'), "(" + str(message.get('Year')) + ")")
# 获取消息图片
if eventItem.item_id:
# 根据返回的item_id去调用媒体服务器获取
eventItem.image_url = self.get_remote_image_by_id(item_id=eventItem.item_id,
image_type="Backdrop")
eventItem.image_url = self.get_remote_image_by_id(
item_id=eventItem.item_id,
image_type="Backdrop"
)
return eventItem
def get_iteminfo(self, itemid: str) -> dict:
def get_iteminfo(self, itemid: str) -> Optional[schemas.MediaServerItem]:
"""
获取单个项目详情
"""
if not itemid:
return {}
return None
if not self._host or not self._apikey:
return {}
return None
req_url = "%sUsers/%s/Items/%s?api_key=%s" % (
self._host, self._user, itemid, self._apikey)
self._host, self.user, itemid, self._apikey)
try:
res = RequestUtils().get_res(req_url)
if res and res.status_code == 200:
return res.json()
item = res.json()
tmdbid = item.get("ProviderIds", {}).get("Tmdb")
return schemas.MediaServerItem(
server="jellyfin",
library=item.get("ParentId"),
item_id=item.get("Id"),
item_type=item.get("Type"),
title=item.get("Name"),
original_title=item.get("OriginalTitle"),
year=item.get("ProductionYear"),
tmdbid=int(tmdbid) if tmdbid else None,
imdbid=item.get("ProviderIds", {}).get("Imdb"),
tvdbid=item.get("ProviderIds", {}).get("Tvdb"),
path=item.get("Path")
)
except Exception as e:
logger.error(f"连接Users/Items出错" + str(e))
return {}
return None
def get_items(self, parent: str) -> Generator:
"""
获取媒体服务器所有媒体库列表
"""
if not parent:
yield {}
yield None
if not self._host or not self._apikey:
yield {}
req_url = "%sUsers/%s/Items?parentId=%s&api_key=%s" % (self._host, self._user, parent, self._apikey)
yield None
req_url = "%sUsers/%s/Items?parentId=%s&api_key=%s" % (self._host, self.user, parent, self._apikey)
try:
res = RequestUtils().get_res(req_url)
if res and res.status_code == 200:
@@ -426,37 +541,46 @@ class Jellyfin(metaclass=Singleton):
if not result:
continue
if result.get("Type") in ["Movie", "Series"]:
item_info = self.get_iteminfo(result.get("Id"))
yield {"id": result.get("Id"),
"library": item_info.get("ParentId"),
"type": item_info.get("Type"),
"title": item_info.get("Name"),
"original_title": item_info.get("OriginalTitle"),
"year": item_info.get("ProductionYear"),
"tmdbid": item_info.get("ProviderIds", {}).get("Tmdb"),
"imdbid": item_info.get("ProviderIds", {}).get("Imdb"),
"tvdbid": item_info.get("ProviderIds", {}).get("Tvdb"),
"path": item_info.get("Path"),
"json": str(item_info)}
yield self.get_iteminfo(result.get("Id"))
elif "Folder" in result.get("Type"):
for item in self.get_items(result.get("Id")):
yield item
except Exception as e:
logger.error(f"连接Users/Items出错" + str(e))
yield {}
yield None
def get_data(self, url: str) -> Optional[Response]:
"""
自定义URL从媒体服务器获取数据其中{HOST}{APIKEY}{USER}会被替换成实际的值
自定义URL从媒体服务器获取数据其中[HOST]、[APIKEY]、[USER]会被替换成实际的值
:param url: 请求地址
"""
if not self._host or not self._apikey:
return None
url = url.replace("{HOST}", self._host)\
.replace("{APIKEY}", self._apikey)\
.replace("{USER}", self._user)
url = url.replace("[HOST]", self._host) \
.replace("[APIKEY]", self._apikey) \
.replace("[USER]", self.user)
try:
return RequestUtils().get_res(url=url)
return RequestUtils(accept_type="application/json").get_res(url=url)
except Exception as e:
logger.error(f"连接Jellyfin出错" + str(e))
return None
def post_data(self, url: str, data: str = None, headers: dict = None) -> Optional[Response]:
"""
自定义URL从媒体服务器获取数据其中[HOST]、[APIKEY]、[USER]会被替换成实际的值
:param url: 请求地址
:param data: 请求数据
:param headers: 请求头
"""
if not self._host or not self._apikey:
return None
url = url.replace("[HOST]", self._host) \
.replace("[APIKEY]", self._apikey) \
.replace("[USER]", self.user)
try:
return RequestUtils(
headers=headers
).post_res(url=url, data=data)
except Exception as e:
logger.error(f"连接Jellyfin出错" + str(e))
return None

View File

@@ -6,12 +6,10 @@ from app.core.context import MediaInfo
from app.log import logger
from app.modules import _ModuleBase
from app.modules.plex.plex import Plex
from app.schemas import ExistMediaInfo, RefreshMediaItem, WebhookEventInfo
from app.schemas.types import MediaType
class PlexModule(_ModuleBase):
plex: Plex = None
def init_module(self) -> None:
@@ -23,7 +21,15 @@ class PlexModule(_ModuleBase):
def init_setting(self) -> Tuple[str, Union[str, bool]]:
return "MEDIASERVER", "plex"
def webhook_parser(self, body: Any, form: Any, args: Any) -> WebhookEventInfo:
def scheduler_job(self) -> None:
"""
定时任务每10分钟调用一次
"""
# 定时重连
if not self.plex.is_inactive():
self.plex.reconnect()
def webhook_parser(self, body: Any, form: Any, args: Any) -> Optional[schemas.WebhookEventInfo]:
"""
解析Webhook报文体
:param body: 请求体
@@ -31,9 +37,9 @@ class PlexModule(_ModuleBase):
:param args: 请求参数
:return: 字典解析为消息时需要包含title、text、image
"""
return self.plex.get_webhook_message(form.get("payload"))
return self.plex.get_webhook_message(form)
def media_exists(self, mediainfo: MediaInfo, itemid: str = None) -> Optional[ExistMediaInfo]:
def media_exists(self, mediainfo: MediaInfo, itemid: str = None) -> Optional[schemas.ExistMediaInfo]:
"""
判断媒体文件是否存在
:param mediainfo: 识别的媒体信息
@@ -45,26 +51,44 @@ class PlexModule(_ModuleBase):
movie = self.plex.get_iteminfo(itemid)
if movie:
logger.info(f"媒体库中已存在:{movie}")
return ExistMediaInfo(type=MediaType.MOVIE)
movies = self.plex.get_movies(title=mediainfo.title, year=mediainfo.year)
return schemas.ExistMediaInfo(
type=MediaType.MOVIE,
server="plex",
itemid=movie.item_id
)
movies = self.plex.get_movies(title=mediainfo.title,
original_title=mediainfo.original_title,
year=mediainfo.year,
tmdb_id=mediainfo.tmdb_id)
if not movies:
logger.info(f"{mediainfo.title_year} 在媒体库中不存在")
return None
else:
logger.info(f"媒体库中已存在:{movies}")
return ExistMediaInfo(type=MediaType.MOVIE)
return schemas.ExistMediaInfo(
type=MediaType.MOVIE,
server="plex",
itemid=movies[0].item_id
)
else:
tvs = self.plex.get_tv_episodes(title=mediainfo.title,
year=mediainfo.year,
item_id=itemid)
item_id, tvs = self.plex.get_tv_episodes(title=mediainfo.title,
original_title=mediainfo.original_title,
year=mediainfo.year,
tmdb_id=mediainfo.tmdb_id,
item_id=itemid)
if not tvs:
logger.info(f"{mediainfo.title_year} 在媒体库中不存在")
return None
else:
logger.info(f"{mediainfo.title_year} 媒体库中已存在:{tvs}")
return ExistMediaInfo(type=MediaType.TV, seasons=tvs)
return schemas.ExistMediaInfo(
type=MediaType.TV,
seasons=tvs,
server="plex",
itemid=item_id
)
def refresh_mediaserver(self, mediainfo: MediaInfo, file_path: Path) -> Optional[bool]:
def refresh_mediaserver(self, mediainfo: MediaInfo, file_path: Path) -> None:
"""
刷新媒体库
:param mediainfo: 识别的媒体信息
@@ -72,7 +96,7 @@ class PlexModule(_ModuleBase):
:return: 成功或失败
"""
items = [
RefreshMediaItem(
schemas.RefreshMediaItem(
title=mediainfo.title,
year=mediainfo.year,
type=mediainfo.type,
@@ -80,60 +104,48 @@ class PlexModule(_ModuleBase):
target_path=file_path
)
]
return self.plex.refresh_library_by_items(items)
self.plex.refresh_library_by_items(items)
def media_statistic(self) -> schemas.Statistic:
def media_statistic(self) -> List[schemas.Statistic]:
"""
媒体数量统计
"""
media_statistic = self.plex.get_medias_count()
return schemas.Statistic(
movie_count=media_statistic.get("MovieCount") or 0,
tv_count=media_statistic.get("SeriesCount") or 0,
episode_count=media_statistic.get("EpisodeCount") or 0,
user_count=1
)
media_statistic.user_count = 1
return [media_statistic]
def mediaserver_librarys(self) -> List[schemas.MediaServerLibrary]:
def mediaserver_librarys(self, server: str) -> Optional[List[schemas.MediaServerLibrary]]:
"""
媒体库列表
"""
librarys = self.plex.get_librarys()
if not librarys:
return []
return [schemas.MediaServerLibrary(
server="plex",
id=library.get("id"),
name=library.get("name"),
type=library.get("type"),
path=library.get("path")
) for library in librarys]
if server != "plex":
return None
return self.plex.get_librarys()
def mediaserver_items(self, library_id: str) -> Generator:
def mediaserver_items(self, server: str, library_id: str) -> Optional[Generator]:
"""
媒体库项目列表
"""
items = self.plex.get_items(library_id)
for item in items:
yield schemas.MediaServerItem(
server="plex",
library=item.get("library"),
item_id=item.get("id"),
item_type=item.get("type"),
title=item.get("title"),
original_title=item.get("original_title"),
year=item.get("year"),
tmdbid=item.get("tmdbid"),
imdbid=item.get("imdbid"),
tvdbid=item.get("tvdbid"),
path=item.get("path"),
)
if server != "plex":
return None
return self.plex.get_items(library_id)
def mediaserver_tv_episodes(self, item_id: Union[str, int]) -> List[schemas.MediaServerSeasonInfo]:
def mediaserver_iteminfo(self, server: str, item_id: str) -> Optional[schemas.MediaServerItem]:
"""
媒体库项目详情
"""
if server != "plex":
return None
return self.plex.get_iteminfo(item_id)
def mediaserver_tv_episodes(self, server: str,
item_id: Union[str, int]) -> Optional[List[schemas.MediaServerSeasonInfo]]:
"""
获取剧集信息
"""
seasoninfo = self.plex.get_tv_episodes(item_id=item_id)
if server != "plex":
return None
_, seasoninfo = self.plex.get_tv_episodes(item_id=item_id)
if not seasoninfo:
return []
return [schemas.MediaServerSeasonInfo(

Some files were not shown because too many files have changed in this diff Show More