Compare commits

...

259 Commits

Author SHA1 Message Date
jxxghp
7fc257ea79 v2.4.0 2025-04-16 08:11:31 +08:00
jxxghp
24f170ff72 fix 搜索缓存 2025-04-16 08:10:48 +08:00
jxxghp
39999c9ee4 更新 Dockerfile 2025-04-15 06:54:11 +08:00
jxxghp
27a5188e4e 更新 Dockerfile.lite 2025-04-15 06:52:53 +08:00
jxxghp
a5af0786aa - 修复UI错误 2025-04-13 16:03:40 +08:00
jxxghp
e9c9cfaa72 Merge pull request #4137 from lddsb/patch-1 2025-04-11 16:06:29 +08:00
Dee Luo
8ca4ea0f3f perf: 优化qb下载器端口获取逻辑 2025-04-11 15:43:40 +08:00
jxxghp
86e1f9a9d6 Merge pull request #4136 from lddsb/patch-3 2025-04-11 11:43:26 +08:00
Dee Luo
b36ceda585 fix: Rename groups to groups.py 2025-04-11 11:22:29 +08:00
Dee Luo
27a3e6c6db feat: 增加制作组的单元测试 2025-04-11 11:21:39 +08:00
Dee Luo
a731327c00 feat: 增加制作组的单元测试cases 2025-04-11 11:20:36 +08:00
Dee Luo
737c00978e perf: 优化制作组匹配逻辑,解决部分Web组匹配不到的问题
增加两个站制作组的匹配规则
2025-04-11 11:18:15 +08:00
jxxghp
18bcb3a067 fix #4118 2025-04-10 19:40:22 +08:00
jxxghp
f49f55576f Merge pull request #4128 from lddsb/patch-2 2025-04-10 11:09:12 +08:00
Dee Luo
1bef4f9a4d perf: 优化制作组读取自定义制作组的逻辑,避免被空字符串的list影响最终结果 2025-04-10 11:00:46 +08:00
Dee Luo
ab1df59f7a fix: 修复前端传递了[""]这样的空list导致判空时逻辑异常的问题 2025-04-10 10:51:40 +08:00
jxxghp
bcd235521e v2.3.9
- 优化多处UI细节
- 修复了订阅分享参数传递问题,开放了订阅分享管理功能
2025-04-10 08:34:16 +08:00
jxxghp
31a2eac302 fix:订阅分享参数传递 2025-04-10 08:19:59 +08:00
jxxghp
7e6b7e5dd5 更新 subscribe.py 2025-04-09 17:32:07 +08:00
jxxghp
9ec9f48425 feat:增加订阅管理员 #4123 2025-04-09 13:26:58 +08:00
jxxghp
a3bec43eab feat:增加订阅管理员 #4123 2025-04-09 13:26:10 +08:00
jxxghp
f429b6397e fix RecommendMediaSource 2025-04-08 18:52:54 +08:00
jxxghp
9d6e7dc288 Merge pull request #4115 from lddsb/patch-1 2025-04-08 17:58:36 +08:00
Dee Luo
a27c09c1e8 perf: 放宽制作组后缀匹配
支持 制作组xxx 这样的后缀匹配
2025-04-08 16:35:38 +08:00
jxxghp
ceb0697c73 - 适配馒头API变动 2025-04-07 21:30:41 +08:00
jxxghp
6ad6a08bf1 Merge pull request #4110 from cddjr/trimemedia
提升飞牛服务端地址的兼容性
2025-04-07 21:15:38 +08:00
jxxghp
fac6ad7116 Merge pull request #4109 from cddjr/fix_mteam
修复馒头请求参数错误的问题
2025-04-07 21:14:42 +08:00
景大侠
7d8cda0457 修复馒头请求参数错误的问题 2025-04-07 21:04:21 +08:00
景大侠
33fc3fd63b 新增删除媒体的api 2025-04-07 17:20:47 +08:00
景大侠
8d39cc87f7 提升服务端地址的兼容性 2025-04-07 16:37:41 +08:00
景大侠
d0b1348c96 fix some warnings 2025-04-07 16:21:39 +08:00
jxxghp
0afc38f6b8 Merge pull request #4103 from wikrin/v2 2025-04-07 11:07:11 +08:00
Attente
264896ba17 fix: 剧集组刮削 2025-04-07 09:25:06 +08:00
jxxghp
08decf0b82 feat:新增默认插件库 2025-04-07 08:06:59 +08:00
jxxghp
98381265e6 更新 u115.py 2025-04-07 07:37:00 +08:00
DDSRem
d323159719 Update requirements.in 2025-04-06 13:10:56 +08:00
jxxghp
7ef21e1d1c Merge pull request #4098 from DDS-Derek/dev 2025-04-06 12:02:01 +08:00
DDSRem
2d6b2ab7d7 bump: python environment upgrade 3.12
links https://github.com/jxxghp/MoviePilot/issues/3543
2025-04-06 11:56:00 +08:00
jxxghp
a1e6fd88a9 更新 version.py 2025-04-06 07:53:29 +08:00
jxxghp
e72ff867fc fix 115 pickcode 2025-04-05 09:29:08 +08:00
jxxghp
8512641984 更新 scraper.py 2025-04-04 22:13:14 +08:00
jxxghp
f1aa64d191 fix episodes group 2025-04-04 12:17:42 +08:00
jxxghp
347262538f fix episodes group 2025-04-04 08:59:12 +08:00
jxxghp
82510d60ca 更新 __init__.py 2025-04-03 22:48:29 +08:00
jxxghp
6104cd04c3 更新 context.py 2025-04-03 20:32:56 +08:00
jxxghp
44eb58426a feat:支持指定剧集组识别和刮削 2025-04-03 18:43:04 +08:00
jxxghp
078b60cc1e feat:支持指定剧集组识别和刮削 2025-04-03 18:35:02 +08:00
jxxghp
21e120a4f8 refactor:减少一次接口查询 2025-04-03 10:43:31 +08:00
jxxghp
439b834aa8 更新 version.py 2025-04-02 18:39:50 +08:00
jxxghp
ddbe8324be README增加开发说明 2025-03-30 11:36:19 +08:00
jxxghp
8ffe93113b README增加开发说明 2025-03-30 09:53:34 +08:00
jxxghp
8b31b7cb8a v2.3.6-1
- 修复媒体服务器库存检索问题
- 继续优化搜索页面
2025-03-30 09:23:46 +08:00
jxxghp
e09e21caa9 Merge pull request #4067 from cddjr/fix_media_exists 2025-03-30 02:48:19 +08:00
景大侠
20b145c679 继续修复媒体缺失问题 2025-03-30 02:41:24 +08:00
jxxghp
c5730cf1ad Merge pull request #4065 from cddjr/fix_v235_emby_bug 2025-03-29 23:18:34 +08:00
景大侠
f16b038463 修复v2.3.5引入的emby误报媒体缺失的bug 2025-03-29 23:15:58 +08:00
jxxghp
c08beec232 fix:优化未扫码报错 2025-03-29 22:02:59 +08:00
jxxghp
946361e0ae 更新 requirements.in 2025-03-29 20:30:57 +08:00
jxxghp
97cf65a231 更新 version.py 2025-03-29 20:21:54 +08:00
jxxghp
d7eb6ac15d 更新 alipan.py 2025-03-29 19:30:22 +08:00
jxxghp
075afdbb77 fix alipan upload 2025-03-29 15:39:29 +08:00
jxxghp
2ac047504a fix alipan 2025-03-29 14:52:49 +08:00
jxxghp
c44aa50ef5 fix 上传进度条 2025-03-29 14:33:45 +08:00
jxxghp
7ffafb49c4 fix alipan upload 2025-03-29 10:26:59 +08:00
jxxghp
9b7d57a853 fix alipan api 2025-03-29 09:42:23 +08:00
jxxghp
ac19b3b512 fix alipan api 2025-03-28 21:22:02 +08:00
jxxghp
b030317186 fix: 减少115遍历 2025-03-28 20:58:35 +08:00
jxxghp
b506059874 Merge pull request #4059 from cddjr/trimemedia 2025-03-28 20:13:16 +08:00
景大侠
cf7ba6e17f 移除测试代码 2025-03-28 19:54:47 +08:00
jxxghp
b7ce5663a3 fix ide warnings 2025-03-28 19:43:55 +08:00
jxxghp
58fa8064ad Merge pull request #4058 from cddjr/trimemedia
初步支持飞牛影视
2025-03-28 19:28:35 +08:00
jxxghp
ed48f56526 fix alipan 2025-03-28 17:48:30 +08:00
景大侠
896eb13f7d 初步支持飞牛影视 2025-03-28 16:26:40 +08:00
jxxghp
b8cd1c46c1 feat:Alipan Open Api 2025-03-28 13:40:29 +08:00
jxxghp
c5e84273c0 fix 115目录创建 2025-03-27 19:55:01 +08:00
jxxghp
f21653ffb7 修复115列表异常问题 2025-03-27 17:27:01 +08:00
jxxghp
65c8116cc9 fix 115列表异常处理 2025-03-27 17:26:07 +08:00
jxxghp
5e442433e5 fix 115列表出错时抛出异常 2025-03-27 12:48:19 +08:00
jxxghp
7041347e76 更新 version.py 2025-03-27 12:13:19 +08:00
jxxghp
810c205709 fix 115 2025-03-27 12:04:49 +08:00
jxxghp
ec7035990a fix 2025-03-26 20:12:08 +08:00
jxxghp
da6d9bb2bd fix 115 upload 2025-03-26 18:31:20 +08:00
jxxghp
e009043c63 fix log 2025-03-26 14:00:41 +08:00
jxxghp
79020e9338 hack fix 115 callback format error 2025-03-26 10:39:40 +08:00
jxxghp
2020244cae fix _path_to_id 2025-03-26 08:54:51 +08:00
jxxghp
43fe8f25f8 fix _path_to_id 2025-03-26 08:50:25 +08:00
jxxghp
9522888a60 fix 115 2025-03-26 08:30:30 +08:00
jxxghp
70c183ae2b try fix 115 upload 2025-03-26 07:15:31 +08:00
jxxghp
5d56eb9bef fix 115 upload 2025-03-25 21:33:29 +08:00
jxxghp
a461414a04 fix 115 callback encode 2025-03-25 20:37:46 +08:00
jxxghp
5737c3dca6 fix 115日志频率 2025-03-25 20:00:44 +08:00
jxxghp
57ea50e59c fix 115 callback 2025-03-25 19:38:39 +08:00
jxxghp
7f630e8460 fix 115 callback 2025-03-25 19:37:00 +08:00
jxxghp
108e8502e1 fix 115 上传进度 2025-03-25 19:27:53 +08:00
jxxghp
4aa986d122 fix 115 秒传检测 2025-03-25 18:26:45 +08:00
jxxghp
60239bbfc4 fix bug 2025-03-25 13:57:39 +08:00
jxxghp
93ef3b1f1a add debug logging 2025-03-25 13:48:00 +08:00
jxxghp
d9ed135be4 fix 115 2025-03-25 12:58:03 +08:00
jxxghp
e83fe0aabe fix storage logging 2025-03-25 08:34:36 +08:00
jxxghp
4be7426ae7 fix 115 2025-03-24 22:57:16 +08:00
jxxghp
0ce5ef7f56 fix 115 upload 2025-03-24 21:49:27 +08:00
jxxghp
c2c0946423 fix 115 upload 2025-03-24 21:39:03 +08:00
jxxghp
63049f61f7 fix typing 2025-03-24 19:14:04 +08:00
jxxghp
1918b0f192 fix 115 api 2025-03-24 19:11:18 +08:00
jxxghp
a3ad49b1fa fix 115 api 2025-03-24 19:03:57 +08:00
jxxghp
bed63d1e2b fix 115 api 2025-03-24 19:02:24 +08:00
jxxghp
4a8e739686 fix 115 api 2025-03-24 13:11:23 +08:00
jxxghp
d502f33041 fix 115 open api 2025-03-24 12:04:23 +08:00
jxxghp
4a0ecf36c7 fix typing 2025-03-24 08:40:18 +08:00
jxxghp
afb9e49755 fix typing 2025-03-24 08:11:02 +08:00
jxxghp
18f65e5597 fix year type 2025-03-23 23:16:11 +08:00
jxxghp
22b69f7dac fix blanke 2025-03-23 22:35:37 +08:00
jxxghp
15df062825 更新 discover.py 2025-03-23 22:23:31 +08:00
jxxghp
ed607d3895 更新 recommend.py 2025-03-23 21:57:48 +08:00
jxxghp
f9b0db623d fix cython type error 2025-03-23 21:39:37 +08:00
jxxghp
740cf12c11 fix cython errors 2025-03-23 19:09:48 +08:00
jxxghp
4c4bf698b1 更新 scheduler.py 2025-03-23 18:26:36 +08:00
jxxghp
dc74e749c9 更新 bulit-lite.yml 2025-03-23 18:03:30 +08:00
jxxghp
fa52c542d7 fix lite Dockfile 2025-03-23 15:55:02 +08:00
jxxghp
850d480c7c fix:build lite 2025-03-23 14:48:20 +08:00
jxxghp
a92cc9dce9 更新 bulit-lite.yml 2025-03-23 14:31:29 +08:00
jxxghp
4944a0a456 更新 Dockerfile.lite 2025-03-23 14:28:45 +08:00
jxxghp
13c40058a8 fix:build lite 2025-03-23 13:00:07 +08:00
jxxghp
1410c03c26 feat:build lite 2025-03-23 12:40:14 +08:00
jxxghp
2f38b3040d fix:修复代码兼容性写法 2025-03-23 12:10:21 +08:00
jxxghp
79411a7350 fix:修复代码兼容性写法 2025-03-23 09:00:24 +08:00
jxxghp
ee94c2af32 Merge pull request #4034 from DDS-Derek/dev 2025-03-22 11:31:25 +08:00
DDSRem
d46e5c8d86 bump: docker version 6.1.3 to 7.1.0 2025-03-22 11:13:06 +08:00
jxxghp
95cd10bfba fix #4014 2025-03-22 08:15:58 +08:00
jxxghp
59ed08b92d fix 115 api 2025-03-21 21:08:14 +08:00
jxxghp
2b9f7bca51 fix 115 api 2025-03-21 21:01:37 +08:00
jxxghp
a860a8c02b fix 115 open api 2025-03-21 19:06:53 +08:00
jxxghp
f2cbb8d2f7 fix 115 open api 2025-03-21 18:53:26 +08:00
jxxghp
ea61599589 add 115 open api 2025-03-21 13:27:31 +08:00
jxxghp
0b59c95f63 fix #4029 2025-03-21 11:24:08 +08:00
jxxghp
66d4308810 fix https://github.com/jxxghp/MoviePilot-Frontend/issues/312 2025-03-21 11:19:29 +08:00
jxxghp
f2648df2ad add special domains 2025-03-20 13:00:53 +08:00
jxxghp
d20f68e897 remove setup.py 2025-03-20 08:53:02 +08:00
jxxghp
338021645d 更新 requirements.in 2025-03-19 21:50:26 +08:00
jxxghp
a0a11842cb fix workflow count 2025-03-15 10:16:25 +08:00
jxxghp
f5832d6a25 Merge pull request #4012 from fanrongbin/v2 2025-03-14 17:22:23 +08:00
Robin-PC-X1C
8fa6d9de39 20250314 修改rss.py
修改原因:管理员在mp添加多个豆瓣id时,不同的豆瓣用户订阅内容,发送通知时统一为“豆瓣想看”,无法区分
修改后:增加豆瓣昵称获取,便于推送订阅通知消息时,区分豆瓣用户名称
2025-03-14 16:42:41 +08:00
jxxghp
e662338d6f Merge pull request #3995 from KoWming/v2 2025-03-10 12:48:31 +08:00
KoWming
2c1d6817dd Update security.py 2025-03-10 12:46:06 +08:00
jxxghp
5d4a3fec1f v2.3.4
- 新增支持设定消息发送的时间范围
- 探索标签页支持拖动排序
- 修复演员头像不显示的问题
- 修复站点流控不生效的问题
- 修复短时间内重复保存设定后定时任务消失的问题
- 修复工作流执行数据叠加的问题
2025-03-10 10:08:34 +08:00
jxxghp
6603a30e7e fix MessageQueueManager 2025-03-10 10:02:32 +08:00
jxxghp
81d08ca517 fix MessageQueueManager 2025-03-10 08:24:28 +08:00
jxxghp
e04506a614 fix workflow message link 2025-03-09 21:07:52 +08:00
jxxghp
39756512ae feat: 支持消息发送时间范围 2025-03-09 19:34:05 +08:00
jxxghp
71c29ea5e7 fix ide warnings 2025-03-09 18:35:52 +08:00
jxxghp
87ce266b14 fix warnings 2025-03-09 16:48:32 +08:00
jxxghp
ed6d856c24 Merge remote-tracking branch 'origin/v2' into v2 2025-03-09 16:33:01 +08:00
jxxghp
d3ecbef946 fix warnings 2025-03-09 08:37:05 +08:00
jxxghp
7b24f5eb21 fix:站点流控 2025-03-07 08:19:28 +08:00
jxxghp
e1f82e338a fix:定时任务初始化加锁 2025-03-07 08:07:57 +08:00
jxxghp
a835d34a01 Merge pull request #3975 from so1ve/patch-1 2025-03-06 06:54:11 +08:00
Ray
79d70c9977 fix: 标签为"官组"的种子应识别为官种 2025-03-05 22:10:28 +08:00
jxxghp
aea82723cb Merge pull request #3965 from mackerel-12138/fix_s0_scrap 2025-03-05 11:56:22 +08:00
zhanglijun
d47ff0b31a 修复s0集信息错误 2025-03-04 23:18:41 +08:00
jxxghp
affcb9d5c3 fix bug 2025-03-04 14:22:32 +08:00
jxxghp
9be2686733 Merge pull request #3957 from thsrite/v2 2025-03-03 14:22:06 +08:00
thsrite
7126fed2b5 fix docker container log duplicate printing 2025-03-03 13:44:38 +08:00
jxxghp
5bc4330e1c 修复HDDolby 2025-03-02 14:55:18 +08:00
jxxghp
b25ac7116e 更新 hddolby.py 2025-03-02 14:41:55 +08:00
jxxghp
8896867bb3 更新 fetch_medias.py 2025-03-02 14:23:37 +08:00
jxxghp
ba7c9eec7b fix 2025-03-02 13:16:46 +08:00
jxxghp
9b95fde8d1 v2.3.3
- 增加了多个索引和认证站点支持
- HDDolby切换为使用API(需要调整站点设置,否则无法正常刷新站点数据)
- 调整了IYUU认证使用的域名地址
- 继续完善任务工作流
2025-03-02 12:48:32 +08:00
jxxghp
2851f16395 feat:actions增加缓存机制 2025-03-02 12:27:36 +08:00
jxxghp
0d63dfb931 fix actions 2025-03-02 11:15:52 +08:00
jxxghp
37558e3135 更新 hddolby.py 2025-03-02 10:24:17 +08:00
jxxghp
96021e42a2 fix 2025-03-02 10:08:03 +08:00
jxxghp
c32b845515 feat:actions增加识别选项 2025-03-02 09:45:24 +08:00
jxxghp
147d980c54 fix hddolby 2025-03-02 08:51:09 +08:00
jxxghp
f91c43dde9 fix hddolby 2025-03-02 08:08:46 +08:00
jxxghp
4cf5cb06a0 fix hddolby 2025-03-02 08:06:25 +08:00
jxxghp
8e4b4c3144 add hddolby userdata api 2025-03-01 21:28:15 +08:00
jxxghp
c302013696 add hddolby api 2025-03-01 21:24:01 +08:00
jxxghp
37cb94c59d add hddolby api 2025-03-01 21:08:37 +08:00
jxxghp
01f7c6bc2b fix 2025-03-01 18:55:16 +08:00
jxxghp
8bd6ccb0de fix 完善事件和消息发送 2025-03-01 18:34:39 +08:00
jxxghp
ed8895dfbb v2.3.2
- 任务工作流支持手动停止、支持导入导出流程数据、完善动作组件等
2025-03-01 15:51:15 +08:00
jxxghp
a55632051b fix fetch_medias action 2025-03-01 13:54:29 +08:00
jxxghp
7e347a458d add ScanFileAction 2025-02-28 21:23:44 +08:00
jxxghp
cce71f23e2 add ScanFileAction 2025-02-28 21:11:51 +08:00
jxxghp
d68461a127 更新 scheduler.py 2025-02-28 19:37:39 +08:00
jxxghp
1bd12a9411 feat:工作流手动中止 2025-02-28 19:02:38 +08:00
jxxghp
4086ba4763 更新 version.py 2025-02-28 12:30:45 +08:00
jxxghp
6a9cdf71d7 fix AddDownloadAction 2025-02-28 12:12:52 +08:00
jxxghp
a9644c4f86 fix actions 2025-02-28 11:56:26 +08:00
jxxghp
cf62ad5e8e fix actions 2025-02-28 11:15:24 +08:00
jxxghp
f8ed16666c fix actions execute 2025-02-27 20:39:42 +08:00
jxxghp
37926b4c19 fix actions 2025-02-27 18:58:11 +08:00
jxxghp
b080a2003f fix actions 2025-02-27 17:08:38 +08:00
jxxghp
ab0008be86 fix actions 2025-02-27 13:09:01 +08:00
jxxghp
4a42b0d000 fix import 2025-02-26 21:13:41 +08:00
jxxghp
e3d4b19dac fix actionid type 2025-02-26 20:28:10 +08:00
jxxghp
403d600db4 fix workflow edit api 2025-02-26 19:06:30 +08:00
jxxghp
835e6e8891 fix workflow scheduler 2025-02-26 18:32:25 +08:00
jxxghp
eec25113b5 fix workflow scheduler 2025-02-26 18:24:27 +08:00
jxxghp
a7c4161f91 fix workflow executor 2025-02-26 12:57:57 +08:00
jxxghp
799eb9e6ef add workflow executor 2025-02-26 08:37:37 +08:00
jxxghp
88993cb67b fix workflow api 2025-02-25 17:27:21 +08:00
jxxghp
0dc9c98c06 fix workflow api 2025-02-25 13:35:32 +08:00
jxxghp
c1c91cec44 fix workflow api 2025-02-25 13:25:56 +08:00
jxxghp
19b6927320 fix workflow process 2025-02-25 12:42:15 +08:00
jxxghp
0889ebc8b8 fix workflow schema 2025-02-25 08:25:19 +08:00
jxxghp
fb249c0ea5 fix workflow excute 2025-02-25 08:22:02 +08:00
jxxghp
feb22ff0a7 Merge pull request #3922 from WingGao/v2 2025-02-22 17:51:13 +08:00
WingGao
3c95156ce1 fix: alist不应该缓存失败的结果 2025-02-22 15:05:04 +08:00
jxxghp
8b6dca6a46 fix bug 2025-02-22 11:22:21 +08:00
jxxghp
43907eea26 fix 2025-02-22 11:12:14 +08:00
jxxghp
67145a80d0 add workflow apis 2025-02-22 10:35:57 +08:00
jxxghp
0b3138fec6 fix actions 2025-02-22 09:57:32 +08:00
jxxghp
b84896b4f9 Merge pull request #3919 from InfinityPacer/feature/plugin 2025-02-22 07:46:02 +08:00
InfinityPacer
efd046d2f8 fix(plugin): handle None response for online plugins retrieval 2025-02-22 00:34:35 +08:00
jxxghp
06fcf817bb Merge pull request #3917 from gtsicko/v2 2025-02-21 07:29:23 +08:00
gtsicko
16a94d9054 fix: 修复带路径的WECHAT_PROXY不生效 2025-02-20 23:41:14 +08:00
jxxghp
5bf502188d fix 2025-02-20 19:32:58 +08:00
jxxghp
5269b4bc82 fix #3914
feat:搜索支持指定站点
2025-02-20 13:03:12 +08:00
jxxghp
e3f8ed9886 add downloads path 2025-02-20 10:51:22 +08:00
jxxghp
74de554fb0 Merge pull request #3914 from TimoYoung/v2 2025-02-19 18:01:49 +08:00
jxxghp
b41de1a982 fix actions 2025-02-19 17:44:14 +08:00
Timo_Young
25f7d9ccdd Merge branch 'jxxghp:v2' into v2 2025-02-19 17:28:22 +08:00
yangyux
9646745181 fix: mtype为空且tmdbid在movie和tv中都存在时的识别错误问题 2025-02-19 17:27:38 +08:00
jxxghp
1317d9c4f0 fix actions 2025-02-19 16:43:42 +08:00
jxxghp
351029a842 fix AddDownloadAction 2025-02-19 15:24:13 +08:00
jxxghp
15e1fb61ac fix actions 2025-02-19 08:33:15 +08:00
jxxghp
1889a829b5 fix workflow process 2025-02-19 08:16:35 +08:00
jxxghp
53a14fce38 fix workflow process 2025-02-19 08:15:49 +08:00
jxxghp
d9ed7b09c7 v2.3.0
- 站点资源浏览支持关键字和分类搜索,优化了界面,修改了站点卡片点击时的交互行为
- 优化了APP模式下更多菜单、滚动条等多处UI细节
2025-02-18 17:05:24 +08:00
jxxghp
4dcb18f00e fix: site browse api 2025-02-18 16:32:10 +08:00
jxxghp
0a52fe0a7a refactor: site browse api 2025-02-17 19:01:05 +08:00
jxxghp
e5a4d11cf9 fix workflow 2025-02-17 15:08:24 +08:00
jxxghp
6c233f13de fix workflow chain 2025-02-17 12:38:29 +08:00
jxxghp
00aee3496c add workflow oper 2025-02-17 11:54:11 +08:00
jxxghp
77ae40e3d6 fix workflow 2025-02-17 11:40:32 +08:00
jxxghp
68cba44476 fix modules load 2025-02-16 17:24:17 +08:00
jxxghp
b86d06f632 add workflow lifecycle 2025-02-16 16:53:38 +08:00
jxxghp
0b7cf305a0 add action templates 2025-02-16 13:45:15 +08:00
jxxghp
21ae36bc3a add action templates 2025-02-16 12:52:29 +08:00
jxxghp
4e2d9e9165 Merge pull request #3899 from Mister-album/v2-sync 2025-02-15 08:10:15 +08:00
Mister-album
6cee308894 添加为指定字幕添加.default后缀设置为默认字幕功能 2025-02-14 19:58:29 +08:00
jxxghp
b8f4cd5fea v2.2.9
- 资源包升级以提升安全性
- 优化了页面数据刷新机制

注意:本次升级后会默认清理一次种子识别缓存
2025-02-14 19:35:49 +08:00
jxxghp
aa1557ad9e fix setup 2025-02-14 17:37:10 +08:00
jxxghp
f03da6daca fix setup 2025-02-14 17:17:16 +08:00
jxxghp
30eb4385d4 fix sites 2025-02-14 13:44:18 +08:00
jxxghp
4c9afcc1a8 fix 2025-02-14 13:32:20 +08:00
jxxghp
dd47432a45 fix 2025-02-14 12:55:32 +08:00
jxxghp
0ba6974bd6 fix #3843
fix #3829
2025-02-13 08:08:13 +08:00
jxxghp
827d8f6d84 add workflow framework 2025-02-12 17:49:01 +08:00
jxxghp
943a462c69 Merge pull request #3885 from InfinityPacer/feature/security 2025-02-11 17:21:04 +08:00
InfinityPacer
a1bc773fb5 feat(security): add AVIF support 2025-02-11 17:10:50 +08:00
InfinityPacer
ac169b7d22 feat(security): add cache default extension for files without suffix 2025-02-11 17:09:43 +08:00
jxxghp
eecbbfea3a 更新 version.py 2025-02-10 22:28:06 +08:00
jxxghp
635ddb044e add depends for DiscoverMediaSource 2025-02-10 22:05:56 +08:00
jxxghp
1a6123489d 更新 config.py 2025-02-10 07:52:40 +08:00
jxxghp
4e69195a8d Merge pull request #3876 from InfinityPacer/feature/security 2025-02-10 07:11:28 +08:00
InfinityPacer
e48c8ee652 Revert "fix is_safe_url"
This reverts commit 5e2ad34864.
2025-02-10 02:22:53 +08:00
InfinityPacer
7df07b86b9 feat(security): add cmvideo image for http with port 2025-02-10 02:19:08 +08:00
194 changed files with 8883 additions and 3279 deletions

55
.github/workflows/bulit-lite.yml vendored Normal file
View File

@@ -0,0 +1,55 @@
name: MoviePilot Builder v2 Lite
on:
workflow_dispatch:
push:
branches:
- v2
paths:
- 'version.py'
jobs:
Docker-build:
runs-on: ubuntu-latest
name: Build Docker Image
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Release version
id: release_version
run: |
app_version=$(cat version.py |sed -ne "s/APP_VERSION\s=\s'v\(.*\)'/\1/gp")
echo "app_version=$app_version" >> $GITHUB_ENV
- name: Docker Meta
id: meta
uses: docker/metadata-action@v5
with:
images: ${{ secrets.DOCKER_USERNAME }}/moviepilot-v2
tags: |
type=raw,value=lite-latest
- name: Set Up QEMU
uses: docker/setup-qemu-action@v3
- name: Set Up Buildx
uses: docker/setup-buildx-action@v3
- name: Login DockerHub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Build Image
uses: docker/build-push-action@v5
with:
context: .
file: Dockerfile.lite
platforms: |
linux/amd64
push: true
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
cache-from: type=gha, scope=${{ github.workflow }}-docker
cache-to: type=gha, scope=${{ github.workflow }}-docker

3
.gitignore vendored
View File

@@ -1,6 +1,9 @@
.idea/ .idea/
*.c *.c
*.so
*.pyd
build/ build/
cython_cache/
dist/ dist/
nginx/ nginx/
test.py test.py

View File

@@ -1,4 +1,4 @@
FROM python:3.11.4-slim-bookworm FROM python:3.12.8-slim-bookworm
ENV LANG="C.UTF-8" \ ENV LANG="C.UTF-8" \
TZ="Asia/Shanghai" \ TZ="Asia/Shanghai" \
HOME="/moviepilot" \ HOME="/moviepilot" \
@@ -38,7 +38,6 @@ RUN apt-get update -y \
then ln -s /usr/lib/aarch64-linux-musl/libc.so /lib/libc.musl-aarch64.so.1; \ then ln -s /usr/lib/aarch64-linux-musl/libc.so /lib/libc.musl-aarch64.so.1; \
fi \ fi \
&& curl https://rclone.org/install.sh | bash \ && curl https://rclone.org/install.sh | bash \
&& curl --insecure -fsSL https://raw.githubusercontent.com/DDS-Derek/Aria2-Pro-Core/master/aria2-install.sh | bash \
&& apt-get autoremove -y \ && apt-get autoremove -y \
&& apt-get clean -y \ && apt-get clean -y \
&& rm -rf \ && rm -rf \

92
Dockerfile.lite Normal file
View File

@@ -0,0 +1,92 @@
FROM python:3.12.8-slim-bookworm
ENV LANG="C.UTF-8" \
TZ="Asia/Shanghai" \
HOME="/moviepilot" \
CONFIG_DIR="/config" \
TERM="xterm" \
DISPLAY=:987 \
PUID=0 \
PGID=0 \
UMASK=000 \
PORT=3001 \
NGINX_PORT=3000 \
MOVIEPILOT_AUTO_UPDATE=release
WORKDIR "/app"
RUN apt-get update -y \
&& apt-get upgrade -y \
&& apt-get -y install \
musl-dev \
nginx \
gettext-base \
locales \
procps \
gosu \
bash \
wget \
curl \
busybox \
dumb-init \
jq \
fuse3 \
rsync \
ffmpeg \
nano \
&& \
if [ "$(uname -m)" = "x86_64" ]; \
then ln -s /usr/lib/x86_64-linux-musl/libc.so /lib/libc.musl-x86_64.so.1; \
elif [ "$(uname -m)" = "aarch64" ]; \
then ln -s /usr/lib/aarch64-linux-musl/libc.so /lib/libc.musl-aarch64.so.1; \
fi \
&& curl https://rclone.org/install.sh | bash \
&& apt-get autoremove -y \
&& apt-get clean -y \
&& rm -rf \
/tmp/* \
/moviepilot/.cache \
/var/lib/apt/lists/* \
/var/tmp/*
COPY requirements.in requirements.in
RUN apt-get update -y \
&& apt-get install -y build-essential \
&& pip install --upgrade pip \
&& pip install Cython pip-tools \
&& pip-compile requirements.in \
&& pip install -r requirements.txt \
&& playwright install-deps chromium \
&& apt-get remove -y build-essential \
&& apt-get autoremove -y \
&& apt-get clean -y \
&& rm -rf \
/tmp/* \
/moviepilot/.cache \
/var/lib/apt/lists/* \
/var/tmp/*
COPY . .
RUN cp -f /app/nginx.conf /etc/nginx/nginx.template.conf \
&& cp -f /app/update /usr/local/bin/mp_update \
&& cp -f /app/entrypoint /entrypoint \
&& cp -f /app/docker_http_proxy.conf /etc/nginx/docker_http_proxy.conf \
&& chmod +x /entrypoint /usr/local/bin/mp_update \
&& mkdir -p ${HOME} \
&& groupadd -r moviepilot -g 918 \
&& useradd -r moviepilot -g moviepilot -d ${HOME} -s /bin/bash -u 918 \
&& python_ver=$(python3 -V | awk '{print $2}') \
&& echo "/app/" > /usr/local/lib/python${python_ver%.*}/site-packages/app.pth \
&& echo 'fs.inotify.max_user_watches=5242880' >> /etc/sysctl.conf \
&& echo 'fs.inotify.max_user_instances=5242880' >> /etc/sysctl.conf \
&& locale-gen zh_CN.UTF-8 \
&& python3 /app/setup.py \
&& find /app/app -type f -name "*.py" ! -path "/app/app/main.py" -exec rm -f {} \; \
&& FRONTEND_VERSION=$(sed -n "s/^FRONTEND_VERSION\s*=\s*'\([^']*\)'/\1/p" /app/version.py) \
&& curl -sL "https://github.com/jxxghp/MoviePilot-Frontend/releases/download/${FRONTEND_VERSION}/dist.zip" | busybox unzip -d / - \
&& mv /dist /public \
&& curl -sL "https://github.com/jxxghp/MoviePilot-Plugins/archive/refs/heads/main.zip" | busybox unzip -d /tmp - \
&& mv -f /tmp/MoviePilot-Plugins-main/plugins.v2/* /app/app/plugins/ \
&& cat /tmp/MoviePilot-Plugins-main/package.json | jq -r 'to_entries[] | select(.value.v2 == true) | .key' | awk '{print tolower($0)}' | \
while read -r i; do if [ ! -d "/app/app/plugins/$i" ]; then mv "/tmp/MoviePilot-Plugins-main/plugins/$i" "/app/app/plugins/"; else echo "跳过 $i"; fi; done \
&& curl -sL "https://github.com/jxxghp/MoviePilot-Resources/archive/refs/heads/main.zip" | busybox unzip -d /tmp - \
&& mv -f /tmp/MoviePilot-Resources-main/resources/* /app/app/helper/ \
&& rm -rf /tmp/* /app/build
EXPOSE 3000
VOLUME [ "/config" ]
ENTRYPOINT [ "/entrypoint" ]

View File

@@ -26,6 +26,34 @@
访问官方Wikihttps://wiki.movie-pilot.org 访问官方Wikihttps://wiki.movie-pilot.org
## 参与开发
需要 `Python 3.12``Node JS v20.12.1`
- 克隆主项目 [MoviePilot](https://github.com/jxxghp/MoviePilot)
```shell
git clone https://github.com/jxxghp/MoviePilot
```
- 克隆资源项目 [MoviePilot-Resources](https://github.com/jxxghp/MoviePilot-Resources) ,将 `resources` 目录下对应平台及版本的库 `.so`/`.pyd`/`.bin` 文件复制到 `app/helper` 目录
```shell
git clone https://github.com/jxxghp/MoviePilot-Resources
```
- 安装后端依赖,设置`app`为源代码根目录,运行 `main.py` 启动后端服务,默认监听端口:`3001`API文档地址`http://localhost:3001/docs`
```shell
pip install -r requirements.txt
python3 main.py
```
- 克隆前端项目 [MoviePilot-Frontend](https://github.com/jxxghp/MoviePilot-Frontend)
```shell
git clone https://github.com/jxxghp/MoviePilot-Frontend
```
- 安装前端依赖,运行前端项目,访问:`http://localhost:5173`
```shell
yarn
yarn dev
```
- 参考 [插件开发指引](https://wiki.movie-pilot.org/zh/plugindev) 在 `app/plugins` 目录下开发插件代码
## 贡献者 ## 贡献者
<a href="https://github.com/jxxghp/MoviePilot/graphs/contributors"> <a href="https://github.com/jxxghp/MoviePilot/graphs/contributors">

106
app/actions/__init__.py Normal file
View File

@@ -0,0 +1,106 @@
from abc import ABC, abstractmethod
from typing import Union
from app.chain import ChainBase
from app.db.systemconfig_oper import SystemConfigOper
from app.schemas import ActionContext, ActionParams
class ActionChain(ChainBase):
pass
class BaseAction(ABC):
"""
工作流动作基类
"""
# 动作ID
_action_id = None
# 完成标志
_done_flag = False
# 执行信息
_message = ""
# 缓存键值
_cache_key = "WorkflowCache-%s"
def __init__(self, action_id: str):
self._action_id = action_id
self.systemconfigoper = SystemConfigOper()
@classmethod
@property
@abstractmethod
def name(cls) -> str: # noqa
pass
@classmethod
@property
@abstractmethod
def description(cls) -> str: # noqa
pass
@classmethod
@property
@abstractmethod
def data(cls) -> dict: # noqa
pass
@property
def done(self) -> bool:
"""
判断动作是否完成
"""
return self._done_flag
@property
@abstractmethod
def success(self) -> bool:
"""
判断动作是否成功
"""
pass
@property
def message(self) -> str:
"""
执行信息
"""
return self._message
def job_done(self, message: str = None):
"""
标记动作完成
"""
self._message = message
self._done_flag = True
def check_cache(self, workflow_id: int, key: str) -> bool:
"""
检查是否处理过
"""
workflow_key = self._cache_key % workflow_id
workflow_cache = self.systemconfigoper.get(workflow_key) or {}
action_cache = workflow_cache.get(self._action_id) or []
return key in action_cache
def save_cache(self, workflow_id: int, data: Union[list, str]):
"""
保存缓存
"""
workflow_key = self._cache_key % workflow_id
workflow_cache = self.systemconfigoper.get(workflow_key) or {}
action_cache = workflow_cache.get(self._action_id) or []
if isinstance(data, list):
action_cache.extend(data)
else:
action_cache.append(data)
workflow_cache[self._action_id] = action_cache
self.systemconfigoper.set(workflow_key, workflow_cache)
@abstractmethod
def execute(self, workflow_id: int, params: ActionParams, context: ActionContext) -> ActionContext:
"""
执行动作
"""
raise NotImplementedError

121
app/actions/add_download.py Normal file
View File

@@ -0,0 +1,121 @@
from typing import Optional
from pydantic import Field
from app.actions import BaseAction
from app.chain.download import DownloadChain
from app.chain.media import MediaChain
from app.core.config import global_vars
from app.core.metainfo import MetaInfo
from app.log import logger
from app.schemas import ActionParams, ActionContext, DownloadTask, MediaType
class AddDownloadParams(ActionParams):
"""
添加下载资源参数
"""
downloader: Optional[str] = Field(default=None, description="下载器")
save_path: Optional[str] = Field(default=None, description="保存路径")
labels: Optional[str] = Field(default=None, description="标签(,分隔)")
only_lack: Optional[bool] = Field(default=False, description="仅下载缺失的资源")
class AddDownloadAction(BaseAction):
"""
添加下载资源
"""
# 已添加的下载
_added_downloads = []
_has_error = False
def __init__(self, action_id: str):
super().__init__(action_id)
self.downloadchain = DownloadChain()
self.mediachain = MediaChain()
self._added_downloads = []
self._has_error = False
@classmethod
@property
def name(cls) -> str: # noqa
return "添加下载"
@classmethod
@property
def description(cls) -> str: # noqa
return "根据资源列表添加下载任务"
@classmethod
@property
def data(cls) -> dict: # noqa
return AddDownloadParams().dict()
@property
def success(self) -> bool:
return not self._has_error
def execute(self, workflow_id: int, params: dict, context: ActionContext) -> ActionContext:
"""
将上下文中的torrents添加到下载任务中
"""
params = AddDownloadParams(**params)
_started = False
for t in context.torrents:
if global_vars.is_workflow_stopped(workflow_id):
break
# 检查缓存
cache_key = f"{t.torrent_info.site}-{t.torrent_info.title}"
if self.check_cache(workflow_id, cache_key):
logger.info(f"{t.torrent_info.title} 已添加过下载,跳过")
continue
if not t.meta_info:
t.meta_info = MetaInfo(title=t.torrent_info.title, subtitle=t.torrent_info.description)
if not t.media_info:
t.media_info = self.mediachain.recognize_media(meta=t.meta_info)
if not t.media_info:
self._has_error = True
logger.warning(f"{t.torrent_info.title} 未识别到媒体信息,无法下载")
continue
if params.only_lack:
exists_info = self.downloadchain.media_exists(t.media_info)
if exists_info:
if t.media_info.type == MediaType.MOVIE:
# 电影
logger.warning(f"{t.torrent_info.title} 媒体库中已存在,跳过")
continue
else:
# 电视剧
exists_seasons = exists_info.seasons or {}
if len(t.meta_info.season_list) > 1:
# 多季不下载
logger.warning(f"{t.meta_info.title} 有多季,跳过")
continue
else:
exists_episodes = exists_seasons.get(t.meta_info.begin_season)
if exists_episodes:
if set(t.meta_info.episode_list).issubset(exists_episodes):
logger.warning(f"{t.meta_info.title}{t.meta_info.begin_season} 季第 {t.meta_info.episode_list} 集已存在,跳过")
continue
_started = True
did = self.downloadchain.download_single(context=t,
downloader=params.downloader,
save_path=params.save_path,
label=params.labels)
if did:
self._added_downloads.append(did)
# 保存缓存
self.save_cache(workflow_id, cache_key)
if self._added_downloads:
logger.info(f"已添加 {len(self._added_downloads)} 个下载任务")
context.downloads.extend(
[DownloadTask(download_id=did, downloader=params.downloader) for did in self._added_downloads]
)
elif _started:
self._has_error = True
self.job_done(f"已添加 {len(self._added_downloads)} 个下载任务")
return context

View File

@@ -0,0 +1,92 @@
from app.actions import BaseAction
from app.chain.subscribe import SubscribeChain
from app.core.config import settings, global_vars
from app.core.context import MediaInfo
from app.db.subscribe_oper import SubscribeOper
from app.log import logger
from app.schemas import ActionParams, ActionContext
class AddSubscribeParams(ActionParams):
"""
添加订阅参数
"""
pass
class AddSubscribeAction(BaseAction):
"""
添加订阅
"""
_added_subscribes = []
_has_error = False
def __init__(self, action_id: str):
super().__init__(action_id)
self.subscribechain = SubscribeChain()
self.subscribeoper = SubscribeOper()
self._added_subscribes = []
self._has_error = False
@classmethod
@property
def name(cls) -> str: # noqa
return "添加订阅"
@classmethod
@property
def description(cls) -> str: # noqa
return "根据媒体列表添加订阅"
@classmethod
@property
def data(cls) -> dict: # noqa
return AddSubscribeParams().dict()
@property
def success(self) -> bool:
return not self._has_error
def execute(self, workflow_id: int, params: dict, context: ActionContext) -> ActionContext:
"""
将medias中的信息添加订阅如果订阅不存在的话
"""
_started = False
for media in context.medias:
if global_vars.is_workflow_stopped(workflow_id):
break
# 检查缓存
cache_key = f"{media.type}-{media.title}-{media.year}-{media.season}"
if self.check_cache(workflow_id, cache_key):
logger.info(f"{media.title} {media.year} 已添加过订阅,跳过")
continue
mediainfo = MediaInfo()
mediainfo.from_dict(media.dict())
if self.subscribechain.exists(mediainfo):
logger.info(f"{media.title} 已存在订阅")
continue
# 添加订阅
_started = True
sid, message = self.subscribechain.add(mtype=mediainfo.type,
title=mediainfo.title,
year=mediainfo.year,
tmdbid=mediainfo.tmdb_id,
season=mediainfo.season,
doubanid=mediainfo.douban_id,
bangumiid=mediainfo.bangumi_id,
username=settings.SUPERUSER)
if sid:
self._added_subscribes.append(sid)
# 保存缓存
self.save_cache(workflow_id, cache_key)
if self._added_subscribes:
logger.info(f"已添加 {len(self._added_subscribes)} 个订阅")
for sid in self._added_subscribes:
context.subscribes.append(self.subscribeoper.get(sid))
elif _started:
self._has_error = True
self.job_done(f"已添加 {len(self._added_subscribes)} 个订阅")
return context

View File

@@ -0,0 +1,68 @@
from app.actions import BaseAction, ActionChain
from app.core.config import global_vars
from app.schemas import ActionParams, ActionContext
from app.log import logger
class FetchDownloadsParams(ActionParams):
"""
获取下载任务参数
"""
pass
class FetchDownloadsAction(BaseAction):
    """
    Refresh the completion state of the download tasks in the workflow
    context by querying the downloader for each torrent hash.
    """

    # Snapshot of the download tasks processed by the last execute() call
    _downloads = []

    def __init__(self, action_id: str):
        super().__init__(action_id)
        self.chain = ActionChain()
        self._downloads = []

    @classmethod
    @property
    def name(cls) -> str: # noqa
        return "获取下载任务"

    @classmethod
    @property
    def description(cls) -> str: # noqa
        return "获取下载队列中的任务状态"

    @classmethod
    @property
    def data(cls) -> dict: # noqa
        return FetchDownloadsParams().dict()

    @property
    def success(self) -> bool:
        return self.done

    def execute(self, workflow_id: int, params: dict, context: ActionContext) -> ActionContext:
        """
        Update the status of the download tasks in ``context.downloads``.

        BUGFIX: the original looped over ``self._downloads``, which is
        reset to ``[]`` in ``__init__`` and never populated, so no task
        status was ever refreshed and ``all([])`` always reported done.
        Also removed the unused ``__all_complete`` local.
        """
        self._downloads = context.downloads
        for download in self._downloads:
            if global_vars.is_workflow_stopped(workflow_id):
                break
            logger.info(f"获取下载任务 {download.download_id} 状态 ...")
            torrents = self.chain.list_torrents(hashs=[download.download_id])
            if not torrents:
                # Torrent vanished from the downloader: treat as finished
                # so the workflow is not blocked forever.
                download.completed = True
                continue
            for t in torrents:
                download.path = t.path
                if t.progress >= 100:
                    logger.info(f"下载任务 {download.download_id} 已完成")
                    download.completed = True
                else:
                    logger.info(f"下载任务 {download.download_id} 未完成")
                    download.completed = False
        # Mark the action done only when every task completed
        # (all() over an empty list is True, matching the original).
        if all([d.completed for d in self._downloads]):
            self.job_done()
        return context

176
app/actions/fetch_medias.py Normal file
View File

@@ -0,0 +1,176 @@
from typing import List, Optional
from pydantic import Field
from app.actions import BaseAction
from app.chain.recommend import RecommendChain
from app.schemas import ActionParams, ActionContext
from app.core.config import settings, global_vars
from app.core.event import eventmanager
from app.log import logger
from app.schemas import RecommendSourceEventData, MediaInfo
from app.schemas.types import ChainEventType
from app.utils.http import RequestUtils
class FetchMediasParams(ActionParams):
    """
    Parameters for the fetch-media-data action.
    """
    # "ranking": pull from the named ranking sources below;
    # any other value: POST the internal API at api_path instead
    source_type: Optional[str] = Field(default="ranking", description="来源")
    # Display names of ranking sources to fetch (see FetchMediasAction)
    sources: Optional[List[str]] = Field(default=[], description="榜单")
    # Internal API path used when source_type is not "ranking"
    api_path: Optional[str] = Field(default=None, description="API路径")
class FetchMediasAction(BaseAction):
    """
    Collect media items from built-in ranking sources (TMDB / Douban /
    Bangumi), from extra sources contributed by plugins via the
    RecommendSource event, or from an arbitrary internal API path, and
    append the results to the workflow context.
    """

    # BUGFIX: renamed from ``_inner_sources`` so the class-level default
    # matches the name-mangled instance attribute assigned in __init__
    # (the old single-underscore class attribute was dead — never read).
    __inner_sources = []
    _medias = []
    _has_error = False

    def __init__(self, action_id: str):
        super().__init__(action_id)
        self._medias = []
        self._has_error = False
        # Built-in ranking sources: display name -> fetch callable
        self.__inner_sources = [
            {
                "func": RecommendChain().tmdb_trending,
                "name": '流行趋势',
            },
            {
                "func": RecommendChain().douban_movie_showing,
                "name": '正在热映',
            },
            {
                "func": RecommendChain().bangumi_calendar,
                "name": 'Bangumi每日放送',
            },
            {
                "func": RecommendChain().tmdb_movies,
                "name": 'TMDB热门电影',
            },
            {
                "func": RecommendChain().tmdb_tvs,
                "name": 'TMDB热门电视剧',
            },
            {
                "func": RecommendChain().douban_movie_hot,
                "name": '豆瓣热门电影',
            },
            {
                "func": RecommendChain().douban_tv_hot,
                "name": '豆瓣热门电视剧',
            },
            {
                "func": RecommendChain().douban_tv_animation,
                "name": '豆瓣热门动漫',
            },
            {
                "func": RecommendChain().douban_movies,
                "name": '豆瓣最新电影',
            },
            {
                "func": RecommendChain().douban_tvs,
                "name": '豆瓣最新电视剧',
            },
            {
                "func": RecommendChain().douban_movie_top250,
                "name": '豆瓣电影TOP250',
            },
            {
                "func": RecommendChain().douban_tv_weekly_chinese,
                "name": '豆瓣国产剧集榜',
            },
            {
                "func": RecommendChain().douban_tv_weekly_global,
                "name": '豆瓣全球剧集榜',
            }
        ]
        # Broadcast an event asking plugins for extra recommend sources
        event_data = RecommendSourceEventData()
        event = eventmanager.send_event(ChainEventType.RecommendSource, event_data)
        # Merge any sources returned through the event's context data
        if event and event.event_data:
            event_data: RecommendSourceEventData = event.event_data
            if event_data.extra_sources:
                self.__inner_sources.extend([s.dict() for s in event_data.extra_sources])

    @classmethod
    @property
    def name(cls) -> str: # noqa
        return "获取媒体数据"

    @classmethod
    @property
    def description(cls) -> str: # noqa
        return "获取榜单等媒体数据列表"

    @classmethod
    @property
    def data(cls) -> dict: # noqa
        return FetchMediasParams().dict()

    @property
    def success(self) -> bool:
        return not self._has_error

    def __get_source(self, source: str):
        """
        Look up a configured source by its display name; None if absent.
        """
        for s in self.__inner_sources:
            if s['name'] == source:
                return s
        return None

    def execute(self, workflow_id: int, params: dict, context: ActionContext) -> ActionContext:
        """
        Fetch media data and append it to ``context.medias``.
        """
        params = FetchMediasParams(**params)
        try:
            if params.source_type == "ranking":
                for name in params.sources:
                    if global_vars.is_workflow_stopped(workflow_id):
                        break
                    source = self.__get_source(name)
                    if not source:
                        continue
                    # BUGFIX: log the source name instead of the raw dict
                    # (which rendered bound-method reprs into the log).
                    logger.info(f"获取媒体数据 {name} ...")
                    results = []
                    if source.get("func"):
                        results = source['func']()
                    else:
                        # Plugin source without a callable: call the internal API
                        api_url = f"http://127.0.0.1:{settings.PORT}/api/v1/{source['api_path']}?token={settings.API_TOKEN}"
                        res = RequestUtils(timeout=15).post_res(api_url)
                        if res:
                            results = res.json()
                    if results:
                        logger.info(f"{name} 获取到 {len(results)} 条数据")
                        self._medias.extend([MediaInfo(**r) for r in results])
                    else:
                        logger.error(f"{name} 获取数据失败")
            else:
                # Direct API mode: fetch from the given internal API path
                api_url = f"http://127.0.0.1:{settings.PORT}{params.api_path}?token={settings.API_TOKEN}"
                res = RequestUtils(timeout=15).post_res(api_url)
                if res:
                    results = res.json()
                    if results:
                        logger.info(f"{params.api_path} 获取到 {len(results)} 条数据")
                        self._medias.extend([MediaInfo(**r) for r in results])
        except Exception as e:
            logger.error(f"获取媒体数据失败: {e}")
            self._has_error = True
        if self._medias:
            context.medias.extend(self._medias)
        # BUGFIX: message typo "条媒数据" -> "条媒体数据"
        self.job_done(f"获取到 {len(self._medias)} 条媒体数据")
        return context

117
app/actions/fetch_rss.py Normal file
View File

@@ -0,0 +1,117 @@
from typing import Optional
from pydantic import Field
from app.actions import BaseAction, ActionChain
from app.core.config import settings, global_vars
from app.core.context import Context
from app.core.metainfo import MetaInfo
from app.helper.rss import RssHelper
from app.log import logger
from app.schemas import ActionParams, ActionContext, TorrentInfo
class FetchRssParams(ActionParams):
    """
    Parameters for the fetch-RSS action.
    """
    # Required in practice: execute() returns immediately when unset
    url: str = Field(default=None, description="RSS地址")
    proxy: Optional[bool] = Field(default=False, description="是否使用代理")
    timeout: Optional[int] = Field(default=15, description="超时时间")
    content_type: Optional[str] = Field(default=None, description="Content-Type")
    referer: Optional[str] = Field(default=None, description="Referer")
    ua: Optional[str] = Field(default=None, description="User-Agent")
    # When truthy, entries whose media info cannot be recognised are dropped
    match_media: Optional[str] = Field(default=None, description="匹配媒体信息")
class FetchRssAction(BaseAction):
    """
    Fetch an RSS feed, convert each entry into a TorrentInfo and
    (optionally) resolve its media info, appending the results to the
    workflow context's torrent list.
    """

    _rss_torrents = []
    _has_error = False

    def __init__(self, action_id: str):
        super().__init__(action_id)
        self.rsshelper = RssHelper()
        self.chain = ActionChain()
        self._rss_torrents = []
        self._has_error = False

    @classmethod
    @property
    def name(cls) -> str: # noqa
        return "获取RSS资源"

    @classmethod
    @property
    def description(cls) -> str: # noqa
        return "订阅RSS地址获取资源"

    @classmethod
    @property
    def data(cls) -> dict: # noqa
        return FetchRssParams().dict()

    @property
    def success(self) -> bool:
        return not self._has_error

    def execute(self, workflow_id: int, params: dict, context: ActionContext) -> ActionContext:
        """
        Request the RSS URL and parse the entries into a resource list.
        """
        params = FetchRssParams(**params)
        if not params.url:
            return context
        # Optional request headers
        headers = {}
        if params.content_type:
            headers["Content-Type"] = params.content_type
        if params.referer:
            headers["Referer"] = params.referer
        if params.ua:
            headers["User-Agent"] = params.ua
        rss_items = self.rsshelper.parse(url=params.url,
                                         proxy=settings.PROXY if params.proxy else None,
                                         timeout=params.timeout,
                                         headers=headers)
        # None/False signal a request/parse failure (distinct from an
        # empty-but-valid feed handled below)
        if rss_items is None or rss_items is False:
            logger.error(f'RSS地址 {params.url} 请求失败!')
            self._has_error = True
            return context
        if not rss_items:
            logger.error(f'RSS地址 {params.url} 未获取到RSS数据')
            return context
        # Assemble torrents from the feed entries
        for item in rss_items:
            if global_vars.is_workflow_stopped(workflow_id):
                break
            if not item.get("title"):
                continue
            torrentinfo = TorrentInfo(
                title=item.get("title"),
                # BUGFIX: carry the feed entry's description through — it was
                # never set, so MetaInfo's subtitle below was always None and
                # subtitle-based recognition never had data to work with.
                description=item.get("description"),
                enclosure=item.get("enclosure"),
                page_url=item.get("link"),
                size=item.get("size"),
                pubdate=item["pubdate"].strftime("%Y-%m-%d %H:%M:%S") if item.get("pubdate") else None,
            )
            meta = MetaInfo(title=torrentinfo.title, subtitle=torrentinfo.description)
            mediainfo = None
            if params.match_media:
                mediainfo = self.chain.recognize_media(meta)
                if not mediainfo:
                    logger.warning(f"{torrentinfo.title} 未识别到媒体信息")
                    continue
            self._rss_torrents.append(Context(meta_info=meta, media_info=mediainfo, torrent_info=torrentinfo))
        if self._rss_torrents:
            logger.info(f"获取到 {len(self._rss_torrents)} 个RSS资源")
            context.torrents.extend(self._rss_torrents)
        self.job_done(f"获取到 {len(self._rss_torrents)} 个资源")
        return context

View File

@@ -0,0 +1,104 @@
import random
import time
from typing import Optional, List
from pydantic import Field
from app.actions import BaseAction
from app.chain.search import SearchChain
from app.core.config import global_vars
from app.log import logger
from app.schemas import ActionParams, ActionContext, MediaType
class FetchTorrentsParams(ActionParams):
    """
    Parameters for the search-site-resources action.
    """
    # "keyword": search by name/year/type/season; anything else searches
    # per media item already present in the workflow context
    search_type: Optional[str] = Field(default="keyword", description="搜索类型")
    name: Optional[str] = Field(default=None, description="资源名称")
    year: Optional[str] = Field(default=None, description="年份")
    type: Optional[str] = Field(default=None, description="资源类型 (电影/电视剧)")
    season: Optional[int] = Field(default=None, description="季度")
    sites: Optional[List[int]] = Field(default=[], description="站点列表")
    match_media: Optional[bool] = Field(default=False, description="匹配媒体信息")
class FetchTorrentsAction(BaseAction):
    """
    Search site torrents either by keyword or for each media item in the
    workflow context, appending the results to the context's torrents.
    """

    _torrents = []

    def __init__(self, action_id: str):
        super().__init__(action_id)
        self.searchchain = SearchChain()
        self._torrents = []

    @classmethod
    @property
    def name(cls) -> str: # noqa
        return "搜索站点资源"

    @classmethod
    @property
    def description(cls) -> str: # noqa
        return "搜索站点种子资源列表"

    @classmethod
    @property
    def data(cls) -> dict: # noqa
        return FetchTorrentsParams().dict()

    @property
    def success(self) -> bool:
        return self.done

    def execute(self, workflow_id: int, params: dict, context: ActionContext) -> ActionContext:
        """
        Search the sites and collect the resulting resource list.
        """
        params = FetchTorrentsParams(**params)
        if params.search_type == "keyword":
            # Search by keyword, then post-filter by year/type/season
            torrents = self.searchchain.search_by_title(title=params.name, sites=params.sites)
            for torrent in torrents:
                if global_vars.is_workflow_stopped(workflow_id):
                    break
                if params.year and torrent.meta_info.year != params.year:
                    continue
                if params.type and torrent.media_info and torrent.media_info.type != MediaType(params.type):
                    continue
                if params.season and torrent.meta_info.begin_season != params.season:
                    continue
                # Optionally drop results whose media info can't be recognised
                if params.match_media:
                    torrent.media_info = self.searchchain.recognize_media(torrent.meta_info)
                    if not torrent.media_info:
                        logger.warning(f"{torrent.torrent_info.title} 未识别到媒体信息")
                        continue
                self._torrents.append(torrent)
        else:
            # Search once per media item already in the workflow context
            for media in context.medias:
                if global_vars.is_workflow_stopped(workflow_id):
                    break
                torrents = self.searchchain.search_by_id(tmdbid=media.tmdb_id,
                                                         doubanid=media.douban_id,
                                                         mtype=MediaType(media.type),
                                                         sites=params.sites)
                for torrent in torrents:
                    self._torrents.append(torrent)
                # Random 5-30s sleep between searches to avoid hammering sites
                # (note: also sleeps after the last item)
                sleep_time = random.randint(5, 30)
                logger.info(f"随机休眠 {sleep_time} 秒 ...")
                time.sleep(sleep_time)
        if self._torrents:
            context.torrents.extend(self._torrents)
            logger.info(f"共搜索到 {len(self._torrents)} 条资源")
        self.job_done(f"搜索到 {len(self._torrents)} 个资源")
        return context

View File

@@ -0,0 +1,71 @@
from typing import Optional
from pydantic import Field
from app.actions import BaseAction
from app.core.config import global_vars
from app.log import logger
from app.schemas import ActionParams, ActionContext
class FilterMediasParams(ActionParams):
    """
    Parameters for the filter-media-data action.
    """
    type: Optional[str] = Field(default=None, description="媒体类型 (电影/电视剧)")
    # Minimum vote average; 0 disables the rating filter
    vote: Optional[int] = Field(default=0, description="评分")
    year: Optional[str] = Field(default=None, description="年份")
class FilterMediasAction(BaseAction):
    """
    Filter the media list in the workflow context by type, minimum vote
    average and year; only matching items are kept.
    """

    _medias = []

    def __init__(self, action_id: str):
        super().__init__(action_id)
        self._medias = []

    @classmethod
    @property
    def name(cls) -> str: # noqa
        return "过滤媒体数据"

    @classmethod
    @property
    def description(cls) -> str: # noqa
        return "对媒体数据列表进行过滤"

    @classmethod
    @property
    def data(cls) -> dict: # noqa
        return FilterMediasParams().dict()

    @property
    def success(self) -> bool:
        return self.done

    def execute(self, workflow_id: int, params: dict, context: ActionContext) -> ActionContext:
        """
        Filter the media items in ``context.medias`` in place.
        """
        params = FilterMediasParams(**params)
        for media in context.medias:
            if global_vars.is_workflow_stopped(workflow_id):
                break
            if params.type and media.type != params.type:
                continue
            # ROBUSTNESS: vote_average may be None for items from sources
            # without ratings; treat missing as 0 instead of letting the
            # comparison raise TypeError.
            if params.vote and (media.vote_average or 0) < params.vote:
                continue
            if params.year and media.year != params.year:
                continue
            self._medias.append(media)
        logger.info(f"过滤后剩余 {len(self._medias)} 条媒体数据")
        context.medias = self._medias
        self.job_done(f"过滤后剩余 {len(self._medias)} 条媒体数据")
        return context

View File

@@ -0,0 +1,88 @@
from typing import Optional, List
from pydantic import Field
from app.actions import BaseAction, ActionChain
from app.core.config import global_vars
from app.helper.torrent import TorrentHelper
from app.log import logger
from app.schemas import ActionParams, ActionContext
class FilterTorrentsParams(ActionParams):
    """
    Parameters for the filter-resources action.
    """
    rule_groups: Optional[List[str]] = Field(default=[], description="规则组")
    quality: Optional[str] = Field(default=None, description="资源质量")
    resolution: Optional[str] = Field(default=None, description="资源分辨率")
    effect: Optional[str] = Field(default=None, description="特效")
    include: Optional[str] = Field(default=None, description="包含规则")
    exclude: Optional[str] = Field(default=None, description="排除规则")
    size: Optional[str] = Field(default=None, description="资源大小范围MB")
class FilterTorrentsAction(BaseAction):
    """
    Filter the context's torrent list: a torrent is kept only when it
    passes both the basic attribute filter (quality/resolution/etc.) and
    the configured rule groups.
    """

    _torrents = []

    def __init__(self, action_id: str):
        super().__init__(action_id)
        self.torrenthelper = TorrentHelper()
        self.chain = ActionChain()
        self._torrents = []

    @classmethod
    @property
    def name(cls) -> str: # noqa
        return "过滤资源"

    @classmethod
    @property
    def description(cls) -> str: # noqa
        return "对资源列表数据进行过滤"

    @classmethod
    @property
    def data(cls) -> dict: # noqa
        return FilterTorrentsParams().dict()

    @property
    def success(self) -> bool:
        return self.done

    def execute(self, workflow_id: int, params: dict, context: ActionContext) -> ActionContext:
        """
        Filter the resources in ``context.torrents`` in place.
        """
        params = FilterTorrentsParams(**params)
        for torrent in context.torrents:
            if global_vars.is_workflow_stopped(workflow_id):
                break
            # First pass: simple attribute filter on the torrent itself
            if self.torrenthelper.filter_torrent(
                    torrent_info=torrent.torrent_info,
                    filter_params={
                        "quality": params.quality,
                        "resolution": params.resolution,
                        "effect": params.effect,
                        "include": params.include,
                        "exclude": params.exclude,
                        "size": params.size
                    }
            ):
                # Second pass: rule-group based filtering via the chain
                if self.chain.filter_torrents(
                        rule_groups=params.rule_groups,
                        torrent_list=[torrent.torrent_info],
                        mediainfo=torrent.media_info
                ):
                    self._torrents.append(torrent)
        logger.info(f"过滤后剩余 {len(self._torrents)} 个资源")
        context.torrents = self._torrents
        self.job_done(f"过滤后剩余 {len(self._torrents)} 个资源")
        return context

86
app/actions/scan_file.py Normal file
View File

@@ -0,0 +1,86 @@
from pathlib import Path
from typing import Optional
from pydantic import Field
from app.actions import BaseAction
from app.chain.storage import StorageChain
from app.core.config import global_vars, settings
from app.log import logger
from app.schemas import ActionParams, ActionContext
class ScanFileParams(ActionParams):
    """
    Parameters for the scan-directory action.
    """
    # Storage backend identifier (e.g. "local")
    storage: Optional[str] = Field(default="local", description="存储")
    # Directory to scan recursively for media files
    directory: Optional[str] = Field(default=None, description="目录")
class ScanFileAction(BaseAction):
    """
    Recursively scan a storage directory and queue every media file
    (by extension) into the workflow context's fileitems.
    """

    _fileitems = []
    _has_error = False

    def __init__(self, action_id: str):
        super().__init__(action_id)
        self.storagechain = StorageChain()
        self._fileitems = []
        self._has_error = False

    @classmethod
    @property
    def name(cls) -> str: # noqa
        return "扫描目录"

    @classmethod
    @property
    def description(cls) -> str: # noqa
        return "扫描目录文件到队列"

    @classmethod
    @property
    def data(cls) -> dict: # noqa
        return ScanFileParams().dict()

    @property
    def success(self) -> bool:
        return not self._has_error

    def execute(self, workflow_id: int, params: dict, context: ActionContext) -> ActionContext:
        """
        Record all media files under the directory into fileitems.
        """
        params = ScanFileParams(**params)
        if not params.storage or not params.directory:
            return context
        fileitem = self.storagechain.get_file_item(params.storage, Path(params.directory))
        if not fileitem:
            # Fixed format string: closing 】 was missing after the storage name
            logger.error(f"目录不存在:【{params.storage}】{params.directory}")
            self._has_error = True
            return context
        files = self.storagechain.list_files(fileitem, recursion=True)
        for file in files:
            if global_vars.is_workflow_stopped(workflow_id):
                break
            # Keep only recognised media file extensions
            if not file.extension or f".{file.extension.lower()}" not in settings.RMT_MEDIAEXT:
                continue
            # Skip files already handled by a previous run (workflow cache)
            cache_key = f"{file.path}"
            if self.check_cache(workflow_id, cache_key):
                logger.info(f"{file.path} 已处理过,跳过")
                continue
            # BUGFIX: append the scanned file, not the root directory item —
            # the original appended ``fileitem`` (the directory) once per
            # matching file, so the queue never contained the files.
            self._fileitems.append(file)
            self.save_cache(workflow_id, cache_key)
        if self._fileitems:
            context.fileitems.extend(self._fileitems)
        self.job_done(f"扫描到 {len(self._fileitems)} 个文件")
        return context

View File

@@ -0,0 +1,86 @@
from pathlib import Path
from app.actions import BaseAction
from app.core.config import global_vars
from app.schemas import ActionParams, ActionContext
from app.chain.media import MediaChain
from app.chain.storage import StorageChain
from app.core.metainfo import MetaInfoPath
from app.log import logger
class ScrapeFileParams(ActionParams):
    """
    Parameters for the scrape-files action.

    This action takes no parameters; the class exists so the workflow UI
    can render an (empty) parameter form via ``ScrapeFileAction.data``.
    """
    pass
class ScrapeFileAction(BaseAction):
    """
    Scrape metadata and artwork for every file item queued in the
    workflow context.
    """

    # File items already scraped in this run
    _scraped_files = []
    _has_error = False

    def __init__(self, action_id: str):
        super().__init__(action_id)
        self.storagechain = StorageChain()
        self.mediachain = MediaChain()
        self._scraped_files = []
        self._has_error = False

    @classmethod
    @property
    def name(cls) -> str: # noqa
        return "刮削文件"

    @classmethod
    @property
    def description(cls) -> str: # noqa
        return "刮削媒体信息和图片"

    @classmethod
    @property
    def data(cls) -> dict: # noqa
        return ScrapeFileParams().dict()

    @property
    def success(self) -> bool:
        return not self._has_error

    def execute(self, workflow_id: int, params: dict, context: ActionContext) -> ActionContext:
        """
        Scrape every file in ``context.fileitems``.
        """
        # Count of items whose media info could not be recognised
        _failed_count = 0
        for fileitem in context.fileitems:
            if global_vars.is_workflow_stopped(workflow_id):
                break
            if fileitem in self._scraped_files:
                continue
            if not self.storagechain.exists(fileitem):
                continue
            # Skip files scraped by a previous run (workflow cache)
            cache_key = f"{fileitem.path}"
            if self.check_cache(workflow_id, cache_key):
                logger.info(f"{fileitem.path} 已刮削过,跳过")
                continue
            meta = MetaInfoPath(Path(fileitem.path))
            mediainfo = self.mediachain.recognize_media(meta)
            if not mediainfo:
                _failed_count += 1
                logger.info(f"{fileitem.path} 未识别到媒体信息,无法刮削")
                continue
            self.mediachain.scrape_metadata(fileitem=fileitem, meta=meta, mediainfo=mediainfo)
            self._scraped_files.append(fileitem)
            # Record success in the workflow cache
            self.save_cache(workflow_id, cache_key)
        # NOTE(review): an error is only flagged when *nothing* was scraped
        # and there were failures; partial failure counts as success — confirm
        # this is intended.
        if not self._scraped_files and _failed_count:
            self._has_error = True
        self.job_done(f"成功刮削 {len(self._scraped_files)} 个文件,失败 {_failed_count}")
        return context

48
app/actions/send_event.py Normal file
View File

@@ -0,0 +1,48 @@
from app.actions import BaseAction
from app.core.event import eventmanager
from app.schemas import ActionParams, ActionContext
from app.schemas.types import ChainEventType
class SendEventParams(ActionParams):
    """
    Parameters for the send-event action.

    This action takes no parameters; the class exists so the workflow UI
    can render an (empty) parameter form via ``SendEventAction.data``.
    """
    pass
class SendEventAction(BaseAction):
    """
    Broadcast a workflow-execution event so that plugins can inspect and
    modify the execution context.
    """

    @classmethod
    @property
    def name(cls) -> str: # noqa
        return "发送事件"

    @classmethod
    @property
    def description(cls) -> str: # noqa
        return "发送任务执行事件"

    @classmethod
    @property
    def data(cls) -> dict: # noqa
        return SendEventParams().dict()

    @property
    def success(self) -> bool:
        return self.done

    def execute(self, workflow_id: int, params: dict, context: ActionContext) -> ActionContext:
        """
        Send the workflow event so plugins can intervene in execution.
        """
        # Broadcast the current context; a handler may return a replacement
        event = eventmanager.send_event(ChainEventType.WorkflowExecution, context)
        if event and event.event_data:
            context = event.event_data
        self.job_done()
        return context

View File

@@ -0,0 +1,74 @@
from typing import List, Optional, Union

from pydantic import Field

from app.actions import BaseAction, ActionChain
# BUGFIX: was "from core.config import settings" — missing the "app."
# package prefix used by every other module in this package, which would
# raise ImportError at module load.
from app.core.config import settings
from app.schemas import ActionParams, ActionContext, Notification
class SendMessageParams(ActionParams):
    """
    Parameters for the send-message action.
    """
    # Message channel names; empty means the default channel
    client: Optional[List[str]] = Field(default=[], description="消息渠道")
    userid: Optional[Union[str, int]] = Field(default=None, description="用户ID")
class SendMessageAction(BaseAction):
    """
    Send a summary message of the workflow run to the configured channels.
    """

    def __init__(self, action_id: str):
        super().__init__(action_id)
        self.chain = ActionChain()

    @classmethod
    @property
    def name(cls) -> str: # noqa
        return "发送消息"

    @classmethod
    @property
    def description(cls) -> str: # noqa
        return "发送任务执行消息"

    @classmethod
    @property
    def data(cls) -> dict: # noqa
        return SendMessageParams().dict()

    @property
    def success(self) -> bool:
        return self.done

    def execute(self, workflow_id: int, params: dict, context: ActionContext) -> ActionContext:
        """
        Compose and send the execution summary from the context messages.
        """
        params = SendMessageParams(**params)
        msg_text = f"当前进度:{context.progress}%"
        index = 1
        if context.execute_history:
            # One numbered line per action that produced a message
            for history in context.execute_history:
                if not history.message:
                    continue
                msg_text += f"\n{index}. {history.action}{history.message}"
                index += 1
        # Send the message; an empty client list means "default channel"
        if not params.client:
            params.client = [""]
        for client in params.client:
            self.chain.post_message(
                Notification(
                    source=client,
                    userid=params.userid,
                    title="【工作流执行结果】",
                    text=msg_text,
                    link=settings.MP_DOMAIN("#/workflow")
                )
            )
        self.job_done()
        return context

View File

@@ -0,0 +1,139 @@
import copy
from pathlib import Path
from typing import Optional
from pydantic import Field
from app.actions import BaseAction
from app.core.config import global_vars
from app.db.transferhistory_oper import TransferHistoryOper
from app.schemas import ActionParams, ActionContext
from app.chain.storage import StorageChain
from app.chain.transfer import TransferChain
from app.log import logger
class TransferFileParams(ActionParams):
    """
    Parameters for the organise-files action.
    """
    # "downloads": transfer completed download tasks;
    # anything else: transfer the queued fileitems
    source: Optional[str] = Field(default="downloads", description="来源")
class TransferFileAction(BaseAction):
    """
    Organise (transfer/rename) media files — either the completed items
    in the context's download list or the queued items in fileitems —
    recording per-file results in the workflow cache.
    """

    _fileitems = []
    _has_error = False

    def __init__(self, action_id: str):
        super().__init__(action_id)
        self.transferchain = TransferChain()
        self.storagechain = StorageChain()
        self.transferhis = TransferHistoryOper()
        self._fileitems = []
        self._has_error = False

    @classmethod
    @property
    def name(cls) -> str: # noqa
        return "整理文件"

    @classmethod
    @property
    def description(cls) -> str: # noqa
        return "整理队列中的文件"

    @classmethod
    @property
    def data(cls) -> dict: # noqa
        return TransferFileParams().dict()

    @property
    def success(self) -> bool:
        return not self._has_error

    def execute(self, workflow_id: int, params: dict, context: ActionContext) -> ActionContext:
        """
        Transfer files from downloads / fileitems, recording the
        successfully organised items back into fileitems.
        """

        def check_continue():
            """
            Abort callback for the transfer chain: returning False stops a
            long-running transfer when the workflow has been stopped.
            """
            if global_vars.is_workflow_stopped(workflow_id):
                return False
            return True

        params = TransferFileParams(**params)
        # Number of failed transfers
        _failed_count = 0
        if params.source == "downloads":
            # Transfer the files of completed download tasks
            for download in context.downloads:
                if global_vars.is_workflow_stopped(workflow_id):
                    break
                if not download.completed:
                    logger.info(f"下载任务 {download.download_id} 未完成")
                    continue
                # Skip downloads already transferred in a previous run
                cache_key = f"{download.download_id}"
                if self.check_cache(workflow_id, cache_key):
                    logger.info(f"{download.path} 已整理过,跳过")
                    continue
                fileitem = self.storagechain.get_file_item(storage="local", path=Path(download.path))
                if not fileitem:
                    logger.info(f"文件 {download.path} 不存在")
                    continue
                transferd = self.transferhis.get_by_src(fileitem.path, storage=fileitem.storage)
                if transferd:
                    # Already organised earlier (transfer history) — skip
                    continue
                logger.info(f"开始整理文件 {download.path} ...")
                state, errmsg = self.transferchain.do_transfer(fileitem, background=False)
                if not state:
                    _failed_count += 1
                    logger.error(f"整理文件 {download.path} 失败: {errmsg}")
                    continue
                logger.info(f"整理文件 {download.path} 完成")
                self._fileitems.append(fileitem)
                self.save_cache(workflow_id, cache_key)
        else:
            # Transfer the queued file items; iterate a deep copy because
            # processed items are removed from context.fileitems in the loop
            for fileitem in copy.deepcopy(context.fileitems):
                if not check_continue():
                    break
                # Skip files already transferred in a previous run
                cache_key = f"{fileitem.path}"
                if self.check_cache(workflow_id, cache_key):
                    logger.info(f"{fileitem.path} 已整理过,跳过")
                    continue
                transferd = self.transferhis.get_by_src(fileitem.path, storage=fileitem.storage)
                if transferd:
                    # Already organised earlier (transfer history) — skip
                    continue
                logger.info(f"开始整理文件 {fileitem.path} ...")
                state, errmsg = self.transferchain.do_transfer(fileitem, background=False,
                                                               continue_callback=check_continue)
                if not state:
                    _failed_count += 1
                    logger.error(f"整理文件 {fileitem.path} 失败: {errmsg}")
                    continue
                logger.info(f"整理文件 {fileitem.path} 完成")
                # Remove the processed item from the pending queue
                context.fileitems.remove(fileitem)
                self._fileitems.append(fileitem)
                # Record success in the workflow cache
                self.save_cache(workflow_id, cache_key)
        if self._fileitems:
            context.fileitems.extend(self._fileitems)
        elif _failed_count:
            # NOTE(review): errors are only flagged when *nothing* succeeded;
            # partial failures still count as success — confirm intended.
            self._has_error = True
        self.job_done(f"整理成功 {len(self._fileitems)} 个文件,失败 {_failed_count}")
        return context

View File

@@ -2,7 +2,7 @@ from fastapi import APIRouter
from app.api.endpoints import login, user, site, message, webhook, subscribe, \ from app.api.endpoints import login, user, site, message, webhook, subscribe, \
media, douban, search, plugin, tmdb, history, system, download, dashboard, \ media, douban, search, plugin, tmdb, history, system, download, dashboard, \
transfer, mediaserver, bangumi, storage, discover, recommend transfer, mediaserver, bangumi, storage, discover, recommend, workflow
api_router = APIRouter() api_router = APIRouter()
api_router.include_router(login.router, prefix="/login", tags=["login"]) api_router.include_router(login.router, prefix="/login", tags=["login"])
@@ -26,3 +26,4 @@ api_router.include_router(mediaserver.router, prefix="/mediaserver", tags=["medi
api_router.include_router(bangumi.router, prefix="/bangumi", tags=["bangumi"]) api_router.include_router(bangumi.router, prefix="/bangumi", tags=["bangumi"])
api_router.include_router(discover.router, prefix="/discover", tags=["discover"]) api_router.include_router(discover.router, prefix="/discover", tags=["discover"])
api_router.include_router(recommend.router, prefix="/recommend", tags=["recommend"]) api_router.include_router(recommend.router, prefix="/recommend", tags=["recommend"])
api_router.include_router(workflow.router, prefix="/workflow", tags=["workflow"])

View File

@@ -1,4 +1,4 @@
from typing import List, Any from typing import List, Any, Optional
from fastapi import APIRouter, Depends from fastapi import APIRouter, Depends
@@ -12,8 +12,8 @@ router = APIRouter()
@router.get("/credits/{bangumiid}", summary="查询Bangumi演职员表", response_model=List[schemas.MediaPerson]) @router.get("/credits/{bangumiid}", summary="查询Bangumi演职员表", response_model=List[schemas.MediaPerson])
def bangumi_credits(bangumiid: int, def bangumi_credits(bangumiid: int,
page: int = 1, page: Optional[int] = 1,
count: int = 20, count: Optional[int] = 20,
_: schemas.TokenPayload = Depends(verify_token)) -> Any: _: schemas.TokenPayload = Depends(verify_token)) -> Any:
""" """
查询Bangumi演职员表 查询Bangumi演职员表
@@ -26,8 +26,8 @@ def bangumi_credits(bangumiid: int,
@router.get("/recommend/{bangumiid}", summary="查询Bangumi推荐", response_model=List[schemas.MediaInfo]) @router.get("/recommend/{bangumiid}", summary="查询Bangumi推荐", response_model=List[schemas.MediaInfo])
def bangumi_recommend(bangumiid: int, def bangumi_recommend(bangumiid: int,
page: int = 1, page: Optional[int] = 1,
count: int = 20, count: Optional[int] = 20,
_: schemas.TokenPayload = Depends(verify_token)) -> Any: _: schemas.TokenPayload = Depends(verify_token)) -> Any:
""" """
查询Bangumi推荐 查询Bangumi推荐
@@ -49,8 +49,8 @@ def bangumi_person(person_id: int,
@router.get("/person/credits/{person_id}", summary="人物参演作品", response_model=List[schemas.MediaInfo]) @router.get("/person/credits/{person_id}", summary="人物参演作品", response_model=List[schemas.MediaInfo])
def bangumi_person_credits(person_id: int, def bangumi_person_credits(person_id: int,
page: int = 1, page: Optional[int] = 1,
count: int = 20, count: Optional[int] = 20,
_: schemas.TokenPayload = Depends(verify_token)) -> Any: _: schemas.TokenPayload = Depends(verify_token)) -> Any:
""" """
根据人物ID查询人物参演作品 根据人物ID查询人物参演作品

View File

@@ -1,5 +1,5 @@
from pathlib import Path from pathlib import Path
from typing import Any, List, Optional from typing import Any, List, Optional, Annotated
from fastapi import APIRouter, Depends from fastapi import APIRouter, Depends
from sqlalchemy.orm import Session from sqlalchemy.orm import Session
@@ -18,7 +18,7 @@ router = APIRouter()
@router.get("/statistic", summary="媒体数量统计", response_model=schemas.Statistic) @router.get("/statistic", summary="媒体数量统计", response_model=schemas.Statistic)
def statistic(name: str = None, _: schemas.TokenPayload = Depends(verify_token)) -> Any: def statistic(name: Optional[str] = None, _: schemas.TokenPayload = Depends(verify_token)) -> Any:
""" """
查询媒体数量统计信息 查询媒体数量统计信息
""" """
@@ -37,7 +37,7 @@ def statistic(name: str = None, _: schemas.TokenPayload = Depends(verify_token))
@router.get("/statistic2", summary="媒体数量统计API_TOKEN", response_model=schemas.Statistic) @router.get("/statistic2", summary="媒体数量统计API_TOKEN", response_model=schemas.Statistic)
def statistic2(_: str = Depends(verify_apitoken)) -> Any: def statistic2(_: Annotated[str, Depends(verify_apitoken)]) -> Any:
""" """
查询媒体数量统计信息 API_TOKEN认证?token=xxx 查询媒体数量统计信息 API_TOKEN认证?token=xxx
""" """
@@ -66,7 +66,7 @@ def storage(_: schemas.TokenPayload = Depends(verify_token)) -> Any:
@router.get("/storage2", summary="本地存储空间API_TOKEN", response_model=schemas.Storage) @router.get("/storage2", summary="本地存储空间API_TOKEN", response_model=schemas.Storage)
def storage2(_: str = Depends(verify_apitoken)) -> Any: def storage2(_: Annotated[str, Depends(verify_apitoken)]) -> Any:
""" """
查询本地存储空间信息 API_TOKEN认证?token=xxx 查询本地存储空间信息 API_TOKEN认证?token=xxx
""" """
@@ -82,7 +82,7 @@ def processes(_: schemas.TokenPayload = Depends(verify_token)) -> Any:
@router.get("/downloader", summary="下载器信息", response_model=schemas.DownloaderInfo) @router.get("/downloader", summary="下载器信息", response_model=schemas.DownloaderInfo)
def downloader(name: str = None, _: schemas.TokenPayload = Depends(verify_token)) -> Any: def downloader(name: Optional[str] = None, _: schemas.TokenPayload = Depends(verify_token)) -> Any:
""" """
查询下载器信息 查询下载器信息
""" """
@@ -103,7 +103,7 @@ def downloader(name: str = None, _: schemas.TokenPayload = Depends(verify_token)
@router.get("/downloader2", summary="下载器信息API_TOKEN", response_model=schemas.DownloaderInfo) @router.get("/downloader2", summary="下载器信息API_TOKEN", response_model=schemas.DownloaderInfo)
def downloader2(_: str = Depends(verify_apitoken)) -> Any: def downloader2(_: Annotated[str, Depends(verify_apitoken)]) -> Any:
""" """
查询下载器信息 API_TOKEN认证?token=xxx 查询下载器信息 API_TOKEN认证?token=xxx
""" """
@@ -119,7 +119,7 @@ def schedule(_: schemas.TokenPayload = Depends(verify_token)) -> Any:
@router.get("/schedule2", summary="后台服务API_TOKEN", response_model=List[schemas.ScheduleInfo]) @router.get("/schedule2", summary="后台服务API_TOKEN", response_model=List[schemas.ScheduleInfo])
def schedule2(_: str = Depends(verify_apitoken)) -> Any: def schedule2(_: Annotated[str, Depends(verify_apitoken)]) -> Any:
""" """
查询下载器信息 API_TOKEN认证?token=xxx 查询下载器信息 API_TOKEN认证?token=xxx
""" """
@@ -127,7 +127,7 @@ def schedule2(_: str = Depends(verify_apitoken)) -> Any:
@router.get("/transfer", summary="文件整理统计", response_model=List[int]) @router.get("/transfer", summary="文件整理统计", response_model=List[int])
def transfer(days: int = 7, db: Session = Depends(get_db), def transfer(days: Optional[int] = 7, db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)) -> Any: _: schemas.TokenPayload = Depends(verify_token)) -> Any:
""" """
查询文件整理统计信息 查询文件整理统计信息
@@ -145,7 +145,7 @@ def cpu(_: schemas.TokenPayload = Depends(verify_token)) -> Any:
@router.get("/cpu2", summary="获取当前CPU使用率API_TOKEN", response_model=int) @router.get("/cpu2", summary="获取当前CPU使用率API_TOKEN", response_model=int)
def cpu2(_: str = Depends(verify_apitoken)) -> Any: def cpu2(_: Annotated[str, Depends(verify_apitoken)]) -> Any:
""" """
获取当前CPU使用率 API_TOKEN认证?token=xxx 获取当前CPU使用率 API_TOKEN认证?token=xxx
""" """
@@ -161,7 +161,7 @@ def memory(_: schemas.TokenPayload = Depends(verify_token)) -> Any:
@router.get("/memory2", summary="获取当前内存使用量和使用率API_TOKEN", response_model=List[int]) @router.get("/memory2", summary="获取当前内存使用量和使用率API_TOKEN", response_model=List[int])
def memory2(_: str = Depends(verify_apitoken)) -> Any: def memory2(_: Annotated[str, Depends(verify_apitoken)]) -> Any:
""" """
获取当前内存使用率 API_TOKEN认证?token=xxx 获取当前内存使用率 API_TOKEN认证?token=xxx
""" """

View File

@@ -1,4 +1,4 @@
from typing import Any, List from typing import Any, List, Optional
from fastapi import APIRouter, Depends from fastapi import APIRouter, Depends
@@ -31,12 +31,12 @@ def source(_: schemas.TokenPayload = Depends(verify_token)) -> Any:
@router.get("/bangumi", summary="探索Bangumi", response_model=List[schemas.MediaInfo]) @router.get("/bangumi", summary="探索Bangumi", response_model=List[schemas.MediaInfo])
def bangumi(type: int = 2, def bangumi(type: Optional[int] = 2,
cat: int = None, cat: Optional[int] = None,
sort: str = 'rank', sort: Optional[str] = 'rank',
year: int = None, year: Optional[str] = None,
page: int = 1, page: Optional[int] = 1,
count: int = 30, count: Optional[int] = 30,
_: schemas.TokenPayload = Depends(verify_token)) -> Any: _: schemas.TokenPayload = Depends(verify_token)) -> Any:
""" """
探索Bangumi 探索Bangumi
@@ -49,10 +49,10 @@ def bangumi(type: int = 2,
@router.get("/douban_movies", summary="探索豆瓣电影", response_model=List[schemas.MediaInfo]) @router.get("/douban_movies", summary="探索豆瓣电影", response_model=List[schemas.MediaInfo])
def douban_movies(sort: str = "R", def douban_movies(sort: Optional[str] = "R",
tags: str = "", tags: Optional[str] = "",
page: int = 1, page: Optional[int] = 1,
count: int = 30, count: Optional[int] = 30,
_: schemas.TokenPayload = Depends(verify_token)) -> Any: _: schemas.TokenPayload = Depends(verify_token)) -> Any:
""" """
浏览豆瓣电影信息 浏览豆瓣电影信息
@@ -63,10 +63,10 @@ def douban_movies(sort: str = "R",
@router.get("/douban_tvs", summary="探索豆瓣剧集", response_model=List[schemas.MediaInfo]) @router.get("/douban_tvs", summary="探索豆瓣剧集", response_model=List[schemas.MediaInfo])
def douban_tvs(sort: str = "R", def douban_tvs(sort: Optional[str] = "R",
tags: str = "", tags: Optional[str] = "",
page: int = 1, page: Optional[int] = 1,
count: int = 30, count: Optional[int] = 30,
_: schemas.TokenPayload = Depends(verify_token)) -> Any: _: schemas.TokenPayload = Depends(verify_token)) -> Any:
""" """
浏览豆瓣剧集信息 浏览豆瓣剧集信息
@@ -77,15 +77,15 @@ def douban_tvs(sort: str = "R",
@router.get("/tmdb_movies", summary="探索TMDB电影", response_model=List[schemas.MediaInfo]) @router.get("/tmdb_movies", summary="探索TMDB电影", response_model=List[schemas.MediaInfo])
def tmdb_movies(sort_by: str = "popularity.desc", def tmdb_movies(sort_by: Optional[str] = "popularity.desc",
with_genres: str = "", with_genres: Optional[str] = "",
with_original_language: str = "", with_original_language: Optional[str] = "",
with_keywords: str = "", with_keywords: Optional[str] = "",
with_watch_providers: str = "", with_watch_providers: Optional[str] = "",
vote_average: float = 0, vote_average: Optional[float] = 0.0,
vote_count: int = 0, vote_count: Optional[int] = 0,
release_date: str = "", release_date: Optional[str] = "",
page: int = 1, page: Optional[int] = 1,
_: schemas.TokenPayload = Depends(verify_token)) -> Any: _: schemas.TokenPayload = Depends(verify_token)) -> Any:
""" """
浏览TMDB电影信息 浏览TMDB电影信息
@@ -104,15 +104,15 @@ def tmdb_movies(sort_by: str = "popularity.desc",
@router.get("/tmdb_tvs", summary="探索TMDB剧集", response_model=List[schemas.MediaInfo]) @router.get("/tmdb_tvs", summary="探索TMDB剧集", response_model=List[schemas.MediaInfo])
def tmdb_tvs(sort_by: str = "popularity.desc", def tmdb_tvs(sort_by: Optional[str] = "popularity.desc",
with_genres: str = "", with_genres: Optional[str] = "",
with_original_language: str = "", with_original_language: Optional[str] = "",
with_keywords: str = "", with_keywords: Optional[str] = "",
with_watch_providers: str = "", with_watch_providers: Optional[str] = "",
vote_average: float = 0, vote_average: Optional[float] = 0.0,
vote_count: int = 0, vote_count: Optional[int] = 0,
release_date: str = "", release_date: Optional[str] = "",
page: int = 1, page: Optional[int] = 1,
_: schemas.TokenPayload = Depends(verify_token)) -> Any: _: schemas.TokenPayload = Depends(verify_token)) -> Any:
""" """
浏览TMDB剧集信息 浏览TMDB剧集信息

View File

@@ -1,4 +1,4 @@
from typing import Any, List from typing import Any, List, Optional
from fastapi import APIRouter, Depends from fastapi import APIRouter, Depends
@@ -22,7 +22,7 @@ def douban_person(person_id: int,
@router.get("/person/credits/{person_id}", summary="人物参演作品", response_model=List[schemas.MediaInfo]) @router.get("/person/credits/{person_id}", summary="人物参演作品", response_model=List[schemas.MediaInfo])
def douban_person_credits(person_id: int, def douban_person_credits(person_id: int,
page: int = 1, page: Optional[int] = 1,
_: schemas.TokenPayload = Depends(verify_token)) -> Any: _: schemas.TokenPayload = Depends(verify_token)) -> Any:
""" """
根据人物ID查询人物参演作品 根据人物ID查询人物参演作品

View File

@@ -1,4 +1,4 @@
from typing import Any, List from typing import Any, List, Annotated, Optional
from fastapi import APIRouter, Depends, Body from fastapi import APIRouter, Depends, Body
@@ -18,7 +18,7 @@ router = APIRouter()
@router.get("/", summary="正在下载", response_model=List[schemas.DownloadingTorrent]) @router.get("/", summary="正在下载", response_model=List[schemas.DownloadingTorrent])
def current( def current(
name: str = None, name: Optional[str] = None,
_: schemas.TokenPayload = Depends(verify_token)) -> Any: _: schemas.TokenPayload = Depends(verify_token)) -> Any:
""" """
查询正在下载的任务 查询正在下载的任务
@@ -30,8 +30,8 @@ def current(
def download( def download(
media_in: schemas.MediaInfo, media_in: schemas.MediaInfo,
torrent_in: schemas.TorrentInfo, torrent_in: schemas.TorrentInfo,
downloader: str = Body(None), downloader: Annotated[str | None, Body()] = None,
save_path: str = Body(None), save_path: Annotated[str | None, Body()] = None,
current_user: User = Depends(get_current_active_user)) -> Any: current_user: User = Depends(get_current_active_user)) -> Any:
""" """
添加下载任务(含媒体信息) 添加下载任务(含媒体信息)
@@ -62,8 +62,8 @@ def download(
@router.post("/add", summary="添加下载(不含媒体信息)", response_model=schemas.Response) @router.post("/add", summary="添加下载(不含媒体信息)", response_model=schemas.Response)
def add( def add(
torrent_in: schemas.TorrentInfo, torrent_in: schemas.TorrentInfo,
downloader: str = Body(None), downloader: Annotated[str | None, Body()] = None,
save_path: str = Body(None), save_path: Annotated[str | None, Body()] = None,
current_user: User = Depends(get_current_active_user)) -> Any: current_user: User = Depends(get_current_active_user)) -> Any:
""" """
添加下载任务(不含媒体信息) 添加下载任务(不含媒体信息)

View File

@@ -1,4 +1,4 @@
from typing import List, Any from typing import List, Any, Optional
import jieba import jieba
from fastapi import APIRouter, Depends from fastapi import APIRouter, Depends
@@ -20,8 +20,8 @@ router = APIRouter()
@router.get("/download", summary="查询下载历史记录", response_model=List[schemas.DownloadHistory]) @router.get("/download", summary="查询下载历史记录", response_model=List[schemas.DownloadHistory])
def download_history(page: int = 1, def download_history(page: Optional[int] = 1,
count: int = 30, count: Optional[int] = 30,
db: Session = Depends(get_db), db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)) -> Any: _: schemas.TokenPayload = Depends(verify_token)) -> Any:
""" """
@@ -42,10 +42,10 @@ def delete_download_history(history_in: schemas.DownloadHistory,
@router.get("/transfer", summary="查询整理记录", response_model=schemas.Response) @router.get("/transfer", summary="查询整理记录", response_model=schemas.Response)
def transfer_history(title: str = None, def transfer_history(title: Optional[str] = None,
page: int = 1, page: Optional[int] = 1,
count: int = 30, count: Optional[int] = 30,
status: bool = None, status: Optional[bool] = None,
db: Session = Depends(get_db), db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)) -> Any: _: schemas.TokenPayload = Depends(verify_token)) -> Any:
""" """
@@ -78,8 +78,8 @@ def transfer_history(title: str = None,
@router.delete("/transfer", summary="删除整理记录", response_model=schemas.Response) @router.delete("/transfer", summary="删除整理记录", response_model=schemas.Response)
def delete_transfer_history(history_in: schemas.TransferHistory, def delete_transfer_history(history_in: schemas.TransferHistory,
deletesrc: bool = False, deletesrc: Optional[bool] = False,
deletedest: bool = False, deletedest: Optional[bool] = False,
db: Session = Depends(get_db), db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(get_current_active_superuser)) -> Any: _: schemas.TokenPayload = Depends(get_current_active_superuser)) -> Any:
""" """

View File

@@ -1,5 +1,5 @@
from datetime import timedelta from datetime import timedelta
from typing import Any, List from typing import Any, List, Annotated
from fastapi import APIRouter, Depends, Form, HTTPException from fastapi import APIRouter, Depends, Form, HTTPException
from fastapi.security import OAuth2PasswordRequestForm from fastapi.security import OAuth2PasswordRequestForm
@@ -18,8 +18,8 @@ router = APIRouter()
@router.post("/access-token", summary="获取token", response_model=schemas.Token) @router.post("/access-token", summary="获取token", response_model=schemas.Token)
def login_access_token( def login_access_token(
form_data: OAuth2PasswordRequestForm = Depends(), form_data: Annotated[OAuth2PasswordRequestForm, Depends()],
otp_password: str = Form(None) otp_password: Annotated[str | None, Form()] = None
) -> Any: ) -> Any:
""" """
获取认证Token 获取认证Token

View File

@@ -1,5 +1,5 @@
from pathlib import Path from pathlib import Path
from typing import List, Any, Union from typing import List, Any, Union, Annotated, Optional
from fastapi import APIRouter, Depends from fastapi import APIRouter, Depends
@@ -19,7 +19,7 @@ router = APIRouter()
@router.get("/recognize", summary="识别媒体信息(种子)", response_model=schemas.Context) @router.get("/recognize", summary="识别媒体信息(种子)", response_model=schemas.Context)
def recognize(title: str, def recognize(title: str,
subtitle: str = None, subtitle: Optional[str] = None,
_: schemas.TokenPayload = Depends(verify_token)) -> Any: _: schemas.TokenPayload = Depends(verify_token)) -> Any:
""" """
根据标题、副标题识别媒体信息 根据标题、副标题识别媒体信息
@@ -33,9 +33,10 @@ def recognize(title: str,
@router.get("/recognize2", summary="识别种子媒体信息API_TOKEN", response_model=schemas.Context) @router.get("/recognize2", summary="识别种子媒体信息API_TOKEN", response_model=schemas.Context)
def recognize2(title: str, def recognize2(_: Annotated[str, Depends(verify_apitoken)],
subtitle: str = None, title: str,
_: str = Depends(verify_apitoken)) -> Any: subtitle: Optional[str] = None
) -> Any:
""" """
根据标题、副标题识别媒体信息 API_TOKEN认证?token=xxx 根据标题、副标题识别媒体信息 API_TOKEN认证?token=xxx
""" """
@@ -58,7 +59,7 @@ def recognize_file(path: str,
@router.get("/recognize_file2", summary="识别文件媒体信息API_TOKEN", response_model=schemas.Context) @router.get("/recognize_file2", summary="识别文件媒体信息API_TOKEN", response_model=schemas.Context)
def recognize_file2(path: str, def recognize_file2(path: str,
_: str = Depends(verify_apitoken)) -> Any: _: Annotated[str, Depends(verify_apitoken)]) -> Any:
""" """
根据文件路径识别媒体信息 API_TOKEN认证?token=xxx 根据文件路径识别媒体信息 API_TOKEN认证?token=xxx
""" """
@@ -68,7 +69,7 @@ def recognize_file2(path: str,
@router.get("/search", summary="搜索媒体/人物信息", response_model=List[dict]) @router.get("/search", summary="搜索媒体/人物信息", response_model=List[dict])
def search(title: str, def search(title: str,
type: str = "media", type: Optional[str] = "media",
page: int = 1, page: int = 1,
count: int = 8, count: int = 8,
_: schemas.TokenPayload = Depends(verify_token)) -> Any: _: schemas.TokenPayload = Depends(verify_token)) -> Any:
@@ -105,7 +106,7 @@ def search(title: str,
@router.post("/scrape/{storage}", summary="刮削媒体信息", response_model=schemas.Response) @router.post("/scrape/{storage}", summary="刮削媒体信息", response_model=schemas.Response)
def scrape(fileitem: schemas.FileItem, def scrape(fileitem: schemas.FileItem,
storage: str = "local", storage: Optional[str] = "local",
_: schemas.TokenPayload = Depends(verify_token)) -> Any: _: schemas.TokenPayload = Depends(verify_token)) -> Any:
""" """
刮削媒体信息 刮削媒体信息
@@ -135,10 +136,28 @@ def category(_: schemas.TokenPayload = Depends(verify_token)) -> Any:
return MediaChain().media_category() or {} return MediaChain().media_category() or {}
@router.get("/group/seasons/{episode_group}", summary="查询剧集组季信息", response_model=List[schemas.MediaSeason])
def group_seasons(episode_group: str, _: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
查询剧集组季信息themoviedb
"""
return TmdbChain().tmdb_group_seasons(group_id=episode_group)
@router.get("/groups/{tmdbid}", summary="查询媒体剧集组", response_model=List[dict])
def seasons(tmdbid: int, _: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
查询媒体剧集组列表themoviedb
"""
mediainfo = MediaChain().recognize_media(tmdbid=tmdbid, mtype=MediaType.TV)
if not mediainfo:
return []
return mediainfo.episode_groups
@router.get("/seasons", summary="查询媒体季信息", response_model=List[schemas.MediaSeason]) @router.get("/seasons", summary="查询媒体季信息", response_model=List[schemas.MediaSeason])
def seasons(mediaid: str = None, def seasons(mediaid: Optional[str] = None,
title: str = None, title: Optional[str] = None,
year: int = None, year: str = None,
season: int = None, season: int = None,
_: schemas.TokenPayload = Depends(verify_token)) -> Any: _: schemas.TokenPayload = Depends(verify_token)) -> Any:
""" """
@@ -179,7 +198,7 @@ def seasons(mediaid: str = None,
@router.get("/{mediaid}", summary="查询媒体详情", response_model=schemas.MediaInfo) @router.get("/{mediaid}", summary="查询媒体详情", response_model=schemas.MediaInfo)
def detail(mediaid: str, type_name: str, title: str = None, year: int = None, def detail(mediaid: str, type_name: str, title: Optional[str] = None, year: int = None,
_: schemas.TokenPayload = Depends(verify_token)) -> Any: _: schemas.TokenPayload = Depends(verify_token)) -> Any:
""" """
根据媒体ID查询themoviedb或豆瓣媒体信息type_name: 电影/电视剧 根据媒体ID查询themoviedb或豆瓣媒体信息type_name: 电影/电视剧

View File

@@ -1,4 +1,4 @@
from typing import Any, List, Dict from typing import Any, List, Dict, Optional
from fastapi import APIRouter, Depends from fastapi import APIRouter, Depends
from sqlalchemy.orm import Session from sqlalchemy.orm import Session
@@ -43,11 +43,11 @@ def play_item(itemid: str, _: schemas.TokenPayload = Depends(verify_token)) -> s
@router.get("/exists", summary="查询本地是否存在(数据库)", response_model=schemas.Response) @router.get("/exists", summary="查询本地是否存在(数据库)", response_model=schemas.Response)
def exists_local(title: str = None, def exists_local(title: Optional[str] = None,
year: int = None, year: Optional[str] = None,
mtype: str = None, mtype: Optional[str] = None,
tmdbid: int = None, tmdbid: Optional[int] = None,
season: int = None, season: Optional[int] = None,
db: Session = Depends(get_db), db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)) -> Any: _: schemas.TokenPayload = Depends(verify_token)) -> Any:
""" """
@@ -121,7 +121,7 @@ def not_exists(media_in: schemas.MediaInfo,
@router.get("/latest", summary="最新入库条目", response_model=List[schemas.MediaServerPlayItem]) @router.get("/latest", summary="最新入库条目", response_model=List[schemas.MediaServerPlayItem])
def latest(server: str, count: int = 18, def latest(server: str, count: Optional[int] = 18,
userinfo: schemas.TokenPayload = Depends(verify_token)) -> Any: userinfo: schemas.TokenPayload = Depends(verify_token)) -> Any:
""" """
获取媒体服务器最新入库条目 获取媒体服务器最新入库条目
@@ -130,7 +130,7 @@ def latest(server: str, count: int = 18,
@router.get("/playing", summary="正在播放条目", response_model=List[schemas.MediaServerPlayItem]) @router.get("/playing", summary="正在播放条目", response_model=List[schemas.MediaServerPlayItem])
def playing(server: str, count: int = 12, def playing(server: str, count: Optional[int] = 12,
userinfo: schemas.TokenPayload = Depends(verify_token)) -> Any: userinfo: schemas.TokenPayload = Depends(verify_token)) -> Any:
""" """
获取媒体服务器正在播放条目 获取媒体服务器正在播放条目
@@ -139,7 +139,7 @@ def playing(server: str, count: int = 12,
@router.get("/library", summary="媒体库列表", response_model=List[schemas.MediaServerLibrary]) @router.get("/library", summary="媒体库列表", response_model=List[schemas.MediaServerLibrary])
def library(server: str, hidden: bool = False, def library(server: str, hidden: Optional[bool] = False,
userinfo: schemas.TokenPayload = Depends(verify_token)) -> Any: userinfo: schemas.TokenPayload = Depends(verify_token)) -> Any:
""" """
获取媒体服务器媒体库列表 获取媒体服务器媒体库列表

View File

@@ -1,5 +1,5 @@
import json import json
from typing import Union, Any, List from typing import Union, Any, List, Optional
from fastapi import APIRouter, BackgroundTasks, Depends, Request from fastapi import APIRouter, BackgroundTasks, Depends, Request
from pywebpush import WebPushException, webpush from pywebpush import WebPushException, webpush
@@ -60,8 +60,8 @@ def web_message(text: str, current_user: User = Depends(get_current_active_super
@router.get("/web", summary="获取WEB消息", response_model=List[dict]) @router.get("/web", summary="获取WEB消息", response_model=List[dict])
def get_web_message(_: schemas.TokenPayload = Depends(verify_token), def get_web_message(_: schemas.TokenPayload = Depends(verify_token),
db: Session = Depends(get_db), db: Session = Depends(get_db),
page: int = 1, page: Optional[int] = 1,
count: int = 20): count: Optional[int] = 20):
""" """
获取WEB消息列表 获取WEB消息列表
""" """
@@ -77,7 +77,7 @@ def get_web_message(_: schemas.TokenPayload = Depends(verify_token),
def wechat_verify(echostr: str, msg_signature: str, timestamp: Union[str, int], nonce: str, def wechat_verify(echostr: str, msg_signature: str, timestamp: Union[str, int], nonce: str,
source: str = None) -> Any: source: Optional[str] = None) -> Any:
""" """
微信验证响应 微信验证响应
""" """
@@ -114,8 +114,8 @@ def vocechat_verify() -> Any:
@router.get("/", summary="回调请求验证") @router.get("/", summary="回调请求验证")
def incoming_verify(token: str = None, echostr: str = None, msg_signature: str = None, def incoming_verify(token: Optional[str] = None, echostr: Optional[str] = None, msg_signature: Optional[str] = None,
timestamp: Union[str, int] = None, nonce: str = None, source: str = None, timestamp: Union[str, int] = None, nonce: Optional[str] = None, source: Optional[str] = None,
_: schemas.TokenPayload = Depends(verify_apitoken)) -> Any: _: schemas.TokenPayload = Depends(verify_apitoken)) -> Any:
""" """
微信/VoceChat等验证响应 微信/VoceChat等验证响应

View File

@@ -118,7 +118,7 @@ def _clean_protected_routes(existing_paths: dict):
@router.get("/", summary="所有插件", response_model=List[schemas.Plugin]) @router.get("/", summary="所有插件", response_model=List[schemas.Plugin])
def all_plugins(_: schemas.TokenPayload = Depends(get_current_active_superuser), def all_plugins(_: schemas.TokenPayload = Depends(get_current_active_superuser),
state: str = "all") -> List[schemas.Plugin]: state: Optional[str] = "all") -> List[schemas.Plugin]:
""" """
查询所有插件清单包括本地插件和在线插件插件状态installed, market, all 查询所有插件清单包括本地插件和在线插件插件状态installed, market, all
""" """
@@ -181,8 +181,8 @@ def statistic(_: schemas.TokenPayload = Depends(verify_token)) -> Any:
@router.get("/install/{plugin_id}", summary="安装插件", response_model=schemas.Response) @router.get("/install/{plugin_id}", summary="安装插件", response_model=schemas.Response)
def install(plugin_id: str, def install(plugin_id: str,
repo_url: str = "", repo_url: Optional[str] = "",
force: bool = False, force: Optional[bool] = False,
_: schemas.TokenPayload = Depends(get_current_active_superuser)) -> Any: _: schemas.TokenPayload = Depends(get_current_active_superuser)) -> Any:
""" """
安装插件 安装插件

View File

@@ -1,4 +1,4 @@
from typing import Any, List from typing import Any, List, Optional
from fastapi import APIRouter, Depends from fastapi import APIRouter, Depends
@@ -6,8 +6,8 @@ from app import schemas
from app.core.event import eventmanager from app.core.event import eventmanager
from app.core.security import verify_token from app.core.security import verify_token
from app.schemas.types import ChainEventType from app.schemas.types import ChainEventType
from chain.recommend import RecommendChain from app.chain.recommend import RecommendChain
from schemas import RecommendSourceEventData from app.schemas import RecommendSourceEventData
router = APIRouter() router = APIRouter()
@@ -29,8 +29,8 @@ def source(_: schemas.TokenPayload = Depends(verify_token)) -> Any:
@router.get("/bangumi_calendar", summary="Bangumi每日放送", response_model=List[schemas.MediaInfo]) @router.get("/bangumi_calendar", summary="Bangumi每日放送", response_model=List[schemas.MediaInfo])
def bangumi_calendar(page: int = 1, def bangumi_calendar(page: Optional[int] = 1,
count: int = 30, count: Optional[int] = 30,
_: schemas.TokenPayload = Depends(verify_token)) -> Any: _: schemas.TokenPayload = Depends(verify_token)) -> Any:
""" """
浏览Bangumi每日放送 浏览Bangumi每日放送
@@ -39,8 +39,8 @@ def bangumi_calendar(page: int = 1,
@router.get("/douban_showing", summary="豆瓣正在热映", response_model=List[schemas.MediaInfo]) @router.get("/douban_showing", summary="豆瓣正在热映", response_model=List[schemas.MediaInfo])
def douban_showing(page: int = 1, def douban_showing(page: Optional[int] = 1,
count: int = 30, count: Optional[int] = 30,
_: schemas.TokenPayload = Depends(verify_token)) -> Any: _: schemas.TokenPayload = Depends(verify_token)) -> Any:
""" """
浏览豆瓣正在热映 浏览豆瓣正在热映
@@ -49,10 +49,10 @@ def douban_showing(page: int = 1,
@router.get("/douban_movies", summary="豆瓣电影", response_model=List[schemas.MediaInfo]) @router.get("/douban_movies", summary="豆瓣电影", response_model=List[schemas.MediaInfo])
def douban_movies(sort: str = "R", def douban_movies(sort: Optional[str] = "R",
tags: str = "", tags: Optional[str] = "",
page: int = 1, page: Optional[int] = 1,
count: int = 30, count: Optional[int] = 30,
_: schemas.TokenPayload = Depends(verify_token)) -> Any: _: schemas.TokenPayload = Depends(verify_token)) -> Any:
""" """
浏览豆瓣电影信息 浏览豆瓣电影信息
@@ -61,10 +61,10 @@ def douban_movies(sort: str = "R",
@router.get("/douban_tvs", summary="豆瓣剧集", response_model=List[schemas.MediaInfo]) @router.get("/douban_tvs", summary="豆瓣剧集", response_model=List[schemas.MediaInfo])
def douban_tvs(sort: str = "R", def douban_tvs(sort: Optional[str] = "R",
tags: str = "", tags: Optional[str] = "",
page: int = 1, page: Optional[int] = 1,
count: int = 30, count: Optional[int] = 30,
_: schemas.TokenPayload = Depends(verify_token)) -> Any: _: schemas.TokenPayload = Depends(verify_token)) -> Any:
""" """
浏览豆瓣剧集信息 浏览豆瓣剧集信息
@@ -73,8 +73,8 @@ def douban_tvs(sort: str = "R",
@router.get("/douban_movie_top250", summary="豆瓣电影TOP250", response_model=List[schemas.MediaInfo]) @router.get("/douban_movie_top250", summary="豆瓣电影TOP250", response_model=List[schemas.MediaInfo])
def douban_movie_top250(page: int = 1, def douban_movie_top250(page: Optional[int] = 1,
count: int = 30, count: Optional[int] = 30,
_: schemas.TokenPayload = Depends(verify_token)) -> Any: _: schemas.TokenPayload = Depends(verify_token)) -> Any:
""" """
浏览豆瓣剧集信息 浏览豆瓣剧集信息
@@ -83,8 +83,8 @@ def douban_movie_top250(page: int = 1,
@router.get("/douban_tv_weekly_chinese", summary="豆瓣国产剧集周榜", response_model=List[schemas.MediaInfo]) @router.get("/douban_tv_weekly_chinese", summary="豆瓣国产剧集周榜", response_model=List[schemas.MediaInfo])
def douban_tv_weekly_chinese(page: int = 1, def douban_tv_weekly_chinese(page: Optional[int] = 1,
count: int = 30, count: Optional[int] = 30,
_: schemas.TokenPayload = Depends(verify_token)) -> Any: _: schemas.TokenPayload = Depends(verify_token)) -> Any:
""" """
中国每周剧集口碑榜 中国每周剧集口碑榜
@@ -93,8 +93,8 @@ def douban_tv_weekly_chinese(page: int = 1,
@router.get("/douban_tv_weekly_global", summary="豆瓣全球剧集周榜", response_model=List[schemas.MediaInfo]) @router.get("/douban_tv_weekly_global", summary="豆瓣全球剧集周榜", response_model=List[schemas.MediaInfo])
def douban_tv_weekly_global(page: int = 1, def douban_tv_weekly_global(page: Optional[int] = 1,
count: int = 30, count: Optional[int] = 30,
_: schemas.TokenPayload = Depends(verify_token)) -> Any: _: schemas.TokenPayload = Depends(verify_token)) -> Any:
""" """
全球每周剧集口碑榜 全球每周剧集口碑榜
@@ -103,8 +103,8 @@ def douban_tv_weekly_global(page: int = 1,
@router.get("/douban_tv_animation", summary="豆瓣动画剧集", response_model=List[schemas.MediaInfo]) @router.get("/douban_tv_animation", summary="豆瓣动画剧集", response_model=List[schemas.MediaInfo])
def douban_tv_animation(page: int = 1, def douban_tv_animation(page: Optional[int] = 1,
count: int = 30, count: Optional[int] = 30,
_: schemas.TokenPayload = Depends(verify_token)) -> Any: _: schemas.TokenPayload = Depends(verify_token)) -> Any:
""" """
热门动画剧集 热门动画剧集
@@ -113,8 +113,8 @@ def douban_tv_animation(page: int = 1,
@router.get("/douban_movie_hot", summary="豆瓣热门电影", response_model=List[schemas.MediaInfo]) @router.get("/douban_movie_hot", summary="豆瓣热门电影", response_model=List[schemas.MediaInfo])
def douban_movie_hot(page: int = 1, def douban_movie_hot(page: Optional[int] = 1,
count: int = 30, count: Optional[int] = 30,
_: schemas.TokenPayload = Depends(verify_token)) -> Any: _: schemas.TokenPayload = Depends(verify_token)) -> Any:
""" """
热门电影 热门电影
@@ -123,8 +123,8 @@ def douban_movie_hot(page: int = 1,
@router.get("/douban_tv_hot", summary="豆瓣热门电视剧", response_model=List[schemas.MediaInfo]) @router.get("/douban_tv_hot", summary="豆瓣热门电视剧", response_model=List[schemas.MediaInfo])
def douban_tv_hot(page: int = 1, def douban_tv_hot(page: Optional[int] = 1,
count: int = 30, count: Optional[int] = 30,
_: schemas.TokenPayload = Depends(verify_token)) -> Any: _: schemas.TokenPayload = Depends(verify_token)) -> Any:
""" """
热门电视剧 热门电视剧
@@ -133,15 +133,15 @@ def douban_tv_hot(page: int = 1,
@router.get("/tmdb_movies", summary="TMDB电影", response_model=List[schemas.MediaInfo]) @router.get("/tmdb_movies", summary="TMDB电影", response_model=List[schemas.MediaInfo])
def tmdb_movies(sort_by: str = "popularity.desc", def tmdb_movies(sort_by: Optional[str] = "popularity.desc",
with_genres: str = "", with_genres: Optional[str] = "",
with_original_language: str = "", with_original_language: Optional[str] = "",
with_keywords: str = "", with_keywords: Optional[str] = "",
with_watch_providers: str = "", with_watch_providers: Optional[str] = "",
vote_average: float = 0, vote_average: Optional[float] = 0.0,
vote_count: int = 0, vote_count: Optional[int] = 0,
release_date: str = "", release_date: Optional[str] = "",
page: int = 1, page: Optional[int] = 1,
_: schemas.TokenPayload = Depends(verify_token)) -> Any: _: schemas.TokenPayload = Depends(verify_token)) -> Any:
""" """
浏览TMDB电影信息 浏览TMDB电影信息
@@ -158,15 +158,15 @@ def tmdb_movies(sort_by: str = "popularity.desc",
@router.get("/tmdb_tvs", summary="TMDB剧集", response_model=List[schemas.MediaInfo]) @router.get("/tmdb_tvs", summary="TMDB剧集", response_model=List[schemas.MediaInfo])
def tmdb_tvs(sort_by: str = "popularity.desc", def tmdb_tvs(sort_by: Optional[str] = "popularity.desc",
with_genres: str = "", with_genres: Optional[str] = "",
with_original_language: str = "", with_original_language: Optional[str] = "",
with_keywords: str = "", with_keywords: Optional[str] = "",
with_watch_providers: str = "", with_watch_providers: Optional[str] = "",
vote_average: float = 0, vote_average: Optional[float] = 0.0,
vote_count: int = 0, vote_count: Optional[int] = 0,
release_date: str = "", release_date: Optional[str] = "",
page: int = 1, page: Optional[int] = 1,
_: schemas.TokenPayload = Depends(verify_token)) -> Any: _: schemas.TokenPayload = Depends(verify_token)) -> Any:
""" """
浏览TMDB剧集信息 浏览TMDB剧集信息
@@ -183,7 +183,7 @@ def tmdb_tvs(sort_by: str = "popularity.desc",
@router.get("/tmdb_trending", summary="TMDB流行趋势", response_model=List[schemas.MediaInfo]) @router.get("/tmdb_trending", summary="TMDB流行趋势", response_model=List[schemas.MediaInfo])
def tmdb_trending(page: int = 1, def tmdb_trending(page: Optional[int] = 1,
_: schemas.TokenPayload = Depends(verify_token)) -> Any: _: schemas.TokenPayload = Depends(verify_token)) -> Any:
""" """
TMDB流行趋势 TMDB流行趋势

View File

@@ -1,4 +1,4 @@
from typing import List, Any from typing import List, Any, Optional
from fastapi import APIRouter, Depends from fastapi import APIRouter, Depends
@@ -26,47 +26,60 @@ def search_latest(_: schemas.TokenPayload = Depends(verify_token)) -> Any:
@router.get("/media/{mediaid}", summary="精确搜索资源", response_model=schemas.Response) @router.get("/media/{mediaid}", summary="精确搜索资源", response_model=schemas.Response)
def search_by_id(mediaid: str, def search_by_id(mediaid: str,
mtype: str = None, mtype: Optional[str] = None,
area: str = "title", area: Optional[str] = "title",
title: str = None, title: Optional[str] = None,
year: int = None, year: Optional[str] = None,
season: str = None, season: Optional[str] = None,
sites: Optional[str] = None,
_: schemas.TokenPayload = Depends(verify_token)) -> Any: _: schemas.TokenPayload = Depends(verify_token)) -> Any:
""" """
根据TMDBID/豆瓣ID精确搜索站点资源 tmdb:/douban:/bangumi: 根据TMDBID/豆瓣ID精确搜索站点资源 tmdb:/douban:/bangumi:
""" """
if mtype: if mtype:
mtype = MediaType(mtype) media_type = MediaType(mtype)
else:
media_type = None
if season: if season:
season = int(season) media_season = int(season)
else:
media_season = None
if sites:
site_list = [int(site) for site in sites.split(",") if site]
else:
site_list = None
torrents = None torrents = None
# 根据前缀识别媒体ID # 根据前缀识别媒体ID
if mediaid.startswith("tmdb:"): if mediaid.startswith("tmdb:"):
tmdbid = int(mediaid.replace("tmdb:", "")) tmdbid = int(mediaid.replace("tmdb:", ""))
if settings.RECOGNIZE_SOURCE == "douban": if settings.RECOGNIZE_SOURCE == "douban":
# 通过TMDBID识别豆瓣ID # 通过TMDBID识别豆瓣ID
doubaninfo = MediaChain().get_doubaninfo_by_tmdbid(tmdbid=tmdbid, mtype=mtype) doubaninfo = MediaChain().get_doubaninfo_by_tmdbid(tmdbid=tmdbid, mtype=media_type)
if doubaninfo: if doubaninfo:
torrents = SearchChain().search_by_id(doubanid=doubaninfo.get("id"), torrents = SearchChain().search_by_id(doubanid=doubaninfo.get("id"),
mtype=mtype, area=area, season=season) mtype=media_type, area=area, season=media_season,
sites=site_list, cache_local=True)
else: else:
return schemas.Response(success=False, message="未识别到豆瓣媒体信息") return schemas.Response(success=False, message="未识别到豆瓣媒体信息")
else: else:
torrents = SearchChain().search_by_id(tmdbid=tmdbid, mtype=mtype, area=area, season=season) torrents = SearchChain().search_by_id(tmdbid=tmdbid, mtype=media_type, area=area, season=media_season,
sites=site_list, cache_local=True)
elif mediaid.startswith("douban:"): elif mediaid.startswith("douban:"):
doubanid = mediaid.replace("douban:", "") doubanid = mediaid.replace("douban:", "")
if settings.RECOGNIZE_SOURCE == "themoviedb": if settings.RECOGNIZE_SOURCE == "themoviedb":
# 通过豆瓣ID识别TMDBID # 通过豆瓣ID识别TMDBID
tmdbinfo = MediaChain().get_tmdbinfo_by_doubanid(doubanid=doubanid, mtype=mtype) tmdbinfo = MediaChain().get_tmdbinfo_by_doubanid(doubanid=doubanid, mtype=media_type)
if tmdbinfo: if tmdbinfo:
if tmdbinfo.get('season') and not season: if tmdbinfo.get('season') and not media_season:
season = tmdbinfo.get('season') media_season = tmdbinfo.get('season')
torrents = SearchChain().search_by_id(tmdbid=tmdbinfo.get("id"), torrents = SearchChain().search_by_id(tmdbid=tmdbinfo.get("id"),
mtype=mtype, area=area, season=season) mtype=media_type, area=area, season=media_season,
sites=site_list, cache_local=True)
else: else:
return schemas.Response(success=False, message="未识别到TMDB媒体信息") return schemas.Response(success=False, message="未识别到TMDB媒体信息")
else: else:
torrents = SearchChain().search_by_id(doubanid=doubanid, mtype=mtype, area=area, season=season) torrents = SearchChain().search_by_id(doubanid=doubanid, mtype=media_type, area=area, season=media_season,
sites=site_list, cache_local=True)
elif mediaid.startswith("bangumi:"): elif mediaid.startswith("bangumi:"):
bangumiid = int(mediaid.replace("bangumi:", "")) bangumiid = int(mediaid.replace("bangumi:", ""))
if settings.RECOGNIZE_SOURCE == "themoviedb": if settings.RECOGNIZE_SOURCE == "themoviedb":
@@ -74,7 +87,8 @@ def search_by_id(mediaid: str,
tmdbinfo = MediaChain().get_tmdbinfo_by_bangumiid(bangumiid=bangumiid) tmdbinfo = MediaChain().get_tmdbinfo_by_bangumiid(bangumiid=bangumiid)
if tmdbinfo: if tmdbinfo:
torrents = SearchChain().search_by_id(tmdbid=tmdbinfo.get("id"), torrents = SearchChain().search_by_id(tmdbid=tmdbinfo.get("id"),
mtype=mtype, area=area, season=season) mtype=media_type, area=area, season=media_season,
sites=site_list, cache_local=True)
else: else:
return schemas.Response(success=False, message="未识别到TMDB媒体信息") return schemas.Response(success=False, message="未识别到TMDB媒体信息")
else: else:
@@ -82,7 +96,8 @@ def search_by_id(mediaid: str,
doubaninfo = MediaChain().get_doubaninfo_by_bangumiid(bangumiid=bangumiid) doubaninfo = MediaChain().get_doubaninfo_by_bangumiid(bangumiid=bangumiid)
if doubaninfo: if doubaninfo:
torrents = SearchChain().search_by_id(doubanid=doubaninfo.get("id"), torrents = SearchChain().search_by_id(doubanid=doubaninfo.get("id"),
mtype=mtype, area=area, season=season) mtype=media_type, area=area, season=media_season,
sites=site_list, cache_local=True)
else: else:
return schemas.Response(success=False, message="未识别到豆瓣媒体信息") return schemas.Response(success=False, message="未识别到豆瓣媒体信息")
else: else:
@@ -98,11 +113,11 @@ def search_by_id(mediaid: str,
if event_data.media_dict: if event_data.media_dict:
search_id = event_data.media_dict.get("id") search_id = event_data.media_dict.get("id")
if event_data.convert_type == "themoviedb": if event_data.convert_type == "themoviedb":
torrents = SearchChain().search_by_id(tmdbid=search_id, torrents = SearchChain().search_by_id(tmdbid=search_id, mtype=media_type, area=area,
mtype=mtype, area=area, season=season) season=media_season, cache_local=True)
elif event_data.convert_type == "douban": elif event_data.convert_type == "douban":
torrents = SearchChain().search_by_id(doubanid=search_id, torrents = SearchChain().search_by_id(doubanid=search_id, mtype=media_type, area=area,
mtype=mtype, area=area, season=season) season=media_season, cache_local=True)
else: else:
if not title: if not title:
return schemas.Response(success=False, message="未知的媒体ID") return schemas.Response(success=False, message="未知的媒体ID")
@@ -110,19 +125,19 @@ def search_by_id(mediaid: str,
meta = MetaInfo(title) meta = MetaInfo(title)
if year: if year:
meta.year = year meta.year = year
if mtype: if media_type:
meta.type = mtype meta.type = media_type
if season: if media_season:
meta.type = MediaType.TV meta.type = MediaType.TV
meta.begin_season = season meta.begin_season = media_season
mediainfo = MediaChain().recognize_media(meta=meta) mediainfo = MediaChain().recognize_media(meta=meta)
if mediainfo: if mediainfo:
if settings.RECOGNIZE_SOURCE == "themoviedb": if settings.RECOGNIZE_SOURCE == "themoviedb":
torrents = SearchChain().search_by_id(tmdbid=mediainfo.tmdb_id, torrents = SearchChain().search_by_id(tmdbid=mediainfo.tmdb_id, mtype=media_type, area=area,
mtype=mtype, area=area, season=season) season=media_season, cache_local=True)
else: else:
torrents = SearchChain().search_by_id(doubanid=mediainfo.douban_id, torrents = SearchChain().search_by_id(doubanid=mediainfo.douban_id, mtype=media_type, area=area,
mtype=mtype, area=area, season=season) season=media_season, cache_local=True)
# 返回搜索结果 # 返回搜索结果
if not torrents: if not torrents:
return schemas.Response(success=False, message="未搜索到任何资源") return schemas.Response(success=False, message="未搜索到任何资源")
@@ -131,14 +146,16 @@ def search_by_id(mediaid: str,
@router.get("/title", summary="模糊搜索资源", response_model=schemas.Response) @router.get("/title", summary="模糊搜索资源", response_model=schemas.Response)
def search_by_title(keyword: str = None, def search_by_title(keyword: Optional[str] = None,
page: int = 0, page: Optional[int] = 0,
site: int = None, sites: Optional[str] = None,
_: schemas.TokenPayload = Depends(verify_token)) -> Any: _: schemas.TokenPayload = Depends(verify_token)) -> Any:
""" """
根据名称模糊搜索站点资源,支持分页,关键词为空是返回首页资源 根据名称模糊搜索站点资源,支持分页,关键词为空是返回首页资源
""" """
torrents = SearchChain().search_by_title(title=keyword, page=page, site=site) torrents = SearchChain().search_by_title(title=keyword, page=page,
sites=[int(site) for site in sites.split(",") if site] if sites else None,
cache_local=True)
if not torrents: if not torrents:
return schemas.Response(success=False, message="未搜索到任何资源") return schemas.Response(success=False, message="未搜索到任何资源")
return schemas.Response(success=True, data=[torrent.to_dict() for torrent in torrents]) return schemas.Response(success=True, data=[torrent.to_dict() for torrent in torrents])

View File

@@ -1,4 +1,4 @@
from typing import List, Any from typing import List, Any, Dict, Optional
from fastapi import APIRouter, Depends, HTTPException from fastapi import APIRouter, Depends, HTTPException
from sqlalchemy.orm import Session from sqlalchemy.orm import Session
@@ -145,7 +145,7 @@ def update_cookie(
site_id: int, site_id: int,
username: str, username: str,
password: str, password: str,
code: str = None, code: Optional[str] = None,
db: Session = Depends(get_db), db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(get_current_active_superuser)) -> Any: _: schemas.TokenPayload = Depends(get_current_active_superuser)) -> Any:
""" """
@@ -203,7 +203,7 @@ def read_userdata_latest(
@router.get("/userdata/{site_id}", summary="查询某站点用户数据", response_model=schemas.Response) @router.get("/userdata/{site_id}", summary="查询某站点用户数据", response_model=schemas.Response)
def read_userdata( def read_userdata(
site_id: int, site_id: int,
workdate: str = None, workdate: Optional[str] = None,
db: Session = Depends(get_db), db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(get_current_active_superuser)) -> Any: _: schemas.TokenPayload = Depends(get_current_active_superuser)) -> Any:
""" """
@@ -259,8 +259,41 @@ def site_icon(site_id: int,
}) })
@router.get("/category/{site_id}", summary="站点分类", response_model=List[schemas.SiteCategory])
def site_category(site_id: int,
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
获取站点分类
"""
site = Site.get(db, site_id)
if not site:
raise HTTPException(
status_code=404,
detail=f"站点 {site_id} 不存在",
)
indexer = SitesHelper().get_indexer(site.domain)
if not indexer:
raise HTTPException(
status_code=404,
detail=f"站点 {site.domain} 不支持",
)
category: Dict[str, List[dict]] = indexer.get('category') or []
if not category:
return []
result = []
for cats in category.values():
for cat in cats:
if cat not in result:
result.append(cat)
return result
@router.get("/resource/{site_id}", summary="站点资源", response_model=List[schemas.TorrentInfo]) @router.get("/resource/{site_id}", summary="站点资源", response_model=List[schemas.TorrentInfo])
def site_resource(site_id: int, def site_resource(site_id: int,
keyword: Optional[str] = None,
cat: Optional[str] = None,
page: Optional[int] = 0,
db: Session = Depends(get_db), db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(get_current_active_superuser)) -> Any: _: schemas.TokenPayload = Depends(get_current_active_superuser)) -> Any:
""" """
@@ -272,7 +305,7 @@ def site_resource(site_id: int,
status_code=404, status_code=404,
detail=f"站点 {site_id} 不存在", detail=f"站点 {site_id} 不存在",
) )
torrents = TorrentsChain().browse(domain=site.domain) torrents = TorrentsChain().browse(domain=site.domain, keyword=keyword, cat=cat, page=page)
if not torrents: if not torrents:
return [] return []
return [torrent.to_dict() for torrent in torrents] return [torrent.to_dict() for torrent in torrents]

View File

@@ -1,6 +1,6 @@
from datetime import datetime from datetime import datetime
from pathlib import Path from pathlib import Path
from typing import Any, List from typing import Any, List, Optional
from fastapi import APIRouter, Depends, HTTPException from fastapi import APIRouter, Depends, HTTPException
from starlette.responses import FileResponse, Response from starlette.responses import FileResponse, Response
@@ -27,11 +27,12 @@ def qrcode(name: str, _: schemas.TokenPayload = Depends(verify_token)) -> Any:
qrcode_data, errmsg = StorageChain().generate_qrcode(name) qrcode_data, errmsg = StorageChain().generate_qrcode(name)
if qrcode_data: if qrcode_data:
return schemas.Response(success=True, data=qrcode_data, message=errmsg) return schemas.Response(success=True, data=qrcode_data, message=errmsg)
return schemas.Response(success=False) return schemas.Response(success=False, message=errmsg)
@router.get("/check/{name}", summary="二维码登录确认", response_model=schemas.Response) @router.get("/check/{name}", summary="二维码登录确认", response_model=schemas.Response)
def check(name: str, ck: str = None, t: str = None, _: schemas.TokenPayload = Depends(verify_token)) -> Any: def check(name: str, ck: Optional[str] = None, t: Optional[str] = None,
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
""" """
二维码登录确认 二维码登录确认
""" """
@@ -57,7 +58,7 @@ def save(name: str,
@router.post("/list", summary="所有目录和文件", response_model=List[schemas.FileItem]) @router.post("/list", summary="所有目录和文件", response_model=List[schemas.FileItem])
def list_files(fileitem: schemas.FileItem, def list_files(fileitem: schemas.FileItem,
sort: str = 'updated_at', sort: Optional[str] = 'updated_at',
_: User = Depends(get_current_active_superuser)) -> Any: _: User = Depends(get_current_active_superuser)) -> Any:
""" """
查询当前目录下所有目录和文件 查询当前目录下所有目录和文件
@@ -140,7 +141,7 @@ def image(fileitem: schemas.FileItem,
@router.post("/rename", summary="重命名文件或目录", response_model=schemas.Response) @router.post("/rename", summary="重命名文件或目录", response_model=schemas.Response)
def rename(fileitem: schemas.FileItem, def rename(fileitem: schemas.FileItem,
new_name: str, new_name: str,
recursive: bool = False, recursive: Optional[bool] = False,
_: User = Depends(get_current_active_superuser)) -> Any: _: User = Depends(get_current_active_superuser)) -> Any:
""" """
重命名文件或目录 重命名文件或目录

View File

@@ -1,4 +1,4 @@
from typing import List, Any from typing import List, Any, Annotated, Optional
import cn2an import cn2an
from fastapi import APIRouter, Request, BackgroundTasks, Depends, HTTPException, Header from fastapi import APIRouter, Request, BackgroundTasks, Depends, HTTPException, Header
@@ -44,7 +44,7 @@ def read_subscribes(
@router.get("/list", summary="查询所有订阅API_TOKEN", response_model=List[schemas.Subscribe]) @router.get("/list", summary="查询所有订阅API_TOKEN", response_model=List[schemas.Subscribe])
def list_subscribes(_: str = Depends(verify_apitoken)) -> Any: def list_subscribes(_: Annotated[str, Depends(verify_apitoken)]) -> Any:
""" """
查询所有订阅 API_TOKEN认证?token=xxx 查询所有订阅 API_TOKEN认证?token=xxx
""" """
@@ -75,22 +75,12 @@ def create_subscribe(
title = subscribe_in.name title = subscribe_in.name
else: else:
title = None title = None
# 订阅用户
subscribe_in.username = current_user.name
sid, message = SubscribeChain().add(mtype=mtype, sid, message = SubscribeChain().add(mtype=mtype,
title=title, title=title,
year=subscribe_in.year, exist_ok=True,
tmdbid=subscribe_in.tmdbid, **subscribe_in.dict())
season=subscribe_in.season,
doubanid=subscribe_in.doubanid,
bangumiid=subscribe_in.bangumiid,
mediaid=subscribe_in.mediaid,
username=current_user.name,
best_version=subscribe_in.best_version,
save_path=subscribe_in.save_path,
search_imdbid=subscribe_in.search_imdbid,
custom_words=subscribe_in.custom_words,
media_category=subscribe_in.media_category,
filter_groups=subscribe_in.filter_groups,
exist_ok=True)
return schemas.Response( return schemas.Response(
success=bool(sid), message=message, data={"id": sid} success=bool(sid), message=message, data={"id": sid}
) )
@@ -165,8 +155,8 @@ def update_subscribe_status(
@router.get("/media/{mediaid}", summary="查询订阅", response_model=schemas.Subscribe) @router.get("/media/{mediaid}", summary="查询订阅", response_model=schemas.Subscribe)
def subscribe_mediaid( def subscribe_mediaid(
mediaid: str, mediaid: str,
season: int = None, season: Optional[int] = None,
title: str = None, title: Optional[str] = None,
db: Session = Depends(get_db), db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)) -> Any: _: schemas.TokenPayload = Depends(verify_token)) -> Any:
""" """
@@ -294,7 +284,7 @@ def search_subscribe(
@router.delete("/media/{mediaid}", summary="删除订阅", response_model=schemas.Response) @router.delete("/media/{mediaid}", summary="删除订阅", response_model=schemas.Response)
def delete_subscribe_by_mediaid( def delete_subscribe_by_mediaid(
mediaid: str, mediaid: str,
season: int = None, season: Optional[int] = None,
db: Session = Depends(get_db), db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token) _: schemas.TokenPayload = Depends(verify_token)
) -> Any: ) -> Any:
@@ -331,7 +321,7 @@ def delete_subscribe_by_mediaid(
@router.post("/seerr", summary="OverSeerr/JellySeerr通知订阅", response_model=schemas.Response) @router.post("/seerr", summary="OverSeerr/JellySeerr通知订阅", response_model=schemas.Response)
async def seerr_subscribe(request: Request, background_tasks: BackgroundTasks, async def seerr_subscribe(request: Request, background_tasks: BackgroundTasks,
authorization: str = Header(None)) -> Any: authorization: Annotated[str | None, Header()] = None) -> Any:
""" """
Jellyseerr/Overseerr网络勾子通知订阅 Jellyseerr/Overseerr网络勾子通知订阅
""" """
@@ -385,8 +375,8 @@ async def seerr_subscribe(request: Request, background_tasks: BackgroundTasks,
@router.get("/history/{mtype}", summary="查询订阅历史", response_model=List[schemas.Subscribe]) @router.get("/history/{mtype}", summary="查询订阅历史", response_model=List[schemas.Subscribe])
def subscribe_history( def subscribe_history(
mtype: str, mtype: str,
page: int = 1, page: Optional[int] = 1,
count: int = 30, count: Optional[int] = 30,
db: Session = Depends(get_db), db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)) -> Any: _: schemas.TokenPayload = Depends(verify_token)) -> Any:
""" """
@@ -411,9 +401,9 @@ def delete_subscribe(
@router.get("/popular", summary="热门订阅(基于用户共享数据)", response_model=List[schemas.MediaInfo]) @router.get("/popular", summary="热门订阅(基于用户共享数据)", response_model=List[schemas.MediaInfo])
def popular_subscribes( def popular_subscribes(
stype: str, stype: str,
page: int = 1, page: Optional[int] = 1,
count: int = 30, count: Optional[int] = 30,
min_sub: int = None, min_sub: Optional[int] = None,
_: schemas.TokenPayload = Depends(verify_token)) -> Any: _: schemas.TokenPayload = Depends(verify_token)) -> Any:
""" """
查询热门订阅 查询热门订阅
@@ -532,7 +522,7 @@ def followed_subscribers(_: schemas.TokenPayload = Depends(verify_token)) -> Any
@router.post("/follow", summary="Follow订阅分享人", response_model=schemas.Response) @router.post("/follow", summary="Follow订阅分享人", response_model=schemas.Response)
def follow_subscriber( def follow_subscriber(
share_uid: str = None, share_uid: Optional[str] = None,
_: schemas.TokenPayload = Depends(verify_token)) -> Any: _: schemas.TokenPayload = Depends(verify_token)) -> Any:
""" """
Follow订阅分享人 Follow订阅分享人
@@ -546,7 +536,7 @@ def follow_subscriber(
@router.delete("/follow", summary="取消Follow订阅分享人", response_model=schemas.Response) @router.delete("/follow", summary="取消Follow订阅分享人", response_model=schemas.Response)
def unfollow_subscriber( def unfollow_subscriber(
share_uid: str = None, share_uid: Optional[str] = None,
_: schemas.TokenPayload = Depends(verify_token)) -> Any: _: schemas.TokenPayload = Depends(verify_token)) -> Any:
""" """
取消Follow订阅分享人 取消Follow订阅分享人
@@ -560,9 +550,9 @@ def unfollow_subscriber(
@router.get("/shares", summary="查询分享的订阅", response_model=List[schemas.SubscribeShare]) @router.get("/shares", summary="查询分享的订阅", response_model=List[schemas.SubscribeShare])
def popular_subscribes( def popular_subscribes(
name: str = None, name: Optional[str] = None,
page: int = 1, page: Optional[int] = 1,
count: int = 30, count: Optional[int] = 30,
_: schemas.TokenPayload = Depends(verify_token)) -> Any: _: schemas.TokenPayload = Depends(verify_token)) -> Any:
""" """
查询分享的订阅 查询分享的订阅

View File

@@ -5,9 +5,10 @@ import tempfile
from collections import deque from collections import deque
from datetime import datetime from datetime import datetime
from pathlib import Path from pathlib import Path
from typing import Optional, Union from typing import Optional, Union, Annotated
import aiofiles import aiofiles
import pillow_avif # noqa 用于自动注册AVIF支持
from PIL import Image from PIL import Image
from fastapi import APIRouter, Depends, HTTPException, Header, Request, Response from fastapi import APIRouter, Depends, HTTPException, Header, Request, Response
from fastapi.responses import StreamingResponse from fastapi.responses import StreamingResponse
@@ -23,10 +24,11 @@ from app.db.models import User
from app.db.systemconfig_oper import SystemConfigOper from app.db.systemconfig_oper import SystemConfigOper
from app.db.user_oper import get_current_active_superuser from app.db.user_oper import get_current_active_superuser
from app.helper.mediaserver import MediaServerHelper from app.helper.mediaserver import MediaServerHelper
from app.helper.message import MessageHelper from app.helper.message import MessageHelper, MessageQueueManager
from app.helper.progress import ProgressHelper from app.helper.progress import ProgressHelper
from app.helper.rule import RuleHelper from app.helper.rule import RuleHelper
from app.helper.sites import SitesHelper from app.helper.sites import SitesHelper
from app.helper.subscribe import SubscribeHelper
from app.log import logger from app.log import logger
from app.monitor import Monitor from app.monitor import Monitor
from app.scheduler import Scheduler from app.scheduler import Scheduler
@@ -50,7 +52,6 @@ def fetch_image(
""" """
处理图片缓存逻辑支持HTTP缓存和磁盘缓存 处理图片缓存逻辑支持HTTP缓存和磁盘缓存
""" """
if not url: if not url:
raise HTTPException(status_code=404, detail="URL not provided") raise HTTPException(status_code=404, detail="URL not provided")
@@ -68,6 +69,10 @@ def fetch_image(
sanitized_path = SecurityUtils.sanitize_url_path(url) sanitized_path = SecurityUtils.sanitize_url_path(url)
cache_path = settings.CACHE_PATH / "images" / sanitized_path cache_path = settings.CACHE_PATH / "images" / sanitized_path
# 没有文件类型,则添加后缀,在恶意文件类型和实际需求下的折衷选择
if not cache_path.suffix:
cache_path = cache_path.with_suffix(".jpg")
# 确保缓存路径和文件类型合法 # 确保缓存路径和文件类型合法
if not SecurityUtils.is_safe_path(settings.CACHE_PATH, cache_path, settings.SECURITY_IMAGE_SUFFIXES): if not SecurityUtils.is_safe_path(settings.CACHE_PATH, cache_path, settings.SECURITY_IMAGE_SUFFIXES):
raise HTTPException(status_code=400, detail="Invalid cache path or file type") raise HTTPException(status_code=400, detail="Invalid cache path or file type")
@@ -88,7 +93,8 @@ def fetch_image(
# 请求远程图片 # 请求远程图片
referer = "https://movie.douban.com/" if "doubanio.com" in url else None referer = "https://movie.douban.com/" if "doubanio.com" in url else None
proxies = settings.PROXY if proxy else None proxies = settings.PROXY if proxy else None
response = RequestUtils(ua=settings.USER_AGENT, proxies=proxies, referer=referer).get_res(url=url) response = RequestUtils(ua=settings.USER_AGENT, proxies=proxies, referer=referer,
accept_type="image/avif,image/webp,image/apng,*/*").get_res(url=url)
if not response: if not response:
raise HTTPException(status_code=502, detail="Failed to fetch the image from the remote server") raise HTTPException(status_code=502, detail="Failed to fetch the image from the remote server")
@@ -136,7 +142,7 @@ def fetch_image(
def proxy_img( def proxy_img(
imgurl: str, imgurl: str,
proxy: bool = False, proxy: bool = False,
if_none_match: Optional[str] = Header(None), if_none_match: Annotated[str | None, Header()] = None,
_: schemas.TokenPayload = Depends(verify_resource_token) _: schemas.TokenPayload = Depends(verify_resource_token)
) -> Response: ) -> Response:
""" """
@@ -153,7 +159,7 @@ def proxy_img(
@router.get("/cache/image", summary="图片缓存") @router.get("/cache/image", summary="图片缓存")
def cache_img( def cache_img(
url: str, url: str,
if_none_match: Optional[str] = Header(None), if_none_match: Annotated[str | None, Header()] = None,
_: schemas.TokenPayload = Depends(verify_resource_token) _: schemas.TokenPayload = Depends(verify_resource_token)
) -> Response: ) -> Response:
""" """
@@ -174,9 +180,10 @@ def get_global_setting():
exclude={"SECRET_KEY", "RESOURCE_SECRET_KEY", "API_TOKEN", "TMDB_API_KEY", "TVDB_API_KEY", "FANART_API_KEY", exclude={"SECRET_KEY", "RESOURCE_SECRET_KEY", "API_TOKEN", "TMDB_API_KEY", "TVDB_API_KEY", "FANART_API_KEY",
"COOKIECLOUD_KEY", "COOKIECLOUD_PASSWORD", "GITHUB_TOKEN", "REPO_GITHUB_TOKEN"} "COOKIECLOUD_KEY", "COOKIECLOUD_PASSWORD", "GITHUB_TOKEN", "REPO_GITHUB_TOKEN"}
) )
# 追加用户唯一ID # 追加用户唯一ID和订阅分享管理权限
info.update({ info.update({
"USER_UNIQUE_ID": SystemUtils.generate_user_unique_id() "USER_UNIQUE_ID": SubscribeHelper().get_user_uuid(),
"SUBSCRIBE_SHARE_MANAGE": SubscribeHelper().is_admin_user(),
}) })
return schemas.Response(success=True, return schemas.Response(success=True,
data=info) data=info)
@@ -276,6 +283,9 @@ def set_setting(key: str, value: Union[list, dict, bool, int, str] = None,
success, message = settings.update_setting(key=key, value=value) success, message = settings.update_setting(key=key, value=value)
return schemas.Response(success=success, message=message) return schemas.Response(success=success, message=message)
elif key in {item.value for item in SystemConfigKey}: elif key in {item.value for item in SystemConfigKey}:
if isinstance(value, list):
value = list(filter(None, value))
value = value if value else None
SystemConfigOper().set(key, value) SystemConfigOper().set(key, value)
return schemas.Response(success=True) return schemas.Response(success=True)
else: else:
@@ -283,7 +293,8 @@ def set_setting(key: str, value: Union[list, dict, bool, int, str] = None,
@router.get("/message", summary="实时消息") @router.get("/message", summary="实时消息")
async def get_message(request: Request, role: str = "system", _: schemas.TokenPayload = Depends(verify_resource_token)): async def get_message(request: Request, role: Optional[str] = "system",
_: schemas.TokenPayload = Depends(verify_resource_token)):
""" """
实时获取系统消息返回格式为SSE 实时获取系统消息返回格式为SSE
""" """
@@ -304,7 +315,7 @@ async def get_message(request: Request, role: str = "system", _: schemas.TokenPa
@router.get("/logging", summary="实时日志") @router.get("/logging", summary="实时日志")
async def get_logging(request: Request, length: int = 50, logfile: str = "moviepilot.log", async def get_logging(request: Request, length: Optional[int] = 50, logfile: Optional[str] = "moviepilot.log",
_: schemas.TokenPayload = Depends(verify_resource_token)): _: schemas.TokenPayload = Depends(verify_resource_token)):
""" """
实时获取系统日志 实时获取系统日志
@@ -376,7 +387,7 @@ def latest_version(_: schemas.TokenPayload = Depends(verify_token)):
@router.get("/ruletest", summary="过滤规则测试", response_model=schemas.Response) @router.get("/ruletest", summary="过滤规则测试", response_model=schemas.Response)
def ruletest(title: str, def ruletest(title: str,
rulegroup_name: str, rulegroup_name: str,
subtitle: str = None, subtitle: Optional[str] = None,
_: schemas.TokenPayload = Depends(verify_token)): _: schemas.TokenPayload = Depends(verify_token)):
""" """
过滤规则测试,规则类型 1-订阅2-洗版3-搜索 过滤规则测试,规则类型 1-订阅2-洗版3-搜索
@@ -474,6 +485,7 @@ def reload_module(_: User = Depends(get_current_active_superuser)):
""" """
重新加载模块(仅管理员) 重新加载模块(仅管理员)
""" """
MessageQueueManager().init_config()
ModuleManager().reload() ModuleManager().reload()
Scheduler().init() Scheduler().init()
Monitor().init() Monitor().init()
@@ -494,7 +506,7 @@ def run_scheduler(jobid: str,
@router.get("/runscheduler2", summary="运行服务API_TOKEN", response_model=schemas.Response) @router.get("/runscheduler2", summary="运行服务API_TOKEN", response_model=schemas.Response)
def run_scheduler2(jobid: str, def run_scheduler2(jobid: str,
_: str = Depends(verify_apitoken)): _: Annotated[str, Depends(verify_apitoken)]):
""" """
执行命令API_TOKEN认证 执行命令API_TOKEN认证
""" """

View File

@@ -1,4 +1,4 @@
from typing import List, Any from typing import List, Any, Optional
from fastapi import APIRouter, Depends from fastapi import APIRouter, Depends
@@ -61,8 +61,8 @@ def tmdb_recommend(tmdbid: int,
@router.get("/collection/{collection_id}", summary="系列合集详情", response_model=List[schemas.MediaInfo]) @router.get("/collection/{collection_id}", summary="系列合集详情", response_model=List[schemas.MediaInfo])
def tmdb_collection(collection_id: int, def tmdb_collection(collection_id: int,
page: int = 1, page: Optional[int] = 1,
count: int = 20, count: Optional[int] = 20,
_: schemas.TokenPayload = Depends(verify_token)) -> Any: _: schemas.TokenPayload = Depends(verify_token)) -> Any:
""" """
根据合集ID查询合集详情 根据合集ID查询合集详情
@@ -76,7 +76,7 @@ def tmdb_collection(collection_id: int,
@router.get("/credits/{tmdbid}/{type_name}", summary="演员阵容", response_model=List[schemas.MediaPerson]) @router.get("/credits/{tmdbid}/{type_name}", summary="演员阵容", response_model=List[schemas.MediaPerson])
def tmdb_credits(tmdbid: int, def tmdb_credits(tmdbid: int,
type_name: str, type_name: str,
page: int = 1, page: Optional[int] = 1,
_: schemas.TokenPayload = Depends(verify_token)) -> Any: _: schemas.TokenPayload = Depends(verify_token)) -> Any:
""" """
根据TMDBID查询演员阵容type_name: 电影/电视剧 根据TMDBID查询演员阵容type_name: 电影/电视剧
@@ -102,7 +102,7 @@ def tmdb_person(person_id: int,
@router.get("/person/credits/{person_id}", summary="人物参演作品", response_model=List[schemas.MediaInfo]) @router.get("/person/credits/{person_id}", summary="人物参演作品", response_model=List[schemas.MediaInfo])
def tmdb_person_credits(person_id: int, def tmdb_person_credits(person_id: int,
page: int = 1, page: Optional[int] = 1,
_: schemas.TokenPayload = Depends(verify_token)) -> Any: _: schemas.TokenPayload = Depends(verify_token)) -> Any:
""" """
根据人物ID查询人物参演作品 根据人物ID查询人物参演作品
@@ -114,9 +114,9 @@ def tmdb_person_credits(person_id: int,
@router.get("/{tmdbid}/{season}", summary="TMDB季所有集", response_model=List[schemas.TmdbEpisode]) @router.get("/{tmdbid}/{season}", summary="TMDB季所有集", response_model=List[schemas.TmdbEpisode])
def tmdb_season_episodes(tmdbid: int, season: int, def tmdb_season_episodes(tmdbid: int, season: int, episode_group: Optional[str] = None,
_: schemas.TokenPayload = Depends(verify_token)) -> Any: _: schemas.TokenPayload = Depends(verify_token)) -> Any:
""" """
根据TMDBID查询某季的所有信信息 根据TMDBID查询某季的所有信信息
""" """
return TmdbChain().tmdb_episodes(tmdbid=tmdbid, season=season) return TmdbChain().tmdb_episodes(tmdbid=tmdbid, season=season, episode_group=episode_group)

View File

@@ -1,5 +1,5 @@
from pathlib import Path from pathlib import Path
from typing import Any, List from typing import Any, List, Annotated, Optional
from fastapi import APIRouter, Depends from fastapi import APIRouter, Depends
from sqlalchemy.orm import Session from sqlalchemy.orm import Session
@@ -69,7 +69,7 @@ def remove_queue(fileitem: schemas.FileItem, _: schemas.TokenPayload = Depends(v
@router.post("/manual", summary="手动转移", response_model=schemas.Response) @router.post("/manual", summary="手动转移", response_model=schemas.Response)
def manual_transfer(transer_item: ManualTransferItem, def manual_transfer(transer_item: ManualTransferItem,
background: bool = False, background: Optional[bool] = False,
db: Session = Depends(get_db), db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(get_current_active_superuser)) -> Any: _: schemas.TokenPayload = Depends(get_current_active_superuser)) -> Any:
""" """
@@ -146,6 +146,7 @@ def manual_transfer(transer_item: ManualTransferItem,
doubanid=transer_item.doubanid, doubanid=transer_item.doubanid,
mtype=mtype, mtype=mtype,
season=transer_item.season, season=transer_item.season,
episode_group=transer_item.episode_group,
transfer_type=transer_item.transfer_type, transfer_type=transer_item.transfer_type,
epformat=epformat, epformat=epformat,
min_filesize=transer_item.min_filesize, min_filesize=transer_item.min_filesize,
@@ -165,7 +166,7 @@ def manual_transfer(transer_item: ManualTransferItem,
@router.get("/now", summary="立即执行下载器文件整理", response_model=schemas.Response) @router.get("/now", summary="立即执行下载器文件整理", response_model=schemas.Response)
def now(_: str = Depends(verify_apitoken)) -> Any: def now(_: Annotated[str, Depends(verify_apitoken)]) -> Any:
""" """
立即执行下载器文件整理 API_TOKEN认证?token=xxx 立即执行下载器文件整理 API_TOKEN认证?token=xxx
""" """

View File

@@ -1,4 +1,4 @@
from typing import Any from typing import Any, Annotated
from fastapi import APIRouter, BackgroundTasks, Request, Depends from fastapi import APIRouter, BackgroundTasks, Request, Depends
@@ -19,7 +19,7 @@ def start_webhook_chain(body: Any, form: Any, args: Any):
@router.post("/", summary="Webhook消息响应", response_model=schemas.Response) @router.post("/", summary="Webhook消息响应", response_model=schemas.Response)
async def webhook_message(background_tasks: BackgroundTasks, async def webhook_message(background_tasks: BackgroundTasks,
request: Request, request: Request,
_: str = Depends(verify_apitoken) _: Annotated[str, Depends(verify_apitoken)]
) -> Any: ) -> Any:
""" """
Webhook响应配置请求中需要添加参数token=API_TOKEN&source=媒体服务器名 Webhook响应配置请求中需要添加参数token=API_TOKEN&source=媒体服务器名
@@ -33,7 +33,7 @@ async def webhook_message(background_tasks: BackgroundTasks,
@router.get("/", summary="Webhook消息响应", response_model=schemas.Response) @router.get("/", summary="Webhook消息响应", response_model=schemas.Response)
def webhook_message(background_tasks: BackgroundTasks, def webhook_message(background_tasks: BackgroundTasks,
request: Request, _: str = Depends(verify_apitoken)) -> Any: request: Request, _: Annotated[str, Depends(verify_apitoken)]) -> Any:
""" """
Webhook响应配置请求中需要添加参数token=API_TOKEN&source=媒体服务器名 Webhook响应配置请求中需要添加参数token=API_TOKEN&source=媒体服务器名
""" """

View File

@@ -0,0 +1,162 @@
from datetime import datetime
from typing import Any, List, Optional

from fastapi import APIRouter, Depends, HTTPException
from sqlalchemy.orm import Session

from app import schemas
from app.chain.workflow import WorkflowChain
from app.core.config import global_vars
from app.core.workflow import WorkFlowManager
from app.db import get_db
from app.db.models.workflow import Workflow
from app.db.systemconfig_oper import SystemConfigOper
from app.db.user_oper import get_current_active_user
from app.scheduler import Scheduler
# FastAPI router collecting all workflow management endpoints
router = APIRouter()
@router.get("/", summary="所有工作流", response_model=List[schemas.Workflow])
def list_workflows(db: Session = Depends(get_db),
                   _: schemas.TokenPayload = Depends(get_current_active_user)) -> Any:
    """
    Return every workflow stored in the database.
    """
    workflows = Workflow.list(db)
    return workflows
@router.post("/", summary="创建工作流", response_model=schemas.Response)
def create_workflow(workflow: schemas.Workflow,
                    db: Session = Depends(get_db),
                    _: schemas.TokenPayload = Depends(get_current_active_user)) -> Any:
    """
    Create a new workflow.

    Rejects the request when a workflow with the same name already exists;
    otherwise fills in a default add_time (now) and state before persisting.
    """
    duplicate = Workflow.get_by_name(db, workflow.name)
    if duplicate:
        return schemas.Response(success=False, message="已存在相同名称的工作流")
    # Default the creation timestamp to the current local time
    if not workflow.add_time:
        workflow.add_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    # Default state "P" — the same state pause_workflow assigns
    if not workflow.state:
        workflow.state = "P"
    Workflow(**workflow.dict()).create(db)
    return schemas.Response(success=True, message="创建工作流成功")
@router.get("/actions", summary="所有动作", response_model=List[dict])
def list_actions(_: schemas.TokenPayload = Depends(get_current_active_user)) -> Any:
    """
    Return the metadata of every action available to workflows.
    """
    manager = WorkFlowManager()
    return manager.list_actions()
@router.get("/{workflow_id}", summary="工作流详情", response_model=schemas.Workflow)
def get_workflow(workflow_id: int,
                 db: Session = Depends(get_db),
                 _: schemas.TokenPayload = Depends(get_current_active_user)) -> Any:
    """
    Fetch the detail of a single workflow.

    :param workflow_id: primary key of the workflow (path parameter)
    :raises HTTPException: 404 when the workflow does not exist
    """
    workflow = Workflow.get(db, workflow_id)
    # Returning None here would fail response_model validation and surface
    # as a 500; report the missing resource explicitly instead.
    if not workflow:
        raise HTTPException(status_code=404, detail=f"工作流 {workflow_id} 不存在")
    return workflow
@router.put("/{workflow_id}", summary="更新工作流", response_model=schemas.Response)
def update_workflow(workflow_id: int,
                    workflow: schemas.Workflow,
                    db: Session = Depends(get_db),
                    _: schemas.TokenPayload = Depends(get_current_active_user)) -> Any:
    """
    Update an existing workflow.

    The target workflow is identified by the path parameter. Previously the
    handler ignored ``{workflow_id}`` in the route and used the id embedded in
    the request body, so ``PUT /5`` could silently modify a different workflow
    than the URL addressed; now the path parameter is authoritative, matching
    the get/delete endpoints.

    :param workflow_id: primary key of the workflow to update (path parameter)
    :param workflow: new workflow payload (request body)
    """
    wf = Workflow.get(db, workflow_id)
    if not wf:
        return schemas.Response(success=False, message="工作流不存在")
    wf.update(db, workflow.dict())
    return schemas.Response(success=True, message="更新成功")
@router.delete("/{workflow_id}", summary="删除工作流", response_model=schemas.Response)
def delete_workflow(workflow_id: int,
                    db: Session = Depends(get_db),
                    _: schemas.TokenPayload = Depends(get_current_active_user)) -> Any:
    """
    Delete a workflow together with its scheduled job and cached state.
    """
    wf = Workflow.get(db, workflow_id)
    if not wf:
        return schemas.Response(success=False, message="工作流不存在")
    # Unschedule first so the job cannot fire against a deleted row
    Scheduler().remove_workflow_job(wf)
    Workflow.delete(db, workflow_id)
    # Drop the per-workflow cache entry
    SystemConfigOper().delete(f"WorkflowCache-{workflow_id}")
    return schemas.Response(success=True, message="删除成功")
@router.post("/{workflow_id}/run", summary="执行工作流", response_model=schemas.Response)
def run_workflow(workflow_id: int,
                 from_begin: Optional[bool] = True,
                 _: schemas.TokenPayload = Depends(get_current_active_user)) -> Any:
    """
    Trigger a workflow run.

    :param workflow_id: primary key of the workflow to execute
    :param from_begin: True to run from the first step, False to resume
    """
    ok, errmsg = WorkflowChain().process(workflow_id, from_begin=from_begin)
    if ok:
        return schemas.Response(success=True)
    return schemas.Response(success=False, message=errmsg)
@router.post("/{workflow_id}/start", summary="启用工作流", response_model=schemas.Response)
def start_workflow(workflow_id: int,
                   db: Session = Depends(get_db),
                   _: schemas.TokenPayload = Depends(get_current_active_user)) -> Any:
    """
    Enable a workflow: (re)register its scheduled job, then mark its state "W".
    """
    record = Workflow.get(db, workflow_id)
    if not record:
        return schemas.Response(success=False, message="工作流不存在")
    # Register/update the timer job before flipping the persisted state.
    Scheduler().update_workflow_job(record)
    record.update_state(db, workflow_id, "W")
    return schemas.Response(success=True)
@router.post("/{workflow_id}/pause", summary="停用工作流", response_model=schemas.Response)
def pause_workflow(workflow_id: int,
                   db: Session = Depends(get_db),
                   _: schemas.TokenPayload = Depends(get_current_active_user)) -> Any:
    """
    Disable a workflow: unschedule it, stop any running instance, mark it "P".
    """
    record = Workflow.get(db, workflow_id)
    if not record:
        return schemas.Response(success=False, message="工作流不存在")
    # Remove the timer job so no new runs are launched.
    Scheduler().remove_workflow_job(record)
    # Signal any in-flight execution of this workflow to stop.
    global_vars.stop_workflow(workflow_id)
    record.update_state(db, workflow_id, "P")
    return schemas.Response(success=True)
@router.post("/{workflow_id}/reset", summary="重置工作流", response_model=schemas.Response)
def reset_workflow(workflow_id: int,
                   db: Session = Depends(get_db),
                   _: schemas.TokenPayload = Depends(get_current_active_user)) -> Any:
    """
    Reset a workflow: stop any running instance, reset its counters, and
    clear the persisted execution cache.
    """
    record = Workflow.get(db, workflow_id)
    if not record:
        return schemas.Response(success=False, message="工作流不存在")
    # Signal any in-flight execution of this workflow to stop first.
    global_vars.stop_workflow(workflow_id)
    # Reset the record, including its run counters.
    record.reset(db, workflow_id, reset_count=True)
    # Drop the cached intermediate state so the next run starts clean.
    SystemConfigOper().delete(f"WorkflowCache-{workflow_id}")
    return schemas.Response(success=True)

View File

@@ -1,4 +1,4 @@
from typing import Any, List from typing import Any, List, Annotated
from fastapi import APIRouter, HTTPException, Depends from fastapi import APIRouter, HTTPException, Depends
from sqlalchemy.orm import Session from sqlalchemy.orm import Session
@@ -18,7 +18,7 @@ arr_router = APIRouter(tags=['servarr'])
@arr_router.get("/system/status", summary="系统状态") @arr_router.get("/system/status", summary="系统状态")
def arr_system_status(_: str = Depends(verify_apikey)) -> Any: def arr_system_status(_: Annotated[str, Depends(verify_apikey)]) -> Any:
""" """
模拟Radarr、Sonarr系统状态 模拟Radarr、Sonarr系统状态
""" """
@@ -72,7 +72,7 @@ def arr_system_status(_: str = Depends(verify_apikey)) -> Any:
@arr_router.get("/qualityProfile", summary="质量配置") @arr_router.get("/qualityProfile", summary="质量配置")
def arr_qualityProfile(_: str = Depends(verify_apikey)) -> Any: def arr_qualityProfile(_: Annotated[str, Depends(verify_apikey)]) -> Any:
""" """
模拟Radarr、Sonarr质量配置 模拟Radarr、Sonarr质量配置
""" """
@@ -113,7 +113,7 @@ def arr_qualityProfile(_: str = Depends(verify_apikey)) -> Any:
@arr_router.get("/rootfolder", summary="根目录") @arr_router.get("/rootfolder", summary="根目录")
def arr_rootfolder(_: str = Depends(verify_apikey)) -> Any: def arr_rootfolder(_: Annotated[str, Depends(verify_apikey)]) -> Any:
""" """
模拟Radarr、Sonarr根目录 模拟Radarr、Sonarr根目录
""" """
@@ -129,7 +129,7 @@ def arr_rootfolder(_: str = Depends(verify_apikey)) -> Any:
@arr_router.get("/tag", summary="标签") @arr_router.get("/tag", summary="标签")
def arr_tag(_: str = Depends(verify_apikey)) -> Any: def arr_tag(_: Annotated[str, Depends(verify_apikey)]) -> Any:
""" """
模拟Radarr、Sonarr标签 模拟Radarr、Sonarr标签
""" """
@@ -142,7 +142,7 @@ def arr_tag(_: str = Depends(verify_apikey)) -> Any:
@arr_router.get("/languageprofile", summary="语言") @arr_router.get("/languageprofile", summary="语言")
def arr_languageprofile(_: str = Depends(verify_apikey)) -> Any: def arr_languageprofile(_: Annotated[str, Depends(verify_apikey)]) -> Any:
""" """
模拟Radarr、Sonarr语言 模拟Radarr、Sonarr语言
""" """
@@ -168,7 +168,7 @@ def arr_languageprofile(_: str = Depends(verify_apikey)) -> Any:
@arr_router.get("/movie", summary="所有订阅电影", response_model=List[schemas.RadarrMovie]) @arr_router.get("/movie", summary="所有订阅电影", response_model=List[schemas.RadarrMovie])
def arr_movies(_: str = Depends(verify_apikey), db: Session = Depends(get_db)) -> Any: def arr_movies(_: Annotated[str, Depends(verify_apikey)], db: Session = Depends(get_db)) -> Any:
""" """
查询Rardar电影 查询Rardar电影
""" """
@@ -259,7 +259,7 @@ def arr_movies(_: str = Depends(verify_apikey), db: Session = Depends(get_db)) -
@arr_router.get("/movie/lookup", summary="查询电影", response_model=List[schemas.RadarrMovie]) @arr_router.get("/movie/lookup", summary="查询电影", response_model=List[schemas.RadarrMovie])
def arr_movie_lookup(term: str, db: Session = Depends(get_db), _: str = Depends(verify_apikey)) -> Any: def arr_movie_lookup(term: str, _: Annotated[str, Depends(verify_apikey)], db: Session = Depends(get_db)) -> Any:
""" """
查询Rardar电影 term: `tmdb:${id}` 查询Rardar电影 term: `tmdb:${id}`
存在和不存在均不能返回错误 存在和不存在均不能返回错误
@@ -305,7 +305,7 @@ def arr_movie_lookup(term: str, db: Session = Depends(get_db), _: str = Depends(
@arr_router.get("/movie/{mid}", summary="电影订阅详情", response_model=schemas.RadarrMovie) @arr_router.get("/movie/{mid}", summary="电影订阅详情", response_model=schemas.RadarrMovie)
def arr_movie(mid: int, db: Session = Depends(get_db), _: str = Depends(verify_apikey)) -> Any: def arr_movie(mid: int, _: Annotated[str, Depends(verify_apikey)], db: Session = Depends(get_db)) -> Any:
""" """
查询Rardar电影订阅 查询Rardar电影订阅
""" """
@@ -331,9 +331,9 @@ def arr_movie(mid: int, db: Session = Depends(get_db), _: str = Depends(verify_a
@arr_router.post("/movie", summary="新增电影订阅") @arr_router.post("/movie", summary="新增电影订阅")
def arr_add_movie(movie: RadarrMovie, def arr_add_movie(_: Annotated[str, Depends(verify_apikey)],
db: Session = Depends(get_db), movie: RadarrMovie,
_: str = Depends(verify_apikey) db: Session = Depends(get_db)
) -> Any: ) -> Any:
""" """
新增Rardar电影订阅 新增Rardar电影订阅
@@ -362,7 +362,7 @@ def arr_add_movie(movie: RadarrMovie,
@arr_router.delete("/movie/{mid}", summary="删除电影订阅", response_model=schemas.Response) @arr_router.delete("/movie/{mid}", summary="删除电影订阅", response_model=schemas.Response)
def arr_remove_movie(mid: int, db: Session = Depends(get_db), _: str = Depends(verify_apikey)) -> Any: def arr_remove_movie(mid: int, _: Annotated[str, Depends(verify_apikey)], db: Session = Depends(get_db)) -> Any:
""" """
删除Rardar电影订阅 删除Rardar电影订阅
""" """
@@ -378,7 +378,7 @@ def arr_remove_movie(mid: int, db: Session = Depends(get_db), _: str = Depends(v
@arr_router.get("/series", summary="所有剧集", response_model=List[schemas.SonarrSeries]) @arr_router.get("/series", summary="所有剧集", response_model=List[schemas.SonarrSeries])
def arr_series(_: str = Depends(verify_apikey), db: Session = Depends(get_db)) -> Any: def arr_series(_: Annotated[str, Depends(verify_apikey)], db: Session = Depends(get_db)) -> Any:
""" """
查询Sonarr剧集 查询Sonarr剧集
""" """
@@ -514,7 +514,7 @@ def arr_series(_: str = Depends(verify_apikey), db: Session = Depends(get_db)) -
@arr_router.get("/series/lookup", summary="查询剧集") @arr_router.get("/series/lookup", summary="查询剧集")
def arr_series_lookup(term: str, db: Session = Depends(get_db), _: str = Depends(verify_apikey)) -> Any: def arr_series_lookup(term: str, _: Annotated[str, Depends(verify_apikey)], db: Session = Depends(get_db)) -> Any:
""" """
查询Sonarr剧集 term: `tvdb:${id}` title 查询Sonarr剧集 term: `tvdb:${id}` title
""" """
@@ -603,7 +603,7 @@ def arr_series_lookup(term: str, db: Session = Depends(get_db), _: str = Depends
@arr_router.get("/series/{tid}", summary="剧集详情") @arr_router.get("/series/{tid}", summary="剧集详情")
def arr_serie(tid: int, db: Session = Depends(get_db), _: str = Depends(verify_apikey)) -> Any: def arr_serie(tid: int, _: Annotated[str, Depends(verify_apikey)], db: Session = Depends(get_db)) -> Any:
""" """
查询Sonarr剧集 查询Sonarr剧集
""" """
@@ -638,8 +638,8 @@ def arr_serie(tid: int, db: Session = Depends(get_db), _: str = Depends(verify_a
@arr_router.post("/series", summary="新增剧集订阅") @arr_router.post("/series", summary="新增剧集订阅")
def arr_add_series(tv: schemas.SonarrSeries, def arr_add_series(tv: schemas.SonarrSeries,
db: Session = Depends(get_db), _: Annotated[str, Depends(verify_apikey)],
_: str = Depends(verify_apikey)) -> Any: db: Session = Depends(get_db)) -> Any:
""" """
新增Sonarr剧集订阅 新增Sonarr剧集订阅
""" """
@@ -689,7 +689,7 @@ def arr_update_series(tv: schemas.SonarrSeries) -> Any:
@arr_router.delete("/series/{tid}", summary="删除剧集订阅") @arr_router.delete("/series/{tid}", summary="删除剧集订阅")
def arr_remove_series(tid: int, db: Session = Depends(get_db), _: str = Depends(verify_apikey)) -> Any: def arr_remove_series(tid: int, _: Annotated[str, Depends(verify_apikey)], db: Session = Depends(get_db)) -> Any:
""" """
删除Sonarr剧集订阅 删除Sonarr剧集订阅
""" """

View File

@@ -7,7 +7,6 @@ from pathlib import Path
from typing import Optional, Any, Tuple, List, Set, Union, Dict from typing import Optional, Any, Tuple, List, Set, Union, Dict
from qbittorrentapi import TorrentFilesList from qbittorrentapi import TorrentFilesList
from ruamel.yaml import CommentedMap
from transmission_rpc import File from transmission_rpc import File
from app.core.config import settings from app.core.config import settings
@@ -17,7 +16,7 @@ from app.core.meta import MetaBase
from app.core.module import ModuleManager from app.core.module import ModuleManager
from app.db.message_oper import MessageOper from app.db.message_oper import MessageOper
from app.db.user_oper import UserOper from app.db.user_oper import UserOper
from app.helper.message import MessageHelper from app.helper.message import MessageHelper, MessageQueueManager
from app.helper.service import ServiceConfigHelper from app.helper.service import ServiceConfigHelper
from app.log import logger from app.log import logger
from app.schemas import TransferInfo, TransferTorrent, ExistMediaInfo, DownloadingTorrent, CommingMessage, Notification, \ from app.schemas import TransferInfo, TransferTorrent, ExistMediaInfo, DownloadingTorrent, CommingMessage, Notification, \
@@ -39,6 +38,9 @@ class ChainBase(metaclass=ABCMeta):
self.eventmanager = EventManager() self.eventmanager = EventManager()
self.messageoper = MessageOper() self.messageoper = MessageOper()
self.messagehelper = MessageHelper() self.messagehelper = MessageHelper()
self.messagequeue = MessageQueueManager(
send_callback=self.run_module
)
self.useroper = UserOper() self.useroper = UserOper()
@staticmethod @staticmethod
@@ -77,7 +79,7 @@ class ChainBase(metaclass=ABCMeta):
""" """
cache_path = settings.TEMP_PATH / filename cache_path = settings.TEMP_PATH / filename
if cache_path.exists(): if cache_path.exists():
Path(cache_path).unlink() cache_path.unlink()
def run_module(self, method: str, *args, **kwargs) -> Any: def run_module(self, method: str, *args, **kwargs) -> Any:
""" """
@@ -92,10 +94,10 @@ class ChainBase(metaclass=ABCMeta):
if isinstance(ret, tuple): if isinstance(ret, tuple):
return all(value is None for value in ret) return all(value is None for value in ret)
else: else:
return result is None return ret is None
logger.debug(f"请求模块执行:{method} ...")
result = None result = None
logger.debug(f"请求模块执行:{method} ...")
modules = self.modulemanager.get_running_modules(method) modules = self.modulemanager.get_running_modules(method)
# 按优先级排序 # 按优先级排序
modules = sorted(modules, key=lambda x: x.get_priority()) modules = sorted(modules, key=lambda x: x.get_priority())
@@ -144,10 +146,11 @@ class ChainBase(metaclass=ABCMeta):
return result return result
def recognize_media(self, meta: MetaBase = None, def recognize_media(self, meta: MetaBase = None,
mtype: MediaType = None, mtype: Optional[MediaType] = None,
tmdbid: int = None, tmdbid: Optional[int] = None,
doubanid: str = None, doubanid: Optional[str] = None,
bangumiid: int = None, bangumiid: Optional[int] = None,
episode_group: Optional[str] = None,
cache: bool = True) -> Optional[MediaInfo]: cache: bool = True) -> Optional[MediaInfo]:
""" """
识别媒体信息不含Fanart图片 识别媒体信息不含Fanart图片
@@ -156,6 +159,7 @@ class ChainBase(metaclass=ABCMeta):
:param tmdbid: tmdbid :param tmdbid: tmdbid
:param doubanid: 豆瓣ID :param doubanid: 豆瓣ID
:param bangumiid: BangumiID :param bangumiid: BangumiID
:param episode_group: 剧集组
:param cache: 是否使用缓存 :param cache: 是否使用缓存
:return: 识别的媒体信息,包括剧集信息 :return: 识别的媒体信息,包括剧集信息
""" """
@@ -171,10 +175,11 @@ class ChainBase(metaclass=ABCMeta):
doubanid = None doubanid = None
bangumiid = None bangumiid = None
return self.run_module("recognize_media", meta=meta, mtype=mtype, return self.run_module("recognize_media", meta=meta, mtype=mtype,
tmdbid=tmdbid, doubanid=doubanid, bangumiid=bangumiid, cache=cache) tmdbid=tmdbid, doubanid=doubanid, bangumiid=bangumiid,
episode_group=episode_group, cache=cache)
def match_doubaninfo(self, name: str, imdbid: str = None, def match_doubaninfo(self, name: str, imdbid: Optional[str] = None,
mtype: MediaType = None, year: str = None, season: int = None, mtype: Optional[MediaType] = None, year: Optional[str] = None, season: Optional[int] = None,
raise_exception: bool = False) -> Optional[dict]: raise_exception: bool = False) -> Optional[dict]:
""" """
搜索和匹配豆瓣信息 搜索和匹配豆瓣信息
@@ -188,8 +193,8 @@ class ChainBase(metaclass=ABCMeta):
return self.run_module("match_doubaninfo", name=name, imdbid=imdbid, return self.run_module("match_doubaninfo", name=name, imdbid=imdbid,
mtype=mtype, year=year, season=season, raise_exception=raise_exception) mtype=mtype, year=year, season=season, raise_exception=raise_exception)
def match_tmdbinfo(self, name: str, mtype: MediaType = None, def match_tmdbinfo(self, name: str, mtype: Optional[MediaType] = None,
year: str = None, season: int = None) -> Optional[dict]: year: Optional[str] = None, season: Optional[int] = None) -> Optional[dict]:
""" """
搜索和匹配TMDB信息 搜索和匹配TMDB信息
:param name: 标题 :param name: 标题
@@ -209,8 +214,8 @@ class ChainBase(metaclass=ABCMeta):
return self.run_module("obtain_images", mediainfo=mediainfo) return self.run_module("obtain_images", mediainfo=mediainfo)
def obtain_specific_image(self, mediaid: Union[str, int], mtype: MediaType, def obtain_specific_image(self, mediaid: Union[str, int], mtype: MediaType,
image_type: MediaImageType, image_prefix: str = None, image_type: MediaImageType, image_prefix: Optional[str] = None,
season: int = None, episode: int = None) -> Optional[str]: season: Optional[int] = None, episode: Optional[int] = None) -> Optional[str]:
""" """
获取指定媒体信息图片,返回图片地址 获取指定媒体信息图片,返回图片地址
:param mediaid: 媒体ID :param mediaid: 媒体ID
@@ -224,7 +229,7 @@ class ChainBase(metaclass=ABCMeta):
image_prefix=image_prefix, image_type=image_type, image_prefix=image_prefix, image_type=image_type,
season=season, episode=episode) season=season, episode=episode)
def douban_info(self, doubanid: str, mtype: MediaType = None, def douban_info(self, doubanid: str, mtype: Optional[MediaType] = None,
raise_exception: bool = False) -> Optional[dict]: raise_exception: bool = False) -> Optional[dict]:
""" """
获取豆瓣信息 获取豆瓣信息
@@ -243,7 +248,7 @@ class ChainBase(metaclass=ABCMeta):
""" """
return self.run_module("tvdb_info", tvdbid=tvdbid) return self.run_module("tvdb_info", tvdbid=tvdbid)
def tmdb_info(self, tmdbid: int, mtype: MediaType, season: int = None) -> Optional[dict]: def tmdb_info(self, tmdbid: int, mtype: MediaType, season: Optional[int] = None) -> Optional[dict]:
""" """
获取TMDB信息 获取TMDB信息
:param tmdbid: int :param tmdbid: int
@@ -308,10 +313,10 @@ class ChainBase(metaclass=ABCMeta):
""" """
return self.run_module("search_collections", name=name) return self.run_module("search_collections", name=name)
def search_torrents(self, site: CommentedMap, def search_torrents(self, site: dict,
keywords: List[str], keywords: List[str],
mtype: MediaType = None, mtype: Optional[MediaType] = None,
page: int = 0) -> List[TorrentInfo]: page: Optional[int] = 0) -> List[TorrentInfo]:
""" """
搜索一个站点的种子资源 搜索一个站点的种子资源
:param site: 站点 :param site: 站点
@@ -323,13 +328,17 @@ class ChainBase(metaclass=ABCMeta):
return self.run_module("search_torrents", site=site, keywords=keywords, return self.run_module("search_torrents", site=site, keywords=keywords,
mtype=mtype, page=page) mtype=mtype, page=page)
def refresh_torrents(self, site: CommentedMap) -> List[TorrentInfo]: def refresh_torrents(self, site: dict, keyword: Optional[str] = None,
cat: Optional[str] = None, page: Optional[int] = 0) -> List[TorrentInfo]:
""" """
获取站点最新一页的种子,多个站点需要多线程处理 获取站点最新一页的种子,多个站点需要多线程处理
:param site: 站点 :param site: 站点
:param keyword: 标题
:param cat: 分类
:param page: 页码
:reutrn: 种子资源列表 :reutrn: 种子资源列表
""" """
return self.run_module("refresh_torrents", site=site) return self.run_module("refresh_torrents", site=site, keyword=keyword, cat=cat, page=page)
def filter_torrents(self, rule_groups: List[str], def filter_torrents(self, rule_groups: List[str],
torrent_list: List[TorrentInfo], torrent_list: List[TorrentInfo],
@@ -345,8 +354,8 @@ class ChainBase(metaclass=ABCMeta):
torrent_list=torrent_list, mediainfo=mediainfo) torrent_list=torrent_list, mediainfo=mediainfo)
def download(self, content: Union[Path, str], download_dir: Path, cookie: str, def download(self, content: Union[Path, str], download_dir: Path, cookie: str,
episodes: Set[int] = None, category: str = None, episodes: Set[int] = None, category: Optional[str] = None, label: Optional[str] = None,
downloader: str = None downloader: Optional[str] = None
) -> Optional[Tuple[Optional[str], Optional[str], Optional[str], str]]: ) -> Optional[Tuple[Optional[str], Optional[str], Optional[str], str]]:
""" """
根据种子文件,选择并添加下载任务 根据种子文件,选择并添加下载任务
@@ -355,11 +364,12 @@ class ChainBase(metaclass=ABCMeta):
:param cookie: cookie :param cookie: cookie
:param episodes: 需要下载的集数 :param episodes: 需要下载的集数
:param category: 种子分类 :param category: 种子分类
:param label: 标签
:param downloader: 下载器 :param downloader: 下载器
:return: 下载器名称、种子Hash、种子文件布局、错误原因 :return: 下载器名称、种子Hash、种子文件布局、错误原因
""" """
return self.run_module("download", content=content, download_dir=download_dir, return self.run_module("download", content=content, download_dir=download_dir,
cookie=cookie, episodes=episodes, category=category, cookie=cookie, episodes=episodes, category=category, label=label,
downloader=downloader) downloader=downloader)
def download_added(self, context: Context, download_dir: Path, torrent_path: Path = None) -> None: def download_added(self, context: Context, download_dir: Path, torrent_path: Path = None) -> None:
@@ -375,7 +385,7 @@ class ChainBase(metaclass=ABCMeta):
def list_torrents(self, status: TorrentStatus = None, def list_torrents(self, status: TorrentStatus = None,
hashs: Union[list, str] = None, hashs: Union[list, str] = None,
downloader: str = None downloader: Optional[str] = None
) -> Optional[List[Union[TransferTorrent, DownloadingTorrent]]]: ) -> Optional[List[Union[TransferTorrent, DownloadingTorrent]]]:
""" """
获取下载器种子列表 获取下载器种子列表
@@ -388,8 +398,8 @@ class ChainBase(metaclass=ABCMeta):
def transfer(self, fileitem: FileItem, meta: MetaBase, mediainfo: MediaInfo, def transfer(self, fileitem: FileItem, meta: MetaBase, mediainfo: MediaInfo,
target_directory: TransferDirectoryConf = None, target_directory: TransferDirectoryConf = None,
target_storage: str = None, target_path: Path = None, target_storage: Optional[str] = None, target_path: Path = None,
transfer_type: str = None, scrape: bool = None, transfer_type: Optional[str] = None, scrape: bool = None,
library_type_folder: bool = None, library_category_folder: bool = None, library_type_folder: bool = None, library_category_folder: bool = None,
episodes_info: List[TmdbEpisode] = None) -> Optional[TransferInfo]: episodes_info: List[TmdbEpisode] = None) -> Optional[TransferInfo]:
""" """
@@ -416,7 +426,7 @@ class ChainBase(metaclass=ABCMeta):
library_category_folder=library_category_folder, library_category_folder=library_category_folder,
episodes_info=episodes_info) episodes_info=episodes_info)
def transfer_completed(self, hashs: str, downloader: str = None) -> None: def transfer_completed(self, hashs: str, downloader: Optional[str] = None) -> None:
""" """
下载器转移完成后的处理 下载器转移完成后的处理
:param hashs: 种子Hash :param hashs: 种子Hash
@@ -425,7 +435,7 @@ class ChainBase(metaclass=ABCMeta):
return self.run_module("transfer_completed", hashs=hashs, downloader=downloader) return self.run_module("transfer_completed", hashs=hashs, downloader=downloader)
def remove_torrents(self, hashs: Union[str, list], delete_file: bool = True, def remove_torrents(self, hashs: Union[str, list], delete_file: bool = True,
downloader: str = None) -> bool: downloader: Optional[str] = None) -> bool:
""" """
删除下载器种子 删除下载器种子
:param hashs: 种子Hash :param hashs: 种子Hash
@@ -435,7 +445,7 @@ class ChainBase(metaclass=ABCMeta):
""" """
return self.run_module("remove_torrents", hashs=hashs, delete_file=delete_file, downloader=downloader) return self.run_module("remove_torrents", hashs=hashs, delete_file=delete_file, downloader=downloader)
def start_torrents(self, hashs: Union[list, str], downloader: str = None) -> bool: def start_torrents(self, hashs: Union[list, str], downloader: Optional[str] = None) -> bool:
""" """
开始下载 开始下载
:param hashs: 种子Hash :param hashs: 种子Hash
@@ -444,7 +454,7 @@ class ChainBase(metaclass=ABCMeta):
""" """
return self.run_module("start_torrents", hashs=hashs, downloader=downloader) return self.run_module("start_torrents", hashs=hashs, downloader=downloader)
def stop_torrents(self, hashs: Union[list, str], downloader: str = None) -> bool: def stop_torrents(self, hashs: Union[list, str], downloader: Optional[str] = None) -> bool:
""" """
停止下载 停止下载
:param hashs: 种子Hash :param hashs: 种子Hash
@@ -454,7 +464,7 @@ class ChainBase(metaclass=ABCMeta):
return self.run_module("stop_torrents", hashs=hashs, downloader=downloader) return self.run_module("stop_torrents", hashs=hashs, downloader=downloader)
def torrent_files(self, tid: str, def torrent_files(self, tid: str,
downloader: str = None) -> Optional[Union[TorrentFilesList, List[File]]]: downloader: Optional[str] = None) -> Optional[Union[TorrentFilesList, List[File]]]:
""" """
获取种子文件 获取种子文件
:param tid: 种子Hash :param tid: 种子Hash
@@ -463,8 +473,8 @@ class ChainBase(metaclass=ABCMeta):
""" """
return self.run_module("torrent_files", tid=tid, downloader=downloader) return self.run_module("torrent_files", tid=tid, downloader=downloader)
def media_exists(self, mediainfo: MediaInfo, itemid: str = None, def media_exists(self, mediainfo: MediaInfo, itemid: Optional[str] = None,
server: str = None) -> Optional[ExistMediaInfo]: server: Optional[str] = None) -> Optional[ExistMediaInfo]:
""" """
判断媒体文件是否存在 判断媒体文件是否存在
:param mediainfo: 识别的媒体信息 :param mediainfo: 识别的媒体信息
@@ -488,11 +498,6 @@ class ChainBase(metaclass=ABCMeta):
:param message: 消息体 :param message: 消息体
:return: 成功或失败 :return: 成功或失败
""" """
logger.info(f"发送消息channel={message.channel}"
f"source={message.source},"
f"title={message.title}, "
f"text={message.text}"
f"userid={message.userid}")
# 保存原消息 # 保存原消息
self.messagehelper.put(message, role="user", title=message.title) self.messagehelper.put(message, role="user", title=message.title)
self.messageoper.add(**message.dict()) self.messageoper.add(**message.dict())
@@ -542,13 +547,13 @@ class ChainBase(metaclass=ABCMeta):
# 按设定发送 # 按设定发送
self.eventmanager.send_event(etype=EventType.NoticeMessage, self.eventmanager.send_event(etype=EventType.NoticeMessage,
data={**send_message.dict(), "type": send_message.mtype}) data={**send_message.dict(), "type": send_message.mtype})
self.run_module("post_message", message=send_message) self.messagequeue.send_message("post_message", message=send_message)
if not send_orignal: if not send_orignal:
return return
# 发送消息事件 # 发送消息事件
self.eventmanager.send_event(etype=EventType.NoticeMessage, data={**message.dict(), "type": message.mtype}) self.eventmanager.send_event(etype=EventType.NoticeMessage, data={**message.dict(), "type": message.mtype})
# 按原消息发送 # 按原消息发送
self.run_module("post_message", message=message) self.messagequeue.send_message("post_message", message=message)
def post_medias_message(self, message: Notification, medias: List[MediaInfo]) -> None: def post_medias_message(self, message: Notification, medias: List[MediaInfo]) -> None:
""" """
@@ -560,7 +565,7 @@ class ChainBase(metaclass=ABCMeta):
note_list = [media.to_dict() for media in medias] note_list = [media.to_dict() for media in medias]
self.messagehelper.put(message, role="user", note=note_list, title=message.title) self.messagehelper.put(message, role="user", note=note_list, title=message.title)
self.messageoper.add(**message.dict(), note=note_list) self.messageoper.add(**message.dict(), note=note_list)
return self.run_module("post_medias_message", message=message, medias=medias) return self.messagequeue.send_message("post_medias_message", message=message, medias=medias)
def post_torrents_message(self, message: Notification, torrents: List[Context]) -> None: def post_torrents_message(self, message: Notification, torrents: List[Context]) -> None:
""" """
@@ -572,9 +577,10 @@ class ChainBase(metaclass=ABCMeta):
note_list = [torrent.torrent_info.to_dict() for torrent in torrents] note_list = [torrent.torrent_info.to_dict() for torrent in torrents]
self.messagehelper.put(message, role="user", note=note_list, title=message.title) self.messagehelper.put(message, role="user", note=note_list, title=message.title)
self.messageoper.add(**message.dict(), note=note_list) self.messageoper.add(**message.dict(), note=note_list)
return self.run_module("post_torrents_message", message=message, torrents=torrents) return self.messagequeue.send_message("post_torrents_message", message=message, torrents=torrents)
def metadata_img(self, mediainfo: MediaInfo, season: int = None, episode: int = None) -> Optional[dict]: def metadata_img(self, mediainfo: MediaInfo,
season: Optional[int] = None, episode: Optional[int] = None) -> Optional[dict]:
""" """
获取图片名称和url 获取图片名称和url
:param mediainfo: 媒体信息 :param mediainfo: 媒体信息

View File

@@ -9,13 +9,13 @@ class DashboardChain(ChainBase, metaclass=Singleton):
""" """
各类仪表板统计处理链 各类仪表板统计处理链
""" """
def media_statistic(self, server: str = None) -> Optional[List[schemas.Statistic]]: def media_statistic(self, server: Optional[str] = None) -> Optional[List[schemas.Statistic]]:
""" """
媒体数量统计 媒体数量统计
""" """
return self.run_module("media_statistic", server=server) return self.run_module("media_statistic", server=server)
def downloader_info(self, downloader: str = None) -> Optional[List[schemas.DownloaderInfo]]: def downloader_info(self, downloader: Optional[str] = None) -> Optional[List[schemas.DownloaderInfo]]:
""" """
下载器信息 下载器信息
""" """

View File

@@ -19,7 +19,7 @@ class DoubanChain(ChainBase, metaclass=Singleton):
""" """
return self.run_module("douban_person_detail", person_id=person_id) return self.run_module("douban_person_detail", person_id=person_id)
def person_credits(self, person_id: int, page: int = 1) -> List[MediaInfo]: def person_credits(self, person_id: int, page: Optional[int] = 1) -> List[MediaInfo]:
""" """
根据人物ID查询人物参演作品 根据人物ID查询人物参演作品
:param person_id: 人物ID :param person_id: 人物ID
@@ -27,7 +27,7 @@ class DoubanChain(ChainBase, metaclass=Singleton):
""" """
return self.run_module("douban_person_credits", person_id=person_id, page=page) return self.run_module("douban_person_credits", person_id=person_id, page=page)
def movie_top250(self, page: int = 1, count: int = 30) -> Optional[List[MediaInfo]]: def movie_top250(self, page: Optional[int] = 1, count: Optional[int] = 30) -> Optional[List[MediaInfo]]:
""" """
获取豆瓣电影TOP250 获取豆瓣电影TOP250
:param page: 页码 :param page: 页码
@@ -35,26 +35,26 @@ class DoubanChain(ChainBase, metaclass=Singleton):
""" """
return self.run_module("movie_top250", page=page, count=count) return self.run_module("movie_top250", page=page, count=count)
def movie_showing(self, page: int = 1, count: int = 30) -> Optional[List[MediaInfo]]: def movie_showing(self, page: Optional[int] = 1, count: Optional[int] = 30) -> Optional[List[MediaInfo]]:
""" """
获取正在上映的电影 获取正在上映的电影
""" """
return self.run_module("movie_showing", page=page, count=count) return self.run_module("movie_showing", page=page, count=count)
def tv_weekly_chinese(self, page: int = 1, count: int = 30) -> Optional[List[MediaInfo]]: def tv_weekly_chinese(self, page: Optional[int] = 1, count: Optional[int] = 30) -> Optional[List[MediaInfo]]:
""" """
获取本周中国剧集榜 获取本周中国剧集榜
""" """
return self.run_module("tv_weekly_chinese", page=page, count=count) return self.run_module("tv_weekly_chinese", page=page, count=count)
def tv_weekly_global(self, page: int = 1, count: int = 30) -> Optional[List[MediaInfo]]: def tv_weekly_global(self, page: Optional[int] = 1, count: Optional[int] = 30) -> Optional[List[MediaInfo]]:
""" """
获取本周全球剧集榜 获取本周全球剧集榜
""" """
return self.run_module("tv_weekly_global", page=page, count=count) return self.run_module("tv_weekly_global", page=page, count=count)
def douban_discover(self, mtype: MediaType, sort: str, tags: str, def douban_discover(self, mtype: MediaType, sort: str, tags: str,
page: int = 0, count: int = 30) -> Optional[List[MediaInfo]]: page: Optional[int] = 0, count: Optional[int] = 30) -> Optional[List[MediaInfo]]:
""" """
发现豆瓣电影、剧集 发现豆瓣电影、剧集
:param mtype: 媒体类型 :param mtype: 媒体类型
@@ -67,19 +67,19 @@ class DoubanChain(ChainBase, metaclass=Singleton):
return self.run_module("douban_discover", mtype=mtype, sort=sort, tags=tags, return self.run_module("douban_discover", mtype=mtype, sort=sort, tags=tags,
page=page, count=count) page=page, count=count)
def tv_animation(self, page: int = 1, count: int = 30) -> Optional[List[MediaInfo]]: def tv_animation(self, page: Optional[int] = 1, count: Optional[int] = 30) -> Optional[List[MediaInfo]]:
""" """
获取动画剧集 获取动画剧集
""" """
return self.run_module("tv_animation", page=page, count=count) return self.run_module("tv_animation", page=page, count=count)
def movie_hot(self, page: int = 1, count: int = 30) -> Optional[List[MediaInfo]]: def movie_hot(self, page: Optional[int] = 1, count: Optional[int] = 30) -> Optional[List[MediaInfo]]:
""" """
获取热门电影 获取热门电影
""" """
return self.run_module("movie_hot", page=page, count=count) return self.run_module("movie_hot", page=page, count=count)
def tv_hot(self, page: int = 1, count: int = 30) -> Optional[List[MediaInfo]]: def tv_hot(self, page: Optional[int] = 1, count: Optional[int] = 30) -> Optional[List[MediaInfo]]:
""" """
获取热门剧集 获取热门剧集
""" """

View File

@@ -39,8 +39,8 @@ class DownloadChain(ChainBase):
self.messagehelper = MessageHelper() self.messagehelper = MessageHelper()
def post_download_message(self, meta: MetaBase, mediainfo: MediaInfo, torrent: TorrentInfo, def post_download_message(self, meta: MetaBase, mediainfo: MediaInfo, torrent: TorrentInfo,
channel: MessageChannel = None, username: str = None, channel: MessageChannel = None, username: Optional[str] = None,
download_episodes: str = None): download_episodes: Optional[str] = None):
""" """
发送添加下载的消息,根据消息场景开关决定发给谁 发送添加下载的消息,根据消息场景开关决定发给谁
:param meta: 元数据 :param meta: 元数据
@@ -97,7 +97,7 @@ class DownloadChain(ChainBase):
def download_torrent(self, torrent: TorrentInfo, def download_torrent(self, torrent: TorrentInfo,
channel: MessageChannel = None, channel: MessageChannel = None,
source: str = None, source: Optional[str] = None,
userid: Union[str, int] = None userid: Union[str, int] = None
) -> Tuple[Optional[Union[Path, str]], str, list]: ) -> Tuple[Optional[Union[Path, str]], str, list]:
""" """
@@ -105,7 +105,7 @@ class DownloadChain(ChainBase):
:return: 种子路径,种子目录名,种子文件清单 :return: 种子路径,种子目录名,种子文件清单
""" """
def __get_redict_url(url: str, ua: str = None, cookie: str = None) -> Optional[str]: def __get_redict_url(url: str, ua: Optional[str] = None, cookie: Optional[str] = None) -> Optional[str]:
""" """
获取下载链接, url格式[base64]url 获取下载链接, url格式[base64]url
""" """
@@ -204,12 +204,12 @@ class DownloadChain(ChainBase):
def download_single(self, context: Context, torrent_file: Path = None, def download_single(self, context: Context, torrent_file: Path = None,
episodes: Set[int] = None, episodes: Set[int] = None,
channel: MessageChannel = None, channel: MessageChannel = None,
source: str = None, source: Optional[str] = None,
downloader: str = None, downloader: Optional[str] = None,
save_path: str = None, save_path: Optional[str] = None,
userid: Union[str, int] = None, userid: Union[str, int] = None,
username: str = None, username: Optional[str] = None,
media_category: str = None) -> Optional[str]: label: Optional[str] = None) -> Optional[str]:
""" """
下载及发送通知 下载及发送通知
:param context: 资源上下文 :param context: 资源上下文
@@ -221,8 +221,13 @@ class DownloadChain(ChainBase):
:param save_path: 保存路径 :param save_path: 保存路径
:param userid: 用户ID :param userid: 用户ID
:param username: 调用下载的用户名/插件名 :param username: 调用下载的用户名/插件名
:param media_category: 自定义媒体类别 :param label: 自定义标签
""" """
_torrent = context.torrent_info
_media = context.media_info
_meta = context.meta_info
_site_downloader = _torrent.site_downloader
# 发送资源下载事件,允许外部拦截下载 # 发送资源下载事件,允许外部拦截下载
event_data = ResourceDownloadEventData( event_data = ResourceDownloadEventData(
context=context, context=context,
@@ -234,7 +239,7 @@ class DownloadChain(ChainBase):
"save_path": save_path, "save_path": save_path,
"userid": userid, "userid": userid,
"username": username, "username": username,
"media_category": media_category "media_category": _media.category
} }
) )
# 触发资源下载事件 # 触发资源下载事件
@@ -248,15 +253,11 @@ class DownloadChain(ChainBase):
f"Reason: {event_data.reason}") f"Reason: {event_data.reason}")
return None return None
_torrent = context.torrent_info
_media = context.media_info
_meta = context.meta_info
_site_downloader = _torrent.site_downloader
# 补充完整的media数据 # 补充完整的media数据
if not _media.genre_ids: if not _media.genre_ids:
new_media = self.recognize_media(mtype=_media.type, tmdbid=_media.tmdb_id, new_media = self.recognize_media(mtype=_media.type, tmdbid=_media.tmdb_id,
doubanid=_media.douban_id, bangumiid=_media.bangumi_id) doubanid=_media.douban_id, bangumiid=_media.bangumi_id,
episode_group=_media.episode_group)
if new_media: if new_media:
_media = new_media _media = new_media
@@ -310,6 +311,7 @@ class DownloadChain(ChainBase):
episodes=episodes, episodes=episodes,
download_dir=download_dir, download_dir=download_dir,
category=_media.category, category=_media.category,
label=label,
downloader=downloader or _site_downloader) downloader=downloader or _site_downloader)
if result: if result:
_downloader, _hash, _layout, error_msg = result _downloader, _hash, _layout, error_msg = result
@@ -352,7 +354,8 @@ class DownloadChain(ChainBase):
username=username, username=username,
channel=channel.value if channel else None, channel=channel.value if channel else None,
date=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), date=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
media_category=media_category, media_category=_media.category,
episode_group=_media.episode_group,
note={"source": source} note={"source": source}
) )
@@ -415,13 +418,12 @@ class DownloadChain(ChainBase):
def batch_download(self, def batch_download(self,
contexts: List[Context], contexts: List[Context],
no_exists: Dict[Union[int, str], Dict[int, NotExistMediaInfo]] = None, no_exists: Dict[Union[int, str], Dict[int, NotExistMediaInfo]] = None,
save_path: str = None, save_path: Optional[str] = None,
channel: MessageChannel = None, channel: MessageChannel = None,
source: str = None, source: Optional[str] = None,
userid: str = None, userid: Optional[str] = None,
username: str = None, username: Optional[str] = None,
media_category: str = None, downloader: Optional[str] = None
downloader: str = None
) -> Tuple[List[Context], Dict[Union[int, str], Dict[int, NotExistMediaInfo]]]: ) -> Tuple[List[Context], Dict[Union[int, str], Dict[int, NotExistMediaInfo]]]:
""" """
根据缺失数据,自动种子列表中组合择优下载 根据缺失数据,自动种子列表中组合择优下载
@@ -432,7 +434,6 @@ class DownloadChain(ChainBase):
:param source: 来源(消息通知、订阅、手工下载等) :param source: 来源(消息通知、订阅、手工下载等)
:param userid: 用户ID :param userid: 用户ID
:param username: 调用下载的用户名/插件名 :param username: 调用下载的用户名/插件名
:param media_category: 自定义媒体类别
:param downloader: 下载器 :param downloader: 下载器
:return: 已经下载的资源列表、剩余未下载到的剧集 no_exists[tmdb_id/douban_id] = {season: NotExistMediaInfo} :return: 已经下载的资源列表、剩余未下载到的剧集 no_exists[tmdb_id/douban_id] = {season: NotExistMediaInfo}
""" """
@@ -521,7 +522,7 @@ class DownloadChain(ChainBase):
logger.info(f"开始下载电影 {context.torrent_info.title} ...") logger.info(f"开始下载电影 {context.torrent_info.title} ...")
if self.download_single(context, save_path=save_path, channel=channel, if self.download_single(context, save_path=save_path, channel=channel,
source=source, userid=userid, username=username, source=source, userid=userid, username=username,
media_category=media_category, downloader=downloader): downloader=downloader):
# 下载成功 # 下载成功
logger.info(f"{context.torrent_info.title} 添加下载成功") logger.info(f"{context.torrent_info.title} 添加下载成功")
downloaded_list.append(context) downloaded_list.append(context)
@@ -606,8 +607,7 @@ class DownloadChain(ChainBase):
source=source, source=source,
userid=userid, userid=userid,
username=username, username=username,
media_category=media_category, downloader=downloader
downloader=downloader,
) )
else: else:
# 下载 # 下载
@@ -615,7 +615,6 @@ class DownloadChain(ChainBase):
download_id = self.download_single(context, save_path=save_path, download_id = self.download_single(context, save_path=save_path,
channel=channel, source=source, channel=channel, source=source,
userid=userid, username=username, userid=userid, username=username,
media_category=media_category,
downloader=downloader) downloader=downloader)
if download_id: if download_id:
@@ -687,7 +686,6 @@ class DownloadChain(ChainBase):
download_id = self.download_single(context, save_path=save_path, download_id = self.download_single(context, save_path=save_path,
channel=channel, source=source, channel=channel, source=source,
userid=userid, username=username, userid=userid, username=username,
media_category=media_category,
downloader=downloader) downloader=downloader)
if download_id: if download_id:
# 下载成功 # 下载成功
@@ -777,7 +775,6 @@ class DownloadChain(ChainBase):
source=source, source=source,
userid=userid, userid=userid,
username=username, username=username,
media_category=media_category,
downloader=downloader downloader=downloader
) )
if not download_id: if not download_id:
@@ -863,7 +860,8 @@ class DownloadChain(ChainBase):
# 补充媒体信息 # 补充媒体信息
mediainfo: MediaInfo = self.recognize_media(mtype=mediainfo.type, mediainfo: MediaInfo = self.recognize_media(mtype=mediainfo.type,
tmdbid=mediainfo.tmdb_id, tmdbid=mediainfo.tmdb_id,
doubanid=mediainfo.douban_id) doubanid=mediainfo.douban_id,
episode_group=mediainfo.episode_group)
if not mediainfo: if not mediainfo:
logger.error(f"媒体信息识别失败!") logger.error(f"媒体信息识别失败!")
return False, {} return False, {}
@@ -930,7 +928,7 @@ class DownloadChain(ChainBase):
# 全部存在 # 全部存在
return True, no_exists return True, no_exists
def remote_downloading(self, channel: MessageChannel, userid: Union[str, int] = None, source: str = None): def remote_downloading(self, channel: MessageChannel, userid: Union[str, int] = None, source: Optional[str] = None):
""" """
查询正在下载的任务,并发送消息 查询正在下载的任务,并发送消息
""" """
@@ -964,7 +962,7 @@ class DownloadChain(ChainBase):
link=settings.MP_DOMAIN('#/downloading') link=settings.MP_DOMAIN('#/downloading')
)) ))
def downloading(self, name: str = None) -> List[DownloadingTorrent]: def downloading(self, name: Optional[str] = None) -> List[DownloadingTorrent]:
""" """
查询正在下载的任务 查询正在下载的任务
""" """

View File

@@ -32,7 +32,7 @@ class MediaChain(ChainBase, metaclass=Singleton):
self.storagechain = StorageChain() self.storagechain = StorageChain()
def metadata_nfo(self, meta: MetaBase, mediainfo: MediaInfo, def metadata_nfo(self, meta: MetaBase, mediainfo: MediaInfo,
season: int = None, episode: int = None) -> Optional[str]: season: Optional[int] = None, episode: Optional[int] = None) -> Optional[str]:
""" """
获取NFO文件内容文本 获取NFO文件内容文本
:param meta: 元数据 :param meta: 元数据
@@ -42,13 +42,13 @@ class MediaChain(ChainBase, metaclass=Singleton):
""" """
return self.run_module("metadata_nfo", meta=meta, mediainfo=mediainfo, season=season, episode=episode) return self.run_module("metadata_nfo", meta=meta, mediainfo=mediainfo, season=season, episode=episode)
def recognize_by_meta(self, metainfo: MetaBase) -> Optional[MediaInfo]: def recognize_by_meta(self, metainfo: MetaBase, episode_group: Optional[str] = None) -> Optional[MediaInfo]:
""" """
根据主副标题识别媒体信息 根据主副标题识别媒体信息
""" """
title = metainfo.title title = metainfo.title
# 识别媒体信息 # 识别媒体信息
mediainfo: MediaInfo = self.recognize_media(meta=metainfo) mediainfo: MediaInfo = self.recognize_media(meta=metainfo, episode_group=episode_group)
if not mediainfo: if not mediainfo:
# 尝试使用辅助识别,如果有注册响应事件的话 # 尝试使用辅助识别,如果有注册响应事件的话
if eventmanager.check(ChainEventType.NameRecognize): if eventmanager.check(ChainEventType.NameRecognize):
@@ -112,7 +112,7 @@ class MediaChain(ChainBase, metaclass=Singleton):
# 重新识别 # 重新识别
return self.recognize_media(meta=org_meta) return self.recognize_media(meta=org_meta)
def recognize_by_path(self, path: str) -> Optional[Context]: def recognize_by_path(self, path: str, episode_group: Optional[str] = None) -> Optional[Context]:
""" """
根据文件路径识别媒体信息 根据文件路径识别媒体信息
""" """
@@ -121,7 +121,7 @@ class MediaChain(ChainBase, metaclass=Singleton):
# 元数据 # 元数据
file_meta = MetaInfoPath(file_path) file_meta = MetaInfoPath(file_path)
# 识别媒体信息 # 识别媒体信息
mediainfo = self.recognize_media(meta=file_meta) mediainfo = self.recognize_media(meta=file_meta, episode_group=episode_group)
if not mediainfo: if not mediainfo:
# 尝试使用辅助识别,如果有注册响应事件的话 # 尝试使用辅助识别,如果有注册响应事件的话
if eventmanager.check(ChainEventType.NameRecognize): if eventmanager.check(ChainEventType.NameRecognize):
@@ -238,7 +238,7 @@ class MediaChain(ChainBase, metaclass=Singleton):
return None return None
def get_doubaninfo_by_tmdbid(self, tmdbid: int, def get_doubaninfo_by_tmdbid(self, tmdbid: int,
mtype: MediaType = None, season: int = None) -> Optional[dict]: mtype: MediaType = None, season: Optional[int] = None) -> Optional[dict]:
""" """
根据TMDBID获取豆瓣信息 根据TMDBID获取豆瓣信息
""" """
@@ -375,7 +375,7 @@ class MediaChain(ChainBase, metaclass=Singleton):
if item: if item:
logger.info(f"已保存文件:{item.path}") logger.info(f"已保存文件:{item.path}")
else: else:
logger.warn(f"文件保存失败:{item.path}") logger.warn(f"文件保存失败:{_path}")
finally: finally:
if tmp_file.exists(): if tmp_file.exists():
tmp_file.unlink() tmp_file.unlink()
@@ -474,7 +474,8 @@ class MediaChain(ChainBase, metaclass=Singleton):
if not file_meta.begin_episode: if not file_meta.begin_episode:
logger.warn(f"{filepath.name} 无法识别文件集数!") logger.warn(f"{filepath.name} 无法识别文件集数!")
return return
file_mediainfo = self.recognize_media(meta=file_meta, tmdbid=mediainfo.tmdb_id) file_mediainfo = self.recognize_media(meta=file_meta, tmdbid=mediainfo.tmdb_id,
episode_group=mediainfo.episode_group)
if not file_mediainfo: if not file_mediainfo:
logger.warn(f"{filepath.name} 无法识别文件媒体信息!") logger.warn(f"{filepath.name} 无法识别文件媒体信息!")
return return
@@ -483,7 +484,8 @@ class MediaChain(ChainBase, metaclass=Singleton):
if overwrite or not self.storagechain.get_file_item(storage=fileitem.storage, path=nfo_path): if overwrite or not self.storagechain.get_file_item(storage=fileitem.storage, path=nfo_path):
# 获取集的nfo文件 # 获取集的nfo文件
episode_nfo = self.metadata_nfo(meta=file_meta, mediainfo=file_mediainfo, episode_nfo = self.metadata_nfo(meta=file_meta, mediainfo=file_mediainfo,
season=file_meta.begin_season, episode=file_meta.begin_episode) season=file_meta.begin_season,
episode=file_meta.begin_episode)
if episode_nfo: if episode_nfo:
# 保存或上传nfo文件到上级目录 # 保存或上传nfo文件到上级目录
if not parent: if not parent:

View File

@@ -1,5 +1,5 @@
import threading import threading
from typing import List, Union, Optional, Generator from typing import List, Union, Optional, Generator, Any
from app.chain import ChainBase from app.chain import ChainBase
from app.core.cache import cached from app.core.cache import cached
@@ -21,14 +21,15 @@ class MediaServerChain(ChainBase):
super().__init__() super().__init__()
self.dboper = MediaServerOper() self.dboper = MediaServerOper()
def librarys(self, server: str, username: str = None, hidden: bool = False) -> List[MediaServerLibrary]: def librarys(self, server: str, username: Optional[str] = None,
hidden: bool = False) -> List[MediaServerLibrary]:
""" """
获取媒体服务器所有媒体库 获取媒体服务器所有媒体库
""" """
return self.run_module("mediaserver_librarys", server=server, username=username, hidden=hidden) return self.run_module("mediaserver_librarys", server=server, username=username, hidden=hidden)
def items(self, server: str, library_id: Union[str, int], start_index: int = 0, limit: Optional[int] = -1) \ def items(self, server: str, library_id: Union[str, int],
-> Optional[Generator]: start_index: Optional[int] = 0, limit: Optional[int] = -1) -> Generator[Any, None, None]:
""" """
获取媒体服务器项目列表,支持分页和不分页逻辑,默认不分页获取所有数据 获取媒体服务器项目列表,支持分页和不分页逻辑,默认不分页获取所有数据
@@ -81,28 +82,31 @@ class MediaServerChain(ChainBase):
""" """
return self.run_module("mediaserver_tv_episodes", server=server, item_id=item_id) return self.run_module("mediaserver_tv_episodes", server=server, item_id=item_id)
def playing(self, server: str, count: int = 20, username: str = None) -> List[MediaServerPlayItem]: def playing(self, server: str, count: Optional[int] = 20,
username: Optional[str] = None) -> List[MediaServerPlayItem]:
""" """
获取媒体服务器正在播放信息 获取媒体服务器正在播放信息
""" """
return self.run_module("mediaserver_playing", count=count, server=server, username=username) return self.run_module("mediaserver_playing", count=count, server=server, username=username)
def latest(self, server: str, count: int = 20, username: str = None) -> List[MediaServerPlayItem]: def latest(self, server: str, count: Optional[int] = 20,
username: Optional[str] = None) -> List[MediaServerPlayItem]:
""" """
获取媒体服务器最新入库条目 获取媒体服务器最新入库条目
""" """
return self.run_module("mediaserver_latest", count=count, server=server, username=username) return self.run_module("mediaserver_latest", count=count, server=server, username=username)
@cached(maxsize=1, ttl=3600) @cached(maxsize=1, ttl=3600)
def get_latest_wallpapers(self, server: str = None, count: int = 10, def get_latest_wallpapers(self, server: Optional[str] = None, count: Optional[int] = 10,
remote: bool = True, username: str = None) -> List[str]: remote: bool = True, username: Optional[str] = None) -> List[str]:
""" """
获取最新最新入库条目海报作为壁纸缓存1小时 获取最新最新入库条目海报作为壁纸缓存1小时
""" """
return self.run_module("mediaserver_latest_images", server=server, count=count, return self.run_module("mediaserver_latest_images", server=server, count=count,
remote=remote, username=username) remote=remote, username=username)
def get_latest_wallpaper(self, server: str = None, remote: bool = True, username: str = None) -> Optional[str]: def get_latest_wallpaper(self, server: Optional[str] = None,
remote: bool = True, username: Optional[str] = None) -> Optional[str]:
""" """
获取最新最新入库条目海报作为壁纸缓存1小时 获取最新最新入库条目海报作为壁纸缓存1小时
""" """

View File

@@ -1,8 +1,9 @@
import io import io
import tempfile import tempfile
from pathlib import Path from pathlib import Path
from typing import List from typing import List, Optional
import pillow_avif # noqa 用于自动注册AVIF支持
from PIL import Image from PIL import Image
from app.chain import ChainBase from app.chain import ChainBase
@@ -116,6 +117,10 @@ class RecommendChain(ChainBase, metaclass=Singleton):
sanitized_path = SecurityUtils.sanitize_url_path(url) sanitized_path = SecurityUtils.sanitize_url_path(url)
cache_path = settings.CACHE_PATH / "images" / sanitized_path cache_path = settings.CACHE_PATH / "images" / sanitized_path
# 没有文件类型,则添加后缀,在恶意文件类型和实际需求下的折衷选择
if not cache_path.suffix:
cache_path = cache_path.with_suffix(".jpg")
# 确保缓存路径和文件类型合法 # 确保缓存路径和文件类型合法
if not SecurityUtils.is_safe_path(settings.CACHE_PATH, cache_path, settings.SECURITY_IMAGE_SUFFIXES): if not SecurityUtils.is_safe_path(settings.CACHE_PATH, cache_path, settings.SECURITY_IMAGE_SUFFIXES):
logger.debug(f"Invalid cache path or file type for URL: {url}, sanitized path: {sanitized_path}") logger.debug(f"Invalid cache path or file type for URL: {url}, sanitized path: {sanitized_path}")
@@ -157,15 +162,15 @@ class RecommendChain(ChainBase, metaclass=Singleton):
@log_execution_time(logger=logger) @log_execution_time(logger=logger)
@cached(ttl=recommend_ttl, region=recommend_cache_region) @cached(ttl=recommend_ttl, region=recommend_cache_region)
def tmdb_movies(self, sort_by: str = "popularity.desc", def tmdb_movies(self, sort_by: Optional[str] = "popularity.desc",
with_genres: str = "", with_genres: Optional[str] = "",
with_original_language: str = "", with_original_language: Optional[str] = "",
with_keywords: str = "", with_keywords: Optional[str] = "",
with_watch_providers: str = "", with_watch_providers: Optional[str] = "",
vote_average: float = 0, vote_average: Optional[float] = 0.0,
vote_count: int = 0, vote_count: Optional[int] = 0,
release_date: str = "", release_date: Optional[str] = "",
page: int = 1) -> List[dict]: page: Optional[int] = 1) -> List[dict]:
""" """
TMDB热门电影 TMDB热门电影
""" """
@@ -183,15 +188,15 @@ class RecommendChain(ChainBase, metaclass=Singleton):
@log_execution_time(logger=logger) @log_execution_time(logger=logger)
@cached(ttl=recommend_ttl, region=recommend_cache_region) @cached(ttl=recommend_ttl, region=recommend_cache_region)
def tmdb_tvs(self, sort_by: str = "popularity.desc", def tmdb_tvs(self, sort_by: Optional[str] = "popularity.desc",
with_genres: str = "", with_genres: Optional[str] = "",
with_original_language: str = "zh|en|ja|ko", with_original_language: Optional[str] = "zh|en|ja|ko",
with_keywords: str = "", with_keywords: Optional[str] = "",
with_watch_providers: str = "", with_watch_providers: Optional[str] = "",
vote_average: float = 0, vote_average: Optional[float] = 0.0,
vote_count: int = 0, vote_count: Optional[int] = 0,
release_date: str = "", release_date: Optional[str] = "",
page: int = 1) -> List[dict]: page: Optional[int] = 1) -> List[dict]:
""" """
TMDB热门电视剧 TMDB热门电视剧
""" """
@@ -209,7 +214,7 @@ class RecommendChain(ChainBase, metaclass=Singleton):
@log_execution_time(logger=logger) @log_execution_time(logger=logger)
@cached(ttl=recommend_ttl, region=recommend_cache_region) @cached(ttl=recommend_ttl, region=recommend_cache_region)
def tmdb_trending(self, page: int = 1) -> List[dict]: def tmdb_trending(self, page: Optional[int] = 1) -> List[dict]:
""" """
TMDB流行趋势 TMDB流行趋势
""" """
@@ -218,7 +223,7 @@ class RecommendChain(ChainBase, metaclass=Singleton):
@log_execution_time(logger=logger) @log_execution_time(logger=logger)
@cached(ttl=recommend_ttl, region=recommend_cache_region) @cached(ttl=recommend_ttl, region=recommend_cache_region)
def bangumi_calendar(self, page: int = 1, count: int = 30) -> List[dict]: def bangumi_calendar(self, page: Optional[int] = 1, count: Optional[int] = 30) -> List[dict]:
""" """
Bangumi每日放送 Bangumi每日放送
""" """
@@ -227,7 +232,7 @@ class RecommendChain(ChainBase, metaclass=Singleton):
@log_execution_time(logger=logger) @log_execution_time(logger=logger)
@cached(ttl=recommend_ttl, region=recommend_cache_region) @cached(ttl=recommend_ttl, region=recommend_cache_region)
def douban_movie_showing(self, page: int = 1, count: int = 30) -> List[dict]: def douban_movie_showing(self, page: Optional[int] = 1, count: Optional[int] = 30) -> List[dict]:
""" """
豆瓣正在热映 豆瓣正在热映
""" """
@@ -236,7 +241,8 @@ class RecommendChain(ChainBase, metaclass=Singleton):
@log_execution_time(logger=logger) @log_execution_time(logger=logger)
@cached(ttl=recommend_ttl, region=recommend_cache_region) @cached(ttl=recommend_ttl, region=recommend_cache_region)
def douban_movies(self, sort: str = "R", tags: str = "", page: int = 1, count: int = 30) -> List[dict]: def douban_movies(self, sort: Optional[str] = "R", tags: Optional[str] = "",
page: Optional[int] = 1, count: Optional[int] = 30) -> List[dict]:
""" """
豆瓣最新电影 豆瓣最新电影
""" """
@@ -246,7 +252,8 @@ class RecommendChain(ChainBase, metaclass=Singleton):
@log_execution_time(logger=logger) @log_execution_time(logger=logger)
@cached(ttl=recommend_ttl, region=recommend_cache_region) @cached(ttl=recommend_ttl, region=recommend_cache_region)
def douban_tvs(self, sort: str = "R", tags: str = "", page: int = 1, count: int = 30) -> List[dict]: def douban_tvs(self, sort: Optional[str] = "R", tags: Optional[str] = "",
page: Optional[int] = 1, count: Optional[int] = 30) -> List[dict]:
""" """
豆瓣最新电视剧 豆瓣最新电视剧
""" """
@@ -256,7 +263,7 @@ class RecommendChain(ChainBase, metaclass=Singleton):
@log_execution_time(logger=logger) @log_execution_time(logger=logger)
@cached(ttl=recommend_ttl, region=recommend_cache_region) @cached(ttl=recommend_ttl, region=recommend_cache_region)
def douban_movie_top250(self, page: int = 1, count: int = 30) -> List[dict]: def douban_movie_top250(self, page: Optional[int] = 1, count: Optional[int] = 30) -> List[dict]:
""" """
豆瓣电影TOP250 豆瓣电影TOP250
""" """
@@ -265,7 +272,7 @@ class RecommendChain(ChainBase, metaclass=Singleton):
@log_execution_time(logger=logger) @log_execution_time(logger=logger)
@cached(ttl=recommend_ttl, region=recommend_cache_region) @cached(ttl=recommend_ttl, region=recommend_cache_region)
def douban_tv_weekly_chinese(self, page: int = 1, count: int = 30) -> List[dict]: def douban_tv_weekly_chinese(self, page: Optional[int] = 1, count: Optional[int] = 30) -> List[dict]:
""" """
豆瓣国产剧集榜 豆瓣国产剧集榜
""" """
@@ -274,7 +281,7 @@ class RecommendChain(ChainBase, metaclass=Singleton):
@log_execution_time(logger=logger) @log_execution_time(logger=logger)
@cached(ttl=recommend_ttl, region=recommend_cache_region) @cached(ttl=recommend_ttl, region=recommend_cache_region)
def douban_tv_weekly_global(self, page: int = 1, count: int = 30) -> List[dict]: def douban_tv_weekly_global(self, page: Optional[int] = 1, count: Optional[int] = 30) -> List[dict]:
""" """
豆瓣全球剧集榜 豆瓣全球剧集榜
""" """
@@ -283,7 +290,7 @@ class RecommendChain(ChainBase, metaclass=Singleton):
@log_execution_time(logger=logger) @log_execution_time(logger=logger)
@cached(ttl=recommend_ttl, region=recommend_cache_region) @cached(ttl=recommend_ttl, region=recommend_cache_region)
def douban_tv_animation(self, page: int = 1, count: int = 30) -> List[dict]: def douban_tv_animation(self, page: Optional[int] = 1, count: Optional[int] = 30) -> List[dict]:
""" """
豆瓣热门动漫 豆瓣热门动漫
""" """
@@ -292,7 +299,7 @@ class RecommendChain(ChainBase, metaclass=Singleton):
@log_execution_time(logger=logger) @log_execution_time(logger=logger)
@cached(ttl=recommend_ttl, region=recommend_cache_region) @cached(ttl=recommend_ttl, region=recommend_cache_region)
def douban_movie_hot(self, page: int = 1, count: int = 30) -> List[dict]: def douban_movie_hot(self, page: Optional[int] = 1, count: Optional[int] = 30) -> List[dict]:
""" """
豆瓣热门电影 豆瓣热门电影
""" """
@@ -301,7 +308,7 @@ class RecommendChain(ChainBase, metaclass=Singleton):
@log_execution_time(logger=logger) @log_execution_time(logger=logger)
@cached(ttl=recommend_ttl, region=recommend_cache_region) @cached(ttl=recommend_ttl, region=recommend_cache_region)
def douban_tv_hot(self, page: int = 1, count: int = 30) -> List[dict]: def douban_tv_hot(self, page: Optional[int] = 1, count: Optional[int] = 30) -> List[dict]:
""" """
豆瓣热门电视剧 豆瓣热门电视剧
""" """

View File

@@ -34,8 +34,9 @@ class SearchChain(ChainBase):
self.systemconfig = SystemConfigOper() self.systemconfig = SystemConfigOper()
self.torrenthelper = TorrentHelper() self.torrenthelper = TorrentHelper()
def search_by_id(self, tmdbid: int = None, doubanid: str = None, def search_by_id(self, tmdbid: Optional[int] = None, doubanid: Optional[str] = None,
mtype: MediaType = None, area: str = "title", season: int = None) -> List[Context]: mtype: MediaType = None, area: Optional[str] = "title", season: Optional[int] = None,
sites: List[int] = None, cache_local: bool = False) -> List[Context]:
""" """
根据TMDBID/豆瓣ID搜索资源精确匹配不过滤本地存在的资源 根据TMDBID/豆瓣ID搜索资源精确匹配不过滤本地存在的资源
:param tmdbid: TMDB ID :param tmdbid: TMDB ID
@@ -43,6 +44,8 @@ class SearchChain(ChainBase):
:param mtype: 媒体,电影 or 电视剧 :param mtype: 媒体,电影 or 电视剧
:param area: 搜索范围title or imdbid :param area: 搜索范围title or imdbid
:param season: 季数 :param season: 季数
:param sites: 站点ID列表
:param cache_local: 是否缓存到本地
""" """
mediainfo = self.recognize_media(tmdbid=tmdbid, doubanid=doubanid, mtype=mtype) mediainfo = self.recognize_media(tmdbid=tmdbid, doubanid=doubanid, mtype=mtype)
if not mediainfo: if not mediainfo:
@@ -55,25 +58,27 @@ class SearchChain(ChainBase):
season: NotExistMediaInfo(episodes=[]) season: NotExistMediaInfo(episodes=[])
} }
} }
results = self.process(mediainfo=mediainfo, area=area, no_exists=no_exists) results = self.process(mediainfo=mediainfo, sites=sites, area=area, no_exists=no_exists)
# 保存到本地文件 # 保存到本地文件
bytes_results = pickle.dumps(results) if cache_local:
self.save_cache(bytes_results, self.__result_temp_file) self.save_cache(pickle.dumps(results), self.__result_temp_file)
return results return results
def search_by_title(self, title: str, page: int = 0, site: int = None) -> List[Context]: def search_by_title(self, title: str, page: Optional[int] = 0,
sites: List[int] = None, cache_local: Optional[bool] = False) -> List[Context]:
""" """
根据标题搜索资源,不识别不过滤,直接返回站点内容 根据标题搜索资源,不识别不过滤,直接返回站点内容
:param title: 标题,为空时返回所有站点首页内容 :param title: 标题,为空时返回所有站点首页内容
:param page: 页码 :param page: 页码
:param site: 站点ID :param sites: 站点ID列表
:param cache_local: 是否缓存到本地
""" """
if title: if title:
logger.info(f'开始搜索资源,关键词:{title} ...') logger.info(f'开始搜索资源,关键词:{title} ...')
else: else:
logger.info(f'开始浏览资源,站点:{site} ...') logger.info(f'开始浏览资源,站点:{sites} ...')
# 搜索 # 搜索
torrents = self.__search_all_sites(keywords=[title], sites=[site] if site else None, page=page) or [] torrents = self.__search_all_sites(keywords=[title], sites=sites, page=page) or []
if not torrents: if not torrents:
logger.warn(f'{title} 未搜索到资源') logger.warn(f'{title} 未搜索到资源')
return [] return []
@@ -81,8 +86,8 @@ class SearchChain(ChainBase):
contexts = [Context(meta_info=MetaInfo(title=torrent.title, subtitle=torrent.description), contexts = [Context(meta_info=MetaInfo(title=torrent.title, subtitle=torrent.description),
torrent_info=torrent) for torrent in torrents] torrent_info=torrent) for torrent in torrents]
# 保存到本地文件 # 保存到本地文件
bytes_results = pickle.dumps(contexts) if cache_local:
self.save_cache(bytes_results, self.__result_temp_file) self.save_cache(pickle.dumps(contexts), self.__result_temp_file)
return contexts return contexts
def last_search_results(self) -> List[Context]: def last_search_results(self) -> List[Context]:
@@ -100,11 +105,11 @@ class SearchChain(ChainBase):
return [] return []
def process(self, mediainfo: MediaInfo, def process(self, mediainfo: MediaInfo,
keyword: str = None, keyword: Optional[str] = None,
no_exists: Dict[int, Dict[int, NotExistMediaInfo]] = None, no_exists: Dict[int, Dict[int, NotExistMediaInfo]] = None,
sites: List[int] = None, sites: List[int] = None,
rule_groups: List[str] = None, rule_groups: List[str] = None,
area: str = "title", area: Optional[str] = "title",
custom_words: List[str] = None, custom_words: List[str] = None,
filter_params: Dict[str, str] = None) -> List[Context]: filter_params: Dict[str, str] = None) -> List[Context]:
""" """
@@ -286,8 +291,8 @@ class SearchChain(ChainBase):
def __search_all_sites(self, keywords: List[str], def __search_all_sites(self, keywords: List[str],
mediainfo: Optional[MediaInfo] = None, mediainfo: Optional[MediaInfo] = None,
sites: List[int] = None, sites: List[int] = None,
page: int = 0, page: Optional[int] = 0,
area: str = "title") -> Optional[List[TorrentInfo]]: area: Optional[str] = "title") -> Optional[List[TorrentInfo]]:
""" """
多线程搜索多个站点 多线程搜索多个站点
:param mediainfo: 识别的媒体信息 :param mediainfo: 识别的媒体信息
@@ -307,11 +312,6 @@ class SearchChain(ChainBase):
for indexer in self.siteshelper.get_indexers(): for indexer in self.siteshelper.get_indexers():
# 检查站点索引开关 # 检查站点索引开关
if not sites or indexer.get("id") in sites: if not sites or indexer.get("id") in sites:
# 站点流控
state, msg = self.siteshelper.check(indexer.get("domain"))
if state:
logger.warn(msg)
continue
indexer_sites.append(indexer) indexer_sites.append(indexer)
if not indexer_sites: if not indexer_sites:
logger.warn('未开启任何有效站点,无法搜索资源') logger.warn('未开启任何有效站点,无法搜索资源')

View File

@@ -1,12 +1,10 @@
import base64 import base64
import re import re
from datetime import datetime from datetime import datetime
from time import time
from typing import Optional, Tuple, Union, Dict from typing import Optional, Tuple, Union, Dict
from urllib.parse import urljoin from urllib.parse import urljoin
from lxml import etree from lxml import etree
from ruamel.yaml import CommentedMap
from app.chain import ChainBase from app.chain import ChainBase
from app.core.config import global_vars, settings from app.core.config import global_vars, settings
@@ -53,9 +51,10 @@ class SiteChain(ChainBase):
"1ptba.com": self.__indexphp_test, "1ptba.com": self.__indexphp_test,
"star-space.net": self.__indexphp_test, "star-space.net": self.__indexphp_test,
"yemapt.org": self.__yema_test, "yemapt.org": self.__yema_test,
"hddolby.com": self.__hddolby_test,
} }
def refresh_userdata(self, site: CommentedMap = None) -> Optional[SiteUserData]: def refresh_userdata(self, site: dict = None) -> Optional[SiteUserData]:
""" """
刷新站点的用户数据 刷新站点的用户数据
:param site: 站点 :param site: 站点
@@ -178,12 +177,9 @@ class SiteChain(ChainBase):
domain = StringUtils.get_url_domain(site.url) domain = StringUtils.get_url_domain(site.url)
url = f"https://api.{domain}/api/member/profile" url = f"https://api.{domain}/api/member/profile"
headers = { headers = {
"Content-Type": "application/json",
"User-Agent": user_agent, "User-Agent": user_agent,
"Accept": "application/json, text/plain, */*", "Accept": "application/json, text/plain, */*",
"Authorization": site.token,
"x-api-key": site.apikey, "x-api-key": site.apikey,
"ts": str(int(time()))
} }
res = RequestUtils( res = RequestUtils(
headers=headers, headers=headers,
@@ -193,27 +189,10 @@ class SiteChain(ChainBase):
if res is None: if res is None:
return False, "无法打开网站!" return False, "无法打开网站!"
if res.status_code == 200: if res.status_code == 200:
state = False
message = "鉴权已过期或无效"
user_info = res.json() or {} user_info = res.json() or {}
if user_info.get("data"): if user_info.get("data"):
# 更新最后访问时间 return True, "连接成功"
del headers["x-api-key"] return False, user_info.get("message", "鉴权已过期或无效")
res = RequestUtils(headers=headers,
timeout=site.timeout or 15,
proxies=settings.PROXY if site.proxy else None,
referer=f"{site.url}index"
).post_res(url=f"https://api.{domain}/api/member/updateLastBrowse")
state = True
message = "连接成功,但更新状态失败"
if res and res.status_code == 200:
update_info = res.json() or {}
if "code" in update_info and int(update_info["code"]) == 0:
message = "连接成功"
elif user_info.get("message"):
# 使用馒头的错误提示
message = user_info.get("message")
return state, message
else: else:
return False, f"错误:{res.status_code} {res.reason}" return False, f"错误:{res.status_code} {res.reason}"
@@ -252,6 +231,32 @@ class SiteChain(ChainBase):
site.url = f"{site.url}index.php" site.url = f"{site.url}index.php"
return self.__test(site) return self.__test(site)
@staticmethod
def __hddolby_test(site: Site) -> Tuple[bool, str]:
"""
判断站点是否已经登陆hddolby
"""
url = f"{site.url}api/v1/user/data"
headers = {
"Content-Type": "application/json",
"Accept": "application/json, text/plain, */*",
"x-api-key": site.apikey,
}
res = RequestUtils(
headers=headers,
proxies=settings.PROXY if site.proxy else None,
timeout=site.timeout or 15
).get_res(url=url)
if res is None:
return False, "无法打开网站!"
if res.status_code == 200:
user_info = res.json()
if user_info and user_info.get("status") == 0:
return True, "连接成功"
return False, "APIKEY已过期"
else:
return False, f"错误:{res.status_code} {res.reason}"
@staticmethod @staticmethod
def __parse_favicon(url: str, cookie: str, ua: str) -> Tuple[str, Optional[str]]: def __parse_favicon(url: str, cookie: str, ua: str) -> Tuple[str, Optional[str]]:
""" """
@@ -584,7 +589,7 @@ class SiteChain(ChainBase):
return True, "连接成功" return True, "连接成功"
def remote_list(self, channel: MessageChannel, def remote_list(self, channel: MessageChannel,
userid: Union[str, int] = None, source: str = None): userid: Union[str, int] = None, source: Optional[str] = None):
""" """
查询所有站点,发送消息 查询所有站点,发送消息
""" """
@@ -618,7 +623,7 @@ class SiteChain(ChainBase):
) )
def remote_disable(self, arg_str: str, channel: MessageChannel, def remote_disable(self, arg_str: str, channel: MessageChannel,
userid: Union[str, int] = None, source: str = None): userid: Union[str, int] = None, source: Optional[str] = None):
""" """
禁用站点 禁用站点
""" """
@@ -643,7 +648,7 @@ class SiteChain(ChainBase):
self.remote_list(channel=channel, userid=userid, source=source) self.remote_list(channel=channel, userid=userid, source=source)
def remote_enable(self, arg_str: str, channel: MessageChannel, def remote_enable(self, arg_str: str, channel: MessageChannel,
userid: Union[str, int] = None, source: str = None): userid: Union[str, int] = None, source: Optional[str] = None):
""" """
启用站点 启用站点
""" """
@@ -669,7 +674,7 @@ class SiteChain(ChainBase):
self.remote_list(channel=channel, userid=userid, source=source) self.remote_list(channel=channel, userid=userid, source=source)
def update_cookie(self, site_info: Site, def update_cookie(self, site_info: Site,
username: str, password: str, two_step_code: str = None) -> Tuple[bool, str]: username: str, password: str, two_step_code: Optional[str] = None) -> Tuple[bool, str]:
""" """
根据用户名密码更新站点Cookie 根据用户名密码更新站点Cookie
:param site_info: 站点信息 :param site_info: 站点信息
@@ -698,7 +703,7 @@ class SiteChain(ChainBase):
return False, "未知错误" return False, "未知错误"
def remote_cookie(self, arg_str: str, channel: MessageChannel, def remote_cookie(self, arg_str: str, channel: MessageChannel,
userid: Union[str, int] = None, source: str = None): userid: Union[str, int] = None, source: Optional[str] = None):
""" """
使用用户名密码更新站点Cookie 使用用户名密码更新站点Cookie
""" """
@@ -768,7 +773,7 @@ class SiteChain(ChainBase):
userid=userid)) userid=userid))
def remote_refresh_userdatas(self, channel: MessageChannel, def remote_refresh_userdatas(self, channel: MessageChannel,
userid: Union[str, int] = None, source: str = None): userid: Union[str, int] = None, source: Optional[str] = None):
""" """
刷新所有站点用户数据 刷新所有站点用户数据
""" """

View File

@@ -63,7 +63,7 @@ class StorageChain(ChainBase):
return self.run_module("download_file", fileitem=fileitem, path=path) return self.run_module("download_file", fileitem=fileitem, path=path)
def upload_file(self, fileitem: schemas.FileItem, path: Path, def upload_file(self, fileitem: schemas.FileItem, path: Path,
new_name: str = None) -> Optional[schemas.FileItem]: new_name: Optional[str] = None) -> Optional[schemas.FileItem]:
""" """
上传文件 上传文件
:param fileitem: 保存目录项 :param fileitem: 保存目录项
@@ -84,6 +84,12 @@ class StorageChain(ChainBase):
""" """
return self.run_module("rename_file", fileitem=fileitem, name=name) return self.run_module("rename_file", fileitem=fileitem, name=name)
def exists(self, fileitem: schemas.FileItem) -> Optional[bool]:
"""
判断文件或目录是否存在
"""
return True if self.get_item(fileitem) else False
def get_item(self, fileitem: schemas.FileItem) -> Optional[schemas.FileItem]: def get_item(self, fileitem: schemas.FileItem) -> Optional[schemas.FileItem]:
""" """
查询目录或文件 查询目录或文件

View File

@@ -56,17 +56,18 @@ class SubscribeChain(ChainBase, metaclass=Singleton):
def add(self, title: str, year: str, def add(self, title: str, year: str,
mtype: MediaType = None, mtype: MediaType = None,
tmdbid: int = None, tmdbid: Optional[int] = None,
doubanid: str = None, doubanid: Optional[str] = None,
bangumiid: int = None, bangumiid: Optional[int] = None,
mediaid: str = None, mediaid: Optional[str] = None,
season: int = None, episode_group: Optional[str] = None,
season: Optional[int] = None,
channel: MessageChannel = None, channel: MessageChannel = None,
source: str = None, source: Optional[str] = None,
userid: str = None, userid: Optional[str] = None,
username: str = None, username: Optional[str] = None,
message: bool = True, message: Optional[bool] = True,
exist_ok: bool = False, exist_ok: Optional[bool] = False,
**kwargs) -> Tuple[Optional[int], str]: **kwargs) -> Tuple[Optional[int], str]:
""" """
识别媒体信息并添加订阅 识别媒体信息并添加订阅
@@ -117,7 +118,8 @@ class SubscribeChain(ChainBase, metaclass=Singleton):
mediainfo = __get_event_meida(mediaid, metainfo) mediainfo = __get_event_meida(mediaid, metainfo)
else: else:
# 使用TMDBID识别 # 使用TMDBID识别
mediainfo = self.recognize_media(meta=metainfo, mtype=mtype, tmdbid=tmdbid, cache=False) mediainfo = self.recognize_media(meta=metainfo, mtype=mtype, tmdbid=tmdbid,
episode_group=episode_group, cache=False)
else: else:
if doubanid: if doubanid:
# 豆瓣识别模式,不使用缓存 # 豆瓣识别模式,不使用缓存
@@ -134,7 +136,7 @@ class SubscribeChain(ChainBase, metaclass=Singleton):
# 使用名称识别兜底 # 使用名称识别兜底
if not mediainfo: if not mediainfo:
mediainfo = self.recognize_media(meta=metainfo) mediainfo = self.recognize_media(meta=metainfo, episode_group=episode_group)
# 识别失败 # 识别失败
if not mediainfo: if not mediainfo:
@@ -147,12 +149,13 @@ class SubscribeChain(ChainBase, metaclass=Singleton):
season = 1 season = 1
# 总集数 # 总集数
if not kwargs.get('total_episode'): if not kwargs.get('total_episode'):
if not mediainfo.seasons: if not mediainfo.seasons or episode_group:
# 补充媒体信息 # 补充媒体信息
mediainfo = self.recognize_media(mtype=mediainfo.type, mediainfo = self.recognize_media(mtype=mediainfo.type,
tmdbid=mediainfo.tmdb_id, tmdbid=mediainfo.tmdb_id,
doubanid=mediainfo.douban_id, doubanid=mediainfo.douban_id,
bangumiid=mediainfo.bangumi_id, bangumiid=mediainfo.bangumi_id,
episode_group=episode_group,
cache=False) cache=False)
if not mediainfo: if not mediainfo:
logger.error(f"媒体信息识别失败!") logger.error(f"媒体信息识别失败!")
@@ -207,8 +210,9 @@ class SubscribeChain(ChainBase, metaclass=Singleton):
'save_path': self.__get_default_subscribe_config(mediainfo.type, "save_path") if not kwargs.get( 'save_path': self.__get_default_subscribe_config(mediainfo.type, "save_path") if not kwargs.get(
"save_path") else kwargs.get("save_path"), "save_path") else kwargs.get("save_path"),
'filter_groups': self.__get_default_subscribe_config(mediainfo.type, "filter_groups") if not kwargs.get( 'filter_groups': self.__get_default_subscribe_config(mediainfo.type, "filter_groups") if not kwargs.get(
"filter_groups") else kwargs.get("filter_groups"), "filter_groups") else kwargs.get("filter_groups")
}) })
# 操作数据库
sid, err_msg = self.subscribeoper.add(mediainfo=mediainfo, season=season, username=username, **kwargs) sid, err_msg = self.subscribeoper.add(mediainfo=mediainfo, season=season, username=username, **kwargs)
if not sid: if not sid:
logger.error(f'{mediainfo.title_year} {err_msg}') logger.error(f'{mediainfo.title_year} {err_msg}')
@@ -275,7 +279,7 @@ class SubscribeChain(ChainBase, metaclass=Singleton):
return True return True
return False return False
def search(self, sid: int = None, state: str = 'N', manual: bool = False): def search(self, sid: Optional[int] = None, state: Optional[str] = 'N', manual: Optional[bool] = False):
""" """
订阅搜索 订阅搜索
:param sid: 订阅ID有值时只处理该订阅 :param sid: 订阅ID有值时只处理该订阅
@@ -323,6 +327,7 @@ class SubscribeChain(ChainBase, metaclass=Singleton):
mediainfo: MediaInfo = self.recognize_media(meta=meta, mtype=meta.type, mediainfo: MediaInfo = self.recognize_media(meta=meta, mtype=meta.type,
tmdbid=subscribe.tmdbid, tmdbid=subscribe.tmdbid,
doubanid=subscribe.doubanid, doubanid=subscribe.doubanid,
episode_group=subscribe.episode_group,
cache=False) cache=False)
if not mediainfo: if not mediainfo:
logger.warn( logger.warn(
@@ -330,7 +335,8 @@ class SubscribeChain(ChainBase, metaclass=Singleton):
continue continue
# 如果媒体已存在或已下载完毕,跳过当前订阅处理 # 如果媒体已存在或已下载完毕,跳过当前订阅处理
exist_flag, no_exists = self.check_and_handle_existing_media(subscribe=subscribe, meta=meta, exist_flag, no_exists = self.check_and_handle_existing_media(subscribe=subscribe,
meta=meta,
mediainfo=mediainfo, mediainfo=mediainfo,
mediakey=mediakey) mediakey=mediakey)
if exist_flag: if exist_flag:
@@ -382,6 +388,11 @@ class SubscribeChain(ChainBase, metaclass=Singleton):
logger.info( logger.info(
f'{subscribe.name} 正在洗版,{torrent_info.title} 优先级低于或等于已下载优先级') f'{subscribe.name} 正在洗版,{torrent_info.title} 优先级低于或等于已下载优先级')
continue continue
# 更新订阅自定义属性
if subscribe.media_category:
torrent_mediainfo.category = subscribe.media_category
if subscribe.episode_group:
torrent_mediainfo.episode_group = subscribe.episode_group
matched_contexts.append(context) matched_contexts.append(context)
if not matched_contexts: if not matched_contexts:
@@ -397,7 +408,6 @@ class SubscribeChain(ChainBase, metaclass=Singleton):
userid=subscribe.username, userid=subscribe.username,
username=subscribe.username, username=subscribe.username,
save_path=subscribe.save_path, save_path=subscribe.save_path,
media_category=subscribe.media_category,
downloader=subscribe.downloader, downloader=subscribe.downloader,
source=self.get_subscribe_source_keyword(subscribe) source=self.get_subscribe_source_keyword(subscribe)
) )
@@ -426,7 +436,7 @@ class SubscribeChain(ChainBase, metaclass=Singleton):
logger.debug(f"search Lock released at {datetime.now()}") logger.debug(f"search Lock released at {datetime.now()}")
def update_subscribe_priority(self, subscribe: Subscribe, meta: MetaBase, def update_subscribe_priority(self, subscribe: Subscribe, meta: MetaBase,
mediainfo: MediaInfo, downloads: List[Context]): mediainfo: MediaInfo, downloads: Optional[List[Context]]):
""" """
更新订阅已下载资源的优先级 更新订阅已下载资源的优先级
""" """
@@ -451,7 +461,7 @@ class SubscribeChain(ChainBase, metaclass=Singleton):
def finish_subscribe_or_not(self, subscribe: Subscribe, meta: MetaBase, mediainfo: MediaInfo, def finish_subscribe_or_not(self, subscribe: Subscribe, meta: MetaBase, mediainfo: MediaInfo,
downloads: List[Context] = None, downloads: List[Context] = None,
lefts: Dict[Union[int | str], Dict[int, schemas.NotExistMediaInfo]] = None, lefts: Dict[Union[int | str], Dict[int, schemas.NotExistMediaInfo]] = None,
force: bool = False): force: Optional[bool] = False):
""" """
判断是否应完成订阅 判断是否应完成订阅
""" """
@@ -573,6 +583,7 @@ class SubscribeChain(ChainBase, metaclass=Singleton):
mediainfo: MediaInfo = self.recognize_media(meta=meta, mtype=meta.type, mediainfo: MediaInfo = self.recognize_media(meta=meta, mtype=meta.type,
tmdbid=subscribe.tmdbid, tmdbid=subscribe.tmdbid,
doubanid=subscribe.doubanid, doubanid=subscribe.doubanid,
episode_group=subscribe.episode_group,
cache=False) cache=False)
if not mediainfo: if not mediainfo:
logger.warn( logger.warn(
@@ -602,9 +613,10 @@ class SubscribeChain(ChainBase, metaclass=Singleton):
logger.debug(f'开始匹配站点:{domain},共缓存了 {len(contexts)} 个种子...') logger.debug(f'开始匹配站点:{domain},共缓存了 {len(contexts)} 个种子...')
for context in contexts: for context in contexts:
# 提取信息 # 提取信息
torrent_meta = copy.deepcopy(context.meta_info) _context = copy.deepcopy(context)
torrent_mediainfo = copy.deepcopy(context.media_info) torrent_meta = _context.meta_info
torrent_info = context.torrent_info torrent_mediainfo = _context.media_info
torrent_info = _context.torrent_info
# 不在订阅站点范围的不处理 # 不在订阅站点范围的不处理
sub_sites = self.get_sub_sites(subscribe) sub_sites = self.get_sub_sites(subscribe)
@@ -632,7 +644,8 @@ class SubscribeChain(ChainBase, metaclass=Singleton):
if not torrent_mediainfo \ if not torrent_mediainfo \
or (not torrent_mediainfo.tmdb_id and not torrent_mediainfo.douban_id): or (not torrent_mediainfo.tmdb_id and not torrent_mediainfo.douban_id):
# 重新识别媒体信息 # 重新识别媒体信息
torrent_mediainfo = self.recognize_media(meta=torrent_meta) torrent_mediainfo = self.recognize_media(meta=torrent_meta,
episode_group=subscribe.episode_group)
if torrent_mediainfo: if torrent_mediainfo:
# 更新种子缓存 # 更新种子缓存
context.media_info = torrent_mediainfo context.media_info = torrent_mediainfo
@@ -735,7 +748,12 @@ class SubscribeChain(ChainBase, metaclass=Singleton):
# 匹配成功 # 匹配成功
logger.info(f'{mediainfo.title_year} 匹配成功:{torrent_info.title}') logger.info(f'{mediainfo.title_year} 匹配成功:{torrent_info.title}')
_match_context.append(context) # 自定义属性
if subscribe.media_category:
torrent_mediainfo.category = subscribe.media_category
if subscribe.episode_group:
torrent_mediainfo.episode_group = subscribe.episode_group
_match_context.append(_context)
if not _match_context: if not _match_context:
# 未匹配到资源 # 未匹配到资源
@@ -751,7 +769,6 @@ class SubscribeChain(ChainBase, metaclass=Singleton):
userid=subscribe.username, userid=subscribe.username,
username=subscribe.username, username=subscribe.username,
save_path=subscribe.save_path, save_path=subscribe.save_path,
media_category=subscribe.media_category,
downloader=subscribe.downloader, downloader=subscribe.downloader,
source=self.get_subscribe_source_keyword(subscribe) source=self.get_subscribe_source_keyword(subscribe)
) )
@@ -792,6 +809,7 @@ class SubscribeChain(ChainBase, metaclass=Singleton):
mediainfo: MediaInfo = self.recognize_media(meta=meta, mtype=meta.type, mediainfo: MediaInfo = self.recognize_media(meta=meta, mtype=meta.type,
tmdbid=subscribe.tmdbid, tmdbid=subscribe.tmdbid,
doubanid=subscribe.doubanid, doubanid=subscribe.doubanid,
episode_group=subscribe.episode_group,
cache=False) cache=False)
if not mediainfo: if not mediainfo:
logger.warn( logger.warn(
@@ -884,7 +902,7 @@ class SubscribeChain(ChainBase, metaclass=Singleton):
logger.error(f'follow用户分享订阅 {title} 添加失败:{message}') logger.error(f'follow用户分享订阅 {title} 添加失败:{message}')
logger.info(f'follow用户分享订阅刷新完成共添加 {success_count} 个订阅') logger.info(f'follow用户分享订阅刷新完成共添加 {success_count} 个订阅')
def __update_subscribe_note(self, subscribe: Subscribe, downloads: List[Context]): def __update_subscribe_note(self, subscribe: Subscribe, downloads: Optional[List[Context]]):
""" """
更新已下载信息到note字段 更新已下载信息到note字段
""" """
@@ -943,7 +961,7 @@ class SubscribeChain(ChainBase, metaclass=Singleton):
def __update_lack_episodes(self, lefts: Dict[Union[int, str], Dict[int, schemas.NotExistMediaInfo]], def __update_lack_episodes(self, lefts: Dict[Union[int, str], Dict[int, schemas.NotExistMediaInfo]],
subscribe: Subscribe, subscribe: Subscribe,
mediainfo: MediaInfo, mediainfo: MediaInfo,
update_date: bool = False): update_date: Optional[bool] = False):
""" """
更新订阅剩余集数及时间 更新订阅剩余集数及时间
""" """
@@ -1013,7 +1031,7 @@ class SubscribeChain(ChainBase, metaclass=Singleton):
}) })
def remote_list(self, channel: MessageChannel, def remote_list(self, channel: MessageChannel,
userid: Union[str, int] = None, source: str = None): userid: Union[str, int] = None, source: Optional[str] = None):
""" """
查询订阅并发送消息 查询订阅并发送消息
""" """
@@ -1041,7 +1059,7 @@ class SubscribeChain(ChainBase, metaclass=Singleton):
title=title, text='\n'.join(messages), userid=userid)) title=title, text='\n'.join(messages), userid=userid))
def remote_delete(self, arg_str: str, channel: MessageChannel, def remote_delete(self, arg_str: str, channel: MessageChannel,
userid: Union[str, int] = None, source: str = None): userid: Union[str, int] = None, source: Optional[str] = None):
""" """
删除订阅 删除订阅
""" """
@@ -1076,8 +1094,8 @@ class SubscribeChain(ChainBase, metaclass=Singleton):
no_exists: Dict[Union[int, str], Dict[int, schemas.NotExistMediaInfo]], no_exists: Dict[Union[int, str], Dict[int, schemas.NotExistMediaInfo]],
mediakey: Union[str, int], mediakey: Union[str, int],
begin_season: int, begin_season: int,
total_episode: int, total_episode: Optional[int],
start_episode: int, start_episode: Optional[int],
downloaded_episodes: List[int] = None downloaded_episodes: List[int] = None
) -> Tuple[bool, Dict[Union[int, str], Dict[int, schemas.NotExistMediaInfo]]]: ) -> Tuple[bool, Dict[Union[int, str], Dict[int, schemas.NotExistMediaInfo]]]:
""" """
@@ -1262,7 +1280,7 @@ class SubscribeChain(ChainBase, metaclass=Singleton):
订阅相关的下载和文件信息 订阅相关的下载和文件信息
""" """
if not subscribe: if not subscribe:
return return None
# 返回订阅数据 # 返回订阅数据
subscribe_info = schemas.SubscrbieInfo() subscribe_info = schemas.SubscrbieInfo()
@@ -1273,7 +1291,8 @@ class SubscribeChain(ChainBase, metaclass=Singleton):
# 查询TMDB中的集信息 # 查询TMDB中的集信息
tmdb_episodes = self.tmdbchain.tmdb_episodes( tmdb_episodes = self.tmdbchain.tmdb_episodes(
tmdbid=subscribe.tmdbid, tmdbid=subscribe.tmdbid,
season=subscribe.season season=subscribe.season,
episode_group=subscribe.episode_group
) )
if tmdb_episodes: if tmdb_episodes:
for episode in tmdb_episodes: for episode in tmdb_episodes:
@@ -1335,6 +1354,7 @@ class SubscribeChain(ChainBase, metaclass=Singleton):
mediainfo: MediaInfo = self.recognize_media(meta=meta, mtype=meta.type, mediainfo: MediaInfo = self.recognize_media(meta=meta, mtype=meta.type,
tmdbid=subscribe.tmdbid, tmdbid=subscribe.tmdbid,
doubanid=subscribe.doubanid, doubanid=subscribe.doubanid,
episode_group=subscribe.episode_group,
cache=False) cache=False)
if not mediainfo: if not mediainfo:
logger.warn( logger.warn(
@@ -1368,7 +1388,7 @@ class SubscribeChain(ChainBase, metaclass=Singleton):
return subscribe_info return subscribe_info
def check_and_handle_existing_media(self, subscribe: Subscribe, meta: MetaBase, def check_and_handle_existing_media(self, subscribe: Subscribe, meta: MetaBase,
mediainfo: MediaInfo, mediakey: str): mediainfo: MediaInfo, mediakey: Union[str, int]):
""" """
检查媒体是否已经存在,并根据情况执行相应的操作 检查媒体是否已经存在,并根据情况执行相应的操作
1. 查询缺失的媒体信息 1. 查询缺失的媒体信息

View File

@@ -1,7 +1,7 @@
import json import json
import re import re
from pathlib import Path from pathlib import Path
from typing import Union from typing import Union, Optional
from app.chain import ChainBase from app.chain import ChainBase
from app.core.config import settings from app.core.config import settings
@@ -25,7 +25,7 @@ class SystemChain(ChainBase, metaclass=Singleton):
# 重启完成检测 # 重启完成检测
self.restart_finish() self.restart_finish()
def remote_clear_cache(self, channel: MessageChannel, userid: Union[int, str], source: str = None): def remote_clear_cache(self, channel: MessageChannel, userid: Union[int, str], source: Optional[str] = None):
""" """
清理系统缓存 清理系统缓存
""" """
@@ -33,7 +33,7 @@ class SystemChain(ChainBase, metaclass=Singleton):
self.post_message(Notification(channel=channel, source=source, self.post_message(Notification(channel=channel, source=source,
title=f"缓存清理完成!", userid=userid)) title=f"缓存清理完成!", userid=userid))
def restart(self, channel: MessageChannel, userid: Union[int, str], source: str = None): def restart(self, channel: MessageChannel, userid: Union[int, str], source: Optional[str] = None):
""" """
重启系统 重启系统
""" """
@@ -65,7 +65,7 @@ class SystemChain(ChainBase, metaclass=Singleton):
title += f"当前前端版本:{front_local_version},远程版本:{front_release_version}" title += f"当前前端版本:{front_local_version},远程版本:{front_release_version}"
return title return title
def version(self, channel: MessageChannel, userid: Union[int, str], source: str = None): def version(self, channel: MessageChannel, userid: Union[int, str], source: Optional[str] = None):
""" """
查看当前版本、远程版本 查看当前版本、远程版本
""" """

View File

@@ -23,7 +23,7 @@ class TmdbChain(ChainBase, metaclass=Singleton):
vote_average: float, vote_average: float,
vote_count: int, vote_count: int,
release_date: str, release_date: str,
page: int = 1) -> Optional[List[MediaInfo]]: page: Optional[int] = 1) -> Optional[List[MediaInfo]]:
""" """
:param mtype: 媒体类型 :param mtype: 媒体类型
:param sort_by: 排序方式 :param sort_by: 排序方式
@@ -48,7 +48,7 @@ class TmdbChain(ChainBase, metaclass=Singleton):
release_date=release_date, release_date=release_date,
page=page) page=page)
def tmdb_trending(self, page: int = 1) -> Optional[List[MediaInfo]]: def tmdb_trending(self, page: Optional[int] = 1) -> Optional[List[MediaInfo]]:
""" """
TMDB流行趋势 TMDB流行趋势
:param page: 第几页 :param page: 第几页
@@ -70,13 +70,21 @@ class TmdbChain(ChainBase, metaclass=Singleton):
""" """
return self.run_module("tmdb_seasons", tmdbid=tmdbid) return self.run_module("tmdb_seasons", tmdbid=tmdbid)
def tmdb_episodes(self, tmdbid: int, season: int) -> List[schemas.TmdbEpisode]: def tmdb_group_seasons(self, group_id: str) -> List[schemas.TmdbSeason]:
"""
根据剧集组ID查询themoviedb所有季集信息
:param group_id: 剧集组ID
"""
return self.run_module("tmdb_group_seasons", group_id=group_id)
def tmdb_episodes(self, tmdbid: int, season: int, episode_group: Optional[str] = None) -> List[schemas.TmdbEpisode]:
""" """
根据TMDBID查询某季的所有信信息 根据TMDBID查询某季的所有信信息
:param tmdbid: TMDBID :param tmdbid: TMDBID
:param season: 季 :param season: 季
:param episode_group: 剧集组
""" """
return self.run_module("tmdb_episodes", tmdbid=tmdbid, season=season) return self.run_module("tmdb_episodes", tmdbid=tmdbid, season=season, episode_group=episode_group)
def movie_similar(self, tmdbid: int) -> Optional[List[MediaInfo]]: def movie_similar(self, tmdbid: int) -> Optional[List[MediaInfo]]:
""" """
@@ -106,7 +114,7 @@ class TmdbChain(ChainBase, metaclass=Singleton):
""" """
return self.run_module("tmdb_tv_recommend", tmdbid=tmdbid) return self.run_module("tmdb_tv_recommend", tmdbid=tmdbid)
def movie_credits(self, tmdbid: int, page: int = 1) -> Optional[List[schemas.MediaPerson]]: def movie_credits(self, tmdbid: int, page: Optional[int] = 1) -> Optional[List[schemas.MediaPerson]]:
""" """
根据TMDBID查询电影演职人员 根据TMDBID查询电影演职人员
:param tmdbid: TMDBID :param tmdbid: TMDBID
@@ -114,7 +122,7 @@ class TmdbChain(ChainBase, metaclass=Singleton):
""" """
return self.run_module("tmdb_movie_credits", tmdbid=tmdbid, page=page) return self.run_module("tmdb_movie_credits", tmdbid=tmdbid, page=page)
def tv_credits(self, tmdbid: int, page: int = 1) -> Optional[List[schemas.MediaPerson]]: def tv_credits(self, tmdbid: int, page: Optional[int] = 1) -> Optional[List[schemas.MediaPerson]]:
""" """
根据TMDBID查询电视剧演职人员 根据TMDBID查询电视剧演职人员
:param tmdbid: TMDBID :param tmdbid: TMDBID
@@ -129,7 +137,7 @@ class TmdbChain(ChainBase, metaclass=Singleton):
""" """
return self.run_module("tmdb_person_detail", person_id=person_id) return self.run_module("tmdb_person_detail", person_id=person_id)
def person_credits(self, person_id: int, page: int = 1) -> Optional[List[MediaInfo]]: def person_credits(self, person_id: int, page: Optional[int] = 1) -> Optional[List[MediaInfo]]:
""" """
根据人物ID查询人物参演作品 根据人物ID查询人物参演作品
:param person_id: 人物ID :param person_id: 人物ID
@@ -152,7 +160,7 @@ class TmdbChain(ChainBase, metaclass=Singleton):
return None return None
@cached(maxsize=1, ttl=3600) @cached(maxsize=1, ttl=3600)
def get_trending_wallpapers(self, num: int = 10) -> List[str]: def get_trending_wallpapers(self, num: Optional[int] = 10) -> List[str]:
""" """
获取所有流行壁纸 获取所有流行壁纸
""" """

View File

@@ -1,6 +1,6 @@
import re import re
import traceback import traceback
from typing import Dict, List, Union from typing import Dict, List, Union, Optional
from cachetools import cached, TTLCache from cachetools import cached, TTLCache
@@ -48,7 +48,7 @@ class TorrentsChain(ChainBase, metaclass=Singleton):
self.post_message(Notification(channel=channel, self.post_message(Notification(channel=channel,
title=f"种子刷新完成!", userid=userid)) title=f"种子刷新完成!", userid=userid))
def get_torrents(self, stype: str = None) -> Dict[str, List[Context]]: def get_torrents(self, stype: Optional[str] = None) -> Dict[str, List[Context]]:
""" """
获取当前缓存的种子 获取当前缓存的种子
:param stype: 强制指定缓存类型spider:爬虫缓存rss:rss缓存 :param stype: 强制指定缓存类型spider:爬虫缓存rss:rss缓存
@@ -73,17 +73,21 @@ class TorrentsChain(ChainBase, metaclass=Singleton):
logger.info(f'种子缓存数据清理完成') logger.info(f'种子缓存数据清理完成')
@cached(cache=TTLCache(maxsize=128, ttl=595)) @cached(cache=TTLCache(maxsize=128, ttl=595))
def browse(self, domain: str) -> List[TorrentInfo]: def browse(self, domain: str, keyword: Optional[str] = None, cat: Optional[str] = None,
page: Optional[int] = 0) -> List[TorrentInfo]:
""" """
浏览站点首页内容返回种子清单TTL缓存10分钟 浏览站点首页内容返回种子清单TTL缓存10分钟
:param domain: 站点域名 :param domain: 站点域名
:param keyword: 搜索标题
:param cat: 搜索分类
:param page: 页码
""" """
logger.info(f'开始获取站点 {domain} 最新种子 ...') logger.info(f'开始获取站点 {domain} 最新种子 ...')
site = self.siteshelper.get_indexer(domain) site = self.siteshelper.get_indexer(domain)
if not site: if not site:
logger.error(f'站点 {domain} 不存在!') logger.error(f'站点 {domain} 不存在!')
return [] return []
return self.refresh_torrents(site=site) return self.refresh_torrents(site=site, keyword=keyword, cat=cat, page=page)
@cached(cache=TTLCache(maxsize=128, ttl=295)) @cached(cache=TTLCache(maxsize=128, ttl=295))
def rss(self, domain: str) -> List[TorrentInfo]: def rss(self, domain: str) -> List[TorrentInfo]:
@@ -131,7 +135,7 @@ class TorrentsChain(ChainBase, metaclass=Singleton):
return ret_torrents return ret_torrents
def refresh(self, stype: str = None, sites: List[int] = None) -> Dict[str, List[Context]]: def refresh(self, stype: Optional[str] = None, sites: List[int] = None) -> Dict[str, List[Context]]:
""" """
刷新站点最新资源,识别并缓存起来 刷新站点最新资源,识别并缓存起来
:param stype: 强制指定缓存类型spider:爬虫缓存rss:rss缓存 :param stype: 强制指定缓存类型spider:爬虫缓存rss:rss缓存

View File

@@ -53,14 +53,14 @@ class JobManager:
self._season_episodes = {} self._season_episodes = {}
@staticmethod @staticmethod
def __get_meta_id(meta: MetaBase = None, season: int = None) -> Tuple: def __get_meta_id(meta: MetaBase = None, season: Optional[int] = None) -> Tuple:
""" """
获取元数据ID 获取元数据ID
""" """
return meta.name, season return meta.name, season
@staticmethod @staticmethod
def __get_media_id(media: MediaInfo = None, season: int = None) -> Tuple: def __get_media_id(media: MediaInfo = None, season: Optional[int] = None) -> Tuple:
""" """
获取媒体ID 获取媒体ID
""" """
@@ -104,7 +104,7 @@ class JobManager:
""" """
return schemas.MetaInfo(**task.meta.to_dict()) return schemas.MetaInfo(**task.meta.to_dict())
def add_task(self, task: TransferTask, state: str = "waiting"): def add_task(self, task: TransferTask, state: Optional[str] = "waiting"):
""" """
添加整理任务 添加整理任务
""" """
@@ -296,7 +296,7 @@ class JobManager:
media_success = True media_success = True
return meta_success and media_success return meta_success and media_success
def success_tasks(self, media: MediaInfo, season: int = None) -> List[TransferJobTask]: def success_tasks(self, media: MediaInfo, season: Optional[int] = None) -> List[TransferJobTask]:
""" """
获取某项任务成功的任务 获取某项任务成功的任务
""" """
@@ -306,7 +306,7 @@ class JobManager:
return [] return []
return [task for task in self._job_view[__mediaid__].tasks if task.state == "completed"] return [task for task in self._job_view[__mediaid__].tasks if task.state == "completed"]
def count(self, media: MediaInfo, season: int = None) -> int: def count(self, media: MediaInfo, season: Optional[int] = None) -> int:
""" """
获取某项任务总数 获取某项任务总数
""" """
@@ -317,7 +317,7 @@ class JobManager:
return 0 return 0
return len([task for task in self._job_view[__mediaid__].tasks if task.state == "completed"]) return len([task for task in self._job_view[__mediaid__].tasks if task.state == "completed"])
def size(self, media: MediaInfo, season: int = None) -> int: def size(self, media: MediaInfo, season: Optional[int] = None) -> int:
""" """
获取某项任务总大小 获取某项任务总大小
""" """
@@ -341,7 +341,7 @@ class JobManager:
""" """
return list(self._job_view.values()) return list(self._job_view.values())
def season_episodes(self, media: MediaInfo, season: int = None) -> List[int]: def season_episodes(self, media: MediaInfo, season: Optional[int] = None) -> List[int]:
""" """
获取季集清单 获取季集清单
""" """
@@ -606,7 +606,7 @@ class TransferChain(ChainBase, metaclass=Singleton):
logger.error(f"整理队列处理出现错误:{e} - {traceback.format_exc()}") logger.error(f"整理队列处理出现错误:{e} - {traceback.format_exc()}")
def __handle_transfer(self, task: TransferTask, def __handle_transfer(self, task: TransferTask,
callback: Optional[Callable] = None) -> Tuple[bool, str]: callback: Optional[Callable] = None) -> Optional[Tuple[bool, str]]:
""" """
处理整理任务 处理整理任务
""" """
@@ -623,7 +623,8 @@ class TransferChain(ChainBase, metaclass=Singleton):
# 下载记录中已存在识别信息 # 下载记录中已存在识别信息
mediainfo: Optional[MediaInfo] = self.recognize_media(mtype=MediaType(download_history.type), mediainfo: Optional[MediaInfo] = self.recognize_media(mtype=MediaType(download_history.type),
tmdbid=download_history.tmdbid, tmdbid=download_history.tmdbid,
doubanid=download_history.doubanid) doubanid=download_history.doubanid,
episode_group=download_history.episode_group)
if mediainfo: if mediainfo:
# 更新自定义媒体类别 # 更新自定义媒体类别
if download_history.media_category: if download_history.media_category:
@@ -670,13 +671,19 @@ class TransferChain(ChainBase, metaclass=Singleton):
self.jobview.add_task(task, state=curr_task.state if curr_task else "waiting") self.jobview.add_task(task, state=curr_task.state if curr_task else "waiting")
# 获取集数据 # 获取集数据
if not task.episodes_info and task.mediainfo.type == MediaType.TV: if task.mediainfo.type == MediaType.TV and not task.episodes_info:
if task.meta.begin_season is None: # 判断注意season为0的情况
task.meta.begin_season = 1 season_num = task.mediainfo.season
task.mediainfo.season = task.mediainfo.season or task.meta.begin_season if season_num is None and task.meta.season_seq:
if task.meta.season_seq.isdigit():
season_num = int(task.meta.season_seq)
# 默认值1
if season_num is None:
season_num = 1
task.episodes_info = self.tmdbchain.tmdb_episodes( task.episodes_info = self.tmdbchain.tmdb_episodes(
tmdbid=task.mediainfo.tmdb_id, tmdbid=task.mediainfo.tmdb_id,
season=task.mediainfo.season season=season_num,
episode_group=task.mediainfo.episode_group
) )
# 查询整理目标目录 # 查询整理目标目录
@@ -793,7 +800,8 @@ class TransferChain(ChainBase, metaclass=Singleton):
# 按TMDBID识别 # 按TMDBID识别
mediainfo = self.recognize_media(mtype=mtype, mediainfo = self.recognize_media(mtype=mtype,
tmdbid=downloadhis.tmdbid, tmdbid=downloadhis.tmdbid,
doubanid=downloadhis.doubanid) doubanid=downloadhis.doubanid,
episode_group=downloadhis.episode_group)
if mediainfo: if mediainfo:
# 补充图片 # 补充图片
self.obtain_images(mediainfo) self.obtain_images(mediainfo)
@@ -902,13 +910,13 @@ class TransferChain(ChainBase, metaclass=Singleton):
def do_transfer(self, fileitem: FileItem, def do_transfer(self, fileitem: FileItem,
meta: MetaBase = None, mediainfo: MediaInfo = None, meta: MetaBase = None, mediainfo: MediaInfo = None,
target_directory: TransferDirectoryConf = None, target_directory: TransferDirectoryConf = None,
target_storage: str = None, target_path: Path = None, target_storage: Optional[str] = None, target_path: Path = None,
transfer_type: str = None, scrape: bool = None, transfer_type: Optional[str] = None, scrape: Optional[bool] = None,
library_type_folder: bool = None, library_category_folder: bool = None, library_type_folder: Optional[bool] = None, library_category_folder: Optional[bool] = None,
season: int = None, epformat: EpisodeFormat = None, min_filesize: int = 0, season: Optional[int] = None, epformat: EpisodeFormat = None, min_filesize: Optional[int] = 0,
downloader: str = None, download_hash: str = None, downloader: Optional[str] = None, download_hash: Optional[str] = None,
force: bool = False, background: bool = True, force: Optional[bool] = False, background: Optional[bool] = True,
manual: bool = False) -> Tuple[bool, str]: manual: Optional[bool] = False, continue_callback: Callable = None) -> Tuple[bool, str]:
""" """
执行一个复杂目录的整理操作 执行一个复杂目录的整理操作
:param fileitem: 文件项 :param fileitem: 文件项
@@ -929,6 +937,7 @@ class TransferChain(ChainBase, metaclass=Singleton):
:param force: 是否强制整理 :param force: 是否强制整理
:param background: 是否后台运行 :param background: 是否后台运行
:param manual: 是否手动整理 :param manual: 是否手动整理
:param continue_callback: 继续处理回调
返回:成功标识,错误信息 返回:成功标识,错误信息
""" """
@@ -994,6 +1003,8 @@ class TransferChain(ChainBase, metaclass=Singleton):
for file_item, bluray_dir in file_items: for file_item, bluray_dir in file_items:
if global_vars.is_system_stopped: if global_vars.is_system_stopped:
break break
if continue_callback and not continue_callback():
break
file_path = Path(file_item.path) file_path = Path(file_item.path)
# 回收站及隐藏的文件不处理 # 回收站及隐藏的文件不处理
if file_item.path.find('/@Recycle/') != -1 \ if file_item.path.find('/@Recycle/') != -1 \
@@ -1114,6 +1125,8 @@ class TransferChain(ChainBase, metaclass=Singleton):
for transfer_task in transfer_tasks: for transfer_task in transfer_tasks:
if global_vars.is_system_stopped: if global_vars.is_system_stopped:
break break
if continue_callback and not continue_callback():
break
# 更新进度 # 更新进度
__process_msg = f"正在整理 {processed_num + fail_num + 1}/{total_num}{transfer_task.fileitem.name} ..." __process_msg = f"正在整理 {processed_num + fail_num + 1}/{total_num}{transfer_task.fileitem.name} ..."
logger.info(__process_msg) logger.info(__process_msg)
@@ -1143,7 +1156,7 @@ class TransferChain(ChainBase, metaclass=Singleton):
return all_success, "".join(err_msgs) return all_success, "".join(err_msgs)
def remote_transfer(self, arg_str: str, channel: MessageChannel, def remote_transfer(self, arg_str: str, channel: MessageChannel,
userid: Union[str, int] = None, source: str = None): userid: Union[str, int] = None, source: Optional[str] = None):
""" """
远程重新整理,参数 历史记录ID TMDBID|类型 远程重新整理,参数 历史记录ID TMDBID|类型
""" """
@@ -1185,7 +1198,7 @@ class TransferChain(ChainBase, metaclass=Singleton):
return return
def __re_transfer(self, logid: int, mtype: MediaType = None, def __re_transfer(self, logid: int, mtype: MediaType = None,
mediaid: str = None) -> Tuple[bool, str]: mediaid: Optional[str] = None) -> Tuple[bool, str]:
""" """
根据历史记录,重新识别整理,只支持简单条件 根据历史记录,重新识别整理,只支持简单条件
:param logid: 历史记录ID :param logid: 历史记录ID
@@ -1204,12 +1217,12 @@ class TransferChain(ChainBase, metaclass=Singleton):
# 查询媒体信息 # 查询媒体信息
if mtype and mediaid: if mtype and mediaid:
mediainfo = self.recognize_media(mtype=mtype, tmdbid=int(mediaid) if str(mediaid).isdigit() else None, mediainfo = self.recognize_media(mtype=mtype, tmdbid=int(mediaid) if str(mediaid).isdigit() else None,
doubanid=mediaid) doubanid=mediaid, episode_group=history.episode_group)
if mediainfo: if mediainfo:
# 更新媒体图片 # 更新媒体图片
self.obtain_images(mediainfo=mediainfo) self.obtain_images(mediainfo=mediainfo)
else: else:
mediainfo = self.mediachain.recognize_by_path(str(src_path)) mediainfo = self.mediachain.recognize_by_path(str(src_path), episode_group=history.episode_group)
if not mediainfo: if not mediainfo:
return False, f"未识别到媒体信息,类型:{mtype.value}id{mediaid}" return False, f"未识别到媒体信息,类型:{mtype.value}id{mediaid}"
# 重新执行整理 # 重新执行整理
@@ -1236,20 +1249,21 @@ class TransferChain(ChainBase, metaclass=Singleton):
def manual_transfer(self, def manual_transfer(self,
fileitem: FileItem, fileitem: FileItem,
target_storage: str = None, target_storage: Optional[str] = None,
target_path: Path = None, target_path: Path = None,
tmdbid: int = None, tmdbid: Optional[int] = None,
doubanid: str = None, doubanid: Optional[str] = None,
mtype: MediaType = None, mtype: MediaType = None,
season: int = None, season: Optional[int] = None,
transfer_type: str = None, episode_group: Optional[str] = None,
transfer_type: Optional[str] = None,
epformat: EpisodeFormat = None, epformat: EpisodeFormat = None,
min_filesize: int = 0, min_filesize: Optional[int] = 0,
scrape: bool = None, scrape: Optional[bool] = None,
library_type_folder: bool = None, library_type_folder: Optional[bool] = None,
library_category_folder: bool = None, library_category_folder: Optional[bool] = None,
force: bool = False, force: Optional[bool] = False,
background: bool = False) -> Tuple[bool, Union[str, list]]: background: Optional[bool] = False) -> Tuple[bool, Union[str, list]]:
""" """
手动整理,支持复杂条件,带进度显示 手动整理,支持复杂条件,带进度显示
:param fileitem: 文件项 :param fileitem: 文件项
@@ -1259,6 +1273,7 @@ class TransferChain(ChainBase, metaclass=Singleton):
:param doubanid: 豆瓣ID :param doubanid: 豆瓣ID
:param mtype: 媒体类型 :param mtype: 媒体类型
:param season: 季度 :param season: 季度
:param episode_group: 剧集组
:param transfer_type: 整理类型 :param transfer_type: 整理类型
:param epformat: 剧集格式 :param epformat: 剧集格式
:param min_filesize: 最小文件大小(MB) :param min_filesize: 最小文件大小(MB)
@@ -1272,7 +1287,8 @@ class TransferChain(ChainBase, metaclass=Singleton):
if tmdbid or doubanid: if tmdbid or doubanid:
# 有输入TMDBID时单个识别 # 有输入TMDBID时单个识别
# 识别媒体信息 # 识别媒体信息
mediainfo: MediaInfo = self.mediachain.recognize_media(tmdbid=tmdbid, doubanid=doubanid, mtype=mtype) mediainfo: MediaInfo = self.mediachain.recognize_media(tmdbid=tmdbid, doubanid=doubanid,
mtype=mtype, episode_group=episode_group)
if not mediainfo: if not mediainfo:
return False, f"媒体信息识别失败tmdbid{tmdbid}doubanid{doubanid}type: {mtype.value}" return False, f"媒体信息识别失败tmdbid{tmdbid}doubanid{doubanid}type: {mtype.value}"
else: else:
@@ -1324,7 +1340,7 @@ class TransferChain(ChainBase, metaclass=Singleton):
return state, errmsg return state, errmsg
def send_transfer_message(self, meta: MetaBase, mediainfo: MediaInfo, def send_transfer_message(self, meta: MetaBase, mediainfo: MediaInfo,
transferinfo: TransferInfo, season_episode: str = None, username: str = None): transferinfo: TransferInfo, season_episode: Optional[str] = None, username: Optional[str] = None):
""" """
发送入库成功的消息 发送入库成功的消息
""" """

View File

@@ -30,7 +30,7 @@ class UserChain(ChainBase, metaclass=Singleton):
password: Optional[str] = None, password: Optional[str] = None,
mfa_code: Optional[str] = None, mfa_code: Optional[str] = None,
code: Optional[str] = None, code: Optional[str] = None,
grant_type: str = "password" grant_type: Optional[str] = "password"
) -> Union[Tuple[bool, Optional[str]], Tuple[bool, Optional[User]]]: ) -> Union[Tuple[bool, Optional[str]], Tuple[bool, Optional[User]]]:
""" """
认证用户,根据不同的 grant_type 处理不同的认证流程 认证用户,根据不同的 grant_type 处理不同的认证流程

250
app/chain/workflow.py Normal file
View File

@@ -0,0 +1,250 @@
import base64
import pickle
import threading
from collections import defaultdict, deque
from concurrent.futures import ThreadPoolExecutor
from time import sleep
from typing import List, Tuple, Optional
from pydantic.fields import Callable
from app.chain import ChainBase
from app.core.config import global_vars
from app.core.workflow import WorkFlowManager
from app.db.models import Workflow
from app.db.workflow_oper import WorkflowOper
from app.log import logger
from app.schemas import ActionContext, ActionFlow, Action, ActionExecution
class WorkflowExecutor:
    """
    Workflow executor: runs a workflow's actions as a DAG.

    Actions are the nodes and flows are the directed edges; Kahn-style
    topological scheduling is used, dispatching every ready node
    (in-degree 0) to a thread pool so independent branches run concurrently.
    """

    def __init__(self, workflow: Workflow, step_callback: Callable = None):
        """
        Initialize the executor for a single workflow run.
        :param workflow: workflow database object (actions / flows / saved context)
        :param step_callback: optional callback invoked after each successful action
        """
        # Workflow data
        self.workflow = workflow
        self.step_callback = step_callback
        self.actions = {action['id']: Action(**action) for action in workflow.actions}
        self.flows = [ActionFlow(**flow) for flow in workflow.flows]
        self.total_actions = len(self.actions)
        self.finished_actions = 0
        self.success = True
        self.errmsg = ""
        # Workflow manager (executes individual actions)
        self.workflowmanager = WorkFlowManager()
        # Ready queue of action ids whose dependencies are all done
        self.queue = deque()
        # Lock guarding queue / counters / shared context
        self.lock = threading.Lock()
        # Thread pool used to run actions concurrently
        self.executor = ThreadPoolExecutor()
        # Number of actions currently in flight
        self.running_tasks = 0
        # Build adjacency list and in-degree table from the flows
        self.adjacency = defaultdict(list)
        self.indegree = defaultdict(int)
        for flow in self.flows:
            self.adjacency[flow.source].append(flow.target)
            self.indegree[flow.target] += 1
        # Ensure unreferenced nodes have an explicit in-degree of 0
        for action_id in self.actions:
            if action_id not in self.indegree:
                self.indegree[action_id] = 0
        # Restore the saved context when resuming a partially-run workflow
        if workflow.current_action and workflow.context:
            logger.info(f"工作流已执行动作:{workflow.current_action}")
            # NOTE(review): pickle payload is produced and stored locally by
            # this application; never load contexts from untrusted sources
            decoded_data = base64.b64decode(workflow.context["content"])
            self.context = pickle.loads(decoded_data)
        else:
            self.context = ActionContext()
        # Clear any pending emergency-stop flag for this workflow
        global_vars.workflow_resume(self.workflow.id)
        # Seed the queue with all nodes that have no dependencies
        for action_id in self.actions:
            if self.indegree[action_id] == 0:
                self.queue.append(action_id)

    def __release_successors(self, node_id):
        """
        Decrease the in-degree of node_id's successors and enqueue those
        that become ready. Caller must hold self.lock.
        """
        for succ_id in self.adjacency.get(node_id, []):
            self.indegree[succ_id] -= 1
            if self.indegree[succ_id] == 0:
                self.queue.append(succ_id)

    def execute(self):
        """
        Run the workflow until all actions finish, an action fails,
        or an emergency stop is requested.
        """
        try:
            while True:
                with self.lock:
                    # Done: queue drained and no task in flight
                    if not self.queue and self.running_tasks == 0:
                        break
                    # Abort: an action reported failure
                    if not self.success:
                        break
                    node_id = self.queue.popleft() if self.queue else None
                    if node_id is not None:
                        # Emergency stop requested for this workflow
                        if global_vars.is_workflow_stopped(self.workflow.id):
                            global_vars.workflow_resume(self.workflow.id)
                            break
                        if (self.workflow.current_action
                                and node_id in self.workflow.current_action.split(',')):
                            # Already executed in a previous run: do not run it
                            # again, but release its successors so the rest of
                            # the graph can proceed (previously successors were
                            # never released and the incremented running-task
                            # counter leaked, hanging the scheduler on resume)
                            self.__release_successors(node_id)
                            continue
                        # Mark the task as running before handing it to the pool
                        self.running_tasks += 1
                if node_id is None:
                    # Wait (outside the lock, so completion callbacks can
                    # make progress) for running tasks to release successors
                    sleep(0.1)
                    continue
                # Submit the node to the thread pool
                future = self.executor.submit(
                    self.execute_node,
                    self.workflow.id,
                    node_id,
                    self.context
                )
                future.add_done_callback(self.on_node_complete)
        finally:
            # Wait for in-flight actions and release the pool's threads
            self.executor.shutdown(wait=True)

    def execute_node(self, workflow_id: int, node_id: int,
                     context: ActionContext) -> Tuple[Action, bool, str, ActionContext]:
        """
        Execute a single node; returns the action, its result state,
        a message and the (possibly modified) context.
        """
        action = self.actions[node_id]
        state, message, result_ctx = self.workflowmanager.excute(workflow_id, action, context=context)
        return action, state, message, result_ctx

    def on_node_complete(self, future):
        """
        Node completion callback: record history, merge the context,
        invoke the step callback and release successor nodes.

        Any exception escaping the node is converted into a workflow
        failure (previously future.result() was called outside the
        try block, so an exception escaped the callback and
        running_tasks was never decremented, hanging the scheduler).
        """
        try:
            action, state, message, result_ctx = future.result()
        except Exception as e:
            logger.error(f"工作流 {self.workflow.id} 动作执行异常:{e}")
            with self.lock:
                self.success = False
                self.errmsg = str(e)
                self.running_tasks -= 1
            return
        try:
            with self.lock:
                self.finished_actions += 1
                # Percentage progress; the old code rounded the ratio to
                # 0 or 1 BEFORE multiplying by 100
                self.context.progress = round(self.finished_actions / self.total_actions * 100)
                # Append to the execution history
                self.context.execute_history.append(
                    ActionExecution(
                        action=action.name,
                        result=state,
                        message=message
                    )
                )
                # Node failed: stop the workflow
                if not state:
                    self.success = False
                    self.errmsg = f"{action.name} 失败"
                    return
                # Merge the node's context into the main context
                self.merge_context(result_ctx)
            # Invoke the callback outside the lock (it may do slow I/O)
            if self.step_callback:
                self.step_callback(action, self.context)
            # Release successor nodes
            with self.lock:
                self.__release_successors(action.id)
        finally:
            # Always mark the task as finished
            with self.lock:
                self.running_tasks -= 1

    def merge_context(self, context: ActionContext):
        """
        Merge a node's result context into the main context: only fields
        that are still empty/falsy on the main context are taken over.
        """
        for key, value in context.dict().items():
            if not getattr(self.context, key, None):
                setattr(self.context, key, value)
class WorkflowChain(ChainBase):
    """
    Chain in charge of running workflows end to end.
    """

    def __init__(self):
        super().__init__()
        # Data-access helper for workflow records
        self.workflowoper = WorkflowOper()

    def process(self, workflow_id: int, from_begin: Optional[bool] = True) -> Tuple[bool, str]:
        """
        Run a single workflow and persist its progress after every action.
        :param workflow_id: identifier of the workflow to run
        :param from_begin: when True (default) reset saved progress and start over
        :return: (success flag, error message)
        """

        def _persist_step(action: Action, context: ActionContext):
            """Serialize the context and store it after each finished action."""
            payload = base64.b64encode(pickle.dumps(context)).decode('utf-8')
            self.workflowoper.step(workflow_id, action_id=action.id,
                                   context={"content": payload})

        if from_begin:
            # Drop any saved progress so execution starts from scratch
            self.workflowoper.reset(workflow_id)

        # Validate the workflow record before launching anything
        workflow = self.workflowoper.get(workflow_id)
        if not workflow:
            logger.warn(f"工作流 {workflow_id} 不存在")
            return False, "工作流不存在"
        if not workflow.actions:
            logger.warn(f"工作流 {workflow.name} 无动作")
            return False, "工作流无动作"
        if not workflow.flows:
            logger.warn(f"工作流 {workflow.name} 无流程")
            return False, "工作流无流程"

        logger.info(f"开始处理 {workflow.name},共 {len(workflow.actions)} 个动作 ...")
        self.workflowoper.start(workflow_id)
        # Execute the workflow, persisting context after each step
        runner = WorkflowExecutor(workflow, step_callback=_persist_step)
        runner.execute()
        if runner.success:
            logger.info(f"工作流 {workflow.name} 执行完成")
            self.workflowoper.success(workflow_id)
            return True, ""
        logger.info(f"工作流 {workflow.name} 执行失败:{runner.errmsg}")
        self.workflowoper.fail(workflow_id, result=runner.errmsg)
        return False, runner.errmsg

    def get_workflows(self) -> List[Workflow]:
        """
        Return all workflows that are currently enabled.
        """
        return self.workflowoper.list_enabled()

View File

@@ -273,8 +273,8 @@ class Command(metaclass=Singleton):
} }
return plugin_commands return plugin_commands
def __run_command(self, command: Dict[str, any], data_str: str = "", def __run_command(self, command: Dict[str, any], data_str: Optional[str] = "",
channel: MessageChannel = None, source: str = None, userid: Union[str, int] = None): channel: MessageChannel = None, source: Optional[str] = None, userid: Union[str, int] = None):
""" """
运行定时服务 运行定时服务
""" """
@@ -339,8 +339,8 @@ class Command(metaclass=Singleton):
""" """
return self._commands.get(cmd, {}) return self._commands.get(cmd, {})
def register(self, cmd: str, func: Any, data: dict = None, def register(self, cmd: str, func: Any, data: Optional[dict] = None,
desc: str = None, category: str = None) -> None: desc: Optional[str] = None, category: Optional[str] = None) -> None:
""" """
注册单个命令 注册单个命令
""" """
@@ -352,8 +352,8 @@ class Command(metaclass=Singleton):
"data": data or {} "data": data or {}
} }
def execute(self, cmd: str, data_str: str = "", def execute(self, cmd: str, data_str: Optional[str] = "",
channel: MessageChannel = None, source: str = None, channel: MessageChannel = None, source: Optional[str] = None,
userid: Union[str, int] = None) -> None: userid: Union[str, int] = None) -> None:
""" """
执行命令 执行命令

View File

@@ -1,6 +1,7 @@
import inspect import inspect
import json import json
import pickle import pickle
import threading
from abc import ABC, abstractmethod from abc import ABC, abstractmethod
from functools import wraps from functools import wraps
from typing import Any, Dict, Optional from typing import Any, Dict, Optional
@@ -16,6 +17,8 @@ from app.log import logger
# 默认缓存区 # 默认缓存区
DEFAULT_CACHE_REGION = "DEFAULT" DEFAULT_CACHE_REGION = "DEFAULT"
lock = threading.Lock()
class CacheBackend(ABC): class CacheBackend(ABC):
""" """
@@ -23,7 +26,7 @@ class CacheBackend(ABC):
""" """
@abstractmethod @abstractmethod
def set(self, key: str, value: Any, ttl: int, region: str = DEFAULT_CACHE_REGION, **kwargs) -> None: def set(self, key: str, value: Any, ttl: int, region: Optional[str] = DEFAULT_CACHE_REGION, **kwargs) -> None:
""" """
设置缓存 设置缓存
@@ -36,7 +39,7 @@ class CacheBackend(ABC):
pass pass
@abstractmethod @abstractmethod
def exists(self, key: str, region: str = DEFAULT_CACHE_REGION) -> bool: def exists(self, key: str, region: Optional[str] = DEFAULT_CACHE_REGION) -> bool:
""" """
判断缓存键是否存在 判断缓存键是否存在
@@ -47,7 +50,7 @@ class CacheBackend(ABC):
pass pass
@abstractmethod @abstractmethod
def get(self, key: str, region: str = DEFAULT_CACHE_REGION) -> Any: def get(self, key: str, region: Optional[str] = DEFAULT_CACHE_REGION) -> Any:
""" """
获取缓存 获取缓存
@@ -58,7 +61,7 @@ class CacheBackend(ABC):
pass pass
@abstractmethod @abstractmethod
def delete(self, key: str, region: str = DEFAULT_CACHE_REGION) -> None: def delete(self, key: str, region: Optional[str] = DEFAULT_CACHE_REGION) -> None:
""" """
删除缓存 删除缓存
@@ -84,7 +87,7 @@ class CacheBackend(ABC):
pass pass
@staticmethod @staticmethod
def get_region(region: str = DEFAULT_CACHE_REGION): def get_region(region: Optional[str] = DEFAULT_CACHE_REGION):
""" """
获取缓存的区 获取缓存的区
""" """
@@ -128,7 +131,7 @@ class CacheToolsBackend(CacheBackend):
- 不支持按 `key` 独立隔离 TTL 和 Maxsize仅支持作用于 region 级别 - 不支持按 `key` 独立隔离 TTL 和 Maxsize仅支持作用于 region 级别
""" """
def __init__(self, maxsize: int = 1000, ttl: int = 1800): def __init__(self, maxsize: Optional[int] = 1000, ttl: Optional[int] = 1800):
""" """
初始化缓存实例 初始化缓存实例
@@ -147,7 +150,8 @@ class CacheToolsBackend(CacheBackend):
region = self.get_region(region) region = self.get_region(region)
return self._region_caches.get(region) return self._region_caches.get(region)
def set(self, key: str, value: Any, ttl: int = None, region: str = DEFAULT_CACHE_REGION, **kwargs) -> None: def set(self, key: str, value: Any, ttl: Optional[int] = None,
region: Optional[str] = DEFAULT_CACHE_REGION, **kwargs) -> None:
""" """
设置缓存值支持每个 key 独立配置 TTL 和 Maxsize 设置缓存值支持每个 key 独立配置 TTL 和 Maxsize
@@ -163,9 +167,10 @@ class CacheToolsBackend(CacheBackend):
# 如果该 key 尚未有缓存实例,则创建一个新的 TTLCache 实例 # 如果该 key 尚未有缓存实例,则创建一个新的 TTLCache 实例
region_cache = self._region_caches.setdefault(region, TTLCache(maxsize=maxsize, ttl=ttl)) region_cache = self._region_caches.setdefault(region, TTLCache(maxsize=maxsize, ttl=ttl))
# 设置缓存值 # 设置缓存值
region_cache[key] = value with lock:
region_cache[key] = value
def exists(self, key: str, region: str = DEFAULT_CACHE_REGION) -> bool: def exists(self, key: str, region: Optional[str] = DEFAULT_CACHE_REGION) -> bool:
""" """
判断缓存键是否存在 判断缓存键是否存在
@@ -178,7 +183,7 @@ class CacheToolsBackend(CacheBackend):
return False return False
return key in region_cache return key in region_cache
def get(self, key: str, region: str = DEFAULT_CACHE_REGION) -> Any: def get(self, key: str, region: Optional[str] = DEFAULT_CACHE_REGION) -> Any:
""" """
获取缓存的值 获取缓存的值
@@ -191,7 +196,7 @@ class CacheToolsBackend(CacheBackend):
return None return None
return region_cache.get(key) return region_cache.get(key)
def delete(self, key: str, region: str = DEFAULT_CACHE_REGION) -> None: def delete(self, key: str, region: Optional[str] = DEFAULT_CACHE_REGION) -> None:
""" """
删除缓存 删除缓存
@@ -201,7 +206,8 @@ class CacheToolsBackend(CacheBackend):
region_cache = self.__get_region_cache(region) region_cache = self.__get_region_cache(region)
if region_cache is None: if region_cache is None:
return None return None
del region_cache[key] with lock:
del region_cache[key]
def clear(self, region: Optional[str] = None) -> None: def clear(self, region: Optional[str] = None) -> None:
""" """
@@ -213,12 +219,14 @@ class CacheToolsBackend(CacheBackend):
# 清理指定缓存区 # 清理指定缓存区
region_cache = self.__get_region_cache(region) region_cache = self.__get_region_cache(region)
if region_cache: if region_cache:
region_cache.clear() with lock:
region_cache.clear()
logger.info(f"Cleared cache for region: {region}") logger.info(f"Cleared cache for region: {region}")
else: else:
# 清除所有区域的缓存 # 清除所有区域的缓存
for region_cache in self._region_caches.values(): for region_cache in self._region_caches.values():
region_cache.clear() with lock:
region_cache.clear()
logger.info("Cleared all cache") logger.info("Cleared all cache")
def close(self) -> None: def close(self) -> None:
@@ -246,7 +254,7 @@ class RedisBackend(CacheBackend):
_complex_serializable_types = set() _complex_serializable_types = set()
_simple_serializable_types = set() _simple_serializable_types = set()
def __init__(self, redis_url: str = "redis://localhost", ttl: int = 1800): def __init__(self, redis_url: Optional[str] = "redis://localhost", ttl: Optional[int] = 1800):
""" """
初始化 Redis 缓存实例 初始化 Redis 缓存实例
@@ -271,7 +279,7 @@ class RedisBackend(CacheBackend):
logger.error(f"Failed to connect to Redis: {e}") logger.error(f"Failed to connect to Redis: {e}")
raise RuntimeError("Redis connection failed") from e raise RuntimeError("Redis connection failed") from e
def set_memory_limit(self, policy: str = "allkeys-lru"): def set_memory_limit(self, policy: Optional[str] = "allkeys-lru"):
""" """
动态设置 Redis 最大内存和内存淘汰策略 动态设置 Redis 最大内存和内存淘汰策略
:param policy: 淘汰策略(如 'allkeys-lru' :param policy: 淘汰策略(如 'allkeys-lru'
@@ -349,7 +357,8 @@ class RedisBackend(CacheBackend):
region = self.get_region(quote(region)) region = self.get_region(quote(region))
return f"{region}:key:{quote(key)}" return f"{region}:key:{quote(key)}"
def set(self, key: str, value: Any, ttl: int = None, region: str = DEFAULT_CACHE_REGION, **kwargs) -> None: def set(self, key: str, value: Any, ttl: Optional[int] = None,
region: Optional[str] = DEFAULT_CACHE_REGION, **kwargs) -> None:
""" """
设置缓存 设置缓存
@@ -369,7 +378,7 @@ class RedisBackend(CacheBackend):
except Exception as e: except Exception as e:
logger.error(f"Failed to set key: {key} in region: {region}, error: {e}") logger.error(f"Failed to set key: {key} in region: {region}, error: {e}")
def exists(self, key: str, region: str = DEFAULT_CACHE_REGION) -> bool: def exists(self, key: str, region: Optional[str] = DEFAULT_CACHE_REGION) -> bool:
""" """
判断缓存键是否存在 判断缓存键是否存在
@@ -384,7 +393,7 @@ class RedisBackend(CacheBackend):
logger.error(f"Failed to exists key: {key} region: {region}, error: {e}") logger.error(f"Failed to exists key: {key} region: {region}, error: {e}")
return False return False
def get(self, key: str, region: str = DEFAULT_CACHE_REGION) -> Optional[Any]: def get(self, key: str, region: Optional[str] = DEFAULT_CACHE_REGION) -> Optional[Any]:
""" """
获取缓存的值 获取缓存的值
@@ -402,7 +411,7 @@ class RedisBackend(CacheBackend):
logger.error(f"Failed to get key: {key} in region: {region}, error: {e}") logger.error(f"Failed to get key: {key} in region: {region}, error: {e}")
return None return None
def delete(self, key: str, region: str = DEFAULT_CACHE_REGION) -> None: def delete(self, key: str, region: Optional[str] = DEFAULT_CACHE_REGION) -> None:
""" """
删除缓存 删除缓存
@@ -445,7 +454,7 @@ class RedisBackend(CacheBackend):
self.client.close() self.client.close()
def get_cache_backend(maxsize: int = 1000, ttl: int = 1800) -> CacheBackend: def get_cache_backend(maxsize: Optional[int] = 1000, ttl: Optional[int] = 1800) -> CacheBackend:
""" """
根据配置获取缓存后端实例 根据配置获取缓存后端实例
@@ -473,8 +482,8 @@ def get_cache_backend(maxsize: int = 1000, ttl: int = 1800) -> CacheBackend:
return CacheToolsBackend(maxsize=maxsize, ttl=ttl) return CacheToolsBackend(maxsize=maxsize, ttl=ttl)
def cached(region: Optional[str] = None, maxsize: int = 1000, ttl: int = 1800, def cached(region: Optional[str] = None, maxsize: Optional[int] = 1000, ttl: Optional[int] = 1800,
skip_none: bool = True, skip_empty: bool = False): skip_none: Optional[bool] = True, skip_empty: Optional[bool] = False):
""" """
自定义缓存装饰器,支持为每个 key 动态传递 maxsize 和 ttl 自定义缓存装饰器,支持为每个 key 动态传递 maxsize 和 ttl

View File

@@ -109,6 +109,10 @@ class ConfigModel(BaseModel):
FANART_ENABLE: bool = True FANART_ENABLE: bool = True
# Fanart API Key # Fanart API Key
FANART_API_KEY: str = "d2d31f9ecabea050fc7d68aa3146015f" FANART_API_KEY: str = "d2d31f9ecabea050fc7d68aa3146015f"
# 115 AppId
U115_APP_ID: str = "100196807"
# Alipan AppId
ALIPAN_APP_ID: str = "ac1bf04dc9fd4d9aaabb65b4a668d403"
# 元数据识别缓存过期时间(小时) # 元数据识别缓存过期时间(小时)
META_CACHE_EXPIRE: int = 0 META_CACHE_EXPIRE: int = 0
# 电视剧动漫的分类genre_ids # 电视剧动漫的分类genre_ids
@@ -208,7 +212,8 @@ class ConfigModel(BaseModel):
PLUGIN_MARKET: str = ("https://github.com/jxxghp/MoviePilot-Plugins," PLUGIN_MARKET: str = ("https://github.com/jxxghp/MoviePilot-Plugins,"
"https://github.com/thsrite/MoviePilot-Plugins," "https://github.com/thsrite/MoviePilot-Plugins,"
"https://github.com/honue/MoviePilot-Plugins," "https://github.com/honue/MoviePilot-Plugins,"
"https://github.com/InfinityPacer/MoviePilot-Plugins") "https://github.com/InfinityPacer/MoviePilot-Plugins,"
"https://github.com/DDS-Derek/MoviePilot-Plugins")
# 插件安装数据共享 # 插件安装数据共享
PLUGIN_STATISTIC_SHARE: bool = True PLUGIN_STATISTIC_SHARE: bool = True
# 是否开启插件热加载 # 是否开启插件热加载
@@ -247,7 +252,7 @@ class ConfigModel(BaseModel):
) )
# 允许的图片文件后缀格式 # 允许的图片文件后缀格式
SECURITY_IMAGE_SUFFIXES: List[str] = Field( SECURITY_IMAGE_SUFFIXES: List[str] = Field(
default_factory=lambda: [".jpg", ".jpeg", ".png", ".webp", ".gif", ".svg"] default_factory=lambda: [".jpg", ".jpeg", ".png", ".webp", ".gif", ".svg", ".avif"]
) )
# 重命名时支持的S0别名 # 重命名时支持的S0别名
RENAME_FORMAT_S0_NAMES: List[str] = Field( RENAME_FORMAT_S0_NAMES: List[str] = Field(
@@ -255,6 +260,8 @@ class ConfigModel(BaseModel):
) )
# 启用分词搜索 # 启用分词搜索
TOKENIZED_SEARCH: bool = False TOKENIZED_SEARCH: bool = False
# 为指定默认字幕添加.default后缀
DEFAULT_SUB: Optional[str] = "zh-cn"
class Settings(BaseSettings, ConfigModel, LogConfigModel): class Settings(BaseSettings, ConfigModel, LogConfigModel):
@@ -361,7 +368,7 @@ class Settings(BaseSettings, ConfigModel, LogConfigModel):
raise ValueError(f"配置项 '{field_name}' 的值 '{value}' 无法转换成正确的类型") from e raise ValueError(f"配置项 '{field_name}' 的值 '{value}' 无法转换成正确的类型") from e
logger.error( logger.error(
f"配置项 '{field_name}' 的值 '{value}' 无法转换成正确的类型,使用默认值 '{default}',错误信息: {e}") f"配置项 '{field_name}' 的值 '{value}' 无法转换成正确的类型,使用默认值 '{default}',错误信息: {e}")
return default, True return default, True
@validator('*', pre=True, always=True) @validator('*', pre=True, always=True)
def generic_type_validator(cls, value: Any, field): # noqa def generic_type_validator(cls, value: Any, field): # noqa
@@ -605,6 +612,8 @@ class GlobalVar(object):
STOP_EVENT: threading.Event = threading.Event() STOP_EVENT: threading.Event = threading.Event()
# webpush订阅 # webpush订阅
SUBSCRIPTIONS: List[dict] = [] SUBSCRIPTIONS: List[dict] = []
# 需应急停止的工作流
EMERGENCY_STOP_WORKFLOWS: List[int] = []
def stop_system(self): def stop_system(self):
""" """
@@ -631,6 +640,26 @@ class GlobalVar(object):
""" """
self.SUBSCRIPTIONS.append(subscription) self.SUBSCRIPTIONS.append(subscription)
def stop_workflow(self, workflow_id: int):
"""
停止工作流
"""
if workflow_id not in self.EMERGENCY_STOP_WORKFLOWS:
self.EMERGENCY_STOP_WORKFLOWS.append(workflow_id)
def workflow_resume(self, workflow_id: int):
"""
恢复工作流
"""
if workflow_id in self.EMERGENCY_STOP_WORKFLOWS:
self.EMERGENCY_STOP_WORKFLOWS.remove(workflow_id)
def is_workflow_stopped(self, workflow_id: int):
"""
是否停止工作流
"""
return self.is_system_stopped or workflow_id in self.EMERGENCY_STOP_WORKFLOWS
# 实例化配置 # 实例化配置
settings = Settings() settings = Settings()

View File

@@ -1,7 +1,7 @@
import re import re
from dataclasses import dataclass, field from dataclasses import dataclass, field
from datetime import datetime from datetime import datetime
from typing import List, Dict, Any, Tuple from typing import List, Dict, Any, Tuple, Optional
from app.core.config import settings from app.core.config import settings
from app.core.meta import MetaBase from app.core.meta import MetaBase
@@ -37,7 +37,7 @@ class TorrentInfo:
# 详情页面 # 详情页面
page_url: str = None page_url: str = None
# 种子大小 # 种子大小
size: float = 0 size: float = 0.0
# 做种者 # 做种者
seeders: int = 0 seeders: int = 0
# 下载者 # 下载者
@@ -193,7 +193,7 @@ class MediaInfo:
# LOGO # LOGO
logo_path: str = None logo_path: str = None
# 评分 # 评分
vote_average: float = 0 vote_average: float = 0.0
# 描述 # 描述
overview: str = None overview: str = None
# 风格ID # 风格ID
@@ -264,6 +264,10 @@ class MediaInfo:
next_episode_to_air: dict = field(default_factory=dict) next_episode_to_air: dict = field(default_factory=dict)
# 内容分级 # 内容分级
content_rating: str = None content_rating: str = None
# 全部剧集组
episode_groups: List[dict] = field(default_factory=list)
# 剧集组
episode_group: str = None
def __post_init__(self): def __post_init__(self):
# 设置媒体信息 # 设置媒体信息
@@ -454,6 +458,10 @@ class MediaInfo:
air_date = seainfo.get("air_date") air_date = seainfo.get("air_date")
if air_date: if air_date:
self.season_years[season] = air_date[:4] self.season_years[season] = air_date[:4]
# 剧集组
if info.get("episode_groups"):
self.episode_groups = info.pop("episode_groups").get("results") or []
# 海报 # 海报
if info.get('poster_path'): if info.get('poster_path'):
self.poster_path = f"https://{settings.TMDB_IMAGE_DOMAIN}/t/p/original{info.get('poster_path')}" self.poster_path = f"https://{settings.TMDB_IMAGE_DOMAIN}/t/p/original{info.get('poster_path')}"
@@ -714,7 +722,7 @@ class MediaInfo:
return self.backdrop_path.replace("original", "w500") return self.backdrop_path.replace("original", "w500")
return default or "" return default or ""
def get_message_image(self, default: bool = None): def get_message_image(self, default: Optional[bool] = None):
""" """
返回消息图片地址 返回消息图片地址
""" """
@@ -722,7 +730,7 @@ class MediaInfo:
return self.backdrop_path.replace("original", "w500") return self.backdrop_path.replace("original", "w500")
return self.get_poster_image(default=default) return self.get_poster_image(default=default)
def get_poster_image(self, default: bool = None): def get_poster_image(self, default: Optional[bool] = None):
""" """
返回海报图片地址 返回海报图片地址
""" """
@@ -730,7 +738,7 @@ class MediaInfo:
return self.poster_path.replace("original", "w500") return self.poster_path.replace("original", "w500")
return default or "" return default or ""
def get_overview_string(self, max_len: int = 140): def get_overview_string(self, max_len: Optional[int] = 140):
""" """
返回带限定长度的简介信息 返回带限定长度的简介信息
:param max_len: 内容长度 :param max_len: 内容长度
@@ -773,6 +781,7 @@ class MediaInfo:
self.spoken_languages = [] self.spoken_languages = []
self.networks = [] self.networks = []
self.next_episode_to_air = {} self.next_episode_to_air = {}
self.episode_groups = []
@dataclass @dataclass

View File

@@ -31,7 +31,7 @@ class Event:
def __init__(self, event_type: Union[EventType, ChainEventType], def __init__(self, event_type: Union[EventType, ChainEventType],
event_data: Optional[Union[Dict, ChainEventData]] = None, event_data: Optional[Union[Dict, ChainEventData]] = None,
priority: int = DEFAULT_EVENT_PRIORITY): priority: Optional[int] = DEFAULT_EVENT_PRIORITY):
""" """
:param event_type: 事件的类型,支持 EventType 或 ChainEventType :param event_type: 事件的类型,支持 EventType 或 ChainEventType
:param event_data: 可选,事件携带的数据,默认为空字典 :param event_data: 可选,事件携带的数据,默认为空字典
@@ -130,7 +130,7 @@ class EventManager(metaclass=Singleton):
) )
def send_event(self, etype: Union[EventType, ChainEventType], data: Optional[Union[Dict, ChainEventData]] = None, def send_event(self, etype: Union[EventType, ChainEventType], data: Optional[Union[Dict, ChainEventData]] = None,
priority: int = DEFAULT_EVENT_PRIORITY) -> Optional[Event]: priority: Optional[int] = DEFAULT_EVENT_PRIORITY) -> Optional[Event]:
""" """
发送事件,根据事件类型决定是广播事件还是链式事件 发送事件,根据事件类型决定是广播事件还是链式事件
:param etype: 事件类型 (EventType 或 ChainEventType) :param etype: 事件类型 (EventType 或 ChainEventType)
@@ -147,7 +147,7 @@ class EventManager(metaclass=Singleton):
logger.error(f"Unknown event type: {etype}") logger.error(f"Unknown event type: {etype}")
def add_event_listener(self, event_type: Union[EventType, ChainEventType], handler: Callable, def add_event_listener(self, event_type: Union[EventType, ChainEventType], handler: Callable,
priority: int = DEFAULT_EVENT_PRIORITY): priority: Optional[int] = DEFAULT_EVENT_PRIORITY):
""" """
注册事件处理器,将处理器添加到对应的事件订阅列表中 注册事件处理器,将处理器添加到对应的事件订阅列表中
:param event_type: 事件类型 (EventType 或 ChainEventType) :param event_type: 事件类型 (EventType 或 ChainEventType)
@@ -506,7 +506,7 @@ class EventManager(metaclass=Singleton):
) )
def register(self, etype: Union[EventType, ChainEventType, List[Union[EventType, ChainEventType]], type], def register(self, etype: Union[EventType, ChainEventType, List[Union[EventType, ChainEventType]], type],
priority: int = DEFAULT_EVENT_PRIORITY): priority: Optional[int] = DEFAULT_EVENT_PRIORITY):
""" """
事件注册装饰器,用于将函数注册为事件的处理器 事件注册装饰器,用于将函数注册为事件的处理器
:param etype: :param etype:

View File

@@ -172,7 +172,7 @@ class MetaVideo(MetaBase):
return None return None
@staticmethod @staticmethod
def __is_pinyin(name_str: str) -> bool: def __is_pinyin(name_str: Optional[str]) -> bool:
""" """
判断是否拼音 判断是否拼音
""" """
@@ -183,7 +183,7 @@ class MetaVideo(MetaBase):
return False return False
return True return True
def __fix_name(self, name: str): def __fix_name(self, name: Optional[str]):
""" """
去掉名字中不需要的干扰字符 去掉名字中不需要的干扰字符
""" """
@@ -207,7 +207,7 @@ class MetaVideo(MetaBase):
name = None name = None
return name return name
def __init_name(self, token: str): def __init_name(self, token: Optional[str]):
""" """
识别名称 识别名称
""" """

View File

@@ -15,32 +15,32 @@ class ReleaseGroupsMatcher(metaclass=Singleton):
"0ff": ['FF(?:(?:A|WE)B|CD|E(?:DU|B)|TV)'], "0ff": ['FF(?:(?:A|WE)B|CD|E(?:DU|B)|TV)'],
"1pt": [], "1pt": [],
"52pt": [], "52pt": [],
"audiences": ['Audies', 'AD(?:Audio|E(?:|book)|Music|Web)'], "audiences": ['Audies', 'AD(?:Audio|E(?:book|)|Music|Web)'],
"azusa": [], "azusa": [],
"beitai": ['BeiTai'], "beitai": ['BeiTai'],
"btschool": ['Bts(?:CHOOL|HD|PAD|TV)', 'Zone'], "btschool": ['Bts(?:CHOOL|HD|PAD|TV)', 'Zone'],
"carpt": ['CarPT'], "carpt": ['CarPT'],
"chdbits": ['CHD(?:|Bits|PAD|(?:|HK)TV|WEB)', 'StBOX', 'OneHD', 'Lee', 'xiaopie'], "chdbits": ['CHD(?:Bits|PAD|(?:|HK)TV|WEB|)', 'StBOX', 'OneHD', 'Lee', 'xiaopie'],
"discfan": [], "discfan": [],
"dragonhd": [], "dragonhd": [],
"eastgame": ['(?:(?:iNT|(?:HALFC|Mini(?:S|H|FH)D))-|)TLF'], "eastgame": ['(?:(?:iNT|(?:HALFC|Mini(?:S|H|FH)D))-|)TLF'],
"filelist": [], "filelist": [],
"gainbound": ['(?:DG|GBWE)B'], "gainbound": ['(?:DG|GBWE)B'],
"hares": ['Hares(?:|(?:M|T)V|Web)'], "hares": ['Hares(?:(?:M|T)V|Web|)'],
"hd4fans": [], "hd4fans": [],
"hdarea": ['HDA(?:pad|rea|TV)', 'EPiC'], "hdarea": ['HDA(?:pad|rea|TV)', 'EPiC'],
"hdatmos": [], "hdatmos": [],
"hdbd": [], "hdbd": [],
"hdchina": ['HDC(?:|hina|TV)', 'k9611', 'tudou', 'iHD'], "hdchina": ['HDC(?:hina|TV|)', 'k9611', 'tudou', 'iHD'],
"hddolby": ['D(?:ream|BTV)', '(?:HD|QHstudI)o'], "hddolby": ['D(?:ream|BTV)', '(?:HD|QHstudI)o'],
"hdfans": ['beAst(?:|TV)'], "hdfans": ['beAst(?:TV|)'],
"hdhome": ['HDH(?:|ome|Pad|TV|WEB)'], "hdhome": ['HDH(?:ome|Pad|TV|WEB|)'],
"hdpt": ['HDPT(?:|Web)'], "hdpt": ['HDPT(?:Web|)'],
"hdsky": ['HDS(?:|ky|TV|Pad|WEB)', 'AQLJ'], "hdsky": ['HDS(?:ky|TV|Pad|WEB|)', 'AQLJ'],
"hdtime": [], "hdtime": [],
"HDU": [], "HDU": [],
"hdvideo": [], "hdvideo": [],
"hdzone": ['HDZ(?:|one)'], "hdzone": ['HDZ(?:one|)'],
"hhanclub": ['HHWEB'], "hhanclub": ['HHWEB'],
"hitpt": [], "hitpt": [],
"htpt": ['HTPT'], "htpt": ['HTPT'],
@@ -48,34 +48,36 @@ class ReleaseGroupsMatcher(metaclass=Singleton):
"joyhd": [], "joyhd": [],
"keepfrds": ['FRDS', 'Yumi', 'cXcY'], "keepfrds": ['FRDS', 'Yumi', 'cXcY'],
"lemonhd": ['L(?:eague(?:(?:C|H)D|(?:M|T)V|NF|WEB)|HD)', 'i18n', 'CiNT'], "lemonhd": ['L(?:eague(?:(?:C|H)D|(?:M|T)V|NF|WEB)|HD)', 'i18n', 'CiNT'],
"mteam": ['MTeam(?:|TV)', 'MPAD'], "mteam": ['MTeam(?:TV|)', 'MPAD'],
"nanyangpt": [], "nanyangpt": [],
"nicept": [], "nicept": [],
"oshen": [], "oshen": [],
"ourbits": ['Our(?:Bits|TV)', 'FLTTH', 'Ao', 'PbK', 'MGs', 'iLove(?:HD|TV)'], "ourbits": ['Our(?:Bits|TV)', 'FLTTH', 'Ao', 'PbK', 'MGs', 'iLove(?:HD|TV)'],
"piggo": ['PiGo(?:NF|(?:H|WE)B)'], "piggo": ['PiGo(?:NF|(?:H|WE)B)'],
"ptchina": [], "ptchina": [],
"pterclub": ['PTer(?:|DIY|Game|(?:M|T)V|WEB)'], "pterclub": ['PTer(?:DIY|Game|(?:M|T)V|WEB|)'],
"pthome": ['PTH(?:|Audio|eBook|music|ome|tv|WEB)'], "pthome": ['PTH(?:Audio|eBook|music|ome|tv|WEB|)'],
"ptmsg": [], "ptmsg": [],
"ptsbao": ['PTsbao', 'OPS', 'F(?:Fans(?:AIeNcE|BD|D(?:VD|IY)|TV|WEB)|HDMv)', 'SGXT'], "ptsbao": ['PTsbao', 'OPS', 'F(?:Fans(?:AIeNcE|BD|D(?:VD|IY)|TV|WEB)|HDMv)', 'SGXT'],
"pttime": [], "pttime": [],
"putao": ['PuTao'], "putao": ['PuTao'],
"soulvoice": [], "soulvoice": [],
"springsunday": ['CMCT(?:|V)'], "springsunday": ['CMCT(?:V|)'],
"sharkpt": ['Shark(?:|WEB|DIY|TV|MV)'], "sharkpt": ['Shark(?:WEB|DIY|TV|MV|)'],
"tccf": [], "tccf": [],
"tjupt": ['TJUPT'], "tjupt": ['TJUPT'],
"totheglory": ['TTG', 'WiKi', 'NGB', 'DoA', '(?:ARi|ExRE)N'], "totheglory": ['TTG', 'WiKi', 'NGB', 'DoA', '(?:ARi|ExRE)N'],
"U2": [], "U2": [],
"ultrahd": [], "ultrahd": [],
"others": ['B(?:MDru|eyondHD|TN)', 'C(?:fandora|trlhd|MRG)', 'DON', 'EVO', 'FLUX', 'HONE(?:|yG)', "others": ['B(?:MDru|eyondHD|TN)', 'C(?:fandora|trlhd|MRG)', 'DON', 'EVO', 'FLUX', 'HONE(?:yG|)',
'N(?:oGroup|T(?:b|G))', 'PandaMoon', 'SMURF', 'T(?:EPES|aengoo|rollHD )', 'UBWEB'], 'N(?:oGroup|T(?:b|G))', 'PandaMoon', 'SMURF', 'T(?:EPES|aengoo|rollHD )',],
"anime": ['ANi', 'HYSUB', 'KTXP', 'LoliHouse', 'MCE', 'Nekomoe kissaten', 'SweetSub', 'MingY', "anime": ['ANi', 'HYSUB', 'KTXP', 'LoliHouse', 'MCE', 'Nekomoe kissaten', 'SweetSub', 'MingY',
'(?:Lilith|NC)-Raws', '织梦字幕组', '枫叶字幕组', '猎户手抄部', '喵萌奶茶屋', '漫猫字幕社', '(?:Lilith|NC)-Raws', '织梦字幕组', '枫叶字幕组', '猎户手抄部', '喵萌奶茶屋', '漫猫字幕社',
'霜庭云花Sub', '北宇治字幕组', '氢气烤肉架', '云歌字幕组', '萌樱字幕组', '极影字幕社', '霜庭云花Sub', '北宇治字幕组', '氢气烤肉架', '云歌字幕组', '萌樱字幕组', '极影字幕社',
'悠哈璃羽字幕社', '悠哈璃羽字幕社',
'❀拨雪寻春❀', '沸羊羊(?:制作|字幕组)', '(?:桜|樱)都字幕组'] '❀拨雪寻春❀', '沸羊羊(?:制作|字幕组)', '(?:桜|樱)都字幕组'],
"forge": ['FROG(?:E|Web|)'],
"ubits": ['UB(?:its|WEB|TV)'],
} }
def __init__(self): def __init__(self):
@@ -97,13 +99,15 @@ class ReleaseGroupsMatcher(metaclass=Singleton):
if not groups: if not groups:
# 自定义组 # 自定义组
custom_release_groups = self.systemconfig.get(SystemConfigKey.CustomReleaseGroups) custom_release_groups = self.systemconfig.get(SystemConfigKey.CustomReleaseGroups)
if isinstance(custom_release_groups, list):
custom_release_groups = list(filter(None, custom_release_groups))
if custom_release_groups: if custom_release_groups:
custom_release_groups_str = '|'.join(custom_release_groups) custom_release_groups_str = '|'.join(custom_release_groups)
groups = f"{self.__release_groups}|{custom_release_groups_str}" groups = f"{self.__release_groups}|{custom_release_groups_str}"
else: else:
groups = self.__release_groups groups = self.__release_groups
title = f"{title} " title = f"{title} "
groups_re = re.compile(r"(?<=[-@\[£【&])(?:%s)(?=[@.\s\]\[】&])" % groups, re.I) groups_re = re.compile(r"(?<=[-@\[£【&])(?:%s)(?=[@.\s\S\]\[】&])" % groups, re.I)
# 处理一个制作组识别多次的情况,保留顺序 # 处理一个制作组识别多次的情况,保留顺序
unique_groups = [] unique_groups = []
for item in re.findall(groups_re, title): for item in re.findall(groups_re, title):

View File

@@ -1,5 +1,5 @@
from pathlib import Path from pathlib import Path
from typing import Tuple, List from typing import Tuple, List, Optional
import regex as re import regex as re
@@ -10,7 +10,7 @@ from app.log import logger
from app.schemas.types import MediaType from app.schemas.types import MediaType
def MetaInfo(title: str, subtitle: str = None, custom_words: List[str] = None) -> MetaBase: def MetaInfo(title: str, subtitle: Optional[str] = None, custom_words: List[str] = None) -> MetaBase:
""" """
根据标题和副标题识别元数据 根据标题和副标题识别元数据
:param title: 标题、种子名、文件名 :param title: 标题、种子名、文件名
@@ -92,7 +92,8 @@ def is_anime(name: str) -> bool:
return True return True
if re.search(r'\s+-\s+[\dv]{1,4}\s+', name, re.IGNORECASE): if re.search(r'\s+-\s+[\dv]{1,4}\s+', name, re.IGNORECASE):
return True return True
if re.search(r"S\d{2}\s*-\s*S\d{2}|S\d{2}|\s+S\d{1,2}|EP?\d{2,4}\s*-\s*EP?\d{2,4}|EP?\d{2,4}|\s+EP?\d{1,4}", name, if re.search(r"S\d{2}\s*-\s*S\d{2}|S\d{2}|\s+S\d{1,2}|EP?\d{2,4}\s*-\s*EP?\d{2,4}|EP?\d{2,4}|\s+EP?\d{1,4}",
name,
re.IGNORECASE): re.IGNORECASE):
return False return False
if re.search(r'\[[+0-9XVPI-]+]\s*\[', name, re.IGNORECASE): if re.search(r'\[[+0-9XVPI-]+]\s*\[', name, re.IGNORECASE):
@@ -133,13 +134,10 @@ def find_metainfo(title: str) -> Tuple[str, dict]:
# 查找媒体类型 # 查找媒体类型
mtype = re.findall(r'(?<=type=)\w+', result) mtype = re.findall(r'(?<=type=)\w+', result)
if mtype: if mtype:
match mtype[0]: if mtype[0] == "movies":
case "movie": metainfo['type'] = MediaType.MOVIE
metainfo['type'] = MediaType.MOVIE elif mtype[0] == "tv":
case "tv": metainfo['type'] = MediaType.TV
metainfo['type'] = MediaType.TV
case _:
pass
# 查找季信息 # 查找季信息
begin_season = re.findall(r'(?<=s=)\d+', result) begin_season = re.findall(r'(?<=s=)\d+', result)
if begin_season and begin_season[0].isdigit(): if begin_season and begin_season[0].isdigit():

View File

@@ -121,7 +121,7 @@ class ModuleManager(metaclass=Singleton):
获取实现了同一方法的模块列表 获取实现了同一方法的模块列表
""" """
if not self._running_modules: if not self._running_modules:
return [] return
for _, module in self._running_modules.items(): for _, module in self._running_modules.items():
if hasattr(module, method) \ if hasattr(module, method) \
and ObjectUtils.check_method(getattr(module, method)): and ObjectUtils.check_method(getattr(module, method)):
@@ -132,7 +132,7 @@ class ModuleManager(metaclass=Singleton):
获取指定类型的模块列表 获取指定类型的模块列表
""" """
if not self._running_modules: if not self._running_modules:
return [] return
for _, module in self._running_modules.items(): for _, module in self._running_modules.items():
if hasattr(module, 'get_type') \ if hasattr(module, 'get_type') \
and module.get_type() == module_type: and module.get_type() == module_type:
@@ -143,7 +143,7 @@ class ModuleManager(metaclass=Singleton):
获取指定子类型的模块 获取指定子类型的模块
""" """
if not self._running_modules: if not self._running_modules:
return [] return
for _, module in self._running_modules.items(): for _, module in self._running_modules.items():
if hasattr(module, 'get_subtype') \ if hasattr(module, 'get_subtype') \
and module.get_subtype() == module_subtype: and module.get_subtype() == module_subtype:

View File

@@ -111,7 +111,7 @@ class PluginManager(metaclass=Singleton):
# 启动插件 # 启动插件
self.start() self.start()
def start(self, pid: str = None): def start(self, pid: Optional[str] = None):
""" """
启动加载插件 启动加载插件
:param pid: 插件ID为空加载所有插件 :param pid: 插件ID为空加载所有插件
@@ -194,7 +194,7 @@ class PluginManager(metaclass=Singleton):
# 禁用插件类的事件处理器 # 禁用插件类的事件处理器
eventmanager.disable_event_handler(type(plugin)) eventmanager.disable_event_handler(type(plugin))
def stop(self, pid: str = None): def stop(self, pid: Optional[str] = None):
""" """
停止插件服务 停止插件服务
:param pid: 插件ID为空停止所有插件 :param pid: 插件ID为空停止所有插件
@@ -431,7 +431,7 @@ class PluginManager(metaclass=Singleton):
return plugin.get_page() or [] return plugin.get_page() or []
return [] return []
def get_plugin_dashboard(self, pid: str, key: str = None, **kwargs) -> Optional[schemas.PluginDashboard]: def get_plugin_dashboard(self, pid: str, key: Optional[str] = None, **kwargs) -> Optional[schemas.PluginDashboard]:
""" """
获取插件仪表盘 获取插件仪表盘
:param pid: 插件ID :param pid: 插件ID
@@ -781,7 +781,7 @@ class PluginManager(metaclass=Singleton):
logger.debug(f"获取插件是否在本地包中存在失败,{e}") logger.debug(f"获取插件是否在本地包中存在失败,{e}")
return False return False
def get_plugins_from_market(self, market: str, package_version: str = None) -> Optional[List[schemas.Plugin]]: def get_plugins_from_market(self, market: str, package_version: Optional[str] = None) -> Optional[List[schemas.Plugin]]:
""" """
从指定的市场获取插件信息 从指定的市场获取插件信息
:param market: 市场的 URL 或标识 :param market: 市场的 URL 或标识
@@ -793,10 +793,9 @@ class PluginManager(metaclass=Singleton):
# 已安装插件 # 已安装插件
installed_apps = self.systemconfig.get(SystemConfigKey.UserInstalledPlugins) or [] installed_apps = self.systemconfig.get(SystemConfigKey.UserInstalledPlugins) or []
# 获取在线插件 # 获取在线插件
online_plugins = self.pluginhelper.get_plugins(market, package_version) or {} online_plugins = self.pluginhelper.get_plugins(market, package_version)
if not online_plugins: if online_plugins is None:
if not package_version: logger.warning(f"获取{package_version if package_version else ''}插件库失败:{market},请检查 GitHub 网络连接")
logger.warning(f"获取插件库失败:{market},请检查 GitHub 网络连接")
return [] return []
ret_plugins = [] ret_plugins = []
add_time = len(online_plugins) add_time = len(online_plugins)

View File

@@ -1,10 +1,11 @@
import base64 import base64
import datetime
import hashlib import hashlib
import hmac import hmac
import json import json
import os import os
import traceback import traceback
from datetime import datetime, timedelta from datetime import timedelta
from typing import Any, Union, Annotated, Optional from typing import Any, Union, Annotated, Optional
import jwt import jwt
@@ -43,9 +44,9 @@ api_key_query = APIKeyQuery(name="apikey", auto_error=False, scheme_name="api_ke
def create_access_token( def create_access_token(
userid: Union[str, Any], userid: Union[str, Any],
username: str, username: str,
super_user: bool = False, super_user: Optional[bool] = False,
expires_delta: Optional[timedelta] = None, expires_delta: Optional[timedelta] = None,
level: int = 1, level: Optional[int] = 1,
purpose: Optional[str] = "authentication" purpose: Optional[str] = "authentication"
) -> str: ) -> str:
""" """
@@ -69,13 +70,13 @@ def create_access_token(
if expires_delta is not None: if expires_delta is not None:
if expires_delta.total_seconds() <= 0: if expires_delta.total_seconds() <= 0:
raise ValueError("过期时间必须为正数") raise ValueError("过期时间必须为正数")
expire = datetime.utcnow() + expires_delta expire = datetime.datetime.now(datetime.UTC) + expires_delta
else: else:
expire = datetime.utcnow() + default_expire expire = datetime.datetime.now(datetime.UTC) + default_expire
to_encode = { to_encode = {
"exp": expire, "exp": expire,
"iat": datetime.utcnow(), "iat": datetime.datetime.now(datetime.UTC),
"sub": str(userid), "sub": str(userid),
"username": username, "username": username,
"super_user": super_user, "super_user": super_user,
@@ -102,7 +103,7 @@ def __set_or_refresh_resource_token_cookie(request: Request, response: Response,
decoded_token = jwt.decode(resource_token, settings.RESOURCE_SECRET_KEY, algorithms=[ALGORITHM]) decoded_token = jwt.decode(resource_token, settings.RESOURCE_SECRET_KEY, algorithms=[ALGORITHM])
exp = decoded_token.get("exp") exp = decoded_token.get("exp")
if exp: if exp:
remaining_time = datetime.utcfromtimestamp(exp) - datetime.utcnow() remaining_time = datetime.datetime.fromtimestamp(exp, tz=datetime.UTC) - datetime.datetime.now(datetime.UTC)
# 根据剩余时长提前刷新令牌 # 根据剩余时长提前刷新令牌
if remaining_time < timedelta(seconds=(settings.RESOURCE_ACCESS_TOKEN_EXPIRE_SECONDS / 3)): if remaining_time < timedelta(seconds=(settings.RESOURCE_ACCESS_TOKEN_EXPIRE_SECONDS / 3)):
raise jwt.ExpiredSignatureError raise jwt.ExpiredSignatureError
@@ -135,7 +136,7 @@ def __set_or_refresh_resource_token_cookie(request: Request, response: Response,
) )
def __verify_token(token: str, purpose: str = "authentication") -> schemas.TokenPayload: def __verify_token(token: str, purpose: Optional[str] = "authentication") -> schemas.TokenPayload:
""" """
使用 JWT Token 进行身份认证并解析 Token 的内容 使用 JWT Token 进行身份认证并解析 Token 的内容
:param token: JWT 令牌 :param token: JWT 令牌
@@ -175,7 +176,7 @@ def __verify_token(token: str, purpose: str = "authentication") -> schemas.Token
def verify_token( def verify_token(
request: Request, request: Request,
response: Response, response: Response,
token: str = Security(oauth2_scheme) token: Annotated[str, Security(oauth2_scheme)]
) -> schemas.TokenPayload: ) -> schemas.TokenPayload:
""" """
验证 JWT 令牌并自动处理 resource_token 写入 验证 JWT 令牌并自动处理 resource_token 写入
@@ -195,7 +196,7 @@ def verify_token(
def verify_resource_token( def verify_resource_token(
resource_token: str = Security(resource_token_cookie) resource_token: Annotated[str, Security(resource_token_cookie)]
) -> schemas.TokenPayload: ) -> schemas.TokenPayload:
""" """
验证资源访问令牌(从 Cookie 中获取) 验证资源访问令牌(从 Cookie 中获取)
@@ -248,7 +249,7 @@ def __verify_key(key: str, expected_key: str, key_type: str) -> str:
return key return key
def verify_apitoken(token: str = Security(__get_api_token)) -> str: def verify_apitoken(token: Annotated[str, Security(__get_api_token)]) -> str:
""" """
使用 API Token 进行身份认证 使用 API Token 进行身份认证
:param token: API Token从 URL 查询参数中获取 :param token: API Token从 URL 查询参数中获取
@@ -257,7 +258,7 @@ def verify_apitoken(token: str = Security(__get_api_token)) -> str:
return __verify_key(token, settings.API_TOKEN, "API_TOKEN") return __verify_key(token, settings.API_TOKEN, "API_TOKEN")
def verify_apikey(apikey: str = Security(__get_api_key)) -> str: def verify_apikey(apikey: Annotated[str, Security(__get_api_key)]) -> str:
""" """
使用 API Key 进行身份认证 使用 API Key 进行身份认证
:param apikey: API Key从 URL 查询参数或请求头中获取 :param apikey: API Key从 URL 查询参数或请求头中获取

112
app/core/workflow.py Normal file
View File

@@ -0,0 +1,112 @@
from time import sleep
from typing import Any, Dict, List, Optional, Tuple

from app.core.config import global_vars
from app.helper.module import ModuleHelper
from app.log import logger
from app.schemas import Action, ActionContext
from app.utils.singleton import Singleton
class WorkFlowManager(metaclass=Singleton):
    """
    Workflow manager.

    Discovers action classes under the ``app.actions`` package and executes
    them as individual steps of a workflow, threading an ``ActionContext``
    from one step to the next.
    """

    # Registry of all discovered action classes, keyed by class name.
    _actions: Dict[str, Any] = {}

    def __init__(self):
        self.init()

    def init(self):
        """
        Initialize (or re-initialize) the manager by loading all action
        classes from the ``app.actions`` package into ``self._actions``.
        """

        def filter_func(obj: Any):
            """
            Filter function: keep only classes that look like concrete
            actions — defined in ``app.actions``, exposing ``execute`` and
            ``name``, and not the ``BaseAction`` base class itself.
            """
            if not isinstance(obj, type):
                return False
            if not hasattr(obj, 'execute') or not hasattr(obj, "name"):
                return False
            if obj.__name__ == "BaseAction":
                return False
            return obj.__module__.startswith("app.actions")

        # Load all action classes (registry is rebuilt from scratch)
        self._actions = {}
        actions = ModuleHelper.load(
            "app.actions",
            filter_func=lambda _, obj: filter_func(obj)
        )
        for action in actions:
            logger.debug(f"加载动作: {action.__name__}")
            try:
                self._actions[action.__name__] = action
            except Exception as err:
                logger.error(f"加载动作失败: {action.__name__} - {err}")

    def stop(self):
        """
        Stop the manager. Currently a no-op: there are no background
        resources to release.
        """
        pass

    def excute(self, workflow_id: int, action: Action,
               context: Optional[ActionContext] = None) -> Tuple[bool, str, ActionContext]:
        """
        Execute a single workflow action.

        NOTE(review): the method name ``excute`` is a typo for ``execute``;
        it is kept as-is for caller compatibility.

        :param workflow_id: id of the workflow the action belongs to
        :param action: the action definition (``type`` selects the action
            class, ``id``/``name`` identify it, ``data`` carries parameters)
        :param context: execution context handed over from the previous
            action; a fresh ``ActionContext`` is created when None/empty
        :return: (success flag, message, resulting context)
        """
        if not context:
            context = ActionContext()
        if action.type in self._actions:
            # Instantiate the action class for this step.
            # NOTE(review): the original comment claimed class-level data is
            # cleared before instantiation, but no such clearing happens here.
            action_obj = self._actions[action.type](action.id)
            # First execution
            logger.info(f"执行动作: {action.id} - {action.name}")
            try:
                result_context = action_obj.execute(workflow_id, action.data, context)
            except Exception as err:
                logger.error(f"{action.name} 执行失败: {err}")
                return False, f"{err}", context
            # Optional polling mode: when the action data carries both
            # ``loop`` and ``loop_interval``, re-run the action every
            # ``loop_interval`` seconds until it reports done or the
            # workflow is stopped externally.
            loop = action.data.get("loop")
            loop_interval = action.data.get("loop_interval")
            if loop and loop_interval:
                while not action_obj.done:
                    if global_vars.is_workflow_stopped(workflow_id):
                        break
                    # Wait before the next iteration
                    logger.info(f"{action.name} 等待 {loop_interval} 秒后继续执行 ...")
                    sleep(loop_interval)
                    # Re-run with the context produced by the previous run.
                    # NOTE(review): unlike the first call, these iterations
                    # are not wrapped in try/except — an exception here
                    # propagates to the caller.
                    logger.info(f"继续执行动作: {action.id} - {action.name}")
                    result_context = action_obj.execute(workflow_id, action.data, result_context)
            if action_obj.success:
                logger.info(f"{action.name} 执行成功")
            else:
                logger.error(f"{action.name} 执行失败!")
            return action_obj.success, action_obj.message, result_context
        else:
            # Unknown action type: report failure with a blank message
            logger.error(f"未找到动作: {action.type} - {action.name}")
            return False, " ", context

    def list_actions(self) -> List[dict]:
        """
        Return metadata for every loaded action: its registry key (``type``),
        display ``name``, ``description`` and default ``data`` (with the
        action name injected as ``label``).
        """
        return [
            {
                "type": key,
                "name": action.name,
                "description": action.description,
                "data": {
                    "label": action.name,
                    **action.data
                }
            } for key, action in self._actions.items()
        ]

View File

@@ -1,4 +1,4 @@
from typing import List from typing import List, Optional
from app.db import DbOper from app.db import DbOper
from app.db.models.downloadhistory import DownloadHistory, DownloadFiles from app.db.models.downloadhistory import DownloadHistory, DownloadFiles
@@ -51,7 +51,7 @@ class DownloadHistoryOper(DbOper):
""" """
DownloadFiles.truncate(self._db) DownloadFiles.truncate(self._db)
def get_files_by_hash(self, download_hash: str, state: int = None) -> List[DownloadFiles]: def get_files_by_hash(self, download_hash: str, state: Optional[int] = None) -> List[DownloadFiles]:
""" """
按Hash查询下载文件记录 按Hash查询下载文件记录
:param download_hash: 数据key :param download_hash: 数据key
@@ -97,7 +97,7 @@ class DownloadHistoryOper(DbOper):
return fileinfo.download_hash return fileinfo.download_hash
return "" return ""
def list_by_page(self, page: int = 1, count: int = 30) -> List[DownloadHistory]: def list_by_page(self, page: Optional[int] = 1, count: Optional[int] = 30) -> List[DownloadHistory]:
""" """
分页查询下载历史 分页查询下载历史
""" """
@@ -109,8 +109,8 @@ class DownloadHistoryOper(DbOper):
""" """
DownloadHistory.truncate(self._db) DownloadHistory.truncate(self._db)
def get_last_by(self, mtype=None, title: str = None, year: str = None, def get_last_by(self, mtype=None, title: Optional[str] = None, year: Optional[str] = None,
season: str = None, episode: str = None, tmdbid=None) -> List[DownloadHistory]: season: Optional[str] = None, episode: Optional[str] = None, tmdbid=None) -> List[DownloadHistory]:
""" """
按类型、标题、年份、季集查询下载记录 按类型、标题、年份、季集查询下载记录
""" """
@@ -122,7 +122,7 @@ class DownloadHistoryOper(DbOper):
episode=episode, episode=episode,
tmdbid=tmdbid) tmdbid=tmdbid)
def list_by_user_date(self, date: str, username: str = None) -> List[DownloadHistory]: def list_by_user_date(self, date: str, username: Optional[str] = None) -> List[DownloadHistory]:
""" """
查询某用户某时间之前的下载历史 查询某用户某时间之前的下载历史
""" """
@@ -130,7 +130,7 @@ class DownloadHistoryOper(DbOper):
date=date, date=date,
username=username) username=username)
def list_by_date(self, date: str, type: str, tmdbid: str, seasons: str = None) -> List[DownloadHistory]: def list_by_date(self, date: str, type: str, tmdbid: str, seasons: Optional[str] = None) -> List[DownloadHistory]:
""" """
查询某时间之后的下载历史 查询某时间之后的下载历史
""" """
@@ -140,7 +140,7 @@ class DownloadHistoryOper(DbOper):
tmdbid=tmdbid, tmdbid=tmdbid,
seasons=seasons) seasons=seasons)
def list_by_type(self, mtype: str, days: int = 7) -> List[DownloadHistory]: def list_by_type(self, mtype: str, days: Optional[int] = 7) -> List[DownloadHistory]:
""" """
获取指定类型的下载历史 获取指定类型的下载历史
""" """

View File

@@ -18,14 +18,14 @@ class MessageOper(DbOper):
def add(self, def add(self,
channel: MessageChannel = None, channel: MessageChannel = None,
source: str = None, source: Optional[str] = None,
mtype: NotificationType = None, mtype: NotificationType = None,
title: str = None, title: Optional[str] = None,
text: str = None, text: Optional[str] = None,
image: str = None, image: Optional[str] = None,
link: str = None, link: Optional[str] = None,
userid: str = None, userid: Optional[str] = None,
action: int = 1, action: Optional[int] = 1,
note: Union[list, dict] = None, note: Union[list, dict] = None,
**kwargs): **kwargs):
""" """
@@ -62,7 +62,7 @@ class MessageOper(DbOper):
Message(**kwargs).create(self._db) Message(**kwargs).create(self._db)
def list_by_page(self, page: int = 1, count: int = 30) -> Optional[str]: def list_by_page(self, page: Optional[int] = 1, count: Optional[int] = 30) -> Optional[str]:
""" """
获取媒体服务器数据ID 获取媒体服务器数据ID
""" """

View File

@@ -8,3 +8,4 @@ from .systemconfig import SystemConfig
from .transferhistory import TransferHistory from .transferhistory import TransferHistory
from .user import User from .user import User
from .userconfig import UserConfig from .userconfig import UserConfig
from .workflow import Workflow

View File

@@ -1,4 +1,5 @@
import time import time
from typing import Optional
from sqlalchemy import Column, Integer, String, Sequence, JSON from sqlalchemy import Column, Integer, String, Sequence, JSON
from sqlalchemy.orm import Session from sqlalchemy.orm import Session
@@ -51,6 +52,8 @@ class DownloadHistory(Base):
note = Column(JSON) note = Column(JSON)
# 自定义媒体类别 # 自定义媒体类别
media_category = Column(String) media_category = Column(String)
# 剧集组
episode_group = Column(String)
@staticmethod @staticmethod
@db_query @db_query
@@ -67,7 +70,7 @@ class DownloadHistory(Base):
@staticmethod @staticmethod
@db_query @db_query
def list_by_page(db: Session, page: int = 1, count: int = 30): def list_by_page(db: Session, page: Optional[int] = 1, count: Optional[int] = 30):
result = db.query(DownloadHistory).offset((page - 1) * count).limit(count).all() result = db.query(DownloadHistory).offset((page - 1) * count).limit(count).all()
return list(result) return list(result)
@@ -78,8 +81,9 @@ class DownloadHistory(Base):
@staticmethod @staticmethod
@db_query @db_query
def get_last_by(db: Session, mtype: str = None, title: str = None, year: int = None, season: str = None, def get_last_by(db: Session, mtype: Optional[str] = None, title: Optional[str] = None,
episode: str = None, tmdbid: int = None): year: Optional[str] = None, season: Optional[str] = None,
episode: Optional[str] = None, tmdbid: Optional[int] = None):
""" """
据tmdbid、season、season_episode查询转移记录 据tmdbid、season、season_episode查询转移记录
""" """
@@ -123,7 +127,7 @@ class DownloadHistory(Base):
@staticmethod @staticmethod
@db_query @db_query
def list_by_user_date(db: Session, date: str, username: str = None): def list_by_user_date(db: Session, date: str, username: Optional[str] = None):
""" """
查询某用户某时间之后的下载历史 查询某用户某时间之后的下载历史
""" """
@@ -138,7 +142,7 @@ class DownloadHistory(Base):
@staticmethod @staticmethod
@db_query @db_query
def list_by_date(db: Session, date: str, type: str, tmdbid: str, seasons: str = None): def list_by_date(db: Session, date: str, type: str, tmdbid: str, seasons: Optional[str] = None):
""" """
查询某时间之后的下载历史 查询某时间之后的下载历史
""" """
@@ -187,7 +191,7 @@ class DownloadFiles(Base):
@staticmethod @staticmethod
@db_query @db_query
def get_by_hash(db: Session, download_hash: str, state: int = None): def get_by_hash(db: Session, download_hash: str, state: Optional[int] = None):
if state: if state:
result = db.query(DownloadFiles).filter(DownloadFiles.download_hash == download_hash, result = db.query(DownloadFiles).filter(DownloadFiles.download_hash == download_hash,
DownloadFiles.state == state).all() DownloadFiles.state == state).all()

View File

@@ -1,3 +1,5 @@
from typing import Optional
from sqlalchemy import Column, Integer, String, Sequence, JSON from sqlalchemy import Column, Integer, String, Sequence, JSON
from sqlalchemy.orm import Session from sqlalchemy.orm import Session
@@ -34,7 +36,7 @@ class Message(Base):
@staticmethod @staticmethod
@db_query @db_query
def list_by_page(db: Session, page: int = 1, count: int = 30): def list_by_page(db: Session, page: Optional[int] = 1, count: Optional[int] = 30):
result = db.query(Message).order_by(Message.reg_time.desc()).offset((page - 1) * count).limit( result = db.query(Message).order_by(Message.reg_time.desc()).offset((page - 1) * count).limit(
count).all() count).all()
result.sort(key=lambda x: x.reg_time, reverse=False) result.sort(key=lambda x: x.reg_time, reverse=False)

View File

@@ -1,4 +1,5 @@
from datetime import datetime from datetime import datetime
from typing import Optional
from sqlalchemy import Column, Integer, String, Sequence, Float, JSON, func, or_ from sqlalchemy import Column, Integer, String, Sequence, Float, JSON, func, or_
from sqlalchemy.orm import Session from sqlalchemy.orm import Session
@@ -54,7 +55,7 @@ class SiteUserData(Base):
@staticmethod @staticmethod
@db_query @db_query
def get_by_domain(db: Session, domain: str, workdate: str = None, worktime: str = None): def get_by_domain(db: Session, domain: str, workdate: Optional[str] = None, worktime: Optional[str] = None):
if workdate and worktime: if workdate and worktime:
return db.query(SiteUserData).filter(SiteUserData.domain == domain, return db.query(SiteUserData).filter(SiteUserData.domain == domain,
SiteUserData.updated_day == workdate, SiteUserData.updated_day == workdate,

View File

@@ -1,4 +1,5 @@
import time import time
from typing import Optional
from sqlalchemy import Column, Integer, String, Sequence, Float, JSON from sqlalchemy import Column, Integer, String, Sequence, Float, JSON
from sqlalchemy.orm import Session from sqlalchemy.orm import Session
@@ -83,10 +84,12 @@ class Subscribe(Base):
media_category = Column(String) media_category = Column(String)
# 过滤规则组 # 过滤规则组
filter_groups = Column(JSON, default=list) filter_groups = Column(JSON, default=list)
# 选择的剧集组
episode_group = Column(String)
@staticmethod @staticmethod
@db_query @db_query
def exists(db: Session, tmdbid: int = None, doubanid: str = None, season: int = None): def exists(db: Session, tmdbid: Optional[int] = None, doubanid: Optional[str] = None, season: Optional[int] = None):
if tmdbid: if tmdbid:
if season: if season:
return db.query(Subscribe).filter(Subscribe.tmdbid == tmdbid, return db.query(Subscribe).filter(Subscribe.tmdbid == tmdbid,
@@ -110,7 +113,7 @@ class Subscribe(Base):
@staticmethod @staticmethod
@db_query @db_query
def get_by_title(db: Session, title: str, season: int = None): def get_by_title(db: Session, title: str, season: Optional[int] = None):
if season: if season:
return db.query(Subscribe).filter(Subscribe.name == title, return db.query(Subscribe).filter(Subscribe.name == title,
Subscribe.season == season).first() Subscribe.season == season).first()
@@ -118,7 +121,7 @@ class Subscribe(Base):
@staticmethod @staticmethod
@db_query @db_query
def get_by_tmdbid(db: Session, tmdbid: int, season: int = None): def get_by_tmdbid(db: Session, tmdbid: int, season: Optional[int] = None):
if season: if season:
result = db.query(Subscribe).filter(Subscribe.tmdbid == tmdbid, result = db.query(Subscribe).filter(Subscribe.tmdbid == tmdbid,
Subscribe.season == season).all() Subscribe.season == season).all()
@@ -164,7 +167,7 @@ class Subscribe(Base):
@staticmethod @staticmethod
@db_query @db_query
def list_by_username(db: Session, username: str, state: str = None, mtype: str = None): def list_by_username(db: Session, username: str, state: Optional[str] = None, mtype: Optional[str] = None):
if mtype: if mtype:
if state: if state:
result = db.query(Subscribe).filter(Subscribe.state == state, result = db.query(Subscribe).filter(Subscribe.state == state,

View File

@@ -1,3 +1,5 @@
from typing import Optional
from sqlalchemy import Column, Integer, String, Sequence, Float, JSON from sqlalchemy import Column, Integer, String, Sequence, Float, JSON
from sqlalchemy.orm import Session from sqlalchemy.orm import Session
@@ -67,10 +69,12 @@ class SubscribeHistory(Base):
media_category = Column(String) media_category = Column(String)
# 过滤规则组 # 过滤规则组
filter_groups = Column(JSON, default=list) filter_groups = Column(JSON, default=list)
# 剧集组
episode_group = Column(String)
@staticmethod @staticmethod
@db_query @db_query
def list_by_type(db: Session, mtype: str, page: int = 1, count: int = 30): def list_by_type(db: Session, mtype: str, page: Optional[int] = 1, count: Optional[int] = 30):
result = db.query(SubscribeHistory).filter( result = db.query(SubscribeHistory).filter(
SubscribeHistory.type == mtype SubscribeHistory.type == mtype
).order_by( ).order_by(
@@ -80,7 +84,7 @@ class SubscribeHistory(Base):
@staticmethod @staticmethod
@db_query @db_query
def exists(db: Session, tmdbid: int = None, doubanid: str = None, season: int = None): def exists(db: Session, tmdbid: Optional[int] = None, doubanid: Optional[str] = None, season: Optional[int] = None):
if tmdbid: if tmdbid:
if season: if season:
return db.query(SubscribeHistory).filter(SubscribeHistory.tmdbid == tmdbid, return db.query(SubscribeHistory).filter(SubscribeHistory.tmdbid == tmdbid,

View File

@@ -1,4 +1,5 @@
import time import time
from typing import Optional
from sqlalchemy import Column, Integer, String, Sequence, Boolean, func, or_, JSON from sqlalchemy import Column, Integer, String, Sequence, Boolean, func, or_, JSON
from sqlalchemy.orm import Session from sqlalchemy.orm import Session
@@ -55,10 +56,12 @@ class TransferHistory(Base):
date = Column(String, index=True) date = Column(String, index=True)
# 文件清单以JSON存储 # 文件清单以JSON存储
files = Column(JSON, default=list) files = Column(JSON, default=list)
# 剧集组
episode_group = Column(String)
@staticmethod @staticmethod
@db_query @db_query
def list_by_title(db: Session, title: str, page: int = 1, count: int = 30, status: bool = None): def list_by_title(db: Session, title: str, page: Optional[int] = 1, count: Optional[int] = 30, status: bool = None):
if status is not None: if status is not None:
result = db.query(TransferHistory).filter( result = db.query(TransferHistory).filter(
TransferHistory.status == status TransferHistory.status == status
@@ -77,7 +80,7 @@ class TransferHistory(Base):
@staticmethod @staticmethod
@db_query @db_query
def list_by_page(db: Session, page: int = 1, count: int = 30, status: bool = None): def list_by_page(db: Session, page: Optional[int] = 1, count: Optional[int] = 30, status: bool = None):
if status is not None: if status is not None:
result = db.query(TransferHistory).filter( result = db.query(TransferHistory).filter(
TransferHistory.status == status TransferHistory.status == status
@@ -97,7 +100,7 @@ class TransferHistory(Base):
@staticmethod @staticmethod
@db_query @db_query
def get_by_src(db: Session, src: str, storage: str = None): def get_by_src(db: Session, src: str, storage: Optional[str] = None):
if storage: if storage:
return db.query(TransferHistory).filter(TransferHistory.src == src, return db.query(TransferHistory).filter(TransferHistory.src == src,
TransferHistory.src_storage == storage).first() TransferHistory.src_storage == storage).first()
@@ -117,7 +120,7 @@ class TransferHistory(Base):
@staticmethod @staticmethod
@db_query @db_query
def statistic(db: Session, days: int = 7): def statistic(db: Session, days: Optional[int] = 7):
""" """
统计最近days天的下载历史数量按日期分组返回每日数量 统计最近days天的下载历史数量按日期分组返回每日数量
""" """
@@ -150,8 +153,8 @@ class TransferHistory(Base):
@staticmethod @staticmethod
@db_query @db_query
def list_by(db: Session, mtype: str = None, title: str = None, year: str = None, season: str = None, def list_by(db: Session, mtype: Optional[str] = None, title: Optional[str] = None, year: Optional[str] = None, season: Optional[str] = None,
episode: str = None, tmdbid: int = None, dest: str = None): episode: Optional[str] = None, tmdbid: Optional[int] = None, dest: Optional[str] = None):
""" """
据tmdbid、season、season_episode查询转移记录 据tmdbid、season、season_episode查询转移记录
tmdbid + mtype 或 title + year 必输 tmdbid + mtype 或 title + year 必输
@@ -218,7 +221,7 @@ class TransferHistory(Base):
@staticmethod @staticmethod
@db_query @db_query
def get_by_type_tmdbid(db: Session, mtype: str = None, tmdbid: int = None): def get_by_type_tmdbid(db: Session, mtype: Optional[str] = None, tmdbid: Optional[int] = None):
""" """
据tmdbid、type查询转移记录 据tmdbid、type查询转移记录
""" """
@@ -227,7 +230,7 @@ class TransferHistory(Base):
@staticmethod @staticmethod
@db_update @db_update
def update_download_hash(db: Session, historyid: int = None, download_hash: str = None): def update_download_hash(db: Session, historyid: Optional[int] = None, download_hash: Optional[str] = None):
db.query(TransferHistory).filter(TransferHistory.id == historyid).update( db.query(TransferHistory).filter(TransferHistory.id == historyid).update(
{ {
"download_hash": download_hash "download_hash": download_hash

103
app/db/models/workflow.py Normal file
View File

@@ -0,0 +1,103 @@
from datetime import datetime
from typing import Optional
from sqlalchemy import Column, Integer, JSON, Sequence, String, and_
from app.db import Base, db_query, db_update
class Workflow(Base):
    """
    Workflow table: stores workflow definitions together with their
    execution state, run history and context.
    """
    # ID
    id = Column(Integer, Sequence('id'), primary_key=True, index=True)
    # Name (looked up by get_by_name; treated as unique by WorkflowOper.add)
    name = Column(String, index=True, nullable=False)
    # Description
    description = Column(String)
    # Timer (schedule string)
    timer = Column(String)
    # State: W-waiting, R-running, P-paused, S-success, F-failed
    state = Column(String, nullable=False, index=True, default='W')
    # Executed actions (comma separated action ids)
    current_action = Column(String)
    # Result of the last run
    result = Column(String)
    # Number of successful runs
    run_count = Column(Integer, default=0)
    # Action list
    actions = Column(JSON, default=list)
    # Flow definitions
    flows = Column(JSON, default=list)
    # Execution context
    context = Column(JSON, default=dict)
    # Creation time.
    # FIX: the default must be a callable — a bare datetime.now().strftime(...)
    # is evaluated once at import time, so every row inserted afterwards would
    # share the application start timestamp instead of its own creation time.
    add_time = Column(String, default=lambda: datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
    # Last execution time
    last_time = Column(String)

    @staticmethod
    @db_query
    def get_enabled_workflows(db):
        """Return all workflows that are not paused."""
        return db.query(Workflow).filter(Workflow.state != 'P').all()

    @staticmethod
    @db_query
    def get_by_name(db, name: str):
        """Return the first workflow with the given name, or None."""
        return db.query(Workflow).filter(Workflow.name == name).first()

    @staticmethod
    @db_update
    def update_state(db, wid: int, state: str):
        """Set the state of workflow `wid` unconditionally."""
        db.query(Workflow).filter(Workflow.id == wid).update({"state": state})
        return True

    @staticmethod
    @db_update
    def start(db, wid: int):
        """Mark workflow `wid` as running."""
        db.query(Workflow).filter(Workflow.id == wid).update({
            "state": 'R'
        })
        return True

    @staticmethod
    @db_update
    def fail(db, wid: int, result: str):
        """
        Mark the workflow as failed and record the result/last run time.
        A workflow that was paused in the meantime is left untouched.
        """
        db.query(Workflow).filter(and_(Workflow.id == wid, Workflow.state != "P")).update({
            "state": 'F',
            "result": result,
            "last_time": datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        })
        return True

    @staticmethod
    @db_update
    def success(db, wid: int, result: Optional[str] = None):
        """
        Mark the workflow as succeeded, bump run_count and record the result.
        A workflow that was paused in the meantime is left untouched.
        """
        db.query(Workflow).filter(and_(Workflow.id == wid, Workflow.state != "P")).update({
            "state": 'S',
            "result": result,
            "run_count": Workflow.run_count + 1,
            "last_time": datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        })
        return True

    @staticmethod
    @db_update
    def reset(db, wid: int, reset_count: Optional[bool] = False):
        """
        Reset the workflow back to the waiting state, clearing result and
        progress; optionally reset the run counter as well.
        """
        db.query(Workflow).filter(Workflow.id == wid).update({
            "state": 'W',
            "result": None,
            "current_action": None,
            "run_count": 0 if reset_count else Workflow.run_count,
        })
        return True

    @staticmethod
    @db_update
    def update_current_action(db, wid: int, action_id: str, context: dict):
        """
        Append `action_id` to the comma-separated executed-action list
        and persist the execution context.
        """
        # conditional covers the whole concatenation: (current + ",id") if set else id
        db.query(Workflow).filter(Workflow.id == wid).update({
            "current_action": Workflow.current_action + f",{action_id}" if Workflow.current_action else action_id,
            "context": context
        })
        return True

View File

@@ -1,4 +1,4 @@
from typing import Any from typing import Any, Optional
from app.db import DbOper from app.db import DbOper
from app.db.models.plugindata import PluginData from app.db.models.plugindata import PluginData
@@ -24,7 +24,7 @@ class PluginDataOper(DbOper):
else: else:
PluginData(plugin_id=plugin_id, key=key, value=value).create(self._db) PluginData(plugin_id=plugin_id, key=key, value=value).create(self._db)
def get_data(self, plugin_id: str, key: str = None) -> Any: def get_data(self, plugin_id: str, key: Optional[str] = None) -> Any:
""" """
获取插件数据 获取插件数据
:param plugin_id: 插件id :param plugin_id: 插件id
@@ -38,7 +38,7 @@ class PluginDataOper(DbOper):
else: else:
return PluginData.get_plugin_data(self._db, plugin_id) return PluginData.get_plugin_data(self._db, plugin_id)
def del_data(self, plugin_id: str, key: str = None) -> Any: def del_data(self, plugin_id: str, key: Optional[str] = None) -> Any:
""" """
删除插件数据 删除插件数据
:param plugin_id: 插件id :param plugin_id: 插件id

View File

@@ -1,5 +1,5 @@
from datetime import datetime from datetime import datetime
from typing import List, Tuple from typing import List, Tuple, Optional
from app.db import DbOper from app.db import DbOper
from app.db.models import SiteIcon from app.db.models import SiteIcon
@@ -121,7 +121,8 @@ class SiteOper(DbOper):
siteuserdatas = SiteUserData.get_by_domain(self._db, domain=domain, workdate=current_day) siteuserdatas = SiteUserData.get_by_domain(self._db, domain=domain, workdate=current_day)
if siteuserdatas: if siteuserdatas:
# 存在则更新 # 存在则更新
siteuserdatas[0].update(self._db, payload) if not payload.get("err_msg"):
siteuserdatas[0].update(self._db, payload)
else: else:
# 不存在则插入 # 不存在则插入
SiteUserData(**payload).create(self._db) SiteUserData(**payload).create(self._db)
@@ -133,7 +134,7 @@ class SiteOper(DbOper):
""" """
return SiteUserData.list(self._db) return SiteUserData.list(self._db)
def get_userdata_by_domain(self, domain: str, workdate: str = None) -> List[SiteUserData]: def get_userdata_by_domain(self, domain: str, workdate: Optional[str] = None) -> List[SiteUserData]:
""" """
获取站点用户数据 获取站点用户数据
""" """
@@ -172,7 +173,7 @@ class SiteOper(DbOper):
}) })
return True return True
def success(self, domain: str, seconds: int = None): def success(self, domain: str, seconds: Optional[int] = None):
""" """
站点访问成功 站点访问成功
""" """

View File

@@ -1,5 +1,5 @@
import time import time
from typing import Tuple, List from typing import Tuple, List, Optional
from app.core.context import MediaInfo from app.core.context import MediaInfo
from app.db import DbOper from app.db import DbOper
@@ -20,21 +20,24 @@ class SubscribeOper(DbOper):
tmdbid=mediainfo.tmdb_id, tmdbid=mediainfo.tmdb_id,
doubanid=mediainfo.douban_id, doubanid=mediainfo.douban_id,
season=kwargs.get('season')) season=kwargs.get('season'))
kwargs.update({
"name": mediainfo.title,
"year": mediainfo.year,
"type": mediainfo.type.value,
"tmdbid": mediainfo.tmdb_id,
"imdbid": mediainfo.imdb_id,
"tvdbid": mediainfo.tvdb_id,
"doubanid": mediainfo.douban_id,
"bangumiid": mediainfo.bangumi_id,
"episode_group": mediainfo.episode_group,
"poster": mediainfo.get_poster_image(),
"backdrop": mediainfo.get_backdrop_image(),
"vote": mediainfo.vote_average,
"description": mediainfo.overview,
"date": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
})
if not subscribe: if not subscribe:
subscribe = Subscribe(name=mediainfo.title, subscribe = Subscribe(**kwargs)
year=mediainfo.year,
type=mediainfo.type.value,
tmdbid=mediainfo.tmdb_id,
imdbid=mediainfo.imdb_id,
tvdbid=mediainfo.tvdb_id,
doubanid=mediainfo.douban_id,
bangumiid=mediainfo.bangumi_id,
poster=mediainfo.get_poster_image(),
backdrop=mediainfo.get_backdrop_image(),
vote=mediainfo.vote_average,
description=mediainfo.overview,
date=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
**kwargs)
subscribe.create(self._db) subscribe.create(self._db)
# 查询订阅 # 查询订阅
subscribe = Subscribe.exists(self._db, subscribe = Subscribe.exists(self._db,
@@ -45,7 +48,7 @@ class SubscribeOper(DbOper):
else: else:
return subscribe.id, "订阅已存在" return subscribe.id, "订阅已存在"
def exists(self, tmdbid: int = None, doubanid: str = None, season: int = None) -> bool: def exists(self, tmdbid: Optional[int] = None, doubanid: Optional[str] = None, season: Optional[int] = None) -> bool:
""" """
判断是否存在 判断是否存在
""" """
@@ -64,7 +67,7 @@ class SubscribeOper(DbOper):
""" """
return Subscribe.get(self._db, rid=sid) return Subscribe.get(self._db, rid=sid)
def list(self, state: str = None) -> List[Subscribe]: def list(self, state: Optional[str] = None) -> List[Subscribe]:
""" """
获取订阅列表 获取订阅列表
""" """
@@ -87,19 +90,19 @@ class SubscribeOper(DbOper):
subscribe.update(self._db, payload) subscribe.update(self._db, payload)
return subscribe return subscribe
def list_by_tmdbid(self, tmdbid: int, season: int = None) -> List[Subscribe]: def list_by_tmdbid(self, tmdbid: int, season: Optional[int] = None) -> List[Subscribe]:
""" """
获取指定tmdb_id的订阅 获取指定tmdb_id的订阅
""" """
return Subscribe.get_by_tmdbid(self._db, tmdbid=tmdbid, season=season) return Subscribe.get_by_tmdbid(self._db, tmdbid=tmdbid, season=season)
def list_by_username(self, username: str, state: str = None, mtype: str = None) -> List[Subscribe]: def list_by_username(self, username: str, state: Optional[str] = None, mtype: Optional[str] = None) -> List[Subscribe]:
""" """
获取指定用户的订阅 获取指定用户的订阅
""" """
return Subscribe.list_by_username(self._db, username=username, state=state, mtype=mtype) return Subscribe.list_by_username(self._db, username=username, state=state, mtype=mtype)
def list_by_type(self, mtype: str, days: int = 7) -> Subscribe: def list_by_type(self, mtype: str, days: Optional[int] = 7) -> Subscribe:
""" """
获取指定类型的订阅 获取指定类型的订阅
""" """
@@ -119,7 +122,7 @@ class SubscribeOper(DbOper):
subscribe = SubscribeHistory(**kwargs) subscribe = SubscribeHistory(**kwargs)
subscribe.create(self._db) subscribe.create(self._db)
def exist_history(self, tmdbid: int = None, doubanid: str = None, season: int = None): def exist_history(self, tmdbid: Optional[int] = None, doubanid: Optional[str] = None, season: Optional[int] = None):
""" """
判断是否存在订阅历史 判断是否存在订阅历史
""" """

View File

@@ -1,5 +1,5 @@
import time import time
from typing import Any, List from typing import Any, List, Optional
from app.core.context import MediaInfo from app.core.context import MediaInfo
from app.core.meta import MetaBase from app.core.meta import MetaBase
@@ -27,7 +27,7 @@ class TransferHistoryOper(DbOper):
""" """
return TransferHistory.list_by_title(self._db, title) return TransferHistory.list_by_title(self._db, title)
def get_by_src(self, src: str, storage: str = None) -> TransferHistory: def get_by_src(self, src: str, storage: Optional[str] = None) -> TransferHistory:
""" """
按源查询转移记录 按源查询转移记录
:param src: 数据key :param src: 数据key
@@ -58,14 +58,15 @@ class TransferHistoryOper(DbOper):
}) })
TransferHistory(**kwargs).create(self._db) TransferHistory(**kwargs).create(self._db)
def statistic(self, days: int = 7) -> List[Any]: def statistic(self, days: Optional[int] = 7) -> List[Any]:
""" """
统计最近days天的下载历史数量 统计最近days天的下载历史数量
""" """
return TransferHistory.statistic(self._db, days) return TransferHistory.statistic(self._db, days)
def get_by(self, title: str = None, year: str = None, mtype: str = None, def get_by(self, title: Optional[str] = None, year: Optional[str] = None, mtype: Optional[str] = None,
season: str = None, episode: str = None, tmdbid: int = None, dest: str = None) -> List[TransferHistory]: season: Optional[str] = None, episode: Optional[str] = None, tmdbid: Optional[int] = None,
dest: Optional[str] = None) -> List[TransferHistory]:
""" """
按类型、标题、年份、季集查询转移记录 按类型、标题、年份、季集查询转移记录
""" """
@@ -78,7 +79,7 @@ class TransferHistoryOper(DbOper):
episode=episode, episode=episode,
tmdbid=tmdbid) tmdbid=tmdbid)
def get_by_type_tmdbid(self, mtype: str = None, tmdbid: int = None) -> TransferHistory: def get_by_type_tmdbid(self, mtype: Optional[str] = None, tmdbid: Optional[int] = None) -> TransferHistory:
""" """
按类型、tmdb查询转移记录 按类型、tmdb查询转移记录
""" """
@@ -120,7 +121,7 @@ class TransferHistoryOper(DbOper):
def add_success(self, fileitem: FileItem, mode: str, meta: MetaBase, def add_success(self, fileitem: FileItem, mode: str, meta: MetaBase,
mediainfo: MediaInfo, transferinfo: TransferInfo, mediainfo: MediaInfo, transferinfo: TransferInfo,
downloader: str = None, download_hash: str = None): downloader: Optional[str] = None, download_hash: Optional[str] = None):
""" """
新增转移成功历史记录 新增转移成功历史记录
""" """
@@ -150,7 +151,7 @@ class TransferHistoryOper(DbOper):
) )
def add_fail(self, fileitem: FileItem, mode: str, meta: MetaBase, mediainfo: MediaInfo = None, def add_fail(self, fileitem: FileItem, mode: str, meta: MetaBase, mediainfo: MediaInfo = None,
transferinfo: TransferInfo = None, downloader: str = None, download_hash: str = None): transferinfo: TransferInfo = None, downloader: Optional[str] = None, download_hash: Optional[str] = None):
""" """
新增转移失败历史记录 新增转移失败历史记录
""" """
@@ -176,6 +177,7 @@ class TransferHistoryOper(DbOper):
image=mediainfo.get_poster_image(), image=mediainfo.get_poster_image(),
downloader=downloader, downloader=downloader,
download_hash=download_hash, download_hash=download_hash,
episode_group=mediainfo.episode_group,
status=0, status=0,
errmsg=transferinfo.message or '未知错误', errmsg=transferinfo.message or '未知错误',
files=transferinfo.file_list files=transferinfo.file_list

68
app/db/workflow_oper.py Normal file
View File

@@ -0,0 +1,68 @@
from typing import List, Tuple, Optional
from app.db import DbOper
from app.db.models.workflow import Workflow
class WorkflowOper(DbOper):
    """
    Data-access helper for workflows, delegating to the Workflow model.
    """

    def add(self, **kwargs) -> Tuple[bool, str]:
        """
        Create a new workflow unless one with the same name already exists.
        :return: (success flag, message)
        """
        workflow = Workflow(**kwargs)
        existing = workflow.get_by_name(self._db, kwargs.get("name"))
        if existing:
            return False, "工作流已存在"
        workflow.create(self._db)
        return True, "新增工作流成功"

    def get(self, wid: int) -> Workflow:
        """
        Fetch a single workflow by id.
        """
        return Workflow.get(self._db, wid)

    def list_enabled(self) -> List[Workflow]:
        """
        List every workflow that is not paused.
        """
        return Workflow.get_enabled_workflows(self._db)

    def get_by_name(self, name: str) -> Workflow:
        """
        Fetch a workflow by its name.
        """
        return Workflow.get_by_name(self._db, name)

    def start(self, wid: int) -> bool:
        """
        Mark the workflow as running.
        """
        return Workflow.start(self._db, wid)

    def success(self, wid: int, result: Optional[str] = None) -> bool:
        """
        Mark the workflow as finished successfully.
        """
        return Workflow.success(self._db, wid, result)

    def fail(self, wid: int, result: str) -> bool:
        """
        Mark the workflow as failed with the given result message.
        """
        return Workflow.fail(self._db, wid, result)

    def step(self, wid: int, action_id: str, context: dict) -> bool:
        """
        Record that `action_id` has been executed and persist the context.
        """
        return Workflow.update_current_action(self._db, wid, action_id, context)

    def reset(self, wid: int, reset_count: bool = False) -> bool:
        """
        Reset the workflow to the waiting state; optionally reset the run counter.
        """
        return Workflow.reset(self._db, wid, reset_count=reset_count)

View File

@@ -1,4 +1,4 @@
from typing import Callable, Any from typing import Callable, Any, Optional
from playwright.sync_api import sync_playwright, Page from playwright.sync_api import sync_playwright, Page
from cf_clearance import sync_cf_retry, sync_stealth from cf_clearance import sync_cf_retry, sync_stealth
@@ -20,11 +20,11 @@ class PlaywrightHelper:
def action(self, url: str, def action(self, url: str,
callback: Callable, callback: Callable,
cookies: str = None, cookies: Optional[str] = None,
ua: str = None, ua: Optional[str] = None,
proxies: dict = None, proxies: Optional[dict] = None,
headless: bool = False, headless: Optional[bool] = False,
timeout: int = 30) -> Any: timeout: Optional[int] = 30) -> Any:
""" """
访问网页接收Page对象并执行操作 访问网页接收Page对象并执行操作
:param url: 网页地址 :param url: 网页地址
@@ -57,11 +57,11 @@ class PlaywrightHelper:
return None return None
def get_page_source(self, url: str, def get_page_source(self, url: str,
cookies: str = None, cookies: Optional[str] = None,
ua: str = None, ua: Optional[str] = None,
proxies: dict = None, proxies: Optional[dict] = None,
headless: bool = False, headless: Optional[bool] = False,
timeout: int = 20) -> str: timeout: Optional[int] = 20) -> Optional[str]:
""" """
获取网页源码 获取网页源码
:param url: 网页地址 :param url: 网页地址

View File

@@ -73,8 +73,8 @@ class CookieHelper:
url: str, url: str,
username: str, username: str,
password: str, password: str,
two_step_code: str = None, two_step_code: Optional[str] = None,
proxies: dict = None) -> Tuple[Optional[str], Optional[str], str]: proxies: Optional[dict] = None) -> Tuple[Optional[str], Optional[str], str]:
""" """
获取站点cookie和ua 获取站点cookie和ua
:param url: 站点地址 :param url: 站点地址

View File

@@ -49,9 +49,9 @@ class DirectoryHelper:
""" """
return [d for d in self.get_library_dirs() if d.library_storage == "local"] return [d for d in self.get_library_dirs() if d.library_storage == "local"]
def get_dir(self, media: MediaInfo, include_unsorted: bool = False, def get_dir(self, media: MediaInfo, include_unsorted: Optional[bool] = False,
storage: str = None, src_path: Path = None, storage: Optional[str] = None, src_path: Path = None,
target_storage: str = None, dest_path: Path = None target_storage: Optional[str] = None, dest_path: Path = None
) -> Optional[schemas.TransferDirectoryConf]: ) -> Optional[schemas.TransferDirectoryConf]:
""" """
根据媒体信息获取下载目录、媒体库目录配置 根据媒体信息获取下载目录、媒体库目录配置

View File

@@ -24,4 +24,3 @@ class DisplayHelper(metaclass=Singleton):
logger.info("正在停止虚拟显示...") logger.info("正在停止虚拟显示...")
self._display.stop() self._display.stop()
logger.info("虚拟显示已停止") logger.info("虚拟显示已停止")

View File

@@ -129,7 +129,7 @@ def doh_query_json(resolver: str, host: str) -> Optional[str]:
if response.status != 200: if response.status != 200:
return None return None
response_body = response.read().decode("utf-8") response_body = response.read().decode("utf-8")
logger.debug("<== body: %s", response_body) logger.debug("<== body: %s", response_body)
answer = json.loads(response_body)["Answer"] answer = json.loads(response_body)["Answer"]
return answer[0]["data"] return answer[0]["data"]
except Exception as e: except Exception as e:

View File

@@ -10,8 +10,8 @@ class FormatParser(object):
_key = "" _key = ""
_split_chars = r"\.|\s+|\(|\)|\[|]|-|\+|【|】|/||;|&|\||#|_|「|」|~" _split_chars = r"\.|\s+|\(|\)|\[|]|-|\+|【|】|/||;|&|\||#|_|「|」|~"
def __init__(self, eformat: str, details: str = None, part: str = None, def __init__(self, eformat: str, details: Optional[str] = None, part: Optional[str] = None,
offset: str = None, key: str = "ep"): offset: Optional[str] = None, key: Optional[str] = "ep"):
""" """
:params eformat: 格式化字符串 :params eformat: 格式化字符串
:params details: 格式化详情 :params details: 格式化详情

View File

@@ -1,9 +1,154 @@
from __future__ import annotations
import json import json
import queue import queue
import threading
import time import time
from typing import Optional, Any, Union from datetime import datetime
from typing import Any, Union
from typing import List, Optional, Callable
from app.utils.singleton import Singleton from app.core.config import global_vars
from app.db.systemconfig_oper import SystemConfigOper
from app.schemas.types import SystemConfigKey
from app.utils.singleton import Singleton, SingletonClass
from app.log import logger
class MessageQueueManager(metaclass=SingletonClass):
    """
    Message sending queue manager.

    Messages submitted inside the configured time windows are delivered
    immediately; messages submitted outside the windows are queued and
    flushed by a background thread once a window opens.
    """
    # Allowed send periods parsed into (start_hour, start_min, end_hour, end_min) tuples
    schedule_periods: List[tuple[int, int, int, int]] = []
    def __init__(
            self,
            send_callback: Optional[Callable] = None,
            check_interval: Optional[int] = 10
    ) -> None:
        """
        Initialise the queue manager.
        :param send_callback: callback that actually delivers a message
        :param check_interval: polling interval of the monitor thread (seconds)
        """
        self.init_config()
        # FIFO queue of deferred messages, each stored as {"args": ..., "kwargs": ...}
        self.queue: queue.Queue[Any] = queue.Queue()
        self.send_callback = send_callback
        self.check_interval = check_interval
        self._running = True
        # daemon thread so it never blocks interpreter shutdown
        self.thread = threading.Thread(target=self._monitor_loop, daemon=True)
        self.thread.start()
    def init_config(self):
        """
        (Re)load the allowed send-time windows from system configuration.
        """
        self.schedule_periods = self._parse_schedule(
            SystemConfigOper().get(SystemConfigKey.NotificationSendTime)
        )
    @staticmethod
    def _parse_schedule(periods: Union[list, dict]) -> List[tuple[int, int, int, int]]:
        """
        Convert configured periods ({'start': 'HH:MM', 'end': 'HH:MM'} dicts,
        single or in a list) into (start_h, start_m, end_h, end_m) tuples.
        Entries missing either bound are skipped.
        """
        parsed = []
        if not periods:
            return parsed
        # a single dict is accepted and normalised to a one-element list
        if not isinstance(periods, list):
            periods = [periods]
        for period in periods:
            if not period:
                continue
            if not period.get('start') or not period.get('end'):
                continue
            start_h, start_m = map(int, period['start'].split(':'))
            end_h, end_m = map(int, period['end'].split(':'))
            parsed.append((start_h, start_m, end_h, end_m))
        return parsed
    @staticmethod
    def _time_to_minutes(time_str: str) -> int:
        """
        Convert an 'HH:MM' string to minutes since midnight.
        """
        hours, minutes = map(int, time_str.split(':'))
        return hours * 60 + minutes
    def _is_in_scheduled_time(self, current_time: datetime) -> bool:
        """
        Return True when `current_time` falls inside any allowed period.
        An empty configuration means sending is always allowed.
        """
        if not self.schedule_periods:
            return True
        current_minutes = current_time.hour * 60 + current_time.minute
        for period in self.schedule_periods:
            s_h, s_m, e_h, e_m = period
            start = s_h * 60 + s_m
            end = e_h * 60 + e_m
            if start <= end:
                # period contained within one day
                if start <= current_minutes <= end:
                    return True
            else:
                # period wraps past midnight (e.g. 22:00-06:00)
                if current_minutes >= start or current_minutes <= end:
                    return True
        return False
    def send_message(self, *args, **kwargs) -> None:
        """
        Send a message immediately, or enqueue it when outside the allowed window.
        """
        if self._is_in_scheduled_time(datetime.now()):
            self._send(*args, **kwargs)
        else:
            self.queue.put({
                "args": args,
                "kwargs": kwargs
            })
            logger.info(f"消息已加入队列,当前队列长度:{self.queue.qsize()}")
    def _send(self, *args, **kwargs) -> None:
        """
        Deliver one message through the configured callback.
        Delivery errors are logged and swallowed, never raised to the caller.
        """
        if self.send_callback:
            try:
                logger.info(f"发送消息:{kwargs}")
                self.send_callback(*args, **kwargs)
            except Exception as e:
                logger.error(f"发送消息错误:{str(e)}")
    def _monitor_loop(self) -> None:
        """
        Background loop: whenever the current time enters an allowed window,
        drain the queue, stopping early if the system shuts down or the
        window closes mid-drain.
        """
        while self._running:
            current_time = datetime.now()
            if self._is_in_scheduled_time(current_time):
                while not self.queue.empty():
                    if global_vars.is_system_stopped:
                        break
                    # re-check each iteration: the window may close while draining
                    if not self._is_in_scheduled_time(datetime.now()):
                        break
                    try:
                        # get_nowait avoids blocking if another consumer raced us
                        message = self.queue.get_nowait()
                        self._send(*message['args'], **message['kwargs'])
                        logger.info(f"队列剩余消息:{self.queue.qsize()}")
                    except queue.Empty:
                        break
            time.sleep(self.check_interval)
    def stop(self) -> None:
        """
        Stop the manager and wait for the monitor thread to exit.
        NOTE(review): join() may block for up to `check_interval` seconds,
        since the loop only re-reads `_running` after its sleep.
        """
        self._running = False
        self.thread.join()
class MessageHelper(metaclass=Singleton): class MessageHelper(metaclass=Singleton):

View File

@@ -3,18 +3,26 @@ import importlib
import pkgutil import pkgutil
import traceback import traceback
from pathlib import Path from pathlib import Path
from typing import List, Any from typing import List, Any, Callable
from app.log import logger from app.log import logger
FilterFuncType = Callable[[str, Any], bool]
def _default_filter(name: str, obj: Any) -> bool:
"""
默认过滤器
"""
return True
class ModuleHelper: class ModuleHelper:
""" """
模块动态加载 模块动态加载
""" """
@classmethod @classmethod
def load(cls, package_path: str, filter_func=lambda name, obj: True) -> List[Any]: def load(cls, package_path: str, filter_func: FilterFuncType = _default_filter) -> List[Any]:
""" """
导入模块 导入模块
:param package_path: 父包名 :param package_path: 父包名
@@ -23,6 +31,7 @@ class ModuleHelper:
""" """
submodules: list = [] submodules: list = []
loaded_modules = set()
packages = importlib.import_module(package_path) packages = importlib.import_module(package_path)
for importer, package_name, _ in pkgutil.iter_modules(packages.__path__): for importer, package_name, _ in pkgutil.iter_modules(packages.__path__):
try: try:
@@ -35,6 +44,9 @@ class ModuleHelper:
if name.startswith('_'): if name.startswith('_'):
continue continue
if isinstance(obj, type) and filter_func(name, obj): if isinstance(obj, type) and filter_func(name, obj):
if name in loaded_modules:
continue
loaded_modules.add(name)
submodules.append(obj) submodules.append(obj)
except Exception as err: except Exception as err:
logger.debug(f'加载模块 {package_name} 失败:{str(err)} - {traceback.format_exc()}') logger.debug(f'加载模块 {package_name} 失败:{str(err)} - {traceback.format_exc()}')
@@ -42,7 +54,7 @@ class ModuleHelper:
return submodules return submodules
@classmethod @classmethod
def load_with_pre_filter(cls, package_path: str, filter_func=lambda name, obj: True) -> List[Any]: def load_with_pre_filter(cls, package_path: str, filter_func: FilterFuncType = _default_filter) -> List[Any]:
""" """
导入子模块 导入子模块
:param package_path: 父包名 :param package_path: 父包名

View File

@@ -1,4 +1,5 @@
import base64 import base64
from typing import Optional
from app.core.config import settings from app.core.config import settings
from app.utils.http import RequestUtils from app.utils.http import RequestUtils
@@ -8,7 +9,8 @@ class OcrHelper:
_ocr_b64_url = f"{settings.OCR_HOST}/captcha/base64" _ocr_b64_url = f"{settings.OCR_HOST}/captcha/base64"
def get_captcha_text(self, image_url=None, image_b64=None, cookie=None, ua=None): def get_captcha_text(self, image_url: Optional[str] = None, image_b64: Optional[str] = None,
cookie: Optional[str] = None, ua: Optional[str] = None):
""" """
根据图片地址,获取验证码图片,并识别内容 根据图片地址,获取验证码图片,并识别内容
:param image_url: 图片地址 :param image_url: 图片地址

View File

@@ -39,7 +39,7 @@ class PluginHelper(metaclass=Singleton):
self.systemconfig.set(SystemConfigKey.PluginInstallReport, "1") self.systemconfig.set(SystemConfigKey.PluginInstallReport, "1")
@cached(maxsize=1000, ttl=1800) @cached(maxsize=1000, ttl=1800)
def get_plugins(self, repo_url: str, package_version: str = None) -> Optional[Dict[str, dict]]: def get_plugins(self, repo_url: str, package_version: Optional[str] = None) -> Optional[Dict[str, dict]]:
""" """
获取Github所有最新插件列表 获取Github所有最新插件列表
:param repo_url: Github仓库地址 :param repo_url: Github仓库地址
@@ -63,9 +63,10 @@ class PluginHelper(metaclass=Singleton):
return json.loads(res.text) return json.loads(res.text)
except json.JSONDecodeError: except json.JSONDecodeError:
logger.error(f"插件包数据解析失败:{res.text}") logger.error(f"插件包数据解析失败:{res.text}")
return None
return {} return {}
def get_plugin_package_version(self, pid: str, repo_url: str, package_version: str = None) -> Optional[str]: def get_plugin_package_version(self, pid: str, repo_url: str, package_version: Optional[str] = None) -> Optional[str]:
""" """
检查并获取指定插件的可用版本,支持多版本优先级加载和版本兼容性检测 检查并获取指定插件的可用版本,支持多版本优先级加载和版本兼容性检测
1. 如果未指定版本,则使用系统配置的默认版本(通过 settings.VERSION_FLAG 设置) 1. 如果未指定版本,则使用系统配置的默认版本(通过 settings.VERSION_FLAG 设置)
@@ -156,7 +157,7 @@ class PluginHelper(metaclass=Singleton):
json={"plugins": [{"plugin_id": plugin} for plugin in plugins]}) json={"plugins": [{"plugin_id": plugin} for plugin in plugins]})
return True if res else False return True if res else False
def install(self, pid: str, repo_url: str, package_version: str = None, force_install: bool = False) \ def install(self, pid: str, repo_url: str, package_version: Optional[str] = None, force_install: bool = False) \
-> Tuple[bool, str]: -> Tuple[bool, str]:
""" """
安装插件,包括依赖安装和文件下载,相关资源支持自动降级策略 安装插件,包括依赖安装和文件下载,相关资源支持自动降级策略
@@ -259,7 +260,7 @@ class PluginHelper(metaclass=Singleton):
self.install_reg(pid) self.install_reg(pid)
return True, "" return True, ""
def __get_file_list(self, pid: str, user_repo: str, package_version: str = None) -> \ def __get_file_list(self, pid: str, user_repo: str, package_version: Optional[str] = None) -> \
Tuple[Optional[list], Optional[str]]: Tuple[Optional[list], Optional[str]]:
""" """
获取插件的文件列表 获取插件的文件列表
@@ -294,7 +295,7 @@ class PluginHelper(metaclass=Singleton):
return None, "插件数据解析失败" return None, "插件数据解析失败"
def __download_files(self, pid: str, file_list: List[dict], user_repo: str, def __download_files(self, pid: str, file_list: List[dict], user_repo: str,
package_version: str = None, skip_requirements: bool = False) -> Tuple[bool, str]: package_version: Optional[str] = None, skip_requirements: bool = False) -> Tuple[bool, str]:
""" """
下载插件文件 下载插件文件
:param pid: 插件 ID :param pid: 插件 ID
@@ -447,58 +448,6 @@ class PluginHelper(metaclass=Singleton):
if plugin_dir.exists(): if plugin_dir.exists():
shutil.rmtree(plugin_dir, ignore_errors=True) shutil.rmtree(plugin_dir, ignore_errors=True)
@staticmethod
def __pip_uninstall_and_install_with_fallback(requirements_file: Path) -> Tuple[bool, str]:
"""
先卸载 requirements.txt 中的依赖,再按照自动降级策略重新安装,不使用 PIP 缓存
:param requirements_file: 依赖的 requirements.txt 文件路径
:return: (是否成功, 错误信息)
"""
# 读取 requirements.txt 文件中的依赖列表
try:
with open(requirements_file, "r", encoding="utf-8") as f:
dependencies = [line.strip() for line in f if line.strip() and not line.startswith("#")]
except Exception as e:
return False, f"无法读取 requirements.txt 文件:{str(e)}"
# 1. 先卸载所有依赖包
for dep in dependencies:
pip_uninstall_command = ["pip", "uninstall", "-y", dep]
logger.debug(f"尝试卸载依赖:{dep},命令:{' '.join(pip_uninstall_command)}")
success, message = SystemUtils.execute_with_subprocess(pip_uninstall_command)
if success:
logger.debug(f"依赖 {dep} 卸载成功,输出:{message}")
else:
error_message = f"卸载依赖 {dep} 失败,错误信息:{message}"
logger.error(error_message)
# 2. 重新安装所有依赖,使用自动降级策略
strategies = []
# 添加策略到列表中
if settings.PIP_PROXY:
strategies.append(("镜像站",
["pip", "install", "-r", str(requirements_file),
"-i", settings.PIP_PROXY, "--no-cache-dir"]))
if settings.PROXY_HOST:
strategies.append(("代理",
["pip", "install", "-r", str(requirements_file),
"--proxy", settings.PROXY_HOST, "--no-cache-dir"]))
strategies.append(("直连", ["pip", "install", "-r", str(requirements_file), "--no-cache-dir"]))
# 遍历策略进行安装
for strategy_name, pip_command in strategies:
logger.debug(f"[PIP] 尝试使用策略:{strategy_name} 安装依赖,命令:{' '.join(pip_command)}")
success, message = SystemUtils.execute_with_subprocess(pip_command)
if success:
logger.debug(f"[PIP] 策略:{strategy_name} 安装依赖成功,输出:{message}")
return True, message
else:
logger.error(f"[PIP] 策略:{strategy_name} 安装依赖失败,错误信息:{message}")
return False, "[PIP] 所有策略均安装依赖失败,请检查网络连接或 PIP 配置"
@staticmethod @staticmethod
def __pip_install_with_fallback(requirements_file: Path) -> Tuple[bool, str]: def __pip_install_with_fallback(requirements_file: Path) -> Tuple[bool, str]:
""" """
@@ -531,7 +480,7 @@ class PluginHelper(metaclass=Singleton):
@staticmethod @staticmethod
def __request_with_fallback(url: str, def __request_with_fallback(url: str,
headers: Optional[dict] = None, headers: Optional[dict] = None,
timeout: int = 60, timeout: Optional[int] = 60,
is_api: bool = False) -> Optional[Any]: is_api: bool = False) -> Optional[Any]:
""" """
使用自动降级策略,请求资源,优先级依次为镜像站、代理、直连 使用自动降级策略,请求资源,优先级依次为镜像站、代理、直连

View File

@@ -1,5 +1,5 @@
from enum import Enum from enum import Enum
from typing import Union, Dict from typing import Union, Dict, Optional
from app.schemas.types import ProgressKey from app.schemas.types import ProgressKey
from app.utils.singleton import Singleton from app.utils.singleton import Singleton
@@ -40,7 +40,7 @@ class ProgressHelper(metaclass=Singleton):
"text": "正在处理..." "text": "正在处理..."
} }
def update(self, key: Union[ProgressKey, str], value: float = None, text: str = None): def update(self, key: Union[ProgressKey, str], value: Union[float, int] = None, text: Optional[str] = None):
if isinstance(key, Enum): if isinstance(key, Enum):
key = key.value key = key.value
if not self._process_detail.get(key, {}).get('enable'): if not self._process_detail.get(key, {}).get('enable'):

View File

@@ -1,7 +1,7 @@
import re import re
import traceback import traceback
import xml.dom.minidom import xml.dom.minidom
from typing import List, Tuple, Union from typing import List, Tuple, Union, Optional
from urllib.parse import urljoin from urllib.parse import urljoin
import chardet import chardet
@@ -225,27 +225,27 @@ class RssHelper:
} }
@staticmethod @staticmethod
def parse(url, proxy: bool = False, timeout: int = 15, headers: dict = None) -> Union[List[dict], None]: def parse(url, proxy: bool = False, timeout: Optional[int] = 15, headers: dict = None) -> Union[List[dict], None, bool]:
""" """
解析RSS订阅URL获取RSS中的种子信息 解析RSS订阅URL获取RSS中的种子信息
:param url: RSS地址 :param url: RSS地址
:param proxy: 是否使用代理 :param proxy: 是否使用代理
:param timeout: 请求超时 :param timeout: 请求超时
:param headers: 自定义请求头 :param headers: 自定义请求头
:return: 种子信息列表如为None代表Rss过期 :return: 种子信息列表如为None代表Rss过期如果为False则为错误
""" """
# 开始处理 # 开始处理
ret_array: list = [] ret_array: list = []
if not url: if not url:
return [] return False
try: try:
ret = RequestUtils(proxies=settings.PROXY if proxy else None, ret = RequestUtils(proxies=settings.PROXY if proxy else None,
timeout=timeout, headers=headers).get_res(url) timeout=timeout, headers=headers).get_res(url)
if not ret: if not ret:
return [] return False
except Exception as err: except Exception as err:
logger.error(f"获取RSS失败{str(err)} - {traceback.format_exc()}") logger.error(f"获取RSS失败{str(err)} - {traceback.format_exc()}")
return [] return False
if ret: if ret:
ret_xml = "" ret_xml = ""
try: try:
@@ -301,6 +301,8 @@ class RssHelper:
if pubdate: if pubdate:
# 转换为时间 # 转换为时间
pubdate = StringUtils.get_time(pubdate) pubdate = StringUtils.get_time(pubdate)
# 获取豆瓣昵称
nickname = DomUtils.tag_value(item, "dc:creator", default="")
# 返回对象 # 返回对象
tmp_dict = {'title': title, tmp_dict = {'title': title,
'enclosure': enclosure, 'enclosure': enclosure,
@@ -308,6 +310,9 @@ class RssHelper:
'description': description, 'description': description,
'link': link, 'link': link,
'pubdate': pubdate} 'pubdate': pubdate}
# 如果豆瓣昵称不为空返回数据增加豆瓣昵称供doubansync插件获取
if nickname:
tmp_dict['nickname'] = nickname
ret_array.append(tmp_dict) ret_array.append(tmp_dict)
except Exception as e1: except Exception as e1:
logger.debug(f"解析RSS失败{str(e1)} - {traceback.format_exc()}") logger.debug(f"解析RSS失败{str(e1)} - {traceback.format_exc()}")
@@ -322,6 +327,7 @@ class RssHelper:
] ]
if ret_xml in _rss_expired_msg: if ret_xml in _rss_expired_msg:
return None return None
return False
return ret_array return ret_array
def get_rss_link(self, url: str, cookie: str, ua: str, proxy: bool = False) -> Tuple[str, str]: def get_rss_link(self, url: str, cookie: str, ua: str, proxy: bool = False) -> Tuple[str, str]:

View File

@@ -33,7 +33,7 @@ class RuleHelper:
return group return group
return None return None
def get_rule_group_by_media(self, media: MediaInfo, group_names: list = None) -> List[FilterRuleGroup]: def get_rule_group_by_media(self, media: MediaInfo = None, group_names: list = None) -> List[FilterRuleGroup]:
""" """
根据媒体信息获取规则组 根据媒体信息获取规则组
""" """
@@ -44,9 +44,9 @@ class RuleHelper:
for group in rule_groups: for group in rule_groups:
if not group.media_type: if not group.media_type:
ret_groups.append(group) ret_groups.append(group)
elif not group.category and group.media_type == media.type.value: elif media and not group.category and group.media_type == media.type.value:
ret_groups.append(group) ret_groups.append(group)
elif group.category == media.category: elif media and group.category == media.category:
ret_groups.append(group) ret_groups.append(group)
return ret_groups return ret_groups

View File

@@ -1,10 +1,11 @@
from threading import Thread from threading import Thread
from typing import List, Tuple from typing import List, Tuple, Optional
from app.core.cache import cached, cache_backend from app.core.cache import cached, cache_backend
from app.core.config import settings from app.core.config import settings
from app.db.subscribe_oper import SubscribeOper from app.db.subscribe_oper import SubscribeOper
from app.db.systemconfig_oper import SystemConfigOper from app.db.systemconfig_oper import SystemConfigOper
from app.log import logger
from app.schemas.types import SystemConfigKey from app.schemas.types import SystemConfigKey
from app.utils.http import RequestUtils from app.utils.http import RequestUtils
from app.utils.singleton import Singleton from app.utils.singleton import Singleton
@@ -32,16 +33,33 @@ class SubscribeHelper(metaclass=Singleton):
_shares_cache_region = "subscribe_share" _shares_cache_region = "subscribe_share"
_github_user = None
_share_user_id = None
_admin_users = [
"jxxghp",
"thsrite",
"InfinityPacer",
"DDSRem",
"Aqr-K",
"Putarku",
"4Nest",
"xyswordzoro",
"wikrin"
]
def __init__(self): def __init__(self):
self.systemconfig = SystemConfigOper() self.systemconfig = SystemConfigOper()
self.share_user_id = SystemUtils.generate_user_unique_id()
if settings.SUBSCRIBE_STATISTIC_SHARE: if settings.SUBSCRIBE_STATISTIC_SHARE:
if not self.systemconfig.get(SystemConfigKey.SubscribeReport): if not self.systemconfig.get(SystemConfigKey.SubscribeReport):
if self.sub_report(): if self.sub_report():
self.systemconfig.set(SystemConfigKey.SubscribeReport, "1") self.systemconfig.set(SystemConfigKey.SubscribeReport, "1")
self.get_user_uuid()
self.get_github_user()
@cached(maxsize=20, ttl=1800) @cached(maxsize=20, ttl=1800)
def get_statistic(self, stype: str, page: int = 1, count: int = 30) -> List[dict]: def get_statistic(self, stype: str, page: Optional[int] = 1, count: Optional[int] = 30) -> List[dict]:
""" """
获取订阅统计数据 获取订阅统计数据
""" """
@@ -135,7 +153,7 @@ class SubscribeHelper(metaclass=Singleton):
"share_title": share_title, "share_title": share_title,
"share_comment": share_comment, "share_comment": share_comment,
"share_user": share_user, "share_user": share_user,
"share_uid": self.share_user_id, "share_uid": self._share_user_id,
**subscribe_dict **subscribe_dict
}) })
if res is None: if res is None:
@@ -155,7 +173,7 @@ class SubscribeHelper(metaclass=Singleton):
return False, "当前没有开启订阅数据共享功能" return False, "当前没有开启订阅数据共享功能"
res = RequestUtils(proxies=settings.PROXY, res = RequestUtils(proxies=settings.PROXY,
timeout=5).delete_res(f"{self._sub_share}/{share_id}", timeout=5).delete_res(f"{self._sub_share}/{share_id}",
params={"share_uid": self.share_user_id}) params={"share_uid": self._share_user_id})
if res is None: if res is None:
return False, "连接MoviePilot服务器失败" return False, "连接MoviePilot服务器失败"
if res.ok: if res.ok:
@@ -182,7 +200,7 @@ class SubscribeHelper(metaclass=Singleton):
return False, res.json().get("message") return False, res.json().get("message")
@cached(region=_shares_cache_region) @cached(region=_shares_cache_region)
def get_shares(self, name: str = None, page: int = 1, count: int = 30) -> List[dict]: def get_shares(self, name: Optional[str] = None, page: Optional[int] = 1, count: Optional[int] = 30) -> List[dict]:
""" """
获取订阅分享数据 获取订阅分享数据
""" """
@@ -196,3 +214,35 @@ class SubscribeHelper(metaclass=Singleton):
if res and res.status_code == 200: if res and res.status_code == 200:
return res.json() return res.json()
return [] return []
def get_user_uuid(self) -> str:
"""
获取用户uuid
"""
if not self._share_user_id:
self._share_user_id = SystemUtils.generate_user_unique_id()
logger.info(f"当前用户UUID: {self._share_user_id}")
return self._share_user_id
def get_github_user(self) -> str:
"""
获取github用户
"""
if self._github_user is None and settings.GITHUB_HEADERS:
res = RequestUtils(headers=settings.GITHUB_HEADERS,
proxies=settings.PROXY,
timeout=15).get_res(f"https://api.github.com/user")
if res:
self._github_user = res.json().get("login")
logger.info(f"当前Github用户: {self._github_user}")
return self._github_user
def is_admin_user(self) -> bool:
"""
判断是否是管理员
"""
if not self._github_user:
return False
if self._github_user in self._admin_users:
return True
return False

Some files were not shown because too many files have changed in this diff Show More