mirror of
https://github.com/jxxghp/MoviePilot.git
synced 2026-05-08 21:02:44 +08:00
Compare commits
195 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
1336b2136d | ||
|
|
b20e21e700 | ||
|
|
c27ab4a4c7 | ||
|
|
d9e6532325 | ||
|
|
049f16ba01 | ||
|
|
6541458326 | ||
|
|
9f2912426b | ||
|
|
fde33d267a | ||
|
|
ef7f0afa37 | ||
|
|
bea77a8243 | ||
|
|
b984b83870 | ||
|
|
2153ad48db | ||
|
|
c9c43fde74 | ||
|
|
e2c9742f64 | ||
|
|
3d459a40f7 | ||
|
|
5675cd5b11 | ||
|
|
74a4d0bd66 | ||
|
|
2b8c313019 | ||
|
|
62fb6b80a3 | ||
|
|
eea86528d8 | ||
|
|
84e6abb659 | ||
|
|
da2c755b6d | ||
|
|
51f39be9bc | ||
|
|
21b762e75c | ||
|
|
54095074b6 | ||
|
|
33525730b5 | ||
|
|
71260f04b5 | ||
|
|
e2acec321d | ||
|
|
74a462a09f | ||
|
|
ad9e1a5da6 | ||
|
|
d90e3c29a5 | ||
|
|
19165eff75 | ||
|
|
52d0703812 | ||
|
|
1431a5e82a | ||
|
|
23fe643526 | ||
|
|
545b3c0482 | ||
|
|
f102119eef | ||
|
|
9bb3d707c9 | ||
|
|
b892ef50dc | ||
|
|
41e2907168 | ||
|
|
14e28ed693 | ||
|
|
79393c21ff | ||
|
|
cafa4d217c | ||
|
|
2b9e69b112 | ||
|
|
3ffcea70a7 | ||
|
|
ffc72ba6fe | ||
|
|
848becd946 | ||
|
|
71fe96d7f9 | ||
|
|
35c7238ede | ||
|
|
3578204508 | ||
|
|
c11cf17f62 | ||
|
|
5a59652684 | ||
|
|
7f5f31f143 | ||
|
|
dc1cee80b1 | ||
|
|
92cb066748 | ||
|
|
6c8ef4122b | ||
|
|
971b02ac8c | ||
|
|
d4a9643f47 | ||
|
|
e56d31fedc | ||
|
|
b9d91c5cd7 | ||
|
|
57cdb57331 | ||
|
|
0f7a7ef44f | ||
|
|
6267b3f670 | ||
|
|
82f77b4729 | ||
|
|
58da0ebb4f | ||
|
|
7a43e43478 | ||
|
|
e5ec02e043 | ||
|
|
2944c343a8 | ||
|
|
940cc566c8 | ||
|
|
db7b2cdcac | ||
|
|
8111cf5dc8 | ||
|
|
be55c7bdd9 | ||
|
|
a4288aa871 | ||
|
|
c0f15ac7ff | ||
|
|
4047d433f5 | ||
|
|
91d6769d0f | ||
|
|
ad378956bf | ||
|
|
9dcfb6dc1e | ||
|
|
2d0b21d3f2 | ||
|
|
3287c85300 | ||
|
|
fd2682bc6a | ||
|
|
7dd1e75ad7 | ||
|
|
93b8f24ec7 | ||
|
|
1c240f9d76 | ||
|
|
9a2ef5fe48 | ||
|
|
7bd55caed7 | ||
|
|
ae36f5100a | ||
|
|
b2efac0495 | ||
|
|
1dced579ea | ||
|
|
0deea17ef9 | ||
|
|
3d0c06013d | ||
|
|
2536119f60 | ||
|
|
aeede861e3 | ||
|
|
1edbfb0d2d | ||
|
|
265724bbe9 | ||
|
|
2b0b190cf8 | ||
|
|
08a2b348d8 | ||
|
|
e896068bc5 | ||
|
|
85e5338121 | ||
|
|
5c3cd8cabc | ||
|
|
5a837a4161 | ||
|
|
1e1f80b6d9 | ||
|
|
e06e00204b | ||
|
|
b98c0f205d | ||
|
|
0c266726ea | ||
|
|
b43e591e4c | ||
|
|
3d6e1335f8 | ||
|
|
361e8dd65d | ||
|
|
de865f3cf1 | ||
|
|
37985eba25 | ||
|
|
e0a251b339 | ||
|
|
f9f4d97a51 | ||
|
|
6adc0e27d5 | ||
|
|
5deb0089bb | ||
|
|
bfbeae7fa7 | ||
|
|
8a98c65026 | ||
|
|
0133c6e60c | ||
|
|
ae0e171dd2 | ||
|
|
9f0ed49d43 | ||
|
|
8df2955a67 | ||
|
|
ef0cd7d5c5 | ||
|
|
463fd3761a | ||
|
|
4af4ad0243 | ||
|
|
24aa64232f | ||
|
|
9937f6792e | ||
|
|
185b72dc8d | ||
|
|
0fb12c77eb | ||
|
|
631df4c9f8 | ||
|
|
0da08394ae | ||
|
|
6392ee627f | ||
|
|
da6ba3fa8b | ||
|
|
cb0bb8a38e | ||
|
|
e1cdc51904 | ||
|
|
79c57d8e4f | ||
|
|
681f1eaeb5 | ||
|
|
de2323d67a | ||
|
|
9cf240b8e8 | ||
|
|
b93c97938c | ||
|
|
41d347bcef | ||
|
|
060e2f225c | ||
|
|
7103b0334a | ||
|
|
354d5977e0 | ||
|
|
19a56f7d24 | ||
|
|
323ad099c3 | ||
|
|
484ecf10c3 | ||
|
|
2a333add9b | ||
|
|
90df09e64d | ||
|
|
53397536ce | ||
|
|
f902f43c56 | ||
|
|
9948db8bce | ||
|
|
1b6a06bd7b | ||
|
|
ce1db7f62b | ||
|
|
74dbae8514 | ||
|
|
7d4ec2ddec | ||
|
|
3654b9609f | ||
|
|
83e583032a | ||
|
|
35a4d77915 | ||
|
|
cbfb2027a8 | ||
|
|
ce0548632e | ||
|
|
da1f6a0997 | ||
|
|
a514ec0761 | ||
|
|
851dd85fc6 | ||
|
|
0270af5b19 | ||
|
|
f8f964106a | ||
|
|
aa0f2a571c | ||
|
|
727a14864e | ||
|
|
c7e909520c | ||
|
|
7f40863449 | ||
|
|
e994a9fc92 | ||
|
|
d8fe8b28e8 | ||
|
|
7f4f085d4a | ||
|
|
2052766a71 | ||
|
|
887fe834bd | ||
|
|
0d4f87a631 | ||
|
|
ed96241053 | ||
|
|
788104d151 | ||
|
|
f8b3dbaef5 | ||
|
|
b66ca92d72 | ||
|
|
c2a80dbedd | ||
|
|
95202af139 | ||
|
|
d77ea8f0a0 | ||
|
|
bbba9813a2 | ||
|
|
220cbc3072 | ||
|
|
fcbdef5e66 | ||
|
|
e2e1c7642d | ||
|
|
33813ecf1d | ||
|
|
ef656fcc67 | ||
|
|
8fe7e015dd | ||
|
|
7132fdbb26 | ||
|
|
0f57b39345 | ||
|
|
d13b5622c7 | ||
|
|
b5eaba26da | ||
|
|
60007cf398 | ||
|
|
65cc169391 | ||
|
|
68a9fc4a13 |
3
.gitignore
vendored
3
.gitignore
vendored
@@ -16,4 +16,5 @@ config/sites/**
|
||||
*.pyc
|
||||
*.log
|
||||
.vscode
|
||||
venv
|
||||
venv
|
||||
.DS_Store
|
||||
|
||||
@@ -11,7 +11,7 @@ ENV LANG="C.UTF-8" \
|
||||
PORT=3001 \
|
||||
NGINX_PORT=3000 \
|
||||
PROXY_HOST="" \
|
||||
MOVIEPILOT_AUTO_UPDATE=release \
|
||||
MOVIEPILOT_AUTO_UPDATE=false \
|
||||
AUTH_SITE="iyuu" \
|
||||
IYUU_SIGN=""
|
||||
WORKDIR "/app"
|
||||
|
||||
283
README.md
283
README.md
@@ -1,283 +1,32 @@
|
||||
# MoviePilot
|
||||
|
||||

|
||||

|
||||

|
||||

|
||||

|
||||

|
||||

|
||||
|
||||
|
||||
基于 [NAStool](https://github.com/NAStool/nas-tools) 部分代码重新设计,聚焦自动化核心需求,减少问题同时更易于扩展和维护。
|
||||
|
||||
# 仅用于学习交流使用,请勿在任何国内平台宣传该项目!
|
||||
|
||||
发布频道:https://t.me/moviepilot_channel
|
||||
|
||||
Wiki:https://wiki.movie-pilot.org
|
||||
|
||||
## 主要特性
|
||||
|
||||
- 前后端分离,基于FastApi + Vue3,前端项目地址:[MoviePilot-Frontend](https://github.com/jxxghp/MoviePilot-Frontend),API:http://localhost:3001/docs
|
||||
- 聚焦核心需求,简化功能和设置,部分设置项可直接使用默认值。
|
||||
- 重新设计了用户界面,更加美观易用。
|
||||
|
||||
## 安装
|
||||
|
||||
### 注意:管理员用户不要使用弱密码!如非必要不要暴露到公网。如被盗取管理账号权限,将会导致站点Cookie等敏感数据泄露!
|
||||
|
||||
### 1. **安装CookieCloud插件**
|
||||
|
||||
站点信息需要通过CookieCloud同步获取,因此需要安装CookieCloud插件,将浏览器中的站点Cookie数据同步到云端后再同步到MoviePilot使用。 插件下载地址请点击 [这里](https://github.com/easychen/CookieCloud/releases)。
|
||||
|
||||
### 2. **安装CookieCloud服务端(可选)**
|
||||
|
||||
通过CookieCloud可以快速同步浏览器中保存的站点数据到MoviePilot,支持以下服务方式:
|
||||
|
||||
- 使用公共CookieCloud远程服务器(默认):服务器地址为:https://movie-pilot.org/cookiecloud
|
||||
- 使用内建的本地Cookie服务:在 `设定` - `站点` 中打开`启用本地CookieCloud服务器`后,将启用内建的CookieCloud提供服务,服务地址为:`http://localhost:${NGINX_PORT}/cookiecloud/`, Cookie数据加密保存在配置文件目录下的`cookies`文件中
|
||||
- 自建服务CookieCloud服务器:参考 [CookieCloud](https://github.com/easychen/CookieCloud) 项目进行搭建,docker镜像请点击 [这里](https://hub.docker.com/r/easychen/cookiecloud)
|
||||
|
||||
**声明:** 本项目不会收集用户敏感数据,Cookie同步也是基于CookieCloud项目实现,非本项目提供的能力。技术角度上CookieCloud采用端到端加密,在个人不泄露`用户KEY`和`端对端加密密码`的情况下第三方无法窃取任何用户信息(包括服务器持有者)。如果你不放心,可以不使用公共服务或者不使用本项目,但如果使用后发生了任何信息泄露与本项目无关!
|
||||
|
||||
### 3. **安装配套管理软件**
|
||||
|
||||
MoviePilot需要配套下载器和媒体服务器配合使用。
|
||||
- 下载器支持:qBittorrent、Transmission,QB版本号要求>= 4.3.9,TR版本号要求>= 3.0,推荐使用QB。
|
||||
- 媒体服务器支持:Jellyfin、Emby、Plex,推荐使用Emby。
|
||||
|
||||
### 4. **安装MoviePilot**
|
||||
|
||||
- Docker镜像
|
||||
|
||||
点击 [这里](https://hub.docker.com/r/jxxghp/moviepilot) 或执行命令:
|
||||
|
||||
```shell
|
||||
docker pull jxxghp/moviepilot:latest
|
||||
```
|
||||
|
||||
- Windows
|
||||
|
||||
1. 独立执行文件版本:下载 [MoviePilot.exe](https://github.com/jxxghp/MoviePilot/releases),双击运行后自动生成配置文件目录,访问:http://localhost:3000
|
||||
2. 安装包版本【推荐】:[Windows-MoviePilot](https://github.com/developer-wlj/Windows-MoviePilot)
|
||||
|
||||
- 群晖套件
|
||||
|
||||
添加套件源:https://spk7.imnks.com/
|
||||
|
||||
- 本地运行
|
||||
|
||||
1) 将工程 [MoviePilot-Plugins](https://github.com/jxxghp/MoviePilot-Plugins) plugins目录下的所有文件复制到`app/plugins`目录
|
||||
2) 将工程 [MoviePilot-Resources](https://github.com/jxxghp/MoviePilot-Resources) resources目录下的所有文件复制到`app/helper`目录
|
||||
3) 执行命令:`pip install -r requirements.txt` 安装依赖
|
||||
4) 执行命令:`PYTHONPATH=. python app/main.py` 启动服务
|
||||
5) 根据前端项目 [MoviePilot-Frontend](https://github.com/jxxghp/MoviePilot-Frontend) 说明,启动前端服务
|
||||
|
||||
## 配置
|
||||
|
||||
大部分配置可启动后通过WEB管理界面进行配置,但仍有部分配置需要通过环境变量/配置文件进行配置。
|
||||
|
||||
配置文件映射路径:`/config`,配置项生效优先级:环境变量 > env文件(或通过WEB界面配置) > 默认值。
|
||||
|
||||
> ❗号标识的为必填项,其它为可选项,可选项可删除配置变量从而使用默认值。
|
||||
|
||||
### 1. **环境变量**
|
||||
|
||||
- **❗NGINX_PORT:** WEB服务端口,默认`3000`,可自行修改,不能与API服务端口冲突
|
||||
- **❗PORT:** API服务端口,默认`3001`,可自行修改,不能与WEB服务端口冲突
|
||||
- **PUID**:运行程序用户的`uid`,默认`0`
|
||||
- **PGID**:运行程序用户的`gid`,默认`0`
|
||||
- **UMASK**:掩码权限,默认`000`,可以考虑设置为`022`
|
||||
- **PROXY_HOST:** 网络代理,访问themoviedb或者重启更新需要使用代理访问,格式为`http(s)://ip:port`、`socks5://user:pass@host:port`
|
||||
- **MOVIEPILOT_AUTO_UPDATE:** 重启时自动更新,`true`/`release`/`dev`/`false`,默认`release`,需要能正常连接Github **注意:如果出现网络问题可以配置`PROXY_HOST`**
|
||||
- **❗AUTH_SITE:** 认证站点(认证通过后才能使用站点相关功能),支持配置多个认证站点,使用`,`分隔,如:`iyuu,hhclub`,会依次执行认证操作,直到有一个站点认证成功。
|
||||
|
||||
配置`AUTH_SITE`后,需要根据下表配置对应站点的认证参数。
|
||||
认证资源`v1.2.8+`支持:`iyuu`/`hhclub`/`audiences`/`hddolby`/`zmpt`/`freefarm`/`hdfans`/`wintersakura`/`leaves`/`ptba` /`icc2022`/`ptlsp`/`xingtan`/`ptvicomo`/`agsvpt`/`hdkyl`/`qingwa`/`discfan`
|
||||
|
||||
| 站点 | 参数 |
|
||||
|:------------:|:-----------------------------------------------------:|
|
||||
| iyuu | `IYUU_SIGN`:IYUU登录令牌 |
|
||||
| hhclub | `HHCLUB_USERNAME`:用户名<br/>`HHCLUB_PASSKEY`:密钥 |
|
||||
| audiences | `AUDIENCES_UID`:用户ID<br/>`AUDIENCES_PASSKEY`:密钥 |
|
||||
| hddolby | `HDDOLBY_ID`:用户ID<br/>`HDDOLBY_PASSKEY`:密钥 |
|
||||
| zmpt | `ZMPT_UID`:用户ID<br/>`ZMPT_PASSKEY`:密钥 |
|
||||
| freefarm | `FREEFARM_UID`:用户ID<br/>`FREEFARM_PASSKEY`:密钥 |
|
||||
| hdfans | `HDFANS_UID`:用户ID<br/>`HDFANS_PASSKEY`:密钥 |
|
||||
| wintersakura | `WINTERSAKURA_UID`:用户ID<br/>`WINTERSAKURA_PASSKEY`:密钥 |
|
||||
| leaves | `LEAVES_UID`:用户ID<br/>`LEAVES_PASSKEY`:密钥 |
|
||||
| ptba | `PTBA_UID`:用户ID<br/>`PTBA_PASSKEY`:密钥 |
|
||||
| icc2022 | `ICC2022_UID`:用户ID<br/>`ICC2022_PASSKEY`:密钥 |
|
||||
| ptlsp | `PTLSP_UID`:用户ID<br/>`PTLSP_PASSKEY`:密钥 |
|
||||
| xingtan | `XINGTAN_UID`:用户ID<br/>`XINGTAN_PASSKEY`:密钥 |
|
||||
| ptvicomo | `PTVICOMO_UID`:用户ID<br/>`PTVICOMO_PASSKEY`:密钥 |
|
||||
| agsvpt | `AGSVPT_UID`:用户ID<br/>`AGSVPT_PASSKEY`:密钥 |
|
||||
| hdkyl | `HDKYL_UID`:用户ID<br/>`HDKYL_PASSKEY`:密钥 |
|
||||
| qingwa | `QINGWA_UID`:用户ID<br/>`QINGWA_PASSKEY`:密钥 |
|
||||
| discfan | `DISCFAN_UID`:用户ID<br/>`DISCFAN_PASSKEY`:密钥 |
|
||||
|
||||
|
||||
### 2. **环境变量 / 配置文件**
|
||||
|
||||
配置文件名:`app.env`,放配置文件根目录。
|
||||
|
||||
- **❗SUPERUSER:** 超级管理员用户名,默认`admin`,安装后使用该用户登录后台管理界面,**注意:启动一次后再次修改该值不会生效,除非删除数据库文件!**
|
||||
- **❗API_TOKEN:** API密钥,默认`moviepilot`,在媒体服务器Webhook、微信回调等地址配置中需要加上`?token=`该值,建议修改为复杂字符串
|
||||
- **APP_DOMAIN:** MoviePilot WEB使用的域名,用于生成跳转链接等
|
||||
- **BIG_MEMORY_MODE:** 大内存模式,默认为`false`,开启后会增加缓存数量,占用更多的内存,但响应速度会更快
|
||||
- **DOH_ENABLE:** DNS over HTTPS开关,`true`/`false`,默认`true`,开启后会使用DOH对api.themoviedb.org等域名进行解析,以减少被DNS污染的情况,提升网络连通性
|
||||
- **META_CACHE_EXPIRE:** 元数据识别缓存过期时间(小时),数字型,不配置或者配置为0时使用系统默认(大内存模式为7天,否则为3天),调大该值可减少themoviedb的访问次数
|
||||
- **GITHUB_TOKEN:** Github token,提高自动更新、插件安装等请求Github Api的限流阈值,格式:ghp_****
|
||||
- **GITHUB_PROXY:** Github代理地址,用于加速版本及插件升级安装,格式:`https://mirror.ghproxy.com/`
|
||||
- **DEV:** 开发者模式,`true`/`false`,默认`false`,仅用于本地开发使用,开启后会暂停所有定时任务,且插件代码文件的修改无需重启会自动重载生效
|
||||
- **AUTO_UPDATE_RESOURCE**:启动时自动检测和更新资源包(站点索引及认证等),`true`/`false`,默认`true`,需要能正常连接Github,仅支持Docker镜像
|
||||
---
|
||||
- **TMDB_API_DOMAIN:** TMDB API地址,默认`api.themoviedb.org`,也可配置为`api.tmdb.org`、`tmdb.movie-pilot.org` 或其它中转代理服务地址,能连通即可
|
||||
- **TMDB_IMAGE_DOMAIN:** TMDB图片地址,默认`image.tmdb.org`,可配置为其它中转代理以加速TMDB图片显示,如:`static-mdb.v.geilijiasu.com`
|
||||
- **WALLPAPER:** 登录首页电影海报,`tmdb`/`bing`,默认`tmdb`
|
||||
- **RECOGNIZE_SOURCE:** 媒体信息识别来源,`themoviedb`/`douban`,默认`themoviedb`,使用`douban`时不支持二级分类,且受豆瓣控流限制
|
||||
- **FANART_ENABLE:** Fanart开关,`true`/`false`,默认`true`,关闭后刮削的图片类型会大幅减少
|
||||
- **SCRAP_SOURCE:** 刮削元数据及图片使用的数据源,`themoviedb`/`douban`,默认`themoviedb`
|
||||
- **SCRAP_FOLLOW_TMDB:** 新增已入库媒体是否跟随TMDB信息变化,`true`/`false`,默认`true`,为`false`时即使TMDB信息变化了也会仍然按历史记录中已入库的信息进行刮削
|
||||
---
|
||||
- **AUTO_DOWNLOAD_USER:** 远程交互搜索时自动择优下载的用户ID(消息通知渠道的用户ID),多个用户使用,分割,设置为 all 代表全部用户自动择优下载,未设置需要手动选择资源或者回复`0`才自动择优下载
|
||||
- **DOWNLOAD_SUBTITLE:** 下载站点字幕,`true`/`false`,默认`true`
|
||||
- **SEARCH_MULTIPLE_NAME:** 搜索时是否使用多个名称搜索,`true`/`false`,默认`false`,开启后会使用多个名称进行搜索,搜索结果会更全面,但会增加搜索时间;关闭时只要其中一个名称搜索到结果或全部名称搜索完毕即停止
|
||||
- **SUBSCRIBE_STATISTIC_SHARE:** 是否匿名分享订阅数据,用于统计和展示用户热门订阅,`true`/`false`,默认`true`
|
||||
- **PLUGIN_STATISTIC_SHARE:** 是否匿名分享插件安装统计数据,用于统计和显示插件下载安装次数,`true`/`false`,默认`true`
|
||||
---
|
||||
- **OCR_HOST:** OCR识别服务器地址,格式:`http(s)://ip:port`,用于识别站点验证码实现自动登录获取Cookie等,不配置默认使用内建服务器`https://movie-pilot.org`,可使用 [这个镜像](https://hub.docker.com/r/jxxghp/moviepilot-ocr) 自行搭建。
|
||||
---
|
||||
- **MOVIE_RENAME_FORMAT:** 电影重命名格式,基于jinjia2语法
|
||||
|
||||
`MOVIE_RENAME_FORMAT`支持的配置项:
|
||||
|
||||
> `title`: TMDB/豆瓣中的标题
|
||||
> `en_title`: TMDB中的英文标题 (暂不支持豆瓣)
|
||||
> `original_title`: TMDB/豆瓣中的原语种标题
|
||||
> `name`: 从文件名中识别的名称(同时存在中英文时,优先使用中文)
|
||||
> `en_name`:从文件名中识别的英文名称(可能为空)
|
||||
> `original_name`: 原文件名(包括文件外缀)
|
||||
> `year`: 年份
|
||||
> `resourceType`:资源类型
|
||||
> `effect`:特效
|
||||
> `edition`: 版本(资源类型+特效)
|
||||
> `videoFormat`: 分辨率
|
||||
> `releaseGroup`: 制作组/字幕组
|
||||
> `customization`: 自定义占位符
|
||||
> `videoCodec`: 视频编码
|
||||
> `audioCodec`: 音频编码
|
||||
> `tmdbid`: TMDB ID(非TMDB识别源时为空)
|
||||
> `imdbid`: IMDB ID(可能为空)
|
||||
> `doubanid`:豆瓣ID(非豆瓣识别源时为空)
|
||||
> `part`:段/节
|
||||
> `fileExt`:文件扩展名
|
||||
> `customization`:自定义占位符
|
||||
|
||||
`MOVIE_RENAME_FORMAT`默认配置格式:
|
||||
|
||||
```
|
||||
{{title}}{% if year %} ({{year}}){% endif %}/{{title}}{% if year %} ({{year}}){% endif %}{% if part %}-{{part}}{% endif %}{% if videoFormat %} - {{videoFormat}}{% endif %}{{fileExt}}
|
||||
```
|
||||
|
||||
- **TV_RENAME_FORMAT:** 电视剧重命名格式,基于jinjia2语法
|
||||
|
||||
`TV_RENAME_FORMAT`额外支持的配置项:
|
||||
|
||||
> `season`: 季号
|
||||
> `episode`: 集号
|
||||
> `season_episode`: 季集 SxxExx
|
||||
> `episode_title`: 集标题
|
||||
|
||||
`TV_RENAME_FORMAT`默认配置格式:
|
||||
|
||||
```
|
||||
{{title}}{% if year %} ({{year}}){% endif %}/Season {{season}}/{{title}} - {{season_episode}}{% if part %}-{{part}}{% endif %}{% if episode %} - 第 {{episode}} 集{% endif %}{{fileExt}}
|
||||
```
|
||||
|
||||
|
||||
### 3. **优先级规则**
|
||||
|
||||
- 仅支持使用内置规则进行排列组合,通过设置多层规则来实现优先级顺序匹配
|
||||
- 符合任一层级规则的资源将被标识选中,匹配成功的层级做为该资源的优先级,排越前面优先级超高
|
||||
- 不符合过滤规则所有层级规则的资源将不会被选中
|
||||
|
||||
### 4. **插件扩展**
|
||||
|
||||
- **PLUGIN_MARKET:** 插件市场仓库地址,仅支持Github仓库`main`分支,多个地址使用`,`分隔,通过查看[MoviePilot-Plugins](https://github.com/jxxghp/MoviePilot-Plugins)项目的fork,或者查看频道置顶了解更多第三方插件仓库,目前已有 `130+` 插件。
|
||||
默认已内置以下插件库:
|
||||
1. https://github.com/jxxghp/MoviePilot-Plugins
|
||||
2. https://github.com/thsrite/MoviePilot-Plugins
|
||||
3. https://github.com/honue/MoviePilot-Plugins
|
||||
4. https://github.com/InfinityPacer/MoviePilot-Plugins
|
||||
|
||||
## 使用
|
||||
|
||||
### 1. **WEB后台管理**
|
||||
- 通过设置的超级管理员用户登录后台管理界面(`SUPERUSER`配置项,默认用户:admin,默认端口:3000)
|
||||
> ❗**注意:超级管理员用户初始密码为自动生成,需要在首次运行时的后台日志中查看!** 如首次运行日志丢失,则需要删除配置文件目录下的`user.db`文件,然后重启服务。
|
||||
### 2. **站点维护**
|
||||
- 通过CookieCloud同步快速添加站点,不需要使用的站点可在WEB管理界面中禁用或删除,无法同步的站点也可手动新增。
|
||||
- 需要通过环境变量设置用户认证信息且认证成功后才能使用站点相关功能,未认证通过时站点相关的插件也会无法显示。
|
||||
### 3. **文件整理**
|
||||
- 默认通过监控下载器实现下载完成后自动整理入库并刮削媒体信息,需要后台打开`下载器监控`开关,并在设定中维护好下载目录和媒体库目录,且仅会处理通过MoviePilot添加下载的任务(含`MOVIEPILOT`标签)。
|
||||
- 下载器监控默认轮循间隔为5分钟,如果是使用qbittorrent,可在 `QB设置`->`下载完成时运行外部程序` 处填入:`curl "http://localhost:3000/api/v1/transfer/now?token=moviepilot" `,实现无需等待轮循下载完成后立即整理入库(地址、端口和token按实际调整,curl也可更换为wget)。
|
||||
- 使用`目录监控`等插件实现更灵活的自动整理(使用MoviePilot整理其它途径下载的资源时使用)。
|
||||
### 4. **通知交互**
|
||||
- 支持通过`微信`/`Telegram`/`Slack`/`SynologyChat`/`VoceChat`等渠道远程管理和订阅下载,其中 微信/Telegram 将会自动添加操作菜单(微信菜单条数有限制,部分菜单不显示)。
|
||||
- `微信`回调地址、`SynologyChat`传入地址地址相对路径均为:`/api/v1/message/`;`VoceChat`的Webhook地址相对路径为:`/api/v1/message/?token=moviepilot`,其中moviepilot为设置的`API_TOKEN`。
|
||||
- 插件市场中有其它渠道的通知插件(仅支持单向通知),可安装使用。
|
||||
### 5. **订阅与搜索**
|
||||
- 通过MoviePilot管理后台搜索和订阅。
|
||||
- 将MoviePilot做为`Radarr`或`Sonarr`服务器添加到`Overseerr`或`Jellyseerr`,可使用`Overseerr/Jellyseerr`浏览和添加订阅。
|
||||
- 安装`豆瓣榜单订阅`、`猫眼订阅`、`热门订阅`等插件,实现自动订阅各类榜单。
|
||||
### 6. **其他**
|
||||
- 通过设置媒体服务器Webhook指向MoviePilot(相对路径为`/api/v1/webhook?token=moviepilot`,其中`moviepilot`为设置的`API_TOKEN`),可实现通过MoviePilot发送播放通知,以及配合各类插件实现播放限速等功能。
|
||||
- 映射宿主机`docker.sock`文件到容器`/var/run/docker.sock`,可支持应用内建重启操作。实例:`-v /var/run/docker.sock:/var/run/docker.sock:ro`。
|
||||
- 将WEB页面添加到手机桌面图标可获得与App一样的使用体验。
|
||||
|
||||
### **注意**
|
||||
- 容器首次启动需要下载浏览器内核,根据网络情况可能需要较长时间,此时无法登录。可映射`/moviepilot`目录避免容器重置后重新触发浏览器内核下载。
|
||||
- 使用反向代理时,需要添加以下配置,否则可能会导致部分功能无法访问(`ip:port`修改为实际值):
|
||||
```nginx configuration
|
||||
location / {
|
||||
proxy_pass http://ip:port;
|
||||
proxy_set_header Host $http_host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
}
|
||||
```
|
||||
- 反代使用ssl时,需要开启`http2`,否则会导致日志加载时间过长或不可用。以`Nginx`为例:
|
||||
```nginx configuration
|
||||
server {
|
||||
listen 443 ssl;
|
||||
http2 on;
|
||||
# ...
|
||||
}
|
||||
```
|
||||
- 新建的企业微信应用需要固定公网IP的代理才能收到消息,代理添加以下代码:
|
||||
```nginx configuration
|
||||
location /cgi-bin/gettoken {
|
||||
proxy_pass https://qyapi.weixin.qq.com;
|
||||
}
|
||||
location /cgi-bin/message/send {
|
||||
proxy_pass https://qyapi.weixin.qq.com;
|
||||
}
|
||||
location /cgi-bin/menu/create {
|
||||
proxy_pass https://qyapi.weixin.qq.com;
|
||||
}
|
||||
```
|
||||
|
||||
- 部分插件功能基于文件系统监控实现(如`目录监控`等),需在宿主机上(不是docker容器内)执行以下命令并重启:
|
||||
```shell
|
||||
echo fs.inotify.max_user_watches=524288 | sudo tee -a /etc/sysctl.conf
|
||||
echo fs.inotify.max_user_instances=524288 | sudo tee -a /etc/sysctl.conf
|
||||
sudo sysctl -p
|
||||
```
|
||||
|
||||

|
||||
|
||||

|
||||
|
||||

|
||||
|
||||

|
||||
|
||||

|
||||
|
||||
|
||||
## 安装使用
|
||||
|
||||
访问官方Wiki:https://wiki.movie-pilot.org
|
||||
|
||||
## 贡献者
|
||||
|
||||
<a href="https://github.com/jxxghp/MoviePilot/graphs/contributors">
|
||||
<img src="https://contrib.rocks/image?repo=jxxghp/MoviePilot" />
|
||||
</a>
|
||||
|
||||
@@ -2,7 +2,7 @@ from fastapi import APIRouter
|
||||
|
||||
from app.api.endpoints import login, user, site, message, webhook, subscribe, \
|
||||
media, douban, search, plugin, tmdb, history, system, download, dashboard, \
|
||||
filebrowser, transfer, mediaserver, bangumi
|
||||
local, transfer, mediaserver, bangumi, aliyun, u115
|
||||
|
||||
api_router = APIRouter()
|
||||
api_router.include_router(login.router, prefix="/login", tags=["login"])
|
||||
@@ -20,8 +20,9 @@ api_router.include_router(system.router, prefix="/system", tags=["system"])
|
||||
api_router.include_router(plugin.router, prefix="/plugin", tags=["plugin"])
|
||||
api_router.include_router(download.router, prefix="/download", tags=["download"])
|
||||
api_router.include_router(dashboard.router, prefix="/dashboard", tags=["dashboard"])
|
||||
api_router.include_router(filebrowser.router, prefix="/filebrowser", tags=["filebrowser"])
|
||||
api_router.include_router(local.router, prefix="/local", tags=["local"])
|
||||
api_router.include_router(transfer.router, prefix="/transfer", tags=["transfer"])
|
||||
api_router.include_router(mediaserver.router, prefix="/mediaserver", tags=["mediaserver"])
|
||||
api_router.include_router(bangumi.router, prefix="/bangumi", tags=["bangumi"])
|
||||
|
||||
api_router.include_router(aliyun.router, prefix="/aliyun", tags=["aliyun"])
|
||||
api_router.include_router(u115.router, prefix="/u115", tags=["115"])
|
||||
|
||||
198
app/api/endpoints/aliyun.py
Normal file
198
app/api/endpoints/aliyun.py
Normal file
@@ -0,0 +1,198 @@
|
||||
from pathlib import Path
|
||||
from typing import Any, List
|
||||
|
||||
from fastapi import APIRouter, Depends, HTTPException
|
||||
from starlette.responses import Response
|
||||
|
||||
from app import schemas
|
||||
from app.chain.transfer import TransferChain
|
||||
from app.core.config import settings
|
||||
from app.core.metainfo import MetaInfoPath
|
||||
from app.core.security import verify_token, verify_uri_token
|
||||
from app.helper.aliyun import AliyunHelper
|
||||
from app.helper.progress import ProgressHelper
|
||||
from app.schemas.types import ProgressKey
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
|
||||
@router.get("/qrcode", summary="生成二维码内容", response_model=schemas.Response)
|
||||
def qrcode(_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||
"""
|
||||
生成二维码
|
||||
"""
|
||||
qrcode_data, errmsg = AliyunHelper().generate_qrcode()
|
||||
if qrcode_data:
|
||||
return schemas.Response(success=True, data=qrcode_data)
|
||||
return schemas.Response(success=False, message=errmsg)
|
||||
|
||||
|
||||
@router.get("/check", summary="二维码登录确认", response_model=schemas.Response)
|
||||
def check(ck: str, t: str, _: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||
"""
|
||||
二维码登录确认
|
||||
"""
|
||||
if not ck or not t:
|
||||
return schemas.Response(success=False, message="参数错误")
|
||||
data, errmsg = AliyunHelper().check_login(ck, t)
|
||||
if data:
|
||||
return schemas.Response(success=True, data=data)
|
||||
return schemas.Response(success=False, message=errmsg)
|
||||
|
||||
|
||||
@router.get("/userinfo", summary="查询用户信息", response_model=schemas.Response)
|
||||
def userinfo(_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||
"""
|
||||
查询用户信息
|
||||
"""
|
||||
aliyunhelper = AliyunHelper()
|
||||
# 查询用户信息返回
|
||||
info = aliyunhelper.user_info()
|
||||
if info:
|
||||
return schemas.Response(success=True, data=info)
|
||||
return schemas.Response(success=False)
|
||||
|
||||
|
||||
@router.post("/list", summary="所有目录和文件(阿里云盘)", response_model=List[schemas.FileItem])
|
||||
def list_aliyun(fileitem: schemas.FileItem,
|
||||
sort: str = 'updated_at',
|
||||
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||
"""
|
||||
查询当前目录下所有目录和文件
|
||||
:param fileitem: 文件夹信息
|
||||
:param sort: 排序方式,name:按名称排序,time:按修改时间排序
|
||||
:param _: token
|
||||
:return: 所有目录和文件
|
||||
"""
|
||||
if not fileitem.fileid:
|
||||
return []
|
||||
if not fileitem.path:
|
||||
path = "/"
|
||||
else:
|
||||
path = fileitem.path
|
||||
if sort == "time":
|
||||
sort = "updated_at"
|
||||
if fileitem.type == "file":
|
||||
fileitem = AliyunHelper().detail(drive_id=fileitem.drive_id, file_id=fileitem.fileid, path=path)
|
||||
if fileitem:
|
||||
return [fileitem]
|
||||
return []
|
||||
return AliyunHelper().list(drive_id=fileitem.drive_id,
|
||||
parent_file_id=fileitem.fileid,
|
||||
path=path,
|
||||
order_by=sort)
|
||||
|
||||
|
||||
@router.post("/mkdir", summary="创建目录(阿里云盘)", response_model=schemas.Response)
|
||||
def mkdir_aliyun(fileitem: schemas.FileItem,
|
||||
name: str,
|
||||
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||
"""
|
||||
创建目录
|
||||
"""
|
||||
if not fileitem.fileid or not name:
|
||||
return schemas.Response(success=False)
|
||||
result = AliyunHelper().create_folder(drive_id=fileitem.drive_id, parent_file_id=fileitem.fileid,
|
||||
name=name, path=fileitem.path)
|
||||
if result:
|
||||
return schemas.Response(success=True)
|
||||
return schemas.Response(success=False)
|
||||
|
||||
|
||||
@router.post("/delete", summary="删除文件或目录(阿里云盘)", response_model=schemas.Response)
|
||||
def delete_aliyun(fileitem: schemas.FileItem,
|
||||
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||
"""
|
||||
删除文件或目录
|
||||
"""
|
||||
if not fileitem.fileid:
|
||||
return schemas.Response(success=False)
|
||||
result = AliyunHelper().delete(drive_id=fileitem.drive_id, file_id=fileitem.fileid)
|
||||
if result:
|
||||
return schemas.Response(success=True)
|
||||
return schemas.Response(success=False)
|
||||
|
||||
|
||||
@router.get("/download", summary="下载文件(阿里云盘)")
|
||||
def download_aliyun(fileid: str,
|
||||
drive_id: str = None,
|
||||
_: schemas.TokenPayload = Depends(verify_uri_token)) -> Any:
|
||||
"""
|
||||
下载文件或目录
|
||||
"""
|
||||
if not fileid:
|
||||
return schemas.Response(success=False)
|
||||
url = AliyunHelper().download(drive_id=drive_id, file_id=fileid)
|
||||
if url:
|
||||
# 重定向
|
||||
return Response(status_code=302, headers={"Location": url})
|
||||
raise HTTPException(status_code=500, detail="下载文件出错")
|
||||
|
||||
|
||||
@router.post("/rename", summary="重命名文件或目录(阿里云盘)", response_model=schemas.Response)
|
||||
def rename_aliyun(fileitem: schemas.FileItem,
|
||||
new_name: str,
|
||||
recursive: bool = False,
|
||||
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||
"""
|
||||
重命名文件或目录
|
||||
"""
|
||||
if not fileitem.fileid or not new_name:
|
||||
return schemas.Response(success=False)
|
||||
result = AliyunHelper().rename(drive_id=fileitem.drive_id, file_id=fileitem.fileid, name=new_name)
|
||||
if result:
|
||||
if recursive:
|
||||
transferchain = TransferChain()
|
||||
media_exts = settings.RMT_MEDIAEXT + settings.RMT_SUBEXT + settings.RMT_AUDIO_TRACK_EXT
|
||||
# 递归修改目录内文件(智能识别命名)
|
||||
sub_files: List[schemas.FileItem] = list_aliyun(fileitem=fileitem)
|
||||
if sub_files:
|
||||
# 开始进度
|
||||
progress = ProgressHelper()
|
||||
progress.start(ProgressKey.BatchRename)
|
||||
total = len(sub_files)
|
||||
handled = 0
|
||||
for sub_file in sub_files:
|
||||
handled += 1
|
||||
progress.update(value=handled / total * 100,
|
||||
text=f"正在处理 {sub_file.name} ...",
|
||||
key=ProgressKey.BatchRename)
|
||||
if sub_file.type == "dir":
|
||||
continue
|
||||
if not sub_file.extension:
|
||||
continue
|
||||
if f".{sub_file.extension.lower()}" not in media_exts:
|
||||
continue
|
||||
sub_path = Path(f"{fileitem.path}{sub_file.name}")
|
||||
meta = MetaInfoPath(sub_path)
|
||||
mediainfo = transferchain.recognize_media(meta)
|
||||
if not mediainfo:
|
||||
progress.end(ProgressKey.BatchRename)
|
||||
return schemas.Response(success=False, message=f"{sub_path.name} 未识别到媒体信息")
|
||||
new_path = transferchain.recommend_name(meta=meta, mediainfo=mediainfo)
|
||||
if not new_path:
|
||||
progress.end(ProgressKey.BatchRename)
|
||||
return schemas.Response(success=False, message=f"{sub_path.name} 未识别到新名称")
|
||||
ret: schemas.Response = rename_aliyun(fileitem=sub_file,
|
||||
new_name=Path(new_path).name,
|
||||
recursive=False)
|
||||
if not ret.success:
|
||||
progress.end(ProgressKey.BatchRename)
|
||||
return schemas.Response(success=False, message=f"{sub_path.name} 重命名失败!")
|
||||
progress.end(ProgressKey.BatchRename)
|
||||
return schemas.Response(success=True)
|
||||
return schemas.Response(success=False)
|
||||
|
||||
|
||||
@router.get("/image", summary="读取图片(阿里云盘)", response_model=schemas.Response)
|
||||
def image_aliyun(fileid: str, drive_id: str = None, _: schemas.TokenPayload = Depends(verify_uri_token)) -> Any:
|
||||
"""
|
||||
读取图片
|
||||
"""
|
||||
if not fileid:
|
||||
return schemas.Response(success=False)
|
||||
url = AliyunHelper().download(drive_id=drive_id, file_id=fileid)
|
||||
if url:
|
||||
# 重定向
|
||||
return Response(status_code=302, headers={"Location": url})
|
||||
raise HTTPException(status_code=500, detail="下载图片出错")
|
||||
@@ -6,7 +6,7 @@ from sqlalchemy.orm import Session
|
||||
|
||||
from app import schemas
|
||||
from app.chain.dashboard import DashboardChain
|
||||
from app.core.security import verify_token, verify_uri_token
|
||||
from app.core.security import verify_token, verify_apitoken
|
||||
from app.db import get_db
|
||||
from app.db.models.transferhistory import TransferHistory
|
||||
from app.helper.directory import DirectoryHelper
|
||||
@@ -36,7 +36,7 @@ def statistic(_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||
|
||||
|
||||
@router.get("/statistic2", summary="媒体数量统计(API_TOKEN)", response_model=schemas.Statistic)
|
||||
def statistic2(_: str = Depends(verify_uri_token)) -> Any:
|
||||
def statistic2(_: str = Depends(verify_apitoken)) -> Any:
|
||||
"""
|
||||
查询媒体数量统计信息 API_TOKEN认证(?token=xxx)
|
||||
"""
|
||||
@@ -57,7 +57,7 @@ def storage(_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||
|
||||
|
||||
@router.get("/storage2", summary="存储空间(API_TOKEN)", response_model=schemas.Storage)
|
||||
def storage2(_: str = Depends(verify_uri_token)) -> Any:
|
||||
def storage2(_: str = Depends(verify_apitoken)) -> Any:
|
||||
"""
|
||||
查询存储空间信息 API_TOKEN认证(?token=xxx)
|
||||
"""
|
||||
@@ -94,7 +94,7 @@ def downloader(_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||
|
||||
|
||||
@router.get("/downloader2", summary="下载器信息(API_TOKEN)", response_model=schemas.DownloaderInfo)
|
||||
def downloader2(_: str = Depends(verify_uri_token)) -> Any:
|
||||
def downloader2(_: str = Depends(verify_apitoken)) -> Any:
|
||||
"""
|
||||
查询下载器信息 API_TOKEN认证(?token=xxx)
|
||||
"""
|
||||
@@ -110,7 +110,7 @@ def schedule(_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||
|
||||
|
||||
@router.get("/schedule2", summary="后台服务(API_TOKEN)", response_model=List[schemas.ScheduleInfo])
|
||||
def schedule2(_: str = Depends(verify_uri_token)) -> Any:
|
||||
def schedule2(_: str = Depends(verify_apitoken)) -> Any:
|
||||
"""
|
||||
查询下载器信息 API_TOKEN认证(?token=xxx)
|
||||
"""
|
||||
@@ -136,7 +136,7 @@ def cpu(_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||
|
||||
|
||||
@router.get("/cpu2", summary="获取当前CPU使用率(API_TOKEN)", response_model=int)
|
||||
def cpu2(_: str = Depends(verify_uri_token)) -> Any:
|
||||
def cpu2(_: str = Depends(verify_apitoken)) -> Any:
|
||||
"""
|
||||
获取当前CPU使用率 API_TOKEN认证(?token=xxx)
|
||||
"""
|
||||
@@ -152,7 +152,7 @@ def memory(_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||
|
||||
|
||||
@router.get("/memory2", summary="获取当前内存使用量和使用率(API_TOKEN)", response_model=List[int])
|
||||
def memory2(_: str = Depends(verify_uri_token)) -> Any:
|
||||
def memory2(_: str = Depends(verify_apitoken)) -> Any:
|
||||
"""
|
||||
获取当前内存使用率 API_TOKEN认证(?token=xxx)
|
||||
"""
|
||||
|
||||
@@ -2,13 +2,17 @@ import shutil
|
||||
from pathlib import Path
|
||||
from typing import Any, List
|
||||
|
||||
from fastapi import APIRouter, Depends
|
||||
from fastapi import APIRouter, Depends, HTTPException
|
||||
from starlette.responses import FileResponse, Response
|
||||
|
||||
from app import schemas
|
||||
from app.chain.transfer import TransferChain
|
||||
from app.core.config import settings
|
||||
from app.core.security import verify_token
|
||||
from app.core.metainfo import MetaInfoPath
|
||||
from app.core.security import verify_token, verify_uri_token
|
||||
from app.helper.progress import ProgressHelper
|
||||
from app.log import logger
|
||||
from app.schemas.types import ProgressKey
|
||||
from app.utils.system import SystemUtils
|
||||
|
||||
router = APIRouter()
|
||||
@@ -16,20 +20,21 @@ router = APIRouter()
|
||||
IMAGE_TYPES = [".jpg", ".png", ".gif", ".bmp", ".jpeg", ".webp"]
|
||||
|
||||
|
||||
@router.get("/list", summary="所有目录和文件", response_model=List[schemas.FileItem])
|
||||
def list_path(path: str,
|
||||
sort: str = 'time',
|
||||
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||
@router.post("/list", summary="所有目录和文件(本地)", response_model=List[schemas.FileItem])
|
||||
def list_local(fileitem: schemas.FileItem,
|
||||
sort: str = 'time',
|
||||
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||
"""
|
||||
查询当前目录下所有目录和文件
|
||||
:param path: 目录路径
|
||||
:param fileitem: 文件项
|
||||
:param sort: 排序方式,name:按名称排序,time:按修改时间排序
|
||||
:param _: token
|
||||
:return: 所有目录和文件
|
||||
"""
|
||||
# 返回结果
|
||||
ret_items = []
|
||||
if not path or path == "/":
|
||||
path = fileitem.path
|
||||
if not fileitem.path or fileitem.path == "/":
|
||||
if SystemUtils.is_windows():
|
||||
partitions = SystemUtils.get_windows_drives() or ["C:/"]
|
||||
for partition in partitions:
|
||||
@@ -43,7 +48,9 @@ def list_path(path: str,
|
||||
else:
|
||||
path = "/"
|
||||
else:
|
||||
if not SystemUtils.is_windows() and not path.startswith("/"):
|
||||
if SystemUtils.is_windows():
|
||||
path = path.lstrip("/")
|
||||
elif not path.startswith("/"):
|
||||
path = "/" + path
|
||||
|
||||
# 遍历目录
|
||||
@@ -98,8 +105,8 @@ def list_path(path: str,
|
||||
return ret_items
|
||||
|
||||
|
||||
@router.get("/listdir", summary="所有目录(不含文件)", response_model=List[schemas.FileItem])
|
||||
def list_dir(path: str, _: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||
@router.get("/listdir", summary="所有目录(本地,不含文件)", response_model=List[schemas.FileItem])
|
||||
def list_local_dir(path: str, _: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||
"""
|
||||
查询当前目录下所有目录
|
||||
"""
|
||||
@@ -139,28 +146,30 @@ def list_dir(path: str, _: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||
return ret_items
|
||||
|
||||
|
||||
@router.get("/mkdir", summary="创建目录", response_model=schemas.Response)
|
||||
def mkdir(path: str, _: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||
@router.post("/mkdir", summary="创建目录(本地)", response_model=schemas.Response)
|
||||
def mkdir_local(fileitem: schemas.FileItem,
|
||||
name: str,
|
||||
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||
"""
|
||||
创建目录
|
||||
"""
|
||||
if not path:
|
||||
if not fileitem.path:
|
||||
return schemas.Response(success=False)
|
||||
path_obj = Path(path)
|
||||
path_obj = Path(fileitem.path) / name
|
||||
if path_obj.exists():
|
||||
return schemas.Response(success=False)
|
||||
path_obj.mkdir(parents=True, exist_ok=True)
|
||||
return schemas.Response(success=True)
|
||||
|
||||
|
||||
@router.get("/delete", summary="删除文件或目录", response_model=schemas.Response)
|
||||
def delete(path: str, _: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||
@router.post("/delete", summary="删除文件或目录(本地)", response_model=schemas.Response)
|
||||
def delete_local(fileitem: schemas.FileItem, _: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||
"""
|
||||
删除文件或目录
|
||||
"""
|
||||
if not path:
|
||||
if not fileitem.path:
|
||||
return schemas.Response(success=False)
|
||||
path_obj = Path(path)
|
||||
path_obj = Path(fileitem.path)
|
||||
if not path_obj.exists():
|
||||
return schemas.Response(success=True)
|
||||
if path_obj.is_file():
|
||||
@@ -170,19 +179,16 @@ def delete(path: str, _: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||
return schemas.Response(success=True)
|
||||
|
||||
|
||||
@router.get("/download", summary="下载文件或目录")
|
||||
def download(path: str, token: str) -> Any:
|
||||
@router.get("/download", summary="下载文件(本地)")
|
||||
def download_local(path: str, _: schemas.TokenPayload = Depends(verify_uri_token)) -> Any:
|
||||
"""
|
||||
下载文件或目录
|
||||
"""
|
||||
if not path:
|
||||
return schemas.Response(success=False)
|
||||
# 认证token
|
||||
if not verify_token(token):
|
||||
return None
|
||||
path_obj = Path(path)
|
||||
if not path_obj.exists():
|
||||
return schemas.Response(success=False)
|
||||
raise HTTPException(status_code=404, detail="文件不存在")
|
||||
if path_obj.is_file():
|
||||
# 做为文件流式下载
|
||||
return FileResponse(path_obj)
|
||||
@@ -195,30 +201,67 @@ def download(path: str, token: str) -> Any:
|
||||
return reponse
|
||||
|
||||
|
||||
@router.get("/rename", summary="重命名文件或目录", response_model=schemas.Response)
|
||||
def rename(path: str, new_name: str, _: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||
@router.post("/rename", summary="重命名文件或目录(本地)", response_model=schemas.Response)
|
||||
def rename_local(fileitem: schemas.FileItem,
|
||||
new_name: str,
|
||||
recursive: bool = False,
|
||||
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||
"""
|
||||
重命名文件或目录
|
||||
"""
|
||||
if not path or not new_name:
|
||||
if not fileitem.path or not new_name:
|
||||
return schemas.Response(success=False)
|
||||
path_obj = Path(path)
|
||||
path_obj = Path(fileitem.path)
|
||||
if not path_obj.exists():
|
||||
return schemas.Response(success=False)
|
||||
path_obj.rename(path_obj.parent / new_name)
|
||||
if recursive:
|
||||
transferchain = TransferChain()
|
||||
media_exts = settings.RMT_MEDIAEXT + settings.RMT_SUBEXT + settings.RMT_AUDIO_TRACK_EXT
|
||||
# 递归修改目录内文件(智能识别命名)
|
||||
sub_files: List[schemas.FileItem] = list_local(fileitem=fileitem)
|
||||
if sub_files:
|
||||
# 开始进度
|
||||
progress = ProgressHelper()
|
||||
progress.start(ProgressKey.BatchRename)
|
||||
total = len(sub_files)
|
||||
handled = 0
|
||||
for sub_file in sub_files:
|
||||
handled += 1
|
||||
progress.update(value=handled / total * 100,
|
||||
text=f"正在处理 {sub_file.name} ...",
|
||||
key=ProgressKey.BatchRename)
|
||||
if sub_file.type == "dir":
|
||||
continue
|
||||
if not sub_file.extension:
|
||||
continue
|
||||
if f".{sub_file.extension.lower()}" not in media_exts:
|
||||
continue
|
||||
sub_path = Path(sub_file.path)
|
||||
meta = MetaInfoPath(sub_path)
|
||||
mediainfo = transferchain.recognize_media(meta)
|
||||
if not mediainfo:
|
||||
progress.end(ProgressKey.BatchRename)
|
||||
return schemas.Response(success=False, message=f"{sub_path.name} 未识别到媒体信息")
|
||||
new_path = transferchain.recommend_name(meta=meta, mediainfo=mediainfo)
|
||||
if not new_path:
|
||||
progress.end(ProgressKey.BatchRename)
|
||||
return schemas.Response(success=False, message=f"{sub_path.name} 未识别到新名称")
|
||||
ret: schemas.Response = rename_local(fileitem, new_name=Path(new_path).name, recursive=False)
|
||||
if not ret.success:
|
||||
progress.end(ProgressKey.BatchRename)
|
||||
return schemas.Response(success=False, message=f"{sub_path.name} 重命名失败!")
|
||||
progress.end(ProgressKey.BatchRename)
|
||||
return schemas.Response(success=True)
|
||||
|
||||
|
||||
@router.get("/image", summary="读取图片")
|
||||
def image(path: str, token: str) -> Any:
|
||||
@router.get("/image", summary="读取图片(本地)")
|
||||
def image_local(path: str, _: schemas.TokenPayload = Depends(verify_uri_token)) -> Any:
|
||||
"""
|
||||
读取图片
|
||||
"""
|
||||
if not path:
|
||||
return None
|
||||
# 认证token
|
||||
if not verify_token(token):
|
||||
return None
|
||||
path_obj = Path(path)
|
||||
if not path_obj.exists():
|
||||
return None
|
||||
@@ -226,5 +269,5 @@ def image(path: str, token: str) -> Any:
|
||||
return None
|
||||
# 判断是否图片文件
|
||||
if path_obj.suffix.lower() not in IMAGE_TYPES:
|
||||
return None
|
||||
raise HTTPException(status_code=500, detail="图片读取出错")
|
||||
return Response(content=path_obj.read_bytes(), media_type="image/jpeg")
|
||||
@@ -1,5 +1,5 @@
|
||||
from datetime import timedelta
|
||||
from typing import Any
|
||||
from typing import Any, List
|
||||
|
||||
from fastapi import APIRouter, Depends, HTTPException, Form
|
||||
from fastapi.security import OAuth2PasswordRequestForm
|
||||
@@ -13,6 +13,7 @@ from app.core.config import settings
|
||||
from app.core.security import get_password_hash
|
||||
from app.db import get_db
|
||||
from app.db.models.user import User
|
||||
from app.helper.sites import SitesHelper
|
||||
from app.log import logger
|
||||
from app.utils.web import WebUtils
|
||||
|
||||
@@ -21,9 +22,9 @@ router = APIRouter()
|
||||
|
||||
@router.post("/access-token", summary="获取token", response_model=schemas.Token)
|
||||
async def login_access_token(
|
||||
db: Session = Depends(get_db),
|
||||
form_data: OAuth2PasswordRequestForm = Depends(),
|
||||
otp_password: str = Form(None)
|
||||
db: Session = Depends(get_db),
|
||||
form_data: OAuth2PasswordRequestForm = Depends(),
|
||||
otp_password: str = Form(None)
|
||||
) -> Any:
|
||||
"""
|
||||
获取认证Token
|
||||
@@ -58,17 +59,20 @@ async def login_access_token(
|
||||
elif user and not user.is_active:
|
||||
raise HTTPException(status_code=403, detail="用户未启用")
|
||||
logger.info(f"用户 {user.name} 登录成功!")
|
||||
level = SitesHelper().auth_level
|
||||
return schemas.Token(
|
||||
access_token=security.create_access_token(
|
||||
userid=user.id,
|
||||
username=user.name,
|
||||
super_user=user.is_superuser,
|
||||
expires_delta=timedelta(minutes=settings.ACCESS_TOKEN_EXPIRE_MINUTES)
|
||||
expires_delta=timedelta(minutes=settings.ACCESS_TOKEN_EXPIRE_MINUTES),
|
||||
level=level
|
||||
),
|
||||
token_type="bearer",
|
||||
super_user=user.is_superuser,
|
||||
user_name=user.name,
|
||||
avatar=user.avatar
|
||||
avatar=user.avatar,
|
||||
level=level
|
||||
)
|
||||
|
||||
|
||||
@@ -78,18 +82,9 @@ def wallpaper() -> Any:
|
||||
获取登录页面电影海报
|
||||
"""
|
||||
if settings.WALLPAPER == "tmdb":
|
||||
return tmdb_wallpaper()
|
||||
elif settings.WALLPAPER == "bing":
|
||||
return bing_wallpaper()
|
||||
return schemas.Response(success=False)
|
||||
|
||||
|
||||
@router.get("/bing", summary="Bing每日壁纸", response_model=schemas.Response)
|
||||
def bing_wallpaper() -> Any:
|
||||
"""
|
||||
获取Bing每日壁纸
|
||||
"""
|
||||
url = WebUtils.get_bing_wallpaper()
|
||||
url = TmdbChain().get_random_wallpager()
|
||||
else:
|
||||
url = WebUtils.get_bing_wallpaper()
|
||||
if url:
|
||||
return schemas.Response(
|
||||
success=True,
|
||||
@@ -98,15 +93,12 @@ def bing_wallpaper() -> Any:
|
||||
return schemas.Response(success=False)
|
||||
|
||||
|
||||
@router.get("/tmdb", summary="TMDB电影海报", response_model=schemas.Response)
|
||||
def tmdb_wallpaper() -> Any:
|
||||
@router.get("/wallpapers", summary="登录页面电影海报列表", response_model=List[str])
|
||||
def wallpapers() -> Any:
|
||||
"""
|
||||
获取TMDB电影海报
|
||||
获取登录页面电影海报
|
||||
"""
|
||||
wallpager = TmdbChain().get_random_wallpager()
|
||||
if wallpager:
|
||||
return schemas.Response(
|
||||
success=True,
|
||||
message=wallpager
|
||||
)
|
||||
return schemas.Response(success=False)
|
||||
if settings.WALLPAPER == "tmdb":
|
||||
return TmdbChain().get_trending_wallpapers()
|
||||
else:
|
||||
return WebUtils.get_bing_wallpapers()
|
||||
|
||||
@@ -8,7 +8,7 @@ from app.chain.media import MediaChain
|
||||
from app.core.config import settings
|
||||
from app.core.context import Context
|
||||
from app.core.metainfo import MetaInfo, MetaInfoPath
|
||||
from app.core.security import verify_token, verify_uri_token
|
||||
from app.core.security import verify_token, verify_apitoken
|
||||
from app.schemas import MediaType
|
||||
|
||||
router = APIRouter()
|
||||
@@ -32,7 +32,7 @@ def recognize(title: str,
|
||||
@router.get("/recognize2", summary="识别种子媒体信息(API_TOKEN)", response_model=schemas.Context)
|
||||
def recognize2(title: str,
|
||||
subtitle: str = None,
|
||||
_: str = Depends(verify_uri_token)) -> Any:
|
||||
_: str = Depends(verify_apitoken)) -> Any:
|
||||
"""
|
||||
根据标题、副标题识别媒体信息 API_TOKEN认证(?token=xxx)
|
||||
"""
|
||||
@@ -55,7 +55,7 @@ def recognize_file(path: str,
|
||||
|
||||
@router.get("/recognize_file2", summary="识别文件媒体信息(API_TOKEN)", response_model=schemas.Context)
|
||||
def recognize_file2(path: str,
|
||||
_: str = Depends(verify_uri_token)) -> Any:
|
||||
_: str = Depends(verify_apitoken)) -> Any:
|
||||
"""
|
||||
根据文件路径识别媒体信息 API_TOKEN认证(?token=xxx)
|
||||
"""
|
||||
@@ -97,26 +97,31 @@ def search(title: str,
|
||||
return result[(page - 1) * count:page * count]
|
||||
|
||||
|
||||
@router.get("/scrape", summary="刮削媒体信息", response_model=schemas.Response)
|
||||
def scrape(path: str,
|
||||
@router.post("/scrape/{storage}", summary="刮削媒体信息", response_model=schemas.Response)
|
||||
def scrape(fileitem: schemas.FileItem,
|
||||
storage: str = "local",
|
||||
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||
"""
|
||||
刮削媒体信息
|
||||
"""
|
||||
if not path:
|
||||
if not fileitem or not fileitem.path:
|
||||
return schemas.Response(success=False, message="刮削路径无效")
|
||||
scrape_path = Path(path)
|
||||
if not scrape_path.exists():
|
||||
return schemas.Response(success=False, message="刮削路径不存在")
|
||||
# 识别
|
||||
chain = MediaChain()
|
||||
# 识别媒体信息
|
||||
scrape_path = Path(fileitem.path)
|
||||
meta = MetaInfoPath(scrape_path)
|
||||
mediainfo = chain.recognize_media(meta)
|
||||
mediainfo = chain.recognize_by_meta(meta)
|
||||
if not media_info:
|
||||
return schemas.Response(success=False, message="刮削失败,无法识别媒体信息")
|
||||
# 刮削
|
||||
chain.scrape_metadata(path=scrape_path, mediainfo=mediainfo, transfer_type=settings.TRANSFER_TYPE)
|
||||
return schemas.Response(success=True, message="刮削完成")
|
||||
if storage == "local":
|
||||
if not scrape_path.exists():
|
||||
return schemas.Response(success=False, message="刮削路径不存在")
|
||||
else:
|
||||
if not fileitem.fileid:
|
||||
return schemas.Response(success=False, message="刮削文件ID无效")
|
||||
# 手动刮削
|
||||
chain.manual_scrape(storage=storage, fileitem=fileitem, meta=meta, mediainfo=mediainfo)
|
||||
return schemas.Response(success=True, message=f"{fileitem.path} 刮削完成")
|
||||
|
||||
|
||||
@router.get("/category", summary="查询自动分类配置", response_model=dict)
|
||||
|
||||
@@ -164,8 +164,10 @@ def subscribe(subscription: schemas.Subscription, _: schemas.TokenPayload = Depe
|
||||
"""
|
||||
客户端webpush通知订阅
|
||||
"""
|
||||
global_vars.push_subscription(subscription.dict())
|
||||
logger.debug(f"通知订阅成功: {subscription.dict()}")
|
||||
subinfo = subscription.dict()
|
||||
if subinfo not in global_vars.get_subscriptions():
|
||||
global_vars.push_subscription(subinfo)
|
||||
logger.debug(f"通知订阅成功: {subinfo}")
|
||||
return schemas.Response(success=True)
|
||||
|
||||
|
||||
|
||||
@@ -108,13 +108,19 @@ def install(plugin_id: str,
|
||||
"""
|
||||
# 已安装插件
|
||||
install_plugins = SystemConfigOper().get(SystemConfigKey.UserInstalledPlugins) or []
|
||||
# 如果是非本地括件,或者强制安装时,则需要下载安装
|
||||
if repo_url and (force or plugin_id not in PluginManager().get_plugin_ids()):
|
||||
# 下载安装
|
||||
state, msg = PluginHelper().install(pid=plugin_id, repo_url=repo_url)
|
||||
if not state:
|
||||
# 安装失败
|
||||
return schemas.Response(success=False, message=msg)
|
||||
# 首先检查插件是否已经存在,并且是否强制安装,否则只进行安装统计
|
||||
if not force and plugin_id in PluginManager().get_plugin_ids():
|
||||
PluginHelper().install_reg(pid=plugin_id)
|
||||
else:
|
||||
# 插件不存在或需要强制安装,下载安装并注册插件
|
||||
if repo_url:
|
||||
state, msg = PluginHelper().install(pid=plugin_id, repo_url=repo_url)
|
||||
# 安装失败则直接响应
|
||||
if not state:
|
||||
return schemas.Response(success=False, message=msg)
|
||||
else:
|
||||
# repo_url 为空时,也直接响应
|
||||
return schemas.Response(success=False, message="没有传入仓库地址,无法正确安装插件,请检查配置")
|
||||
# 安装插件
|
||||
if plugin_id not in install_plugins:
|
||||
install_plugins.append(plugin_id)
|
||||
@@ -186,10 +192,7 @@ def reset_plugin(plugin_id: str, _: schemas.TokenPayload = Depends(verify_token)
|
||||
# 删除插件所有数据
|
||||
PluginManager().delete_plugin_data(plugin_id)
|
||||
# 重新生效插件
|
||||
PluginManager().init_plugin(plugin_id, {
|
||||
"enabled": False,
|
||||
"enable": False
|
||||
})
|
||||
PluginManager().reload_plugin(plugin_id)
|
||||
# 注册插件服务
|
||||
Scheduler().update_plugin_job(plugin_id)
|
||||
# 注册插件API
|
||||
|
||||
@@ -52,6 +52,8 @@ def search_by_id(mediaid: str,
|
||||
# 通过豆瓣ID识别TMDBID
|
||||
tmdbinfo = MediaChain().get_tmdbinfo_by_doubanid(doubanid=doubanid, mtype=mtype)
|
||||
if tmdbinfo:
|
||||
if tmdbinfo.get('season') and not season:
|
||||
season = tmdbinfo.get('season')
|
||||
torrents = SearchChain().search_by_id(tmdbid=tmdbinfo.get("id"),
|
||||
mtype=mtype, area=area, season=season)
|
||||
else:
|
||||
|
||||
@@ -10,7 +10,7 @@ from app.chain.subscribe import SubscribeChain
|
||||
from app.core.config import settings
|
||||
from app.core.context import MediaInfo
|
||||
from app.core.metainfo import MetaInfo
|
||||
from app.core.security import verify_token, verify_uri_token
|
||||
from app.core.security import verify_token, verify_apitoken
|
||||
from app.db import get_db
|
||||
from app.db.models.subscribe import Subscribe
|
||||
from app.db.models.subscribehistory import SubscribeHistory
|
||||
@@ -52,7 +52,7 @@ def read_subscribes(
|
||||
|
||||
|
||||
@router.get("/list", summary="查询所有订阅(API_TOKEN)", response_model=List[schemas.Subscribe])
|
||||
def list_subscribes(_: str = Depends(verify_uri_token)) -> Any:
|
||||
def list_subscribes(_: str = Depends(verify_apitoken)) -> Any:
|
||||
"""
|
||||
查询所有订阅 API_TOKEN认证(?token=xxx)
|
||||
"""
|
||||
@@ -200,7 +200,8 @@ def reset_subscribes(
|
||||
subscribe = Subscribe.get(db, subid)
|
||||
if subscribe:
|
||||
subscribe.update(db, {
|
||||
"note": None
|
||||
"note": "",
|
||||
"lack_episode": subscribe.total_episode
|
||||
})
|
||||
return schemas.Response(success=True)
|
||||
return schemas.Response(success=False, message="订阅不存在")
|
||||
|
||||
@@ -5,8 +5,10 @@ from fastapi import APIRouter, Depends
|
||||
from sqlalchemy.orm import Session
|
||||
|
||||
from app import schemas
|
||||
from app.chain.media import MediaChain
|
||||
from app.chain.transfer import TransferChain
|
||||
from app.core.security import verify_token, verify_uri_token
|
||||
from app.core.metainfo import MetaInfoPath
|
||||
from app.core.security import verify_token, verify_apitoken
|
||||
from app.db import get_db
|
||||
from app.db.models.transferhistory import TransferHistory
|
||||
from app.schemas import MediaType
|
||||
@@ -14,8 +16,41 @@ from app.schemas import MediaType
|
||||
router = APIRouter()
|
||||
|
||||
|
||||
@router.get("/name", summary="查询整理后的名称", response_model=schemas.Response)
|
||||
def query_name(path: str, filetype: str,
|
||||
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||
"""
|
||||
查询整理后的名称
|
||||
:param path: 文件路径
|
||||
:param filetype: 文件类型
|
||||
:param _: Token校验
|
||||
"""
|
||||
meta = MetaInfoPath(Path(path))
|
||||
mediainfo = MediaChain().recognize_media(meta)
|
||||
if not mediainfo:
|
||||
return schemas.Response(success=False, message="未识别到媒体信息")
|
||||
new_path = TransferChain().recommend_name(meta=meta, mediainfo=mediainfo)
|
||||
if not new_path:
|
||||
return schemas.Response(success=False, message="未识别到新名称")
|
||||
if filetype == "dir":
|
||||
parents = Path(new_path).parents
|
||||
if len(parents) > 2:
|
||||
new_name = parents[1].name
|
||||
else:
|
||||
new_name = parents[0].name
|
||||
else:
|
||||
new_name = Path(new_path).name
|
||||
return schemas.Response(success=True, data={
|
||||
"name": new_name
|
||||
})
|
||||
|
||||
|
||||
@router.post("/manual", summary="手动转移", response_model=schemas.Response)
|
||||
def manual_transfer(path: str = None,
|
||||
def manual_transfer(storage: str = "local",
|
||||
path: str = None,
|
||||
drive_id: str = None,
|
||||
fileid: str = None,
|
||||
filetype: str = None,
|
||||
logid: int = None,
|
||||
target: str = None,
|
||||
tmdbid: int = None,
|
||||
@@ -33,7 +68,11 @@ def manual_transfer(path: str = None,
|
||||
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||
"""
|
||||
手动转移,文件或历史记录,支持自定义剧集识别格式
|
||||
:param storage: 存储类型:local/aliyun/u115
|
||||
:param path: 转移路径或文件
|
||||
:param drive_id: 云盘ID(网盘等)
|
||||
:param fileid: 文件ID(网盘等)
|
||||
:param filetype: 文件类型,dir/file
|
||||
:param logid: 转移历史记录ID
|
||||
:param target: 目标路径
|
||||
:param type_name: 媒体类型、电影/电视剧
|
||||
@@ -88,7 +127,11 @@ def manual_transfer(path: str = None,
|
||||
)
|
||||
# 开始转移
|
||||
state, errormsg = transfer.manual_transfer(
|
||||
storage=storage,
|
||||
in_path=in_path,
|
||||
drive_id=drive_id,
|
||||
fileid=fileid,
|
||||
filetype=filetype,
|
||||
target=target,
|
||||
tmdbid=tmdbid,
|
||||
doubanid=doubanid,
|
||||
@@ -110,7 +153,7 @@ def manual_transfer(path: str = None,
|
||||
|
||||
|
||||
@router.get("/now", summary="立即执行下载器文件整理", response_model=schemas.Response)
|
||||
def now(_: str = Depends(verify_uri_token)) -> Any:
|
||||
def now(_: str = Depends(verify_apitoken)) -> Any:
|
||||
"""
|
||||
立即执行下载器文件整理 API_TOKEN认证(?token=xxx)
|
||||
"""
|
||||
|
||||
213
app/api/endpoints/u115.py
Normal file
213
app/api/endpoints/u115.py
Normal file
@@ -0,0 +1,213 @@
|
||||
from pathlib import Path
|
||||
from typing import Any, List
|
||||
|
||||
from fastapi import APIRouter, Depends, HTTPException
|
||||
from starlette.responses import Response
|
||||
|
||||
from app import schemas
|
||||
from app.chain.transfer import TransferChain
|
||||
from app.core.config import settings
|
||||
from app.core.metainfo import MetaInfoPath
|
||||
from app.core.security import verify_token, verify_uri_token
|
||||
from app.helper.progress import ProgressHelper
|
||||
from app.helper.u115 import U115Helper
|
||||
from app.schemas.types import ProgressKey
|
||||
from app.utils.http import RequestUtils
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
|
||||
@router.get("/qrcode", summary="生成二维码内容", response_model=schemas.Response)
|
||||
def qrcode(_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||
"""
|
||||
生成二维码
|
||||
"""
|
||||
qrcode_data = U115Helper().generate_qrcode()
|
||||
if qrcode_data:
|
||||
return schemas.Response(success=True, data={
|
||||
'codeContent': qrcode_data
|
||||
})
|
||||
return schemas.Response(success=False)
|
||||
|
||||
|
||||
@router.get("/check", summary="二维码登录确认", response_model=schemas.Response)
|
||||
def check(_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||
"""
|
||||
二维码登录确认
|
||||
"""
|
||||
data, errmsg = U115Helper().check_login()
|
||||
if data:
|
||||
return schemas.Response(success=True, data=data)
|
||||
return schemas.Response(success=False, message=errmsg)
|
||||
|
||||
|
||||
@router.get("/storage", summary="查询存储空间信息", response_model=schemas.Response)
|
||||
def storage(_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||
"""
|
||||
查询存储空间信息
|
||||
"""
|
||||
storage_info = U115Helper().storage()
|
||||
if storage_info:
|
||||
return schemas.Response(success=True, data={
|
||||
"total": storage_info[0],
|
||||
"used": storage_info[1]
|
||||
})
|
||||
return schemas.Response(success=False)
|
||||
|
||||
|
||||
@router.post("/list", summary="所有目录和文件(115网盘)", response_model=List[schemas.FileItem])
|
||||
def list_115(fileitem: schemas.FileItem,
|
||||
sort: str = 'updated_at',
|
||||
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||
"""
|
||||
查询当前目录下所有目录和文件
|
||||
:param fileitem: 文件项
|
||||
:param sort: 排序方式,name:按名称排序,time:按修改时间排序
|
||||
:param _: token
|
||||
:return: 所有目录和文件
|
||||
"""
|
||||
if not fileitem.fileid:
|
||||
return []
|
||||
if not fileitem.path:
|
||||
path = "/"
|
||||
else:
|
||||
path = fileitem.path
|
||||
if fileitem.fileid == "root":
|
||||
fileid = "0"
|
||||
else:
|
||||
fileid = fileitem.fileid
|
||||
if fileitem.type == "file":
|
||||
name = Path(path).name
|
||||
suffix = Path(name).suffix[1:]
|
||||
return [schemas.FileItem(
|
||||
fileid=fileid,
|
||||
type="file",
|
||||
path=path.rstrip('/'),
|
||||
name=name,
|
||||
extension=suffix,
|
||||
pickcode=fileitem.pickcode
|
||||
)]
|
||||
file_list = U115Helper().list(parent_file_id=fileid, path=path)
|
||||
if sort == "name":
|
||||
file_list.sort(key=lambda x: x.name)
|
||||
else:
|
||||
file_list.sort(key=lambda x: x.modify_time, reverse=True)
|
||||
return file_list
|
||||
|
||||
|
||||
@router.post("/mkdir", summary="创建目录(115网盘)", response_model=schemas.Response)
|
||||
def mkdir_115(fileitem: schemas.FileItem,
|
||||
name: str,
|
||||
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||
"""
|
||||
创建目录
|
||||
"""
|
||||
if not fileitem.fileid or not name:
|
||||
return schemas.Response(success=False)
|
||||
result = U115Helper().create_folder(parent_file_id=fileitem.fileid, name=name, path=fileitem.path)
|
||||
if result:
|
||||
return schemas.Response(success=True)
|
||||
return schemas.Response(success=False)
|
||||
|
||||
|
||||
@router.post("/delete", summary="删除文件或目录(115网盘)", response_model=schemas.Response)
|
||||
def delete_115(fileitem: schemas.FileItem,
|
||||
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||
"""
|
||||
删除文件或目录
|
||||
"""
|
||||
if not fileitem.fileid:
|
||||
return schemas.Response(success=False)
|
||||
result = U115Helper().delete(fileitem.fileid)
|
||||
if result:
|
||||
return schemas.Response(success=True)
|
||||
return schemas.Response(success=False)
|
||||
|
||||
|
||||
@router.get("/download", summary="下载文件(115网盘)")
|
||||
def download_115(pickcode: str,
|
||||
_: schemas.TokenPayload = Depends(verify_uri_token)) -> Any:
|
||||
"""
|
||||
下载文件或目录
|
||||
"""
|
||||
if not pickcode:
|
||||
return schemas.Response(success=False)
|
||||
ticket = U115Helper().download(pickcode)
|
||||
if ticket:
|
||||
# 请求数据,并以文件流的方式返回
|
||||
res = RequestUtils(headers=ticket.headers).get_res(ticket.url)
|
||||
if res:
|
||||
return Response(content=res.content, media_type="application/octet-stream")
|
||||
return schemas.Response(success=False)
|
||||
|
||||
|
||||
@router.post("/rename", summary="重命名文件或目录(115网盘)", response_model=schemas.Response)
|
||||
def rename_115(fileitem: schemas.FileItem,
|
||||
new_name: str,
|
||||
recursive: bool = False,
|
||||
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
|
||||
"""
|
||||
重命名文件或目录
|
||||
"""
|
||||
if not fileitem.fileid or not new_name:
|
||||
return schemas.Response(success=False)
|
||||
result = U115Helper().rename(fileitem.fileid, new_name)
|
||||
if result:
|
||||
if recursive:
|
||||
transferchain = TransferChain()
|
||||
media_exts = settings.RMT_MEDIAEXT + settings.RMT_SUBEXT + settings.RMT_AUDIO_TRACK_EXT
|
||||
# 递归修改目录内文件(智能识别命名)
|
||||
sub_files: List[schemas.FileItem] = list_115(fileitem)
|
||||
if sub_files:
|
||||
# 开始进度
|
||||
progress = ProgressHelper()
|
||||
progress.start(ProgressKey.BatchRename)
|
||||
total = len(sub_files)
|
||||
handled = 0
|
||||
for sub_file in sub_files:
|
||||
handled += 1
|
||||
progress.update(value=handled / total * 100,
|
||||
text=f"正在处理 {sub_file.name} ...",
|
||||
key=ProgressKey.BatchRename)
|
||||
if sub_file.type == "dir":
|
||||
continue
|
||||
if not sub_file.extension:
|
||||
continue
|
||||
if f".{sub_file.extension.lower()}" not in media_exts:
|
||||
continue
|
||||
sub_path = Path(f"{fileitem.path}{sub_file.name}")
|
||||
meta = MetaInfoPath(sub_path)
|
||||
mediainfo = transferchain.recognize_media(meta)
|
||||
if not mediainfo:
|
||||
progress.end(ProgressKey.BatchRename)
|
||||
return schemas.Response(success=False, message=f"{sub_path.name} 未识别到媒体信息")
|
||||
new_path = transferchain.recommend_name(meta=meta, mediainfo=mediainfo)
|
||||
if not new_path:
|
||||
progress.end(ProgressKey.BatchRename)
|
||||
return schemas.Response(success=False, message=f"{sub_path.name} 未识别到新名称")
|
||||
ret: schemas.Response = rename_115(fileitem=sub_file,
|
||||
new_name=Path(new_path).name,
|
||||
recursive=False)
|
||||
if not ret.success:
|
||||
progress.end(ProgressKey.BatchRename)
|
||||
return schemas.Response(success=False, message=f"{sub_path.name} 重命名失败!")
|
||||
progress.end(ProgressKey.BatchRename)
|
||||
return schemas.Response(success=True)
|
||||
return schemas.Response(success=False)
|
||||
|
||||
|
||||
@router.get("/image", summary="读取图片(115网盘)")
|
||||
def image_115(pickcode: str, _: schemas.TokenPayload = Depends(verify_uri_token)) -> Any:
|
||||
"""
|
||||
读取图片
|
||||
"""
|
||||
if not pickcode:
|
||||
return schemas.Response(success=False)
|
||||
ticket = U115Helper().download(pickcode)
|
||||
if ticket:
|
||||
# 请求数据,获取内容编码为图片base64返回
|
||||
res = RequestUtils(headers=ticket.headers).get_res(ticket.url)
|
||||
if res:
|
||||
content_type = res.headers.get("Content-Type")
|
||||
return Response(content=res.content, media_type=content_type)
|
||||
raise HTTPException(status_code=500, detail="下载图片出错")
|
||||
@@ -4,7 +4,7 @@ from fastapi import APIRouter, BackgroundTasks, Request, Depends
|
||||
|
||||
from app import schemas
|
||||
from app.chain.webhook import WebhookChain
|
||||
from app.core.security import verify_uri_token
|
||||
from app.core.security import verify_apitoken
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
@@ -19,7 +19,7 @@ def start_webhook_chain(body: Any, form: Any, args: Any):
|
||||
@router.post("/", summary="Webhook消息响应", response_model=schemas.Response)
|
||||
async def webhook_message(background_tasks: BackgroundTasks,
|
||||
request: Request,
|
||||
_: str = Depends(verify_uri_token)
|
||||
_: str = Depends(verify_apitoken)
|
||||
) -> Any:
|
||||
"""
|
||||
Webhook响应
|
||||
@@ -33,7 +33,7 @@ async def webhook_message(background_tasks: BackgroundTasks,
|
||||
|
||||
@router.get("/", summary="Webhook消息响应", response_model=schemas.Response)
|
||||
def webhook_message(background_tasks: BackgroundTasks,
|
||||
request: Request, _: str = Depends(verify_uri_token)) -> Any:
|
||||
request: Request, _: str = Depends(verify_apitoken)) -> Any:
|
||||
"""
|
||||
Webhook响应
|
||||
"""
|
||||
|
||||
@@ -7,7 +7,7 @@ from app import schemas
|
||||
from app.chain.media import MediaChain
|
||||
from app.chain.subscribe import SubscribeChain
|
||||
from app.core.metainfo import MetaInfo
|
||||
from app.core.security import verify_uri_apikey
|
||||
from app.core.security import verify_apikey
|
||||
from app.db import get_db
|
||||
from app.db.models.subscribe import Subscribe
|
||||
from app.schemas import RadarrMovie, SonarrSeries
|
||||
@@ -18,7 +18,7 @@ arr_router = APIRouter(tags=['servarr'])
|
||||
|
||||
|
||||
@arr_router.get("/system/status", summary="系统状态")
|
||||
def arr_system_status(_: str = Depends(verify_uri_apikey)) -> Any:
|
||||
def arr_system_status(_: str = Depends(verify_apikey)) -> Any:
|
||||
"""
|
||||
模拟Radarr、Sonarr系统状态
|
||||
"""
|
||||
@@ -72,7 +72,7 @@ def arr_system_status(_: str = Depends(verify_uri_apikey)) -> Any:
|
||||
|
||||
|
||||
@arr_router.get("/qualityProfile", summary="质量配置")
|
||||
def arr_qualityProfile(_: str = Depends(verify_uri_apikey)) -> Any:
|
||||
def arr_qualityProfile(_: str = Depends(verify_apikey)) -> Any:
|
||||
"""
|
||||
模拟Radarr、Sonarr质量配置
|
||||
"""
|
||||
@@ -113,7 +113,7 @@ def arr_qualityProfile(_: str = Depends(verify_uri_apikey)) -> Any:
|
||||
|
||||
|
||||
@arr_router.get("/rootfolder", summary="根目录")
|
||||
def arr_rootfolder(_: str = Depends(verify_uri_apikey)) -> Any:
|
||||
def arr_rootfolder(_: str = Depends(verify_apikey)) -> Any:
|
||||
"""
|
||||
模拟Radarr、Sonarr根目录
|
||||
"""
|
||||
@@ -129,7 +129,7 @@ def arr_rootfolder(_: str = Depends(verify_uri_apikey)) -> Any:
|
||||
|
||||
|
||||
@arr_router.get("/tag", summary="标签")
|
||||
def arr_tag(_: str = Depends(verify_uri_apikey)) -> Any:
|
||||
def arr_tag(_: str = Depends(verify_apikey)) -> Any:
|
||||
"""
|
||||
模拟Radarr、Sonarr标签
|
||||
"""
|
||||
@@ -142,7 +142,7 @@ def arr_tag(_: str = Depends(verify_uri_apikey)) -> Any:
|
||||
|
||||
|
||||
@arr_router.get("/languageprofile", summary="语言")
|
||||
def arr_languageprofile(_: str = Depends(verify_uri_apikey)) -> Any:
|
||||
def arr_languageprofile(_: str = Depends(verify_apikey)) -> Any:
|
||||
"""
|
||||
模拟Radarr、Sonarr语言
|
||||
"""
|
||||
@@ -168,7 +168,7 @@ def arr_languageprofile(_: str = Depends(verify_uri_apikey)) -> Any:
|
||||
|
||||
|
||||
@arr_router.get("/movie", summary="所有订阅电影", response_model=List[schemas.RadarrMovie])
|
||||
def arr_movies(_: str = Depends(verify_uri_apikey), db: Session = Depends(get_db)) -> Any:
|
||||
def arr_movies(_: str = Depends(verify_apikey), db: Session = Depends(get_db)) -> Any:
|
||||
"""
|
||||
查询Rardar电影
|
||||
"""
|
||||
@@ -259,7 +259,7 @@ def arr_movies(_: str = Depends(verify_uri_apikey), db: Session = Depends(get_db
|
||||
|
||||
|
||||
@arr_router.get("/movie/lookup", summary="查询电影", response_model=List[schemas.RadarrMovie])
|
||||
def arr_movie_lookup(term: str, db: Session = Depends(get_db), _: str = Depends(verify_uri_apikey)) -> Any:
|
||||
def arr_movie_lookup(term: str, db: Session = Depends(get_db), _: str = Depends(verify_apikey)) -> Any:
|
||||
"""
|
||||
查询Rardar电影 term: `tmdb:${id}`
|
||||
存在和不存在均不能返回错误
|
||||
@@ -305,7 +305,7 @@ def arr_movie_lookup(term: str, db: Session = Depends(get_db), _: str = Depends(
|
||||
|
||||
|
||||
@arr_router.get("/movie/{mid}", summary="电影订阅详情", response_model=schemas.RadarrMovie)
|
||||
def arr_movie(mid: int, db: Session = Depends(get_db), _: str = Depends(verify_uri_apikey)) -> Any:
|
||||
def arr_movie(mid: int, db: Session = Depends(get_db), _: str = Depends(verify_apikey)) -> Any:
|
||||
"""
|
||||
查询Rardar电影订阅
|
||||
"""
|
||||
@@ -333,7 +333,7 @@ def arr_movie(mid: int, db: Session = Depends(get_db), _: str = Depends(verify_u
|
||||
@arr_router.post("/movie", summary="新增电影订阅")
|
||||
def arr_add_movie(movie: RadarrMovie,
|
||||
db: Session = Depends(get_db),
|
||||
_: str = Depends(verify_uri_apikey)
|
||||
_: str = Depends(verify_apikey)
|
||||
) -> Any:
|
||||
"""
|
||||
新增Rardar电影订阅
|
||||
@@ -362,7 +362,7 @@ def arr_add_movie(movie: RadarrMovie,
|
||||
|
||||
|
||||
@arr_router.delete("/movie/{mid}", summary="删除电影订阅", response_model=schemas.Response)
|
||||
def arr_remove_movie(mid: int, db: Session = Depends(get_db), _: str = Depends(verify_uri_apikey)) -> Any:
|
||||
def arr_remove_movie(mid: int, db: Session = Depends(get_db), _: str = Depends(verify_apikey)) -> Any:
|
||||
"""
|
||||
删除Rardar电影订阅
|
||||
"""
|
||||
@@ -378,7 +378,7 @@ def arr_remove_movie(mid: int, db: Session = Depends(get_db), _: str = Depends(v
|
||||
|
||||
|
||||
@arr_router.get("/series", summary="所有剧集", response_model=List[schemas.SonarrSeries])
|
||||
def arr_series(_: str = Depends(verify_uri_apikey), db: Session = Depends(get_db)) -> Any:
|
||||
def arr_series(_: str = Depends(verify_apikey), db: Session = Depends(get_db)) -> Any:
|
||||
"""
|
||||
查询Sonarr剧集
|
||||
"""
|
||||
@@ -514,7 +514,7 @@ def arr_series(_: str = Depends(verify_uri_apikey), db: Session = Depends(get_db
|
||||
|
||||
|
||||
@arr_router.get("/series/lookup", summary="查询剧集")
|
||||
def arr_series_lookup(term: str, db: Session = Depends(get_db), _: str = Depends(verify_uri_apikey)) -> Any:
|
||||
def arr_series_lookup(term: str, db: Session = Depends(get_db), _: str = Depends(verify_apikey)) -> Any:
|
||||
"""
|
||||
查询Sonarr剧集 term: `tvdb:${id}` title
|
||||
"""
|
||||
@@ -603,7 +603,7 @@ def arr_series_lookup(term: str, db: Session = Depends(get_db), _: str = Depends
|
||||
|
||||
|
||||
@arr_router.get("/series/{tid}", summary="剧集详情")
|
||||
def arr_serie(tid: int, db: Session = Depends(get_db), _: str = Depends(verify_uri_apikey)) -> Any:
|
||||
def arr_serie(tid: int, db: Session = Depends(get_db), _: str = Depends(verify_apikey)) -> Any:
|
||||
"""
|
||||
查询Sonarr剧集
|
||||
"""
|
||||
@@ -639,7 +639,7 @@ def arr_serie(tid: int, db: Session = Depends(get_db), _: str = Depends(verify_u
|
||||
@arr_router.post("/series", summary="新增剧集订阅")
|
||||
def arr_add_series(tv: schemas.SonarrSeries,
|
||||
db: Session = Depends(get_db),
|
||||
_: str = Depends(verify_uri_apikey)) -> Any:
|
||||
_: str = Depends(verify_apikey)) -> Any:
|
||||
"""
|
||||
新增Sonarr剧集订阅
|
||||
"""
|
||||
@@ -681,7 +681,7 @@ def arr_add_series(tv: schemas.SonarrSeries,
|
||||
|
||||
|
||||
@arr_router.delete("/series/{tid}", summary="删除剧集订阅")
|
||||
def arr_remove_series(tid: int, db: Session = Depends(get_db), _: str = Depends(verify_uri_apikey)) -> Any:
|
||||
def arr_remove_series(tid: int, db: Session = Depends(get_db), _: str = Depends(verify_apikey)) -> Any:
|
||||
"""
|
||||
删除Sonarr剧集订阅
|
||||
"""
|
||||
|
||||
@@ -10,8 +10,7 @@ from ruamel.yaml import CommentedMap
|
||||
from transmission_rpc import File
|
||||
|
||||
from app.core.config import settings
|
||||
from app.core.context import Context
|
||||
from app.core.context import MediaInfo, TorrentInfo
|
||||
from app.core.context import Context, MediaInfo, TorrentInfo
|
||||
from app.core.event import EventManager
|
||||
from app.core.meta import MetaBase
|
||||
from app.core.module import ModuleManager
|
||||
@@ -79,6 +78,7 @@ class ChainBase(metaclass=ABCMeta):
|
||||
def run_module(self, method: str, *args, **kwargs) -> Any:
|
||||
"""
|
||||
运行包含该方法的所有模块,然后返回结果
|
||||
当kwargs包含命名参数raise_exception时,如模块方法抛出异常且raise_exception为True,则同步抛出异常
|
||||
"""
|
||||
|
||||
def is_result_empty(ret):
|
||||
@@ -117,6 +117,8 @@ class ChainBase(metaclass=ABCMeta):
|
||||
# 中止继续执行
|
||||
break
|
||||
except Exception as err:
|
||||
if kwargs.get("raise_exception"):
|
||||
raise
|
||||
logger.error(
|
||||
f"运行模块 {module_id}.{method} 出错:{str(err)}\n{traceback.format_exc()}")
|
||||
self.messagehelper.put(title=f"{module_name}发生了错误",
|
||||
@@ -142,7 +144,7 @@ class ChainBase(metaclass=ABCMeta):
|
||||
bangumiid: int = None,
|
||||
cache: bool = True) -> Optional[MediaInfo]:
|
||||
"""
|
||||
识别媒体信息
|
||||
识别媒体信息,不含Fanart图片
|
||||
:param meta: 识别的元数据
|
||||
:param mtype: 识别的媒体类型,与tmdbid配套
|
||||
:param tmdbid: tmdbid
|
||||
@@ -166,7 +168,8 @@ class ChainBase(metaclass=ABCMeta):
|
||||
tmdbid=tmdbid, doubanid=doubanid, bangumiid=bangumiid, cache=cache)
|
||||
|
||||
def match_doubaninfo(self, name: str, imdbid: str = None,
|
||||
mtype: MediaType = None, year: str = None, season: int = None) -> Optional[dict]:
|
||||
mtype: MediaType = None, year: str = None, season: int = None,
|
||||
raise_exception: bool = False) -> Optional[dict]:
|
||||
"""
|
||||
搜索和匹配豆瓣信息
|
||||
:param name: 标题
|
||||
@@ -174,9 +177,10 @@ class ChainBase(metaclass=ABCMeta):
|
||||
:param mtype: 类型
|
||||
:param year: 年份
|
||||
:param season: 季
|
||||
:param raise_exception: 触发速率限制时是否抛出异常
|
||||
"""
|
||||
return self.run_module("match_doubaninfo", name=name, imdbid=imdbid,
|
||||
mtype=mtype, year=year, season=season)
|
||||
mtype=mtype, year=year, season=season, raise_exception=raise_exception)
|
||||
|
||||
def match_tmdbinfo(self, name: str, mtype: MediaType = None,
|
||||
year: str = None, season: int = None) -> Optional[dict]:
|
||||
@@ -214,14 +218,15 @@ class ChainBase(metaclass=ABCMeta):
|
||||
image_prefix=image_prefix, image_type=image_type,
|
||||
season=season, episode=episode)
|
||||
|
||||
def douban_info(self, doubanid: str, mtype: MediaType = None) -> Optional[dict]:
|
||||
def douban_info(self, doubanid: str, mtype: MediaType = None, raise_exception: bool = False) -> Optional[dict]:
|
||||
"""
|
||||
获取豆瓣信息
|
||||
:param doubanid: 豆瓣ID
|
||||
:param mtype: 媒体类型
|
||||
:return: 豆瓣信息
|
||||
:param raise_exception: 触发速率限制时是否抛出异常
|
||||
"""
|
||||
return self.run_module("douban_info", doubanid=doubanid, mtype=mtype)
|
||||
return self.run_module("douban_info", doubanid=doubanid, mtype=mtype, raise_exception=raise_exception)
|
||||
|
||||
def tvdb_info(self, tvdbid: int) -> Optional[dict]:
|
||||
"""
|
||||
@@ -231,14 +236,15 @@ class ChainBase(metaclass=ABCMeta):
|
||||
"""
|
||||
return self.run_module("tvdb_info", tvdbid=tvdbid)
|
||||
|
||||
def tmdb_info(self, tmdbid: int, mtype: MediaType) -> Optional[dict]:
|
||||
def tmdb_info(self, tmdbid: int, mtype: MediaType, season: int = None) -> Optional[dict]:
|
||||
"""
|
||||
获取TMDB信息
|
||||
:param tmdbid: int
|
||||
:param mtype: 媒体类型
|
||||
:param season: 季
|
||||
:return: TVDB信息
|
||||
"""
|
||||
return self.run_module("tmdb_info", tmdbid=tmdbid, mtype=mtype)
|
||||
return self.run_module("tmdb_info", tmdbid=tmdbid, mtype=mtype, season=season)
|
||||
|
||||
def bangumi_info(self, bangumiid: int) -> Optional[dict]:
|
||||
"""
|
||||
@@ -521,6 +527,14 @@ class ChainBase(metaclass=ABCMeta):
|
||||
self.run_module("scrape_metadata", path=path, mediainfo=mediainfo, metainfo=metainfo,
|
||||
transfer_type=transfer_type, force_nfo=force_nfo, force_img=force_img)
|
||||
|
||||
def metadata_img(self, mediainfo: MediaInfo, season: int = None) -> Optional[dict]:
|
||||
"""
|
||||
获取图片名称和url
|
||||
:param mediainfo: 媒体信息
|
||||
:param season: 季号
|
||||
"""
|
||||
return self.run_module("metadata_img", mediainfo=mediainfo, season=season)
|
||||
|
||||
def media_category(self) -> Optional[Dict[str, list]]:
|
||||
"""
|
||||
获取媒体分类
|
||||
|
||||
@@ -76,6 +76,8 @@ class DownloadChain(ChainBase):
|
||||
msg_text = f"{msg_text}\n促销:{torrent.volume_factor}"
|
||||
if torrent.hit_and_run:
|
||||
msg_text = f"{msg_text}\nHit&Run:是"
|
||||
if torrent.labels:
|
||||
msg_text = f"{msg_text}\n标签:{' '.join(torrent.labels)}"
|
||||
if torrent.description:
|
||||
html_re = re.compile(r'<[^>]+>', re.S)
|
||||
description = html_re.sub('', torrent.description)
|
||||
@@ -216,6 +218,13 @@ class DownloadChain(ChainBase):
|
||||
_media = context.media_info
|
||||
_meta = context.meta_info
|
||||
|
||||
# 补充完整的media数据
|
||||
if not _media.genre_ids:
|
||||
new_media = self.recognize_media(mtype=_media.type, tmdbid=_media.tmdb_id,
|
||||
doubanid=_media.douban_id, bangumiid=_media.bangumi_id)
|
||||
if new_media:
|
||||
_media = new_media
|
||||
|
||||
# 实际下载的集数
|
||||
download_episodes = StringUtils.format_ep(list(episodes)) if episodes else None
|
||||
_folder_name = ""
|
||||
|
||||
@@ -2,17 +2,23 @@ import copy
|
||||
import time
|
||||
from pathlib import Path
|
||||
from threading import Lock
|
||||
from typing import Optional, List, Tuple
|
||||
from typing import Optional, List, Tuple, Union
|
||||
|
||||
from app import schemas
|
||||
from app.chain import ChainBase
|
||||
from app.core.config import settings
|
||||
from app.core.context import Context, MediaInfo
|
||||
from app.core.event import eventmanager, Event
|
||||
from app.core.meta import MetaBase
|
||||
from app.core.metainfo import MetaInfo, MetaInfoPath
|
||||
from app.helper.aliyun import AliyunHelper
|
||||
from app.helper.u115 import U115Helper
|
||||
from app.log import logger
|
||||
from app.schemas.types import EventType, MediaType
|
||||
from app.utils.http import RequestUtils
|
||||
from app.utils.singleton import Singleton
|
||||
from app.utils.string import StringUtils
|
||||
from app.utils.system import SystemUtils
|
||||
|
||||
recognize_lock = Lock()
|
||||
|
||||
@@ -26,6 +32,17 @@ class MediaChain(ChainBase, metaclass=Singleton):
|
||||
# 临时识别结果 {title, name, year, season, episode}
|
||||
recognize_temp: Optional[dict] = None
|
||||
|
||||
def metadata_nfo(self, meta: MetaBase, mediainfo: MediaInfo,
|
||||
season: int = None, episode: int = None) -> Optional[str]:
|
||||
"""
|
||||
获取NFO文件内容文本
|
||||
:param meta: 元数据
|
||||
:param mediainfo: 媒体信息
|
||||
:param season: 季号
|
||||
:param episode: 集号
|
||||
"""
|
||||
return self.run_module("metadata_nfo", meta=meta, mediainfo=mediainfo, season=season, episode=episode)
|
||||
|
||||
def recognize_by_meta(self, metainfo: MetaBase) -> Optional[MediaInfo]:
|
||||
"""
|
||||
根据主副标题识别媒体信息
|
||||
@@ -220,6 +237,8 @@ class MediaChain(ChainBase, metaclass=Singleton):
|
||||
season=meta.begin_season
|
||||
)
|
||||
if tmdbinfo:
|
||||
# 合季季后返回
|
||||
tmdbinfo['season'] = meta.begin_season
|
||||
break
|
||||
return tmdbinfo
|
||||
|
||||
@@ -313,3 +332,189 @@ class MediaChain(ChainBase, metaclass=Singleton):
|
||||
season=meta.begin_season
|
||||
)
|
||||
return None
|
||||
|
||||
def manual_scrape(self, storage: str, fileitem: schemas.FileItem,
|
||||
meta: MetaBase = None, mediainfo: MediaInfo = None, init_folder: bool = True):
|
||||
"""
|
||||
手动刮削媒体信息
|
||||
"""
|
||||
|
||||
def __list_files(_storage: str, _fileid: str, _path: str = None, _drive_id: str = None):
|
||||
"""
|
||||
列出下级文件
|
||||
"""
|
||||
if _storage == "aliyun":
|
||||
return AliyunHelper().list(drive_id=_drive_id, parent_file_id=_fileid, path=_path)
|
||||
elif _storage == "u115":
|
||||
return U115Helper().list(parent_file_id=_fileid, path=_path)
|
||||
else:
|
||||
items = SystemUtils.list_sub_all(Path(_path))
|
||||
return [schemas.FileItem(
|
||||
type="file" if item.is_file() else "dir",
|
||||
path=str(item),
|
||||
name=item.name,
|
||||
basename=item.stem,
|
||||
extension=item.suffix[1:],
|
||||
size=item.stat().st_size,
|
||||
modify_time=item.stat().st_mtime
|
||||
) for item in items]
|
||||
|
||||
def __save_file(_storage: str, _drive_id: str, _fileid: str, _path: Path, _content: Union[bytes, str]):
|
||||
"""
|
||||
保存或上传文件
|
||||
"""
|
||||
if _storage != "local":
|
||||
# 写入到临时目录
|
||||
temp_path = settings.TEMP_PATH / _path.name
|
||||
temp_path.write_bytes(_content)
|
||||
# 上传文件
|
||||
logger.info(f"正在上传 {_path.name} ...")
|
||||
if _storage == "aliyun":
|
||||
AliyunHelper().upload(drive_id=_drive_id, parent_file_id=_fileid, file_path=temp_path)
|
||||
elif _storage == "u115":
|
||||
U115Helper().upload(parent_file_id=_fileid, file_path=temp_path)
|
||||
logger.info(f"{_path.name} 上传完成")
|
||||
else:
|
||||
# 保存到本地
|
||||
logger.info(f"正在保存 {_path.name} ...")
|
||||
_path.write_bytes(_content)
|
||||
logger.info(f"{_path} 已保存")
|
||||
|
||||
def __save_image(_url: str) -> Optional[bytes]:
|
||||
"""
|
||||
下载图片并保存
|
||||
"""
|
||||
try:
|
||||
logger.info(f"正在下载图片:{_url} ...")
|
||||
r = RequestUtils(proxies=settings.PROXY).get_res(url=_url)
|
||||
if r:
|
||||
return r.content
|
||||
else:
|
||||
logger.info(f"{_url} 图片下载失败,请检查网络连通性!")
|
||||
except Exception as err:
|
||||
logger.error(f"{_url} 图片下载失败:{str(err)}!")
|
||||
|
||||
# 当前文件路径
|
||||
filepath = Path(fileitem.path)
|
||||
if fileitem.type == "file" \
|
||||
and (not filepath.suffix or filepath.suffix.lower() not in settings.RMT_MEDIAEXT):
|
||||
return
|
||||
if not meta:
|
||||
meta = MetaInfoPath(filepath)
|
||||
if not mediainfo:
|
||||
mediainfo = self.recognize_by_meta(meta)
|
||||
if not mediainfo:
|
||||
logger.warn(f"{filepath} 无法识别文件媒体信息!")
|
||||
return
|
||||
logger.info(f"开始刮削:{filepath} ...")
|
||||
if mediainfo.type == MediaType.MOVIE:
|
||||
# 电影
|
||||
if fileitem.type == "file":
|
||||
# 电影文件
|
||||
logger.info(f"正在生成电影nfo:{mediainfo.title_year} - {filepath.name}")
|
||||
movie_nfo = self.metadata_nfo(meta=meta, mediainfo=mediainfo)
|
||||
if not movie_nfo:
|
||||
logger.warn(f"{filepath.name} nfo文件生成失败!")
|
||||
return
|
||||
# 保存或上传nfo文件
|
||||
__save_file(_storage=storage, _drive_id=fileitem.drive_id, _fileid=fileitem.parent_fileid,
|
||||
_path=filepath.with_suffix(".nfo"), _content=movie_nfo)
|
||||
else:
|
||||
# 电影目录
|
||||
files = __list_files(_storage=storage, _fileid=fileitem.fileid,
|
||||
_drive_id=fileitem.drive_id, _path=fileitem.path)
|
||||
for file in files:
|
||||
self.manual_scrape(storage=storage, fileitem=file,
|
||||
meta=meta, mediainfo=mediainfo,
|
||||
init_folder=False)
|
||||
# 生成目录内图片文件
|
||||
if init_folder:
|
||||
# 图片
|
||||
for attr_name, attr_value in vars(mediainfo).items():
|
||||
if attr_value \
|
||||
and attr_name.endswith("_path") \
|
||||
and attr_value \
|
||||
and isinstance(attr_value, str) \
|
||||
and attr_value.startswith("http"):
|
||||
image_name = attr_name.replace("_path", "") + Path(attr_value).suffix
|
||||
image_path = filepath / image_name
|
||||
# 下载图片
|
||||
content = __save_image(_url=attr_value)
|
||||
# 写入nfo到根目录
|
||||
__save_file(_storage=storage, _drive_id=fileitem.drive_id, _fileid=fileitem.fileid,
|
||||
_path=image_path, _content=content)
|
||||
else:
|
||||
# 电视剧
|
||||
if fileitem.type == "file":
|
||||
# 当前为集文件,重新识别季集
|
||||
file_meta = MetaInfoPath(filepath)
|
||||
if not file_meta.begin_episode:
|
||||
logger.warn(f"{filepath.name} 无法识别文件集数!")
|
||||
return
|
||||
file_mediainfo = self.recognize_media(meta=file_meta)
|
||||
if not file_mediainfo:
|
||||
logger.warn(f"{filepath.name} 无法识别文件媒体信息!")
|
||||
return
|
||||
# 获取集的nfo文件
|
||||
episode_nfo = self.metadata_nfo(meta=file_meta, mediainfo=file_mediainfo,
|
||||
season=file_meta.begin_season, episode=file_meta.begin_episode)
|
||||
if not episode_nfo:
|
||||
logger.warn(f"{filepath.name} nfo生成失败!")
|
||||
return
|
||||
# 保存或上传nfo文件
|
||||
__save_file(_storage=storage, _drive_id=fileitem.drive_id, _fileid=fileitem.parent_fileid,
|
||||
_path=filepath.with_suffix(".nfo"), _content=episode_nfo)
|
||||
else:
|
||||
# 当前为目录,处理目录内的文件
|
||||
files = __list_files(_storage=storage, _fileid=fileitem.fileid,
|
||||
_drive_id=fileitem.drive_id, _path=fileitem.path)
|
||||
for file in files:
|
||||
self.manual_scrape(storage=storage, fileitem=file,
|
||||
meta=meta, mediainfo=mediainfo,
|
||||
init_folder=True if file.type == "dir" else False)
|
||||
# 生成目录的nfo和图片
|
||||
if init_folder:
|
||||
# 识别文件夹名称
|
||||
season_meta = MetaInfo(filepath.name)
|
||||
if season_meta.begin_season:
|
||||
# 当前目录有季号,生成季nfo
|
||||
season_nfo = self.metadata_nfo(meta=meta, mediainfo=mediainfo, season=meta.begin_season)
|
||||
if not season_nfo:
|
||||
logger.warn(f"无法生成电视剧季nfo文件:{meta.name}")
|
||||
return
|
||||
# 写入nfo到根目录
|
||||
nfo_path = filepath / "season.nfo"
|
||||
__save_file(_storage=storage, _drive_id=fileitem.drive_id, _fileid=fileitem.fileid,
|
||||
_path=nfo_path, _content=season_nfo)
|
||||
# TMDB季poster图片
|
||||
image_dict = self.metadata_img(mediainfo=mediainfo, season=season_meta.begin_season)
|
||||
if image_dict:
|
||||
for image_name, image_url in image_dict.items():
|
||||
image_path = filepath.with_name(image_name)
|
||||
# 下载图片
|
||||
content = __save_image(image_url)
|
||||
# 保存图片文件到当前目录
|
||||
__save_file(_storage=storage, _drive_id=fileitem.drive_id, _fileid=fileitem.fileid,
|
||||
_path=image_path, _content=content)
|
||||
if season_meta.name:
|
||||
# 当前目录有名称,生成tvshow nfo 和 tv图片
|
||||
tv_nfo = self.metadata_nfo(meta=meta, mediainfo=mediainfo)
|
||||
if not tv_nfo:
|
||||
logger.warn(f"无法生成电视剧nfo文件:{meta.name}")
|
||||
return
|
||||
# 写入tvshow nfo到根目录
|
||||
nfo_path = filepath / "tvshow.nfo"
|
||||
__save_file(_storage=storage, _drive_id=fileitem.drive_id, _fileid=fileitem.fileid,
|
||||
_path=nfo_path, _content=tv_nfo)
|
||||
# 生成目录图片
|
||||
image_dict = self.metadata_img(mediainfo=mediainfo)
|
||||
if image_dict:
|
||||
for image_name, image_url in image_dict.items():
|
||||
image_path = filepath.parent.with_name(image_name)
|
||||
# 下载图片
|
||||
content = __save_image(image_url)
|
||||
# 保存图片文件到当前目录
|
||||
__save_file(_storage=storage, _drive_id=fileitem.drive_id, _fileid=fileitem.fileid,
|
||||
_path=image_path, _content=content)
|
||||
|
||||
logger.info(f"{filepath.name} 刮削完成")
|
||||
|
||||
@@ -316,34 +316,34 @@ class SearchChain(ChainBase):
|
||||
self.progress.update(value=0,
|
||||
text=f"开始搜索,共 {total_num} 个站点 ...",
|
||||
key=ProgressKey.Search)
|
||||
# 多线程
|
||||
executor = ThreadPoolExecutor(max_workers=len(indexer_sites))
|
||||
all_task = []
|
||||
for site in indexer_sites:
|
||||
if area == "imdbid":
|
||||
# 搜索IMDBID
|
||||
task = executor.submit(self.search_torrents, site=site,
|
||||
keywords=[mediainfo.imdb_id] if mediainfo else None,
|
||||
mtype=mediainfo.type if mediainfo else None,
|
||||
page=page)
|
||||
else:
|
||||
# 搜索标题
|
||||
task = executor.submit(self.search_torrents, site=site,
|
||||
keywords=keywords,
|
||||
mtype=mediainfo.type if mediainfo else None,
|
||||
page=page)
|
||||
all_task.append(task)
|
||||
# 结果集
|
||||
results = []
|
||||
for future in as_completed(all_task):
|
||||
finish_count += 1
|
||||
result = future.result()
|
||||
if result:
|
||||
results.extend(result)
|
||||
logger.info(f"站点搜索进度:{finish_count} / {total_num}")
|
||||
self.progress.update(value=finish_count / total_num * 100,
|
||||
text=f"正在搜索{keywords or ''},已完成 {finish_count} / {total_num} 个站点 ...",
|
||||
key=ProgressKey.Search)
|
||||
# 多线程
|
||||
with ThreadPoolExecutor(max_workers=len(indexer_sites)) as executor:
|
||||
all_task = []
|
||||
for site in indexer_sites:
|
||||
if area == "imdbid":
|
||||
# 搜索IMDBID
|
||||
task = executor.submit(self.search_torrents, site=site,
|
||||
keywords=[mediainfo.imdb_id] if mediainfo else None,
|
||||
mtype=mediainfo.type if mediainfo else None,
|
||||
page=page)
|
||||
else:
|
||||
# 搜索标题
|
||||
task = executor.submit(self.search_torrents, site=site,
|
||||
keywords=keywords,
|
||||
mtype=mediainfo.type if mediainfo else None,
|
||||
page=page)
|
||||
all_task.append(task)
|
||||
for future in as_completed(all_task):
|
||||
finish_count += 1
|
||||
result = future.result()
|
||||
if result:
|
||||
results.extend(result)
|
||||
logger.info(f"站点搜索进度:{finish_count} / {total_num}")
|
||||
self.progress.update(value=finish_count / total_num * 100,
|
||||
text=f"正在搜索{keywords or ''},已完成 {finish_count} / {total_num} 个站点 ...",
|
||||
key=ProgressKey.Search)
|
||||
# 计算耗时
|
||||
end_time = datetime.now()
|
||||
# 更新进度
|
||||
|
||||
@@ -107,32 +107,27 @@ class SiteChain(ChainBase):
|
||||
判断站点是否已经登陆:m-team
|
||||
"""
|
||||
user_agent = site.ua or settings.USER_AGENT
|
||||
url = f"{site.url}api/member/profile"
|
||||
domain = StringUtils.get_url_domain(site.url)
|
||||
url = f"https://api.{domain}/api/member/profile"
|
||||
headers = {
|
||||
"Content-Type": "application/json",
|
||||
"User-Agent": user_agent,
|
||||
"Accept": "application/json, text/plain, */*",
|
||||
"Authorization": site.token
|
||||
"x-api-key": site.apikey,
|
||||
}
|
||||
res = RequestUtils(
|
||||
headers=headers,
|
||||
proxies=settings.PROXY if site.proxy else None,
|
||||
timeout=site.timeout or 15
|
||||
).post_res(url=url)
|
||||
if res and res.status_code == 200:
|
||||
user_info = res.json()
|
||||
if user_info and user_info.get("data"):
|
||||
# 更新最后访问时间
|
||||
res = RequestUtils(headers=headers,
|
||||
timeout=site.timeout or 15,
|
||||
proxies=settings.PROXY if site.proxy else None,
|
||||
referer=f"{site.url}index"
|
||||
).post_res(url=urljoin(url, "api/member/updateLastBrowse"))
|
||||
if res:
|
||||
return True, "连接成功"
|
||||
else:
|
||||
return True, f"连接成功,但更新状态失败"
|
||||
return False, "鉴权已过期或无效"
|
||||
if res is None:
|
||||
return False, "无法打开网站!"
|
||||
if res.status_code == 200:
|
||||
user_info = res.json() or {}
|
||||
if user_info.get("data"):
|
||||
return True, "连接成功"
|
||||
return False, user_info.get("message", "鉴权已过期或无效")
|
||||
else:
|
||||
return False, f"错误:{res.status_code} {res.reason}"
|
||||
|
||||
@staticmethod
|
||||
def __yema_test(site: Site) -> Tuple[bool, str]:
|
||||
@@ -226,7 +221,7 @@ class SiteChain(ChainBase):
|
||||
indexer = self.siteshelper.get_indexer(domain)
|
||||
# 数据库的站点信息
|
||||
site_info = self.siteoper.get_by_domain(domain)
|
||||
if site_info:
|
||||
if site_info and site_info.is_active == 1:
|
||||
# 站点已存在,检查站点连通性
|
||||
status, msg = self.test(domain)
|
||||
# 更新站点Cookie
|
||||
@@ -252,6 +247,11 @@ class SiteChain(ChainBase):
|
||||
self.siteoper.update_cookie(domain=domain, cookies=cookie)
|
||||
_update_count += 1
|
||||
elif indexer:
|
||||
if settings.COOKIECLOUD_BLACKLIST and any(
|
||||
StringUtils.get_url_domain(domain) == StringUtils.get_url_domain(black_domain) for black_domain
|
||||
in str(settings.COOKIECLOUD_BLACKLIST).split(",")):
|
||||
logger.warn(f"站点 {domain} 已在黑名单中,不添加站点")
|
||||
continue
|
||||
# 新增站点
|
||||
domain_url = __indexer_domain(inx=indexer, sub_domain=domain)
|
||||
res = RequestUtils(cookies=cookie,
|
||||
|
||||
@@ -139,15 +139,24 @@ class SubscribeChain(ChainBase):
|
||||
mediainfo.bangumi_id = bangumiid
|
||||
# 添加订阅
|
||||
kwargs.update({
|
||||
'quality': self.__get_default_subscribe_config(mediainfo.type, "quality"),
|
||||
'resolution': self.__get_default_subscribe_config(mediainfo.type, "resolution"),
|
||||
'effect': self.__get_default_subscribe_config(mediainfo.type, "effect"),
|
||||
'include': self.__get_default_subscribe_config(mediainfo.type, "include"),
|
||||
'exclude': self.__get_default_subscribe_config(mediainfo.type, "exclude"),
|
||||
'best_version': self.__get_default_subscribe_config(mediainfo.type, "best_version") if not kwargs.get("best_version") else kwargs.get("best_version"),
|
||||
'search_imdbid': self.__get_default_subscribe_config(mediainfo.type, "search_imdbid"),
|
||||
'sites': self.__get_default_subscribe_config(mediainfo.type, "sites") or None,
|
||||
'save_path': self.__get_default_subscribe_config(mediainfo.type, "save_path"),
|
||||
'quality': self.__get_default_subscribe_config(mediainfo.type, "quality") if not kwargs.get(
|
||||
"quality") else kwargs.get("quality"),
|
||||
'resolution': self.__get_default_subscribe_config(mediainfo.type, "resolution") if not kwargs.get(
|
||||
"resolution") else kwargs.get("resolution"),
|
||||
'effect': self.__get_default_subscribe_config(mediainfo.type, "effect") if not kwargs.get(
|
||||
"effect") else kwargs.get("effect"),
|
||||
'include': self.__get_default_subscribe_config(mediainfo.type, "include") if not kwargs.get(
|
||||
"include") else kwargs.get("include"),
|
||||
'exclude': self.__get_default_subscribe_config(mediainfo.type, "exclude") if not kwargs.get(
|
||||
"exclude") else kwargs.get("exclude"),
|
||||
'best_version': self.__get_default_subscribe_config(mediainfo.type, "best_version") if not kwargs.get(
|
||||
"best_version") else kwargs.get("best_version"),
|
||||
'search_imdbid': self.__get_default_subscribe_config(mediainfo.type, "search_imdbid") if not kwargs.get(
|
||||
"search_imdbid") else kwargs.get("search_imdbid"),
|
||||
'sites': self.__get_default_subscribe_config(mediainfo.type, "sites") or None if not kwargs.get(
|
||||
"sites") else kwargs.get("sites"),
|
||||
'save_path': self.__get_default_subscribe_config(mediainfo.type, "save_path") if not kwargs.get(
|
||||
"save_path") else kwargs.get("save_path")
|
||||
})
|
||||
sid, err_msg = self.subscribeoper.add(mediainfo=mediainfo, season=season, username=username, **kwargs)
|
||||
if not sid:
|
||||
@@ -170,9 +179,9 @@ class SubscribeChain(ChainBase):
|
||||
text = f"评分:{mediainfo.vote_average}"
|
||||
# 群发
|
||||
if mediainfo.type == MediaType.TV:
|
||||
link = settings.MP_DOMAIN('#/subscribe-tv?tab=mysub')
|
||||
link = settings.MP_DOMAIN('#/subscribe/tv?tab=mysub')
|
||||
else:
|
||||
link = settings.MP_DOMAIN('#/subscribe-movie?tab=mysub')
|
||||
link = settings.MP_DOMAIN('#/subscribe/movie?tab=mysub')
|
||||
self.post_message(Notification(mtype=NotificationType.Subscribe,
|
||||
title=f"{mediainfo.title_year} {metainfo.season} 已添加订阅",
|
||||
text=text,
|
||||
@@ -301,20 +310,16 @@ class SubscribeChain(ChainBase):
|
||||
|
||||
# 电视剧订阅处理缺失集
|
||||
if meta.type == MediaType.TV:
|
||||
# 实际缺失集与订阅开始结束集范围进行整合
|
||||
# 实际缺失集与订阅开始结束集范围进行整合,同时剔除已下载的集数
|
||||
no_exists = self.__get_subscribe_no_exits(
|
||||
subscribe_name=f'{subscribe.name} {meta.season}',
|
||||
no_exists=no_exists,
|
||||
mediakey=mediakey,
|
||||
begin_season=meta.begin_season,
|
||||
total_episode=subscribe.total_episode,
|
||||
start_episode=subscribe.start_episode,
|
||||
|
||||
downloaded_episodes=self.__get_downloaded_episodes(subscribe)
|
||||
)
|
||||
# 打印汇总缺失集信息
|
||||
if no_exists and no_exists.get(mediakey):
|
||||
no_exists_info = no_exists.get(mediakey).get(subscribe.season)
|
||||
if no_exists_info:
|
||||
logger.info(f'订阅 {mediainfo.title_year} {meta.season} 缺失集:{no_exists_info.episodes}')
|
||||
|
||||
# 站点范围
|
||||
sites = self.get_sub_sites(subscribe)
|
||||
@@ -348,23 +353,17 @@ class SubscribeChain(ChainBase):
|
||||
torrent_meta = context.meta_info
|
||||
torrent_info = context.torrent_info
|
||||
torrent_mediainfo = context.media_info
|
||||
# 非洗版
|
||||
if not subscribe.best_version:
|
||||
# 如果是电视剧过滤掉已经下载的集数
|
||||
if torrent_mediainfo.type == MediaType.TV:
|
||||
if self.__check_subscribe_note(subscribe, torrent_meta.episode_list):
|
||||
logger.info(f'{torrent_info.title} 对应剧集 {torrent_meta.episode_list} 已下载过')
|
||||
continue
|
||||
else:
|
||||
# 洗版
|
||||
if subscribe.best_version:
|
||||
# 洗版时,非整季不要
|
||||
if torrent_mediainfo.type == MediaType.TV:
|
||||
if torrent_meta.episode_list:
|
||||
logger.info(f'{subscribe.name} 正在洗版,{torrent_info.title} 不是整季')
|
||||
continue
|
||||
# 洗版时,优先级小于已下载优先级的不要
|
||||
# 洗版时,优先级小于等于已下载优先级的不要
|
||||
if subscribe.current_priority \
|
||||
and torrent_info.pri_order < subscribe.current_priority:
|
||||
logger.info(f'{subscribe.name} 正在洗版,{torrent_info.title} 优先级低于已下载优先级')
|
||||
and torrent_info.pri_order <= subscribe.current_priority:
|
||||
logger.info(f'{subscribe.name} 正在洗版,{torrent_info.title} 优先级低于或等于已下载优先级')
|
||||
continue
|
||||
matched_contexts.append(context)
|
||||
|
||||
@@ -611,20 +610,16 @@ class SubscribeChain(ChainBase):
|
||||
|
||||
# 电视剧订阅
|
||||
if meta.type == MediaType.TV:
|
||||
# 整合实际缺失集与订阅开始集结束集
|
||||
# 整合实际缺失集与订阅开始集结束集,同时剔除已下载的集数
|
||||
no_exists = self.__get_subscribe_no_exits(
|
||||
subscribe_name=f'{subscribe.name} {meta.season}',
|
||||
no_exists=no_exists,
|
||||
mediakey=mediakey,
|
||||
begin_season=meta.begin_season,
|
||||
total_episode=subscribe.total_episode,
|
||||
start_episode=subscribe.start_episode,
|
||||
|
||||
downloaded_episodes=self.__get_downloaded_episodes(subscribe)
|
||||
)
|
||||
# 打印汇总缺失集信息
|
||||
if no_exists and no_exists.get(mediakey):
|
||||
no_exists_info = no_exists.get(mediakey).get(subscribe.season)
|
||||
if no_exists_info:
|
||||
logger.info(f'订阅 {mediainfo.title_year} {meta.season} 缺失集:{no_exists_info.episodes}')
|
||||
|
||||
# 过滤规则
|
||||
filter_rule = self.get_filter_rule(subscribe)
|
||||
@@ -646,16 +641,19 @@ class SubscribeChain(ChainBase):
|
||||
_cache_key = f"{torrent_info.title}_{torrent_info.description}"
|
||||
if _cache_key not in _recognize_cached:
|
||||
_recognize_cached.append(_cache_key)
|
||||
logger.info(f'{torrent_info.site_name} - {torrent_info.title} 订阅缓存为未识别状态,尝试重新识别...')
|
||||
logger.info(
|
||||
f'{torrent_info.site_name} - {torrent_info.title} 订阅缓存为未识别状态,尝试重新识别...')
|
||||
# 重新识别(不使用缓存)
|
||||
torrent_mediainfo = self.recognize_media(meta=torrent_meta, cache=False)
|
||||
if not torrent_mediainfo:
|
||||
logger.warn(f'{torrent_info.site_name} - {torrent_info.title} 重新识别失败,尝试通过标题匹配...')
|
||||
logger.warn(
|
||||
f'{torrent_info.site_name} - {torrent_info.title} 重新识别失败,尝试通过标题匹配...')
|
||||
if self.torrenthelper.match_torrent(mediainfo=mediainfo,
|
||||
torrent_meta=torrent_meta,
|
||||
torrent=torrent_info):
|
||||
# 匹配成功
|
||||
logger.info(f'{mediainfo.title_year} 通过标题匹配到资源:{torrent_info.site_name} - {torrent_info.title}')
|
||||
logger.info(
|
||||
f'{mediainfo.title_year} 通过标题匹配到资源:{torrent_info.site_name} - {torrent_info.title}')
|
||||
# 更新缓存
|
||||
torrent_mediainfo = mediainfo
|
||||
context.media_info = mediainfo
|
||||
@@ -728,10 +726,6 @@ class SubscribeChain(ChainBase):
|
||||
f'{torrent_info.title} 对应剧集 {torrent_meta.episode_list} 未包含缺失的剧集'
|
||||
)
|
||||
continue
|
||||
# 过滤掉已经下载的集数
|
||||
if self.__check_subscribe_note(subscribe, torrent_meta.episode_list):
|
||||
logger.info(f'{torrent_info.title} 对应剧集 {torrent_meta.episode_list} 已下载过')
|
||||
continue
|
||||
else:
|
||||
# 洗版时,非整季不要
|
||||
if meta.type == MediaType.TV:
|
||||
@@ -748,8 +742,8 @@ class SubscribeChain(ChainBase):
|
||||
# 洗版时,优先级小于已下载优先级的不要
|
||||
if subscribe.best_version:
|
||||
if subscribe.current_priority \
|
||||
and torrent_info.pri_order < subscribe.current_priority:
|
||||
logger.info(f'{subscribe.name} 正在洗版,{torrent_info.title} 优先级低于已下载优先级')
|
||||
and torrent_info.pri_order <= subscribe.current_priority:
|
||||
logger.info(f'{subscribe.name} 正在洗版,{torrent_info.title} 优先级低于或等于已下载优先级')
|
||||
continue
|
||||
|
||||
# 匹配成功
|
||||
@@ -864,21 +858,21 @@ class SubscribeChain(ChainBase):
|
||||
})
|
||||
|
||||
@staticmethod
|
||||
def __check_subscribe_note(subscribe: Subscribe, episodes: List[int]) -> bool:
|
||||
def __get_downloaded_episodes(subscribe: Subscribe) -> List[int]:
|
||||
"""
|
||||
检查当前集是否已下载过
|
||||
获取已下载过的集数
|
||||
"""
|
||||
if not subscribe.note:
|
||||
return False
|
||||
if not episodes:
|
||||
return False
|
||||
return []
|
||||
if subscribe.type != MediaType.TV.value:
|
||||
return []
|
||||
try:
|
||||
note = json.loads(subscribe.note)
|
||||
episodes = json.loads(subscribe.note)
|
||||
logger.info(f'订阅 {subscribe.name} 第{subscribe.season}季 已下载集数:{episodes}')
|
||||
return episodes
|
||||
except JSONDecodeError:
|
||||
return False
|
||||
if set(episodes).issubset(set(note)):
|
||||
return True
|
||||
return False
|
||||
logger.warn(f'订阅 {subscribe.name} note字段解析失败')
|
||||
return []
|
||||
|
||||
def __update_lack_episodes(self, lefts: Dict[Union[int, str], Dict[int, NotExistMediaInfo]],
|
||||
subscribe: Subscribe,
|
||||
@@ -928,9 +922,9 @@ class SubscribeChain(ChainBase):
|
||||
self.subscribeoper.delete(subscribe.id)
|
||||
# 发送通知
|
||||
if mediainfo.type == MediaType.TV:
|
||||
link = settings.MP_DOMAIN('#/subscribe-tv?tab=mysub')
|
||||
link = settings.MP_DOMAIN('#/subscribe/tv?tab=mysub')
|
||||
else:
|
||||
link = settings.MP_DOMAIN('#/subscribe-movie?tab=mysub')
|
||||
link = settings.MP_DOMAIN('#/subscribe/movie?tab=mysub')
|
||||
self.post_message(Notification(mtype=NotificationType.Subscribe,
|
||||
title=f'{mediainfo.title_year} {meta.season} 已完成{msgstr}',
|
||||
image=mediainfo.get_message_image(),
|
||||
@@ -1004,25 +998,31 @@ class SubscribeChain(ChainBase):
|
||||
self.remote_list(channel, userid)
|
||||
|
||||
@staticmethod
|
||||
def __get_subscribe_no_exits(no_exists: Dict[Union[int, str], Dict[int, NotExistMediaInfo]],
|
||||
def __get_subscribe_no_exits(subscribe_name: str,
|
||||
no_exists: Dict[Union[int, str], Dict[int, NotExistMediaInfo]],
|
||||
mediakey: Union[str, int],
|
||||
begin_season: int,
|
||||
total_episode: int,
|
||||
start_episode: int):
|
||||
start_episode: int,
|
||||
downloaded_episodes: List[int] = None):
|
||||
"""
|
||||
根据订阅开始集数和总集数,结合TMDB信息计算当前订阅的缺失集数
|
||||
:param subscribe_name: 订阅名称
|
||||
:param no_exists: 缺失季集列表
|
||||
:param mediakey: TMDB ID或豆瓣ID
|
||||
:param begin_season: 开始季
|
||||
:param total_episode: 订阅设定总集数
|
||||
:param start_episode: 订阅设定开始集数
|
||||
:param downloaded_episodes: 已下载集数
|
||||
"""
|
||||
# 使用订阅的总集数和开始集数替换no_exists
|
||||
if no_exists \
|
||||
and no_exists.get(mediakey) \
|
||||
and (total_episode or start_episode):
|
||||
if not no_exists or not no_exists.get(mediakey):
|
||||
return no_exists
|
||||
no_exists_item = no_exists.get(mediakey)
|
||||
if total_episode or start_episode:
|
||||
logger.info(f'订阅 {subscribe_name} 设定的开始集数:{start_episode}、总集数:{total_episode}')
|
||||
# 该季原缺失信息
|
||||
no_exist_season = no_exists.get(mediakey).get(begin_season)
|
||||
no_exist_season = no_exists_item.get(begin_season)
|
||||
if no_exist_season:
|
||||
# 原集列表
|
||||
episode_list = no_exist_season.episodes
|
||||
@@ -1060,6 +1060,41 @@ class SubscribeChain(ChainBase):
|
||||
total_episode=total_episode,
|
||||
start_episode=start_episode
|
||||
)
|
||||
# 根据订阅已下载集数更新缺失集数
|
||||
if downloaded_episodes:
|
||||
logger.info(f'订阅 {subscribe_name} 已下载集数:{downloaded_episodes}')
|
||||
# 该季原缺失信息
|
||||
no_exist_season = no_exists_item.get(begin_season)
|
||||
if no_exist_season:
|
||||
# 原集列表
|
||||
episode_list = no_exist_season.episodes
|
||||
# 原总集数
|
||||
total = no_exist_season.total_episode
|
||||
# 原开始集数
|
||||
start = no_exist_season.start_episode
|
||||
# 整季缺失
|
||||
if not episode_list:
|
||||
episode_list = list(range(start, total + 1))
|
||||
# 更新剧集列表
|
||||
episodes = list(set(episode_list).difference(set(downloaded_episodes)))
|
||||
# 更新集合
|
||||
no_exists[mediakey][begin_season] = NotExistMediaInfo(
|
||||
season=begin_season,
|
||||
episodes=episodes,
|
||||
total_episode=total,
|
||||
start_episode=start
|
||||
)
|
||||
else:
|
||||
# 开始集数
|
||||
start = start_episode or 1
|
||||
# 不存在的季
|
||||
no_exists[mediakey][begin_season] = NotExistMediaInfo(
|
||||
season=begin_season,
|
||||
episodes=list(set(range(start, total_episode + 1)).difference(set(downloaded_episodes))),
|
||||
total_episode=total_episode,
|
||||
start_episode=start
|
||||
)
|
||||
logger.info(f'订阅 {subscribe_name} 缺失剧集数更新为:{no_exists}')
|
||||
return no_exists
|
||||
|
||||
@eventmanager.register(EventType.SiteDeleted)
|
||||
|
||||
@@ -153,7 +153,10 @@ class SystemChain(ChainBase, metaclass=Singleton):
|
||||
"""
|
||||
获取前端版本
|
||||
"""
|
||||
version_file = Path(settings.FRONTEND_PATH) / "version.txt"
|
||||
if SystemUtils.is_frozen() and SystemUtils.is_windows():
|
||||
version_file = settings.CONFIG_PATH.parent / "nginx" / "html" / "version.txt"
|
||||
else:
|
||||
version_file = Path(settings.FRONTEND_PATH) / "version.txt"
|
||||
if version_file.exists():
|
||||
try:
|
||||
with open(version_file, 'r') as f:
|
||||
|
||||
@@ -124,5 +124,15 @@ class TmdbChain(ChainBase, metaclass=Singleton):
|
||||
while True:
|
||||
info = random.choice(infos)
|
||||
if info and info.backdrop_path:
|
||||
return f"https://{settings.TMDB_IMAGE_DOMAIN}/t/p/original{info.backdrop_path}"
|
||||
return info.backdrop_path
|
||||
return None
|
||||
|
||||
@cached(cache=TTLCache(maxsize=1, ttl=3600))
|
||||
def get_trending_wallpapers(self, num: int = 10) -> Optional[List[str]]:
|
||||
"""
|
||||
获取所有流行壁纸
|
||||
"""
|
||||
infos = self.tmdb_trending()
|
||||
if infos:
|
||||
return [info.backdrop_path for info in infos if info and info.backdrop_path][:num]
|
||||
return None
|
||||
|
||||
@@ -4,22 +4,24 @@ import threading
|
||||
from pathlib import Path
|
||||
from typing import List, Optional, Tuple, Union, Dict
|
||||
|
||||
from app import schemas
|
||||
from app.chain import ChainBase
|
||||
from app.chain.media import MediaChain
|
||||
from app.chain.tmdb import TmdbChain
|
||||
from app.core.config import settings
|
||||
from app.core.context import MediaInfo
|
||||
from app.core.meta import MetaBase
|
||||
from app.core.metainfo import MetaInfoPath
|
||||
from app.core.metainfo import MetaInfoPath, MetaInfo
|
||||
from app.db.downloadhistory_oper import DownloadHistoryOper
|
||||
from app.db.models.downloadhistory import DownloadHistory
|
||||
from app.db.models.transferhistory import TransferHistory
|
||||
from app.db.systemconfig_oper import SystemConfigOper
|
||||
from app.db.transferhistory_oper import TransferHistoryOper
|
||||
from app.helper.aliyun import AliyunHelper
|
||||
from app.helper.directory import DirectoryHelper
|
||||
from app.helper.format import FormatParser
|
||||
from app.helper.message import MessageHelper
|
||||
from app.helper.progress import ProgressHelper
|
||||
from app.helper.u115 import U115Helper
|
||||
from app.log import logger
|
||||
from app.schemas import TransferInfo, TransferTorrent, Notification, EpisodeFormat
|
||||
from app.schemas.types import TorrentStatus, EventType, MediaType, ProgressKey, NotificationType, MessageChannel, \
|
||||
@@ -44,7 +46,16 @@ class TransferChain(ChainBase):
|
||||
self.tmdbchain = TmdbChain()
|
||||
self.systemconfig = SystemConfigOper()
|
||||
self.directoryhelper = DirectoryHelper()
|
||||
self.messagehelper = MessageHelper()
|
||||
self.all_exts = settings.RMT_MEDIAEXT + settings.RMT_SUBEXT + settings.RMT_AUDIO_TRACK_EXT
|
||||
|
||||
def recommend_name(self, meta: MetaBase, mediainfo: MediaInfo) -> Optional[str]:
|
||||
"""
|
||||
获取重命名后的名称
|
||||
:param meta: 元数据
|
||||
:param mediainfo: 媒体信息
|
||||
:return: 重命名后的名称(含目录)
|
||||
"""
|
||||
return self.run_module("recommend_name", meta=meta, mediainfo=mediainfo)
|
||||
|
||||
def process(self) -> bool:
|
||||
"""
|
||||
@@ -75,13 +86,16 @@ class TransferChain(ChainBase):
|
||||
mediainfo = self.recognize_media(mtype=mtype,
|
||||
tmdbid=downloadhis.tmdbid,
|
||||
doubanid=downloadhis.doubanid)
|
||||
if mediainfo:
|
||||
# 补充图片
|
||||
self.obtain_images(mediainfo)
|
||||
else:
|
||||
# 非MoviePilot下载的任务,按文件识别
|
||||
mediainfo = None
|
||||
|
||||
# 执行转移
|
||||
self.do_transfer(path=torrent.path, mediainfo=mediainfo,
|
||||
download_hash=torrent.hash)
|
||||
self.__do_transfer(storage="local", path=torrent.path,
|
||||
mediainfo=mediainfo, download_hash=torrent.hash)
|
||||
|
||||
# 设置下载任务状态
|
||||
self.transfer_completed(hashs=torrent.hash, path=torrent.path)
|
||||
@@ -89,15 +103,20 @@ class TransferChain(ChainBase):
|
||||
logger.info("下载器文件转移执行完成")
|
||||
return True
|
||||
|
||||
def do_transfer(self, path: Path, meta: MetaBase = None,
|
||||
mediainfo: MediaInfo = None, download_hash: str = None,
|
||||
target: Path = None, transfer_type: str = None,
|
||||
season: int = None, epformat: EpisodeFormat = None,
|
||||
min_filesize: int = 0, scrape: bool = None,
|
||||
force: bool = False) -> Tuple[bool, str]:
|
||||
def __do_transfer(self, storage: str, path: Path, drive_id: str = None, fileid: str = None, filetype: str = None,
|
||||
meta: MetaBase = None, mediainfo: MediaInfo = None,
|
||||
download_hash: str = None,
|
||||
target: Path = None, transfer_type: str = None,
|
||||
season: int = None, epformat: EpisodeFormat = None,
|
||||
min_filesize: int = 0, scrape: bool = None,
|
||||
force: bool = False) -> Tuple[bool, str]:
|
||||
"""
|
||||
执行一个复杂目录的转移操作
|
||||
:param storage: 存储器
|
||||
:param path: 待转移目录或文件
|
||||
:param drive_id: 网盘ID
|
||||
:param fileid: 文件ID
|
||||
:param filetype: 文件类型
|
||||
:param meta: 元数据
|
||||
:param mediainfo: 媒体信息
|
||||
:param download_hash: 下载记录hash
|
||||
@@ -113,20 +132,81 @@ class TransferChain(ChainBase):
|
||||
if not transfer_type:
|
||||
transfer_type = settings.TRANSFER_TYPE
|
||||
|
||||
# 获取待转移路径清单
|
||||
trans_paths = self.__get_trans_paths(path)
|
||||
if not trans_paths:
|
||||
logger.warn(f"{path.name} 没有找到可转移的媒体文件")
|
||||
return False, f"{path.name} 没有找到可转移的媒体文件"
|
||||
|
||||
# 有集自定义格式
|
||||
# 自定义格式
|
||||
formaterHandler = FormatParser(eformat=epformat.format,
|
||||
details=epformat.detail,
|
||||
part=epformat.part,
|
||||
offset=epformat.offset) if epformat else None
|
||||
|
||||
# 整理屏蔽词
|
||||
transfer_exclude_words = self.systemconfig.get(SystemConfigKey.TransferExcludeWords)
|
||||
|
||||
# 开始进度
|
||||
self.progress.start(ProgressKey.FileTransfer)
|
||||
|
||||
# 本地存储
|
||||
if storage == "local":
|
||||
# 本地整理
|
||||
result = self.__transfer_local(path=path, meta=meta, mediainfo=mediainfo,
|
||||
formaterHandler=formaterHandler,
|
||||
transfer_exclude_words=transfer_exclude_words,
|
||||
min_filesize=min_filesize, transfer_type=transfer_type,
|
||||
target=target, season=season, scrape=scrape,
|
||||
download_hash=download_hash, force=force)
|
||||
else:
|
||||
# 网盘整理
|
||||
result = self.__transfer_online(storage=storage,
|
||||
fileitem=schemas.FileItem(
|
||||
path=str(path) + ("/" if filetype == "dir" else ""),
|
||||
type=filetype,
|
||||
drive_id=drive_id,
|
||||
fileid=fileid,
|
||||
name=path.name
|
||||
),
|
||||
meta=meta,
|
||||
mediainfo=mediainfo)
|
||||
if result and result[0] and scrape:
|
||||
# 刮削元数据
|
||||
self.progress.update(value=0,
|
||||
text=f"正在刮削 {path} ...",
|
||||
key=ProgressKey.FileTransfer)
|
||||
self.mediachain.manual_scrape(storage=storage,
|
||||
fileitem=schemas.FileItem(
|
||||
path=str(path) + ("/" if filetype == "dir" else ""),
|
||||
type=filetype,
|
||||
drive_id=drive_id,
|
||||
fileid=fileid,
|
||||
name=path.name
|
||||
),
|
||||
meta=meta,
|
||||
mediainfo=mediainfo)
|
||||
# 结速进度
|
||||
self.progress.end(ProgressKey.FileTransfer)
|
||||
return result
|
||||
|
||||
def __transfer_local(self, path: Path, meta: MetaBase = None, mediainfo: MediaInfo = None,
|
||||
formaterHandler: FormatParser = None, transfer_exclude_words: List[str] = None,
|
||||
min_filesize: int = 0, transfer_type: str = None, target: Path = None,
|
||||
season: int = None, scrape: bool = None, download_hash: str = None,
|
||||
force: bool = False) -> Tuple[bool, str]:
|
||||
"""
|
||||
整理一个本地目录
|
||||
"""
|
||||
|
||||
# 汇总错误信息
|
||||
err_msgs: List[str] = []
|
||||
# 已处理数量
|
||||
processed_num = 0
|
||||
# 失败数量
|
||||
fail_num = 0
|
||||
# 跳过数量
|
||||
skip_num = 0
|
||||
|
||||
# 获取待转移路径清单
|
||||
trans_paths = self.__get_trans_paths(path)
|
||||
if not trans_paths:
|
||||
logger.warn(f"{path.name} 没有找到可转移的媒体文件")
|
||||
return False, f"{path.name} 没有找到可转移的媒体文件"
|
||||
# 目录所有文件清单
|
||||
transfer_files = SystemUtils.list_files(directory=path,
|
||||
extensions=settings.RMT_MEDIAEXT,
|
||||
@@ -135,23 +215,12 @@ class TransferChain(ChainBase):
|
||||
# 有集自定义格式,过滤文件
|
||||
transfer_files = [f for f in transfer_files if formaterHandler.match(f.name)]
|
||||
|
||||
# 汇总错误信息
|
||||
err_msgs: List[str] = []
|
||||
# 总文件数
|
||||
total_num = len(transfer_files)
|
||||
# 已处理数量
|
||||
processed_num = 0
|
||||
# 失败数量
|
||||
fail_num = 0
|
||||
# 跳过数量
|
||||
skip_num = 0
|
||||
self.progress.update(value=0,
|
||||
text=f"开始转移 {path},共 {total_num} 个文件 ...",
|
||||
key=ProgressKey.FileTransfer)
|
||||
|
||||
# 整理屏蔽词
|
||||
transfer_exclude_words = self.systemconfig.get(SystemConfigKey.TransferExcludeWords)
|
||||
|
||||
# 处理所有待转移目录或文件,默认一个转移路径或文件只有一个媒体信息
|
||||
for trans_path in trans_paths:
|
||||
# 汇总季集清单
|
||||
@@ -359,22 +428,10 @@ class TransferChain(ChainBase):
|
||||
transfers[mkey].file_list_new.extend(transferinfo.file_list_new)
|
||||
transfers[mkey].fail_list.extend(transferinfo.fail_list)
|
||||
|
||||
# 硬链接检查
|
||||
temp_transfer_type = transfer_type
|
||||
if transfer_type == "link":
|
||||
if not SystemUtils.is_hardlink(file_path, transferinfo.target_path):
|
||||
logger.warn(
|
||||
f"{file_path} 与 {transferinfo.target_path} 不是同一硬链接文件路径,请检查存储空间占用和整理耗时,确认是否为复制")
|
||||
self.messagehelper.put(
|
||||
f"{file_path} 与 {transferinfo.target_path} 不是同一硬链接文件路径,疑似硬链接失败,请检查是否为复制",
|
||||
title="硬链接失败",
|
||||
role="system")
|
||||
temp_transfer_type = "copy"
|
||||
|
||||
# 新增转移成功历史记录
|
||||
self.transferhis.add_success(
|
||||
src_path=file_path,
|
||||
mode=temp_transfer_type,
|
||||
mode=transfer_type,
|
||||
download_hash=download_hash,
|
||||
meta=file_meta,
|
||||
mediainfo=file_mediainfo,
|
||||
@@ -384,7 +441,7 @@ class TransferChain(ChainBase):
|
||||
if transferinfo.need_scrape:
|
||||
self.scrape_metadata(path=transferinfo.target_path,
|
||||
mediainfo=file_mediainfo,
|
||||
transfer_type=temp_transfer_type,
|
||||
transfer_type=transfer_type,
|
||||
metainfo=file_meta)
|
||||
# 更新进度
|
||||
processed_num += 1
|
||||
@@ -417,7 +474,6 @@ class TransferChain(ChainBase):
|
||||
'mediainfo': media,
|
||||
'transferinfo': transfer_info
|
||||
})
|
||||
|
||||
# 结束进度
|
||||
logger.info(f"{path} 转移完成,共 {total_num} 个文件,"
|
||||
f"失败 {fail_num} 个,跳过 {skip_num} 个")
|
||||
@@ -426,10 +482,218 @@ class TransferChain(ChainBase):
|
||||
text=f"{path} 转移完成,共 {total_num} 个文件,"
|
||||
f"失败 {fail_num} 个,跳过 {skip_num} 个",
|
||||
key=ProgressKey.FileTransfer)
|
||||
self.progress.end(ProgressKey.FileTransfer)
|
||||
|
||||
return True, "\n".join(err_msgs)
|
||||
|
||||
def __transfer_online(self, storage: str, fileitem: schemas.FileItem,
|
||||
meta: MetaBase, mediainfo: MediaInfo) -> Tuple[bool, str]:
|
||||
"""
|
||||
整理一个远程目录
|
||||
"""
|
||||
|
||||
def __list_files(_storage: str, _fileid: str,
|
||||
_path: str = None, _drive_id: str = None) -> List[schemas.FileItem]:
|
||||
"""
|
||||
列出下级文件
|
||||
"""
|
||||
if _storage == "aliyun":
|
||||
return AliyunHelper().list(drive_id=_drive_id, parent_file_id=_fileid, path=_path)
|
||||
elif _storage == "u115":
|
||||
return U115Helper().list(parent_file_id=_fileid, path=_path)
|
||||
return []
|
||||
|
||||
def __rename_file(_storage: str, _deive_id: str, _fileid: str, _name: str) -> bool:
|
||||
"""
|
||||
重命名文件
|
||||
"""
|
||||
if _storage == "aliyun":
|
||||
return AliyunHelper().rename(drive_id=_deive_id, file_id=_fileid, name=_name)
|
||||
elif _storage == "u115":
|
||||
return U115Helper().rename(file_id=_fileid, name=_name)
|
||||
return False
|
||||
|
||||
def __create_folder(_storage: str, _drive_id: str, _parent_fileid: str,
|
||||
_name: str, _path: str) -> Optional[schemas.FileItem]:
|
||||
"""
|
||||
创建目录
|
||||
"""
|
||||
if _storage == "aliyun":
|
||||
return AliyunHelper().create_folder(drive_id=_drive_id, parent_file_id=_parent_fileid,
|
||||
name=_name, path=_path)
|
||||
elif _storage == "u115":
|
||||
return U115Helper().create_folder(parent_file_id=_parent_fileid, name=_name, path=_path)
|
||||
return None
|
||||
|
||||
def __move_file(_storage: str, _drive_id: str, _fileid: str, _target_fileid: str) -> bool:
|
||||
"""
|
||||
移动文件
|
||||
"""
|
||||
if _storage == "aliyun":
|
||||
return AliyunHelper().move(drive_id=_drive_id, file_id=_fileid, target_id=_target_fileid)
|
||||
elif _storage == "u115":
|
||||
return U115Helper().move(file_id=_fileid, target_id=_target_fileid)
|
||||
return False
|
||||
|
||||
def __remove_dir(_storage: str, _drive_id: str, _fileid: str) -> bool:
|
||||
"""
|
||||
删除目录
|
||||
"""
|
||||
if _storage == "aliyun":
|
||||
return AliyunHelper().delete(drive_id=_drive_id, file_id=_fileid)
|
||||
elif _storage == "u115":
|
||||
return U115Helper().delete(file_id=_fileid)
|
||||
return False
|
||||
|
||||
logger.info(f"开始整理 {fileitem.path} ...")
|
||||
self.progress.update(value=0,
|
||||
text=f"正在整理 {fileitem.path} ...",
|
||||
key=ProgressKey.FileTransfer)
|
||||
# 重新识别
|
||||
if not meta:
|
||||
# 文件元数据
|
||||
meta = MetaInfoPath(Path(fileitem.path))
|
||||
if not mediainfo:
|
||||
mediainfo = self.mediachain.recognize_by_meta(meta)
|
||||
if not mediainfo:
|
||||
logger.warn(f"{fileitem.name} 未识别到媒体信息")
|
||||
return False, f"{fileitem.name} 未识别到媒体信息"
|
||||
# 获取完整的路径命名
|
||||
full_names = self.recommend_name(meta=meta, mediainfo=mediainfo)
|
||||
if not full_names:
|
||||
logger.warn(f"{fileitem.path} 未获取到命名")
|
||||
return False, f"{fileitem.path} 未获取到命名"
|
||||
|
||||
if mediainfo.type == MediaType.TV:
|
||||
# 电视剧
|
||||
[folder_name, season_name, file_name] = Path(full_names).parts
|
||||
else:
|
||||
# 电影
|
||||
season_name = None
|
||||
[folder_name, file_name] = Path(full_names).parts
|
||||
|
||||
# 如果是单个文件,则直接重命名
|
||||
if fileitem.type == "file":
|
||||
# 重命名文件
|
||||
logger.info(f"正在整理 {fileitem.name} => {file_name} ...")
|
||||
if not __rename_file(_storage=storage, _deive_id=fileitem.drive_id, _fileid=fileitem.fileid, _name=file_name):
|
||||
logger.error(f"{fileitem.name} 重命名失败")
|
||||
return False, f"{fileitem.name} 重命名失败"
|
||||
logger.info(f"{fileitem.path} 整理完成")
|
||||
else:
|
||||
# 目录处理
|
||||
if mediainfo.type == MediaType.MOVIE:
|
||||
# 电影目录
|
||||
# 重命名当前目录
|
||||
logger.info(f"正在重命名 {fileitem.path} => {folder_name} ...")
|
||||
if not __rename_file(_storage=storage, _deive_id=fileitem.drive_id,
|
||||
_fileid=fileitem.fileid, _name=folder_name):
|
||||
logger.error(f"{fileitem.path} 重命名失败")
|
||||
return False, f"{fileitem.path} 重命名失败"
|
||||
logger.info(f"{fileitem.path} 重命名完成")
|
||||
# 处理所有子文件或目录
|
||||
files = __list_files(_storage=storage, _fileid=fileitem.fileid,
|
||||
_drive_id=fileitem.drive_id, _path=fileitem.path)
|
||||
if not files:
|
||||
logger.info(f"{fileitem.path} 未找到文件,删除空目录")
|
||||
if not __remove_dir(_storage=storage, _drive_id=fileitem.drive_id, _fileid=fileitem.fileid):
|
||||
logger.error(f"{fileitem.path} 删除失败")
|
||||
return False, f"{fileitem.path} 删除失败"
|
||||
return True, ""
|
||||
for file in files:
|
||||
# 过滤不处理的文件
|
||||
if file.type == "file" and str(file.extension) in ['nfo', 'jpg', 'png']:
|
||||
continue
|
||||
# 重新识别文件或目录
|
||||
file_meta = MetaInfoPath(Path(file.path))
|
||||
if not file_meta.name:
|
||||
# 过滤掉无效文件
|
||||
continue
|
||||
file_media = self.mediachain.recognize_by_meta(file_meta)
|
||||
if not file_media:
|
||||
logger.warn(f"{file.name} 未识别到媒体信息")
|
||||
continue
|
||||
# 整理这个文件或目录
|
||||
self.__transfer_online(storage=storage, fileitem=file, meta=file_meta, mediainfo=file_media)
|
||||
else:
|
||||
# 电视剧目录
|
||||
# 判断当前目录类型
|
||||
folder_meta = MetaInfo(fileitem.name)
|
||||
if folder_meta.begin_season and not folder_meta.name:
|
||||
# 季目录
|
||||
logger.info(f"正在重命名 {fileitem.path} => {season_name} ...")
|
||||
if not __rename_file(_storage=storage, _deive_id=fileitem.drive_id,
|
||||
_fileid=fileitem.fileid, _name=season_name):
|
||||
logger.error(f"{fileitem.path} 重命名失败")
|
||||
return False, f"{fileitem.path} 重命名失败"
|
||||
logger.info(f"{fileitem.path} 重命名完成")
|
||||
elif folder_meta.name:
|
||||
# 根目录,重命名当前目录
|
||||
logger.info(f"正在重命名 {fileitem.path} => {folder_name} ...")
|
||||
if not __rename_file(_storage=storage, _deive_id=fileitem.drive_id,
|
||||
_fileid=fileitem.fileid, _name=folder_name):
|
||||
logger.error(f"{fileitem.path} 重命名失败")
|
||||
return False, f"{fileitem.path} 重命名失败"
|
||||
logger.info(f"{fileitem.path} 重命名完成")
|
||||
# 是否有季
|
||||
if folder_meta.begin_season:
|
||||
# 创建季目录
|
||||
logger.info(f"正在创建目录 {fileitem.path}{season_name} ...")
|
||||
season_dir = __create_folder(_storage=storage, _drive_id=fileitem.drive_id,
|
||||
_parent_fileid=fileitem.fileid, _name=season_name,
|
||||
_path=fileitem.path)
|
||||
if not season_dir:
|
||||
logger.error(f"{fileitem.path}/{season_name} 创建失败")
|
||||
return False, f"{fileitem.path}/{season_name} 创建失败"
|
||||
logger.info(f"{fileitem.path}/{season_name} 创建完成")
|
||||
# 移动当前目录下的所有文件到季目录
|
||||
files = __list_files(_storage=storage, _fileid=fileitem.fileid,
|
||||
_drive_id=fileitem.drive_id, _path=fileitem.path)
|
||||
if not files:
|
||||
logger.error(f"{fileitem.path} 未找到文件,删除空目录")
|
||||
if not __remove_dir(_storage=storage, _drive_id=fileitem.drive_id, _fileid=fileitem.fileid):
|
||||
logger.error(f"{fileitem.path} 删除失败")
|
||||
return False, f"{fileitem.path} 删除失败"
|
||||
logger.info(f"{fileitem.path} 已删除")
|
||||
return True, ""
|
||||
for file in files:
|
||||
if file.type == "dir":
|
||||
continue
|
||||
logger.info(f"正在移动 {file.path} => {season_dir.path}...")
|
||||
if not __move_file(_storage=storage, _drive_id=fileitem.drive_id,
|
||||
_fileid=file.fileid, _target_fileid=season_dir.fileid):
|
||||
logger.error(f"{file.name} 移动失败")
|
||||
return False, f"{file.name} 移动失败"
|
||||
logger.info(f"{file.path} 移动完成")
|
||||
# 修改当前目录为季目录
|
||||
fileitem = season_dir
|
||||
# 列出当前目录下所有的文件或目录,并进行重命名整理
|
||||
files = __list_files(_storage=storage, _fileid=fileitem.fileid,
|
||||
_drive_id=fileitem.drive_id, _path=fileitem.path)
|
||||
if not files:
|
||||
logger.info(f"{fileitem.path} 未找到文件,删除空目录")
|
||||
if not __remove_dir(_storage=storage, _drive_id=fileitem.drive_id, _fileid=fileitem.fileid):
|
||||
logger.error(f"{fileitem.path} 删除失败")
|
||||
return False, f"{fileitem.path} 删除失败"
|
||||
logger.info(f"{fileitem.path} 已删除")
|
||||
return True, ""
|
||||
for file in files:
|
||||
# 过滤不处理的文件
|
||||
if file.type == "file" and str(file.extension) in ['nfo', 'jpg', 'png']:
|
||||
continue
|
||||
# 重新识别文件或目录
|
||||
file_meta = MetaInfoPath(Path(file.path))
|
||||
file_media = self.mediachain.recognize_by_meta(file_meta)
|
||||
if not file_media:
|
||||
logger.warn(f"{file.name} 未识别到媒体信息")
|
||||
continue
|
||||
# 整理这个文件或目录
|
||||
self.__transfer_online(storage=storage, fileitem=file, meta=file_meta, mediainfo=file_media)
|
||||
|
||||
logger.info(f"{fileitem.path} 整理完成")
|
||||
self.progress.update(value=0,
|
||||
text=f"{fileitem.path} 整理完成",
|
||||
key=ProgressKey.FileTransfer)
|
||||
return True, ""
|
||||
|
||||
@staticmethod
|
||||
def __get_trans_paths(directory: Path):
|
||||
"""
|
||||
@@ -503,16 +767,16 @@ class TransferChain(ChainBase):
|
||||
if not type_str or type_str not in [MediaType.MOVIE.value, MediaType.TV.value]:
|
||||
args_error()
|
||||
return
|
||||
state, errmsg = self.re_transfer(logid=int(logid),
|
||||
mtype=MediaType(type_str),
|
||||
mediaid=media_id)
|
||||
state, errmsg = self.__re_transfer(logid=int(logid),
|
||||
mtype=MediaType(type_str),
|
||||
mediaid=media_id)
|
||||
if not state:
|
||||
self.post_message(Notification(channel=channel, title="手动整理失败",
|
||||
text=errmsg, userid=userid, link=settings.MP_DOMAIN('#/history')))
|
||||
return
|
||||
|
||||
def re_transfer(self, logid: int, mtype: MediaType = None,
|
||||
mediaid: str = None) -> Tuple[bool, str]:
|
||||
def __re_transfer(self, logid: int, mtype: MediaType = None,
|
||||
mediaid: str = None) -> Tuple[bool, str]:
|
||||
"""
|
||||
根据历史记录,重新识别转移,只支持简单条件
|
||||
:param logid: 历史记录ID
|
||||
@@ -547,16 +811,22 @@ class TransferChain(ChainBase):
|
||||
self.delete_files(Path(history.dest))
|
||||
|
||||
# 强制转移
|
||||
state, errmsg = self.do_transfer(path=src_path,
|
||||
mediainfo=mediainfo,
|
||||
download_hash=history.download_hash,
|
||||
force=True)
|
||||
state, errmsg = self.__do_transfer(storage="local",
|
||||
path=src_path,
|
||||
mediainfo=mediainfo,
|
||||
download_hash=history.download_hash,
|
||||
force=True)
|
||||
if not state:
|
||||
return False, errmsg
|
||||
|
||||
return True, ""
|
||||
|
||||
def manual_transfer(self, in_path: Path,
|
||||
def manual_transfer(self,
|
||||
storage: str,
|
||||
in_path: Path,
|
||||
drive_id: str = None,
|
||||
fileid: str = None,
|
||||
filetype: str = None,
|
||||
target: Path = None,
|
||||
tmdbid: int = None,
|
||||
doubanid: str = None,
|
||||
@@ -569,7 +839,11 @@ class TransferChain(ChainBase):
|
||||
force: bool = False) -> Tuple[bool, Union[str, list]]:
|
||||
"""
|
||||
手动转移,支持复杂条件,带进度显示
|
||||
:param storage: 存储器
|
||||
:param in_path: 源文件路径
|
||||
:param drive_id: 网盘ID
|
||||
:param fileid: 文件ID
|
||||
:param filetype: 文件类型
|
||||
:param target: 目标路径
|
||||
:param tmdbid: TMDB ID
|
||||
:param doubanid: 豆瓣ID
|
||||
@@ -589,14 +863,21 @@ class TransferChain(ChainBase):
|
||||
mediainfo: MediaInfo = self.mediachain.recognize_media(tmdbid=tmdbid, doubanid=doubanid, mtype=mtype)
|
||||
if not mediainfo:
|
||||
return False, f"媒体信息识别失败,tmdbid:{tmdbid},doubanid:{doubanid},type: {mtype.value}"
|
||||
else:
|
||||
# 更新媒体图片
|
||||
self.obtain_images(mediainfo=mediainfo)
|
||||
# 开始进度
|
||||
self.progress.start(ProgressKey.FileTransfer)
|
||||
self.progress.update(value=0,
|
||||
text=f"开始转移 {in_path} ...",
|
||||
key=ProgressKey.FileTransfer)
|
||||
# 开始转移
|
||||
state, errmsg = self.do_transfer(
|
||||
state, errmsg = self.__do_transfer(
|
||||
storage=storage,
|
||||
path=in_path,
|
||||
drive_id=drive_id,
|
||||
fileid=fileid,
|
||||
filetype=filetype,
|
||||
mediainfo=mediainfo,
|
||||
target=target,
|
||||
transfer_type=transfer_type,
|
||||
@@ -614,13 +895,18 @@ class TransferChain(ChainBase):
|
||||
return True, ""
|
||||
else:
|
||||
# 没有输入TMDBID时,按文件识别
|
||||
state, errmsg = self.do_transfer(path=in_path,
|
||||
target=target,
|
||||
transfer_type=transfer_type,
|
||||
season=season,
|
||||
epformat=epformat,
|
||||
min_filesize=min_filesize,
|
||||
force=force)
|
||||
state, errmsg = self.__do_transfer(storage=storage,
|
||||
path=in_path,
|
||||
drive_id=drive_id,
|
||||
fileid=fileid,
|
||||
filetype=filetype,
|
||||
target=target,
|
||||
transfer_type=transfer_type,
|
||||
season=season,
|
||||
epformat=epformat,
|
||||
min_filesize=min_filesize,
|
||||
scrape=scrape,
|
||||
force=force)
|
||||
return state, errmsg
|
||||
|
||||
def send_transfer_message(self, meta: MetaBase, mediainfo: MediaInfo,
|
||||
@@ -663,6 +949,11 @@ class TransferChain(ChainBase):
|
||||
for file in files:
|
||||
Path(file).unlink()
|
||||
logger.warn(f"文件 {path} 已删除")
|
||||
# 删除thumb图片
|
||||
thumb_file = path.parent / (path.stem + "-thumb.jpg")
|
||||
if thumb_file.exists():
|
||||
thumb_file.unlink()
|
||||
logger.info(f"文件 {thumb_file} 已删除")
|
||||
# 需要删除父目录
|
||||
elif str(path.parent) == str(path.root):
|
||||
# 根目录,不删除
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
import copy
|
||||
import importlib
|
||||
import threading
|
||||
import traceback
|
||||
@@ -11,8 +12,7 @@ from app.chain.subscribe import SubscribeChain
|
||||
from app.chain.system import SystemChain
|
||||
from app.chain.transfer import TransferChain
|
||||
from app.core.config import settings
|
||||
from app.core.event import Event as ManagerEvent
|
||||
from app.core.event import eventmanager, EventManager
|
||||
from app.core.event import Event as ManagerEvent, eventmanager, EventManager
|
||||
from app.core.plugin import PluginManager
|
||||
from app.helper.message import MessageHelper
|
||||
from app.helper.thread import ThreadHelper
|
||||
@@ -194,7 +194,7 @@ class Command(metaclass=Singleton):
|
||||
# 插件事件
|
||||
self.threader.submit(
|
||||
self.pluginmanager.run_plugin_method,
|
||||
class_name, method_name, event
|
||||
class_name, method_name, copy.deepcopy(event)
|
||||
)
|
||||
|
||||
else:
|
||||
@@ -217,7 +217,7 @@ class Command(metaclass=Singleton):
|
||||
if hasattr(class_obj, method_name):
|
||||
self.threader.submit(
|
||||
getattr(class_obj, method_name),
|
||||
event
|
||||
copy.deepcopy(event)
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(f"事件处理出错:{str(e)} - {traceback.format_exc()}")
|
||||
@@ -273,6 +273,8 @@ class Command(metaclass=Singleton):
|
||||
data = cmd_data.get("data") or {}
|
||||
data['channel'] = channel
|
||||
data['user'] = userid
|
||||
if data_str:
|
||||
data['args'] = data_str
|
||||
cmd_data['data'] = data
|
||||
command['func'](**cmd_data)
|
||||
elif args_num == 2:
|
||||
|
||||
@@ -199,6 +199,8 @@ class Settings(BaseSettings):
|
||||
COOKIECLOUD_PASSWORD: Optional[str] = None
|
||||
# CookieCloud同步间隔(分钟)
|
||||
COOKIECLOUD_INTERVAL: Optional[int] = 60 * 24
|
||||
# CookieCloud同步黑名单,多个域名,分割
|
||||
COOKIECLOUD_BLACKLIST: Optional[str] = None
|
||||
# OCR服务器地址
|
||||
OCR_HOST: str = "https://movie-pilot.org"
|
||||
# CookieCloud对应的浏览器UA
|
||||
@@ -222,14 +224,20 @@ class Settings(BaseSettings):
|
||||
PLUGIN_MARKET: str = "https://github.com/jxxghp/MoviePilot-Plugins,https://github.com/thsrite/MoviePilot-Plugins,https://github.com/honue/MoviePilot-Plugins,https://github.com/InfinityPacer/MoviePilot-Plugins"
|
||||
# Github token,提高请求api限流阈值 ghp_****
|
||||
GITHUB_TOKEN: Optional[str] = None
|
||||
# 指定的仓库Github token,多个仓库使用,分隔,格式:{user1}/{repo1}:ghp_****,{user2}/{repo2}:github_pat_****
|
||||
REPO_GITHUB_TOKEN: Optional[str] = None
|
||||
# Github代理服务器,格式:https://mirror.ghproxy.com/
|
||||
GITHUB_PROXY: Optional[str] = ''
|
||||
# 自动检查和更新站点资源包(站点索引、认证等)
|
||||
AUTO_UPDATE_RESOURCE: bool = True
|
||||
AUTO_UPDATE_RESOURCE: bool = False
|
||||
# 元数据识别缓存过期时间(小时)
|
||||
META_CACHE_EXPIRE: int = 0
|
||||
# 是否启用DOH解析域名
|
||||
DOH_ENABLE: bool = True
|
||||
# 使用 DOH 解析的域名列表
|
||||
DOH_DOMAINS: str = "api.themoviedb.org,api.tmdb.org,webservice.fanart.tv,api.github.com,github.com,raw.githubusercontent.com,api.telegram.org"
|
||||
# DOH 解析服务器列表
|
||||
DOH_RESOLVERS: str = "1.0.0.1,1.1.1.1,9.9.9.9,149.112.112.112"
|
||||
# 搜索多个名称
|
||||
SEARCH_MULTIPLE_NAME: bool = False
|
||||
# 订阅数据共享
|
||||
@@ -356,6 +364,37 @@ class Settings(BaseSettings):
|
||||
}
|
||||
return {}
|
||||
|
||||
def REPO_GITHUB_HEADERS(self, repo: str = None):
|
||||
"""
|
||||
Github指定的仓库请求头
|
||||
:param repo: 指定的仓库名称,格式为 "user/repo"。如果为空,或者没有找到指定仓库请求头,则返回默认的请求头信息
|
||||
:return: Github请求头
|
||||
"""
|
||||
# 如果没有传入指定的仓库名称,或没有配置指定的仓库Token,则返回默认的请求头信息
|
||||
if not repo or not self.REPO_GITHUB_TOKEN:
|
||||
return self.GITHUB_HEADERS
|
||||
headers = {}
|
||||
# 格式:{user1}/{repo1}:ghp_****,{user2}/{repo2}:github_pat_****
|
||||
token_pairs = self.REPO_GITHUB_TOKEN.split(",")
|
||||
for token_pair in token_pairs:
|
||||
try:
|
||||
parts = token_pair.split(":")
|
||||
if len(parts) != 2:
|
||||
print(f"无效的令牌格式: {token_pair}")
|
||||
continue
|
||||
repo_info = parts[0].strip()
|
||||
token = parts[1].strip()
|
||||
if not repo_info or not token:
|
||||
print(f"无效的令牌或仓库信息: {token_pair}")
|
||||
continue
|
||||
headers[repo_info] = {
|
||||
"Authorization": f"Bearer {token}"
|
||||
}
|
||||
except Exception as e:
|
||||
print(f"处理令牌对 '{token_pair}' 时出错: {e}")
|
||||
# 如果传入了指定的仓库名称,则返回该仓库的请求头信息,否则返回默认请求头
|
||||
return headers.get(repo, self.GITHUB_HEADERS)
|
||||
|
||||
@property
|
||||
def DEFAULT_DOWNLOADER(self):
|
||||
"""
|
||||
|
||||
@@ -347,10 +347,10 @@ class MediaInfo:
|
||||
return [], []
|
||||
directors = []
|
||||
actors = []
|
||||
for cast in _credits.get("cast"):
|
||||
for cast in _credits.get("cast") or []:
|
||||
if cast.get("known_for_department") == "Acting":
|
||||
actors.append(cast)
|
||||
for crew in _credits.get("crew"):
|
||||
for crew in _credits.get("crew") or []:
|
||||
if crew.get("job") in ["Director", "Writer", "Editor", "Producer"]:
|
||||
directors.append(crew)
|
||||
return directors, actors
|
||||
|
||||
@@ -73,6 +73,15 @@ class MetaVideo(MetaBase):
|
||||
self.begin_episode = int(title)
|
||||
self.type = MediaType.TV
|
||||
return
|
||||
# 全名为Season xx 及 Sxx 直接返回
|
||||
season_full_res = re.search(r"^Season\s+(\d{1,3})$|^S(\d{1,3})$", title)
|
||||
if season_full_res:
|
||||
self.type = MediaType.TV
|
||||
season = season_full_res.group(1)
|
||||
if season:
|
||||
self.begin_season = int(season)
|
||||
self.total_season = 1
|
||||
return
|
||||
# 去掉名称中第1个[]的内容
|
||||
title = re.sub(r'%s' % self._name_no_begin_re, "", title, count=1)
|
||||
# 把xxxx-xxxx年份换成前一个年份,常出现在季集上
|
||||
|
||||
@@ -71,7 +71,10 @@ class ReleaseGroupsMatcher(metaclass=Singleton):
|
||||
"ultrahd": [],
|
||||
"others": ['B(?:MDru|eyondHD|TN)', 'C(?:fandora|trlhd|MRG)', 'DON', 'EVO', 'FLUX', 'HONE(?:|yG)',
|
||||
'N(?:oGroup|T(?:b|G))', 'PandaMoon', 'SMURF', 'T(?:EPES|aengoo|rollHD )'],
|
||||
"anime": ['ANi', 'HYSUB', 'KTXP', 'LoliHouse', 'MCE', 'Nekomoe kissaten', '(?:Lilith|NC)-Raws', '织梦字幕组']
|
||||
"anime": ['ANi', 'HYSUB', 'KTXP', 'LoliHouse', 'MCE', 'Nekomoe kissaten', 'SweetSub', 'MingY',
|
||||
'(?:Lilith|NC)-Raws', '织梦字幕组', '枫叶字幕组', '猎户手抄部', '喵萌奶茶屋', '漫猫字幕社',
|
||||
'霜庭云花Sub', '北宇治字幕组', '氢气烤肉架', '云歌字幕组', '萌樱字幕组','极影字幕社','悠哈璃羽字幕社',
|
||||
'❀拨雪寻春❀', '沸羊羊(?:制作|字幕组)', '(?:桜|樱)都字幕组',]
|
||||
}
|
||||
|
||||
def __init__(self):
|
||||
|
||||
@@ -1,11 +1,13 @@
|
||||
import concurrent
|
||||
import concurrent.futures
|
||||
import importlib.util
|
||||
import inspect
|
||||
import os
|
||||
import threading
|
||||
import time
|
||||
import traceback
|
||||
from typing import List, Any, Dict, Tuple, Optional, Callable
|
||||
from pathlib import Path
|
||||
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union
|
||||
|
||||
from watchdog.events import FileSystemEventHandler
|
||||
from watchdog.observers import Observer
|
||||
@@ -13,13 +15,14 @@ from watchdog.observers import Observer
|
||||
from app import schemas
|
||||
from app.core.config import settings
|
||||
from app.core.event import eventmanager
|
||||
from app.db.systemconfig_oper import SystemConfigOper
|
||||
from app.db.plugindata_oper import PluginDataOper
|
||||
from app.db.systemconfig_oper import SystemConfigOper
|
||||
from app.helper.module import ModuleHelper
|
||||
from app.helper.plugin import PluginHelper
|
||||
from app.helper.sites import SitesHelper
|
||||
from app.log import logger
|
||||
from app.schemas.types import SystemConfigKey
|
||||
from app.utils.crypto import RSAUtils
|
||||
from app.utils.object import ObjectUtils
|
||||
from app.utils.singleton import Singleton
|
||||
from app.utils.string import StringUtils
|
||||
@@ -42,21 +45,35 @@ class PluginMonitorHandler(FileSystemEventHandler):
|
||||
"""
|
||||
if event.is_directory:
|
||||
return
|
||||
# 使用 pathlib 处理文件路径,跳过非 .py 文件以及 pycache 目录中的文件
|
||||
event_path = Path(event.src_path)
|
||||
if not event_path.name.endswith(".py") or "pycache" in event_path.parts:
|
||||
return
|
||||
|
||||
current_time = time.time()
|
||||
if current_time - self.__last_modified < self.__timeout:
|
||||
return
|
||||
self.__last_modified = current_time
|
||||
# 读取插件根目录下的__init__.py文件,读取class XXXX(_PluginBase)的类名
|
||||
try:
|
||||
# 使用os.path和pathlib处理跨平台的路径问题
|
||||
plugin_dir = event.src_path.split("plugins" + os.sep)[1].split(os.sep)[0]
|
||||
init_file = settings.ROOT_PATH / "app" / "plugins" / plugin_dir / "__init__.py"
|
||||
plugins_root = settings.ROOT_PATH / "app" / "plugins"
|
||||
# 确保修改的文件在 plugins 目录下
|
||||
if plugins_root not in event_path.parents:
|
||||
return
|
||||
# 获取插件目录路径,没有找到__init__.py时,说明不是有效包,跳过插件重载
|
||||
# 插件重载目前没有支持app/plugins/plugin/package/__init__.py的场景,这里也不做支持
|
||||
plugin_dir = event_path.parent
|
||||
init_file = plugin_dir / "__init__.py"
|
||||
if not init_file.exists():
|
||||
logger.debug(f"{plugin_dir} 下没有找到 __init__.py,跳过插件重载")
|
||||
return
|
||||
|
||||
with open(init_file, "r", encoding="utf-8") as f:
|
||||
lines = f.readlines()
|
||||
pid = None
|
||||
for line in lines:
|
||||
if line.startswith("class") and "(_PluginBase)" in line:
|
||||
pid = line.split("class ")[1].split("(_PluginBase)")[0]
|
||||
pid = line.split("class ")[1].split("(_PluginBase)")[0].strip()
|
||||
if pid:
|
||||
# 防抖处理,通过计时器延迟加载
|
||||
if self.__reload_timer:
|
||||
@@ -144,6 +161,12 @@ class PluginManager(metaclass=Singleton):
|
||||
if pid and plugin_id != pid:
|
||||
continue
|
||||
try:
|
||||
# 判断插件是否满足认证要求,如不满足则不进行实例化
|
||||
if not self.__set_and_check_auth_level(plugin=plugin):
|
||||
# 如果是插件热更新实例,这里则进行替换
|
||||
if plugin_id in self._plugins:
|
||||
self._plugins[plugin_id] = plugin
|
||||
continue
|
||||
# 存储Class
|
||||
self._plugins[plugin_id] = plugin
|
||||
# 未安装的不加载
|
||||
@@ -201,8 +224,6 @@ class PluginManager(metaclass=Singleton):
|
||||
# 清空指定插件
|
||||
if pid in self._running_plugins:
|
||||
self._running_plugins.pop(pid)
|
||||
if pid in self._plugins:
|
||||
self._plugins.pop(pid)
|
||||
else:
|
||||
# 清空
|
||||
self._plugins = {}
|
||||
@@ -583,11 +604,12 @@ class PluginManager(metaclass=Singleton):
|
||||
if plugin_obj and hasattr(plugin_obj, "get_page"):
|
||||
if ObjectUtils.check_method(plugin_obj.get_page):
|
||||
plugin.has_page = True
|
||||
# 公钥
|
||||
if plugin_info.get("key"):
|
||||
plugin.plugin_public_key = plugin_info.get("key")
|
||||
# 权限
|
||||
if plugin_info.get("level"):
|
||||
plugin.auth_level = plugin_info.get("level")
|
||||
if self.siteshelper.auth_level < plugin.auth_level:
|
||||
continue
|
||||
if not self.__set_and_check_auth_level(plugin=plugin, source=plugin_info):
|
||||
continue
|
||||
# 名称
|
||||
if plugin_info.get("name"):
|
||||
plugin.plugin_name = plugin_info.get("name")
|
||||
@@ -690,11 +712,12 @@ class PluginManager(metaclass=Singleton):
|
||||
plugin.has_page = True
|
||||
else:
|
||||
plugin.has_page = False
|
||||
# 公钥
|
||||
if hasattr(plugin_class, "plugin_public_key"):
|
||||
plugin.plugin_public_key = plugin_class.plugin_public_key
|
||||
# 权限
|
||||
if hasattr(plugin_class, "auth_level"):
|
||||
plugin.auth_level = plugin_class.auth_level
|
||||
if self.siteshelper.auth_level < plugin.auth_level:
|
||||
continue
|
||||
if not self.__set_and_check_auth_level(plugin=plugin, source=plugin_class):
|
||||
continue
|
||||
# 名称
|
||||
if hasattr(plugin_class, "plugin_name"):
|
||||
plugin.plugin_name = plugin_class.plugin_name
|
||||
@@ -729,10 +752,70 @@ class PluginManager(metaclass=Singleton):
|
||||
@staticmethod
|
||||
def is_plugin_exists(pid: str) -> bool:
|
||||
"""
|
||||
判断插件是否在本地文件系统存在
|
||||
判断插件是否在本地包中存在
|
||||
:param pid: 插件ID
|
||||
"""
|
||||
if not pid:
|
||||
return False
|
||||
plugin_dir = settings.ROOT_PATH / "app" / "plugins" / pid.lower()
|
||||
return plugin_dir.exists()
|
||||
try:
|
||||
# 构建包名
|
||||
package_name = f"app.plugins.{pid.lower()}"
|
||||
# 检查包是否存在
|
||||
package_exists = importlib.util.find_spec(package_name) is not None
|
||||
logger.debug(f"{pid} exists: {package_exists}")
|
||||
return package_exists
|
||||
except Exception as e:
|
||||
logger.debug(f"获取插件是否在本地包中存在失败,{e}")
|
||||
return False
|
||||
|
||||
def __set_and_check_auth_level(self, plugin: Union[schemas.Plugin, Type[Any]],
|
||||
source: Optional[Union[dict, Type[Any]]] = None) -> bool:
|
||||
"""
|
||||
设置并检查插件的认证级别
|
||||
:param plugin: 插件对象或包含 auth_level 属性的对象
|
||||
:param source: 可选的字典对象或类对象,可能包含 "level" 或 "auth_level" 键
|
||||
:return: 如果插件的认证级别有效且当前环境的认证级别满足要求,返回 True,否则返回 False
|
||||
"""
|
||||
# 检查并赋值 source 中的 level 或 auth_level
|
||||
if source:
|
||||
if isinstance(source, dict) and "level" in source:
|
||||
plugin.auth_level = source.get("level")
|
||||
elif hasattr(source, "auth_level"):
|
||||
plugin.auth_level = source.auth_level
|
||||
# 如果 source 为空且 plugin 本身没有 auth_level,直接返回 True
|
||||
elif not hasattr(plugin, "auth_level"):
|
||||
return True
|
||||
|
||||
# auth_level 级别说明
|
||||
# 1 - 所有用户可见
|
||||
# 2 - 站点认证用户可见
|
||||
# 3 - 站点&密钥认证可见
|
||||
# 99 - 站点&特殊密钥认证可见
|
||||
# 如果当前站点认证级别大于 1 且插件级别为 99,并存在插件公钥,说明为特殊密钥认证,通过密钥匹配进行认证
|
||||
if self.siteshelper.auth_level > 1 and plugin.auth_level == 99 and hasattr(plugin, "plugin_public_key"):
|
||||
plugin_id = plugin.id if isinstance(plugin, schemas.Plugin) else plugin.__name__
|
||||
public_key = plugin.plugin_public_key
|
||||
if public_key:
|
||||
private_key = PluginManager.__get_plugin_private_key(plugin_id)
|
||||
verify = RSAUtils.verify_rsa_keys(public_key=public_key, private_key=private_key)
|
||||
return verify
|
||||
# 如果当前站点认证级别小于插件级别,则返回 False
|
||||
if self.siteshelper.auth_level < plugin.auth_level:
|
||||
return False
|
||||
return True
|
||||
|
||||
@staticmethod
|
||||
def __get_plugin_private_key(plugin_id: str) -> Optional[str]:
|
||||
"""
|
||||
根据插件标识获取对应的私钥
|
||||
:param plugin_id: 插件标识
|
||||
:return: 对应的插件私钥,如果未找到则返回 None
|
||||
"""
|
||||
try:
|
||||
# 将插件标识转换为大写并构建环境变量名称
|
||||
env_var_name = f"PLUGIN_{plugin_id.upper()}_PRIVATE_KEY"
|
||||
private_key = os.environ.get(env_var_name)
|
||||
return private_key
|
||||
except Exception as e:
|
||||
logger.debug(f"获取插件 {plugin_id} 的私钥时发生错误:{e}")
|
||||
return None
|
||||
|
||||
@@ -30,7 +30,7 @@ reusable_oauth2 = OAuth2PasswordBearer(
|
||||
|
||||
def create_access_token(
|
||||
userid: Union[str, Any], username: str, super_user: bool = False,
|
||||
expires_delta: timedelta = None
|
||||
expires_delta: timedelta = None, level: int = 1
|
||||
) -> str:
|
||||
if expires_delta:
|
||||
expire = datetime.utcnow() + expires_delta
|
||||
@@ -42,7 +42,8 @@ def create_access_token(
|
||||
"exp": expire,
|
||||
"sub": str(userid),
|
||||
"username": username,
|
||||
"super_user": super_user
|
||||
"super_user": super_user,
|
||||
"level": level
|
||||
}
|
||||
encoded_jwt = jwt.encode(to_encode, settings.SECRET_KEY, algorithm=ALGORITHM)
|
||||
return encoded_jwt
|
||||
@@ -61,21 +62,21 @@ def verify_token(token: str = Depends(reusable_oauth2)) -> schemas.TokenPayload:
|
||||
)
|
||||
|
||||
|
||||
def get_token(token: str = None) -> str:
|
||||
def __get_token(token: str = None) -> str:
|
||||
"""
|
||||
从请求URL中获取token
|
||||
"""
|
||||
return token
|
||||
|
||||
|
||||
def get_apikey(apikey: str = None, x_api_key: Annotated[str | None, Header()] = None) -> str:
|
||||
def __get_apikey(apikey: str = None, x_api_key: Annotated[str | None, Header()] = None) -> str:
|
||||
"""
|
||||
从请求URL中获取apikey
|
||||
"""
|
||||
return apikey or x_api_key
|
||||
|
||||
|
||||
def verify_uri_token(token: str = Depends(get_token)) -> str:
|
||||
def verify_apitoken(token: str = Depends(__get_token)) -> str:
|
||||
"""
|
||||
通过依赖项使用token进行身份认证
|
||||
"""
|
||||
@@ -87,7 +88,7 @@ def verify_uri_token(token: str = Depends(get_token)) -> str:
|
||||
return token
|
||||
|
||||
|
||||
def verify_uri_apikey(apikey: str = Depends(get_apikey)) -> str:
|
||||
def verify_apikey(apikey: str = Depends(__get_apikey)) -> str:
|
||||
"""
|
||||
通过依赖项使用apikey进行身份认证
|
||||
"""
|
||||
@@ -99,6 +100,18 @@ def verify_uri_apikey(apikey: str = Depends(get_apikey)) -> str:
|
||||
return apikey
|
||||
|
||||
|
||||
def verify_uri_token(token: str = Depends(__get_token)) -> str:
|
||||
"""
|
||||
通过依赖项使用token进行身份认证
|
||||
"""
|
||||
if not verify_token(token):
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_401_UNAUTHORIZED,
|
||||
detail="token校验不通过"
|
||||
)
|
||||
return token
|
||||
|
||||
|
||||
def verify_password(plain_password: str, hashed_password: str) -> bool:
|
||||
return pwd_context.verify(plain_password, hashed_password)
|
||||
|
||||
|
||||
@@ -139,3 +139,15 @@ class DownloadHistoryOper(DbOper):
|
||||
return DownloadHistory.list_by_type(db=self._db,
|
||||
mtype=mtype,
|
||||
days=days)
|
||||
|
||||
def delete_history(self, historyid):
|
||||
"""
|
||||
删除下载记录
|
||||
"""
|
||||
DownloadHistory.delete(self._db, historyid)
|
||||
|
||||
def delete_downloadfile(self, downloadfileid):
|
||||
"""
|
||||
删除下载文件记录
|
||||
"""
|
||||
DownloadFiles.delete(self._db, downloadfileid)
|
||||
|
||||
@@ -57,6 +57,7 @@ class TransferHistory(Base):
|
||||
).offset((page - 1) * count).limit(count).all()
|
||||
else:
|
||||
result = db.query(TransferHistory).filter(or_(
|
||||
TransferHistory.title.like(f'%{title}%'),
|
||||
TransferHistory.src.like(f'%{title}%'),
|
||||
TransferHistory.dest.like(f'%{title}%'),
|
||||
)).order_by(
|
||||
@@ -89,6 +90,11 @@ class TransferHistory(Base):
|
||||
def get_by_src(db: Session, src: str):
|
||||
return db.query(TransferHistory).filter(TransferHistory.src == src).first()
|
||||
|
||||
@staticmethod
|
||||
@db_query
|
||||
def get_by_dest(db: Session, dest: str):
|
||||
return db.query(TransferHistory).filter(TransferHistory.dest == dest).first()
|
||||
|
||||
@staticmethod
|
||||
@db_query
|
||||
def list_by_hash(db: Session, download_hash: str):
|
||||
@@ -123,6 +129,7 @@ class TransferHistory(Base):
|
||||
return db.query(func.count(TransferHistory.id)).filter(TransferHistory.status == status).first()[0]
|
||||
else:
|
||||
return db.query(func.count(TransferHistory.id)).filter(or_(
|
||||
TransferHistory.title.like(f'%{title}%'),
|
||||
TransferHistory.src.like(f'%{title}%'),
|
||||
TransferHistory.dest.like(f'%{title}%')
|
||||
)).first()[0]
|
||||
|
||||
@@ -36,6 +36,13 @@ class TransferHistoryOper(DbOper):
|
||||
"""
|
||||
return TransferHistory.get_by_src(self._db, src)
|
||||
|
||||
def get_by_dest(self, dest: str) -> TransferHistory:
|
||||
"""
|
||||
按转移路径查询转移记录
|
||||
:param dest: 数据key
|
||||
"""
|
||||
return TransferHistory.get_by_dest(self._db, dest)
|
||||
|
||||
def list_by_hash(self, download_hash: str) -> List[TransferHistory]:
|
||||
"""
|
||||
按种子hash查询转移记录
|
||||
|
||||
620
app/helper/aliyun.py
Normal file
620
app/helper/aliyun.py
Normal file
@@ -0,0 +1,620 @@
|
||||
import base64
|
||||
import json
|
||||
import time
|
||||
import uuid
|
||||
from pathlib import Path
|
||||
from typing import Optional, Tuple, List
|
||||
|
||||
from requests import Response
|
||||
|
||||
from app import schemas
|
||||
from app.core.config import settings
|
||||
from app.db.systemconfig_oper import SystemConfigOper
|
||||
from app.log import logger
|
||||
from app.schemas.types import SystemConfigKey
|
||||
from app.utils.http import RequestUtils
|
||||
from app.utils.string import StringUtils
|
||||
from app.utils.system import SystemUtils
|
||||
|
||||
|
||||
class AliyunHelper:
|
||||
"""
|
||||
阿里云相关操作
|
||||
"""
|
||||
|
||||
_X_SIGNATURE = ('f4b7bed5d8524a04051bd2da876dd79afe922b8205226d65855d02b267422adb1'
|
||||
'e0d8a816b021eaf5c36d101892180f79df655c5712b348c2a540ca136e6b22001')
|
||||
|
||||
_X_PUBLIC_KEY = ('04d9d2319e0480c840efeeb75751b86d0db0c5b9e72c6260a1d846958adceaf9d'
|
||||
'ee789cab7472741d23aafc1a9c591f72e7ee77578656e6c8588098dea1488ac2a')
|
||||
|
||||
# 生成二维码
|
||||
qrcode_url = ("https://passport.aliyundrive.com/newlogin/qrcode/generate.do?"
|
||||
"appName=aliyun_drive&fromSite=52&appEntrance=web&isMobile=false"
|
||||
"&lang=zh_CN&returnUrl=&bizParams=&_bx-v=2.0.31")
|
||||
# 二维码登录确认
|
||||
check_url = "https://passport.aliyundrive.com/newlogin/qrcode/query.do?appName=aliyun_drive&fromSite=52&_bx-v=2.0.31"
|
||||
# 更新访问令牌
|
||||
update_accessstoken_url = "https://auth.aliyundrive.com/v2/account/token"
|
||||
# 创建会话
|
||||
create_session_url = "https://api.aliyundrive.com/users/v1/users/device/create_session"
|
||||
# 用户信息
|
||||
user_info_url = "https://user.aliyundrive.com/v2/user/get"
|
||||
# 浏览文件
|
||||
list_file_url = "https://api.aliyundrive.com/adrive/v3/file/list"
|
||||
# 创建目录或文件
|
||||
create_folder_file_url = "https://api.aliyundrive.com/adrive/v2/file/createWithFolders"
|
||||
# 文件详情
|
||||
file_detail_url = "https://api.aliyundrive.com/v2/file/get"
|
||||
# 删除文件
|
||||
delete_file_url = " https://api.aliyundrive.com/v2/recyclebin/trash"
|
||||
# 文件重命名
|
||||
rename_file_url = "https://api.aliyundrive.com/v3/file/update"
|
||||
# 获取下载链接
|
||||
download_url = "https://api.aliyundrive.com/v2/file/get_download_url"
|
||||
# 移动文件
|
||||
move_file_url = "https://api.aliyundrive.com/v2/file/move"
|
||||
# 上传文件完成
|
||||
upload_file_complete_url = "https://api.aliyundrive.com/v2/file/complete"
|
||||
|
||||
def __init__(self):
|
||||
self.systemconfig = SystemConfigOper()
|
||||
|
||||
def __handle_error(self, res: Response, apiname: str, action: bool = True):
|
||||
"""
|
||||
统一处理和打印错误信息
|
||||
"""
|
||||
if res is None:
|
||||
logger.warn("无法连接到阿里云盘!")
|
||||
return
|
||||
try:
|
||||
result = res.json()
|
||||
except Exception as err:
|
||||
logger.error(f"解析阿里云盘返回数据失败:{str(err)}")
|
||||
return
|
||||
code = result.get("code")
|
||||
message = result.get("message")
|
||||
display_message = result.get("display_message")
|
||||
if code or message:
|
||||
logger.warn(f"Aliyun {apiname}失败:{code} - {display_message or message}")
|
||||
if action:
|
||||
if code == "DeviceSessionSignatureInvalid":
|
||||
logger.warn("设备已失效,正在重新建立会话...")
|
||||
self.__create_session(self.__get_headers(self.__auth_params))
|
||||
if code == "UserDeviceOffline":
|
||||
logger.warn("设备已离线,尝试重新登录,如仍报错请检查阿里云盘绑定设备数量是否超限!")
|
||||
self.__create_session(self.__get_headers(self.__auth_params))
|
||||
if code == "AccessTokenInvalid":
|
||||
logger.warn("访问令牌已失效,正在刷新令牌...")
|
||||
self.__update_accesstoken(self.__auth_params, self.__auth_params.get("refreshToken"))
|
||||
else:
|
||||
logger.info(f"Aliyun {apiname}成功")
|
||||
|
||||
@property
|
||||
def __auth_params(self):
|
||||
"""
|
||||
获取阿里云盘认证参数并初始化参数格式
|
||||
"""
|
||||
return self.systemconfig.get(SystemConfigKey.UserAliyunParams) or {}
|
||||
|
||||
def __update_params(self, params: dict):
|
||||
"""
|
||||
设置阿里云盘认证参数
|
||||
"""
|
||||
current_params = self.__auth_params
|
||||
current_params.update(params)
|
||||
self.systemconfig.set(SystemConfigKey.UserAliyunParams, current_params)
|
||||
|
||||
def __clear_params(self):
|
||||
"""
|
||||
清除阿里云盘认证参数
|
||||
"""
|
||||
self.systemconfig.delete(SystemConfigKey.UserAliyunParams)
|
||||
|
||||
def generate_qrcode(self) -> Optional[Tuple[dict, str]]:
|
||||
"""
|
||||
生成二维码
|
||||
"""
|
||||
res = RequestUtils(timeout=10).get_res(self.qrcode_url)
|
||||
if res:
|
||||
data = res.json().get("content", {}).get("data")
|
||||
return {
|
||||
"codeContent": data.get("codeContent"),
|
||||
"ck": data.get("ck"),
|
||||
"t": data.get("t")
|
||||
}, ""
|
||||
elif res is not None:
|
||||
self.__handle_error(res, "生成二维码")
|
||||
return {}, f"请求阿里云盘二维码失败:{res.status_code} - {res.reason}"
|
||||
return {}, f"请求阿里云盘二维码失败:无法连接!"
|
||||
|
||||
def check_login(self, ck: str, t: str) -> Optional[Tuple[dict, str]]:
|
||||
"""
|
||||
二维码登录确认
|
||||
"""
|
||||
params = {
|
||||
"t": t,
|
||||
"ck": ck,
|
||||
"appName": "aliyun_drive",
|
||||
"appEntrance": "web",
|
||||
"isMobile": "false",
|
||||
"lang": "zh_CN",
|
||||
"returnUrl": "",
|
||||
"fromSite": "52",
|
||||
"bizParams": "",
|
||||
"navlanguage": "zh-CN",
|
||||
"navPlatform": "MacIntel",
|
||||
}
|
||||
|
||||
body = "&".join([f"{key}={value}" for key, value in params.items()])
|
||||
|
||||
status = {
|
||||
"NEW": "请用阿里云盘 App 扫码",
|
||||
"SCANED": "请在手机上确认",
|
||||
"EXPIRED": "二维码已过期",
|
||||
"CANCELED": "已取消",
|
||||
"CONFIRMED": "已确认",
|
||||
}
|
||||
|
||||
headers = {
|
||||
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
|
||||
}
|
||||
|
||||
res = RequestUtils(headers=headers, timeout=5).post_res(self.check_url, data=body)
|
||||
if res:
|
||||
data = res.json().get("content", {}).get("data") or {}
|
||||
qrCodeStatus = data.get("qrCodeStatus")
|
||||
data["tip"] = status.get(qrCodeStatus) or "未知"
|
||||
if data.get("bizExt"):
|
||||
try:
|
||||
bizExt = json.loads(base64.b64decode(data["bizExt"]).decode('GBK'))
|
||||
pds_login_result = bizExt.get("pds_login_result")
|
||||
if pds_login_result:
|
||||
data.pop('bizExt')
|
||||
data.update({
|
||||
'userId': pds_login_result.get('userId'),
|
||||
'expiresIn': pds_login_result.get('expiresIn'),
|
||||
'nickName': pds_login_result.get('nickName'),
|
||||
'avatar': pds_login_result.get('avatar'),
|
||||
'tokenType': pds_login_result.get('tokenType'),
|
||||
"refreshToken": pds_login_result.get('refreshToken'),
|
||||
"accessToken": pds_login_result.get('accessToken'),
|
||||
"defaultDriveId": pds_login_result.get('defaultDriveId'),
|
||||
"updateTime": time.time(),
|
||||
})
|
||||
self.__update_params(data)
|
||||
self.user_info()
|
||||
except Exception as e:
|
||||
return {}, f"bizExt 解码失败:{str(e)}"
|
||||
return data, ""
|
||||
elif res is not None:
|
||||
self.__handle_error(res, "登录确认")
|
||||
return {}, f"阿里云盘登录确认失败:{res.status_code} - {res.reason}"
|
||||
return {}, "阿里云盘登录确认失败:无法连接!"
|
||||
|
||||
def __update_accesstoken(self, params: dict, refresh_token: str) -> bool:
|
||||
"""
|
||||
更新阿里云盘访问令牌
|
||||
"""
|
||||
headers = self.__get_headers(params)
|
||||
res = RequestUtils(headers=headers, timeout=10).post_res(
|
||||
self.update_accessstoken_url, json={
|
||||
"refresh_token": refresh_token,
|
||||
"grant_type": "refresh_token"
|
||||
})
|
||||
if res:
|
||||
data = res.json()
|
||||
code = data.get("code")
|
||||
if code in ["RefreshTokenExpired", "InvalidParameter.RefreshToken"]:
|
||||
logger.warn("刷新令牌已过期,请重新登录!")
|
||||
self.__clear_params()
|
||||
return False
|
||||
self.__update_params({
|
||||
"accessToken": data.get('access_token'),
|
||||
"expiresIn": data.get('expires_in'),
|
||||
"updateTime": time.time()
|
||||
})
|
||||
logger.info(f"阿里云盘访问令牌已更新,accessToken={data.get('access_token')}")
|
||||
return True
|
||||
else:
|
||||
self.__handle_error(res, "更新令牌", action=False)
|
||||
return False
|
||||
|
||||
def __create_session(self, headers: dict):
|
||||
"""
|
||||
创建会话
|
||||
"""
|
||||
|
||||
def __os_name():
|
||||
"""
|
||||
获取操作系统名称
|
||||
"""
|
||||
if SystemUtils.is_windows():
|
||||
return 'Windows 操作系统'
|
||||
elif SystemUtils.is_macos():
|
||||
return 'MacOS 操作系统'
|
||||
else:
|
||||
return '类 Unix 操作系统'
|
||||
|
||||
res = RequestUtils(headers=headers, timeout=5).post_res(self.create_session_url, json={
|
||||
'deviceName': f'MoviePilot {SystemUtils.platform}',
|
||||
'modelName': __os_name(),
|
||||
'pubKey': self._X_PUBLIC_KEY,
|
||||
})
|
||||
self.__handle_error(res, "创建会话", action=False)
|
||||
|
||||
@property
|
||||
def __access_params(self) -> Optional[dict]:
|
||||
"""
|
||||
获取阿里云盘访问参数,如果超时则更新后返回
|
||||
"""
|
||||
params = self.__auth_params
|
||||
if not params:
|
||||
logger.warn("阿里云盘访问令牌不存在,请先扫码登录!")
|
||||
return None
|
||||
expires_in = params.get("expiresIn")
|
||||
update_time = params.get("updateTime")
|
||||
refresh_token = params.get("refreshToken")
|
||||
if not expires_in or not update_time or not refresh_token:
|
||||
logger.warn("阿里云盘访问令牌参数错误,请重新扫码登录!")
|
||||
self.__clear_params()
|
||||
return None
|
||||
# 是否需要更新设备信息
|
||||
update_device = False
|
||||
# 判断访问令牌是否过期
|
||||
if (time.time() - update_time) >= expires_in:
|
||||
logger.info("阿里云盘访问令牌已过期,正在更新...")
|
||||
if not self.__update_accesstoken(params, refresh_token):
|
||||
# 更新失败
|
||||
return None
|
||||
update_device = True
|
||||
# 生成设备ID
|
||||
x_device_id = params.get("x_device_id")
|
||||
if not x_device_id:
|
||||
x_device_id = uuid.uuid4().hex
|
||||
params['x_device_id'] = x_device_id
|
||||
self.__update_params({"x_device_id": x_device_id})
|
||||
update_device = True
|
||||
# 更新设备信息重新创建会话
|
||||
if update_device:
|
||||
self.__create_session(self.__get_headers(params))
|
||||
return params
|
||||
|
||||
def __get_headers(self, params: dict):
|
||||
"""
|
||||
获取请求头
|
||||
"""
|
||||
if not params:
|
||||
return {}
|
||||
return {
|
||||
"Authorization": f"Bearer {params.get('accessToken')}",
|
||||
"Content-Type": "application/json;charset=UTF-8",
|
||||
"Accept": "application/json, text/plain, */*",
|
||||
"Referer": "https://www.alipan.com/",
|
||||
"User-Agent": settings.USER_AGENT,
|
||||
"X-Canary": "client=web,app=adrive,version=v4.9.0",
|
||||
"x-device-id": params.get('x_device_id'),
|
||||
"x-signature": self._X_SIGNATURE
|
||||
}
|
||||
|
||||
def user_info(self) -> dict:
|
||||
"""
|
||||
获取用户信息(drive_id等)
|
||||
"""
|
||||
params = self.__access_params
|
||||
if not params:
|
||||
return {}
|
||||
headers = self.__get_headers(params)
|
||||
res = RequestUtils(headers=headers, timeout=10).post_res(self.user_info_url)
|
||||
if res:
|
||||
result = res.json()
|
||||
self.__update_params({
|
||||
"resourceDriveId": result.get("resource_drive_id"),
|
||||
"backDriveId": result.get("backup_drive_id")
|
||||
})
|
||||
return result
|
||||
else:
|
||||
self.__handle_error(res, "获取用户信息")
|
||||
return {}
|
||||
|
||||
def list(self, drive_id: str = None, parent_file_id: str = 'root', list_type: str = None,
|
||||
limit: int = 100, order_by: str = 'updated_at', path: str = "/") -> List[schemas.FileItem]:
|
||||
"""
|
||||
浏览文件
|
||||
limit 返回文件数量,默认 50,最大 100
|
||||
order_by created_at/updated_at/name/size
|
||||
parent_file_id 根目录为root
|
||||
type all | file | folder
|
||||
"""
|
||||
params = self.__access_params
|
||||
if not params:
|
||||
return []
|
||||
# 请求头
|
||||
headers = self.__get_headers(params)
|
||||
# 根目录处理
|
||||
if not drive_id:
|
||||
return [
|
||||
schemas.FileItem(
|
||||
fileid=parent_file_id,
|
||||
drive_id=params.get("resourceDriveId"),
|
||||
parent_fileid="root",
|
||||
type="dir",
|
||||
path="/资源库/",
|
||||
name="资源库"
|
||||
),
|
||||
schemas.FileItem(
|
||||
fileid=parent_file_id,
|
||||
drive_id=params.get("backDriveId"),
|
||||
parent_fileid="root",
|
||||
type="dir",
|
||||
path="/备份盘/",
|
||||
name="备份盘"
|
||||
)
|
||||
]
|
||||
# 返回数据
|
||||
ret_items = []
|
||||
# 分页获取
|
||||
next_marker = None
|
||||
while True:
|
||||
if not parent_file_id or parent_file_id == "/":
|
||||
parent_file_id = "root"
|
||||
res = RequestUtils(headers=headers, timeout=10).post_res(self.list_file_url, json={
|
||||
"drive_id": drive_id,
|
||||
"type": list_type,
|
||||
"limit": limit,
|
||||
"order_by": order_by,
|
||||
"parent_file_id": parent_file_id,
|
||||
"marker": next_marker
|
||||
}, params={
|
||||
'jsonmask': ('next_marker,items(name,file_id,drive_id,type,size,created_at,updated_at,'
|
||||
'category,file_extension,parent_file_id,mime_type,starred,thumbnail,url,'
|
||||
'streams_info,content_hash,user_tags,user_meta,trashed,video_media_metadata,'
|
||||
'video_preview_metadata,sync_meta,sync_device_flag,sync_flag,punish_flag')
|
||||
})
|
||||
if res:
|
||||
result = res.json()
|
||||
items = result.get("items")
|
||||
if not items:
|
||||
break
|
||||
# 合并数据
|
||||
ret_items.extend(items)
|
||||
next_marker = result.get("next_marker")
|
||||
if not next_marker:
|
||||
# 没有下一页
|
||||
break
|
||||
else:
|
||||
self.__handle_error(res, "浏览文件")
|
||||
break
|
||||
return [schemas.FileItem(
|
||||
fileid=fileinfo.get("file_id"),
|
||||
parent_fileid=fileinfo.get("parent_file_id"),
|
||||
type="dir" if fileinfo.get("type") == "folder" else "file",
|
||||
path=f"{path}{fileinfo.get('name')}" + ("/" if fileinfo.get("type") == "folder" else ""),
|
||||
name=fileinfo.get("name"),
|
||||
size=fileinfo.get("size"),
|
||||
extension=fileinfo.get("file_extension"),
|
||||
modify_time=StringUtils.str_to_timestamp(fileinfo.get("updated_at")),
|
||||
thumbnail=fileinfo.get("thumbnail"),
|
||||
drive_id=fileinfo.get("drive_id"),
|
||||
) for fileinfo in ret_items]
|
||||
|
||||
def create_folder(self, drive_id: str, parent_file_id: str, name: str, path: str = "/") -> Optional[schemas.FileItem]:
|
||||
"""
|
||||
创建目录
|
||||
"""
|
||||
params = self.__access_params
|
||||
if not params:
|
||||
return None
|
||||
headers = self.__get_headers(params)
|
||||
res = RequestUtils(headers=headers, timeout=10).post_res(self.create_folder_file_url, json={
|
||||
"drive_id": drive_id,
|
||||
"parent_file_id": parent_file_id,
|
||||
"name": name,
|
||||
"check_name_mode": "refuse",
|
||||
"type": "folder"
|
||||
})
|
||||
if res:
|
||||
"""
|
||||
{
|
||||
"parent_file_id": "root",
|
||||
"type": "folder",
|
||||
"file_id": "6673f2c8a88344741bd64ad192d7512b92087719",
|
||||
"domain_id": "bj29",
|
||||
"drive_id": "39146740",
|
||||
"file_name": "test",
|
||||
"encrypt_mode": "none"
|
||||
}
|
||||
"""
|
||||
result = res.json()
|
||||
return schemas.FileItem(
|
||||
fileid=result.get("file_id"),
|
||||
drive_id=result.get("drive_id"),
|
||||
parent_fileid=result.get("parent_file_id"),
|
||||
type=result.get("type"),
|
||||
name=result.get("file_name"),
|
||||
path=f"{path}{result.get('file_name')}",
|
||||
)
|
||||
else:
|
||||
self.__handle_error(res, "创建目录")
|
||||
return None
|
||||
|
||||
def delete(self, drive_id: str, file_id: str) -> bool:
|
||||
"""
|
||||
删除文件
|
||||
"""
|
||||
params = self.__access_params
|
||||
if not params:
|
||||
return False
|
||||
headers = self.__get_headers(params)
|
||||
res = RequestUtils(headers=headers, timeout=10).post_res(self.delete_file_url, json={
|
||||
"drive_id": drive_id,
|
||||
"file_id": file_id
|
||||
})
|
||||
if res:
|
||||
return True
|
||||
else:
|
||||
self.__handle_error(res, "删除文件")
|
||||
return False
|
||||
|
||||
def detail(self, drive_id: str, file_id: str, path: str = "/") -> Optional[schemas.FileItem]:
|
||||
"""
|
||||
获取文件详情
|
||||
"""
|
||||
params = self.__access_params
|
||||
if not params:
|
||||
return None
|
||||
headers = self.__get_headers(params)
|
||||
res = RequestUtils(headers=headers, timeout=10).post_res(self.file_detail_url, json={
|
||||
"drive_id": drive_id,
|
||||
"file_id": file_id
|
||||
})
|
||||
if res:
|
||||
result = res.json()
|
||||
return schemas.FileItem(
|
||||
fileid=result.get("file_id"),
|
||||
drive_id=result.get("drive_id"),
|
||||
parent_fileid=result.get("parent_file_id"),
|
||||
type="file",
|
||||
name=result.get("name"),
|
||||
size=result.get("size"),
|
||||
extension=result.get("file_extension"),
|
||||
modify_time=StringUtils.str_to_timestamp(result.get("updated_at")),
|
||||
thumbnail=result.get("thumbnail"),
|
||||
path=f"{path}{result.get('name')}"
|
||||
)
|
||||
else:
|
||||
self.__handle_error(res, "获取文件详情")
|
||||
return None
|
||||
|
||||
def rename(self, drive_id: str, file_id: str, name: str) -> bool:
|
||||
"""
|
||||
重命名文件
|
||||
"""
|
||||
params = self.__access_params
|
||||
if not params:
|
||||
return False
|
||||
headers = self.__get_headers(params)
|
||||
res = RequestUtils(headers=headers, timeout=10).post_res(self.rename_file_url, json={
|
||||
"drive_id": drive_id,
|
||||
"file_id": file_id,
|
||||
"name": name,
|
||||
"check_name_mode": "refuse"
|
||||
})
|
||||
if res:
|
||||
return True
|
||||
else:
|
||||
self.__handle_error(res, "重命名文件")
|
||||
return False
|
||||
|
||||
def download(self, drive_id: str, file_id: str) -> Optional[str]:
|
||||
"""
|
||||
获取下载链接
|
||||
"""
|
||||
params = self.__access_params
|
||||
if not params:
|
||||
return None
|
||||
headers = self.__get_headers(params)
|
||||
res = RequestUtils(headers=headers, timeout=10).post_res(self.download_url, json={
|
||||
"drive_id": drive_id,
|
||||
"file_id": file_id
|
||||
})
|
||||
if res:
|
||||
return res.json().get("url")
|
||||
else:
|
||||
self.__handle_error(res, "获取下载链接")
|
||||
return None
|
||||
|
||||
def move(self, drive_id: str, file_id: str, target_id: str) -> bool:
|
||||
"""
|
||||
移动文件
|
||||
"""
|
||||
params = self.__access_params
|
||||
if not params:
|
||||
return False
|
||||
headers = self.__get_headers(params)
|
||||
res = RequestUtils(headers=headers, timeout=10).post_res(self.move_file_url, json={
|
||||
"drive_id": drive_id,
|
||||
"file_id": file_id,
|
||||
"to_parent_file_id": target_id,
|
||||
"check_name_mode": "refuse"
|
||||
})
|
||||
if res:
|
||||
return True
|
||||
else:
|
||||
self.__handle_error(res, "移动文件")
|
||||
return False
|
||||
|
||||
def upload(self, drive_id: str, parent_file_id: str, file_path: Path) -> Optional[schemas.FileItem]:
|
||||
"""
|
||||
上传文件,并标记完成
|
||||
"""
|
||||
params = self.__access_params
|
||||
if not params:
|
||||
return None
|
||||
headers = self.__get_headers(params)
|
||||
res = RequestUtils(headers=headers, timeout=10).post_res(self.create_folder_file_url, json={
|
||||
"drive_id": drive_id,
|
||||
"parent_file_id": parent_file_id,
|
||||
"name": file_path.name,
|
||||
"check_name_mode": "refuse",
|
||||
"create_scene": "file_upload",
|
||||
"type": "file",
|
||||
"part_info_list": [
|
||||
{
|
||||
"part_number": 1
|
||||
}
|
||||
],
|
||||
"size": file_path.stat().st_size
|
||||
})
|
||||
if not res:
|
||||
self.__handle_error(res, "创建文件")
|
||||
return None
|
||||
# 获取上传参数
|
||||
result = res.json()
|
||||
if result.get("exist"):
|
||||
logger.info(f"文件{result.get('file_name')}已存在,无需上传")
|
||||
return schemas.FileItem(
|
||||
drive_id=result.get("drive_id"),
|
||||
fileid=result.get("file_id"),
|
||||
parent_fileid=result.get("parent_file_id"),
|
||||
type="file",
|
||||
name=result.get("file_name"),
|
||||
path=f"{file_path.parent}/{result.get('file_name')}"
|
||||
)
|
||||
file_id = result.get("file_id")
|
||||
upload_id = result.get("upload_id")
|
||||
part_info_list = result.get("part_info_list")
|
||||
if part_info_list:
|
||||
# 上传地址
|
||||
upload_url = part_info_list[0].get("upload_url")
|
||||
# 上传文件
|
||||
res = RequestUtils(headers={
|
||||
"Content-Type": "",
|
||||
"User-Agent": settings.USER_AGENT,
|
||||
"Referer": "https://www.alipan.com/",
|
||||
"Accept": "*/*",
|
||||
}).put_res(upload_url, data=file_path.read_bytes())
|
||||
if not res:
|
||||
self.__handle_error(res, "上传文件")
|
||||
return None
|
||||
# 标记文件上传完毕
|
||||
res = RequestUtils(headers=headers, timeout=10).post_res(self.upload_file_complete_url, json={
|
||||
"drive_id": drive_id,
|
||||
"file_id": file_id,
|
||||
"upload_id": upload_id
|
||||
})
|
||||
if not res:
|
||||
self.__handle_error(res, "标记上传状态")
|
||||
return None
|
||||
result = res.json()
|
||||
return schemas.FileItem(
|
||||
fileid=result.get("file_id"),
|
||||
drive_id=result.get("drive_id"),
|
||||
parent_fileid=result.get("parent_file_id"),
|
||||
type="file",
|
||||
name=result.get("name"),
|
||||
path=f"{file_path.parent}/{result.get('name')}",
|
||||
)
|
||||
else:
|
||||
logger.warn("上传文件失败:无法获取上传地址!")
|
||||
return None
|
||||
@@ -7,7 +7,6 @@ from app.core.context import MediaInfo
|
||||
from app.db.systemconfig_oper import SystemConfigOper
|
||||
from app.log import logger
|
||||
from app.schemas.types import SystemConfigKey, MediaType
|
||||
from app.utils.string import StringUtils
|
||||
from app.utils.system import SystemUtils
|
||||
|
||||
|
||||
@@ -144,12 +143,16 @@ class DirectoryHelper:
|
||||
relative_len = __comman_parts(in_path, Path(matched_dir.path))
|
||||
if relative_len and relative_len >= max_length:
|
||||
max_length = relative_len
|
||||
target_dirs.append(matched_dir)
|
||||
target_dirs.append({
|
||||
'path': matched_dir,
|
||||
'relative_len': relative_len
|
||||
})
|
||||
except Exception as e:
|
||||
logger.debug(f"计算目标路径时出错:{str(e)}")
|
||||
continue
|
||||
if target_dirs:
|
||||
matched_dirs = target_dirs
|
||||
target_dirs.sort(key=lambda x: x['relative_len'], reverse=True)
|
||||
matched_dirs = [x['path'] for x in target_dirs]
|
||||
|
||||
# 优先同盘
|
||||
for matched_dir in matched_dirs:
|
||||
|
||||
@@ -15,16 +15,6 @@ from typing import Dict, Optional
|
||||
from app.core.config import settings
|
||||
from app.log import logger
|
||||
|
||||
# 定义一个全局集合来存储注册的主机
|
||||
_registered_hosts = {
|
||||
'api.themoviedb.org',
|
||||
'api.tmdb.org',
|
||||
'webservice.fanart.tv',
|
||||
'api.github.com',
|
||||
'github.com',
|
||||
'raw.githubusercontent.com',
|
||||
'api.telegram.org'
|
||||
}
|
||||
|
||||
# 定义一个全局线程池执行器
|
||||
_executor = concurrent.futures.ThreadPoolExecutor()
|
||||
@@ -32,21 +22,13 @@ _executor = concurrent.futures.ThreadPoolExecutor()
|
||||
# 定义默认的DoH配置
|
||||
_doh_timeout = 5
|
||||
_doh_cache: Dict[str, str] = {}
|
||||
_doh_resolvers = [
|
||||
# https://developers.cloudflare.com/1.1.1.1/encryption/dns-over-https
|
||||
"1.0.0.1",
|
||||
"1.1.1.1",
|
||||
# https://support.quad9.net/hc/en-us
|
||||
"9.9.9.9",
|
||||
"149.112.112.112"
|
||||
]
|
||||
|
||||
|
||||
def _patched_getaddrinfo(host, *args, **kwargs):
|
||||
"""
|
||||
socket.getaddrinfo的补丁版本。
|
||||
"""
|
||||
if host not in _registered_hosts:
|
||||
if host not in settings.DOH_DOMAINS.split(","):
|
||||
return _orig_getaddrinfo(host, *args, **kwargs)
|
||||
|
||||
# 检查主机是否已解析
|
||||
@@ -57,7 +39,7 @@ def _patched_getaddrinfo(host, *args, **kwargs):
|
||||
|
||||
# 使用DoH解析主机
|
||||
futures = []
|
||||
for resolver in _doh_resolvers:
|
||||
for resolver in settings.DOH_RESOLVERS.split(","):
|
||||
futures.append(_executor.submit(_doh_query, resolver, host))
|
||||
|
||||
for future in concurrent.futures.as_completed(futures):
|
||||
|
||||
@@ -82,7 +82,7 @@ class FormatParser(object):
|
||||
return int(s) + self.__offset, int(e) + self.__offset, self.part
|
||||
return self._start_ep + self.__offset, None, self.part
|
||||
if not self._format:
|
||||
return None, None, None
|
||||
return self._start_ep, self._end_ep, self.part
|
||||
s, e = self.__handle_single(file_name)
|
||||
return s + self.__offset if s is not None else None, \
|
||||
e + self.__offset if e is not None else None, self.part
|
||||
|
||||
@@ -51,7 +51,8 @@ class PluginHelper(metaclass=Singleton):
|
||||
if not user or not repo:
|
||||
return {}
|
||||
raw_url = self._base_url % (user, repo)
|
||||
res = RequestUtils(proxies=self.proxies, headers=settings.GITHUB_HEADERS,
|
||||
res = RequestUtils(proxies=self.proxies,
|
||||
headers=settings.REPO_GITHUB_HEADERS(repo=f"{user}/{repo}"),
|
||||
timeout=10).get_res(f"{raw_url}package.json")
|
||||
if res:
|
||||
try:
|
||||
@@ -137,12 +138,16 @@ class PluginHelper(metaclass=Singleton):
|
||||
if not user or not repo:
|
||||
return False, "不支持的插件仓库地址格式"
|
||||
|
||||
user_repo = f"{user}/{repo}"
|
||||
|
||||
def __get_filelist(_p: str) -> Tuple[Optional[list], Optional[str]]:
|
||||
"""
|
||||
获取插件的文件列表
|
||||
"""
|
||||
file_api = f"https://api.github.com/repos/{user}/{repo}/contents/plugins/{_p}"
|
||||
r = RequestUtils(proxies=settings.PROXY, headers=settings.GITHUB_HEADERS, timeout=30).get_res(file_api)
|
||||
file_api = f"https://api.github.com/repos/{user_repo}/contents/plugins/{_p}"
|
||||
r = RequestUtils(proxies=settings.PROXY,
|
||||
headers=settings.REPO_GITHUB_HEADERS(repo=user_repo),
|
||||
timeout=30).get_res(file_api)
|
||||
if r is None:
|
||||
return None, "连接仓库失败"
|
||||
elif r.status_code != 200:
|
||||
@@ -164,7 +169,8 @@ class PluginHelper(metaclass=Singleton):
|
||||
download_url = f"{settings.GITHUB_PROXY}{item.get('download_url')}"
|
||||
# 下载插件文件
|
||||
res = RequestUtils(proxies=self.proxies,
|
||||
headers=settings.GITHUB_HEADERS, timeout=60).get_res(download_url)
|
||||
headers=settings.REPO_GITHUB_HEADERS(repo=user_repo),
|
||||
timeout=60).get_res(download_url)
|
||||
if not res:
|
||||
return False, f"文件 {item.get('name')} 下载失败!"
|
||||
elif res.status_code != 200:
|
||||
|
||||
@@ -34,7 +34,11 @@ class ProgressHelper(metaclass=Singleton):
|
||||
key = key.value
|
||||
if not self._process_detail.get(key):
|
||||
return
|
||||
self._process_detail[key]['enable'] = False
|
||||
self._process_detail[key] = {
|
||||
"enable": False,
|
||||
"value": 100,
|
||||
"text": "正在处理..."
|
||||
}
|
||||
|
||||
def update(self, key: Union[ProgressKey, str], value: float = None, text: str = None):
|
||||
if isinstance(key, Enum):
|
||||
|
||||
281
app/helper/u115.py
Normal file
281
app/helper/u115.py
Normal file
@@ -0,0 +1,281 @@
|
||||
import base64
|
||||
from pathlib import Path
|
||||
from typing import Optional, Tuple, List
|
||||
|
||||
import oss2
|
||||
import py115
|
||||
from py115 import Cloud
|
||||
from py115.types import LoginTarget, QrcodeSession, QrcodeStatus, Credential, DownloadTicket
|
||||
|
||||
from app import schemas
|
||||
from app.db.systemconfig_oper import SystemConfigOper
|
||||
from app.log import logger
|
||||
from app.schemas.types import SystemConfigKey
|
||||
from app.utils.singleton import Singleton
|
||||
|
||||
|
||||
class U115Helper(metaclass=Singleton):
    """
    Helper for 115 cloud storage: QR-code login, credential persistence,
    file browsing, upload, download and basic file management.
    """

    # Lazily-created py115 client; stays None until connect/login succeeds
    cloud: Optional[Cloud] = None
    # QR-code login session created by generate_qrcode()
    _session: QrcodeSession = None

    def __init__(self):
        self.systemconfig = SystemConfigOper()

    def __init_cloud(self) -> bool:
        """
        Ensure ``self.cloud`` is connected using the saved credential.

        :return: True when the client is ready, False otherwise.
        """
        credential = self.__credential
        if not credential:
            logger.warn("115未登录,请先登录!")
            return False
        try:
            if not self.cloud:
                self.cloud = py115.connect(credential)
        except Exception as err:
            # Stale or invalid credential: drop it so the user can re-login
            logger.error(f"115连接失败,请重新扫码登录:{str(err)}")
            self.__clear_credential()
            return False
        return True

    @property
    def __credential(self) -> Optional[Credential]:
        """
        Load the saved 115 credential from system config, if any.
        """
        cookie_dict = self.systemconfig.get(SystemConfigKey.User115Params)
        if not cookie_dict:
            return None
        return Credential.from_dict(cookie_dict)

    def __save_credential(self, credential: Credential):
        """
        Persist the 115 credential to system config.
        """
        self.systemconfig.set(SystemConfigKey.User115Params, credential.to_dict())

    def __clear_credential(self):
        """
        Remove the saved 115 credential from system config.
        """
        self.systemconfig.delete(SystemConfigKey.User115Params)

    def generate_qrcode(self) -> Optional[str]:
        """
        Start a QR-code login and return the QR image as a data URL.

        :return: ``data:image/png;base64,...`` string, or None on failure.
        """
        try:
            self.cloud = py115.connect()
            self._session = self.cloud.qrcode_login(LoginTarget.Web)
            image_bin = self._session.image_data
            if not image_bin:
                logger.warn("115生成二维码失败:未获取到二维码数据!")
                return None
            # Encode the raw PNG bytes as a base64 data URL for the frontend
            image_base64 = base64.b64encode(image_bin).decode()
            return f"data:image/png;base64,{image_base64}"
        except Exception as e:
            logger.warn(f"115生成二维码失败:{str(e)}")
            return None

    def check_login(self) -> Optional[Tuple[dict, str]]:
        """
        Poll the QR-code login status.

        :return: ({status, tip} dict, error message) tuple. ``status`` is
                 1 done / 0 waiting / negative on expiry or failure.
        """
        if not self._session:
            return {}, "请先生成二维码!"
        try:
            if not self.cloud:
                return {}, "请先生成二维码!"
            status = self.cloud.qrcode_poll(self._session)
            if status == QrcodeStatus.Done:
                # Login confirmed: persist the credential for later sessions
                self.__save_credential(self.cloud.export_credentail())
                result = {
                    "status": 1,
                    "tip": "登录成功!"
                }
            elif status == QrcodeStatus.Waiting:
                result = {
                    "status": 0,
                    "tip": "请使用微信或115客户端扫码"
                }
            elif status == QrcodeStatus.Expired:
                result = {
                    "status": -1,
                    "tip": "二维码已过期,请重新刷新!"
                }
                self.cloud = None
            elif status == QrcodeStatus.Failed:
                result = {
                    "status": -2,
                    "tip": "登录失败,请重试!"
                }
                self.cloud = None
            else:
                result = {
                    "status": -3,
                    "tip": "未知错误,请重试!"
                }
                self.cloud = None
            return result, ""
        except Exception as e:
            return {}, f"115登录确认失败:{str(e)}"

    def storage(self) -> Optional[Tuple[int, int]]:
        """
        Get the storage space as a (used, total) tuple.
        """
        if not self.__init_cloud():
            return None
        try:
            return self.cloud.storage().space()
        except Exception as e:
            logger.error(f"获取115存储空间失败:{str(e)}")
            return None

    def list(self, parent_file_id: str = '0', path: str = "/") -> Optional[List[schemas.FileItem]]:
        """
        List files under a directory.

        :param parent_file_id: directory id, '0' is the root
        :param path: display path prefix used to build each item's path
        :return: list of FileItem, or None on error
        """
        if not self.__init_cloud():
            return None
        try:
            items = self.cloud.storage().list(dir_id=parent_file_id)
            return [schemas.FileItem(
                fileid=item.file_id,
                parent_fileid=item.parent_id,
                type="dir" if item.is_dir else "file",
                # Directories keep a trailing slash in their display path
                path=f"{path}{item.name}" + ("/" if item.is_dir else ""),
                name=item.name,
                size=item.size,
                extension=Path(item.name).suffix[1:],
                modify_time=item.modified_time.timestamp() if item.modified_time else 0,
                pickcode=item.pickcode
            ) for item in items]
        except Exception as e:
            logger.error(f"浏览115文件失败:{str(e)}")
            return None

    def create_folder(self, parent_file_id: str, name: str, path: str = "/") -> Optional[schemas.FileItem]:
        """
        Create a directory.

        :param parent_file_id: parent directory id
        :param name: new directory name
        :param path: display path prefix of the parent
        """
        if not self.__init_cloud():
            return None
        try:
            result = self.cloud.storage().make_dir(parent_file_id, name)
            return schemas.FileItem(
                fileid=result.file_id,
                parent_fileid=result.parent_id,
                type="dir",
                path=f"{path}{name}/",
                name=name,
                modify_time=result.modified_time.timestamp() if result.modified_time else 0,
                pickcode=result.pickcode
            )
        except Exception as e:
            logger.error(f"创建115目录失败:{str(e)}")
            return None

    def delete(self, file_id: str) -> bool:
        """
        Delete a file or directory by id.
        """
        if not self.__init_cloud():
            return False
        try:
            self.cloud.storage().delete(file_id)
            return True
        except Exception as e:
            logger.error(f"删除115文件失败:{str(e)}")
            return False

    def rename(self, file_id: str, name: str) -> bool:
        """
        Rename a file or directory.
        """
        if not self.__init_cloud():
            return False
        try:
            self.cloud.storage().rename(file_id, name)
            return True
        except Exception as e:
            logger.error(f"重命名115文件失败:{str(e)}")
            return False

    def download(self, pickcode: str) -> Optional[DownloadTicket]:
        """
        Request a download ticket (contains the download URL) for a file.
        """
        if not self.__init_cloud():
            return None
        try:
            return self.cloud.storage().request_download(pickcode)
        except Exception as e:
            logger.error(f"115下载失败:{str(e)}")
            return None

    def move(self, file_id: str, target_id: str) -> bool:
        """
        Move a file to another directory.
        """
        if not self.__init_cloud():
            return False
        try:
            self.cloud.storage().move(file_id, target_id)
            return True
        except Exception as e:
            logger.error(f"移动115文件失败:{str(e)}")
            return False

    def upload(self, parent_file_id: str, file_path: Path) -> Optional[schemas.FileItem]:
        """
        Upload a local file to 115 (via Aliyun OSS).

        :param parent_file_id: target directory id
        :param file_path: local file path
        :return: FileItem on success; an empty dict when the server already
                 has the content (instant upload — no file info is available
                 from the ticket, but the value is still falsy); None on error
        """
        if not self.__init_cloud():
            return None
        try:
            ticket = self.cloud.storage().request_upload(dir_id=parent_file_id, file_path=str(file_path))
            if ticket is None:
                logger.warn("115请求上传出错")
                return None
            elif ticket.is_done:
                logger.warn("115请求上传失败:文件已存在")
                return {}
            else:
                # Upload through Aliyun OSS with the STS token from the ticket
                auth = oss2.StsAuth(**ticket.oss_token)
                bucket = oss2.Bucket(
                    auth=auth,
                    endpoint=ticket.oss_endpoint,
                    bucket_name=ticket.bucket_name,
                )
                por = bucket.put_object_from_file(
                    key=ticket.object_key,
                    filename=str(file_path),
                    headers=ticket.headers,
                )
                result = por.resp.response.json()
                if result:
                    fileitem = result.get('data')
                    logger.info(f"115上传文件成功:{fileitem}")
                    return schemas.FileItem(
                        fileid=fileitem.get('file_id'),
                        parent_fileid=parent_file_id,
                        type="file",
                        name=fileitem.get('file_name'),
                        # Fix: join the remote name onto the parent directory,
                        # not onto the uploaded file's own path
                        path=f"{file_path.parent / fileitem.get('file_name')}",
                        size=fileitem.get('file_size'),
                        extension=Path(fileitem.get('file_name')).suffix[1:],
                        pickcode=fileitem.get('pickcode')
                    )
                else:
                    logger.warn(f"115上传文件失败:{por.resp.response.text}")
                    return None
        except Exception as e:
            logger.error(f"上传115文件失败:{str(e)}")
            return None
|
||||
10
app/main.py
10
app/main.py
@@ -20,12 +20,20 @@ if SystemUtils.is_frozen():
|
||||
|
||||
from app.core.config import settings, global_vars
|
||||
from app.core.module import ModuleManager
|
||||
|
||||
# SitesHelper涉及资源包拉取,提前引入并容错提示
|
||||
try:
|
||||
from app.helper.sites import SitesHelper
|
||||
except ImportError as e:
|
||||
error_message = f"错误: {str(e)}\n站点认证及索引相关资源导入失败,请尝试重建容器或手动拉取资源"
|
||||
print(error_message, file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
from app.core.plugin import PluginManager
|
||||
from app.db.init import init_db, update_db, init_super_user
|
||||
from app.helper.thread import ThreadHelper
|
||||
from app.helper.display import DisplayHelper
|
||||
from app.helper.resource import ResourceHelper
|
||||
from app.helper.sites import SitesHelper
|
||||
from app.helper.message import MessageHelper
|
||||
from app.scheduler import Scheduler
|
||||
from app.command import Command, CommandChian
|
||||
|
||||
@@ -77,6 +77,8 @@ def checkMessage(channel_type: MessageChannel):
|
||||
return None
|
||||
if channel_type == MessageChannel.VoceChat and not switch.get("vocechat"):
|
||||
return None
|
||||
if channel_type == MessageChannel.WebPush and not switch.get("webpush"):
|
||||
return None
|
||||
return func(self, message, *args, **kwargs)
|
||||
|
||||
return wrapper
|
||||
|
||||
@@ -15,6 +15,7 @@ from app.modules.douban.apiv2 import DoubanApi
|
||||
from app.modules.douban.douban_cache import DoubanCache
|
||||
from app.modules.douban.scraper import DoubanScraper
|
||||
from app.schemas import MediaPerson
|
||||
from app.schemas.exception import APIRateLimitException
|
||||
from app.schemas.types import MediaType
|
||||
from app.utils.common import retry
|
||||
from app.utils.http import RequestUtils
|
||||
@@ -147,11 +148,12 @@ class DoubanModule(_ModuleBase):
|
||||
|
||||
return None
|
||||
|
||||
def douban_info(self, doubanid: str, mtype: MediaType = None) -> Optional[dict]:
|
||||
def douban_info(self, doubanid: str, mtype: MediaType = None, raise_exception: bool = True) -> Optional[dict]:
|
||||
"""
|
||||
获取豆瓣信息
|
||||
:param doubanid: 豆瓣ID
|
||||
:param mtype: 媒体类型
|
||||
:param raise_exception: 触发速率限制时是否抛出异常
|
||||
:return: 豆瓣信息
|
||||
"""
|
||||
"""
|
||||
@@ -426,6 +428,12 @@ class DoubanModule(_ModuleBase):
|
||||
"""
|
||||
info = self.doubanapi.tv_detail(doubanid)
|
||||
if info:
|
||||
if "subject_ip_rate_limit" in info.get("msg", ""):
|
||||
msg = f"触发豆瓣IP速率限制,错误信息:{info} ..."
|
||||
logger.warn(msg)
|
||||
if raise_exception:
|
||||
raise APIRateLimitException(msg)
|
||||
return None
|
||||
celebrities = self.doubanapi.tv_celebrities(doubanid)
|
||||
if celebrities:
|
||||
info["directors"] = celebrities.get("directors")
|
||||
@@ -438,6 +446,12 @@ class DoubanModule(_ModuleBase):
|
||||
"""
|
||||
info = self.doubanapi.movie_detail(doubanid)
|
||||
if info:
|
||||
if "subject_ip_rate_limit" in info.get("msg", ""):
|
||||
msg = f"触发豆瓣IP速率限制,错误信息:{info} ..."
|
||||
logger.warn(msg)
|
||||
if raise_exception:
|
||||
raise APIRateLimitException(msg)
|
||||
return None
|
||||
celebrities = self.doubanapi.movie_celebrities(doubanid)
|
||||
if celebrities:
|
||||
info["directors"] = celebrities.get("directors")
|
||||
@@ -595,7 +609,8 @@ class DoubanModule(_ModuleBase):
|
||||
|
||||
@retry(Exception, 5, 3, 3, logger=logger)
|
||||
def match_doubaninfo(self, name: str, imdbid: str = None,
|
||||
mtype: MediaType = None, year: str = None, season: int = None) -> dict:
|
||||
mtype: MediaType = None, year: str = None, season: int = None,
|
||||
raise_exception: bool = False) -> dict:
|
||||
"""
|
||||
搜索和匹配豆瓣信息
|
||||
:param name: 名称
|
||||
@@ -603,6 +618,7 @@ class DoubanModule(_ModuleBase):
|
||||
:param mtype: 类型
|
||||
:param year: 年份
|
||||
:param season: 季号
|
||||
:param raise_exception: 触发速率限制时是否抛出异常
|
||||
"""
|
||||
if imdbid:
|
||||
# 优先使用IMDBID查询
|
||||
@@ -618,13 +634,19 @@ class DoubanModule(_ModuleBase):
|
||||
# 搜索
|
||||
logger.info(f"开始使用名称 {name} 匹配豆瓣信息 ...")
|
||||
result = self.doubanapi.search(f"{name} {year or ''}".strip())
|
||||
if not result or not result.get("items"):
|
||||
if not result:
|
||||
logger.warn(f"未找到 {name} 的豆瓣信息")
|
||||
return {}
|
||||
# 触发rate limit
|
||||
if "search_access_rate_limit" in result.values():
|
||||
logger.warn(f"触发豆瓣API速率限制 错误信息 {result} ...")
|
||||
raise Exception("触发豆瓣API速率限制")
|
||||
msg = f"触发豆瓣API速率限制,错误信息:{result} ..."
|
||||
logger.warn(msg)
|
||||
if raise_exception:
|
||||
raise APIRateLimitException(msg)
|
||||
return {}
|
||||
if not result.get("items"):
|
||||
logger.warn(f"未找到 {name} 的豆瓣信息")
|
||||
return {}
|
||||
for item_obj in result.get("items"):
|
||||
type_name = item_obj.get("type_name")
|
||||
if type_name not in [MediaType.TV.value, MediaType.MOVIE.value]:
|
||||
@@ -759,6 +781,26 @@ class DoubanModule(_ModuleBase):
|
||||
logger.error(f"刮削文件 {file} 失败,原因:{str(e)}")
|
||||
logger.info(f"{path} 刮削完成")
|
||||
|
||||
def metadata_nfo(self, mediainfo: MediaInfo, season: int = None, **kwargs) -> Optional[str]:
|
||||
"""
|
||||
获取NFO文件内容文本
|
||||
:param mediainfo: 媒体信息
|
||||
:param season: 季号
|
||||
"""
|
||||
if settings.SCRAP_SOURCE != "douban":
|
||||
return None
|
||||
return self.scraper.get_metadata_nfo(mediainfo=mediainfo, season=season)
|
||||
|
||||
def metadata_img(self, mediainfo: MediaInfo, season: int = None) -> Optional[dict]:
|
||||
"""
|
||||
获取图片名称和url
|
||||
:param mediainfo: 媒体信息
|
||||
:param season: 季号
|
||||
"""
|
||||
if settings.SCRAP_SOURCE != "douban":
|
||||
return None
|
||||
return self.scraper.get_metadata_img(mediainfo=mediainfo, season=season)
|
||||
|
||||
def obtain_images(self, mediainfo: MediaInfo) -> Optional[MediaInfo]:
|
||||
"""
|
||||
补充抓取媒体信息图片
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
from pathlib import Path
|
||||
from typing import Union
|
||||
from typing import Union, Optional
|
||||
from xml.dom import minidom
|
||||
|
||||
from app.core.config import settings
|
||||
@@ -17,6 +17,44 @@ class DoubanScraper:
|
||||
_force_nfo = False
|
||||
_force_img = False
|
||||
|
||||
def get_metadata_nfo(self, mediainfo: MediaInfo, season: int = None) -> Optional[str]:
|
||||
"""
|
||||
获取NFO文件内容文本
|
||||
:param mediainfo: 媒体信息
|
||||
:param season: 季号
|
||||
"""
|
||||
if mediainfo.type == MediaType.MOVIE:
|
||||
# 电影元数据文件
|
||||
doc = self.__gen_movie_nfo_file(mediainfo=mediainfo)
|
||||
else:
|
||||
if season:
|
||||
# 季元数据文件
|
||||
doc = self.__gen_tv_season_nfo_file(mediainfo=mediainfo, season=season)
|
||||
else:
|
||||
# 电视剧元数据文件
|
||||
doc = self.__gen_tv_nfo_file(mediainfo=mediainfo)
|
||||
if doc:
|
||||
return doc.toprettyxml(indent=" ", encoding="utf-8")
|
||||
|
||||
return None
|
||||
|
||||
@staticmethod
|
||||
def get_metadata_img(mediainfo: MediaInfo, season: int = None) -> Optional[dict]:
|
||||
"""
|
||||
获取图片内容
|
||||
:param mediainfo: 媒体信息
|
||||
:param season: 季号
|
||||
"""
|
||||
ret_dict = {}
|
||||
if season:
|
||||
# 豆瓣无季图片
|
||||
return {}
|
||||
if mediainfo.poster_path:
|
||||
ret_dict[f"poster{Path(mediainfo.poster_path).suffix}"] = mediainfo.poster_path
|
||||
if mediainfo.backdrop_path:
|
||||
ret_dict[f"backdrop{Path(mediainfo.backdrop_path).suffix}"] = mediainfo.backdrop_path
|
||||
return ret_dict
|
||||
|
||||
def gen_scraper_files(self, meta: MetaBase, mediainfo: MediaInfo,
|
||||
file_path: Path, transfer_type: str,
|
||||
force_nfo: bool = False, force_img: bool = False):
|
||||
@@ -47,15 +85,11 @@ class DoubanScraper:
|
||||
self.__gen_movie_nfo_file(mediainfo=mediainfo,
|
||||
file_path=file_path)
|
||||
# 生成电影图片
|
||||
image_path = file_path.with_name(f"poster{Path(mediainfo.poster_path).suffix}")
|
||||
if self._force_img or not image_path.exists():
|
||||
self.__save_image(url=mediainfo.poster_path,
|
||||
file_path=image_path)
|
||||
# 背景图
|
||||
if mediainfo.backdrop_path:
|
||||
image_path = file_path.with_name(f"backdrop{Path(mediainfo.backdrop_path).suffix}")
|
||||
image_dict = self.get_metadata_img(mediainfo)
|
||||
for img_name, img_url in image_dict.items():
|
||||
image_path = file_path.with_name(img_name)
|
||||
if self._force_img or not image_path.exists():
|
||||
self.__save_image(url=mediainfo.backdrop_path,
|
||||
self.__save_image(url=img_url,
|
||||
file_path=image_path)
|
||||
# 电视剧
|
||||
else:
|
||||
@@ -65,15 +99,11 @@ class DoubanScraper:
|
||||
self.__gen_tv_nfo_file(mediainfo=mediainfo,
|
||||
dir_path=file_path.parents[1])
|
||||
# 生成根目录图片
|
||||
image_path = file_path.with_name(f"poster{Path(mediainfo.poster_path).suffix}")
|
||||
if self._force_img or not image_path.exists():
|
||||
self.__save_image(url=mediainfo.poster_path,
|
||||
file_path=image_path)
|
||||
# 背景图
|
||||
if mediainfo.backdrop_path:
|
||||
image_path = file_path.with_name(f"backdrop{Path(mediainfo.backdrop_path).suffix}")
|
||||
image_dict = self.get_metadata_img(mediainfo)
|
||||
for img_name, img_url in image_dict.items():
|
||||
image_path = file_path.with_name(img_name)
|
||||
if self._force_img or not image_path.exists():
|
||||
self.__save_image(url=mediainfo.backdrop_path,
|
||||
self.__save_image(url=img_url,
|
||||
file_path=image_path)
|
||||
# 季目录NFO
|
||||
if self._force_nfo or not file_path.with_name("season.nfo").exists():
|
||||
@@ -84,7 +114,7 @@ class DoubanScraper:
|
||||
logger.error(f"{file_path} 刮削失败:{str(e)}")
|
||||
|
||||
@staticmethod
|
||||
def __gen_common_nfo(mediainfo: MediaInfo, doc, root):
|
||||
def __gen_common_nfo(mediainfo: MediaInfo, doc: minidom.Document, root: minidom.Node):
|
||||
# 简介
|
||||
xplot = DomUtils.add_node(doc, root, "plot")
|
||||
xplot.appendChild(doc.createCDATASection(mediainfo.overview or ""))
|
||||
@@ -108,14 +138,15 @@ class DoubanScraper:
|
||||
|
||||
def __gen_movie_nfo_file(self,
|
||||
mediainfo: MediaInfo,
|
||||
file_path: Path):
|
||||
file_path: Path = None) -> minidom.Document:
|
||||
"""
|
||||
生成电影的NFO描述文件
|
||||
:param mediainfo: 豆瓣信息
|
||||
:param file_path: 电影文件路径
|
||||
"""
|
||||
# 开始生成XML
|
||||
logger.info(f"正在生成电影NFO文件:{file_path.name}")
|
||||
if file_path:
|
||||
logger.info(f"正在生成电影NFO文件:{file_path.name}")
|
||||
doc = minidom.Document()
|
||||
root = DomUtils.add_node(doc, doc, "movie")
|
||||
# 公共部分
|
||||
@@ -127,11 +158,14 @@ class DoubanScraper:
|
||||
# 年份
|
||||
DomUtils.add_node(doc, root, "year", mediainfo.year or "")
|
||||
# 保存
|
||||
self.__save_nfo(doc, file_path.with_suffix(".nfo"))
|
||||
if file_path:
|
||||
self.__save_nfo(doc, file_path.with_suffix(".nfo"))
|
||||
|
||||
return doc
|
||||
|
||||
def __gen_tv_nfo_file(self,
|
||||
mediainfo: MediaInfo,
|
||||
dir_path: Path):
|
||||
dir_path: Path = None) -> minidom.Document:
|
||||
"""
|
||||
生成电视剧的NFO描述文件
|
||||
:param mediainfo: 媒体信息
|
||||
@@ -152,9 +186,13 @@ class DoubanScraper:
|
||||
DomUtils.add_node(doc, root, "season", "-1")
|
||||
DomUtils.add_node(doc, root, "episode", "-1")
|
||||
# 保存
|
||||
self.__save_nfo(doc, dir_path.joinpath("tvshow.nfo"))
|
||||
if dir_path:
|
||||
self.__save_nfo(doc, dir_path.joinpath("tvshow.nfo"))
|
||||
|
||||
def __gen_tv_season_nfo_file(self, mediainfo: MediaInfo, season: int, season_path: Path):
|
||||
return doc
|
||||
|
||||
def __gen_tv_season_nfo_file(self, mediainfo: MediaInfo,
|
||||
season: int, season_path: Path = None) -> minidom.Document:
|
||||
"""
|
||||
生成电视剧季的NFO描述文件
|
||||
:param mediainfo: 媒体信息
|
||||
@@ -179,7 +217,9 @@ class DoubanScraper:
|
||||
# seasonnumber
|
||||
DomUtils.add_node(doc, root, "seasonnumber", str(season))
|
||||
# 保存
|
||||
self.__save_nfo(doc, season_path.joinpath("season.nfo"))
|
||||
if season_path:
|
||||
self.__save_nfo(doc, season_path.joinpath("season.nfo"))
|
||||
return doc
|
||||
|
||||
def __save_image(self, url: str, file_path: Path):
|
||||
"""
|
||||
|
||||
@@ -18,16 +18,10 @@ class Emby:
|
||||
def __init__(self):
|
||||
self._host = settings.EMBY_HOST
|
||||
if self._host:
|
||||
if not self._host.endswith("/"):
|
||||
self._host += "/"
|
||||
if not self._host.startswith("http"):
|
||||
self._host = "http://" + self._host
|
||||
self._host = RequestUtils.standardize_base_url(self._host)
|
||||
self._playhost = settings.EMBY_PLAY_HOST
|
||||
if self._playhost:
|
||||
if not self._playhost.endswith("/"):
|
||||
self._playhost += "/"
|
||||
if not self._playhost.startswith("http"):
|
||||
self._playhost = "http://" + self._playhost
|
||||
self._playhost = RequestUtils.standardize_base_url(self._playhost)
|
||||
self._apikey = settings.EMBY_API_KEY
|
||||
self.user = self.get_user(settings.SUPERUSER)
|
||||
self.folders = self.get_emby_folders()
|
||||
|
||||
@@ -83,6 +83,25 @@ class FileTransferModule(_ModuleBase):
|
||||
def init_setting(self) -> Tuple[str, Union[str, bool]]:
|
||||
pass
|
||||
|
||||
def recommend_name(self, meta: MetaBase, mediainfo: MediaInfo) -> Optional[str]:
|
||||
"""
|
||||
获取重命名后的名称
|
||||
:param meta: 元数据
|
||||
:param mediainfo: 媒体信息
|
||||
:return: 重命名后的名称(含目录)
|
||||
"""
|
||||
# 重命名格式
|
||||
rename_format = settings.TV_RENAME_FORMAT \
|
||||
if mediainfo.type == MediaType.TV else settings.MOVIE_RENAME_FORMAT
|
||||
# 获取重命名后的名称
|
||||
path = self.get_rename_path(
|
||||
template_string=rename_format,
|
||||
rename_dict=self.__get_naming_dict(meta=meta,
|
||||
mediainfo=mediainfo,
|
||||
file_ext=Path(meta.title).suffix)
|
||||
)
|
||||
return str(path)
|
||||
|
||||
def transfer(self, path: Path, meta: MetaBase, mediainfo: MediaInfo,
|
||||
transfer_type: str, target: Path = None,
|
||||
episodes_info: List[TmdbEpisode] = None,
|
||||
@@ -200,12 +219,13 @@ class FileTransferModule(_ModuleBase):
|
||||
"""
|
||||
# 字幕正则式
|
||||
_zhcn_sub_re = r"([.\[(](((zh[-_])?(cn|ch[si]|sg|sc))|zho?" \
|
||||
r"|chinese|(cn|ch[si]|sg|zho?|eng)[-_&](cn|ch[si]|sg|zho?|eng)" \
|
||||
r"|chinese|(cn|ch[si]|sg|zho?|eng)[-_&]?(cn|ch[si]|sg|zho?|eng)" \
|
||||
r"|简[体中]?)[.\])])" \
|
||||
r"|([\u4e00-\u9fa5]{0,3}[中双][\u4e00-\u9fa5]{0,2}[字文语][\u4e00-\u9fa5]{0,3})" \
|
||||
r"|简体|简中|JPSC" \
|
||||
r"|(?<![a-z0-9])gb(?![a-z0-9])"
|
||||
_zhtw_sub_re = r"([.\[(](((zh[-_])?(hk|tw|cht|tc))" \
|
||||
r"|(cht|eng)[-_&]?(cht|eng)" \
|
||||
r"|繁[体中]?)[.\])])" \
|
||||
r"|繁体中[文字]|中[文字]繁体|繁体|JPTC" \
|
||||
r"|(?<![a-z0-9])big5(?![a-z0-9])"
|
||||
@@ -671,6 +691,10 @@ class FileTransferModule(_ModuleBase):
|
||||
"doubanid": mediainfo.douban_id,
|
||||
# 季号
|
||||
"season": meta.season_seq,
|
||||
# 季年份根据season值获取
|
||||
"season_year": mediainfo.season_years.get(
|
||||
int(meta.season_seq),
|
||||
None) if (mediainfo.season_years and meta.season_seq) else None,
|
||||
# 集号
|
||||
"episode": meta.episode_seqs,
|
||||
# 季集 SxxExx
|
||||
|
||||
@@ -9,6 +9,7 @@ from app.db.sitestatistic_oper import SiteStatisticOper
|
||||
from app.helper.sites import SitesHelper
|
||||
from app.log import logger
|
||||
from app.modules import _ModuleBase
|
||||
from app.modules.indexer.haidan import HaiDanSpider
|
||||
from app.modules.indexer.mtorrent import MTorrentSpider
|
||||
from app.modules.indexer.spider import TorrentSpider
|
||||
from app.modules.indexer.tnode import TNodeSpider
|
||||
@@ -118,6 +119,11 @@ class IndexerModule(_ModuleBase):
|
||||
mtype=mtype,
|
||||
page=page
|
||||
)
|
||||
elif site.get('parser') == "Haidan":
|
||||
error_flag, result = HaiDanSpider(site).search(
|
||||
keyword=search_word,
|
||||
mtype=mtype
|
||||
)
|
||||
else:
|
||||
error_flag, result = self.__spider_search(
|
||||
search_word=search_word,
|
||||
|
||||
167
app/modules/indexer/haidan.py
Normal file
167
app/modules/indexer/haidan.py
Normal file
@@ -0,0 +1,167 @@
|
||||
import urllib.parse
|
||||
from typing import Tuple, List
|
||||
|
||||
from ruamel.yaml import CommentedMap
|
||||
|
||||
from app.core.config import settings
|
||||
from app.db.systemconfig_oper import SystemConfigOper
|
||||
from app.log import logger
|
||||
from app.schemas import MediaType
|
||||
from app.utils.http import RequestUtils
|
||||
from app.utils.string import StringUtils
|
||||
|
||||
|
||||
class HaiDanSpider:
    """
    Search spider for the haidan.video PT site API.
    """
    _indexerid = None
    _domain = None
    _url = None
    _name = ""
    _proxy = None
    _cookie = None
    _ua = None
    _size = 100
    _searchurl = "%storrents.php"
    _detailurl = "%sdetails.php?group_id=%s&torrent_id=%s"
    _timeout = 15

    # Movie categories ('404'/'405' overlap with TV below)
    _movie_category = ['401', '404', '405']
    _tv_category = ['402', '403', '404', '405']

    # Promotion state: 1-normal, 2-free, 3-2X, 4-2X free, 5-50%, 6-2X 50%, 7-30%
    # Download volume factor per promotion state
    _dl_state = {
        "1": 1,
        "2": 0,
        "3": 1,
        "4": 0,
        "5": 0.5,
        "6": 0.5,
        "7": 0.3
    }
    # Upload volume factor per promotion state
    _up_state = {
        "1": 1,
        "2": 1,
        "3": 2,
        "4": 2,
        "5": 1,
        "6": 2,
        "7": 1
    }

    def __init__(self, indexer: CommentedMap):
        """
        :param indexer: site configuration (id, domain, name, cookie, ua, proxy, timeout)
        """
        self.systemconfig = SystemConfigOper()
        if indexer:
            self._indexerid = indexer.get('id')
            self._url = indexer.get('domain')
            self._domain = StringUtils.get_url_domain(self._url)
            self._searchurl = self._searchurl % self._url
            self._name = indexer.get('name')
            if indexer.get('proxy'):
                self._proxy = settings.PROXY
            self._cookie = indexer.get('cookie')
            self._ua = indexer.get('ua')
            self._timeout = indexer.get('timeout') or 15

    def search(self, keyword: str, mtype: MediaType = None) -> Tuple[bool, List[dict]]:
        """
        Search torrents by keyword.

        :param keyword: search keyword; an IMDb id ('tt...') switches to IMDb search
        :param mtype: optional media type filter (movie/TV categories)
        :return: (error flag, torrent list) — error flag True when the
                 search could not be performed
        """

        def __dict_to_query(_params: dict) -> str:
            """
            URL-encode a params dict, joining list values with commas.
            """
            for key, value in _params.items():
                if isinstance(value, list):
                    _params[key] = ','.join(map(str, value))
            # Fix: encode the parameter that was passed in, not the
            # enclosing-scope variable
            return urllib.parse.urlencode(_params)

        # A cookie is required to query the site API
        if not self._cookie:
            return True, []

        if not mtype:
            categories = []
        elif mtype == MediaType.TV:
            categories = self._tv_category
        else:
            categories = self._movie_category

        # Search field: IMDb id vs. title
        if keyword.startswith('tt'):
            search_area = '4'
        else:
            search_area = '0'

        params = {
            "isapi": "1",
            "search_area": search_area,  # 0-title 1-description (slow) 3-uploader 4-IMDb
            "search": keyword,
            "search_mode": "0",  # 0-AND 1-OR 2-exact
            "cat": categories
        }
        res = RequestUtils(
            cookies=self._cookie,
            ua=self._ua,
            proxies=self._proxy,
            timeout=self._timeout
        ).get_res(url=f"{self._searchurl}?{__dict_to_query(params)}")
        torrents = []
        if res and res.status_code == 200:
            result = res.json()
            code = result.get('code')
            if code != 0:
                logger.warn(f"{self._name} 搜索失败:{result.get('msg')}")
                return True, []
            data = result.get('data') or {}
            for tid, item in data.items():
                # Fix: the category belongs to each torrent item, not to the
                # whole response — reading it from `result` always yielded
                # None and classified every torrent as UNKNOWN
                category_value = item.get('category')
                if category_value in self._tv_category \
                        and category_value not in self._movie_category:
                    category = MediaType.TV.value
                elif category_value in self._movie_category:
                    category = MediaType.MOVIE.value
                else:
                    category = MediaType.UNKNOWN.value
                torrent = {
                    'title': item.get('name'),
                    'description': item.get('small_descr'),
                    'enclosure': item.get('url'),
                    'pubdate': StringUtils.format_timestamp(item.get('added')),
                    'size': int(item.get('size') or '0'),
                    'seeders': int(item.get('seeders') or '0'),
                    'peers': int(item.get("leechers") or '0'),
                    'grabs': int(item.get("times_completed") or '0'),
                    'downloadvolumefactor': self.__get_downloadvolumefactor(item.get('sp_state')),
                    'uploadvolumefactor': self.__get_uploadvolumefactor(item.get('sp_state')),
                    'page_url': self._detailurl % (self._url, item.get('group_id'), tid),
                    'labels': [],
                    'category': category
                }
                torrents.append(torrent)
        elif res is not None:
            logger.warn(f"{self._name} 搜索失败,错误码:{res.status_code}")
            return True, []
        else:
            logger.warn(f"{self._name} 搜索失败,无法连接 {self._domain}")
            return True, []
        return False, torrents

    def __get_downloadvolumefactor(self, discount: str) -> float:
        """
        Map a promotion state to its download volume factor (default 1).
        """
        if discount:
            return self._dl_state.get(discount, 1)
        return 1

    def __get_uploadvolumefactor(self, discount: str) -> float:
        """
        Map a promotion state to its upload volume factor (default 1).
        """
        if discount:
            return self._up_state.get(discount, 1)
        return 1
|
||||
@@ -19,13 +19,14 @@ class MTorrentSpider:
|
||||
"""
|
||||
_indexerid = None
|
||||
_domain = None
|
||||
_url = None
|
||||
_name = ""
|
||||
_proxy = None
|
||||
_cookie = None
|
||||
_ua = None
|
||||
_size = 100
|
||||
_searchurl = "%sapi/torrent/search"
|
||||
_downloadurl = "%sapi/torrent/genDlToken"
|
||||
_searchurl = "https://api.%s/api/torrent/search"
|
||||
_downloadurl = "https://api.%s/api/torrent/genDlToken"
|
||||
_pageurl = "%sdetail/%s"
|
||||
_timeout = 15
|
||||
|
||||
@@ -54,7 +55,8 @@ class MTorrentSpider:
|
||||
self.systemconfig = SystemConfigOper()
|
||||
if indexer:
|
||||
self._indexerid = indexer.get('id')
|
||||
self._domain = indexer.get('domain')
|
||||
self._url = indexer.get('domain')
|
||||
self._domain = StringUtils.get_url_domain(self._url)
|
||||
self._searchurl = self._searchurl % self._domain
|
||||
self._name = indexer.get('name')
|
||||
if indexer.get('proxy'):
|
||||
@@ -124,7 +126,7 @@ class MTorrentSpider:
|
||||
'grabs': int(result.get('status', {}).get("timesCompleted") or '0'),
|
||||
'downloadvolumefactor': self.__get_downloadvolumefactor(result.get('status', {}).get("discount")),
|
||||
'uploadvolumefactor': self.__get_uploadvolumefactor(result.get('status', {}).get("discount")),
|
||||
'page_url': self._pageurl % (self._domain, result.get('id')),
|
||||
'page_url': self._pageurl % (self._url, result.get('id')),
|
||||
'imdbid': self.__find_imdbid(result.get('imdb')),
|
||||
'labels': labels,
|
||||
'category': category
|
||||
@@ -191,7 +193,6 @@ class MTorrentSpider:
|
||||
'id': torrent_id
|
||||
},
|
||||
'header': {
|
||||
'Content-Type': 'application/json',
|
||||
'User-Agent': f'{self._ua}',
|
||||
'Accept': 'application/json, text/plain, */*',
|
||||
'x-api-key': self._apikey
|
||||
|
||||
@@ -491,8 +491,10 @@ class TorrentSpider:
|
||||
pubdate = torrent(selector.get('selector', '')).clone()
|
||||
self.__remove(pubdate, selector)
|
||||
items = self.__attribute_or_text(pubdate, selector)
|
||||
self.torrents_info['pubdate'] = self.__index(items, selector)
|
||||
self.torrents_info['pubdate'] = self.__filter_text(self.torrents_info.get('pubdate'),
|
||||
pubdate_str = self.__index(items, selector)
|
||||
if pubdate_str:
|
||||
pubdate_str = pubdate_str.replace('\n', ' ').strip()
|
||||
self.torrents_info['pubdate'] = self.__filter_text(pubdate_str,
|
||||
selector.get('filters'))
|
||||
|
||||
def __get_date_elapsed(self, torrent):
|
||||
@@ -682,6 +684,7 @@ class TorrentSpider:
|
||||
elif method_name == "replace" and isinstance(args, list):
|
||||
text = text.replace(r"%s" % args[0], r"%s" % args[-1])
|
||||
elif method_name == "dateparse" and isinstance(args, str):
|
||||
text = text.replace("\n", " ").strip()
|
||||
text = datetime.datetime.strptime(text, r"%s" % args)
|
||||
elif method_name == "strip":
|
||||
text = text.strip()
|
||||
|
||||
@@ -15,16 +15,10 @@ class Jellyfin:
|
||||
def __init__(self):
|
||||
self._host = settings.JELLYFIN_HOST
|
||||
if self._host:
|
||||
if not self._host.endswith("/"):
|
||||
self._host += "/"
|
||||
if not self._host.startswith("http"):
|
||||
self._host = "http://" + self._host
|
||||
self._host = RequestUtils.standardize_base_url(self._host)
|
||||
self._playhost = settings.JELLYFIN_PLAY_HOST
|
||||
if self._playhost:
|
||||
if not self._playhost.endswith("/"):
|
||||
self._playhost += "/"
|
||||
if not self._playhost.startswith("http"):
|
||||
self._playhost = "http://" + self._playhost
|
||||
self._playhost = RequestUtils.standardize_base_url(self._playhost)
|
||||
self._apikey = settings.JELLYFIN_API_KEY
|
||||
self.user = self.get_user(settings.SUPERUSER)
|
||||
self.serverid = self.get_server_id()
|
||||
|
||||
@@ -1,35 +1,31 @@
|
||||
import json
|
||||
from functools import lru_cache
|
||||
from pathlib import Path
|
||||
from typing import List, Optional, Dict, Tuple, Generator, Any
|
||||
from urllib.parse import quote_plus
|
||||
|
||||
from cachetools import TTLCache, cached
|
||||
from plexapi import media
|
||||
from plexapi.server import PlexServer
|
||||
from requests import Response, Session
|
||||
|
||||
from app import schemas
|
||||
from app.core.config import settings
|
||||
from app.log import logger
|
||||
from app.schemas import MediaType
|
||||
from app.utils.http import RequestUtils
|
||||
|
||||
|
||||
class Plex:
|
||||
|
||||
_plex = None
|
||||
_session = None
|
||||
|
||||
def __init__(self):
|
||||
self._host = settings.PLEX_HOST
|
||||
if self._host:
|
||||
if not self._host.endswith("/"):
|
||||
self._host += "/"
|
||||
if not self._host.startswith("http"):
|
||||
self._host = "http://" + self._host
|
||||
self._host = RequestUtils.standardize_base_url(self._host)
|
||||
self._playhost = settings.PLEX_PLAY_HOST
|
||||
if self._playhost:
|
||||
if not self._playhost.endswith("/"):
|
||||
self._playhost += "/"
|
||||
if not self._playhost.startswith("http"):
|
||||
self._playhost = "http://" + self._playhost
|
||||
self._playhost = RequestUtils.standardize_base_url(self._playhost)
|
||||
self._token = settings.PLEX_TOKEN
|
||||
if self._host and self._token:
|
||||
try:
|
||||
@@ -38,6 +34,7 @@ class Plex:
|
||||
except Exception as e:
|
||||
self._plex = None
|
||||
logger.error(f"Plex服务器连接失败:{str(e)}")
|
||||
self._session = self.__adapt_plex_session()
|
||||
|
||||
def is_inactive(self) -> bool:
|
||||
"""
|
||||
@@ -58,7 +55,7 @@ class Plex:
|
||||
self._plex = None
|
||||
logger.error(f"Plex服务器连接失败:{str(e)}")
|
||||
|
||||
@lru_cache(maxsize=10)
|
||||
@cached(cache=TTLCache(maxsize=100, ttl=86400))
|
||||
def __get_library_images(self, library_key: str, mtype: int) -> Optional[List[str]]:
|
||||
"""
|
||||
获取媒体服务器最近添加的媒体的图片列表
|
||||
@@ -76,10 +73,11 @@ class Plex:
|
||||
# 如果总数不足,接续获取下一页
|
||||
while len(poster_urls) < total_size:
|
||||
items = self._plex.fetchItems(f"/hubs/home/recentlyAdded?type={mtype}§ionID={library_key}",
|
||||
container_size=total_size,
|
||||
container_start=container_start)
|
||||
container_start=container_start,
|
||||
container_size=8,
|
||||
maxresults=8)
|
||||
for item in items:
|
||||
if item.type == 'episode':
|
||||
if item.type == "episode":
|
||||
# 如果是剧集的单集,则去找上级的图片
|
||||
if item.parentThumb is not None:
|
||||
poster_urls[item.parentThumb] = None
|
||||
@@ -267,25 +265,59 @@ class Plex:
|
||||
season_episodes[episode.seasonNumber].append(episode.index)
|
||||
return videos.key, season_episodes
|
||||
|
||||
def get_remote_image_by_id(self, item_id: str, image_type: str) -> Optional[str]:
|
||||
def get_remote_image_by_id(self, item_id: str, image_type: str, depth: int = 0) -> Optional[str]:
|
||||
"""
|
||||
根据ItemId从Plex查询图片地址
|
||||
:param item_id: 在Emby中的ID
|
||||
:param item_id: 在Plex中的ID
|
||||
:param image_type: 图片的类型,Poster或者Backdrop等
|
||||
:param depth: 当前递归深度,默认为0
|
||||
:return: 图片对应在TMDB中的URL
|
||||
"""
|
||||
if not self._plex:
|
||||
if not self._plex or depth > 2 or not item_id:
|
||||
return None
|
||||
try:
|
||||
if image_type == "Poster":
|
||||
images = self._plex.fetchItems('/library/metadata/%s/posters' % item_id,
|
||||
cls=media.Poster)
|
||||
image_url = None
|
||||
ekey = f"/library/metadata/{item_id}"
|
||||
item = self._plex.fetchItem(ekey=ekey)
|
||||
if not item:
|
||||
return None
|
||||
# 如果配置了外网播放地址以及Token,则默认从Plex媒体服务器获取图片,否则返回有外网地址的图片资源
|
||||
if settings.PLEX_PLAY_HOST and settings.PLEX_TOKEN:
|
||||
query = {"X-Plex-Token": settings.PLEX_TOKEN}
|
||||
if image_type == "Poster":
|
||||
if item.thumb:
|
||||
image_url = RequestUtils.combine_url(host=settings.PLEX_PLAY_HOST, path=item.thumb, query=query)
|
||||
else:
|
||||
# 默认使用art也就是Backdrop进行处理
|
||||
if item.art:
|
||||
image_url = RequestUtils.combine_url(host=settings.PLEX_PLAY_HOST, path=item.art, query=query)
|
||||
# 这里对episode进行特殊处理,实际上episode的Backdrop是Poster
|
||||
# 也有个别情况,比如机智的凡人小子episode就是Poster,因此这里把episode的优先级降低,默认还是取art
|
||||
if not image_url and item.TYPE == "episode" and item.thumb:
|
||||
image_url = RequestUtils.combine_url(host=settings.PLEX_PLAY_HOST, path=item.thumb, query=query)
|
||||
else:
|
||||
images = self._plex.fetchItems('/library/metadata/%s/arts' % item_id,
|
||||
cls=media.Art)
|
||||
for image in images:
|
||||
if hasattr(image, 'key') and image.key.startswith('http'):
|
||||
return image.key
|
||||
if image_type == "Poster":
|
||||
images = self._plex.fetchItems(ekey=f"{ekey}/posters",
|
||||
cls=media.Poster)
|
||||
else:
|
||||
# 默认使用art也就是Backdrop进行处理
|
||||
images = self._plex.fetchItems(ekey=f"{ekey}/arts",
|
||||
cls=media.Art)
|
||||
# 这里对episode进行特殊处理,实际上episode的Backdrop是Poster
|
||||
# 也有个别情况,比如机智的凡人小子episode就是Poster,因此这里把episode的优先级降低,默认还是取art
|
||||
if not images and item.TYPE == "episode":
|
||||
images = self._plex.fetchItems(ekey=f"{ekey}/posters",
|
||||
cls=media.Poster)
|
||||
for image in images:
|
||||
if hasattr(image, "key") and image.key.startswith("http"):
|
||||
image_url = image.key
|
||||
break
|
||||
# 如果最后还是找不到,则递归父级进行查找
|
||||
if not image_url and hasattr(item, "parentRatingKey"):
|
||||
return self.get_remote_image_by_id(item_id=item.parentRatingKey,
|
||||
image_type=image_type,
|
||||
depth=depth + 1)
|
||||
return image_url
|
||||
except Exception as e:
|
||||
logger.error(f"获取封面出错:" + str(e))
|
||||
return None
|
||||
@@ -631,8 +663,12 @@ class Plex:
|
||||
return []
|
||||
# 媒体库白名单
|
||||
allow_library = ",".join([lib.id for lib in self.get_librarys()])
|
||||
params = {'contentDirectoryID': allow_library}
|
||||
items = self._plex.fetchItems("/hubs/continueWatching/items", container_start=0, container_size=num, params=params)
|
||||
params = {"contentDirectoryID": allow_library}
|
||||
items = self._plex.fetchItems("/hubs/continueWatching/items",
|
||||
container_start=0,
|
||||
container_size=num,
|
||||
maxresults=num,
|
||||
params=params)
|
||||
ret_resume = []
|
||||
for item in items:
|
||||
item_type = MediaType.MOVIE.value if item.TYPE == "movie" else MediaType.TV.value
|
||||
@@ -723,3 +759,71 @@ class Plex:
|
||||
))
|
||||
offset += num
|
||||
return ret_resume[:num]
|
||||
|
||||
def get_data(self, endpoint: str, **kwargs) -> Optional[Response]:
|
||||
"""
|
||||
自定义从媒体服务器获取数据
|
||||
:param endpoint: 端点
|
||||
:param kwargs: 其他请求参数,如headers, cookies, proxies等
|
||||
"""
|
||||
return self.__request(method="get", endpoint=endpoint, **kwargs)
|
||||
|
||||
def post_data(self, endpoint: str, **kwargs) -> Optional[Response]:
|
||||
"""
|
||||
自定义从媒体服务器获取数据
|
||||
:param endpoint: 端点
|
||||
:param kwargs: 其他请求参数,如headers, cookies, proxies等
|
||||
"""
|
||||
return self.__request(method="post", endpoint=endpoint, **kwargs)
|
||||
|
||||
def put_data(self, endpoint: str, **kwargs) -> Optional[Response]:
|
||||
"""
|
||||
自定义从媒体服务器获取数据
|
||||
:param endpoint: 端点
|
||||
:param kwargs: 其他请求参数,如headers, cookies, proxies等
|
||||
"""
|
||||
return self.__request(method="put", endpoint=endpoint, **kwargs)
|
||||
|
||||
def __request(self, method: str, endpoint: str, **kwargs) -> Optional[Response]:
|
||||
"""
|
||||
自定义从媒体服务器获取数据
|
||||
:param method: HTTP方法,如 get, post, put 等
|
||||
:param endpoint: 端点
|
||||
:param kwargs: 其他请求参数,如headers, cookies, proxies等
|
||||
"""
|
||||
if not self._session:
|
||||
return
|
||||
try:
|
||||
url = RequestUtils.adapt_request_url(host=self._host, endpoint=endpoint)
|
||||
kwargs.setdefault("headers", self.__get_request_headers())
|
||||
kwargs.setdefault("raise_exception", True)
|
||||
request_method = getattr(RequestUtils(session=self._session), f"{method}_res", None)
|
||||
if request_method:
|
||||
return request_method(url=url, **kwargs)
|
||||
else:
|
||||
logger.error(f"方法 {method} 不存在")
|
||||
return None
|
||||
except Exception as e:
|
||||
logger.error(f"连接Plex出错:" + str(e))
|
||||
return None
|
||||
|
||||
@staticmethod
|
||||
def __get_request_headers() -> dict:
|
||||
"""获取请求头"""
|
||||
return {
|
||||
"X-Plex-Token": settings.PLEX_TOKEN,
|
||||
"Accept": "application/json",
|
||||
"Content-Type": "application/json"
|
||||
}
|
||||
|
||||
@staticmethod
|
||||
def __adapt_plex_session() -> Session:
|
||||
"""
|
||||
创建并配置一个针对Plex服务的requests.Session实例
|
||||
这个会话包括特定的头部信息,用于处理所有的Plex请求
|
||||
"""
|
||||
# 设置请求头部,通常包括验证令牌和接受/内容类型头部
|
||||
headers = Plex.__get_request_headers()
|
||||
session = Session()
|
||||
session.headers = headers
|
||||
return session
|
||||
|
||||
@@ -255,7 +255,7 @@ class QbittorrentModule(_ModuleBase):
|
||||
return
|
||||
self.qbittorrent.set_torrents_tag(ids=hashs, tags=['已整理'])
|
||||
# 移动模式删除种子
|
||||
if settings.TRANSFER_TYPE == "move":
|
||||
if settings.TRANSFER_TYPE in ["move", "rclone_move"]:
|
||||
if self.remove_torrents(hashs):
|
||||
logger.info(f"移动模式删除种子成功:{hashs} ")
|
||||
# 删除残留文件
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
import re
|
||||
import threading
|
||||
import uuid
|
||||
from pathlib import Path
|
||||
from threading import Event
|
||||
from typing import Optional, List, Dict
|
||||
@@ -87,6 +88,8 @@ class Telegram:
|
||||
|
||||
try:
|
||||
if text:
|
||||
# 对text进行Markdown特殊字符转义
|
||||
text = re.sub(r"([_`])", r"\\\1", text)
|
||||
caption = f"*{title}*\n{text}"
|
||||
else:
|
||||
caption = f"*{title}*"
|
||||
@@ -199,13 +202,15 @@ class Telegram:
|
||||
"""
|
||||
|
||||
if image:
|
||||
req = RequestUtils(proxies=settings.PROXY).get_res(image)
|
||||
if req is None:
|
||||
res = RequestUtils(proxies=settings.PROXY).get_res(image)
|
||||
if res is None:
|
||||
raise Exception("获取图片失败")
|
||||
if req.content:
|
||||
image_file = Path(settings.TEMP_PATH) / Path(image).name
|
||||
image_file.write_bytes(req.content)
|
||||
if res.content:
|
||||
# 使用随机标识构建图片文件的完整路径,并写入图片内容到文件
|
||||
image_file = Path(settings.TEMP_PATH) / str(uuid.uuid4())
|
||||
image_file.write_bytes(res.content)
|
||||
photo = InputFile(image_file)
|
||||
# 发送图片到Telegram
|
||||
ret = self._bot.send_photo(chat_id=userid or self._telegram_chat_id,
|
||||
photo=photo,
|
||||
caption=caption,
|
||||
|
||||
@@ -216,14 +216,18 @@ class TheMovieDbModule(_ModuleBase):
|
||||
tmdbid=info.get("id"))
|
||||
return info
|
||||
|
||||
def tmdb_info(self, tmdbid: int, mtype: MediaType) -> Optional[dict]:
|
||||
def tmdb_info(self, tmdbid: int, mtype: MediaType, season: int = None) -> Optional[dict]:
|
||||
"""
|
||||
获取TMDB信息
|
||||
:param tmdbid: int
|
||||
:param mtype: 媒体类型
|
||||
:param season: 季号
|
||||
:return: TVDB信息
|
||||
"""
|
||||
return self.tmdb.get_info(mtype=mtype, tmdbid=tmdbid)
|
||||
if not season:
|
||||
return self.tmdb.get_info(mtype=mtype, tmdbid=tmdbid)
|
||||
else:
|
||||
return self.tmdb.get_tv_season_detail(tmdbid=tmdbid, season=season)
|
||||
|
||||
def media_category(self) -> Optional[Dict[str, list]]:
|
||||
"""
|
||||
@@ -332,6 +336,29 @@ class TheMovieDbModule(_ModuleBase):
|
||||
force_img=force_img)
|
||||
logger.info(f"{path} 刮削完成")
|
||||
|
||||
def metadata_nfo(self, meta: MetaBase, mediainfo: MediaInfo,
|
||||
season: int = None, episode: int = None) -> Optional[str]:
|
||||
"""
|
||||
获取NFO文件内容文本
|
||||
:param meta: 元数据
|
||||
:param mediainfo: 媒体信息
|
||||
:param season: 季号
|
||||
:param episode: 集号
|
||||
"""
|
||||
if settings.SCRAP_SOURCE != "themoviedb":
|
||||
return None
|
||||
return self.scraper.get_metadata_nfo(meta=meta, mediainfo=mediainfo, season=season, episode=episode)
|
||||
|
||||
def metadata_img(self, mediainfo: MediaInfo, season: int = None) -> Optional[dict]:
|
||||
"""
|
||||
获取图片名称和url
|
||||
:param mediainfo: 媒体信息
|
||||
:param season: 季号
|
||||
"""
|
||||
if settings.SCRAP_SOURCE != "themoviedb":
|
||||
return None
|
||||
return self.scraper.get_metadata_img(mediainfo=mediainfo, season=season)
|
||||
|
||||
def tmdb_discover(self, mtype: MediaType, sort_by: str, with_genres: str, with_original_language: str,
|
||||
page: int = 1) -> Optional[List[MediaInfo]]:
|
||||
"""
|
||||
@@ -387,9 +414,9 @@ class TheMovieDbModule(_ModuleBase):
|
||||
:param season: 季
|
||||
"""
|
||||
season_info = self.tmdb.get_tv_season_detail(tmdbid=tmdbid, season=season)
|
||||
if not season_info:
|
||||
if not season_info or not season_info.get("episodes"):
|
||||
return []
|
||||
return [schemas.TmdbEpisode(**episode) for episode in season_info.get("episodes", [])]
|
||||
return [schemas.TmdbEpisode(**episode) for episode in season_info.get("episodes")]
|
||||
|
||||
def scheduler_job(self) -> None:
|
||||
"""
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import traceback
|
||||
from pathlib import Path
|
||||
from typing import Union
|
||||
from typing import Union, Optional, Tuple
|
||||
from xml.dom import minidom
|
||||
|
||||
from requests import RequestException
|
||||
@@ -26,6 +26,90 @@ class TmdbScraper:
|
||||
def __init__(self, tmdb):
|
||||
self.tmdb = tmdb
|
||||
|
||||
def get_metadata_nfo(self, meta: MetaBase, mediainfo: MediaInfo,
|
||||
season: int = None, episode: int = None) -> Optional[str]:
|
||||
"""
|
||||
获取NFO文件内容文本
|
||||
:param meta: 元数据
|
||||
:param mediainfo: 媒体信息
|
||||
:param season: 季号
|
||||
:param episode: 集号
|
||||
"""
|
||||
if mediainfo.type == MediaType.MOVIE:
|
||||
# 电影元数据文件
|
||||
doc = self.__gen_movie_nfo_file(mediainfo=mediainfo)
|
||||
else:
|
||||
if season:
|
||||
# 查询季信息
|
||||
seasoninfo = self.tmdb.get_tv_season_detail(mediainfo.tmdb_id, meta.begin_season)
|
||||
if episode:
|
||||
# 集元数据文件
|
||||
episodeinfo = self.__get_episode_detail(seasoninfo, meta.begin_episode)
|
||||
doc = self.__gen_tv_episode_nfo_file(episodeinfo=episodeinfo, tmdbid=mediainfo.tmdb_id,
|
||||
season=season, episode=episode)
|
||||
else:
|
||||
# 季元数据文件
|
||||
doc = self.__gen_tv_season_nfo_file(seasoninfo=seasoninfo, season=season)
|
||||
else:
|
||||
# 电视剧元数据文件
|
||||
doc = self.__gen_tv_nfo_file(mediainfo=mediainfo)
|
||||
if doc:
|
||||
return doc.toprettyxml(indent=" ", encoding="utf-8")
|
||||
|
||||
return None
|
||||
|
||||
def get_metadata_img(self, mediainfo: MediaInfo, season: int = None) -> dict:
|
||||
"""
|
||||
获取图片名称和url
|
||||
:param mediainfo: 媒体信息
|
||||
:param season: 季号
|
||||
"""
|
||||
images = {}
|
||||
if season:
|
||||
# 只需要季的图片
|
||||
seasoninfo = self.tmdb.get_tv_season_detail(mediainfo.tmdb_id, season)
|
||||
if seasoninfo:
|
||||
# TMDB季poster图片
|
||||
poster_name, poster_url = self.get_season_poster(seasoninfo, season)
|
||||
if poster_name and poster_url:
|
||||
images[poster_name] = poster_url
|
||||
return images
|
||||
# 主媒体图片
|
||||
for attr_name, attr_value in vars(mediainfo).items():
|
||||
if attr_value \
|
||||
and attr_name.endswith("_path") \
|
||||
and attr_value \
|
||||
and isinstance(attr_value, str) \
|
||||
and attr_value.startswith("http"):
|
||||
image_name = attr_name.replace("_path", "") + Path(attr_value).suffix
|
||||
images[image_name] = attr_value
|
||||
return images
|
||||
|
||||
@staticmethod
|
||||
def get_season_poster(seasoninfo: dict, season: int) -> Tuple[str, str]:
|
||||
"""
|
||||
获取季的海报
|
||||
"""
|
||||
# TMDB季poster图片
|
||||
sea_seq = str(season).rjust(2, '0')
|
||||
if seasoninfo.get("poster_path"):
|
||||
# 后缀
|
||||
ext = Path(seasoninfo.get('poster_path')).suffix
|
||||
# URL
|
||||
url = f"https://{settings.TMDB_IMAGE_DOMAIN}/t/p/original{seasoninfo.get('poster_path')}"
|
||||
image_name = f"season{sea_seq}-poster{ext}"
|
||||
return image_name, url
|
||||
|
||||
@staticmethod
|
||||
def __get_episode_detail(seasoninfo: dict, episode: int) -> dict:
|
||||
"""
|
||||
根据季信息获取集的信息
|
||||
"""
|
||||
for _episode_info in seasoninfo.get("episodes") or []:
|
||||
if _episode_info.get("episode_number") == episode:
|
||||
return _episode_info
|
||||
return {}
|
||||
|
||||
def gen_scraper_files(self, mediainfo: MediaInfo, file_path: Path, transfer_type: str,
|
||||
metainfo: MetaBase = None, force_nfo: bool = False, force_img: bool = False):
|
||||
"""
|
||||
@@ -45,15 +129,6 @@ class TmdbScraper:
|
||||
self._force_nfo = force_nfo
|
||||
self._force_img = force_img
|
||||
|
||||
def __get_episode_detail(_seasoninfo: dict, _episode: int):
|
||||
"""
|
||||
根据季信息获取集的信息
|
||||
"""
|
||||
for _episode_info in _seasoninfo.get("episodes") or []:
|
||||
if _episode_info.get("episode_number") == _episode:
|
||||
return _episode_info
|
||||
return {}
|
||||
|
||||
try:
|
||||
# 电影,路径为文件名 名称/名称.xxx 或者蓝光原盘目录 名称/名称
|
||||
if mediainfo.type == MediaType.MOVIE:
|
||||
@@ -64,17 +139,11 @@ class TmdbScraper:
|
||||
self.__gen_movie_nfo_file(mediainfo=mediainfo,
|
||||
file_path=file_path)
|
||||
# 生成电影图片
|
||||
for attr_name, attr_value in vars(mediainfo).items():
|
||||
if attr_value \
|
||||
and attr_name.endswith("_path") \
|
||||
and attr_value \
|
||||
and isinstance(attr_value, str) \
|
||||
and attr_value.startswith("http"):
|
||||
image_name = attr_name.replace("_path", "") + Path(attr_value).suffix
|
||||
image_path = file_path.with_name(image_name)
|
||||
if self._force_img or not image_path.exists():
|
||||
self.__save_image(url=attr_value,
|
||||
file_path=image_path)
|
||||
image_dict = self.get_metadata_img(mediainfo=mediainfo)
|
||||
for image_name, image_url in image_dict.items():
|
||||
image_path = file_path.with_name(image_name)
|
||||
if self._force_img or not image_path.exists():
|
||||
self.__save_image(url=image_url, file_path=image_path)
|
||||
# 电视剧,路径为每一季的文件名 名称/Season xx/名称 SxxExx.xxx
|
||||
else:
|
||||
# 如果有上游传入的元信息则使用,否则使用文件名识别
|
||||
@@ -87,18 +156,11 @@ class TmdbScraper:
|
||||
self.__gen_tv_nfo_file(mediainfo=mediainfo,
|
||||
dir_path=file_path.parents[1])
|
||||
# 生成根目录图片
|
||||
for attr_name, attr_value in vars(mediainfo).items():
|
||||
if attr_name \
|
||||
and attr_name.endswith("_path") \
|
||||
and not attr_name.startswith("season") \
|
||||
and attr_value \
|
||||
and isinstance(attr_value, str) \
|
||||
and attr_value.startswith("http"):
|
||||
image_name = attr_name.replace("_path", "") + Path(attr_value).suffix
|
||||
image_path = file_path.parent.with_name(image_name)
|
||||
if self._force_img or not image_path.exists():
|
||||
self.__save_image(url=attr_value,
|
||||
file_path=image_path)
|
||||
image_dict = self.get_metadata_img(mediainfo=mediainfo)
|
||||
for image_name, image_url in image_dict.items():
|
||||
image_path = file_path.parent.with_name(image_name)
|
||||
if self._force_img or not image_path.exists():
|
||||
self.__save_image(url=image_url, file_path=image_path)
|
||||
# 查询季信息
|
||||
seasoninfo = self.tmdb.get_tv_season_detail(mediainfo.tmdb_id, meta.begin_season)
|
||||
if seasoninfo:
|
||||
@@ -107,31 +169,14 @@ class TmdbScraper:
|
||||
self.__gen_tv_season_nfo_file(seasoninfo=seasoninfo,
|
||||
season=meta.begin_season,
|
||||
season_path=file_path.parent)
|
||||
# TMDB季poster图片
|
||||
sea_seq = str(meta.begin_season).rjust(2, '0')
|
||||
if seasoninfo.get("poster_path"):
|
||||
# 后缀
|
||||
ext = Path(seasoninfo.get('poster_path')).suffix
|
||||
# URL
|
||||
url = f"https://{settings.TMDB_IMAGE_DOMAIN}/t/p/original{seasoninfo.get('poster_path')}"
|
||||
image_path = file_path.parent.with_name(f"season{sea_seq}-poster{ext}")
|
||||
# TMDB季图片
|
||||
poster_name, poster_url = self.get_season_poster(seasoninfo, meta.begin_season)
|
||||
if poster_name and poster_url:
|
||||
image_path = file_path.parent.with_name(poster_name)
|
||||
if self._force_img or not image_path.exists():
|
||||
self.__save_image(url=url, file_path=image_path)
|
||||
# 季的其它图片
|
||||
for attr_name, attr_value in vars(mediainfo).items():
|
||||
if attr_value \
|
||||
and attr_name.startswith("season") \
|
||||
and not attr_name.endswith("poster_path") \
|
||||
and attr_value \
|
||||
and isinstance(attr_value, str) \
|
||||
and attr_value.startswith("http"):
|
||||
image_name = attr_name.replace("_path", "") + Path(attr_value).suffix
|
||||
image_path = file_path.parent.with_name(image_name)
|
||||
if self._force_img or not image_path.exists():
|
||||
self.__save_image(url=attr_value,
|
||||
file_path=image_path)
|
||||
self.__save_image(url=poster_url, file_path=image_path)
|
||||
# 查询集详情
|
||||
episodeinfo = __get_episode_detail(seasoninfo, meta.begin_episode)
|
||||
episodeinfo = self.__get_episode_detail(seasoninfo, meta.begin_episode)
|
||||
if episodeinfo:
|
||||
# 集NFO
|
||||
if self._force_nfo or not file_path.with_suffix(".nfo").exists():
|
||||
@@ -153,7 +198,7 @@ class TmdbScraper:
|
||||
logger.error(f"{file_path} 刮削失败:{str(e)} - {traceback.format_exc()}")
|
||||
|
||||
@staticmethod
|
||||
def __gen_common_nfo(mediainfo: MediaInfo, doc, root):
|
||||
def __gen_common_nfo(mediainfo: MediaInfo, doc: minidom.Document, root: minidom.Element):
|
||||
"""
|
||||
生成公共NFO
|
||||
"""
|
||||
@@ -207,14 +252,15 @@ class TmdbScraper:
|
||||
|
||||
def __gen_movie_nfo_file(self,
|
||||
mediainfo: MediaInfo,
|
||||
file_path: Path):
|
||||
file_path: Path = None) -> minidom.Document:
|
||||
"""
|
||||
生成电影的NFO描述文件
|
||||
:param mediainfo: 识别后的媒体信息
|
||||
:param file_path: 电影文件路径
|
||||
"""
|
||||
# 开始生成XML
|
||||
logger.info(f"正在生成电影NFO文件:{file_path.name}")
|
||||
if file_path:
|
||||
logger.info(f"正在生成电影NFO文件:{file_path.name}")
|
||||
doc = minidom.Document()
|
||||
root = DomUtils.add_node(doc, doc, "movie")
|
||||
# 公共部分
|
||||
@@ -229,18 +275,21 @@ class TmdbScraper:
|
||||
# 年份
|
||||
DomUtils.add_node(doc, root, "year", mediainfo.year or "")
|
||||
# 保存
|
||||
self.__save_nfo(doc, file_path.with_suffix(".nfo"))
|
||||
if file_path:
|
||||
self.__save_nfo(doc, file_path.with_suffix(".nfo"))
|
||||
return doc
|
||||
|
||||
def __gen_tv_nfo_file(self,
|
||||
mediainfo: MediaInfo,
|
||||
dir_path: Path):
|
||||
dir_path: Path = None) -> minidom.Document:
|
||||
"""
|
||||
生成电视剧的NFO描述文件
|
||||
:param mediainfo: 媒体信息
|
||||
:param dir_path: 电视剧根目录
|
||||
"""
|
||||
# 开始生成XML
|
||||
logger.info(f"正在生成电视剧NFO文件:{dir_path.name}")
|
||||
if dir_path:
|
||||
logger.info(f"正在生成电视剧NFO文件:{dir_path.name}")
|
||||
doc = minidom.Document()
|
||||
root = DomUtils.add_node(doc, doc, "tvshow")
|
||||
# 公共部分
|
||||
@@ -257,16 +306,21 @@ class TmdbScraper:
|
||||
DomUtils.add_node(doc, root, "season", "-1")
|
||||
DomUtils.add_node(doc, root, "episode", "-1")
|
||||
# 保存
|
||||
self.__save_nfo(doc, dir_path.joinpath("tvshow.nfo"))
|
||||
if dir_path:
|
||||
self.__save_nfo(doc, dir_path.joinpath("tvshow.nfo"))
|
||||
|
||||
def __gen_tv_season_nfo_file(self, seasoninfo: dict, season: int, season_path: Path):
|
||||
return doc
|
||||
|
||||
def __gen_tv_season_nfo_file(self, seasoninfo: dict,
|
||||
season: int, season_path: Path = None) -> minidom.Document:
|
||||
"""
|
||||
生成电视剧季的NFO描述文件
|
||||
:param seasoninfo: TMDB季媒体信息
|
||||
:param season: 季号
|
||||
:param season_path: 电视剧季的目录
|
||||
"""
|
||||
logger.info(f"正在生成季NFO文件:{season_path.name}")
|
||||
if season_path:
|
||||
logger.info(f"正在生成季NFO文件:{season_path.name}")
|
||||
doc = minidom.Document()
|
||||
root = DomUtils.add_node(doc, doc, "season")
|
||||
# 简介
|
||||
@@ -285,14 +339,16 @@ class TmdbScraper:
|
||||
# seasonnumber
|
||||
DomUtils.add_node(doc, root, "seasonnumber", str(season))
|
||||
# 保存
|
||||
self.__save_nfo(doc, season_path.joinpath("season.nfo"))
|
||||
if season_path:
|
||||
self.__save_nfo(doc, season_path.joinpath("season.nfo"))
|
||||
return doc
|
||||
|
||||
def __gen_tv_episode_nfo_file(self,
|
||||
tmdbid: int,
|
||||
episodeinfo: dict,
|
||||
season: int,
|
||||
episode: int,
|
||||
file_path: Path):
|
||||
file_path: Path = None) -> minidom.Document:
|
||||
"""
|
||||
生成电视剧集的NFO描述文件
|
||||
:param tmdbid: TMDBID
|
||||
@@ -302,7 +358,8 @@ class TmdbScraper:
|
||||
:param file_path: 集文件的路径
|
||||
"""
|
||||
# 开始生成集的信息
|
||||
logger.info(f"正在生成剧集NFO文件:{file_path.name}")
|
||||
if file_path:
|
||||
logger.info(f"正在生成剧集NFO文件:{file_path.name}")
|
||||
doc = minidom.Document()
|
||||
root = DomUtils.add_node(doc, doc, "episodedetails")
|
||||
# TMDBID
|
||||
@@ -348,7 +405,9 @@ class TmdbScraper:
|
||||
DomUtils.add_node(doc, xactor, "profile",
|
||||
f"https://www.themoviedb.org/person/{actor.get('id')}")
|
||||
# 保存文件
|
||||
self.__save_nfo(doc, file_path.with_suffix(".nfo"))
|
||||
if file_path:
|
||||
self.__save_nfo(doc, file_path.with_suffix(".nfo"))
|
||||
return doc
|
||||
|
||||
@retry(RequestException, logger=logger)
|
||||
def __save_image(self, url: str, file_path: Path):
|
||||
@@ -371,7 +430,7 @@ class TmdbScraper:
|
||||
except Exception as err:
|
||||
logger.error(f"{file_path.stem}图片下载失败:{str(err)}")
|
||||
|
||||
def __save_nfo(self, doc, file_path: Path):
|
||||
def __save_nfo(self, doc: minidom.Document, file_path: Path):
|
||||
"""
|
||||
保存NFO
|
||||
"""
|
||||
|
||||
@@ -231,7 +231,7 @@ class TmdbApi:
|
||||
logger.error(f"连接TMDB出错:{str(e)} - {traceback.format_exc()}")
|
||||
return None
|
||||
logger.debug(f"API返回:{str(self.search.total_results)}")
|
||||
if len(movies) == 0:
|
||||
if (movies is None) or (len(movies) == 0):
|
||||
logger.debug(f"{name} 未找到相关电影信息!")
|
||||
return {}
|
||||
else:
|
||||
@@ -278,7 +278,7 @@ class TmdbApi:
|
||||
logger.error(f"连接TMDB出错:{str(e)} - {traceback.format_exc()}")
|
||||
return None
|
||||
logger.debug(f"API返回:{str(self.search.total_results)}")
|
||||
if len(tvs) == 0:
|
||||
if (tvs is None) or (len(tvs) == 0):
|
||||
logger.debug(f"{name} 未找到相关剧集信息!")
|
||||
return {}
|
||||
else:
|
||||
@@ -340,7 +340,7 @@ class TmdbApi:
|
||||
print(traceback.format_exc())
|
||||
return None
|
||||
|
||||
if len(tvs) == 0:
|
||||
if (tvs is None) or (len(tvs) == 0):
|
||||
logger.debug("%s 未找到季%s相关信息!" % (name, season_number))
|
||||
return {}
|
||||
else:
|
||||
@@ -422,7 +422,7 @@ class TmdbApi:
|
||||
logger.debug(f"API返回:{str(self.search.total_results)}")
|
||||
# 返回结果
|
||||
ret_info = {}
|
||||
if len(multis) == 0:
|
||||
if (multis is None) or (len(multis) == 0):
|
||||
logger.debug(f"{name} 未找到相关媒体息!")
|
||||
return {}
|
||||
else:
|
||||
@@ -1072,7 +1072,8 @@ class TmdbApi:
|
||||
return []
|
||||
try:
|
||||
logger.debug(f"正在发现电视剧:{kwargs}...")
|
||||
tmdbinfo = self.discover.discover_tv_shows(kwargs)
|
||||
params_tuple = tuple(kwargs.items())
|
||||
tmdbinfo = self.discover.discover_tv_shows(params_tuple)
|
||||
if tmdbinfo:
|
||||
for info in tmdbinfo:
|
||||
info['media_type'] = MediaType.TV
|
||||
|
||||
@@ -17,18 +17,18 @@ class Discover(TMDb):
|
||||
def discover_movies(self, params_tuple):
|
||||
"""
|
||||
Discover movies by different types of data like average rating, number of votes, genres and certifications.
|
||||
:param params: dict
|
||||
:param params_tuple: dict
|
||||
:return:
|
||||
"""
|
||||
params = dict(params_tuple)
|
||||
return self._request_obj(self._urls["movies"], urlencode(params), key="results", call_cached=False)
|
||||
|
||||
@cached(cache=TTLCache(maxsize=1, ttl=43200))
|
||||
def discover_tv_shows(self, params):
|
||||
def discover_tv_shows(self, params_tuple):
|
||||
"""
|
||||
Discover TV shows by different types of data like average rating, number of votes, genres,
|
||||
the network they aired on and air dates.
|
||||
:param params: dict
|
||||
:param params_tuple: dict
|
||||
:return:
|
||||
"""
|
||||
return self._request_obj(self._urls["tv"], urlencode(params), key="results", call_cached=False)
|
||||
return self._request_obj(self._urls["tv"], urlencode(params_tuple), key="results", call_cached=False)
|
||||
|
||||
@@ -251,7 +251,7 @@ class TransmissionModule(_ModuleBase):
|
||||
tags = ['已整理']
|
||||
self.transmission.set_torrent_tag(ids=hashs, tags=tags)
|
||||
# 移动模式删除种子
|
||||
if settings.TRANSFER_TYPE == "move":
|
||||
if settings.TRANSFER_TYPE in ["move", "rclone_move"]:
|
||||
if self.remove_torrents(hashs):
|
||||
logger.info(f"移动模式删除种子成功:{hashs} ")
|
||||
# 删除残留文件
|
||||
|
||||
@@ -15,7 +15,7 @@ class WebPushModule(_ModuleBase):
|
||||
|
||||
@staticmethod
|
||||
def get_name() -> str:
|
||||
return "VoceChat"
|
||||
return "WebPush"
|
||||
|
||||
def stop(self):
|
||||
pass
|
||||
|
||||
@@ -100,28 +100,57 @@ class WeChat:
|
||||
"""
|
||||
message_url = self._send_msg_url % self.__get_access_token()
|
||||
if text:
|
||||
conent = "%s\n%s" % (title, text.replace("\n\n", "\n"))
|
||||
content = "%s\n%s" % (title, text.replace("\n\n", "\n"))
|
||||
else:
|
||||
conent = title
|
||||
content = title
|
||||
|
||||
if link:
|
||||
conent = f"{conent}\n点击查看:{link}"
|
||||
content = f"{content}\n点击查看:{link}"
|
||||
|
||||
if not userid:
|
||||
userid = "@all"
|
||||
|
||||
req_json = {
|
||||
"touser": userid,
|
||||
"msgtype": "text",
|
||||
"agentid": self._appid,
|
||||
"text": {
|
||||
"content": conent
|
||||
},
|
||||
"safe": 0,
|
||||
"enable_id_trans": 0,
|
||||
"enable_duplicate_check": 0
|
||||
}
|
||||
return self.__post_request(message_url, req_json)
|
||||
# Check if content exceeds 2048 bytes and split if necessary
|
||||
if len(content.encode('utf-8')) > 2048:
|
||||
content_chunks = []
|
||||
current_chunk = ""
|
||||
for line in content.splitlines():
|
||||
if len(current_chunk.encode('utf-8')) + len(line.encode('utf-8')) > 2048:
|
||||
content_chunks.append(current_chunk.strip())
|
||||
current_chunk = ""
|
||||
current_chunk += line + "\n"
|
||||
if current_chunk:
|
||||
content_chunks.append(current_chunk.strip())
|
||||
|
||||
# Send each chunk as a separate message
|
||||
for chunk in content_chunks:
|
||||
req_json = {
|
||||
"touser": userid,
|
||||
"msgtype": "text",
|
||||
"agentid": self._appid,
|
||||
"text": {
|
||||
"content": chunk
|
||||
},
|
||||
"safe": 0,
|
||||
"enable_id_trans": 0,
|
||||
"enable_duplicate_check": 0
|
||||
}
|
||||
result = self.__post_request(message_url, req_json)
|
||||
else:
|
||||
req_json = {
|
||||
"touser": userid,
|
||||
"msgtype": "text",
|
||||
"agentid": self._appid,
|
||||
"text": {
|
||||
"content": content
|
||||
},
|
||||
"safe": 0,
|
||||
"enable_id_trans": 0,
|
||||
"enable_duplicate_check": 0
|
||||
}
|
||||
return self.__post_request(message_url, req_json)
|
||||
|
||||
return result
|
||||
|
||||
def __send_image_message(self, title: str, text: str, image_url: str,
|
||||
userid: str = None, link: str = None) -> Optional[bool]:
|
||||
|
||||
@@ -94,6 +94,10 @@ class Scheduler(metaclass=Singleton):
|
||||
link=settings.MP_DOMAIN('#/site')
|
||||
)
|
||||
)
|
||||
PluginManager().init_config()
|
||||
for plugin_id in PluginManager().get_running_plugin_ids():
|
||||
self.update_plugin_job(plugin_id)
|
||||
|
||||
else:
|
||||
self._auth_count += 1
|
||||
logger.error(f"用户认证失败:{msg},共失败 {self._auth_count} 次")
|
||||
@@ -160,7 +164,7 @@ class Scheduler(metaclass=Singleton):
|
||||
},
|
||||
"random_wallpager": {
|
||||
"name": "壁纸缓存",
|
||||
"func": TmdbChain().get_random_wallpager,
|
||||
"func": TmdbChain().get_trending_wallpapers,
|
||||
"running": False,
|
||||
}
|
||||
}
|
||||
@@ -420,17 +424,17 @@ class Scheduler(metaclass=Singleton):
|
||||
"plugin_name": plugin_name,
|
||||
"running": False,
|
||||
}
|
||||
self._scheduler.add_job(
|
||||
self.start,
|
||||
service["trigger"],
|
||||
id=sid,
|
||||
name=service["name"],
|
||||
**service["kwargs"],
|
||||
kwargs={
|
||||
'job_id': job_id
|
||||
}
|
||||
)
|
||||
logger.info(f"注册插件{plugin_name}服务:{service['name']} - {service['trigger']}")
|
||||
self._scheduler.add_job(
|
||||
self.start,
|
||||
service["trigger"],
|
||||
id=sid,
|
||||
name=service["name"],
|
||||
**service["kwargs"],
|
||||
kwargs={
|
||||
'job_id': job_id
|
||||
}
|
||||
)
|
||||
logger.info(f"注册插件{plugin_name}服务:{service['name']} - {service['trigger']}")
|
||||
except Exception as e:
|
||||
logger.error(f"注册插件{plugin_name}服务失败:{str(e)} - {service}")
|
||||
SchedulerChain().messagehelper.put(title=f"插件 {plugin_name} 服务注册失败",
|
||||
|
||||
@@ -15,3 +15,4 @@ from .tmdb import *
|
||||
from .transfer import *
|
||||
from .file import *
|
||||
from .filetransfer import *
|
||||
from .exception import *
|
||||
|
||||
14
app/schemas/exception.py
Normal file
14
app/schemas/exception.py
Normal file
@@ -0,0 +1,14 @@
|
||||
class ImmediateException(Exception):
|
||||
"""
|
||||
用于立即抛出异常而不重试的特殊异常类。
|
||||
当不希望使用重试机制时,可以抛出此异常。
|
||||
"""
|
||||
pass
|
||||
|
||||
|
||||
class APIRateLimitException(ImmediateException):
|
||||
"""
|
||||
用于表示API速率限制的异常类。
|
||||
当API调用触发速率限制时,可以抛出此异常以立即终止操作并报告错误。
|
||||
"""
|
||||
pass
|
||||
@@ -20,3 +20,13 @@ class FileItem(BaseModel):
|
||||
modify_time: Optional[float] = None
|
||||
# 子节点
|
||||
children: Optional[list] = []
|
||||
# ID
|
||||
fileid: Optional[str] = None
|
||||
# 父ID
|
||||
parent_fileid: Optional[str] = None
|
||||
# 缩略图
|
||||
thumbnail: Optional[str] = None
|
||||
# 115 pickcode
|
||||
pickcode: Optional[str] = None
|
||||
# drive_id
|
||||
drive_id: Optional[str] = None
|
||||
|
||||
@@ -46,6 +46,8 @@ class Plugin(BaseModel):
|
||||
history: Optional[dict] = {}
|
||||
# 添加时间,值越小表示越靠后发布
|
||||
add_time: Optional[int] = 0
|
||||
# 插件公钥
|
||||
plugin_public_key: Optional[str] = None
|
||||
|
||||
|
||||
class PluginDashboard(Plugin):
|
||||
|
||||
@@ -9,6 +9,7 @@ class Token(BaseModel):
|
||||
super_user: bool
|
||||
user_name: str
|
||||
avatar: Optional[str] = None
|
||||
level: int = 1
|
||||
|
||||
|
||||
class TokenPayload(BaseModel):
|
||||
|
||||
@@ -94,6 +94,10 @@ class SystemConfigKey(Enum):
|
||||
DownloadDirectories = "DownloadDirectories"
|
||||
# 媒体库目录定义
|
||||
LibraryDirectories = "LibraryDirectories"
|
||||
# 阿里云盘认证参数
|
||||
UserAliyunParams = "UserAliyunParams"
|
||||
# 115网盘认证参数
|
||||
User115Params = "User115Params"
|
||||
|
||||
|
||||
# 处理进度Key字典
|
||||
@@ -102,6 +106,8 @@ class ProgressKey(Enum):
|
||||
Search = "search"
|
||||
# 转移
|
||||
FileTransfer = "filetransfer"
|
||||
# 批量重命名
|
||||
BatchRename = "batchrename"
|
||||
|
||||
|
||||
# 媒体图片类型
|
||||
|
||||
@@ -6,6 +6,8 @@ from typing import Any
|
||||
from Crypto import Random
|
||||
from Crypto.Cipher import AES
|
||||
|
||||
from app.schemas.exception import ImmediateException
|
||||
|
||||
|
||||
def retry(ExceptionToCheck: Any,
|
||||
tries: int = 3, delay: int = 3, backoff: int = 2, logger: Any = None):
|
||||
@@ -23,6 +25,8 @@ def retry(ExceptionToCheck: Any,
|
||||
while mtries > 1:
|
||||
try:
|
||||
return f(*args, **kwargs)
|
||||
except ImmediateException:
|
||||
raise
|
||||
except ExceptionToCheck as e:
|
||||
msg = f"{str(e)}, {mdelay} 秒后重试 ..."
|
||||
if logger:
|
||||
|
||||
91
app/utils/crypto.py
Normal file
91
app/utils/crypto.py
Normal file
@@ -0,0 +1,91 @@
|
||||
import base64
|
||||
|
||||
from cryptography.hazmat.backends import default_backend
|
||||
from cryptography.hazmat.primitives import serialization, hashes
|
||||
from cryptography.hazmat.primitives.asymmetric import rsa, padding
|
||||
|
||||
|
||||
class RSAUtils:
|
||||
|
||||
@staticmethod
|
||||
def generate_rsa_key_pair() -> (str, str):
|
||||
"""
|
||||
生成RSA密钥对并返回Base64编码的公钥和私钥(DER格式)
|
||||
|
||||
:return: Tuple containing Base64 encoded public key and private key
|
||||
"""
|
||||
# 生成RSA密钥对
|
||||
private_key = rsa.generate_private_key(
|
||||
public_exponent=65537,
|
||||
key_size=2048,
|
||||
)
|
||||
|
||||
public_key = private_key.public_key()
|
||||
|
||||
# 导出私钥为DER格式
|
||||
private_key_der = private_key.private_bytes(
|
||||
encoding=serialization.Encoding.DER,
|
||||
format=serialization.PrivateFormat.PKCS8,
|
||||
encryption_algorithm=serialization.NoEncryption()
|
||||
)
|
||||
|
||||
# 导出公钥为DER格式
|
||||
public_key_der = public_key.public_bytes(
|
||||
encoding=serialization.Encoding.DER,
|
||||
format=serialization.PublicFormat.SubjectPublicKeyInfo
|
||||
)
|
||||
|
||||
# 将DER格式的密钥编码为Base64
|
||||
private_key_b64 = base64.b64encode(private_key_der).decode('utf-8')
|
||||
public_key_b64 = base64.b64encode(public_key_der).decode('utf-8')
|
||||
|
||||
return private_key_b64, public_key_b64
|
||||
|
||||
@staticmethod
|
||||
def verify_rsa_keys(private_key: str, public_key: str) -> bool:
|
||||
"""
|
||||
使用 RSA 验证公钥和私钥是否匹配
|
||||
|
||||
:param private_key: 私钥字符串 (Base64 编码,无标识符)
|
||||
:param public_key: 公钥字符串 (Base64 编码,无标识符)
|
||||
:return: 如果匹配则返回 True,否则返回 False
|
||||
"""
|
||||
if not private_key or not public_key:
|
||||
return False
|
||||
|
||||
try:
|
||||
# 解码 Base64 编码的公钥和私钥
|
||||
public_key_bytes = base64.b64decode(public_key)
|
||||
private_key_bytes = base64.b64decode(private_key)
|
||||
|
||||
# 加载公钥
|
||||
public_key = serialization.load_der_public_key(public_key_bytes, backend=default_backend())
|
||||
|
||||
# 加载私钥
|
||||
private_key = serialization.load_der_private_key(private_key_bytes, password=None,
|
||||
backend=default_backend())
|
||||
|
||||
# 测试加解密
|
||||
message = b'test'
|
||||
encrypted_message = public_key.encrypt(
|
||||
message,
|
||||
padding.OAEP(
|
||||
mgf=padding.MGF1(algorithm=hashes.SHA256()),
|
||||
algorithm=hashes.SHA256(),
|
||||
label=None
|
||||
)
|
||||
)
|
||||
|
||||
decrypted_message = private_key.decrypt(
|
||||
encrypted_message,
|
||||
padding.OAEP(
|
||||
mgf=padding.MGF1(algorithm=hashes.SHA256()),
|
||||
algorithm=hashes.SHA256(),
|
||||
label=None
|
||||
)
|
||||
)
|
||||
|
||||
return message == decrypted_message
|
||||
except Exception as e:
|
||||
print(f"RSA 密钥验证失败: {e}")
|
||||
return False
|
||||
@@ -1,10 +1,13 @@
|
||||
from typing import Union, Any, Optional
|
||||
from urllib.parse import urljoin, urlparse, parse_qs, urlencode, urlunparse
|
||||
|
||||
import requests
|
||||
import urllib3
|
||||
from requests import Session, Response
|
||||
from urllib3.exceptions import InsecureRequestWarning
|
||||
|
||||
from app.log import logger
|
||||
|
||||
urllib3.disable_warnings(InsecureRequestWarning)
|
||||
|
||||
|
||||
@@ -48,128 +51,160 @@ class RequestUtils:
|
||||
if timeout:
|
||||
self._timeout = timeout
|
||||
|
||||
def post(self, url: str, data: Any = None, json: dict = None) -> Optional[Response]:
|
||||
def request(self, method: str, url: str, raise_exception: bool = False, **kwargs) -> Optional[Response]:
|
||||
"""
|
||||
发起HTTP请求
|
||||
:param method: HTTP方法,如 get, post, put 等
|
||||
:param url: 请求的URL
|
||||
:param raise_exception: 是否在发生异常时抛出异常,否则默认拦截异常返回None
|
||||
:param kwargs: 其他请求参数,如headers, cookies, proxies等
|
||||
:return: HTTP响应对象
|
||||
:raises: requests.exceptions.RequestException 仅raise_exception为True时会抛出
|
||||
"""
|
||||
if self._session is None:
|
||||
req_method = requests.request
|
||||
else:
|
||||
req_method = self._session.request
|
||||
kwargs.setdefault("headers", self._headers)
|
||||
kwargs.setdefault("cookies", self._cookies)
|
||||
kwargs.setdefault("proxies", self._proxies)
|
||||
kwargs.setdefault("timeout", self._timeout)
|
||||
kwargs.setdefault("verify", False)
|
||||
kwargs.setdefault("stream", False)
|
||||
try:
|
||||
return req_method(method, url, **kwargs)
|
||||
except requests.exceptions.RequestException as e:
|
||||
logger.debug(f"请求失败: {e}")
|
||||
if raise_exception:
|
||||
raise
|
||||
return None
|
||||
|
||||
def get(self, url: str, params: dict = None, **kwargs) -> Optional[str]:
|
||||
"""
|
||||
发送GET请求
|
||||
:param url: 请求的URL
|
||||
:param params: 请求的参数
|
||||
:param kwargs: 其他请求参数,如headers, cookies, proxies等
|
||||
:return: 响应的内容,若发生RequestException则返回None
|
||||
"""
|
||||
response = self.request(method="get", url=url, params=params, **kwargs)
|
||||
return str(response.content, "utf-8") if response else None
|
||||
|
||||
def post(self, url: str, data: Any = None, json: dict = None, **kwargs) -> Optional[Response]:
|
||||
"""
|
||||
发送POST请求
|
||||
:param url: 请求的URL
|
||||
:param data: 请求的数据
|
||||
:param json: 请求的JSON数据
|
||||
:param kwargs: 其他请求参数,如headers, cookies, proxies等
|
||||
:return: HTTP响应对象,若发生RequestException则返回None
|
||||
"""
|
||||
if json is None:
|
||||
json = {}
|
||||
try:
|
||||
if self._session:
|
||||
return self._session.post(url,
|
||||
data=data,
|
||||
verify=False,
|
||||
headers=self._headers,
|
||||
proxies=self._proxies,
|
||||
cookies=self._cookies,
|
||||
timeout=self._timeout,
|
||||
json=json,
|
||||
stream=False)
|
||||
else:
|
||||
return requests.post(url,
|
||||
data=data,
|
||||
verify=False,
|
||||
headers=self._headers,
|
||||
proxies=self._proxies,
|
||||
cookies=self._cookies,
|
||||
timeout=self._timeout,
|
||||
json=json,
|
||||
stream=False)
|
||||
except requests.exceptions.RequestException:
|
||||
return None
|
||||
return self.request(method="post", url=url, data=data, json=json, **kwargs)
|
||||
|
||||
def get(self, url: str, params: dict = None) -> Optional[str]:
|
||||
try:
|
||||
if self._session:
|
||||
r = self._session.get(url,
|
||||
verify=False,
|
||||
headers=self._headers,
|
||||
proxies=self._proxies,
|
||||
cookies=self._cookies,
|
||||
timeout=self._timeout,
|
||||
params=params)
|
||||
else:
|
||||
r = requests.get(url,
|
||||
verify=False,
|
||||
headers=self._headers,
|
||||
proxies=self._proxies,
|
||||
cookies=self._cookies,
|
||||
timeout=self._timeout,
|
||||
params=params)
|
||||
return str(r.content, 'utf-8')
|
||||
except requests.exceptions.RequestException:
|
||||
return None
|
||||
def put(self, url: str, data: Any = None, **kwargs) -> Optional[Response]:
|
||||
"""
|
||||
发送PUT请求
|
||||
:param url: 请求的URL
|
||||
:param data: 请求的数据
|
||||
:param kwargs: 其他请求参数,如headers, cookies, proxies等
|
||||
:return: HTTP响应对象,若发生RequestException则返回None
|
||||
"""
|
||||
return self.request(method="put", url=url, data=data, **kwargs)
|
||||
|
||||
def get_res(self, url: str,
|
||||
def get_res(self,
|
||||
url: str,
|
||||
params: dict = None,
|
||||
data: Any = None,
|
||||
json: dict = None,
|
||||
allow_redirects: bool = True,
|
||||
raise_exception: bool = False
|
||||
) -> Optional[Response]:
|
||||
try:
|
||||
if self._session:
|
||||
return self._session.get(url,
|
||||
params=params,
|
||||
data=data,
|
||||
json=json,
|
||||
verify=False,
|
||||
headers=self._headers,
|
||||
proxies=self._proxies,
|
||||
cookies=self._cookies,
|
||||
timeout=self._timeout,
|
||||
allow_redirects=allow_redirects,
|
||||
stream=False)
|
||||
else:
|
||||
return requests.get(url,
|
||||
params=params,
|
||||
data=data,
|
||||
json=json,
|
||||
verify=False,
|
||||
headers=self._headers,
|
||||
proxies=self._proxies,
|
||||
cookies=self._cookies,
|
||||
timeout=self._timeout,
|
||||
allow_redirects=allow_redirects,
|
||||
stream=False)
|
||||
except requests.exceptions.RequestException:
|
||||
if raise_exception:
|
||||
raise requests.exceptions.RequestException
|
||||
return None
|
||||
raise_exception: bool = False,
|
||||
**kwargs) -> Optional[Response]:
|
||||
"""
|
||||
发送GET请求并返回响应对象
|
||||
:param url: 请求的URL
|
||||
:param params: 请求的参数
|
||||
:param data: 请求的数据
|
||||
:param json: 请求的JSON数据
|
||||
:param allow_redirects: 是否允许重定向
|
||||
:param raise_exception: 是否在发生异常时抛出异常,否则默认拦截异常返回None
|
||||
:param kwargs: 其他请求参数,如headers, cookies, proxies等
|
||||
:return: HTTP响应对象,若发生RequestException则返回None
|
||||
:raises: requests.exceptions.RequestException 仅raise_exception为True时会抛出
|
||||
"""
|
||||
return self.request(method="get",
|
||||
url=url,
|
||||
params=params,
|
||||
data=data,
|
||||
json=json,
|
||||
allow_redirects=allow_redirects,
|
||||
raise_exception=raise_exception,
|
||||
**kwargs)
|
||||
|
||||
def post_res(self, url: str, data: Any = None, params: dict = None,
|
||||
def post_res(self,
|
||||
url: str,
|
||||
data: Any = None,
|
||||
params: dict = None,
|
||||
allow_redirects: bool = True,
|
||||
files: Any = None,
|
||||
json: dict = None,
|
||||
raise_exception: bool = False) -> Optional[Response]:
|
||||
try:
|
||||
if self._session:
|
||||
return self._session.post(url,
|
||||
data=data,
|
||||
params=params,
|
||||
verify=False,
|
||||
headers=self._headers,
|
||||
proxies=self._proxies,
|
||||
cookies=self._cookies,
|
||||
timeout=self._timeout,
|
||||
allow_redirects=allow_redirects,
|
||||
files=files,
|
||||
json=json,
|
||||
stream=False)
|
||||
else:
|
||||
return requests.post(url,
|
||||
data=data,
|
||||
params=params,
|
||||
verify=False,
|
||||
headers=self._headers,
|
||||
proxies=self._proxies,
|
||||
cookies=self._cookies,
|
||||
timeout=self._timeout,
|
||||
allow_redirects=allow_redirects,
|
||||
files=files,
|
||||
json=json,
|
||||
stream=False)
|
||||
except requests.exceptions.RequestException:
|
||||
if raise_exception:
|
||||
raise requests.exceptions.RequestException
|
||||
return None
|
||||
raise_exception: bool = False,
|
||||
**kwargs) -> Optional[Response]:
|
||||
"""
|
||||
发送POST请求并返回响应对象
|
||||
:param url: 请求的URL
|
||||
:param data: 请求的数据
|
||||
:param params: 请求的参数
|
||||
:param allow_redirects: 是否允许重定向
|
||||
:param files: 请求的文件
|
||||
:param json: 请求的JSON数据
|
||||
:param kwargs: 其他请求参数,如headers, cookies, proxies等
|
||||
:param raise_exception: 是否在发生异常时抛出异常,否则默认拦截异常返回None
|
||||
:return: HTTP响应对象,若发生RequestException则返回None
|
||||
:raises: requests.exceptions.RequestException 仅raise_exception为True时会抛出
|
||||
"""
|
||||
return self.request(method="post",
|
||||
url=url,
|
||||
data=data,
|
||||
params=params,
|
||||
allow_redirects=allow_redirects,
|
||||
files=files,
|
||||
json=json,
|
||||
raise_exception=raise_exception,
|
||||
**kwargs)
|
||||
|
||||
def put_res(self,
|
||||
url: str,
|
||||
data: Any = None,
|
||||
params: dict = None,
|
||||
allow_redirects: bool = True,
|
||||
files: Any = None,
|
||||
json: dict = None,
|
||||
raise_exception: bool = False,
|
||||
**kwargs) -> Optional[Response]:
|
||||
"""
|
||||
发送PUT请求并返回响应对象
|
||||
:param url: 请求的URL
|
||||
:param data: 请求的数据
|
||||
:param params: 请求的参数
|
||||
:param allow_redirects: 是否允许重定向
|
||||
:param files: 请求的文件
|
||||
:param json: 请求的JSON数据
|
||||
:param raise_exception: 是否在发生异常时抛出异常,否则默认拦截异常返回None
|
||||
:param kwargs: 其他请求参数,如headers, cookies, proxies等
|
||||
:return: HTTP响应对象,若发生RequestException则返回None
|
||||
:raises: requests.exceptions.RequestException 仅raise_exception为True时会抛出
|
||||
"""
|
||||
return self.request(method="put",
|
||||
url=url,
|
||||
data=data,
|
||||
params=params,
|
||||
allow_redirects=allow_redirects,
|
||||
files=files,
|
||||
json=json,
|
||||
raise_exception=raise_exception,
|
||||
**kwargs)
|
||||
|
||||
@staticmethod
|
||||
def cookie_parse(cookies_str: str, array: bool = False) -> Union[list, dict]:
|
||||
@@ -182,15 +217,75 @@ class RequestUtils:
|
||||
if not cookies_str:
|
||||
return {}
|
||||
cookie_dict = {}
|
||||
cookies = cookies_str.split(';')
|
||||
cookies = cookies_str.split(";")
|
||||
for cookie in cookies:
|
||||
cstr = cookie.split('=')
|
||||
cstr = cookie.split("=")
|
||||
if len(cstr) > 1:
|
||||
cookie_dict[cstr[0].strip()] = cstr[1].strip()
|
||||
if array:
|
||||
cookiesList = []
|
||||
for cookieName, cookieValue in cookie_dict.items():
|
||||
cookies = {'name': cookieName, 'value': cookieValue}
|
||||
cookiesList.append(cookies)
|
||||
return cookiesList
|
||||
return [{"name": k, "value": v} for k, v in cookie_dict.items()]
|
||||
return cookie_dict
|
||||
|
||||
@staticmethod
|
||||
def standardize_base_url(host: str) -> str:
|
||||
"""
|
||||
标准化提供的主机地址,确保它以http://或https://开头,并且以斜杠(/)结尾
|
||||
:param host: 提供的主机地址字符串
|
||||
:return: 标准化后的主机地址字符串
|
||||
"""
|
||||
if not host:
|
||||
return host
|
||||
if not host.endswith("/"):
|
||||
host += "/"
|
||||
if not host.startswith("http://") and not host.startswith("https://"):
|
||||
host = "http://" + host
|
||||
return host
|
||||
|
||||
@staticmethod
|
||||
def adapt_request_url(host: str, endpoint: str) -> Optional[str]:
|
||||
"""
|
||||
基于传入的host,适配请求的URL,确保每个请求的URL是完整的,用于在发送请求前自动处理和修正请求的URL。
|
||||
:param host: 主机头
|
||||
:param endpoint: 端点
|
||||
:return: 完整的请求URL字符串
|
||||
"""
|
||||
if not host and not endpoint:
|
||||
return None
|
||||
if endpoint.startswith(("http://", "https://")):
|
||||
return endpoint
|
||||
host = RequestUtils.standardize_base_url(host)
|
||||
return urljoin(host, endpoint) if host else endpoint
|
||||
|
||||
@staticmethod
|
||||
def combine_url(host: str, path: Optional[str] = None, query: Optional[dict] = None) -> Optional[str]:
|
||||
"""
|
||||
使用给定的主机头、路径和查询参数组合生成完整的URL。
|
||||
:param host: str, 主机头,例如 https://example.com
|
||||
:param path: Optional[str], 包含路径和可能已经包含的查询参数的端点,例如 /path/to/resource?current=1
|
||||
:param query: Optional[dict], 可选,额外的查询参数,例如 {"key": "value"}
|
||||
:return: str, 完整的请求URL字符串
|
||||
"""
|
||||
try:
|
||||
# 如果路径为空,则默认为 '/'
|
||||
if path is None:
|
||||
path = '/'
|
||||
host = RequestUtils.standardize_base_url(host)
|
||||
# 使用 urljoin 合并 host 和 path
|
||||
url = urljoin(host, path)
|
||||
# 解析当前 URL 的组成部分
|
||||
url_parts = urlparse(url)
|
||||
# 解析已存在的查询参数,并与额外的查询参数合并
|
||||
query_params = parse_qs(url_parts.query)
|
||||
if query:
|
||||
for key, value in query.items():
|
||||
query_params[key] = value
|
||||
|
||||
# 重新构建查询字符串
|
||||
query_string = urlencode(query_params, doseq=True)
|
||||
# 构建完整的 URL
|
||||
new_url_parts = url_parts._replace(query=query_string)
|
||||
complete_url = urlunparse(new_url_parts)
|
||||
return str(complete_url)
|
||||
except Exception as e:
|
||||
logger.debug(f"Error combining URL: {e}")
|
||||
return None
|
||||
|
||||
@@ -142,7 +142,7 @@ class StringUtils:
|
||||
"""
|
||||
判断是否为英文单词,有空格时返回False
|
||||
"""
|
||||
return word.isalpha()
|
||||
return word.encode().isalpha()
|
||||
|
||||
@staticmethod
|
||||
def str_int(text: str) -> int:
|
||||
@@ -186,7 +186,7 @@ class StringUtils:
|
||||
忽略特殊字符
|
||||
"""
|
||||
# 需要忽略的特殊字符
|
||||
CONVERT_EMPTY_CHARS = r"[、.。,,·::;;!!'’\"“”()()\[\]【】「」\-——\+\|\\_/&#~~]"
|
||||
CONVERT_EMPTY_CHARS = r"[、.。,,·::;;!!'’\"“”()()\[\]【】「」\-—―\+\|\\_/&#~~]"
|
||||
if not text:
|
||||
return text
|
||||
if not isinstance(text, list):
|
||||
@@ -383,6 +383,21 @@ class StringUtils:
|
||||
print(str(e))
|
||||
return timestamp
|
||||
|
||||
@staticmethod
|
||||
def str_to_timestamp(date_str: str) -> float:
|
||||
"""
|
||||
日期转时间戳
|
||||
:param date_str:
|
||||
:return:
|
||||
"""
|
||||
if not date_str:
|
||||
return 0
|
||||
try:
|
||||
return dateparser.parse(date_str).timestamp()
|
||||
except Exception as e:
|
||||
print(str(e))
|
||||
return 0
|
||||
|
||||
@staticmethod
|
||||
def to_bool(text: str, default_val: bool = False) -> bool:
|
||||
"""
|
||||
|
||||
@@ -10,6 +10,7 @@ from typing import List, Union, Tuple
|
||||
|
||||
import docker
|
||||
import psutil
|
||||
|
||||
from app import schemas
|
||||
|
||||
|
||||
@@ -292,6 +293,25 @@ class SystemUtils:
|
||||
|
||||
return dirs
|
||||
|
||||
@staticmethod
|
||||
def list_sub_all(directory: Path) -> List[Path]:
|
||||
"""
|
||||
列出当前目录下的所有子目录和文件(不递归)
|
||||
"""
|
||||
if not directory.exists():
|
||||
return []
|
||||
|
||||
if directory.is_file():
|
||||
return []
|
||||
|
||||
items = []
|
||||
|
||||
# 遍历目录
|
||||
for path in directory.iterdir():
|
||||
items.append(path)
|
||||
|
||||
return items
|
||||
|
||||
@staticmethod
|
||||
def get_directory_size(path: Path) -> float:
|
||||
"""
|
||||
@@ -469,7 +489,9 @@ class SystemUtils:
|
||||
|
||||
@staticmethod
|
||||
def is_hardlink(src: Path, dest: Path) -> bool:
|
||||
"""判断是否为硬链接"""
|
||||
"""
|
||||
判断是否为硬链接(可能无法支持宿主机挂载smb盘符映射docker的场景)
|
||||
"""
|
||||
try:
|
||||
if not src.exists() or not dest.exists():
|
||||
return False
|
||||
@@ -487,7 +509,7 @@ class SystemUtils:
|
||||
if not target_file.exists() or not src_file.samefile(target_file):
|
||||
return False
|
||||
return True
|
||||
except (PermissionError, FileNotFoundError, ValueError, OSError) as e:
|
||||
except Exception as e:
|
||||
print(f"Error occurred: {e}")
|
||||
return False
|
||||
|
||||
|
||||
@@ -88,3 +88,19 @@ class WebUtils:
|
||||
except Exception as err:
|
||||
print(str(err))
|
||||
return None
|
||||
|
||||
@staticmethod
|
||||
def get_bing_wallpapers(num: int = 7) -> Optional[str]:
|
||||
"""
|
||||
获取7天的Bing每日壁纸
|
||||
"""
|
||||
url = f"https://cn.bing.com/HPImageArchive.aspx?format=js&idx=0&n={num}"
|
||||
resp = RequestUtils(timeout=5).get_res(url)
|
||||
if resp and resp.status_code == 200:
|
||||
try:
|
||||
result = resp.json()
|
||||
if isinstance(result, dict):
|
||||
return [f"https://cn.bing.com{image.get('url')}" for image in result.get('images') or []]
|
||||
except Exception as err:
|
||||
print(str(err))
|
||||
return None
|
||||
|
||||
@@ -13,10 +13,14 @@ SUPERUSER=admin
|
||||
BIG_MEMORY_MODE=false
|
||||
# 是否启用DOH域名解析,启用后对于api.themovie.org等域名通过DOH解析,避免域名DNS被污染
|
||||
DOH_ENABLE=true
|
||||
# 使用 DOH 解析的域名列表,多个域名使用`,`分隔
|
||||
DOH_DOMAINS=api.themoviedb.org,api.tmdb.org,webservice.fanart.tv,api.github.com,github.com,raw.githubusercontent.com,api.telegram.org
|
||||
# DOH 解析服务器列表,多个服务器使用`,`分隔
|
||||
DOH_RESOLVERS=1.0.0.1,1.1.1.1,9.9.9.9,149.112.112.112
|
||||
# 元数据识别缓存过期时间,数字型,单位小时,0为系统默认(大内存模式为7天,滞则为3天),调大该值可减少themoviedb的访问次数
|
||||
META_CACHE_EXPIRE=0
|
||||
# 自动检查和更新站点资源包(索引、认证等)
|
||||
AUTO_UPDATE_RESOURCE=true
|
||||
AUTO_UPDATE_RESOURCE=false
|
||||
# 【*】API密钥,建议更换复杂字符串,有Jellyseerr/Overseerr、媒体服务器Webhook等配置以及部分支持API_TOKEN的API中使用
|
||||
API_TOKEN=moviepilot
|
||||
# 登录页面电影海报,tmdb/bing,tmdb要求能正常连接api.themoviedb.org
|
||||
|
||||
@@ -9,7 +9,6 @@ import json
|
||||
from pathlib import Path
|
||||
|
||||
from alembic import op
|
||||
import sqlalchemy as sa
|
||||
|
||||
from app.core.config import Settings
|
||||
|
||||
|
||||
@@ -41,7 +41,7 @@ http {
|
||||
try_files $uri $uri/ /index.html;
|
||||
}
|
||||
|
||||
location ~* \.(js|css|png|jpg|jpeg|gif|ico|svg)$ {
|
||||
location ~* \.(png|jpg|jpeg|gif|ico|svg)$ {
|
||||
# 静态资源
|
||||
expires 1y;
|
||||
add_header Cache-Control "public, immutable";
|
||||
|
||||
@@ -58,3 +58,5 @@ pystray~=0.19.5
|
||||
pyotp~=2.9.0
|
||||
Pinyin2Hanzi~=0.1.1
|
||||
pywebpush~=2.0.0
|
||||
py115j~=0.0.6
|
||||
oss2~=2.18.6
|
||||
@@ -1 +1 @@
|
||||
APP_VERSION = 'v1.9.4'
|
||||
APP_VERSION = 'v1.9.19'
|
||||
|
||||
Reference in New Issue
Block a user