Compare commits

..

123 Commits
v ... v1.3.4

Author SHA1 Message Date
jxxghp
ec309180da v1.3.4 2023-10-16 17:36:01 +08:00
jxxghp
ab3b674a6e fix 目录监控通知间隔 2023-10-16 17:30:25 +08:00
jxxghp
9231144518 fix 2023-10-16 17:11:45 +08:00
jxxghp
13c04de87c fix 2023-10-16 17:10:22 +08:00
jxxghp
70f533684f fix #607 目录监控全量同步 2023-10-16 17:04:51 +08:00
jxxghp
c94866631b 更新 bug_report.yml 2023-10-16 12:33:51 +08:00
jxxghp
40a77b438e feat 下载中信息增加用户 2023-10-16 08:28:54 +08:00
jxxghp
f5de48ca30 fix #867 2023-10-16 07:16:47 +08:00
jxxghp
89a2c00e64 fix #867 2023-10-16 07:16:02 +08:00
jxxghp
35afb50b26 fix #867 2023-10-16 07:14:36 +08:00
jxxghp
0e3e01bf9c fix #864 2023-10-16 07:06:10 +08:00
jxxghp
6e3ebd73c6 Merge remote-tracking branch 'origin/main' 2023-10-16 07:04:59 +08:00
jxxghp
add9b875aa fix #863 2023-10-16 07:04:31 +08:00
jxxghp
b1790ee730 v1.3.3 2023-10-15 15:00:15 +08:00
jxxghp
47d7800250 fix #788 Rclone远程刮削 2023-10-15 14:56:04 +08:00
jxxghp
4849c281d3 fix #788 2023-10-15 14:37:20 +08:00
jxxghp
c36acd7bb4 fix #830 2023-10-15 14:19:36 +08:00
jxxghp
986e96a88e Merge pull request #847 from thsrite/main 2023-10-14 20:55:00 +08:00
thsrite
493b7c2d24 fix 重启系统发送消息 2023-10-14 20:49:03 +08:00
jxxghp
0539ddab85 fix bug 2023-10-14 20:27:04 +08:00
jxxghp
202fdf8905 fix bug 2023-10-14 20:16:15 +08:00
jxxghp
9191ed0a21 fix bug 2023-10-14 20:12:02 +08:00
jxxghp
9697cf3901 fix icon 2023-10-14 13:31:21 +08:00
jxxghp
e6a11294fd Merge pull request #842 from lightolly/dev/20231014
feat:增加自定义站点配置,仅为统计和签到使用
2023-10-14 13:25:36 +08:00
olly
cd046d8023 feat:增加自定义站点配置,仅为统计和签到使用 2023-10-14 13:22:06 +08:00
jxxghp
4d08928b8c Merge remote-tracking branch 'origin/main' 2023-10-14 13:05:13 +08:00
jxxghp
bc8a243a6d feat 整合历史记录Api 2023-10-14 13:05:00 +08:00
jxxghp
3b804e13a8 Merge pull request #839 from thsrite/main
fix 缺失消息发给交互人
2023-10-14 12:42:26 +08:00
thsrite
f126f927b4 fix 缺失消息发给交互人 2023-10-14 12:35:38 +08:00
jxxghp
d4f202c2b1 fix #838 2023-10-14 11:52:05 +08:00
jxxghp
77a1d56c5b Merge pull request #838 from thsrite/main
fix 演职人员刮削插件增加刮削条件、debug日志
2023-10-14 11:48:17 +08:00
jxxghp
7415f94da2 Merge pull request #834 from DDS-Derek/main
feat: pip add cache
2023-10-14 11:47:26 +08:00
thsrite
fa50d8b884 fix 演职人员刮削插件增加刮削条件、debug日志 2023-10-14 11:40:21 +08:00
DDSDerek
40776c10bc feat: pip add cache 2023-10-14 01:08:13 +08:00
jxxghp
6578a2f977 Merge pull request #832 from DDS-Derek/main
docs fix
2023-10-13 21:52:06 +08:00
DDSRem
e780485fc6 docs: fix 2023-10-13 21:46:55 +08:00
DDSRem
8213cdba63 docs: fix 2023-10-13 21:44:28 +08:00
DDSRem
8d5b0d4035 docs: fix 2023-10-13 21:43:19 +08:00
DDSRem
3eaa22d068 docs: fix 2023-10-13 21:42:27 +08:00
jxxghp
4797983f43 Merge pull request #831 from DDS-Derek/main
fix: rclone version is too low
2023-10-13 21:32:11 +08:00
jxxghp
0e7e2fc44b fix #829 默认过滤规则拆分 2023-10-13 21:31:13 +08:00
DDSRem
9a51286c54 docs: fix 2023-10-13 21:20:42 +08:00
DDSRem
ddbf93f2c5 docs: fix 2023-10-13 21:16:59 +08:00
DDSRem
418411b10d fix: connector error 2023-10-13 21:14:43 +08:00
DDSRem
dceb7340dd fix: rclone version is too low 2023-10-13 21:07:13 +08:00
jxxghp
e7e9ca539d fix #810 2023-10-13 15:32:11 +08:00
jxxghp
333d187615 Merge pull request #821 from thsrite/main
fix 从下载历史获取tmdbid入库刮削
2023-10-13 15:10:33 +08:00
thsrite
761e66b200 fix 从下载历史获取tmdbid入库刮削 2023-10-13 14:29:07 +08:00
thsrite
eec52fa5ba fix 下载用户精简下载进度消息 2023-10-13 13:56:23 +08:00
jxxghp
b6c3c03748 Merge pull request #819 from thsrite/main 2023-10-13 11:52:33 +08:00
thsrite
4eebaa5d75 fix 删种 2023-10-13 11:45:18 +08:00
jxxghp
f6dfe9cb88 fix rules 2023-10-13 11:41:12 +08:00
thsrite
c36c94971e fix 插件记录同步 2023-10-13 11:09:33 +08:00
jxxghp
e83a15ad1f fix plugin log 2023-10-13 11:01:58 +08:00
thsrite
16aa353cf6 Merge remote-tracking branch 'origin/main' 2023-10-13 10:26:49 +08:00
thsrite
5adfa89d10 fix bug 2023-10-13 10:26:40 +08:00
jxxghp
b1805c1a46 add logs 2023-10-13 07:18:12 +08:00
jxxghp
7e51d70cd6 - 修复了搜索页面过滤失效的问题 2023-10-12 22:46:42 +08:00
jxxghp
b5cba64227 fix 2023-10-12 21:30:35 +08:00
jxxghp
f20c81efae fix rclone 2023-10-12 20:47:23 +08:00
jxxghp
bfbd93b912 fix 2023-10-12 20:35:44 +08:00
jxxghp
6be074e647 fix doubaninfo 2023-10-12 20:31:59 +08:00
jxxghp
5f96a562d4 v1.3.2 2023-10-12 20:11:22 +08:00
jxxghp
cefbd70469 fix #807 2023-10-12 20:06:58 +08:00
jxxghp
30c9c66087 fix 豆瓣来源订阅 2023-10-12 19:59:47 +08:00
jxxghp
1ecbc2f0be fix bug 2023-10-12 18:08:09 +08:00
jxxghp
884a0feb62 fix bug 2023-10-12 17:56:15 +08:00
jxxghp
5f44f07515 fixme 2023-10-12 17:51:20 +08:00
jxxghp
a902b79684 fix #800 2023-10-12 17:07:26 +08:00
jxxghp
4e13f59b36 fix #804 2023-10-12 16:03:36 +08:00
jxxghp
cbccac87f0 feat 清理无效的插件事件响应 2023-10-12 12:48:40 +08:00
jxxghp
eb3c09a3d3 fix bug 2023-10-12 11:50:22 +08:00
jxxghp
2a9a36ac88 feat 辅助识别异步接口 && ChatGPT插件支持辅助名称识别 2023-10-12 11:41:57 +08:00
jxxghp
af2f52a050 fix 优先级规则支持杜比全景声 2023-10-12 09:46:48 +08:00
jxxghp
7a61fa1ee2 feat 订阅支持更多过滤规则 2023-10-12 08:58:59 +08:00
jxxghp
ac3009d58f fix bug 2023-10-12 08:40:56 +08:00
jxxghp
e835feb056 更新 douban.py 2023-10-11 18:11:13 +08:00
jxxghp
cd391d14f9 fix plex 2023-10-11 17:27:10 +08:00
jxxghp
d7844968ab v1.3.1 2023-10-11 17:20:11 +08:00
jxxghp
70ea398f14 fix 优化豆瓣匹配 2023-10-11 16:32:34 +08:00
jxxghp
860d55a0e2 feat 热门动漫 2023-10-11 16:13:15 +08:00
jxxghp
0e35cec6e2 fix #743 支持Rclone 2023-10-11 12:16:41 +08:00
jxxghp
5778e86260 fix #775 增加日志打印 2023-10-11 11:01:53 +08:00
jxxghp
967d0b1205 fix #778 2023-10-11 08:32:48 +08:00
jxxghp
0b2d419000 fix spec 2023-10-11 08:19:44 +08:00
jxxghp
149104063c fix #784 PROXY_HOST仅环境变量配置 2023-10-11 07:44:37 +08:00
jxxghp
498168a2d3 fix #783 2023-10-10 22:23:03 +08:00
jxxghp
88e307416d fix Dockerfile 2023-10-10 22:09:16 +08:00
jxxghp
3bb2eedb33 fix icon 2023-10-10 21:26:30 +08:00
jxxghp
36c046ad6a - 优化Windows打包 2023-10-10 20:25:00 +08:00
jxxghp
85396df221 - 优化Windows打包 2023-10-10 20:06:52 +08:00
jxxghp
2f0f58783e fix spec 2023-10-10 19:45:18 +08:00
jxxghp
2d989d4229 更新 main.py 2023-10-10 18:19:46 +08:00
jxxghp
ecc8b6b385 fix spec 2023-10-10 17:35:28 +08:00
jxxghp
aa90c5d5c0 fix build 2023-10-10 16:38:51 +08:00
jxxghp
5f7d93f170 fix startup 2023-10-10 16:23:57 +08:00
jxxghp
0fbe51f257 fix bug 2023-10-10 16:17:22 +08:00
jxxghp
be941ebdd1 fix #770 2023-10-10 16:14:12 +08:00
jxxghp
4d900c2eb0 fix #777 豆瓣流控改为随机休眠3-10秒 2023-10-10 16:02:14 +08:00
jxxghp
93c473afe7 - 优化Windows打包 2023-10-10 15:48:44 +08:00
jxxghp
4c9a66f586 fix trayicon 2023-10-10 14:44:27 +08:00
jxxghp
375e16e0dc fix trayicon 2023-10-10 14:38:50 +08:00
jxxghp
91085d13a3 Merge remote-tracking branch 'origin/main' 2023-10-10 13:58:02 +08:00
jxxghp
3f83894dc6 add trayicon 2023-10-10 13:57:56 +08:00
jxxghp
5946684ee6 Merge pull request #776 from thsrite/main
fix 更新log
2023-10-10 13:44:39 +08:00
thsrite
7e3f25879f fix 更新log 2023-10-10 13:41:39 +08:00
jxxghp
48dcc3ee1b - 优化Windows打包 2023-10-10 13:32:05 +08:00
jxxghp
fca0a4b511 Merge remote-tracking branch 'origin/main' 2023-10-10 13:27:11 +08:00
jxxghp
d6831a8881 - 优化Windows打包 2023-10-10 13:27:00 +08:00
jxxghp
39a646ed92 更新 README.md 2023-10-10 12:50:11 +08:00
jxxghp
595965c5d0 Merge pull request #774 from thsrite/main 2023-10-10 11:47:00 +08:00
thsrite
3bb6f8a0c0 Merge remote-tracking branch 'origin/main' 2023-10-10 11:15:45 +08:00
thsrite
1924a2017e fix #773 2023-10-10 11:15:26 +08:00
jxxghp
60140fd2e6 - 优化Windows打包 2023-10-10 09:50:37 +08:00
jxxghp
65b5219e45 fix TZ 2023-10-10 07:58:26 +08:00
jxxghp
ae2f649aee fix README.md 2023-10-09 21:44:39 +08:00
jxxghp
bf3e860a18 fix README.md 2023-10-09 21:34:08 +08:00
jxxghp
0b44a91493 fix README.md 2023-10-09 21:33:53 +08:00
jxxghp
16077b3341 fix #769 2023-10-09 21:13:36 +08:00
jxxghp
a7cedde721 fix build 2023-10-09 20:53:23 +08:00
jxxghp
ecd53192dc fix build 2023-10-09 20:53:11 +08:00
jxxghp
a03c76e211 更新 build-windows.yml 2023-10-09 20:35:18 +08:00
jxxghp
de427fd7a9 fix 2023-10-09 20:14:54 +08:00
72 changed files with 2231 additions and 918 deletions

View File

@@ -9,8 +9,9 @@ body:
请确认以下信息:
1. 请按此模板提交issues不按模板提交的问题将直接关闭。
2. 如果你的问题可以直接在以往 issue 或者 Telegram频道 中找到,那么你的 issue 将会被直接关闭。
3. 提交问题务必描述清楚、附上日志,描述不清导致无法理解和分析的问题会被直接关闭。
3. 提交问题务必描述清楚、附上日志,描述不清导致无法理解和分析的问题会被直接关闭。
4. 此仓库为后端仓库,如果是前端 WebUI 问题请在[前端仓库](https://github.com/jxxghp/MoviePilot-Frontend)提 issue。
5. 【不要通过issues来寻求解决你的环境问题、配置安装类问题、咨询类问题】否则直接关闭并加入用户黑名单实在没有精力陪一波又一波的伸手党玩。
- type: checkboxes
id: ensure
attributes:

View File

@@ -1,65 +0,0 @@
name: MoviePilot Windows Builder
on:
workflow_dispatch:
push:
branches:
- main
paths:
- version.py
jobs:
Windows-build:
runs-on: windows-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Release Version
id: release_version
run: |
$app_version = Select-String -Path "version.py" -Pattern "APP_VERSION\s=\s'v(.*)'" | ForEach-Object { $_.Matches.Groups[1].Value }
$env:GITHUB_ENV += "app_version=$app_version"
- name: Init Python 3.11.4
uses: actions/setup-python@v4
with:
python-version: '3.11.4'
- name: Install Dependent Packages
run: |
python -m pip install --upgrade pip
pip install wheel pyinstaller
pip install -r requirements.txt
shell: pwsh
- name: Pyinstaller
run: |
pyinstaller windows.spec
shell: pwsh
- name: Upload Windows File
uses: actions/upload-artifact@v3
with:
name: windows
path: dist/MoviePilot.exe
- name: Generate Release
id: generate_release
uses: actions/create-release@latest
with:
tag_name: v${{ env.app_version }}
release_name: v${{ env.app_version }}
body: ${{ github.event.commits[0].message }}
draft: false
prerelease: false
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Upload Release Asset
uses: dwenegar/upload-release-assets@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
release_id: ${{ steps.generate_release.outputs.id }}
assets_path: |
dist/MoviePilot.exe

View File

@@ -1,4 +1,4 @@
name: MoviePilot Docker Builder
name: MoviePilot Builder
on:
workflow_dispatch:
push:
@@ -21,7 +21,7 @@ jobs:
app_version=$(cat version.py |sed -ne "s/APP_VERSION\s=\s'v\(.*\)'/\1/gp")
echo "app_version=$app_version" >> $GITHUB_ENV
- name: Docker meta
- name: Docker Meta
id: meta
uses: docker/metadata-action@v5
with:
@@ -56,4 +56,98 @@ jobs:
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
cache-from: type=gha, scope=${{ github.workflow }}
cache-to: type=gha, scope=${{ github.workflow }}
cache-to: type=gha, scope=${{ github.workflow }}
Windows-build:
runs-on: windows-latest
name: Build Windows Binary
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Init Python 3.11.4
uses: actions/setup-python@v4
with:
python-version: '3.11.4'
cache: 'pip'
- name: Install Dependent Packages
run: |
python -m pip install --upgrade pip
pip install wheel pyinstaller
pip install -r requirements.txt
shell: pwsh
- name: Prepare Frontend
run: |
Invoke-WebRequest -Uri "http://nginx.org/download/nginx-1.25.2.zip" -OutFile "nginx.zip"
Expand-Archive -Path "nginx.zip" -DestinationPath "nginx-1.25.2"
Move-Item -Path "nginx-1.25.2/nginx-1.25.2" -Destination "nginx"
Remove-Item -Path "nginx.zip"
Remove-Item -Path "nginx-1.25.2" -Recurse -Force
$FRONTEND_VERSION = (Invoke-WebRequest -Uri "https://api.github.com/repos/jxxghp/MoviePilot-Frontend/releases/latest" | ConvertFrom-Json).tag_name
Invoke-WebRequest -Uri "https://github.com/jxxghp/MoviePilot-Frontend/releases/download/$FRONTEND_VERSION/dist.zip" -OutFile "dist.zip"
Expand-Archive -Path "dist.zip" -DestinationPath "dist"
Move-Item -Path "dist/dist/*" -Destination "nginx/html" -Force
Remove-Item -Path "dist.zip"
Remove-Item -Path "dist" -Recurse -Force
Move-Item -Path "nginx/html/nginx.conf" -Destination "nginx/conf/nginx.conf" -Force
New-Item -Path "nginx/temp" -ItemType Directory -Force
New-Item -Path "nginx/temp/__keep__.txt" -ItemType File -Force
New-Item -Path "nginx/logs" -ItemType Directory -Force
New-Item -Path "nginx/logs/__keep__.txt" -ItemType File -Force
shell: pwsh
- name: Pyinstaller
run: |
pyinstaller windows.spec
shell: pwsh
- name: Upload Windows File
uses: actions/upload-artifact@v3
with:
name: windows
path: dist/MoviePilot.exe
Create-release:
permissions: write-all
runs-on: ubuntu-latest
needs: [ Windows-build, Docker-build ]
steps:
- uses: actions/checkout@v2
- name: Release Version
id: release_version
run: |
app_version=$(cat version.py |sed -ne "s/APP_VERSION\s=\s'v\(.*\)'/\1/gp")
echo "app_version=$app_version" >> $GITHUB_ENV
- name: Download Artifact
uses: actions/download-artifact@v3
- name: get release_informations
shell: bash
run: |
mkdir releases
mv ./windows/MoviePilot.exe ./releases/MoviePilot_v${{ env.app_version }}.exe
- name: Create Release
id: create_release
uses: actions/create-release@latest
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
tag_name: v${{ env.app_version }}
release_name: v${{ env.app_version }}
body: ${{ github.event.commits[0].message }}
draft: false
prerelease: false
- name: Upload Release Asset
uses: dwenegar/upload-release-assets@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
release_id: ${{ steps.create_release.outputs.id }}
assets_path: |
./releases/

2
.gitignore vendored
View File

@@ -1,6 +1,8 @@
.idea/
*.c
build/
dist/
nginx/
test.py
app/helper/sites.py
config/user.db

View File

@@ -1,16 +1,20 @@
FROM python:3.11.4-slim-bullseye
ARG MOVIEPILOT_VERSION
ENV LANG="C.UTF-8" \
TZ="Asia/Shanghai" \
HOME="/moviepilot" \
CONFIG_DIR="/config" \
TERM="xterm" \
PUID=0 \
PGID=0 \
UMASK=000 \
PORT=3001 \
NGINX_PORT=3000 \
PROXY_HOST="" \
MOVIEPILOT_AUTO_UPDATE=true \
MOVIEPILOT_AUTO_UPDATE_DEV=false \
CONFIG_DIR="/config"
AUTH_SITE="iyuu" \
IYUU_SIGN=""
WORKDIR "/app"
RUN apt-get update -y \
&& apt-get -y install \
@@ -27,12 +31,14 @@ RUN apt-get update -y \
dumb-init \
jq \
haproxy \
fuse3 \
&& \
if [ "$(uname -m)" = "x86_64" ]; \
then ln -s /usr/lib/x86_64-linux-musl/libc.so /lib/libc.musl-x86_64.so.1; \
elif [ "$(uname -m)" = "aarch64" ]; \
then ln -s /usr/lib/aarch64-linux-musl/libc.so /lib/libc.musl-aarch64.so.1; \
fi \
&& curl https://rclone.org/install.sh | bash \
&& apt-get autoremove -y \
&& apt-get clean -y \
&& rm -rf \

View File

@@ -4,8 +4,6 @@
# 仅用于学习交流使用,请勿在任何国内平台宣传该项目!
Dockerhttps://hub.docker.com/r/jxxghp/moviepilot
发布频道https://t.me/moviepilot_channel
## 主要特性
@@ -33,36 +31,42 @@ MoviePilot需要配套下载器和媒体服务器配合使用。
### 4. **安装MoviePilot**
目前仅提供docker镜像,点击 [这里](https://hub.docker.com/r/jxxghp/moviepilot) 或执行命令:
- Docker镜像
```shell
docker pull jxxghp/moviepilot:latest
```
点击 [这里](https://hub.docker.com/r/jxxghp/moviepilot) 或执行命令:
```shell
docker pull jxxghp/moviepilot:latest
```
- Windows
下载 [MoviePilot.exe](https://github.com/jxxghp/MoviePilot/releases),双击运行后自动生成配置文件目录。
## 配置
项目的所有配置均通过环境变量进行设置,支持两种配置方式:
-docker环境变量部分进行参数配置,部分环境建立容器后会自动显示待配置项,如未自动显示配置项则需要手动增加对应环境变量。
- 下载 [app.env](https://github.com/jxxghp/MoviePilot/raw/main/config/app.env) 文件,修改好配置后放置到配置文件映射路径根目录,配置项可根据说明自主增减。
-Docker环境变量部分或Wdinows系统环境变量中进行参数配置,如未自动显示配置项则需要手动增加对应环境变量。
- 下载 [app.env](https://github.com/jxxghp/MoviePilot/raw/main/config/app.env) 配置文件,修改好配置后放置到配置文件映射路径根目录,配置项可根据说明自主增减。
配置文件映射路径:`/config`,配置项生效优先级:环境变量 > env文件 > 默认值,部分参数如路径映射、站点认证、权限端口等必须通过环境变量进行配置。
配置文件映射路径:`/config`,配置项生效优先级:环境变量 > env文件 > 默认值,**部分参数如路径映射、站点认证、权限端口、时区等必须通过环境变量进行配置**
> $\color{red}{*}$ 号标识的为必填项,其它为可选项,可选项可删除配置变量从而使用默认值。
> 号标识的为必填项,其它为可选项,可选项可删除配置变量从而使用默认值。
### 1. **基础设置**
- **NGINX_PORT $\color{red}{*}$ ** WEB服务端口默认`3000`可自行修改不能与API服务端口冲突仅支持环境变量配置
- **PORT $\color{red}{*}$ ** API服务端口默认`3001`可自行修改不能与WEB服务端口冲突仅支持环境变量配置
- **NGINX_PORT** WEB服务端口默认`3000`可自行修改不能与API服务端口冲突仅支持环境变量配置
- **PORT** API服务端口默认`3001`可自行修改不能与WEB服务端口冲突仅支持环境变量配置
- **PUID**:运行程序用户的`uid`,默认`0`(仅支持环境变量配置)
- **PGID**:运行程序用户的`gid`,默认`0`(仅支持环境变量配置)
- **UMASK**:掩码权限,默认`000`,可以考虑设置为`022`(仅支持环境变量配置)
- **MOVIEPILOT_AUTO_UPDATE**:重启更新,`true`/`false`,默认`true` **注意:如果出现网络问题可以配置`PROXY_HOST`,具体看下方`PROXY_HOST`解释**(仅支持环境变量配置)
- **PROXY_HOST** 网络代理访问themoviedb或者重启更新需要使用代理访问格式为`http(s)://ip:port`、`socks5://user:pass@host:port`(仅支持环境变量配置)
- **MOVIEPILOT_AUTO_UPDATE**:重启更新,`true`/`false`,默认`true` **注意:如果出现网络问题可以配置`PROXY_HOST`**(仅支持环境变量配置)
- **MOVIEPILOT_AUTO_UPDATE_DEV**:重启时更新到未发布的开发版本代码,`true`/`false`,默认`false`(仅支持环境变量配置)
---
- **SUPERUSER $\color{red}{*}$ ** 超级管理员用户名,默认`admin`,安装后使用该用户登录后台管理界面
- **SUPERUSER_PASSWORD $\color{red}{*}$ ** 超级管理员初始密码,默认`password`,建议修改为复杂密码
- **API_TOKEN $\color{red}{*}$ ** API密钥默认`moviepilot`在媒体服务器Webhook、微信回调等地址配置中需要加上`?token=`该值,建议修改为复杂字符串
- **PROXY_HOST** 网络代理访问themoviedb或者重启更新需要使用代理访问格式为`http(s)://ip:port``socks5://user:pass@host:port`(可选)
- **SUPERUSER** 超级管理员用户名,默认`admin`,安装后使用该用户登录后台管理界面
- **SUPERUSER_PASSWORD** 超级管理员初始密码,默认`password`,建议修改为复杂密码
- **API_TOKEN** API密钥默认`moviepilot`在媒体服务器Webhook、微信回调等地址配置中需要加上`?token=`该值,建议修改为复杂字符串
- **TMDB_API_DOMAIN** TMDB API地址默认`api.themoviedb.org`,也可配置为`api.tmdb.org`或其它中转代理服务地址,能连通即可
- **TMDB_IMAGE_DOMAIN** TMDB图片地址默认`image.tmdb.org`可配置为其它中转代理以加速TMDB图片显示`static-mdb.v.geilijiasu.com`
---
@@ -70,18 +74,18 @@ docker pull jxxghp/moviepilot:latest
- **SCRAP_SOURCE** 刮削元数据及图片使用的数据源,`themoviedb`/`douban`,默认`themoviedb`
- **SCRAP_FOLLOW_TMDB** 新增已入库媒体是否跟随TMDB信息变化`true`/`false`,默认`true`
---
- **TRANSFER_TYPE $\color{red}{*}$ ** 整理转移方式,支持`link`/`copy`/`move`/`softlink` **注意:在`link`和`softlink`转移方式下,转移后的文件会继承源文件的权限掩码,不受`UMASK`影响**
- **LIBRARY_PATH $\color{red}{*}$ ** 媒体库目录,多个目录使用`,`分隔
- **TRANSFER_TYPE** 整理转移方式,支持`link`/`copy`/`move`/`softlink`/`rclone_copy`/`rclone_move` **注意:在`link`和`softlink`转移方式下,转移后的文件会继承源文件的权限掩码,不受`UMASK`影响rclone需要自行映射rclone配置目录到容器中或在容器内完成rclone配置节点名称必须为`MP`**
- **LIBRARY_PATH** 媒体库目录,多个目录使用`,`分隔
- **LIBRARY_MOVIE_NAME** 电影媒体库目录名称(不是完整路径),默认`电影`
- **LIBRARY_TV_NAME** 电视剧媒体库目录名称(不是完整路径),默认`电视剧`
- **LIBRARY_ANIME_NAME** 动漫媒体库目录名称(不是完整路径),默认`电视剧/动漫`
- **LIBRARY_CATEGORY** 媒体库二级分类开关,`true`/`false`,默认`false`,开启后会根据配置 [category.yaml](https://github.com/jxxghp/MoviePilot/raw/main/config/category.yaml) 自动在媒体库目录下建立二级目录分类
---
- **COOKIECLOUD_HOST $\color{red}{*}$ ** CookieCloud服务器地址格式`http(s)://ip:port`,不配置默认使用内建服务器`https://movie-pilot.org/cookiecloud`
- **COOKIECLOUD_KEY $\color{red}{*}$ ** CookieCloud用户KEY
- **COOKIECLOUD_PASSWORD $\color{red}{*}$ ** CookieCloud端对端加密密码
- **COOKIECLOUD_INTERVAL $\color{red}{*}$ ** CookieCloud同步间隔分钟
- **USER_AGENT $\color{red}{*}$ ** CookieCloud保存Cookie对应的浏览器UA建议配置设置后可增加连接站点的成功率同步站点后可以在管理界面中修改
- **COOKIECLOUD_HOST** CookieCloud服务器地址格式`http(s)://ip:port`,不配置默认使用内建服务器`https://movie-pilot.org/cookiecloud`
- **COOKIECLOUD_KEY** CookieCloud用户KEY
- **COOKIECLOUD_PASSWORD** CookieCloud端对端加密密码
- **COOKIECLOUD_INTERVAL** CookieCloud同步间隔分钟
- **USER_AGENT** CookieCloud保存Cookie对应的浏览器UA建议配置设置后可增加连接站点的成功率同步站点后可以在管理界面中修改
- **OCR_HOST** OCR识别服务器地址格式`http(s)://ip:port`用于识别站点验证码实现自动登录获取Cookie等不配置默认使用内建服务器`https://movie-pilot.org`,可使用 [这个镜像](https://hub.docker.com/r/jxxghp/moviepilot-ocr) 自行搭建。
---
- **SUBSCRIBE_MODE** 订阅模式,`rss`/`spider`,默认`spider``rss`模式通过定时刷新RSS来匹配订阅RSS地址会自动获取也可手动维护对站点压力小同时可设置订阅刷新周期24小时运行但订阅和下载通知不能过滤和显示免费推荐使用rss模式。
@@ -90,7 +94,7 @@ docker pull jxxghp/moviepilot:latest
- **SEARCH_SOURCE** 媒体信息搜索来源,`themoviedb`/`douban`,默认`themoviedb`
---
- **AUTO_DOWNLOAD_USER** 远程交互搜索时自动择优下载的用户ID多个用户使用,分割,未设置需要选择资源或者回复`0`
- **MESSAGER $\color{red}{*}$ ** 消息通知渠道,支持 `telegram`/`wechat`/`slack`/`synologychat`,开启多个渠道时使用`,`分隔。同时还需要配置对应渠道的环境变量,非对应渠道的变量可删除,推荐使用`telegram`
- **MESSAGER** 消息通知渠道,支持 `telegram`/`wechat`/`slack`/`synologychat`,开启多个渠道时使用`,`分隔。同时还需要配置对应渠道的环境变量,非对应渠道的变量可删除,推荐使用`telegram`
- `wechat`设置项:
@@ -121,7 +125,7 @@ docker pull jxxghp/moviepilot:latest
- **SYNOLOGYCHAT_TOKEN** SynologyChat机器人`令牌`
---
- **DOWNLOAD_PATH $\color{red}{*}$ ** 下载保存目录,**注意:需要将`moviepilot``下载器`的映射路径保持一致**,否则会导致下载文件无法转移
- **DOWNLOAD_PATH** 下载保存目录,**注意:需要将`moviepilot`及`下载器`的映射路径保持一致**,否则会导致下载文件无法转移
- **DOWNLOAD_MOVIE_PATH** 电影下载保存目录路径,不设置则下载到`DOWNLOAD_PATH`
- **DOWNLOAD_TV_PATH** 电视剧下载保存目录路径,不设置则下载到`DOWNLOAD_PATH`
- **DOWNLOAD_ANIME_PATH** 动漫下载保存目录路径,不设置则下载到`DOWNLOAD_PATH`
@@ -129,7 +133,7 @@ docker pull jxxghp/moviepilot:latest
- **DOWNLOAD_SUBTITLE** 下载站点字幕,`true`/`false`,默认`true`
- **DOWNLOADER_MONITOR** 下载器监控,`true`/`false`,默认为`true`,开启后下载完成时才会自动整理入库
- **TORRENT_TAG** 下载器种子标签,默认为`MOVIEPILOT`设置后只有MoviePilot添加的下载才会处理留空所有下载器中的任务均会处理
- **DOWNLOADER $\color{red}{*}$ ** 下载器,支持`qbittorrent`/`transmission`QB版本号要求>= 4.3.9TR版本号要求>= 3.0,同时还需要配置对应渠道的环境变量,非对应渠道的变量可删除,推荐使用`qbittorrent`
- **DOWNLOADER** 下载器,支持`qbittorrent`/`transmission`QB版本号要求>= 4.3.9TR版本号要求>= 3.0,同时还需要配置对应渠道的环境变量,非对应渠道的变量可删除,推荐使用`qbittorrent`
- `qbittorrent`设置项:
@@ -146,7 +150,7 @@ docker pull jxxghp/moviepilot:latest
---
- **REFRESH_MEDIASERVER** 入库后是否刷新媒体服务器,`true`/`false`,默认`true`
- **MEDIASERVER $\color{red}{*}$ ** 媒体服务器,支持`emby`/`jellyfin`/`plex`,同时开启多个使用`,`分隔。还需要配置对应媒体服务器的环境变量,非对应媒体服务器的变量可删除,推荐使用`emby`
- **MEDIASERVER** 媒体服务器,支持`emby`/`jellyfin`/`plex`,同时开启多个使用`,`分隔。还需要配置对应媒体服务器的环境变量,非对应媒体服务器的变量可删除,推荐使用`emby`
- `emby`设置项:
@@ -169,9 +173,9 @@ docker pull jxxghp/moviepilot:latest
### 2. **用户认证**
`MoviePilot`需要认证后才能使用,配置`AUTH_SITE`后,需要根据下表配置对应站点的认证参数(**仅能通过docker环境变量配置**
`MoviePilot`需要认证后才能使用,配置`AUTH_SITE`后,需要根据下表配置对应站点的认证参数(**仅能通过环境变量配置**
- **AUTH_SITE $\color{red}{*}$ ** 认证站点,支持`iyuu`/`hhclub`/`audiences`/`hddolby`/`zmpt`/`freefarm`/`hdfans`/`wintersakura`/`leaves`/`1ptba`/`icc2022`/`ptlsp`/`xingtan`
- **AUTH_SITE** 认证站点,支持`iyuu`/`hhclub`/`audiences`/`hddolby`/`zmpt`/`freefarm`/`hdfans`/`wintersakura`/`leaves`/`1ptba`/`icc2022`/`ptlsp`/`xingtan`
| 站点 | 参数 |
|:------------:|:-----------------------------------------------------:|

View File

@@ -1,14 +1,12 @@
from typing import List, Any
from fastapi import APIRouter, Depends, Response
from sqlalchemy.orm import Session
from app import schemas
from app.chain.douban import DoubanChain
from app.core.config import settings
from app.core.context import MediaInfo
from app.core.security import verify_token
from app.db import get_db
from app.schemas import MediaType
from app.utils.http import RequestUtils
@@ -32,13 +30,12 @@ def douban_img(imgurl: str) -> Any:
@router.get("/recognize/{doubanid}", summary="豆瓣ID识别", response_model=schemas.Context)
def recognize_doubanid(doubanid: str,
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
根据豆瓣ID识别媒体信息
"""
# 识别媒体信息
context = DoubanChain(db).recognize_by_doubanid(doubanid=doubanid)
context = DoubanChain().recognize_by_doubanid(doubanid=doubanid)
if context:
return context.to_dict()
else:
@@ -48,12 +45,11 @@ def recognize_doubanid(doubanid: str,
@router.get("/showing", summary="豆瓣正在热映", response_model=List[schemas.MediaInfo])
def movie_showing(page: int = 1,
count: int = 30,
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
浏览豆瓣正在热映
"""
movies = DoubanChain(db).movie_showing(page=page, count=count)
movies = DoubanChain().movie_showing(page=page, count=count)
if not movies:
return []
medias = [MediaInfo(douban_info=movie) for movie in movies]
@@ -65,13 +61,12 @@ def douban_movies(sort: str = "R",
tags: str = "",
page: int = 1,
count: int = 30,
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
浏览豆瓣电影信息
"""
movies = DoubanChain(db).douban_discover(mtype=MediaType.MOVIE,
sort=sort, tags=tags, page=page, count=count)
movies = DoubanChain().douban_discover(mtype=MediaType.MOVIE,
sort=sort, tags=tags, page=page, count=count)
if not movies:
return []
medias = [MediaInfo(douban_info=movie) for movie in movies]
@@ -86,13 +81,12 @@ def douban_tvs(sort: str = "R",
tags: str = "",
page: int = 1,
count: int = 30,
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
浏览豆瓣剧集信息
"""
tvs = DoubanChain(db).douban_discover(mtype=MediaType.TV,
sort=sort, tags=tags, page=page, count=count)
tvs = DoubanChain().douban_discover(mtype=MediaType.TV,
sort=sort, tags=tags, page=page, count=count)
if not tvs:
return []
medias = [MediaInfo(douban_info=tv) for tv in tvs]
@@ -106,47 +100,54 @@ def douban_tvs(sort: str = "R",
@router.get("/movie_top250", summary="豆瓣电影TOP250", response_model=List[schemas.MediaInfo])
def movie_top250(page: int = 1,
count: int = 30,
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
浏览豆瓣剧集信息
"""
movies = DoubanChain(db).movie_top250(page=page, count=count)
movies = DoubanChain().movie_top250(page=page, count=count)
return [MediaInfo(douban_info=movie).to_dict() for movie in movies]
@router.get("/tv_weekly_chinese", summary="豆瓣国产剧集周榜", response_model=List[schemas.MediaInfo])
def tv_weekly_chinese(page: int = 1,
count: int = 30,
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
中国每周剧集口碑榜
"""
tvs = DoubanChain(db).tv_weekly_chinese(page=page, count=count)
tvs = DoubanChain().tv_weekly_chinese(page=page, count=count)
return [MediaInfo(douban_info=tv).to_dict() for tv in tvs]
@router.get("/tv_weekly_global", summary="豆瓣全球剧集周榜", response_model=List[schemas.MediaInfo])
def tv_weekly_global(page: int = 1,
count: int = 30,
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
全球每周剧集口碑榜
"""
tvs = DoubanChain(db).tv_weekly_global(page=page, count=count)
tvs = DoubanChain().tv_weekly_global(page=page, count=count)
return [MediaInfo(douban_info=tv).to_dict() for tv in tvs]
@router.get("/tv_animation", summary="豆瓣动画剧集", response_model=List[schemas.MediaInfo])
def tv_animation(page: int = 1,
count: int = 30,
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
热门动画剧集
"""
tvs = DoubanChain().tv_animation(page=page, count=count)
return [MediaInfo(douban_info=tv).to_dict() for tv in tvs]
@router.get("/{doubanid}", summary="查询豆瓣详情", response_model=schemas.MediaInfo)
def douban_info(doubanid: str,
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
根据豆瓣ID查询豆瓣媒体信息
"""
doubaninfo = DoubanChain(db).douban_info(doubanid=doubanid)
doubaninfo = DoubanChain().douban_info(doubanid=doubanid)
if doubaninfo:
return MediaInfo(douban_info=doubaninfo).to_dict()
else:

View File

@@ -68,12 +68,12 @@ def exists(media_in: schemas.MediaInfo,
if media_in.tmdb_id:
mediainfo.from_dict(media_in.dict())
elif media_in.douban_id:
context = DoubanChain(db).recognize_by_doubanid(doubanid=media_in.douban_id)
context = DoubanChain().recognize_by_doubanid(doubanid=media_in.douban_id)
if context:
mediainfo = context.media_info
meta = context.meta_info
else:
context = MediaChain(db).recognize_by_title(title=f"{media_in.title} {media_in.year}")
context = MediaChain().recognize_by_title(title=f"{media_in.title} {media_in.year}")
if context:
mediainfo = context.media_info
meta = context.meta_info

View File

@@ -11,7 +11,6 @@ from app.core.security import verify_token
from app.db import get_db
from app.db.models.downloadhistory import DownloadHistory
from app.db.models.transferhistory import TransferHistory
from app.schemas import MediaType
from app.schemas.types import EventType
router = APIRouter()
@@ -90,23 +89,3 @@ def delete_transfer_history(history_in: schemas.TransferHistory,
# 删除记录
TransferHistory.delete(db, history_in.id)
return schemas.Response(success=True)
@router.post("/transfer", summary="历史记录重新转移", response_model=schemas.Response)
def redo_transfer_history(history_in: schemas.TransferHistory,
mtype: str = None,
new_tmdbid: int = None,
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
历史记录重新转移,不输入 mtype 和 new_tmdbid 时,自动使用文件名重新识别
"""
if mtype and new_tmdbid:
state, errmsg = TransferChain(db).re_transfer(logid=history_in.id,
mtype=MediaType(mtype), tmdbid=new_tmdbid)
else:
state, errmsg = TransferChain(db).re_transfer(logid=history_in.id)
if state:
return schemas.Response(success=True)
else:
return schemas.Response(success=False, message=errmsg)

View File

@@ -49,10 +49,10 @@ async def login_access_token(
user.create(db)
elif not user.is_active:
raise HTTPException(status_code=403, detail="用户未启用")
access_token_expires = timedelta(minutes=settings.ACCESS_TOKEN_EXPIRE_MINUTES)
return schemas.Token(
access_token=security.create_access_token(
user.id, expires_delta=access_token_expires
user.id,
expires_delta=timedelta(minutes=settings.ACCESS_TOKEN_EXPIRE_MINUTES)
),
token_type="bearer",
super_user=user.is_superuser,
@@ -74,11 +74,11 @@ def bing_wallpaper() -> Any:
@router.get("/tmdb", summary="TMDB电影海报", response_model=schemas.Response)
def tmdb_wallpaper(db: Session = Depends(get_db)) -> Any:
def tmdb_wallpaper() -> Any:
"""
获取TMDB电影海报
"""
wallpager = TmdbChain(db).get_random_wallpager()
wallpager = TmdbChain().get_random_wallpager()
if wallpager:
return schemas.Response(
success=True,

View File

@@ -20,13 +20,12 @@ router = APIRouter()
@router.get("/recognize", summary="识别媒体信息(种子)", response_model=schemas.Context)
def recognize(title: str,
subtitle: str = None,
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
根据标题、副标题识别媒体信息
"""
# 识别媒体信息
context = MediaChain(db).recognize_by_title(title=title, subtitle=subtitle)
context = MediaChain().recognize_by_title(title=title, subtitle=subtitle)
if context:
return context.to_dict()
return schemas.Context()
@@ -34,13 +33,12 @@ def recognize(title: str,
@router.get("/recognize_file", summary="识别媒体信息(文件)", response_model=schemas.Context)
def recognize(path: str,
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
根据文件路径识别媒体信息
"""
# 识别媒体信息
context = MediaChain(db).recognize_by_path(path)
context = MediaChain().recognize_by_path(path)
if context:
return context.to_dict()
return schemas.Context()
@@ -50,12 +48,11 @@ def recognize(path: str,
def search_by_title(title: str,
page: int = 1,
count: int = 8,
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
模糊搜索媒体信息列表
"""
_, medias = MediaChain(db).search(title=title)
_, medias = MediaChain().search(title=title)
if medias:
return [media.to_dict() for media in medias[(page - 1) * count: page * count]]
return []
@@ -85,21 +82,20 @@ def exists(title: str = None,
@router.get("/{mediaid}", summary="查询媒体详情", response_model=schemas.MediaInfo)
def tmdb_info(mediaid: str, type_name: str,
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
根据媒体ID查询themoviedb或豆瓣媒体信息type_name: 电影/电视剧
"""
mtype = MediaType(type_name)
if mediaid.startswith("tmdb:"):
result = TmdbChain(db).tmdb_info(int(mediaid[5:]), mtype)
result = TmdbChain().tmdb_info(int(mediaid[5:]), mtype)
return MediaInfo(tmdb_info=result).to_dict()
elif mediaid.startswith("douban:"):
# 查询豆瓣信息
doubaninfo = DoubanChain(db).douban_info(doubanid=mediaid[7:])
doubaninfo = DoubanChain().douban_info(doubanid=mediaid[7:])
if not doubaninfo:
return schemas.MediaInfo()
result = DoubanChain(db).recognize_by_doubaninfo(doubaninfo)
result = DoubanChain().recognize_by_doubaninfo(doubaninfo)
if result:
# TMDB
return result.media_info.to_dict()

View File

@@ -40,7 +40,7 @@ def search_by_tmdbid(mediaid: str,
elif mediaid.startswith("douban:"):
doubanid = mediaid.replace("douban:", "")
# 识别豆瓣信息
context = DoubanChain(db).recognize_by_doubanid(doubanid)
context = DoubanChain().recognize_by_doubanid(doubanid)
if not context or not context.media_info or not context.media_info.tmdb_id:
return []
torrents = SearchChain(db).search_by_tmdbid(tmdbid=context.media_info.tmdb_id,

View File

@@ -223,7 +223,7 @@ def execute_command(jobid: str,
if not jobid:
return schemas.Response(success=False, message="命令不能为空!")
if jobid == "subscribe_search":
Scheduler().start(jobid, state = 'R')
Scheduler().start(jobid, state='R')
else:
Scheduler().start(jobid)
return schemas.Response(success=True)
return schemas.Response(success=True)

View File

@@ -1,25 +1,22 @@
from typing import List, Any
from fastapi import APIRouter, Depends
from sqlalchemy.orm import Session
from app import schemas
from app.chain.tmdb import TmdbChain
from app.core.context import MediaInfo
from app.core.security import verify_token
from app.db import get_db
from app.schemas.types import MediaType
router = APIRouter()
@router.get("/seasons/{tmdbid}", summary="TMDB所有季", response_model=List[schemas.TmdbSeason])
def tmdb_seasons(tmdbid: int, db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
def tmdb_seasons(tmdbid: int, _: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
根据TMDBID查询themoviedb所有季信息
"""
seasons_info = TmdbChain(db).tmdb_seasons(tmdbid=tmdbid)
seasons_info = TmdbChain().tmdb_seasons(tmdbid=tmdbid)
if not seasons_info:
return []
else:
@@ -29,16 +26,15 @@ def tmdb_seasons(tmdbid: int, db: Session = Depends(get_db),
@router.get("/similar/{tmdbid}/{type_name}", summary="类似电影/电视剧", response_model=List[schemas.MediaInfo])
def tmdb_similar(tmdbid: int,
type_name: str,
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
根据TMDBID查询类似电影/电视剧type_name: 电影/电视剧
"""
mediatype = MediaType(type_name)
if mediatype == MediaType.MOVIE:
tmdbinfos = TmdbChain(db).movie_similar(tmdbid=tmdbid)
tmdbinfos = TmdbChain().movie_similar(tmdbid=tmdbid)
elif mediatype == MediaType.TV:
tmdbinfos = TmdbChain(db).tv_similar(tmdbid=tmdbid)
tmdbinfos = TmdbChain().tv_similar(tmdbid=tmdbid)
else:
return []
if not tmdbinfos:
@@ -50,16 +46,15 @@ def tmdb_similar(tmdbid: int,
@router.get("/recommend/{tmdbid}/{type_name}", summary="推荐电影/电视剧", response_model=List[schemas.MediaInfo])
def tmdb_recommend(tmdbid: int,
type_name: str,
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
根据TMDBID查询推荐电影/电视剧type_name: 电影/电视剧
"""
mediatype = MediaType(type_name)
if mediatype == MediaType.MOVIE:
tmdbinfos = TmdbChain(db).movie_recommend(tmdbid=tmdbid)
tmdbinfos = TmdbChain().movie_recommend(tmdbid=tmdbid)
elif mediatype == MediaType.TV:
tmdbinfos = TmdbChain(db).tv_recommend(tmdbid=tmdbid)
tmdbinfos = TmdbChain().tv_recommend(tmdbid=tmdbid)
else:
return []
if not tmdbinfos:
@@ -72,16 +67,15 @@ def tmdb_recommend(tmdbid: int,
def tmdb_credits(tmdbid: int,
type_name: str,
page: int = 1,
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
根据TMDBID查询演员阵容type_name: 电影/电视剧
"""
mediatype = MediaType(type_name)
if mediatype == MediaType.MOVIE:
tmdbinfos = TmdbChain(db).movie_credits(tmdbid=tmdbid, page=page)
tmdbinfos = TmdbChain().movie_credits(tmdbid=tmdbid, page=page)
elif mediatype == MediaType.TV:
tmdbinfos = TmdbChain(db).tv_credits(tmdbid=tmdbid, page=page)
tmdbinfos = TmdbChain().tv_credits(tmdbid=tmdbid, page=page)
else:
return []
if not tmdbinfos:
@@ -92,12 +86,11 @@ def tmdb_credits(tmdbid: int,
@router.get("/person/{person_id}", summary="人物详情", response_model=schemas.TmdbPerson)
def tmdb_person(person_id: int,
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
根据人物ID查询人物详情
"""
tmdbinfo = TmdbChain(db).person_detail(person_id=person_id)
tmdbinfo = TmdbChain().person_detail(person_id=person_id)
if not tmdbinfo:
return schemas.TmdbPerson()
else:
@@ -107,12 +100,11 @@ def tmdb_person(person_id: int,
@router.get("/person/credits/{person_id}", summary="人物参演作品", response_model=List[schemas.MediaInfo])
def tmdb_person_credits(person_id: int,
page: int = 1,
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
根据人物ID查询人物参演作品
"""
tmdbinfo = TmdbChain(db).person_credits(person_id=person_id, page=page)
tmdbinfo = TmdbChain().person_credits(person_id=person_id, page=page)
if not tmdbinfo:
return []
else:
@@ -124,16 +116,15 @@ def tmdb_movies(sort_by: str = "popularity.desc",
with_genres: str = "",
with_original_language: str = "",
page: int = 1,
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
浏览TMDB电影信息
"""
movies = TmdbChain(db).tmdb_discover(mtype=MediaType.MOVIE,
sort_by=sort_by,
with_genres=with_genres,
with_original_language=with_original_language,
page=page)
movies = TmdbChain().tmdb_discover(mtype=MediaType.MOVIE,
sort_by=sort_by,
with_genres=with_genres,
with_original_language=with_original_language,
page=page)
if not movies:
return []
return [MediaInfo(tmdb_info=movie).to_dict() for movie in movies]
@@ -144,16 +135,15 @@ def tmdb_tvs(sort_by: str = "popularity.desc",
with_genres: str = "",
with_original_language: str = "",
page: int = 1,
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
浏览TMDB剧集信息
"""
tvs = TmdbChain(db).tmdb_discover(mtype=MediaType.TV,
sort_by=sort_by,
with_genres=with_genres,
with_original_language=with_original_language,
page=page)
tvs = TmdbChain().tmdb_discover(mtype=MediaType.TV,
sort_by=sort_by,
with_genres=with_genres,
with_original_language=with_original_language,
page=page)
if not tvs:
return []
return [MediaInfo(tmdb_info=tv).to_dict() for tv in tvs]
@@ -161,12 +151,11 @@ def tmdb_tvs(sort_by: str = "popularity.desc",
@router.get("/trending", summary="TMDB流行趋势", response_model=List[schemas.MediaInfo])
def tmdb_trending(page: int = 1,
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
浏览TMDB剧集信息
"""
infos = TmdbChain(db).tmdb_trending(page=page)
infos = TmdbChain().tmdb_trending(page=page)
if not infos:
return []
return [MediaInfo(tmdb_info=info).to_dict() for info in infos]
@@ -174,12 +163,11 @@ def tmdb_trending(page: int = 1,
@router.get("/{tmdbid}/{season}", summary="TMDB季所有集", response_model=List[schemas.TmdbEpisode])
def tmdb_season_episodes(tmdbid: int, season: int,
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
根据TMDBID查询某季的所有信信息
"""
episodes_info = TmdbChain(db).tmdb_episodes(tmdbid=tmdbid, season=season)
episodes_info = TmdbChain().tmdb_episodes(tmdbid=tmdbid, season=season)
if not episodes_info:
return []
else:

View File

@@ -8,13 +8,15 @@ from app import schemas
from app.chain.transfer import TransferChain
from app.core.security import verify_token
from app.db import get_db
from app.db.models.transferhistory import TransferHistory
from app.schemas import MediaType
router = APIRouter()
@router.post("/manual", summary="手动转移", response_model=schemas.Response)
def manual_transfer(path: str,
def manual_transfer(path: str = None,
logid: int = None,
target: str = None,
tmdbid: int = None,
type_name: str = None,
@@ -28,8 +30,9 @@ def manual_transfer(path: str,
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
手动转移,支持自定义剧集识别格式
手动转移,文件或历史记录,支持自定义剧集识别格式
:param path: 转移路径或文件
:param logid: 转移历史记录ID
:param target: 目标路径
:param type_name: 媒体类型、电影/电视剧
:param tmdbid: tmdbid
@@ -43,11 +46,30 @@ def manual_transfer(path: str,
:param db: 数据库
:param _: Token校验
"""
in_path = Path(path)
force = False
if logid:
# 查询历史记录
history = TransferHistory.get(db, logid)
if not history:
return schemas.Response(success=False, message=f"历史记录不存在ID{logid}")
# 强制转移
force = True
# 源路径
in_path = Path(history.src)
# 目的路径
if history.dest:
# 删除旧的已整理文件
TransferChain(db).delete_files(Path(history.dest))
if not target:
target = history.dest
elif path:
in_path = Path(path)
else:
return schemas.Response(success=False, message=f"缺少参数path/logid")
if target:
target = Path(target)
if not target.exists():
return schemas.Response(success=False, message=f"目标路径不存在")
# 类型
mtype = MediaType(type_name) if type_name else None
# 自定义格式
@@ -68,7 +90,8 @@ def manual_transfer(path: str,
season=season,
transfer_type=transfer_type,
epformat=epformat,
min_filesize=min_filesize
min_filesize=min_filesize,
force=force
)
# 失败
if not state:

View File

@@ -301,11 +301,11 @@ def arr_movie_lookup(apikey: str, term: str, db: Session = Depends(get_db)) -> A
)
tmdbid = term.replace("tmdb:", "")
# 查询媒体信息
mediainfo = MediaChain(db).recognize_media(mtype=MediaType.MOVIE, tmdbid=int(tmdbid))
mediainfo = MediaChain().recognize_media(mtype=MediaType.MOVIE, tmdbid=int(tmdbid))
if not mediainfo:
return [RadarrMovie()]
# 查询是否已存在
exists = MediaChain(db).media_exists(mediainfo=mediainfo)
exists = MediaChain().media_exists(mediainfo=mediainfo)
if not exists:
# 文件不存在
hasfile = False
@@ -581,7 +581,7 @@ def arr_series_lookup(apikey: str, term: str, db: Session = Depends(get_db)) ->
# 获取TVDBID
if not term.startswith("tvdb:"):
mediainfo = MediaChain(db).recognize_media(meta=MetaInfo(term),
mediainfo = MediaChain().recognize_media(meta=MetaInfo(term),
mtype=MediaType.TV)
if not mediainfo:
return [SonarrSeries()]
@@ -593,7 +593,7 @@ def arr_series_lookup(apikey: str, term: str, db: Session = Depends(get_db)) ->
tvdbid = int(term.replace("tvdb:", ""))
# 查询TVDB信息
tvdbinfo = MediaChain(db).tvdb_info(tvdbid=tvdbid)
tvdbinfo = MediaChain().tvdb_info(tvdbid=tvdbid)
if not tvdbinfo:
return [SonarrSeries()]
@@ -605,11 +605,11 @@ def arr_series_lookup(apikey: str, term: str, db: Session = Depends(get_db)) ->
# 根据TVDB查询媒体信息
if not mediainfo:
mediainfo = MediaChain(db).recognize_media(meta=MetaInfo(tvdbinfo.get('seriesName')),
mediainfo = MediaChain().recognize_media(meta=MetaInfo(tvdbinfo.get('seriesName')),
mtype=MediaType.TV)
# 查询是否存在
exists = MediaChain(db).media_exists(mediainfo)
exists = MediaChain().media_exists(mediainfo)
if exists:
hasfile = True
else:

View File

@@ -115,16 +115,18 @@ class ChainBase(metaclass=ABCMeta):
"""
return self.run_module("recognize_media", meta=meta, mtype=mtype, tmdbid=tmdbid)
def match_doubaninfo(self, name: str, mtype: str = None,
year: str = None, season: int = None) -> Optional[dict]:
def match_doubaninfo(self, name: str, imdbid: str = None,
mtype: str = None, year: str = None, season: int = None) -> Optional[dict]:
"""
搜索和匹配豆瓣信息
:param name: 标题
:param imdbid: imdbid
:param mtype: 类型
:param year: 年份
:param season: 季
"""
return self.run_module("match_doubaninfo", name=name, mtype=mtype, year=year, season=season)
return self.run_module("match_doubaninfo", name=name, imdbid=imdbid,
mtype=mtype, year=year, season=season)
def obtain_images(self, mediainfo: MediaInfo) -> Optional[MediaInfo]:
"""
@@ -396,14 +398,15 @@ class ChainBase(metaclass=ABCMeta):
"""
return self.run_module("post_torrents_message", message=message, torrents=torrents)
def scrape_metadata(self, path: Path, mediainfo: MediaInfo) -> None:
def scrape_metadata(self, path: Path, mediainfo: MediaInfo, transfer_type: str) -> None:
"""
刮削元数据
:param path: 媒体文件路径
:param mediainfo: 识别的媒体信息
:param transfer_type: 转移模式
:return: 成功或失败
"""
self.run_module("scrape_metadata", path=path, mediainfo=mediainfo)
self.run_module("scrape_metadata", path=path, mediainfo=mediainfo, transfer_type=transfer_type)
def register_commands(self, commands: Dict[str, dict]) -> None:
"""

View File

@@ -6,11 +6,12 @@ from app.core.context import MediaInfo
from app.core.metainfo import MetaInfo
from app.log import logger
from app.schemas import MediaType
from app.utils.singleton import Singleton
class DoubanChain(ChainBase):
class DoubanChain(ChainBase, metaclass=Singleton):
"""
豆瓣处理链
豆瓣处理链,单例运行
"""
def recognize_by_doubanid(self, doubanid: str) -> Optional[Context]:
@@ -29,18 +30,32 @@ class DoubanChain(ChainBase):
"""
根据豆瓣信息识别媒体信息
"""
# 使用原标题匹配
meta = MetaInfo(title=doubaninfo.get("original_title") or doubaninfo.get("title"))
# 优先使用原标题匹配
season_meta = None
if doubaninfo.get("original_title"):
meta = MetaInfo(title=doubaninfo.get("original_title"))
season_meta = MetaInfo(title=doubaninfo.get("title"))
# 合并季
meta.begin_season = season_meta.begin_season
else:
meta = MetaInfo(title=doubaninfo.get("title"))
# 年份
if doubaninfo.get("year"):
meta.year = doubaninfo.get("year")
# 处理类型
if isinstance(doubaninfo.get('media_type'), MediaType):
meta.type = doubaninfo.get('media_type')
else:
meta.type = MediaType.MOVIE if doubaninfo.get("type") == "movie" else MediaType.TV
# 识别媒体信息
mediainfo: MediaInfo = self.recognize_media(meta=meta, mtype=meta.type)
# 使用原标题识别媒体信息
mediainfo = self.recognize_media(meta=meta, mtype=meta.type)
if not mediainfo:
logger.warn(f'{meta.name} 未识别到TMDB媒体信息')
return Context(meta_info=meta, media_info=MediaInfo(douban_info=doubaninfo))
if season_meta and season_meta.name != meta.name:
# 使用主标题识别媒体信息
mediainfo = self.recognize_media(meta=season_meta, mtype=season_meta.type)
if not mediainfo:
logger.warn(f'{meta.name} 未识别到TMDB媒体信息')
return Context(meta_info=meta, media_info=MediaInfo(douban_info=doubaninfo))
logger.info(f'识别到媒体信息:{mediainfo.type.value} {mediainfo.title_year} {meta.season}')
mediainfo.set_douban_info(doubaninfo)
return Context(meta_info=meta, media_info=mediainfo)
@@ -84,3 +99,9 @@ class DoubanChain(ChainBase):
"""
return self.run_module("douban_discover", mtype=mtype, sort=sort, tags=tags,
page=page, count=count)
def tv_animation(self, page: int = 1, count: int = 30) -> List[dict]:
"""
获取动画剧集
"""
return self.run_module("tv_animation", page=page, count=count)

View File

@@ -786,6 +786,7 @@ class DownloadChain(ChainBase):
for torrent in torrents:
history = self.downloadhis.get_by_hash(torrent.hash)
if history:
# 媒体信息
torrent.media = {
"tmdbid": history.tmdbid,
"type": history.type,
@@ -794,6 +795,8 @@ class DownloadChain(ChainBase):
"episode": history.episodes,
"image": history.image,
}
# 下载用户
torrent.userid = history.userid
ret_torrents.append(torrent)
return ret_torrents

View File

@@ -1,18 +1,31 @@
import copy
import time
from pathlib import Path
from threading import Lock
from typing import Optional, List, Tuple
from app.chain import ChainBase
from app.core.context import Context, MediaInfo
from app.core.event import eventmanager, Event
from app.core.meta import MetaBase
from app.core.metainfo import MetaInfo, MetaInfoPath
from app.log import logger
from app.schemas.types import EventType, MediaType
from app.utils.singleton import Singleton
from app.utils.string import StringUtils
class MediaChain(ChainBase):
recognize_lock = Lock()
class MediaChain(ChainBase, metaclass=Singleton):
"""
媒体信息处理链
媒体信息处理链,单例运行
"""
# 临时识别标题
recognize_title: Optional[str] = None
# 临时识别结果 {title, name, year, season, episode}
recognize_temp: Optional[dict] = None
def recognize_by_title(self, title: str, subtitle: str = None) -> Optional[Context]:
"""
@@ -24,14 +37,104 @@ class MediaChain(ChainBase):
# 识别媒体信息
mediainfo: MediaInfo = self.recognize_media(meta=metainfo)
if not mediainfo:
logger.warn(f'{title} 未识别到媒体信息')
return Context(meta_info=metainfo)
# 偿试使用辅助识别,如果有注册响应事件的话
if eventmanager.check(EventType.NameRecognize):
logger.info(f'请求辅助识别,标题:{title} ...')
mediainfo = self.recognize_help(title=title, org_meta=metainfo)
if not mediainfo:
logger.warn(f'{title} 未识别到媒体信息')
return Context(meta_info=metainfo)
# 识别成功
logger.info(f'{title} 识别到媒体信息:{mediainfo.type.value} {mediainfo.title_year}')
# 更新媒体图片
self.obtain_images(mediainfo=mediainfo)
# 返回上下文
return Context(meta_info=metainfo, media_info=mediainfo)
def recognize_help(self, title: str, org_meta: MetaBase) -> Optional[MediaInfo]:
"""
请求辅助识别,返回媒体信息
:param title: 标题
:param org_meta: 原始元数据
"""
with recognize_lock:
self.recognize_temp = None
self.recognize_title = title
# 发送请求事件
eventmanager.send_event(
EventType.NameRecognize,
{
'title': title,
}
)
# 每0.5秒循环一次等待结果直到10秒后超时
for i in range(10):
if self.recognize_temp is not None:
break
time.sleep(0.5)
# 加锁
with recognize_lock:
mediainfo = None
if not self.recognize_temp or self.recognize_title != title:
# 没有识别结果或者识别标题已改变
return None
# 有识别结果
meta_dict = copy.deepcopy(self.recognize_temp)
logger.info(f'获取到辅助识别结果:{meta_dict}')
if meta_dict.get("name") == org_meta.name and meta_dict.get("year") == org_meta.year:
logger.info(f'辅助识别结果与原始识别结果一致')
else:
logger.info(f'辅助识别结果与原始识别结果不一致,重新匹配媒体信息 ...')
org_meta.name = meta_dict.get("name")
org_meta.year = meta_dict.get("year")
org_meta.begin_season = meta_dict.get("season")
org_meta.begin_episode = meta_dict.get("episode")
if org_meta.begin_season or org_meta.begin_episode:
org_meta.type = MediaType.TV
# 重新识别
mediainfo = self.recognize_media(meta=org_meta)
return mediainfo
@eventmanager.register(EventType.NameRecognizeResult)
def recognize_result(self, event: Event):
"""
监控识别结果事件,获取辅助识别结果,结果格式:{title, name, year, season, episode}
"""
if not event:
return
event_data = event.event_data or {}
# 加锁
with recognize_lock:
# 不是原标题的结果不要
if event_data.get("title") != self.recognize_title:
return
# 标志收到返回
self.recognize_temp = {}
# 处理数据格式
file_title, file_year, season_number, episode_number = None, None, None, None
if event_data.get("name"):
file_title = str(event_data["name"]).split("/")[0].strip().replace(".", " ")
if event_data.get("year"):
file_year = str(event_data["year"]).split("/")[0].strip()
if event_data.get("season") and str(event_data["season"]).isdigit():
season_number = int(event_data["season"])
if event_data.get("episode") and str(event_data["episode"]).isdigit():
episode_number = int(event_data["episode"])
if not file_title:
return
if file_title == 'Unknown':
return
if not str(file_year).isdigit():
file_year = None
# 结果赋值
self.recognize_temp = {
"name": file_title,
"year": file_year,
"season": season_number,
"episode": episode_number
}
def recognize_by_path(self, path: str) -> Optional[Context]:
"""
根据文件路径识别媒体信息
@@ -43,8 +146,13 @@ class MediaChain(ChainBase):
# 识别媒体信息
mediainfo = self.recognize_media(meta=file_meta)
if not mediainfo:
logger.warn(f'{path} 未识别到媒体信息')
return Context(meta_info=file_meta)
# 偿试使用辅助识别,如果有注册响应事件的话
if eventmanager.check(EventType.NameRecognize):
logger.info(f'请求辅助识别,标题:{file_path.name} ...')
mediainfo = self.recognize_help(title=path, org_meta=file_meta)
if not mediainfo:
logger.warn(f'{path} 未识别到媒体信息')
return Context(meta_info=file_meta)
logger.info(f'{path} 识别到媒体信息:{mediainfo.type.value} {mediainfo.title_year}')
# 更新媒体图片
self.obtain_images(mediainfo=mediainfo)

View File

@@ -32,7 +32,7 @@ class MessageChain(ChainBase):
self.downloadchain = DownloadChain(self._db)
self.subscribechain = SubscribeChain(self._db)
self.searchchain = SearchChain(self._db)
self.medtachain = MediaChain(self._db)
self.medtachain = MediaChain()
self.torrent = TorrentHelper()
self.eventmanager = EventManager()
self.torrenthelper = TorrentHelper()
@@ -111,7 +111,8 @@ class MessageChain(ChainBase):
f"{sea} 季缺失 {StringUtils.str_series(no_exist.episodes) if no_exist.episodes else no_exist.total_episode}"
for sea, no_exist in no_exists.get(mediainfo.tmdb_id).items()]
self.post_message(Notification(channel=channel,
title=f"{mediainfo.title_year}\n" + "\n".join(messages)))
title=f"{mediainfo.title_year}\n" + "\n".join(messages),
userid=userid))
# 搜索种子,过滤掉不需要的剧集,以便选择
logger.info(f"{mediainfo.title_year} 媒体库中不存在,开始搜索 ...")
self.post_message(

View File

@@ -141,7 +141,7 @@ class SearchChain(ChainBase):
if not torrents:
logger.warn(f'{keyword or mediainfo.title} 没有符合优先级规则的资源')
return []
# 使用默认过滤规则再次过滤
# 使用过滤规则再次过滤
torrents = self.filter_torrents_by_rule(torrents=torrents,
filter_rule=filter_rule)
if not torrents:
@@ -333,15 +333,21 @@ class SearchChain(ChainBase):
:param filter_rule: 过滤规则
"""
# 取默认过滤规则
if not filter_rule:
filter_rule = self.systemconfig.get(SystemConfigKey.DefaultFilterRules)
# 没有则取搜索默认过滤规则
filter_rule = self.systemconfig.get(SystemConfigKey.DefaultSearchFilterRules)
if not filter_rule:
return torrents
# 包含
include = filter_rule.get("include")
# 排除
exclude = filter_rule.get("exclude")
# 质量
quality = filter_rule.get("quality")
# 分辨率
resolution = filter_rule.get("resolution")
# 特效
effect = filter_rule.get("effect")
def __filter_torrent(t: TorrentInfo) -> bool:
"""
@@ -359,6 +365,24 @@ class SearchChain(ChainBase):
f"{t.title} {t.description}", re.I):
logger.info(f"{t.title} 匹配排除规则 {exclude}")
return False
# 质量
if quality:
if not re.search(r"%s" % quality, t.title, re.I):
logger.info(f"{t.title} 不匹配质量规则 {quality}")
return False
# 分辨率
if resolution:
if not re.search(r"%s" % resolution, t.title, re.I):
logger.info(f"{t.title} 不匹配分辨率规则 {resolution}")
return False
# 特效
if effect:
if not re.search(r"%s" % effect, t.title, re.I):
logger.info(f"{t.title} 不匹配特效规则 {effect}")
return False
return True
# 使用默认过滤规则再次过滤

View File

@@ -6,6 +6,7 @@ from typing import Dict, List, Optional, Union, Tuple
from sqlalchemy.orm import Session
from app.chain import ChainBase
from app.chain.douban import DoubanChain
from app.chain.download import DownloadChain
from app.chain.search import SearchChain
from app.chain.torrents import TorrentsChain
@@ -50,18 +51,28 @@ class SubscribeChain(ChainBase):
识别媒体信息并添加订阅
"""
logger.info(f'开始添加订阅,标题:{title} ...')
# 识别元数据
metainfo = MetaInfo(title)
if year:
metainfo.year = year
if mtype:
metainfo.type = mtype
if season:
metainfo.type = MediaType.TV
metainfo.begin_season = season
# 识别媒体信息
mediainfo: MediaInfo = self.recognize_media(meta=metainfo, mtype=mtype, tmdbid=tmdbid)
if not mediainfo:
metainfo = None
mediainfo = None
if not tmdbid and doubanid:
# 将豆瓣信息转换为TMDB信息
context = DoubanChain().recognize_by_doubanid(doubanid)
if context:
metainfo = context.meta_info
mediainfo = context.media_info
else:
# 识别元数据
metainfo = MetaInfo(title)
if year:
metainfo.year = year
if mtype:
metainfo.type = mtype
if season:
metainfo.type = MediaType.TV
metainfo.begin_season = season
# 识别媒体信息
mediainfo = self.recognize_media(meta=metainfo, mtype=mtype, tmdbid=tmdbid)
# 识别失败
if not mediainfo or not metainfo or not mediainfo.tmdb_id:
logger.warn(f'未识别到媒体信息,标题:{title}tmdbid{tmdbid}')
return None, "未识别到媒体信息"
# 更新媒体图片
@@ -74,8 +85,8 @@ class SubscribeChain(ChainBase):
if not kwargs.get('total_episode'):
if not mediainfo.seasons:
# 补充媒体信息
mediainfo: MediaInfo = self.recognize_media(mtype=mediainfo.type,
tmdbid=mediainfo.tmdb_id)
mediainfo = self.recognize_media(mtype=mediainfo.type,
tmdbid=mediainfo.tmdb_id)
if not mediainfo:
logger.error(f"媒体信息识别失败!")
return None, "媒体信息识别失败"
@@ -85,7 +96,7 @@ class SubscribeChain(ChainBase):
total_episode = len(mediainfo.seasons.get(season) or [])
if not total_episode:
logger.error(f'未获取到总集数,标题:{title}tmdbid{tmdbid}')
return None, "未获取到总集数"
return None, f"未获取到{season} 季的总集数"
kwargs.update({
'total_episode': total_episode
})
@@ -176,66 +187,66 @@ class SubscribeChain(ChainBase):
totals = {
subscribe.season: subscribe.total_episode
}
# 查询缺失的媒体信息
# 查询媒体库缺失的媒体信息
exist_flag, no_exists = self.downloadchain.get_no_exists_info(
meta=meta,
mediainfo=mediainfo,
totals=totals
)
if exist_flag:
logger.info(f'{mediainfo.title_year} 媒体库中已存在,完成订阅')
self.subscribeoper.delete(subscribe.id)
# 发送通知
self.post_message(Notification(mtype=NotificationType.Subscribe,
title=f'{mediainfo.title_year} {meta.season} 已完成订阅',
image=mediainfo.get_message_image()))
continue
# 电视剧订阅
if meta.type == MediaType.TV:
# 使用订阅的总集数和开始集数替换no_exists
no_exists = self.__get_subscribe_no_exits(
no_exists=no_exists,
tmdb_id=mediainfo.tmdb_id,
begin_season=meta.begin_season,
total_episode=subscribe.total_episode,
start_episode=subscribe.start_episode,
)
# 打印缺失集信息
if no_exists and no_exists.get(subscribe.tmdbid):
no_exists_info = no_exists.get(subscribe.tmdbid).get(subscribe.season)
if no_exists_info:
logger.info(f'订阅 {mediainfo.title_year} {meta.season} 缺失集:{no_exists_info.episodes}')
else:
# 洗版状态
exist_flag = False
if meta.type == MediaType.TV:
no_exists = {
subscribe.season: NotExistMediaInfo(
season=subscribe.season,
episodes=[],
total_episode=subscribe.total_episode,
start_episode=subscribe.start_episode or 1)
subscribe.tmdbid: {
subscribe.season: NotExistMediaInfo(
season=subscribe.season,
episodes=[],
total_episode=subscribe.total_episode,
start_episode=subscribe.start_episode or 1)
}
}
else:
no_exists = {}
# 已存在
if exist_flag:
logger.info(f'{mediainfo.title_year} 媒体库中已存在')
self.finish_subscribe_or_not(subscribe=subscribe, meta=meta, mediainfo=mediainfo)
continue
# 电视剧订阅处理缺失集
if meta.type == MediaType.TV:
# 使用订阅的总集数和开始集数替换no_exists
no_exists = self.__get_subscribe_no_exits(
no_exists=no_exists,
tmdb_id=mediainfo.tmdb_id,
begin_season=meta.begin_season,
total_episode=subscribe.total_episode,
start_episode=subscribe.start_episode,
)
# 打印缺失集信息
if no_exists and no_exists.get(subscribe.tmdbid):
no_exists_info = no_exists.get(subscribe.tmdbid).get(subscribe.season)
if no_exists_info:
logger.info(f'订阅 {mediainfo.title_year} {meta.season} 缺失集:{no_exists_info.episodes}')
# 站点范围
if subscribe.sites:
sites = json.loads(subscribe.sites)
else:
sites = None
# 优先级过滤规则
if subscribe.best_version:
priority_rule = self.systemconfig.get(SystemConfigKey.BestVersionFilterRules)
else:
priority_rule = self.systemconfig.get(SystemConfigKey.SubscribeFilterRules)
# 默认过滤规则
if subscribe.include or subscribe.exclude:
filter_rule = {
"include": subscribe.include,
"exclude": subscribe.exclude
}
else:
filter_rule = self.systemconfig.get(SystemConfigKey.DefaultFilterRules)
# 过滤规则
filter_rule = self.get_filter_rule(subscribe)
# 搜索,同时电视剧会过滤掉不需要的剧集
contexts = self.searchchain.process(mediainfo=mediainfo,
keyword=subscribe.keyword,
@@ -247,8 +258,10 @@ class SubscribeChain(ChainBase):
logger.warn(f'订阅 {subscribe.keyword or subscribe.name} 未搜索到资源')
if meta.type == MediaType.TV:
# 未搜索到资源,但本地缺失可能有变化,更新订阅剩余集数
self.__update_lack_episodes(lefts=no_exists, subscribe=subscribe, mediainfo=mediainfo)
self.__update_lack_episodes(lefts=no_exists, subscribe=subscribe,
meta=meta, mediainfo=mediainfo)
continue
# 过滤
matched_contexts = []
for context in contexts:
@@ -278,8 +291,10 @@ class SubscribeChain(ChainBase):
logger.warn(f'订阅 {subscribe.name} 没有符合过滤条件的资源')
# 非洗版未搜索到资源,但本地缺失可能有变化,更新订阅剩余集数
if meta.type == MediaType.TV and not subscribe.best_version:
self.__update_lack_episodes(lefts=no_exists, subscribe=subscribe, mediainfo=mediainfo)
self.__update_lack_episodes(lefts=no_exists, subscribe=subscribe,
meta=meta, mediainfo=mediainfo)
continue
# 自动下载
downloads, lefts = self.downloadchain.batch_download(contexts=matched_contexts,
no_exists=no_exists)
@@ -299,8 +314,9 @@ class SubscribeChain(ChainBase):
if meta.type == MediaType.TV and not subscribe.best_version:
# 更新订阅剩余集数和时间
update_date = True if downloads else False
self.__update_lack_episodes(lefts=lefts, subscribe=subscribe,
self.__update_lack_episodes(lefts=lefts, subscribe=subscribe, meta=meta,
mediainfo=mediainfo, update_date=update_date)
# 手动触发时发送系统消息
if manual:
if sid:
@@ -309,19 +325,19 @@ class SubscribeChain(ChainBase):
self.message.put('所有订阅搜索完成!')
def finish_subscribe_or_not(self, subscribe: Subscribe, meta: MetaInfo,
mediainfo: MediaInfo, downloads: List[Context]):
mediainfo: MediaInfo, downloads: List[Context] = None):
"""
判断是否应完成订阅
"""
if not subscribe.best_version:
# 全部下载完成
logger.info(f'{mediainfo.title_year} 下载完成,完成订阅')
logger.info(f'{mediainfo.title_year} 完成订阅')
self.subscribeoper.delete(subscribe.id)
# 发送通知
self.post_message(Notification(mtype=NotificationType.Subscribe,
title=f'{mediainfo.title_year} {meta.season} 已完成订阅',
image=mediainfo.get_message_image()))
else:
elif downloads:
# 当前下载资源的优先级
priority = max([item.torrent_info.pri_order for item in downloads])
if priority == 100:
@@ -375,6 +391,67 @@ class SubscribeChain(ChainBase):
return ret_sites
def get_filter_rule(self, subscribe: Subscribe):
"""
获取订阅过滤规则,没有则返回默认规则
"""
# 默认过滤规则
if (subscribe.include
or subscribe.exclude
or subscribe.quality
or subscribe.resolution
or subscribe.effect):
return {
"include": subscribe.include,
"exclude": subscribe.exclude,
"quality": subscribe.quality,
"resolution": subscribe.resolution,
"effect": subscribe.effect,
}
# 订阅默认过滤规则
return self.systemconfig.get(SystemConfigKey.DefaultFilterRules) or {}
@staticmethod
def check_filter_rule(torrent_info: TorrentInfo, filter_rule: Dict[str, str]) -> bool:
"""
检查种子是否匹配订阅过滤规则
"""
if not filter_rule:
return True
# 包含
include = filter_rule.get("include")
if include:
if not re.search(r"%s" % include,
f"{torrent_info.title} {torrent_info.description}", re.I):
logger.info(f"{torrent_info.title} 不匹配包含规则 {include}")
return False
# 排除
exclude = filter_rule.get("exclude")
if exclude:
if re.search(r"%s" % exclude,
f"{torrent_info.title} {torrent_info.description}", re.I):
logger.info(f"{torrent_info.title} 匹配排除规则 {exclude}")
return False
# 质量
quality = filter_rule.get("quality")
if quality:
if not re.search(r"%s" % quality, torrent_info.title, re.I):
logger.info(f"{torrent_info.title} 不匹配质量规则 {quality}")
return False
# 分辨率
resolution = filter_rule.get("resolution")
if resolution:
if not re.search(r"%s" % resolution, torrent_info.title, re.I):
logger.info(f"{torrent_info.title} 不匹配分辨率规则 {resolution}")
return False
# 特效
effect = filter_rule.get("effect")
if effect:
if not re.search(r"%s" % effect, torrent_info.title, re.I):
logger.info(f"{torrent_info.title} 不匹配特效规则 {effect}")
return False
return True
def match(self, torrents: Dict[str, List[Context]]):
"""
从缓存中匹配订阅,并自动下载
@@ -411,46 +488,48 @@ class SubscribeChain(ChainBase):
mediainfo=mediainfo,
totals=totals
)
if exist_flag:
logger.info(f'{mediainfo.title_year} 媒体库中已存在,完成订阅')
self.subscribeoper.delete(subscribe.id)
# 发送通知
self.post_message(Notification(mtype=NotificationType.Subscribe,
title=f'{mediainfo.title_year} {meta.season} 已完成订阅',
image=mediainfo.get_message_image()))
continue
# 电视剧订阅
if meta.type == MediaType.TV:
# 使用订阅的总集数和开始集数替换no_exists
no_exists = self.__get_subscribe_no_exits(
no_exists=no_exists,
tmdb_id=mediainfo.tmdb_id,
begin_season=meta.begin_season,
total_episode=subscribe.total_episode,
start_episode=subscribe.start_episode,
)
# 打印缺失集信息
if no_exists and no_exists.get(subscribe.tmdbid):
no_exists_info = no_exists.get(subscribe.tmdbid).get(subscribe.season)
if no_exists_info:
logger.info(f'订阅 {mediainfo.title_year} {meta.season} 缺失集:{no_exists_info.episodes}')
else:
# 洗版
exist_flag = False
if meta.type == MediaType.TV:
no_exists = {
subscribe.season: NotExistMediaInfo(
season=subscribe.season,
episodes=[],
total_episode=subscribe.total_episode,
start_episode=subscribe.start_episode or 1)
subscribe.tmdbid: {
subscribe.season: NotExistMediaInfo(
season=subscribe.season,
episodes=[],
total_episode=subscribe.total_episode,
start_episode=subscribe.start_episode or 1)
}
}
else:
no_exists = {}
# 默认过滤规则
default_filter = self.systemconfig.get(SystemConfigKey.DefaultFilterRules) or {}
include = subscribe.include or default_filter.get("include")
exclude = subscribe.exclude or default_filter.get("exclude")
# 已存在
if exist_flag:
logger.info(f'{mediainfo.title_year} 媒体库中已存在')
self.finish_subscribe_or_not(subscribe=subscribe, meta=meta, mediainfo=mediainfo)
continue
# 电视剧订阅
if meta.type == MediaType.TV:
# 使用订阅的总集数和开始集数替换no_exists
no_exists = self.__get_subscribe_no_exits(
no_exists=no_exists,
tmdb_id=mediainfo.tmdb_id,
begin_season=meta.begin_season,
total_episode=subscribe.total_episode,
start_episode=subscribe.start_episode,
)
# 打印缺失集信息
if no_exists and no_exists.get(subscribe.tmdbid):
no_exists_info = no_exists.get(subscribe.tmdbid).get(subscribe.season)
if no_exists_info:
logger.info(f'订阅 {mediainfo.title_year} {meta.season} 缺失集:{no_exists_info.episodes}')
# 过滤规则
filter_rule = self.get_filter_rule(subscribe)
# 遍历缓存种子
_match_context = []
for domain, contexts in torrents.items():
@@ -465,11 +544,11 @@ class SubscribeChain(ChainBase):
continue
# 优先级过滤规则
if subscribe.best_version:
filter_rule = self.systemconfig.get(SystemConfigKey.BestVersionFilterRules)
priority_rule = self.systemconfig.get(SystemConfigKey.BestVersionFilterRules)
else:
filter_rule = self.systemconfig.get(SystemConfigKey.SubscribeFilterRules)
priority_rule = self.systemconfig.get(SystemConfigKey.SubscribeFilterRules)
result: List[TorrentInfo] = self.filter_torrents(
rule_string=filter_rule,
rule_string=priority_rule,
torrent_list=[torrent_info],
mediainfo=torrent_mediainfo)
if result is not None and not result:
@@ -510,7 +589,8 @@ class SubscribeChain(ChainBase):
set(torrent_meta.episode_list)
):
logger.info(
f'{torrent_info.title} 对应剧集 {torrent_meta.episode_list} 未包含缺失的剧集')
f'{torrent_info.title} 对应剧集 {torrent_meta.episode_list} 未包含缺失的剧集'
)
continue
# 过滤掉已经下载的集数
if self.__check_subscribe_note(subscribe, torrent_meta.episode_list):
@@ -522,21 +602,16 @@ class SubscribeChain(ChainBase):
if torrent_meta.episode_list:
logger.info(f'{subscribe.name} 正在洗版,{torrent_info.title} 不是整季')
continue
# 包含
if include:
if not re.search(r"%s" % include,
f"{torrent_info.title} {torrent_info.description}", re.I):
logger.info(f"{torrent_info.title} 不匹配包含规则 {include}")
continue
# 排除
if exclude:
if re.search(r"%s" % exclude,
f"{torrent_info.title} {torrent_info.description}", re.I):
logger.info(f"{torrent_info.title} 匹配排除规则 {exclude}")
continue
# 过滤规则
if not self.check_filter_rule(torrent_info=torrent_info,
filter_rule=filter_rule):
continue
# 匹配成功
logger.info(f'{mediainfo.title_year} 匹配成功:{torrent_info.title}')
_match_context.append(context)
# 开始下载
logger.info(f'{mediainfo.title_year} 匹配完成,共匹配到{len(_match_context)}个资源')
if _match_context:
@@ -554,12 +629,13 @@ class SubscribeChain(ChainBase):
if meta.type == MediaType.TV and not subscribe.best_version:
update_date = True if downloads else False
# 未完成下载,计算剩余集数
self.__update_lack_episodes(lefts=lefts, subscribe=subscribe,
self.__update_lack_episodes(lefts=lefts, subscribe=subscribe, meta=meta,
mediainfo=mediainfo, update_date=update_date)
else:
if meta.type == MediaType.TV:
# 未搜索到资源,但本地缺失可能有变化,更新订阅剩余集数
self.__update_lack_episodes(lefts=no_exists, subscribe=subscribe, mediainfo=mediainfo)
self.__update_lack_episodes(lefts=no_exists, subscribe=subscribe,
meta=meta, mediainfo=mediainfo)
def check(self):
"""
@@ -651,31 +727,36 @@ class SubscribeChain(ChainBase):
def __update_lack_episodes(self, lefts: Dict[int, Dict[int, NotExistMediaInfo]],
subscribe: Subscribe,
meta: MetaBase,
mediainfo: MediaInfo,
update_date: bool = False):
"""
更新订阅剩余集数
"""
left_seasons = lefts.get(mediainfo.tmdb_id) or {}
for season_info in left_seasons.values():
season = season_info.season
if season == subscribe.season:
left_episodes = season_info.episodes
if not left_episodes:
lack_episode = season_info.total_episode
else:
lack_episode = len(left_episodes)
logger.info(f'{mediainfo.title_year}{season} 更新缺失集数为{lack_episode} ...')
if update_date:
# 同时更新最后时间
self.subscribeoper.update(subscribe.id, {
"lack_episode": lack_episode,
"last_update": datetime.now().strftime('%Y-%m-%d %H:%M:%S')
})
else:
self.subscribeoper.update(subscribe.id, {
"lack_episode": lack_episode
})
left_seasons = lefts.get(mediainfo.tmdb_id)
if left_seasons:
for season_info in left_seasons.values():
season = season_info.season
if season == subscribe.season:
left_episodes = season_info.episodes
if not left_episodes:
lack_episode = season_info.total_episode
else:
lack_episode = len(left_episodes)
logger.info(f'{mediainfo.title_year}{season} 更新缺失集数为{lack_episode} ...')
if update_date:
# 同时更新最后时间
self.subscribeoper.update(subscribe.id, {
"lack_episode": lack_episode,
"last_update": datetime.now().strftime('%Y-%m-%d %H:%M:%S')
})
else:
self.subscribeoper.update(subscribe.id, {
"lack_episode": lack_episode
})
else:
# 判断是否应完成订阅
self.finish_subscribe_or_not(subscribe=subscribe, meta=meta, mediainfo=mediainfo)
def remote_list(self, channel: MessageChannel, userid: Union[str, int] = None):
"""

View File

@@ -2,12 +2,14 @@ from typing import Union
from app.chain import ChainBase
from app.schemas import Notification, MessageChannel
from app.utils.system import SystemUtils
class SystemChain(ChainBase):
"""
系统级处理链
"""
def remote_clear_cache(self, channel: MessageChannel, userid: Union[int, str]):
"""
清理系统缓存
@@ -15,3 +17,11 @@ class SystemChain(ChainBase):
self.clear_cache()
self.post_message(Notification(channel=channel,
title=f"缓存清理完成!", userid=userid))
def restart(self, channel: MessageChannel, userid: Union[int, str]):
    """
    Restart the system after notifying the requesting user.

    :param channel: message channel the command arrived on
    :param userid: id of the user to notify before the restart
    """
    # Notify first: the process goes down immediately afterwards,
    # so a message sent later would never be delivered.
    notification = Notification(
        channel=channel,
        title=f"系统正在重启,请耐心等候!",
        userid=userid,
    )
    self.post_message(notification)
    SystemUtils.restart()

View File

@@ -12,7 +12,7 @@ from app.utils.singleton import Singleton
class TmdbChain(ChainBase, metaclass=Singleton):
"""
TheMovieDB处理链
TheMovieDB处理链,单例运行
"""
def tmdb_discover(self, mtype: MediaType, sort_by: str, with_genres: str,

View File

@@ -1,4 +1,3 @@
import glob
import re
import shutil
import threading
@@ -41,8 +40,8 @@ class TransferChain(ChainBase):
self.downloadhis = DownloadHistoryOper(self._db)
self.transferhis = TransferHistoryOper(self._db)
self.progress = ProgressHelper()
self.mediachain = MediaChain(self._db)
self.tmdbchain = TmdbChain(self._db)
self.mediachain = MediaChain()
self.tmdbchain = TmdbChain()
self.systemconfig = SystemConfigOper()
def process(self) -> bool:
@@ -237,7 +236,7 @@ class TransferChain(ChainBase):
# 自定义识别
if formaterHandler:
# 开始集、结束集、PART
begin_ep, end_ep, part = formaterHandler.split_episode(file_path.stem)
begin_ep, end_ep, part = formaterHandler.split_episode(file_path.name)
if begin_ep is not None:
file_meta.begin_episode = begin_ep
file_meta.part = part
@@ -358,7 +357,9 @@ class TransferChain(ChainBase):
)
# 刮削单个文件
if settings.SCRAP_METADATA:
self.scrape_metadata(path=transferinfo.target_path, mediainfo=file_mediainfo)
self.scrape_metadata(path=transferinfo.target_path,
mediainfo=file_mediainfo,
transfer_type=transfer_type)
# 更新进度
processed_num += 1
self.progress.update(value=processed_num / total_num * 100,
@@ -489,7 +490,7 @@ class TransferChain(ChainBase):
def re_transfer(self, logid: int,
mtype: MediaType = None, tmdbid: int = None) -> Tuple[bool, str]:
"""
根据历史记录,重新识别转移,只处理对应的src目录
根据历史记录,重新识别转移,只支持简单条件
:param logid: 历史记录ID
:param mtype: 媒体类型
:param tmdbid: TMDB ID
@@ -499,7 +500,7 @@ class TransferChain(ChainBase):
if not history:
logger.error(f"历史记录不存在ID{logid}")
return False, "历史记录不存在"
# 没有下载记录,按源目录路径重新转移
# 按源目录路径重新转移
src_path = Path(history.src)
if not src_path.exists():
return False, f"源目录不存在:{src_path}"
@@ -539,9 +540,10 @@ class TransferChain(ChainBase):
season: int = None,
transfer_type: str = None,
epformat: EpisodeFormat = None,
min_filesize: int = 0) -> Tuple[bool, Union[str, list]]:
min_filesize: int = 0,
force: bool = False) -> Tuple[bool, Union[str, list]]:
"""
手动转移
手动转移,支持复杂条件,带进度显示
:param in_path: 源文件路径
:param target: 目标路径
:param tmdbid: TMDB ID
@@ -550,6 +552,7 @@ class TransferChain(ChainBase):
:param transfer_type: 转移类型
:param epformat: 剧集格式
:param min_filesize: 最小文件大小(MB)
:param force: 是否强制转移
"""
logger.info(f"手动转移:{in_path} ...")
@@ -571,7 +574,8 @@ class TransferChain(ChainBase):
target=target,
season=season,
epformat=epformat,
min_filesize=min_filesize
min_filesize=min_filesize,
force=force
)
if not state:
return False, errmsg
@@ -586,7 +590,8 @@ class TransferChain(ChainBase):
transfer_type=transfer_type,
season=season,
epformat=epformat,
min_filesize=min_filesize)
min_filesize=min_filesize,
force=force)
return state, errmsg
def send_transfer_message(self, meta: MetaBase, mediainfo: MediaInfo,

View File

@@ -1,3 +1,4 @@
import importlib
import traceback
from threading import Thread, Event
from typing import Any, Union, Dict
@@ -18,7 +19,6 @@ from app.schemas import Notification
from app.schemas.types import EventType, MessageChannel
from app.utils.object import ObjectUtils
from app.utils.singleton import Singleton
from app.utils.system import SystemUtils
class CommandChian(ChainBase):
@@ -138,7 +138,7 @@ class Command(metaclass=Singleton):
"data": {}
},
"/restart": {
"func": SystemUtils.restart,
"func": SystemChain(self._db).restart,
"description": "重启系统",
"category": "管理",
"data": {}
@@ -175,10 +175,24 @@ class Command(metaclass=Singleton):
for handler in handlers:
try:
names = handler.__qualname__.split(".")
if names[0] == "Command":
self.command_event(event)
[class_name, method_name] = names
if class_name in self.pluginmanager.get_plugin_ids():
# 插件事件
self.pluginmanager.run_plugin_method(class_name, method_name, event)
else:
self.pluginmanager.run_plugin_method(names[0], names[1], event)
# 检查全局变量中是否存在
if class_name not in globals():
# 导入模块除了插件和Command本身只有chain能响应事件
module = importlib.import_module(
f"app.chain.{class_name[:-5].lower()}"
)
class_obj = getattr(module, class_name)()
else:
# 通过类名创建类实例
class_obj = globals()[class_name]()
# 检查类是否存在并调用方法
if hasattr(class_obj, method_name):
getattr(class_obj, method_name)(event)
except Exception as e:
logger.error(f"事件处理出错:{str(e)} - {traceback.format_exc()}")

View File

@@ -1,4 +1,3 @@
import os
import secrets
import sys
from pathlib import Path
@@ -26,6 +25,8 @@ class Settings(BaseSettings):
HOST: str = "0.0.0.0"
# API监听端口
PORT: int = 3001
# 前端监听端口
NGINX_PORT: int = 3000
# 是否调试模式
DEBUG: bool = False
# 是否开发模式

View File

@@ -10,16 +10,13 @@ class EventManager(metaclass=Singleton):
事件管理器
"""
# 事件队列
_eventQueue: Queue = None
# 事件响应函数字典
_handlers: dict = {}
def __init__(self):
# 事件队列
self._eventQueue = Queue()
# 事件响应函数字典
self._handlers = {}
# 已禁用的事件响应
self._disabled_handlers = []
def get_event(self):
"""
@@ -27,11 +24,21 @@ class EventManager(metaclass=Singleton):
"""
try:
event = self._eventQueue.get(block=True, timeout=1)
handlerList = self._handlers.get(event.event_type)
return event, handlerList or []
handlerList = self._handlers.get(event.event_type) or []
if handlerList:
# 去除掉被禁用的事件响应
handlerList = [handler for handler in handlerList
if handler.__qualname__.split(".")[0] not in self._disabled_handlers]
return event, handlerList
except Empty:
return None, []
def check(self, etype: EventType):
"""
检查事件是否存在响应
"""
return etype.value in self._handlers
def add_event_listener(self, etype: EventType, handler: type):
"""
注册事件处理
@@ -45,18 +52,21 @@ class EventManager(metaclass=Singleton):
handlerList.append(handler)
logger.debug(f"Event Registed{etype.value} - {handler}")
def remove_event_listener(self, etype: EventType, handler: type):
def disable_events_hander(self, class_name: str):
"""
移除监听器的处理函数
标记对应类事件处理为不可用
"""
try:
handlerList = self._handlers[etype.value]
if handler in handlerList[:]:
handlerList.remove(handler)
if not handlerList:
del self._handlers[etype.value]
except KeyError:
pass
if class_name not in self._disabled_handlers:
self._disabled_handlers.append(class_name)
logger.debug(f"Event Disabled{class_name}")
def enable_events_hander(self, class_name: str):
"""
标记对应类事件处理为可用
"""
if class_name in self._disabled_handlers:
self._disabled_handlers.remove(class_name)
logger.debug(f"Event Enabled{class_name}")
def send_event(self, etype: EventType, data: dict = None):
"""

View File

@@ -87,6 +87,17 @@ class MetaBase(object):
return self.cn_name
return ""
@name.setter
def name(self, name: str):
    """
    Set the title, routing it to the Chinese or English name field.

    A value made up entirely of Chinese characters is stored as
    ``cn_name``; anything else is stored as ``en_name`` and the
    Chinese name is cleared.
    """
    if not StringUtils.is_all_chinese(name):
        self.en_name = name
        self.cn_name = None
    else:
        self.cn_name = name
def init_subtitle(self, title_text: str):
"""
副标题识别

View File

@@ -61,8 +61,7 @@ class WordsMatcher(metaclass=Singleton):
if state:
appley_words.append(word)
else:
logger.debug(f"自定义识别词替换失败:{message}")
except Exception as err:
print(str(err))

View File

@@ -1,6 +1,7 @@
import traceback
from typing import List, Any, Dict, Tuple
from app.core.event import eventmanager
from app.db.systemconfig_oper import SystemConfigOper
from app.helper.module import ModuleHelper
from app.helper.sites import SitesHelper
@@ -58,6 +59,8 @@ class PluginManager(metaclass=Singleton):
self._plugins[plugin_id] = plugin
# 未安装的不加载
if plugin_id not in installed_plugins:
# 设置事件状态为不可用
eventmanager.disable_events_hander(plugin_id)
continue
# 生成实例
plugin_obj = plugin()
@@ -66,6 +69,8 @@ class PluginManager(metaclass=Singleton):
# 存储运行实例
self._running_plugins[plugin_id] = plugin_obj
logger.info(f"Plugin Loaded{plugin_id}")
# 设置事件注册状态可用
eventmanager.enable_events_hander(plugin_id)
except Exception as err:
logger.error(f"加载插件 {plugin_id} 出错:{err} - {traceback.format_exc()}")
@@ -177,6 +182,12 @@ class PluginManager(metaclass=Singleton):
return None
return getattr(self._running_plugins[pid], method)(*args, **kwargs)
def get_plugin_ids(self) -> List[str]:
"""
获取所有插件ID
"""
return list(self._plugins.keys())
def get_plugin_apps(self) -> List[dict]:
"""
获取所有插件信息

View File

@@ -37,6 +37,12 @@ class Subscribe(Base):
include = Column(String)
# 排除
exclude = Column(String)
# 质量
quality = Column(String)
# 分辨率
resolution = Column(String)
# 特效
effect = Column(String)
# 总集数
total_episode = Column(Integer)
# 开始集数

View File

@@ -1,28 +1,12 @@
import logging
from logging.handlers import RotatingFileHandler
from pathlib import Path
import click
from app.core.config import settings
# logger
logger = logging.getLogger()
if settings.DEBUG:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
# 创建终端输出Handler
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG)
# 创建文件输出Handler
file_handler = RotatingFileHandler(filename=settings.LOG_PATH / 'moviepilot.log',
mode='w',
maxBytes=5 * 1024 * 1024,
backupCount=3,
encoding='utf-8')
file_handler.setLevel(logging.INFO)
# 日志级别颜色
level_name_colors = {
logging.DEBUG: lambda level_name: click.style(str(level_name), fg="cyan"),
logging.INFO: lambda level_name: click.style(str(level_name), fg="green"),
@@ -34,20 +18,40 @@ level_name_colors = {
}
# 定义日志输出格式
class CustomFormatter(logging.Formatter):
    """
    Log formatter that colorizes the level name and replaces the
    uninformative ``__init__.py`` filename with its package name.
    """

    def format(self, record):
        # Pad so messages line up regardless of level-name length (max 8 chars).
        padding = " " * (8 - len(record.levelname))
        colorize = level_name_colors[record.levelno]
        record.leveltext = colorize(record.levelname + ":") + padding
        # Show the package directory name instead of "__init__.py".
        if record.filename == "__init__.py":
            record.filename = Path(record.pathname).parent.name
        return super().format(record)
# DEBUG
logger = logging.getLogger()
if settings.DEBUG:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
# 终端日志
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG)
console_formatter = CustomFormatter("%(leveltext)s%(filename)s - %(message)s")
console_handler.setFormatter(console_formatter)
logger.addHandler(console_handler)
# 文件日志
file_handler = RotatingFileHandler(filename=settings.LOG_PATH / 'moviepilot.log',
mode='w',
maxBytes=5 * 1024 * 1024,
backupCount=3,
encoding='utf-8')
file_handler.setLevel(logging.INFO)
file_formater = CustomFormatter("%(levelname)s%(asctime)s - %(filename)s - %(message)s")
file_handler.setFormatter(file_formater)
logger.addHandler(file_handler)

View File

@@ -1,10 +1,21 @@
import multiprocessing
import os
import sys
import threading
import uvicorn as uvicorn
from PIL import Image
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from uvicorn import Config
from app.utils.system import SystemUtils
# 禁用输出
if SystemUtils.is_frozen():
sys.stdout = open(os.devnull, 'w')
sys.stderr = open(os.devnull, 'w')
from app.command import Command
from app.core.config import settings
from app.core.module import ModuleManager
@@ -44,6 +55,82 @@ def init_routers():
App.include_router(arr_router, prefix="/api/v3")
def start_frontend():
    """
    Launch the bundled nginx frontend (frozen/packaged builds only).

    No-op when running from source or when the packaged ``nginx``
    directory is missing.
    """
    # Only packaged (frozen) builds ship an nginx directory.
    if not SystemUtils.is_frozen():
        return
    nginx_path = settings.ROOT_PATH / 'nginx'
    if not nginx_path.exists():
        return
    import subprocess
    if SystemUtils.is_windows():
        command = "start nginx.exe"
    else:
        command = "nohup ./nginx &"
    # Fire-and-forget: nginx daemonizes itself, nothing to wait on.
    subprocess.Popen(command, cwd=nginx_path, shell=True)
def stop_frontend():
    """
    Stop the bundled nginx frontend (frozen/packaged builds only).

    Best-effort: the kill command is spawned and not waited on,
    mirroring how the frontend is started.
    """
    if not SystemUtils.is_frozen():
        return
    import subprocess
    # f-prefixes removed: both commands are plain constant strings (ruff F541)
    if SystemUtils.is_windows():
        subprocess.Popen("taskkill /f /im nginx.exe", shell=True)
    else:
        subprocess.Popen("killall nginx", shell=True)
def start_tray():
    """
    Show the system-tray icon (frozen/packaged builds only).

    The tray menu offers opening the web UI in a browser and quitting
    the application; the icon itself runs on a daemon thread so it
    never blocks startup or shutdown.
    """
    if not SystemUtils.is_frozen():
        return

    def open_web():
        """
        Open the frontend page in the default browser.
        """
        import webbrowser
        webbrowser.open(f"http://localhost:{settings.NGINX_PORT}")

    def quit_app():
        """
        Stop the tray icon and ask the API server to exit.
        """
        TrayIcon.stop()
        Server.should_exit = True

    import pystray

    # Tray icon with an "open" and an "exit" entry
    menu_items = (
        pystray.MenuItem('打开', open_web),
        pystray.MenuItem('退出', quit_app),
    )
    TrayIcon = pystray.Icon(
        settings.PROJECT_NAME,
        icon=Image.open(settings.ROOT_PATH / 'app.ico'),
        menu=pystray.Menu(*menu_items)
    )
    # Daemon thread: the tray must not keep the process alive on exit.
    threading.Thread(target=TrayIcon.run, daemon=True).start()
@App.on_event("shutdown")
def shutdown_server():
"""
@@ -59,6 +146,8 @@ def shutdown_server():
DisplayHelper().stop()
# 停止定时服务
Scheduler().stop()
# 停止前端服务
stop_frontend()
@App.on_event("startup")
@@ -66,7 +155,7 @@ def start_module():
"""
启动模块
"""
# 虚显示
# 虚显示
DisplayHelper()
# 站点管理
SitesHelper()
@@ -80,12 +169,16 @@ def start_module():
Command()
# 初始化路由
init_routers()
# 启动前端服务
start_frontend()
if __name__ == '__main__':
# 启动托盘
start_tray()
# 初始化数据库
init_db()
# 更新数据库
update_db()
# 启动服务
# 启动API服务
Server.run()

View File

@@ -1,4 +1,4 @@
from datetime import datetime
import re
from pathlib import Path
from typing import List, Optional, Tuple, Union
@@ -369,6 +369,16 @@ class DoubanModule(_ModuleBase):
return []
return infos.get("subject_collection_items")
def tv_animation(self, page: int = 1, count: int = 30) -> List[dict]:
    """
    Fetch one page of the Douban animation-TV collection.

    :param page: 1-based page number
    :param count: number of items per page
    :return: list of collection items, or [] when the API returned nothing
    """
    offset = (page - 1) * count
    infos = self.doubanapi.tv_animation(start=offset, count=count)
    if not infos:
        return []
    return infos.get("subject_collection_items")
def search_medias(self, meta: MetaBase) -> Optional[List[MediaInfo]]:
"""
搜索媒体信息
@@ -396,17 +406,30 @@ class DoubanModule(_ModuleBase):
return ret_medias
@retry(Exception, 5, 3, 3, logger=logger)
def match_doubaninfo(self, name: str, mtype: str = None,
year: str = None, season: int = None) -> dict:
def match_doubaninfo(self, name: str, imdbid: str = None,
mtype: str = None, year: str = None, season: int = None) -> dict:
"""
搜索和匹配豆瓣信息
:param name: 名称
:param imdbid: IMDB ID
:param mtype: 类型 电影/电视剧
:param year: 年份
:param season: 季号
"""
result = self.doubanapi.search(f"{name} {year or ''}".strip(),
ts=datetime.strftime(datetime.now(), '%Y%m%d%H%M%S'))
if imdbid:
# 优先使用IMDBID查询
logger.info(f"开始使用IMDBID {imdbid} 查询豆瓣信息 ...")
result = self.doubanapi.imdbid(imdbid)
if result:
doubanid = result.get("id")
if doubanid and not str(doubanid).isdigit():
doubanid = re.search(r"\d+", doubanid).group(0)
result["id"] = doubanid
logger.info(f"{imdbid} 查询到豆瓣信息:{result.get('title')}")
return result
# 搜索
logger.info(f"开始使用名称 {name} 匹配豆瓣信息 ...")
result = self.doubanapi.search(f"{name} {year or ''}".strip())
if not result:
logger.warn(f"未找到 {name} 的豆瓣信息")
return {}
@@ -433,6 +456,7 @@ class DoubanModule(_ModuleBase):
if meta.name == name \
and ((not season and not meta.begin_season) or meta.begin_season == season) \
and (not year or item.get('year') == year):
logger.info(f"{name} 匹配到豆瓣信息:{item.get('id')} {item.get('title')}")
return item
return {}
@@ -446,11 +470,12 @@ class DoubanModule(_ModuleBase):
return []
return infos.get("subject_collection_items")
def scrape_metadata(self, path: Path, mediainfo: MediaInfo) -> None:
def scrape_metadata(self, path: Path, mediainfo: MediaInfo, transfer_type: str) -> None:
"""
刮削元数据
:param path: 媒体文件路径
:param mediainfo: 识别的媒体信息
:param transfer_type: 传输类型
:return: 成功或失败
"""
if settings.SCRAP_SOURCE != "douban":
@@ -463,16 +488,21 @@ class DoubanModule(_ModuleBase):
return
# 根据名称查询豆瓣数据
doubaninfo = self.match_doubaninfo(name=mediainfo.title,
imdbid=mediainfo.imdb_id,
mtype=mediainfo.type.value,
year=mediainfo.year,
season=meta.begin_season)
if not doubaninfo:
logger.warn(f"未找到 {mediainfo.title} 的豆瓣信息")
return
# 查询豆瓣详情
doubaninfo = self.douban_info(doubaninfo.get("id"))
# 刮削路径
scrape_path = path / path.name
self.scraper.gen_scraper_files(meta=meta,
mediainfo=MediaInfo(douban_info=doubaninfo),
file_path=scrape_path)
file_path=scrape_path,
transfer_type=transfer_type)
else:
# 目录下的所有文件
for file in SystemUtils.list_files(path, settings.RMT_MEDIAEXT):
@@ -485,16 +515,20 @@ class DoubanModule(_ModuleBase):
continue
# 根据名称查询豆瓣数据
doubaninfo = self.match_doubaninfo(name=mediainfo.title,
imdbid=mediainfo.imdb_id,
mtype=mediainfo.type.value,
year=mediainfo.year,
season=meta.begin_season)
if not doubaninfo:
logger.warn(f"未找到 {mediainfo.title} 的豆瓣信息")
break
# 查询豆瓣详情
doubaninfo = self.douban_info(doubaninfo.get("id"))
# 刮削
self.scraper.gen_scraper_files(meta=meta,
mediainfo=MediaInfo(douban_info=doubaninfo),
file_path=file)
file_path=file,
transfer_type=transfer_type)
except Exception as e:
logger.error(f"刮削文件 {file} 失败,原因:{e}")
logger.info(f"{path} 刮削完成")

View File

@@ -18,28 +18,29 @@ class DoubanApi(metaclass=Singleton):
_urls = {
# 搜索类
# sort=U:近期热门 T:标记最多 S:评分最高 R:最新上映
# q=search_word&start=0&count=20&sort=U
# q=search_word&start: int = 0&count: int = 20&sort=U
# 聚合搜索
"search": "/search/weixin",
"search_agg": "/search",
"imdbid": "/movie/imdb/%s",
# 电影探索
# sort=U:综合排序 T:近期热度 S:高分优先 R:首播时间
# tags='日本,动画,2022'&start=0&count=20&sort=U
# tags='日本,动画,2022'&start: int = 0&count: int = 20&sort=U
"movie_recommend": "/movie/recommend",
# 电视剧探索
"tv_recommend": "/tv/recommend",
# 搜索
"movie_tag": "/movie/tag",
"tv_tag": "/tv/tag",
# q=search_word&start=0&count=20
# q=search_word&start: int = 0&count: int = 20
"movie_search": "/search/movie",
"tv_search": "/search/movie",
"book_search": "/search/book",
"group_search": "/search/group",
# 各类主题合集
# start=0&count=20
# start: int = 0&count: int = 20
# 正在上映
"movie_showing": "/subject_collection/movie_showing/items",
# 热门电影
@@ -145,7 +146,9 @@ class DoubanApi(metaclass=Singleton):
"api-client/1 com.douban.frodo/7.3.0(207) Android/22 product/MI 9 vendor/Xiaomi model/MI 9 brand/Android rom/miui6 network/wifi platform/mobile nd/1"]
_api_secret_key = "bf7dddc7c9cfe6f7"
_api_key = "0dad551ec0f84ed02907ff5c42e8ec70"
_api_key2 = "0ab215a8b1977939201640fa14c66bab"
_base_url = "https://frodo.douban.com/api/v2"
_api_url = "https://api.douban.com/v2"
_session = None
def __init__(self):
@@ -153,6 +156,9 @@ class DoubanApi(metaclass=Singleton):
@classmethod
def __sign(cls, url: str, ts: int, method='GET') -> str:
"""
签名
"""
url_path = parse.urlparse(url).path
raw_sign = '&'.join([method.upper(), parse.quote(url_path, safe=''), str(ts)])
return base64.b64encode(
@@ -164,7 +170,10 @@ class DoubanApi(metaclass=Singleton):
).decode()
@lru_cache(maxsize=settings.CACHE_CONF.get('douban'))
def __invoke(self, url, **kwargs):
def __invoke(self, url: str, **kwargs) -> dict:
"""
GET请求
"""
req_url = self._base_url + url
params = {'apiKey': self._api_key}
@@ -189,119 +198,224 @@ class DoubanApi(metaclass=Singleton):
return resp.json()
return resp.json() if resp else {}
def search(self, keyword, start=0, count=20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
@lru_cache(maxsize=settings.CACHE_CONF.get('douban'))
def __post(self, url: str, **kwargs) -> dict:
    """
    Send a POST request to the api.douban.com v2 endpoint.

    :param url: path part of the endpoint (appended to ``_api_url``)
    :param kwargs: form parameters; ``_ts`` is accepted for daily
                   lru_cache busting but stripped before sending
    :return: decoded JSON body, or {} when the request failed
    """
    req_url = self._api_url + url
    params = {'apikey': self._api_key2}
    if kwargs:
        params.update(kwargs)
    # _ts only exists to expire the cache once a day; the API does not take it
    if '_ts' in params:
        params.pop('_ts')
    resp = RequestUtils(
        ua=settings.USER_AGENT,
        session=self._session,
    ).post_res(url=req_url, data=params)
    # Guard against a failed request BEFORE touching status_code --
    # the original order raised AttributeError when post_res returned None.
    if resp is None:
        return {}
    if resp.status_code == 400 and "rate_limit" in resp.text:
        # Rate-limited responses still carry a JSON error payload
        return resp.json()
    # presumably a falsy resp means an error status (requests semantics) --
    # keep the original fallback to {}
    return resp.json() if resp else {}
def search(self, keyword: str, start: int = 0, count: int = 20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')) -> dict:
"""
关键字搜索
"""
return self.__invoke(self._urls["search"], q=keyword,
start=start, count=count, _ts=ts)
def movie_search(self, keyword, start=0, count=20,
def imdbid(self, imdbid: str,
           ts=datetime.strftime(datetime.now(), '%Y%m%d')):
    """
    Look up a Douban subject by its IMDB id.

    :param imdbid: IMDB id, e.g. ``tt0111161``
    :param ts: date stamp whose only purpose is expiring the daily cache
    """
    # POST endpoint template: /movie/imdb/%s
    endpoint = self._urls["imdbid"] % imdbid
    return self.__post(endpoint, _ts=ts)
def movie_search(self, keyword: str, start: int = 0, count: int = 20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
"""
电影搜索
"""
return self.__invoke(self._urls["movie_search"], q=keyword,
start=start, count=count, _ts=ts)
def tv_search(self, keyword, start=0, count=20,
def tv_search(self, keyword: str, start: int = 0, count: int = 20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
"""
电视搜索
"""
return self.__invoke(self._urls["tv_search"], q=keyword,
start=start, count=count, _ts=ts)
def book_search(self, keyword, start=0, count=20,
def book_search(self, keyword: str, start: int = 0, count: int = 20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
"""
书籍搜索
"""
return self.__invoke(self._urls["book_search"], q=keyword,
start=start, count=count, _ts=ts)
def group_search(self, keyword, start=0, count=20,
def group_search(self, keyword: str, start: int = 0, count: int = 20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
"""
小组搜索
"""
return self.__invoke(self._urls["group_search"], q=keyword,
start=start, count=count, _ts=ts)
def movie_showing(self, start=0, count=20,
def movie_showing(self, start: int = 0, count: int = 20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
"""
正在热映
"""
return self.__invoke(self._urls["movie_showing"],
start=start, count=count, _ts=ts)
def movie_soon(self, start=0, count=20,
def movie_soon(self, start: int = 0, count: int = 20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
"""
即将上映
"""
return self.__invoke(self._urls["movie_soon"],
start=start, count=count, _ts=ts)
def movie_hot_gaia(self, start=0, count=20,
def movie_hot_gaia(self, start: int = 0, count: int = 20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
"""
热门电影
"""
return self.__invoke(self._urls["movie_hot_gaia"],
start=start, count=count, _ts=ts)
def tv_hot(self, start=0, count=20,
def tv_hot(self, start: int = 0, count: int = 20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
"""
热门剧集
"""
return self.__invoke(self._urls["tv_hot"],
start=start, count=count, _ts=ts)
def tv_animation(self, start=0, count=20,
def tv_animation(self, start: int = 0, count: int = 20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
"""
动画
"""
return self.__invoke(self._urls["tv_animation"],
start=start, count=count, _ts=ts)
def tv_variety_show(self, start=0, count=20,
def tv_variety_show(self, start: int = 0, count: int = 20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
"""
综艺
"""
return self.__invoke(self._urls["tv_variety_show"],
start=start, count=count, _ts=ts)
def tv_rank_list(self, start=0, count=20,
def tv_rank_list(self, start: int = 0, count: int = 20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
"""
电视剧排行榜
"""
return self.__invoke(self._urls["tv_rank_list"],
start=start, count=count, _ts=ts)
def show_hot(self, start=0, count=20,
def show_hot(self, start: int = 0, count: int = 20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
"""
综艺热门
"""
return self.__invoke(self._urls["show_hot"],
start=start, count=count, _ts=ts)
def movie_detail(self, subject_id):
def movie_detail(self, subject_id: str):
"""
电影详情
"""
return self.__invoke(self._urls["movie_detail"] + subject_id)
def movie_celebrities(self, subject_id):
def movie_celebrities(self, subject_id: str):
"""
电影演职员
"""
return self.__invoke(self._urls["movie_celebrities"] % subject_id)
def tv_detail(self, subject_id):
def tv_detail(self, subject_id: str):
"""
电视剧详情
"""
return self.__invoke(self._urls["tv_detail"] + subject_id)
def tv_celebrities(self, subject_id):
def tv_celebrities(self, subject_id: str):
"""
电视剧演职员
"""
return self.__invoke(self._urls["tv_celebrities"] % subject_id)
def book_detail(self, subject_id):
def book_detail(self, subject_id: str):
"""
书籍详情
"""
return self.__invoke(self._urls["book_detail"] + subject_id)
def movie_top250(self, start=0, count=20,
def movie_top250(self, start: int = 0, count: int = 20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
"""
电影TOP250
"""
return self.__invoke(self._urls["movie_top250"],
start=start, count=count, _ts=ts)
def movie_recommend(self, tags='', sort='R', start=0, count=20,
def movie_recommend(self, tags='', sort='R', start: int = 0, count: int = 20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
"""
电影探索
"""
return self.__invoke(self._urls["movie_recommend"], tags=tags, sort=sort,
start=start, count=count, _ts=ts)
def tv_recommend(self, tags='', sort='R', start=0, count=20,
def tv_recommend(self, tags='', sort='R', start: int = 0, count: int = 20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
"""
电视剧探索
"""
return self.__invoke(self._urls["tv_recommend"], tags=tags, sort=sort,
start=start, count=count, _ts=ts)
def tv_chinese_best_weekly(self, start=0, count=20,
def tv_chinese_best_weekly(self, start: int = 0, count: int = 20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
"""
华语口碑周榜
"""
return self.__invoke(self._urls["tv_chinese_best_weekly"],
start=start, count=count, _ts=ts)
def tv_global_best_weekly(self, start=0, count=20,
def tv_global_best_weekly(self, start: int = 0, count: int = 20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
"""
全球口碑周榜
"""
return self.__invoke(self._urls["tv_global_best_weekly"],
start=start, count=count, _ts=ts)
def doulist_detail(self, subject_id):
def doulist_detail(self, subject_id: str):
"""
豆列详情
:param subject_id: 豆列id
"""
return self.__invoke(self._urls["doulist"] + subject_id)
def doulist_items(self, subject_id, start=0, count=20,
def doulist_items(self, subject_id: str, start: int = 0, count: int = 20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
"""
豆列列表

View File

@@ -1,25 +1,34 @@
import time
from pathlib import Path
from typing import Union
from xml.dom import minidom
from app.core.config import settings
from app.core.context import MediaInfo
from app.core.meta import MetaBase
from app.log import logger
from app.schemas.types import MediaType
from app.utils.dom import DomUtils
from app.utils.http import RequestUtils
from app.utils.system import SystemUtils
class DoubanScraper:
def gen_scraper_files(self, meta: MetaBase, mediainfo: MediaInfo, file_path: Path):
_transfer_type = settings.TRANSFER_TYPE
def gen_scraper_files(self, meta: MetaBase, mediainfo: MediaInfo,
file_path: Path, transfer_type: str):
"""
生成刮削文件
:param meta: 元数据
:param mediainfo: 媒体信息
:param file_path: 文件路径或者目录路径
:param transfer_type: 转输类型
"""
self._transfer_type = transfer_type
try:
# 电影
if mediainfo.type == MediaType.MOVIE:
@@ -154,31 +163,55 @@ class DoubanScraper:
# 保存
self.__save_nfo(doc, season_path.joinpath("season.nfo"))
@staticmethod
def __save_image(url: str, file_path: Path):
def __save_image(self, url: str, file_path: Path):
"""
下载图片并保存
"""
if file_path.exists():
return
if not url:
return
try:
# 没有后缀时处理URL转化为jpg格式
if not file_path.suffix:
url = url.replace("/format/webp", "/format/jpg")
file_path.with_suffix(".jpg")
logger.info(f"正在下载{file_path.stem}图片:{url} ...")
r = RequestUtils().get_res(url=url)
if r:
file_path.write_bytes(r.content)
if self._transfer_type in ['rclone_move', 'rclone_copy']:
self.__save_remove_file(file_path, r.content)
else:
file_path.write_bytes(r.content)
logger.info(f"图片已保存:{file_path}")
else:
logger.info(f"{file_path.stem}图片下载失败,请检查网络连通性")
except Exception as err:
logger.error(f"{file_path.stem}图片下载失败:{err}")
@staticmethod
def __save_nfo(doc, file_path: Path):
def __save_nfo(self, doc, file_path: Path):
"""
保存NFO
"""
if file_path.exists():
return
xml_str = doc.toprettyxml(indent=" ", encoding="utf-8")
file_path.write_bytes(xml_str)
if self._transfer_type in ['rclone_move', 'rclone_copy']:
self.__save_remove_file(file_path, xml_str)
else:
file_path.write_bytes(xml_str)
logger.info(f"NFO文件已保存{file_path}")
def __save_remove_file(self, out_file: Path, content: Union[str, bytes]):
    """
    Stage content in a local temp file, then push it to the remote
    destination via rclone.

    :param out_file: remote destination path
    :param content: file payload; str is encoded as UTF-8
    """
    # Mirror the destination path under TEMP_PATH (strip the leading '/')
    temp_file = settings.TEMP_PATH / str(out_file)[1:]
    temp_file_dir = temp_file.parent
    if not temp_file_dir.exists():
        temp_file_dir.mkdir(parents=True, exist_ok=True)
    # The signature allows str, but write_bytes() requires bytes --
    # encode so a str payload no longer raises TypeError.
    if isinstance(content, str):
        content = content.encode('utf-8')
    temp_file.write_bytes(content)
    if self._transfer_type == 'rclone_move':
        SystemUtils.rclone_move(temp_file, out_file)
    elif self._transfer_type == 'rclone_copy':
        SystemUtils.rclone_copy(temp_file, out_file)

View File

@@ -23,7 +23,7 @@ class Emby(metaclass=Singleton):
if not self._host.startswith("http"):
self._host = "http://" + self._host
self._apikey = settings.EMBY_API_KEY
self.user = self.get_user()
self.user = self.get_user(settings.SUPERUSER)
self.folders = self.get_emby_folders()
def is_inactive(self) -> bool:
@@ -800,7 +800,7 @@ class Emby(metaclass=Singleton):
eventType = message.get('Event')
if not eventType:
return None
logger.info(f"接收到emby webhook{message}")
logger.debug(f"接收到emby webhook{message}")
eventItem = schemas.WebhookEventInfo(event=eventType, channel="emby")
if message.get('Item'):
if message.get('Item', {}).get('Type') == 'Episode':

View File

@@ -45,7 +45,8 @@ class FileTransferModule(_ModuleBase):
# 获取目标路径
if not target:
target = self.get_target_path(in_path=path)
else:
elif not target.exists() or target.is_file():
# 目的路径不存在或者是文件时,找对应的媒体库目录
target = self.get_library_path(target)
if not target:
logger.error("未找到媒体库目录,无法转移文件")
@@ -80,6 +81,12 @@ class FileTransferModule(_ModuleBase):
elif transfer_type == 'move':
# 移动
retcode, retmsg = SystemUtils.move(file_item, target_file)
elif transfer_type == 'rclone_move':
# Rclone 移动
retcode, retmsg = SystemUtils.rclone_move(file_item, target_file)
elif transfer_type == 'rclone_copy':
# Rclone 复制
retcode, retmsg = SystemUtils.rclone_copy(file_item, target_file)
else:
# 复制
retcode, retmsg = SystemUtils.copy(file_item, target_file)
@@ -376,10 +383,12 @@ class FileTransferModule(_ModuleBase):
path=in_path,
message=f"{in_path} 路径不存在")
if not target_dir.exists():
return TransferInfo(success=False,
path=in_path,
message=f"{target_dir} 目标路径不存在")
if transfer_type not in ['rclone_copy', 'rclone_move']:
# 检查目标路径
if not target_dir.exists():
return TransferInfo(success=False,
path=in_path,
message=f"{target_dir} 目标路径不存在")
# 媒体库目的目录
target_dir = self.__get_dest_dir(mediainfo=mediainfo, target_dir=target_dir)
@@ -395,6 +404,8 @@ class FileTransferModule(_ModuleBase):
bluray_flag = SystemUtils.is_bluray_dir(in_path)
if bluray_flag:
logger.info(f"{in_path} 是蓝光原盘文件夹")
# 原文件大小
file_size = in_path.stat().st_size
# 目的路径
new_path = self.get_rename_path(
path=target_dir,
@@ -419,7 +430,7 @@ class FileTransferModule(_ModuleBase):
return TransferInfo(success=True,
path=in_path,
target_path=new_path,
total_size=new_path.stat().st_size,
total_size=file_size,
is_bluray=bluray_flag)
else:
# 转移单个文件
@@ -460,7 +471,8 @@ class FileTransferModule(_ModuleBase):
if new_file.stat().st_size < in_path.stat().st_size:
logger.info(f"目标文件已存在,但文件大小更小,将覆盖:{new_file}")
overflag = True
# 原文件大小
file_size = in_path.stat().st_size
# 转移文件
retcode = self.__transfer_file(file_item=in_path,
new_file=new_file,
@@ -479,7 +491,7 @@ class FileTransferModule(_ModuleBase):
path=in_path,
target_path=new_file,
file_count=1,
total_size=new_file.stat().st_size,
total_size=file_size,
is_bluray=False,
file_list=[str(in_path)],
file_list_new=[str(new_file)])
@@ -565,7 +577,7 @@ class FileTransferModule(_ModuleBase):
@staticmethod
def get_library_path(path: Path):
"""
根据目录查询其所在的媒体库目录,查询不到的返回输入目录
根据文件路径查询其所在的媒体库目录,查询不到的返回输入目录
"""
if not path:
return None

View File

@@ -70,16 +70,26 @@ class FilterModule(_ModuleBase):
"include": [r'[Hx].?264|AVC'],
"exclude": []
},
# 杜比
# 杜比视界
"DOLBY": {
"include": [r"Dolby[\s.]+Vision|DOVI|[\s.]+DV[\s.]+|杜比视界"],
"exclude": []
},
# 杜比全景声
"ATMOS": {
"include": [r"Dolby[\s.+]+Atmos|Atmos|杜比全景[声聲]"],
"exclude": []
},
# HDR
"HDR": {
"include": [r"[\s.]+HDR[\s.]+|HDR10|HDR10\+"],
"exclude": []
},
# SDR
"SDR": {
"include": [r"[\s.]+SDR[\s.]+"],
"exclude": []
},
# 重编码
"REMUX": {
"include": [r'REMUX'],
@@ -98,7 +108,22 @@ class FilterModule(_ModuleBase):
"CNVOI": {
"include": [r'[国國][语語]配音|[国國]配|[国國][语語]'],
"exclude": []
}
},
# 粤语配音
"HKVOI": {
"include": [r'粤语配音|粤语'],
"exclude": []
},
# 60FPS
"60FPS": {
"include": [r'60fps'],
"exclude": []
},
# 3D
"3D": {
"include": [r'3D'],
"exclude": []
},
}
def init_module(self) -> None:

View File

@@ -21,7 +21,7 @@ class Jellyfin(metaclass=Singleton):
if not self._host.startswith("http"):
self._host = "http://" + self._host
self._apikey = settings.JELLYFIN_API_KEY
self.user = self.get_user()
self.user = self.get_user(settings.SUPERUSER)
self.serverid = self.get_server_id()
def is_inactive(self) -> bool:
@@ -452,7 +452,7 @@ class Jellyfin(metaclass=Singleton):
return None
if not message:
return None
logger.info(f"接收到jellyfin webhook{message}")
logger.debug(f"接收到jellyfin webhook{message}")
eventType = message.get('NotificationType')
if not eventType:
return None

View File

@@ -492,7 +492,7 @@ class Plex(metaclass=Singleton):
eventType = message.get('event')
if not eventType:
return None
logger.info(f"接收到plex webhook{message}")
logger.debug(f"接收到plex webhook{message}")
eventItem = schemas.WebhookEventInfo(event=eventType, channel="plex")
if message.get('Metadata'):
if message.get('Metadata', {}).get('type') == 'episode':

View File

@@ -63,7 +63,10 @@ class TheMovieDbModule(_ModuleBase):
# 直接查询详情
info = self.tmdb.get_info(mtype=mtype, tmdbid=tmdbid)
elif meta:
logger.info(f"正在识别 {meta.name} ...")
if meta.begin_season:
logger.info(f"正在识别 {meta.name}{meta.begin_season}季 ...")
else:
logger.info(f"正在识别 {meta.name} ...")
if meta.type == MediaType.UNKNOWN and not meta.year:
info = self.tmdb.match_multi(meta.name)
else:
@@ -184,11 +187,12 @@ class TheMovieDbModule(_ModuleBase):
return [MediaInfo(tmdb_info=info) for info in results]
def scrape_metadata(self, path: Path, mediainfo: MediaInfo) -> None:
def scrape_metadata(self, path: Path, mediainfo: MediaInfo, transfer_type: str) -> None:
"""
刮削元数据
:param path: 媒体文件路径
:param mediainfo: 识别的媒体信息
:param transfer_type: 转移类型
:return: 成功或失败
"""
if settings.SCRAP_SOURCE != "themoviedb":
@@ -199,12 +203,14 @@ class TheMovieDbModule(_ModuleBase):
logger.info(f"开始刮削蓝光原盘:{path} ...")
scrape_path = path / path.name
self.scraper.gen_scraper_files(mediainfo=mediainfo,
file_path=scrape_path)
file_path=scrape_path,
transfer_type=transfer_type)
elif path.is_file():
# 单个文件
logger.info(f"开始刮削媒体库文件:{path} ...")
self.scraper.gen_scraper_files(mediainfo=mediainfo,
file_path=path)
file_path=path,
transfer_type=transfer_type)
else:
# 目录下的所有文件
logger.info(f"开始刮削目录:{path} ...")
@@ -212,7 +218,8 @@ class TheMovieDbModule(_ModuleBase):
if not file:
continue
self.scraper.gen_scraper_files(mediainfo=mediainfo,
file_path=file)
file_path=file,
transfer_type=transfer_type)
logger.info(f"{path} 刮削完成")
def tmdb_discover(self, mtype: MediaType, sort_by: str, with_genres: str, with_original_language: str,
@@ -280,6 +287,8 @@ class TheMovieDbModule(_ModuleBase):
:param mediainfo: 识别的媒体信息
:return: 更新后的媒体信息
"""
if not mediainfo.tmdb_id:
return mediainfo
if mediainfo.logo_path \
and mediainfo.poster_path \
and mediainfo.backdrop_path:

View File

@@ -1,5 +1,6 @@
import time
from pathlib import Path
from typing import Union
from xml.dom import minidom
from requests import RequestException
@@ -12,21 +13,26 @@ from app.schemas.types import MediaType
from app.utils.common import retry
from app.utils.dom import DomUtils
from app.utils.http import RequestUtils
from app.utils.system import SystemUtils
class TmdbScraper:
tmdb = None
_transfer_type = settings.TRANSFER_TYPE
def __init__(self, tmdb):
self.tmdb = tmdb
def gen_scraper_files(self, mediainfo: MediaInfo, file_path: Path):
def gen_scraper_files(self, mediainfo: MediaInfo, file_path: Path, transfer_type: str):
"""
生成刮削文件包括NFO和图片传入路径为文件路径
:param mediainfo: 媒体信息
:param file_path: 文件路径或者目录路径
:param transfer_type: 传输类型
"""
self._transfer_type = transfer_type
def __get_episode_detail(_seasoninfo: dict, _episode: int):
"""
根据季信息获取集的信息
@@ -328,9 +334,8 @@ class TmdbScraper:
# 保存文件
self.__save_nfo(doc, file_path.with_suffix(".nfo"))
@staticmethod
@retry(RequestException, logger=logger)
def __save_image(url: str, file_path: Path):
def __save_image(self, url: str, file_path: Path):
"""
下载图片并保存
"""
@@ -340,7 +345,10 @@ class TmdbScraper:
logger.info(f"正在下载{file_path.stem}图片:{url} ...")
r = RequestUtils().get_res(url=url, raise_exception=True)
if r:
file_path.write_bytes(r.content)
if self._transfer_type in ['rclone_move', 'rclone_copy']:
self.__save_remove_file(file_path, r.content)
else:
file_path.write_bytes(r.content)
logger.info(f"图片已保存:{file_path}")
else:
logger.info(f"{file_path.stem}图片下载失败,请检查网络连通性")
@@ -349,13 +357,29 @@ class TmdbScraper:
except Exception as err:
logger.error(f"{file_path.stem}图片下载失败:{err}")
@staticmethod
def __save_nfo(doc, file_path: Path):
def __save_nfo(self, doc, file_path: Path):
"""
保存NFO
"""
if file_path.exists():
return
xml_str = doc.toprettyxml(indent=" ", encoding="utf-8")
file_path.write_bytes(xml_str)
if self._transfer_type in ['rclone_move', 'rclone_copy']:
self.__save_remove_file(file_path, xml_str)
else:
file_path.write_bytes(xml_str)
logger.info(f"NFO文件已保存{file_path}")
def __save_remove_file(self, out_file: Path, content: Union[str, bytes]):
"""
保存文件到远端
"""
temp_file = settings.TEMP_PATH / str(out_file)[1:]
temp_file_dir = temp_file.parent
if not temp_file_dir.exists():
temp_file_dir.mkdir(parents=True, exist_ok=True)
temp_file.write_bytes(content)
if self._transfer_type == 'rclone_move':
SystemUtils.rclone_move(temp_file, out_file)
elif self._transfer_type == 'rclone_copy':
SystemUtils.rclone_copy(temp_file, out_file)

View File

@@ -172,6 +172,9 @@ class TMDb(object):
else:
req = self.request(method, url, data, json)
if req is None:
raise TMDbException("Failed to establish a new connection: no response from the server.")
headers = req.headers
if "X-RateLimit-Remaining" in headers:

View File

@@ -92,7 +92,7 @@ class AutoSignIn(_PluginBase):
self._clean = config.get("clean")
# 过滤掉已删除的站点
all_sites = [site for site in self.sites.get_indexers() if not site.get("public")]
all_sites = [site for site in self.sites.get_indexers() if not site.get("public")] + self.__custom_sites()
self._sign_sites = [site.get("id") for site in all_sites if site.get("id") in self._sign_sites]
self._login_sites = [site.get("id") for site in all_sites if site.get("id") in self._login_sites]
# 保存配置
@@ -244,9 +244,13 @@ class AutoSignIn(_PluginBase):
"""
拼装插件配置页面需要返回两块数据1、页面配置2、数据结构
"""
# 站点的可选项
site_options = [{"title": site.name, "value": site.id}
for site in Site.list_order_by_pri(self.db)]
# 站点的可选项(内置站点 + 自定义站点)
customSites = self.__custom_sites()
site_options = ([{"title": site.name, "value": site.id}
for site in Site.list_order_by_pri(self.db)]
+ [{"title": site.get("name"), "value": site.get("id")}
for site in customSites])
return [
{
'component': 'VForm',
@@ -452,6 +456,13 @@ class AutoSignIn(_PluginBase):
"retry_keyword": "错误|失败"
}
def __custom_sites(self) -> List[dict]:
custom_sites = []
custom_sites_config = self.get_config("CustomSites")
if custom_sites_config and custom_sites_config.get("enabled"):
custom_sites = custom_sites_config.get("sites")
return custom_sites
def get_page(self) -> List[dict]:
"""
拼装插件详情页面,需要返回页面配置,同时附带数据
@@ -590,7 +601,7 @@ class AutoSignIn(_PluginBase):
today_history = self.get_data(key=type + "-" + today)
# 查询所有站点
all_sites = [site for site in self.sites.get_indexers() if not site.get("public")]
all_sites = [site for site in self.sites.get_indexers() if not site.get("public")] + self.__custom_sites()
# 过滤掉没有选中的站点
if do_sites:
do_sites = [site for site in all_sites if site.get("id") in do_sites]

View File

@@ -747,7 +747,7 @@ class BrushFlow(_PluginBase):
def get_page(self) -> List[dict]:
# 种子明细
data_list = self.get_data("torrents") or {}
torrents = self.get_data("torrents") or {}
# 统计数据
stattistic_data: Dict[str, dict] = self.get_data("statistic") or {
"count": 0,
@@ -755,7 +755,7 @@ class BrushFlow(_PluginBase):
"uploaded": 0,
"downloaded": 0,
}
if not data_list:
if not torrents:
return [
{
'component': 'div',
@@ -766,7 +766,9 @@ class BrushFlow(_PluginBase):
}
]
else:
data_list = data_list.values()
data_list = torrents.values()
# 按time倒序排序
data_list = sorted(data_list, key=lambda x: x.get("time") or 0, reverse=True)
# 总上传量格式化
total_upload = StringUtils.str_filesize(stattistic_data.get("uploaded") or 0)
# 总下载量格式化
@@ -858,7 +860,7 @@ class BrushFlow(_PluginBase):
{
'component': 'VImg',
'props': {
'src': '/plugin/upload.png'
'src': '/plugin_icon/upload.png'
}
}
]
@@ -928,7 +930,7 @@ class BrushFlow(_PluginBase):
{
'component': 'VImg',
'props': {
'src': '/plugin/download.png'
'src': '/plugin_icon/download.png'
}
}
]
@@ -998,7 +1000,7 @@ class BrushFlow(_PluginBase):
{
'component': 'VImg',
'props': {
'src': '/plugin/seed.png'
'src': '/plugin_icon/seed.png'
}
}
]
@@ -1068,7 +1070,7 @@ class BrushFlow(_PluginBase):
{
'component': 'VImg',
'props': {
'src': '/plugin/delete.png'
'src': '/plugin_icon/delete.png'
}
}
]
@@ -1379,6 +1381,7 @@ class BrushFlow(_PluginBase):
"downloaded": 0,
"uploaded": 0,
"deleted": False,
"time": time.time()
}
# 统计数据
torrents_size += torrent.size

View File

@@ -1,7 +1,8 @@
from typing import Any, List, Dict, Tuple
from app.core.config import settings
from app.core.event import eventmanager
from app.core.event import eventmanager, Event
from app.log import logger
from app.plugins import _PluginBase
from app.plugins.chatgpt.openai import OpenAi
from app.schemas.types import EventType
@@ -33,6 +34,7 @@ class ChatGPT(_PluginBase):
openai = None
_enabled = False
_proxy = False
_recognize = False
_openai_url = None
_openai_key = None
@@ -40,6 +42,7 @@ class ChatGPT(_PluginBase):
if config:
self._enabled = config.get("enabled")
self._proxy = config.get("proxy")
self._recognize = config.get("recognize")
self._openai_url = config.get("openai_url")
self._openai_key = config.get("openai_key")
self.openai = OpenAi(api_key=self._openai_key, api_url=self._openai_url,
@@ -70,7 +73,7 @@ class ChatGPT(_PluginBase):
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
'md': 4
},
'content': [
{
@@ -86,7 +89,7 @@ class ChatGPT(_PluginBase):
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
'md': 4
},
'content': [
{
@@ -97,6 +100,22 @@ class ChatGPT(_PluginBase):
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 4
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'recognize',
'label': '辅助识别',
}
}
]
}
]
},
@@ -143,6 +162,7 @@ class ChatGPT(_PluginBase):
], {
"enabled": False,
"proxy": False,
"recognize": False,
"openai_url": "https://api.openai.com",
"openai_key": ""
}
@@ -151,10 +171,12 @@ class ChatGPT(_PluginBase):
pass
@eventmanager.register(EventType.UserMessage)
def talk(self, event):
def talk(self, event: Event):
"""
监听用户消息获取ChatGPT回复
"""
if not self._enabled:
return
if not self.openai:
return
text = event.event_data.get("text")
@@ -166,6 +188,42 @@ class ChatGPT(_PluginBase):
if response:
self.post_message(channel=channel, title=response, userid=userid)
@eventmanager.register(EventType.NameRecognize)
def recognize(self, event: Event):
"""
监听识别事件使用ChatGPT辅助识别名称
"""
if not event.event_data:
return
title = event.event_data.get("title")
if not title:
return
# 收到事件后需要立码返回,避免主程序等待
if not self._enabled \
or not self.openai \
or not self._recognize:
eventmanager.send_event(
EventType.NameRecognizeResult,
{
'title': title
}
)
return
# 调用ChatGPT
response = self.openai.get_media_name(filename=title)
logger.info(f"ChatGPT辅助识别结果{response}")
if response:
eventmanager.send_event(
EventType.NameRecognizeResult,
{
'title': title,
'name': response.get("title"),
'year': response.get("year"),
'season': response.get("season"),
'episode': response.get("episode")
}
)
def stop_service(self):
"""
退出插件

View File

@@ -251,6 +251,6 @@ class ChineseSubFinder(_PluginBase):
else:
logger.info("ChineseSubFinder任务添加成功%s" % job_id)
else:
logger.error("%s 目录缺失nfo元数据" % file_path)
logger.warn(f"ChineseSubFinder调用出错{res.status_code} - {res.reason}")
except Exception as e:
logger.error("连接ChineseSubFinder出错" + str(e))

View File

@@ -211,7 +211,7 @@ class CustomHosts(_PluginBase):
# 添加新的Hosts
system_hosts.add(new_entrys)
system_hosts.write()
logger.info("更新系统hosts文件成功")
logger.info("更新系统hosts文件成功容器运行则更新容器hosts")
except Exception as err:
err_flag = True
logger.error(f"更新系统hosts文件失败{str(err) or '请检查权限'}")

View File

@@ -0,0 +1,250 @@
from typing import Any, List, Dict, Tuple
from urllib.parse import urlparse
from app.core.config import settings
from app.core.event import EventManager
from app.helper.cookiecloud import CookieCloudHelper
from app.log import logger
from app.plugins import _PluginBase
from app.schemas.types import EventType
class CustomSites(_PluginBase):
# 插件名称
plugin_name = "自定义站点"
# 插件描述
plugin_desc = "增加自定义站点为签到和统计使用。"
# 插件图标
plugin_icon = "world.png"
# 主题色
plugin_color = "#9AC16C"
# 插件版本
plugin_version = "0.1"
# 插件作者
plugin_author = "lightolly"
# 作者主页
author_url = "https://github.com/lightolly"
# 插件配置项ID前缀
plugin_config_prefix = "customsites_"
# 加载顺序
plugin_order = 0
# 可使用的用户级别
auth_level = 2
# 自定义站点起始 id
site_id_base = 60000
site_id_alloc = site_id_base
# 私有属性
cookie_cloud: CookieCloudHelper = None
# 配置属性
_enabled: bool = False
"""
{
"id": "站点ID",
"name": "站点名称",
"url": "站点地址",
"cookie": "站点Cookie",
"ua": "User-Agent",
"proxy": "是否使用代理",
"render": "是否仿真",
}
"""
_sites: list[Dict] = []
"""
格式
站点名称|url|是否仿真
"""
_site_urls: str = ""
def init_plugin(self, config: dict = None):
self.cookie_cloud = CookieCloudHelper(
server=settings.COOKIECLOUD_HOST,
key=settings.COOKIECLOUD_KEY,
password=settings.COOKIECLOUD_PASSWORD
)
del_sites = []
sites = []
new_site_urls = []
# 配置
if config:
self._enabled = config.get("enabled", False)
self._sites = config.get("sites", [])
self._site_urls = config.get("site_urls", "")
if not self._enabled:
return
site_urls = self._site_urls.splitlines()
# 只保留 匹配site_urls的 sites
urls = [site_url.split('|')[1] for site_url in site_urls]
for site in self._sites:
if site.get("url") not in urls:
del_sites.append(site)
else:
sites.append(site)
for item in site_urls:
_, url, _ = item.split("|")
if url in [site.get("url") for site in self._sites]:
continue
else:
new_site_urls.append(item)
# 获取待分配的最大ID
alloc_ids = [site.get("id") for site in self._sites if site.get("id")] + [self.site_id_base]
self.site_id_alloc = max(alloc_ids) + 1
# 补全 site_id
for item in new_site_urls:
site_name, item, site_render = item.split("|")
sites.append({
"id": self.site_id_alloc,
"name": site_name,
"url": item,
"render": True if site_render.upper() == 'Y' else False,
"cookie": "",
})
self.site_id_alloc += 1
self._sites = sites
# 保存配置
self.sync_cookie()
self.__update_config()
# 通知站点删除
for site in del_sites:
self.delete_site(site.get("id"))
logger.info(f"删除站点 {site.get('name')}")
def get_state(self) -> bool:
return self._enabled
def __update_config(self):
# 保存配置
self.update_config(
{
"enabled": self._enabled,
"sites": self._sites,
"site_urls": self._site_urls
}
)
def __get_site_by_domain(self, domain):
for site in self._sites:
site_domain = urlparse(site.get("url")).netloc
if site_domain.endswith(domain):
return site
return None
def sync_cookie(self):
"""
通过CookieCloud同步站点Cookie
"""
logger.info("开始同步CookieCloud站点 ...")
cookies, msg = self.cookie_cloud.download()
if not cookies:
logger.error(f"CookieCloud同步失败{msg}")
return
# 保存Cookie或新增站点
_update_count = 0
for domain, cookie in cookies.items():
# 获取站点信息
site_info = self.__get_site_by_domain(domain)
if site_info:
# 更新站点Cookie
logger.info(f"更新站点 {domain} Cookie ...")
site_info.update({"cookie": cookie})
_update_count += 1
# 处理完成
ret_msg = f"更新了{_update_count}个站点,总{len(self._sites)}个站点"
logger.info(f"自定义站点 Cookie同步成功{ret_msg}")
@staticmethod
def get_command() -> List[Dict[str, Any]]:
pass
def get_api(self) -> List[Dict[str, Any]]:
pass
def get_form(self) -> Tuple[List[dict], Dict[str, Any]]:
"""
拼装插件配置页面需要返回两块数据1、页面配置2、数据结构
"""
return [
{
'component': 'VForm',
'content': [
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'enabled',
'label': '启用插件',
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12
},
'content': [
{
'component': 'VTextarea',
'props': {
'model': 'site_urls',
'label': '站点列表',
'rows': 5,
'placeholder': '每一行一个站点,配置方式:\n'
'站点名称|站点地址|是否仿真(Y/N)\n'
}
}
]
}
]
}
]
}
], {
"enabled": False,
"site_urls": [],
"sites": self._sites
}
def get_page(self) -> List[dict]:
pass
def stop_service(self):
"""
退出插件
"""
pass
@staticmethod
def delete_site(site_id):
"""
删除站点通知
"""
# 插件站点删除
EventManager().send_event(EventType.SiteDeleted,
{
"site_id": site_id
})

View File

@@ -1,12 +1,12 @@
import datetime
import re
import shutil
import threading
import traceback
from datetime import datetime
from pathlib import Path
from threading import Event
from typing import List, Tuple, Dict, Any
import pytz
from apscheduler.schedulers.background import BackgroundScheduler
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer
@@ -16,6 +16,7 @@ from app.chain.tmdb import TmdbChain
from app.chain.transfer import TransferChain
from app.core.config import settings
from app.core.context import MediaInfo
from app.core.event import eventmanager, Event
from app.core.metainfo import MetaInfoPath
from app.db.downloadhistory_oper import DownloadHistoryOper
from app.db.transferhistory_oper import TransferHistoryOper
@@ -79,6 +80,7 @@ class DirMonitor(_PluginBase):
_observer = []
_enabled = False
_notify = False
_onlyonce = False
# 模式 compatibility/fast
_mode = "fast"
# 转移方式
@@ -91,13 +93,13 @@ class DirMonitor(_PluginBase):
_transferconf: Dict[str, str] = {}
_medias = {}
# 退出事件
_event = Event()
_event = threading.Event()
def init_plugin(self, config: dict = None):
self.transferhis = TransferHistoryOper(self.db)
self.downloadhis = DownloadHistoryOper(self.db)
self.transferchian = TransferChain(self.db)
self.tmdbchain = TmdbChain(self.db)
self.tmdbchain = TmdbChain()
# 清空配置
self._dirconf = {}
self._transferconf = {}
@@ -106,6 +108,7 @@ class DirMonitor(_PluginBase):
if config:
self._enabled = config.get("enabled")
self._notify = config.get("notify")
self._onlyonce = config.get("onlyonce")
self._mode = config.get("mode")
self._transfer_type = config.get("transfer_type")
self._monitor_dirs = config.get("monitor_dirs") or ""
@@ -114,10 +117,13 @@ class DirMonitor(_PluginBase):
# 停止现有任务
self.stop_service()
if self._enabled:
if self._enabled or self._onlyonce:
# 定时服务管理器
self._scheduler = BackgroundScheduler(timezone=settings.TZ)
# 追加入库消息统一发送服务
self._scheduler.add_job(self.send_msg, trigger='interval', seconds=30)
# 启动任务
# 读取目录配置
monitor_dirs = self._monitor_dirs.split("\n")
if not monitor_dirs:
return
@@ -126,6 +132,12 @@ class DirMonitor(_PluginBase):
if not mon_path:
continue
# 自定义转移方式
_transfer_type = self._transfer_type
if mon_path.count("#") == 1:
_transfer_type = mon_path.split("#")[1]
mon_path = mon_path.split("#")[0]
# 存储目的目录
if SystemUtils.is_windows():
if mon_path.count(":") > 1:
@@ -136,59 +148,110 @@ class DirMonitor(_PluginBase):
else:
paths = mon_path.split(":")
# 自定义转移方式
if mon_path.count("#") == 1:
self._transferconf[mon_path] = mon_path.split("#")[1]
else:
self._transferconf[mon_path] = self._transfer_type
# 目的目录
target_path = None
if len(paths) > 1:
mon_path = paths[0]
target_path = Path(paths[1])
self._dirconf[mon_path] = target_path
# 检查媒体库目录是不是下载目录的子目录
try:
if target_path.is_relative_to(Path(mon_path)):
logger.warn(f"{target_path} 是下载目录 {mon_path} 的子目录,无法监控")
self.systemmessage.put(f"{target_path} 是下载目录 {mon_path} 的子目录,无法监控")
continue
except Exception as e:
logger.debug(str(e))
pass
# 转移方式
self._transferconf[mon_path] = _transfer_type
try:
if self._mode == "compatibility":
# 兼容模式目录同步性能降低且NAS不能休眠但可以兼容挂载的远程共享目录如SMB
observer = PollingObserver(timeout=10)
else:
# 内部处理系统操作类型选择最优解
observer = Observer(timeout=10)
self._observer.append(observer)
observer.schedule(FileMonitorHandler(mon_path, self), path=mon_path, recursive=True)
observer.daemon = True
observer.start()
logger.info(f"{mon_path} 的目录监控服务启动")
except Exception as e:
err_msg = str(e)
if "inotify" in err_msg and "reached" in err_msg:
logger.warn(
f"目录监控服务启动出现异常:{err_msg}请在宿主机上不是docker容器内执行以下命令并重启"
+ """
echo fs.inotify.max_user_watches=524288 | sudo tee -a /etc/sysctl.conf
echo fs.inotify.max_user_instances=524288 | sudo tee -a /etc/sysctl.conf
sudo sysctl -p
""")
else:
logger.error(f"{mon_path} 启动目录监控失败:{err_msg}")
self.systemmessage.put(f"{mon_path} 启动目录监控失败:{err_msg}")
# 启用目录监控
if self._enabled:
# 检查媒体库目录是不是下载目录的子目录
try:
if target_path and target_path.is_relative_to(Path(mon_path)):
logger.warn(f"{target_path} 是下载目录 {mon_path} 的子目录,无法监控")
self.systemmessage.put(f"{target_path} 是下载目录 {mon_path} 的子目录,无法监控")
continue
except Exception as e:
logger.debug(str(e))
pass
# 追加入库消息统一发送服务
self._scheduler.add_job(self.send_msg, trigger='interval', seconds=15)
# 启动服务
self._scheduler.print_jobs()
self._scheduler.start()
try:
if self._mode == "compatibility":
# 兼容模式目录同步性能降低且NAS不能休眠但可以兼容挂载的远程共享目录如SMB
observer = PollingObserver(timeout=10)
else:
# 内部处理系统操作类型选择最优解
observer = Observer(timeout=10)
self._observer.append(observer)
observer.schedule(FileMonitorHandler(mon_path, self), path=mon_path, recursive=True)
observer.daemon = True
observer.start()
logger.info(f"{mon_path} 的目录监控服务启动")
except Exception as e:
err_msg = str(e)
if "inotify" in err_msg and "reached" in err_msg:
logger.warn(
f"目录监控服务启动出现异常:{err_msg}请在宿主机上不是docker容器内执行以下命令并重启"
+ """
echo fs.inotify.max_user_watches=524288 | sudo tee -a /etc/sysctl.conf
echo fs.inotify.max_user_instances=524288 | sudo tee -a /etc/sysctl.conf
sudo sysctl -p
""")
else:
logger.error(f"{mon_path} 启动目录监控失败:{err_msg}")
self.systemmessage.put(f"{mon_path} 启动目录监控失败:{err_msg}")
# 运行一次定时服务
if self._onlyonce:
logger.info("目录监控服务启动,立即运行一次")
self._scheduler.add_job(func=self.sync_all, trigger='date',
run_date=datetime.datetime.now(
tz=pytz.timezone(settings.TZ)) + datetime.timedelta(seconds=3)
)
# 关闭一次性开关
self._onlyonce = False
# 保存配置
self.__update_config()
# 启动定时服务
if self._scheduler.get_jobs():
self._scheduler.print_jobs()
self._scheduler.start()
def __update_config(self):
"""
更新配置
"""
self.update_config({
"enabled": self._enabled,
"notify": self._notify,
"onlyonce": self._onlyonce,
"mode": self._mode,
"transfer_type": self._transfer_type,
"monitor_dirs": self._monitor_dirs,
"exclude_keywords": self._exclude_keywords
})
@eventmanager.register(EventType.DirectorySync)
def remote_sync(self, event: Event):
"""
远程全量同步
"""
if event:
self.post_message(channel=event.event_data.get("channel"),
title="开始同步监控目录 ...",
userid=event.event_data.get("user"))
self.sync_all()
if event:
self.post_message(channel=event.event_data.get("channel"),
title="监控目录同步完成!", userid=event.event_data.get("user"))
def sync_all(self):
"""
立即运行一次,全量同步目录中所有文件
"""
logger.info("开始全量同步监控目录 ...")
# 遍历所有监控目录
for mon_path in self._dirconf.keys():
# 遍历目录下所有文件
for file_path in SystemUtils.list_files(Path(mon_path), settings.RMT_MEDIAEXT):
self.__handle_file(event_path=str(file_path), mon_path=mon_path)
logger.info("全量同步监控目录完成!")
def event_handler(self, event, mon_path: str, text: str, event_path: str):
"""
@@ -200,137 +263,136 @@ class DirMonitor(_PluginBase):
"""
if not event.is_directory:
# 文件发生变化
file_path = Path(event_path)
try:
if not file_path.exists():
logger.debug("文件%s%s" % (text, event_path))
self.__handle_file(event_path=event_path, mon_path=mon_path)
def __handle_file(self, event_path: str, mon_path: str):
"""
同步一个文件
:param event_path: 事件文件路径
:param mon_path: 监控目录
"""
file_path = Path(event_path)
try:
if not file_path.exists():
return
# 全程加锁
with lock:
transfer_history = self.transferhis.get_by_src(event_path)
if transfer_history:
logger.debug("文件已处理过:%s" % event_path)
return
logger.debug("文件%s%s" % (text, event_path))
# 回收站及隐藏的文件不处理
if event_path.find('/@Recycle/') != -1 \
or event_path.find('/#recycle/') != -1 \
or event_path.find('/.') != -1 \
or event_path.find('/@eaDir') != -1:
logger.debug(f"{event_path} 是回收站或隐藏的文件")
return
# 全程加锁
with lock:
transfer_history = self.transferhis.get_by_src(event_path)
if transfer_history:
logger.debug("文件已处理过:%s" % event_path)
return
# 命中过滤关键字不处理
if self._exclude_keywords:
for keyword in self._exclude_keywords.split("\n"):
if keyword and re.findall(keyword, event_path):
logger.info(f"{event_path} 命中过滤关键字 {keyword},不处理")
return
# 回收站及隐藏的文件不处理
if event_path.find('/@Recycle/') != -1 \
or event_path.find('/#recycle/') != -1 \
or event_path.find('/.') != -1 \
or event_path.find('/@eaDir') != -1:
logger.debug(f"{event_path} 是回收站或隐藏的文件")
return
# 整理屏蔽词不处理
transfer_exclude_words = self.systemconfig.get(SystemConfigKey.TransferExcludeWords)
if transfer_exclude_words:
for keyword in transfer_exclude_words:
if not keyword:
continue
if keyword and re.search(r"%s" % keyword, event_path, re.IGNORECASE):
logger.info(f"{event_path} 命中整理屏蔽词 {keyword},不处理")
return
# 命中过滤关键字不处理
if self._exclude_keywords:
for keyword in self._exclude_keywords.split("\n"):
if keyword and re.findall(keyword, event_path):
logger.info(f"{event_path} 命中过滤关键字 {keyword},不处理")
return
# 不是媒体文件不处理
if file_path.suffix not in settings.RMT_MEDIAEXT:
logger.debug(f"{event_path} 不是媒体文件")
return
# 整理屏蔽词不处理
transfer_exclude_words = self.systemconfig.get(SystemConfigKey.TransferExcludeWords)
if transfer_exclude_words:
for keyword in transfer_exclude_words:
if not keyword:
continue
if keyword and re.search(r"%s" % keyword, event_path, re.IGNORECASE):
logger.info(f"{event_path} 命中整理屏蔽词 {keyword},不处理")
return
# 元数据
file_meta = MetaInfoPath(file_path)
if not file_meta.name:
logger.error(f"{file_path.name} 无法识别有效信息")
return
# 不是媒体文件不处理
if file_path.suffix not in settings.RMT_MEDIAEXT:
logger.debug(f"{event_path} 不是媒体文件")
return
# 判断是不是蓝光目录
if re.search(r"BDMV[/\\]STREAM", event_path, re.IGNORECASE):
# 截取BDMV前面的路径
event_path = event_path[:event_path.find("BDMV")]
file_path = Path(event_path)
# 查询历史记录,已转移的不处理
if self.transferhis.get_by_src(event_path):
logger.info(f"{event_path} 已整理过")
return
# 查询历史记录,已转移的不处理
if self.transferhis.get_by_src(event_path):
logger.info(f"{event_path} 已整理过")
return
# 元数据
file_meta = MetaInfoPath(file_path)
if not file_meta.name:
logger.error(f"{file_path.name} 无法识别有效信息")
return
# 查询转移目的目录
target: Path = self._dirconf.get(mon_path)
# 查询转移方式
transfer_type = self._transferconf.get(mon_path)
# 识别媒体信息
mediainfo: MediaInfo = self.chain.recognize_media(meta=file_meta)
if not mediainfo:
logger.warn(f'未识别到媒体信息,标题:{file_meta.name}')
if self._notify:
self.chain.post_message(Notification(
mtype=NotificationType.Manual,
title=f"{file_path.name} 未识别到媒体信息,无法入库!"
))
# 新增转移成功历史记录
self.transferhis.add_fail(
src_path=file_path,
mode=transfer_type,
meta=file_meta
)
return
# 如果未开启新增已入库媒体是否跟随TMDB信息变化则根据tmdbid查询之前的title
if not settings.SCRAP_FOLLOW_TMDB:
transfer_history = self.transferhis.get_by_type_tmdbid(tmdbid=mediainfo.tmdb_id,
mtype=mediainfo.type.value)
if transfer_history:
mediainfo.title = transfer_history.title
logger.info(f"{file_path.name} 识别为:{mediainfo.type.value} {mediainfo.title_year}")
# 更新媒体图片
self.chain.obtain_images(mediainfo=mediainfo)
# 获取集数据
if mediainfo.type == MediaType.TV:
episodes_info = self.tmdbchain.tmdb_episodes(tmdbid=mediainfo.tmdb_id,
season=file_meta.begin_season or 1)
else:
episodes_info = None
# 获取downloadhash
download_hash = self.get_download_hash(src=str(file_path))
# 转移
transferinfo: TransferInfo = self.chain.transfer(mediainfo=mediainfo,
path=file_path,
transfer_type=transfer_type,
target=target,
meta=file_meta,
episodes_info=episodes_info)
if not transferinfo:
logger.error("文件转移模块运行失败")
return
if not transferinfo.success:
# 转移失败
logger.warn(f"{file_path.name} 入库失败:{transferinfo.message}")
# 新增转移失败历史记录
self.transferhis.add_fail(
src_path=file_path,
mode=transfer_type,
download_hash=download_hash,
meta=file_meta,
mediainfo=mediainfo,
transferinfo=transferinfo
)
if self._notify:
self.chain.post_message(Notification(
title=f"{mediainfo.title_year}{file_meta.season_episode} 入库失败!",
text=f"原因:{transferinfo.message or '未知'}",
image=mediainfo.get_message_image()
))
return
# 查询转移目的目录
target: Path = self._dirconf.get(mon_path)
# 查询转移方式
transfer_type = self._transferconf.get(mon_path)
# 根据父路径获取下载历史
download_history = self.downloadhis.get_by_path(Path(event_path).parent)
# 识别媒体信息
mediainfo: MediaInfo = self.chain.recognize_media(meta=file_meta,
tmdbid=download_history.tmdbid if download_history else None)
if not mediainfo:
logger.warn(f'未识别到媒体信息,标题:{file_meta.name}')
# 新增转移成功历史记录
self.transferhis.add_success(
his = self.transferhis.add_fail(
src_path=file_path,
mode=transfer_type,
meta=file_meta
)
if self._notify:
self.chain.post_message(Notification(
mtype=NotificationType.Manual,
title=f"{file_path.name} 未识别到媒体信息,无法入库!\n"
f"回复:```\n/redo {his.id} [tmdbid]|[类型]\n``` 手动识别转移。"
))
return
# 如果未开启新增已入库媒体是否跟随TMDB信息变化则根据tmdbid查询之前的title
if not settings.SCRAP_FOLLOW_TMDB:
transfer_history = self.transferhis.get_by_type_tmdbid(tmdbid=mediainfo.tmdb_id,
mtype=mediainfo.type.value)
if transfer_history:
mediainfo.title = transfer_history.title
logger.info(f"{file_path.name} 识别为:{mediainfo.type.value} {mediainfo.title_year}")
# 更新媒体图片
self.chain.obtain_images(mediainfo=mediainfo)
# 获取集数据
if mediainfo.type == MediaType.TV:
episodes_info = self.tmdbchain.tmdb_episodes(tmdbid=mediainfo.tmdb_id,
season=file_meta.begin_season or 1)
else:
episodes_info = None
# 获取downloadhash
download_hash = self.get_download_hash(src=str(file_path))
# 转移
transferinfo: TransferInfo = self.chain.transfer(mediainfo=mediainfo,
path=file_path,
transfer_type=transfer_type,
target=target,
meta=file_meta,
episodes_info=episodes_info)
if not transferinfo:
logger.error("文件转移模块运行失败")
return
if not transferinfo.success:
# 转移失败
logger.warn(f"{file_path.name} 入库失败:{transferinfo.message}")
# 新增转移失败历史记录
self.transferhis.add_fail(
src_path=file_path,
mode=transfer_type,
download_hash=download_hash,
@@ -338,94 +400,112 @@ class DirMonitor(_PluginBase):
mediainfo=mediainfo,
transferinfo=transferinfo
)
if self._notify:
self.chain.post_message(Notification(
title=f"{mediainfo.title_year}{file_meta.season_episode} 入库失败!",
text=f"原因:{transferinfo.message or '未知'}",
image=mediainfo.get_message_image()
))
return
# 刮削单个文件
if settings.SCRAP_METADATA:
self.chain.scrape_metadata(path=transferinfo.target_path,
mediainfo=mediainfo)
# 新增转移成功历史记录
self.transferhis.add_success(
src_path=file_path,
mode=transfer_type,
download_hash=download_hash,
meta=file_meta,
mediainfo=mediainfo,
transferinfo=transferinfo
)
"""
{
"title_year season": {
"files": [
{
"path":,
"mediainfo":,
"file_meta":,
"transferinfo":
}
],
"time": "2023-08-24 23:23:23.332"
}
# 刮削单个文件
if settings.SCRAP_METADATA:
self.chain.scrape_metadata(path=transferinfo.target_path,
mediainfo=mediainfo,
transfer_type=transfer_type)
"""
{
"title_year season": {
"files": [
{
"path":,
"mediainfo":,
"file_meta":,
"transferinfo":
}
],
"time": "2023-08-24 23:23:23.332"
}
"""
# 发送消息汇总
media_list = self._medias.get(mediainfo.title_year + " " + file_meta.season) or {}
if media_list:
media_files = media_list.get("files") or []
if media_files:
file_exists = False
for file in media_files:
if str(event_path) == file.get("path"):
file_exists = True
break
if not file_exists:
media_files.append({
"path": event_path,
"mediainfo": mediainfo,
"file_meta": file_meta,
"transferinfo": transferinfo
})
else:
media_files = [
{
"path": event_path,
"mediainfo": mediainfo,
"file_meta": file_meta,
"transferinfo": transferinfo
}
]
media_list = {
"files": media_files,
"time": datetime.now()
}
else:
media_list = {
"files": [
}
"""
# 发送消息汇总
media_list = self._medias.get(mediainfo.title_year + " " + file_meta.season) or {}
if media_list:
media_files = media_list.get("files") or []
if media_files:
file_exists = False
for file in media_files:
if str(event_path) == file.get("path"):
file_exists = True
break
if not file_exists:
media_files.append({
"path": event_path,
"mediainfo": mediainfo,
"file_meta": file_meta,
"transferinfo": transferinfo
})
else:
media_files = [
{
"path": event_path,
"mediainfo": mediainfo,
"file_meta": file_meta,
"transferinfo": transferinfo
}
],
"time": datetime.now()
}
self._medias[mediainfo.title_year + " " + file_meta.season] = media_list
]
media_list = {
"files": media_files,
"time": datetime.datetime.now()
}
else:
media_list = {
"files": [
{
"path": event_path,
"mediainfo": mediainfo,
"file_meta": file_meta,
"transferinfo": transferinfo
}
],
"time": datetime.datetime.now()
}
self._medias[mediainfo.title_year + " " + file_meta.season] = media_list
# 汇总刷新媒体库
if settings.REFRESH_MEDIASERVER:
self.chain.refresh_mediaserver(mediainfo=mediainfo, file_path=transferinfo.target_path)
# 广播事件
self.eventmanager.send_event(EventType.TransferComplete, {
'meta': file_meta,
'mediainfo': mediainfo,
'transferinfo': transferinfo
})
# 汇总刷新媒体库
if settings.REFRESH_MEDIASERVER:
self.chain.refresh_mediaserver(mediainfo=mediainfo, file_path=transferinfo.target_path)
# 广播事件
self.eventmanager.send_event(EventType.TransferComplete, {
'meta': file_meta,
'mediainfo': mediainfo,
'transferinfo': transferinfo
})
# 移动模式删除空目录
if transfer_type == "move":
for file_dir in file_path.parents:
if len(str(file_dir)) <= len(str(Path(mon_path))):
# 重要,删除到监控目录为止
break
files = SystemUtils.list_files(file_dir, settings.RMT_MEDIAEXT)
if not files:
logger.warn(f"移动模式,删除空目录:{file_dir}")
shutil.rmtree(file_dir, ignore_errors=True)
# 移动模式删除空目录
if transfer_type == "move":
for file_dir in file_path.parents:
if len(str(file_dir)) <= len(str(Path(mon_path))):
# 重要,删除到监控目录为止
break
files = SystemUtils.list_files(file_dir, settings.RMT_MEDIAEXT)
if not files:
logger.warn(f"移动模式,删除空目录:{file_dir}")
shutil.rmtree(file_dir, ignore_errors=True)
except Exception as e:
logger.error("目录监控发生错误:%s - %s" % (str(e), traceback.format_exc()))
except Exception as e:
logger.error("目录监控发生错误:%s - %s" % (str(e), traceback.format_exc()))
def send_msg(self):
"""
@@ -452,7 +532,7 @@ class DirMonitor(_PluginBase):
file_meta = media_files[0].get("file_meta")
mediainfo = media_files[0].get("mediainfo")
# 判断最后更新时间距现在是已超过5秒超过则发送消息
if (datetime.now() - last_update_time).total_seconds() > 5:
if (datetime.datetime.now() - last_update_time).total_seconds() > 5:
# 发送通知
if self._notify:
@@ -504,7 +584,17 @@ class DirMonitor(_PluginBase):
@staticmethod
def get_command() -> List[Dict[str, Any]]:
pass
"""
定义远程控制命令
:return: 命令关键字、事件、描述、附带数据
"""
return [{
"cmd": "/directory_sync",
"event": EventType.DirectorySync,
"desc": "目录监控同步",
"category": "管理",
"data": {}
}]
def get_api(self) -> List[Dict[str, Any]]:
pass
@@ -521,7 +611,7 @@ class DirMonitor(_PluginBase):
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
'md': 4
},
'content': [
{
@@ -537,7 +627,7 @@ class DirMonitor(_PluginBase):
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
'md': 4
},
'content': [
{
@@ -548,6 +638,22 @@ class DirMonitor(_PluginBase):
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 4
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'onlyonce',
'label': '立即运行一次',
}
}
]
}
]
},
@@ -613,10 +719,11 @@ class DirMonitor(_PluginBase):
'model': 'monitor_dirs',
'label': '监控目录',
'rows': 5,
'placeholder': '每一行一个目录,支持种配置方式:\n'
'placeholder': '每一行一个目录,支持以下几种配置方式,转移方式支持 move、copy、link、softlink、rclone_copy、rclone_move\n'
'监控目录\n'
'监控目录:转移目的目录(需同时在媒体库目录中配置该目的目录)\n'
'监控目录:转移目的目录#转移方式move|copy|link|softlink'
'监控目录#转移方式\n'
'监控目录:转移目的目录\n'
'监控目录:转移目的目录#转移方式'
}
}
]
@@ -650,6 +757,7 @@ class DirMonitor(_PluginBase):
], {
"enabled": False,
"notify": False,
"onlyonce": False,
"mode": "fast",
"transfer_type": settings.TRANSFER_TYPE,
"monitor_dirs": "",

View File

@@ -449,15 +449,19 @@ class DoubanSync(_PluginBase):
results = self.rsshelper.parse(url)
if not results:
logger.error(f"未获取到用户 {user_id} 豆瓣RSS数据{url}")
return
continue
else:
logger.info(f"获取到用户 {user_id} 豆瓣RSS数据{len(results)}")
# 解析数据
for result in results:
try:
dtype = result.get("title", "")[:2]
title = result.get("title", "")[2:]
if dtype not in ["想看"]:
if dtype not in ["想看", "在看"]:
logger.info(f'标题:{title},非想看/在看数据,跳过')
continue
if not result.get("link"):
logger.warn(f'标题:{title},未获取到链接,跳过')
continue
# 判断是否在天数范围
pubdate: Optional[datetime.datetime] = result.get("pubdate")
@@ -468,6 +472,7 @@ class DoubanSync(_PluginBase):
douban_id = result.get("link", "").split("/")[-2]
# 检查是否处理过
if not douban_id or douban_id in [h.get("doubanid") for h in history]:
logger.info(f'标题:{title}豆瓣ID{douban_id} 已处理过')
continue
# 根据豆瓣ID获取豆瓣数据
doubaninfo: Optional[dict] = self.chain.douban_info(doubanid=douban_id)

View File

@@ -117,7 +117,7 @@ class DownloadingMsg(_PluginBase):
if not userid:
continue
# 如果用户是管理员,无需重复推送
if self._type == "admin" or self._type == "both" and self._adminuser and userid in str(
if (self._type == "admin" or self._type == "both") and self._adminuser and userid in str(
self._adminuser).split(","):
logger.debug("管理员已推送")
continue
@@ -156,7 +156,7 @@ class DownloadingMsg(_PluginBase):
channel_value = downloadhis.channel
else:
try:
context = MediaChain(self.db).recognize_by_title(title=torrent.title)
context = MediaChain().recognize_by_title(title=torrent.title)
if not context or not context.media_info:
continue
media_info = context.media_info
@@ -177,10 +177,14 @@ class DownloadingMsg(_PluginBase):
else:
media_name = torrent.title
messages.append(f"{index}. {media_name}\n"
f"{torrent.title} "
f"{StringUtils.str_filesize(torrent.size)} "
f"{round(torrent.progress, 1)}%")
if not self._adminuser or userid not in str(self._adminuser).split(","):
# 下载用户发送精简消息
messages.append(f"{index}. {media_name} {round(torrent.progress, 1)}%")
else:
messages.append(f"{index}. {media_name}\n"
f"{torrent.title} "
f"{StringUtils.str_filesize(torrent.size)} "
f"{round(torrent.progress, 1)}%")
index += 1
# 用户消息渠道

View File

@@ -22,7 +22,7 @@ class InvitesSignin(_PluginBase):
# 插件图标
plugin_icon = "invites.png"
# 主题色
plugin_color = "#4FB647"
plugin_color = "#FFFFFF"
# 插件版本
plugin_version = "1.0"
# 插件作者

View File

@@ -390,7 +390,7 @@ class LibraryScraper(_PluginBase):
# 刮削单个文件
if scrap_metadata:
self.chain.scrape_metadata(path=file, mediainfo=mediainfo)
self.chain.scrape_metadata(path=file, mediainfo=mediainfo, transfer_type=settings.TRANSFER_TYPE)
@staticmethod
def __get_tmdbid_from_nfo(file_path: Path):

View File

@@ -1013,40 +1013,42 @@ class MediaSyncDel(_PluginBase):
if not isinstance(torrents, list):
torrents = [torrents]
# 删除辅种历史中与本下载器相同的辅种记录
if str(downloader) == str(download):
for torrent in torrents:
handle_cnt += 1
if str(download) == "qbittorrent":
# 删除辅种
if action_flag == "del":
logger.info(f"删除辅种:{downloader} - {torrent}")
self.qb.delete_torrents(delete_file=True,
ids=torrent)
# 暂停辅种
if action_flag == "stop":
self.qb.stop_torrents(torrent)
logger.info(f"辅种:{downloader} - {torrent} 暂停")
else:
# 删除辅种
if action_flag == "del":
logger.info(f"删除辅种:{downloader} - {torrent}")
self.tr.delete_torrents(delete_file=True,
ids=torrent)
# 暂停辅种
if action_flag == "stop":
self.tr.stop_torrents(torrent)
logger.info(f"辅种:{downloader} - {torrent} 暂停")
# 删除本下载器辅种历史
if action_flag == "del":
del history
break
# 删除辅种历史
for torrent in torrents:
handle_cnt += 1
if str(download) == "qbittorrent":
# 删除辅种
if action_flag == "del":
logger.info(f"删除辅种:{downloader} - {torrent}")
self.qb.delete_torrents(delete_file=True,
ids=torrent)
# 暂停辅种
if action_flag == "stop":
self.qb.stop_torrents(torrent)
logger.info(f"辅种:{downloader} - {torrent} 暂停")
else:
# 删除辅种
if action_flag == "del":
logger.info(f"删除辅种:{downloader} - {torrent}")
self.tr.delete_torrents(delete_file=True,
ids=torrent)
# 暂停辅种
if action_flag == "stop":
self.tr.stop_torrents(torrent)
logger.info(f"辅种:{downloader} - {torrent} 暂停")
# 删除本下载器辅种历史
if action_flag == "del":
del history
break
# 更新辅种历史
self.save_data(key=history_key,
value=seed_history,
plugin_id=plugin_id)
if seed_history:
self.save_data(key=history_key,
value=seed_history,
plugin_id=plugin_id)
else:
self.del_data(key=history_key,
plugin_id=plugin_id)
return handle_cnt
@staticmethod
@@ -1276,8 +1278,6 @@ class MediaSyncDel(_PluginBase):
"""
下载文件删除处理事件
"""
if not self._enabled:
return
if not event:
return
event_data = event.event_data

View File

@@ -139,43 +139,61 @@ class NAStoolSync(_PluginBase):
plugin_key = history[2]
plugin_value = history[3]
# 处理下载器映射
if self._downloader:
downloaders = self._downloader.split("\n")
for downloader in downloaders:
if not downloader:
continue
sub_downloaders = downloader.split(":")
if not str(sub_downloaders[0]).isdigit():
logger.error(f"下载器映射配置错误NAStool下载器id 应为数字!")
continue
# 替换转种记录
if str(plugin_id) == "TorrentTransfer":
keys = str(plugin_key).split("-")
if keys[0].isdigit() and int(keys[0]) == int(sub_downloaders[0]):
# 替换key
plugin_key = plugin_key.replace(keys[0], sub_downloaders[1])
# 替换转种记录
if str(plugin_id) == "TorrentTransfer":
keys = str(plugin_key).split("-")
# 替换value
if isinstance(plugin_value, str):
_value: dict = json.loads(plugin_value)
elif isinstance(plugin_value, dict):
if str(plugin_value.get("to_download")).isdigit() and int(
plugin_value.get("to_download")) == int(sub_downloaders[0]):
plugin_value["to_download"] = sub_downloaders[1]
# 1-2cd5d6fe32dca4e39a3e9f10961bfbdb00437e91
if len(keys) == 2 and keys[0].isdigit():
mp_downloader = self.__get_target_downloader(int(keys[0]))
# 替换key
plugin_key = mp_downloader + "-" + keys[1]
# 替换辅种记录
if str(plugin_id) == "IYUUAutoSeed":
if isinstance(plugin_value, str):
plugin_value: list = json.loads(plugin_value)
if not isinstance(plugin_value, list):
plugin_value = [plugin_value]
for value in plugin_value:
if not str(value.get("downloader")).isdigit():
continue
if str(value.get("downloader")).isdigit() and int(value.get("downloader")) == int(
sub_downloaders[0]):
value["downloader"] = sub_downloaders[1]
# 替换value
"""
{
"to_download":2,
"to_download_id":"2cd5d6fe32dca4e39a3e9f10961bfbdb00437e91",
"delete_source":true
}
"""
if isinstance(plugin_value, str):
plugin_value: dict = json.loads(plugin_value)
if isinstance(plugin_value, dict):
if str(plugin_value.get("to_download")).isdigit():
to_downloader = self.__get_target_downloader(int(plugin_value.get("to_download")))
plugin_value["to_download"] = to_downloader
# 替换辅种记录
elif str(plugin_id) == "IYUUAutoSeed":
"""
[
{
"downloader":"2",
"torrents":[
"a18aa62abab42613edba15e7dbad0d729d8500da",
"e494f372316bbfd8572da80138a6ef4c491d5991",
"cc2bbc1e654d8fc0f83297f6cd36a38805aa2864",
"68aec0db3aa7fe28a887e5e41a0d0d5bc284910f",
"f02962474287e11441e34e40b8326ddf28d034f6"
]
},
{
"downloader":"2",
"torrents":[
"4f042003ce90519e1aadd02b76f51c0c0711adb3"
]
}
]
"""
if isinstance(plugin_value, str):
plugin_value: list = json.loads(plugin_value)
if not isinstance(plugin_value, list):
plugin_value = [plugin_value]
for value in plugin_value:
if str(value.get("downloader")).isdigit():
downloader = self.__get_target_downloader(int(value.get("downloader")))
value["downloader"] = downloader
self._plugindata.save(plugin_id=plugin_id,
key=plugin_key,
@@ -189,6 +207,24 @@ class NAStoolSync(_PluginBase):
logger.info(f"插件记录已同步完成。总耗时 {(end_time - start_time).seconds}")
def __get_target_downloader(self, download_id: int):
"""
获取NAStool下载器id对应的Moviepilot下载器
"""
# 处理下载器映射
if self._downloader:
downloaders = self._downloader.split("\n")
for downloader in downloaders:
if not downloader:
continue
sub_downloaders = downloader.split(":")
if not str(sub_downloaders[0]).isdigit():
logger.error(f"下载器映射配置错误NAStool下载器id 应为数字!")
continue
if int(sub_downloaders[0]) == download_id:
return str(sub_downloaders[1])
return download_id
def sync_download_history(self, download_history):
"""
导入下载记录

View File

@@ -64,15 +64,17 @@ class PersonMeta(_PluginBase):
_onlyonce = False
_cron = None
_delay = 0
_type = "all"
_remove_nozh = False
def init_plugin(self, config: dict = None):
self.tmdbchain = TmdbChain(self.db)
self.tmdbchain = TmdbChain()
self.mschain = MediaServerChain(self.db)
if config:
self._enabled = config.get("enabled")
self._onlyonce = config.get("onlyonce")
self._cron = config.get("cron")
self._type = config.get("type") or "all"
self._delay = config.get("delay") or 0
self._remove_nozh = config.get("remove_nozh") or False
@@ -116,6 +118,7 @@ class PersonMeta(_PluginBase):
"enabled": self._enabled,
"onlyonce": self._onlyonce,
"cron": self._cron,
"type": self._type,
"delay": self._delay,
"remove_nozh": self._remove_nozh
})
@@ -182,7 +185,7 @@ class PersonMeta(_PluginBase):
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
'md': 4
},
'content': [
{
@@ -199,7 +202,7 @@ class PersonMeta(_PluginBase):
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
'md': 4
},
'content': [
{
@@ -211,6 +214,27 @@ class PersonMeta(_PluginBase):
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 4
},
'content': [
{
'component': 'VSelect',
'props': {
'model': 'type',
'label': '刮削条件',
'items': [
{'title': '全部', 'value': 'all'},
{'title': '演员非中文', 'value': 'name'},
{'title': '角色非中文', 'value': 'role'},
]
}
}
]
}
]
},
@@ -241,6 +265,7 @@ class PersonMeta(_PluginBase):
"enabled": False,
"onlyonce": False,
"cron": "",
"type": "all",
"delay": 30,
"remove_nozh": False
}
@@ -350,10 +375,21 @@ class PersonMeta(_PluginBase):
"""
def __need_trans_actor(_item):
# 是否需要处理人物信息
_peoples = [x for x in _item.get("People", []) if
(x.get("Name") and not StringUtils.is_chinese(x.get("Name")))
or (x.get("Role") and not StringUtils.is_chinese(x.get("Role")))]
"""
是否需要处理人物信息
"""
if self._type == "name":
# 是否需要处理人物名称
_peoples = [x for x in _item.get("People", []) if
(x.get("Name") and not StringUtils.is_chinese(x.get("Name")))]
elif self._type == "role":
# 是否需要处理人物角色
_peoples = [x for x in _item.get("People", []) if
(x.get("Role") and not StringUtils.is_chinese(x.get("Role")))]
else:
_peoples = [x for x in _item.get("People", []) if
(x.get("Name") and not StringUtils.is_chinese(x.get("Name")))
or (x.get("Role") and not StringUtils.is_chinese(x.get("Role")))]
if _peoples:
return True
return False
@@ -456,7 +492,7 @@ class PersonMeta(_PluginBase):
# 查询媒体库人物详情
personinfo = self.get_iteminfo(server=server, itemid=people.get("Id"))
if not personinfo:
logger.warn(f"未找到人物 {people.get('Name')} 的信息")
logger.debug(f"未找到人物 {people.get('Name')} 的信息")
return None
# 是否更新标志
@@ -473,20 +509,20 @@ class PersonMeta(_PluginBase):
cn_name = self.__get_chinese_name(person_tmdbinfo)
if cn_name:
# 更新中文名
logger.info(f"{people.get('Name')} 从TMDB获取到中文名{cn_name}")
logger.debug(f"{people.get('Name')} 从TMDB获取到中文名{cn_name}")
personinfo["Name"] = cn_name
ret_people["Name"] = cn_name
updated_name = True
# 更新中文描述
biography = person_tmdbinfo.get("biography")
if biography and StringUtils.is_chinese(biography):
logger.info(f"{people.get('Name')} 从TMDB获取到中文描述")
logger.debug(f"{people.get('Name')} 从TMDB获取到中文描述")
personinfo["Overview"] = biography
updated_overview = True
# 图片
profile_path = person_tmdbinfo.get('profile_path')
if profile_path:
logger.info(f"{people.get('Name')} 从TMDB获取到图片{profile_path}")
logger.debug(f"{people.get('Name')} 从TMDB获取到图片{profile_path}")
profile_path = f"https://{settings.TMDB_IMAGE_DOMAIN}/t/p/original{profile_path}"
# 从豆瓣信息中更新人物信息
@@ -522,14 +558,14 @@ class PersonMeta(_PluginBase):
or douban_actor.get("name") == people.get("Name"):
# 名称
if not updated_name:
logger.info(f"{people.get('Name')} 从豆瓣中获取到中文名:{douban_actor.get('name')}")
logger.debug(f"{people.get('Name')} 从豆瓣中获取到中文名:{douban_actor.get('name')}")
personinfo["Name"] = douban_actor.get("name")
ret_people["Name"] = douban_actor.get("name")
updated_name = True
# 描述
if not updated_overview:
if douban_actor.get("title"):
logger.info(f"{people.get('Name')} 从豆瓣中获取到中文描述:{douban_actor.get('title')}")
logger.debug(f"{people.get('Name')} 从豆瓣中获取到中文描述:{douban_actor.get('title')}")
personinfo["Overview"] = douban_actor.get("title")
updated_overview = True
# 饰演角色
@@ -541,20 +577,20 @@ class PersonMeta(_PluginBase):
character = re.sub("演员", "",
character)
if character:
logger.info(f"{people.get('Name')} 从豆瓣中获取到饰演角色:{character}")
logger.debug(f"{people.get('Name')} 从豆瓣中获取到饰演角色:{character}")
ret_people["Role"] = character
update_character = True
# 图片
if not profile_path:
avatar = douban_actor.get("avatar") or {}
if avatar.get("large"):
logger.info(f"{people.get('Name')} 从豆瓣中获取到图片:{avatar.get('large')}")
logger.debug(f"{people.get('Name')} 从豆瓣中获取到图片:{avatar.get('large')}")
profile_path = avatar.get("large")
break
# 更新人物图片
if profile_path:
logger.info(f"更新人物 {people.get('Name')} 的图片:{profile_path}")
logger.debug(f"更新人物 {people.get('Name')} 的图片:{profile_path}")
self.set_item_image(server=server, itemid=people.get("Id"), imageurl=profile_path)
# 锁定人物信息
@@ -567,12 +603,12 @@ class PersonMeta(_PluginBase):
# 更新人物信息
if updated_name or updated_overview or update_character:
logger.info(f"更新人物 {people.get('Name')} 的信息:{personinfo}")
logger.debug(f"更新人物 {people.get('Name')} 的信息:{personinfo}")
ret = self.set_iteminfo(server=server, itemid=people.get("Id"), iteminfo=personinfo)
if ret:
return ret_people
else:
logger.info(f"人物 {people.get('Name')} 未找到中文数据")
logger.debug(f"人物 {people.get('Name')} 未找到中文数据")
except Exception as err:
logger.error(f"更新人物信息失败:{err}")
return None
@@ -581,10 +617,13 @@ class PersonMeta(_PluginBase):
"""
获取豆瓣演员信息
"""
# 随机休眠1-5
time.sleep(1 + int(time.time()) % 5)
# 随机休眠 3-10
sleep_time = 3 + int(time.time()) % 7
logger.debug(f"随机休眠 {sleep_time}秒 ...")
time.sleep(sleep_time)
# 匹配豆瓣信息
doubaninfo = self.chain.match_doubaninfo(name=mediainfo.title,
imdbid=mediainfo.imdb_id,
mtype=mediainfo.type.value,
year=mediainfo.year,
season=season)
@@ -593,7 +632,7 @@ class PersonMeta(_PluginBase):
doubanitem = self.chain.douban_info(doubaninfo.get("id")) or {}
return (doubanitem.get("actors") or []) + (doubanitem.get("directors") or [])
else:
logger.warn(f"未找到豆瓣信息:{mediainfo.title_year}")
logger.debug(f"未找到豆瓣信息:{mediainfo.title_year}")
return []
@staticmethod
@@ -712,7 +751,7 @@ class PersonMeta(_PluginBase):
logger.error(f"获取Jellyfin媒体的所有子媒体项失败{err}")
return {}
def __get_plex_items(t: str) -> dict:
def __get_plex_items() -> dict:
"""
获得Plex媒体的所有子媒体项
"""
@@ -721,7 +760,7 @@ class PersonMeta(_PluginBase):
plex = Plex().get_plex()
items['Items'] = []
if parentid:
if mtype and 'Season' in t:
if mtype and 'Season' in mtype:
plexitem = plex.library.fetchItem(ekey=parentid)
items['Items'] = []
for season in plexitem.seasons():
@@ -732,7 +771,7 @@ class PersonMeta(_PluginBase):
'Overview': season.summary
}
items['Items'].append(item)
elif mtype and 'Episode' in t:
elif mtype and 'Episode' in mtype:
plexitem = plex.library.fetchItem(ekey=parentid)
items['Items'] = []
for episode in plexitem.episodes():
@@ -783,7 +822,7 @@ class PersonMeta(_PluginBase):
elif server == "jellyfin":
return __get_jellyfin_items()
else:
return __get_plex_items(mtype)
return __get_plex_items()
@staticmethod
def set_iteminfo(server: str, itemid: str, iteminfo: dict):
@@ -879,7 +918,7 @@ class PersonMeta(_PluginBase):
if r:
return base64.b64encode(r.content).decode()
else:
logger.info(f"{imageurl} 图片下载失败,请检查网络连通性")
logger.warn(f"{imageurl} 图片下载失败,请检查网络连通性")
except Exception as err:
logger.error(f"下载图片失败:{err}")
return None

View File

@@ -86,7 +86,8 @@ class SiteStatistic(_PluginBase):
self._statistic_sites = config.get("statistic_sites") or []
# 过滤掉已删除的站点
self._statistic_sites = [site.get("id") for site in self.sites.get_indexers() if
all_sites = [site for site in self.sites.get_indexers() if not site.get("public")] + self.__custom_sites()
self._statistic_sites = [site.get("id") for site in all_sites if
not site.get("public") and site.get("id") in self._statistic_sites]
self.__update_config()
@@ -182,9 +183,14 @@ class SiteStatistic(_PluginBase):
"""
拼装插件配置页面需要返回两块数据1、页面配置2、数据结构
"""
# 站点的可选项
site_options = [{"title": site.name, "value": site.id}
# 站点的可选项(内置站点 + 自定义站点)
customSites = self.__custom_sites()
site_options = ([{"title": site.name, "value": site.id}
for site in Site.list_order_by_pri(self.db)]
+ [{"title": site.get("name"), "value": site.get("id")}
for site in customSites])
return [
{
'component': 'VForm',
@@ -467,7 +473,7 @@ class SiteStatistic(_PluginBase):
{
'component': 'VImg',
'props': {
'src': '/plugin/upload.png'
'src': '/plugin_icon/upload.png'
}
}
]
@@ -537,7 +543,7 @@ class SiteStatistic(_PluginBase):
{
'component': 'VImg',
'props': {
'src': '/plugin/download.png'
'src': '/plugin_icon/download.png'
}
}
]
@@ -607,7 +613,7 @@ class SiteStatistic(_PluginBase):
{
'component': 'VImg',
'props': {
'src': '/plugin/seed.png'
'src': '/plugin_icon/seed.png'
}
}
]
@@ -677,7 +683,7 @@ class SiteStatistic(_PluginBase):
{
'component': 'VImg',
'props': {
'src': '/plugin/database.png'
'src': '/plugin_icon/database.png'
}
}
]
@@ -1048,11 +1054,12 @@ class SiteStatistic(_PluginBase):
with lock:
all_sites = [site for site in self.sites.get_indexers() if not site.get("public")] + self.__custom_sites()
# 没有指定站点,默认使用全部站点
if not self._statistic_sites:
refresh_sites = [site for site in self.sites.get_indexers() if not site.get("public")]
refresh_sites = all_sites
else:
refresh_sites = [site for site in self.sites.get_indexers() if
refresh_sites = [site for site in all_sites if
site.get("id") in self._statistic_sites]
if not refresh_sites:
return
@@ -1115,6 +1122,13 @@ class SiteStatistic(_PluginBase):
self.save_data("last_update_time", key)
logger.info("站点数据刷新完成")
def __custom_sites(self) -> List[dict]:
custom_sites = []
custom_sites_config = self.get_config("CustomSites")
if custom_sites_config and custom_sites_config.get("enabled"):
custom_sites = custom_sites_config.get("sites")
return custom_sites
def __update_config(self):
self.update_config({
"enabled": self._enabled,

View File

@@ -56,7 +56,7 @@ class TransferHistory(BaseModel):
src: Optional[str] = None
# 目的目录
dest: Optional[str] = None
# 转移模式link/copy/move/softlink
# 转移模式
mode: Optional[str] = None
# 类型:电影、电视剧
type: Optional[str] = None

View File

@@ -31,6 +31,12 @@ class Subscribe(BaseModel):
include: Optional[str] = None
# 排除
exclude: Optional[str] = None
# 质量
quality: Optional[str] = None
# 分辨率
resolution: Optional[str] = None
# 特效
effect: Optional[str] = None
# 总集数
total_episode: Optional[int] = 0
# 开始集数

View File

@@ -12,6 +12,7 @@ class TransferTorrent(BaseModel):
path: Optional[Path] = None
hash: Optional[str] = None
tags: Optional[str] = None
userid: Optional[str] = None
class DownloadingTorrent(BaseModel):
@@ -29,6 +30,7 @@ class DownloadingTorrent(BaseModel):
upspeed: Optional[str] = None
dlspeed: Optional[str] = None
media: Optional[dict] = {}
userid: Optional[str] = None
class TransferInfo(BaseModel):

View File

@@ -40,6 +40,12 @@ class EventType(Enum):
UserMessage = "user.message"
# 通知消息
NoticeMessage = "notice.message"
# 名称识别请求
NameRecognize = "name.recognize"
# 名称识别结果
NameRecognizeResult = "name.recognize.result"
# 目录监控同步
DirectorySync = "directory.sync"
# 系统配置Key字典
@@ -68,8 +74,10 @@ class SystemConfigKey(Enum):
SubscribeFilterRules = "SubscribeFilterRules"
# 洗版规则
BestVersionFilterRules = "BestVersionFilterRules"
# 默认过滤规则
# 默认订阅过滤规则
DefaultFilterRules = "DefaultFilterRules"
# 默认搜索过滤规则
DefaultSearchFilterRules = "DefaultSearchFilterRules"
# 转移屏蔽词
TransferExcludeWords = "TransferExcludeWords"

View File

@@ -3,6 +3,7 @@ import os
import platform
import re
import shutil
import subprocess
import sys
from pathlib import Path
from typing import List, Union, Tuple
@@ -97,7 +98,9 @@ class SystemUtils:
"""
try:
# link到当前目录并改名
tmp_path = (src.parent / dest.name).with_suffix(".mp")
tmp_path = src.parent / (dest.name + ".mp")
if tmp_path.exists():
tmp_path.unlink()
tmp_path.hardlink_to(src)
# 移动到目标目录
shutil.move(tmp_path, dest)
@@ -118,6 +121,54 @@ class SystemUtils:
print(str(err))
return -1, str(err)
@staticmethod
def rclone_move(src: Path, dest: Path):
"""
Rclone移动
"""
try:
retcode = subprocess.run(
[
'rclone', 'moveto',
str(src),
f'MP:{dest}'
],
startupinfo=SystemUtils.__get_hidden_shell()
).returncode
return retcode, ""
except Exception as err:
print(str(err))
return -1, str(err)
@staticmethod
def rclone_copy(src: Path, dest: Path):
"""
Rclone复制
"""
try:
retcode = subprocess.run(
[
'rclone', 'copyto',
str(src),
f'MP:{dest}'
],
startupinfo=SystemUtils.__get_hidden_shell()
).returncode
return retcode, ""
except Exception as err:
print(str(err))
return -1, str(err)
@staticmethod
def __get_hidden_shell():
if SystemUtils.is_windows():
st = subprocess.STARTUPINFO()
st.dwFlags = subprocess.STARTF_USESHOWWINDOW
st.wShowWindow = subprocess.SW_HIDE
return st
else:
return None
@staticmethod
def list_files(directory: Path, extensions: list, min_filesize: int = 0) -> List[Path]:
"""

View File

@@ -5,8 +5,6 @@
####################################
# 基础设置 #
####################################
# 时区
TZ=Asia/Shanghai
# 【*】API监听地址
HOST=0.0.0.0
# 是否调试模式
@@ -19,8 +17,6 @@ SUPERUSER=admin
SUPERUSER_PASSWORD=password
# 【*】API密钥建议更换复杂字符串
API_TOKEN=moviepilot
# 网络代理 IP:PORT
PROXY_HOST=
# TMDB图片地址无需修改需保留默认值
TMDB_IMAGE_DOMAIN=image.tmdb.org
# TMDB API地址无需修改需保留默认值
@@ -43,7 +39,7 @@ SCRAP_SOURCE=themoviedb
####################################
# 媒体库 #
####################################
# 【*】转移方式 link/copy/move/softlink
# 【*】转移方式 link/copy/move/softlink/rclone_copy/rclone_move
TRANSFER_TYPE=copy
# 【*】媒体库目录,多个目录使用,分隔
LIBRARY_PATH=

View File

@@ -8,7 +8,6 @@ Create Date: 2023-09-28 13:37:16.479360
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'a521fbc28b18'
down_revision = 'b2f011d3a8b7'
@@ -26,5 +25,6 @@ def upgrade() -> None:
pass
# ### end Alembic commands ###
def downgrade() -> None:
pass
pass

View File

@@ -0,0 +1,32 @@
"""1.0.10
Revision ID: d633ca6cd572
Revises: a521fbc28b18
Create Date: 2023-10-12 08:54:49.728638
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd633ca6cd572'
down_revision = 'a521fbc28b18'
branch_labels = None
depends_on = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
try:
with op.batch_alter_table("subscribe") as batch_op:
batch_op.add_column(sa.Column('quality', sa.String, nullable=True))
batch_op.add_column(sa.Column('resolution', sa.String, nullable=True))
batch_op.add_column(sa.Column('effect', sa.String, nullable=True))
except Exception as e:
pass
# ### end Alembic commands ###
def downgrade() -> None:
pass

View File

@@ -53,4 +53,5 @@ requests_cache~=0.5.2
parse~=1.19.0
docker~=6.1.3
cachetools~=5.3.1
fast-bencode==1.1.3
fast-bencode~=1.1.3
pystray~=0.19.5

View File

@@ -1 +1 @@
APP_VERSION = 'v1.2.9'
APP_VERSION = 'v1.3.4'

View File

@@ -1,11 +1,12 @@
# -*- mode: python ; coding: utf-8 -*-
def collect_pkg_data(package, include_py_files=False, subdir=None):
def collect_pkg_data(package: str, include_py_files: bool = False, subdir: str = None):
"""
Collect all data files from the given package.
"""
import os
from PyInstaller.utils.hooks import get_package_paths, remove_prefix, PY_IGNORE_EXTENSIONS
from pathlib import Path
from PyInstaller.utils.hooks import get_package_paths, PY_IGNORE_EXTENSIONS
from PyInstaller.building.datastruct import TOC
# Accept only strings as packages.
if type(package) is not str:
@@ -13,36 +14,36 @@ def collect_pkg_data(package, include_py_files=False, subdir=None):
pkg_base, pkg_dir = get_package_paths(package)
if subdir:
pkg_dir = os.path.join(pkg_dir, subdir)
pkg_path = Path(pkg_dir) / subdir
else:
pkg_path = Path(pkg_dir)
# Walk through all file in the given package, looking for data files.
data_toc = TOC()
for dir_path, dir_names, files in os.walk(pkg_dir):
for f in files:
extension = os.path.splitext(f)[1]
if include_py_files or (extension not in PY_IGNORE_EXTENSIONS):
source_file = os.path.join(dir_path, f)
dest_folder = remove_prefix(dir_path, os.path.dirname(pkg_base) + os.sep)
dest_file = os.path.join(dest_folder, f)
data_toc.append((dest_file, source_file, 'DATA'))
for file in pkg_path.rglob('*'):
if file.is_file():
extension = file.suffix
if not include_py_files and (extension in PY_IGNORE_EXTENSIONS):
continue
data_toc.append((str(file.relative_to(pkg_base)), str(file), 'DATA'))
return data_toc
def collect_local_submodules(package):
def collect_local_submodules(package: str):
"""
Collect all local submodules from the given package.
"""
import os
base_dir = '..'
package_dir = os.path.join(base_dir, package.replace('.', os.sep))
submodules = []
for dir_path, dir_names, files in os.walk(package_dir):
for f in files:
if f == '__init__.py':
submodules.append(f"{package}.{os.path.basename(dir_path)}")
elif f.endswith('.py'):
submodules.append(f"{package}.{os.path.basename(dir_path)}.{os.path.splitext(f)[0]}")
for d in dir_names:
submodules.append(f"{package}.{os.path.basename(dir_path)}.{d}")
from pathlib import Path
package_dir = Path(package.replace('.', os.sep))
submodules = [package]
# Walk through all file in the given package, looking for data files.
for file in package_dir.rglob('*.py'):
if file.name == '__init__.py':
module = f"{file.parent}".replace(os.sep, '.')
else:
module = f"{file.parent}.{file.stem}".replace(os.sep, '.')
if module not in submodules:
submodules.append(module)
return submodules
@@ -50,8 +51,7 @@ hiddenimports = [
'passlib.handlers.bcrypt',
'app.modules',
'app.plugins',
] + collect_local_submodules('app.modules') \
+ collect_local_submodules('app.plugins')
] + collect_local_submodules('app.modules') + collect_local_submodules('app.plugins')
block_cipher = None
@@ -75,8 +75,9 @@ exe = EXE(
a.scripts,
a.binaries,
a.zipfiles,
a.datas,
a.datas + [('./app.ico', './app.ico', 'DATA')],
collect_pkg_data('config'),
collect_pkg_data('nginx'),
collect_pkg_data('cf_clearance'),
collect_pkg_data('database', include_py_files=True),
[],
@@ -87,7 +88,7 @@ exe = EXE(
upx=True,
upx_exclude=[],
runtime_tmpdir=None,
console=True,
console=False,
disable_windowed_traceback=False,
argv_emulation=False,
target_arch=None,