Compare commits

..

105 Commits
v1.2.7-1 ... v

Author SHA1 Message Date
jxxghp
c37e02009f fix build 2023-10-09 19:39:19 +08:00
jxxghp
a96b8a4e07 fix build 2023-10-09 19:37:49 +08:00
jxxghp
79b4d5fb8e fix build 2023-10-09 19:33:05 +08:00
jxxghp
de128f5e6a fix 2023-10-09 15:04:54 +08:00
jxxghp
ef8ddcde07 fix 2023-10-09 14:46:23 +08:00
jxxghp
eaff557d70 windows package 2023-10-09 14:11:03 +08:00
jxxghp
38f7a31200 windows package 2023-10-09 13:40:09 +08:00
jxxghp
97f16289c9 windows package 2023-10-09 12:57:52 +08:00
jxxghp
e15f5ab93e Merge pull request #767 from thsrite/main 2023-10-09 11:50:18 +08:00
thsrite
15fd312765 fix #766 2023-10-09 11:41:59 +08:00
jxxghp
eea316865f fix #753 2023-10-09 11:05:53 +08:00
jxxghp
05bbfbbd54 Merge pull request #765 from thsrite/main
fix #701
2023-10-09 10:09:46 +08:00
thsrite
6039a9d0d5 fix 2023-10-09 10:06:04 +08:00
thsrite
0159b02916 fix 8bbd4dc9 2023-10-09 09:50:30 +08:00
thsrite
8bbd4dc913 fix #701 2023-10-09 09:37:16 +08:00
jxxghp
9e3ded6ad5 Merge pull request #764 from thsrite/main
fix 下载消息发送所有
2023-10-09 09:27:40 +08:00
jxxghp
fe63275a6b fix bug 2023-10-09 09:09:59 +08:00
jxxghp
81ed465607 fix #759 2023-10-09 09:05:48 +08:00
thsrite
d9aa281ce1 fix 下载消息发送所有 2023-10-09 09:02:01 +08:00
jxxghp
56648d664e fix README.md 2023-10-08 17:03:20 +08:00
jxxghp
da49d5577a fix app.env 2023-10-08 16:41:53 +08:00
jxxghp
f3dbdefdb1 fix README.md 2023-10-08 16:26:22 +08:00
jxxghp
d4302759e6 fix README.md 2023-10-08 16:25:27 +08:00
jxxghp
914f192fb2 test 2023-10-08 16:24:40 +08:00
jxxghp
522b554e36 fix README.md 2023-10-08 16:12:27 +08:00
jxxghp
4c54ab5319 fix README.md 2023-10-08 15:58:42 +08:00
jxxghp
d7f4ed069c Merge pull request #757 from lightolly/dev/20231008 2023-10-08 14:04:00 +08:00
olly
7ea0c5ee4c fix:演职员刮削优化
1.豆瓣查询增加速率限制后重试
2.全中文演职员跳过处理
2023-10-08 14:00:55 +08:00
jxxghp
e773a9d9d4 Merge pull request #755 from thsrite/customization 2023-10-08 12:22:56 +08:00
thsrite
b570542fab fix 2023-10-08 12:16:45 +08:00
thsrite
09716e98ba feat 自定义占位符 2023-10-08 11:59:52 +08:00
jxxghp
9236b361e2 Merge remote-tracking branch 'origin/main' 2023-10-08 06:56:57 +08:00
jxxghp
f281d8c068 fix #749 2023-10-08 06:56:45 +08:00
jxxghp
83ed17d5c1 Merge pull request #752 from thsrite/main
feat 药丸论坛签到
2023-10-07 20:54:25 +08:00
jxxghp
e2671dd4ed fix dockerfile 2023-10-07 05:52:43 -07:00
thsrite
4c4d640331 feat 药丸论坛签到 2023-10-07 20:51:32 +08:00
jxxghp
6c4307c918 fix #750 2023-10-07 05:29:23 -07:00
jxxghp
5a7062c699 fix 2023-10-07 05:03:19 -07:00
jxxghp
7da01f7404 fix 2023-10-07 05:03:06 -07:00
jxxghp
2b695cb8c6 fix #748 2023-10-07 04:59:07 -07:00
jxxghp
599817eec7 test 2023-10-07 04:44:06 -07:00
jxxghp
11fa33be0a test 2023-10-07 04:33:52 -07:00
jxxghp
b5ac9d4ce4 fix app.env 2023-10-07 04:08:19 -07:00
jxxghp
78f0ac0042 fix README.md 2023-10-07 04:01:21 -07:00
jxxghp
00ecd7adc5 更新 app.env 2023-10-07 18:24:02 +08:00
jxxghp
c39cb3bffc 更新 app.env 2023-10-07 18:22:32 +08:00
jxxghp
2fa902bfff Merge pull request #747 from thsrite/main 2023-10-07 18:09:25 +08:00
thsrite
f8bcd351ae fix 依赖 2023-10-07 18:08:33 +08:00
jxxghp
6013d99bf6 v1.2.9 2023-10-07 17:21:08 +08:00
jxxghp
e7c3977f7b fix README.md 2023-10-07 12:26:16 +08:00
jxxghp
47e1218fe0 fix #732 2023-10-07 10:31:33 +08:00
jxxghp
a71a95892f fix 2023-10-05 23:23:33 -07:00
jxxghp
b5f53e309f fix 2023-10-05 23:12:46 -07:00
jxxghp
3164ba2d98 fix #734 2023-10-05 17:57:47 -07:00
jxxghp
89854d188d fix actor thumb 2023-10-05 17:49:31 -07:00
jxxghp
79c7475435 fix tmdb lru cache 2023-10-05 17:41:02 -07:00
jxxghp
2ee477c35e fix requests session stream 2023-10-05 17:32:23 -07:00
jxxghp
5bcd90c569 fix requests session 2023-10-05 17:21:59 -07:00
jxxghp
1a49c7c59e try fix 2023-10-05 07:44:21 +08:00
jxxghp
d995932a1c fix personmeta 2023-10-04 14:34:42 +08:00
jxxghp
1b0bbbbbfd fix webhook plugin 2023-10-04 08:01:30 +08:00
jxxghp
2aa93fa341 fix webhook plugin 2023-10-04 08:01:02 +08:00
jxxghp
a970f90c6f Merge remote-tracking branch 'origin/main' 2023-10-04 07:33:38 +08:00
jxxghp
44f612fed5 v1.2.8 2023-10-04 07:33:31 +08:00
jxxghp
564a48dd8f fix 2023-10-03 16:24:27 -07:00
jxxghp
9d029de56a fix 2023-10-03 16:23:05 -07:00
jxxghp
2dd3fc5d8c fix #722 2023-10-03 16:19:43 -07:00
jxxghp
9c335dbdfb fix #724 2023-10-03 16:17:19 -07:00
jxxghp
0e30ea92f1 fix #726 2023-10-03 16:14:04 -07:00
jxxghp
a0ced4e43c 认证站点支持xingtan.one 2023-10-03 16:05:50 -07:00
jxxghp
cfaaf65edc support xingtan 2023-10-04 07:03:13 +08:00
jxxghp
35be18bb1a fix 2023-10-01 21:55:49 +08:00
jxxghp
02296e1758 fix 2023-10-01 21:46:09 +08:00
jxxghp
0b84b05cdd fix #705 2023-10-01 21:36:33 +08:00
jxxghp
99e3d5acca fix #707 2023-10-01 21:33:58 +08:00
jxxghp
8001511484 fix #690 2023-10-01 21:23:41 +08:00
jxxghp
8420b2ea85 fix personmeta 2023-10-01 21:08:16 +08:00
jxxghp
9af883acbb fix personmeta 2023-10-01 18:27:26 +08:00
jxxghp
e21ba5ad51 fix personmeta 2023-10-01 18:11:01 +08:00
jxxghp
1293fafd34 fix 2023-10-01 16:47:47 +08:00
jxxghp
4bcc6bd733 fix bug 2023-10-01 14:18:56 +08:00
jxxghp
53a514feb6 fix personmeta支持豆瓣 2023-10-01 14:16:36 +08:00
jxxghp
e697889aad fix 2023-10-01 12:37:18 +08:00
jxxghp
8b0fba054e Merge remote-tracking branch 'origin/main' 2023-10-01 12:28:46 +08:00
jxxghp
32ff385444 fix personmeta 2023-10-01 12:28:41 +08:00
jxxghp
8456c7f4a3 Merge pull request #718 from DDS-Derek/main
功能改进增加选择类型
2023-10-01 11:55:56 +08:00
jxxghp
fcbfb63645 fix personmeta 2023-10-01 11:52:25 +08:00
DDSDerek
1fa7d15982 fix: issue 2023-10-01 10:07:51 +08:00
DDSDerek
a173978f6b feat: optimize issue 2023-10-01 10:06:11 +08:00
jxxghp
2f069afc77 fix personmeta 2023-10-01 08:15:19 +08:00
jxxghp
ea998b4e41 fix personmeta 2023-10-01 07:53:50 +08:00
jxxghp
ba27d02854 fix 2023-09-30 20:40:48 +08:00
jxxghp
f78df58906 fix 2023-09-30 20:36:51 +08:00
jxxghp
308683a7e9 fix scraper 2023-09-30 20:27:48 +08:00
jxxghp
b3f4a6f251 fix mediaserver 2023-09-30 15:27:01 +08:00
jxxghp
d1841d8f15 fix mediaserver 2023-09-30 15:16:53 +08:00
jxxghp
c8d6de3e9b Merge pull request #706 from song-zhou/main 2023-09-29 22:04:22 +08:00
Elsie Weber
938f5c8cea Merge branch 'jxxghp:main' into main 2023-09-29 21:57:50 +08:00
songzhou
d166930b0a 修复手动执行订阅搜索服务无效bug 2023-09-29 21:57:41 +08:00
jxxghp
e1ac3c0d15 fix personmeta 2023-09-29 12:01:00 +08:00
jxxghp
59da489e05 Merge pull request #704 from developer-wlj/wlj0909 2023-09-29 10:30:16 +08:00
developer-wlj
be12c736fb Merge branch 'jxxghp:main' into wlj0909 2023-09-29 10:14:36 +08:00
jxxghp
71c52aae7b Merge pull request #703 from DDS-Derek/main 2023-09-29 10:12:32 +08:00
mayun110
dbfe2af53c fix PersonMeta插件jellyfin无法显示头像问题 2023-09-29 10:11:18 +08:00
DDSRem
cca898f5b6 feat: docker build use cache 2023-09-29 09:31:47 +08:00
82 changed files with 3137 additions and 1119 deletions

View File

@@ -14,6 +14,18 @@ body:
description: 目前使用的程序版本
validations:
required: true
- type: dropdown
id: type
attributes:
label: 功能改进类型
description: 你需要在下面哪个方面改进功能
options:
- 主程序
- 插件
- Docker
- 其他
validations:
required: true
- type: textarea
id: feature-request
attributes:

65
.github/workflows/build-windows.yml vendored Normal file
View File

@@ -0,0 +1,65 @@
name: MoviePilot Windows Builder
on:
workflow_dispatch:
push:
branches:
- main
paths:
- version.py
jobs:
Windows-build:
runs-on: windows-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Release Version
id: release_version
run: |
$app_version = Select-String -Path "version.py" -Pattern "APP_VERSION\s=\s'v(.*)'" | ForEach-Object { $_.Matches.Groups[1].Value }
$env:GITHUB_ENV += "app_version=$app_version"
- name: Init Python 3.11.4
uses: actions/setup-python@v4
with:
python-version: '3.11.4'
- name: Install Dependent Packages
run: |
python -m pip install --upgrade pip
pip install wheel pyinstaller
pip install -r requirements.txt
shell: pwsh
- name: Pyinstaller
run: |
pyinstaller windows.spec
shell: pwsh
- name: Upload Windows File
uses: actions/upload-artifact@v3
with:
name: windows
path: dist/MoviePilot.exe
- name: Generate Release
id: generate_release
uses: actions/create-release@latest
with:
tag_name: v${{ env.app_version }}
release_name: v${{ env.app_version }}
body: ${{ github.event.commits[0].message }}
draft: false
prerelease: false
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Upload Release Asset
uses: dwenegar/upload-release-assets@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
release_id: ${{ steps.generate_release.outputs.id }}
assets_path: |
dist/MoviePilot.exe

View File

@@ -1,4 +1,4 @@
name: MoviePilot Builder
name: MoviePilot Docker Builder
on:
workflow_dispatch:
push:
@@ -8,23 +8,20 @@ on:
- version.py
jobs:
build:
Docker-build:
runs-on: ubuntu-latest
name: Build Docker Image
steps:
-
name: Checkout
- name: Checkout
uses: actions/checkout@v4
-
name: Release version
- name: Release version
id: release_version
run: |
app_version=$(cat version.py |sed -ne "s/APP_VERSION\s=\s'v\(.*\)'/\1/gp")
echo "app_version=$app_version" >> $GITHUB_ENV
-
name: Docker meta
- name: Docker meta
id: meta
uses: docker/metadata-action@v5
with:
@@ -33,23 +30,19 @@ jobs:
type=raw,value=${{ env.app_version }}
type=raw,value=latest
-
name: Set Up QEMU
- name: Set Up QEMU
uses: docker/setup-qemu-action@v3
-
name: Set Up Buildx
- name: Set Up Buildx
uses: docker/setup-buildx-action@v3
-
name: Login DockerHub
- name: Login DockerHub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
-
name: Build Image
- name: Build Image
uses: docker/build-push-action@v5
with:
context: .
@@ -62,3 +55,5 @@ jobs:
MOVIEPILOT_VERSION=${{ env.app_version }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
cache-from: type=gha, scope=${{ github.workflow }}
cache-to: type=gha, scope=${{ github.workflow }}

View File

@@ -1,36 +0,0 @@
name: MoviePilot Release
on:
workflow_dispatch:
push:
branches:
- main
paths:
- version.py
jobs:
build:
runs-on: ubuntu-latest
name: Build Docker Image
steps:
-
name: Checkout
uses: actions/checkout@v4
-
name: Release Version
id: release_version
run: |
app_version=$(cat version.py |sed -ne "s/APP_VERSION\s=\s'v\(.*\)'/\1/gp")
echo "app_version=$app_version" >> $GITHUB_ENV
-
name: Generate Release
uses: actions/create-release@latest
with:
tag_name: v${{ env.app_version }}
release_name: v${{ env.app_version }}
body: ${{ github.event.commits[0].message }}
draft: false
prerelease: false
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -3,39 +3,16 @@ ARG MOVIEPILOT_VERSION
ENV LANG="C.UTF-8" \
HOME="/moviepilot" \
TERM="xterm" \
TZ="Asia/Shanghai" \
PUID=0 \
PGID=0 \
UMASK=000 \
MOVIEPILOT_AUTO_UPDATE=true \
MOVIEPILOT_AUTO_UPDATE_DEV=false \
PORT=3001 \
NGINX_PORT=3000 \
CONFIG_DIR="/config" \
API_TOKEN="moviepilot" \
AUTH_SITE="iyuu" \
DOWNLOAD_PATH="/downloads" \
DOWNLOAD_CATEGORY="false" \
TORRENT_TAG="MOVIEPILOT" \
LIBRARY_PATH="" \
LIBRARY_CATEGORY="false" \
TRANSFER_TYPE="copy" \
COOKIECLOUD_HOST="https://movie-pilot.org/cookiecloud" \
COOKIECLOUD_KEY="" \
COOKIECLOUD_PASSWORD="" \
MESSAGER="telegram" \
TELEGRAM_TOKEN="" \
TELEGRAM_CHAT_ID="" \
DOWNLOADER="qbittorrent" \
QB_HOST="127.0.0.1:8080" \
QB_USER="admin" \
QB_PASSWORD="adminadmin" \
MEDIASERVER="emby" \
EMBY_HOST="http://127.0.0.1:8096" \
EMBY_API_KEY=""
MOVIEPILOT_AUTO_UPDATE=true \
MOVIEPILOT_AUTO_UPDATE_DEV=false \
CONFIG_DIR="/config"
WORKDIR "/app"
COPY . .
RUN apt-get update \
RUN apt-get update -y \
&& apt-get -y install \
musl-dev \
nginx \
@@ -56,26 +33,20 @@ RUN apt-get update \
elif [ "$(uname -m)" = "aarch64" ]; \
then ln -s /usr/lib/aarch64-linux-musl/libc.so /lib/libc.musl-aarch64.so.1; \
fi \
&& cp -f /app/nginx.conf /etc/nginx/nginx.template.conf \
&& cp -f /app/update /usr/local/bin/mp_update \
&& cp -f /app/entrypoint /entrypoint \
&& chmod +x /entrypoint /usr/local/bin/mp_update \
&& mkdir -p ${HOME} /var/lib/haproxy/server-state \
&& groupadd -r moviepilot -g 911 \
&& useradd -r moviepilot -g moviepilot -d ${HOME} -s /bin/bash -u 911 \
&& apt-get autoremove -y \
&& apt-get clean -y \
&& rm -rf \
/tmp/* \
/moviepilot/.cache \
/var/lib/apt/lists/* \
/var/tmp/*
COPY requirements.txt requirements.txt
RUN apt-get update -y \
&& apt-get install -y build-essential \
&& pip install --upgrade pip \
&& pip install Cython \
&& pip install -r requirements.txt \
&& playwright install-deps chromium \
&& python_ver=$(python3 -V | awk '{print $2}') \
&& echo "/app/" > /usr/local/lib/python${python_ver%.*}/site-packages/app.pth \
&& echo 'fs.inotify.max_user_watches=5242880' >> /etc/sysctl.conf \
&& echo 'fs.inotify.max_user_instances=5242880' >> /etc/sysctl.conf \
&& locale-gen zh_CN.UTF-8 \
&& FRONTEND_VERSION=$(curl -sL "https://api.github.com/repos/jxxghp/MoviePilot-Frontend/releases/latest" | jq -r .tag_name) \
&& curl -sL "https://github.com/jxxghp/MoviePilot-Frontend/releases/download/${FRONTEND_VERSION}/dist.zip" | busybox unzip -d / - \
&& mv /dist /public \
&& apt-get remove -y build-essential \
&& apt-get autoremove -y \
&& apt-get clean -y \
@@ -84,6 +55,22 @@ RUN apt-get update \
/moviepilot/.cache \
/var/lib/apt/lists/* \
/var/tmp/*
COPY . .
RUN cp -f /app/nginx.conf /etc/nginx/nginx.template.conf \
&& cp -f /app/update /usr/local/bin/mp_update \
&& cp -f /app/entrypoint /entrypoint \
&& chmod +x /entrypoint /usr/local/bin/mp_update \
&& mkdir -p ${HOME} /var/lib/haproxy/server-state \
&& groupadd -r moviepilot -g 911 \
&& useradd -r moviepilot -g moviepilot -d ${HOME} -s /bin/bash -u 911 \
&& python_ver=$(python3 -V | awk '{print $2}') \
&& echo "/app/" > /usr/local/lib/python${python_ver%.*}/site-packages/app.pth \
&& echo 'fs.inotify.max_user_watches=5242880' >> /etc/sysctl.conf \
&& echo 'fs.inotify.max_user_instances=5242880' >> /etc/sysctl.conf \
&& locale-gen zh_CN.UTF-8 \
&& FRONTEND_VERSION=$(curl -sL "https://api.github.com/repos/jxxghp/MoviePilot-Frontend/releases/latest" | jq -r .tag_name) \
&& curl -sL "https://github.com/jxxghp/MoviePilot-Frontend/releases/download/${FRONTEND_VERSION}/dist.zip" | busybox unzip -d / - \
&& mv /dist /public
EXPOSE 3000
VOLUME [ "/config" ]
ENTRYPOINT [ "/entrypoint" ]

124
README.md
View File

@@ -15,23 +15,23 @@ Dockerhttps://hub.docker.com/r/jxxghp/moviepilot
## 安装
1. **安装CookieCloud插件**
### 1. **安装CookieCloud插件**
站点信息需要通过CookieCloud同步获取因此需要安装CookieCloud插件将浏览器中的站点Cookie数据同步到云端后再同步到MoviePilot使用。 插件下载地址请点击 [这里](https://github.com/easychen/CookieCloud/releases)。
2. **安装CookieCloud服务端可选**
### 2. **安装CookieCloud服务端可选**
MoviePilot内置了公共CookieCloud服务器如果需要自建服务可参考 [CookieCloud](https://github.com/easychen/CookieCloud) 项目进行搭建docker镜像请点击 [这里](https://hub.docker.com/r/easychen/cookiecloud)。
**声明:** 本项目不会收集用户敏感数据Cookie同步也是基于CookieCloud项目实现非本项目提供的能力。技术角度上CookieCloud采用端到端加密在个人不泄露`用户KEY``端对端加密密码`的情况下第三方无法窃取任何用户信息(包括服务器持有者)。如果你不放心,可以不使用公共服务或者不使用本项目,但如果使用后发生了任何信息泄露与本项目无关!
3. **安装配套管理软件**
### 3. **安装配套管理软件**
MoviePilot需要配套下载器和媒体服务器配合使用。
- 下载器支持qBittorrent、TransmissionQB版本号要求>= 4.3.9TR版本号要求>= 3.0推荐使用QB。
- 媒体服务器支持Jellyfin、Emby、Plex推荐使用Emby。
4. **安装MoviePilot**
### 4. **安装MoviePilot**
目前仅提供docker镜像点击 [这里](https://hub.docker.com/r/jxxghp/moviepilot) 或执行命令:
@@ -41,51 +41,56 @@ docker pull jxxghp/moviepilot:latest
## 配置
项目的所有配置均通过环境变量进行设置,部分环境建立容器后会自动显示待配置项,如未自动显示配置项则需要手动增加对应环境变量。
项目的所有配置均通过环境变量进行设置,支持两种配置方式:
- 在docker环境变量部分进行参数配置部分环境建立容器后会自动显示待配置项如未自动显示配置项则需要手动增加对应环境变量。
- 下载 [app.env](https://github.com/jxxghp/MoviePilot/raw/main/config/app.env) 文件,修改好配置后放置到配置文件映射路径根目录,配置项可根据说明自主增减。
配置文件映射路径:`/config`
配置文件映射路径:`/config`,配置项生效优先级:环境变量 > env文件 > 默认值,部分参数如路径映射、站点认证、权限端口等必须通过环境变量进行配置。
> $\color{red}{*}$ 号标识的为必填项,其它为可选项,可选项可删除配置变量从而使用默认值。
### 1. **基础设置**
- **PUID**:运行程序用户的`uid`,默认`0`
- **PGID**:运行程序用户的`gid`,默认`0`
- **UMASK**:掩码权限,默认`000`,可以考虑设置为`022`
- **MOVIEPILOT_AUTO_UPDATE**:重启更新,`true`/`false`,默认`true` **注意:如果出现网络问题可以配置`PROXY_HOST`,具体看下方`PROXY_HOST`解释**
- **MOVIEPILOT_AUTO_UPDATE_DEV**:重启时更新到未发布的开发版本代码,`true`/`false`,默认`false`
- **NGINX_PORT** WEB服务端口默认`3000`可自行修改不能与API服务端口冲突
- **PORT** API服务端口默认`3001`可自行修改不能与WEB服务端口冲突
- **SUPERUSER** 超级管理员用户名,默认`admin`,安装后使用该用户登录后台管理界面
- **SUPERUSER_PASSWORD** 超级管理员初始密码,默认`password`,建议修改为复杂密码
- **API_TOKEN** API密钥默认`moviepilot`在媒体服务器Webhook、微信回调等地址配置中需要加上`?token=`该值,建议修改为复杂字符串
- **PROXY_HOST** 网络代理可选访问themoviedb或者重启更新需要使用代理访问格式为`http(s)://ip:port`
- **NGINX_PORT $\color{red}{*}$ ** WEB服务端口默认`3000`可自行修改不能与API服务端口冲突仅支持环境变量配置
- **PORT $\color{red}{*}$ ** API服务端口默认`3001`可自行修改不能与WEB服务端口冲突仅支持环境变量配置
- **PUID**:运行程序用户的`uid`,默认`0`(仅支持环境变量配置)
- **PGID**:运行程序用户的`gid`,默认`0`(仅支持环境变量配置)
- **UMASK**:掩码权限,默认`000`,可以考虑设置为`022`(仅支持环境变量配置)
- **MOVIEPILOT_AUTO_UPDATE**:重启更新,`true`/`false`,默认`true` **注意:如果出现网络问题可以配置`PROXY_HOST`,具体看下方`PROXY_HOST`解释**(仅支持环境变量配置)
- **MOVIEPILOT_AUTO_UPDATE_DEV**:重启时更新到未发布的开发版本代码,`true`/`false`,默认`false`(仅支持环境变量配置)
---
- **SUPERUSER $\color{red}{*}$ ** 超级管理员用户名,默认`admin`,安装后使用该用户登录后台管理界面
- **SUPERUSER_PASSWORD $\color{red}{*}$ ** 超级管理员初始密码,默认`password`,建议修改为复杂密码
- **API_TOKEN $\color{red}{*}$ ** API密钥默认`moviepilot`在媒体服务器Webhook、微信回调等地址配置中需要加上`?token=`该值,建议修改为复杂字符串
- **PROXY_HOST** 网络代理访问themoviedb或者重启更新需要使用代理访问格式为`http(s)://ip:port``socks5://user:pass@host:port`(可选)
- **TMDB_API_DOMAIN** TMDB API地址默认`api.themoviedb.org`,也可配置为`api.tmdb.org`或其它中转代理服务地址,能连通即可
- **DOWNLOAD_PATH** 下载保存目录,**注意:需要将`moviepilot``下载器`的映射路径保持一致**,否则会导致下载文件无法转移
- **DOWNLOAD_MOVIE_PATH** 电影下载保存目录,不设置则下载到`DOWNLOAD_PATH`
- **DOWNLOAD_TV_PATH** 电视剧下载保存目录,不设置则下载到`DOWNLOAD_PATH`
- **DOWNLOAD_ANIME_PATH** 动漫下载保存目录,不设置则下载到`DOWNLOAD_PATH`
- **DOWNLOAD_CATEGORY** 下载二级分类开关,`true`/`false`,默认`false`,开启后会根据配置`category.yaml`自动在下载目录下建立二级目录分类
- **DOWNLOAD_SUBTITLE** 下载站点字幕,`true`/`false`,默认`true`
- **REFRESH_MEDIASERVER** 入库刷新媒体库,`true`/`false`,默认`true`
- **TMDB_IMAGE_DOMAIN** TMDB图片地址默认`image.tmdb.org`可配置为其它中转代理以加速TMDB图片显示`static-mdb.v.geilijiasu.com`
---
- **SCRAP_METADATA** 刮削入库的媒体文件,`true`/`false`,默认`true`
- **SCRAP_SOURCE** 刮削元数据及图片使用的数据源,`themoviedb`/`douban`,默认`themoviedb`
- **SCRAP_FOLLOW_TMDB** 新增已入库媒体是否跟随TMDB信息变化`true`/`false`,默认`true`
- **TORRENT_TAG** 种子标签,默认为`MOVIEPILOT`设置后只有MoviePilot添加的下载才会处理留空所有下载器中的任务均会处理
- **LIBRARY_PATH** 媒体库目录,多个目录使用`,`分隔
- **LIBRARY_MOVIE_NAME** 电影媒体库目录名,默认`电影`
- **LIBRARY_TV_NAME** 电视剧媒体库目录名,默认`电视剧`
- **LIBRARY_ANIME_NAME** 动漫媒体库目录,默认`电视剧/动漫`
- **LIBRARY_CATEGORY** 媒体库二级分类开关,`true`/`false`,默认`false`,开启后会根据配置`category.yaml`自动在媒体库目录下建立二级目录分类
- **TRANSFER_TYPE** 转移方式,支持`link`/`copy`/`move`/`softlink` **注意:在`link`和`softlink`转移方式下,转移后的文件会继承源文件的权限掩码,不受`UMASK`影响**
- **COOKIECLOUD_HOST** CookieCloud服务器地址格式`http(s)://ip:port`,不配置默认使用内建服务器`https://movie-pilot.org/cookiecloud`
- **COOKIECLOUD_KEY** CookieCloud用户KEY
- **COOKIECLOUD_PASSWORD** CookieCloud端对端加密密码
- **COOKIECLOUD_INTERVAL** CookieCloud同步间隔(分钟)
- **OCR_HOST** OCR识别服务器地址格式`http(s)://ip:port`用于识别站点二维码实现自动登录获取Cookie等不配置默认使用内建服务器`https://movie-pilot.org`,可使用 [这个镜像](https://hub.docker.com/r/jxxghp/moviepilot-ocr) 自行搭建。
- **USER_AGENT** CookieCloud对应的浏览器UA可选,设置后可增加连接站点的成功率,同步站点后可以在管理界面中修改
- **AUTO_DOWNLOAD_USER** 交互搜索自动下载用户ID使用,分割
---
- **TRANSFER_TYPE $\color{red}{*}$ ** 整理转移方式,支持`link`/`copy`/`move`/`softlink` **注意:在`link`和`softlink`转移方式下,转移后的文件会继承源文件的权限掩码,不受`UMASK`影响**
- **LIBRARY_PATH $\color{red}{*}$ ** 媒体库目录,多个目录使用`,`分隔
- **LIBRARY_MOVIE_NAME** 电媒体库目录名称(不是完整路径),默认`电`
- **LIBRARY_TV_NAME** 电视剧媒体库目录称(不是完整路径),默认`电视剧`
- **LIBRARY_ANIME_NAME** 动漫媒体库目录称(不是完整路径),默认`电视剧/动漫`
- **LIBRARY_CATEGORY** 媒体库二级分类开关,`true`/`false`,默认`false`,开启后会根据配置 [category.yaml](https://github.com/jxxghp/MoviePilot/raw/main/config/category.yaml) 自动在媒体库目录下建立二级目录分类
---
- **COOKIECLOUD_HOST $\color{red}{*}$ ** CookieCloud服务器地址格式`http(s)://ip:port`,不配置默认使用内建服务器`https://movie-pilot.org/cookiecloud`
- **COOKIECLOUD_KEY $\color{red}{*}$ ** CookieCloud用户KEY
- **COOKIECLOUD_PASSWORD $\color{red}{*}$ ** CookieCloud端对端加密密码
- **COOKIECLOUD_INTERVAL $\color{red}{*}$ ** CookieCloud同步间隔分钟
- **USER_AGENT $\color{red}{*}$ ** CookieCloud保存Cookie对应的浏览器UA建议配置,设置后可增加连接站点的成功率,同步站点后可以在管理界面中修改
- **OCR_HOST** OCR识别服务器地址格式`http(s)://ip:port`用于识别站点验证码实现自动登录获取Cookie等不配置默认使用内建服务器`https://movie-pilot.org`,可使用 [这个镜像](https://hub.docker.com/r/jxxghp/moviepilot-ocr) 自行搭建。
---
- **SUBSCRIBE_MODE** 订阅模式,`rss`/`spider`,默认`spider``rss`模式通过定时刷新RSS来匹配订阅RSS地址会自动获取也可手动维护对站点压力小同时可设置订阅刷新周期24小时运行但订阅和下载通知不能过滤和显示免费推荐使用rss模式。
- **SUBSCRIBE_RSS_INTERVAL** RSS订阅模式刷新时间间隔分钟默认`30`分钟不能小于5分钟。
- **SUBSCRIBE_SEARCH** 订阅搜索,`true`/`false`,默认`false`开启后会每隔24小时对所有订阅进行全量搜索以补齐缺失剧集一般情况下正常订阅即可订阅搜索只做为兜底会增加站点压力不建议开启
- **MESSAGER** 消息通知渠道,支持 `telegram`/`wechat`/`slack`/`synologychat`,开启多个渠道时使用`,`分隔。同时还需要配置对应渠道的环境变量,非对应渠道的变量可删除,推荐使用`telegram`
- **SEARCH_SOURCE** 媒体信息搜索来源,`themoviedb`/`douban`,默认`themoviedb`
---
- **AUTO_DOWNLOAD_USER** 远程交互搜索时自动择优下载的用户ID多个用户使用,分割,未设置需要选择资源或者回复`0`
- **MESSAGER $\color{red}{*}$ ** 消息通知渠道,支持 `telegram`/`wechat`/`slack`/`synologychat`,开启多个渠道时使用`,`分隔。同时还需要配置对应渠道的环境变量,非对应渠道的变量可删除,推荐使用`telegram`
- `wechat`设置项:
@@ -102,21 +107,29 @@ docker pull jxxghp/moviepilot:latest
- **TELEGRAM_TOKEN** Telegram Bot Token
- **TELEGRAM_CHAT_ID** Telegram Chat ID
- **TELEGRAM_USERS** Telegram 用户ID多个使用,分隔只有用户ID在列表中才可以使用Bot如未设置则均可以使用Bot
- **TELEGRAM_ADMINS** Telegram 管理员ID多个使用,分隔只有管理员才可以操作Bot菜单如未设置则均可以操作菜单
- **TELEGRAM_ADMINS** Telegram 管理员ID多个使用,分隔只有管理员才可以操作Bot菜单如未设置则均可以操作菜单(可选)
- `slack`设置项:
- **SLACK_OAUTH_TOKEN** Slack Bot User OAuth Token
- **SLACK_APP_TOKEN** Slack App-Level Token
- **SLACK_CHANNEL** Slack 频道名称,默认`全体`
- **SLACK_CHANNEL** Slack 频道名称,默认`全体`(可选)
- `synologychat`设置项:
- **SYNOLOGYCHAT_WEBHOOK** 在Synology Chat中创建机器人获取机器人`传入URL`
- **SYNOLOGYCHAT_TOKEN** SynologyChat机器人`令牌`
- **DOWNLOADER** 下载器,支持`qbittorrent`/`transmission`QB版本号要求>= 4.3.9TR版本号要求>= 3.0,同时还需要配置对应渠道的环境变量,非对应渠道的变量可删除,推荐使用`qbittorrent`
---
- **DOWNLOAD_PATH $\color{red}{*}$ ** 下载保存目录,**注意:需要将`moviepilot``下载器`的映射路径保持一致**,否则会导致下载文件无法转移
- **DOWNLOAD_MOVIE_PATH** 电影下载保存目录路径,不设置则下载到`DOWNLOAD_PATH`
- **DOWNLOAD_TV_PATH** 电视剧下载保存目录路径,不设置则下载到`DOWNLOAD_PATH`
- **DOWNLOAD_ANIME_PATH** 动漫下载保存目录路径,不设置则下载到`DOWNLOAD_PATH`
- **DOWNLOAD_CATEGORY** 下载二级分类开关,`true`/`false`,默认`false`,开启后会根据配置 [category.yaml](https://github.com/jxxghp/MoviePilot/raw/main/config/category.yaml) 自动在下载目录下建立二级目录分类
- **DOWNLOAD_SUBTITLE** 下载站点字幕,`true`/`false`,默认`true`
- **DOWNLOADER_MONITOR** 下载器监控,`true`/`false`,默认为`true`,开启后下载完成时才会自动整理入库
- **TORRENT_TAG** 下载器种子标签,默认为`MOVIEPILOT`设置后只有MoviePilot添加的下载才会处理留空所有下载器中的任务均会处理
- **DOWNLOADER $\color{red}{*}$ ** 下载器,支持`qbittorrent`/`transmission`QB版本号要求>= 4.3.9TR版本号要求>= 3.0,同时还需要配置对应渠道的环境变量,非对应渠道的变量可删除,推荐使用`qbittorrent`
- `qbittorrent`设置项:
@@ -131,9 +144,9 @@ docker pull jxxghp/moviepilot:latest
- **TR_USER** transmission用户名
- **TR_PASSWORD** transmission密码
- **DOWNLOADER_MONITOR** 下载器监控,`true`/`false`,默认为`true`,开启后下载完成时才会自动整理入库
- **MEDIASERVER** 媒体服务器,支持`emby`/`jellyfin`/`plex`,同时开启多个使用`,`分隔。还需要配置对应媒体服务器的环境变量,非对应媒体服务器的变量可删除,推荐使用`emby`
---
- **REFRESH_MEDIASERVER** 入库后是否刷新媒体服务器,`true`/`false`,默认`true`
- **MEDIASERVER $\color{red}{*}$ ** 媒体服务器,支持`emby`/`jellyfin`/`plex`,同时开启多个使用`,`分隔。还需要配置对应媒体服务器的环境变量,非对应媒体服务器的变量可删除,推荐使用`emby`
- `emby`设置项:
@@ -156,9 +169,9 @@ docker pull jxxghp/moviepilot:latest
### 2. **用户认证**
- **AUTH_SITE** 认证站点,支持`iyuu`/`hhclub`/`audiences`/`hddolby`/`zmpt`/`freefarm`/`hdfans`/`wintersakura`/`leaves`/`1ptba`/`icc2022`/`ptlsp`
`MoviePilot`需要认证后才能使用,配置`AUTH_SITE`后,需要根据下表配置对应站点的认证参数(**仅能通过docker环境变量配置**
`MoviePilot`需要认证后才能使用,配置`AUTH_SITE`后,需要根据下表配置对应站点的认证参数。
- **AUTH_SITE $\color{red}{*}$ ** 认证站点,支持`iyuu`/`hhclub`/`audiences`/`hddolby`/`zmpt`/`freefarm`/`hdfans`/`wintersakura`/`leaves`/`1ptba`/`icc2022`/`ptlsp`/`xingtan`
| 站点 | 参数 |
|:------------:|:-----------------------------------------------------:|
@@ -174,6 +187,7 @@ docker pull jxxghp/moviepilot:latest
| 1ptba | `1PTBA_UID`用户ID<br/>`1PTBA_PASSKEY`:密钥 |
| icc2022 | `ICC2022_UID`用户ID<br/>`ICC2022_PASSKEY`:密钥 |
| ptlsp | `PTLSP_UID`用户ID<br/>`PTLSP_PASSKEY`:密钥 |
| xingtan | `XINGTAN_UID`用户ID<br/>`XINGTAN_PASSKEY`:密钥 |
### 2. **进阶配置**
@@ -194,6 +208,7 @@ docker pull jxxghp/moviepilot:latest
> `edition` 版本(资源类型+特效)
> `videoFormat` 分辨率
> `releaseGroup` 制作组/字幕组
> `customization` 自定义占位符
> `videoCodec` 视频编码
> `audioCodec` 音频编码
> `tmdbid` TMDBID
@@ -223,9 +238,7 @@ docker pull jxxghp/moviepilot:latest
```
### 3. **过滤规则**
`设定`-`规则`中设定,规则说明:
### 3. **优先级规则**
- 仅支持使用内置规则进行排列组合,内置规则有:`蓝光原盘``4K``1080P``中文字幕``特效字幕``H265``H264``杜比``HDR``REMUX``WEB-DL``免费``国语配音`
- 符合任一层级规则的资源将被标识选中,匹配成功的层级做为该资源的优先级,排越前面优先级超高
@@ -242,10 +255,9 @@ docker pull jxxghp/moviepilot:latest
- 将MoviePilot做为Radarr或Sonarr服务器添加到Overseerr或Jellyseerr`API服务端口`可使用Overseerr/Jellyseerr浏览订阅。
- 映射宿主机docker.sock文件到容器`/var/run/docker.sock`,以支持内建重启操作。实例:`-v /var/run/docker.sock:/var/run/docker.sock:ro`
**注意**
1) 容器首次启动需要下载浏览器内核,根据网络情况可能需要较长时间,此时无法登录。可映射`/moviepilot`目录避免容器重置后重新触发浏览器内核下载。
2) 使用反向代理时,需要添加以下配置,否则可能会导致部分功能无法访问(`ip:port`修改为实际值):
### **注意**
- 容器首次启动需要下载浏览器内核,根据网络情况可能需要较长时间,此时无法登录。可映射`/moviepilot`目录避免容器重置后重新触发浏览器内核下载。
- 使用反向代理时,需要添加以下配置,否则可能会导致部分功能无法访问(`ip:port`修改为实际值):
```nginx configuration
location / {
proxy_pass http://ip:port;
@@ -255,7 +267,7 @@ location / {
proxy_set_header X-Forwarded-Proto $scheme;
}
```
3) 新建的企业微信应用需要固定公网IP的代理才能收到消息代理添加以下代码
- 新建的企业微信应用需要固定公网IP的代理才能收到消息代理添加以下代码
```nginx configuration
location /cgi-bin/gettoken {
proxy_pass https://qyapi.weixin.qq.com;

BIN
app.ico Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 174 KiB

View File

@@ -2,7 +2,7 @@ from pathlib import Path
from typing import Any, List, Optional
from fastapi import APIRouter, Depends
from requests import Session
from sqlalchemy.orm import Session
from app import schemas
from app.chain.dashboard import DashboardChain

View File

@@ -16,10 +16,16 @@ router = APIRouter()
IMAGE_TYPES = [".jpg", ".png", ".gif", ".bmp", ".jpeg", ".webp"]
@router.get("/list", summary="所有", response_model=List[schemas.FileItem])
def list_path(path: str, sort: str = 'time', _: schemas.TokenPayload = Depends(verify_token)) -> Any:
@router.get("/list", summary="所有目录和文", response_model=List[schemas.FileItem])
def list_path(path: str,
sort: str = 'time',
_: schemas.TokenPayload = Depends(verify_token)) -> Any:
"""
查询当前目录下所有目录和文件
:param path: 目录路径
:param sort: 排序方式name:按名称排序time:按修改时间排序
:param _: token
:return: 所有目录和文件
"""
# 返回结果
ret_items = []

View File

@@ -222,5 +222,8 @@ def execute_command(jobid: str,
"""
if not jobid:
return schemas.Response(success=False, message="命令不能为空!")
Scheduler().start(jobid)
return schemas.Response(success=True)
if jobid == "subscribe_search":
Scheduler().start(jobid, state = 'R')
else:
Scheduler().start(jobid)
return schemas.Response(success=True)

View File

@@ -1,7 +1,7 @@
from typing import Any, List
from fastapi import APIRouter, HTTPException, Depends
from requests import Session
from sqlalchemy.orm import Session
from app import schemas
from app.chain.media import MediaChain

View File

@@ -115,6 +115,17 @@ class ChainBase(metaclass=ABCMeta):
"""
return self.run_module("recognize_media", meta=meta, mtype=mtype, tmdbid=tmdbid)
def match_doubaninfo(self, name: str, mtype: str = None,
year: str = None, season: int = None) -> Optional[dict]:
"""
搜索和匹配豆瓣信息
:param name: 标题
:param mtype: 类型
:param year: 年份
:param season: 季
"""
return self.run_module("match_doubaninfo", name=name, mtype=mtype, year=year, season=season)
def obtain_images(self, mediainfo: MediaInfo) -> Optional[MediaInfo]:
"""
补充抓取媒体信息图片

View File

@@ -74,8 +74,7 @@ class DownloadChain(ChainBase):
title=f"{mediainfo.title_year} "
f"{meta.season_episode} 开始下载",
text=msg_text,
image=mediainfo.get_message_image(),
userid=userid))
image=mediainfo.get_message_image()))
def download_torrent(self, torrent: TorrentInfo,
channel: MessageChannel = None,

View File

@@ -28,12 +28,18 @@ class MediaServerChain(ChainBase):
"""
return self.run_module("mediaserver_librarys", server=server)
def items(self, server: str, library_id: Union[str, int]) -> Generator:
def items(self, server: str, library_id: Union[str, int]) -> List[schemas.MediaServerItem]:
"""
获取媒体服务器所有项目
"""
return self.run_module("mediaserver_items", server=server, library_id=library_id)
def iteminfo(self, server: str, item_id: Union[str, int]) -> schemas.MediaServerItem:
"""
获取媒体服务器项目信息
"""
return self.run_module("mediaserver_iteminfo", server=server, item_id=item_id)
def episodes(self, server: str, item_id: Union[str, int]) -> List[schemas.MediaServerSeasonInfo]:
"""
获取媒体服务器剧集信息

View File

@@ -3,7 +3,7 @@ import re
from datetime import datetime
from typing import Dict, List, Optional, Union, Tuple
from requests import Session
from sqlalchemy.orm import Session
from app.chain import ChainBase
from app.chain.download import DownloadChain

View File

@@ -5,6 +5,7 @@ from cachetools import cached, TTLCache
from app import schemas
from app.chain import ChainBase
from app.core.config import settings
from app.schemas import MediaType
from app.utils.singleton import Singleton
@@ -122,5 +123,5 @@ class TmdbChain(ChainBase, metaclass=Singleton):
while True:
info = random.choice(infos)
if info and info.get("backdrop_path"):
return f"https://image.tmdb.org/t/p/original{info.get('backdrop_path')}"
return f"https://{settings.TMDB_IMAGE_DOMAIN}/t/p/original{info.get('backdrop_path')}"
return None

View File

@@ -622,8 +622,9 @@ class TransferChain(ChainBase):
if not path.exists():
return
if path.is_file():
# 删除文件、nfo、jpg
files = glob.glob(f"{Path(path.parent).joinpath(path.stem)}*")
# 删除文件、nfo、jpg等同名文件
pattern = path.stem.replace('[', '?').replace(']', '?')
files = path.parent.glob(f"{pattern}.*")
for file in files:
Path(file).unlink()
logger.warn(f"文件 {path} 已删除")

View File

@@ -1,9 +1,13 @@
import os
import secrets
import sys
from pathlib import Path
from typing import List
from pydantic import BaseSettings
from app.utils.system import SystemUtils
class Settings(BaseSettings):
# 项目名称
@@ -208,7 +212,11 @@ class Settings(BaseSettings):
def CONFIG_PATH(self):
if self.CONFIG_DIR:
return Path(self.CONFIG_DIR)
return self.INNER_CONFIG_PATH
elif SystemUtils.is_docker():
return Path("/config")
elif SystemUtils.is_frozen():
return Path(sys.executable).parent / "config"
return self.ROOT_PATH / "config"
@property
def TEMP_PATH(self):
@@ -268,11 +276,14 @@ class Settings(BaseSettings):
return [Path(path) for path in self.LIBRARY_PATH.split(",")]
return []
def __init__(self):
super().__init__()
def __init__(self, **kwargs):
super().__init__(**kwargs)
with self.CONFIG_PATH as p:
if not p.exists():
p.mkdir(parents=True, exist_ok=True)
if SystemUtils.is_frozen():
if not (p / "app.env").exists():
SystemUtils.copy(self.INNER_CONFIG_PATH / "app.env", p / "app.env")
with self.TEMP_PATH as p:
if not p.exists():
p.mkdir(parents=True, exist_ok=True)
@@ -284,4 +295,7 @@ class Settings(BaseSettings):
case_sensitive = True
settings = Settings()
settings = Settings(
_env_file=Settings().CONFIG_PATH / "app.env",
_env_file_encoding="utf-8"
)

View File

@@ -0,0 +1,47 @@
import regex as re
from app.db.systemconfig_oper import SystemConfigOper
from app.schemas.types import SystemConfigKey
from app.utils.singleton import Singleton
class CustomizationMatcher(metaclass=Singleton):
"""
识别自定义占位符
"""
customization = None
custom_separator = None
def __init__(self):
self.systemconfig = SystemConfigOper()
self.customization = None
self.custom_separator = None
def match(self, title=None):
"""
:param title: 资源标题或文件名
:return: 匹配结果
"""
if not title:
return ""
if not self.customization:
# 自定义占位符
customization = self.systemconfig.get(SystemConfigKey.Customization)
if not customization:
return ""
if isinstance(customization, str):
customization = customization.replace("\n", ";").replace("|", ";").strip(";").split(";")
self.customization = "|".join([f"({item})" for item in customization])
customization_re = re.compile(r"%s" % self.customization)
# 处理重复多次的情况,保留先后顺序(按添加自定义占位符的顺序)
unique_customization = {}
for item in re.findall(customization_re, title):
if not isinstance(item, tuple):
item = (item,)
for i in range(len(item)):
if item[i] and unique_customization.get(item[i]) is None:
unique_customization[item[i]] = i
unique_customization = list(dict(sorted(unique_customization.items(), key=lambda x: x[1])).keys())
separator = self.custom_separator or "@"
return separator.join(unique_customization)

View File

@@ -1,6 +1,7 @@
import re
import zhconv
import anitopy
from app.core.meta.customization import CustomizationMatcher
from app.core.meta.metabase import MetaBase
from app.core.meta.releasegroup import ReleaseGroupsMatcher
from app.utils.string import StringUtils
@@ -144,6 +145,8 @@ class MetaAnime(MetaBase):
self.resource_team = \
ReleaseGroupsMatcher().match(title=original_title) or \
anitopy_info_origin.get("release_group") or None
# 自定义占位符
self.customization = CustomizationMatcher().match(title=original_title) or None
# 视频编码
self.video_encode = anitopy_info.get("video_term")
if isinstance(self.video_encode, list):

View File

@@ -51,6 +51,8 @@ class MetaBase(object):
resource_pix: Optional[str] = None
# 识别的制作组/字幕组
resource_team: Optional[str] = None
# 识别的自定义占位符
customization: Optional[str] = None
# 视频编码
video_encode: Optional[str] = None
# 音频编码
@@ -492,6 +494,9 @@ class MetaBase(object):
# 制作组/字幕组
if not self.resource_team:
self.resource_team = meta.resource_team
# 自定义占位符
if not self.customization:
self.customization = meta.customization
# 特效
if not self.resource_effect:
self.resource_effect = meta.resource_effect

View File

@@ -2,6 +2,7 @@ import re
from pathlib import Path
from app.core.config import settings
from app.core.meta.customization import CustomizationMatcher
from app.core.meta.metabase import MetaBase
from app.core.meta.releasegroup import ReleaseGroupsMatcher
from app.utils.string import StringUtils
@@ -130,6 +131,8 @@ class MetaVideo(MetaBase):
self.part = None
# 制作组/字幕组
self.resource_team = ReleaseGroupsMatcher().match(title=original_title) or None
# 自定义占位符
self.customization = CustomizationMatcher().match(title=original_title) or None
def __fix_name(self, name: str):
if not name:

View File

@@ -39,7 +39,7 @@ def update_db():
更新数据库
"""
db_location = settings.CONFIG_PATH / 'user.db'
script_location = settings.ROOT_PATH / 'alembic'
script_location = settings.ROOT_PATH / 'database'
try:
alembic_cfg = Config()
alembic_cfg.set_main_option('script_location', str(script_location))

View File

@@ -2,12 +2,15 @@ from pyvirtualdisplay import Display
from app.log import logger
from app.utils.singleton import Singleton
from app.utils.system import SystemUtils
class DisplayHelper(metaclass=Singleton):
_display: Display = None
def __init__(self):
if not SystemUtils.is_docker():
return
try:
self._display = Display(visible=False, size=(1024, 768))
self._display.start()

Binary file not shown.

View File

@@ -1,3 +1,4 @@
from datetime import datetime
from pathlib import Path
from typing import List, Optional, Tuple, Union
@@ -10,11 +11,11 @@ from app.modules import _ModuleBase
from app.modules.douban.apiv2 import DoubanApi
from app.modules.douban.scraper import DoubanScraper
from app.schemas.types import MediaType
from app.utils.common import retry
from app.utils.system import SystemUtils
class DoubanModule(_ModuleBase):
doubanapi: DoubanApi = None
scraper: DoubanScraper = None
@@ -34,6 +35,271 @@ class DoubanModule(_ModuleBase):
:param doubanid: 豆瓣ID
:return: 豆瓣信息
"""
"""
{
"rating": {
"count": 287365,
"max": 10,
"star_count": 3.5,
"value": 6.6
},
"lineticket_url": "",
"controversy_reason": "",
"pubdate": [
"2021-10-29(中国大陆)"
],
"last_episode_number": null,
"interest_control_info": null,
"pic": {
"large": "https://img9.doubanio.com/view/photo/m_ratio_poster/public/p2707553644.webp",
"normal": "https://img9.doubanio.com/view/photo/s_ratio_poster/public/p2707553644.webp"
},
"vendor_count": 6,
"body_bg_color": "f4f5f9",
"is_tv": false,
"head_info": null,
"album_no_interact": false,
"ticket_price_info": "",
"webisode_count": 0,
"year": "2021",
"card_subtitle": "2021 / 英国 美国 / 动作 惊悚 冒险 / 凯瑞·福永 / 丹尼尔·克雷格 蕾雅·赛杜",
"forum_info": null,
"webisode": null,
"id": "20276229",
"gallery_topic_count": 0,
"languages": [
"英语",
"法语",
"意大利语",
"俄语",
"西班牙语"
],
"genres": [
"动作",
"惊悚",
"冒险"
],
"review_count": 926,
"title": "007无暇赴死",
"intro": "世界局势波诡云谲,再度出山的邦德(丹尼尔·克雷格 饰面临有史以来空前的危机传奇特工007的故事在本片中达到高潮。新老角色集结亮相蕾雅·赛杜回归二度饰演邦女郎玛德琳。系列最恐怖反派萨芬拉米·马雷克 饰重磅登场毫不留情地展示了自己狠辣的一面不仅揭开了玛德琳身上隐藏的秘密还酝酿着危及数百万人性命的阴谋幽灵党的身影也似乎再次浮出水面。半路杀出的新00号特工拉什纳·林奇 饰)与神秘女子(安娜·德·阿玛斯 饰)看似与邦德同阵作战,但其真实目的依然成谜。关乎邦德生死的新仇旧怨接踵而至,暗潮汹涌之下他能否拯救世界?",
"interest_cmt_earlier_tip_title": "发布于上映前",
"has_linewatch": true,
"ugc_tabs": [
{
"source": "reviews",
"type": "review",
"title": "影评"
},
{
"source": "forum_topics",
"type": "forum",
"title": "讨论"
}
],
"forum_topic_count": 857,
"ticket_promo_text": "",
"webview_info": {},
"is_released": true,
"actors": [
{
"name": "丹尼尔·克雷格",
"roles": [
"演员",
"制片人",
"配音"
],
"title": "丹尼尔·克雷格(同名)英国,英格兰,柴郡,切斯特影视演员",
"url": "https://movie.douban.com/celebrity/1025175/",
"user": null,
"character": "饰 詹姆斯·邦德 James Bond 007",
"uri": "douban://douban.com/celebrity/1025175?subject_id=27230907",
"avatar": {
"large": "https://qnmob3.doubanio.com/view/celebrity/raw/public/p42588.jpg?imageView2/2/q/80/w/600/h/3000/format/webp",
"normal": "https://qnmob3.doubanio.com/view/celebrity/raw/public/p42588.jpg?imageView2/2/q/80/w/200/h/300/format/webp"
},
"sharing_url": "https://www.douban.com/doubanapp/dispatch?uri=/celebrity/1025175/",
"type": "celebrity",
"id": "1025175",
"latin_name": "Daniel Craig"
}
],
"interest": null,
"vendor_icons": [
"https://img9.doubanio.com/f/frodo/fbc90f355fc45d5d2056e0d88c697f9414b56b44/pics/vendors/tencent.png",
"https://img2.doubanio.com/f/frodo/8286b9b5240f35c7e59e1b1768cd2ccf0467cde5/pics/vendors/migu_video.png",
"https://img9.doubanio.com/f/frodo/88a62f5e0cf9981c910e60f4421c3e66aac2c9bc/pics/vendors/bilibili.png"
],
"episodes_count": 0,
"color_scheme": {
"is_dark": true,
"primary_color_light": "868ca5",
"_base_color": [
0.6333333333333333,
0.18867924528301885,
0.20784313725490197
],
"secondary_color": "f4f5f9",
"_avg_color": [
0.059523809523809625,
0.09790209790209795,
0.5607843137254902
],
"primary_color_dark": "676c7f"
},
"type": "movie",
"null_rating_reason": "",
"linewatches": [
{
"url": "http://v.youku.com/v_show/id_XNTIwMzM2NDg5Mg==.html?tpa=dW5pb25faWQ9MzAwMDA4XzEwMDAwMl8wMl8wMQ&refer=esfhz_operation.xuka.xj_00003036_000000_FNZfau_19010900",
"source": {
"literal": "youku",
"pic": "https://img1.doubanio.com/img/files/file-1432869267.png",
"name": "优酷视频"
},
"source_uri": "youku://play?vid=XNTIwMzM2NDg5Mg==&source=douban&refer=esfhz_operation.xuka.xj_00003036_000000_FNZfau_19010900",
"free": false
},
],
"info_url": "https://www.douban.com/doubanapp//h5/movie/20276229/desc",
"tags": [],
"durations": [
"163分钟"
],
"comment_count": 97204,
"cover": {
"description": "",
"author": {
"loc": {
"id": "108288",
"name": "北京",
"uid": "beijing"
},
"kind": "user",
"name": "雨落下",
"reg_time": "2020-08-11 16:22:48",
"url": "https://www.douban.com/people/221011676/",
"uri": "douban://douban.com/user/221011676",
"id": "221011676",
"avatar_side_icon_type": 3,
"avatar_side_icon_id": "234",
"avatar": "https://img2.doubanio.com/icon/up221011676-2.jpg",
"is_club": false,
"type": "user",
"avatar_side_icon": "https://img2.doubanio.com/view/files/raw/file-1683625971.png",
"uid": "221011676"
},
"url": "https://movie.douban.com/photos/photo/2707553644/",
"image": {
"large": {
"url": "https://img9.doubanio.com/view/photo/l/public/p2707553644.webp",
"width": 1082,
"height": 1600,
"size": 0
},
"raw": null,
"small": {
"url": "https://img9.doubanio.com/view/photo/s/public/p2707553644.webp",
"width": 405,
"height": 600,
"size": 0
},
"normal": {
"url": "https://img9.doubanio.com/view/photo/m/public/p2707553644.webp",
"width": 405,
"height": 600,
"size": 0
},
"is_animated": false
},
"uri": "douban://douban.com/photo/2707553644",
"create_time": "2021-10-26 15:05:01",
"position": 0,
"owner_uri": "douban://douban.com/movie/20276229",
"type": "photo",
"id": "2707553644",
"sharing_url": "https://www.douban.com/doubanapp/dispatch?uri=/photo/2707553644/"
},
"cover_url": "https://img9.doubanio.com/view/photo/m_ratio_poster/public/p2707553644.webp",
"restrictive_icon_url": "",
"header_bg_color": "676c7f",
"is_douban_intro": false,
"ticket_vendor_icons": [
"https://img9.doubanio.com/view/dale-online/dale_ad/public/0589a62f2f2d7c2.jpg"
],
"honor_infos": [],
"sharing_url": "https://movie.douban.com/subject/20276229/",
"subject_collections": [],
"wechat_timeline_share": "screenshot",
"countries": [
"英国",
"美国"
],
"url": "https://movie.douban.com/subject/20276229/",
"release_date": null,
"original_title": "No Time to Die",
"uri": "douban://douban.com/movie/20276229",
"pre_playable_date": null,
"episodes_info": "",
"subtype": "movie",
"directors": [
{
"name": "凯瑞·福永",
"roles": [
"导演",
"制片人",
"编剧",
"摄影",
"演员"
],
"title": "凯瑞·福永(同名)美国,加利福尼亚州,奥克兰影视演员",
"url": "https://movie.douban.com/celebrity/1009531/",
"user": null,
"character": "导演",
"uri": "douban://douban.com/celebrity/1009531?subject_id=27215222",
"avatar": {
"large": "https://qnmob3.doubanio.com/view/celebrity/raw/public/p1392285899.57.jpg?imageView2/2/q/80/w/600/h/3000/format/webp",
"normal": "https://qnmob3.doubanio.com/view/celebrity/raw/public/p1392285899.57.jpg?imageView2/2/q/80/w/200/h/300/format/webp"
},
"sharing_url": "https://www.douban.com/doubanapp/dispatch?uri=/celebrity/1009531/",
"type": "celebrity",
"id": "1009531",
"latin_name": "Cary Fukunaga"
}
],
"is_show": false,
"in_blacklist": false,
"pre_release_desc": "",
"video": null,
"aka": [
"007生死有时(港)",
"007生死交战(台)",
"007间不容死",
"邦德25",
"007没空去死(豆友译名)",
"James Bond 25",
"Never Dream of Dying",
"Shatterhand"
],
"is_restrictive": false,
"trailer": {
"sharing_url": "https://www.douban.com/doubanapp/dispatch?uri=/movie/20276229/trailer%3Ftrailer_id%3D282585%26trailer_type%3DA",
"video_url": "https://vt1.doubanio.com/202310011325/3b1f5827e91dde7826dc20930380dfc2/view/movie/M/402820585.mp4",
"title": "中国预告片:终极决战版 (中文字幕)",
"uri": "douban://douban.com/movie/20276229/trailer?trailer_id=282585&trailer_type=A",
"cover_url": "https://img1.doubanio.com/img/trailer/medium/2712944408.jpg",
"term_num": 0,
"n_comments": 21,
"create_time": "2021-11-01",
"subject_title": "007无暇赴死",
"file_size": 10520074,
"runtime": "00:42",
"type": "A",
"id": "282585",
"desc": ""
},
"interest_cmt_earlier_tip_desc": "该短评的发布时间早于公开上映时间,作者可能通过其他渠道提前观看,请谨慎参考。其评分将不计入总评分。"
}
"""
if not doubanid:
return None
logger.info(f"开始获取豆瓣信息:{doubanid} ...")
@@ -129,22 +395,45 @@ class DoubanModule(_ModuleBase):
return ret_medias
def __match(self, name: str, year: str, season: int = None) -> dict:
@retry(Exception, 5, 3, 3, logger=logger)
def match_doubaninfo(self, name: str, mtype: str = None,
year: str = None, season: int = None) -> dict:
"""
搜索和匹配豆瓣信息
:param name: 名称
:param mtype: 类型 电影/电视剧
:param year: 年份
:param season: 季号
"""
result = self.doubanapi.search(f"{name} {year or ''}")
result = self.doubanapi.search(f"{name} {year or ''}".strip(),
ts=datetime.strftime(datetime.now(), '%Y%m%d%H%M%S'))
if not result:
logger.warn(f"未找到 {name} 的豆瓣信息")
return {}
# 触发rate limit
if "search_access_rate_limit" in result.values():
logger.warn(f"触发豆瓣API速率限制 错误信息 {result} ...")
raise Exception("触发豆瓣API速率限制")
for item_obj in result.get("items"):
if item_obj.get("type_name") not in (MediaType.TV.value, MediaType.MOVIE.value):
type_name = item_obj.get("type_name")
if type_name not in [MediaType.TV.value, MediaType.MOVIE.value]:
continue
title = item_obj.get("title")
if mtype and mtype != type_name:
continue
if mtype == MediaType.TV and not season:
season = 1
item = item_obj.get("target")
title = item.get("title")
if not title:
continue
meta = MetaInfo(title)
if meta.name == name and (not season or meta.begin_season == season):
return item_obj
if type_name == MediaType.TV.value:
meta.type = MediaType.TV
meta.begin_season = meta.begin_season or 1
if meta.name == name \
and ((not season and not meta.begin_season) or meta.begin_season == season) \
and (not year or item.get('year') == year):
return item
return {}
def movie_top250(self, page: int = 1, count: int = 30) -> List[dict]:
@@ -173,7 +462,10 @@ class DoubanModule(_ModuleBase):
if not meta.name:
return
# 根据名称查询豆瓣数据
doubaninfo = self.__match(name=mediainfo.title, year=mediainfo.year, season=meta.begin_season)
doubaninfo = self.match_doubaninfo(name=mediainfo.title,
mtype=mediainfo.type.value,
year=mediainfo.year,
season=meta.begin_season)
if not doubaninfo:
logger.warn(f"未找到 {mediainfo.title} 的豆瓣信息")
return
@@ -192,9 +484,10 @@ class DoubanModule(_ModuleBase):
if not meta.name:
continue
# 根据名称查询豆瓣数据
doubaninfo = self.__match(name=mediainfo.title,
year=mediainfo.year,
season=meta.begin_season)
doubaninfo = self.match_doubaninfo(name=mediainfo.title,
mtype=mediainfo.type.value,
year=mediainfo.year,
season=meta.begin_season)
if not doubaninfo:
logger.warn(f"未找到 {mediainfo.title} 的豆瓣信息")
break

View File

@@ -146,72 +146,113 @@ class DoubanApi(metaclass=Singleton):
_api_secret_key = "bf7dddc7c9cfe6f7"
_api_key = "0dad551ec0f84ed02907ff5c42e8ec70"
_base_url = "https://frodo.douban.com/api/v2"
_session = requests.Session()
_session = None
def __init__(self):
pass
self._session = requests.Session()
@classmethod
def __sign(cls, url: str, ts: int, method='GET') -> str:
url_path = parse.urlparse(url).path
raw_sign = '&'.join([method.upper(), parse.quote(url_path, safe=''), str(ts)])
return base64.b64encode(hmac.new(cls._api_secret_key.encode(), raw_sign.encode(), hashlib.sha1).digest()
).decode()
return base64.b64encode(
hmac.new(
cls._api_secret_key.encode(),
raw_sign.encode(),
hashlib.sha1
).digest()
).decode()
@classmethod
@lru_cache(maxsize=settings.CACHE_CONF.get('douban'))
def __invoke(cls, url, **kwargs):
req_url = cls._base_url + url
def __invoke(self, url, **kwargs):
req_url = self._base_url + url
params = {'apiKey': cls._api_key}
params = {'apiKey': self._api_key}
if kwargs:
params.update(kwargs)
ts = params.pop('_ts', int(datetime.strftime(datetime.now(), '%Y%m%d')))
params.update({'os_rom': 'android', 'apiKey': cls._api_key, '_ts': ts, '_sig': cls.__sign(url=req_url, ts=ts)})
resp = RequestUtils(ua=choice(cls._user_agents), session=cls._session).get_res(url=req_url, params=params)
ts = params.pop(
'_ts',
datetime.strftime(datetime.now(), '%Y%m%d')
)
params.update({
'os_rom': 'android',
'apiKey': self._api_key,
'_ts': ts,
'_sig': self.__sign(url=req_url, ts=ts)
})
resp = RequestUtils(
ua=choice(self._user_agents),
session=self._session
).get_res(url=req_url, params=params)
if resp.status_code == 400 and "rate_limit" in resp.text:
return resp.json()
return resp.json() if resp else {}
def search(self, keyword, start=0, count=20, ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["search"], q=keyword, start=start, count=count, _ts=ts)
def search(self, keyword, start=0, count=20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["search"], q=keyword,
start=start, count=count, _ts=ts)
def movie_search(self, keyword, start=0, count=20, ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["movie_search"], q=keyword, start=start, count=count, _ts=ts)
def movie_search(self, keyword, start=0, count=20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["movie_search"], q=keyword,
start=start, count=count, _ts=ts)
def tv_search(self, keyword, start=0, count=20, ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["tv_search"], q=keyword, start=start, count=count, _ts=ts)
def tv_search(self, keyword, start=0, count=20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["tv_search"], q=keyword,
start=start, count=count, _ts=ts)
def book_search(self, keyword, start=0, count=20, ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["book_search"], q=keyword, start=start, count=count, _ts=ts)
def book_search(self, keyword, start=0, count=20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["book_search"], q=keyword,
start=start, count=count, _ts=ts)
def group_search(self, keyword, start=0, count=20, ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["group_search"], q=keyword, start=start, count=count, _ts=ts)
def group_search(self, keyword, start=0, count=20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["group_search"], q=keyword,
start=start, count=count, _ts=ts)
def movie_showing(self, start=0, count=20, ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["movie_showing"], start=start, count=count, _ts=ts)
def movie_showing(self, start=0, count=20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["movie_showing"],
start=start, count=count, _ts=ts)
def movie_soon(self, start=0, count=20, ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["movie_soon"], start=start, count=count, _ts=ts)
def movie_soon(self, start=0, count=20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["movie_soon"],
start=start, count=count, _ts=ts)
def movie_hot_gaia(self, start=0, count=20, ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["movie_hot_gaia"], start=start, count=count, _ts=ts)
def movie_hot_gaia(self, start=0, count=20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["movie_hot_gaia"],
start=start, count=count, _ts=ts)
def tv_hot(self, start=0, count=20, ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["tv_hot"], start=start, count=count, _ts=ts)
def tv_hot(self, start=0, count=20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["tv_hot"],
start=start, count=count, _ts=ts)
def tv_animation(self, start=0, count=20, ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["tv_animation"], start=start, count=count, _ts=ts)
def tv_animation(self, start=0, count=20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["tv_animation"],
start=start, count=count, _ts=ts)
def tv_variety_show(self, start=0, count=20, ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["tv_variety_show"], start=start, count=count, _ts=ts)
def tv_variety_show(self, start=0, count=20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["tv_variety_show"],
start=start, count=count, _ts=ts)
def tv_rank_list(self, start=0, count=20, ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["tv_rank_list"], start=start, count=count, _ts=ts)
def tv_rank_list(self, start=0, count=20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["tv_rank_list"],
start=start, count=count, _ts=ts)
def show_hot(self, start=0, count=20, ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["show_hot"], start=start, count=count, _ts=ts)
def show_hot(self, start=0, count=20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["show_hot"],
start=start, count=count, _ts=ts)
def movie_detail(self, subject_id):
return self.__invoke(self._urls["movie_detail"] + subject_id)
@@ -228,20 +269,30 @@ class DoubanApi(metaclass=Singleton):
def book_detail(self, subject_id):
return self.__invoke(self._urls["book_detail"] + subject_id)
def movie_top250(self, start=0, count=20, ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["movie_top250"], start=start, count=count, _ts=ts)
def movie_top250(self, start=0, count=20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["movie_top250"],
start=start, count=count, _ts=ts)
def movie_recommend(self, tags='', sort='R', start=0, count=20, ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["movie_recommend"], tags=tags, sort=sort, start=start, count=count, _ts=ts)
def movie_recommend(self, tags='', sort='R', start=0, count=20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["movie_recommend"], tags=tags, sort=sort,
start=start, count=count, _ts=ts)
def tv_recommend(self, tags='', sort='R', start=0, count=20, ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["tv_recommend"], tags=tags, sort=sort, start=start, count=count, _ts=ts)
def tv_recommend(self, tags='', sort='R', start=0, count=20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["tv_recommend"], tags=tags, sort=sort,
start=start, count=count, _ts=ts)
def tv_chinese_best_weekly(self, start=0, count=20, ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["tv_chinese_best_weekly"], start=start, count=count, _ts=ts)
def tv_chinese_best_weekly(self, start=0, count=20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["tv_chinese_best_weekly"],
start=start, count=count, _ts=ts)
def tv_global_best_weekly(self, start=0, count=20, ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["tv_global_best_weekly"], start=start, count=count, _ts=ts)
def tv_global_best_weekly(self, start=0, count=20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
return self.__invoke(self._urls["tv_global_best_weekly"],
start=start, count=count, _ts=ts)
def doulist_detail(self, subject_id):
"""
@@ -250,7 +301,8 @@ class DoubanApi(metaclass=Singleton):
"""
return self.__invoke(self._urls["doulist"] + subject_id)
def doulist_items(self, subject_id, start=0, count=20, ts=datetime.strftime(datetime.now(), '%Y%m%d')):
def doulist_items(self, subject_id, start=0, count=20,
ts=datetime.strftime(datetime.now(), '%Y%m%d')):
"""
豆列列表
:param subject_id: 豆列id
@@ -258,4 +310,9 @@ class DoubanApi(metaclass=Singleton):
:param count: 数量
:param ts: 时间戳
"""
return self.__invoke(self._urls["doulist_items"] % subject_id, start=start, count=count, _ts=ts)
return self.__invoke(self._urls["doulist_items"] % subject_id,
start=start, count=count, _ts=ts)
def __del__(self):
if self._session:
self._session.close()

View File

@@ -6,7 +6,6 @@ from app.core.context import MediaInfo
from app.log import logger
from app.modules import _ModuleBase
from app.modules.emby.emby import Emby
from app.schemas import ExistMediaInfo, RefreshMediaItem, WebhookEventInfo
from app.schemas.types import MediaType
@@ -40,7 +39,7 @@ class EmbyModule(_ModuleBase):
# Emby认证
return self.emby.authenticate(name, password)
def webhook_parser(self, body: Any, form: Any, args: Any) -> Optional[WebhookEventInfo]:
def webhook_parser(self, body: Any, form: Any, args: Any) -> Optional[schemas.WebhookEventInfo]:
"""
解析Webhook报文体
:param body: 请求体
@@ -50,7 +49,7 @@ class EmbyModule(_ModuleBase):
"""
return self.emby.get_webhook_message(form, args)
def media_exists(self, mediainfo: MediaInfo, itemid: str = None) -> Optional[ExistMediaInfo]:
def media_exists(self, mediainfo: MediaInfo, itemid: str = None) -> Optional[schemas.ExistMediaInfo]:
"""
判断媒体文件是否存在
:param mediainfo: 识别的媒体信息
@@ -62,25 +61,40 @@ class EmbyModule(_ModuleBase):
movie = self.emby.get_iteminfo(itemid)
if movie:
logger.info(f"媒体库中已存在:{movie}")
return ExistMediaInfo(type=MediaType.MOVIE)
movies = self.emby.get_movies(title=mediainfo.title, year=mediainfo.year, tmdb_id=mediainfo.tmdb_id)
return schemas.ExistMediaInfo(
type=MediaType.MOVIE,
server="emby",
itemid=movie.item_id
)
movies = self.emby.get_movies(title=mediainfo.title,
year=mediainfo.year,
tmdb_id=mediainfo.tmdb_id)
if not movies:
logger.info(f"{mediainfo.title_year} 在媒体库中不存在")
return None
else:
logger.info(f"媒体库中已存在:{movies}")
return ExistMediaInfo(type=MediaType.MOVIE)
return schemas.ExistMediaInfo(
type=MediaType.MOVIE,
server="emby",
itemid=movies[0].item_id
)
else:
tvs = self.emby.get_tv_episodes(title=mediainfo.title,
year=mediainfo.year,
tmdb_id=mediainfo.tmdb_id,
item_id=itemid)
itemid, tvs = self.emby.get_tv_episodes(title=mediainfo.title,
year=mediainfo.year,
tmdb_id=mediainfo.tmdb_id,
item_id=itemid)
if not tvs:
logger.info(f"{mediainfo.title_year} 在媒体库中不存在")
return None
else:
logger.info(f"{mediainfo.title_year} 媒体库中已存在:{tvs}")
return ExistMediaInfo(type=MediaType.TV, seasons=tvs)
return schemas.ExistMediaInfo(
type=MediaType.TV,
seasons=tvs,
server="emby",
itemid=itemid
)
def refresh_mediaserver(self, mediainfo: MediaInfo, file_path: Path) -> None:
"""
@@ -90,7 +104,7 @@ class EmbyModule(_ModuleBase):
:return: 成功或失败
"""
items = [
RefreshMediaItem(
schemas.RefreshMediaItem(
title=mediainfo.title,
year=mediainfo.year,
type=mediainfo.type,
@@ -105,13 +119,8 @@ class EmbyModule(_ModuleBase):
媒体数量统计
"""
media_statistic = self.emby.get_medias_count()
user_count = self.emby.get_user_count()
return [schemas.Statistic(
movie_count=media_statistic.get("MovieCount") or 0,
tv_count=media_statistic.get("SeriesCount") or 0,
episode_count=media_statistic.get("EpisodeCount") or 0,
user_count=user_count or 0
)]
media_statistic.user_count = self.emby.get_user_count()
return [media_statistic]
def mediaserver_librarys(self, server: str) -> Optional[List[schemas.MediaServerLibrary]]:
"""
@@ -119,16 +128,7 @@ class EmbyModule(_ModuleBase):
"""
if server != "emby":
return None
librarys = self.emby.get_librarys()
if not librarys:
return []
return [schemas.MediaServerLibrary(
server="emby",
id=library.get("id"),
name=library.get("name"),
type=library.get("type"),
path=library.get("path")
) for library in librarys]
return self.emby.get_librarys()
def mediaserver_items(self, server: str, library_id: str) -> Optional[Generator]:
"""
@@ -136,21 +136,15 @@ class EmbyModule(_ModuleBase):
"""
if server != "emby":
return None
items = self.emby.get_items(library_id)
for item in items:
yield schemas.MediaServerItem(
server="emby",
library=item.get("library"),
item_id=item.get("id"),
item_type=item.get("type"),
title=item.get("title"),
original_title=item.get("original_title"),
year=item.get("year"),
tmdbid=int(item.get("tmdbid")) if item.get("tmdbid") else None,
imdbid=item.get("imdbid"),
tvdbid=item.get("tvdbid"),
path=item.get("path"),
)
return self.emby.get_items(library_id)
def mediaserver_iteminfo(self, server: str, item_id: str) -> Optional[schemas.MediaServerItem]:
"""
媒体库项目详情
"""
if server != "emby":
return None
return self.emby.get_iteminfo(item_id)
def mediaserver_tv_episodes(self, server: str,
item_id: Union[str, int]) -> Optional[List[schemas.MediaServerSeasonInfo]]:
@@ -159,7 +153,7 @@ class EmbyModule(_ModuleBase):
"""
if server != "emby":
return None
seasoninfo = self.emby.get_tv_episodes(item_id=item_id)
_, seasoninfo = self.emby.get_tv_episodes(item_id=item_id)
if not seasoninfo:
return []
return [schemas.MediaServerSeasonInfo(

View File

@@ -1,17 +1,16 @@
import json
import re
from pathlib import Path
from typing import List, Optional, Union, Dict, Generator
from typing import List, Optional, Union, Dict, Generator, Tuple
from requests import Response
from app import schemas
from app.core.config import settings
from app.log import logger
from app.schemas import RefreshMediaItem, WebhookEventInfo
from app.schemas.types import MediaType
from app.utils.http import RequestUtils
from app.utils.singleton import Singleton
from app.utils.string import StringUtils
class Emby(metaclass=Singleton):
@@ -78,7 +77,7 @@ class Emby(metaclass=Singleton):
logger.error(f"连接User/Views 出错:" + str(e))
return []
def get_librarys(self):
def get_librarys(self) -> List[schemas.MediaServerLibrary]:
"""
获取媒体服务器所有媒体库列表
"""
@@ -93,12 +92,15 @@ class Emby(metaclass=Singleton):
library_type = MediaType.TV.value
case _:
continue
libraries.append({
"id": library.get("Id"),
"name": library.get("Name"),
"path": library.get("Path"),
"type": library_type
})
libraries.append(
schemas.MediaServerLibrary(
server="emby",
id=library.get("Id"),
name=library.get("Name"),
path=library.get("Path"),
type=library_type
)
)
return libraries
def get_user(self, user_name: str = None) -> Optional[Union[str, int]]:
@@ -200,59 +202,29 @@ class Emby(metaclass=Singleton):
logger.error(f"连接Users/Query出错" + str(e))
return 0
def get_activity_log(self, num: int = 30) -> List[dict]:
"""
获取Emby活动记录
"""
if not self._host or not self._apikey:
return []
req_url = "%semby/System/ActivityLog/Entries?api_key=%s&" % (self._host, self._apikey)
ret_array = []
try:
res = RequestUtils().get_res(req_url)
if res:
ret_json = res.json()
items = ret_json.get('Items')
for item in items:
if item.get("Type") == "AuthenticationSucceeded":
event_type = "LG"
event_date = StringUtils.get_time(item.get("Date"))
event_str = "%s, %s" % (item.get("Name"), item.get("ShortOverview"))
activity = {"type": event_type, "event": event_str, "date": event_date}
ret_array.append(activity)
if item.get("Type") in ["VideoPlayback", "VideoPlaybackStopped"]:
event_type = "PL"
event_date = StringUtils.get_time(item.get("Date"))
event_str = item.get("Name")
activity = {"type": event_type, "event": event_str, "date": event_date}
ret_array.append(activity)
else:
logger.error(f"System/ActivityLog/Entries 未获取到返回数据")
return []
except Exception as e:
logger.error(f"连接System/ActivityLog/Entries出错" + str(e))
return []
return ret_array[:num]
def get_medias_count(self) -> dict:
def get_medias_count(self) -> schemas.Statistic:
"""
获得电影、电视剧、动漫媒体数量
:return: MovieCount SeriesCount SongCount
"""
if not self._host or not self._apikey:
return {}
return schemas.Statistic()
req_url = "%semby/Items/Counts?api_key=%s" % (self._host, self._apikey)
try:
res = RequestUtils().get_res(req_url)
if res:
return res.json()
result = res.json()
return schemas.Statistic(
movie_count=result.get("MovieCount") or 0,
tv_count=result.get("SeriesCount") or 0,
episode_count=result.get("EpisodeCount") or 0
)
else:
logger.error(f"Items/Counts 未获取到返回数据")
return {}
return schemas.Statistic()
except Exception as e:
logger.error(f"连接Items/Counts出错" + str(e))
return {}
return schemas.Statistic()
def __get_emby_series_id_by_name(self, name: str, year: str) -> Optional[str]:
"""
@@ -263,7 +235,15 @@ class Emby(metaclass=Singleton):
"""
if not self._host or not self._apikey:
return None
req_url = "%semby/Items?IncludeItemTypes=Series&Fields=ProductionYear&StartIndex=0&Recursive=true&SearchTerm=%s&Limit=10&IncludeSearchTypes=false&api_key=%s" % (
req_url = ("%semby/Items?"
"IncludeItemTypes=Series"
"&Fields=ProductionYear"
"&StartIndex=0"
"&Recursive=true"
"&SearchTerm=%s"
"&Limit=10"
"&IncludeSearchTypes=false"
"&api_key=%s") % (
self._host, name, self._apikey)
try:
res = RequestUtils().get_res(req_url)
@@ -282,7 +262,7 @@ class Emby(metaclass=Singleton):
def get_movies(self,
title: str,
year: str = None,
tmdb_id: int = None) -> Optional[List[dict]]:
tmdb_id: int = None) -> Optional[List[schemas.MediaServerItem]]:
"""
根据标题和年份检查电影是否在Emby中存在存在则返回列表
:param title: 标题
@@ -303,17 +283,28 @@ class Emby(metaclass=Singleton):
ret_movies = []
for res_item in res_items:
item_tmdbid = res_item.get("ProviderIds", {}).get("Tmdb")
mediaserver_item = schemas.MediaServerItem(
server="emby",
library=res_item.get("ParentId"),
item_id=res_item.get("Id"),
item_type=res_item.get("Type"),
title=res_item.get("Name"),
original_title=res_item.get("OriginalTitle"),
year=res_item.get("ProductionYear"),
tmdbid=int(item_tmdbid) if item_tmdbid else None,
imdbid=res_item.get("ProviderIds", {}).get("Imdb"),
tvdbid=res_item.get("ProviderIds", {}).get("Tvdb"),
path=res_item.get("Path")
)
if tmdb_id and item_tmdbid:
if str(item_tmdbid) != str(tmdb_id):
continue
else:
ret_movies.append(
{'title': res_item.get('Name'), 'year': str(res_item.get('ProductionYear'))})
ret_movies.append(mediaserver_item)
continue
if res_item.get('Name') == title and (
not year or str(res_item.get('ProductionYear')) == str(year)):
ret_movies.append(
{'title': res_item.get('Name'), 'year': str(res_item.get('ProductionYear'))})
if (mediaserver_item.title == title
and (not year or str(mediaserver_item.year) == str(year))):
ret_movies.append(mediaserver_item)
return ret_movies
except Exception as e:
logger.error(f"连接Items出错" + str(e))
@@ -325,7 +316,8 @@ class Emby(metaclass=Singleton):
title: str = None,
year: str = None,
tmdb_id: int = None,
season: int = None) -> Optional[Dict[int, list]]:
season: int = None
) -> Tuple[Optional[str], Optional[Dict[int, List[Dict[int, list]]]]]:
"""
根据标题和年份和季返回Emby中的剧集列表
:param item_id: Emby中的ID
@@ -336,22 +328,21 @@ class Emby(metaclass=Singleton):
:return: 每一季的已有集数
"""
if not self._host or not self._apikey:
return None
return None, None
# 电视剧
if not item_id:
item_id = self.__get_emby_series_id_by_name(title, year)
if item_id is None:
return None
return None, None
if not item_id:
return {}
return None, {}
# 验证tmdbid是否相同
item_info = self.get_iteminfo(item_id)
if item_info:
item_tmdbid = (item_info.get("ProviderIds") or {}).get("Tmdb")
if tmdb_id and item_tmdbid:
if str(tmdb_id) != str(item_tmdbid):
return {}
# /Shows/Id/Episodes 查集的信息
if tmdb_id and item_info.tmdbid:
if str(tmdb_id) != str(item_info.tmdbid):
return None, {}
# 查集的信息
if not season:
season = ""
try:
@@ -359,7 +350,8 @@ class Emby(metaclass=Singleton):
self._host, item_id, season, self._apikey)
res_json = RequestUtils().get_res(req_url)
if res_json:
res_items = res_json.json().get("Items")
tv_item = res_json.json()
res_items = tv_item.get("Items")
season_episodes = {}
for res_item in res_items:
season_index = res_item.get("ParentIndexNumber")
@@ -374,11 +366,11 @@ class Emby(metaclass=Singleton):
season_episodes[season_index] = []
season_episodes[season_index].append(episode_index)
# 返回
return season_episodes
return tv_item.get("Id"), season_episodes
except Exception as e:
logger.error(f"连接Shows/Id/Episodes出错" + str(e))
return None
return {}
return None, None
return None, {}
def get_remote_image_by_id(self, item_id: str, image_type: str) -> Optional[str]:
"""
@@ -441,7 +433,7 @@ class Emby(metaclass=Singleton):
return False
return False
def refresh_library_by_items(self, items: List[RefreshMediaItem]) -> bool:
def refresh_library_by_items(self, items: List[schemas.RefreshMediaItem]) -> bool:
"""
按类型、名称、年份来刷新媒体库
:param items: 已识别的需要刷新媒体库的媒体信息列表
@@ -463,7 +455,7 @@ class Emby(metaclass=Singleton):
return self.__refresh_emby_library_by_id(library_id)
logger.info(f"Emby媒体库刷新完成")
def __get_emby_library_id_by_item(self, item: RefreshMediaItem) -> Optional[str]:
def __get_emby_library_id_by_item(self, item: schemas.RefreshMediaItem) -> Optional[str]:
"""
根据媒体信息查询在哪个媒体库返回要刷新的位置的ID
:param item: {title, year, type, category, target_path}
@@ -481,8 +473,8 @@ class Emby(metaclass=Singleton):
return None
# 查找需要刷新的媒体库ID
item_path = Path(item.target_path)
# 匹配子目录
for folder in self.folders:
# 匹配子目录
for subfolder in folder.get("SubFolders"):
try:
# 匹配子目录
@@ -492,38 +484,53 @@ class Emby(metaclass=Singleton):
except Exception as err:
print(str(err))
# 如果找不到,只要路径中有分类目录名就命中
for subfolder in folder.get("SubFolders"):
if subfolder.get("Path") and re.search(r"[/\\]%s" % item.category,
subfolder.get("Path")):
return folder.get("Id")
for folder in self.folders:
for subfolder in folder.get("SubFolders"):
if subfolder.get("Path") and re.search(r"[/\\]%s" % item.category,
subfolder.get("Path")):
return folder.get("Id")
# 刷新根目录
return "/"
def get_iteminfo(self, itemid: str) -> dict:
def get_iteminfo(self, itemid: str) -> Optional[schemas.MediaServerItem]:
"""
获取单个项目详情
"""
if not itemid:
return {}
return None
if not self._host or not self._apikey:
return {}
return None
req_url = "%semby/Users/%s/Items/%s?api_key=%s" % (self._host, self.user, itemid, self._apikey)
try:
res = RequestUtils().get_res(req_url)
if res and res.status_code == 200:
return res.json()
item = res.json()
tmdbid = item.get("ProviderIds", {}).get("Tmdb")
return schemas.MediaServerItem(
server="emby",
library=item.get("ParentId"),
item_id=item.get("Id"),
item_type=item.get("Type"),
title=item.get("Name"),
original_title=item.get("OriginalTitle"),
year=item.get("ProductionYear"),
tmdbid=int(tmdbid) if tmdbid else None,
imdbid=item.get("ProviderIds", {}).get("Imdb"),
tvdbid=item.get("ProviderIds", {}).get("Tvdb"),
path=item.get("Path")
)
except Exception as e:
logger.error(f"连接Items/Id出错" + str(e))
return {}
return None
def get_items(self, parent: str) -> Generator:
"""
获取媒体服务器所有媒体库列表
"""
if not parent:
yield {}
yield None
if not self._host or not self._apikey:
yield {}
yield None
req_url = "%semby/Users/%s/Items?ParentId=%s&api_key=%s" % (self._host, self.user, parent, self._apikey)
try:
res = RequestUtils().get_res(req_url)
@@ -533,26 +540,15 @@ class Emby(metaclass=Singleton):
if not result:
continue
if result.get("Type") in ["Movie", "Series"]:
item_info = self.get_iteminfo(result.get("Id"))
yield {"id": result.get("Id"),
"library": item_info.get("ParentId"),
"type": item_info.get("Type"),
"title": item_info.get("Name"),
"original_title": item_info.get("OriginalTitle"),
"year": item_info.get("ProductionYear"),
"tmdbid": item_info.get("ProviderIds", {}).get("Tmdb"),
"imdbid": item_info.get("ProviderIds", {}).get("Imdb"),
"tvdbid": item_info.get("ProviderIds", {}).get("Tvdb"),
"path": item_info.get("Path"),
"json": str(item_info)}
yield self.get_iteminfo(result.get("Id"))
elif "Folder" in result.get("Type"):
for item in self.get_items(parent=result.get('Id')):
yield item
except Exception as e:
logger.error(f"连接Users/Items出错" + str(e))
yield {}
yield None
def get_webhook_message(self, form: any, args: dict) -> Optional[WebhookEventInfo]:
def get_webhook_message(self, form: any, args: dict) -> Optional[schemas.WebhookEventInfo]:
"""
解析Emby Webhook报文
电影:
@@ -805,7 +801,7 @@ class Emby(metaclass=Singleton):
if not eventType:
return None
logger.info(f"接收到emby webhook{message}")
eventItem = WebhookEventInfo(event=eventType, channel="emby")
eventItem = schemas.WebhookEventInfo(event=eventType, channel="emby")
if message.get('Item'):
if message.get('Item', {}).get('Type') == 'Episode':
eventItem.item_type = "TV"
@@ -871,16 +867,36 @@ class Emby(metaclass=Singleton):
def get_data(self, url: str) -> Optional[Response]:
"""
自定义URL从媒体服务器获取数据其中{HOST}{APIKEY}{USER}会被替换成实际的值
自定义URL从媒体服务器获取数据其中[HOST]、[APIKEY]、[USER]会被替换成实际的值
:param url: 请求地址
"""
if not self._host or not self._apikey:
return None
url = url.replace("{HOST}", self._host) \
.replace("{APIKEY}", self._apikey) \
.replace("{USER}", self.user)
url = url.replace("[HOST]", self._host) \
.replace("[APIKEY]", self._apikey) \
.replace("[USER]", self.user)
try:
return RequestUtils().get_res(url=url)
return RequestUtils(content_type="application/json").get_res(url=url)
except Exception as e:
logger.error(f"连接Emby出错" + str(e))
return None
def post_data(self, url: str, data: str = None, headers: dict = None) -> Optional[Response]:
"""
自定义URL从媒体服务器获取数据其中[HOST]、[APIKEY]、[USER]会被替换成实际的值
:param url: 请求地址
:param data: 请求数据
:param headers: 请求头
"""
if not self._host or not self._apikey:
return None
url = url.replace("[HOST]", self._host) \
.replace("[APIKEY]", self._apikey) \
.replace("[USER]", self.user)
try:
return RequestUtils(
headers=headers,
).post_res(url=url, data=data)
except Exception as e:
logger.error(f"连接Emby出错" + str(e))
return None

View File

@@ -542,7 +542,9 @@ class FileTransferModule(_ModuleBase):
# 剧集标题
"episode_title": episode_title,
# 文件后缀
"fileExt": file_ext
"fileExt": file_ext,
# 自定义占位符
"customization": meta.customization
}
@staticmethod

View File

@@ -6,7 +6,6 @@ from app.core.context import MediaInfo
from app.log import logger
from app.modules import _ModuleBase
from app.modules.jellyfin.jellyfin import Jellyfin
from app.schemas import ExistMediaInfo, WebhookEventInfo
from app.schemas.types import MediaType
@@ -40,7 +39,7 @@ class JellyfinModule(_ModuleBase):
# Jellyfin认证
return self.jellyfin.authenticate(name, password)
def webhook_parser(self, body: Any, form: Any, args: Any) -> Optional[WebhookEventInfo]:
def webhook_parser(self, body: Any, form: Any, args: Any) -> Optional[schemas.WebhookEventInfo]:
"""
解析Webhook报文体
:param body: 请求体
@@ -50,7 +49,7 @@ class JellyfinModule(_ModuleBase):
"""
return self.jellyfin.get_webhook_message(body)
def media_exists(self, mediainfo: MediaInfo, itemid: str = None) -> Optional[ExistMediaInfo]:
def media_exists(self, mediainfo: MediaInfo, itemid: str = None) -> Optional[schemas.ExistMediaInfo]:
"""
判断媒体文件是否存在
:param mediainfo: 识别的媒体信息
@@ -62,25 +61,38 @@ class JellyfinModule(_ModuleBase):
movie = self.jellyfin.get_iteminfo(itemid)
if movie:
logger.info(f"媒体库中已存在:{movie}")
return ExistMediaInfo(type=MediaType.MOVIE)
return schemas.ExistMediaInfo(
type=MediaType.MOVIE,
server="jellyfin",
itemid=movie.item_id
)
movies = self.jellyfin.get_movies(title=mediainfo.title, year=mediainfo.year, tmdb_id=mediainfo.tmdb_id)
if not movies:
logger.info(f"{mediainfo.title_year} 在媒体库中不存在")
return None
else:
logger.info(f"媒体库中已存在:{movies}")
return ExistMediaInfo(type=MediaType.MOVIE)
return schemas.ExistMediaInfo(
type=MediaType.MOVIE,
server="jellyfin",
itemid=movies[0].item_id
)
else:
tvs = self.jellyfin.get_tv_episodes(title=mediainfo.title,
year=mediainfo.year,
tmdb_id=mediainfo.tmdb_id,
item_id=itemid)
itemid, tvs = self.jellyfin.get_tv_episodes(title=mediainfo.title,
year=mediainfo.year,
tmdb_id=mediainfo.tmdb_id,
item_id=itemid)
if not tvs:
logger.info(f"{mediainfo.title_year} 在媒体库中不存在")
return None
else:
logger.info(f"{mediainfo.title_year} 媒体库中已存在:{tvs}")
return ExistMediaInfo(type=MediaType.TV, seasons=tvs)
return schemas.ExistMediaInfo(
type=MediaType.TV,
seasons=tvs,
server="jellyfin",
itemid=itemid
)
def refresh_mediaserver(self, mediainfo: MediaInfo, file_path: Path) -> None:
"""
@@ -96,13 +108,8 @@ class JellyfinModule(_ModuleBase):
媒体数量统计
"""
media_statistic = self.jellyfin.get_medias_count()
user_count = self.jellyfin.get_user_count()
return [schemas.Statistic(
movie_count=media_statistic.get("MovieCount") or 0,
tv_count=media_statistic.get("SeriesCount") or 0,
episode_count=media_statistic.get("EpisodeCount") or 0,
user_count=user_count or 0
)]
media_statistic.user_count = self.jellyfin.get_user_count()
return [media_statistic]
def mediaserver_librarys(self, server: str) -> Optional[List[schemas.MediaServerLibrary]]:
"""
@@ -110,16 +117,7 @@ class JellyfinModule(_ModuleBase):
"""
if server != "jellyfin":
return None
librarys = self.jellyfin.get_librarys()
if not librarys:
return []
return [schemas.MediaServerLibrary(
server="jellyfin",
id=library.get("id"),
name=library.get("name"),
type=library.get("type"),
path=library.get("path")
) for library in librarys]
return self.jellyfin.get_librarys()
def mediaserver_items(self, server: str, library_id: str) -> Optional[Generator]:
"""
@@ -127,21 +125,15 @@ class JellyfinModule(_ModuleBase):
"""
if server != "jellyfin":
return None
items = self.jellyfin.get_items(library_id)
for item in items:
yield schemas.MediaServerItem(
server="jellyfin",
library=item.get("library"),
item_id=item.get("id"),
item_type=item.get("type"),
title=item.get("title"),
original_title=item.get("original_title"),
year=item.get("year"),
tmdbid=item.get("tmdbid"),
imdbid=item.get("imdbid"),
tvdbid=item.get("tvdbid"),
path=item.get("path"),
)
return self.jellyfin.get_items(library_id)
def mediaserver_iteminfo(self, server: str, item_id: str) -> Optional[schemas.MediaServerItem]:
"""
媒体库项目详情
"""
if server != "jellyfin":
return None
return self.jellyfin.get_iteminfo(item_id)
def mediaserver_tv_episodes(self, server: str,
item_id: Union[str, int]) -> Optional[List[schemas.MediaServerSeasonInfo]]:
@@ -150,7 +142,7 @@ class JellyfinModule(_ModuleBase):
"""
if server != "jellyfin":
return None
seasoninfo = self.jellyfin.get_tv_episodes(item_id=item_id)
_, seasoninfo = self.jellyfin.get_tv_episodes(item_id=item_id)
if not seasoninfo:
return []
return [schemas.MediaServerSeasonInfo(

View File

@@ -1,15 +1,14 @@
import json
import re
from typing import List, Union, Optional, Dict, Generator
from typing import List, Union, Optional, Dict, Generator, Tuple
from requests import Response
from app import schemas
from app.core.config import settings
from app.log import logger
from app.schemas import MediaType, WebhookEventInfo
from app.schemas import MediaType
from app.utils.http import RequestUtils
from app.utils.singleton import Singleton
from app.utils.string import StringUtils
class Jellyfin(metaclass=Singleton):
@@ -73,12 +72,14 @@ class Jellyfin(metaclass=Singleton):
library_type = MediaType.TV.value
case _:
continue
libraries.append({
"id": library.get("Id"),
"name": library.get("Name"),
"path": library.get("Path"),
"type": library_type
})
libraries.append(
schemas.MediaServerLibrary(
server="jellyfin",
id=library.get("Id"),
name=library.get("Name"),
path=library.get("Path"),
type=library_type
))
return libraries
def get_user_count(self) -> int:
@@ -179,59 +180,29 @@ class Jellyfin(metaclass=Singleton):
logger.error(f"连接System/Info出错" + str(e))
return None
def get_activity_log(self, num: int = 30) -> List[dict]:
"""
获取Jellyfin活动记录
"""
if not self._host or not self._apikey:
return []
req_url = "%sSystem/ActivityLog/Entries?api_key=%s&Limit=%s" % (self._host, self._apikey, num)
ret_array = []
try:
res = RequestUtils().get_res(req_url)
if res:
ret_json = res.json()
items = ret_json.get('Items')
for item in items:
if item.get("Type") == "SessionStarted":
event_type = "LG"
event_date = re.sub(r'\dZ', 'Z', item.get("Date"))
event_str = "%s, %s" % (item.get("Name"), item.get("ShortOverview"))
activity = {"type": event_type, "event": event_str,
"date": StringUtils.get_time(event_date)}
ret_array.append(activity)
if item.get("Type") in ["VideoPlayback", "VideoPlaybackStopped"]:
event_type = "PL"
event_date = re.sub(r'\dZ', 'Z', item.get("Date"))
activity = {"type": event_type, "event": item.get("Name"),
"date": StringUtils.get_time(event_date)}
ret_array.append(activity)
else:
logger.error(f"System/ActivityLog/Entries 未获取到返回数据")
return []
except Exception as e:
logger.error(f"连接System/ActivityLog/Entries出错" + str(e))
return []
return ret_array
def get_medias_count(self) -> Optional[dict]:
def get_medias_count(self) -> schemas.Statistic:
"""
获得电影、电视剧、动漫媒体数量
:return: MovieCount SeriesCount SongCount
"""
if not self._host or not self._apikey:
return None
return schemas.Statistic()
req_url = "%sItems/Counts?api_key=%s" % (self._host, self._apikey)
try:
res = RequestUtils().get_res(req_url)
if res:
return res.json()
result = res.json()
return schemas.Statistic(
movie_count=result.get("MovieCount") or 0,
tv_count=result.get("SeriesCount") or 0,
episode_count=result.get("EpisodeCount") or 0
)
else:
logger.error(f"Items/Counts 未获取到返回数据")
return {}
return schemas.Statistic()
except Exception as e:
logger.error(f"连接Items/Counts出错" + str(e))
return {}
return schemas.Statistic()
def __get_jellyfin_series_id_by_name(self, name: str, year: str) -> Optional[str]:
"""
@@ -239,7 +210,8 @@ class Jellyfin(metaclass=Singleton):
"""
if not self._host or not self._apikey or not self.user:
return None
req_url = "%sUsers/%s/Items?api_key=%s&searchTerm=%s&IncludeItemTypes=Series&Limit=10&Recursive=true" % (
req_url = ("%sUsers/%s/Items?"
"api_key=%s&searchTerm=%s&IncludeItemTypes=Series&Limit=10&Recursive=true") % (
self._host, self.user, self._apikey, name)
try:
res = RequestUtils().get_res(req_url)
@@ -258,7 +230,7 @@ class Jellyfin(metaclass=Singleton):
def get_movies(self,
title: str,
year: str = None,
tmdb_id: int = None) -> Optional[List[dict]]:
tmdb_id: int = None) -> Optional[List[schemas.MediaServerItem]]:
"""
根据标题和年份检查电影是否在Jellyfin中存在存在则返回列表
:param title: 标题
@@ -268,7 +240,8 @@ class Jellyfin(metaclass=Singleton):
"""
if not self._host or not self._apikey or not self.user:
return None
req_url = "%sUsers/%s/Items?api_key=%s&searchTerm=%s&IncludeItemTypes=Movie&Limit=10&Recursive=true" % (
req_url = ("%sUsers/%s/Items?"
"api_key=%s&searchTerm=%s&IncludeItemTypes=Movie&Limit=10&Recursive=true") % (
self._host, self.user, self._apikey, title)
try:
res = RequestUtils().get_res(req_url)
@@ -276,19 +249,30 @@ class Jellyfin(metaclass=Singleton):
res_items = res.json().get("Items")
if res_items:
ret_movies = []
for res_item in res_items:
item_tmdbid = res_item.get("ProviderIds", {}).get("Tmdb")
for item in res_items:
item_tmdbid = item.get("ProviderIds", {}).get("Tmdb")
mediaserver_item = schemas.MediaServerItem(
server="jellyfin",
library=item.get("ParentId"),
item_id=item.get("Id"),
item_type=item.get("Type"),
title=item.get("Name"),
original_title=item.get("OriginalTitle"),
year=item.get("ProductionYear"),
tmdbid=int(item_tmdbid) if item_tmdbid else None,
imdbid=item.get("ProviderIds", {}).get("Imdb"),
tvdbid=item.get("ProviderIds", {}).get("Tvdb"),
path=item.get("Path")
)
if tmdb_id and item_tmdbid:
if str(item_tmdbid) != str(tmdb_id):
continue
else:
ret_movies.append(
{'title': res_item.get('Name'), 'year': str(res_item.get('ProductionYear'))})
ret_movies.append(mediaserver_item)
continue
if res_item.get('Name') == title and (
not year or str(res_item.get('ProductionYear')) == str(year)):
ret_movies.append(
{'title': res_item.get('Name'), 'year': str(res_item.get('ProductionYear'))})
if mediaserver_item.title == title and (
not year or str(mediaserver_item.year) == str(year)):
ret_movies.append(mediaserver_item)
return ret_movies
except Exception as e:
logger.error(f"连接Items出错" + str(e))
@@ -300,7 +284,7 @@ class Jellyfin(metaclass=Singleton):
title: str = None,
year: str = None,
tmdb_id: int = None,
season: int = None) -> Optional[Dict[int, list]]:
season: int = None) -> Tuple[Optional[str], Optional[Dict[int, list]]]:
"""
根据标题和年份和季返回Jellyfin中的剧集列表
:param item_id: Jellyfin中的Id
@@ -311,19 +295,20 @@ class Jellyfin(metaclass=Singleton):
:return: 集号的列表
"""
if not self._host or not self._apikey or not self.user:
return None
return None, None
# 查TVID
if not item_id:
item_id = self.__get_jellyfin_series_id_by_name(title, year)
if item_id is None:
return None
return None, None
if not item_id:
return {}
return None, {}
# 验证tmdbid是否相同
item_tmdbid = (self.get_iteminfo(item_id).get("ProviderIds") or {}).get("Tmdb")
if tmdb_id and item_tmdbid:
if str(tmdb_id) != str(item_tmdbid):
return {}
item_info = self.get_iteminfo(item_id)
if item_info:
if tmdb_id and item_info.tmdbid:
if str(tmdb_id) != str(item_info.tmdbid):
return None, {}
if not season:
season = ""
try:
@@ -331,7 +316,8 @@ class Jellyfin(metaclass=Singleton):
self._host, item_id, season, self.user, self._apikey)
res_json = RequestUtils().get_res(req_url)
if res_json:
res_items = res_json.json().get("Items")
tv_info = res_json.json()
res_items = tv_info.get("Items")
# 返回的季集信息
season_episodes = {}
for res_item in res_items:
@@ -346,11 +332,11 @@ class Jellyfin(metaclass=Singleton):
if not season_episodes.get(season_index):
season_episodes[season_index] = []
season_episodes[season_index].append(episode_index)
return season_episodes
return tv_info.get('Id'), season_episodes
except Exception as e:
logger.error(f"连接Shows/Id/Episodes出错" + str(e))
return None
return {}
return None, None
return None, {}
def get_remote_image_by_id(self, item_id: str, image_type: str) -> Optional[str]:
"""
@@ -394,7 +380,7 @@ class Jellyfin(metaclass=Singleton):
logger.error(f"连接Library/Refresh出错" + str(e))
return False
def get_webhook_message(self, body: any) -> Optional[WebhookEventInfo]:
def get_webhook_message(self, body: any) -> Optional[schemas.WebhookEventInfo]:
"""
解析Jellyfin报文
{
@@ -470,7 +456,7 @@ class Jellyfin(metaclass=Singleton):
eventType = message.get('NotificationType')
if not eventType:
return None
eventItem = WebhookEventInfo(
eventItem = schemas.WebhookEventInfo(
event=eventType,
channel="jellyfin"
)
@@ -506,32 +492,46 @@ class Jellyfin(metaclass=Singleton):
return eventItem
def get_iteminfo(self, itemid: str) -> dict:
def get_iteminfo(self, itemid: str) -> Optional[schemas.MediaServerItem]:
"""
获取单个项目详情
"""
if not itemid:
return {}
return None
if not self._host or not self._apikey:
return {}
return None
req_url = "%sUsers/%s/Items/%s?api_key=%s" % (
self._host, self.user, itemid, self._apikey)
try:
res = RequestUtils().get_res(req_url)
if res and res.status_code == 200:
return res.json()
item = res.json()
tmdbid = item.get("ProviderIds", {}).get("Tmdb")
return schemas.MediaServerItem(
server="jellyfin",
library=item.get("ParentId"),
item_id=item.get("Id"),
item_type=item.get("Type"),
title=item.get("Name"),
original_title=item.get("OriginalTitle"),
year=item.get("ProductionYear"),
tmdbid=int(tmdbid) if tmdbid else None,
imdbid=item.get("ProviderIds", {}).get("Imdb"),
tvdbid=item.get("ProviderIds", {}).get("Tvdb"),
path=item.get("Path")
)
except Exception as e:
logger.error(f"连接Users/Items出错" + str(e))
return {}
return None
def get_items(self, parent: str) -> Generator:
"""
获取媒体服务器所有媒体库列表
"""
if not parent:
yield {}
yield None
if not self._host or not self._apikey:
yield {}
yield None
req_url = "%sUsers/%s/Items?parentId=%s&api_key=%s" % (self._host, self.user, parent, self._apikey)
try:
res = RequestUtils().get_res(req_url)
@@ -541,37 +541,46 @@ class Jellyfin(metaclass=Singleton):
if not result:
continue
if result.get("Type") in ["Movie", "Series"]:
item_info = self.get_iteminfo(result.get("Id"))
yield {"id": result.get("Id"),
"library": item_info.get("ParentId"),
"type": item_info.get("Type"),
"title": item_info.get("Name"),
"original_title": item_info.get("OriginalTitle"),
"year": item_info.get("ProductionYear"),
"tmdbid": item_info.get("ProviderIds", {}).get("Tmdb"),
"imdbid": item_info.get("ProviderIds", {}).get("Imdb"),
"tvdbid": item_info.get("ProviderIds", {}).get("Tvdb"),
"path": item_info.get("Path"),
"json": str(item_info)}
yield self.get_iteminfo(result.get("Id"))
elif "Folder" in result.get("Type"):
for item in self.get_items(result.get("Id")):
yield item
except Exception as e:
logger.error(f"连接Users/Items出错" + str(e))
yield {}
yield None
def get_data(self, url: str) -> Optional[Response]:
"""
自定义URL从媒体服务器获取数据其中{HOST}{APIKEY}{USER}会被替换成实际的值
自定义URL从媒体服务器获取数据其中[HOST]、[APIKEY]、[USER]会被替换成实际的值
:param url: 请求地址
"""
if not self._host or not self._apikey:
return None
url = url.replace("{HOST}", self._host) \
.replace("{APIKEY}", self._apikey) \
.replace("{USER}", self.user)
url = url.replace("[HOST]", self._host) \
.replace("[APIKEY]", self._apikey) \
.replace("[USER]", self.user)
try:
return RequestUtils().get_res(url=url)
return RequestUtils(accept_type="application/json").get_res(url=url)
except Exception as e:
logger.error(f"连接Jellyfin出错" + str(e))
return None
def post_data(self, url: str, data: str = None, headers: dict = None) -> Optional[Response]:
"""
自定义URL从媒体服务器获取数据其中[HOST]、[APIKEY]、[USER]会被替换成实际的值
:param url: 请求地址
:param data: 请求数据
:param headers: 请求头
"""
if not self._host or not self._apikey:
return None
url = url.replace("[HOST]", self._host) \
.replace("[APIKEY]", self._apikey) \
.replace("[USER]", self.user)
try:
return RequestUtils(
headers=headers
).post_res(url=url, data=data)
except Exception as e:
logger.error(f"连接Jellyfin出错" + str(e))
return None

View File

@@ -6,12 +6,10 @@ from app.core.context import MediaInfo
from app.log import logger
from app.modules import _ModuleBase
from app.modules.plex.plex import Plex
from app.schemas import ExistMediaInfo, RefreshMediaItem, WebhookEventInfo
from app.schemas.types import MediaType
class PlexModule(_ModuleBase):
plex: Plex = None
def init_module(self) -> None:
@@ -31,7 +29,7 @@ class PlexModule(_ModuleBase):
if not self.plex.is_inactive():
self.plex.reconnect()
def webhook_parser(self, body: Any, form: Any, args: Any) -> Optional[WebhookEventInfo]:
def webhook_parser(self, body: Any, form: Any, args: Any) -> Optional[schemas.WebhookEventInfo]:
"""
解析Webhook报文体
:param body: 请求体
@@ -41,7 +39,7 @@ class PlexModule(_ModuleBase):
"""
return self.plex.get_webhook_message(form)
def media_exists(self, mediainfo: MediaInfo, itemid: str = None) -> Optional[ExistMediaInfo]:
def media_exists(self, mediainfo: MediaInfo, itemid: str = None) -> Optional[schemas.ExistMediaInfo]:
"""
判断媒体文件是否存在
:param mediainfo: 识别的媒体信息
@@ -53,29 +51,42 @@ class PlexModule(_ModuleBase):
movie = self.plex.get_iteminfo(itemid)
if movie:
logger.info(f"媒体库中已存在:{movie}")
return ExistMediaInfo(type=MediaType.MOVIE)
return schemas.ExistMediaInfo(
type=MediaType.MOVIE,
server="plex",
itemid=movie.item_id
)
movies = self.plex.get_movies(title=mediainfo.title,
original_title=mediainfo.original_title,
year=mediainfo.year,
original_title=mediainfo.original_title,
year=mediainfo.year,
tmdb_id=mediainfo.tmdb_id)
if not movies:
logger.info(f"{mediainfo.title_year} 在媒体库中不存在")
return None
else:
logger.info(f"媒体库中已存在:{movies}")
return ExistMediaInfo(type=MediaType.MOVIE)
return schemas.ExistMediaInfo(
type=MediaType.MOVIE,
server="plex",
itemid=movies[0].item_id
)
else:
tvs = self.plex.get_tv_episodes(title=mediainfo.title,
original_title=mediainfo.original_title,
year=mediainfo.year,
tmdb_id=mediainfo.tmdb_id,
item_id=itemid)
item_id, tvs = self.plex.get_tv_episodes(title=mediainfo.title,
original_title=mediainfo.original_title,
year=mediainfo.year,
tmdb_id=mediainfo.tmdb_id,
item_id=itemid)
if not tvs:
logger.info(f"{mediainfo.title_year} 在媒体库中不存在")
return None
else:
logger.info(f"{mediainfo.title_year} 媒体库中已存在:{tvs}")
return ExistMediaInfo(type=MediaType.TV, seasons=tvs)
return schemas.ExistMediaInfo(
type=MediaType.TV,
seasons=tvs,
server="plex",
itemid=item_id
)
def refresh_mediaserver(self, mediainfo: MediaInfo, file_path: Path) -> None:
"""
@@ -85,7 +96,7 @@ class PlexModule(_ModuleBase):
:return: 成功或失败
"""
items = [
RefreshMediaItem(
schemas.RefreshMediaItem(
title=mediainfo.title,
year=mediainfo.year,
type=mediainfo.type,
@@ -100,12 +111,8 @@ class PlexModule(_ModuleBase):
媒体数量统计
"""
media_statistic = self.plex.get_medias_count()
return [schemas.Statistic(
movie_count=media_statistic.get("MovieCount") or 0,
tv_count=media_statistic.get("SeriesCount") or 0,
episode_count=media_statistic.get("EpisodeCount") or 0,
user_count=1
)]
media_statistic.user_count = 1
return [media_statistic]
def mediaserver_librarys(self, server: str) -> Optional[List[schemas.MediaServerLibrary]]:
"""
@@ -113,16 +120,7 @@ class PlexModule(_ModuleBase):
"""
if server != "plex":
return None
librarys = self.plex.get_librarys()
if not librarys:
return []
return [schemas.MediaServerLibrary(
server="plex",
id=library.get("id"),
name=library.get("name"),
type=library.get("type"),
path=library.get("path")
) for library in librarys]
return self.plex.get_librarys()
def mediaserver_items(self, server: str, library_id: str) -> Optional[Generator]:
"""
@@ -130,21 +128,15 @@ class PlexModule(_ModuleBase):
"""
if server != "plex":
return None
items = self.plex.get_items(library_id)
for item in items:
yield schemas.MediaServerItem(
server="plex",
library=item.get("library"),
item_id=item.get("id"),
item_type=item.get("type"),
title=item.get("title"),
original_title=item.get("original_title"),
year=item.get("year"),
tmdbid=item.get("tmdbid"),
imdbid=item.get("imdbid"),
tvdbid=item.get("tvdbid"),
path=item.get("path"),
)
return self.plex.get_items(library_id)
def mediaserver_iteminfo(self, server: str, item_id: str) -> Optional[schemas.MediaServerItem]:
"""
媒体库项目详情
"""
if server != "plex":
return None
return self.plex.get_iteminfo(item_id)
def mediaserver_tv_episodes(self, server: str,
item_id: Union[str, int]) -> Optional[List[schemas.MediaServerSeasonInfo]]:
@@ -153,7 +145,7 @@ class PlexModule(_ModuleBase):
"""
if server != "plex":
return None
seasoninfo = self.plex.get_tv_episodes(item_id=item_id)
_, seasoninfo = self.plex.get_tv_episodes(item_id=item_id)
if not seasoninfo:
return []
return [schemas.MediaServerSeasonInfo(

View File

@@ -6,9 +6,10 @@ from urllib.parse import quote_plus
from plexapi import media
from plexapi.server import PlexServer
from app import schemas
from app.core.config import settings
from app.log import logger
from app.schemas import RefreshMediaItem, MediaType, WebhookEventInfo
from app.schemas import MediaType
from app.utils.singleton import Singleton
@@ -49,7 +50,7 @@ class Plex(metaclass=Singleton):
self._plex = None
logger.error(f"Plex服务器连接失败{str(e)}")
def get_librarys(self):
def get_librarys(self) -> List[schemas.MediaServerLibrary]:
"""
获取媒体服务器所有媒体库列表
"""
@@ -69,81 +70,42 @@ class Plex(metaclass=Singleton):
library_type = MediaType.TV.value
case _:
continue
libraries.append({
"id": library.key,
"name": library.title,
"path": library.locations,
"type": library_type
})
libraries.append(
schemas.MediaServerLibrary(
id=library.key,
name=library.title,
path=library.locations,
type=library_type
)
)
return libraries
def get_activity_log(self, num: int = 30) -> Optional[List[dict]]:
"""
获取Plex活动记录
"""
if not self._plex:
return []
ret_array = []
try:
# type的含义: 1 电影 4 剧集单集 详见 plexapi/utils.py中SEARCHTYPES的定义
# 根据最后播放时间倒序获取数据
historys = self._plex.library.search(sort='lastViewedAt:desc', limit=num, type='1,4')
for his in historys:
# 过滤掉最后播放时间为空的
if his.lastViewedAt:
if his.type == "episode":
event_title = "%s %s%s %s" % (
his.grandparentTitle,
"S" + str(his.parentIndex),
"E" + str(his.index),
his.title
)
event_str = "开始播放剧集 %s" % event_title
else:
event_title = "%s %s" % (
his.title, "(" + str(his.year) + ")")
event_str = "开始播放电影 %s" % event_title
event_type = "PL"
event_date = his.lastViewedAt.strftime('%Y-%m-%d %H:%M:%S')
activity = {"type": event_type, "event": event_str, "date": event_date}
ret_array.append(activity)
except Exception as e:
logger.error(f"连接System/ActivityLog/Entries出错" + str(e))
return []
if ret_array:
ret_array = sorted(ret_array, key=lambda x: x['date'], reverse=True)
return ret_array
def get_medias_count(self) -> dict:
def get_medias_count(self) -> schemas.Statistic:
"""
获得电影、电视剧、动漫媒体数量
:return: MovieCount SeriesCount SongCount
"""
if not self._plex:
return {}
return schemas.Statistic()
sections = self._plex.library.sections()
MovieCount = SeriesCount = SongCount = EpisodeCount = 0
MovieCount = SeriesCount = EpisodeCount = 0
for sec in sections:
if sec.type == "movie":
MovieCount += sec.totalSize
if sec.type == "show":
SeriesCount += sec.totalSize
EpisodeCount += sec.totalViewSize(libtype='episode')
if sec.type == "artist":
SongCount += sec.totalSize
return {
"MovieCount": MovieCount,
"SeriesCount": SeriesCount,
"SongCount": SongCount,
"EpisodeCount": EpisodeCount
}
return schemas.Statistic(
movie_count=MovieCount,
tv_count=SeriesCount,
episode_count=EpisodeCount
)
def get_movies(self,
title: str,
def get_movies(self,
title: str,
original_title: str = None,
year: str = None,
tmdb_id: int = None) -> Optional[List[dict]]:
tmdb_id: int = None) -> Optional[List[schemas.MediaServerItem]]:
"""
根据标题和年份检查电影是否在Plex中存在存在则返回列表
:param title: 标题
@@ -156,20 +118,43 @@ class Plex(metaclass=Singleton):
return None
ret_movies = []
if year:
movies = self._plex.library.search(title=title, year=year, libtype="movie")
movies = self._plex.library.search(title=title,
year=year,
libtype="movie")
# 根据原标题再查一遍
if original_title and str(original_title) != str(title):
movies.extend(self._plex.library.search(title=original_title, year=year, libtype="movie"))
movies.extend(self._plex.library.search(title=original_title,
year=year,
libtype="movie"))
else:
movies = self._plex.library.search(title=title, libtype="movie")
movies = self._plex.library.search(title=title,
libtype="movie")
if original_title and str(original_title) != str(title):
movies.extend(self._plex.library.search(title=original_title, year=year, libtype="movie"))
for movie in set(movies):
movie_tmdbid = self.__get_ids(movie.guids).get("tmdb_id")
if tmdb_id and movie_tmdbid:
if str(movie_tmdbid) != str(tmdb_id):
movies.extend(self._plex.library.search(title=original_title,
libtype="movie"))
for item in set(movies):
ids = self.__get_ids(item.guids)
if tmdb_id and ids['tmdb_id']:
if str(ids['tmdb_id']) != str(tmdb_id):
continue
ret_movies.append({'title': movie.title, 'year': movie.year})
path = None
if item.locations:
path = item.locations[0]
ret_movies.append(
schemas.MediaServerItem(
server="plex",
library=item.librarySectionID,
item_id=item.key,
item_type=item.type,
title=item.title,
original_title=item.originalTitle,
year=item.year,
tmdbid=ids['tmdb_id'],
imdbid=ids['imdb_id'],
tvdbid=ids['tvdb_id'],
path=path,
)
)
return ret_movies
def get_tv_episodes(self,
@@ -178,7 +163,7 @@ class Plex(metaclass=Singleton):
original_title: str = None,
year: str = None,
tmdb_id: int = None,
season: int = None) -> Optional[Dict[int, list]]:
season: int = None) -> Tuple[Optional[str], Optional[Dict[int, list]]]:
"""
根据标题、年份、季查询电视剧所有集信息
:param item_id: 媒体ID
@@ -190,22 +175,28 @@ class Plex(metaclass=Singleton):
:return: 所有集的列表
"""
if not self._plex:
return {}
return None, {}
if item_id:
videos = self._plex.fetchItem(item_id)
else:
# 根据标题和年份模糊搜索,该结果不够准确
videos = self._plex.library.search(title=title, year=year, libtype="show")
if not videos and original_title and str(original_title) != str(title):
videos = self._plex.library.search(title=original_title, year=year, libtype="show")
videos = self._plex.library.search(title=title,
year=year,
libtype="show")
if (not videos
and original_title
and str(original_title) != str(title)):
videos = self._plex.library.search(title=original_title,
year=year,
libtype="show")
if not videos:
return {}
return None, {}
if isinstance(videos, list):
videos = videos[0]
video_tmdbid = self.__get_ids(videos.guids).get('tmdb_id')
if tmdb_id and video_tmdbid:
if str(video_tmdbid) != str(tmdb_id):
return {}
return None, {}
episodes = videos.episodes()
season_episodes = {}
for episode in episodes:
@@ -214,7 +205,7 @@ class Plex(metaclass=Singleton):
if episode.seasonNumber not in season_episodes:
season_episodes[episode.seasonNumber] = []
season_episodes[episode.seasonNumber].append(episode.index)
return season_episodes
return videos.key, season_episodes
def get_remote_image_by_id(self, item_id: str, image_type: str) -> Optional[str]:
"""
@@ -227,9 +218,11 @@ class Plex(metaclass=Singleton):
return None
try:
if image_type == "Poster":
images = self._plex.fetchItems('/library/metadata/%s/posters' % item_id, cls=media.Poster)
images = self._plex.fetchItems('/library/metadata/%s/posters' % item_id,
cls=media.Poster)
else:
images = self._plex.fetchItems('/library/metadata/%s/arts' % item_id, cls=media.Art)
images = self._plex.fetchItems('/library/metadata/%s/arts' % item_id,
cls=media.Art)
for image in images:
if hasattr(image, 'key') and image.key.startswith('http'):
return image.key
@@ -245,7 +238,7 @@ class Plex(metaclass=Singleton):
return False
return self._plex.library.update()
def refresh_library_by_items(self, items: List[RefreshMediaItem]) -> bool:
def refresh_library_by_items(self, items: List[schemas.RefreshMediaItem]) -> bool:
"""
按路径刷新媒体库 item: target_path
"""
@@ -294,19 +287,34 @@ class Plex(metaclass=Singleton):
logger.error(f"查找媒体库出错:{err}")
return "", ""
def get_iteminfo(self, itemid: str) -> dict:
def get_iteminfo(self, itemid: str) -> Optional[schemas.MediaServerItem]:
"""
获取单个项目详情
"""
if not self._plex:
return {}
return None
try:
item = self._plex.fetchItem(itemid)
ids = self.__get_ids(item.guids)
return {'ProviderIds': {'Tmdb': ids['tmdb_id'], 'Imdb': ids['imdb_id']}}
path = None
if item.locations:
path = item.locations[0]
return schemas.MediaServerItem(
server="plex",
library=item.librarySectionID,
item_id=item.key,
item_type=item.type,
title=item.title,
original_title=item.originalTitle,
year=item.year,
tmdbid=ids['tmdb_id'],
imdbid=ids['imdb_id'],
tvdbid=ids['tvdb_id'],
path=path,
)
except Exception as err:
logger.error(f"获取项目详情出错:{err}")
return {}
return None
@staticmethod
def __get_ids(guids: List[Any]) -> dict:
@@ -337,9 +345,9 @@ class Plex(metaclass=Singleton):
获取媒体服务器所有媒体库列表
"""
if not parent:
yield {}
yield None
if not self._plex:
yield {}
yield None
try:
section = self._plex.library.sectionByID(int(parent))
if section:
@@ -350,21 +358,24 @@ class Plex(metaclass=Singleton):
path = None
if item.locations:
path = item.locations[0]
yield {"id": item.key,
"library": item.librarySectionID,
"type": item.type,
"title": item.title,
"original_title": item.originalTitle,
"year": item.year,
"tmdbid": ids['tmdb_id'],
"imdbid": ids['imdb_id'],
"tvdbid": ids['tvdb_id'],
"path": path}
yield schemas.MediaServerItem(
server="plex",
library=item.librarySectionID,
item_id=item.key,
item_type=item.type,
title=item.title,
original_title=item.originalTitle,
year=item.year,
tmdbid=ids['tmdb_id'],
imdbid=ids['imdb_id'],
tvdbid=ids['tvdb_id'],
path=path,
)
except Exception as err:
logger.error(f"获取媒体库列表出错:{err}")
yield {}
yield None
def get_webhook_message(self, form: any) -> Optional[WebhookEventInfo]:
def get_webhook_message(self, form: any) -> Optional[schemas.WebhookEventInfo]:
"""
解析Plex报文
eventItem 字段的含义
@@ -413,7 +424,7 @@ class Plex(metaclass=Singleton):
"parentTitle": "Combat Shadow Fighting Saga / Great Prison Battle Saga",
"originalTitle": "Baki Hanma",
"contentRating": "TV-MA",
"summary": "The world is shaken by news of a man taking down a monstrous elephant with his bare hands. Back in Japan, Baki is confronted by a knife-wielding child.",
"summary": "The world is shaken by news",
"index": 1,
"parentIndex": 1,
"audienceRating": 8.5,
@@ -482,7 +493,7 @@ class Plex(metaclass=Singleton):
if not eventType:
return None
logger.info(f"接收到plex webhook{message}")
eventItem = WebhookEventInfo(event=eventType, channel="plex")
eventItem = schemas.WebhookEventInfo(event=eventType, channel="plex")
if message.get('Metadata'):
if message.get('Metadata', {}).get('type') == 'episode':
eventItem.item_type = "TV"
@@ -495,14 +506,17 @@ class Plex(metaclass=Singleton):
eventItem.season_id = message.get('Metadata', {}).get('parentIndex')
eventItem.episode_id = message.get('Metadata', {}).get('index')
if message.get('Metadata', {}).get('summary') and len(message.get('Metadata', {}).get('summary')) > 100:
if (message.get('Metadata', {}).get('summary')
and len(message.get('Metadata', {}).get('summary')) > 100):
eventItem.overview = str(message.get('Metadata', {}).get('summary'))[:100] + "..."
else:
eventItem.overview = message.get('Metadata', {}).get('summary')
else:
eventItem.item_type = "MOV" if message.get('Metadata', {}).get('type') == 'movie' else "SHOW"
eventItem.item_type = "MOV" if message.get('Metadata',
{}).get('type') == 'movie' else "SHOW"
eventItem.item_name = "%s %s" % (
message.get('Metadata', {}).get('title'), "(" + str(message.get('Metadata', {}).get('year')) + ")")
message.get('Metadata', {}).get('title'),
"(" + str(message.get('Metadata', {}).get('year')) + ")")
eventItem.item_id = message.get('Metadata', {}).get('ratingKey')
if len(message.get('Metadata', {}).get('summary')) > 100:
eventItem.overview = str(message.get('Metadata', {}).get('summary'))[:100] + "..."

View File

@@ -225,6 +225,8 @@ class QbittorrentModule(_ModuleBase):
"""
# 调用Qbittorrent API查询实时信息
info = self.qbittorrent.transfer_info()
if not info:
return schemas.DownloaderInfo()
return schemas.DownloaderInfo(
download_speed=info.get("dl_info_speed"),
upload_speed=info.get("up_info_speed"),

View File

@@ -158,10 +158,8 @@ class Telegram(metaclass=Singleton):
title = re.sub(r"\s+", " ", title).strip()
free = torrent.volume_factor
seeder = f"{torrent.seeders}"
description = torrent.description
caption = f"{caption}\n{index}.【{site_name}】[{title}]({link}) " \
f"{StringUtils.str_filesize(torrent.size)} {free} {seeder}\n" \
f"_{description}_"
f"{StringUtils.str_filesize(torrent.size)} {free} {seeder}"
index += 1
if userid:

View File

@@ -345,7 +345,7 @@ class TheMovieDbModule(_ModuleBase):
image_path = seasoninfo.get(image_type.value)
if image_path:
return f"https://image.tmdb.org/t/p/{image_prefix}{image_path}"
return f"https://{settings.TMDB_IMAGE_DOMAIN}/t/p/{image_prefix}{image_path}"
return None
def movie_similar(self, tmdbid: int) -> List[dict]:

View File

@@ -12,7 +12,6 @@ from app.schemas.types import MediaType
from app.utils.common import retry
from app.utils.dom import DomUtils
from app.utils.http import RequestUtils
from app.utils.string import StringUtils
class TmdbScraper:
@@ -122,25 +121,8 @@ class TmdbScraper:
except Exception as e:
logger.error(f"{file_path} 刮削失败:{e}")
def __get_chinese_name(self, person: dict):
"""
获取TMDB别名中的中文名
"""
if not person.get("id"):
return ""
try:
personinfo = self.tmdb.get_person_detail(person.get("id"))
if personinfo:
also_known_as = personinfo.get("also_known_as") or []
if also_known_as:
for name in also_known_as:
if name and StringUtils.is_chinese(name):
return name
except Exception as err:
logger.error(f"获取人物中文名失败:{err}")
return person.get("name") or ""
def __gen_common_nfo(self, mediainfo: MediaInfo, doc, root):
@staticmethod
def __gen_common_nfo(mediainfo: MediaInfo, doc, root):
"""
生成公共NFO
"""
@@ -173,19 +155,20 @@ class TmdbScraper:
xoutline.appendChild(doc.createCDATASection(mediainfo.overview or ""))
# 导演
for director in mediainfo.directors:
# 获取中文名
cn_name = self.__get_chinese_name(director)
xdirector = DomUtils.add_node(doc, root, "director", cn_name)
xdirector = DomUtils.add_node(doc, root, "director", director.get("name") or "")
xdirector.setAttribute("tmdbid", str(director.get("id") or ""))
# 演员
for actor in mediainfo.actors:
# 获取中文名
cn_name = self.__get_chinese_name(actor)
xactor = DomUtils.add_node(doc, root, "actor")
DomUtils.add_node(doc, xactor, "name", cn_name)
DomUtils.add_node(doc, xactor, "name", actor.get("name") or "")
DomUtils.add_node(doc, xactor, "type", "Actor")
DomUtils.add_node(doc, xactor, "role", actor.get("character") or actor.get("role") or "")
DomUtils.add_node(doc, xactor, "tmdbid", actor.get("id") or "")
DomUtils.add_node(doc, xactor, "thumb",
f"https://{settings.TMDB_IMAGE_DOMAIN}/t/p/original{actor.get('profile_path')}")
DomUtils.add_node(doc, xactor, "profile",
f"https://www.themoviedb.org/person/{actor.get('id')}")
# 风格
genres = mediainfo.genres or []
for genre in genres:
@@ -260,7 +243,8 @@ class TmdbScraper:
doc = minidom.Document()
root = DomUtils.add_node(doc, doc, "season")
# 添加时间
DomUtils.add_node(doc, root, "dateadded", time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
DomUtils.add_node(doc, root, "dateadded",
time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
# 简介
xplot = DomUtils.add_node(doc, root, "plot")
xplot.appendChild(doc.createCDATASection(seasoninfo.get("overview") or ""))
@@ -272,7 +256,8 @@ class TmdbScraper:
DomUtils.add_node(doc, root, "premiered", seasoninfo.get("air_date") or "")
DomUtils.add_node(doc, root, "releasedate", seasoninfo.get("air_date") or "")
# 发行年份
DomUtils.add_node(doc, root, "year", seasoninfo.get("air_date")[:4] if seasoninfo.get("air_date") else "")
DomUtils.add_node(doc, root, "year",
seasoninfo.get("air_date")[:4] if seasoninfo.get("air_date") else "")
# seasonnumber
DomUtils.add_node(doc, root, "seasonnumber", str(season))
# 保存
@@ -326,20 +311,20 @@ class TmdbScraper:
directors = episodeinfo.get("crew") or []
for director in directors:
if director.get("known_for_department") == "Directing":
# 获取中文名
cn_name = self.__get_chinese_name(director)
xdirector = DomUtils.add_node(doc, root, "director", cn_name)
xdirector = DomUtils.add_node(doc, root, "director", director.get("name") or "")
xdirector.setAttribute("tmdbid", str(director.get("id") or ""))
# 演员
actors = episodeinfo.get("guest_stars") or []
for actor in actors:
if actor.get("known_for_department") == "Acting":
# 获取中文名
cn_name = self.__get_chinese_name(actor)
xactor = DomUtils.add_node(doc, root, "actor")
DomUtils.add_node(doc, xactor, "name", cn_name)
DomUtils.add_node(doc, xactor, "name", actor.get("name") or "")
DomUtils.add_node(doc, xactor, "type", "Actor")
DomUtils.add_node(doc, xactor, "tmdbid", actor.get("id") or "")
DomUtils.add_node(doc, xactor, "thumb",
f"https://{settings.TMDB_IMAGE_DOMAIN}/t/p/original{actor.get('profile_path')}")
DomUtils.add_node(doc, xactor, "profile",
f"https://www.themoviedb.org/person/{actor.get('id')}")
# 保存文件
self.__save_nfo(doc, file_path.with_suffix(".nfo"))

View File

@@ -3,18 +3,19 @@
import logging
import os
import time
from datetime import datetime
from functools import lru_cache
import requests
import requests.exceptions
from app.utils.http import RequestUtils
from .exceptions import TMDbException
logger = logging.getLogger(__name__)
class TMDb(object):
_session = None
TMDB_API_KEY = "TMDB_API_KEY"
TMDB_LANGUAGE = "TMDB_LANGUAGE"
TMDB_SESSION_ID = "TMDB_SESSION_ID"
@@ -25,9 +26,15 @@ class TMDb(object):
TMDB_DOMAIN = "TMDB_DOMAIN"
REQUEST_CACHE_MAXSIZE = None
_req = None
_session = None
def __init__(self, obj_cached=True, session=None):
if self.__class__._session is None or session is not None:
self.__class__._session = requests.Session() if session is None else session
if session is not None:
self._req = RequestUtils(session=session, proxies=self.proxies)
else:
self._session = requests.Session()
self._req = RequestUtils(session=self._session, proxies=self.proxies)
self._remaining = 40
self._reset = None
self._timeout = 15
@@ -54,7 +61,7 @@ class TMDb(object):
@property
def domain(self):
return os.environ.get(self.TMDB_DOMAIN)
@property
def proxies(self):
proxy = os.environ.get(self.TMDB_PROXIES)
@@ -131,9 +138,18 @@ class TMDb(object):
os.environ[self.TMDB_CACHE_ENABLED] = str(cache)
@lru_cache(maxsize=REQUEST_CACHE_MAXSIZE)
def cached_request(self, method, url, data, json):
return requests.request(method, url, data=data, json=json,
timeout=self._timeout, proxies=self.proxies)
def cached_request(self, method, url, data, json,
_ts=datetime.strftime(datetime.now(), '%Y%m%d')):
"""
缓存请求时间默认1天
"""
return self.request(method, url, data, json)
def request(self, method, url, data, json):
if method == "GET":
return self._req.get_res(url, params=data, json=json)
else:
return self._req.post_res(url, data=data, json=json)
def cache_clear(self):
return self.cached_request.cache_clear()
@@ -154,8 +170,7 @@ class TMDb(object):
if self.cache and self.obj_cached and call_cached and method != "POST":
req = self.cached_request(method, url, data, json)
else:
req = self.__class__._session.request(method, url, data=data, json=json,
timeout=self._timeout, proxies=self.proxies)
req = self.request(method, url, data, json)
headers = req.headers
@@ -200,3 +215,7 @@ class TMDb(object):
if key:
return json.get(key)
return json
def __del__(self):
if self._session:
self._session.close()

View File

@@ -211,6 +211,8 @@ class TransmissionModule(_ModuleBase):
下载器信息
"""
info = self.transmission.transfer_info()
if not info:
return schemas.DownloaderInfo()
return schemas.DownloaderInfo(
download_speed=info.download_speed,
upload_speed=info.upload_speed,

View File

@@ -420,8 +420,7 @@ class BestFilmVersion(_PluginBase):
item_info_resp = Emby().get_iteminfo(itemid=data.get('Id'))
else:
item_info_resp = self.plex_get_iteminfo(itemid=data.get('Id'))
logger.info(f'BestFilmVersion插件 item打印 {item_info_resp}')
logger.debug(f'BestFilmVersion插件 item打印 {item_info_resp}')
if not item_info_resp:
continue
@@ -430,41 +429,35 @@ class BestFilmVersion(_PluginBase):
continue
# 获取tmdb_id
media_info_ids = item_info_resp.get('ExternalUrls')
if not media_info_ids:
tmdb_id = item_info_resp.tmdbid
if not tmdb_id:
continue
for media_info_id in media_info_ids:
if 'TheMovieDb' != media_info_id.get('Name'):
continue
tmdb_find_id = str(media_info_id.get('Url')).split('/')
tmdb_find_id.reverse()
tmdb_id = tmdb_find_id[0]
# 识别媒体信息
mediainfo: MediaInfo = self.chain.recognize_media(tmdbid=tmdb_id, mtype=MediaType.MOVIE)
if not mediainfo:
logger.warn(f'未识别到媒体信息,标题:{data.get("Name")}tmdbID{tmdb_id}')
continue
# 添加订阅
self.subscribechain.add(mtype=MediaType.MOVIE,
title=mediainfo.title,
year=mediainfo.year,
tmdbid=mediainfo.tmdb_id,
best_version=True,
username="收藏洗版",
exist_ok=True)
# 加入缓存
caches.append(data.get('Name'))
# 存储历史记录
if mediainfo.tmdb_id not in [h.get("tmdbid") for h in history]:
history.append({
"title": mediainfo.title,
"type": mediainfo.type.value,
"year": mediainfo.year,
"poster": mediainfo.get_poster_image(),
"overview": mediainfo.overview,
"tmdbid": mediainfo.tmdb_id,
"time": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
})
# 识别媒体信息
mediainfo: MediaInfo = self.chain.recognize_media(tmdbid=tmdb_id, mtype=MediaType.MOVIE)
if not mediainfo:
logger.warn(f'未识别到媒体信息,标题:{data.get("Name")}tmdbid{tmdb_id}')
continue
# 添加订阅
self.subscribechain.add(mtype=MediaType.MOVIE,
title=mediainfo.title,
year=mediainfo.year,
tmdbid=mediainfo.tmdb_id,
best_version=True,
username="收藏洗版",
exist_ok=True)
# 加入缓存
caches.append(data.get('Name'))
# 存储历史记录
if mediainfo.tmdb_id not in [h.get("tmdbid") for h in history]:
history.append({
"title": mediainfo.title,
"type": mediainfo.type.value,
"year": mediainfo.year,
"poster": mediainfo.get_poster_image(),
"overview": mediainfo.overview,
"tmdbid": mediainfo.tmdb_id,
"time": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
})
# 保存历史记录
self.save_data('history', history)
# 保存缓存
@@ -474,7 +467,7 @@ class BestFilmVersion(_PluginBase):
def jellyfin_get_items(self) -> List[dict]:
# 获取所有user
users_url = "{HOST}Users?&apikey={APIKEY}"
users_url = "[HOST]Users?&apikey=[APIKEY]"
users = self.get_users(Jellyfin().get_data(users_url))
if not users:
logger.info(f"bestfilmversion/users_url: {users_url}")
@@ -482,7 +475,7 @@ class BestFilmVersion(_PluginBase):
all_items = []
for user in users:
# 根据加入日期 降序排序
url = "{HOST}Users/" + user + "/Items?SortBy=DateCreated%2CSortName" \
url = "[HOST]Users/" + user + "/Items?SortBy=DateCreated%2CSortName" \
"&SortOrder=Descending" \
"&Filters=IsFavorite" \
"&Recursive=true" \
@@ -491,7 +484,7 @@ class BestFilmVersion(_PluginBase):
"&ExcludeLocationTypes=Virtual" \
"&EnableTotalRecordCount=false" \
"&Limit=20" \
"&apikey={APIKEY}"
"&apikey=[APIKEY]"
resp = self.get_items(Jellyfin().get_data(url))
if not resp:
continue
@@ -500,14 +493,14 @@ class BestFilmVersion(_PluginBase):
def emby_get_items(self) -> List[dict]:
# 获取所有user
get_users_url = "{HOST}Users?&api_key={APIKEY}"
get_users_url = "[HOST]Users?&api_key=[APIKEY]"
users = self.get_users(Emby().get_data(get_users_url))
if not users:
return []
all_items = []
for user in users:
# 根据加入日期 降序排序
url = "{HOST}emby/Users/" + user + "/Items?SortBy=DateCreated%2CSortName" \
url = "[HOST]emby/Users/" + user + "/Items?SortBy=DateCreated%2CSortName" \
"&SortOrder=Descending" \
"&Filters=IsFavorite" \
"&Recursive=true" \
@@ -515,7 +508,7 @@ class BestFilmVersion(_PluginBase):
"&CollapseBoxSetItems=false" \
"&ExcludeLocationTypes=Virtual" \
"&EnableTotalRecordCount=false" \
"&Limit=20&api_key={APIKEY}"
"&Limit=20&api_key=[APIKEY]"
resp = self.get_items(Emby().get_data(url))
if not resp:
continue
@@ -634,52 +627,34 @@ class BestFilmVersion(_PluginBase):
if not _is_lock:
return
try:
mediainfo: Optional[MediaInfo] = None
if not data.tmdb_id:
info = None
if data.channel == 'jellyfin' and data.save_reason == 'UpdateUserRating' and data.item_favorite:
if (data.channel == 'jellyfin'
and data.save_reason == 'UpdateUserRating'
and data.item_favorite):
info = Jellyfin().get_iteminfo(itemid=data.item_id)
elif data.channel == 'emby' and data.event == 'item.rate':
info = Emby().get_iteminfo(itemid=data.item_id)
elif data.channel == 'plex' and data.event == 'item.rate':
info = Plex().get_iteminfo(itemid=data.item_id)
logger.info(f'BestFilmVersion/webhook_message_action item打印{info}')
logger.debug(f'BestFilmVersion/webhook_message_action item打印{info}')
if not info:
return
if info['Type'] not in ['Movie', 'MOV', 'movie']:
if info.item_type not in ['Movie', 'MOV', 'movie']:
return
# 获取tmdb_id
media_info_ids = info.get('ExternalUrls')
if not media_info_ids:
return
for media_info_id in media_info_ids:
if 'TheMovieDb' != media_info_id.get('Name'):
continue
tmdb_find_id = str(media_info_id.get('Url')).split('/')
tmdb_find_id.reverse()
tmdb_id = tmdb_find_id[0]
mediainfo = self.chain.recognize_media(tmdbid=tmdb_id, mtype=MediaType.MOVIE)
if not mediainfo:
logger.warn(f'未识别到媒体信息,标题:{data.item_name}tmdbID{tmdb_id}')
return
tmdb_id = info.tmdbid
else:
if data.channel == 'jellyfin' and (data.save_reason != 'UpdateUserRating' or not data.item_favorite):
tmdb_id = data.tmdb_id
if (data.channel == 'jellyfin'
and (data.save_reason != 'UpdateUserRating' or not data.item_favorite)):
return
if data.item_type not in ['Movie', 'MOV', 'movie']:
return
mediainfo = self.chain.recognize_media(tmdbid=data.tmdb_id, mtype=MediaType.MOVIE)
if not mediainfo:
logger.warn(f'未识别到媒体信息,标题:{data.item_name}tmdbID{data.tmdb_id}')
return
# 识别媒体信息
mediainfo = self.chain.recognize_media(tmdbid=tmdb_id, mtype=MediaType.MOVIE)
if not mediainfo:
logger.warn(f'未识别到媒体信息,标题:{data.item_name}tmdbID{tmdb_id}')
return
# 读取缓存
caches = self._cache_path.read_text().split("\n") if self._cache_path.exists() else []

View File

@@ -49,6 +49,7 @@ class BrushFlow(_PluginBase):
siteshelper = None
siteoper = None
torrents = None
sites = None
qb = None
tr = None
# 添加种子定时
@@ -88,6 +89,7 @@ class BrushFlow(_PluginBase):
self.siteshelper = SitesHelper()
self.siteoper = SiteOper()
self.torrents = TorrentsChain()
self.sites = SitesHelper()
if config:
self._enabled = config.get("enabled")
self._notify = config.get("notify")
@@ -115,6 +117,13 @@ class BrushFlow(_PluginBase):
self._save_path = config.get("save_path")
self._clear_task = config.get("clear_task")
# 过滤掉已删除的站点
self._brushsites = [site.get("id") for site in self.sites.get_indexers() if
not site.get("public") and site.get("id") in self._brushsites]
# 保存配置
self.__update_config()
if self._clear_task:
# 清除统计数据
self.save_data("statistic", {})
@@ -228,7 +237,7 @@ class BrushFlow(_PluginBase):
self._scheduler.add_job(self.brush, 'interval', minutes=self._cron)
except Exception as e:
logger.error(f"站点刷流服务启动失败:{e}")
self.systemmessage(f"站点刷流服务启动失败:{e}")
self.systemmessage.put(f"站点刷流服务启动失败:{e}")
return
if self._onlyonce:
logger.info(f"站点刷流服务启动,立即运行一次")
@@ -1113,7 +1122,7 @@ class BrushFlow(_PluginBase):
{
'component': 'thead',
'props': {
'class': 'text-no-wrap'
'class': 'text-no-wrap'
},
'content': [
{
@@ -1290,10 +1299,10 @@ class BrushFlow(_PluginBase):
else:
end_size = 0
if begin_size and not end_size \
and torrent.size > float(begin_size) * 1024**3:
and torrent.size > float(begin_size) * 1024 ** 3:
continue
elif begin_size and end_size \
and not float(begin_size) * 1024**3 <= torrent.size <= float(end_size) * 1024**3:
and not float(begin_size) * 1024 ** 3 <= torrent.size <= float(end_size) * 1024 ** 3:
continue
# 做种人数
if self._seeder:
@@ -1350,7 +1359,7 @@ class BrushFlow(_PluginBase):
break
# 保种体积GB
if self._disksize \
and (torrents_size + torrent.size) > float(self._disksize) * 1024**3:
and (torrents_size + torrent.size) > float(self._disksize) * 1024 ** 3:
logger.warn(f"当前做种体积 {StringUtils.str_filesize(torrents_size)} "
f"已超过保种体积 {self._disksize},停止新增任务")
break
@@ -1372,6 +1381,7 @@ class BrushFlow(_PluginBase):
"deleted": False,
}
# 统计数据
torrents_size += torrent.size
statistic_info["count"] += 1
# 发送消息
self.__send_add_message(torrent)
@@ -1860,7 +1870,7 @@ class BrushFlow(_PluginBase):
return 0
torrents = downlader.get_downloading_torrents()
return len(torrents) or 0
@staticmethod
def __get_pubminutes(pubdate: str) -> int:
"""
@@ -1872,8 +1882,7 @@ class BrushFlow(_PluginBase):
pubdate = pubdate.replace("T", " ").replace("Z", "")
pubdate = datetime.strptime(pubdate, "%Y-%m-%d %H:%M:%S")
now = datetime.now()
return (now - pubdate).seconds // 60
return (now - pubdate).total_seconds() // 60
except Exception as e:
print(str(e))
return 0

View File

@@ -725,9 +725,9 @@ class CloudflareSpeedTest(_PluginBase):
new_entrys.append(host_entry)
except Exception as err:
err_hosts.append(host + "\n")
logger.error(f"{host} 格式转换错误:{str(err)}")
logger.error(f"[HOST] 格式转换错误:{str(err)}")
# 推送实时消息
self.systemmessage.put(f"{host} 格式转换错误:{str(err)}")
self.systemmessage.put(f"[HOST] 格式转换错误:{str(err)}")
# 写入系统hosts
if new_entrys:

View File

@@ -199,9 +199,9 @@ class CustomHosts(_PluginBase):
new_entrys.append(host_entry)
except Exception as err:
err_hosts.append(host + "\n")
logger.error(f"{host} 格式转换错误:{str(err)}")
logger.error(f"[HOST] 格式转换错误:{str(err)}")
# 推送实时消息
self.systemmessage.put(f"{host} 格式转换错误:{str(err)}")
self.systemmessage.put(f"[HOST] 格式转换错误:{str(err)}")
# 写入系统hosts
if new_entrys:

View File

@@ -87,6 +87,8 @@ class DirMonitor(_PluginBase):
_exclude_keywords = ""
# 存储源目录与目的目录关系
_dirconf: Dict[str, Path] = {}
# 存储源目录转移方式
_transferconf: Dict[str, str] = {}
_medias = {}
# 退出事件
_event = Event()
@@ -98,6 +100,7 @@ class DirMonitor(_PluginBase):
self.tmdbchain = TmdbChain(self.db)
# 清空配置
self._dirconf = {}
self._transferconf = {}
# 读取配置
if config:
@@ -132,6 +135,13 @@ class DirMonitor(_PluginBase):
paths = [mon_path]
else:
paths = mon_path.split(":")
# 自定义转移方式
if mon_path.count("#") == 1:
self._transferconf[mon_path] = mon_path.split("#")[1]
else:
self._transferconf[mon_path] = self._transfer_type
target_path = None
if len(paths) > 1:
mon_path = paths[0]
@@ -247,6 +257,8 @@ class DirMonitor(_PluginBase):
# 查询转移目的目录
target: Path = self._dirconf.get(mon_path)
# 查询转移方式
transfer_type = self._transferconf.get(mon_path)
# 识别媒体信息
mediainfo: MediaInfo = self.chain.recognize_media(meta=file_meta)
@@ -260,7 +272,7 @@ class DirMonitor(_PluginBase):
# 新增转移成功历史记录
self.transferhis.add_fail(
src_path=file_path,
mode=self._transfer_type,
mode=transfer_type,
meta=file_meta
)
return
@@ -289,7 +301,7 @@ class DirMonitor(_PluginBase):
# 转移
transferinfo: TransferInfo = self.chain.transfer(mediainfo=mediainfo,
path=file_path,
transfer_type=self._transfer_type,
transfer_type=transfer_type,
target=target,
meta=file_meta,
episodes_info=episodes_info)
@@ -303,7 +315,7 @@ class DirMonitor(_PluginBase):
# 新增转移失败历史记录
self.transferhis.add_fail(
src_path=file_path,
mode=self._transfer_type,
mode=transfer_type,
download_hash=download_hash,
meta=file_meta,
mediainfo=mediainfo,
@@ -320,7 +332,7 @@ class DirMonitor(_PluginBase):
# 新增转移成功历史记录
self.transferhis.add_success(
src_path=file_path,
mode=self._transfer_type,
mode=transfer_type,
download_hash=download_hash,
meta=file_meta,
mediainfo=mediainfo,
@@ -402,7 +414,7 @@ class DirMonitor(_PluginBase):
})
# 移动模式删除空目录
if self._transfer_type == "move":
if transfer_type == "move":
for file_dir in file_path.parents:
if len(str(file_dir)) <= len(str(Path(mon_path))):
# 重要,删除到监控目录为止
@@ -601,9 +613,10 @@ class DirMonitor(_PluginBase):
'model': 'monitor_dirs',
'label': '监控目录',
'rows': 5,
'placeholder': '每一行一个目录,支持种配置方式:\n'
'placeholder': '每一行一个目录,支持种配置方式:\n'
'监控目录\n'
'监控目录:转移目的目录(需同时在媒体库目录中配置该目的目录)'
'监控目录:转移目的目录(需同时在媒体库目录中配置该目的目录)\n'
'监控目录:转移目的目录#转移方式move|copy|link|softlink'
}
}
]

View File

@@ -4,6 +4,7 @@ import xml.dom.minidom
from threading import Event
from typing import Tuple, List, Dict, Any, Optional
import pytz
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
@@ -100,11 +101,19 @@ class DoubanRank(_PluginBase):
logger.error(f"豆瓣榜单订阅服务启动失败,错误信息:{str(e)}")
self.systemmessage.put(f"豆瓣榜单订阅服务启动失败,错误信息:{str(e)}")
else:
self._scheduler.add_job(func=self.__refresh_rss,
trigger=CronTrigger.from_crontab("0 8 * * *"),
name="豆瓣榜单订阅")
self._scheduler.add_job(func=self.__refresh_rss, trigger='date',
run_date=datetime.datetime.now(
tz=pytz.timezone(settings.TZ)) + datetime.timedelta(seconds=3)
)
logger.info("豆瓣榜单订阅服务启动,周期:每天 08:00")
if self._onlyonce:
logger.info("豆瓣榜单订阅服务启动,立即运行一次")
self._scheduler.add_job(func=self.__refresh_rss, trigger='date',
run_date=datetime.datetime.now(
tz=pytz.timezone(settings.TZ)) + datetime.timedelta(seconds=3)
)
if self._onlyonce or self._clear:
# 关闭一次性开关
self._onlyonce = False

View File

@@ -0,0 +1,292 @@
import json
import re
from datetime import datetime, timedelta
import pytz
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
from app.core.config import settings
from app.plugins import _PluginBase
from typing import Any, List, Dict, Tuple, Optional
from app.log import logger
from app.schemas import NotificationType
from app.utils.http import RequestUtils
class InvitesSignin(_PluginBase):
    """
    Scheduled sign-in plugin for the invites.fun forum.

    Periodically performs a check-in request against https://invites.fun using a
    user-supplied cookie, scraping the CSRF token and user id from the site's
    home page, then calling the forum's user API. Optionally reports the result
    through the framework's site-message notification channel.
    """

    # Plugin display name
    plugin_name = "药丸签到"
    # Plugin description
    plugin_desc = "药丸论坛签到。"
    # Plugin icon
    plugin_icon = "invites.png"
    # Theme color
    plugin_color = "#4FB647"
    # Plugin version
    plugin_version = "1.0"
    # Plugin author
    plugin_author = "thsrite"
    # Author homepage
    author_url = "https://github.com/thsrite"
    # Prefix for this plugin's configuration item IDs
    plugin_config_prefix = "invitessignin"
    # Load order among plugins
    plugin_order = 24
    # Minimum user auth level allowed to use this plugin
    auth_level = 2

    # --- private state ---
    # Whether the plugin is enabled
    _enabled = False
    # Cron expression controlling how often the sign-in runs
    _cron = None
    # Cookie used to authenticate against invites.fun
    _cookie = None
    # Run once immediately after (re)configuration
    _onlyonce = False
    # Whether to send a notification after signing in
    _notify = False
    # Background scheduler driving the periodic job
    _scheduler: Optional[BackgroundScheduler] = None

    def init_plugin(self, config: dict = None):
        """
        Initialize the plugin from its persisted configuration and (re)start the
        scheduler. Called by the plugin framework whenever configuration changes.

        :param config: persisted plugin configuration dict, or None
        """
        # Stop any task that is already running before re-initializing
        self.stop_service()

        if config:
            self._enabled = config.get("enabled")
            self._cron = config.get("cron")
            self._cookie = config.get("cookie")
            self._notify = config.get("notify")
            self._onlyonce = config.get("onlyonce")

        # Set up scheduling only when the plugin is enabled
        if self._enabled:
            # Scheduler service
            self._scheduler = BackgroundScheduler(timezone=settings.TZ)
            if self._cron:
                try:
                    self._scheduler.add_job(func=self.__signin,
                                            trigger=CronTrigger.from_crontab(self._cron),
                                            name="药丸签到")
                except Exception as err:
                    # Invalid cron expression — log and continue without the periodic job
                    logger.error(f"定时任务配置错误:{err}")

            if self._onlyonce:
                logger.info(f"药丸签到服务启动,立即运行一次")
                # Fire a one-shot run 3 seconds from now
                self._scheduler.add_job(func=self.__signin, trigger='date',
                                        run_date=datetime.now(tz=pytz.timezone(settings.TZ)) + timedelta(seconds=3),
                                        name="药丸签到")
                # Reset the one-shot switch and persist it so it does not fire
                # again on the next initialization
                self._onlyonce = False
                self.update_config({
                    "onlyonce": False,
                    "cron": self._cron,
                    "enabled": self._enabled,
                    "cookie": self._cookie,
                    "notify": self._notify,
                })

            # Start the scheduler only if at least one job was registered
            if self._scheduler.get_jobs():
                self._scheduler.print_jobs()
                self._scheduler.start()

    def __signin(self):
        """
        Perform one sign-in against invites.fun.

        Flow: fetch the home page with the configured cookie, scrape the CSRF
        token and user id out of the page's embedded JSON, then issue the
        check-in request via the forum's user API (POST with a PATCH method
        override). Sends a notification with the result when enabled.
        """
        res = RequestUtils(cookies=self._cookie).get_res(url="https://invites.fun")
        if not res or res.status_code != 200:
            logger.error("请求药丸错误")
            return

        # Extract the csrfToken embedded in the page's inline JSON
        pattern = r'"csrfToken":"(.*?)"'
        csrfToken = re.findall(pattern, res.text)
        if not csrfToken:
            logger.error("请求csrfToken失败")
            return
        csrfToken = csrfToken[0]
        logger.info(f"获取csrfToken成功 {csrfToken}")

        # Extract the numeric userId from the same page
        pattern = r'"userId":(\d+)'
        match = re.search(pattern, res.text)

        if match:
            userId = match.group(1)
            logger.info(f"获取userid成功 {userId}")
        else:
            logger.error("未找到userId")
            return

        headers = {
            "X-Csrf-Token": csrfToken,
            # Method override header: the POST below is handled as a PATCH
            "X-Http-Method-Override": "PATCH",
            "Cookie": self._cookie
        }

        data = {
            "data": {
                "type": "users",
                "attributes": {
                    "canCheckin": False,
                    "totalContinuousCheckIn": 2
                },
                "id": userId
            }
        }

        # Perform the sign-in request
        res = RequestUtils(headers=headers).post_res(url=f"https://invites.fun/api/users/{userId}", json=data)
        if not res or res.status_code != 200:
            logger.error("药丸签到失败")
            return

        # NOTE(review): assumes the response always carries
        # data.attributes.money / totalContinuousCheckIn — a schema change
        # would raise KeyError here; confirm against the live API.
        sign_dict = json.loads(res.text)
        money = sign_dict['data']['attributes']['money']
        totalContinuousCheckIn = sign_dict['data']['attributes']['totalContinuousCheckIn']

        # Send a notification with the sign-in result
        if self._notify:
            self.post_message(
                mtype=NotificationType.SiteMessage,
                title="【药丸签到任务完成】",
                text=f"累计签到 {totalContinuousCheckIn} \n"
                     f"剩余药丸 {money}")

    def get_state(self) -> bool:
        """Return True when the plugin is enabled."""
        return self._enabled

    @staticmethod
    def get_command() -> List[Dict[str, Any]]:
        """This plugin exposes no remote commands."""
        pass

    def get_api(self) -> List[Dict[str, Any]]:
        """This plugin exposes no HTTP APIs."""
        pass

    def get_form(self) -> Tuple[List[dict], Dict[str, Any]]:
        """
        Build the plugin configuration page. Returns two pieces of data:
        1) the page layout definition, 2) the default configuration values.
        """
        return [
            {
                'component': 'VForm',
                'content': [
                    # Row 1: three switches — enable / notify / run-once
                    {
                        'component': 'VRow',
                        'content': [
                            {
                                'component': 'VCol',
                                'props': {
                                    'cols': 12,
                                    'md': 4
                                },
                                'content': [
                                    {
                                        'component': 'VSwitch',
                                        'props': {
                                            'model': 'enabled',
                                            'label': '启用插件',
                                        }
                                    }
                                ]
                            },
                            {
                                'component': 'VCol',
                                'props': {
                                    'cols': 12,
                                    'md': 4
                                },
                                'content': [
                                    {
                                        'component': 'VSwitch',
                                        'props': {
                                            'model': 'notify',
                                            'label': '开启通知',
                                        }
                                    }
                                ]
                            },
                            {
                                'component': 'VCol',
                                'props': {
                                    'cols': 12,
                                    'md': 4
                                },
                                'content': [
                                    {
                                        'component': 'VSwitch',
                                        'props': {
                                            'model': 'onlyonce',
                                            'label': '立即运行一次',
                                        }
                                    }
                                ]
                            }
                        ]
                    },
                    # Row 2: two text fields — cron schedule and the site cookie
                    {
                        'component': 'VRow',
                        'content': [
                            {
                                'component': 'VCol',
                                'props': {
                                    'cols': 12,
                                    'md': 6
                                },
                                'content': [
                                    {
                                        'component': 'VTextField',
                                        'props': {
                                            'model': 'cron',
                                            'label': '签到周期'
                                        }
                                    }
                                ]
                            },
                            {
                                'component': 'VCol',
                                'props': {
                                    'cols': 12,
                                    'md': 6
                                },
                                'content': [
                                    {
                                        'component': 'VTextField',
                                        'props': {
                                            'model': 'cookie',
                                            'label': '药丸cookie'
                                        }
                                    }
                                ]
                            }
                        ]
                    }
                ]
            }
        ], {
            # Default configuration values
            "enabled": False,
            "onlyonce": False,
            "notify": False,
            "cookie": "",
            "cron": "0 9 * * *"
        }

    def get_page(self) -> List[dict]:
        """This plugin has no detail page."""
        pass

    def stop_service(self):
        """
        Shut down the plugin: remove all scheduled jobs and stop the scheduler.
        """
        try:
            if self._scheduler:
                self._scheduler.remove_all_jobs()
                if self._scheduler.running:
                    self._scheduler.shutdown()
                self._scheduler = None
        except Exception as e:
            logger.error("退出插件失败:%s" % str(e))

View File

@@ -109,7 +109,7 @@ class IYUUAutoSeed(_PluginBase):
self._nolabels = config.get("nolabels")
self._nopaths = config.get("nopaths")
self._clearcache = config.get("clearcache")
self._permanent_error_caches = config.get("permanent_error_caches") or []
self._permanent_error_caches = [] if self._clearcache else config.get("permanent_error_caches") or []
self._error_caches = [] if self._clearcache else config.get("error_caches") or []
self._success_caches = [] if self._clearcache else config.get("success_caches") or []

View File

@@ -673,6 +673,8 @@ class MediaSyncDel(_PluginBase):
paths = self._library_path.split("\n")
for path in paths:
sub_paths = path.split(":")
if len(sub_paths) < 2:
continue
media_path = media_path.replace(sub_paths[0], sub_paths[1]).replace('\\', '/')
# 删除电影
@@ -739,7 +741,11 @@ class MediaSyncDel(_PluginBase):
return
# 遍历删除
last_del_time = None
for del_media in del_medias:
# 删除时间
del_time = del_media.get("time")
last_del_time = del_time
# 媒体类型 Movie|Series|Season|Episode
media_type = del_media.get("type")
# 媒体名称 蜀山战纪
@@ -765,6 +771,8 @@ class MediaSyncDel(_PluginBase):
paths = self._library_path.split("\n")
for path in paths:
sub_paths = path.split(":")
if len(sub_paths) < 2:
continue
media_path = media_path.replace(sub_paths[0], sub_paths[1]).replace('\\', '/')
# 获取删除的记录
@@ -877,7 +885,7 @@ class MediaSyncDel(_PluginBase):
# 保存历史
self.save_data("history", history)
self.save_data("last_time", datetime.datetime.now())
self.save_data("last_time", last_del_time or datetime.datetime.now())
def handle_torrent(self, src: str, torrent_hash: str):
"""
@@ -1043,146 +1051,207 @@ class MediaSyncDel(_PluginBase):
@staticmethod
def parse_emby_log(last_time):
    """
    Fetch the recent Emby server log files and parse media-deletion
    records out of them.

    The block previously contained the pre-refactor implementation
    (single-file fetch and a duplicate parse loop) interleaved with the
    current one; only the current implementation is kept here.

    :param last_time: entries whose timestamp string sorts before this
                      value are skipped (already processed); falsy
                      disables the filter
    :return: list of dicts describing deleted media items
    """

    def __parse_log(file_name: str, del_list: list):
        """
        Parse one Emby log file, appending deletion records to del_list.
        """
        log_url = f"[HOST]System/Logs/{file_name}?api_key=[APIKEY]"
        log_res = Emby().get_data(log_url)
        if not log_res or log_res.status_code != 200:
            logger.error("获取emby日志失败请检查服务器配置")
            return del_list
        # Regex for Emby's "Removing item from database" log lines
        pattern = r'(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}.\d{3}) Info App: Removing item from database, Type: (\w+), Name: (.*), Path: (.*), Id: (\d+)'
        matches = re.findall(pattern, log_res.text)
        for match in matches:
            mtime = match[0]
            # Skip records handled on a previous run
            if last_time and mtime < last_time:
                continue
            mtype = match[1]
            name = match[2]
            path = match[3]
            # Year is taken from the "(YYYY)" part of the path, if any
            year = None
            year_pattern = r'\(\d+\)'
            year_match = re.search(year_pattern, path)
            if year_match:
                year = year_match.group()[1:-1]
            season = None
            episode = None
            if mtype == 'Episode' or mtype == 'Season':
                # Derive the Chinese title, SXX and EXX markers from the path
                name_pattern = r"\/([\u4e00-\u9fa5]+)(?= \()"
                season_pattern = r"Season\s*(\d+)"
                episode_pattern = r"S\d+E(\d+)"
                name_match = re.search(name_pattern, path)
                season_match = re.search(season_pattern, path)
                episode_match = re.search(episode_pattern, path)
                if name_match:
                    name = name_match.group(1)
                if season_match:
                    season = season_match.group(1)
                    if int(season) < 10:
                        season = f'S0{season}'
                    else:
                        season = f'S{season}'
                else:
                    season = None
                if episode_match:
                    episode = episode_match.group(1)
                    episode = f'E{episode}'
                else:
                    episode = None
            media = {
                "time": mtime,
                "type": mtype,
                "name": name,
                "year": year,
                "path": path,
                "season": season,
                "episode": episode,
            }
            logger.debug(f"解析到删除媒体:{json.dumps(media)}")
            del_list.append(media)
        return del_list

    log_files = []
    try:
        # List the three most recent Emby server log files
        log_list_url = "[HOST]System/Logs/Query?Limit=3&api_key=[APIKEY]"
        log_list_res = Emby().get_data(log_list_url)
        if log_list_res and log_list_res.status_code == 200:
            log_files_dict = json.loads(log_list_res.text)
            for item in log_files_dict.get("Items"):
                if str(item.get('Name')).startswith("embyserver"):
                    log_files.append(str(item.get('Name')))
    except Exception as e:
        print(str(e))
    if not log_files:
        # Fall back to the current log when the listing endpoint fails
        log_files.append("embyserver.txt")
    del_medias = []
    # Oldest file first, so the records come out in chronological order
    log_files.reverse()
    for log_file in log_files:
        del_medias = __parse_log(log_file, del_medias)
    return del_medias
@staticmethod
def parse_jellyfin_log(last_time: datetime):
    """
    Fetch the Jellyfin server log files and parse media-deletion
    records out of them.

    The block previously contained the pre-refactor implementation
    (today's-log-only fetch and a duplicate parse loop) interleaved
    with the current one; only the current implementation is kept here.

    :param last_time: entries whose timestamp sorts before this value
                      are skipped (already processed); falsy disables
                      the filter
    :return: list of dicts describing deleted media items
    """

    def __parse_log(file_name: str, del_list: list):
        """
        Parse one Jellyfin log file, appending deletion records to del_list.
        """
        log_url = f"[HOST]System/Logs/Log?name={file_name}&api_key=[APIKEY]"
        log_res = Jellyfin().get_data(log_url)
        if not log_res or log_res.status_code != 200:
            logger.error("获取jellyfin日志失败请检查服务器配置")
            return del_list
        # Regex for Jellyfin's "Removing item" log lines
        pattern = r'\[(.*?)\].*?Removing item, Type: "(.*?)", Name: "(.*?)", Path: "(.*?)"'
        matches = re.findall(pattern, log_res.text)
        for match in matches:
            mtime = match[0]
            # Skip records handled on a previous run
            if last_time and mtime < last_time:
                continue
            mtype = match[1]
            name = match[2]
            path = match[3]
            # Year is taken from the "(YYYY)" part of the path, if any
            year = None
            year_pattern = r'\(\d+\)'
            year_match = re.search(year_pattern, path)
            if year_match:
                year = year_match.group()[1:-1]
            season = None
            episode = None
            if mtype == 'Episode' or mtype == 'Season':
                # Derive the Chinese title, SXX and EXX markers from the path
                name_pattern = r"\/([\u4e00-\u9fa5]+)(?= \()"
                season_pattern = r"Season\s*(\d+)"
                episode_pattern = r"S\d+E(\d+)"
                name_match = re.search(name_pattern, path)
                season_match = re.search(season_pattern, path)
                episode_match = re.search(episode_pattern, path)
                if name_match:
                    name = name_match.group(1)
                if season_match:
                    season = season_match.group(1)
                    if int(season) < 10:
                        season = f'S0{season}'
                    else:
                        season = f'S{season}'
                else:
                    season = None
                if episode_match:
                    episode = episode_match.group(1)
                    episode = f'E{episode}'
                else:
                    episode = None
            media = {
                "time": mtime,
                "type": mtype,
                "name": name,
                "year": year,
                "path": path,
                "season": season,
                "episode": episode,
            }
            logger.debug(f"解析到删除媒体:{json.dumps(media)}")
            del_list.append(media)
        return del_list

    log_files = []
    try:
        # List all available Jellyfin log files
        log_list_url = "[HOST]System/Logs?api_key=[APIKEY]"
        log_list_res = Jellyfin().get_data(log_list_url)
        if log_list_res and log_list_res.status_code == 200:
            log_files_dict = json.loads(log_list_res.text)
            for item in log_files_dict:
                if str(item.get('Name')).startswith("log_"):
                    log_files.append(str(item.get('Name')))
    except Exception as e:
        print(str(e))
    if not log_files:
        # Fall back to today's log when the listing endpoint fails
        log_files.append("log_%s.log" % datetime.date.today().strftime("%Y%m%d"))
    del_medias = []
    # Oldest file first, so the records come out in chronological order
    log_files.reverse()
    for log_file in log_files:
        del_medias = __parse_log(log_file, del_medias)
    return del_medias

View File

@@ -1,17 +1,34 @@
import base64
import copy
import datetime
import json
import re
import threading
import time
from pathlib import Path
from typing import Any, List, Dict, Tuple
from typing import Any, List, Dict, Tuple, Optional
import pytz
import zhconv
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
from requests import RequestException
from app.chain.mediaserver import MediaServerChain
from app.chain.tmdb import TmdbChain
from app.core.config import settings
from app.core.event import eventmanager, Event
from app.helper.nfo import NfoReader
from app.core.meta import MetaBase
from app.log import logger
from app.modules.emby import Emby
from app.modules.jellyfin import Jellyfin
from app.modules.plex import Plex
from app.plugins import _PluginBase
from app.schemas import TransferInfo, MediaInfo
from app.schemas import MediaInfo, MediaServerItem
from app.schemas.types import EventType, MediaType
from app.utils.common import retry
from app.utils.http import RequestUtils
from app.utils.string import StringUtils
class PersonMeta(_PluginBase):
@@ -36,16 +53,72 @@ class PersonMeta(_PluginBase):
# 可使用的用户级别
auth_level = 1
# 退出事件
_event = threading.Event()
# 私有属性
_scheduler = None
tmdbchain = None
mschain = None
_enabled = False
_metadir = ""
_onlyonce = False
_cron = None
_delay = 0
_remove_nozh = False
def init_plugin(self, config: dict = None):
    """
    Initialize the plugin from its saved configuration and (re)start
    the scheduled scraping service.
    """
    # Chains used to query TMDB and the configured media servers
    self.tmdbchain = TmdbChain(self.db)
    self.mschain = MediaServerChain(self.db)
    if config:
        self._enabled = config.get("enabled")
        self._metadir = config.get("metadir")
        self._onlyonce = config.get("onlyonce")
        self._cron = config.get("cron")
        self._delay = config.get("delay") or 0
        self._remove_nozh = config.get("remove_nozh") or False
    # Stop any previously scheduled jobs before (re)starting
    self.stop_service()
    # Start the service
    if self._enabled or self._onlyonce:
        self._scheduler = BackgroundScheduler(timezone=settings.TZ)
        if self._cron or self._onlyonce:
            if self._cron:
                # Periodic library scraping on the configured cron schedule
                try:
                    self._scheduler.add_job(func=self.scrap_library,
                                            trigger=CronTrigger.from_crontab(self._cron),
                                            name="演职人员刮削")
                    logger.info(f"演职人员刮削服务启动,周期:{self._cron}")
                except Exception as e:
                    logger.error(f"演职人员刮削服务启动失败,错误信息:{str(e)}")
                    self.systemmessage.put(f"演职人员刮削服务启动失败,错误信息:{str(e)}")
            if self._onlyonce:
                # One-shot run, 3 seconds from now
                self._scheduler.add_job(func=self.scrap_library, trigger='date',
                                        run_date=datetime.datetime.now(
                                            tz=pytz.timezone(settings.TZ)) + datetime.timedelta(seconds=3)
                                        )
                logger.info(f"演职人员刮削服务启动,立即运行一次")
                # Reset the one-shot flag ...
                self._onlyonce = False
                # ... and persist the configuration
                self.__update_config()
        if self._scheduler.get_jobs():
            # Start the scheduler only when at least one job was added
            self._scheduler.print_jobs()
            self._scheduler.start()
def __update_config(self):
    """
    Persist the current plugin settings.
    """
    current = {
        "enabled": self._enabled,
        "onlyonce": self._onlyonce,
        "cron": self._cron,
        "delay": self._delay,
        "remove_nozh": self._remove_nozh,
    }
    self.update_config(current)
def get_state(self) -> bool:
    """
    Whether the plugin is enabled.
    """
    return self._enabled
@@ -83,6 +156,22 @@ class PersonMeta(_PluginBase):
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'onlyonce',
'label': '立即运行一次',
}
}
]
}
]
},
@@ -93,14 +182,53 @@ class PersonMeta(_PluginBase):
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'metadir',
'label': '人物元数据目录',
'placeholder': '/metadata/people'
'model': 'cron',
'label': '媒体库扫描周期',
'placeholder': '5位cron表达式'
}
}
]
},
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VTextField',
'props': {
'model': 'delay',
'label': '入库延迟时间(秒)',
'placeholder': '30'
}
}
]
}
]
},
{
'component': 'VRow',
'content': [
{
'component': 'VCol',
'props': {
'cols': 12,
'md': 6
},
'content': [
{
'component': 'VSwitch',
'props': {
'model': 'remove_nozh',
'label': '删除非中文演员',
}
}
]
@@ -111,7 +239,10 @@ class PersonMeta(_PluginBase):
}
], {
"enabled": False,
"metadir": ""
"onlyonce": False,
"cron": "",
"delay": 30,
"remove_nozh": False
}
def get_page(self) -> List[dict]:
@@ -124,81 +255,726 @@ class PersonMeta(_PluginBase):
"""
if not self._enabled:
return
# 下载人物头像
if not self._metadir:
logger.warning("人物元数据目录未配置,无法下载人物头像")
return
# 事件数据
mediainfo: MediaInfo = event.event_data.get("mediainfo")
transferinfo: TransferInfo = event.event_data.get("transferinfo")
if not mediainfo or not transferinfo:
meta: MetaBase = event.event_data.get("meta")
if not mediainfo or not meta:
return
# 文件路径
filepath = transferinfo.target_path
if not filepath:
# 延迟
if self._delay:
time.sleep(int(self._delay))
# 查询媒体服务器中的条目
existsinfo = self.chain.media_exists(mediainfo=mediainfo)
if not existsinfo or not existsinfo.itemid:
logger.warn(f"演职人员刮削 {mediainfo.title_year} 在媒体库中不存在")
return
# 电影
if mediainfo.type == MediaType.MOVIE:
# nfo文件
nfofile = filepath / "movie.nfo"
if not nfofile.exists():
nfofile = filepath / f"{filepath.stem}.nfo"
if not nfofile.exists():
logger.warning(f"电影nfo文件不存在{nfofile}")
return
else:
# nfo文件
nfofile = filepath.with_name("tvshow.nfo")
if not nfofile.exists():
logger.warning(f"剧集nfo文件不存在{nfofile}")
# 查询条目详情
iteminfo = self.mschain.iteminfo(server=existsinfo.server, item_id=existsinfo.itemid)
if not iteminfo:
logger.warn(f"演职人员刮削 {mediainfo.title_year} 条目详情获取失败")
return
# 刮削演职人员信息
self.__update_item(server=existsinfo.server, item=iteminfo,
mediainfo=mediainfo, season=meta.begin_season)
def scrap_library(self):
    """
    Walk every library of every configured media server and scrape
    Chinese people (actor/director) information for each movie/series.
    """
    # Nothing to do when no media server is configured
    if not settings.MEDIASERVER:
        return
    for server in settings.MEDIASERVER.split(","):
        # Scan all libraries on this server
        logger.info(f"开始刮削服务器 {server} 的演员信息 ...")
        for library in self.mschain.librarys(server):
            logger.info(f"开始刮削媒体库 {library.name} 的演员信息 ...")
            for item in self.mschain.items(server, library.id):
                if not item:
                    continue
                if not item.item_id:
                    continue
                # Only movies and series carry people metadata
                if "Series" not in item.item_type \
                        and "Movie" not in item.item_type:
                    continue
                # Abort promptly when the plugin is being stopped
                if self._event.is_set():
                    logger.info(f"演职人员刮削服务停止")
                    return
                # Process this item
                logger.info(f"开始刮削 {item.title} 的演员信息 ...")
                self.__update_item(server=server, item=item)
                logger.info(f"{item.title} 的演员信息刮削完成")
            logger.info(f"媒体库 {library.name} 的演员信息刮削完成")
        logger.info(f"服务器 {server} 的演员信息刮削完成")
def __update_peoples(self, server: str, itemid: str, iteminfo: dict, douban_actors):
    """
    Translate the People entries of one media item into Chinese and
    push the result back to the media server.

    The block previously contained the removed NFO-file-based scraping
    code interleaved with the current media-server-based code (diff
    residue); only the current implementation is kept here.

    A People entry looks like:
        {
            "Name": "丹尼尔·克雷格",
            "Id": "33625",
            "Role": "James Bond",
            "Type": "Actor",
            "PrimaryImageTag": "bef4f764540f10577f804201d8d27918"
        }

    :param server: media server name ("emby"/"jellyfin"/"plex")
    :param itemid: id of the media item whose People list is updated
    :param iteminfo: full item details as returned by get_iteminfo
    :param douban_actors: douban actor dicts used for name matching
    """
    peoples = []
    # Rebuild the People list of the current media item
    for people in iteminfo["People"] or []:
        if self._event.is_set():
            logger.info(f"演职人员刮削服务停止")
            return
        if not people.get("Name"):
            continue
        # Already fully Chinese — keep the entry unchanged
        if StringUtils.is_chinese(people.get("Name")) \
                and StringUtils.is_chinese(people.get("Role")):
            peoples.append(people)
            continue
        info = self.__update_people(server=server, people=people,
                                    douban_actors=douban_actors)
        if info:
            peoples.append(info)
        elif not self._remove_nozh:
            # No Chinese data found; keep the original entry unless the
            # user chose to drop non-Chinese people
            peoples.append(people)
    # Save the item back only when something remains to be written
    if peoples:
        iteminfo["People"] = peoples
        self.set_iteminfo(server=server, itemid=itemid, iteminfo=iteminfo)
def __update_item(self, server: str, item: MediaServerItem,
                  mediainfo: MediaInfo = None, season: int = None):
    """
    Update the people information of one media-server item and, for
    series, of its seasons and episodes.

    :param server: media server name ("emby"/"jellyfin"/"plex")
    :param item: the media-server item to process
    :param mediainfo: recognized media info; looked up via the item's
                      tmdbid when None
    :param season: season number used for season-scoped douban lookups
    """
    def __need_trans_actor(_item):
        # True when any person still has a non-Chinese name or role
        _peoples = [x for x in _item.get("People", []) if
                    (x.get("Name") and not StringUtils.is_chinese(x.get("Name")))
                    or (x.get("Role") and not StringUtils.is_chinese(x.get("Role")))]
        if _peoples:
            return True
        return False

    # Recognize media info when the caller did not supply it
    if not mediainfo:
        if not item.tmdbid:
            logger.warn(f"{item.title} 未找到tmdbid无法识别媒体信息")
            return
        mtype = MediaType.TV if item.item_type in ['Series', 'show'] else MediaType.MOVIE
        mediainfo = self.chain.recognize_media(mtype=mtype, tmdbid=item.tmdbid)
        if not mediainfo:
            logger.warn(f"{item.title} 未识别到媒体信息")
            return
    # Fetch the item's full details from the media server
    iteminfo = self.get_iteminfo(server=server, itemid=item.item_id)
    if not iteminfo:
        logger.warn(f"{item.title} 未找到媒体项")
        return
    if __need_trans_actor(iteminfo):
        # Fetch douban actor info and update the item's people
        logger.info(f"开始获取 {item.title} 的豆瓣演员信息 ...")
        douban_actors = self.__get_douban_actors(mediainfo=mediainfo, season=season)
        self.__update_peoples(server=server, itemid=item.item_id, iteminfo=iteminfo, douban_actors=douban_actors)
    else:
        logger.info(f"{item.title} 的人物信息已是中文,无需更新")
    # Handle people of seasons and episodes for series
    if iteminfo.get("Type") and "Series" in iteminfo["Type"]:
        # Fetch the season child items
        seasons = self.get_items(server=server, parentid=item.item_id, mtype="Season")
        if not seasons:
            logger.warn(f"{item.title} 未找到季媒体项")
            return
        for season in seasons["Items"]:
            # Season-scoped douban actor info
            season_actors = self.__get_douban_actors(mediainfo=mediainfo, season=season.get("IndexNumber"))
            # Only Jellyfin stores people on seasons; Emby/Plex do not
            if server == "jellyfin":
                seasoninfo = self.get_iteminfo(server=server, itemid=season.get("Id"))
                if not seasoninfo:
                    logger.warn(f"{item.title} 未找到季媒体项:{season.get('Id')}")
                    continue
                if __need_trans_actor(seasoninfo):
                    # Update the season's people
                    self.__update_peoples(server=server, itemid=season.get("Id"), iteminfo=seasoninfo,
                                          douban_actors=season_actors)
                    logger.info(f"{seasoninfo.get('Id')} 的人物信息更新完成")
                else:
                    logger.info(f"{seasoninfo.get('Id')} 的人物信息已是中文,无需更新")
            # Fetch the episode child items of this season
            episodes = self.get_items(server=server, parentid=season.get("Id"), mtype="Episode")
            if not episodes:
                logger.warn(f"{item.title} 未找到集媒体项")
                continue
            # Update the people of every episode
            for episode in episodes["Items"]:
                # Episode details
                episodeinfo = self.get_iteminfo(server=server, itemid=episode.get("Id"))
                if not episodeinfo:
                    logger.warn(f"{item.title} 未找到集媒体项:{episode.get('Id')}")
                    continue
                if __need_trans_actor(episodeinfo):
                    # Update the episode's people
                    self.__update_peoples(server=server, itemid=episode.get("Id"), iteminfo=episodeinfo,
                                          douban_actors=season_actors)
                    logger.info(f"{episodeinfo.get('Id')} 的人物信息更新完成")
                else:
                    logger.info(f"{episodeinfo.get('Id')} 的人物信息已是中文,无需更新")
def __update_people(self, server: str, people: dict, douban_actors: list = None) -> Optional[dict]:
    """
    Update one person's record on the media server, preferring TMDB
    data and falling back to douban.

    :param server: media server name
    :param people: the People entry from the parent item
    :param douban_actors: douban actor dicts used for name matching
    :return: the replacement People entry on success, otherwise None
    """
    def __get_peopleid(p: dict) -> Tuple[Optional[str], Optional[str]]:
        """
        Extract the person's TMDB id and IMDB id from ProviderIds.
        """
        if not p.get("ProviderIds"):
            return None, None
        peopletmdbid, peopleimdbid = None, None
        # Provider keys appear with either capitalization
        if "Tmdb" in p["ProviderIds"]:
            peopletmdbid = p["ProviderIds"]["Tmdb"]
        if "tmdb" in p["ProviderIds"]:
            peopletmdbid = p["ProviderIds"]["tmdb"]
        if "Imdb" in p["ProviderIds"]:
            peopleimdbid = p["ProviderIds"]["Imdb"]
        if "imdb" in p["ProviderIds"]:
            peopleimdbid = p["ProviderIds"]["imdb"]
        return peopletmdbid, peopleimdbid

    # The entry that will replace the original People entry
    ret_people = copy.deepcopy(people)
    try:
        # Person details as stored on the media server
        personinfo = self.get_iteminfo(server=server, itemid=people.get("Id"))
        if not personinfo:
            logger.warn(f"未找到人物 {people.get('Name')} 的信息")
            return None
        # Flags tracking which fields have been updated
        updated_name = False
        updated_overview = False
        update_character = False
        profile_path = None
        # First try TMDB for a Chinese name/biography/image
        person_tmdbid, person_imdbid = __get_peopleid(personinfo)
        if person_tmdbid:
            person_tmdbinfo = self.tmdbchain.person_detail(int(person_tmdbid))
            if person_tmdbinfo:
                cn_name = self.__get_chinese_name(person_tmdbinfo)
                if cn_name:
                    # Chinese name
                    logger.info(f"{people.get('Name')} 从TMDB获取到中文名{cn_name}")
                    personinfo["Name"] = cn_name
                    ret_people["Name"] = cn_name
                    updated_name = True
                    # Chinese biography
                    biography = person_tmdbinfo.get("biography")
                    if biography and StringUtils.is_chinese(biography):
                        logger.info(f"{people.get('Name')} 从TMDB获取到中文描述")
                        personinfo["Overview"] = biography
                        updated_overview = True
                    # Profile image
                    profile_path = person_tmdbinfo.get('profile_path')
                    if profile_path:
                        logger.info(f"{people.get('Name')} 从TMDB获取到图片{profile_path}")
                        profile_path = f"https://{settings.TMDB_IMAGE_DOMAIN}/t/p/original{profile_path}"
        # Fall back to douban for whatever is still missing.
        # A douban actor dict looks like:
        """
        {
            "name": "丹尼尔·克雷格",
            "roles": [
                "演员",
                "制片人",
                "配音"
            ],
            "title": "丹尼尔·克雷格(同名)英国,英格兰,柴郡,切斯特影视演员",
            "url": "https://movie.douban.com/celebrity/1025175/",
            "user": null,
            "character": "饰 詹姆斯·邦德 James Bond 007",
            "uri": "douban://douban.com/celebrity/1025175?subject_id=27230907",
            "avatar": {
                "large": "https://qnmob3.doubanio.com/view/celebrity/raw/public/p42588.jpg?imageView2/2/q/80/w/600/h/3000/format/webp",
                "normal": "https://qnmob3.doubanio.com/view/celebrity/raw/public/p42588.jpg?imageView2/2/q/80/w/200/h/300/format/webp"
            },
            "sharing_url": "https://www.douban.com/doubanapp/dispatch?uri=/celebrity/1025175/",
            "type": "celebrity",
            "id": "1025175",
            "latin_name": "Daniel Craig"
        }
        """
        if douban_actors and (not updated_name
                              or not updated_overview
                              or not update_character):
            # Match the person against douban actors by name
            for douban_actor in douban_actors:
                if douban_actor.get("latin_name") == people.get("Name") \
                        or douban_actor.get("name") == people.get("Name"):
                    # Name
                    if not updated_name:
                        logger.info(f"{people.get('Name')} 从豆瓣中获取到中文名:{douban_actor.get('name')}")
                        personinfo["Name"] = douban_actor.get("name")
                        ret_people["Name"] = douban_actor.get("name")
                        updated_name = True
                    # Overview
                    if not updated_overview:
                        if douban_actor.get("title"):
                            logger.info(f"{people.get('Name')} 从豆瓣中获取到中文描述:{douban_actor.get('title')}")
                            personinfo["Overview"] = douban_actor.get("title")
                            updated_overview = True
                    # Played character
                    if not update_character:
                        if douban_actor.get("character"):
                            # e.g. "饰 詹姆斯·邦德 James Bond 007"
                            character = re.sub(r"\s+", "",
                                               douban_actor.get("character"))
                            character = re.sub("演员", "",
                                               character)
                            if character:
                                logger.info(f"{people.get('Name')} 从豆瓣中获取到饰演角色:{character}")
                                ret_people["Role"] = character
                                update_character = True
                    # Image (only when TMDB did not supply one)
                    if not profile_path:
                        avatar = douban_actor.get("avatar") or {}
                        if avatar.get("large"):
                            logger.info(f"{people.get('Name')} 从豆瓣中获取到图片:{avatar.get('large')}")
                            profile_path = avatar.get("large")
                    break
        # Upload the person's image
        if profile_path:
            logger.info(f"更新人物 {people.get('Name')} 的图片:{profile_path}")
            self.set_item_image(server=server, itemid=people.get("Id"), imageurl=profile_path)
        # Lock the fields we changed so the server does not overwrite them
        if updated_name:
            if "Name" not in personinfo["LockedFields"]:
                personinfo["LockedFields"].append("Name")
        if updated_overview:
            if "Overview" not in personinfo["LockedFields"]:
                personinfo["LockedFields"].append("Overview")
        # Push the updated person record back
        if updated_name or updated_overview or update_character:
            logger.info(f"更新人物 {people.get('Name')} 的信息:{personinfo}")
            ret = self.set_iteminfo(server=server, itemid=people.get("Id"), iteminfo=personinfo)
            if ret:
                return ret_people
        else:
            logger.info(f"人物 {people.get('Name')} 未找到中文数据")
    except Exception as err:
        logger.error(f"更新人物信息失败:{err}")
    return None
def __get_douban_actors(self, mediainfo: MediaInfo, season: int = None) -> List[dict]:
    """
    Fetch actor and director info from douban for one media item.

    :param mediainfo: the recognized media info to match against douban
    :param season: optional season number for season-scoped matching
    :return: list of douban actor dicts (actors followed by directors)
    """
    # Sleep 1-5 seconds between calls to rate-limit douban requests
    # (pseudo-random: derived from the current clock, not random())
    time.sleep(1 + int(time.time()) % 5)
    # Match the douban entry
    doubaninfo = self.chain.match_doubaninfo(name=mediainfo.title,
                                             mtype=mediainfo.type.value,
                                             year=mediainfo.year,
                                             season=season)
    # Douban actors + directors
    if doubaninfo:
        doubanitem = self.chain.douban_info(doubaninfo.get("id")) or {}
        return (doubanitem.get("actors") or []) + (doubanitem.get("directors") or [])
    else:
        logger.warn(f"未找到豆瓣信息:{mediainfo.title_year}")
        return []
@staticmethod
def get_iteminfo(server: str, itemid: str) -> dict:
    """
    Fetch full details of one media item from the given media server.

    :param server: media server name ("emby"/"jellyfin"/anything else = plex)
    :param itemid: item id on that server
    :return: item details dict, or {} on failure
    """
    def __get_emby_iteminfo() -> dict:
        """
        Emby item details.
        """
        try:
            url = f'[HOST]emby/Users/[USER]/Items/{itemid}?' \
                  f'Fields=ChannelMappingInfo&api_key=[APIKEY]'
            res = Emby().get_data(url=url)
            if res:
                return res.json()
        except Exception as err:
            logger.error(f"获取Emby媒体项详情失败{err}")
        return {}

    def __get_jellyfin_iteminfo() -> dict:
        """
        Jellyfin item details.
        """
        try:
            url = f'[HOST]Users/[USER]/Items/{itemid}?Fields=ChannelMappingInfo&api_key=[APIKEY]'
            res = Jellyfin().get_data(url=url)
            if res:
                result = res.json()
                if result:
                    # Jellyfin does not return FileName; derive it from Path
                    result['FileName'] = Path(result['Path']).name
                return result
        except Exception as err:
            logger.error(f"获取Jellyfin媒体项详情失败{err}")
        return {}

    def __get_plex_iteminfo() -> dict:
        """
        Plex item details, normalized to the Emby-style field names.
        """
        iteminfo = {}
        try:
            plexitem = Plex().get_plex().library.fetchItem(ekey=itemid)
            if 'movie' in plexitem.METADATA_TYPE:
                iteminfo['Type'] = 'Movie'
                iteminfo['IsFolder'] = False
            elif 'episode' in plexitem.METADATA_TYPE:
                iteminfo['Type'] = 'Series'
                iteminfo['IsFolder'] = False
                if 'show' in plexitem.TYPE:
                    iteminfo['ChildCount'] = plexitem.childCount
            iteminfo['Name'] = plexitem.title
            iteminfo['Id'] = plexitem.key
            iteminfo['ProductionYear'] = plexitem.year
            iteminfo['ProviderIds'] = {}
            # Plex guids look like "tmdb://12345"
            for guid in plexitem.guids:
                idlist = str(guid.id).split(sep='://')
                if len(idlist) < 2:
                    continue
                iteminfo['ProviderIds'][idlist[0]] = idlist[1]
            for location in plexitem.locations:
                iteminfo['Path'] = location
                iteminfo['FileName'] = Path(location).name
            iteminfo['Overview'] = plexitem.summary
            iteminfo['CommunityRating'] = plexitem.audienceRating
            return iteminfo
        except Exception as err:
            logger.error(f"获取Plex媒体项详情失败{err}")
        return {}

    # Dispatch by server type
    if server == "emby":
        return __get_emby_iteminfo()
    elif server == "jellyfin":
        return __get_jellyfin_iteminfo()
    else:
        return __get_plex_iteminfo()
@staticmethod
def get_items(server: str, parentid: str, mtype: str = None) -> dict:
    """
    Fetch all child items of a media item (or the top-level libraries
    when parentid is falsy).

    The stray `pass` left over from the removed stub body has been
    dropped — it was dead diff residue.

    :param server: media server name ("emby"/"jellyfin"/anything else = plex)
    :param parentid: parent item/library id; falsy = server root
    :param mtype: for Plex only: "Season" or "Episode" selects the child kind
    :return: dict with an "Items" list, or {} on failure
    """
    def __get_emby_items() -> dict:
        """
        Emby child items.
        """
        try:
            if parentid:
                url = f'[HOST]emby/Users/[USER]/Items?ParentId={parentid}&api_key=[APIKEY]'
            else:
                url = '[HOST]emby/Users/[USER]/Items?api_key=[APIKEY]'
            res = Emby().get_data(url=url)
            if res:
                return res.json()
        except Exception as err:
            logger.error(f"获取Emby媒体的所有子媒体项失败{err}")
        return {}

    def __get_jellyfin_items() -> dict:
        """
        Jellyfin child items.
        """
        try:
            if parentid:
                url = f'[HOST]Users/[USER]/Items?ParentId={parentid}&api_key=[APIKEY]'
            else:
                url = '[HOST]Users/[USER]/Items?api_key=[APIKEY]'
            res = Jellyfin().get_data(url=url)
            if res:
                return res.json()
        except Exception as err:
            logger.error(f"获取Jellyfin媒体的所有子媒体项失败{err}")
        return {}

    def __get_plex_items(t: str) -> dict:
        """
        Plex child items, normalized to the Emby-style {"Items": [...]} shape.
        """
        items = {}
        try:
            plex = Plex().get_plex()
            items['Items'] = []
            if parentid:
                if mtype and 'Season' in t:
                    # Seasons of a show
                    plexitem = plex.library.fetchItem(ekey=parentid)
                    items['Items'] = []
                    for season in plexitem.seasons():
                        item = {
                            'Name': season.title,
                            'Id': season.key,
                            'IndexNumber': season.seasonNumber,
                            'Overview': season.summary
                        }
                        items['Items'].append(item)
                elif mtype and 'Episode' in t:
                    # Episodes of a season
                    plexitem = plex.library.fetchItem(ekey=parentid)
                    items['Items'] = []
                    for episode in plexitem.episodes():
                        item = {
                            'Name': episode.title,
                            'Id': episode.key,
                            'IndexNumber': episode.episodeNumber,
                            'Overview': episode.summary,
                            'CommunityRating': episode.audienceRating
                        }
                        items['Items'].append(item)
                else:
                    # All items of a library section
                    plexitems = plex.library.sectionByID(sectionID=parentid)
                    for plexitem in plexitems.all():
                        item = {}
                        if 'movie' in plexitem.METADATA_TYPE:
                            item['Type'] = 'Movie'
                            item['IsFolder'] = False
                        elif 'episode' in plexitem.METADATA_TYPE:
                            item['Type'] = 'Series'
                            item['IsFolder'] = False
                        item['Name'] = plexitem.title
                        item['Id'] = plexitem.key
                        items['Items'].append(item)
            else:
                # Top-level library sections
                plexitems = plex.library.sections()
                for plexitem in plexitems:
                    item = {}
                    if 'Directory' in plexitem.TAG:
                        item['Type'] = 'Folder'
                        item['IsFolder'] = True
                    elif 'movie' in plexitem.METADATA_TYPE:
                        item['Type'] = 'Movie'
                        item['IsFolder'] = False
                    elif 'episode' in plexitem.METADATA_TYPE:
                        item['Type'] = 'Series'
                        item['IsFolder'] = False
                    item['Name'] = plexitem.title
                    item['Id'] = plexitem.key
                    items['Items'].append(item)
            return items
        except Exception as err:
            logger.error(f"获取Plex媒体的所有子媒体项失败{err}")
        return {}

    # Dispatch by server type
    if server == "emby":
        return __get_emby_items()
    elif server == "jellyfin":
        return __get_jellyfin_items()
    else:
        return __get_plex_items(mtype)
@staticmethod
def set_iteminfo(server: str, itemid: str, iteminfo: dict):
    """
    Push updated item details back to the media server.

    :param server: media server name ("emby"/"jellyfin"/anything else = plex)
    :param itemid: item id on that server
    :param iteminfo: full item details to write
    :return: True on success, False on failure
    """
    def __set_emby_iteminfo():
        """
        Update an Emby item.
        """
        try:
            res = Emby().post_data(
                url=f'[HOST]emby/Items/{itemid}?api_key=[APIKEY]&reqformat=json',
                data=json.dumps(iteminfo),
                headers={
                    "Content-Type": "application/json"
                }
            )
            if res and res.status_code in [200, 204]:
                return True
            else:
                logger.error(f"更新Emby媒体项详情失败错误码{res.status_code}")
                return False
        except Exception as err:
            logger.error(f"更新Emby媒体项详情失败{err}")
            return False

    def __set_jellyfin_iteminfo():
        """
        Update a Jellyfin item.
        """
        try:
            res = Jellyfin().post_data(
                url=f'[HOST]Items/{itemid}?api_key=[APIKEY]',
                data=json.dumps(iteminfo),
                headers={
                    "Content-Type": "application/json"
                }
            )
            if res and res.status_code in [200, 204]:
                return True
            else:
                logger.error(f"更新Jellyfin媒体项详情失败错误码{res.status_code}")
                return False
        except Exception as err:
            logger.error(f"更新Jellyfin媒体项详情失败{err}")
            return False

    def __set_plex_iteminfo():
        """
        Update a Plex item (title, summary and optional audience rating).
        """
        try:
            plexitem = Plex().get_plex().library.fetchItem(ekey=itemid)
            if 'CommunityRating' in iteminfo:
                edits = {
                    'audienceRating.value': iteminfo['CommunityRating'],
                    'audienceRating.locked': 1
                }
                plexitem.edit(**edits)
            plexitem.editTitle(iteminfo['Name']).editSummary(iteminfo['Overview']).reload()
            return True
        except Exception as err:
            logger.error(f"更新Plex媒体项详情失败{err}")
            return False

    # Dispatch by server type
    if server == "emby":
        return __set_emby_iteminfo()
    elif server == "jellyfin":
        return __set_jellyfin_iteminfo()
    else:
        return __set_plex_iteminfo()
@staticmethod
def set_item_image(server: str, itemid: str, imageurl: str):
    """
    Update the primary image of a media item.

    The block previously contained the removed `download_image` helper's
    decorator, signature and docstring interleaved with this function
    (diff residue); only the current implementation is kept here.

    :param server: media server name ("emby"/"jellyfin"/anything else = plex)
    :param itemid: item id on that server
    :param imageurl: URL of the image to apply
    :return: True on success, False on failure, None when the Emby
             image download yielded nothing
    """
    def __download_image():
        """
        Download the image and return it base64-encoded, or None.
        """
        try:
            # Douban CDN requires a Referer header
            if "doubanio.com" in imageurl:
                r = RequestUtils(headers={
                    'Referer': "https://movie.douban.com/"
                }, ua=settings.USER_AGENT).get_res(url=imageurl, raise_exception=True)
            else:
                r = RequestUtils().get_res(url=imageurl, raise_exception=True)
            if r:
                return base64.b64encode(r.content).decode()
            else:
                logger.info(f"{imageurl} 图片下载失败,请检查网络连通性")
        except Exception as err:
            logger.error(f"下载图片失败:{err}")
        return None

    def __set_emby_item_image(_base64: str):
        """
        Upload the base64-encoded image as the Emby primary image.
        """
        try:
            url = f'[HOST]emby/Items/{itemid}/Images/Primary?api_key=[APIKEY]'
            res = Emby().post_data(
                url=url,
                data=_base64,
                headers={
                    "Content-Type": "image/png"
                }
            )
            if res and res.status_code in [200, 204]:
                return True
            else:
                logger.error(f"更新Emby媒体项图片失败错误码{res.status_code}")
                return False
        except Exception as result:
            logger.error(f"更新Emby媒体项图片失败{result}")
            return False

    def __set_jellyfin_item_image():
        """
        Ask Jellyfin to download the remote image itself.
        # FIXME 改为预下载图片
        """
        try:
            url = f'[HOST]Items/{itemid}/RemoteImages/Download?' \
                  f'Type=Primary&ImageUrl={imageurl}&ProviderName=TheMovieDb&api_key=[APIKEY]'
            res = Jellyfin().post_data(url=url)
            if res and res.status_code in [200, 204]:
                return True
            else:
                logger.error(f"更新Jellyfin媒体项图片失败错误码{res.status_code}")
                return False
        except Exception as err:
            logger.error(f"更新Jellyfin媒体项图片失败{err}")
            return False

    def __set_plex_item_image():
        """
        Let Plex fetch and apply the remote poster.
        # FIXME 改为预下载图片
        """
        try:
            plexitem = Plex().get_plex().library.fetchItem(ekey=itemid)
            plexitem.uploadPoster(url=imageurl)
            return True
        except Exception as err:
            logger.error(f"更新Plex媒体项图片失败{err}")
            return False

    if server == "emby":
        # Emby needs the image pre-downloaded and base64-encoded
        image_base64 = __download_image()
        if image_base64:
            return __set_emby_item_image(image_base64)
    elif server == "jellyfin":
        return __set_jellyfin_item_image()
    else:
        return __set_plex_item_image()
    return None
@staticmethod
def __get_chinese_name(personinfo: dict) -> str:
    """
    Pick a Chinese name out of a TMDB person's "also_known_as" aliases.

    The block previously contained lines from the removed
    `download_image` body interleaved with this function (diff residue);
    only the current implementation is kept here.

    :param personinfo: TMDB person detail dict
    :return: the first Chinese alias, converted to simplified Chinese,
             or "" when none is found or an error occurs
    """
    try:
        also_known_as = personinfo.get("also_known_as") or []
        if also_known_as:
            for name in also_known_as:
                if name and StringUtils.is_chinese(name):
                    # Convert traditional to simplified Chinese
                    return zhconv.convert(name, "zh-hans")
    except Exception as err:
        logger.error(f"获取人物中文名失败:{err}")
    return ""
def stop_service(self):
"""
退出插件
停止服务
"""
pass
try:
if self._scheduler:
self._scheduler.remove_all_jobs()
if self._scheduler.running:
self._event.set()
self._scheduler.shutdown()
self._event.clear()
self._scheduler = None
except Exception as e:
print(str(e))

View File

@@ -841,87 +841,88 @@ class SiteStatistic(_PluginBase):
url = site_info.get("url")
proxy = site_info.get("proxy")
ua = site_info.get("ua")
session = requests.Session()
proxies = settings.PROXY if proxy else None
proxy_server = settings.PROXY_SERVER if proxy else None
render = site_info.get("render")
# 会话管理
with requests.Session() as session:
proxies = settings.PROXY if proxy else None
proxy_server = settings.PROXY_SERVER if proxy else None
render = site_info.get("render")
logger.debug(f"站点 {site_name} url={url} site_cookie={site_cookie} ua={ua}")
if render:
# 演染模式
html_text = PlaywrightHelper().get_page_source(url=url,
cookies=site_cookie,
ua=ua,
proxies=proxy_server)
else:
# 普通模式
res = RequestUtils(cookies=site_cookie,
session=session,
ua=ua,
proxies=proxies
).get_res(url=url)
if res and res.status_code == 200:
if re.search(r"charset=\"?utf-8\"?", res.text, re.IGNORECASE):
res.encoding = "utf-8"
else:
res.encoding = res.apparent_encoding
html_text = res.text
# 第一次登录反爬
if html_text.find("title") == -1:
i = html_text.find("window.location")
if i == -1:
return None
tmp_url = url + html_text[i:html_text.find(";")] \
.replace("\"", "") \
.replace("+", "") \
.replace(" ", "") \
.replace("window.location=", "")
res = RequestUtils(cookies=site_cookie,
session=session,
ua=ua,
proxies=proxies
).get_res(url=tmp_url)
if res and res.status_code == 200:
if "charset=utf-8" in res.text or "charset=UTF-8" in res.text:
res.encoding = "UTF-8"
else:
res.encoding = res.apparent_encoding
html_text = res.text
if not html_text:
return None
else:
logger.error("站点 %s 被反爬限制:%s, 状态码:%s" % (site_name, url, res.status_code))
return None
# 兼容假首页情况,假首页通常没有 <link rel="search" 属性
if '"search"' not in html_text and '"csrf-token"' not in html_text:
res = RequestUtils(cookies=site_cookie,
session=session,
ua=ua,
proxies=proxies
).get_res(url=url + "/index.php")
if res and res.status_code == 200:
if re.search(r"charset=\"?utf-8\"?", res.text, re.IGNORECASE):
res.encoding = "utf-8"
else:
res.encoding = res.apparent_encoding
html_text = res.text
if not html_text:
return None
elif res is not None:
logger.error(f"站点 {site_name} 连接失败,状态码:{res.status_code}")
return None
logger.debug(f"站点 {site_name} url={url} site_cookie={site_cookie} ua={ua}")
if render:
# 演染模式
html_text = PlaywrightHelper().get_page_source(url=url,
cookies=site_cookie,
ua=ua,
proxies=proxy_server)
else:
logger.error(f"站点 {site_name} 无法访问:{url}")
return None
# 解析站点类型
if html_text:
site_schema = self.__build_class(html_text)
if not site_schema:
logger.error("站点 %s 无法识别站点类型" % site_name)
return None
return site_schema(site_name, url, site_cookie, html_text, session=session, ua=ua, proxy=proxy)
return None
# 普通模式
res = RequestUtils(cookies=site_cookie,
session=session,
ua=ua,
proxies=proxies
).get_res(url=url)
if res and res.status_code == 200:
if re.search(r"charset=\"?utf-8\"?", res.text, re.IGNORECASE):
res.encoding = "utf-8"
else:
res.encoding = res.apparent_encoding
html_text = res.text
# 第一次登录反爬
if html_text.find("title") == -1:
i = html_text.find("window.location")
if i == -1:
return None
tmp_url = url + html_text[i:html_text.find(";")] \
.replace("\"", "") \
.replace("+", "") \
.replace(" ", "") \
.replace("window.location=", "")
res = RequestUtils(cookies=site_cookie,
session=session,
ua=ua,
proxies=proxies
).get_res(url=tmp_url)
if res and res.status_code == 200:
if "charset=utf-8" in res.text or "charset=UTF-8" in res.text:
res.encoding = "UTF-8"
else:
res.encoding = res.apparent_encoding
html_text = res.text
if not html_text:
return None
else:
logger.error("站点 %s 被反爬限制:%s, 状态码:%s" % (site_name, url, res.status_code))
return None
# 兼容假首页情况,假首页通常没有 <link rel="search" 属性
if '"search"' not in html_text and '"csrf-token"' not in html_text:
res = RequestUtils(cookies=site_cookie,
session=session,
ua=ua,
proxies=proxies
).get_res(url=url + "/index.php")
if res and res.status_code == 200:
if re.search(r"charset=\"?utf-8\"?", res.text, re.IGNORECASE):
res.encoding = "utf-8"
else:
res.encoding = res.apparent_encoding
html_text = res.text
if not html_text:
return None
elif res is not None:
logger.error(f"站点 {site_name} 连接失败,状态码:{res.status_code}")
return None
else:
logger.error(f"站点 {site_name} 无法访问:{url}")
return None
# 解析站点类型
if html_text:
site_schema = self.__build_class(html_text)
if not site_schema:
logger.error("站点 %s 无法识别站点类型" % site_name)
return None
return site_schema(site_name, url, site_cookie, html_text, session=session, ua=ua, proxy=proxy)
return None
def refresh_by_domain(self, domain: str) -> schemas.Response:
"""

View File

@@ -6,7 +6,6 @@ from enum import Enum
from typing import Optional
from urllib.parse import urljoin, urlsplit
import requests
from requests import Session
from app.core.config import settings
@@ -107,7 +106,7 @@ class ISiteUserInfo(metaclass=ABCMeta):
self._base_url = f"{split_url.scheme}://{split_url.netloc}"
self._site_cookie = site_cookie
self._index_html = index_html
self._session = session if session else requests.Session()
self._session = session if session else None
self._ua = ua
self._emulate = emulate

View File

@@ -383,7 +383,14 @@ class SpeedLimiter(_PluginBase):
return
if event:
event_data: WebhookEventInfo = event.event_data
if event_data.event not in ["playback.start", "PlaybackStart", "media.play"]:
if event_data.event not in [
"playback.start",
"PlaybackStart",
"media.play",
"media.stop",
"PlaybackStop",
"playback.stop"
]:
return
# 当前播放的总比特率
total_bit_rate = 0
@@ -396,7 +403,7 @@ class SpeedLimiter(_PluginBase):
# 查询播放中会话
playing_sessions = []
if media_server == "emby":
req_url = "{HOST}emby/Sessions?api_key={APIKEY}"
req_url = "[HOST]emby/Sessions?api_key=[APIKEY]"
try:
res = Emby().get_data(req_url)
if res and res.status_code == 200:
@@ -419,7 +426,7 @@ class SpeedLimiter(_PluginBase):
and session.get("NowPlayingItem", {}).get("MediaType") == "Video":
total_bit_rate += int(session.get("NowPlayingItem", {}).get("Bitrate") or 0)
elif media_server == "jellyfin":
req_url = "{HOST}Sessions?api_key={APIKEY}"
req_url = "[HOST]Sessions?api_key=[APIKEY]"
try:
res = Jellyfin().get_data(req_url)
if res and res.status_code == 200:

View File

@@ -100,7 +100,7 @@ class TorrentTransfer(_PluginBase):
return
if self._fromdownloader == self._todownloader:
logger.error(f"源下载器和目的下载器不能相同")
self.systemmessage(f"源下载器和目的下载器不能相同")
self.systemmessage.put(f"源下载器和目的下载器不能相同")
return
self._scheduler = BackgroundScheduler(timezone=settings.TZ)
if self._cron:
@@ -110,7 +110,7 @@ class TorrentTransfer(_PluginBase):
CronTrigger.from_crontab(self._cron))
except Exception as e:
logger.error(f"转移做种服务启动失败:{e}")
self.systemmessage(f"转移做种服务启动失败:{e}")
self.systemmessage.put(f"转移做种服务启动失败:{e}")
return
if self._onlyonce:
logger.info(f"转移做种服务启动,立即运行一次")

View File

@@ -137,13 +137,29 @@ class WebHook(_PluginBase):
return
def __to_dict(_event):
result = {}
for key, value in _event.items():
if hasattr(value, 'to_dict'):
result[key] = value.to_dict()
else:
result[key] = str(value)
return result
"""
递归将对象转换为字典
"""
if isinstance(_event, dict):
for k, v in _event.items():
_event[k] = __to_dict(v)
return _event
elif isinstance(_event, list):
for i in range(len(_event)):
_event[i] = __to_dict(_event[i])
return _event
elif isinstance(_event, tuple):
return tuple(__to_dict(list(_event)))
elif isinstance(_event, set):
return set(__to_dict(list(_event)))
elif hasattr(_event, 'to_dict'):
return __to_dict(_event.to_dict())
elif hasattr(_event, '__dict__'):
return __to_dict(_event.__dict__)
elif isinstance(_event, (int, float, str, bool, type(None))):
return _event
else:
return str(_event)
event_info = {
"type": event.event_type,

View File

@@ -11,6 +11,7 @@ from app.chain import ChainBase
from app.chain.cookiecloud import CookieCloudChain
from app.chain.mediaserver import MediaServerChain
from app.chain.subscribe import SubscribeChain
from app.chain.tmdb import TmdbChain
from app.chain.transfer import TransferChain
from app.core.config import settings
from app.db import SessionFactory
@@ -183,6 +184,14 @@ class Scheduler(metaclass=Singleton):
}
)
# 后台刷新TMDB壁纸
self._scheduler.add_job(
TmdbChain(self._db).get_random_wallpager,
"interval",
minutes=30,
next_run_time=datetime.now(pytz.timezone(settings.TZ)) + timedelta(seconds=3)
)
# 公共定时服务
self._scheduler.add_job(
SchedulerChain(self._db).scheduler_job,

View File

@@ -14,6 +14,10 @@ class ExistMediaInfo(BaseModel):
type: Optional[MediaType]
# 季
seasons: Optional[Dict[int, list]] = {}
# 媒体服务器
server: Optional[str] = None
# 媒体ID
itemid: Optional[Union[str, int]] = None
class NotExistMediaInfo(BaseModel):

View File

@@ -58,6 +58,8 @@ class SystemConfigKey(Enum):
NotificationChannels = "NotificationChannels"
# 自定义制作组/字幕组
CustomReleaseGroups = "CustomReleaseGroups"
# 自定义占位符
Customization = "Customization"
# 自定义识别词
CustomIdentifiers = "CustomIdentifiers"
# 搜索优先级规则

View File

@@ -59,7 +59,8 @@ class RequestUtils:
headers=self._headers,
proxies=self._proxies,
timeout=self._timeout,
json=json)
json=json,
stream=False)
else:
return requests.post(url,
data=data,
@@ -67,7 +68,8 @@ class RequestUtils:
headers=self._headers,
proxies=self._proxies,
timeout=self._timeout,
json=json)
json=json,
stream=False)
except requests.exceptions.RequestException:
return None
@@ -91,27 +93,38 @@ class RequestUtils:
except requests.exceptions.RequestException:
return None
def get_res(self, url: str, params: dict = None,
allow_redirects: bool = True, raise_exception: bool = False) -> Optional[Response]:
def get_res(self, url: str,
params: dict = None,
data: Any = None,
json: dict = None,
allow_redirects: bool = True,
raise_exception: bool = False
) -> Optional[Response]:
try:
if self._session:
return self._session.get(url,
params=params,
data=data,
json=json,
verify=False,
headers=self._headers,
proxies=self._proxies,
cookies=self._cookies,
timeout=self._timeout,
allow_redirects=allow_redirects)
allow_redirects=allow_redirects,
stream=False)
else:
return requests.get(url,
params=params,
data=data,
json=json,
verify=False,
headers=self._headers,
proxies=self._proxies,
cookies=self._cookies,
timeout=self._timeout,
allow_redirects=allow_redirects)
allow_redirects=allow_redirects,
stream=False)
except requests.exceptions.RequestException:
if raise_exception:
raise requests.exceptions.RequestException
@@ -120,7 +133,8 @@ class RequestUtils:
def post_res(self, url: str, data: Any = None, params: dict = None,
allow_redirects: bool = True,
files: Any = None,
json: dict = None) -> Optional[Response]:
json: dict = None,
raise_exception: bool = False) -> Optional[Response]:
try:
if self._session:
return self._session.post(url,
@@ -133,7 +147,8 @@ class RequestUtils:
timeout=self._timeout,
allow_redirects=allow_redirects,
files=files,
json=json)
json=json,
stream=False)
else:
return requests.post(url,
data=data,
@@ -145,8 +160,11 @@ class RequestUtils:
timeout=self._timeout,
allow_redirects=allow_redirects,
files=files,
json=json)
json=json,
stream=False)
except requests.exceptions.RequestException:
if raise_exception:
raise requests.exceptions.RequestException
return None
@staticmethod

View File

@@ -68,6 +68,8 @@ class StringUtils:
"""
判断是否含有中文
"""
if not word:
return False
if isinstance(word, list):
word = " ".join(word)
chn = re.compile(r'[\u4e00-\u9fff]')

View File

@@ -3,6 +3,7 @@ import os
import platform
import re
import shutil
import sys
from pathlib import Path
from typing import List, Union, Tuple
@@ -27,20 +28,39 @@ class SystemUtils:
@staticmethod
def is_docker() -> bool:
"""
判断是否为Docker环境
"""
return Path("/.dockerenv").exists()
@staticmethod
def is_synology() -> bool:
"""
判断是否为群晖系统
"""
if SystemUtils.is_windows():
return False
return True if "synology" in SystemUtils.execute('uname -a') else False
@staticmethod
def is_windows() -> bool:
"""
判断是否为Windows系统
"""
return True if os.name == "nt" else False
@staticmethod
def is_frozen() -> bool:
"""
判断是否为冻结的二进制文件
"""
return True if getattr(sys, 'frozen', False) else False
@staticmethod
def is_macos() -> bool:
"""
判断是否为MacOS系统
"""
return True if platform.system() == 'Darwin' else False
@staticmethod

187
config/app.env Normal file
View File

@@ -0,0 +1,187 @@
#######################################################################
# 【*】为必配项,其余为选配项,选配项可以删除整项配置项或者保留配置默认值 #
#######################################################################
####################################
# 基础设置 #
####################################
# 时区
TZ=Asia/Shanghai
# 【*】API监听地址
HOST=0.0.0.0
# 是否调试模式
DEBUG=false
# 是否开发模式
DEV=false
# 【*】超级管理员
SUPERUSER=admin
# 【*】超级管理员初始密码
SUPERUSER_PASSWORD=password
# 【*】API密钥建议更换复杂字符串
API_TOKEN=moviepilot
# 网络代理 IP:PORT
PROXY_HOST=
# TMDB图片地址无需修改需保留默认值
TMDB_IMAGE_DOMAIN=image.tmdb.org
# TMDB API地址无需修改需保留默认值
TMDB_API_DOMAIN=api.themoviedb.org
# 大内存模式
BIG_MEMORY_MODE=false
####################################
# 媒体识别&刮削 #
####################################
# 媒体信息搜索来源 themoviedb/douban
SEARCH_SOURCE=themoviedb
# 刮削入库的媒体文件 true/false
SCRAP_METADATA=true
# 新增已入库媒体是否跟随TMDB信息变化
SCRAP_FOLLOW_TMDB=true
# 刮削来源 themoviedb/douban
SCRAP_SOURCE=themoviedb
####################################
# 媒体库 #
####################################
# 【*】转移方式 link/copy/move/softlink
TRANSFER_TYPE=copy
# 【*】媒体库目录,多个目录使用,分隔
LIBRARY_PATH=
# 电影媒体库目录名,默认电影
LIBRARY_MOVIE_NAME=
# 电视剧媒体库目录名,默认电视剧
LIBRARY_TV_NAME=
# 动漫媒体库目录名,默认电视剧/动漫
LIBRARY_ANIME_NAME=
# 二级分类
LIBRARY_CATEGORY=true
# 电影重命名格式
MOVIE_RENAME_FORMAT={{title}}{% if year %} ({{year}}){% endif %}/{{title}}{% if year %} ({{year}}){% endif %}{% if part %}-{{part}}{% endif %}{% if videoFormat %} - {{videoFormat}}{% endif %}{{fileExt}}
# 电视剧重命名格式
TV_RENAME_FORMAT={{title}}{% if year %} ({{year}}){% endif %}/Season {{season}}/{{title}} - {{season_episode}}{% if part %}-{{part}}{% endif %}{% if episode %} - 第 {{episode}}{% endif %}{{fileExt}}
####################################
# 站点 #
####################################
# 【*】CookieCloud服务器地址默认为公共服务器
COOKIECLOUD_HOST=https://movie-pilot.org/cookiecloud
# 【*】CookieCloud用户KEY
COOKIECLOUD_KEY=
# 【*】CookieCloud端对端加密密码
COOKIECLOUD_PASSWORD=
# 【*】CookieCloud同步间隔分钟
COOKIECLOUD_INTERVAL=1440
# OCR服务器地址
OCR_HOST=https://movie-pilot.org
# 【*】CookieCloud对应的浏览器UA
USER_AGENT=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36 Edg/113.0.1774.57
####################################
# 订阅 & 搜索 #
####################################
# 订阅模式 spider/rss
SUBSCRIBE_MODE=spider
# RSS订阅模式刷新时间间隔分钟
SUBSCRIBE_RSS_INTERVAL=30
# 订阅搜索开关
SUBSCRIBE_SEARCH=false
# 交互搜索自动下载用户ID使用,分割
AUTO_DOWNLOAD_USER=
####################################
# 消息通知 #
####################################
# 【*】消息通知渠道 telegram/wechat/slack多个通知渠道用,分隔
MESSAGER=telegram
# WeChat企业ID
WECHAT_CORPID=
# WeChat应用Secret
WECHAT_APP_SECRET=
# WeChat应用ID
WECHAT_APP_ID=
# WeChat代理服务器无需代理需保留默认值
WECHAT_PROXY=https://qyapi.weixin.qq.com
# WeChat Token
WECHAT_TOKEN=
# WeChat EncodingAESKey
WECHAT_ENCODING_AESKEY=
# WeChat 管理员
WECHAT_ADMINS=
# Telegram Bot Token
TELEGRAM_TOKEN=
# Telegram Chat ID
TELEGRAM_CHAT_ID=
# Telegram 用户ID使用,分隔
TELEGRAM_USERS=
# Telegram 管理员ID使用,分隔
TELEGRAM_ADMINS=
# Slack Bot User OAuth Token
SLACK_OAUTH_TOKEN=
# Slack App-Level Token
SLACK_APP_TOKEN=
# Slack 频道名称
SLACK_CHANNEL=
# SynologyChat Webhook
SYNOLOGYCHAT_WEBHOOK=
# SynologyChat Token
SYNOLOGYCHAT_TOKEN=
####################################
# 下载 #
####################################
# 【*】下载器 qbittorrent/transmission
DOWNLOADER=qbittorrent
# 下载器监控开关
DOWNLOADER_MONITOR=true
# Qbittorrent地址IP:PORT
QB_HOST=
# Qbittorrent用户名
QB_USER=
# Qbittorrent密码
QB_PASSWORD=
# Qbittorrent分类自动管理
QB_CATEGORY=false
# Transmission地址IP:PORT
TR_HOST=
# Transmission用户名
TR_USER=
# Transmission密码
TR_PASSWORD=
# 种子标签
TORRENT_TAG=MOVIEPILOT
# 【*】下载保存目录,容器内映射路径需要一致
DOWNLOAD_PATH=/downloads
# 电影下载保存目录,容器内映射路径需要一致
DOWNLOAD_MOVIE_PATH=
# 电视剧下载保存目录,容器内映射路径需要一致
DOWNLOAD_TV_PATH=
# 动漫下载保存目录,容器内映射路径需要一致
DOWNLOAD_ANIME_PATH=
# 下载目录二级分类
DOWNLOAD_CATEGORY=false
# 下载站点字幕
DOWNLOAD_SUBTITLE=true
####################################
# 媒体服务器 #
####################################
# 【*】媒体服务器 emby/jellyfin/plex多个媒体服务器,分割
MEDIASERVER=emby
# 入库刷新媒体库
REFRESH_MEDIASERVER=true
# 媒体服务器同步间隔(小时)
MEDIASERVER_SYNC_INTERVAL=6
# 媒体服务器同步黑名单,多个媒体库名称,分割
MEDIASERVER_SYNC_BLACKLIST=
# EMBY服务器地址IP:PORT
EMBY_HOST=
# EMBY Api Key
EMBY_API_KEY=
# Jellyfin服务器地址IP:PORT
JELLYFIN_HOST=
# Jellyfin Api Key
JELLYFIN_API_KEY=
# Plex服务器地址IP:PORT
PLEX_HOST=
# Plex Token
PLEX_TOKEN=

File diff suppressed because one or more lines are too long

View File

@@ -12,7 +12,7 @@ for module in Path(__file__).with_name("models").glob("*.py"):
db_version = input("请输入版本号:")
db_location = settings.CONFIG_PATH / 'user.db'
script_location = settings.ROOT_PATH / 'alembic'
script_location = settings.ROOT_PATH / 'database'
alembic_cfg = AlembicConfig()
alembic_cfg.set_main_option('script_location', str(script_location))
alembic_cfg.set_main_option('sqlalchemy.url', f"sqlite:///{db_location}")

View File

@@ -42,6 +42,7 @@ chardet~=4.0.0
starlette~=0.27.0
PyVirtualDisplay~=3.0
psutil~=5.9.4
python_dotenv~=1.0.0
python_hosts~=1.0.3
watchdog~=3.0.0
tailer~=0.4.1

View File

@@ -1 +1 @@
APP_VERSION = 'v1.2.7-1'
APP_VERSION = 'v1.2.9'

97
windows.spec Normal file
View File

@@ -0,0 +1,97 @@
# -*- mode: python ; coding: utf-8 -*-
def collect_pkg_data(package, include_py_files=False, subdir=None):
"""
Collect all data files from the given package.
"""
import os
from PyInstaller.utils.hooks import get_package_paths, remove_prefix, PY_IGNORE_EXTENSIONS
# Accept only strings as packages.
if type(package) is not str:
raise ValueError
pkg_base, pkg_dir = get_package_paths(package)
if subdir:
pkg_dir = os.path.join(pkg_dir, subdir)
# Walk through all file in the given package, looking for data files.
data_toc = TOC()
for dir_path, dir_names, files in os.walk(pkg_dir):
for f in files:
extension = os.path.splitext(f)[1]
if include_py_files or (extension not in PY_IGNORE_EXTENSIONS):
source_file = os.path.join(dir_path, f)
dest_folder = remove_prefix(dir_path, os.path.dirname(pkg_base) + os.sep)
dest_file = os.path.join(dest_folder, f)
data_toc.append((dest_file, source_file, 'DATA'))
return data_toc
def collect_local_submodules(package):
"""
Collect all local submodules from the given package.
"""
import os
base_dir = '..'
package_dir = os.path.join(base_dir, package.replace('.', os.sep))
submodules = []
for dir_path, dir_names, files in os.walk(package_dir):
for f in files:
if f == '__init__.py':
submodules.append(f"{package}.{os.path.basename(dir_path)}")
elif f.endswith('.py'):
submodules.append(f"{package}.{os.path.basename(dir_path)}.{os.path.splitext(f)[0]}")
for d in dir_names:
submodules.append(f"{package}.{os.path.basename(dir_path)}.{d}")
return submodules
hiddenimports = [
'passlib.handlers.bcrypt',
'app.modules',
'app.plugins',
] + collect_local_submodules('app.modules') \
+ collect_local_submodules('app.plugins')
block_cipher = None
a = Analysis(
['app/main.py'],
pathex=[],
binaries=[],
datas=[],
hiddenimports=hiddenimports,
hookspath=[],
hooksconfig={},
runtime_hooks=[],
excludes=[],
noarchive=False,
)
pyz = PYZ(a.pure, a.zipped_data, cipher=block_cipher)
exe = EXE(
pyz,
a.scripts,
a.binaries,
a.zipfiles,
a.datas,
collect_pkg_data('config'),
collect_pkg_data('cf_clearance'),
collect_pkg_data('database', include_py_files=True),
[],
name='MoviePilot',
debug=False,
bootloader_ignore_signals=False,
strip=False,
upx=True,
upx_exclude=[],
runtime_tmpdir=None,
console=True,
disable_windowed_traceback=False,
argv_emulation=False,
target_arch=None,
codesign_identity=None,
entitlements_file=None,
icon="app.ico"
)