Compare commits

..

156 Commits

Author SHA1 Message Date
jxxghp
8c1be70c85 Update version.py 2025-08-26 12:20:16 +08:00
jxxghp
b8e0c0db9e feat: fine-grained event error handling 2025-08-26 08:41:47 +08:00
jxxghp
7b7fb6cc82 Merge pull request #4836 from jxxghp/cursor/alter-siteuser-data-userid-to-character-type-9f4d 2025-08-25 22:05:19 +08:00
Cursor Agent
62512ba215 Remove SQLite-specific migration code for userid field
Co-authored-by: jxxghp <jxxghp@live.cn>
2025-08-25 14:00:33 +00:00
Cursor Agent
e1beb64c01 Simplify userid conversion to integer in Synology Chat module
Co-authored-by: jxxghp <jxxghp@live.cn>
2025-08-25 13:58:15 +00:00
Cursor Agent
c81f26ddad Remove downgrade methods for PostgreSQL and SQLite userid migration
Co-authored-by: jxxghp <jxxghp@live.cn>
2025-08-25 13:56:21 +00:00
Cursor Agent
340114c2a1 Remove migration README after completing SiteUserData userid type migration
Co-authored-by: jxxghp <jxxghp@live.cn>
2025-08-25 13:54:58 +00:00
Cursor Agent
cd7767b331 Checkpoint before follow-up message
Co-authored-by: jxxghp <jxxghp@live.cn>
2025-08-25 13:54:48 +00:00
Cursor Agent
25289dad8a Migrate SiteUserData userid field from Integer to String type
Co-authored-by: jxxghp <jxxghp@live.cn>
2025-08-25 13:50:58 +00:00
jxxghp
47c6917129 remove _check_restart_policy 2025-08-25 21:30:53 +08:00
jxxghp
6379cda148 fix async scheduler service 2025-08-25 21:19:07 +08:00
jxxghp
91a124ab8f fix async scheduler service 2025-08-25 20:44:38 +08:00
jxxghp
2357a7135e fix run_async 2025-08-25 17:46:06 +08:00
jxxghp
da0b3b3de9 fix: calendar cache 2025-08-25 16:46:10 +08:00
jxxghp
6664fb1716 feat: add automatic caching for plugins and the calendar 2025-08-25 16:37:02 +08:00
jxxghp
1206f24fa9 Fix concurrency issue when iterating over the cache 2025-08-25 13:11:44 +08:00
jxxghp
ffb5823e84 fix #4829 optimize module import logic and add special handling for Async classes 2025-08-25 08:14:43 +08:00
jxxghp
d45a7fb262 Update version.py 2025-08-24 19:59:31 +08:00
jxxghp
918d192c0f OpenList: automatically retry fetching file items after a delay 2025-08-24 19:47:00 +08:00
jxxghp
f7cd6eac50 feat: manual abort for file transfers 2025-08-24 19:17:41 +08:00
jxxghp
88f4428ff0 fix bug 2025-08-24 17:07:45 +08:00
jxxghp
069ea22ba2 fix bug 2025-08-24 16:55:37 +08:00
jxxghp
8fac8c5307 fix progress step 2025-08-24 16:33:44 +08:00
jxxghp
2285befebb fix cache set 2025-08-24 16:10:48 +08:00
jxxghp
1cd0648e4e fix cache set 2025-08-24 15:36:56 +08:00
jxxghp
0b7ba285c6 fix: graceful shutdown timeout handling 2025-08-24 13:07:52 +08:00
jxxghp
30446c4526 fix cache is_redis 2025-08-24 12:27:14 +08:00
jxxghp
9b843c9ed2 fix: transfer history recording 2025-08-24 12:19:12 +08:00
jxxghp
2ce1c3bef8 feat: transfer progress recording 2025-08-24 12:04:05 +08:00
jxxghp
e463094dc7 feat: transfer progress 2025-08-24 09:21:55 +08:00
jxxghp
71a9fe10f4 refactor ProgressHelper 2025-08-24 09:02:55 +08:00
jxxghp
ba146e13ef fix: optimize cache module declarations 2025-08-24 08:36:37 +08:00
jxxghp
c060d7e3e0 Update postgresql-setup.md 2025-08-23 22:26:34 +08:00
jxxghp
ba96678822 v2.7.5 2025-08-23 20:46:36 +08:00
jxxghp
4f6354f383 Merge pull request #4820 from DDS-Derek/dev 2025-08-23 18:46:52 +08:00
DDSRem
2766e80346 fix(database): use logger as log output
Co-Authored-By: Aqr-K <95741669+Aqr-K@users.noreply.github.com>
2025-08-23 18:36:11 +08:00
jxxghp
7cc3777a60 fix async cache 2025-08-23 18:34:47 +08:00
DDSRem
cb1dd9f17d fix(database): upgrade error in pg database
Co-Authored-By: Aqr-K <95741669+Aqr-K@users.noreply.github.com>
2025-08-23 18:12:13 +08:00
jxxghp
31f342fe4f fix torrent 2025-08-23 18:10:33 +08:00
jxxghp
e90359eb08 fix douban 2025-08-23 15:56:30 +08:00
jxxghp
58b0768a30 fix redis key 2025-08-23 15:53:03 +08:00
jxxghp
3b04506893 fix redis key 2025-08-23 15:40:38 +08:00
jxxghp
354165aa0a fix cache 2025-08-23 14:21:50 +08:00
jxxghp
343109836f fix cache 2025-08-23 14:06:44 +08:00
jxxghp
fcadac2adb Merge pull request #4817 from jxxghp/cursor/add-dict-operations-to-cachebackend-3877 2025-08-23 12:42:04 +08:00
Cursor Agent
5e7dcdfe97 Modify cache region key generation to use consistent prefix format
Co-authored-by: jxxghp <jxxghp@live.cn>
2025-08-23 04:13:25 +00:00
Cursor Agent
2ec9a57391 Remove implementation and migration documentation files
Co-authored-by: jxxghp <jxxghp@live.cn>
2025-08-23 04:07:04 +00:00
Cursor Agent
973c545723 Checkpoint before follow-up message
Co-authored-by: jxxghp <jxxghp@live.cn>
2025-08-23 04:06:16 +00:00
Cursor Agent
fd62eecfef Simplify TTLCache, remove dict-like methods, enhance Cache interface
Co-authored-by: jxxghp <jxxghp@live.cn>
2025-08-23 04:01:17 +00:00
Cursor Agent
b5ca7058c2 Add helper methods for cache backend in sync and async versions
Co-authored-by: jxxghp <jxxghp@live.cn>
2025-08-23 03:58:04 +00:00
Cursor Agent
57a48f099f Add dict-like operations to CacheBackend with sync and async support
Co-authored-by: jxxghp <jxxghp@live.cn>
2025-08-23 03:50:52 +00:00
jxxghp
4699f511bf Handle magnet links in torrent parsing and downloader modules (#4815)
Co-authored-by: Cursor Agent <cursoragent@cursor.com>
Co-authored-by: jxxghp <jxxghp@live.cn>
2025-08-23 10:51:32 +08:00
jxxghp
cd8f7e72e0 Fix synchronization errors 2025-08-22 17:33:24 +08:00
jxxghp
78803fa284 fix search_imdbid type 2025-08-22 16:37:30 +08:00
jxxghp
2e8d75df16 fix monitor cache 2025-08-22 15:30:49 +08:00
jxxghp
7e3bbfd960 Merge pull request #4807 from carolcoral/v2 2025-08-22 15:23:04 +08:00
jxxghp
1734d53b3c Replace file-based snapshot caching with FileCache implementation (#4809)
Co-authored-by: Cursor Agent <cursoragent@cursor.com>
Co-authored-by: jxxghp <jxxghp@live.cn>
2025-08-22 13:59:30 +08:00
jxxghp
f37540f4e5 fix get_rss timeout 2025-08-22 11:44:16 +08:00
jxxghp
addb9d836a remove cache singleton 2025-08-22 11:33:53 +08:00
Carol
4184d8c7ac Add notes on database migration issues
add: notes on migrating from SQLite to PostgreSQL
2025-08-22 10:55:26 +08:00
jxxghp
724c15a68c add plugin memory stats API 2025-08-22 09:46:11 +08:00
jxxghp
499bdf9b48 fix cache clear 2025-08-22 07:22:23 +08:00
jxxghp
41cd1ccda1 Merge pull request #4803 from Sowevo/v2
Support negative LIMIT values
2025-08-22 07:20:21 +08:00
jxxghp
b9521cb3a9 Fix typo: change "未就续" to "未就绪" in module status messages (#4804)
Co-authored-by: Cursor Agent <cursoragent@cursor.com>
Co-authored-by: jxxghp <jxxghp@live.cn>
2025-08-22 07:05:16 +08:00
jxxghp
1f40663b90 Merge pull request #4802 from Aqr-K/remove-docker 2025-08-22 06:45:45 +08:00
sowevo
5261ed7c4c Handle negative values consistently in both databases 2025-08-22 03:32:26 +08:00
sowevo
aa8768b18a Handle negative values consistently in both databases 2025-08-22 03:00:50 +08:00
Aqr-K
aad07433f4 fix(docker): Remove musl-dev and related code 2025-08-22 01:20:50 +08:00
jxxghp
4a7630079b Merge pull request #4800 from DDS-Derek/dev 2025-08-21 22:18:16 +08:00
DDSRem
44a6ee1994 fix(docker): incorrect working directory 2025-08-21 22:17:18 +08:00
jxxghp
56bd6e69ed Merge pull request #4799 from DDS-Derek/dev 2025-08-21 22:11:58 +08:00
DDSRem
d1e04588d0 feat(docker): refactor docker build process 2025-08-21 22:09:49 +08:00
jxxghp
21cdaef6d5 Merge pull request #4798 from DDS-Derek/dev 2025-08-21 21:57:49 +08:00
DDSRem
a1723d18fb fix(docker): remove unnecessary permission settings 2025-08-21 21:54:33 +08:00
jxxghp
9e065138e9 fix cache default 2025-08-21 21:49:00 +08:00
jxxghp
1c73c92bfd fix cache Singleton 2025-08-21 21:45:34 +08:00
jxxghp
bcd560d74e Merge pull request #4797 from DDS-Derek/dev 2025-08-21 21:28:40 +08:00
DDSRem
02339562ed fix(docker): reduce the number of image layers 2025-08-21 21:28:18 +08:00
DDSRem
e5804378c2 fix(docker): fuck ai bugs 2025-08-21 21:24:09 +08:00
jxxghp
da1c8a162d fix cache maxsize 2025-08-21 20:10:27 +08:00
jxxghp
d457a23a1f fix build 2025-08-21 19:24:04 +08:00
jxxghp
b6154e58b8 rollback dockerfile 2025-08-21 18:44:47 +08:00
jxxghp
5f18776c61 Update douban_cache.py 2025-08-21 17:52:55 +08:00
jxxghp
68b0b9ec7a Update tmdb_cache.py 2025-08-21 17:52:19 +08:00
jxxghp
0f5036972e v2.7.4 2025-08-21 17:03:17 +08:00
jxxghp
0b199b8421 fix TTLCache 2025-08-21 16:54:49 +08:00
jxxghp
a59730f6eb Optimize cache module defaults 2025-08-21 16:29:49 +08:00
jxxghp
c6c84fe65b rename 2025-08-21 16:02:50 +08:00
jxxghp
03c757bba6 fix TTLCache 2025-08-21 13:17:59 +08:00
jxxghp
bfeb8d238a fix build 2025-08-21 12:45:05 +08:00
jxxghp
daf0c08c4b remove duplicate aiofiles 2025-08-21 12:33:51 +08:00
jxxghp
d12c1b9ac4 remove musl-dev 2025-08-21 12:32:53 +08:00
jxxghp
bc242f4fd4 fix yield 2025-08-21 12:04:15 +08:00
jxxghp
a240c1bca9 Optimize Dockerfile 2025-08-21 09:47:23 +08:00
jxxghp
219aa6c574 Merge pull request #4790 from wikrin/delete_media_file 2025-08-21 09:35:07 +08:00
Attente
abca1b481a refactor(storage): improve empty-directory deletion logic
- Add protection for resource and media library directories
- Recursively walk up to check for and delete empty directories
2025-08-21 09:16:15 +08:00
jxxghp
db72fd2ef5 fix 2025-08-21 09:07:28 +08:00
jxxghp
31cca58943 fix cache 2025-08-21 08:26:32 +08:00
jxxghp
c06a4b759c fix redis 2025-08-21 08:14:21 +08:00
jxxghp
f05a23a490 更新 redis.py 2025-08-21 07:59:34 +08:00
jxxghp
1e0f2ffde0 更新 config.py 2025-08-21 07:48:16 +08:00
jxxghp
06df42ee3d 更新 Dockerfile 2025-08-21 07:21:58 +08:00
jxxghp
65ee1638f7 add VENV_PATH 2025-08-21 00:28:32 +08:00
jxxghp
87eefe7673 Merge pull request #4788 from jxxghp/cursor/install-playwright-dependencies-in-dockerfile-b7d6
Install playwright dependencies in dockerfile
2025-08-21 00:16:48 +08:00
Cursor Agent
5c124d3988 fix: use full path for playwright command in Dockerfile
- Fix 'playwright: not found' error during Docker build
- Use /bin/playwright instead of playwright to ensure
  the command is executed from the virtual environment
- This resolves the issue where playwright install-deps chromium
  was failing because playwright wasn't in the system PATH
2025-08-20 16:16:02 +00:00
jxxghp
8c69ce624f Merge pull request #4787 from jxxghp/cursor/optimize-docker-build-and-pip-environment-e8ad
Optimize docker build and pip environment
2025-08-21 00:08:50 +08:00
Cursor Agent
bb73acdde5 Checkpoint before follow-up message
Co-authored-by: jxxghp <jxxghp@live.cn>
2025-08-20 16:06:39 +00:00
Cursor Agent
993bc3775b Checkpoint before follow-up message
Co-authored-by: jxxghp <jxxghp@live.cn>
2025-08-20 16:04:44 +00:00
jxxghp
3d2ff28bcd fix download 2025-08-20 23:38:51 +08:00
jxxghp
9b78deb802 fix torrent 2025-08-20 23:07:29 +08:00
jxxghp
dadc525d0b feat: use cache for torrent downloads 2025-08-20 22:03:18 +08:00
DDSRem
22b2140c94 fix requirement 2025-08-20 21:18:33 +08:00
jxxghp
f07496a4a0 fix cache 2025-08-20 21:11:10 +08:00
jxxghp
1b2938cbc8 Merge pull request #4785 from jxxghp/cursor/fix-postgresql-textual-sql-expression-error-e023 2025-08-20 20:13:56 +08:00
Cursor Agent
d4d2f58830 Checkpoint before follow-up message
Co-authored-by: jxxghp <jxxghp@live.cn>
2025-08-20 12:10:52 +00:00
jxxghp
b3113e13ec refactor: add combined file cache 2025-08-20 19:04:07 +08:00
jxxghp
055c8e26f0 refactor: rework the cache system 2025-08-20 17:35:32 +08:00
jxxghp
2a7a7239d7 Add global image cache setting and temp-file cleanup days setting 2025-08-20 13:52:38 +08:00
jxxghp
2fa40dac3f Improve resource management in the monitoring and messaging services 2025-08-20 13:35:24 +08:00
jxxghp
6b4fbd7dc2 Add PostgreSQL and Redis database modules, including module initialization and connection testing 2025-08-20 13:35:12 +08:00
jxxghp
5b0bb19717 Use the TTLCache from app.core.cache throughout 2025-08-20 12:43:30 +08:00
jxxghp
843dfc430a fix log 2025-08-20 09:36:46 +08:00
jxxghp
69cb07c527 Improve the cache mechanism to support switching between Redis and local caches 2025-08-20 09:16:30 +08:00
jxxghp
89e8a64734 Refactor the Redis cache mechanism 2025-08-20 08:51:03 +08:00
jxxghp
5eb2dec32d Add RedisHelper class 2025-08-20 08:50:45 +08:00
jxxghp
db0ea7d6c4 Fix database sequence errors (#4777)
* Fix database upgrade script to handle existing identity columns

Co-authored-by: jxxghp <jxxghp@live.cn>

* Improve identity column conversion with error handling and cleanup

Co-authored-by: jxxghp <jxxghp@live.cn>

* Fix database upgrade script to handle existing identity columns

Co-authored-by: jxxghp <jxxghp@live.cn>

---------

Co-authored-by: Cursor Agent <cursoragent@cursor.com>
Co-authored-by: jxxghp <jxxghp@live.cn>
2025-08-20 00:29:35 +08:00
jxxghp
1eb85003de Update version.py 2025-08-19 17:58:27 +08:00
jxxghp
cca170f84a Update emby.py 2025-08-19 15:30:22 +08:00
jxxghp
c8c016caa8 Update __init__.py 2025-08-19 14:27:02 +08:00
jxxghp
45d5874026 Update __init__.py 2025-08-19 14:20:46 +08:00
jxxghp
69b1ce60ff fix db config 2025-08-19 14:15:33 +08:00
jxxghp
3ff3e4b106 fix db config 2025-08-19 14:05:24 +08:00
jxxghp
dc50a68b01 Fix database table name references 2025-08-19 12:54:47 +08:00
jxxghp
968cfd8654 fix db 2025-08-19 12:41:07 +08:00
jxxghp
cf28d93be6 fix db 2025-08-19 12:35:52 +08:00
jxxghp
be08d6ebb5 fix db 2025-08-19 12:02:53 +08:00
jxxghp
4bc24f3b00 fix db 2025-08-19 11:53:59 +08:00
jxxghp
15833f94cf fix db 2025-08-19 11:40:34 +08:00
jxxghp
aeb297efcf Improve the site-active status check and simplify database query conditions 2025-08-19 11:23:09 +08:00
jxxghp
d48c6b98e8 rollback local postgresql 2025-08-19 08:30:07 +08:00
jxxghp
b79ccfafed Improve how PostgreSQL commands are executed in entrypoint.sh 2025-08-19 07:15:02 +08:00
jxxghp
c87ba59552 Update entrypoint.sh 2025-08-18 22:42:55 +08:00
jxxghp
91fd71c858 fix entrypoint.sh 2025-08-18 22:26:01 +08:00
jxxghp
6f64e67538 fix dockerfile 2025-08-18 21:42:44 +08:00
jxxghp
bd7a0b072f fix entrypoint.sh 2025-08-18 21:22:29 +08:00
jxxghp
01ca001c97 fix entrypoint.sh 2025-08-18 21:10:24 +08:00
jxxghp
324ad2a87c Improve PostgreSQL data directory initialization and startup logic 2025-08-18 20:55:33 +08:00
jxxghp
d9ad2630f0 fix postgresql 2025-08-18 19:14:47 +08:00
jxxghp
83958a4a48 fix postgresql 2025-08-18 19:12:20 +08:00
jxxghp
f6a6efdc42 fix app.env 2025-08-18 15:17:26 +08:00
jxxghp
1bbe7657b9 fix dockerfile 2025-08-18 11:42:53 +08:00
jxxghp
38189753b5 Add a new Docker image configuration to the build workflow 2025-08-18 11:31:00 +08:00
jxxghp
5b0e658617 Reorder configuration file entries 2025-08-18 11:29:04 +08:00
jxxghp
b6cf54d57f Add support for PostgreSQL 2025-08-18 11:19:17 +08:00
jxxghp
e8058c8813 Add PostgreSQL database support 2025-08-18 11:19:06 +08:00
jxxghp
784868048d Update scheduler.py 2025-08-18 07:04:39 +08:00
106 changed files with 5091 additions and 1977 deletions

View File

@@ -1,3 +1,84 @@
# Ignore git
# Git
.github
.git
.git
.gitignore
# Documentation
docs/
README.md
LICENSE
# Development files
.pylintrc
*.pyc
__pycache__/
*.pyo
*.pyd
.Python
*.so
.pytest_cache/
.coverage
htmlcov/
.tox/
.nox/
.hypothesis/
.mypy_cache/
.dmypy.json
dmypy.json
# Virtual environments
venv/
env/
ENV/
env.bak/
venv.bak/
# IDE
.vscode/
.idea/
*.swp
*.swo
*~
# OS
.DS_Store
.DS_Store?
._*
.Spotlight-V100
.Trashes
ehthumbs.db
Thumbs.db
# Logs
*.log
logs/
# Temporary files
*.tmp
*.temp
tmp/
temp/
# Database
*.db
*.sqlite
*.sqlite3
# Test files
tests/
test_*
*_test.py
# Build artifacts
build/
dist/
*.egg-info/
# Docker
Dockerfile*
docker-compose*
.dockerignore
# Other
app.ico
frozen.spec

.github/workflows/beta.yml vendored Normal file
View File

@@ -0,0 +1,60 @@
name: MoviePilot Builder Beta
on:
workflow_dispatch:
jobs:
Docker-build:
runs-on: ubuntu-latest
name: Build Docker Image
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Release version
id: release_version
run: |
app_version=$(cat version.py |sed -ne "s/APP_VERSION\s=\s'v\(.*\)'/\1/gp")
echo "app_version=$app_version" >> $GITHUB_ENV
- name: Docker Meta
id: meta
uses: docker/metadata-action@v5
with:
images: |
${{ secrets.DOCKER_USERNAME }}/moviepilot-v2
ghcr.io/${{ github.repository }}
tags: |
type=raw,value=beta
- name: Set Up QEMU
uses: docker/setup-qemu-action@v3
- name: Set Up Buildx
uses: docker/setup-buildx-action@v3
- name: Login DockerHub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Login GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build Image
uses: docker/build-push-action@v5
with:
context: .
file: docker/Dockerfile
platforms: |
linux/amd64
linux/arm64/v8
push: true
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
cache-from: type=gha, scope=${{ github.workflow }}-docker
cache-to: type=gha, scope=${{ github.workflow }}-docker

View File

@@ -27,6 +27,7 @@ jobs:
with:
images: |
${{ secrets.DOCKER_USERNAME }}/moviepilot-v2
${{ secrets.DOCKER_USERNAME }}/moviepilot
ghcr.io/${{ github.repository }}
tags: |
type=raw,value=${{ env.app_version }}

View File

@@ -90,7 +90,7 @@ def delete_transfer_history(history_in: schemas.TransferHistory,
# 册除媒体库文件
if deletedest and history.dest_fileitem:
dest_fileitem = schemas.FileItem(**history.dest_fileitem)
StorageChain().delete_media_file(fileitem=dest_fileitem, mtype=MediaType(history.type))
StorageChain().delete_media_file(dest_fileitem)
# 删除源文件
if deletesrc and history.src_fileitem:

View File

@@ -13,7 +13,7 @@ from app import schemas
from app.command import Command
from app.core.config import settings
from app.core.plugin import PluginManager
from app.core.security import verify_apikey, verify_token
from app.core.security import verify_apikey, verify_token, verify_apitoken
from app.db.models import User
from app.db.systemconfig_oper import SystemConfigOper
from app.db.user_oper import get_current_active_superuser, get_current_active_superuser_async
@@ -21,6 +21,7 @@ from app.factory import app
from app.helper.plugin import PluginHelper
from app.log import logger
from app.scheduler import Scheduler
from app.schemas.plugin import PluginMemoryInfo
from app.schemas.types import SystemConfigKey
PROTECTED_ROUTES = {"/api/v1/openapi.json", "/docs", "/docs/oauth2-redirect", "/redoc"}
@@ -463,6 +464,87 @@ async def update_folder_plugins(folder_name: str, plugin_ids: List[str],
return schemas.Response(success=True, message=f"文件夹 '{folder_name}' 中的插件已更新")
@router.post("/clone/{plugin_id}", summary="创建插件分身", response_model=schemas.Response)
def clone_plugin(plugin_id: str,
clone_data: dict,
_: User = Depends(get_current_active_superuser)) -> Any:
"""
创建插件分身
"""
try:
success, message = PluginManager().clone_plugin(
plugin_id=plugin_id,
suffix=clone_data.get("suffix", ""),
name=clone_data.get("name", ""),
description=clone_data.get("description", ""),
version=clone_data.get("version", ""),
icon=clone_data.get("icon", "")
)
if success:
# 注册插件服务
reload_plugin(message)
# 将分身插件添加到原插件所在的文件夹中
_add_clone_to_plugin_folder(plugin_id, message)
return schemas.Response(success=True, message="插件分身创建成功")
else:
return schemas.Response(success=False, message=message)
except Exception as e:
logger.error(f"创建插件分身失败:{str(e)}")
return schemas.Response(success=False, message=f"创建插件分身失败:{str(e)}")
@router.get("/memory", summary="插件内存使用统计", response_model=List[PluginMemoryInfo])
def plugin_memory_stats(_: Annotated[str, Depends(verify_apitoken)]) -> Any:
"""
获取所有插件的内存使用统计信息
"""
try:
plugin_manager = PluginManager()
memory_stats = plugin_manager.get_plugin_memory_stats()
return memory_stats
except Exception as e:
logger.error(f"获取插件内存统计失败:{str(e)}")
raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail=f"获取插件内存统计失败:{str(e)}")
@router.get("/memory/{plugin_id}", summary="单个插件内存使用统计", response_model=PluginMemoryInfo)
def plugin_memory_stat(plugin_id: str, _: Annotated[str, Depends(verify_apitoken)]) -> Any:
"""
获取指定插件的内存使用统计信息
"""
try:
plugin_manager = PluginManager()
memory_stats = plugin_manager.get_plugin_memory_stats(plugin_id)
if not memory_stats:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,
detail=f"插件 {plugin_id} 不存在或未运行")
return memory_stats[0]
except HTTPException:
raise
except Exception as e:
logger.error(f"获取插件 {plugin_id} 内存统计失败:{str(e)}")
raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail=f"获取插件内存统计失败:{str(e)}")
@router.delete("/memory/cache", summary="清除插件内存统计缓存")
def clear_plugin_memory_cache(_: Annotated[str, Depends(verify_apitoken)],
plugin_id: Optional[str] = None) -> Any:
"""
清除插件内存统计缓存
"""
try:
plugin_manager = PluginManager()
plugin_manager.clear_plugin_memory_cache(plugin_id)
message = f"已清除插件 {plugin_id} 的内存统计缓存" if plugin_id else "已清除所有插件的内存统计缓存"
return schemas.Response(success=True, message=message)
except Exception as e:
logger.error(f"清除插件内存统计缓存失败:{str(e)}")
return schemas.Response(success=False, message=f"清除缓存失败:{str(e)}")
@router.get("/{plugin_id}", summary="获取插件配置")
async def plugin_config(plugin_id: str,
_: User = Depends(get_current_active_superuser_async)) -> dict:
@@ -528,36 +610,6 @@ def uninstall_plugin(plugin_id: str,
return schemas.Response(success=True)
@router.post("/clone/{plugin_id}", summary="创建插件分身", response_model=schemas.Response)
def clone_plugin(plugin_id: str,
clone_data: dict,
_: User = Depends(get_current_active_superuser)) -> Any:
"""
创建插件分身
"""
try:
success, message = PluginManager().clone_plugin(
plugin_id=plugin_id,
suffix=clone_data.get("suffix", ""),
name=clone_data.get("name", ""),
description=clone_data.get("description", ""),
version=clone_data.get("version", ""),
icon=clone_data.get("icon", "")
)
if success:
# 注册插件服务
reload_plugin(message)
# 将分身插件添加到原插件所在的文件夹中
_add_clone_to_plugin_folder(plugin_id, message)
return schemas.Response(success=True, message="插件分身创建成功")
else:
return schemas.Response(success=False, message=message)
except Exception as e:
logger.error(f"创建插件分身失败:{str(e)}")
return schemas.Response(success=False, message=f"创建插件分身失败:{str(e)}")
def _add_clone_to_plugin_folder(original_plugin_id: str, clone_plugin_id: str):
"""
将分身插件添加到原插件所在的文件夹中
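
The hunk above adds three authenticated endpoints for plugin memory statistics. A minimal client sketch follows; the /api/v1/plugin prefix and the "token" query parameter name for verify_apitoken are assumptions not confirmed by this diff:

# Hypothetical client for the new plugin memory endpoints.
# Assumptions: base URL, router prefix, and the "token" parameter name.
import requests

BASE = "http://localhost:3001/api/v1/plugin"  # assumed prefix
TOKEN = "your-api-token"                      # value checked by verify_apitoken

# Memory stats for all plugins (list of PluginMemoryInfo)
all_stats = requests.get(f"{BASE}/memory", params={"token": TOKEN}).json()

# Memory stats for a single plugin (404 if not installed or not running)
one = requests.get(f"{BASE}/memory/SomePlugin", params={"token": TOKEN}).json()

# Clear cached stats for one plugin; omit plugin_id to clear all
requests.delete(f"{BASE}/memory/cache", params={"token": TOKEN, "plugin_id": "SomePlugin"})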

View File

@@ -171,15 +171,14 @@ def rename(fileitem: schemas.FileItem,
sub_files: List[schemas.FileItem] = StorageChain().list_files(fileitem)
if sub_files:
# 开始进度
progress = ProgressHelper()
progress.start(ProgressKey.BatchRename)
progress = ProgressHelper(ProgressKey.BatchRename)
progress.start()
total = len(sub_files)
handled = 0
for sub_file in sub_files:
handled += 1
progress.update(value=handled / total * 100,
text=f"正在处理 {sub_file.name} ...",
key=ProgressKey.BatchRename)
text=f"正在处理 {sub_file.name} ...")
if sub_file.type == "dir":
continue
if not sub_file.extension:
@@ -190,19 +189,19 @@ def rename(fileitem: schemas.FileItem,
meta = MetaInfoPath(sub_path)
mediainfo = transferchain.recognize_media(meta)
if not mediainfo:
progress.end(ProgressKey.BatchRename)
progress.end()
return schemas.Response(success=False, message=f"{sub_path.name} 未识别到媒体信息")
new_path = transferchain.recommend_name(meta=meta, mediainfo=mediainfo)
if not new_path:
progress.end(ProgressKey.BatchRename)
progress.end()
return schemas.Response(success=False, message=f"{sub_path.name} 未识别到新名称")
ret: schemas.Response = rename(fileitem=sub_file,
new_name=Path(new_path).name,
recursive=False)
if not ret.success:
progress.end(ProgressKey.BatchRename)
progress.end()
return schemas.Response(success=False, message=f"{sub_path.name} 重命名失败!")
progress.end(ProgressKey.BatchRename)
progress.end()
# 重命名自己
result = StorageChain().rename_file(fileitem, new_name)
if result:
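
For reference, a minimal sketch of the refactored ProgressHelper API used in this hunk: the progress key is now bound once at construction instead of being passed to every start/update/end call (the import paths are assumed from the surrounding code):

# Sketch of the refactored ProgressHelper usage; import paths assumed.
from app.helper.progress import ProgressHelper
from app.schemas.types import ProgressKey

progress = ProgressHelper(ProgressKey.BatchRename)  # key bound at construction
progress.start()
items = ["a.mkv", "b.mkv", "c.mkv"]
for handled, name in enumerate(items, start=1):
    # No key= argument anymore; the helper already knows its key
    progress.update(value=handled / len(items) * 100, text=f"Processing {name} ...")
progress.end()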

View File

@@ -4,19 +4,20 @@ import json
import re
from collections import deque
from datetime import datetime
from pathlib import Path
from typing import Optional, Union, Annotated
import aiofiles
import pillow_avif # noqa 用于自动注册AVIF支持
from PIL import Image
from anyio import Path as AsyncPath
from app.helper.sites import SitesHelper # noqa # noqa
from fastapi import APIRouter, Body, Depends, HTTPException, Header, Request, Response
from fastapi.responses import StreamingResponse
from app import schemas
from app.chain.search import SearchChain
from app.chain.system import SystemChain
from app.core.cache import AsyncFileCache
from app.core.config import global_vars, settings
from app.core.event import eventmanager
from app.core.metainfo import MetaInfo
@@ -30,6 +31,7 @@ from app.helper.mediaserver import MediaServerHelper
from app.helper.message import MessageHelper
from app.helper.progress import ProgressHelper
from app.helper.rule import RuleHelper
from app.helper.sites import SitesHelper # noqa # noqa
from app.helper.subscribe import SubscribeHelper
from app.helper.system import SystemHelper
from app.log import logger
@@ -48,7 +50,7 @@ router = APIRouter()
async def fetch_image(
url: str,
proxy: bool = False,
use_disk_cache: bool = False,
use_cache: bool = False,
if_none_match: Optional[str] = None,
allowed_domains: Optional[set[str]] = None) -> Response:
"""
@@ -64,37 +66,31 @@ async def fetch_image(
if not SecurityUtils.is_safe_url(url, allowed_domains):
raise HTTPException(status_code=404, detail="Unsafe URL")
# 后续观察系统性能表现如果发现磁盘缓存和HTTP缓存无法满足高并发情况下的响应速度需求可以考虑重新引入内存缓存
cache_path: Optional[AsyncPath] = None
if use_disk_cache:
# 生成缓存路径
base_path = AsyncPath(settings.CACHE_PATH)
sanitized_path = SecurityUtils.sanitize_url_path(url)
cache_path = base_path / "images" / sanitized_path
# 缓存路径
sanitized_path = SecurityUtils.sanitize_url_path(url)
cache_path = Path("images") / sanitized_path
if not cache_path.suffix:
# 没有文件类型,则添加后缀,在恶意文件类型和实际需求下的折衷选择
if not cache_path.suffix:
cache_path = cache_path.with_suffix(".jpg")
cache_path = cache_path.with_suffix(".jpg")
# 确保缓存路径和文件类型合法
if not await SecurityUtils.async_is_safe_path(base_path=base_path,
user_path=cache_path,
allowed_suffixes=settings.SECURITY_IMAGE_SUFFIXES):
raise HTTPException(status_code=400, detail="Invalid cache path or file type")
# 缓存对像,缓存过期时间为全局图片缓存天数
cache_backend = AsyncFileCache(base=settings.CACHE_PATH,
ttl=settings.GLOBAL_IMAGE_CACHE_DAYS * 24 * 3600)
# 目前暂不考虑磁盘缓存文件是否过期,后续通过缓存清理机制处理
if cache_path and await cache_path.exists():
try:
async with aiofiles.open(cache_path, 'rb') as f:
content = await f.read()
etag = HashUtils.md5(content)
headers = RequestUtils.generate_cache_headers(etag, max_age=86400 * 7)
if if_none_match == etag:
return Response(status_code=304, headers=headers)
return Response(content=content, media_type="image/jpeg", headers=headers)
except Exception as e:
# 如果读取磁盘缓存发生异常,这里仅记录日志,尝试再次请求远端进行处理
logger.debug(f"Failed to read cache file {cache_path}: {e}")
if use_cache:
content = await cache_backend.get(cache_path.as_posix(), region="images")
if content:
# 检查 If-None-Match
etag = HashUtils.md5(content)
headers = RequestUtils.generate_cache_headers(etag, max_age=86400 * 7)
if if_none_match == etag:
return Response(status_code=304, headers=headers)
# 返回缓存图片
return Response(
content=content,
media_type=UrlUtils.get_mime_type(url, "image/jpeg"),
headers=headers
)
# 请求远程图片
referer = "https://movie.douban.com/" if "doubanio.com" in url else None
@@ -112,22 +108,15 @@ async def fetch_image(
logger.debug(f"Invalid image format for URL {url}: {e}")
raise HTTPException(status_code=502, detail="Invalid image format")
# 获取请求响应头
response_headers = response.headers
cache_control_header = response_headers.get("Cache-Control", "")
cache_directive, max_age = RequestUtils.parse_cache_control(cache_control_header)
# 如果需要使用磁盘缓存,则保存到磁盘
if use_disk_cache and cache_path:
try:
if not await cache_path.parent.exists():
await cache_path.parent.mkdir(parents=True, exist_ok=True)
async with aiofiles.tempfile.NamedTemporaryFile(dir=cache_path.parent, delete=False) as tmp_file:
await tmp_file.write(content)
temp_path = AsyncPath(tmp_file.name)
await temp_path.replace(cache_path)
except Exception as e:
logger.debug(f"Failed to write cache file {cache_path}: {e}")
# 保存缓存
if use_cache:
await cache_backend.set(cache_path.as_posix(), content, region="images")
logger.debug(f"Image cached at {cache_path.as_posix()}")
# 检查 If-None-Match
etag = HashUtils.md5(content)
@@ -135,8 +124,8 @@ async def fetch_image(
headers = RequestUtils.generate_cache_headers(etag, cache_directive, max_age)
return Response(status_code=304, headers=headers)
# 响应
headers = RequestUtils.generate_cache_headers(etag, cache_directive, max_age)
return Response(
content=content,
media_type=response_headers.get("Content-Type") or UrlUtils.get_mime_type(url, "image/jpeg"),
@@ -159,7 +148,7 @@ async def proxy_img(
hosts = [config.config.get("host") for config in MediaServerHelper().get_configs().values() if
config and config.config and config.config.get("host")]
allowed_domains = set(settings.SECURITY_IMAGE_DOMAINS) | set(hosts)
return await fetch_image(url=imgurl, proxy=proxy, use_disk_cache=cache,
return await fetch_image(url=imgurl, proxy=proxy, use_cache=cache,
if_none_match=if_none_match, allowed_domains=allowed_domains)
@@ -174,7 +163,7 @@ async def cache_img(
"""
# 如果没有启用全局图片缓存,则不使用磁盘缓存
proxy = "doubanio.com" not in url
return await fetch_image(url=url, proxy=proxy, use_disk_cache=settings.GLOBAL_IMAGE_CACHE,
return await fetch_image(url=url, proxy=proxy, use_cache=settings.GLOBAL_IMAGE_CACHE,
if_none_match=if_none_match)
@@ -265,14 +254,14 @@ async def get_progress(request: Request, process_type: str, _: schemas.TokenPayl
"""
实时获取处理进度返回格式为SSE
"""
progress = ProgressHelper()
progress = ProgressHelper(process_type)
async def event_generator():
try:
while not global_vars.is_system_stopped:
if await request.is_disconnected():
break
detail = progress.get(process_type)
detail = progress.get()
yield f"data: {json.dumps(detail)}\n\n"
await asyncio.sleep(0.5)
except asyncio.CancelledError:
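
The fetch_image changes above replace hand-rolled disk I/O with AsyncFileCache. A condensed sketch of that get-or-fetch pattern, under the same key and region conventions as the diff; fetch_remote is a hypothetical stand-in for the actual HTTP request:

# Condensed get-or-fetch sketch around AsyncFileCache.
from pathlib import Path
from app.core.cache import AsyncFileCache
from app.core.config import settings

async def get_or_fetch_image(url: str, fetch_remote) -> bytes:
    # TTL mirrors the diff: global image cache days converted to seconds
    cache = AsyncFileCache(base=settings.CACHE_PATH,
                           ttl=settings.GLOBAL_IMAGE_CACHE_DAYS * 24 * 3600)
    # In the real code the key is a sanitized URL path; "example.jpg" is illustrative
    key = (Path("images") / "example.jpg").as_posix()
    content = await cache.get(key, region="images")
    if content:
        return content  # cache hit
    content = await fetch_remote(url)  # hypothetical remote request
    await cache.set(key, content, region="images")
    return content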

View File

@@ -135,8 +135,8 @@ def refresh_cache(_: User = Depends(get_current_active_superuser)):
@router.post("/cache/reidentify/{domain}/{torrent_hash}", summary="重新识别种子", response_model=schemas.Response)
async def reidentify_cache(domain: str, torrent_hash: str,
tmdbid: Optional[int] = None, doubanid: Optional[str] = None,
_: User = Depends(get_current_active_superuser_async)):
tmdbid: Optional[int] = None, doubanid: Optional[str] = None,
_: User = Depends(get_current_active_superuser_async)):
"""
重新识别指定的种子
:param domain: 站点域名

View File

@@ -8,7 +8,7 @@ from app import schemas
from app.chain.media import MediaChain
from app.chain.storage import StorageChain
from app.chain.transfer import TransferChain
from app.core.config import settings
from app.core.config import settings, global_vars
from app.core.metainfo import MetaInfoPath
from app.core.security import verify_token, verify_apitoken
from app.db import get_db
@@ -75,6 +75,8 @@ async def remove_queue(fileitem: schemas.FileItem, _: schemas.TokenPayload = Dep
:param _: Token校验
"""
TransferChain().remove_from_queue(fileitem)
# 取消整理
global_vars.stop_transfer(fileitem.path)
return schemas.Response(success=True)
@@ -109,7 +111,7 @@ def manual_transfer(transer_item: ManualTransferItem,
if history.dest_fileitem:
# 删除旧的已整理文件
dest_fileitem = FileItem(**history.dest_fileitem)
state = StorageChain().delete_media_file(dest_fileitem, mtype=MediaType(history.type))
state = StorageChain().delete_media_file(dest_fileitem)
if not state:
return schemas.Response(success=False, message=f"{dest_fileitem.path} 删除失败")

View File

@@ -8,12 +8,10 @@ from pathlib import Path
from typing import Optional, Any, Tuple, List, Set, Union, Dict
from fastapi.concurrency import run_in_threadpool
import aiofiles
from anyio import Path as AsyncPath
from qbittorrentapi import TorrentFilesList
from transmission_rpc import File
from app.core.cache import FileCache, AsyncFileCache
from app.core.config import settings
from app.core.context import Context, MediaInfo, TorrentInfo
from app.core.event import EventManager
@@ -48,78 +46,66 @@ class ChainBase(metaclass=ABCMeta):
send_callback=self.run_module
)
self.pluginmanager = PluginManager()
self.filecache = FileCache()
self.async_filecache = AsyncFileCache()
@staticmethod
def load_cache(filename: str) -> Any:
def load_cache(self, filename: str) -> Any:
"""
从本地加载缓存
加载缓存
"""
cache_path = settings.TEMP_PATH / filename
if cache_path.exists():
try:
with open(cache_path, 'rb') as f:
return pickle.load(f)
except Exception as err:
logger.error(f"加载缓存 {filename} 出错:{str(err)}")
return None
content = self.filecache.get(filename)
if not content:
return None
try:
return pickle.loads(content)
except Exception as err:
logger.error(f"加载缓存 {filename} 出错:{str(err)}")
return None
@staticmethod
async def async_load_cache(filename: str) -> Any:
async def async_load_cache(self, filename: str) -> Any:
"""
异步从本地加载缓存
异步加载缓存
"""
cache_path = settings.TEMP_PATH / filename
if cache_path.exists():
try:
async with aiofiles.open(cache_path, 'rb') as f:
content = await f.read()
return pickle.loads(content)
except Exception as err:
logger.error(f"加载缓存 {filename} 出错:{str(err)}")
return None
content = await self.async_filecache.get(filename)
if not content:
return None
try:
return pickle.loads(content)
except Exception as err:
logger.error(f"异步加载缓存 {filename} 出错:{str(err)}")
return None
@staticmethod
async def async_save_cache(cache: Any, filename: str) -> None:
async def async_save_cache(self, cache: Any, filename: str) -> None:
"""
异步保存缓存到本地
异步保存缓存
"""
try:
async with aiofiles.open(settings.TEMP_PATH / filename, 'wb') as f:
await f.write(pickle.dumps(cache))
await self.async_filecache.set(filename, pickle.dumps(cache))
except Exception as err:
logger.error(f"保存缓存 {filename} 出错:{str(err)}")
logger.error(f"异步保存缓存 {filename} 出错:{str(err)}")
return
@staticmethod
def save_cache(cache: Any, filename: str) -> None:
def save_cache(self, cache: Any, filename: str) -> None:
"""
保存缓存到本地
保存缓存
"""
try:
with open(settings.TEMP_PATH / filename, 'wb') as f:
pickle.dump(cache, f) # noqa
self.filecache.set(filename, pickle.dumps(cache))
except Exception as err:
logger.error(f"保存缓存 {filename} 出错:{str(err)}")
return
@staticmethod
def remove_cache(filename: str) -> None:
def remove_cache(self, filename: str) -> None:
"""
删除本地缓存
删除缓存同时删除Redis和本地缓存
"""
cache_path = settings.TEMP_PATH / filename
if cache_path.exists():
cache_path.unlink()
self.filecache.delete(filename)
@staticmethod
async def async_remove_cache(filename: str) -> None:
async def async_remove_cache(self, filename: str) -> None:
"""
异步删除本地缓存
异步删除缓存同时删除Redis和本地缓存
"""
cache_path = AsyncPath(settings.TEMP_PATH) / filename
if await cache_path.exists():
try:
await cache_path.unlink()
except Exception as err:
logger.error(f"异步删除缓存 {filename} 出错:{str(err)}")
pass
@staticmethod
def __is_valid_empty(ret):
@@ -700,13 +686,13 @@ class ChainBase(metaclass=ABCMeta):
return self.run_module("filter_torrents", rule_groups=rule_groups,
torrent_list=torrent_list, mediainfo=mediainfo)
def download(self, content: Union[Path, str], download_dir: Path, cookie: str,
def download(self, content: Union[Path, str, bytes], download_dir: Path, cookie: str,
episodes: Set[int] = None, category: Optional[str] = None, label: Optional[str] = None,
downloader: Optional[str] = None
) -> Optional[Tuple[Optional[str], Optional[str], Optional[str], str]]:
"""
根据种子文件,选择并添加下载任务
:param content: 种子文件地址或者磁力链接
:param content: 种子文件地址或者磁力链接或者种子内容
:param download_dir: 下载目录
:param cookie: cookie
:param episodes: 需要下载的集数
@@ -719,15 +705,16 @@ class ChainBase(metaclass=ABCMeta):
cookie=cookie, episodes=episodes, category=category, label=label,
downloader=downloader)
def download_added(self, context: Context, download_dir: Path, torrent_path: Path = None) -> None:
def download_added(self, context: Context, download_dir: Path, torrent_content: Union[str, bytes] = None) -> None:
"""
添加下载任务成功后,从站点下载字幕,保存到下载目录
:param context: 上下文,包括识别信息、媒体信息、种子信息
:param download_dir: 下载目录
:param torrent_path: 种子文件地址
:param torrent_content: 种子内容如果有则直接使用该内容否则从context中获取种子文件路径
:return: None该方法可被多个模块同时处理
"""
return self.run_module("download_added", context=context, torrent_path=torrent_path,
return self.run_module("download_added", context=context,
torrent_content=torrent_content,
download_dir=download_dir)
def list_torrents(self, status: TorrentStatus = None,
@@ -923,12 +910,12 @@ class ChainBase(metaclass=ABCMeta):
immediately=True if message.userid else False)
async def async_post_message(self,
message: Optional[Notification] = None,
meta: Optional[MetaBase] = None,
mediainfo: Optional[MediaInfo] = None,
torrentinfo: Optional[TorrentInfo] = None,
transferinfo: Optional[TransferInfo] = None,
**kwargs) -> None:
message: Optional[Notification] = None,
meta: Optional[MetaBase] = None,
mediainfo: Optional[MediaInfo] = None,
torrentinfo: Optional[TorrentInfo] = None,
transferinfo: Optional[TransferInfo] = None,
**kwargs) -> None:
"""
异步发送消息
:param message: Notification实例
@@ -991,15 +978,16 @@ class ChainBase(metaclass=ABCMeta):
break
# 按设定发送
await self.eventmanager.async_send_event(etype=EventType.NoticeMessage,
data={**send_message.dict(), "type": send_message.mtype})
data={**send_message.dict(), "type": send_message.mtype})
await self.messagequeue.async_send_message("post_message", message=send_message)
if not send_orignal:
return
# 发送消息事件
await self.eventmanager.async_send_event(etype=EventType.NoticeMessage, data={**message.dict(), "type": message.mtype})
await self.eventmanager.async_send_event(etype=EventType.NoticeMessage,
data={**message.dict(), "type": message.mtype})
# 按原消息发送
await self.messagequeue.async_send_message("post_message", message=message,
immediately=True if message.userid else False)
immediately=True if message.userid else False)
def post_medias_message(self, message: Notification, medias: List[MediaInfo]) -> None:
"""

View File

@@ -8,6 +8,7 @@ from typing import List, Optional, Tuple, Set, Dict, Union
from app import schemas
from app.chain import ChainBase
from app.core.cache import FileCache
from app.core.config import settings, global_vars
from app.core.context import MediaInfo, TorrentInfo, Context
from app.core.event import eventmanager, Event
@@ -35,10 +36,10 @@ class DownloadChain(ChainBase):
channel: MessageChannel = None,
source: Optional[str] = None,
userid: Union[str, int] = None
) -> Tuple[Optional[Union[Path, str]], str, list]:
) -> Tuple[Optional[Union[str, bytes]], str, list]:
"""
下载种子文件,如果是磁力链,会返回磁力链接本身
:return: 种子路径,种子目录名,种子文件清单
:return: 种子内容,种子目录名,种子文件清单
"""
def __get_redict_url(url: str, ua: Optional[str] = None, cookie: Optional[str] = None) -> Optional[str]:
@@ -117,7 +118,7 @@ class DownloadChain(ChainBase):
logger.error(f"{torrent.title} 无法获取下载地址:{torrent.enclosure}")
return None, "", []
# 下载种子文件
torrent_file, content, download_folder, files, error_msg = TorrentHelper().download_torrent(
_, content, download_folder, files, error_msg = TorrentHelper().download_torrent(
url=torrent_url,
cookie=site_cookie,
ua=torrent.site_ua or settings.USER_AGENT,
@@ -127,7 +128,7 @@ class DownloadChain(ChainBase):
# 磁力链
return content, "", []
if not torrent_file:
if not content:
logger.error(f"下载种子文件失败:{torrent.title} - {torrent_url}")
self.post_message(Notification(
channel=channel,
@@ -139,9 +140,11 @@ class DownloadChain(ChainBase):
return None, "", []
# 返回 种子文件路径,种子目录名,种子文件清单
return torrent_file, download_folder, files
return content, download_folder, files
def download_single(self, context: Context, torrent_file: Path = None,
def download_single(self, context: Context,
torrent_file: Path = None,
torrent_content: Optional[Union[str, bytes]] = None,
episodes: Set[int] = None,
channel: MessageChannel = None,
source: Optional[str] = None,
@@ -154,6 +157,7 @@ class DownloadChain(ChainBase):
下载及发送通知
:param context: 资源上下文
:param torrent_file: 种子文件路径
:param torrent_content: 种子内容(磁力链或种子文件内容)
:param episodes: 需要下载的集数
:param channel: 通知渠道
:param source: 来源消息通知、Subscribe、Manual等
@@ -207,18 +211,26 @@ class DownloadChain(ChainBase):
# 实际下载的集数
download_episodes = StringUtils.format_ep(list(episodes)) if episodes else None
_folder_name = ""
if not torrent_file:
if not torrent_file and not torrent_content:
# 下载种子文件,得到的可能是文件也可能是磁力链
content, _folder_name, _file_list = self.download_torrent(_torrent,
channel=channel,
source=source,
userid=userid)
if not content:
return None
else:
content = torrent_file
# 获取种子文件的文件夹名和文件清单
_folder_name, _file_list = TorrentHelper().get_torrent_info(torrent_file)
torrent_content, _folder_name, _file_list = self.download_torrent(_torrent,
channel=channel,
source=source,
userid=userid)
elif torrent_file:
if torrent_file.exists():
torrent_content = torrent_file.read_bytes()
else:
# 缓存处理器
cache_backend = FileCache()
# 读取缓存的种子文件
torrent_content = cache_backend.get(torrent_file.as_posix(), region="torrents")
if not torrent_content:
return None
# 获取种子文件的文件夹名和文件清单
_folder_name, _file_list = TorrentHelper().get_fileinfo_from_torrent_content(torrent_content)
# 下载目录
if save_path:
@@ -249,7 +261,7 @@ class DownloadChain(ChainBase):
return None
# 添加下载
result: Optional[tuple] = self.download(content=content,
result: Optional[tuple] = self.download(content=torrent_content,
cookie=_torrent.site_cookie,
episodes=episodes,
download_dir=download_dir,
@@ -346,7 +358,7 @@ class DownloadChain(ChainBase):
username=username,
)
# 下载成功后处理
self.download_added(context=context, download_dir=download_dir, torrent_path=torrent_file)
self.download_added(context=context, download_dir=download_dir, torrent_content=torrent_content)
# 广播事件
self.eventmanager.send_event(EventType.DownloadAdded, {
"hash": _hash,
@@ -560,7 +572,7 @@ class DownloadChain(ChainBase):
logger.info(f"开始下载 {torrent.title} ...")
download_id = self.download_single(
context=context,
torrent_file=content if isinstance(content, Path) else None,
torrent_content=content,
save_path=save_path,
channel=channel,
source=source,
@@ -727,7 +739,7 @@ class DownloadChain(ChainBase):
logger.info(f"开始下载 {torrent.title} ...")
download_id = self.download_single(
context=context,
torrent_file=content if isinstance(content, Path) else None,
torrent_content=content,
episodes=selected_episodes,
save_path=save_path,
channel=channel,

View File

@@ -1,48 +1,142 @@
import asyncio
import io
from pathlib import Path
from typing import List, Optional
import aiofiles
import pillow_avif # noqa 用于自动注册AVIF支持
from PIL import Image
from anyio import Path as AsyncPath
from app.chain import ChainBase
from app.chain.bangumi import BangumiChain
from app.chain.douban import DoubanChain
from app.chain.tmdb import TmdbChain
from app.core.cache import cache_backend, cached
from app.core.cache import cached, FileCache
from app.core.config import settings, global_vars
from app.log import logger
from app.schemas import MediaType
from app.utils.asyncio import AsyncUtils
from app.utils.common import log_execution_time
from app.utils.http import AsyncRequestUtils
from app.utils.http import RequestUtils
from app.utils.security import SecurityUtils
from app.utils.singleton import Singleton
# 推荐相关的专用缓存
recommend_ttl = 24 * 3600
recommend_cache_region = "recommend"
class RecommendChain(ChainBase, metaclass=Singleton):
"""
推荐处理链,单例运行
"""
# 推荐数据的缓存页数
# 推荐缓存时间
recommend_ttl = 24 * 3600
# 推荐缓存页数
cache_max_pages = 5
# 推荐缓存区域
recommend_cache_region = "recommend"
def refresh_recommend(self):
"""
刷新推荐数据 - 同步包装器
刷新推荐
"""
logger.debug("Starting to refresh Recommend data.")
# 推荐来源方法
recommend_methods = [
self.tmdb_movies,
self.tmdb_tvs,
self.tmdb_trending,
self.bangumi_calendar,
self.douban_movie_showing,
self.douban_movies,
self.douban_tvs,
self.douban_movie_top250,
self.douban_tv_weekly_chinese,
self.douban_tv_weekly_global,
self.douban_tv_animation,
self.douban_movie_hot,
self.douban_tv_hot,
]
# 缓存并刷新所有推荐数据
recommends = []
# 记录哪些方法已完成
methods_finished = set()
# 这里避免区间内连续调用相同来源,因此遍历方案为每页遍历所有推荐来源,再进行页数遍历
for page in range(1, self.cache_max_pages + 1):
for method in recommend_methods:
if global_vars.is_system_stopped:
return
if method in methods_finished:
continue
logger.debug(f"Fetch {method.__name__} data for page {page}.")
data = method(page=page)
if not data:
logger.debug("All recommendation methods have finished fetching data. Ending pagination early.")
methods_finished.add(method)
continue
recommends.extend(data)
# 如果所有方法都已经完成,提前结束循环
if len(methods_finished) == len(recommend_methods):
break
# 缓存收集到的海报
self.__cache_posters(recommends)
logger.debug("Recommend data refresh completed.")
def __cache_posters(self, datas: List[dict]):
"""
提取 poster_path 并缓存图片
:param datas: 数据列表
"""
if not settings.GLOBAL_IMAGE_CACHE:
return
for data in datas:
if global_vars.is_system_stopped:
return
poster_path = data.get("poster_path")
if poster_path:
poster_url = poster_path.replace("original", "w500")
logger.debug(f"Caching poster image: {poster_url}")
self.__fetch_and_save_image(poster_url)
@staticmethod
def __fetch_and_save_image(url: str):
"""
请求并保存图片
:param url: 图片路径
"""
# 生成缓存路径
sanitized_path = SecurityUtils.sanitize_url_path(url)
cache_path = Path("images") / sanitized_path
# 没有文件类型,则添加后缀,在恶意文件类型和实际需求下的折衷选择
if not cache_path.suffix:
cache_path = cache_path.with_suffix(".jpg")
# 获取缓存后端,并设置缓存时间为全局配置的缓存天数
cache_backend = FileCache(base=settings.CACHE_PATH,
ttl=settings.GLOBAL_IMAGE_CACHE_DAYS * 24 * 3600)
# 本地存在缓存图片,则直接跳过
if cache_backend.get(cache_path.as_posix(), region="images"):
logger.debug(f"Cache hit: Image already exists at {cache_path}")
return
# 请求远程图片
referer = "https://movie.douban.com/" if "doubanio.com" in url else None
proxies = settings.PROXY if not referer else None
response = RequestUtils(ua=settings.NORMAL_USER_AGENT, proxies=proxies, referer=referer).get_res(url=url)
if not response:
logger.debug(f"Empty response for URL: {url}")
return
# 验证下载的内容是否为有效图片
try:
AsyncUtils.run_async(self.async_refresh_recommend())
Image.open(io.BytesIO(response.content)).verify()
except Exception as e:
logger.error(f"刷新推荐数据失败:{str(e)}")
raise
logger.debug(f"Invalid image format for URL {url}: {e}")
return
# 保存缓存
cache_backend.set(cache_path.as_posix(), response.content, region="images")
logger.debug(f"Successfully cached image at {cache_path} for URL: {url}")
@log_execution_time(logger=logger)
@cached(ttl=recommend_ttl, region=recommend_cache_region)
@@ -199,162 +293,6 @@ class RecommendChain(ChainBase, metaclass=Singleton):
tvs = DoubanChain().tv_hot(page=page, count=count)
return [media.to_dict() for media in tvs] if tvs else []
# 异步版本的方法
async def async_refresh_recommend(self):
"""
异步刷新推荐
"""
logger.debug("Starting to async refresh Recommend data.")
cache_backend.clear(region=recommend_cache_region)
logger.debug("Recommend Cache has been cleared.")
# 推荐来源方法
recommend_methods = [
self.async_tmdb_movies,
self.async_tmdb_tvs,
self.async_tmdb_trending,
self.async_bangumi_calendar,
self.async_douban_movie_showing,
self.async_douban_movies,
self.async_douban_tvs,
self.async_douban_movie_top250,
self.async_douban_tv_weekly_chinese,
self.async_douban_tv_weekly_global,
self.async_douban_tv_animation,
self.async_douban_movie_hot,
self.async_douban_tv_hot,
]
# 缓存并刷新所有推荐数据
recommends = []
# 记录哪些方法已完成
methods_finished = set()
# 这里避免区间内连续调用相同来源,因此遍历方案为每页遍历所有推荐来源,再进行页数遍历
for page in range(1, self.cache_max_pages + 1):
# 为每个页面并发执行所有方法
tasks = []
for method in recommend_methods:
if global_vars.is_system_stopped:
return
if method in methods_finished:
continue
tasks.append(self._async_fetch_method_data(method, page, methods_finished))
# 并发执行所有任务
if tasks:
results = await asyncio.gather(*tasks, return_exceptions=True)
for result in results:
if isinstance(result, list) and result:
recommends.extend(result)
# 如果所有方法都已经完成,提前结束循环
if len(methods_finished) == len(recommend_methods):
break
# 缓存收集到的海报
await self.__async_cache_posters(recommends)
logger.debug("Async recommend data refresh completed.")
@staticmethod
async def _async_fetch_method_data(method, page: int, methods_finished: set):
"""
异步获取方法数据的辅助函数
"""
try:
logger.debug(f"Async fetch {method.__name__} data for page {page}.")
data = await method(page=page)
if not data:
logger.debug(f"Method {method.__name__} finished fetching data. Ending pagination early.")
methods_finished.add(method)
return []
return data
except Exception as e:
logger.error(f"Error fetching data from {method.__name__}: {e}")
methods_finished.add(method)
return []
async def __async_cache_posters(self, datas: List[dict]):
"""
异步提取 poster_path 并缓存图片
:param datas: 数据列表
"""
if not settings.GLOBAL_IMAGE_CACHE:
return
tasks = []
for data in datas:
if global_vars.is_system_stopped:
return
poster_path = data.get("poster_path")
if poster_path:
poster_url = poster_path.replace("original", "w500")
logger.debug(f"Async caching poster image: {poster_url}")
tasks.append(self.__async_fetch_and_save_image(poster_url))
# 并发缓存图片
if tasks:
await asyncio.gather(*tasks, return_exceptions=True)
@staticmethod
async def __async_fetch_and_save_image(url: str):
"""
异步请求并保存图片
:param url: 图片路径
"""
if not settings.GLOBAL_IMAGE_CACHE or not url:
return
# 生成缓存路径
base_path = AsyncPath(settings.CACHE_PATH)
sanitized_path = SecurityUtils.sanitize_url_path(url)
cache_path = base_path / "images" / sanitized_path
# 没有文件类型,则添加后缀,在恶意文件类型和实际需求下的折衷选择
if not cache_path.suffix:
cache_path = cache_path.with_suffix(".jpg")
# 确保缓存路径和文件类型合法
if not await SecurityUtils.async_is_safe_path(base_path=base_path,
user_path=cache_path,
allowed_suffixes=settings.SECURITY_IMAGE_SUFFIXES):
logger.debug(f"Invalid cache path or file type for URL: {url}, sanitized path: {sanitized_path}")
return
# 本地存在缓存图片,则直接跳过
if await cache_path.exists():
logger.debug(f"Cache hit: Image already exists at {cache_path}")
return
# 请求远程图片
referer = "https://movie.douban.com/" if "doubanio.com" in url else None
proxies = settings.PROXY if not referer else None
response = await AsyncRequestUtils(ua=settings.NORMAL_USER_AGENT,
proxies=proxies, referer=referer).get_res(url=url)
if not response:
logger.debug(f"Empty response for URL: {url}")
return
# 验证下载的内容是否为有效图片
try:
Image.open(io.BytesIO(response.content)).verify()
except Exception as e:
logger.debug(f"Invalid image format for URL {url}: {e}")
return
if not cache_path:
return
try:
if not await cache_path.parent.exists():
await cache_path.parent.mkdir(parents=True, exist_ok=True)
async with aiofiles.tempfile.NamedTemporaryFile(dir=cache_path.parent, delete=False) as tmp_file:
await tmp_file.write(response.content)
temp_path = AsyncPath(tmp_file.name)
await temp_path.replace(cache_path)
logger.debug(f"Successfully cached image at {cache_path} for URL: {url}")
except Exception as e:
logger.debug(f"Failed to write cache file {cache_path} for URL {url}: {e}")
@log_execution_time(logger=logger)
@cached(ttl=recommend_ttl, region=recommend_cache_region)
async def async_tmdb_movies(self, sort_by: Optional[str] = "popularity.desc",

View File

@@ -1,8 +1,6 @@
import asyncio
import pickle
import random
import time
import traceback
from concurrent.futures import ThreadPoolExecutor, as_completed
from datetime import datetime
from typing import Dict, Tuple
@@ -59,7 +57,7 @@ class SearchChain(ChainBase):
results = self.process(mediainfo=mediainfo, sites=sites, area=area, no_exists=no_exists)
# 保存到本地文件
if cache_local:
self.save_cache(pickle.dumps(results), self.__result_temp_file)
self.save_cache(results, self.__result_temp_file)
return results
def search_by_title(self, title: str, page: Optional[int] = 0,
@@ -85,36 +83,20 @@ class SearchChain(ChainBase):
torrent_info=torrent) for torrent in torrents]
# 保存到本地文件
if cache_local:
self.save_cache(pickle.dumps(contexts), self.__result_temp_file)
self.save_cache(contexts, self.__result_temp_file)
return contexts
def last_search_results(self) -> List[Context]:
"""
获取上次搜索结果
"""
# 读取本地文件缓存
content = self.load_cache(self.__result_temp_file)
if not content:
return []
try:
return pickle.loads(content)
except Exception as e:
logger.error(f'加载搜索结果失败:{str(e)} - {traceback.format_exc()}')
return []
return self.load_cache(self.__result_temp_file)
async def async_last_search_results(self) -> List[Context]:
"""
异步获取上次搜索结果
"""
# 读取本地文件缓存
content = await self.async_load_cache(self.__result_temp_file)
if not content:
return []
try:
return pickle.loads(content)
except Exception as e:
logger.error(f'加载搜索结果失败:{str(e)} - {traceback.format_exc()}')
return []
return await self.async_load_cache(self.__result_temp_file)
async def async_search_by_id(self, tmdbid: Optional[int] = None, doubanid: Optional[str] = None,
mtype: MediaType = None, area: Optional[str] = "title", season: Optional[int] = None,
@@ -143,7 +125,7 @@ class SearchChain(ChainBase):
results = await self.async_process(mediainfo=mediainfo, sites=sites, area=area, no_exists=no_exists)
# 保存到本地文件
if cache_local:
await self.async_save_cache(pickle.dumps(results), self.__result_temp_file)
await self.async_save_cache(results, self.__result_temp_file)
return results
async def async_search_by_title(self, title: str, page: Optional[int] = 0,
@@ -169,7 +151,7 @@ class SearchChain(ChainBase):
torrent_info=torrent) for torrent in torrents]
# 保存到本地文件
if cache_local:
await self.async_save_cache(pickle.dumps(contexts), self.__result_temp_file)
await self.async_save_cache(contexts, self.__result_temp_file)
return contexts
@staticmethod
@@ -233,12 +215,11 @@ class SearchChain(ChainBase):
return []
# 开始新进度
progress = ProgressHelper()
progress.start(ProgressKey.Search)
progress = ProgressHelper(ProgressKey.Search)
progress.start()
# 开始过滤
progress.update(value=0, text=f'开始过滤,总 {len(torrents)} 个资源,请稍候...',
key=ProgressKey.Search)
progress.update(value=0, text=f'开始过滤,总 {len(torrents)} 个资源,请稍候...')
# 匹配订阅附加参数
if filter_params:
logger.info(f'开始附加参数过滤,附加参数:{filter_params} ...')
@@ -256,7 +237,7 @@ class SearchChain(ChainBase):
logger.info(f"过滤规则/剧集过滤完成,剩余 {len(torrents)} 个资源")
# 过滤完成
progress.update(value=50, text=f'过滤完成,剩余 {len(torrents)} 个资源', key=ProgressKey.Search)
progress.update(value=50, text=f'过滤完成,剩余 {len(torrents)} 个资源')
# 总数
_total = len(torrents)
@@ -269,14 +250,13 @@ class SearchChain(ChainBase):
try:
# 英文标题应该在别名/原标题中,不需要再匹配
logger.info(f"开始匹配结果 标题:{mediainfo.title},原标题:{mediainfo.original_title},别名:{mediainfo.names}")
progress.update(value=51, text=f'开始匹配,总 {_total} 个资源 ...', key=ProgressKey.Search)
progress.update(value=51, text=f'开始匹配,总 {_total} 个资源 ...')
for torrent in torrents:
if global_vars.is_system_stopped:
break
_count += 1
progress.update(value=(_count / _total) * 96,
text=f'正在匹配 {torrent.site_name},已完成 {_count} / {_total} ...',
key=ProgressKey.Search)
text=f'正在匹配 {torrent.site_name},已完成 {_count} / {_total} ...')
if not torrent.title:
continue
@@ -309,8 +289,7 @@ class SearchChain(ChainBase):
# 匹配完成
logger.info(f"匹配完成,共匹配到 {len(_match_torrents)} 个资源")
progress.update(value=97,
text=f'匹配完成,共匹配到 {len(_match_torrents)} 个资源',
key=ProgressKey.Search)
text=f'匹配完成,共匹配到 {len(_match_torrents)} 个资源')
# 去掉mediainfo中多余的数据
mediainfo.clear()
@@ -326,16 +305,14 @@ class SearchChain(ChainBase):
# 排序
progress.update(value=99,
text=f'正在对 {len(contexts)} 个资源进行排序,请稍候...',
key=ProgressKey.Search)
text=f'正在对 {len(contexts)} 个资源进行排序,请稍候...')
contexts = torrenthelper.sort_torrents(contexts)
# 结束进度
logger.info(f'搜索完成,共 {len(contexts)} 个资源')
progress.update(value=100,
text=f'搜索完成,共 {len(contexts)} 个资源',
key=ProgressKey.Search)
progress.end(ProgressKey.Search)
text=f'搜索完成,共 {len(contexts)} 个资源')
progress.end()
# 去重后返回
return self.__remove_duplicate(contexts)
@@ -539,8 +516,8 @@ class SearchChain(ChainBase):
return []
# 开始进度
progress = ProgressHelper()
progress.start(ProgressKey.Search)
progress = ProgressHelper(ProgressKey.Search)
progress.start()
# 开始计时
start_time = datetime.now()
# 总数
@@ -549,8 +526,7 @@ class SearchChain(ChainBase):
finish_count = 0
# 更新进度
progress.update(value=0,
text=f"开始搜索,共 {total_num} 个站点 ...",
key=ProgressKey.Search)
text=f"开始搜索,共 {total_num} 个站点 ...")
# 结果集
results = []
# 多线程
@@ -579,17 +555,15 @@ class SearchChain(ChainBase):
results.extend(result)
logger.info(f"站点搜索进度:{finish_count} / {total_num}")
progress.update(value=finish_count / total_num * 100,
text=f"正在搜索{keyword or ''},已完成 {finish_count} / {total_num} 个站点 ...",
key=ProgressKey.Search)
text=f"正在搜索{keyword or ''},已完成 {finish_count} / {total_num} 个站点 ...")
# 计算耗时
end_time = datetime.now()
# 更新进度
progress.update(value=100,
text=f"站点搜索完成,有效资源数:{len(results)},总耗时 {(end_time - start_time).seconds}",
key=ProgressKey.Search)
text=f"站点搜索完成,有效资源数:{len(results)},总耗时 {(end_time - start_time).seconds}")
logger.info(f"站点搜索完成,有效资源数:{len(results)},总耗时 {(end_time - start_time).seconds}")
# 结束进度
progress.end(ProgressKey.Search)
progress.end()
# 返回
return results
@@ -624,8 +598,8 @@ class SearchChain(ChainBase):
return []
# 开始进度
progress = ProgressHelper()
progress.start(ProgressKey.Search)
progress = ProgressHelper(ProgressKey.Search)
progress.start()
# 开始计时
start_time = datetime.now()
# 总数
@@ -634,8 +608,7 @@ class SearchChain(ChainBase):
finish_count = 0
# 更新进度
progress.update(value=0,
text=f"开始搜索,共 {total_num} 个站点 ...",
key=ProgressKey.Search)
text=f"开始搜索,共 {total_num} 个站点 ...")
# 结果集
results = []
@@ -666,18 +639,16 @@ class SearchChain(ChainBase):
results.extend(result)
logger.info(f"站点搜索进度:{finish_count} / {total_num}")
progress.update(value=finish_count / total_num * 100,
text=f"正在搜索{keyword or ''},已完成 {finish_count} / {total_num} 个站点 ...",
key=ProgressKey.Search)
text=f"正在搜索{keyword or ''},已完成 {finish_count} / {total_num} 个站点 ...")
# 计算耗时
end_time = datetime.now()
# 更新进度
progress.update(value=100,
text=f"站点搜索完成,有效资源数:{len(results)},总耗时 {(end_time - start_time).seconds}",
key=ProgressKey.Search)
text=f"站点搜索完成,有效资源数:{len(results)},总耗时 {(end_time - start_time).seconds}")
logger.info(f"站点搜索完成,有效资源数:{len(results)},总耗时 {(end_time - start_time).seconds}")
# 结束进度
progress.end(ProgressKey.Search)
progress.end()
# 返回
return results

View File

@@ -313,11 +313,16 @@ class SiteChain(ChainBase):
siteoper = SiteOper()
rsshelper = RssHelper()
for domain, cookie in cookies.items():
# 检查系统是否停止
if global_vars.is_system_stopped:
logger.info("系统正在停止中断CookieCloud同步")
return False, "系统正在停止,同步被中断"
# 索引器信息
indexer = siteshelper.get_indexer(domain)
# 数据库的站点信息
site_info = siteoper.get_by_domain(domain)
if site_info and site_info.is_active == 1:
if site_info and site_info.is_active:
# 站点已存在,检查站点连通性
status, msg = self.test(domain)
# 更新站点Cookie
@@ -331,7 +336,7 @@ class SiteChain(ChainBase):
cookie=cookie,
ua=site_info.ua or settings.USER_AGENT,
proxy=True if site_info.proxy else False,
timeout=site_info.timeout
timeout=site_info.timeout or 15
)
if rss_url:
logger.info(f"更新站点 {domain} RSS地址 ...")

View File

@@ -6,7 +6,6 @@ from app.chain import ChainBase
from app.core.config import settings
from app.helper.directory import DirectoryHelper
from app.log import logger
from app.schemas import MediaType
class StorageChain(ChainBase):
@@ -134,8 +133,7 @@ class StorageChain(ChainBase):
"""
return self.run_module("support_transtype", storage=storage)
def delete_media_file(self, fileitem: schemas.FileItem,
mtype: MediaType = None, delete_self: bool = True) -> bool:
def delete_media_file(self, fileitem: schemas.FileItem, delete_self: bool = True) -> bool:
"""
删除媒体文件,以及不含媒体文件的目录
"""
@@ -152,7 +150,8 @@ class StorageChain(ChainBase):
return False
media_exts = settings.RMT_MEDIAEXT + settings.DOWNLOAD_TMPEXT
if fileitem.path == "/" or len(Path(fileitem.path).parts) <= 2:
fileitem_path = Path(fileitem.path) if fileitem.path else Path("")
if len(fileitem_path.parts) <= 2:
logger.warn(f"{fileitem.storage}{fileitem.path} 根目录或一级目录不允许删除")
return False
if fileitem.type == "dir":
@@ -162,13 +161,7 @@ class StorageChain(ChainBase):
if not self.delete_file(fileitem):
logger.warn(f"{fileitem.storage}{fileitem.path} 删除失败")
return False
elif self.any_files(fileitem, extensions=media_exts) is False:
logger.warn(f"{fileitem.storage}{fileitem.path} 不存在其它媒体文件,正在删除空目录")
if not self.delete_file(fileitem):
logger.warn(f"{fileitem.storage}{fileitem.path} 删除失败")
return False
# 不处理父目录
return True
elif delete_self:
# 本身是文件,需要删除文件
logger.warn(f"正在删除文件【{fileitem.storage}{fileitem.path}")
@@ -176,35 +169,43 @@ class StorageChain(ChainBase):
logger.warn(f"{fileitem.storage}{fileitem.path} 删除失败")
return False
if mtype:
# 重命名格式
rename_format = settings.RENAME_FORMAT(mtype)
media_path = DirectoryHelper.get_media_root_path(
rename_format, rename_path=Path(fileitem.path)
)
if not media_path:
return True
# 处理媒体文件根目录
dir_item = self.get_file_item(storage=fileitem.storage, path=media_path)
else:
# 处理上级目录
dir_item = self.get_parent_item(fileitem)
# 检查和删除上级空目录
dir_item = fileitem if fileitem.type == "dir" else self.get_parent_item(fileitem)
if not dir_item:
logger.warn(f"{fileitem.storage}{fileitem.path} 上级目录不存在")
return False
# 检查和删除上级目录
if dir_item and len(Path(dir_item.path).parts) > 2:
# 如果目录是所有下载目录、媒体库目录的上级,则不处理
for d in DirectoryHelper().get_dirs():
if d.download_path and Path(d.download_path).is_relative_to(Path(dir_item.path)):
logger.debug(f"{dir_item.storage}{dir_item.path} 是下载目录本级或上级目录,不删除")
return True
if d.library_path and Path(d.library_path).is_relative_to(Path(dir_item.path)):
logger.debug(f"{dir_item.storage}{dir_item.path} 是媒体库目录本级或上级目录,不删除")
return True
# 不存在其他媒体文件,删除空目录
if self.any_files(dir_item, extensions=media_exts) is False:
logger.warn(f"{dir_item.storage}{dir_item.path} 不存在其它媒体文件,正在删除空目录")
if not self.delete_file(dir_item):
logger.warn(f"{dir_item.storage}{dir_item.path} 删除失败")
return False
# 查找操作文件项匹配的配置目录(资源目录、媒体库目录)
associated_dir = max(
(
Path(p)
for d in DirectoryHelper().get_dirs()
for p in (d.download_path, d.library_path)
if p and fileitem_path.is_relative_to(p)
),
key=lambda path: len(path.parts),
default=None,
)
while dir_item and len(Path(dir_item.path).parts) > 2:
# 目录是资源目录、媒体库目录的上级,则不处理
if associated_dir and associated_dir.is_relative_to(Path(dir_item.path)):
logger.debug(f"{dir_item.storage}{dir_item.path} 位于资源或媒体库目录结构中,不删除")
break
elif not associated_dir and self.list_files(dir_item, recursion=False):
logger.debug(f"{dir_item.storage}{dir_item.path} 不是空目录,不删除")
break
if self.any_files(dir_item, extensions=media_exts) is not False:
logger.debug(f"{dir_item.storage}{dir_item.path} 存在媒体文件,不删除")
break
# 删除空目录并继续处理父目录
logger.warn(f"{dir_item.storage}{dir_item.path} 不存在其它媒体文件,正在删除空目录")
if not self.delete_file(dir_item):
logger.warn(f"{dir_item.storage}{dir_item.path} 删除失败")
return False
dir_item = self.get_parent_item(dir_item)
return True
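The rewritten cleanup walks upward from the deleted file instead of checking a single parent level. The key step is picking the deepest configured root that contains the file; a standalone sketch with hypothetical paths:

from pathlib import Path

# Hypothetical configured download/library roots
roots = [Path("/downloads"), Path("/library/movies")]
fileitem_path = Path("/library/movies/Avatar (2009)/Avatar.mkv")

# Deepest root containing the file, mirroring the max() selection above
associated_dir = max(
    (p for p in roots if fileitem_path.is_relative_to(p)),
    key=lambda p: len(p.parts),
    default=None,
)
print(associated_dir)  # /library/movies -> upward deletion stops at this root

Each empty ancestor below that root is then deleted in turn; the loop breaks at the root itself, at a non-empty directory, or when media files remain.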

View File

@@ -1184,6 +1184,42 @@ class SubscribeChain(ChainBase):
logger.error(f'follow用户分享订阅 {title} 添加失败:{message}')
logger.info(f'follow用户分享订阅刷新完成共添加 {success_count} 个订阅')
async def cache_calendar(self):
"""
预缓存订阅日历,实际上就是查询一遍所有订阅的媒体信息
前端请求是异步的,所以需要使用异步缓存方法
"""
logger.info(f'开始预缓存订阅日历 ...')
for subscribe in await SubscribeOper().async_list():
if global_vars.is_system_stopped:
break
try:
mtype = MediaType(subscribe.type)
except ValueError:
logger.error(f'订阅 {subscribe.name} 类型错误:{subscribe.type}')
continue
# 识别媒体信息
if mtype == MediaType.MOVIE:
mediainfo: MediaInfo = await self.async_recognize_media(mtype=mtype,
tmdbid=subscribe.tmdbid,
doubanid=subscribe.doubanid,
bangumiid=subscribe.bangumiid,
episode_group=subscribe.episode_group,
cache=False)
if not mediainfo:
logger.warn(
f'未识别到媒体信息,标题:{subscribe.name},tmdbid:{subscribe.tmdbid},doubanid:{subscribe.doubanid}')
continue
else:
episodes = await TmdbChain().async_tmdb_episodes(tmdbid=subscribe.tmdbid,
season=subscribe.season,
episode_group=subscribe.episode_group)
if not episodes:
logger.warn(
f'未识别到季集信息,标题:{subscribe.name},tmdbid:{subscribe.tmdbid},豆瓣ID:{subscribe.doubanid},季:{subscribe.season}')
continue
logger.info(f'订阅日历预缓存完成')
@staticmethod
def __update_subscribe_note(subscribe: Subscribe, downloads: Optional[List[Context]]):
"""

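cache_calendar is a coroutine because the calendar endpoint is served asynchronously, so recognition results land in the async cache the frontend actually reads. A hedged sketch of triggering the warm-up from synchronous startup code (the SubscribeChain import path is an assumption):

import asyncio

from app.chain.subscribe import SubscribeChain  # import path assumed

async def warm_calendar_cache():
    # Queries every subscription once so later calendar requests hit the cache
    await SubscribeChain().cache_calendar()

asyncio.run(warm_calendar_cache())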
View File

@@ -501,7 +501,8 @@ class TransferChain(ChainBase, metaclass=Singleton):
# 获取整理屏蔽词
transfer_exclude_words = SystemConfigOper().get(SystemConfigKey.TransferExcludeWords)
for t in tasks:
if t.download_hash and self._can_delete_torrent(t.download_hash, t.downloader, transfer_exclude_words):
if t.download_hash and self._can_delete_torrent(t.download_hash, t.downloader,
transfer_exclude_words):
if self.remove_torrents(t.download_hash, downloader=t.downloader):
logger.info(f"移动模式删除种子成功:{t.download_hash}")
if t.fileitem:
@@ -554,8 +555,10 @@ class TransferChain(ChainBase, metaclass=Singleton):
processed_num = 0
# 失败数量
fail_num = 0
# 已完成文件
finished_files = []
progress = ProgressHelper()
progress = ProgressHelper(ProgressKey.FileTransfer)
while not global_vars.is_system_stopped:
try:
@@ -570,7 +573,7 @@ class TransferChain(ChainBase, metaclass=Singleton):
if __queue_start:
logger.info("开始整理队列处理...")
# 启动进度
progress.start(ProgressKey.FileTransfer)
progress.start()
# 重置计数
processed_num = 0
fail_num = 0
@@ -578,8 +581,7 @@ class TransferChain(ChainBase, metaclass=Singleton):
__process_msg = f"开始整理队列处理,当前共 {total_num} 个文件 ..."
logger.info(__process_msg)
progress.update(value=0,
text=__process_msg,
key=ProgressKey.FileTransfer)
text=__process_msg)
# 队列已开始
__queue_start = False
# 更新进度
@@ -587,7 +589,10 @@ class TransferChain(ChainBase, metaclass=Singleton):
logger.info(__process_msg)
progress.update(value=processed_num / total_num * 100,
text=__process_msg,
key=ProgressKey.FileTransfer)
data={
"current": Path(fileitem.path).as_posix(),
"finished":finished_files
})
# 整理
state, err_msg = self.__handle_transfer(task=task, callback=item.callback)
if not state:
@@ -595,20 +600,20 @@ class TransferChain(ChainBase, metaclass=Singleton):
fail_num += 1
# 更新进度
processed_num += 1
finished_files.append(Path(fileitem.path).as_posix())
__process_msg = f"{fileitem.name} 整理完成"
logger.info(__process_msg)
progress.update(value=processed_num / total_num * 100,
progress.update(value=(processed_num / total_num) * 100,
text=__process_msg,
key=ProgressKey.FileTransfer)
data={})
except queue.Empty:
if not __queue_start:
# 结束进度
__end_msg = f"整理队列处理完成,共整理 {processed_num} 个文件,失败 {fail_num}"
logger.info(__end_msg)
progress.update(value=100,
text=__end_msg,
key=ProgressKey.FileTransfer)
progress.end(ProgressKey.FileTransfer)
text=__end_msg)
progress.end()
# 重置计数
processed_num = 0
fail_num = 0
@@ -1164,15 +1169,16 @@ class TransferChain(ChainBase, metaclass=Singleton):
processed_num = 0
# 失败数量
fail_num = 0
# 已完成文件
finished_files = []
# 启动进度
progress = ProgressHelper()
progress.start(ProgressKey.FileTransfer)
progress = ProgressHelper(ProgressKey.FileTransfer)
progress.start()
__process_msg = f"开始整理,共 {total_num} 个文件 ..."
logger.info(__process_msg)
progress.update(value=0,
text=__process_msg,
key=ProgressKey.FileTransfer)
text=__process_msg)
try:
for transfer_task in transfer_tasks:
if global_vars.is_system_stopped:
@@ -1184,7 +1190,10 @@ class TransferChain(ChainBase, metaclass=Singleton):
logger.info(__process_msg)
progress.update(value=(processed_num + fail_num) / total_num * 100,
text=__process_msg,
key=ProgressKey.FileTransfer)
data={
"current": Path(transfer_task.fileitem.path).as_posix(),
"finished": finished_files,
})
state, err_msg = self.__handle_transfer(
task=transfer_task,
callback=self.__default_callback
@@ -1196,6 +1205,8 @@ class TransferChain(ChainBase, metaclass=Singleton):
fail_num += 1
else:
processed_num += 1
# 记录已完成
finished_files.append(Path(transfer_task.fileitem.path).as_posix())
finally:
transfer_tasks.clear()
del transfer_tasks
@@ -1205,8 +1216,8 @@ class TransferChain(ChainBase, metaclass=Singleton):
logger.info(__end_msg)
progress.update(value=100,
text=__end_msg,
key=ProgressKey.FileTransfer)
progress.end(ProgressKey.FileTransfer)
data={})
progress.end()
error_msg = "".join(err_msgs[:2]) + (f",等{len(err_msgs)}个文件错误!" if len(err_msgs) > 2 else "")
return all_success, error_msg
@@ -1351,12 +1362,7 @@ class TransferChain(ChainBase, metaclass=Singleton):
else:
# 更新媒体图片
self.obtain_images(mediainfo=mediainfo)
# 开始进度
progress = ProgressHelper()
progress.start(ProgressKey.FileTransfer)
progress.update(value=0,
text=f"开始整理 {fileitem.path} ...",
key=ProgressKey.FileTransfer)
# 开始整理
state, errmsg = self.do_transfer(
fileitem=fileitem,
@@ -1377,7 +1383,6 @@ class TransferChain(ChainBase, metaclass=Singleton):
if not state:
return False, errmsg
progress.end(ProgressKey.FileTransfer)
logger.info(f"{fileitem.path} 整理完成")
return True, ""
else:
@@ -1435,7 +1440,6 @@ class TransferChain(ChainBase, metaclass=Singleton):
return True
return False
def _can_delete_torrent(self, download_hash: str, downloader: str, transfer_exclude_words) -> bool:
"""
检查是否可以删除种子文件
@@ -1468,11 +1472,11 @@ class TransferChain(ChainBase, metaclass=Singleton):
file_path = save_path / file.name
# 如果存在未被屏蔽的媒体文件,则不删除种子
if (
file_path.suffix in self.all_exts
and not self._is_blocked_by_exclude_words(
str(file_path), transfer_exclude_words
)
and file_path.exists()
file_path.suffix in self.all_exts
and not self._is_blocked_by_exclude_words(
str(file_path), transfer_exclude_words
)
and file_path.exists()
):
return False

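With the new data={"current": ..., "finished": [...]} payload added above, any consumer holding a helper with the same key can poll per-file transfer state. A minimal sketch (module paths assumed):

from app.helper.progress import ProgressHelper  # module path assumed
from app.schemas.types import ProgressKey

detail = ProgressHelper(ProgressKey.FileTransfer).get() or {}
print(detail.get("value"), detail.get("text"))
print(detail.get("data", {}).get("current"))   # file currently being organized
print(detail.get("data", {}).get("finished"))  # posix paths already completed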
File diff suppressed because it is too large

View File

@@ -42,10 +42,6 @@ class SystemConfModel(BaseModel):
scheduler: int = 0
# 线程池大小
threadpool: int = 0
# 数据库连接池大小
dbpool: int = 0
# 数据库连接池溢出数量
dbpooloverflow: int = 0
class ConfigModel(BaseModel):
@@ -56,6 +52,7 @@ class ConfigModel(BaseModel):
class Config:
extra = "ignore" # 忽略未定义的配置项
# ==================== 基础应用配置 ====================
# 项目名称
PROJECT_NAME: str = "MoviePilot"
# 域名,格式:https://movie-pilot.org
@@ -64,6 +61,22 @@ class ConfigModel(BaseModel):
API_V1_STR: str = "/api/v1"
# 前端资源路径
FRONTEND_PATH: str = "/public"
# 时区
TZ: str = "Asia/Shanghai"
# API监听地址
HOST: str = "0.0.0.0"
# API监听端口
PORT: int = 3001
# 前端监听端口
NGINX_PORT: int = 3000
# 配置文件目录
CONFIG_DIR: Optional[str] = None
# 是否调试模式
DEBUG: bool = False
# 是否开发模式
DEV: bool = False
# ==================== 安全认证配置 ====================
# 密钥
SECRET_KEY: str = secrets.token_urlsafe(32)
# RESOURCE密钥
@@ -74,20 +87,24 @@ class ConfigModel(BaseModel):
ACCESS_TOKEN_EXPIRE_MINUTES: int = 60 * 24 * 8
# RESOURCE_TOKEN过期时间
RESOURCE_ACCESS_TOKEN_EXPIRE_SECONDS: int = 60 * 30
# 时区
TZ: str = "Asia/Shanghai"
# API监听地址
HOST: str = "0.0.0.0"
# API监听端口
PORT: int = 3001
# 前端监听端口
NGINX_PORT: int = 3000
# 是否调试模式
DEBUG: bool = False
# 是否开发模式
DEV: bool = False
# 超级管理员
SUPERUSER: str = "admin"
# 辅助认证,允许通过外部服务进行认证、单点登录以及自动创建用户
AUXILIARY_AUTH_ENABLE: bool = False
# API密钥,需要更换
API_TOKEN: Optional[str] = None
# 用户认证站点
AUTH_SITE: str = ""
# ==================== 数据库配置 ====================
# 数据库类型,支持 sqlite 和 postgresql,默认使用 sqlite
DB_TYPE: str = "sqlite"
# 是否在控制台输出 SQL 语句,默认关闭
DB_ECHO: bool = False
# 数据库连接超时时间(秒),默认为 60 秒
DB_TIMEOUT: int = 60
# 是否启用 WAL 模式(仅适用于SQLite),默认开启
DB_WAL_ENABLE: bool = True
# 数据库连接池类型QueuePool, NullPool
DB_POOL_TYPE: str = "QueuePool"
# 是否在获取连接时进行预先 ping 操作
@@ -96,71 +113,44 @@ class ConfigModel(BaseModel):
DB_POOL_RECYCLE: int = 300
# 数据库连接池获取连接的超时时间(秒)
DB_POOL_TIMEOUT: int = 30
# SQLite 的 busy_timeout 参数,默认为 60 秒
DB_TIMEOUT: int = 60
# SQLite 是否启用 WAL 模式,默认开启
DB_WAL_ENABLE: bool = True
# SQLite 连接池大小
DB_SQLITE_POOL_SIZE: int = 30
# SQLite 连接池溢出数量
DB_SQLITE_MAX_OVERFLOW: int = 50
# PostgreSQL 主机地址
DB_POSTGRESQL_HOST: str = "localhost"
# PostgreSQL 端口
DB_POSTGRESQL_PORT: int = 5432
# PostgreSQL 数据库名
DB_POSTGRESQL_DATABASE: str = "moviepilot"
# PostgreSQL 用户名
DB_POSTGRESQL_USERNAME: str = "moviepilot"
# PostgreSQL 密码
DB_POSTGRESQL_PASSWORD: str = "moviepilot"
# PostgreSQL 连接池大小
DB_POSTGRESQL_POOL_SIZE: int = 30
# PostgreSQL 连接池溢出数量
DB_POSTGRESQL_MAX_OVERFLOW: int = 50
# ==================== 缓存配置 ====================
# 缓存类型,支持 cachetools 和 redis,默认使用 cachetools
CACHE_BACKEND_TYPE: str = "cachetools"
# 缓存连接字符串,仅外部缓存(如 Redis、Memcached)需要
CACHE_BACKEND_URL: Optional[str] = None
CACHE_BACKEND_URL: Optional[str] = "redis://localhost:6379"
# Redis 缓存最大内存限制,未配置时,如开启大内存模式时为 "1024mb",未开启时为 "256mb"
CACHE_REDIS_MAXMEMORY: Optional[str] = None
# 配置文件目录
CONFIG_DIR: Optional[str] = None
# 超级管理员
SUPERUSER: str = "admin"
# 辅助认证,允许通过外部服务进行认证、单点登录以及自动创建用户
AUXILIARY_AUTH_ENABLE: bool = False
# API密钥需要更换
API_TOKEN: Optional[str] = None
# 全局图片缓存,将媒体图片缓存到本地
GLOBAL_IMAGE_CACHE: bool = False
# 全局图片缓存保留天数
GLOBAL_IMAGE_CACHE_DAYS: int = 7
# 临时文件保留天数
TEMP_FILE_DAYS: int = 3
# 元数据识别缓存过期时间(小时),0为自动
META_CACHE_EXPIRE: int = 0
# ==================== 网络代理配置 ====================
# 网络代理服务器地址
PROXY_HOST: Optional[str] = None
# 登录页面电影海报,tmdb/bing/mediaserver
WALLPAPER: str = "tmdb"
# 自定义壁纸api地址
CUSTOMIZE_WALLPAPER_API_URL: Optional[str] = None
# 媒体搜索来源 themoviedb/douban/bangumi,多个用,分隔
SEARCH_SOURCE: str = "themoviedb,douban,bangumi"
# 媒体识别来源 themoviedb/douban
RECOGNIZE_SOURCE: str = "themoviedb"
# 刮削来源 themoviedb/douban
SCRAP_SOURCE: str = "themoviedb"
# 新增已入库媒体是否跟随TMDB信息变化
SCRAP_FOLLOW_TMDB: bool = True
# TMDB图片地址
TMDB_IMAGE_DOMAIN: str = "image.tmdb.org"
# TMDB API地址
TMDB_API_DOMAIN: str = "api.themoviedb.org"
# TMDB元数据语言
TMDB_LOCALE: str = "zh"
# 刮削使用TMDB原始语种图片
TMDB_SCRAP_ORIGINAL_IMAGE: bool = False
# TMDB API Key
TMDB_API_KEY: str = "db55323b8d3e4154498498a75642b381"
# TVDB API Key
TVDB_V4_API_KEY: str = "ed2aa66b-7899-4677-92a7-67bc9ce3d93a"
TVDB_V4_API_PIN: str = ""
# Fanart开关
FANART_ENABLE: bool = True
# Fanart语言
FANART_LANG: str = "zh,en"
# Fanart API Key
FANART_API_KEY: str = "d2d31f9ecabea050fc7d68aa3146015f"
# 115 AppId
U115_APP_ID: str = "100196807"
# Alipan AppId
ALIPAN_APP_ID: str = "ac1bf04dc9fd4d9aaabb65b4a668d403"
# 元数据识别缓存过期时间(小时)
META_CACHE_EXPIRE: int = 0
# 电视剧动漫的分类genre_ids
ANIME_GENREIDS: List[int] = Field(default=[16])
# 用户认证站点
AUTH_SITE: str = ""
# 重启自动升级
MOVIEPILOT_AUTO_UPDATE: str = 'release'
# 自动检查和更新站点资源包(站点索引、认证等)
AUTO_UPDATE_RESOURCE: bool = True
# 是否启用DOH解析域名
DOH_ENABLE: bool = False
# 使用 DOH 解析的域名列表
@@ -174,6 +164,55 @@ class ConfigModel(BaseModel):
"api.telegram.org")
# DOH 解析服务器列表
DOH_RESOLVERS: str = "1.0.0.1,1.1.1.1,9.9.9.9,149.112.112.112"
# ==================== 媒体元数据配置 ====================
# 媒体搜索来源 themoviedb/douban/bangumi,多个用,分隔
SEARCH_SOURCE: str = "themoviedb,douban,bangumi"
# 媒体识别来源 themoviedb/douban
RECOGNIZE_SOURCE: str = "themoviedb"
# 刮削来源 themoviedb/douban
SCRAP_SOURCE: str = "themoviedb"
# 电视剧动漫的分类genre_ids
ANIME_GENREIDS: List[int] = Field(default=[16])
# ==================== TMDB配置 ====================
# TMDB图片地址
TMDB_IMAGE_DOMAIN: str = "image.tmdb.org"
# TMDB API地址
TMDB_API_DOMAIN: str = "api.themoviedb.org"
# TMDB元数据语言
TMDB_LOCALE: str = "zh"
# 刮削使用TMDB原始语种图片
TMDB_SCRAP_ORIGINAL_IMAGE: bool = False
# TMDB API Key
TMDB_API_KEY: str = "db55323b8d3e4154498498a75642b381"
# ==================== TVDB配置 ====================
# TVDB API Key
TVDB_V4_API_KEY: str = "ed2aa66b-7899-4677-92a7-67bc9ce3d93a"
TVDB_V4_API_PIN: str = ""
# ==================== Fanart配置 ====================
# Fanart开关
FANART_ENABLE: bool = True
# Fanart语言
FANART_LANG: str = "zh,en"
# Fanart API Key
FANART_API_KEY: str = "d2d31f9ecabea050fc7d68aa3146015f"
# ==================== 云盘配置 ====================
# 115 AppId
U115_APP_ID: str = "100196807"
# Alipan AppId
ALIPAN_APP_ID: str = "ac1bf04dc9fd4d9aaabb65b4a668d403"
# ==================== 系统升级配置 ====================
# 重启自动升级
MOVIEPILOT_AUTO_UPDATE: str = 'release'
# 自动检查和更新站点资源包(站点索引、认证等)
AUTO_UPDATE_RESOURCE: bool = True
# ==================== 媒体文件格式配置 ====================
# 支持的后缀格式
RMT_MEDIAEXT: list = Field(
default_factory=lambda: ['.mp4', '.mkv', '.ts', '.iso',
@@ -196,10 +235,12 @@ class ConfigModel(BaseModel):
'.aifc', '.aiff', '.alac', '.adif', '.adts',
'.flac', '.midi', '.opus', '.sfalc']
)
# 下载器临时文件后缀
DOWNLOAD_TMPEXT: list = Field(default_factory=lambda: ['.!qb', '.part'])
# ==================== 媒体服务器配置 ====================
# 媒体服务器同步间隔(小时)
MEDIASERVER_SYNC_INTERVAL: int = 6
# ==================== 订阅配置 ====================
# 订阅模式
SUBSCRIBE_MODE: str = "spider"
# RSS订阅模式刷新时间间隔(分钟)
@@ -210,22 +251,38 @@ class ConfigModel(BaseModel):
SUBSCRIBE_SEARCH: bool = False
# 检查本地媒体库是否存在资源开关
LOCAL_EXISTS_SEARCH: bool = False
# 搜索多个名称
SEARCH_MULTIPLE_NAME: bool = False
# 最大搜索名称数量
MAX_SEARCH_NAME_LIMIT: int = 2
# ==================== 站点配置 ====================
# 站点数据刷新间隔(小时)
SITEDATA_REFRESH_INTERVAL: int = 6
# 读取和发送站点消息
SITE_MESSAGE: bool = True
# 不能缓存站点资源的站点域名,多个使用,分隔
NO_CACHE_SITE_KEY: str = "m-team"
# OCR服务器地址,用于识别站点验证码
OCR_HOST: str = "https://movie-pilot.org"
# 仿真类型playwright 或 flaresolverr
BROWSER_EMULATION: str = "playwright"
# FlareSolverr 服务地址,例如 http://127.0.0.1:8191
FLARESOLVERR_URL: Optional[str] = None
# ==================== 搜索配置 ====================
# 搜索多个名称
SEARCH_MULTIPLE_NAME: bool = False
# 最大搜索名称数量
MAX_SEARCH_NAME_LIMIT: int = 2
# ==================== 下载配置 ====================
# 种子标签
TORRENT_TAG: str = "MOVIEPILOT"
# 下载站点字幕
DOWNLOAD_SUBTITLE: bool = True
# 交互搜索自动下载用户ID,使用,分割
AUTO_DOWNLOAD_USER: Optional[str] = None
# 下载器临时文件后缀
DOWNLOAD_TMPEXT: list = Field(default_factory=lambda: ['.!qb', '.part'])
# ==================== CookieCloud配置 ====================
# CookieCloud是否启动本地服务
COOKIECLOUD_ENABLE_LOCAL: Optional[bool] = False
# CookieCloud服务器地址
@@ -238,6 +295,8 @@ class ConfigModel(BaseModel):
COOKIECLOUD_INTERVAL: Optional[int] = 60 * 24
# CookieCloud同步黑名单,多个域名,分割
COOKIECLOUD_BLACKLIST: Optional[str] = None
# ==================== 整理配置 ====================
# 电影重命名格式
MOVIE_RENAME_FORMAT: str = "{{title}}{% if year %} ({{year}}){% endif %}" \
"/{{title}}{% if year %} ({{year}}){% endif %}{% if part %}-{{part}}{% endif %}{% if videoFormat %} - {{videoFormat}}{% endif %}" \
@@ -247,10 +306,24 @@ class ConfigModel(BaseModel):
"/Season {{season}}" \
"/{{title}} - {{season_episode}}{% if part %}-{{part}}{% endif %}{% if episode %} - 第 {{episode}} 集{% endif %}" \
"{{fileExt}}"
# OCR服务器地址
OCR_HOST: str = "https://movie-pilot.org"
# 重命名时支持的S0别名
RENAME_FORMAT_S0_NAMES: list = Field(default=["Specials", "SPs"])
# 为指定默认字幕添加.default后缀
DEFAULT_SUB: Optional[str] = "zh-cn"
# 新增已入库媒体是否跟随TMDB信息变化
SCRAP_FOLLOW_TMDB: bool = True
# ==================== 服务地址配置 ====================
# 服务器地址,对应 https://github.com/jxxghp/MoviePilot-Server 项目
MP_SERVER_HOST: str = "https://movie-pilot.org"
# ==================== 个性化 ====================
# 登录页面电影海报,tmdb/bing/mediaserver
WALLPAPER: str = "tmdb"
# 自定义壁纸api地址
CUSTOMIZE_WALLPAPER_API_URL: Optional[str] = None
# ==================== 插件配置 ====================
# 插件市场仓库地址,多个地址使用,分隔,地址以/结尾
PLUGIN_MARKET: str = ("https://github.com/jxxghp/MoviePilot-Plugins,"
"https://github.com/thsrite/MoviePilot-Plugins,"
@@ -271,6 +344,8 @@ class ConfigModel(BaseModel):
PLUGIN_STATISTIC_SHARE: bool = True
# 是否开启插件热加载
PLUGIN_AUTO_RELOAD: bool = False
# ==================== Github & PIP ====================
# Github token,提高请求api限流阈值 ghp_****
GITHUB_TOKEN: Optional[str] = None
# Github代理服务器,格式:https://mirror.ghproxy.com/
@@ -279,16 +354,18 @@ class ConfigModel(BaseModel):
PIP_PROXY: Optional[str] = ''
# 指定的仓库Github token多个仓库使用,分隔,格式:{user1}/{repo1}:ghp_****,{user2}/{repo2}:github_pat_****
REPO_GITHUB_TOKEN: Optional[str] = None
# ==================== 性能配置 ====================
# 大内存模式
BIG_MEMORY_MODE: bool = False
# FastApi性能监控
PERFORMANCE_MONITOR_ENABLE: bool = False
# 全局图片缓存,将媒体图片缓存到本地
GLOBAL_IMAGE_CACHE: bool = False
# 是否启用编码探测的性能模式
ENCODING_DETECTION_PERFORMANCE_MODE: bool = True
# 编码探测的最低置信度阈值
ENCODING_DETECTION_MIN_CONFIDENCE: float = 0.8
# ==================== 安全配置 ====================
# 允许的图片缓存域名
SECURITY_IMAGE_DOMAINS: list = Field(default=[
"image.tmdb.org",
@@ -308,22 +385,20 @@ class ConfigModel(BaseModel):
])
# 允许的图片文件后缀格式
SECURITY_IMAGE_SUFFIXES: list = Field(default=[".jpg", ".jpeg", ".png", ".webp", ".gif", ".svg", ".avif"])
# 重命名时支持的S0别名
RENAME_FORMAT_S0_NAMES: list = Field(default=["Specials", "SPs"])
# 为指定默认字幕添加.default后缀
DEFAULT_SUB: Optional[str] = "zh-cn"
# Docker Client API地址
DOCKER_CLIENT_API: Optional[str] = "tcp://127.0.0.1:38379"
# ==================== 工作流配置 ====================
# 工作流数据共享
WORKFLOW_STATISTIC_SHARE: bool = True
# ==================== 存储配置 ====================
# 对rclone进行快照对比时是否检查文件夹的修改时间
RCLONE_SNAPSHOT_CHECK_FOLDER_MODTIME: bool = True
# 对OpenList进行快照对比时是否检查文件夹的修改时间
OPENLIST_SNAPSHOT_CHECK_FOLDER_MODTIME: bool = True
# 仿真类型playwright 或 flaresolverr
BROWSER_EMULATION: str = "playwright"
# FlareSolverr 服务地址,例如 http://127.0.0.1:8191
FLARESOLVERR_URL: Optional[str] = None
# ==================== Docker配置 ====================
# Docker Client API地址
DOCKER_CLIENT_API: Optional[str] = "tcp://127.0.0.1:38379"
class Settings(BaseSettings, ConfigModel, LogConfigModel):
@@ -590,9 +665,7 @@ class Settings(BaseSettings, ConfigModel, LogConfigModel):
fanart=512,
meta=(self.META_CACHE_EXPIRE or 24) * 3600,
scheduler=100,
threadpool=100,
dbpool=100,
dbpooloverflow=50
threadpool=100
)
return SystemConfModel(
torrents=100,
@@ -603,9 +676,7 @@ class Settings(BaseSettings, ConfigModel, LogConfigModel):
fanart=128,
meta=(self.META_CACHE_EXPIRE or 2) * 3600,
scheduler=50,
threadpool=50,
dbpool=50,
dbpooloverflow=20
threadpool=50
)
@property
@@ -727,6 +798,8 @@ class GlobalVar(object):
SUBSCRIPTIONS: List[dict] = []
# 需应急停止的工作流
EMERGENCY_STOP_WORKFLOWS: List[int] = []
# 需应急停止的文件整理
EMERGENCY_STOP_TRANSFER: List[str] = []
def stop_system(self):
"""
@@ -767,12 +840,30 @@ class GlobalVar(object):
if workflow_id in self.EMERGENCY_STOP_WORKFLOWS:
self.EMERGENCY_STOP_WORKFLOWS.remove(workflow_id)
def is_workflow_stopped(self, workflow_id: int):
def is_workflow_stopped(self, workflow_id: int) -> bool:
"""
是否停止工作流
"""
return self.is_system_stopped or workflow_id in self.EMERGENCY_STOP_WORKFLOWS
def stop_transfer(self, path: str):
"""
停止文件整理
"""
if path not in self.EMERGENCY_STOP_TRANSFER:
self.EMERGENCY_STOP_TRANSFER.append(path)
def is_transfer_stopped(self, path: str) -> bool:
"""
是否停止文件整理
"""
if self.is_system_stopped:
return True
if path in self.EMERGENCY_STOP_TRANSFER:
self.EMERGENCY_STOP_TRANSFER.remove(path)
return True
return False
# 全局标识
global_vars = GlobalVar()
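Note that is_transfer_stopped consumes the flag: the path is removed from EMERGENCY_STOP_TRANSFER on the first positive check, so one stop request interrupts exactly one in-flight transfer of that path. A short sketch (assuming the system is not shutting down):

from app.core.config import global_vars

global_vars.stop_transfer("/downloads/movie.mkv")
assert global_vars.is_transfer_stopped("/downloads/movie.mkv") is True   # flag consumed here
assert global_vars.is_transfer_stopped("/downloads/movie.mkv") is False  # already cleared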

View File

@@ -483,7 +483,7 @@ class MediaInfo:
continue
if current_value is None:
setattr(self, key, value)
elif type(current_value) == type(value):
elif type(current_value) is type(value):
setattr(self, key, value)
def set_douban_info(self, info: dict):
@@ -624,7 +624,7 @@ class MediaInfo:
continue
if current_value is None:
setattr(self, key, value)
elif type(current_value) == type(value):
elif type(current_value) is type(value):
setattr(self, key, value)
def set_bangumi_info(self, info: dict):

View File

@@ -450,10 +450,7 @@ class EventManager(metaclass=Singleton):
logger.debug(f"Handler {self.__get_handler_identifier(handler)} is disabled. Skipping execution")
return
try:
self.__invoke_handler_by_type_sync(handler, event)
except Exception as e:
self.__handle_event_error(event, handler, e)
self.__invoke_handler_by_type_sync(handler, event)
async def __safe_invoke_handler_async(self, handler: Callable, event: Event):
"""
@@ -465,10 +462,7 @@ class EventManager(metaclass=Singleton):
logger.debug(f"Handler {self.__get_handler_identifier(handler)} is disabled. Skipping execution")
return
try:
await self.__invoke_handler_by_type_async(handler, event)
except Exception as e:
self.__handle_event_error(event, handler, e)
await self.__invoke_handler_by_type_async(handler, event)
def __invoke_handler_by_type_sync(self, handler: Callable, event: Event):
"""
@@ -486,7 +480,17 @@ class EventManager(metaclass=Singleton):
if class_name in plugin_manager.get_plugin_ids():
# 插件处理器
plugin_manager.run_plugin_method(class_name, method_name, event)
plugin = plugin_manager.running_plugins.get(class_name)
if not plugin:
return
method = getattr(plugin, method_name, None)
if not method:
return
try:
method(event)
except Exception as e:
self.__handle_event_error(event=event, module_name=plugin.name,
class_name=class_name, method_name=method_name, e=e)
elif class_name in module_manager.get_module_ids():
# 模块处理器
module = module_manager.get_running_module(class_name)
@@ -495,16 +499,24 @@ class EventManager(metaclass=Singleton):
method = getattr(module, method_name, None)
if not method:
return
method(event)
try:
method(event)
except Exception as e:
self.__handle_event_error(event=event, module_name=module.get_name(),
class_name=class_name, method_name=method_name, e=e)
else:
# 全局处理器
class_obj = self.__get_class_instance(class_name)
if not class_obj or not hasattr(class_obj, method_name):
return
method = getattr(class_obj, method_name)
method = getattr(class_obj, method_name, None)
if not method:
return
method(event)
try:
method(event)
except Exception as e:
self.__handle_event_error(event=event, module_name=class_name,
class_name=class_name, method_name=method_name, e=e)
async def __invoke_handler_by_type_async(self, handler: Callable, event: Event):
"""
@@ -537,52 +549,62 @@ class EventManager(metaclass=Singleton):
names = handler.__qualname__.split(".")
return names[0], names[1]
@staticmethod
async def __invoke_plugin_method_async(handler: Any, class_name: str, method_name: str, event: Event):
async def __invoke_plugin_method_async(self, handler: Any, class_name: str, method_name: str, event: Event):
"""
异步调用插件方法
"""
plugin = handler.running_plugins.get(class_name)
if plugin and hasattr(plugin, method_name):
method = getattr(plugin, method_name)
if not plugin:
return
method = getattr(plugin, method_name, None)
if not method:
return
try:
if inspect.iscoroutinefunction(method):
await method(event)
else:
# 插件同步函数在异步环境中运行,避免阻塞
await run_in_threadpool(method, event)
except Exception as e:
self.__handle_event_error(event=event, handler=handler, e=e, module_name=plugin.name)
@staticmethod
async def __invoke_module_method_async(handler: Any, class_name: str, method_name: str, event: Event):
async def __invoke_module_method_async(self, handler: Any, class_name: str, method_name: str, event: Event):
"""
异步调用模块方法
"""
module = handler.get_running_module(class_name)
if not module:
return
method = getattr(module, method_name, None)
if not method:
return
if inspect.iscoroutinefunction(method):
await method(event)
else:
method(event)
try:
if inspect.iscoroutinefunction(method):
await method(event)
else:
method(event)
except Exception as e:
self.__handle_event_error(event=event, module_name=module.get_name(),
class_name=class_name, method_name=method_name, e=e)
async def __invoke_global_method_async(self, class_name: str, method_name: str, event: Event):
"""
异步调用全局对象方法
"""
class_obj = self.__get_class_instance(class_name)
if not class_obj or not hasattr(class_obj, method_name):
if not class_obj:
return
method = getattr(class_obj, method_name)
if inspect.iscoroutinefunction(method):
await method(event)
else:
method(event)
method = getattr(class_obj, method_name, None)
if not method:
return
try:
if inspect.iscoroutinefunction(method):
await method(event)
else:
method(event)
except Exception as e:
self.__handle_event_error(event=event, module_name=class_name,
class_name=class_name, method_name=method_name, e=e)
@staticmethod
def __get_class_instance(class_name: str):
@@ -609,7 +631,11 @@ class EventManager(metaclass=Singleton):
module_name = f"app.chain.{class_name[:-5].lower()}"
module = importlib.import_module(module_name)
elif class_name.endswith("Helper"):
module_name = f"app.helper.{class_name[:-6].lower()}"
# 特殊处理 Async 类
if class_name.startswith("Async"):
module_name = f"app.helper.{class_name[5:-6].lower()}"
else:
module_name = f"app.helper.{class_name[:-6].lower()}"
module = importlib.import_module(module_name)
else:
module_name = f"app.{class_name.lower()}"
@@ -649,18 +675,16 @@ class EventManager(metaclass=Singleton):
"""
logger.debug(f"{stage} - {event}")
def __handle_event_error(self, event: Event, handler: Callable, e: Exception):
def __handle_event_error(self, event: Event, module_name: str,
class_name: str, method_name: str, e: Exception):
"""
全局错误处理器,用于处理事件处理中的异常
"""
logger.error(f"事件处理出错:{str(e)} - {traceback.format_exc()}")
names = handler.__qualname__.split(".")
class_name, method_name = names[0], names[1]
logger.error(f"{module_name} 事件处理出错:{str(e)} - {traceback.format_exc()}")
# 发送系统错误通知
from app.helper.message import MessageHelper
MessageHelper().put(title=f"{event.event_type} 事件处理出错",
MessageHelper().put(title=f"{module_name} 处理事件 {event.event_type} 出错",
message=f"{class_name}.{method_name}{str(e)}",
role="system")
self.send_event(

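After this change each handler type (plugin, module, global object) wraps its own invocation, so a failing handler is reported with a human-readable module name instead of a bare qualname. A hedged sketch of what a subscriber now experiences (the registration decorator and event type are assumptions based on the imports shown in this diff):

from app.core.event import eventmanager, Event
from app.schemas.types import EventType

@eventmanager.register(EventType.PluginAction)  # registration API assumed
def flaky_handler(event: Event):
    raise RuntimeError("boom")

# Dispatch keeps running: the exception is caught per handler, logged with a
# traceback, and surfaced as a system message titled
# "<module_name> 处理事件 <event_type> 出错" instead of aborting dispatch.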
View File

@@ -21,7 +21,7 @@ from app.core.config import settings
from app.core.event import eventmanager, Event
from app.db.plugindata_oper import PluginDataOper
from app.db.systemconfig_oper import SystemConfigOper
from app.helper.plugin import PluginHelper
from app.helper.plugin import PluginHelper, PluginMemoryMonitor
from app.helper.sites import SitesHelper # noqa
from app.log import logger
from app.schemas.types import EventType, SystemConfigKey
@@ -98,6 +98,8 @@ class PluginManager(metaclass=Singleton):
self._config_key: str = "plugin.%s"
# 监听器
self._observer: Observer = None
# 内存监控器
self._memory_monitor = PluginMemoryMonitor()
# 开发者模式监测插件修改
if settings.DEV or settings.PLUGIN_AUTO_RELOAD:
self.__start_monitor()
@@ -863,6 +865,28 @@ class PluginManager(metaclass=Singleton):
"""
return list(self._running_plugins.keys())
def get_plugin_memory_stats(self, pid: Optional[str] = None) -> List[Dict[str, Any]]:
"""
获取插件内存统计信息
:param pid: 插件ID,为空则获取所有插件
:return: 内存统计信息列表
"""
if pid:
plugin_instance = self._running_plugins.get(pid)
if plugin_instance:
return [self._memory_monitor.get_plugin_memory_usage(pid, plugin_instance)]
else:
return []
else:
return self._memory_monitor.get_all_plugins_memory_usage(self._running_plugins)
def clear_plugin_memory_cache(self, pid: Optional[str] = None):
"""
清除插件内存统计缓存
:param pid: 插件ID,为空则清除所有缓存
"""
self._memory_monitor.clear_cache(pid)
def get_online_plugins(self, force: bool = False) -> List[schemas.Plugin]:
"""
获取所有在线插件信息
@@ -1165,6 +1189,7 @@ class PluginManager(metaclass=Singleton):
async def async_get_online_plugins(self, force: bool = False) -> List[schemas.Plugin]:
"""
异步获取所有在线插件信息
:param force: 是否强制刷新(忽略缓存)
"""
if not settings.PLUGIN_MARKET:
return []

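The memory-monitor hookup above exposes two public calls on PluginManager. A usage sketch (the PluginManager import path is an assumption):

from app.core.plugin import PluginManager  # import path assumed

manager = PluginManager()
stats = manager.get_plugin_memory_stats()       # all plugins, sorted by usage desc
if stats:
    top = stats[0]
    print(top["plugin_id"], top.get("total_memory_mb"))
manager.clear_plugin_memory_cache()             # invalidate the 5-minute cache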
View File

@@ -252,19 +252,19 @@ def __verify_key(key: str, expected_key: str, key_type: str) -> str:
def verify_apitoken(token: Annotated[str, Security(__get_api_token)]) -> str:
"""
使用 API Token 进行身份认证
:param token: API Token,从 URL 查询参数中获取
:param token: API Token,从 URL 查询参数中获取 token=xxx
:return: 返回校验通过的 API Token
"""
return __verify_key(token, settings.API_TOKEN, "API_TOKEN")
return __verify_key(token, settings.API_TOKEN, "token")
def verify_apikey(apikey: Annotated[str, Security(__get_api_key)]) -> str:
"""
使用 API Key 进行身份认证
:param apikey: API Key,从 URL 查询参数或请求头中获取
:param apikey: API Key,从 URL 查询参数中获取 apikey=xxx
:return: 返回校验通过的 API Key
"""
return __verify_key(apikey, settings.API_TOKEN, "API_KEY")
return __verify_key(apikey, settings.API_TOKEN, "apikey")
def verify_password(plain_password: str, hashed_password: str) -> bool:

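The docstring fixes pin down where each credential travels: both guards validate against settings.API_TOKEN, and only the query-parameter name differs. Illustrative calls against a local instance (the endpoint path is hypothetical):

import requests

BASE = "http://localhost:3001/api/v1"  # default PORT from the config above

# verify_apitoken: credential in the `token` query parameter
requests.get(f"{BASE}/some/endpoint", params={"token": "<API_TOKEN>"})

# verify_apikey: credential in the `apikey` query parameter
requests.get(f"{BASE}/some/endpoint", params={"apikey": "<API_TOKEN>"})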
View File

@@ -1,19 +1,43 @@
import asyncio
from typing import Any, Generator, List, Optional, Self, Tuple, AsyncGenerator, Sequence, Union
from typing import Any, Generator, List, Optional, Self, Tuple, AsyncGenerator, Union
from sqlalchemy import NullPool, QueuePool, and_, create_engine, inspect, text, select, delete
from sqlalchemy import NullPool, QueuePool, and_, create_engine, inspect, text, select, delete, Column, Integer, \
Sequence, Identity
from sqlalchemy.ext.asyncio import create_async_engine, AsyncSession, async_sessionmaker
from sqlalchemy.orm import Session, as_declarative, declared_attr, scoped_session, sessionmaker
from app.core.config import settings
def get_id_column():
"""
根据数据库类型返回合适的ID列定义
"""
if settings.DB_TYPE.lower() == "postgresql":
# PostgreSQL使用SERIAL类型让数据库自动处理序列
return Column(Integer, Identity(start=1, cycle=True), primary_key=True, index=True)
else:
# SQLite使用Sequence
return Column(Integer, Sequence('id'), primary_key=True, index=True)
def _get_database_engine(is_async: bool = False):
"""
获取数据库连接参数并设置WAL模式
:param is_async: 是否创建异步引擎True - 异步引擎, False - 同步引擎
:return: 返回对应的数据库引擎
"""
# 根据数据库类型选择连接方式
if settings.DB_TYPE.lower() == "postgresql":
return _get_postgresql_engine(is_async)
else:
return _get_sqlite_engine(is_async)
def _get_sqlite_engine(is_async: bool = False):
"""
获取SQLite数据库引擎
"""
# 连接参数
_connect_args = {
"timeout": settings.DB_TIMEOUT,
@@ -40,9 +64,9 @@ def _get_database_engine(is_async: bool = False):
# 当使用 QueuePool 时,添加 QueuePool 特有的参数
if _pool_class == QueuePool:
_db_kwargs.update({
"pool_size": settings.CONF.dbpool,
"pool_size": settings.DB_SQLITE_POOL_SIZE,
"pool_timeout": settings.DB_POOL_TIMEOUT,
"max_overflow": settings.CONF.dbpooloverflow
"max_overflow": settings.DB_SQLITE_MAX_OVERFLOW
})
# 创建数据库引擎
@@ -52,7 +76,7 @@ def _get_database_engine(is_async: bool = False):
_journal_mode = "WAL" if settings.DB_WAL_ENABLE else "DELETE"
with engine.connect() as connection:
current_mode = connection.execute(text(f"PRAGMA journal_mode={_journal_mode};")).scalar()
print(f"Database journal mode set to: {current_mode}")
print(f"SQLite database journal mode set to: {current_mode}")
return engine
else:
@@ -78,12 +102,73 @@ def _get_database_engine(is_async: bool = False):
async with async_engine.connect() as _connection:
result = await _connection.execute(text(f"PRAGMA journal_mode={_journal_mode};"))
_current_mode = result.scalar()
print(f"Async database journal mode set to: {_current_mode}")
print(f"Async SQLite database journal mode set to: {_current_mode}")
try:
asyncio.run(set_async_wal_mode())
except Exception as e:
print(f"Failed to set async WAL mode: {e}")
print(f"Failed to set async SQLite WAL mode: {e}")
return async_engine
def _get_postgresql_engine(is_async: bool = False):
"""
获取PostgreSQL数据库引擎
"""
# 构建PostgreSQL连接URL
if settings.DB_POSTGRESQL_PASSWORD:
db_url = f"postgresql://{settings.DB_POSTGRESQL_USERNAME}:{settings.DB_POSTGRESQL_PASSWORD}@{settings.DB_POSTGRESQL_HOST}:{settings.DB_POSTGRESQL_PORT}/{settings.DB_POSTGRESQL_DATABASE}"
else:
db_url = f"postgresql://{settings.DB_POSTGRESQL_USERNAME}@{settings.DB_POSTGRESQL_HOST}:{settings.DB_POSTGRESQL_PORT}/{settings.DB_POSTGRESQL_DATABASE}"
# PostgreSQL连接参数
_connect_args = {}
# 创建同步引擎
if not is_async:
# 根据池类型设置 poolclass 和相关参数
_pool_class = NullPool if settings.DB_POOL_TYPE == "NullPool" else QueuePool
# 数据库参数
_db_kwargs = {
"url": db_url,
"pool_pre_ping": settings.DB_POOL_PRE_PING,
"echo": settings.DB_ECHO,
"poolclass": _pool_class,
"pool_recycle": settings.DB_POOL_RECYCLE,
"connect_args": _connect_args
}
# 当使用 QueuePool 时,添加 QueuePool 特有的参数
if _pool_class == QueuePool:
_db_kwargs.update({
"pool_size": settings.DB_POSTGRESQL_POOL_SIZE,
"pool_timeout": settings.DB_POOL_TIMEOUT,
"max_overflow": settings.DB_POSTGRESQL_MAX_OVERFLOW
})
# 创建数据库引擎
engine = create_engine(**_db_kwargs)
print(f"PostgreSQL database connected to {settings.DB_POSTGRESQL_HOST}:{settings.DB_POSTGRESQL_PORT}/{settings.DB_POSTGRESQL_DATABASE}")
return engine
else:
# 构建异步PostgreSQL连接URL
async_db_url = f"postgresql+asyncpg://{settings.DB_POSTGRESQL_USERNAME}:{settings.DB_POSTGRESQL_PASSWORD}@{settings.DB_POSTGRESQL_HOST}:{settings.DB_POSTGRESQL_PORT}/{settings.DB_POSTGRESQL_DATABASE}"
# 数据库参数,只能使用 NullPool
_db_kwargs = {
"url": async_db_url,
"pool_pre_ping": settings.DB_POOL_PRE_PING,
"echo": settings.DB_ECHO,
"poolclass": NullPool,
"pool_recycle": settings.DB_POOL_RECYCLE,
"connect_args": _connect_args
}
# 创建异步数据库引擎
async_engine = create_async_engine(**_db_kwargs)
print(f"Async PostgreSQL database connected to {settings.DB_POSTGRESQL_HOST}:{settings.DB_POSTGRESQL_PORT}/{settings.DB_POSTGRESQL_DATABASE}")
return async_engine
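Both engine builders are driven purely by the DB_* settings introduced in the config hunk earlier, so switching backends is a configuration change. A sketch of the resulting engines under the default PostgreSQL settings (the function is module-private; the import is shown for illustration only):

from app.db import _get_database_engine  # private helper, import assumed

# With DB_TYPE=postgresql and the default DB_POSTGRESQL_* values this yields:
#   sync : postgresql://moviepilot:***@localhost:5432/moviepilot (QueuePool)
#   async: postgresql+asyncpg://moviepilot:***@localhost:5432/moviepilot (NullPool only)
engine = _get_database_engine(is_async=False)
async_engine = _get_database_engine(is_async=True)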

View File

@@ -18,12 +18,22 @@ def update_db():
"""
更新数据库
"""
db_location = settings.CONFIG_PATH / 'user.db'
script_location = settings.ROOT_PATH / 'database'
try:
alembic_cfg = Config()
alembic_cfg.set_main_option('script_location', str(script_location))
alembic_cfg.set_main_option('sqlalchemy.url', f"sqlite:///{db_location}")
# 根据数据库类型设置不同的URL
if settings.DB_TYPE.lower() == "postgresql":
if settings.DB_POSTGRESQL_PASSWORD:
db_url = f"postgresql://{settings.DB_POSTGRESQL_USERNAME}:{settings.DB_POSTGRESQL_PASSWORD}@{settings.DB_POSTGRESQL_HOST}:{settings.DB_POSTGRESQL_PORT}/{settings.DB_POSTGRESQL_DATABASE}"
else:
db_url = f"postgresql://{settings.DB_POSTGRESQL_USERNAME}@{settings.DB_POSTGRESQL_HOST}:{settings.DB_POSTGRESQL_PORT}/{settings.DB_POSTGRESQL_DATABASE}"
else:
db_location = settings.CONFIG_PATH / 'user.db'
db_url = f"sqlite:///{db_location}"
alembic_cfg.set_main_option('sqlalchemy.url', db_url)
upgrade(alembic_cfg, 'head')
except Exception as e:
logger.error(f'数据库更新失败:{str(e)}')

View File

@@ -1,18 +1,18 @@
import time
from typing import Optional
from sqlalchemy import Column, Integer, String, Sequence, JSON, select
from sqlalchemy import Column, Integer, String, JSON, select
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import Session
from app.db import db_query, db_update, Base, async_db_query
from app.db import db_query, db_update, get_id_column, Base, async_db_query
class DownloadHistory(Base):
"""
下载历史记录
"""
id = Column(Integer, Sequence('id'), primary_key=True, index=True)
id = get_id_column()
# 保存路径
path = Column(String, nullable=False, index=True)
# 类型 电影/电视剧
@@ -188,7 +188,7 @@ class DownloadFiles(Base):
"""
下载文件记录
"""
id = Column(Integer, Sequence('id'), primary_key=True, index=True)
id = get_id_column()
# 下载器
downloader = Column(String)
# 下载任务Hash

View File

@@ -1,19 +1,19 @@
from datetime import datetime
from typing import Optional
from sqlalchemy import Column, Integer, String, Sequence, JSON
from sqlalchemy import Column, Integer, String, JSON
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import Session
from app.db import db_query, db_update, async_db_query, Base
from app.db import db_query, db_update, get_id_column, async_db_query, Base
class MediaServerItem(Base):
"""
媒体服务器媒体条目表
"""
id = Column(Integer, Sequence('id'), primary_key=True, index=True)
id = get_id_column()
# 服务器类型
server = Column(String)
# 媒体库ID

View File

@@ -1,17 +1,17 @@
from typing import Optional
from sqlalchemy import Column, Integer, String, Sequence, JSON, select
from sqlalchemy import Column, Integer, String, JSON, select
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import Session
from app.db import db_query, Base, async_db_query
from app.db import db_query, Base, get_id_column, async_db_query
class Message(Base):
"""
消息表
"""
id = Column(Integer, Sequence('id'), primary_key=True, index=True)
id = get_id_column()
# 消息渠道
channel = Column(String)
# 消息来源

View File

@@ -1,14 +1,14 @@
from sqlalchemy import Column, Integer, String, Sequence, JSON
from sqlalchemy import Column, String, JSON
from sqlalchemy.orm import Session
from app.db import db_query, db_update, Base
from app.db import db_query, db_update, get_id_column, Base
class PluginData(Base):
"""
插件数据表
"""
id = Column(Integer, Sequence('id'), primary_key=True, index=True)
id = get_id_column()
plugin_id = Column(String, nullable=False, index=True)
key = Column(String, index=True, nullable=False)
value = Column(JSON)

View File

@@ -1,17 +1,17 @@
from datetime import datetime
from sqlalchemy import Boolean, Column, Integer, String, Sequence, JSON, select, delete
from sqlalchemy import Boolean, Column, Integer, String, JSON, select, delete
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import Session
from app.db import db_query, db_update, Base, async_db_query, async_db_update
from app.db import db_query, db_update, Base, async_db_query, async_db_update, get_id_column
class Site(Base):
"""
站点表
"""
id = Column(Integer, Sequence('id'), primary_key=True, index=True)
id = get_id_column()
# 站点名
name = Column(String, nullable=False)
# 域名Key
@@ -69,12 +69,12 @@ class Site(Base):
@classmethod
@db_query
def get_actives(cls, db: Session):
return db.query(cls).filter(cls.is_active == 1).all()
return db.query(cls).filter(cls.is_active).all()
@classmethod
@async_db_query
async def async_get_actives(cls, db: AsyncSession):
result = await db.execute(select(cls).where(cls.is_active == 1))
result = await db.execute(select(cls).where(cls.is_active))
return result.scalars().all()
@classmethod

View File

@@ -1,15 +1,15 @@
from sqlalchemy import Column, Integer, String, Sequence, select
from sqlalchemy import Column, String, select
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import Session
from app.db import db_query, Base, async_db_query
from app.db import db_query, Base, get_id_column, async_db_query
class SiteIcon(Base):
"""
站点图标表
"""
id = Column(Integer, Sequence('id'), primary_key=True, index=True)
id = get_id_column()
# 站点名称
name = Column(String, nullable=False)
# 域名Key

View File

@@ -1,17 +1,17 @@
from datetime import datetime
from sqlalchemy import Column, Integer, String, Sequence, JSON, select
from sqlalchemy import Column, Integer, String, JSON, select
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import Session
from app.db import db_query, db_update, Base, async_db_query
from app.db import db_query, db_update, get_id_column, Base, async_db_query
class SiteStatistic(Base):
"""
站点统计表
"""
id = Column(Integer, Sequence('id'), primary_key=True, index=True)
id = get_id_column()
# 域名Key
domain = Column(String, index=True)
# 成功次数

View File

@@ -1,18 +1,18 @@
from datetime import datetime
from typing import Optional
from sqlalchemy import Column, Integer, String, Sequence, Float, JSON, func, or_, select
from sqlalchemy import Column, Integer, String, Float, JSON, func, or_, select
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import Session
from app.db import db_query, Base, async_db_query
from app.db import db_query, Base, get_id_column, async_db_query
class SiteUserData(Base):
"""
站点数据表
"""
id = Column(Integer, Sequence('id'), primary_key=True, index=True)
id = get_id_column()
# 站点域名
domain = Column(String, index=True)
# 站点名称
@@ -20,7 +20,7 @@ class SiteUserData(Base):
# 用户名
username = Column(String)
# 用户ID
userid = Column(Integer)
userid = Column(String)
# 用户等级
user_level = Column(String)
# 加入时间

View File

@@ -1,18 +1,18 @@
import time
from typing import Optional
from sqlalchemy import Column, Integer, String, Sequence, Float, JSON, select
from sqlalchemy import Column, Integer, String, Float, JSON, select
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import Session
from app.db import db_query, db_update, Base, async_db_query, async_db_update
from app.db import db_query, db_update, get_id_column, Base, async_db_query, async_db_update
class Subscribe(Base):
"""
订阅表
"""
id = Column(Integer, Sequence('id'), primary_key=True, index=True)
id = get_id_column()
# 标题
name = Column(String, nullable=False, index=True)
# 年份

View File

@@ -1,17 +1,17 @@
from typing import Optional
from sqlalchemy import Column, Integer, String, Sequence, Float, JSON, select
from sqlalchemy import Column, Integer, String, Float, JSON, select
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import Session
from app.db import db_query, Base, async_db_query
from app.db import db_query, Base, get_id_column, async_db_query
class SubscribeHistory(Base):
"""
订阅历史表
"""
id = Column(Integer, Sequence('id'), primary_key=True, index=True)
id = get_id_column()
# 标题
name = Column(String, nullable=False, index=True)
# 年份

View File

@@ -1,15 +1,15 @@
from sqlalchemy import Column, Integer, String, Sequence, JSON, select
from sqlalchemy import Column, String, JSON, select
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import Session
from app.db import db_query, db_update, Base, async_db_query
from app.db import db_query, db_update, Base, async_db_query, get_id_column
class SystemConfig(Base):
"""
配置表
"""
id = Column(Integer, Sequence('id'), primary_key=True, index=True)
id = get_id_column()
# 主键
key = Column(String, index=True)
# 值

View File

@@ -1,18 +1,18 @@
import time
from typing import Optional
from sqlalchemy import Column, Integer, String, Sequence, Boolean, func, or_, JSON, select
from sqlalchemy import Column, Integer, String, Boolean, func, or_, JSON, select
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import Session
from app.db import db_query, db_update, Base, async_db_query
from app.db import db_query, db_update, get_id_column, Base, async_db_query
class TransferHistory(Base):
"""
整理记录
"""
id = Column(Integer, Sequence('id'), primary_key=True, index=True)
id = get_id_column()
# 源路径
src = Column(String, index=True)
# 源存储
@@ -65,76 +65,92 @@ class TransferHistory(Base):
def list_by_title(cls, db: Session, title: str, page: Optional[int] = 1, count: Optional[int] = 30,
status: bool = None):
if status is not None:
return db.query(cls).filter(
query = db.query(cls).filter(
cls.status == status
).order_by(
cls.date.desc()
).offset((page - 1) * count).limit(count).all()
)
else:
return db.query(cls).filter(or_(
query = db.query(cls).filter(or_(
cls.title.like(f'%{title}%'),
cls.src.like(f'%{title}%'),
cls.dest.like(f'%{title}%'),
)).order_by(
cls.date.desc()
).offset((page - 1) * count).limit(count).all()
)
# 当count为负数时,不限制页数,查询所有
if count >= 0:
query = query.offset((page - 1) * count).limit(count)
return query.all()
@classmethod
@async_db_query
async def async_list_by_title(cls, db: AsyncSession, title: str, page: Optional[int] = 1, count: Optional[int] = 30,
status: bool = None):
if status is not None:
result = await db.execute(
select(cls).filter(
cls.status == status
).order_by(
cls.date.desc()
).offset((page - 1) * count).limit(count)
query = select(cls).filter(
cls.status == status
).order_by(
cls.date.desc()
)
else:
result = await db.execute(
select(cls).filter(or_(
cls.title.like(f'%{title}%'),
cls.src.like(f'%{title}%'),
cls.dest.like(f'%{title}%'),
)).order_by(
cls.date.desc()
).offset((page - 1) * count).limit(count)
query = select(cls).filter(or_(
cls.title.like(f'%{title}%'),
cls.src.like(f'%{title}%'),
cls.dest.like(f'%{title}%'),
)).order_by(
cls.date.desc()
)
# 当count为负数时,不限制页数,查询所有
if count >= 0:
query = query.offset((page - 1) * count).limit(count)
result = await db.execute(query)
return result.scalars().all()
@classmethod
@db_query
def list_by_page(cls, db: Session, page: Optional[int] = 1, count: Optional[int] = 30, status: bool = None):
if status is not None:
return db.query(cls).filter(
query = db.query(cls).filter(
cls.status == status
).order_by(
cls.date.desc()
).offset((page - 1) * count).limit(count).all()
)
else:
return db.query(cls).order_by(
query = db.query(cls).order_by(
cls.date.desc()
).offset((page - 1) * count).limit(count).all()
)
# 当count为负数时,不限制页数,查询所有
if count >= 0:
query = query.offset((page - 1) * count).limit(count)
return query.all()
@classmethod
@async_db_query
async def async_list_by_page(cls, db: AsyncSession, page: Optional[int] = 1, count: Optional[int] = 30,
status: bool = None):
if status is not None:
result = await db.execute(
select(cls).filter(
cls.status == status
).order_by(
cls.date.desc()
).offset((page - 1) * count).limit(count)
query = select(cls).filter(
cls.status == status
).order_by(
cls.date.desc()
)
else:
result = await db.execute(
select(cls).order_by(
cls.date.desc()
).offset((page - 1) * count).limit(count)
query = select(cls).order_by(
cls.date.desc()
)
# 当count为负数时,不限制页数,查询所有
if count >= 0:
query = query.offset((page - 1) * count).limit(count)
result = await db.execute(query)
return result.scalars().all()
@classmethod

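The pagination refactor gives count a sentinel meaning: a negative value skips offset/limit entirely and returns every row. Usage sketch (the model import path is an assumption):

from sqlalchemy.orm import Session
from app.db.models.transferhistory import TransferHistory  # import path assumed

def fetch(db: Session):
    page_items = TransferHistory.list_by_page(db, page=2, count=30)  # classic paging
    all_items = TransferHistory.list_by_page(db, count=-1)           # negative count: no limit
    return page_items, all_items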
View File

@@ -1,8 +1,8 @@
from sqlalchemy import Boolean, Column, Integer, JSON, Sequence, String, select
from sqlalchemy import Boolean, Column, JSON, String, select
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import Session
from app.db import Base, db_query, db_update, async_db_query, async_db_update
from app.db import Base, db_query, db_update, async_db_query, async_db_update, get_id_column
class User(Base):
@@ -10,7 +10,7 @@ class User(Base):
用户表
"""
# ID
id = Column(Integer, Sequence('id'), primary_key=True, index=True)
id = get_id_column()
# 用户名,唯一值
name = Column(String, index=True, nullable=False)
# 邮箱

View File

@@ -1,14 +1,14 @@
from sqlalchemy import Column, Integer, String, Sequence, UniqueConstraint, Index, JSON
from sqlalchemy import Column, String, UniqueConstraint, Index, JSON
from sqlalchemy.orm import Session
from app.db import db_query, db_update, Base
from app.db import db_query, db_update, get_id_column, Base
class UserConfig(Base):
"""
用户配置表
"""
id = Column(Integer, Sequence('id'), primary_key=True, index=True)
id = get_id_column()
# 用户名
username = Column(String, index=True)
# 配置键

View File

@@ -1,10 +1,10 @@
from datetime import datetime
from typing import Optional
from sqlalchemy import Column, Integer, JSON, Sequence, String, and_, or_, select
from sqlalchemy import Column, Integer, JSON, String, and_, or_, select
from sqlalchemy.ext.asyncio import AsyncSession
from app.db import Base, db_query, db_update, async_db_query, async_db_update
from app.db import Base, db_query, get_id_column, db_update, async_db_query, async_db_update
class Workflow(Base):
@@ -12,7 +12,7 @@ class Workflow(Base):
工作流表
"""
# ID
id = Column(Integer, Sequence('id'), primary_key=True, index=True)
id = get_id_column()
# 名称
name = Column(String, index=True, nullable=False)
# 描述

View File

@@ -34,6 +34,7 @@ class SubscribeOper(DbOper):
"backdrop": mediainfo.get_backdrop_image(),
"vote": mediainfo.vote_average,
"description": mediainfo.overview,
"search_imdbid": 1 if kwargs.get('search_imdbid') else 0,
"date": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
})
if not subscribe:
@@ -118,6 +119,14 @@ class SubscribeOper(DbOper):
return Subscribe.get_by_state(self._db, state)
return Subscribe.list(self._db)
async def async_list(self, state: Optional[str] = None) -> List[Subscribe]:
"""
异步获取订阅列表
"""
if state:
return await Subscribe.async_get_by_state(self._db, state)
return await Subscribe.async_list(self._db)
def delete(self, sid: int):
"""
删除订阅

View File

@@ -10,9 +10,9 @@ from datetime import datetime
from typing import Any, Literal, Optional, List, Dict, Union
from typing import Callable
from cachetools import TTLCache
from jinja2 import Template
from app.core.cache import TTLCache
from app.core.config import global_vars
from app.core.context import MediaInfo, TorrentInfo
from app.core.meta import MetaBase
@@ -307,7 +307,7 @@ class TemplateHelper(metaclass=SingletonClass):
def __init__(self):
self.builder = TemplateContextBuilder()
self.cache = TTLCache(maxsize=100, ttl=600)
self.cache = TTLCache(region="notification", maxsize=100, ttl=600)
@staticmethod
def _generate_cache_key(content: Union[str, dict]) -> str:
@@ -471,6 +471,13 @@ class TemplateHelper(metaclass=SingletonClass):
except json.JSONDecodeError:
return rendered
def close(self):
"""
清理资源
"""
if self.cache:
self.cache.close()
class MessageTemplateHelper:
"""
@@ -704,6 +711,7 @@ class MessageQueueManager(metaclass=SingletonClass):
停止队列管理器
"""
self._running = False
logger.info("正在停止消息队列...")
self.thread.join()
@@ -765,3 +773,13 @@ class MessageHelper(metaclass=Singleton):
if not self.user_queue.empty():
return self.user_queue.get(block=False)
return None
def stop_message():
"""
停止消息服务
"""
# 停止消息队列
MessageQueueManager().stop()
# 关闭消息渲染器
TemplateHelper().close()

View File

@@ -1,13 +1,14 @@
import importlib
import io
import json
import shutil
import site
import sys
import time
import traceback
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Set, Callable, Awaitable
import zipfile
import io
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Set, Callable, Awaitable, Any
import aiofiles
import aioshutil
@@ -24,6 +25,7 @@ from app.db.systemconfig_oper import SystemConfigOper
from app.log import logger
from app.schemas.types import SystemConfigKey
from app.utils.http import RequestUtils, AsyncRequestUtils
from app.utils.memory import MemoryCalculator
from app.utils.singleton import WeakSingleton
from app.utils.system import SystemUtils
from app.utils.url import UrlUtils
@@ -248,6 +250,7 @@ class PluginHelper(metaclass=WeakSingleton):
return False, f"未在插件清单中找到 {pid} 的版本号,无法进行 Release 安装"
# 拼接 release_tag
release_tag = f"{pid}_v{plugin_version}"
# 使用 release 进行安装
def prepare_release() -> Tuple[bool, str]:
return self.__install_from_release(
@@ -533,12 +536,12 @@ class PluginHelper(metaclass=WeakSingleton):
return None
def __get_plugin_meta(self, pid: str, repo_url: str,
package_version: Optional[str]) -> dict:
package_version: Optional[str]) -> dict:
try:
plugins = (
self.get_plugins(repo_url) if not package_version
else self.get_plugins(repo_url, package_version)
) or {}
self.get_plugins(repo_url) if not package_version
else self.get_plugins(repo_url, package_version)
) or {}
meta = plugins.get(pid)
return meta if isinstance(meta, dict) else {}
except Exception as e:
@@ -908,10 +911,10 @@ class PluginHelper(metaclass=WeakSingleton):
"""
# 异步版本统一调用带缓存的版本,force 时先清空缓存再获取
if force:
return await self._async_get_plugins_uncached(repo_url, package_version)
await self._async_get_plugins_cached.cache_clear()
return await self._async_get_plugins_cached(repo_url, package_version)
@cached(maxsize=64, ttl=1800)
@cached(maxsize=128, ttl=1800)
async def _async_get_plugins_cached(self, repo_url: str,
package_version: Optional[str] = None) -> Optional[Dict[str, dict]]:
"""
@@ -1393,6 +1396,7 @@ class PluginHelper(metaclass=WeakSingleton):
return False, f"未在插件清单中找到 {pid} 的版本号,无法进行 Release 安装"
# 拼接 release_tag
release_tag = f"{pid}_v{plugin_version}"
# 使用 release 进行安装
async def prepare_release() -> Tuple[bool, str]:
return await self.__async_install_from_release(
@@ -1411,9 +1415,9 @@ class PluginHelper(metaclass=WeakSingleton):
package_version: Optional[str]) -> dict:
try:
plugins = (
await self.async_get_plugins(repo_url) if not package_version
else await self.async_get_plugins(repo_url, package_version)
) or {}
await self.async_get_plugins(repo_url) if not package_version
else await self.async_get_plugins(repo_url, package_version)
) or {}
meta = plugins.get(pid)
return meta if isinstance(meta, dict) else {}
except Exception as e:
@@ -1528,7 +1532,8 @@ class PluginHelper(metaclass=WeakSingleton):
logger.error(f"解析 Release 信息失败:{e}")
return False, f"解析 Release 信息失败:{e}"
res = await self.__async_request_with_fallback(download_url, headers=settings.REPO_GITHUB_HEADERS(repo=user_repo))
res = await self.__async_request_with_fallback(download_url,
headers=settings.REPO_GITHUB_HEADERS(repo=user_repo))
if res is None or res.status_code != 200:
return False, f"下载资产失败:{res.status_code if res else '连接失败'}"
@@ -1566,3 +1571,87 @@ class PluginHelper(metaclass=WeakSingleton):
except Exception as e:
logger.error(f"解压 Release 压缩包失败:{e}")
return False, f"解压 Release 压缩包失败:{e}"
class PluginMemoryMonitor:
"""
插件内存监控器
"""
def __init__(self):
self._calculator = MemoryCalculator()
self._cache = {}
self._cache_ttl = 300 # 缓存5分钟
def get_plugin_memory_usage(self, plugin_id: str, plugin_instance: Any) -> Dict[str, Any]:
"""
获取插件内存使用情况
:param plugin_id: 插件ID
:param plugin_instance: 插件实例
:return: 内存使用信息
"""
# 检查缓存
if self._is_cache_valid(plugin_id):
return self._cache[plugin_id]
# 计算内存使用
memory_info = self._calculator.calculate_object_memory(plugin_instance)
# 添加插件信息
result = {
'plugin_id': plugin_id,
'plugin_name': getattr(plugin_instance, 'plugin_name', 'Unknown'),
'plugin_version': getattr(plugin_instance, 'plugin_version', 'Unknown'),
'timestamp': time.time(),
**memory_info
}
# 更新缓存
self._cache[plugin_id] = result
return result
def get_all_plugins_memory_usage(self, plugins: Dict[str, Any]) -> List[Dict[str, Any]]:
"""
获取所有插件的内存使用情况
:param plugins: 插件实例字典
:return: 内存使用信息列表
"""
results = []
for plugin_id, plugin_instance in plugins.items():
if plugin_instance:
try:
memory_info = self.get_plugin_memory_usage(plugin_id, plugin_instance)
results.append(memory_info)
except Exception as e:
logger.error(f"获取插件 {plugin_id} 内存使用情况失败:{str(e)}")
results.append({
'plugin_id': plugin_id,
'plugin_name': getattr(plugin_instance, 'plugin_name', 'Unknown'),
'error': str(e),
'total_memory_bytes': 0,
'total_memory_mb': 0,
'object_count': 0,
'calculation_time_ms': 0
})
# 按内存使用量排序
results.sort(key=lambda x: x.get('total_memory_bytes', 0), reverse=True)
return results
def _is_cache_valid(self, plugin_id: str) -> bool:
"""
检查缓存是否有效
"""
if plugin_id not in self._cache:
return False
return time.time() - self._cache[plugin_id]['timestamp'] < self._cache_ttl
def clear_cache(self, plugin_id: Optional[str] = None):
"""
清除缓存
:param plugin_id: 插件ID,为空则清除所有缓存
"""
if plugin_id:
self._cache.pop(plugin_id, None)
else:
self._cache.clear()
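A minimal usage sketch for the monitor (the plugins mapping and plugin id below are illustrative, not part of this diff):

# assumption: plugins = {plugin_id: plugin_instance}, e.g. as held by the plugin manager
monitor = PluginMemoryMonitor()
report = monitor.get_all_plugins_memory_usage(plugins)
for entry in report[:3]:
    # entries arrive sorted by total_memory_bytes, largest first
    print(entry['plugin_id'], entry.get('total_memory_mb'))
monitor.clear_cache("SomePlugin")  # hypothetical id; omit the argument to clear everything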

View File

@@ -1,55 +1,76 @@
from enum import Enum
from typing import Union, Optional
from app.core.cache import TTLCache
from app.schemas.types import ProgressKey
from app.utils.singleton import WeakSingleton
class ProgressHelper(metaclass=WeakSingleton):
"""
处理进度辅助类
"""
def __init__(self):
self._process_detail = {}
def init_config(self):
pass
def __reset(self, key: Union[ProgressKey, str]):
def __init__(self, key: Union[ProgressKey, str]):
if isinstance(key, Enum):
key = key.value
self._process_detail[key] = {
self._key = key
self._progress = TTLCache(region="progress", maxsize=1024, ttl=24 * 60 * 60)
def __reset(self):
"""
重置进度
"""
self._progress[self._key] = {
"enable": False,
"value": 0,
"text": "请稍候..."
"text": "请稍候...",
"data": {}
}
def start(self, key: Union[ProgressKey, str]):
self.__reset(key)
if isinstance(key, Enum):
key = key.value
self._process_detail[key]['enable'] = True
def end(self, key: Union[ProgressKey, str]):
if isinstance(key, Enum):
key = key.value
if not self._process_detail.get(key):
def start(self):
"""
开始进度
"""
self.__reset()
current = self._progress.get(self._key)
if not current:
return
self._process_detail[key] = {
"enable": False,
"value": 100,
"text": "正在处理..."
}
current['enable'] = True
self._progress[self._key] = current
def update(self, key: Union[ProgressKey, str], value: Union[float, int] = None, text: Optional[str] = None):
if isinstance(key, Enum):
key = key.value
if not self._process_detail.get(key, {}).get('enable'):
def end(self):
"""
结束进度
"""
current = self._progress.get(self._key)
if not current:
return
current.update(
{
"enable": False,
"value": 100,
"text": ""
}
)
self._progress[self._key] = current
def update(self, value: Union[float, int] = None, text: Optional[str] = None, data: dict = None):
"""
更新进度
"""
current = self._progress.get(self._key)
if not current or not current.get('enable'):
return
if value:
self._process_detail[key]['value'] = value
current['value'] = value
if text:
self._process_detail[key]['text'] = text
current['text'] = text
if data:
if not current.get('data'):
current['data'] = {}
current['data'].update(data)
self._progress[self._key] = current
def get(self, key: Union[ProgressKey, str]) -> dict:
if isinstance(key, Enum):
key = key.value
return self._process_detail.get(key)
def get(self) -> dict:
return self._progress.get(self._key)
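A short sketch of the reworked per-key API: the key is now bound at construction and state lives in the shared "progress" TTLCache region, so another instance built with the same key observes the same values (ProgressKey.FileTransfer is assumed to be an existing enum member):

progress = ProgressHelper(ProgressKey.FileTransfer)
progress.start()
progress.update(value=50, text="halfway", data={"file": "a.mkv"})  # no key argument anymore
print(progress.get())  # {'enable': True, 'value': 50, 'text': 'halfway', 'data': {'file': 'a.mkv'}}
progress.end()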

app/helper/redis.py Normal file
View File

@@ -0,0 +1,547 @@
import json
import pickle
from typing import Any, Optional, Generator, Tuple, AsyncGenerator, Union
from urllib.parse import quote
import redis
from redis.asyncio import Redis
from app.core.config import settings
from app.core.event import eventmanager, Event
from app.log import logger
from app.schemas import ConfigChangeEventData
from app.schemas.types import EventType
from app.utils.singleton import Singleton
# 类型缓存集合,针对非容器简单类型
_complex_serializable_types = set()
_simple_serializable_types = set()
def serialize(value: Any) -> bytes:
"""
将值序列化为二进制数据,根据序列化方式标识格式
"""
def _is_container_type(t):
"""
判断是否为容器类型
"""
return t in (list, dict, tuple, set)
vt = type(value)
# 针对非容器类型使用缓存策略
if not _is_container_type(vt):
# 如果已知需要复杂序列化
if vt in _complex_serializable_types:
return b"PICKLE" + b"\x00" + pickle.dumps(value)
# 如果已知可以简单序列化
if vt in _simple_serializable_types:
json_data = json.dumps(value).encode("utf-8")
return b"JSON" + b"\x00" + json_data
# 对于未知的非容器类型,尝试简单序列化,如抛出异常,再使用复杂序列化
try:
json_data = json.dumps(value).encode("utf-8")
_simple_serializable_types.add(vt)
return b"JSON" + b"\x00" + json_data
except TypeError:
_complex_serializable_types.add(vt)
return b"PICKLE" + b"\x00" + pickle.dumps(value)
else:
# 针对容器类型,每次尝试简单序列化,不使用缓存
try:
json_data = json.dumps(value).encode("utf-8")
return b"JSON" + b"\x00" + json_data
except TypeError:
return b"PICKLE" + b"\x00" + pickle.dumps(value)
def deserialize(value: bytes) -> Any:
"""
将二进制数据反序列化为原始值,根据格式标识区分序列化方式
"""
format_marker, data = value.split(b"\x00", 1)
if format_marker == b"JSON":
return json.loads(data.decode("utf-8"))
elif format_marker == b"PICKLE":
return pickle.loads(data)
else:
raise ValueError("Unknown serialization format")
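A round-trip sketch of the marker format (values illustrative). Note that deserialize splits only on the first NUL byte, so pickled payloads containing b"\x00" are unaffected:

payload = {"title": "demo", "year": 2025}
raw = serialize(payload)            # b"JSON\x00{...}": JSON-friendly value
assert deserialize(raw) == payload

class Point:                        # not JSON-serializable, falls back to pickle
    def __init__(self, x):
        self.x = x

raw2 = serialize(Point(1))          # b"PICKLE\x00..."
assert deserialize(raw2).x == 1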
class RedisHelper(metaclass=Singleton):
"""
Redis连接和操作助手类(单例模式)
特性:
- 管理Redis连接池和客户端
- 提供序列化和反序列化功能
- 支持内存限制和淘汰策略设置
- 提供键名生成和区域管理功能
"""
def __init__(self):
"""
初始化Redis助手实例
"""
self.redis_url = settings.CACHE_BACKEND_URL
self.client = None
def _connect(self):
"""
建立Redis连接
"""
try:
if self.client is None:
self.client = redis.Redis.from_url(
self.redis_url,
decode_responses=False,
socket_timeout=30,
socket_connect_timeout=5,
health_check_interval=60,
)
# 测试连接确保Redis可用
self.client.ping()
logger.info(f"Successfully connected to Redis{self.redis_url}")
self.set_memory_limit()
except Exception as e:
logger.error(f"Failed to connect to Redis: {e}")
self.client = None
raise RuntimeError("Redis connection failed") from e
@eventmanager.register(EventType.ConfigChanged)
def handle_config_changed(self, event: Event):
"""
处理配置变更事件,更新Redis设置
:param event: 事件对象
"""
if not event:
return
event_data: ConfigChangeEventData = event.event_data
if event_data.key not in ['CACHE_BACKEND_TYPE', 'CACHE_BACKEND_URL', 'CACHE_REDIS_MAXMEMORY']:
return
logger.info("配置变更重连Redis...")
self.close()
self._connect()
def set_memory_limit(self, policy: Optional[str] = "allkeys-lru"):
"""
动态设置Redis最大内存和内存淘汰策略
:param policy: 淘汰策略(如 'allkeys-lru')
"""
try:
# 如果有显式值则直接使用(为0时不限制);如果未配置,开启BIG_MEMORY_MODE时为"1024mb",未开启时为"256mb"
maxmemory = settings.CACHE_REDIS_MAXMEMORY or ("1024mb" if settings.BIG_MEMORY_MODE else "256mb")
self.client.config_set("maxmemory", maxmemory)
self.client.config_set("maxmemory-policy", policy)
logger.debug(f"Redis maxmemory set to {maxmemory}, policy: {policy}")
except Exception as e:
logger.error(f"Failed to set Redis maxmemory or policy: {e}")
@staticmethod
def __get_region(region: Optional[str] = None):
"""
获取缓存的区
"""
return f"region:{quote(region)}" if region else "region:DEFAULT"
def __make_redis_key(self, region: str, key: str) -> str:
"""
获取缓存Key
"""
# 使用region作为缓存键的一部分
region = self.__get_region(region)
return f"{region}:key:{quote(key)}"
@staticmethod
def __get_original_key(redis_key: Union[str, bytes]) -> str:
"""
从Redis键中提取原始key
"""
try:
if isinstance(redis_key, bytes):
redis_key = redis_key.decode("utf-8")
parts = redis_key.split(":key:")
return parts[-1]
except Exception as e:
logger.warn(f"Failed to parse redis key: {redis_key}, error: {e}")
return redis_key
def set(self, key: str, value: Any, ttl: Optional[int] = None,
region: Optional[str] = "DEFAULT", **kwargs) -> None:
"""
设置缓存
:param key: 缓存的键
:param value: 缓存的值
:param ttl: 缓存的存活时间,单位秒
:param region: 缓存的区
:param kwargs: 其他参数
"""
try:
self._connect()
redis_key = self.__make_redis_key(region, key)
# 对值进行序列化
serialized_value = serialize(value)
kwargs.pop("maxsize", None)
self.client.set(redis_key, serialized_value, ex=ttl, **kwargs)
except Exception as e:
logger.error(f"Failed to set key: {key} in region: {region}, error: {e}")
def exists(self, key: str, region: Optional[str] = "DEFAULT") -> bool:
"""
判断缓存键是否存在
:param key: 缓存的键
:param region: 缓存的区
:return: 存在返回True,否则返回False
"""
try:
self._connect()
redis_key = self.__make_redis_key(region, key)
return self.client.exists(redis_key) == 1
except Exception as e:
logger.error(f"Failed to exists key: {key} region: {region}, error: {e}")
return False
def get(self, key: str, region: Optional[str] = "DEFAULT") -> Optional[Any]:
"""
获取缓存的值
:param key: 缓存的键
:param region: 缓存的区
:return: 返回缓存的值,如果缓存不存在返回None
"""
try:
self._connect()
redis_key = self.__make_redis_key(region, key)
value = self.client.get(redis_key)
if value is not None:
return deserialize(value)
return None
except Exception as e:
logger.error(f"Failed to get key: {key} in region: {region}, error: {e}")
return None
def delete(self, key: str, region: Optional[str] = "DEFAULT") -> None:
"""
删除缓存
:param key: 缓存的键
:param region: 缓存的区
"""
try:
self._connect()
redis_key = self.__make_redis_key(region, key)
self.client.delete(redis_key)
except Exception as e:
logger.error(f"Failed to delete key: {key} in region: {region}, error: {e}")
def clear(self, region: Optional[str] = None) -> None:
"""
清除指定区域的缓存或全部缓存
:param region: 缓存的区
"""
try:
self._connect()
if region:
cache_region = self.__get_region(region)
redis_key = f"{cache_region}:key:*"
with self.client.pipeline() as pipe:
for key in self.client.scan_iter(redis_key):
pipe.delete(key)
pipe.execute()
logger.info(f"Cleared Redis cache for region: {region}")
else:
self.client.flushdb()
logger.info("Cleared all Redis cache")
except Exception as e:
logger.error(f"Failed to clear cache, region: {region}, error: {e}")
def items(self, region: Optional[str] = None) -> Generator[Tuple[str, Any], None, None]:
"""
获取指定区域的所有缓存键值对
:param region: 缓存的区
:return: 返回键值对生成器
"""
try:
self._connect()
if region:
cache_region = self.__get_region(region)
redis_key = f"{cache_region}:key:*"
for key in self.client.scan_iter(redis_key):
value = self.client.get(key)
if value is not None:
yield self.__get_original_key(key), deserialize(value)
else:
for key in self.client.scan_iter("*"):
value = self.client.get(key)
if value is not None:
yield self.__get_original_key(key), deserialize(value)
except Exception as e:
logger.error(f"Failed to get items from Redis, region: {region}, error: {e}")
def test(self) -> bool:
"""
测试Redis连接性
"""
try:
self._connect()
return True
except Exception as e:
logger.error(f"Redis connection test failed: {e}")
return False
def close(self) -> None:
"""
关闭Redis客户端的连接池
"""
if self.client:
self.client.close()
self.client = None
logger.debug("Redis connection closed")
class AsyncRedisHelper(metaclass=Singleton):
"""
异步Redis连接和操作助手类(单例模式)
特性:
- 管理异步Redis连接池和客户端
- 提供序列化和反序列化功能
- 支持内存限制和淘汰策略设置
- 提供键名生成和区域管理功能
- 所有操作都是异步的
"""
# 类型缓存集合,针对非容器简单类型
_complex_serializable_types = set()
_simple_serializable_types = set()
def __init__(self):
"""
初始化异步Redis助手实例
"""
self.redis_url = settings.CACHE_BACKEND_URL
self.client: Optional[Redis] = None
async def _connect(self):
"""
建立异步Redis连接
"""
try:
if self.client is None:
self.client = Redis.from_url(
self.redis_url,
decode_responses=False,
socket_timeout=30,
socket_connect_timeout=5,
health_check_interval=60,
)
# 测试连接确保Redis可用
await self.client.ping()
logger.info(f"Successfully connected to Redis (async){self.redis_url}")
await self.set_memory_limit()
except Exception as e:
logger.error(f"Failed to connect to Redis (async): {e}")
self.client = None
raise RuntimeError("Redis async connection failed") from e
@eventmanager.register(EventType.ConfigChanged)
async def handle_config_changed(self, event: Event):
"""
处理配置变更事件,更新Redis设置
:param event: 事件对象
"""
if not event:
return
event_data: ConfigChangeEventData = event.event_data
if event_data.key not in ['CACHE_BACKEND_TYPE', 'CACHE_BACKEND_URL', 'CACHE_REDIS_MAXMEMORY']:
return
logger.info("配置变更重连Redis (async)...")
await self.close()
await self._connect()
async def set_memory_limit(self, policy: Optional[str] = "allkeys-lru"):
"""
动态设置Redis最大内存和内存淘汰策略
:param policy: 淘汰策略(如 'allkeys-lru')
"""
try:
# 如果有显式值则直接使用(为0时不限制);如果未配置,开启BIG_MEMORY_MODE时为"1024mb",未开启时为"256mb"
maxmemory = settings.CACHE_REDIS_MAXMEMORY or ("1024mb" if settings.BIG_MEMORY_MODE else "256mb")
await self.client.config_set("maxmemory", maxmemory)
await self.client.config_set("maxmemory-policy", policy)
logger.debug(f"Redis maxmemory set to {maxmemory}, policy: {policy} (async)")
except Exception as e:
logger.error(f"Failed to set Redis maxmemory or policy (async): {e}")
@staticmethod
def __get_region(region: Optional[str] = "DEFAULT"):
"""
获取缓存的区
"""
return f"region:{region}" if region else "region:default"
def __make_redis_key(self, region: str, key: str) -> str:
"""
获取缓存Key
"""
# 使用region作为缓存键的一部分
region = self.__get_region(region)
return f"{region}:key:{quote(key)}"
@staticmethod
def __get_original_key(redis_key: Union[str, bytes]) -> str:
"""
从Redis键中提取原始key
"""
try:
if isinstance(redis_key, bytes):
redis_key = redis_key.decode("utf-8")
parts = redis_key.split(":key:")
return parts[-1]
except Exception as e:
logger.warn(f"Failed to parse redis key: {redis_key}, error: {e}")
return redis_key
async def set(self, key: str, value: Any, ttl: Optional[int] = None,
region: Optional[str] = "DEFAULT", **kwargs) -> None:
"""
异步设置缓存
:param key: 缓存的键
:param value: 缓存的值
:param ttl: 缓存的存活时间,单位秒
:param region: 缓存的区
:param kwargs: 其他参数
"""
try:
await self._connect()
redis_key = self.__make_redis_key(region, key)
# 对值进行序列化
serialized_value = serialize(value)
kwargs.pop("maxsize", None)
await self.client.set(redis_key, serialized_value, ex=ttl, **kwargs)
except Exception as e:
logger.error(f"Failed to set key (async): {key} in region: {region}, error: {e}")
async def exists(self, key: str, region: Optional[str] = "DEFAULT") -> bool:
"""
异步判断缓存键是否存在
:param key: 缓存的键
:param region: 缓存的区
:return: 存在返回True,否则返回False
"""
try:
await self._connect()
redis_key = self.__make_redis_key(region, key)
result = await self.client.exists(redis_key)
return result == 1
except Exception as e:
logger.error(f"Failed to exists key (async): {key} region: {region}, error: {e}")
return False
async def get(self, key: str, region: Optional[str] = "DEFAULT") -> Optional[Any]:
"""
异步获取缓存的值
:param key: 缓存的键
:param region: 缓存的区
:return: 返回缓存的值,如果缓存不存在返回None
"""
try:
await self._connect()
redis_key = self.__make_redis_key(region, key)
value = await self.client.get(redis_key)
if value is not None:
return deserialize(value)
return None
except Exception as e:
logger.error(f"Failed to get key (async): {key} in region: {region}, error: {e}")
return None
async def delete(self, key: str, region: Optional[str] = "DEFAULT") -> None:
"""
异步删除缓存
:param key: 缓存的键
:param region: 缓存的区
"""
try:
await self._connect()
redis_key = self.__make_redis_key(region, key)
await self.client.delete(redis_key)
except Exception as e:
logger.error(f"Failed to delete key (async): {key} in region: {region}, error: {e}")
async def clear(self, region: Optional[str] = None) -> None:
"""
异步清除指定区域的缓存或全部缓存
:param region: 缓存的区
"""
try:
await self._connect()
if region:
cache_region = self.__get_region(region)
redis_key = f"{cache_region}:key:*"
async with self.client.pipeline() as pipe:
async for key in self.client.scan_iter(redis_key):
await pipe.delete(key)
await pipe.execute()
logger.info(f"Cleared Redis cache for region (async): {region}")
else:
await self.client.flushdb()
logger.info("Cleared all Redis cache (async)")
except Exception as e:
logger.error(f"Failed to clear cache (async), region: {region}, error: {e}")
async def items(self, region: Optional[str] = None) -> AsyncGenerator[Tuple[str, Any], None]:
"""
获取指定区域的所有缓存键值对
:param region: 缓存的区
:return: 返回键值对生成器
"""
try:
await self._connect()
if region:
cache_region = self.__get_region(region)
redis_key = f"{cache_region}:key:*"
async for key in self.client.scan_iter(redis_key):
value = await self.client.get(key)
if value is not None:
yield self.__get_original_key(key), deserialize(value)
else:
async for key in self.client.scan_iter("*"):
value = await self.client.get(key)
if value is not None:
yield self.__get_original_key(key), deserialize(value)
except Exception as e:
logger.error(f"Failed to get items from Redis, region: {region}, error: {e}")
async def test(self) -> bool:
"""
异步测试Redis连接性
"""
try:
await self._connect()
return True
except Exception as e:
logger.error(f"Redis async connection test failed: {e}")
return False
async def close(self) -> None:
"""
关闭异步Redis客户端的连接池
"""
if self.client:
await self.client.close()
self.client = None
logger.debug("Redis async connection closed")

View File

@@ -1,7 +1,7 @@
from threading import Thread
from typing import List, Tuple, Optional
from app.core.cache import cached, cache_backend
from app.core.cache import cached
from app.core.config import settings
from app.db.subscribe_oper import SubscribeOper
from app.db.systemconfig_oper import SystemConfigOper
@@ -111,7 +111,12 @@ class SubscribeHelper(metaclass=WeakSingleton):
if res and res.status_code == 200:
# 清除缓存
if clear_cache:
cache_backend.clear(region=self._shares_cache_region)
self.get_shares.cache_clear()
self.get_statistic.cache_clear()
self.get_share_statistics.cache_clear()
self.async_get_shares.cache_clear()
self.async_get_statistic.cache_clear()
self.async_get_share_statistics.cache_clear()
return True, ""
else:
return False, res.json().get("message")

View File

@@ -1,5 +1,3 @@
import os
import signal
from pathlib import Path
from typing import Tuple
@@ -41,8 +39,8 @@ class SystemHelper:
判断是否可以内部重启
"""
return (
Path("/var/run/docker.sock").exists()
or settings.DOCKER_CLIENT_API != "tcp://127.0.0.1:38379"
Path("/var/run/docker.sock").exists()
or settings.DOCKER_CLIENT_API != "tcp://127.0.0.1:38379"
)
@staticmethod
@@ -64,41 +62,13 @@ class SystemHelper:
if index_resolv_conf != -1:
index_second_slash = data.rfind(" ", 0, index_resolv_conf)
index_first_slash = (
data.rfind("/", 0, index_second_slash) + 1
data.rfind("/", 0, index_second_slash) + 1
)
container_id = data[index_first_slash:index_second_slash]
except Exception as e:
logger.debug(f"获取容器ID失败: {str(e)}")
return container_id.strip() if container_id else None
@staticmethod
def _check_restart_policy() -> bool:
"""
检查当前容器是否配置了自动重启策略
"""
try:
# 获取当前容器ID
container_id = SystemHelper._get_container_id()
if not container_id:
return False
# 创建 Docker 客户端
client = docker.DockerClient(base_url=settings.DOCKER_CLIENT_API)
# 获取容器信息
container = client.containers.get(container_id)
restart_policy = container.attrs.get('HostConfig', {}).get('RestartPolicy', {})
policy_name = restart_policy.get('Name', 'no')
# 检查是否有有效的重启策略
auto_restart_policies = ['always', 'unless-stopped', 'on-failure']
has_restart_policy = policy_name in auto_restart_policies
logger.info(f"容器重启策略: {policy_name}, 支持自动重启: {has_restart_policy}")
return has_restart_policy
except Exception as e:
logger.warning(f"检查重启策略失败: {str(e)}")
return False
@staticmethod
def restart() -> Tuple[bool, str]:
"""
@@ -107,24 +77,8 @@ class SystemHelper:
if not SystemUtils.is_docker():
return False, "非Docker环境无法重启"
try:
# 检查容器是否配置了自动重启策略
has_restart_policy = SystemHelper._check_restart_policy()
if has_restart_policy:
# 有重启策略,使用优雅退出方式
logger.info("检测到容器配置了自动重启策略,使用优雅重启方式...")
# 发送SIGTERM信号给当前进程触发优雅停止
os.kill(os.getpid(), signal.SIGTERM)
return True, ""
else:
# 没有重启策略使用Docker API强制重启
logger.info("容器未配置自动重启策略使用Docker API重启...")
return SystemHelper._docker_api_restart()
except Exception as err:
logger.error(f"重启失败: {str(err)}")
# 降级为Docker API重启
logger.warning("降级为Docker API重启...")
return SystemHelper._docker_api_restart()
logger.info("正在重启容器...")
return SystemHelper._docker_api_restart()
@staticmethod
def _docker_api_restart() -> Tuple[bool, str]:

View File

@@ -6,6 +6,8 @@ from urllib.parse import unquote
from torrentool.api import Torrent
from app.core.cache import FileCache
from app.core.cache import TTLCache
from app.core.config import settings
from app.core.context import Context, TorrentInfo, MediaInfo
from app.core.meta import MetaBase
@@ -15,17 +17,16 @@ from app.db.systemconfig_oper import SystemConfigOper
from app.log import logger
from app.schemas.types import MediaType, SystemConfigKey
from app.utils.http import RequestUtils
from app.utils.singleton import WeakSingleton
from app.utils.string import StringUtils
class TorrentHelper(metaclass=WeakSingleton):
class TorrentHelper:
"""
种子帮助类
"""
def __init__(self):
self._invalid_torrents = []
self._invalid_torrents = TTLCache(maxsize=128, ttl=3600 * 24)
def download_torrent(self, url: str,
cookie: Optional[str] = None,
@@ -35,27 +36,29 @@ class TorrentHelper(metaclass=WeakSingleton):
-> Tuple[Optional[Path], Optional[Union[str, bytes]], Optional[str], Optional[list], Optional[str]]:
"""
把种子下载到本地
:return: 种子保存路径、种子内容、种子主目录、种子文件清单、错误信息
:return: 种子缓存相对路径【用于索引缓存】, 种子内容、种子主目录、种子文件清单、错误信息
"""
if url.startswith("magnet:"):
return None, url, "", [], f"磁力链接"
# 构建 torrent 种子文件的保存路径
file_path = (Path(settings.TEMP_PATH) / StringUtils.md5_hash(url)).with_suffix(".torrent")
if file_path.exists():
# 构建 torrent 种子文件的缓存路径
cache_path = Path(StringUtils.md5_hash(url)).with_suffix(".torrent")
# 缓存处理器
cache_backend = FileCache()
# 读取缓存的种子文件
torrent_content = cache_backend.get(cache_path.as_posix(), region="torrents")
if torrent_content:
# 缓存已存在
try:
# 获取种子目录和文件清单
folder_name, file_list = self.get_torrent_info(file_path)
folder_name, file_list = self.get_fileinfo_from_torrent_content(torrent_content)
# 无法获取信息,则认为缓存文件无效
if not folder_name and not file_list:
raise ValueError("无效的缓存种子文件")
# 获取种子数据
content = file_path.read_bytes()
# 成功拿到种子数据
return file_path, content, folder_name, file_list, ""
return cache_path, torrent_content, folder_name, file_list, ""
except Exception as err:
logger.error(f"处理缓存的种子文件 {file_path} 时出错: {err},将重新下载")
file_path.unlink(missing_ok=True)
# 请求种子文件
logger.error(f"处理缓存的种子文件 {cache_path} 时出错: {err},将重新下载")
# 下载种子文件
req = RequestUtils(
ua=ua,
cookies=cookie,
@@ -74,11 +77,11 @@ class TorrentHelper(metaclass=WeakSingleton):
).get_res(url=url, allow_redirects=False)
if req and req.status_code == 200:
if not req.content:
return None, None, "", [], "未下载到种子数据"
return cache_path, None, "", [], "未下载到种子数据"
# 解析内容格式
if req.content.startswith(b"magnet:"):
# 磁力链接
return None, req.text, "", [], f"获取到磁力链接"
return cache_path, req.text, "", [], f"获取到磁力链接"
if "下载种子文件".encode("utf-8") in req.content:
# 首次下载提示页面
skip_flag = False
@@ -116,34 +119,34 @@ class TorrentHelper(metaclass=WeakSingleton):
except Exception as err:
logger.warn(f"触发了站点首次种子下载,尝试自动跳过时出现错误:{str(err)},链接:{url}")
if not skip_flag:
return None, None, "", [], "种子数据有误请确认链接是否正确如为PT站点则需手工在站点下载一次种子"
return cache_path, None, "", [], "种子数据有误请确认链接是否正确如为PT站点则需手工在站点下载一次种子"
# 种子内容
if req.content:
# 检查是不是种子文件,如果不是仍然抛出异常
try:
# 保存到文件
file_path.write_bytes(req.content)
# 获取种子目录和文件清单
folder_name, file_list = self.get_torrent_info(file_path)
folder_name, file_list = self.get_fileinfo_from_torrent_content(req.content)
if file_list:
# 保存到缓存
cache_backend.set(cache_path.as_posix(), req.content, region="torrents")
# 成功拿到种子数据
return file_path, req.content, folder_name, file_list, ""
return cache_path, req.content, folder_name, file_list, ""
except Exception as err:
logger.error(f"种子文件解析失败:{str(err)}")
# 种子数据仍然错误
return None, None, "", [], "种子数据有误,请确认链接是否正确"
return cache_path, None, "", [], "种子数据有误,请确认链接是否正确"
# 返回失败
return None, None, "", [], ""
return cache_path, None, "", [], ""
elif req is None:
return None, None, "", [], "无法打开链接"
return cache_path, None, "", [], "无法打开链接"
elif req.status_code == 429:
return None, None, "", [], "触发站点流控,请稍后重试"
return cache_path, None, "", [], "触发站点流控,请稍后重试"
else:
# 把错误的种子记下来,避免重复使用
self.add_invalid(url)
return None, None, "", [], f"下载种子出错,状态码:{req.status_code}"
return cache_path, None, "", [], f"下载种子出错,状态码:{req.status_code}"
@staticmethod
def get_torrent_info(torrent_path: Path) -> Tuple[str, List[str]]:
def get_torrent_info(self, torrent_path: Path) -> Tuple[str, List[str]]:
"""
获取种子文件的文件夹名和文件清单
:param torrent_path: 种子文件路径
@@ -154,32 +157,65 @@ class TorrentHelper(metaclass=WeakSingleton):
try:
torrentinfo = Torrent.from_file(torrent_path)
# 获取文件清单
if (not torrentinfo.files
or (len(torrentinfo.files) == 1
and torrentinfo.files[0].name == torrentinfo.name)):
# 单文件种子目录名返回空
folder_name = ""
# 单文件种子
file_list = [torrentinfo.name]
else:
# 目录名
folder_name = torrentinfo.name
# 文件清单,如果一级目录与种子名相同则去掉
file_list = []
for fileinfo in torrentinfo.files:
file_path = Path(fileinfo.name)
# 根路径
root_path = file_path.parts[0]
if root_path == folder_name:
file_list.append(str(file_path.relative_to(root_path)))
else:
file_list.append(fileinfo.name)
logger.debug(f"解析种子:{torrent_path.name} => 目录:{folder_name},文件清单:{file_list}")
return folder_name, file_list
return self.get_fileinfo_from_torrent(torrentinfo)
except Exception as err:
logger.error(f"种子文件解析失败:{str(err)}")
return "", []
@staticmethod
def get_fileinfo_from_torrent(torrent: Torrent) -> Tuple[str, List[str]]:
"""
从种子文件中获取文件清单
:param torrent: 种子文件对象
:return: 文件夹名、文件清单,单文件种子返回空文件夹名
"""
if not torrent or not torrent.files:
return "", []
# 获取文件清单
if len(torrent.files) == 1 and torrent.files[0].name == torrent.name:
# 单文件种子目录名返回空
folder_name = ""
# 单文件种子
file_list = [torrent.name]
else:
# 目录名
folder_name = torrent.name
# 文件清单,如果一级目录与种子名相同则去掉
file_list = []
for fileinfo in torrent.files:
file_path = Path(fileinfo.name)
# 根路径
root_path = file_path.parts[0]
if root_path == folder_name:
file_list.append(str(file_path.relative_to(root_path)))
else:
file_list.append(fileinfo.name)
logger.debug(f"解析种子:{torrent.name} => 目录:{folder_name},文件清单:{file_list}")
return folder_name, file_list
def get_fileinfo_from_torrent_content(self, torrent_content: Union[str, bytes]) -> Tuple[str, List[str]]:
"""
从种子内容中获取文件夹名和文件清单
:param torrent_content: 种子内容
:return: 文件夹名、文件清单,单文件种子返回空文件夹名
"""
if not torrent_content:
return "", []
# 检查是否为磁力链接
if StringUtils.is_magnet_link(torrent_content):
return "", []
try:
# 解析种子内容
torrentinfo = Torrent.from_string(torrent_content)
# 获取文件清单
return self.get_fileinfo_from_torrent(torrentinfo)
except Exception as err:
logger.error(f"种子内容解析失败:{str(err)}")
return "", []
@staticmethod
def get_url_filename(req: Any, url: str) -> str:
"""
@@ -316,7 +352,7 @@ class TorrentHelper(metaclass=WeakSingleton):
添加无效种子
"""
if url not in self._invalid_torrents:
self._invalid_torrents.append(url)
self._invalid_torrents[url] = True
@staticmethod
def match_torrent(mediainfo: MediaInfo, torrent_meta: MetaBase, torrent: TorrentInfo) -> bool:

View File

@@ -1,7 +1,7 @@
import json
from typing import List, Tuple, Optional
from app.core.cache import cached, cache_backend
from app.core.cache import cached
from app.core.config import settings
from app.db.models import Workflow
from app.db.workflow_oper import WorkflowOper
@@ -89,7 +89,8 @@ class WorkflowHelper(metaclass=WeakSingleton):
if success:
# 清除缓存
if clear_cache:
cache_backend.clear(region=self._shares_cache_region)
self.get_shares.cache_clear()
self.async_get_shares.cache_clear()
return True, ""
else:
try:

View File

@@ -938,6 +938,8 @@ class DoubanModule(_ModuleBase):
"""
搜索人物信息
"""
if settings.SEARCH_SOURCE and "douban" not in settings.SEARCH_SOURCE:
return None
if not name:
return []
result = self.doubanapi.person_search(keyword=name)
@@ -956,6 +958,8 @@ class DoubanModule(_ModuleBase):
"""
搜索人物信息(异步版本)
"""
if settings.SEARCH_SOURCE and "douban" not in settings.SEARCH_SOURCE:
return None
if not name:
return []
result = await self.doubanapi.async_person_search(keyword=name)

View File

@@ -1,23 +1,19 @@
import pickle
import random
import time
import traceback
from pathlib import Path
from threading import RLock
from typing import Optional
from app.core.cache import TTLCache
from app.core.config import settings
from app.core.meta import MetaBase
from app.core.metainfo import MetaInfo
from app.log import logger
from app.schemas.types import MediaType
from app.utils.singleton import WeakSingleton
lock = RLock()
CACHE_EXPIRE_TIMESTAMP_STR = "cache_expire_timestamp"
EXPIRE_TIMESTAMP = settings.CONF.meta
class DoubanCache(metaclass=WeakSingleton):
"""
@@ -30,18 +26,26 @@ class DoubanCache(metaclass=WeakSingleton):
}
"""
# TMDB缓存过期
_tmdb_cache_expire: bool = True
_douban_cache_expire: bool = True
def __init__(self):
self._meta_path = settings.TEMP_PATH / "__douban_cache__"
self._meta_data = self.__load(self._meta_path)
self.maxsize = settings.CONF.douban
self.ttl = settings.CONF.meta
self.region = "__douban_cache__"
self._meta_filepath = settings.TEMP_PATH / self.region
# 初始化缓存
self._cache = TTLCache(region=self.region, maxsize=self.maxsize, ttl=self.ttl)
# 非Redis时,加载本地缓存数据
if not self._cache.is_redis():
for key, value in self.__load(self._meta_filepath).items():
self._cache.set(key, value)
def clear(self):
"""
清空所有TMDB缓存
清空所有豆瓣缓存
"""
with lock:
self._meta_data = {}
self._cache.clear()
@staticmethod
def __get_key(meta: MetaBase) -> str:
@@ -57,15 +61,7 @@ class DoubanCache(metaclass=WeakSingleton):
"""
key = self.__get_key(meta)
with lock:
info: dict = self._meta_data.get(key)
if info:
expire = info.get(CACHE_EXPIRE_TIMESTAMP_STR)
if not expire or int(time.time()) < expire:
info[CACHE_EXPIRE_TIMESTAMP_STR] = int(time.time()) + EXPIRE_TIMESTAMP
self._meta_data[key] = info
elif expire and self._tmdb_cache_expire:
self.delete(key)
return info or {}
return self._cache.get(key) or {}
def delete(self, key: str) -> dict:
"""
@@ -74,38 +70,26 @@ class DoubanCache(metaclass=WeakSingleton):
@return: 被删除的缓存内容
"""
with lock:
return self._meta_data.pop(key, {})
def delete_by_doubanid(self, doubanid: str) -> None:
"""
清空对应豆瓣ID的所有缓存记录以强制更新TMDB中最新的数据
"""
for key in list(self._meta_data):
if self._meta_data.get(key, {}).get("id") == doubanid:
with lock:
self._meta_data.pop(key)
def delete_unknown(self) -> None:
"""
清除未识别的缓存记录以便重新搜索TMDB
"""
for key in list(self._meta_data):
if self._meta_data.get(key, {}).get("id") == "0":
with lock:
self._meta_data.pop(key)
redis_data = self._cache.get(key)
if redis_data:
self._cache.delete(key)
return redis_data
return {}
def modify(self, key: str, title: str) -> dict:
"""
删除缓存信息
修改缓存信息
@param key: 缓存key
@param title: 标题
@return: 被修改后缓存内容
"""
with lock:
if self._meta_data.get(key):
self._meta_data[key]['title'] = title
self._meta_data[key][CACHE_EXPIRE_TIMESTAMP_STR] = int(time.time()) + EXPIRE_TIMESTAMP
return self._meta_data.get(key)
redis_data = self._cache.get(key)
if redis_data:
redis_data["title"] = title
self._cache.set(key, redis_data)
return redis_data
return {}
@staticmethod
def __load(path: Path) -> dict:
@@ -117,119 +101,72 @@ class DoubanCache(metaclass=WeakSingleton):
with open(path, 'rb') as f:
data = pickle.load(f)
return data
return {}
except Exception as e:
logger.error(f"加载缓存失败: {str(e)} - {traceback.format_exc()}")
return {}
def update(self, meta: MetaBase, info: dict) -> None:
"""
新增或更新缓存条目
"""
with lock:
if info:
# 缓存标题
cache_title = info.get("title")
# 缓存年份
cache_year = info.get('year')
# 类型
if isinstance(info.get('media_type'), MediaType):
mtype = info.get('media_type')
elif info.get("type"):
mtype = MediaType.MOVIE if info.get("type") == "movie" else MediaType.TV
else:
meta = MetaInfo(cache_title)
if meta.begin_season:
mtype = MediaType.TV
else:
mtype = MediaType.MOVIE
# 海报
poster_path = info.get("pic", {}).get("large")
if not poster_path and info.get("cover_url"):
poster_path = info.get("cover_url")
if not poster_path and info.get("cover"):
poster_path = info.get("cover").get("url")
self._meta_data[self.__get_key(meta)] = {
"id": info.get("id"),
"type": mtype,
"year": cache_year,
"title": cache_title,
"poster_path": poster_path,
CACHE_EXPIRE_TIMESTAMP_STR: int(time.time()) + EXPIRE_TIMESTAMP
}
elif info is not None:
# None时不缓存,此时代表网络错误,允许重复请求
self._meta_data[self.__get_key(meta)] = {'id': "0"}
with lock:
self._cache.set(self.__get_key(meta), {
"id": info.get("id"),
"type": mtype,
"year": cache_year,
"title": cache_title,
"poster_path": poster_path
})
elif info is not None:
# None时不缓存,此时代表网络错误,允许重复请求
with lock:
self._cache.set(self.__get_key(meta), {
"id": 0
})
def save(self, force: Optional[bool] = False) -> None:
"""
保存缓存数据到文件
"""
# Redis不需要保存到本地文件
if self._cache.is_redis():
return
meta_data = self.__load(self._meta_path)
new_meta_data = {k: v for k, v in self._meta_data.items() if v.get("id")}
# 本地文件
meta_data = self.__load(self._meta_filepath)
# 当前缓存数据(去除无法识别)
new_meta_data = {k: v for k, v in self._cache.items() if v.get("id")}
if not force \
and not self._random_sample(new_meta_data) \
and meta_data.keys() == new_meta_data.keys():
return
with open(self._meta_path, 'wb') as f:
pickle.dump(new_meta_data, f, pickle.HIGHEST_PROTOCOL) # noqa
def _random_sample(self, new_meta_data: dict) -> bool:
"""
采样分析是否需要保存
"""
ret = False
if len(new_meta_data) < 25:
keys = list(new_meta_data.keys())
for k in keys:
info = new_meta_data.get(k)
expire = info.get(CACHE_EXPIRE_TIMESTAMP_STR)
if not expire:
ret = True
info[CACHE_EXPIRE_TIMESTAMP_STR] = int(time.time()) + EXPIRE_TIMESTAMP
elif int(time.time()) >= expire:
ret = True
if self._tmdb_cache_expire:
new_meta_data.pop(k)
else:
count = 0
keys = random.sample(sorted(new_meta_data.keys()), 25)
for k in keys:
info = new_meta_data.get(k)
expire = info.get(CACHE_EXPIRE_TIMESTAMP_STR)
if not expire:
ret = True
info[CACHE_EXPIRE_TIMESTAMP_STR] = int(time.time()) + EXPIRE_TIMESTAMP
elif int(time.time()) >= expire:
ret = True
if self._tmdb_cache_expire:
new_meta_data.pop(k)
count += 1
if count >= 5:
ret |= self._random_sample(new_meta_data)
return ret
def get_title(self, key: str) -> Optional[str]:
"""
获取缓存的标题
"""
cache_media_info = self._meta_data.get(key)
if not cache_media_info or not cache_media_info.get("id"):
return None
return cache_media_info.get("title")
def set_title(self, key: str, cn_title: str) -> None:
"""
重新设置缓存标题
"""
cache_media_info = self._meta_data.get(key)
if not cache_media_info:
return
self._meta_data[key]['title'] = cn_title
# 写入本地
with open(self._meta_filepath, 'wb') as f:
pickle.dump(new_meta_data, f, pickle.HIGHEST_PROTOCOL) # noqa
def __del__(self):
self.save()
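A read-through sketch of the migrated cache (id, title and info shape are illustrative):

cache = DoubanCache()
meta = MetaInfo("肖申克的救赎")
info = cache.get(meta)
if not info:
    info = {"id": "1292052", "title": "肖申克的救赎", "type": "movie"}  # assumed shape
    cache.update(meta, info)
cache.save(force=True)  # no-op when the TTLCache backend is Redis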

View File

@@ -282,9 +282,8 @@ class EmbyModule(_ModuleBase, _MediaServerBase[Emby]):
episodes=episodes
) for season, episodes in seasoninfo.items()]
def mediaserver_playing(self, server: str,
count: Optional[int] = 20, username: Optional[str] = None) -> List[
schemas.MediaServerPlayItem]:
def mediaserver_playing(self, server: str, count: Optional[int] = 20,
username: Optional[str] = None) -> List[schemas.MediaServerPlayItem]:
"""
获取媒体服务器正在播放信息
"""
@@ -302,9 +301,8 @@ class EmbyModule(_ModuleBase, _MediaServerBase[Emby]):
return None
return server_obj.get_play_url(item_id)
def mediaserver_latest(self, server: Optional[str] = None,
count: Optional[int] = 20, username: Optional[str] = None) -> List[
schemas.MediaServerPlayItem]:
def mediaserver_latest(self, server: Optional[str] = None, count: Optional[int] = 20,
username: Optional[str] = None) -> List[schemas.MediaServerPlayItem]:
"""
获取媒体服务器最新入库条目
"""

View File

@@ -167,7 +167,7 @@ class Emby:
image=image,
link=f'{self._playhost or self._host}web/index.html'
f'#!/videos?serverId={self.serverid}&parentId={library.get("Id")}',
server_type= "emby"
server_type="emby"
)
)
return libraries
@@ -497,7 +497,7 @@ class Emby:
logger.info(f"影片图片链接:{res.url}")
return res.url
else:
logger.error("Items/Id/Images 未获取到返回数据或无该影片{}图片".format(image_type))
logger.info("Items/Id/Images 未获取到返回数据或无该影片{}图片".format(image_type))
return None
except Exception as e:
logger.error(f"连接Items/Id/Images出错" + str(e))

View File

@@ -1,10 +1,39 @@
from abc import ABCMeta, abstractmethod
from pathlib import Path
from typing import Optional, List, Dict, Tuple
from typing import Optional, List, Dict, Tuple, Callable, Union
from tqdm import tqdm
from app import schemas
from app.helper.progress import ProgressHelper
from app.helper.storage import StorageHelper
from app.log import logger
from app.utils.crypto import HashUtils
def transfer_process(path: str) -> Callable[[Union[int, float]], None]:
"""
传输进度回调
"""
pbar = tqdm(total=100, desc="整理进度", unit="%")
progress = ProgressHelper(HashUtils.md5(path))
progress.start()
def update_progress(percent: Union[int, float]) -> None:
"""
更新进度百分比
"""
percent_value = int(percent)
pbar.n = percent_value
# 更新进度
pbar.refresh()
progress.update(value=percent_value, text=f"{path} 进度:{percent_value}%")
# 完成时结束
if percent_value >= 100:
progress.end()
pbar.close()
return update_progress
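A sketch of how a storage backend drives the returned callback (path and sizes illustrative):

callback = transfer_process("/downloads/movie.mkv")  # hypothetical path
file_size, done = 10 * 1024 * 1024, 0
while done < file_size:
    done += 1024 * 1024
    callback(done * 100 / file_size)  # updates both tqdm and the keyed ProgressHelper
# reaching 100 ends the ProgressHelper entry and closes the tqdm bar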
class StorageBase(metaclass=ABCMeta):

View File

@@ -1,6 +1,5 @@
import base64
import hashlib
import io
import secrets
import threading
import time
@@ -8,12 +7,12 @@ from pathlib import Path
from typing import List, Optional, Tuple, Union
import requests
from tqdm import tqdm
from app import schemas
from app.core.config import settings
from app.core.config import settings, global_vars
from app.log import logger
from app.modules.filemanager import StorageBase
from app.modules.filemanager.storages import transfer_process
from app.schemas.types import StorageSchema
from app.utils.singleton import WeakSingleton
from app.utils.string import StringUtils
@@ -46,6 +45,9 @@ class AliPan(StorageBase, metaclass=WeakSingleton):
# 基础url
base_url = "https://openapi.alipan.com"
# 文件块大小默认10MB
chunk_size = 10 * 1024 * 1024
def __init__(self):
super().__init__()
self._auth_state = {}
@@ -580,29 +582,6 @@ class AliPan(StorageBase, metaclass=WeakSingleton):
raise Exception(resp.get("message"))
return resp
@staticmethod
def _log_progress(desc: str, total: int) -> tqdm:
"""
创建一个可以输出到日志的进度条
"""
class TqdmToLogger(io.StringIO):
def write(s, buf): # noqa
buf = buf.strip('\r\n\t ')
if buf:
logger.info(buf)
return tqdm(
total=total,
unit='B',
unit_scale=True,
desc=desc,
file=TqdmToLogger(),
mininterval=1.0,
maxinterval=5.0,
miniters=1
)
def upload(self, target_dir: schemas.FileItem, local_path: Path,
new_name: Optional[str] = None) -> Optional[schemas.FileItem]:
"""
@@ -643,21 +622,26 @@ class AliPan(StorageBase, metaclass=WeakSingleton):
# 4. 初始化进度条
logger.info(f"【阿里云盘】开始上传: {local_path} -> {target_path},分片数:{len(part_info_list)}")
progress_bar = self._log_progress(f"【阿里云盘】{target_name} 上传进度", file_size)
progress_callback = transfer_process(local_path.as_posix())
# 5. 分片上传循环
uploaded_size = 0
with open(local_path, 'rb') as f:
for part_info in part_info_list:
part_num = part_info['part_number']
if global_vars.is_transfer_stopped(local_path.as_posix()):
logger.info(f"【阿里云盘】{target_name} 上传已取消!")
return None
# 计算分片参数
part_num = part_info['part_number']
start = (part_num - 1) * chunk_size
end = min(start + chunk_size, file_size)
current_chunk_size = end - start
# 更新进度条(已存在的分片)
if part_num in uploaded_parts:
progress_bar.update(current_chunk_size)
uploaded_size += current_chunk_size
progress_callback((uploaded_size * 100) / file_size)
continue
# 准备分片数据
@@ -675,7 +659,6 @@ class AliPan(StorageBase, metaclass=WeakSingleton):
upload_url = new_urls[0]['upload_url']
else:
upload_url = part_info['upload_url']
# 执行上传
logger.info(
f"【阿里云盘】开始 第{attempt + 1}次 上传 {target_name} 分片 {part_num} ...")
@@ -694,13 +677,13 @@ class AliPan(StorageBase, metaclass=WeakSingleton):
# 处理上传结果
if success:
uploaded_parts.add(part_num)
progress_bar.update(current_chunk_size)
uploaded_size += current_chunk_size
progress_callback((uploaded_size * 100) / file_size)
else:
raise Exception(f"【阿里云盘】{target_name} 分片 {part_num} 上传失败!")
# 6. 关闭进度条
if progress_bar:
progress_bar.close()
progress_callback(100)
# 7. 完成上传
result = self._complete_upload(drive_id=target_dir.drive_id, file_id=file_id, upload_id=upload_id)
@@ -712,7 +695,7 @@ class AliPan(StorageBase, metaclass=WeakSingleton):
def download(self, fileitem: schemas.FileItem, path: Path = None) -> Optional[Path]:
"""
限速处理的下载
实时进度显示的下载
"""
download_info = self._request_api(
"POST",
@@ -723,14 +706,57 @@ class AliPan(StorageBase, metaclass=WeakSingleton):
}
)
if not download_info:
logger.error(f"【阿里云盘】获取下载链接失败: {fileitem.name}")
return None
download_url = download_info.get("url")
if not download_url:
logger.error(f"【阿里云盘】下载链接为空: {fileitem.name}")
return None
local_path = path or settings.TEMP_PATH / fileitem.name
with requests.get(download_url, stream=True) as r:
r.raise_for_status()
with open(local_path, "wb") as f:
for chunk in r.iter_content(chunk_size=8192):
f.write(chunk)
# 获取文件大小
file_size = fileitem.size
# 初始化进度条
logger.info(f"【阿里云盘】开始下载: {fileitem.name} -> {local_path}")
progress_callback = transfer_process(Path(fileitem.path).as_posix())
try:
with requests.get(download_url, stream=True) as r:
r.raise_for_status()
downloaded_size = 0
with open(local_path, "wb") as f:
for chunk in r.iter_content(chunk_size=self.chunk_size):
if global_vars.is_transfer_stopped(fileitem.path):
logger.info(f"【阿里云盘】{fileitem.path} 下载已取消!")
return None
if chunk:
f.write(chunk)
# 更新进度
downloaded_size += len(chunk)
if file_size:
progress = (downloaded_size * 100) / file_size
progress_callback(progress)
# 完成下载
progress_callback(100)
logger.info(f"【阿里云盘】下载完成: {fileitem.name}")
except requests.exceptions.RequestException as e:
logger.error(f"【阿里云盘】下载网络错误: {fileitem.name} - {str(e)}")
# 删除可能部分下载的文件
if local_path.exists():
local_path.unlink()
return None
except Exception as e:
logger.error(f"【阿里云盘】下载失败: {fileitem.name} - {str(e)}")
# 删除可能部分下载的文件
if local_path.exists():
local_path.unlink()
return None
return local_path
def check(self) -> bool:

View File

@@ -1,4 +1,5 @@
import json
import time
from datetime import datetime
from pathlib import Path
from typing import Optional, List
@@ -7,9 +8,9 @@ import requests
from app import schemas
from app.core.cache import cached
from app.core.config import settings
from app.core.config import settings, global_vars
from app.log import logger
from app.modules.filemanager.storages import StorageBase
from app.modules.filemanager.storages import StorageBase, transfer_process
from app.schemas.types import StorageSchema
from app.utils.http import RequestUtils
from app.utils.singleton import WeakSingleton
@@ -31,6 +32,7 @@ class Alist(StorageBase, metaclass=WeakSingleton):
"move": "移动",
}
# 快照检查目录修改时间
snapshot_check_folder_modtime = settings.OPENLIST_SNAPSHOT_CHECK_FOLDER_MODTIME
def __init__(self):
@@ -42,6 +44,17 @@ class Alist(StorageBase, metaclass=WeakSingleton):
"""
self.__generate_token.cache_clear() # noqa
def _delay_get_item(self, path: Path) -> Optional[schemas.FileItem]:
"""
自动延迟重试 get_item 模块
"""
for _ in range(2):
time.sleep(2)
fileitem = self.get_item(path)
if fileitem:
return fileitem
return None
@property
def __get_base_url(self) -> str:
"""
@@ -269,7 +282,7 @@ class Alist(StorageBase, metaclass=WeakSingleton):
logger.warn(f'【OpenList】创建目录 {path} 失败,错误信息:{result["message"]}')
return None
return self.get_item(path)
return self._delay_get_item(path)
def get_folder(self, path: Path) -> Optional[schemas.FileItem]:
"""
@@ -560,6 +573,9 @@ class Alist(StorageBase, metaclass=WeakSingleton):
r.raise_for_status()
with open(local_path, "wb") as f:
for chunk in r.iter_content(chunk_size=8192):
if global_vars.is_transfer_stopped(fileitem.path):
logger.info(f"【OpenList】{fileitem.path} 下载已取消!")
return None
f.write(chunk)
if local_path.exists():
@@ -570,36 +586,81 @@ class Alist(StorageBase, metaclass=WeakSingleton):
self, fileitem: schemas.FileItem, path: Path, new_name: Optional[str] = None, task: bool = False
) -> Optional[schemas.FileItem]:
"""
上传文件
上传文件(带进度)
:param fileitem: 上传目录项
:param path: 本地文件路径
:param new_name: 上传后文件名
:param task: 是否为任务,默认为False,避免未完成上传时对文件进行操作
"""
encoded_path = UrlUtils.quote((Path(fileitem.path) / path.name).as_posix())
headers = self.__get_header_with_token()
headers.setdefault("Content-Type", "application/octet-stream")
headers.setdefault("As-Task", str(task).lower())
headers.setdefault("File-Path", encoded_path)
with open(path, "rb") as f:
resp = RequestUtils(headers=headers).put_res(
self.__get_api_url("/api/fs/put"),
data=f,
)
try:
# 获取文件大小
target_name = new_name or path.name
target_path = Path(fileitem.path) / target_name
if resp is None:
logger.warn(f"【OpenList】请求上传文件 {path} 失败")
# 初始化进度回调
progress_callback = transfer_process(path.as_posix())
# 准备上传请求
encoded_path = UrlUtils.quote(target_path.as_posix())
headers = self.__get_header_with_token()
headers.setdefault("Content-Type", "application/octet-stream")
headers.setdefault("As-Task", str(task).lower())
headers.setdefault("File-Path", encoded_path)
# 创建自定义的文件流,支持进度回调
class ProgressFileReader:
def __init__(self, file_path: Path, callback):
self.file = open(file_path, 'rb')
self.callback = callback
self.uploaded_size = 0
self.file_size = file_path.stat().st_size
def read(self, size=-1):
if global_vars.is_transfer_stopped(path.as_posix()):
logger.info(f"【OpenList】{path} 上传已取消!")
return None
chunk = self.file.read(size)
if chunk:
self.uploaded_size += len(chunk)
if self.callback:
percent = (self.uploaded_size * 100) / self.file_size
self.callback(percent)
return chunk
def close(self):
self.file.close()
# 使用自定义文件流上传
progress_reader = ProgressFileReader(path, progress_callback)
try:
resp = RequestUtils(headers=headers).put_res(
self.__get_api_url("/api/fs/put"),
data=progress_reader,
)
finally:
progress_reader.close()
if resp is None:
logger.warn(f"【OpenList】请求上传文件 {path} 失败")
return None
if resp.status_code != 200:
logger.warn(f"【OpenList】请求上传文件 {path} 失败,状态码:{resp.status_code}")
return None
# 完成上传
progress_callback(100)
# 获取上传后的文件项
new_item = self._delay_get_item(target_path)
if new_item and new_name and new_name != path.name:
if self.rename(new_item, new_name):
return self._delay_get_item(Path(new_item.path).with_name(new_name))
return new_item
except Exception as e:
logger.error(f"【OpenList】上传文件 {path} 失败:{e}")
return None
if resp.status_code != 200:
logger.warn(f"【OpenList】请求上传文件 {path} 失败,状态码:{resp.status_code}")
return None
new_item = self.get_item(Path(fileitem.path) / path.name)
if new_item and new_name and new_name != path.name:
if self.rename(new_item, new_name):
return self.get_item(Path(new_item.path).with_name(new_name))
return new_item
def detail(self, fileitem: schemas.FileItem) -> Optional[schemas.FileItem]:
"""
@@ -658,9 +719,9 @@ class Alist(StorageBase, metaclass=WeakSingleton):
return False
# 重命名
if fileitem.name != new_name:
self.rename(
self.get_item(path / fileitem.name), new_name
)
new_item = self._delay_get_item(path / fileitem.name)
if new_item:
self.rename(new_item, new_name)
return True
def move(self, fileitem: schemas.FileItem, path: Path, new_name: str) -> bool:

View File

@@ -3,9 +3,10 @@ from pathlib import Path
from typing import Optional, List
from app import schemas
from app.core.config import global_vars
from app.helper.directory import DirectoryHelper
from app.log import logger
from app.modules.filemanager.storages import StorageBase
from app.modules.filemanager.storages import StorageBase, transfer_process
from app.schemas.types import StorageSchema
from app.utils.system import SystemUtils
@@ -25,6 +26,9 @@ class LocalStorage(StorageBase):
"softlink": "软链接"
}
# 文件块大小默认100MB
chunk_size = 100 * 1024 * 1024
def init_storage(self):
"""
初始化
@@ -95,7 +99,7 @@ class LocalStorage(StorageBase):
# 遍历目录
path_obj = Path(path)
if not path_obj.exists():
logger.warn(f"local】目录不存在:{path}")
logger.warn(f"本地】目录不存在:{path}")
return []
# 如果是文件
@@ -167,7 +171,7 @@ class LocalStorage(StorageBase):
else:
shutil.rmtree(path_obj, ignore_errors=True)
except Exception as e:
logger.error(f"local】删除文件失败:{e}")
logger.error(f"本地】删除文件失败:{e}")
return False
return True
@@ -181,7 +185,7 @@ class LocalStorage(StorageBase):
try:
path_obj.rename(path_obj.parent / name)
except Exception as e:
logger.error(f"local】重命名文件失败:{e}")
logger.error(f"本地】重命名文件失败:{e}")
return False
return True
@@ -191,21 +195,94 @@ class LocalStorage(StorageBase):
"""
return Path(fileitem.path)
def upload(self, fileitem: schemas.FileItem, path: Path,
new_name: Optional[str] = None) -> Optional[schemas.FileItem]:
def _copy_with_progress(self, src: Path, dest: Path):
"""
上传文件
:param fileitem: 上传目录项
:param path: 本地文件路径
:param new_name: 上传后文件名
分块复制文件并回调进度
"""
dir_path = Path(fileitem.path)
target_path = dir_path / (new_name or path.name)
code, message = SystemUtils.move(path, target_path)
if code != 0:
logger.error(f"【local】移动文件失败{message}")
return None
return self.get_item(target_path)
total_size = src.stat().st_size
copied_size = 0
progress_callback = transfer_process(src.as_posix())
try:
with open(src, "rb") as fsrc, open(dest, "wb") as fdst:
while True:
if global_vars.is_transfer_stopped(src.as_posix()):
logger.info(f"【本地】{src} 复制已取消!")
return False
buf = fsrc.read(self.chunk_size)
if not buf:
break
fdst.write(buf)
copied_size += len(buf)
# 更新进度
if progress_callback:
percent = copied_size / total_size * 100
progress_callback(percent)
# 保留文件时间戳、权限等信息
shutil.copystat(src, dest)
return True
except Exception as e:
logger.error(f"【本地】复制文件 {src} 失败:{e}")
return False
finally:
progress_callback(100)
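The same chunked pattern reduced to a standalone sketch (chunk size and paths illustrative):

from pathlib import Path

def copy_with_callback(src: Path, dst: Path, callback, chunk: int = 1024 * 1024) -> None:
    total = src.stat().st_size or 1  # guard against division by zero on empty files
    done = 0
    with open(src, "rb") as fi, open(dst, "wb") as fo:
        while (buf := fi.read(chunk)):
            fo.write(buf)
            done += len(buf)
            callback(done * 100 / total)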
def upload(
self,
fileitem: schemas.FileItem,
path: Path,
new_name: Optional[str] = None
) -> Optional[schemas.FileItem]:
"""
上传文件(带进度)
"""
try:
dir_path = Path(fileitem.path)
target_path = dir_path / (new_name or path.name)
if self._copy_with_progress(path, target_path):
# 上传删除源文件
path.unlink()
return self.get_item(target_path)
except Exception as err:
logger.error(f"【本地】移动文件失败:{err}")
return None
def copy(
self,
fileitem: schemas.FileItem,
path: Path,
new_name: str
) -> bool:
"""
复制文件(带进度)
"""
try:
src = Path(fileitem.path)
dest = path / new_name
if self._copy_with_progress(src, dest):
return True
except Exception as err:
logger.error(f"【本地】复制文件失败:{err}")
return False
def move(
self,
fileitem: schemas.FileItem,
path: Path,
new_name: str
) -> bool:
"""
移动文件(带进度)
"""
try:
src = Path(fileitem.path)
dest = path / new_name
if self._copy_with_progress(src, dest):
# 复制成功删除源文件
src.unlink()
return True
except Exception as err:
logger.error(f"【本地】移动文件失败:{err}")
return False
def link(self, fileitem: schemas.FileItem, target_file: Path) -> bool:
"""
@@ -214,7 +291,7 @@ class LocalStorage(StorageBase):
file_path = Path(fileitem.path)
code, message = SystemUtils.link(file_path, target_file)
if code != 0:
logger.error(f"local】硬链接文件失败:{message}")
logger.error(f"本地】硬链接文件失败:{message}")
return False
return True
@@ -225,35 +302,7 @@ class LocalStorage(StorageBase):
file_path = Path(fileitem.path)
code, message = SystemUtils.softlink(file_path, target_file)
if code != 0:
logger.error(f"local】软链接文件失败:{message}")
return False
return True
def copy(self, fileitem: schemas.FileItem, path: Path, new_name: str) -> bool:
"""
复制文件
:param fileitem: 文件项
:param path: 目标目录
:param new_name: 新文件名
"""
file_path = Path(fileitem.path)
code, message = SystemUtils.copy(file_path, path / new_name)
if code != 0:
logger.error(f"【local】复制文件失败{message}")
return False
return True
def move(self, fileitem: schemas.FileItem, path: Path, new_name: str) -> bool:
"""
移动文件
:param fileitem: 文件项
:param path: 目标目录
:param new_name: 新文件名
"""
file_path = Path(fileitem.path)
code, message = SystemUtils.move(file_path, path / new_name)
if code != 0:
logger.error(f"【local】移动文件失败{message}")
logger.error(f"本地】软链接文件失败:{message}")
return False
return True

View File

@@ -6,7 +6,7 @@ from typing import Optional, List
from app import schemas
from app.core.config import settings
from app.log import logger
from app.modules.filemanager.storages import StorageBase
from app.modules.filemanager.storages import StorageBase, transfer_process
from app.schemas.types import StorageSchema
from app.utils.string import StringUtils
from app.utils.system import SystemUtils
@@ -58,6 +58,41 @@ class Rclone(StorageBase):
else:
return None
@staticmethod
def __parse_rclone_progress(line: str) -> Optional[float]:
"""
解析rclone进度输出
"""
if not line:
return None
line = line.strip()
# 检查是否包含百分比
if '%' not in line:
return None
try:
# 尝试多种进度输出格式
if 'ETA' in line:
# 格式: "Transferred: 1.234M / 5.678M, 22%, 1.234MB/s, ETA 2m3s"
percent_str = line.split('%')[0].split()[-1]
return float(percent_str)
elif 'Transferred:' in line and '100%' in line:
# 传输完成
return 100.0
else:
# 其他包含百分比的格式
parts = line.split()
for part in parts:
if '%' in part:
percent_str = part.replace('%', '')
return float(percent_str)
except (ValueError, IndexError):
pass
return None
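Two sanity checks for the parser (name-mangled access is for illustration only):

parse = Rclone._Rclone__parse_rclone_progress  # test-style access to the private helper
assert parse("Transferred: 1.2M / 5.6M, 22%, 1.2MB/s, ETA 2m3s") == 22.0
assert parse("no percentage in this line") is None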
def __get_rcloneitem(self, item: dict, parent: Optional[str] = "/") -> schemas.FileItem:
"""
获取rclone文件项
@@ -238,47 +273,115 @@ class Rclone(StorageBase):
def download(self, fileitem: schemas.FileItem, path: Path = None) -> Optional[Path]:
"""
下载文件
带实时进度显示的下载
"""
path = (path or settings.TEMP_PATH) / fileitem.name
local_path = (path or settings.TEMP_PATH) / fileitem.name
# 初始化进度条
logger.info(f"【rclone】开始下载: {fileitem.name} -> {local_path}")
progress_callback = transfer_process(Path(fileitem.path).as_posix())
try:
retcode = subprocess.run(
# 使用rclone的进度显示功能
process = subprocess.Popen(
[
'rclone', 'copyto',
'--progress', # 启用进度显示
'--stats', '1s', # 每秒更新一次统计信息
f'MP:{fileitem.path}',
f'{path}'
f'{local_path}'
],
startupinfo=self.__get_hidden_shell()
).returncode
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
startupinfo=self.__get_hidden_shell(),
universal_newlines=True,
bufsize=1
)
# 监控进度输出
last_progress = 0
for line in process.stdout:
if line:
# 解析rclone的进度输出
progress = self.__parse_rclone_progress(line)
if progress is not None and progress > last_progress:
progress_callback(progress)
last_progress = progress
if progress >= 100:
break
# 等待进程完成
retcode = process.wait()
if retcode == 0:
return path
logger.info(f"【rclone】下载完成: {fileitem.name}")
return local_path
else:
logger.error(f"【rclone】下载失败: {fileitem.name}")
return None
except Exception as err:
logger.error(f"【rclone】复制文件失败:{err}")
return None
logger.error(f"【rclone】下载失败: {fileitem.name} - {err}")
# 删除可能部分下载的文件
if local_path.exists():
local_path.unlink()
return None
def upload(self, fileitem: schemas.FileItem, path: Path,
new_name: Optional[str] = None) -> Optional[schemas.FileItem]:
"""
上传文件
带实时进度显示的上传
:param fileitem: 上传目录项
:param path: 本地文件路径
:param new_name: 上传后文件名
"""
target_name = new_name or path.name
new_path = Path(fileitem.path) / target_name
# 初始化进度条
logger.info(f"【rclone】开始上传: {path} -> {new_path}")
progress_callback = transfer_process(path.as_posix())
try:
new_path = Path(fileitem.path) / (new_name or path.name)
retcode = subprocess.run(
# 使用rclone的进度显示功能
process = subprocess.Popen(
[
'rclone', 'copyto',
'--progress', # 启用进度显示
'--stats', '1s', # 每秒更新一次统计信息
path.as_posix(),
f'MP:{new_path}'
],
startupinfo=self.__get_hidden_shell()
).returncode
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
startupinfo=self.__get_hidden_shell(),
universal_newlines=True,
bufsize=1
)
# 监控进度输出
last_progress = 0
for line in process.stdout:
if line:
# 解析rclone的进度输出
progress = self.__parse_rclone_progress(line)
if progress is not None and progress > last_progress:
progress_callback(progress)
last_progress = progress
if progress >= 100:
break
# 等待进程完成
retcode = process.wait()
if retcode == 0:
logger.info(f"【rclone】上传完成: {target_name}")
return self.get_item(new_path)
else:
logger.error(f"【rclone】上传失败: {target_name}")
return None
except Exception as err:
logger.error(f"【rclone】上传文件失败:{err}")
return None
logger.error(f"【rclone】上传失败: {target_name} - {err}")
return None
def detail(self, fileitem: schemas.FileItem) -> Optional[schemas.FileItem]:
"""
@@ -307,20 +410,53 @@ class Rclone(StorageBase):
:param path: 目标目录
:param new_name: 新文件名
"""
target_path = path / new_name
# 初始化进度条
logger.info(f"【rclone】开始移动: {fileitem.path} -> {target_path}")
progress_callback = transfer_process(Path(fileitem.path).as_posix())
try:
retcode = subprocess.run(
# 使用rclone的进度显示功能
process = subprocess.Popen(
[
'rclone', 'moveto',
'--progress', # 启用进度显示
'--stats', '1s', # 每秒更新一次统计信息
f'MP:{fileitem.path}',
f'MP:{path / new_name}'
f'MP:{target_path}'
],
startupinfo=self.__get_hidden_shell()
).returncode
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
startupinfo=self.__get_hidden_shell(),
universal_newlines=True,
bufsize=1
)
# 监控进度输出
last_progress = 0
for line in process.stdout:
if line:
# 解析rclone的进度输出
progress = self.__parse_rclone_progress(line)
if progress is not None and progress > last_progress:
progress_callback(progress)
last_progress = progress
if progress >= 100:
break
# 等待进程完成
retcode = process.wait()
if retcode == 0:
logger.info(f"【rclone】移动完成: {fileitem.name}")
return True
else:
logger.error(f"【rclone】移动失败: {fileitem.name}")
return False
except Exception as err:
logger.error(f"【rclone】移动文件失败:{err}")
return False
logger.error(f"【rclone】移动失败: {fileitem.name} - {err}")
return False
def copy(self, fileitem: schemas.FileItem, path: Path, new_name: str) -> bool:
"""
@@ -329,20 +465,53 @@ class Rclone(StorageBase):
:param path: 目标目录
:param new_name: 新文件名
"""
target_path = path / new_name
# 初始化进度条
logger.info(f"【rclone】开始复制: {fileitem.path} -> {target_path}")
progress_callback = transfer_process(Path(fileitem.path).as_posix())
try:
retcode = subprocess.run(
# 使用rclone的进度显示功能
process = subprocess.Popen(
[
'rclone', 'copyto',
'--progress', # 启用进度显示
'--stats', '1s', # 每秒更新一次统计信息
f'MP:{fileitem.path}',
f'MP:{path / new_name}'
f'MP:{target_path}'
],
startupinfo=self.__get_hidden_shell()
).returncode
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
startupinfo=self.__get_hidden_shell(),
universal_newlines=True,
bufsize=1
)
# 监控进度输出
last_progress = 0
for line in process.stdout:
if line:
# 解析rclone的进度输出
progress = self.__parse_rclone_progress(line)
if progress is not None and progress > last_progress:
progress_callback(progress)
last_progress = progress
if progress >= 100:
break
# 等待进程完成
retcode = process.wait()
if retcode == 0:
logger.info(f"【rclone】复制完成: {fileitem.name}")
return True
else:
logger.error(f"【rclone】复制失败: {fileitem.name}")
return False
except Exception as err:
logger.error(f"【rclone】复制文件失败:{err}")
return False
logger.error(f"【rclone】复制失败: {fileitem.name} - {err}")
return False
def link(self, fileitem: schemas.FileItem, target_file: Path) -> bool:
pass
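The progress loops above call self.__parse_rclone_progress(line), whose body is not shown in this diff. A minimal sketch of what such a parser might look like, assuming rclone's --progress/--stats output format; the regex and function name are illustrative, not the project's actual implementation:
import re
from typing import Optional

_PROGRESS_RE = re.compile(r'(\d{1,3})%')  # percent field in rclone stats lines, e.g. "..., 47%, ..."

def parse_rclone_progress(line: str) -> Optional[float]:
    # Extract a 0-100 percentage from one stats line; None if the line has no percent field
    match = _PROGRESS_RE.search(line)
    if match:
        value = float(match.group(1))
        if 0 <= value <= 100:
            return value
    return None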

View File

@@ -8,9 +8,10 @@ from smbclient import ClientConfig, register_session, reset_connection_cache
from smbprotocol.exceptions import SMBException, SMBResponseException, SMBAuthenticationError
from app import schemas
from app.core.config import settings, global_vars
from app.log import logger
from app.modules.filemanager import StorageBase
from app.modules.filemanager.storages import transfer_process
from app.schemas.types import StorageSchema
from app.utils.singleton import WeakSingleton
@@ -38,6 +39,9 @@ class SMB(StorageBase, metaclass=WeakSingleton):
"copy": "复制",
}
# 文件块大小默认100MB
chunk_size = 100 * 1024 * 1024
def __init__(self):
super().__init__()
self._connected = False
@@ -412,63 +416,99 @@ class SMB(StorageBase, metaclass=WeakSingleton):
def download(self, fileitem: schemas.FileItem, path: Path = None) -> Optional[Path]:
"""
下载文件
带实时进度显示的下载
"""
local_path = path or settings.TEMP_PATH / fileitem.name
smb_path = self._normalize_path(fileitem.path)
try:
self._check_connection()
# 确保本地目录存在
local_path.parent.mkdir(parents=True, exist_ok=True)
# 获取文件大小
file_size = fileitem.size
# 初始化进度条
logger.info(f"【SMB】开始下载: {fileitem.name} -> {local_path}")
progress_callback = transfer_process(Path(fileitem.path).as_posix())
# 使用更高效的文件传输方式
with smbclient.open_file(smb_path, mode="rb") as src_file:
with open(local_path, "wb") as dst_file:
downloaded_size = 0
while True:
if global_vars.is_transfer_stopped(fileitem.path):
logger.info(f"【SMB】{fileitem.path} 下载已取消!")
return None
chunk = src_file.read(self.chunk_size)
if not chunk:
break
dst_file.write(chunk)
downloaded_size += len(chunk)
# 更新进度
if file_size:
progress = (downloaded_size * 100) / file_size
progress_callback(progress)
logger.info(f"【SMB】下载成功: {fileitem.path} -> {local_path}")
# 完成下载
progress_callback(100)
logger.info(f"【SMB】下载完成: {fileitem.name}")
return local_path
except Exception as e:
logger.error(f"【SMB】下载失败: {e}")
logger.error(f"【SMB】下载失败: {fileitem.name} - {e}")
# 删除可能部分下载的文件
if local_path.exists():
local_path.unlink()
return None
def upload(self, fileitem: schemas.FileItem, path: Path,
new_name: Optional[str] = None) -> Optional[schemas.FileItem]:
"""
上传文件
带实时进度显示的上传
"""
target_name = new_name or path.name
target_path = Path(fileitem.path) / target_name
smb_path = self._normalize_path(str(target_path))
try:
self._check_connection()
# 获取文件大小
file_size = path.stat().st_size
# 初始化进度条
logger.info(f"【SMB】开始上传: {path} -> {target_path}")
progress_callback = transfer_process(path.as_posix())
# 使用更高效的文件传输方式
with open(path, "rb") as src_file:
with smbclient.open_file(smb_path, mode="wb") as dst_file:
uploaded_size = 0
while True:
if global_vars.is_transfer_stopped(path.as_posix()):
logger.info(f"【SMB】{path} 上传已取消!")
return None
chunk = src_file.read(self.chunk_size)
if not chunk:
break
dst_file.write(chunk)
uploaded_size += len(chunk)
# 更新进度
if file_size:
progress = (uploaded_size * 100) / file_size
progress_callback(progress)
logger.info(f"【SMB】上传成功: {path} -> {target_path}")
# 完成上传
progress_callback(100)
logger.info(f"【SMB】上传完成: {target_name}")
# 返回上传后的文件信息
return self.get_item(target_path)
except Exception as e:
logger.error(f"【SMB】上传失败: {e}")
logger.error(f"【SMB】上传失败: {target_name} - {e}")
return None
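Both SMB transfer methods share the same loop: read chunk_size bytes, honour the stop flag, write, report a percentage. A standalone sketch of that pattern, assuming a 0-100 progress callback; all names here are illustrative:
from typing import BinaryIO, Callable, Optional

def copy_with_progress(src: BinaryIO, dst: BinaryIO, total_size: int,
                       chunk_size: int = 100 * 1024 * 1024,
                       on_progress: Optional[Callable[[float], None]] = None,
                       is_cancelled: Optional[Callable[[], bool]] = None) -> bool:
    # Chunked copy with percent reporting; returns False when cancelled mid-transfer
    copied = 0
    while True:
        if is_cancelled and is_cancelled():
            return False
        chunk = src.read(chunk_size)
        if not chunk:
            break
        dst.write(chunk)
        copied += len(chunk)
        if on_progress and total_size:
            on_progress(copied * 100 / total_size)
    if on_progress:
        on_progress(100)
    return True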
def copy(self, fileitem: schemas.FileItem, path: Path, new_name: str) -> bool:

View File

@@ -1,6 +1,5 @@
import base64
import hashlib
import secrets
import threading
import time
@@ -11,12 +10,12 @@ import oss2
import requests
from oss2 import SizedFileAdapter, determine_part_size
from oss2.models import PartInfo
from app import schemas
from app.core.config import settings, global_vars
from app.log import logger
from app.modules.filemanager import StorageBase
from app.modules.filemanager.storages import transfer_process
from app.schemas.types import StorageSchema
from app.utils.singleton import WeakSingleton
from app.utils.string import StringUtils
@@ -44,6 +43,9 @@ class U115Pan(StorageBase, metaclass=WeakSingleton):
# 基础url
base_url = "https://proapi.115.com"
# 文件块大小默认10MB
chunk_size = 10 * 1024 * 1024
def __init__(self):
super().__init__()
self._auth_state = {}
@@ -352,29 +354,6 @@ class U115Pan(StorageBase, metaclass=WeakSingleton):
modify_time=int(time.time())
)
@staticmethod
def _log_progress(desc: str, total: int) -> tqdm:
"""
创建一个可以输出到日志的进度条
"""
class TqdmToLogger(io.StringIO):
def write(s, buf): # noqa
buf = buf.strip('\r\n\t ')
if buf:
logger.info(buf)
return tqdm(
total=total,
unit='B',
unit_scale=True,
desc=desc,
file=TqdmToLogger(),
mininterval=1.0,
maxinterval=5.0,
miniters=1
)
def upload(self, target_dir: schemas.FileItem, local_path: Path,
new_name: Optional[str] = None) -> Optional[schemas.FileItem]:
"""
@@ -539,13 +518,7 @@ class U115Pan(StorageBase, metaclass=WeakSingleton):
# 初始化进度条
logger.info(f"【115】开始上传: {local_path} -> {target_path},分片大小:{StringUtils.str_filesize(part_size)}")
progress_callback = transfer_process(local_path.as_posix())
# 初始化分片
upload_id = bucket.init_multipart_upload(object_name,
@@ -559,6 +532,9 @@ class U115Pan(StorageBase, metaclass=WeakSingleton):
part_number = 1
offset = 0
while offset < file_size:
if global_vars.is_transfer_stopped(local_path.as_posix()):
logger.info(f"【115】{local_path} 上传已取消!")
return None
num_to_upload = min(part_size, file_size - offset)
# 调用SizedFileAdapter(fileobj, size)方法会生成一个新的文件对象,重新计算起始追加位置。
logger.info(f"【115】开始上传 {target_name} 分片 {part_number}: {offset} -> {offset + num_to_upload}")
@@ -569,11 +545,11 @@ class U115Pan(StorageBase, metaclass=WeakSingleton):
offset += num_to_upload
part_number += 1
# 更新进度
progress = (offset * 100) / file_size
progress_callback(progress)
# 完成上传
progress_callback(100)
# 请求头
headers = {
@@ -601,11 +577,13 @@ class U115Pan(StorageBase, metaclass=WeakSingleton):
def download(self, fileitem: schemas.FileItem, path: Path = None) -> Optional[Path]:
"""
限速处理的下载
实时进度显示的下载
"""
detail = self.get_item(Path(fileitem.path))
if not detail:
logger.error(f"【115】获取文件详情失败: {fileitem.name}")
return None
download_info = self._request_api(
"POST",
"/open/ufile/downurl",
@@ -615,14 +593,58 @@ class U115Pan(StorageBase, metaclass=WeakSingleton):
}
)
if not download_info:
logger.error(f"【115】获取下载链接失败: {fileitem.name}")
return None
download_url = list(download_info.values())[0].get("url", {}).get("url")
if not download_url:
logger.error(f"【115】下载链接为空: {fileitem.name}")
return None
local_path = path or settings.TEMP_PATH / fileitem.name
# 获取文件大小
file_size = detail.size
# 初始化进度条
logger.info(f"【115】开始下载: {fileitem.name} -> {local_path}")
progress_callback = transfer_process(Path(fileitem.path).as_posix())
try:
with self.session.get(download_url, stream=True) as r:
r.raise_for_status()
downloaded_size = 0
with open(local_path, "wb") as f:
for chunk in r.iter_content(chunk_size=self.chunk_size):
if global_vars.is_transfer_stopped(fileitem.path):
logger.info(f"【115】{fileitem.path} 下载已取消!")
return None
if chunk:
f.write(chunk)
downloaded_size += len(chunk)
# 更新进度
if file_size:
progress = (downloaded_size * 100) / file_size
progress_callback(progress)
# 完成下载
progress_callback(100)
logger.info(f"【115】下载完成: {fileitem.name}")
except requests.exceptions.RequestException as e:
logger.error(f"【115】下载网络错误: {fileitem.name} - {str(e)}")
# 删除可能部分下载的文件
if local_path.exists():
local_path.unlink()
return None
except Exception as e:
logger.error(f"【115】下载失败: {fileitem.name} - {str(e)}")
# 删除可能部分下载的文件
if local_path.exists():
local_path.unlink()
return None
return local_path
def check(self) -> bool:

View File

@@ -0,0 +1,63 @@
from typing import Tuple, Union
from app.core.config import settings
from app.db import SessionFactory
from app.modules import _ModuleBase
from app.schemas.types import ModuleType, OtherModulesType
from sqlalchemy import text
class PostgreSQLModule(_ModuleBase):
"""
PostgreSQL 数据库模块
"""
def init_module(self) -> None:
pass
@staticmethod
def get_name() -> str:
return "PostgreSQL"
@staticmethod
def get_type() -> ModuleType:
"""
获取模块类型
"""
return ModuleType.Other
@staticmethod
def get_subtype() -> OtherModulesType:
"""
获取模块子类型
"""
return OtherModulesType.PostgreSQL
@staticmethod
def get_priority() -> int:
"""
获取模块优先级,数字越小优先级越高,只有同一接口下优先级才生效
"""
return 0
def init_setting(self) -> Tuple[str, Union[str, bool]]:
pass
def stop(self) -> None:
pass
def test(self):
"""
测试模块连接性
"""
if settings.DB_TYPE != "postgresql":
return None
# 测试数据库连接
db = SessionFactory()
try:
db.execute(text("SELECT 1"))
except Exception as e:
return False, f"PostgreSQL连接失败{e}"
finally:
db.close()
return True, ""

View File

@@ -5,9 +5,10 @@ from qbittorrentapi import TorrentFilesList
from torrentool.torrent import Torrent
from app import schemas
from app.core.cache import FileCache
from app.core.config import settings
from app.core.event import eventmanager, Event
from app.core.metainfo import MetaInfo
from app.log import logger
from app.modules import _ModuleBase, _DownloaderBase
from app.modules.qbittorrent.qbittorrent import Qbittorrent
@@ -92,12 +93,12 @@ class QbittorrentModule(_ModuleBase, _DownloaderBase[Qbittorrent]):
logger.info(f"Qbittorrent下载器 {name} 连接断开,尝试重连 ...")
server.reconnect()
def download(self, content: Union[Path, str, bytes], download_dir: Path, cookie: str,
episodes: Set[int] = None, category: Optional[str] = None, label: Optional[str] = None,
downloader: Optional[str] = None) -> Optional[Tuple[Optional[str], Optional[str], Optional[str], str]]:
"""
根据种子文件,选择并添加下载任务
:param content: 种子文件地址或者磁力链接或者种子内容
:param download_dir: 下载目录
:param cookie: cookie
:param episodes: 需要下载的集数
@@ -107,25 +108,44 @@ class QbittorrentModule(_ModuleBase, _DownloaderBase[Qbittorrent]):
:return: 下载器名称、种子Hash、种子文件布局、错误原因
"""
def __get_torrent_info() -> Tuple[Optional[Torrent], Optional[bytes]]:
"""
获取种子对象和种子内容
"""
torrent_info, torrent_content = None, None
try:
if isinstance(content, Path):
if content.exists():
torrent_content = content.read_bytes()
else:
# 读取缓存的种子文件
torrent_content = FileCache().get(content.as_posix(), region="torrents")
else:
torrent_content = content
if torrent_content:
# 检查是否为磁力链接
if StringUtils.is_magnet_link(torrent_content):
return None, torrent_content
else:
torrent_info = Torrent.from_string(torrent_content)
return torrent_info, torrent_content
except Exception as e:
logger.error(f"获取种子名称失败:{e}")
return "", 0
return None, None
if not content:
return None, None, None, "下载内容为空"
if isinstance(content, Path) and not content.exists():
logger.error(f"种子文件不存在:{content}")
return None, None, None, f"种子文件不存在:{content}"
# 读取种子的名称
torrent, content = __get_torrent_info()
# 检查是否为磁力链接
is_magnet = StringUtils.is_magnet_link(content)
if not torrent and not is_magnet:
return None, None, None, f"添加种子任务失败:无法读取种子文件"
# 获取下载器
server: Qbittorrent = self.get_instance(downloader)
@@ -144,7 +164,7 @@ class QbittorrentModule(_ModuleBase, _DownloaderBase[Qbittorrent]):
is_paused = True if episodes else False
# 添加任务
state = server.add_torrent(
content=content,
download_dir=str(download_dir),
is_paused=is_paused,
tag=tags,
@@ -157,10 +177,6 @@ class QbittorrentModule(_ModuleBase, _DownloaderBase[Qbittorrent]):
torrent_layout = server.get_content_layout()
if not state:
# 查询所有下载器的种子
torrents, error = server.get_torrents()
if error:
@@ -169,7 +185,8 @@ class QbittorrentModule(_ModuleBase, _DownloaderBase[Qbittorrent]):
try:
torrent_name = torrent.name if torrent else None
torrent_size = torrent.total_size if torrent else 0
for torrent in torrents:
# 名称与大小相等则认为是同一个种子
if torrent.get("name") == torrent_name \
and torrent.get("total_size") == torrent_size:
torrent_hash = torrent.get("hash")
torrent_tags = [str(tag).strip() for tag in torrent.get("tags").split(',')]
logger.warn(f"下载器中已存在该种子任务:{torrent_hash} - {torrent.get('name')}")
@@ -326,7 +343,7 @@ class QbittorrentModule(_ModuleBase, _DownloaderBase[Qbittorrent]):
del torrents
else:
return None
return ret_torrents # noqa
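For orientation, a hedged sketch of how the widened download() signature might be called; the instance, paths and cookie are illustrative, not taken from the project's call sites:
from pathlib import Path

module = QbittorrentModule()  # assumes the module framework is initialized
result = module.download(
    content=Path("/tmp/example.torrent"),  # a Path, a magnet str, or raw torrent bytes
    download_dir=Path("/downloads/movies"),
    cookie="",
)
if result:
    downloader_name, torrent_hash, torrent_layout, error = result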
def transfer_completed(self, hashs: str, downloader: Optional[str] = None) -> None:
"""

View File

@@ -0,0 +1,60 @@
from typing import Tuple, Union
from app.core.config import settings
from app.helper.redis import RedisHelper
from app.modules import _ModuleBase
from app.schemas.types import ModuleType, OtherModulesType
class RedisModule(_ModuleBase):
"""
Redis 数据库模块
"""
def init_module(self) -> None:
pass
@staticmethod
def get_name() -> str:
return "Redis缓存"
@staticmethod
def get_type() -> ModuleType:
"""
获取模块类型
"""
return ModuleType.Other
@staticmethod
def get_subtype() -> OtherModulesType:
"""
获取模块子类型
"""
return OtherModulesType.Redis
@staticmethod
def get_priority() -> int:
"""
获取模块优先级,数字越小优先级越高,只有同一接口下优先级才生效
"""
return 0
def init_setting(self) -> Tuple[str, Union[str, bool]]:
pass
def stop(self) -> None:
pass
def test(self):
"""
测试模块连接性
"""
if settings.CACHE_BACKEND_TYPE != "redis":
return None
redis_helper = RedisHelper()
try:
if redis_helper.test():
return True, ""
return False, "Redis连接失败请检查配置"
finally:
redis_helper.close()

View File

@@ -76,7 +76,7 @@ class SlackModule(_ModuleBase, _MessageBase[Slack]):
for name, client in self.get_instances().items():
state = client.get_state()
if not state:
return False, f"Slack {name} 未就"
return False, f"Slack {name} 未就"
return True, ""
def init_setting(self) -> Tuple[str, Union[str, bool]]:

View File

@@ -63,19 +63,19 @@ class SubtitleModule(_ModuleBase):
def test(self):
pass
def download_added(self, context: Context, download_dir: Path, torrent_content: Union[str, bytes] = None):
"""
添加下载任务成功后,从站点下载字幕,保存到下载目录
:param context: 上下文,包括识别信息、媒体信息、种子信息
:param download_dir: 下载目录
:param torrent_content: 种子内容,如果是种子文件,则为文件内容,否则为种子字符串
:return: None,该方法可被多个模块同时处理
"""
if not settings.DOWNLOAD_SUBTITLE:
return
# 没有种子文件不处理
if not torrent_content:
return
# 没有详情页不处理
@@ -85,7 +85,7 @@ class SubtitleModule(_ModuleBase):
# 字幕下载目录
logger.info("开始从站点下载字幕:%s" % torrent.page_url)
# 获取种子信息
folder_name, _ = TorrentHelper().get_fileinfo_from_torrent_content(torrent_content)
# 文件保存目录:如果是单文件种子,则folder_name是空,此时文件保存目录就是下载目录
download_dir = download_dir / folder_name
# 等待目录存在

View File

@@ -70,7 +70,7 @@ class SynologyChatModule(_ModuleBase, _MessageBase[SynologyChat]):
for name, client in self.get_instances().items():
state = client.get_state()
if not state:
return False, f"Synology Chat {name} 未就"
return False, f"Synology Chat {name} 未就"
return True, ""
def init_setting(self) -> Tuple[str, Union[str, bool]]:

View File

@@ -81,7 +81,7 @@ class TelegramModule(_ModuleBase, _MessageBase[Telegram]):
for name, client in self.get_instances().items():
state = client.get_state()
if not state:
return False, f"Telegram {name} 未就"
return False, f"Telegram {name} 未就"
return True, ""
def init_setting(self) -> Tuple[str, Union[str, bool]]:

View File

@@ -639,6 +639,8 @@ class TheMovieDbModule(_ModuleBase):
"""
搜索人物信息
"""
if settings.SEARCH_SOURCE and "themoviedb" not in settings.SEARCH_SOURCE:
return None
if not name:
return []
results = self.tmdb.search_persons(name)
@@ -646,6 +648,19 @@ class TheMovieDbModule(_ModuleBase):
return [MediaPerson(source='themoviedb', **person) for person in results]
return []
async def async_search_persons(self, name: str) -> Optional[List[MediaPerson]]:
"""
异步搜索人物信息
"""
if settings.SEARCH_SOURCE and "themoviedb" not in settings.SEARCH_SOURCE:
return None
if not name:
return []
results = await self.tmdb.async_search_persons(name)
if results:
return [MediaPerson(source='themoviedb', **person) for person in results]
return []
def search_collections(self, name: str) -> Optional[List[MediaInfo]]:
"""
搜索集合信息

View File

@@ -127,7 +127,7 @@ class CategoryHelper(metaclass=WeakSingleton):
continue
elif attr == "production_countries":
# 制片国家
info_values = [str(val.get("iso_3166_1")).upper() for val in info_value] # type: ignore
else:
if isinstance(info_value, list):
info_values = [str(val).upper() for val in info_value]

View File

@@ -1,22 +1,17 @@
import pickle
import random
import time
import traceback
from pathlib import Path
from threading import RLock
from typing import Optional
from app.core.cache import TTLCache
from app.core.config import settings
from app.core.meta import MetaBase
from app.log import logger
from app.schemas.types import MediaType
from app.utils.singleton import WeakSingleton
lock = RLock()
CACHE_EXPIRE_TIMESTAMP_STR = "cache_expire_timestamp"
EXPIRE_TIMESTAMP = settings.CONF.meta
class TmdbCache(metaclass=WeakSingleton):
"""
@@ -32,15 +27,23 @@ class TmdbCache(metaclass=WeakSingleton):
_tmdb_cache_expire: bool = True
def __init__(self):
self.maxsize = settings.CONF.douban
self.ttl = settings.CONF.meta
self.region = "__tmdb_cache__"
self._meta_filepath = settings.TEMP_PATH / self.region
# 初始化缓存
self._cache = TTLCache(region=self.region, maxsize=self.maxsize, ttl=self.ttl)
# 非Redis加载本地缓存数据
if not self._cache.is_redis():
for key, value in self.__load(self._meta_filepath).items():
self._cache.set(key, value)
def clear(self):
"""
清空所有TMDB缓存
"""
with lock:
self._cache.clear()
@staticmethod
def __get_key(meta: MetaBase) -> str:
@@ -54,16 +57,9 @@ class TmdbCache(metaclass=WeakSingleton):
根据KEY值获取缓存值
"""
key = self.__get_key(meta)
return self._cache.get(key) or {}
def delete(self, key: str) -> dict:
"""
@@ -72,38 +68,26 @@ class TmdbCache(metaclass=WeakSingleton):
@return: 被删除的缓存内容
"""
with lock:
redis_data = self._cache.get(key)
if redis_data:
self._cache.delete(key)
return redis_data
return {}
def modify(self, key: str, title: str) -> dict:
"""
修改缓存信息
@param key: 缓存key
@param title: 标题
@return: 被修改后缓存内容
"""
with lock:
redis_data = self._cache.get(key)
if redis_data:
redis_data['title'] = title
self._cache.set(key, redis_data)
return redis_data
return {}
@staticmethod
def __load(path: Path) -> dict:
@@ -115,106 +99,61 @@ class TmdbCache(metaclass=WeakSingleton):
with open(path, 'rb') as f:
data = pickle.load(f)
return data
return {}
except Exception as e:
logger.error(f'加载缓存失败:{str(e)} - {traceback.format_exc()}')
return {}
return {}
def update(self, meta: MetaBase, info: dict) -> None:
"""
新增或更新缓存条目
"""
key = self.__get_key(meta)
if info:
# 缓存标题
cache_title = info.get("title") \
if info.get("media_type") == MediaType.MOVIE else info.get("name")
# 缓存年份
cache_year = info.get('release_date') \
if info.get("media_type") == MediaType.MOVIE else info.get('first_air_date')
if cache_year:
cache_year = cache_year[:4]
with lock:
# 缓存数据
cache_data = {
"id": info.get("id"),
"type": info.get("media_type"),
"year": cache_year,
"title": cache_title,
"poster_path": info.get("poster_path"),
"backdrop_path": info.get("backdrop_path"),
CACHE_EXPIRE_TIMESTAMP_STR: int(time.time()) + EXPIRE_TIMESTAMP
"backdrop_path": info.get("backdrop_path")
}
self._cache.set(key, cache_data)
elif info is not None:
# None时不缓存,此时代表网络错误,允许重复请求
with lock:
self._cache.set(key, {"id": 0})
def save(self, force: bool = False) -> None:
"""
保存缓存数据到文件
"""
# Redis不需要保存到本地文件
if self._cache.is_redis():
return
# Redis不可用时保存到本地文件
meta_data = self.__load(self._meta_filepath)
# 当前缓存,去除无法识别
new_meta_data = {k: v for k, v in self._cache.items() if v.get("id")}
if not force \
and not self._random_sample(new_meta_data) \
and meta_data.keys() == new_meta_data.keys():
return
with open(self._meta_filepath, 'wb') as f:
pickle.dump(new_meta_data, f, pickle.HIGHEST_PROTOCOL) # type: ignore
def _random_sample(self, new_meta_data: dict) -> bool:
"""
采样分析是否需要保存
"""
ret = False
if len(new_meta_data) < 25:
keys = list(new_meta_data.keys())
for k in keys:
info = new_meta_data.get(k)
expire = info.get(CACHE_EXPIRE_TIMESTAMP_STR)
if not expire:
ret = True
info[CACHE_EXPIRE_TIMESTAMP_STR] = int(time.time()) + EXPIRE_TIMESTAMP
elif int(time.time()) >= expire:
ret = True
if self._tmdb_cache_expire:
new_meta_data.pop(k)
else:
count = 0
keys = random.sample(sorted(new_meta_data.keys()), 25)
for k in keys:
info = new_meta_data.get(k)
expire = info.get(CACHE_EXPIRE_TIMESTAMP_STR)
if not expire:
ret = True
info[CACHE_EXPIRE_TIMESTAMP_STR] = int(time.time()) + EXPIRE_TIMESTAMP
elif int(time.time()) >= expire:
ret = True
if self._tmdb_cache_expire:
new_meta_data.pop(k)
count += 1
if count >= 5:
ret |= self._random_sample(new_meta_data)
return ret
def get_title(self, key: str) -> Optional[str]:
"""
获取缓存的标题
"""
cache_media_info = self._cache.get(key)
if not cache_media_info or not cache_media_info.get("id"):
return None
return cache_media_info.get("title")
def set_title(self, key: str, cn_title: str) -> None:
"""
重新设置缓存标题
"""
cache_media_info = self._cache.get(key)
if not cache_media_info:
return
cache_media_info['title'] = cn_title
self._cache.set(key, cache_media_info)
def __del__(self):
self.save()
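A hedged round-trip through the TTLCache-backed cache; the MetaInfo title and info payload are illustrative:
from app.core.metainfo import MetaInfo

cache = TmdbCache()
meta = MetaInfo("Some Movie 2024")
cache.update(meta, {"id": 123, "media_type": MediaType.MOVIE,
                    "title": "Some Movie", "release_date": "2024-01-01",
                    "poster_path": None, "backdrop_path": None})
info = cache.get(meta)  # -> {"id": 123, "type": MediaType.MOVIE, "year": "2024", ...}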

View File

@@ -402,7 +402,8 @@ class TmdbApi:
if match:
try:
return int(match.group(1))
except Exception as err:
logger.debug(f"解析TMDBID失败{str(err)} - {traceback.format_exc()}")
return None
return None
@@ -832,7 +833,6 @@ class TmdbApi:
return None
# dict[地区:分级]
ratings = {}
if results := (tmdb_info.get("release_dates") or {}).get("results"):
"""
[

View File

@@ -5,9 +5,10 @@ from torrentool.torrent import Torrent
from transmission_rpc import File
from app import schemas
from app.core.cache import FileCache
from app.core.config import settings
from app.core.event import eventmanager, Event
from app.core.metainfo import MetaInfo
from app.log import logger
from app.modules import _ModuleBase, _DownloaderBase
from app.modules.transmission.transmission import Transmission
@@ -93,12 +94,12 @@ class TransmissionModule(_ModuleBase, _DownloaderBase[Transmission]):
logger.info(f"Transmission下载器 {name} 连接断开,尝试重连 ...")
server.reconnect()
def download(self, content: Union[Path, str, bytes], download_dir: Path, cookie: str,
episodes: Set[int] = None, category: Optional[str] = None, label: Optional[str] = None,
downloader: Optional[str] = None) -> Optional[Tuple[Optional[str], Optional[str], Optional[str], str]]:
"""
根据种子文件,选择并添加下载任务
:param content: 种子文件地址或者磁力链接或种子内容
:param download_dir: 下载目录
:param cookie: cookie
:param episodes: 需要下载的集数
@@ -108,24 +109,44 @@ class TransmissionModule(_ModuleBase, _DownloaderBase[Transmission]):
:return: 下载器名称、种子Hash、种子文件布局、错误原因
"""
def __get_torrent_info() -> Tuple[Optional[Torrent], Optional[bytes]]:
"""
获取种子对象和种子内容
"""
torrent_info, torrent_content = None, None
try:
if isinstance(content, Path):
if content.exists():
torrent_content = content.read_bytes()
else:
# 读取缓存的种子文件
torrent_content = FileCache().get(content.as_posix(), region="torrents")
else:
torrent_content = content
if torrent_content:
# 检查是否为磁力链接
if StringUtils.is_magnet_link(torrent_content):
return None, torrent_content
else:
torrent_info = Torrent.from_string(torrent_content)
return torrent_info, torrent_content
except Exception as e:
logger.error(f"获取种子名称失败:{e}")
return "", 0
return None, None
if not content:
return None, None, None, "下载内容为空"
if isinstance(content, Path) and not content.exists():
return None, None, None, f"种子文件不存在:{content}"
# 读取种子的名称
torrent_info, content = __get_torrent_info()
# 检查是否为磁力链接
is_magnet = StringUtils.is_magnet_link(content)
if not torrent_info and not is_magnet:
return None, None, None, "添加种子任务失败:无法读取种子文件"
# 获取下载器
server: Transmission = self.get_instance(downloader)
@@ -144,7 +165,7 @@ class TransmissionModule(_ModuleBase, _DownloaderBase[Transmission]):
labels = None
# 添加任务
torrent = server.add_torrent(
content=content,
download_dir=str(download_dir),
is_paused=is_paused,
labels=labels,
@@ -154,10 +175,6 @@ class TransmissionModule(_ModuleBase, _DownloaderBase[Transmission]):
torrent_layout = "Original"
if not torrent:
# 查询所有下载器的种子
torrents, error = server.get_torrents()
if error:
@@ -166,7 +183,7 @@ class TransmissionModule(_ModuleBase, _DownloaderBase[Transmission]):
try:
for torrent in torrents:
# 名称与大小相等则认为是同一个种子
if torrent_info and torrent.name == torrent_info.name \
and torrent.total_size == torrent_info.total_size:
torrent_hash = torrent.hashString
logger.warn(f"下载器中已存在该种子任务:{torrent_hash} - {torrent.name}")
# 给种子打上标签
@@ -314,7 +331,7 @@ class TransmissionModule(_ModuleBase, _DownloaderBase[Transmission]):
del torrents
else:
return None
return ret_torrents # noqa
def transfer_completed(self, hashs: str, downloader: Optional[str] = None) -> None:
"""

View File

@@ -71,7 +71,7 @@ class VoceChatModule(_ModuleBase, _MessageBase[VoceChat]):
for name, client in self.get_instances().items():
state = client.get_state()
if not state:
return False, f"VoceChat {name} 未就"
return False, f"VoceChat {name} 未就"
return True, ""
def init_setting(self) -> Tuple[str, Union[str, bool]]:

View File

@@ -75,7 +75,7 @@ class WechatModule(_ModuleBase, _MessageBase[WeChat]):
for name, client in self.get_instances().items():
state = client.get_state()
if not state:
return False, f"企业微信 {name} 未就"
return False, f"企业微信 {name} 未就"
return True, ""
def init_setting(self) -> Tuple[str, Union[str, bool]]:

View File

@@ -10,7 +10,7 @@ from threading import Lock
from typing import Any, Optional, Dict, List
from apscheduler.schedulers.background import BackgroundScheduler
from app.core.cache import TTLCache, FileCache
from watchdog.events import FileSystemEventHandler, FileSystemMovedEvent, FileSystemEvent
from watchdog.observers.polling import PollingObserver
@@ -25,7 +25,7 @@ from app.log import logger
from app.schemas import ConfigChangeEventData
from app.schemas import FileItem
from app.schemas.types import SystemConfigKey, EventType
from app.utils.singleton import SingletonClass
lock = Lock()
snapshot_lock = Lock()
@@ -54,7 +54,7 @@ class FileMonitorHandler(FileSystemEventHandler):
file_size=Path(event.dest_path).stat().st_size)
class Monitor(metaclass=SingletonClass):
"""
目录监控处理链,单例模式
"""
@@ -67,17 +67,14 @@ class Monitor(metaclass=Singleton):
self._observers = []
# 定时服务
self._scheduler = None
# 存储快照间隔(分钟)
self._snapshot_interval = 5
# TTL缓存10秒钟有效
self._cache = TTLCache(region="monitor", maxsize=1024, ttl=10)
# 快照文件缓存
self._snapshot_cache = FileCache(base=settings.CACHE_PATH / "snapshots")
# 监控的文件扩展名
self.all_exts = settings.RMT_MEDIAEXT
# 启动目录监控和文件整理
self.init()
@@ -98,14 +95,13 @@ class Monitor(metaclass=Singleton):
def save_snapshot(self, storage: str, snapshot: Dict, file_count: int = 0,
last_snapshot_time: Optional[float] = None):
"""
保存快照到文件缓存
:param storage: 存储名称
:param snapshot: 快照数据
:param last_snapshot_time: 上次快照时间戳
:param file_count: 文件数量,用于调整监控间隔
"""
try:
snapshot_time = max((item.get('modify_time', 0) for item in snapshot.values()), default=None)
if snapshot_time is None:
snapshot_time = last_snapshot_time or time.time()
@@ -114,9 +110,11 @@ class Monitor(metaclass=Singleton):
'file_count': file_count,
'snapshot': snapshot
}
# 使用FileCache保存快照数据
cache_key = f"{storage}_snapshot"
snapshot_json = json.dumps(snapshot_data, ensure_ascii=False, indent=2)
self._snapshot_cache.set(cache_key, snapshot_json.encode('utf-8'), region="snapshots")
logger.debug(f"快照已保存到缓存: {storage}")
except Exception as e:
logger.error(f"保存快照失败: {e}")
@@ -127,9 +125,9 @@ class Monitor(metaclass=Singleton):
:return: 是否成功
"""
try:
cache_key = f"{storage}_snapshot"
if self._snapshot_cache.exists(cache_key, region="snapshots"):
self._snapshot_cache.delete(cache_key, region="snapshots")
logger.info(f"快照已重置: {storage}")
return True
logger.debug(f"快照文件不存在,无需重置: {storage}")
@@ -187,18 +185,18 @@ class Monitor(metaclass=Singleton):
def load_snapshot(self, storage: str) -> Optional[Dict]:
"""
从文件缓存加载快照
:param storage: 存储名称
:return: 快照数据或None
"""
try:
cache_key = f"{storage}_snapshot"
snapshot_data = self._snapshot_cache.get(cache_key, region="snapshots")
if snapshot_data:
data = json.loads(snapshot_data.decode('utf-8'))
logger.debug(f"成功加载快照: {storage}, 包含 {len(data.get('snapshot', {}))} 个文件")
return data
logger.debug(f"快照文件不存在: {storage}")
return None
except Exception as e:
logger.error(f"加载快照失败: {e}")
@@ -768,7 +766,7 @@ class Monitor(metaclass=Singleton):
def stop(self):
"""
退出监控
"""
self._event.set()
if self._observers:
@@ -791,4 +789,8 @@ class Monitor(metaclass=Singleton):
except Exception as e:
logger.error(f"停止定时服务出现了错误:{e}")
self._scheduler = None
if self._cache:
self._cache.close()
if self._snapshot_cache:
self._snapshot_cache.close()
self._event.clear()

View File

@@ -78,7 +78,7 @@ class FastAPIMonitor:
# 告警状态
self.alerts: List[str] = []
logger.info("FastAPI性能监控器已初始化")
logger.debug("FastAPI性能监控器已初始化")
def record_request(self, request: Request, response: Response, response_time: float):
"""
@@ -172,7 +172,7 @@ class FastAPIMonitor:
'count': 0,
'total_time': 0,
'errors': 0,
'avg_time': 0.0
})
for req in self.request_history:

View File

@@ -1,3 +1,5 @@
import asyncio
import inspect
import threading
import traceback
from datetime import datetime, timedelta
@@ -18,16 +20,16 @@ from app.chain.subscribe import SubscribeChain
from app.chain.transfer import TransferChain
from app.chain.workflow import WorkflowChain
from app.core.config import settings
from app.core.event import eventmanager, Event
from app.core.plugin import PluginManager
from app.db.systemconfig_oper import SystemConfigOper
from app.helper.sites import SitesHelper # noqa
from app.helper.message import MessageHelper
from app.helper.wallpaper import WallpaperHelper
from app.log import logger
from app.schemas import Notification, NotificationType, Workflow, ConfigChangeEventData
from app.schemas.types import EventType, SystemConfigKey
from app.utils.singleton import SingletonClass
from app.utils.timer import TimerUtils
lock = threading.Lock()
@@ -37,7 +39,7 @@ class SchedulerChain(ChainBase):
pass
class Scheduler(metaclass=SingletonClass):
"""
定时任务管理
"""
@@ -55,6 +57,8 @@ class Scheduler(metaclass=Singleton):
self._auth_count = 0
# 用户认证失败消息发送
self._auth_message = False
# 当前事件循环
self.loop = asyncio.get_event_loop()
self.init()
@eventmanager.register(EventType.ConfigChanged)
@@ -162,6 +166,19 @@ class Scheduler(metaclass=Singleton):
"name": "推荐缓存",
"func": RecommendChain().refresh_recommend,
"running": False,
},
"plugin_market_refresh": {
"name": "插件市场缓存",
"func": PluginManager().async_get_online_plugins,
"running": False,
"kwargs": {
"force": True
}
},
"subscribe_calendar_cache": {
"name": "订阅日历缓存",
"func": SubscribeChain().cache_calendar,
"running": False
}
}
@@ -180,7 +197,7 @@ class Scheduler(metaclass=Singleton):
id="cookiecloud",
name="同步CookieCloud站点",
minutes=int(settings.COOKIECLOUD_INTERVAL),
next_run_time=datetime.now(pytz.timezone(settings.TZ)) + timedelta(minutes=5),
kwargs={
'job_id': 'cookiecloud'
}
@@ -195,7 +212,7 @@ class Scheduler(metaclass=Singleton):
id="mediaserver_sync",
name="同步媒体服务器",
hours=int(settings.MEDIASERVER_SYNC_INTERVAL),
next_run_time=datetime.now(pytz.timezone(settings.TZ)) + timedelta(minutes=10),
kwargs={
'job_id': 'mediaserver_sync'
}
@@ -301,7 +318,7 @@ class Scheduler(metaclass=Singleton):
id="random_wallpager",
name="壁纸缓存",
minutes=30,
next_run_time=datetime.now(pytz.timezone(settings.TZ)) + timedelta(seconds=1),
kwargs={
'job_id': 'random_wallpager'
}
@@ -363,12 +380,37 @@ class Scheduler(metaclass=Singleton):
id="recommend_refresh",
name="推荐缓存",
hours=24,
next_run_time=datetime.now(pytz.timezone(settings.TZ)) + timedelta(seconds=5),
kwargs={
'job_id': 'recommend_refresh'
}
)
# 插件市场缓存
self._scheduler.add_job(
self.start,
"interval",
id="plugin_market_refresh",
name="插件市场缓存",
minutes=30,
kwargs={
'job_id': 'plugin_market_refresh'
}
)
# 订阅日历缓存
self._scheduler.add_job(
self.start,
"interval",
id="subscribe_calendar_cache",
name="订阅日历缓存",
hours=6,
next_run_time=datetime.now(pytz.timezone(settings.TZ)) + timedelta(minutes=2),
kwargs={
'job_id': 'subscribe_calendar_cache'
}
)
# 初始化工作流服务
self.init_workflow_jobs()
@@ -390,7 +432,7 @@ class Scheduler(metaclass=Singleton):
if not job:
return None
if job.get("running"):
logger.warning(f"定时任务 {job_id} - {job.get("name")} 正在运行 ...")
logger.warning(f"定时任务 {job_id} - {job.get('name')} 正在运行 ...")
return None
self._jobs[job_id]["running"] = True
return job
@@ -409,6 +451,13 @@ class Scheduler(metaclass=Singleton):
"""
启动定时服务
"""
def __start_coro(coro):
"""
启动协程
"""
return asyncio.run_coroutine_threadsafe(coro, self.loop)
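# Note: run_coroutine_threadsafe only submits the coroutine to self.loop; it
# assumes that loop keeps running in another thread (e.g. via loop.run_forever()).
# The returned concurrent.futures.Future can be waited on synchronously:
#   future = __start_coro(some_coro())  # some_coro is an illustrative name
#   result = future.result(timeout=60)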
# 获取定时任务
job = self.__prepare_job(job_id)
if not job:
@@ -417,7 +466,13 @@ class Scheduler(metaclass=Singleton):
try:
if not kwargs:
kwargs = job.get("kwargs") or {}
job["func"](*args, **kwargs)
func = job.get("func")
if not func:
return
if inspect.iscoroutinefunction(func):
__start_coro(func(*args, **kwargs))
else:
job["func"](*args, **kwargs)
except Exception as e:
logger.error(f"定时任务 {job.get('name')} 执行失败:{str(e)} - {traceback.format_exc()}")
MessageHelper().put(title=f"{job.get('name')} 执行失败",
@@ -519,7 +574,7 @@ class Scheduler(metaclass=Singleton):
except JobLookupError:
pass
if job_removed:
logger.info(f"移除插件服务({plugin_name}){service.get('name')}")
logger.info(f"移除插件服务({plugin_name}){service.get('name')}") # noqa
except Exception as e:
logger.error(f"移除插件服务失败:{str(e)} - {job_id}: {service}")
SchedulerChain().messagehelper.put(title=f"插件 {plugin_name} 服务移除失败",

View File

@@ -1,4 +1,4 @@
from typing import Optional, List
from typing import Optional, List, Dict, Any
from pydantic import BaseModel, Field
@@ -67,3 +67,17 @@ class PluginDashboard(Plugin):
cols: Optional[dict] = Field(default_factory=dict)
# 页面元素
elements: Optional[List[dict]] = Field(default_factory=list)
class PluginMemoryInfo(BaseModel):
"""插件内存信息"""
plugin_id: str = Field(description="插件ID")
plugin_name: str = Field(description="插件名称")
plugin_version: str = Field(description="插件版本")
total_memory_bytes: int = Field(description="总内存使用量(字节)")
total_memory_mb: float = Field(description="总内存使用量(MB)")
object_count: int = Field(description="对象数量")
calculation_time_ms: float = Field(description="计算耗时(毫秒)")
timestamp: float = Field(description="统计时间戳")
error: Optional[str] = Field(default=None, description="错误信息")
object_details: Optional[List[Dict[str, Any]]] = Field(default=None, description="大对象详情")
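A hedged example wiring this model to the MemoryCalculator output that appears later in this diff; the plugin metadata values are illustrative:
import time

stats = MemoryCalculator().calculate_object_memory(plugin_instance)  # plugin_instance is illustrative
info = PluginMemoryInfo(
    plugin_id="exampleplugin",
    plugin_name="Example Plugin",
    plugin_version="1.0",
    timestamp=time.time(),
    total_memory_bytes=stats["total_memory_bytes"],
    total_memory_mb=stats["total_memory_mb"],
    object_count=stats["object_count"],
    calculation_time_ms=stats["calculation_time_ms"],
    object_details=stats["object_details"],
)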

View File

@@ -77,7 +77,7 @@ class SiteUserData(BaseModel):
# 用户名
username: Optional[str] = None
# 用户ID
userid: Optional[Union[int, str]] = None
userid: Optional[str] = None
# 用户等级
user_level: Optional[str] = None
# 加入时间

View File

@@ -294,20 +294,6 @@ class MediaRecognizeType(Enum):
Bangumi = "Bangumi"
# 其他杂项模块类型
class OtherModulesType(Enum):
# 字幕
Subtitle = "站点字幕"
# Fanart
Fanart = "Fanart"
# 文件整理
FileManager = "文件整理"
# 过滤器
Filter = "过滤器"
# 站点索引
Indexer = "站点索引"
# 用户配置Key字典
class UserConfigKey(Enum):
# 监控面板
@@ -339,3 +325,21 @@ class ModuleType(Enum):
Indexer = "indexer"
# 其它
Other = "other"
# 其他杂项模块类型
class OtherModulesType(Enum):
# 字幕
Subtitle = "站点字幕"
# Fanart
Fanart = "Fanart"
# 文件整理
FileManager = "文件整理"
# 过滤器
Filter = "过滤器"
# 站点索引
Indexer = "站点索引"
# PostgreSQL
PostgreSQL = "PostgreSQL"
# Redis
Redis = "Redis"

View File

@@ -35,10 +35,10 @@ async def lifespan(app: FastAPI):
定义应用的生命周期事件
"""
print("Starting up...")
# 初始化模块
init_modules()
# 初始化路由
init_routers(app)
# 恢复插件备份
SystemChain().restore_plugins()
# 初始化插件

View File

@@ -1,5 +1,7 @@
import sys
from app.helper.redis import RedisHelper, AsyncRedisHelper
# SitesHelper涉及资源包拉取提前引入并容错提示
try:
from app.helper.sites import SitesHelper # noqa
@@ -12,14 +14,13 @@ except ImportError as e:
from app.utils.system import SystemUtils
from app.log import logger
from app.core.config import settings
from app.core.module import ModuleManager
from app.core.event import EventManager
from app.helper.thread import ThreadHelper
from app.helper.display import DisplayHelper
from app.helper.doh import DohHelper
from app.helper.resource import ResourceHelper
from app.helper.message import MessageHelper, stop_message
from app.helper.subscribe import SubscribeHelper
from app.db import close_database
from app.db.systemconfig_oper import SystemConfigOper
@@ -68,9 +69,9 @@ def clear_temp():
清理临时文件和图片缓存
"""
# 清理临时目录中3天前的文件
SystemUtils.clear(settings.TEMP_PATH, days=settings.TEMP_FILE_DAYS)
# 清理图片缓存目录中7天前的文件
SystemUtils.clear(settings.CACHE_PATH / "images", days=7)
SystemUtils.clear(settings.CACHE_PATH / "images", days=settings.GLOBAL_IMAGE_CACHE_DAYS)
def user_auth():
@@ -117,8 +118,11 @@ async def stop_modules():
DisplayHelper().stop()
# 停止线程池
ThreadHelper().shutdown()
# 停止消息服务
stop_message()
# 关闭Redis缓存连接
RedisHelper().close()
await AsyncRedisHelper().close()
# 停止数据库连接
await close_database()
# 停止前端服务

View File

@@ -1,83 +0,0 @@
import asyncio
import threading
from concurrent.futures import ThreadPoolExecutor
from typing import Coroutine, Any, TypeVar
T = TypeVar('T')
class AsyncUtils:
"""
异步工具类,用于在同步环境中调用异步方法
"""
@staticmethod
def run_async(coro: Coroutine[Any, Any, T]) -> T:
"""
在同步环境中安全地执行异步协程
:param coro: 要执行的协程
:return: 协程的返回值
:raises: 协程执行过程中的任何异常
"""
try:
# 尝试获取当前运行的事件循环
asyncio.get_running_loop()
# 如果有运行中的事件循环,在新线程中执行
return AsyncUtils._run_in_thread(coro)
except RuntimeError:
# 没有运行中的事件循环,直接使用 asyncio.run
return asyncio.run(coro)
@staticmethod
def _run_in_thread(coro: Coroutine[Any, Any, T]) -> T:
"""
在新线程中创建事件循环并执行协程
:param coro: 要执行的协程
:return: 协程的返回值
"""
result = None
exception = None
def _run():
nonlocal result, exception
try:
# 在新线程中创建新的事件循环
new_loop = asyncio.new_event_loop()
asyncio.set_event_loop(new_loop)
try:
result = new_loop.run_until_complete(coro)
finally:
new_loop.close()
except Exception as e:
exception = e
# 在新线程中执行
thread = threading.Thread(target=_run)
thread.start()
thread.join()
if exception:
raise exception
return result
@staticmethod
def run_async_in_executor(coro: Coroutine[Any, Any, T]) -> T:
"""
使用线程池执行器在新线程中运行异步协程
:param coro: 要执行的协程
:return: 协程的返回值
"""
try:
# 检查是否有运行中的事件循环
asyncio.get_running_loop()
# 有运行中的事件循环,使用线程池
with ThreadPoolExecutor() as executor:
future = executor.submit(asyncio.run, coro)
return future.result()
except RuntimeError:
# 没有运行中的事件循环,直接运行
return asyncio.run(coro)

app/utils/memory.py (new file, 178 lines)
View File

@@ -0,0 +1,178 @@
import sys
import time
from collections import deque
from typing import Any, Dict, Set
from app.log import logger
class MemoryCalculator:
"""
内存计算器,用于递归计算对象的内存占用
"""
def __init__(self):
# 缓存已计算的对象ID避免重复计算
self._calculated_ids: Set[int] = set()
# 最大递归深度,防止无限递归
self._max_depth = 10
# 最大对象数量,防止计算过多对象
self._max_objects = 10000
def calculate_object_memory(self, obj: Any, max_depth: int = None, max_objects: int = None) -> Dict[str, Any]:
"""
计算对象的内存占用
:param obj: 要计算的对象
:param max_depth: 最大递归深度
:param max_objects: 最大对象数量
:return: 内存统计信息
"""
if max_depth is None:
max_depth = self._max_depth
if max_objects is None:
max_objects = self._max_objects
# 重置缓存
self._calculated_ids.clear()
start_time = time.time()
object_details = []
try:
# 递归计算内存
memory_info = self._calculate_recursive(obj, depth=0, max_depth=max_depth,
max_objects=max_objects, object_count=0)
total_memory = memory_info['total_memory']
object_count = memory_info['object_count']
object_details = memory_info['object_details']
except Exception as e:
logger.error(f"计算对象内存时出错:{str(e)}")
total_memory = 0
object_count = 0
calculation_time = time.time() - start_time
return {
'total_memory_bytes': total_memory,
'total_memory_mb': round(total_memory / (1024 * 1024), 2),
'object_count': object_count,
'calculation_time_ms': round(calculation_time * 1000, 2),
'object_details': object_details[:10] # 只返回前10个最大的对象
}
def _calculate_recursive(self, obj: Any, depth: int, max_depth: int,
max_objects: int, object_count: int) -> Dict[str, Any]:
"""
递归计算对象内存
"""
if depth > max_depth or object_count > max_objects:
return {
'total_memory': 0,
'object_count': object_count,
'object_details': []
}
total_memory = 0
object_details = []
# 获取对象ID避免重复计算
obj_id = id(obj)
if obj_id in self._calculated_ids:
return {
'total_memory': 0,
'object_count': object_count,
'object_details': []
}
self._calculated_ids.add(obj_id)
object_count += 1
try:
# 计算对象本身的内存
obj_memory = sys.getsizeof(obj)
total_memory += obj_memory
# 记录大对象
if obj_memory > 1024: # 大于1KB的对象
object_details.append({
'type': type(obj).__name__,
'memory_bytes': obj_memory,
'memory_mb': round(obj_memory / (1024 * 1024), 2),
'depth': depth
})
# 递归计算容器对象的内容
if depth < max_depth:
container_memory = self._calculate_container_memory(
obj, depth + 1, max_depth, max_objects, object_count
)
total_memory += container_memory['total_memory']
object_count = container_memory['object_count']
object_details.extend(container_memory['object_details'])
except Exception as e:
logger.debug(f"计算对象 {type(obj).__name__} 内存时出错:{str(e)}")
return {
'total_memory': total_memory,
'object_count': object_count,
'object_details': object_details
}
def _calculate_container_memory(self, obj: Any, depth: int, max_depth: int,
max_objects: int, object_count: int) -> Dict[str, Any]:
"""
计算容器对象的内存
"""
total_memory = 0
object_details = []
try:
# 处理不同类型的容器
if isinstance(obj, (list, tuple, deque)):
for item in obj:
if object_count > max_objects:
break
item_memory = self._calculate_recursive(item, depth, max_depth, max_objects, object_count)
total_memory += item_memory['total_memory']
object_count = item_memory['object_count']
object_details.extend(item_memory['object_details'])
elif isinstance(obj, dict):
for key, value in obj.items():
if object_count > max_objects:
break
# 计算key的内存
key_memory = self._calculate_recursive(key, depth, max_depth, max_objects, object_count)
total_memory += key_memory['total_memory']
object_count = key_memory['object_count']
object_details.extend(key_memory['object_details'])
# 计算value的内存
value_memory = self._calculate_recursive(value, depth, max_depth, max_objects, object_count)
total_memory += value_memory['total_memory']
object_count = value_memory['object_count']
object_details.extend(value_memory['object_details'])
elif hasattr(obj, '__dict__'):
# 处理有__dict__属性的对象
for attr_name, attr_value in obj.__dict__.items():
if object_count > max_objects:
break
# 跳过一些特殊属性
if attr_name.startswith('_') and attr_name not in ['_calculated_ids']:
continue
attr_memory = self._calculate_recursive(attr_value, depth, max_depth, max_objects, object_count)
total_memory += attr_memory['total_memory']
object_count = attr_memory['object_count']
object_details.extend(attr_memory['object_details'])
except Exception as e:
logger.debug(f"计算容器对象 {type(obj).__name__} 内存时出错:{str(e)}")
return {
'total_memory': total_memory,
'object_count': object_count,
'object_details': object_details
}
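A quick usage sketch of the calculator on an ordinary container; the sample data is illustrative:
calc = MemoryCalculator()
report = calc.calculate_object_memory({"items": list(range(10_000))}, max_depth=5)
print(report["total_memory_mb"], report["object_count"], report["calculation_time_ms"])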

View File

@@ -229,7 +229,7 @@ class StringUtils:
size = float(size)
d = [(1024 - 1, 'K'), (1024 ** 2 - 1, 'M'), (1024 ** 3 - 1, 'G'), (1024 ** 4 - 1, 'T')]
s = [x[0] for x in d]
index = bisect.bisect_left(s, size) - 1 # noqa
if index == -1:
return str(size) + "B"
else:
@@ -925,3 +925,16 @@ class StringUtils:
if re.match(r'^[a-zA-Z0-9.-]+(\.[a-zA-Z]{2,})?$', text):
return True
return False
@staticmethod
def is_magnet_link(content: Union[str, bytes]) -> bool:
"""
判断内容是否为磁力链接
"""
if not content:
return False
if isinstance(content, str) and content.startswith("magnet:"):
return True
if isinstance(content, bytes) and content.startswith(b"magnet:"):
return True
return False
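Behaviour at a glance; the magnet URI and bencoded prefix are illustrative:
assert StringUtils.is_magnet_link("magnet:?xt=urn:btih:abcdef0123456789")
assert not StringUtils.is_magnet_link(b"d8:announce...")  # raw bencoded torrent bytes
assert not StringUtils.is_magnet_link("")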

View File

@@ -1,17 +1 @@
#######################################################################################################
# V2版本中大部分设置可通过后台设置界面进行配置,本文件仅展示界面无法配置的项,这些项同样可以通过环境变量进行设置 #
#######################################################################################################
# 【*】API监听地址注意不是前端访问地址
HOST=0.0.0.0
# 【*】超级管理员,设置后一旦重启将固化到数据库中,修改将无效(初始化超级管理员密码仅会生成一次,请在日志中查看并自行登录系统修改)
SUPERUSER=admin
# 开发调试模式,仅开发人员使用,打开后将停止后台服务
DEV=false
# 为指定字幕添加.default后缀设置为默认字幕,支持为'zh-cn'、'zh-tw'、'eng'添加默认字幕,未定义或设置为None则不添加
DEFAULT_SUB=zh-cn
# 是否启用内存监控,开启后将定期生成内存快照文件
MEMORY_ANALYSIS=false
# 内存快照间隔(分钟)
MEMORY_SNAPSHOT_INTERVAL=30
# 保留的内存快照文件数量
MEMORY_SNAPSHOT_KEEP_COUNT=20
# MoviePilot V2版本大部分设置可通过后台设置界面进行配置,仅个别配置需要通过环境变量或本配置文件配置,所有可配置项参考:https://wiki.movie-pilot.org/zh/configuration

View File

@@ -40,13 +40,25 @@ def run_migrations_offline() -> None:
"""
url = config.get_main_option("sqlalchemy.url")
# 根据数据库类型配置不同的参数
if url and "postgresql" in url:
# PostgreSQL配置
context.configure(
url=url,
target_metadata=target_metadata,
literal_binds=True,
dialect_opts={"paramstyle": "named"},
)
else:
# SQLite配置
context.configure(
url=url,
target_metadata=target_metadata,
literal_binds=True,
dialect_opts={"paramstyle": "named"},
render_as_batch=True
)
with context.begin_transaction():
context.run_migrations()
@@ -66,9 +78,22 @@ def run_migrations_online() -> None:
)
with connectable.connect() as connection:
url = config.get_main_option("sqlalchemy.url")
# 根据数据库类型配置不同的参数
if url and "postgresql" in url:
# PostgreSQL配置
context.configure(
connection=connection,
target_metadata=target_metadata
)
else:
# SQLite配置
context.configure(
connection=connection,
target_metadata=target_metadata,
render_as_batch=True
)
with context.begin_transaction():
context.run_migrations()
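render_as_batch matters because SQLite cannot alter most column properties in place: batch mode rebuilds the table (copy, then swap) behind the scenes, while PostgreSQL uses plain ALTER statements. A hedged sketch of a migration body that benefits from it; the table and column are illustrative:
import sqlalchemy as sa
from alembic import op

def upgrade() -> None:
    # Plain ALTER on PostgreSQL; copy-table-and-swap on SQLite under batch mode
    with op.batch_alter_table("siteuserdata") as batch_op:
        batch_op.alter_column("userid", type_=sa.String())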

View File

@@ -21,7 +21,11 @@ depends_on = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
# 站点数据统计增加站点名称
with contextlib.suppress(Exception):
conn = op.get_bind()
inspector = sa.inspect(conn)
columns = inspector.get_columns('siteuserdata')
# 检查 'name' 字段是否已存在
if not any(c['name'] == 'name' for c in columns):
op.add_column('siteuserdata', sa.Column('name', sa.String(), nullable=True))
# ### end Alembic commands ###
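The inspect-before-add guard above recurs throughout the migrations below; a hedged helper capturing the idiom (the helper name is illustrative, not part of the project):
import sqlalchemy as sa
from alembic import op

def _has_column(table: str, column: str) -> bool:
    # True if the column already exists on the given table
    inspector = sa.inspect(op.get_bind())
    return any(c["name"] == column for c in inspector.get_columns(table))

# usage: if not _has_column('siteuserdata', 'name'): op.add_column(...)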

View File

@@ -18,19 +18,18 @@ depends_on = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
with contextlib.suppress(Exception):
# 添加触发类型字段
conn = op.get_bind()
inspector = sa.inspect(conn)
columns = inspector.get_columns('workflow')
if not any(c['name'] == 'trigger_type' for c in columns):
op.add_column('workflow', sa.Column('trigger_type', sa.String(), nullable=True, default='timer'))
# 添加事件类型字段
if not any(c['name'] == 'event_type' for c in columns):
op.add_column('workflow', sa.Column('event_type', sa.String(), nullable=True))
# 添加事件条件字段
if not any(c['name'] == 'event_conditions' for c in columns):
op.add_column('workflow', sa.Column('event_conditions', sa.JSON(), nullable=True, default={}))
# ### end Alembic commands ###
def downgrade() -> None:

View File

@@ -19,13 +19,28 @@ depends_on = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
with contextlib.suppress(Exception):
conn = op.get_bind()
inspector = sa.inspect(conn)
# 检查并添加 downloadhistory.episode_group
dh_columns = inspector.get_columns('downloadhistory')
if not any(c['name'] == 'episode_group' for c in dh_columns):
op.add_column('downloadhistory', sa.Column('episode_group', sa.String, nullable=True))
# 检查并添加 subscribe.episode_group
s_columns = inspector.get_columns('subscribe')
if not any(c['name'] == 'episode_group' for c in s_columns):
op.add_column('subscribe', sa.Column('episode_group', sa.String, nullable=True))
# 检查并添加 subscribehistory.episode_group
sh_columns = inspector.get_columns('subscribehistory')
if not any(c['name'] == 'episode_group' for c in sh_columns):
op.add_column('subscribehistory', sa.Column('episode_group', sa.String, nullable=True))
# 检查并添加 transferhistory.episode_group
th_columns = inspector.get_columns('transferhistory')
if not any(c['name'] == 'episode_group' for c in th_columns):
op.add_column('transferhistory', sa.Column('episode_group', sa.String, nullable=True))
# ### end Alembic commands ###
def downgrade() -> None:

View File

@@ -18,11 +18,11 @@ depends_on = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
# 整理历史记录 增加下载器字段
with contextlib.suppress(Exception):
conn = op.get_bind()
inspector = sa.inspect(conn)
columns = inspector.get_columns('transferhistory')
if not any(c['name'] == 'downloader' for c in columns):
op.add_column('transferhistory', sa.Column('downloader', sa.String(), nullable=True))
# ### end Alembic commands ###
def downgrade() -> None:

View File

@@ -0,0 +1,118 @@
"""2.2.0
Revision ID: 5b3355c964bb
Revises: d58298a0879f
Create Date: 2025-08-19 12:27:08.451371
"""
import sqlalchemy as sa
from alembic import op
from app.log import logger
from app.core.config import settings
# revision identifiers, used by Alembic.
revision = '5b3355c964bb'
down_revision = 'd58298a0879f'
branch_labels = None
depends_on = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
if settings.DB_TYPE.lower() == "postgresql":
# 将SQLite的Sequence转换为PostgreSQL的Identity
fix_postgresql_sequences()
# ### end Alembic commands ###
def fix_postgresql_sequences():
"""
修复PostgreSQL数据库中的序列问题
将SQLite迁移过来的Sequence转换为PostgreSQL的Identity
"""
connection = op.get_bind()
# 获取所有表名
result = connection.execute(sa.text("""
SELECT table_name
FROM information_schema.tables
WHERE table_schema = 'public'
AND table_type = 'BASE TABLE'
"""))
tables = [row[0] for row in result.fetchall()]
logger.info(f"发现 {len(tables)} 个表需要检查序列")
for table_name in tables:
fix_table_sequence(connection, table_name)
def fix_table_sequence(connection, table_name):
"""
修复单个表的序列
"""
try:
# 跳过alembic_version表它没有id列
if table_name == 'alembic_version':
logger.debug(f"跳过表 {table_name}这是Alembic版本表")
return
# 检查表是否有id列
result = connection.execute(sa.text(f"""
SELECT is_identity, column_default
FROM information_schema.columns
WHERE table_name = '{table_name}'
AND column_name = 'id'
"""))
id_column = result.fetchone()
if not id_column:
logger.debug(f"{table_name} 没有id列跳过")
return
is_identity, column_default = id_column
# 检查是否已经是Identity类型
if is_identity == 'YES' or (column_default and 'GENERATED BY DEFAULT AS IDENTITY' in column_default):
logger.debug(f"{table_name} 的id列已经是Identity类型跳过")
return
# 检查是否有序列
logger.info(f"{table_name} 存在序列,需要修复")
convert_to_identity(connection, table_name)
except Exception as e:
logger.error(f"修复表 {table_name} 序列时出错: {e}")
# 回滚当前事务,避免影响后续操作
connection.rollback()
def convert_to_identity(connection, table_name):
"""
将序列转换为Identity,保持原有约束不变
"""
try:
# 获取当前序列的最大值
result = connection.execute(sa.text(f"""
SELECT COALESCE(MAX(id), 0) + 1 as next_value
FROM "{table_name}"
"""))
next_value = result.fetchone()[0]
# 直接修改列属性添加Identity保持其他约束不变
# 这种方式不会删除主键约束和索引
connection.execute(sa.text(f"""
ALTER TABLE "{table_name}"
ALTER COLUMN id ADD GENERATED BY DEFAULT AS IDENTITY (START WITH {next_value})
"""))
logger.info(f"{table_name} 序列已转换为Identity起始值为 {next_value}")
except Exception as e:
# 如果是已经存在的Identity错误则忽略
if "already an identity column" in str(e):
logger.warn(f"{table_name} 的id列已经是Identity类型忽略此错误: {e}")
return
logger.error(f"转换表 {table_name} 序列时出错: {e}")
raise

View File

@@ -19,10 +19,11 @@ depends_on = None
def upgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    with contextlib.suppress(Exception):
        conn = op.get_bind()
        inspector = sa.inspect(conn)
        columns = inspector.get_columns('workflow')
        if not any(c['name'] == 'flows' for c in columns):
            op.add_column('workflow', sa.Column('flows', sa.JSON(), nullable=True))
    # ### end Alembic commands ###


def downgrade() -> None:

View File

@@ -0,0 +1,80 @@
"""2.2.1
Revision ID: a946dae52526
Revises: 5b3355c964bb
Create Date: 2025-08-20 17:50:00.000000
"""
import sqlalchemy as sa
from alembic import op
from app.log import logger
from app.core.config import settings
# revision identifiers, used by Alembic.
revision = 'a946dae52526'
down_revision = '5b3355c964bb'
branch_labels = None
depends_on = None
def upgrade() -> None:
"""
升级将SiteUserData表的userid字段从Integer改为String
"""
connection = op.get_bind()
if settings.DB_TYPE.lower() == "postgresql":
# PostgreSQL数据库迁移
migrate_postgresql_userid(connection)
def downgrade() -> None:
"""
降级将SiteUserData表的userid字段从String改回Integer
"""
pass
def migrate_postgresql_userid(connection):
"""
PostgreSQL数据库userid字段迁移
"""
try:
logger.info("开始PostgreSQL数据库userid字段迁移...")
# 1. 创建临时列
connection.execute(sa.text("""
ALTER TABLE siteuserdata
ADD COLUMN userid_new VARCHAR
"""))
# 2. 将现有数据转换为字符串并复制到新列
connection.execute(sa.text("""
UPDATE siteuserdata
SET userid_new = CAST(userid AS VARCHAR)
WHERE userid IS NOT NULL
"""))
# 3. 删除旧列
connection.execute(sa.text("""
ALTER TABLE siteuserdata
DROP COLUMN userid
"""))
# 4. 重命名新列
connection.execute(sa.text("""
ALTER TABLE siteuserdata
RENAME COLUMN userid_new TO userid
"""))
logger.info("PostgreSQL数据库userid字段迁移完成")
except Exception as e:
logger.error(f"PostgreSQL数据库userid字段迁移失败: {e}")
raise

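The add/copy/drop/rename sequence in a946dae52526 preserves the data but loses any index or constraint that referenced the old userid column. PostgreSQL can also retype a column in place with a USING cast, which keeps dependent objects where possible; a one-statement alternative sketch, not what the migration ships:

import sqlalchemy as sa
from alembic import op


def migrate_postgresql_userid_in_place() -> None:
    # Retype the column in place; the USING cast makes the conversion explicit.
    op.get_bind().execute(sa.text("""
        ALTER TABLE siteuserdata
        ALTER COLUMN userid TYPE VARCHAR USING userid::VARCHAR
    """))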
View File

@@ -18,11 +18,11 @@ depends_on = None
def upgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    # Download history: add downloader field
    with contextlib.suppress(Exception):
        conn = op.get_bind()
        inspector = sa.inspect(conn)
        columns = inspector.get_columns('downloadhistory')
        if not any(c['name'] == 'downloader' for c in columns):
            op.add_column('downloadhistory', sa.Column('downloader', sa.String(), nullable=True))
    # ### end Alembic commands ###


def downgrade() -> None:

View File

@@ -18,13 +18,23 @@ depends_on = None
def upgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    # Subscriptions: add mediaid
    with contextlib.suppress(Exception):
        conn = op.get_bind()
        inspector = sa.inspect(conn)
        # Check for and add subscribe.mediaid
        s_columns = inspector.get_columns('subscribe')
        if not any(c['name'] == 'mediaid' for c in s_columns):
            op.add_column('subscribe', sa.Column('mediaid', sa.String(), nullable=True))
        # Check for and create the index
        s_indexes = inspector.get_indexes('subscribe')
        if not any(i['name'] == 'ix_subscribe_mediaid' for i in s_indexes):
            op.create_index('ix_subscribe_mediaid', 'subscribe', ['mediaid'], unique=False)
        # Check for and add subscribehistory.mediaid
        sh_columns = inspector.get_columns('subscribehistory')
        if not any(c['name'] == 'mediaid' for c in sh_columns):
            op.add_column('subscribehistory', sa.Column('mediaid', sa.String(), nullable=True))
    # ### end Alembic commands ###


def downgrade() -> None:

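The same inspector guard extends to indexes via get_indexes, as the mediaid migration above does inline. A sketch of the index-side helper; create_index_if_missing is our name, not the repository's:

import contextlib
from typing import List

import sqlalchemy as sa
from alembic import op


def create_index_if_missing(name: str, table: str, columns: List[str], unique: bool = False) -> None:
    # Create the index only when no index of that name exists on the table.
    with contextlib.suppress(Exception):
        inspector = sa.inspect(op.get_bind())
        if not any(i['name'] == name for i in inspector.get_indexes(table)):
            op.create_index(name, table, columns, unique=unique)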
View File

@@ -0,0 +1,21 @@
"""2.1.9
Revision ID: d58298a0879f
Revises: 4666ce24a443
Create Date: 2025-08-19 11:56:39.652032
"""
# revision identifiers, used by Alembic.
revision = 'd58298a0879f'
down_revision = '4666ce24a443'
branch_labels = None
depends_on = None
def upgrade() -> None:
pass
def downgrade() -> None:
pass

View File

@@ -10,6 +10,7 @@ import contextlib
from alembic import op
import sqlalchemy as sa
from app.log import logger
from app.db import SessionFactory
from app.db.models import UserConfig
@@ -21,28 +22,58 @@ depends_on = None
def upgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    # Support custom media categories, filter rule groups and custom recognition words for subscriptions
    with contextlib.suppress(Exception):
        conn = op.get_bind()
        inspector = sa.inspect(conn)
        # Check for and add downloadhistory.media_category
        dh_columns = inspector.get_columns('downloadhistory')
        if not any(c['name'] == 'media_category' for c in dh_columns):
            op.add_column('downloadhistory', sa.Column('media_category', sa.String(), nullable=True))
        # Check for and add the subscribe table columns
        sub_columns = inspector.get_columns('subscribe')
        if not any(c['name'] == 'custom_words' for c in sub_columns):
            op.add_column('subscribe', sa.Column('custom_words', sa.String(), nullable=True))
        if not any(c['name'] == 'media_category' for c in sub_columns):
            op.add_column('subscribe', sa.Column('media_category', sa.String(), nullable=True))
        if not any(c['name'] == 'filter_groups' for c in sub_columns):
            op.add_column('subscribe', sa.Column('filter_groups', sa.JSON(), nullable=True))
    # Convert String columns to the JSON type
    with contextlib.suppress(Exception):
        op.alter_column('subscribe', 'note', existing_type=sa.String(), type_=sa.JSON())
        op.alter_column('downloadhistory', 'note', existing_type=sa.String(), type_=sa.JSON())
        op.alter_column('mediaserveritem', 'note', existing_type=sa.String(), type_=sa.JSON())
        op.alter_column('message', 'note', existing_type=sa.String(), type_=sa.JSON())
        op.alter_column('plugindata', 'value', existing_type=sa.String(), type_=sa.JSON())
        op.alter_column('site', 'note', existing_type=sa.String(), type_=sa.JSON())
        op.alter_column('sitestatistic', 'note', existing_type=sa.String(), type_=sa.JSON())
        op.alter_column('systemconfig', 'value', existing_type=sa.String(), type_=sa.JSON())
        op.alter_column('userconfig', 'value', existing_type=sa.String(), type_=sa.JSON())
        # Define the tables and columns that need to be checked and converted
        columns_to_alter = {
            'subscribe': 'note',
            'downloadhistory': 'note',
            'mediaserveritem': 'note',
            'message': 'note',
            'plugindata': 'value',
            'site': 'note',
            'sitestatistic': 'note',
            'systemconfig': 'value',
            'userconfig': 'value'
        }
        for table, column_name in columns_to_alter.items():
            try:
                cols = inspector.get_columns(table)
                # Find the matching column info
                target_col = next((c for c in cols if c['name'] == column_name), None)
                # Alter the column if it exists and its type is not already JSON
                if target_col and not isinstance(target_col['type'], sa.JSON):
                    # PostgreSQL needs an explicit USING clause for the type conversion
                    if conn.dialect.name == 'postgresql':
                        op.alter_column(table, column_name,
                                        existing_type=sa.String(),
                                        type_=sa.JSON(),
                                        postgresql_using=f'"{column_name}"::json')
                    else:
                        op.alter_column(table, column_name,
                                        existing_type=sa.String(),
                                        type_=sa.JSON())
            except Exception as e:
                logger.error(f"Could not alter column {column_name} in table {table}: {e}")
    # Clear incompatible data from the user config table
    with SessionFactory() as db:
        UserConfig.truncate(db)
    # ### end Alembic commands ###


def downgrade() -> None:

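The reason for the dialect branch introduced above: SQLite stores declared column types loosely and accepts a plain String-to-JSON change, while PostgreSQL rejects an implicit text-to-json conversion, so Alembic must be told to emit a USING clause. A self-contained sketch of that branch with the table and column names as parameters:

import sqlalchemy as sa
from alembic import op


def alter_string_column_to_json(table: str, column: str) -> None:
    # PostgreSQL needs an explicit cast, e.g.
    #   ALTER TABLE site ALTER COLUMN note TYPE JSON USING "note"::json
    if op.get_bind().dialect.name == 'postgresql':
        op.alter_column(table, column,
                        existing_type=sa.String(),
                        type_=sa.JSON(),
                        postgresql_using=f'"{column}"::json')
    else:
        # SQLite/MySQL accept the plain type change.
        op.alter_column(table, column,
                        existing_type=sa.String(),
                        type_=sa.JSON())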
View File

@@ -18,14 +18,19 @@ depends_on = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
# 站点管理、订阅增加下载器选项
with contextlib.suppress(Exception):
conn = op.get_bind()
inspector = sa.inspect(conn)
# 检查并添加 site.downloader
site_columns = inspector.get_columns('site')
if not any(c['name'] == 'downloader' for c in site_columns):
op.add_column('site', sa.Column('downloader', sa.String(), nullable=True))
# 检查并添加 subscribe.downloader
subscribe_columns = inspector.get_columns('subscribe')
if not any(c['name'] == 'downloader' for c in subscribe_columns):
op.add_column('subscribe', sa.Column('downloader', sa.String(), nullable=True))
# ### end Alembic commands ###
def downgrade() -> None:
pass

View File

@@ -10,6 +10,8 @@ import contextlib
from alembic import op
import sqlalchemy as sa
from app.log import logger
# revision identifiers, used by Alembic.
revision = 'ecf3c693fdf3'
@@ -19,15 +21,35 @@ depends_on = None
def upgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    # Convert String columns to the JSON type
    with contextlib.suppress(Exception):
        op.alter_column('subscribehistory', 'sites', existing_type=sa.String(), type_=sa.JSON())
    with contextlib.suppress(Exception):
        op.add_column('subscribehistory', sa.Column('custom_words', sa.String(), nullable=True))
        op.add_column('subscribehistory', sa.Column('media_category', sa.String(), nullable=True))
        op.add_column('subscribehistory', sa.Column('filter_groups', sa.JSON(), nullable=True))
    # ### end Alembic commands ###
    conn = op.get_bind()
    inspector = sa.inspect(conn)
    table_name = 'subscribehistory'
    columns = inspector.get_columns(table_name)
    try:
        sites_col = next((c for c in columns if c['name'] == 'sites'), None)
        # If the 'sites' column exists and its type is not JSON, alter it
        if sites_col and not isinstance(sites_col['type'], sa.JSON):
            if conn.dialect.name == 'postgresql':
                op.alter_column(table_name, 'sites',
                                existing_type=sa.String(),
                                type_=sa.JSON(),
                                postgresql_using='sites::json')
            else:
                op.alter_column(table_name, 'sites',
                                existing_type=sa.String(),
                                type_=sa.JSON())
    except Exception as e:
        logger.error(f"Could not alter column 'sites' in table {table_name}: {e}")
    if not any(c['name'] == 'custom_words' for c in columns):
        op.add_column(table_name, sa.Column('custom_words', sa.String(), nullable=True))
    if not any(c['name'] == 'media_category' for c in columns):
        op.add_column(table_name, sa.Column('media_category', sa.String(), nullable=True))
    if not any(c['name'] == 'filter_groups' for c in columns):
        op.add_column(table_name, sa.Column('filter_groups', sa.JSON(), nullable=True))


def downgrade() -> None:

Some files were not shown because too many files have changed in this diff.