Mirror of https://github.com/jxxghp/MoviePilot.git (synced 2026-05-10 06:22:48 +08:00)
Compare commits

26 commits:

- 7de3bb2a91
- 3a8a2bcab4
- eb1adbe992
- b55966d42b
- 451ca9cb5a
- 1e2c607ced
- 5ff7da0d19
- 8e06c6f8e6
- 4497cd3904
- 2945679a94
- 1eaf7e3c85
- 8146b680c6
- 99e667382f
- 4c03759d3f
- 8593a6cdd0
- cd18c31618
- f29c918700
- 0f0c3e660b
- 1cf4639db3
- f5da9b5780
- e4c87c8a96
- 4b4bf153f0
- ec227d0d56
- 53c8c50779
- 07b4c8b462
- f3cfc5b9f0
.github/ISSUE_TEMPLATE/rfc.yml (vendored, 2 changed lines)
@@ -10,7 +10,7 @@ body:
        目的是让协作的开发者间清晰的知道「要做什么」和「具体会怎么做」,以及所有的开发者都能公开透明的参与讨论;
        以便评估和讨论产生的影响 (遗漏的考虑、向后兼容性、与现有功能的冲突),
        因此提案侧重在对解决问题的 **方案、设计、步骤** 的描述上。

        如果仅希望讨论是否添加或改进某功能本身,请使用 -> [Issue: 功能改进](https://github.com/jxxghp/MoviePilot/issues/new?assignees=&labels=feature+request&projects=&template=feature_request.yml&title=%5BFeature+Request%5D%3A+)
  - type: textarea
    id: background
.github/workflows/pylint.yml (vendored, 24 changed lines)
@@ -8,17 +8,17 @@ jobs:
  pylint:
    runs-on: ubuntu-latest
    name: Pylint Code Quality Check

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.12'
          cache: 'pip'

      - name: Cache pip dependencies
        uses: actions/cache@v4
        with:
@@ -26,7 +26,7 @@ jobs:
          key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt', '**/requirements.in') }}
          restore-keys: |
            ${{ runner.os }}-pip-

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip setuptools wheel
@@ -41,7 +41,7 @@ jobs:
          else
            echo "⚠️ 未找到依赖文件,仅安装 pylint"
          fi

      - name: Verify pylint config
        run: |
          # 检查项目中的pylint配置文件是否存在
@@ -57,35 +57,35 @@ jobs:
        run: |
          # 运行pylint,检查主要的Python文件
          echo "🚀 运行 Pylint 错误检查..."

          # 检查主要目录 - 只关注错误,如果有错误则退出
          echo "📂 检查 app/ 目录..."
          pylint app/ --output-format=colorized --reports=yes --score=yes

          # 检查根目录的Python文件
          echo "📂 检查根目录 Python 文件..."
          for file in $(find . -name "*.py" -not -path "./.*" -not -path "./.venv/*" -not -path "./build/*" -not -path "./dist/*" -not -path "./tests/*" -not -path "./docs/*" -not -path "./__pycache__/*" -maxdepth 1); do
            echo "检查文件: $file"
            pylint "$file" --output-format=colorized || exit 1
          done

          # 生成详细报告
          echo "📊 生成 Pylint 详细报告..."
          pylint app/ --output-format=json > pylint-report.json || true

          # 显示评分(仅供参考)
          echo "📈 Pylint 评分(仅供参考):"
          pylint app/ --score=yes --reports=no | tail -2 || true

      - name: Upload pylint report
        uses: actions/upload-artifact@v4
        if: always()
        with:
          name: pylint-report
          path: pylint-report.json

      - name: Summary
        run: |
          echo "🎉 Pylint 检查完成!"
          echo "✅ 没有发现语法错误或严重问题"
          echo "📊 详细报告已保存为构建工件"
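For contributors who want to reproduce the errors-only gate locally without running the workflow, here is a minimal sketch using pylint's Python API (assuming pylint is installed and the working directory is the repository root; this is an illustration, not part of the diff):

```python
# Minimal local reproduction of the CI gate: fail only on error-level messages.
from pylint.lint import Run

# exit=False keeps the process alive so the result can be inspected here.
result = Run(["app", "--errors-only", "--output-format=colorized"], exit=False)
if result.linter.msg_status != 0:
    raise SystemExit("Pylint found error-level problems")
```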
@@ -12,7 +12,7 @@ jobs=0
# 只关注错误级别的问题,禁用警告、约定和重构建议
# E = Error (错误) - 会导致构建失败
# W = Warning (警告) - 仅显示,不会失败
# R = Refactor (重构建议) - 仅显示,不会失败
# C = Convention (约定) - 仅显示,不会失败
# I = Information (信息) - 仅显示,不会失败

@@ -80,4 +80,4 @@ ignore-imports=yes

[TYPECHECK]
# 生成缺失成员提示的类列表
generated-members=requests.packages.urllib3
@@ -44,6 +44,8 @@ def download(
# 种子信息
torrentinfo = TorrentInfo()
torrentinfo.from_dict(torrent_in.dict())
# 手动下载始终使用选择的下载器
torrentinfo.site_downloader = downloader
# 上下文
context = Context(
meta_info=metainfo,
@@ -51,7 +53,7 @@ def download(
torrent_info=torrentinfo
)
did = DownloadChain().download_single(context=context, username=current_user.name,
downloader=downloader, save_path=save_path, source="Manual")
save_path=save_path, source="Manual")
if not did:
return schemas.Response(success=False, message="任务添加失败")
return schemas.Response(success=True, data={
@@ -1,5 +1,6 @@
from typing import List, Any, Dict, Optional

from app.helper.sites import SitesHelper
from fastapi import APIRouter, Depends, HTTPException
from sqlalchemy.orm import Session
from starlette.background import BackgroundTasks
@@ -21,7 +22,6 @@ from app.db.models.siteuserdata import SiteUserData
from app.db.site_oper import SiteOper
from app.db.systemconfig_oper import SystemConfigOper
from app.db.user_oper import get_current_active_superuser
from app.helper.sites import SitesHelper
from app.scheduler import Scheduler
from app.schemas.types import SystemConfigKey, EventType
from app.utils.string import StringUtils
@@ -333,8 +333,8 @@ def read_site_by_domain(
return site


@router.get("/statistic/{site_url}", summary="站点统计信息", response_model=schemas.SiteStatistic)
def read_site_by_domain(
@router.get("/statistic/{site_url}", summary="特定站点统计信息", response_model=schemas.SiteStatistic)
def read_statistic_by_domain(
site_url: str,
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)
@@ -349,6 +349,17 @@ def read_site_by_domain(
return schemas.SiteStatistic(domain=domain)


@router.get("/statistic", summary="所有站点统计信息", response_model=List[schemas.SiteStatistic])
def read_statistics(
db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)
) -> Any:
"""
获取所有站点统计信息
"""
return SiteStatistic.list(db)


@router.get("/rss", summary="所有订阅站点", response_model=List[schemas.Site])
def read_rss_sites(db: Session = Depends(get_db),
_: schemas.TokenPayload = Depends(verify_token)) -> List[dict]:
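As a rough illustration of how the new aggregate endpoint might be consumed, the sketch below is a hypothetical client call; the base URL, the /api/v1 prefix, and the bearer-token header are assumptions, not taken from this diff:

```python
# Hypothetical client call for the new "all site statistics" endpoint.
import requests

resp = requests.get(
    "http://localhost:3001/api/v1/site/statistic",  # assumed host and API prefix
    headers={"Authorization": "Bearer <token>"},    # assumed auth scheme
    timeout=10,
)
resp.raise_for_status()
for stat in resp.json():
    print(stat)  # one serialized SiteStatistic per site
```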
@@ -43,7 +43,7 @@ class MediaChain(ChainBase):
'movie_banner': True,  # 电影横幅图
'movie_thumb': True,  # 电影缩略图
'tv_nfo': True,  # 电视剧NFO
'tv_poster': True,  # 电视剧海报
'tv_backdrop': True,  # 电视剧背景图
'tv_banner': True,  # 电视剧横幅图
'tv_logo': True,  # 电视剧Logo
@@ -448,7 +448,7 @@ class MediaChain(ChainBase):
if not mediainfo:
logger.warn(f"{filepath} 无法识别文件媒体信息!")
return

# 获取刮削开关配置
scraping_switchs = self._get_scraping_switchs()
logger.info(f"开始刮削:{filepath} ...")
@@ -520,7 +520,7 @@ class MediaChain(ChainBase):
should_scrape = scraping_switchs.get('movie_thumb', True)
else:
should_scrape = True  # 未知类型默认刮削

if should_scrape:
image_path = filepath.with_name(image_name)
if overwrite or not storagechain.get_file_item(storage=fileitem.storage,
@@ -653,7 +653,7 @@ class MediaChain(ChainBase):
should_scrape = scraping_switchs.get('season_thumb', True)
else:
should_scrape = True  # 未知类型默认刮削

if should_scrape:
image_path = filepath.with_name(image_name)
# 只下载当前刮削季的图片
@@ -714,7 +714,7 @@ class MediaChain(ChainBase):
should_scrape = scraping_switchs.get('tv_thumb', True)
else:
should_scrape = True  # 未知类型默认刮削

if should_scrape:
image_path = filepath / image_name
if overwrite or not storagechain.get_file_item(storage=fileitem.storage,
@@ -110,11 +110,17 @@ class StorageChain(ChainBase):
"""
return self.run_module("get_parent_item", fileitem=fileitem)

def snapshot_storage(self, storage: str, path: Path) -> Optional[Dict[str, float]]:
def snapshot_storage(self, storage: str, path: Path,
last_snapshot_time: float = None, max_depth: int = 5) -> Optional[Dict[str, Dict]]:
"""
快照存储
:param storage: 存储类型
:param path: 路径
:param last_snapshot_time: 上次快照时间,用于增量快照
:param max_depth: 最大递归深度,避免过深遍历
"""
return self.run_module("snapshot_storage", storage=storage, path=path)
return self.run_module("snapshot_storage", storage=storage, path=path,
last_snapshot_time=last_snapshot_time, max_depth=max_depth)

def storage_usage(self, storage: str) -> Optional[schemas.StorageUsage]:
"""
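The chain method above only threads the two new arguments through to the storage module. A minimal sketch of an incremental caller follows; the polling loop, timestamp bookkeeping, and the app.chain.storage import path are assumptions made for illustration, not part of this diff:

```python
# Illustrative incremental-snapshot polling loop built on the extended signature.
import time
from pathlib import Path

from app.chain.storage import StorageChain  # assumed module path

chain = StorageChain()
last_time = None  # first pass: take a full snapshot

for _ in range(3):  # a real caller would run on a scheduler
    snapshot = chain.snapshot_storage(
        storage="local",
        path=Path("/downloads"),
        last_snapshot_time=last_time,  # directories unchanged since then are skipped
        max_depth=5,                   # bounds the recursion on deep trees
    )
    if snapshot is not None:
        last_time = time.time()
    time.sleep(300)  # poll every 5 minutes
```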
@@ -647,153 +647,150 @@ class SubscribeChain(ChainBase):
|
||||
if domains and domain not in domains:
|
||||
continue
|
||||
logger.debug(f'开始匹配站点:{domain},共缓存了 {len(contexts)} 个种子...')
|
||||
try:
|
||||
for context in contexts:
|
||||
if global_vars.is_system_stopped:
|
||||
break
|
||||
# 提取信息
|
||||
_context = copy.copy(context)
|
||||
torrent_meta = _context.meta_info
|
||||
torrent_mediainfo = _context.media_info
|
||||
torrent_info = _context.torrent_info
|
||||
for context in contexts:
|
||||
if global_vars.is_system_stopped:
|
||||
break
|
||||
# 提取信息
|
||||
_context = copy.copy(context)
|
||||
torrent_meta = _context.meta_info
|
||||
torrent_mediainfo = _context.media_info
|
||||
torrent_info = _context.torrent_info
|
||||
|
||||
# 不在订阅站点范围的不处理
|
||||
sub_sites = self.get_sub_sites(subscribe)
|
||||
if sub_sites and torrent_info.site not in sub_sites:
|
||||
logger.debug(f"{torrent_info.site_name} - {torrent_info.title} 不符合订阅站点要求")
|
||||
continue
|
||||
# 不在订阅站点范围的不处理
|
||||
sub_sites = self.get_sub_sites(subscribe)
|
||||
if sub_sites and torrent_info.site not in sub_sites:
|
||||
logger.debug(f"{torrent_info.site_name} - {torrent_info.title} 不符合订阅站点要求")
|
||||
continue
|
||||
|
||||
# 有自定义识别词时,需要判断是否需要重新识别
|
||||
if custom_words_list:
|
||||
# 使用org_string,应用一次后理论上不能再次应用
|
||||
_, apply_words = wordsmatcher.prepare(torrent_meta.org_string,
|
||||
custom_words=custom_words_list)
|
||||
if apply_words:
|
||||
logger.info(
|
||||
f'{torrent_info.site_name} - {torrent_info.title} 因订阅存在自定义识别词,重新识别元数据...')
|
||||
# 重新识别元数据
|
||||
torrent_meta = MetaInfo(title=torrent_info.title, subtitle=torrent_info.description,
|
||||
custom_words=custom_words_list)
|
||||
# 更新元数据缓存
|
||||
_context.meta_info = torrent_meta
|
||||
# 重新识别媒体信息
|
||||
torrent_mediainfo = self.recognize_media(meta=torrent_meta,
|
||||
episode_group=subscribe.episode_group)
|
||||
if torrent_mediainfo:
|
||||
# 清理多余信息
|
||||
torrent_mediainfo.clear()
|
||||
# 更新种子缓存
|
||||
_context.media_info = torrent_mediainfo
|
||||
|
||||
# 如果仍然没有识别到媒体信息,尝试标题匹配
|
||||
if not torrent_mediainfo or (not torrent_mediainfo.tmdb_id and not torrent_mediainfo.douban_id):
|
||||
# 有自定义识别词时,需要判断是否需要重新识别
|
||||
if custom_words_list:
|
||||
# 使用org_string,应用一次后理论上不能再次应用
|
||||
_, apply_words = wordsmatcher.prepare(torrent_meta.org_string,
|
||||
custom_words=custom_words_list)
|
||||
if apply_words:
|
||||
logger.info(
|
||||
f'{torrent_info.site_name} - {torrent_info.title} 重新识别失败,尝试通过标题匹配...')
|
||||
if torrenthelper.match_torrent(mediainfo=mediainfo,
|
||||
torrent_meta=torrent_meta,
|
||||
torrent=torrent_info):
|
||||
# 匹配成功
|
||||
logger.info(
|
||||
f'{mediainfo.title_year} 通过标题匹配到可选资源:{torrent_info.site_name} - {torrent_info.title}')
|
||||
torrent_mediainfo = mediainfo
|
||||
f'{torrent_info.site_name} - {torrent_info.title} 因订阅存在自定义识别词,重新识别元数据...')
|
||||
# 重新识别元数据
|
||||
torrent_meta = MetaInfo(title=torrent_info.title, subtitle=torrent_info.description,
|
||||
custom_words=custom_words_list)
|
||||
# 更新元数据缓存
|
||||
_context.meta_info = torrent_meta
|
||||
# 重新识别媒体信息
|
||||
torrent_mediainfo = self.recognize_media(meta=torrent_meta,
|
||||
episode_group=subscribe.episode_group)
|
||||
if torrent_mediainfo:
|
||||
# 清理多余信息
|
||||
torrent_mediainfo.clear()
|
||||
# 更新种子缓存
|
||||
_context.media_info = mediainfo
|
||||
else:
|
||||
continue
|
||||
_context.media_info = torrent_mediainfo
|
||||
|
||||
# 直接比对媒体信息
|
||||
if torrent_mediainfo and (torrent_mediainfo.tmdb_id or torrent_mediainfo.douban_id):
|
||||
if torrent_mediainfo.type != mediainfo.type:
|
||||
continue
|
||||
if torrent_mediainfo.tmdb_id \
|
||||
and torrent_mediainfo.tmdb_id != mediainfo.tmdb_id:
|
||||
continue
|
||||
if torrent_mediainfo.douban_id \
|
||||
and torrent_mediainfo.douban_id != mediainfo.douban_id:
|
||||
continue
|
||||
# 如果仍然没有识别到媒体信息,尝试标题匹配
|
||||
if not torrent_mediainfo or (
|
||||
not torrent_mediainfo.tmdb_id and not torrent_mediainfo.douban_id):
|
||||
logger.info(
|
||||
f'{torrent_info.site_name} - {torrent_info.title} 重新识别失败,尝试通过标题匹配...')
|
||||
if torrenthelper.match_torrent(mediainfo=mediainfo,
|
||||
torrent_meta=torrent_meta,
|
||||
torrent=torrent_info):
|
||||
# 匹配成功
|
||||
logger.info(
|
||||
f'{mediainfo.title_year} 通过媒体信ID匹配到可选资源:{torrent_info.site_name} - {torrent_info.title}')
|
||||
f'{mediainfo.title_year} 通过标题匹配到可选资源:{torrent_info.site_name} - {torrent_info.title}')
|
||||
torrent_mediainfo = mediainfo
|
||||
# 更新种子缓存
|
||||
_context.media_info = mediainfo
|
||||
else:
|
||||
continue
|
||||
|
||||
# 如果是电视剧
|
||||
if torrent_mediainfo.type == MediaType.TV:
|
||||
# 有多季的不要
|
||||
if len(torrent_meta.season_list) > 1:
|
||||
logger.debug(f'{torrent_info.title} 有多季,不处理')
|
||||
continue
|
||||
# 比对季
|
||||
if torrent_meta.begin_season:
|
||||
if meta.begin_season != torrent_meta.begin_season:
|
||||
logger.debug(f'{torrent_info.title} 季不匹配')
|
||||
continue
|
||||
elif meta.begin_season != 1:
|
||||
# 直接比对媒体信息
|
||||
if torrent_mediainfo and (torrent_mediainfo.tmdb_id or torrent_mediainfo.douban_id):
|
||||
if torrent_mediainfo.type != mediainfo.type:
|
||||
continue
|
||||
if torrent_mediainfo.tmdb_id \
|
||||
and torrent_mediainfo.tmdb_id != mediainfo.tmdb_id:
|
||||
continue
|
||||
if torrent_mediainfo.douban_id \
|
||||
and torrent_mediainfo.douban_id != mediainfo.douban_id:
|
||||
continue
|
||||
logger.info(
|
||||
f'{mediainfo.title_year} 通过媒体信ID匹配到可选资源:{torrent_info.site_name} - {torrent_info.title}')
|
||||
else:
|
||||
continue
|
||||
|
||||
# 如果是电视剧
|
||||
if torrent_mediainfo.type == MediaType.TV:
|
||||
# 有多季的不要
|
||||
if len(torrent_meta.season_list) > 1:
|
||||
logger.debug(f'{torrent_info.title} 有多季,不处理')
|
||||
continue
|
||||
# 比对季
|
||||
if torrent_meta.begin_season:
|
||||
if meta.begin_season != torrent_meta.begin_season:
|
||||
logger.debug(f'{torrent_info.title} 季不匹配')
|
||||
continue
|
||||
# 非洗版
|
||||
if not subscribe.best_version:
|
||||
# 不是缺失的剧集不要
|
||||
if no_exists and no_exists.get(mediakey):
|
||||
# 缺失集
|
||||
no_exists_info = no_exists.get(mediakey).get(subscribe.season)
|
||||
if no_exists_info:
|
||||
# 是否有交集
|
||||
if no_exists_info.episodes and \
|
||||
torrent_meta.episode_list and \
|
||||
not set(no_exists_info.episodes).intersection(
|
||||
set(torrent_meta.episode_list)
|
||||
):
|
||||
logger.debug(
|
||||
f'{torrent_info.title} 对应剧集 {torrent_meta.episode_list} 未包含缺失的剧集'
|
||||
)
|
||||
continue
|
||||
else:
|
||||
# 洗版时,非整季不要
|
||||
if meta.type == MediaType.TV:
|
||||
if torrent_meta.episode_list:
|
||||
logger.debug(f'{subscribe.name} 正在洗版,{torrent_info.title} 不是整季')
|
||||
elif meta.begin_season != 1:
|
||||
logger.debug(f'{torrent_info.title} 季不匹配')
|
||||
continue
|
||||
# 非洗版
|
||||
if not subscribe.best_version:
|
||||
# 不是缺失的剧集不要
|
||||
if no_exists and no_exists.get(mediakey):
|
||||
# 缺失集
|
||||
no_exists_info = no_exists.get(mediakey).get(subscribe.season)
|
||||
if no_exists_info:
|
||||
# 是否有交集
|
||||
if no_exists_info.episodes and \
|
||||
torrent_meta.episode_list and \
|
||||
not set(no_exists_info.episodes).intersection(
|
||||
set(torrent_meta.episode_list)
|
||||
):
|
||||
logger.debug(
|
||||
f'{torrent_info.title} 对应剧集 {torrent_meta.episode_list} 未包含缺失的剧集'
|
||||
)
|
||||
continue
|
||||
|
||||
# 匹配订阅附加参数
|
||||
if not torrenthelper.filter_torrent(torrent_info=torrent_info,
|
||||
filter_params=self.get_params(subscribe)):
|
||||
continue
|
||||
|
||||
# 优先级过滤规则
|
||||
if subscribe.best_version:
|
||||
rule_groups = subscribe.filter_groups \
|
||||
or systemconfig.get(SystemConfigKey.BestVersionFilterRuleGroups)
|
||||
else:
|
||||
rule_groups = subscribe.filter_groups \
|
||||
or systemconfig.get(SystemConfigKey.SubscribeFilterRuleGroups)
|
||||
result: List[TorrentInfo] = self.filter_torrents(
|
||||
rule_groups=rule_groups,
|
||||
torrent_list=[torrent_info],
|
||||
mediainfo=torrent_mediainfo)
|
||||
if result is not None and not result:
|
||||
# 不符合过滤规则
|
||||
logger.debug(f"{torrent_info.title} 不匹配过滤规则")
|
||||
# 洗版时,非整季不要
|
||||
if meta.type == MediaType.TV:
|
||||
if torrent_meta.episode_list:
|
||||
logger.debug(f'{subscribe.name} 正在洗版,{torrent_info.title} 不是整季')
|
||||
continue
|
||||
|
||||
# 匹配订阅附加参数
|
||||
if not torrenthelper.filter_torrent(torrent_info=torrent_info,
|
||||
filter_params=self.get_params(subscribe)):
|
||||
continue
|
||||
|
||||
# 优先级过滤规则
|
||||
if subscribe.best_version:
|
||||
rule_groups = subscribe.filter_groups \
|
||||
or systemconfig.get(SystemConfigKey.BestVersionFilterRuleGroups)
|
||||
else:
|
||||
rule_groups = subscribe.filter_groups \
|
||||
or systemconfig.get(SystemConfigKey.SubscribeFilterRuleGroups)
|
||||
result: List[TorrentInfo] = self.filter_torrents(
|
||||
rule_groups=rule_groups,
|
||||
torrent_list=[torrent_info],
|
||||
mediainfo=torrent_mediainfo)
|
||||
if result is not None and not result:
|
||||
# 不符合过滤规则
|
||||
logger.debug(f"{torrent_info.title} 不匹配过滤规则")
|
||||
continue
|
||||
|
||||
# 洗版时,优先级小于已下载优先级的不要
|
||||
if subscribe.best_version:
|
||||
if subscribe.current_priority \
|
||||
and torrent_info.pri_order <= subscribe.current_priority:
|
||||
logger.info(
|
||||
f'{subscribe.name} 正在洗版,{torrent_info.title} 优先级低于或等于已下载优先级')
|
||||
continue
|
||||
|
||||
# 洗版时,优先级小于已下载优先级的不要
|
||||
if subscribe.best_version:
|
||||
if subscribe.current_priority \
|
||||
and torrent_info.pri_order <= subscribe.current_priority:
|
||||
logger.info(
|
||||
f'{subscribe.name} 正在洗版,{torrent_info.title} 优先级低于或等于已下载优先级')
|
||||
continue
|
||||
|
||||
# 匹配成功
|
||||
logger.info(f'{mediainfo.title_year} 匹配成功:{torrent_info.title}')
|
||||
# 自定义属性
|
||||
if subscribe.media_category:
|
||||
torrent_mediainfo.category = subscribe.media_category
|
||||
if subscribe.episode_group:
|
||||
torrent_mediainfo.episode_group = subscribe.episode_group
|
||||
_match_context.append(_context)
|
||||
finally:
|
||||
contexts.clear()
|
||||
del contexts
|
||||
# 匹配成功
|
||||
logger.info(f'{mediainfo.title_year} 匹配成功:{torrent_info.title}')
|
||||
# 自定义属性
|
||||
if subscribe.media_category:
|
||||
torrent_mediainfo.category = subscribe.media_category
|
||||
if subscribe.episode_group:
|
||||
torrent_mediainfo.episode_group = subscribe.episode_group
|
||||
_match_context.append(_context)
|
||||
|
||||
if not _match_context:
|
||||
# 未匹配到资源
|
||||
|
||||
@@ -35,7 +35,7 @@ class SystemChain(ChainBase):
|
||||
重启系统
|
||||
"""
|
||||
from app.core.config import global_vars
|
||||
|
||||
|
||||
if channel and userid:
|
||||
self.post_message(Notification(channel=channel, source=source,
|
||||
title="系统正在重启,请耐心等候!", userid=userid))
|
||||
|
||||
@@ -880,7 +880,7 @@ class TransferChain(ChainBase, metaclass=Singleton):
) -> List[Tuple[FileItem, bool]]:
"""
获取整理目录或文件列表

:param fileitem: 文件项
:param depth: 递归深度,默认为1
"""
@@ -225,6 +225,9 @@ class Command(metaclass=Singleton):
添加命令集合
"""
for cmd, command in source.items():
if not command.get("show", True):
continue

command_data = {
"type": command_type,
"description": command.get("description"),
@@ -261,6 +264,7 @@ class Command(metaclass=Singleton):
"func": self.send_plugin_event,
"description": command.get("desc"),
"category": command.get("category"),
"show": command.get("show", True),
"data": {
"etype": command.get("event"),
"data": command.get("data")
@@ -335,7 +339,8 @@ class Command(metaclass=Singleton):
return self._commands.get(cmd, {})

def register(self, cmd: str, func: Any, data: Optional[dict] = None,
desc: Optional[str] = None, category: Optional[str] = None) -> None:
desc: Optional[str] = None, category: Optional[str] = None,
show: bool = True) -> None:
"""
注册单个命令
"""
@@ -344,7 +349,8 @@ class Command(metaclass=Singleton):
"func": func,
"description": desc,
"category": category,
"data": data or {}
"data": data or {},
"show": show
}

def execute(self, cmd: str, data_str: Optional[str] = "",
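A brief usage sketch of the extended register follows; the command name, callback, and import path are made up for illustration and are not taken from this diff:

```python
# Hypothetical registration of a command hidden from menus via the new "show" flag.
from app.command import Command  # assumed import path

def _clean_cache(**kwargs):
    """Illustrative callback; real commands are registered by modules and plugins."""
    print("cache cleaned", kwargs)

Command().register(
    cmd="/clean_cache",
    func=_clean_cache,
    desc="清理缓存",
    category="管理",
    show=False,  # still registered and executable, but not listed in menus
)
```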
@@ -150,7 +150,7 @@ class CacheToolsBackend(CacheBackend):
region = self.get_region(region)
return self._region_caches.get(region)

def set(self, key: str, value: Any, ttl: Optional[int] = None,
region: Optional[str] = DEFAULT_CACHE_REGION, **kwargs) -> None:
"""
设置缓存值支持每个 key 独立配置 TTL 和 Maxsize
@@ -357,7 +357,7 @@ class RedisBackend(CacheBackend):
region = self.get_region(quote(region))
return f"{region}:key:{quote(key)}"

def set(self, key: str, value: Any, ttl: Optional[int] = None,
region: Optional[str] = DEFAULT_CACHE_REGION, **kwargs) -> None:
"""
设置缓存
@@ -55,6 +55,8 @@ class MetaBase(object):
resource_team: Optional[str] = None
# 识别的自定义占位符
customization: Optional[str] = None
# 识别的流媒体平台
web_source: Optional[str] = None
# 视频编码
video_encode: Optional[str] = None
# 音频编码
@@ -67,7 +67,6 @@ class MetaVideo(MetaBase):
original_title = title
self._source = ""
self._effect = []
self.web_source = None
self._index = 0
# 判断是否纯数字命名
if isfile \
@@ -140,9 +139,6 @@ class MetaVideo(MetaBase):
self.resource_effect = " ".join(self._effect)
if self._source:
self.resource_type = self._source.strip()
# 添加流媒体平台
if self.web_source:
self.resource_type = f"{self.web_source} {self.resource_type}"
# 提取原盘DIY
if self.resource_type and "BluRay" in self.resource_type:
if (self.subtitle and re.findall(r'D[Ii]Y', self.subtitle)) \
@@ -154,35 +154,35 @@ def find_metainfo(title: str) -> Tuple[str, dict]:
# 去除title中该部分
if tmdbid or mtype or begin_season or end_season or begin_episode or end_episode:
title = title.replace(f"{{[{result}]}}", '')

# 支持Emby格式的ID标签
# 1. [tmdbid=xxxx] 或 [tmdbid-xxxx] 格式
tmdb_match = re.search(r'\[tmdbid[=\-](\d+)\]', title)
if tmdb_match:
metainfo['tmdbid'] = tmdb_match.group(1)
title = re.sub(r'\[tmdbid[=\-](\d+)\]', '', title).strip()

# 2. [tmdb=xxxx] 或 [tmdb-xxxx] 格式
if not metainfo['tmdbid']:
tmdb_match = re.search(r'\[tmdb[=\-](\d+)\]', title)
if tmdb_match:
metainfo['tmdbid'] = tmdb_match.group(1)
title = re.sub(r'\[tmdb[=\-](\d+)\]', '', title).strip()

# 3. {tmdbid=xxxx} 或 {tmdbid-xxxx} 格式
if not metainfo['tmdbid']:
tmdb_match = re.search(r'\{tmdbid[=\-](\d+)\}', title)
if tmdb_match:
metainfo['tmdbid'] = tmdb_match.group(1)
title = re.sub(r'\{tmdbid[=\-](\d+)\}', '', title).strip()

# 4. {tmdb=xxxx} 或 {tmdb-xxxx} 格式
if not metainfo['tmdbid']:
tmdb_match = re.search(r'\{tmdb[=\-](\d+)\}', title)
if tmdb_match:
metainfo['tmdbid'] = tmdb_match.group(1)
title = re.sub(r'\{tmdb[=\-](\d+)\}', '', title).strip()

# 计算季集总数
if metainfo.get('begin_season') and metainfo.get('end_season'):
if metainfo['begin_season'] > metainfo['end_season']:
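These four tag styles can be exercised on their own. The following standalone sketch mirrors the same regular expressions outside the project; the helper name and sample titles are made up for illustration:

```python
# Standalone demonstration of the Emby-style ID tags handled in the hunk above.
import re


def extract_tmdbid(title: str):
    """Return (cleaned_title, tmdbid) for the four supported tag forms, or (title, None)."""
    patterns = [
        r'\[tmdbid[=\-](\d+)\]',  # [tmdbid=123] / [tmdbid-123]
        r'\[tmdb[=\-](\d+)\]',    # [tmdb=123]   / [tmdb-123]
        r'\{tmdbid[=\-](\d+)\}',  # {tmdbid=123} / {tmdbid-123}
        r'\{tmdb[=\-](\d+)\}',    # {tmdb=123}   / {tmdb-123}
    ]
    for pattern in patterns:
        match = re.search(pattern, title)
        if match:
            return re.sub(pattern, '', title).strip(), match.group(1)
    return title, None


print(extract_tmdbid("Inception (2010) {tmdb-27205}"))  # ('Inception (2010)', '27205')
print(extract_tmdbid("Some.Show.S01 [tmdbid=1396]"))    # ('Some.Show.S01', '1396')
```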
@@ -3,6 +3,7 @@ import concurrent.futures
|
||||
import importlib.util
|
||||
import inspect
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
import traceback
|
||||
from concurrent.futures import ThreadPoolExecutor, as_completed
|
||||
@@ -198,10 +199,14 @@ class PluginManager(metaclass=Singleton):
|
||||
# 清空指定插件
|
||||
self._plugins.pop(pid, None)
|
||||
self._running_plugins.pop(pid, None)
|
||||
# 清除插件模块缓存,包括所有子模块
|
||||
self._clear_plugin_modules(pid)
|
||||
else:
|
||||
# 清空
|
||||
self._plugins = {}
|
||||
self._running_plugins = {}
|
||||
# 清除所有插件模块缓存
|
||||
self._clear_plugin_modules()
|
||||
logger.info("插件停止完成")
|
||||
|
||||
@staticmethod
|
||||
@@ -366,25 +371,51 @@ class PluginManager(metaclass=Singleton):
|
||||
"""
|
||||
self.stop(plugin_id)
|
||||
|
||||
# 从模块列表中移除插件
|
||||
from sys import modules
|
||||
try:
|
||||
del modules[f"app.plugins.{plugin_id.lower()}"]
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
def reload_plugin(self, plugin_id: str):
|
||||
"""
|
||||
将一个插件重新加载到内存
|
||||
:param plugin_id: 插件ID
|
||||
"""
|
||||
# 先移除
|
||||
# 先移除插件实例
|
||||
self.stop(plugin_id)
|
||||
# 重新加载
|
||||
self.start(plugin_id)
|
||||
# 广播事件
|
||||
eventmanager.send_event(EventType.PluginReload, data={"plugin_id": plugin_id})
|
||||
|
||||
@staticmethod
|
||||
def _clear_plugin_modules(plugin_id: Optional[str] = None):
|
||||
"""
|
||||
清除插件及其所有子模块的缓存
|
||||
:param plugin_id: 插件ID
|
||||
"""
|
||||
|
||||
# 构建插件模块前缀
|
||||
if plugin_id:
|
||||
plugin_module_prefix = f"app.plugins.{plugin_id.lower()}"
|
||||
else:
|
||||
plugin_module_prefix = "app.plugins"
|
||||
|
||||
# 收集需要删除的模块名(创建模块名列表的副本以避免迭代时修改字典)
|
||||
modules_to_remove = []
|
||||
for module_name in list(sys.modules.keys()):
|
||||
if module_name == plugin_module_prefix or module_name.startswith(plugin_module_prefix + "."):
|
||||
modules_to_remove.append(module_name)
|
||||
|
||||
# 删除模块
|
||||
for module_name in modules_to_remove:
|
||||
try:
|
||||
del sys.modules[module_name]
|
||||
logger.debug(f"已清除插件模块缓存:{module_name}")
|
||||
except KeyError:
|
||||
# 模块可能已经被删除
|
||||
pass
|
||||
if plugin_id:
|
||||
if modules_to_remove:
|
||||
logger.info(f"插件 {plugin_id} 共清除 {len(modules_to_remove)} 个模块缓存:{modules_to_remove}")
|
||||
else:
|
||||
logger.debug(f"插件 {plugin_id} 没有找到需要清除的模块缓存")
|
||||
|
||||
def sync(self) -> List[str]:
|
||||
"""
|
||||
安装本地不存在或需要更新的插件
|
||||
@@ -1416,8 +1447,9 @@ class PluginManager(metaclass=Singleton):
|
||||
content = f.read()
|
||||
|
||||
# 替换CSS中可能的类名引用
|
||||
content = content.replace(original_class_name.lower(), clone_class_name.lower())
|
||||
content = content.replace(original_class_name, clone_class_name)
|
||||
content = content.replace(original_class_name.lower(),
|
||||
clone_class_name.lower()).replace(original_class_name,
|
||||
clone_class_name)
|
||||
|
||||
with open(file_path, 'w', encoding='utf-8') as f:
|
||||
f.write(content)
|
||||
|
||||
@@ -46,17 +46,17 @@ class PlaywrightHelper:
|
||||
browser = playwright[self.browser_type].launch(headless=headless)
|
||||
context = browser.new_context(user_agent=ua, proxy=proxies)
|
||||
page = context.new_page()
|
||||
|
||||
|
||||
if cookies:
|
||||
page.set_extra_http_headers({"cookie": cookies})
|
||||
|
||||
|
||||
if not self.__pass_cloudflare(url, page):
|
||||
logger.warn("cloudflare challenge fail!")
|
||||
page.wait_for_load_state("networkidle", timeout=timeout * 1000)
|
||||
|
||||
|
||||
# 回调函数
|
||||
result = callback(page)
|
||||
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"网页操作失败: {str(e)}")
|
||||
finally:
|
||||
@@ -69,7 +69,7 @@ class PlaywrightHelper:
|
||||
browser.close()
|
||||
except Exception as e:
|
||||
logger.error(f"Playwright初始化失败: {str(e)}")
|
||||
|
||||
|
||||
return result
|
||||
|
||||
def get_page_source(self, url: str,
|
||||
@@ -97,16 +97,16 @@ class PlaywrightHelper:
|
||||
browser = playwright[self.browser_type].launch(headless=headless)
|
||||
context = browser.new_context(user_agent=ua, proxy=proxies)
|
||||
page = context.new_page()
|
||||
|
||||
|
||||
if cookies:
|
||||
page.set_extra_http_headers({"cookie": cookies})
|
||||
|
||||
|
||||
if not self.__pass_cloudflare(url, page):
|
||||
logger.warn("cloudflare challenge fail!")
|
||||
page.wait_for_load_state("networkidle", timeout=timeout * 1000)
|
||||
|
||||
|
||||
source = page.content()
|
||||
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"获取网页源码失败: {str(e)}")
|
||||
source = None
|
||||
@@ -120,7 +120,7 @@ class PlaywrightHelper:
|
||||
browser.close()
|
||||
except Exception as e:
|
||||
logger.error(f"Playwright初始化失败: {str(e)}")
|
||||
|
||||
|
||||
return source
|
||||
|
||||
|
||||
|
||||
@@ -361,7 +361,7 @@ class MemoryHelper(metaclass=Singleton):
|
||||
|
||||
# 对于较大的对象,使用 asizeof 进行深度计算
|
||||
size_bytes = asizeof.asizeof(obj)
|
||||
|
||||
|
||||
# 只处理大于10KB的对象,提高分析效率
|
||||
if size_bytes < 10240:
|
||||
continue
|
||||
|
||||
@@ -183,6 +183,8 @@ class TemplateContextBuilder:
|
||||
"videoCodec": meta.video_encode,
|
||||
# 音频编码
|
||||
"audioCodec": meta.audio_encode,
|
||||
# 流媒体平台
|
||||
"webSource": meta.web_source,
|
||||
}
|
||||
self._context.update({**meta_info, **tech_metadata, **episode_data})
|
||||
|
||||
|
||||
@@ -9,7 +9,7 @@ class OcrHelper:
|
||||
|
||||
_ocr_b64_url = f"{settings.OCR_HOST}/captcha/base64"
|
||||
|
||||
def get_captcha_text(self, image_url: Optional[str] = None, image_b64: Optional[str] = None,
|
||||
def get_captcha_text(self, image_url: Optional[str] = None, image_b64: Optional[str] = None,
|
||||
cookie: Optional[str] = None, ua: Optional[str] = None):
|
||||
"""
|
||||
根据图片地址,获取验证码图片,并识别内容
|
||||
|
||||
@@ -53,10 +53,10 @@ class PluginHelper(metaclass=Singleton):
|
||||
# 如果强制刷新,直接调用不带缓存的版本
|
||||
if force:
|
||||
return self._get_plugins_uncached(repo_url, package_version)
|
||||
|
||||
|
||||
# 正常情况下调用带缓存的版本
|
||||
return self._get_plugins_cached(repo_url, package_version)
|
||||
|
||||
|
||||
@cached(maxsize=64, ttl=1800)
|
||||
def _get_plugins_cached(self, repo_url: str, package_version: Optional[str] = None) -> Optional[Dict[str, dict]]:
|
||||
"""
|
||||
@@ -65,7 +65,7 @@ class PluginHelper(metaclass=Singleton):
|
||||
:param package_version: 首选插件版本 (如 "v2", "v3"),如果不指定则获取 v1 版本
|
||||
"""
|
||||
return self._get_plugins_uncached(repo_url, package_version)
|
||||
|
||||
|
||||
def _get_plugins_uncached(self, repo_url: str, package_version: Optional[str] = None) -> Optional[Dict[str, dict]]:
|
||||
"""
|
||||
获取Github所有最新插件列表(不使用缓存)
|
||||
|
||||
@@ -246,12 +246,17 @@ class RssHelper:
|
||||
ret = RequestUtils(proxies=settings.PROXY if proxy else None,
|
||||
timeout=timeout, headers=headers).get_res(url)
|
||||
if not ret:
|
||||
logger.error(f"获取RSS失败:请求返回空值,URL: {url}")
|
||||
return False
|
||||
except Exception as err:
|
||||
logger.error(f"获取RSS失败:{str(err)} - {traceback.format_exc()}")
|
||||
return False
|
||||
|
||||
if ret:
|
||||
# 检查HTTP状态码
|
||||
if ret.status_code != 200:
|
||||
logger.error(f"RSS请求失败,状态码: {ret.status_code}, URL: {url}")
|
||||
return False
|
||||
ret_xml = None
|
||||
root = None
|
||||
try:
|
||||
@@ -280,6 +285,17 @@ class RssHelper:
|
||||
if not ret_xml:
|
||||
ret_xml = ret.text
|
||||
|
||||
# 验证RSS内容是否有效
|
||||
if not ret_xml or not ret_xml.strip():
|
||||
logger.error("RSS内容为空")
|
||||
return False
|
||||
|
||||
# 检查是否包含基本的RSS/XML结构
|
||||
ret_xml_stripped = ret_xml.strip()
|
||||
if not ret_xml_stripped.startswith('<'):
|
||||
logger.error("RSS内容不是有效的XML格式")
|
||||
return False
|
||||
|
||||
# 使用lxml.etree解析XML
|
||||
parser = None
|
||||
try:
|
||||
@@ -292,7 +308,8 @@ class RssHelper:
|
||||
huge_tree=False # 禁用大文档解析,避免内存问题
|
||||
)
|
||||
root = etree.fromstring(ret_xml.encode('utf-8'), parser=parser)
|
||||
except etree.XMLSyntaxError:
|
||||
except etree.XMLSyntaxError as xml_error:
|
||||
logger.debug(f"XML解析失败:{str(xml_error)},尝试HTML解析")
|
||||
# 如果XML解析失败,尝试作为HTML解析
|
||||
try:
|
||||
root = etree.HTML(ret_xml)
|
||||
@@ -304,9 +321,15 @@ class RssHelper:
|
||||
except Exception as e:
|
||||
logger.error(f"HTML解析也失败:{str(e)}")
|
||||
return False
|
||||
except Exception as general_error:
|
||||
logger.error(f"解析RSS时发生未预期错误:{str(general_error)}")
|
||||
return False
|
||||
finally:
|
||||
if parser is not None:
|
||||
parser.close()
|
||||
try:
|
||||
parser.close()
|
||||
except Exception as close_error:
|
||||
logger.debug(f"关闭解析器时出错:{str(close_error)}")
|
||||
del parser
|
||||
|
||||
if root is None:
|
||||
|
||||
@@ -91,10 +91,10 @@ class SystemHelper:
|
||||
# 检查是否有有效的重启策略
|
||||
auto_restart_policies = ['always', 'unless-stopped', 'on-failure']
|
||||
has_restart_policy = policy_name in auto_restart_policies
|
||||
|
||||
|
||||
logger.info(f"容器重启策略: {policy_name}, 支持自动重启: {has_restart_policy}")
|
||||
return has_restart_policy
|
||||
|
||||
|
||||
except Exception as e:
|
||||
logger.warning(f"检查重启策略失败: {str(e)}")
|
||||
return False
|
||||
@@ -106,7 +106,7 @@ class SystemHelper:
|
||||
"""
|
||||
if not SystemUtils.is_docker():
|
||||
return False, "非Docker环境,无法重启!"
|
||||
|
||||
|
||||
try:
|
||||
# 检查容器是否配置了自动重启策略
|
||||
has_restart_policy = SystemHelper._check_restart_policy()
|
||||
|
||||
@@ -18,14 +18,14 @@ class WallpaperHelper(metaclass=Singleton):
|
||||
获取登录页面壁纸
|
||||
"""
|
||||
if settings.WALLPAPER == "bing":
|
||||
url = self.get_bing_wallpaper()
|
||||
return self.get_bing_wallpaper()
|
||||
elif settings.WALLPAPER == "mediaserver":
|
||||
url = self.get_mediaserver_wallpaper()
|
||||
return self.get_mediaserver_wallpaper()
|
||||
elif settings.WALLPAPER == "customize":
|
||||
url = self.get_customize_wallpaper()
|
||||
else:
|
||||
url = self.get_tmdb_wallpaper()
|
||||
return url
|
||||
return self.get_customize_wallpaper()
|
||||
elif settings.WALLPAPER == "tmdb":
|
||||
return self.get_tmdb_wallpaper()
|
||||
return ''
|
||||
|
||||
def get_wallpapers(self, num: int = 10) -> List[str]:
|
||||
"""
|
||||
@@ -37,8 +37,9 @@ class WallpaperHelper(metaclass=Singleton):
|
||||
return self.get_mediaserver_wallpapers(num)
|
||||
elif settings.WALLPAPER == "customize":
|
||||
return self.get_customize_wallpapers()
|
||||
else:
|
||||
elif settings.WALLPAPER == "tmdb":
|
||||
return self.get_tmdb_wallpapers(num)
|
||||
return []
|
||||
|
||||
@cached(maxsize=1, ttl=3600)
|
||||
def get_tmdb_wallpaper(self) -> Optional[str]:
|
||||
|
||||
@@ -83,7 +83,7 @@ if __name__ == '__main__':
|
||||
# 注册信号处理器
|
||||
signal.signal(signal.SIGTERM, signal_handler)
|
||||
signal.signal(signal.SIGINT, signal_handler)
|
||||
|
||||
|
||||
# 启动托盘
|
||||
start_tray()
|
||||
# 初始化数据库
|
||||
|
||||
@@ -51,7 +51,7 @@ class BangumiModule(_ModuleBase):
|
||||
获取模块子类型
|
||||
"""
|
||||
return MediaRecognizeType.Bangumi
|
||||
|
||||
|
||||
@staticmethod
|
||||
def get_priority() -> int:
|
||||
"""
|
||||
|
||||
@@ -344,9 +344,14 @@ class FileManagerModule(_ModuleBase):
|
||||
return None
|
||||
return storage_oper.get_parent(fileitem)
|
||||
|
||||
def snapshot_storage(self, storage: str, path: Path) -> Optional[Dict[str, float]]:
|
||||
def snapshot_storage(self, storage: str, path: Path,
|
||||
last_snapshot_time: float = None, max_depth: int = 5) -> Optional[Dict[str, Dict]]:
|
||||
"""
|
||||
快照存储
|
||||
:param storage: 存储类型
|
||||
:param path: 路径
|
||||
:param last_snapshot_time: 上次快照时间,用于增量快照
|
||||
:param max_depth: 最大递归深度,避免过深遍历
|
||||
"""
|
||||
if storage not in self._support_storages:
|
||||
return None
|
||||
@@ -354,7 +359,7 @@ class FileManagerModule(_ModuleBase):
|
||||
if not storage_oper:
|
||||
logger.error(f"不支持 {storage} 的快照处理")
|
||||
return None
|
||||
return storage_oper.snapshot(path)
|
||||
return storage_oper.snapshot(path, last_snapshot_time=last_snapshot_time, max_depth=max_depth)
|
||||
|
||||
def storage_usage(self, storage: str) -> Optional[StorageUsage]:
|
||||
"""
|
||||
|
||||
@@ -4,6 +4,7 @@ from typing import Optional, List, Dict, Tuple
|
||||
|
||||
from app import schemas
|
||||
from app.helper.storage import StorageHelper
|
||||
from app.log import logger
|
||||
|
||||
|
||||
class StorageBase(metaclass=ABCMeta):
|
||||
@@ -135,7 +136,8 @@ class StorageBase(metaclass=ABCMeta):
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def upload(self, fileitem: schemas.FileItem, path: Path, new_name: Optional[str] = None) -> Optional[schemas.FileItem]:
|
||||
def upload(self, fileitem: schemas.FileItem, path: Path,
|
||||
new_name: Optional[str] = None) -> Optional[schemas.FileItem]:
|
||||
"""
|
||||
上传文件
|
||||
:param fileitem: 上传目录项
|
||||
@@ -192,21 +194,44 @@ class StorageBase(metaclass=ABCMeta):
|
||||
"""
|
||||
pass
|
||||
|
||||
def snapshot(self, path: Path) -> Dict[str, float]:
|
||||
def snapshot(self, path: Path, last_snapshot_time: float = None, max_depth: int = 5) -> Dict[str, Dict]:
|
||||
"""
|
||||
快照文件系统,输出所有层级文件信息(不含目录)
|
||||
:param path: 路径
|
||||
:param last_snapshot_time: 上次快照时间,用于增量快照
|
||||
:param max_depth: 最大递归深度,避免过深遍历
|
||||
"""
|
||||
files_info = {}
|
||||
|
||||
def __snapshot_file(_fileitm: schemas.FileItem):
|
||||
def __snapshot_file(_fileitm: schemas.FileItem, current_depth: int = 0):
|
||||
"""
|
||||
递归获取文件信息
|
||||
"""
|
||||
if _fileitm.type == "dir":
|
||||
for sub_file in self.list(_fileitm):
|
||||
__snapshot_file(sub_file)
|
||||
else:
|
||||
files_info[_fileitm.path] = _fileitm.size
|
||||
try:
|
||||
if _fileitm.type == "dir":
|
||||
# 检查递归深度限制
|
||||
if current_depth >= max_depth:
|
||||
return
|
||||
|
||||
# 增量检查:如果目录修改时间早于上次快照,跳过
|
||||
if (last_snapshot_time and
|
||||
_fileitm.modify_time and
|
||||
_fileitm.modify_time <= last_snapshot_time):
|
||||
return
|
||||
|
||||
# 遍历子文件
|
||||
sub_files = self.list(_fileitm)
|
||||
for sub_file in sub_files:
|
||||
__snapshot_file(sub_file, current_depth + 1)
|
||||
else:
|
||||
# 记录文件的完整信息用于比对
|
||||
files_info[_fileitm.path] = {
|
||||
'size': _fileitm.size or 0,
|
||||
'modify_time': getattr(_fileitm, 'modify_time', 0),
|
||||
'type': _fileitm.type
|
||||
}
|
||||
except Exception as e:
|
||||
logger.debug(f"Snapshot error for {_fileitm.path}: {e}")
|
||||
|
||||
fileitem = self.get_item(path)
|
||||
if not fileitem:
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import json
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from typing import Optional, List, Dict
|
||||
from typing import Optional, List
|
||||
|
||||
import requests
|
||||
|
||||
@@ -710,30 +710,6 @@ class Alist(StorageBase, metaclass=Singleton):
|
||||
"""
|
||||
pass
|
||||
|
||||
def snapshot(self, path: Path) -> Dict[str, float]:
|
||||
"""
|
||||
快照文件系统,输出所有层级文件信息(不含目录)
|
||||
"""
|
||||
files_info = {}
|
||||
|
||||
def __snapshot_file(_fileitm: schemas.FileItem):
|
||||
"""
|
||||
递归获取文件信息
|
||||
"""
|
||||
if _fileitm.type == "dir":
|
||||
for sub_file in self.list(_fileitm):
|
||||
__snapshot_file(sub_file)
|
||||
else:
|
||||
files_info[_fileitm.path] = _fileitm.size
|
||||
|
||||
fileitem = self.get_item(path)
|
||||
if not fileitem:
|
||||
return {}
|
||||
|
||||
__snapshot_file(fileitem)
|
||||
|
||||
return files_info
|
||||
|
||||
@staticmethod
|
||||
def __parse_timestamp(time_str: str) -> float:
|
||||
"""
|
||||
|
||||
@@ -54,7 +54,7 @@ class RuleParser:
|
||||
if __name__ == '__main__':
|
||||
# 测试代码
|
||||
expression_str = """
|
||||
SPECSUB & CNVOI & 4K & !BLU & !REMUX & !WEBDL & 60FPS & !DOLBY & !SDR & !3D > CNSUB & CNVOI & 4K & !BLU & !REMUX & !WEBDL & 60FPS & !DOLBY & !SDR & !3D > SPECSUB & 4K & !BLU & !REMUX & !WEBDL & 60FPS & !DOLBY & !SDR & !3D > CNSUB & 4K & !BLU & !REMUX & !WEBDL & 60FPS & !DOLBY & !SDR & !3D > SPECSUB & CNVOI & 4K & !BLU & !REMUX & !WEBDL & !DOLBY & HDR & !3D > CNSUB & CNVOI & 4K & !BLU & !REMUX & !WEBDL & !DOLBY & HDR & !3D > SPECSUB & CNVOI & 4K & !BLU & !REMUX & !WEBDL & !DOLBY & !3D > CNSUB & CNVOI & 4K & !BLU & !REMUX & !WEBDL & !DOLBY & !3D > SPECSUB & 4K & !BLU & !REMUX & !WEBDL & !DOLBY & HDR & !3D > CNSUB & 4K & !BLU & !REMUX & !WEBDL & !DOLBY & HDR & !3D > SPECSUB & 4K & !BLU & !REMUX & !WEBDL & !DOLBY & !3D > CNSUB & 4K & !BLU & !REMUX & !WEBDL & !DOLBY & !3D > SPECSUB & CNVOI & 4K & WEBDL & 60FPS & !DOLBY & !SDR & !3D > CNSUB & CNVOI & 4K & WEBDL & 60FPS & !DOLBY & !SDR & !3D > SPECSUB & 4K & WEBDL & 60FPS & !DOLBY & !SDR & !3D > CNSUB & 4K & WEBDL & 60FPS & !DOLBY & !SDR & !3D > SPECSUB & CNVOI & 4K & WEBDL & !DOLBY & HDR & !3D > CNSUB & CNVOI & 4K & WEBDL & !DOLBY & HDR & !3D > SPECSUB & CNVOI & 4K & WEBDL & !DOLBY & !3D > CNSUB & CNVOI & 4K & WEBDL & !DOLBY & !3D > SPECSUB & 4K & WEBDL & !DOLBY & HDR & !3D > CNSUB & 4K & WEBDL & !DOLBY & HDR & !3D > SPECSUB & 4K & WEBDL & !DOLBY & !3D > CNSUB & 4K & WEBDL & !DOLBY & !3D > SPECSUB & CNVOI & 4K & !BLU & !WEBDL & !DOLBY & HDR & !3D > CNSUB & CNVOI & 4K & !BLU & !WEBDL & !DOLBY & HDR & !3D > SPECSUB & CNVOI & 4K & !BLU & !WEBDL & !DOLBY & !3D > CNSUB & CNVOI & 4K & !BLU & !WEBDL & !DOLBY & !3D > SPECSUB & 4K & !BLU & !WEBDL & !DOLBY & HDR & !3D > CNSUB & 4K & !BLU & !WEBDL & !DOLBY & HDR & !3D > SPECSUB & 4K & !BLU & !WEBDL & !DOLBY & !SDR & !3D > CNSUB & 4K & !BLU & !WEBDL & !DOLBY & !SDR & !3D > 4K & !BLU & !REMUX & !DOLBY & HDR & !3D > 4K & !BLURAY & !REMUX & !DOLBY & !3D > SPECSUB & 1080P & !BLU & !REMUX & !WEBDL & !DOLBY & HDR & !3D > CNSUB & 1080P & !BLU & !REMUX & !WEBDL & !DOLBY & HDR & !3D > SPECSUB & 1080P & !BLU & !REMUX & !WEBDL & !DOLBY & !3D > CNSUB & 1080P & !BLU & !REMUX & !WEBDL & !DOLBY & !3D > SPECSUB & 1080P & !BLU & !WEBDL & !DOLBY & HDR & !3D > CNSUB & 1080P & !BLU & !WEBDL & !DOLBY & HDR & !3D > SPECSUB & 1080P & !BLU & !WEBDL & !DOLBY & !3D > CNSUB & 1080P & !BLU & !WEBDL & !DOLBY & !3D > SPECSUB & 1080P & WEBDL & !DOLBY & HDR & !3D > CNSUB & 1080P & WEBDL & !DOLBY & HDR & !3D > SPECSUB & 1080P & WEBDL & !DOLBY & !3D > CNSUB & 1080P & WEBDL & !DOLBY & !3D > 1080P & !BLU & !REMUX & !DOLBY & HDR & !3D > 1080P & !BLU & !REMUX & !DOLBY & !3D
|
||||
SPECSUB & CNVOI & 4K & !BLU & !REMUX & !WEBDL & 60FPS & !DOLBY & !SDR & !3D > CNSUB & CNVOI & 4K & !BLU & !REMUX & !WEBDL & 60FPS & !DOLBY & !SDR & !3D > SPECSUB & 4K & !BLU & !REMUX & !WEBDL & 60FPS & !DOLBY & !SDR & !3D > CNSUB & 4K & !BLU & !REMUX & !WEBDL & 60FPS & !DOLBY & !SDR & !3D > SPECSUB & CNVOI & 4K & !BLU & !REMUX & !WEBDL & !DOLBY & HDR & !3D > CNSUB & CNVOI & 4K & !BLU & !REMUX & !WEBDL & !DOLBY & HDR & !3D > SPECSUB & CNVOI & 4K & !BLU & !REMUX & !WEBDL & !DOLBY & !3D > CNSUB & CNVOI & 4K & !BLU & !REMUX & !WEBDL & !DOLBY & !3D > SPECSUB & 4K & !BLU & !REMUX & !WEBDL & !DOLBY & HDR & !3D > CNSUB & 4K & !BLU & !REMUX & !WEBDL & !DOLBY & HDR & !3D > SPECSUB & 4K & !BLU & !REMUX & !WEBDL & !DOLBY & !3D > CNSUB & 4K & !BLU & !REMUX & !WEBDL & !DOLBY & !3D > SPECSUB & CNVOI & 4K & WEBDL & 60FPS & !DOLBY & !SDR & !3D > CNSUB & CNVOI & 4K & WEBDL & 60FPS & !DOLBY & !SDR & !3D > SPECSUB & 4K & WEBDL & 60FPS & !DOLBY & !SDR & !3D > CNSUB & 4K & WEBDL & 60FPS & !DOLBY & !SDR & !3D > SPECSUB & CNVOI & 4K & WEBDL & !DOLBY & HDR & !3D > CNSUB & CNVOI & 4K & WEBDL & !DOLBY & HDR & !3D > SPECSUB & CNVOI & 4K & WEBDL & !DOLBY & !3D > CNSUB & CNVOI & 4K & WEBDL & !DOLBY & !3D > SPECSUB & 4K & WEBDL & !DOLBY & HDR & !3D > CNSUB & 4K & WEBDL & !DOLBY & HDR & !3D > SPECSUB & 4K & WEBDL & !DOLBY & !3D > CNSUB & 4K & WEBDL & !DOLBY & !3D > SPECSUB & CNVOI & 4K & !BLU & !WEBDL & !DOLBY & HDR & !3D > CNSUB & CNVOI & 4K & !BLU & !WEBDL & !DOLBY & HDR & !3D > SPECSUB & CNVOI & 4K & !BLU & !WEBDL & !DOLBY & !3D > CNSUB & CNVOI & 4K & !BLU & !WEBDL & !DOLBY & !3D > SPECSUB & 4K & !BLU & !WEBDL & !DOLBY & HDR & !3D > CNSUB & 4K & !BLU & !WEBDL & !DOLBY & HDR & !3D > SPECSUB & 4K & !BLU & !WEBDL & !DOLBY & !SDR & !3D > CNSUB & 4K & !BLU & !WEBDL & !DOLBY & !SDR & !3D > 4K & !BLU & !REMUX & !DOLBY & HDR & !3D > 4K & !BLURAY & !REMUX & !DOLBY & !3D > SPECSUB & 1080P & !BLU & !REMUX & !WEBDL & !DOLBY & HDR & !3D > CNSUB & 1080P & !BLU & !REMUX & !WEBDL & !DOLBY & HDR & !3D > SPECSUB & 1080P & !BLU & !REMUX & !WEBDL & !DOLBY & !3D > CNSUB & 1080P & !BLU & !REMUX & !WEBDL & !DOLBY & !3D > SPECSUB & 1080P & !BLU & !WEBDL & !DOLBY & HDR & !3D > CNSUB & 1080P & !BLU & !WEBDL & !DOLBY & HDR & !3D > SPECSUB & 1080P & !BLU & !WEBDL & !DOLBY & !3D > CNSUB & 1080P & !BLU & !WEBDL & !DOLBY & !3D > SPECSUB & 1080P & WEBDL & !DOLBY & HDR & !3D > CNSUB & 1080P & WEBDL & !DOLBY & HDR & !3D > SPECSUB & 1080P & WEBDL & !DOLBY & !3D > CNSUB & 1080P & WEBDL & !DOLBY & !3D > 1080P & !BLU & !REMUX & !DOLBY & HDR & !3D > 1080P & !BLU & !REMUX & !DOLBY & !3D
|
||||
"""
|
||||
for exp in expression_str.split('>'):
|
||||
parsed_expr = RuleParser().parse(exp.strip())
|
||||
|
||||
@@ -302,11 +302,11 @@ class IndexerModule(_ModuleBase):
bonus=site_obj.bonus,
seeding=site_obj.seeding,
seeding_size=site_obj.seeding_size,
seeding_info=site_obj.seeding_info or [],
seeding_info=site_obj.seeding_info.copy() if site_obj.seeding_info else [],
leeching=site_obj.leeching,
leeching_size=site_obj.leeching_size,
message_unread=site_obj.message_unread,
message_unread_contents=site_obj.message_unread_contents or [],
message_unread_contents=site_obj.message_unread_contents.copy() if site_obj.message_unread_contents else [],
updated_day=datetime.now().strftime('%Y-%m-%d'),
err_msg=site_obj.err_msg
)
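The switch to .copy() above guards against callers mutating the very list object the site record still holds. A tiny standalone illustration of the aliasing problem (not project code):

```python
# Why .copy() matters: without it, the schema and the source object share one list.
site_seeding_info = [{"size": 1024}]

shared = site_seeding_info         # aliased reference, as with the old "or []" form
copied = site_seeding_info.copy()  # shallow copy, as in the new code

shared.append({"size": 2048})
print(len(site_seeding_info))  # 2 - the source list was mutated through the alias
print(len(copied))             # 1 - the copied list is unaffected
```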
@@ -788,7 +788,7 @@ class Plex:

# 合并排序
for hub in hubs:
for item in hub.items:
for item in hub.items():
sub_result.append(item)
sub_result.sort(key=lambda x: x.addedAt, reverse=True)
@@ -122,7 +122,7 @@ class TelegramModule(_ModuleBase, _MessageBase[Telegram]):
'text': ''
}
}

按钮回调格式:
{
'callback_query': {
@@ -59,7 +59,7 @@ class AsObj:

def __setitem__(self, key, value):
return setattr(self, key, value)

def __str__(self):
return str(self._obj_list) if self._list_only else str(self._dict())

@@ -91,10 +91,10 @@ class AsObj:

def pop(self, key, value=None):
return self.__dict__.pop(key, value)

def popitem(self):
return self.__dict__.popitem()

def setdefault(self, key, value=None):
return self.__dict__.setdefault(key, value)
@@ -4,7 +4,7 @@ from ..tmdb import TMDb
class Collection(TMDb):
_urls = {
"details": "/collection/%s",
"images": "/collection/%s/images",
"translations": "/collection/%s/translations"
}

@@ -3,7 +3,7 @@ from ..tmdb import TMDb

class Company(TMDb):
_urls = {
"details": "/company/%s",
"alternative_names": "/company/%s/alternative_names",
"images": "/company/%s/images",
"movies": "/company/%s/movies"
@@ -101,11 +101,11 @@ class Movie(TMDb):
:return:
"""
return self._request_obj(self._urls["external_ids"] % movie_id)

def images(self, movie_id, include_image_language=None):
"""
Get the images that belong to a movie.
Querying images with a language parameter will filter the results.
If you want to include a fallback language (especially useful for backdrops)
you can use the include_image_language parameter.
This should be a comma separated value like so: include_image_language=en,null.
@@ -55,7 +55,7 @@ class Search(TMDb):
params="query=%s&page=%s" % (quote(term), page),
key="results"
)

def movies(self, term, adult=None, region=None, year=None, release_year=None, page=1):
"""
Search for movies.
@@ -19,7 +19,7 @@ class Transmission:
"peersGettingFromUs", "peersSendingToUs", "uploadRatio", "uploadedEver", "downloadedEver", "downloadDir",
"error", "errorString", "doneDate", "queuePosition", "activityDate", "trackers"]

def __init__(self, host: Optional[str] = None, port: Optional[int] = None,
username: Optional[str] = None, password: Optional[str] = None, **kwargs):
"""
若不设置参数,则创建配置文件设置的下载器
@@ -128,7 +128,7 @@ class WechatModule(_ModuleBase, _MessageBase[WeChat]):
1、消息格式:
<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[fromUser]]></FromUserName>
<CreateTime>1348831860</CreateTime>
<MsgType><![CDATA[text]]></MsgType>
<Content><![CDATA[this is a test]]></Content>
@@ -143,7 +143,7 @@ class WechatModule(_ModuleBase, _MessageBase[WeChat]):
<MsgType><![CDATA[event]]></MsgType>
<Event><![CDATA[subscribe]]></Event>
<AgentID>1</AgentID>
</xml>
"""
dom_tree = xml.dom.minidom.parseString(sMsg.decode('UTF-8'))
root_node = dom_tree.documentElement
app/monitor.py (516 changed lines)
|
||||
import json
|
||||
import platform
|
||||
import re
|
||||
import subprocess
|
||||
import threading
|
||||
import time
|
||||
import traceback
|
||||
from pathlib import Path
|
||||
from threading import Lock
|
||||
from typing import Any, Optional
|
||||
from typing import Any, Optional, Dict, List
|
||||
|
||||
from apscheduler.schedulers.background import BackgroundScheduler
|
||||
from cachetools import TTLCache
|
||||
@@ -65,8 +68,8 @@ class Monitor(metaclass=Singleton):
|
||||
# 定时服务
|
||||
_scheduler = None
|
||||
|
||||
# 存储快照
|
||||
_storage_snapshot = {}
|
||||
# 存储快照缓存目录
|
||||
_snapshot_cache_dir = None
|
||||
|
||||
# 存储过照间隔(分钟)
|
||||
_snapshot_interval = 5
|
||||
@@ -77,6 +80,9 @@ class Monitor(metaclass=Singleton):
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self.all_exts = settings.RMT_MEDIAEXT
|
||||
# 初始化快照缓存目录
|
||||
self._snapshot_cache_dir = settings.TEMP_PATH / "snapshots"
|
||||
self._snapshot_cache_dir.mkdir(exist_ok=True)
|
||||
# 启动目录监控和文件整理
|
||||
self.init()
|
||||
|
||||
@@ -94,6 +100,249 @@ class Monitor(metaclass=Singleton):
|
||||
logger.info("配置变更事件触发,重新初始化目录监控...")
|
||||
self.init()
|
||||
|
||||
def save_snapshot(self, storage: str, snapshot: Dict, file_count: int = 0):
|
||||
"""
|
||||
保存快照到文件
|
||||
:param storage: 存储名称
|
||||
:param snapshot: 快照数据
|
||||
:param file_count: 文件数量,用于调整监控间隔
|
||||
"""
|
||||
try:
|
||||
cache_file = self._snapshot_cache_dir / f"{storage}_snapshot.json"
|
||||
snapshot_data = {
|
||||
'timestamp': time.time(),
|
||||
'file_count': file_count,
|
||||
'snapshot': snapshot
|
||||
}
|
||||
with open(cache_file, 'w', encoding='utf-8') as f:
|
||||
json.dump(snapshot_data, f, ensure_ascii=False, indent=2) # noqa
|
||||
logger.debug(f"快照已保存到 {cache_file}")
|
||||
except Exception as e:
|
||||
logger.error(f"保存快照失败: {e}")
|
||||
|
||||
def load_snapshot(self, storage: str) -> Optional[Dict]:
|
||||
"""
|
||||
从文件加载快照
|
||||
:param storage: 存储名称
|
||||
:return: 快照数据或None
|
||||
"""
|
||||
try:
|
||||
cache_file = self._snapshot_cache_dir / f"{storage}_snapshot.json"
|
||||
if cache_file.exists():
|
||||
with open(cache_file, 'r', encoding='utf-8') as f:
|
||||
data = json.load(f)
|
||||
return data
|
||||
return None
|
||||
except Exception as e:
|
||||
logger.error(f"加载快照失败: {e}")
|
||||
return None
|
||||
|
||||
@staticmethod
|
||||
def adjust_monitor_interval(file_count: int) -> int:
|
||||
"""
|
||||
根据文件数量动态调整监控间隔
|
||||
:param file_count: 文件数量
|
||||
:return: 监控间隔(分钟)
|
||||
"""
|
||||
if file_count < 100:
|
||||
return 5 # 5分钟
|
||||
elif file_count < 500:
|
||||
return 10 # 10分钟
|
||||
elif file_count < 1000:
|
||||
return 15 # 15分钟
|
||||
else:
|
||||
return 30 # 30分钟
|
||||
|
||||
@staticmethod
|
||||
def compare_snapshots(old_snapshot: Dict, new_snapshot: Dict) -> Dict[str, List]:
|
||||
"""
|
||||
比对快照,找出变化的文件(只处理新增和修改,不处理删除)
|
||||
:param old_snapshot: 旧快照
|
||||
:param new_snapshot: 新快照
|
||||
:return: 变化信息
|
||||
"""
|
||||
changes = {
|
||||
'added': [],
|
||||
'modified': []
|
||||
}
|
||||
|
||||
old_files = set(old_snapshot.keys())
|
||||
new_files = set(new_snapshot.keys())
|
||||
|
||||
# 新增文件
|
||||
changes['added'] = list(new_files - old_files)
|
||||
|
||||
# 修改文件(大小或时间变化)
|
||||
for file_path in old_files & new_files:
|
||||
old_info = old_snapshot[file_path]
|
||||
new_info = new_snapshot[file_path]
|
||||
|
||||
# 检查文件大小变化
|
||||
old_size = old_info.get('size', 0) if isinstance(old_info, dict) else old_info
|
||||
new_size = new_info.get('size', 0) if isinstance(new_info, dict) else new_info
|
||||
|
||||
# 检查修改时间变化(如果有的话)
|
||||
old_time = old_info.get('modify_time', 0) if isinstance(old_info, dict) else 0
|
||||
new_time = new_info.get('modify_time', 0) if isinstance(new_info, dict) else 0
|
||||
|
||||
if old_size != new_size or (old_time and new_time and old_time != new_time):
|
||||
changes['modified'].append(file_path)
|
||||
|
||||
return changes
|
||||
|
||||
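A self-contained example of how two snapshots in the new per-file format compare; the paths and numbers are made up, and the import assumes the method as introduced in this diff:

```python
# Illustrative input/output for Monitor.compare_snapshots with the new per-file dicts.
from app.monitor import Monitor

old_snapshot = {
    "/downloads/ShowA/S01E01.mkv": {"size": 100, "modify_time": 1000, "type": "file"},
    "/downloads/ShowA/S01E02.mkv": {"size": 200, "modify_time": 1000, "type": "file"},
}
new_snapshot = {
    "/downloads/ShowA/S01E01.mkv": {"size": 100, "modify_time": 1000, "type": "file"},
    "/downloads/ShowA/S01E02.mkv": {"size": 250, "modify_time": 1600, "type": "file"},
    "/downloads/ShowA/S01E03.mkv": {"size": 300, "modify_time": 1700, "type": "file"},
}

changes = Monitor.compare_snapshots(old_snapshot, new_snapshot)
# changes == {'added': ['/downloads/ShowA/S01E03.mkv'],
#             'modified': ['/downloads/ShowA/S01E02.mkv']}
```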
@staticmethod
|
||||
def count_directory_files(directory: Path, max_check: int = 10000) -> int:
|
||||
"""
|
||||
统计目录下的文件数量(用于检测是否超过系统限制)
|
||||
:param directory: 目录路径
|
||||
:param max_check: 最大检查数量,避免长时间阻塞
|
||||
:return: 文件数量
|
||||
"""
|
||||
try:
|
||||
count = 0
|
||||
import os
|
||||
for root, dirs, files in os.walk(str(directory)):
|
||||
count += len(files)
|
||||
if count > max_check:
|
||||
return count
|
||||
return count
|
||||
except Exception as err:
|
||||
logger.debug(f"统计目录文件数量失败: {err}")
|
||||
return 0
|
||||
|
||||
@staticmethod
|
||||
def check_system_limits() -> Dict[str, Any]:
|
||||
"""
|
||||
检查系统限制
|
||||
:return: 系统限制信息
|
||||
"""
|
||||
limits = {
|
||||
'max_user_watches': 0,
|
||||
'max_user_instances': 0,
|
||||
'current_watches': 0,
|
||||
'warnings': []
|
||||
}
|
||||
|
||||
try:
|
||||
system = platform.system()
|
||||
if system == 'Linux':
|
||||
# 检查 inotify 限制
|
||||
try:
|
||||
with open('/proc/sys/fs/inotify/max_user_watches', 'r') as f:
|
||||
limits['max_user_watches'] = int(f.read().strip())
|
||||
except Exception as e:
|
||||
logger.debug(f"读取 inotify 限制失败: {e}")
|
||||
limits['max_user_watches'] = 8192 # 默认值
|
||||
|
||||
try:
|
||||
with open('/proc/sys/fs/inotify/max_user_instances', 'r') as f:
|
||||
limits['max_user_instances'] = int(f.read().strip())
|
||||
except Exception as e:
|
||||
logger.debug(f"读取 inotify 实例限制失败: {e}")
|
||||
|
||||
# 检查当前使用的watches
|
||||
try:
|
||||
import subprocess
|
||||
result = subprocess.run(['find', '/proc/*/fd', '-lname', 'anon_inode:inotify', '-printf', '%h\n'],
|
||||
capture_output=True, text=True, timeout=5)
|
||||
if result.returncode == 0:
|
||||
limits['current_watches'] = len(result.stdout.strip().split('\n'))
|
||||
except Exception as e:
|
||||
logger.debug(f"检查当前 inotify 使用失败: {e}")
|
||||
|
||||
except Exception as e:
|
||||
limits['warnings'].append(f"检查系统限制时出错: {e}")
|
||||
|
||||
return limits
|
||||
|
||||
@staticmethod
|
||||
def get_system_optimization_tips() -> List[str]:
|
||||
"""
|
||||
获取系统优化建议
|
||||
:return: 优化建议列表
|
||||
"""
|
||||
tips = []
|
||||
system = platform.system()
|
||||
|
||||
if system == 'Linux':
|
||||
tips.extend([
|
||||
"增加 inotify 监控数量限制:",
|
||||
"echo fs.inotify.max_user_watches=524288 | sudo tee -a /etc/sysctl.conf",
|
||||
"echo fs.inotify.max_user_instances=524288 | sudo tee -a /etc/sysctl.conf",
|
||||
"sudo sysctl -p",
|
||||
"",
|
||||
"如果在Docker中运行,请在宿主机上执行以上命令"
|
||||
])
|
||||
elif system == 'Darwin':
|
||||
tips.extend([
|
||||
"macOS 系统优化建议:",
|
||||
"sudo sysctl kern.maxfiles=65536",
|
||||
"sudo sysctl kern.maxfilesperproc=32768",
|
||||
"ulimit -n 32768"
|
||||
])
|
||||
elif system == 'Windows':
|
||||
tips.extend([
|
||||
"Windows 系统优化建议:",
|
||||
"1. 关闭不必要的实时保护软件对监控目录的扫描",
|
||||
"2. 将监控目录添加到Windows Defender排除列表",
|
||||
"3. 确保有足够的可用内存"
|
||||
])
|
||||
|
||||
return tips
|
||||
|
||||
    def should_use_polling(self, directory: Path, monitor_mode: str,
                           file_count: int, limits: dict) -> tuple[bool, str]:
        """
        判断是否应该使用轮询模式
        :param directory: 监控目录
        :param monitor_mode: 配置的监控模式
        :param file_count: 目录文件数量
        :param limits: 系统限制信息
        :return: (是否使用轮询, 原因)
        """
        if monitor_mode == "compatible":
            return True, "用户配置为兼容模式"

        # 检查网络文件系统
        if self.is_network_filesystem(directory):
            return True, "检测到网络文件系统,建议使用兼容模式"

        max_watches = limits.get('max_user_watches')
        if max_watches and file_count > max_watches * 0.8:
            return True, f"目录文件数量({file_count})接近系统限制({max_watches})"
        return False, "使用快速模式"

    @staticmethod
    def is_network_filesystem(directory: Path) -> bool:
        """
        检测是否为网络文件系统
        :param directory: 目录路径
        :return: 是否为网络文件系统
        """
        try:
            system = platform.system()
            if system == 'Linux':
                # 检查挂载信息
                result = subprocess.run(['df', '-T', str(directory)],
                                        capture_output=True, text=True, timeout=5)
                if result.returncode == 0:
                    output = result.stdout.lower()
                    network_fs = ['nfs', 'cifs', 'smbfs', 'fuse', 'sshfs', 'ftpfs']
                    return any(fs in output for fs in network_fs)
            elif system == 'Darwin':
                # macOS 检查
                result = subprocess.run(['df', '-T', str(directory)],
                                        capture_output=True, text=True, timeout=5)
                if result.returncode == 0:
                    output = result.stdout.lower()
                    return 'nfs' in output or 'smbfs' in output
            elif system == 'Windows':
                # Windows 检查网络驱动器
                return str(directory).startswith('\\\\')
        except Exception as e:
            logger.debug(f"检测网络文件系统时出错: {e}")
        return False

    def init(self):
        """
        启动监控
@@ -104,10 +353,12 @@ class Monitor(metaclass=Singleton):
        # 读取目录配置
        monitor_dirs = DirectoryHelper().get_download_dirs()
        if not monitor_dirs:
            logger.info("未找到任何目录监控配置")
            return

        # 按下载目录去重
        monitor_dirs = list({f"{d.storage}_{d.download_path}": d for d in monitor_dirs}.values())
        logger.info(f"找到 {len(monitor_dirs)} 个目录监控配置")

        # 启动定时服务进程
        self._scheduler = BackgroundScheduler(timezone=settings.TZ)
@@ -115,9 +366,12 @@ class Monitor(metaclass=Singleton):
        messagehelper = MessageHelper()
        for mon_dir in monitor_dirs:
            if not mon_dir.library_path:
                logger.warn(f"跳过监控配置 {mon_dir.download_path}:未设置媒体库目录")
                continue
            if mon_dir.monitor_type != "monitor":
                logger.debug(f"跳过监控配置 {mon_dir.download_path}:监控类型为 {mon_dir.monitor_type}")
                continue

            # 检查媒体库目录是不是下载目录的子目录
            mon_path = Path(mon_dir.download_path)
            target_path = Path(mon_dir.library_path)
@@ -129,83 +383,235 @@ class Monitor(metaclass=Singleton):
            # 启动监控
            if mon_dir.storage == "local":
                # 本地目录监控
                logger.info(f"正在启动本地目录监控: {mon_path}")
                logger.info("*** 重要提示:目录监控只处理新增和修改的文件,不会处理监控启动前已存在的文件 ***")

                try:
                    if mon_dir.monitor_mode == "fast":
                        observer = self.__choose_observer()
                    else:
                        # 统计文件数量并给出提示
                        file_count = self.count_directory_files(mon_path)
                        logger.info(f"监控目录 {mon_path} 包含约 {file_count} 个文件")

                        # 检查系统限制
                        limits = self.check_system_limits()

                        # 检查是否需要使用轮询模式
                        use_polling, reason = self.should_use_polling(mon_path,
                                                                      monitor_mode=mon_dir.monitor_mode,
                                                                      file_count=file_count,
                                                                      limits=limits)
                        logger.info(f"监控模式决策: {reason}")

                        if use_polling:
                            observer = PollingObserver()
                            logger.info(f"使用兼容模式(轮询)监控 {mon_path}")
                        else:
                            observer = self.__choose_observer()
                            if observer is None:
                                logger.warn(f"快速模式不可用,自动切换到兼容模式监控 {mon_path}")
                                observer = PollingObserver()
                            else:
                                logger.info(f"使用快速模式监控 {mon_path}")
                        if limits['warnings']:
                            for warning in limits['warnings']:
                                logger.warn(f"系统限制警告: {warning}")
                        if limits['max_user_watches'] > 0:
                            usage_percent = (file_count / limits['max_user_watches']) * 100
                            logger.info(
                                f"系统监控资源使用率: {usage_percent:.1f}% ({file_count}/{limits['max_user_watches']})")

                    self._observers.append(observer)
                    observer.schedule(FileMonitorHandler(mon_path=mon_path, callback=self),
                                      path=str(mon_path),
                                      recursive=True)
                    observer.daemon = True
                    observer.start()
                    logger.info(f"已启动 {mon_path} 的目录监控服务, 监控模式:{mon_dir.monitor_mode}")

                    mode_name = "兼容模式(轮询)" if use_polling else "快速模式"
                    logger.info(f"✓ 本地目录监控已启动: {mon_path} [{mode_name}]")

                except Exception as e:
                    err_msg = str(e)
                    if "inotify" in err_msg and "reached" in err_msg:
                        logger.warn(
                            f"目录监控服务启动出现异常:{err_msg},请在宿主机上(不是docker容器内)执行以下命令并重启:"
                            + """
                            echo fs.inotify.max_user_watches=524288 | sudo tee -a /etc/sysctl.conf
                            echo fs.inotify.max_user_instances=524288 | sudo tee -a /etc/sysctl.conf
                            sudo sysctl -p
                            """)
                    logger.error(f"启动本地目录监控失败: {mon_path}")
                    logger.error(f"错误详情: {err_msg}")

                    if "inotify" in err_msg.lower():
                        logger.error("inotify 相关错误,这通常是由于系统监控数量限制导致的")
                        logger.error("解决方案:")
                        tips = self.get_system_optimization_tips()
                        for tip in tips:
                            logger.error(f"  {tip}")
                        logger.error("执行上述命令后重启 MoviePilot")
                    elif "permission" in err_msg.lower():
                        logger.error("权限错误,请检查 MoviePilot 是否有足够的权限访问监控目录")
                    else:
                        logger.error(f"{mon_path} 启动目录监控失败:{err_msg}")
                        messagehelper.put(f"{mon_path} 启动目录监控失败:{err_msg}", title="目录监控")
                        logger.error("建议尝试使用兼容模式进行监控")

                    messagehelper.put(f"启动本地目录监控失败: {mon_path}\n错误: {err_msg}", title="目录监控")
            else:
                # 远程目录监控
                self._scheduler.add_job(self.polling_observer, 'interval', minutes=self._snapshot_interval,
                                        kwargs={
                                            'storage': mon_dir.storage,
                                            'mon_path': mon_path
                                        })
                # 远程目录监控 - 使用智能间隔
                # 先尝试加载已有快照获取文件数量
                snapshot_data = self.load_snapshot(mon_dir.storage)
                file_count = snapshot_data.get('file_count', 0) if snapshot_data else 0
                interval = self.adjust_monitor_interval(file_count)

                logger.info(f"正在启动远程目录监控: {mon_path} [{mon_dir.storage}]")
                logger.info("*** 重要提示:远程目录监控只处理新增和修改的文件,不会处理监控启动前已存在的文件 ***")
                logger.info(f"预估文件数量: {file_count}, 监控间隔: {interval}分钟")

                self._scheduler.add_job(
                    self.polling_observer,
                    'interval',
                    minutes=interval,
                    kwargs={
                        'storage': mon_dir.storage,
                        'mon_path': mon_path
                    },
                    id=f"monitor_{mon_dir.storage}_{mon_dir.download_path}",
                    replace_existing=True
                )
                logger.info(f"✓ 远程目录监控已启动: {mon_path} [间隔: {interval}分钟]")

        # 启动定时服务
        if self._scheduler.get_jobs():
            self._scheduler.print_jobs()
            self._scheduler.start()
            logger.info("定时监控服务已启动")

    @staticmethod
    def __choose_observer() -> Any:
        # 输出监控总结
        local_count = len([d for d in monitor_dirs if d.storage == "local" and d.monitor_type == "monitor"])
        remote_count = len([d for d in monitor_dirs if d.storage != "local" and d.monitor_type == "monitor"])
        logger.info(f"目录监控启动完成: 本地监控 {local_count} 个,远程监控 {remote_count} 个")

    def __choose_observer(self) -> Optional[Any]:
        """
        选择最优的监控模式
        选择最优的监控模式(带错误处理和自动回退)
        """
        system = platform.system()

        observers_to_try = []

        try:
            if system == 'Linux':
                from watchdog.observers.inotify import InotifyObserver
                return InotifyObserver()
                observers_to_try = [
                    ('InotifyObserver',
                     lambda: self.__try_import_observer('watchdog.observers.inotify', 'InotifyObserver')),
                ]
            elif system == 'Darwin':
                from watchdog.observers.fsevents import FSEventsObserver
                return FSEventsObserver()
                observers_to_try = [
                    ('FSEventsObserver',
                     lambda: self.__try_import_observer('watchdog.observers.fsevents', 'FSEventsObserver')),
                ]
            elif system == 'Windows':
                from watchdog.observers.read_directory_changes import WindowsApiObserver
                return WindowsApiObserver()
        except Exception as error:
            logger.warn(f"导入模块错误:{error},将使用 PollingObserver 监控目录")
        return PollingObserver()
                observers_to_try = [
                    ('WindowsApiObserver',
                     lambda: self.__try_import_observer('watchdog.observers.read_directory_changes',
                                                        'WindowsApiObserver')),
                ]

            # 尝试每个观察者
            for observer_name, observer_func in observers_to_try:
                try:
                    observer_class = observer_func()
                    if observer_class:
                        # 尝试创建实例以验证是否可用
                        test_observer = observer_class()
                        test_observer.stop()  # 立即停止测试实例
                        logger.debug(f"成功初始化 {observer_name}")
                        return observer_class()
                except Exception as e:
                    logger.debug(f"初始化 {observer_name} 失败: {e}")
                    continue

        except Exception as e:
            logger.debug(f"选择观察者时出错: {e}")

        logger.debug("所有快速监控模式都不可用,将使用兼容模式")
        return None

    @staticmethod
    def __try_import_observer(module_name: str, class_name: str):
        """
        尝试导入观察者类
        """
        try:
            module = __import__(module_name, fromlist=[class_name])
            return getattr(module, class_name)
        except (ImportError, AttributeError) as e:
            logger.debug(f"导入 {module_name}.{class_name} 失败: {e}")
            return None

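Reviewer note on the dynamic import in `__try_import_observer`: when `fromlist` is non-empty, `__import__` returns the submodule itself rather than the top-level package, so `getattr` can fetch the class from it. A standalone equivalent using `importlib`, assuming the `watchdog` package is installed:

```python
# Equivalent sketch with importlib; PollingObserver is used only because it exists on every platform.
from importlib import import_module

module = import_module("watchdog.observers.polling")
observer_cls = getattr(module, "PollingObserver")
test_observer = observer_cls()   # instantiate to verify the backend is usable
test_observer.stop()             # stop the throwaway instance immediately, as the code above does
print(observer_cls.__name__)
```
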
    def polling_observer(self, storage: str, mon_path: Path):
        """
        轮询监控
        轮询监控(改进版)
        """
        with snapshot_lock:
            # 快照存储
            new_snapshot = StorageChain().snapshot_storage(storage=storage, path=mon_path)
            if new_snapshot:
                # 比较快照
                old_snapshot = self._storage_snapshot.get(storage)
            try:
                logger.debug(f"开始对 {storage}:{mon_path} 进行快照...")

                # 加载上次快照数据
                old_snapshot_data = self.load_snapshot(storage)
                old_snapshot = old_snapshot_data.get('snapshot', {}) if old_snapshot_data else {}
                last_snapshot_time = old_snapshot_data.get('timestamp', 0) if old_snapshot_data else 0

                # 生成新快照(增量模式)
                new_snapshot = StorageChain().snapshot_storage(
                    storage=storage,
                    path=mon_path,
                    last_snapshot_time=last_snapshot_time
                )

                if new_snapshot is None:
                    logger.warn(f"获取 {storage}:{mon_path} 快照失败")
                    return

                file_count = len(new_snapshot)
                logger.info(f"{storage}:{mon_path} 快照完成,发现 {file_count} 个文件")

                if old_snapshot:
                    # 新增的文件
                    new_files = new_snapshot.keys() - old_snapshot.keys()
                    for new_file in new_files:
                        # 添加到待整理队列
                        self.__handle_file(storage=storage, event_path=Path(new_file),
                                           file_size=new_snapshot.get(new_file))
                    # 更新快照
                    self._storage_snapshot[storage] = new_snapshot
                    # 比较快照找出变化
                    changes = self.compare_snapshots(old_snapshot, new_snapshot)

                    # 处理新增文件
                    for new_file in changes['added']:
                        logger.info(f"发现新增文件:{new_file}")
                        file_info = new_snapshot.get(new_file, {})
                        file_size = file_info.get('size', 0) if isinstance(file_info, dict) else file_info
                        self.__handle_file(storage=storage, event_path=Path(new_file), file_size=file_size)

                    # 处理修改文件
                    for modified_file in changes['modified']:
                        logger.info(f"发现修改文件:{modified_file}")
                        file_info = new_snapshot.get(modified_file, {})
                        file_size = file_info.get('size', 0) if isinstance(file_info, dict) else file_info
                        self.__handle_file(storage=storage, event_path=Path(modified_file), file_size=file_size)

                    if changes['added'] or changes['modified']:
                        logger.info(
                            f"{storage}:{mon_path} 发现 {len(changes['added'])} 个新增文件,{len(changes['modified'])} 个修改文件")
                    else:
                        logger.debug(f"{storage}:{mon_path} 无文件变化")
                else:
                    logger.info(f"{storage}:{mon_path} 首次快照完成,共 {file_count} 个文件")
                    logger.info("*** 首次快照仅建立基准,不会处理现有文件。后续监控将处理新增和修改的文件 ***")

                # 保存新快照
                self.save_snapshot(storage, new_snapshot, file_count)

                # 动态调整监控间隔
                new_interval = self.adjust_monitor_interval(file_count)
                current_job = self._scheduler.get_job(f"monitor_{storage}_{mon_path}")
                if current_job and current_job.trigger.interval.total_seconds() / 60 != new_interval:
                    # 重新安排任务
                    self._scheduler.modify_job(
                        f"monitor_{storage}_{mon_path}",
                        trigger='interval',
                        minutes=new_interval
                    )
                    logger.info(f"{storage}:{mon_path} 监控间隔已调整为 {new_interval} 分钟")

            except Exception as e:
                logger.error(f"轮询监控 {storage}:{mon_path} 出现错误:{e}")
                logger.debug(traceback.format_exc())

    def event_handler(self, event, text: str, event_path: str, file_size: float = None):
        """
@@ -217,7 +623,7 @@ class Monitor(metaclass=Singleton):
        """
        if not event.is_directory:
            # 文件发生变化
            logger.debug(f"文件 {event_path} 发生了 {text}")
            logger.debug(f"检测到文件变化: {event_path} [{text}]")
            # 整理文件
            self.__handle_file(storage="local", event_path=Path(event_path), file_size=file_size)

@@ -254,10 +660,12 @@ class Monitor(metaclass=Singleton):

        # TTL缓存控重
        if self._cache.get(str(event_path)):
            logger.debug(f"文件 {event_path} 在缓存中,跳过处理")
            return
        self._cache[str(event_path)] = True

        try:
            logger.info(f"开始整理文件: {event_path}")
            # 开始整理
            TransferChain().do_transfer(
                fileitem=FileItem(
@@ -271,7 +679,7 @@ class Monitor(metaclass=Singleton):
                )
            )
        except Exception as e:
            logger.error("目录监控发生错误:%s - %s" % (str(e), traceback.format_exc()))
            logger.error("目录监控整理文件发生错误:%s - %s" % (str(e), traceback.format_exc()))

    def stop(self):
        """
@@ -279,20 +687,22 @@ class Monitor(metaclass=Singleton):
        """
        self._event.set()
        if self._observers:
            logger.info("正在停止本地目录监控服务...")
            for observer in self._observers:
                try:
                    logger.info(f"正在停止目录监控服务:{observer}...")
                    observer.stop()
                    observer.join()
                    logger.info(f"{observer} 目录监控已停止")
                    logger.debug(f"已停止监控服务: {observer}")
                except Exception as e:
                    logger.error(f"停止目录监控服务出现了错误:{e}")
            self._observers = []
            logger.info("本地目录监控服务已停止")
        if self._scheduler:
            self._scheduler.remove_all_jobs()
            if self._scheduler.running:
                try:
                    self._scheduler.shutdown()
                    logger.info("定时监控服务已停止")
                except Exception as e:
                    logger.error(f"停止定时服务出现了错误:{e}")
            self._scheduler = None

@@ -57,6 +57,8 @@ class MetaInfo(BaseModel):
    audio_encode: Optional[str] = None
    # 资源类型
    edition: Optional[str] = None
    # 流媒体平台
    web_source: Optional[str] = None
    # 应用的识别词信息
    apply_words: Optional[List[str]] = None

@@ -76,6 +76,9 @@ class AutoCloseResponse:
        """
        self._auto_close()

    def __setstate__(self, state):
        for name, value in state.items():
            setattr(self, name, value)


class RequestUtils:

@@ -523,7 +526,7 @@ class RequestUtils:
    def get_json(self, url: str, params: dict = None, **kwargs) -> Optional[dict]:
        """
        发送GET请求并返回JSON数据,自动关闭连接
        :param url: 请求的URL
        :param url: 请求的URL
        :param params: 请求的参数
        :param kwargs: 其他请求参数
        :return: JSON数据,若发生异常则返回None

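For context, a minimal usage sketch of this helper. The import path and the bare constructor call are assumptions for illustration and are not part of this diff:

```python
# Hypothetical usage of RequestUtils.get_json (import path assumed).
from app.utils.http import RequestUtils

data = RequestUtils().get_json("https://api.github.com/repos/jxxghp/MoviePilot")
if data is None:
    print("request failed or response was not valid JSON")  # get_json returns None on errors
else:
    print(data.get("stargazers_count"))
```
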
@@ -1,7 +1,7 @@
"""2.0.0

Revision ID: 294b007932ef
Revises:
Revises:
Create Date: 2024-07-20 08:43:40.741251

"""

@@ -15,25 +15,25 @@ http {
    server {
        listen 38379;
        server_name localhost;

        access_log /dev/stdout combined;
        error_log /dev/stdout;

        location / {
            proxy_pass http://docker;
            proxy_redirect off;

            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;

            client_max_body_size 10m;
            client_body_buffer_size 128k;

            proxy_connect_timeout 90;
            proxy_send_timeout 120;
            proxy_read_timeout 120;

            proxy_buffer_size 4k;
            proxy_buffers 4 32k;
            proxy_busy_buffers_size 64k;

@@ -131,9 +131,9 @@ function load_config_from_app_env() {
        # (例如 envsubst, mp_update.sh, cert.sh)
        if declare -gx "${var_name}=${final_value}"; then
            if [ -z "${final_value}" ]; then
                INFO "变量 ${var_name}, 值为空, 来源: ${value_source})。"
                INFO "变量 ${var_name}, 值为空 (来源: ${value_source})。"
            else
                INFO "变量 ${var_name}, 值: ${final_value} , (来源: ${value_source})。"
                INFO "变量 ${var_name}, 值: ${final_value} (来源: ${value_source})。"
            fi

            # 如果变量不是来自初始环境变量,则记录下来以便稍后 unset
@@ -151,7 +151,7 @@ function load_config_from_app_env() {
                fi
            fi
        else
            ERROR "导出变量 ${var_name} (值: '${final_value}', 来源: ${value_source}) 失败。"
            ERROR "导出变量 ${var_name}, 值: '${final_value}'失败 (来源: ${value_source}) "
        fi
    done

@@ -61,7 +61,7 @@ pip install pip-tools
   ```bash
   pip-compile --upgrade-package requests requirements.in
   ```

3. **全量更新依赖项**:

   如果你想更新 `requirements.in` 中的所有依赖包,运行以下命令生成或更新 `requirements.txt` 文件:

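(The full-upgrade command referenced by this sentence lies outside the diff context; presumably it is the standard pip-tools invocation below, shown here as an assumption rather than a quote from the README.)

```bash
# Assumed standard pip-tools command to upgrade every pin declared in requirements.in
pip-compile --upgrade requirements.in
```
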
@@ -46,7 +46,6 @@ psutil~=7.0.0
python-dotenv~=1.1.1
python-hosts~=1.1.2
watchdog~=6.0.0
openai~=1.92.2
cacheout~=0.16.0
click~=8.2.1
requests-cache~=1.2.1

@@ -1,2 +1,2 @@
APP_VERSION = 'v2.5.9'
FRONTEND_VERSION = 'v2.5.9'
APP_VERSION = 'v2.6.0'
FRONTEND_VERSION = 'v2.6.0'