Compare commits

...

13 Commits

Author SHA1 Message Date
jxxghp
b343c601be v2.5.5
- 支持更精细的用户权限控制
- 高级设置中增加了刮削内容设定
2025-06-11 20:27:49 +08:00
jxxghp
e56d7006b4 init users 2025-06-11 20:24:59 +08:00
jxxghp
1b7bcd7784 init users 2025-06-11 19:57:21 +08:00
jxxghp
4cb9025b6c fix season_nfo 2025-06-11 19:48:02 +08:00
jxxghp
f8864ab053 fix reload 2025-06-11 07:11:50 +08:00
jxxghp
64eba46a67 fix 2025-06-11 07:07:55 +08:00
jxxghp
35d9cc1d40 remove jiaba 2025-06-11 00:00:08 +08:00
jxxghp
3036107dac fix user api 2025-06-10 23:42:57 +08:00
jxxghp
214089b4ea Merge pull request #4423 from lonelyman0108/v2 2025-06-10 18:04:13 +08:00
LM
95b7ba28e4 update: 添加fanart环境变量 2025-06-10 17:59:25 +08:00
LM
880272f96e update: 优化fanart获取逻辑,支持设定语言 2025-06-10 17:59:03 +08:00
LM
7ed26fadb6 update: 更新fanart刮削逻辑,优先获取中文、英文内容 2025-06-10 17:25:58 +08:00
jxxghp
f0d25a02a6 feat:支持刮削详细设定 2025-06-10 16:37:15 +08:00
14 changed files with 318 additions and 142 deletions

View File

@@ -1,12 +1,10 @@
from typing import List, Any, Optional
import jieba
from fastapi import APIRouter, Depends
from sqlalchemy.orm import Session
from app import schemas
from app.chain.storage import StorageChain
from app.core.config import settings
from app.core.event import eventmanager
from app.core.security import verify_token
from app.db import get_db
@@ -59,9 +57,6 @@ def transfer_history(title: Optional[str] = None,
status = True
if title:
if settings.TOKENIZED_SEARCH:
words = jieba.cut(title, HMM=False)
title = "%".join(words)
total = TransferHistory.count_by_title(db, title=title, status=status)
result = TransferHistory.list_by_title(db, title=title, page=page,
count=count, status=status)

View File

@@ -43,7 +43,8 @@ def login_access_token(
user_id=user_or_message.id,
user_name=user_or_message.name,
avatar=user_or_message.avatar,
level=level
level=level,
permissions= user_or_message.permissions or {},
)

View File

@@ -10,9 +10,10 @@ from app.core.context import Context, MediaInfo
from app.core.event import eventmanager, Event
from app.core.meta import MetaBase
from app.core.metainfo import MetaInfo, MetaInfoPath
from app.db.systemconfig_oper import SystemConfigOper
from app.log import logger
from app.schemas import FileItem
from app.schemas.types import EventType, MediaType, ChainEventType
from app.schemas.types import EventType, MediaType, ChainEventType, SystemConfigKey
from app.utils.http import RequestUtils
from app.utils.string import StringUtils
@@ -26,6 +27,49 @@ class MediaChain(ChainBase):
媒体信息处理链,单例运行
"""
@staticmethod
def _get_scraping_switchs() -> dict:
    """
    Get the scraping switch configuration.

    Reads the user-saved switches from the system config and fills in any
    missing keys with defaults. The merge is performed on a new dict so the
    object returned by SystemConfigOper (which may be shared/cached) is
    never mutated in place.

    :return: mapping of switch name -> enabled flag
    """
    user_switchs = SystemConfigOper().get(SystemConfigKey.ScrapingSwitchs) or {}
    # Default configuration: every scraping target enabled.
    default_switchs = {
        'movie_nfo': True,        # movie NFO
        'movie_poster': True,     # movie poster
        'movie_backdrop': True,   # movie backdrop
        'movie_logo': True,       # movie logo
        'movie_disc': True,       # movie disc art
        'movie_banner': True,     # movie banner
        'movie_thumb': True,      # movie thumbnail
        'tv_nfo': True,           # TV show NFO
        'tv_poster': True,        # TV show poster
        'tv_backdrop': True,      # TV show backdrop
        'tv_banner': True,        # TV show banner
        'tv_logo': True,          # TV show logo
        'tv_thumb': True,         # TV show thumbnail
        'season_nfo': True,       # season NFO
        'season_poster': True,    # season poster
        'season_banner': True,    # season banner
        'season_thumb': True,     # season thumbnail
        'episode_nfo': True,      # episode NFO
        'episode_thumb': True     # episode thumbnail
    }
    # Defaults first, user settings override; unknown user keys are kept,
    # matching the previous fill-in-missing-keys behaviour without mutating
    # the stored config dict.
    return {**default_switchs, **user_switchs}
@staticmethod
def set_scraping_switchs(switchs: dict) -> bool:
    """
    Persist the scraping switch configuration.

    :param switchs: switch configuration dict (switch name -> enabled flag)
    :return: True when the configuration was saved successfully
    """
    config_oper = SystemConfigOper()
    return config_oper.set(SystemConfigKey.ScrapingSwitchs, switchs)
def metadata_nfo(self, meta: MetaBase, mediainfo: MediaInfo,
season: Optional[int] = None, episode: Optional[int] = None) -> Optional[str]:
"""
@@ -404,37 +448,47 @@ class MediaChain(ChainBase):
if not mediainfo:
logger.warn(f"{filepath} 无法识别文件媒体信息!")
return
# 获取刮削开关配置
scraping_switchs = self._get_scraping_switchs()
logger.info(f"开始刮削:{filepath} ...")
if mediainfo.type == MediaType.MOVIE:
# 电影
if fileitem.type == "file":
# 是否已存在
nfo_path = filepath.with_suffix(".nfo")
if overwrite or not storagechain.get_file_item(storage=fileitem.storage, path=nfo_path):
# 电影文件
movie_nfo = self.metadata_nfo(meta=meta, mediainfo=mediainfo)
if movie_nfo:
# 保存或上传nfo文件到上级目录
__save_file(_fileitem=parent, _path=nfo_path, _content=movie_nfo)
else:
logger.warn(f"{filepath.name} nfo文件生成失败")
else:
logger.info(f"已存在nfo文件{nfo_path}")
else:
# 电影目录
if is_bluray_folder(fileitem):
# 原盘目录
nfo_path = filepath / (filepath.name + ".nfo")
# 检查电影NFO开关
if scraping_switchs.get('movie_nfo', True):
# 是否已存在
nfo_path = filepath.with_suffix(".nfo")
if overwrite or not storagechain.get_file_item(storage=fileitem.storage, path=nfo_path):
# 生成原盘nfo
# 电影文件
movie_nfo = self.metadata_nfo(meta=meta, mediainfo=mediainfo)
if movie_nfo:
# 保存或上传nfo文件到当前目录
__save_file(_fileitem=fileitem, _path=nfo_path, _content=movie_nfo)
# 保存或上传nfo文件到上级目录
__save_file(_fileitem=parent, _path=nfo_path, _content=movie_nfo)
else:
logger.warn(f"{filepath.name} nfo文件生成失败")
else:
logger.info(f"已存在nfo文件{nfo_path}")
else:
logger.info("电影NFO刮削已关闭跳过")
else:
# 电影目录
if is_bluray_folder(fileitem):
# 原盘目录
if scraping_switchs.get('movie_nfo', True):
nfo_path = filepath / (filepath.name + ".nfo")
if overwrite or not storagechain.get_file_item(storage=fileitem.storage, path=nfo_path):
# 生成原盘nfo
movie_nfo = self.metadata_nfo(meta=meta, mediainfo=mediainfo)
if movie_nfo:
# 保存或上传nfo文件到当前目录
__save_file(_fileitem=fileitem, _path=nfo_path, _content=movie_nfo)
else:
logger.warn(f"{filepath.name} nfo文件生成失败")
else:
logger.info(f"已存在nfo文件{nfo_path}")
else:
logger.info("电影NFO刮削已关闭跳过")
else:
# 处理目录内的文件
files = __list_files(_fileitem=fileitem)
@@ -449,16 +503,35 @@ class MediaChain(ChainBase):
image_dict = self.metadata_img(mediainfo=mediainfo)
if image_dict:
for image_name, image_url in image_dict.items():
image_path = filepath.with_name(image_name)
if overwrite or not storagechain.get_file_item(storage=fileitem.storage,
path=image_path):
# 下载图片
content = __download_image(image_url)
# 写入图片到当前目录
if content:
__save_file(_fileitem=fileitem, _path=image_path, _content=content)
# 根据图片类型检查开关
if 'poster' in image_name.lower():
should_scrape = scraping_switchs.get('movie_poster', True)
elif 'backdrop' in image_name.lower() or 'fanart' in image_name.lower():
should_scrape = scraping_switchs.get('movie_backdrop', True)
elif 'logo' in image_name.lower():
should_scrape = scraping_switchs.get('movie_logo', True)
elif 'disc' in image_name.lower() or 'cdart' in image_name.lower():
should_scrape = scraping_switchs.get('movie_disc', True)
elif 'banner' in image_name.lower():
should_scrape = scraping_switchs.get('movie_banner', True)
elif 'thumb' in image_name.lower():
should_scrape = scraping_switchs.get('movie_thumb', True)
else:
should_scrape = True # 未知类型默认刮削
if should_scrape:
image_path = filepath.with_name(image_name)
if overwrite or not storagechain.get_file_item(storage=fileitem.storage,
path=image_path):
# 下载图片
content = __download_image(image_url)
# 写入图片到当前目录
if content:
__save_file(_fileitem=fileitem, _path=image_path, _content=content)
else:
logger.info(f"已存在图片文件:{image_path}")
else:
logger.info(f"电影图片刮削已关闭,跳过:{image_name}")
else:
# 电视剧
if fileitem.type == "file":
@@ -472,38 +545,45 @@ class MediaChain(ChainBase):
if not file_mediainfo:
logger.warn(f"{filepath.name} 无法识别文件媒体信息!")
return
# 是否已存在
nfo_path = filepath.with_suffix(".nfo")
if overwrite or not storagechain.get_file_item(storage=fileitem.storage, path=nfo_path):
# 获取集的nfo文件
episode_nfo = self.metadata_nfo(meta=file_meta, mediainfo=file_mediainfo,
season=file_meta.begin_season,
episode=file_meta.begin_episode)
if episode_nfo:
# 保存或上传nfo文件到上级目录
if not parent:
parent = storagechain.get_parent_item(fileitem)
__save_file(_fileitem=parent, _path=nfo_path, _content=episode_nfo)
else:
logger.warn(f"{filepath.name} nfo文件生成失败")
else:
logger.info(f"已存在nfo文件{nfo_path}")
# 获取集的图片
image_dict = self.metadata_img(mediainfo=file_mediainfo,
season=file_meta.begin_season, episode=file_meta.begin_episode)
if image_dict:
for episode, image_url in image_dict.items():
image_path = filepath.with_suffix(Path(image_url).suffix)
if overwrite or not storagechain.get_file_item(storage=fileitem.storage, path=image_path):
# 下载图片
content = __download_image(image_url)
# 保存图片文件到当前目录
if content:
if not parent:
parent = storagechain.get_parent_item(fileitem)
__save_file(_fileitem=parent, _path=image_path, _content=content)
# 检查集NFO开关
if scraping_switchs.get('episode_nfo', True):
# 是否已存在
nfo_path = filepath.with_suffix(".nfo")
if overwrite or not storagechain.get_file_item(storage=fileitem.storage, path=nfo_path):
# 获取集的nfo文件
episode_nfo = self.metadata_nfo(meta=file_meta, mediainfo=file_mediainfo,
season=file_meta.begin_season,
episode=file_meta.begin_episode)
if episode_nfo:
# 保存或上传nfo文件到上级目录
if not parent:
parent = storagechain.get_parent_item(fileitem)
__save_file(_fileitem=parent, _path=nfo_path, _content=episode_nfo)
else:
logger.info(f"已存在图片文件:{image_path}")
logger.warn(f"{filepath.name} nfo文件生成失败")
else:
logger.info(f"已存在nfo文件{nfo_path}")
else:
logger.info("集NFO刮削已关闭跳过")
# 获取集的图片
if scraping_switchs.get('episode_thumb', True):
image_dict = self.metadata_img(mediainfo=file_mediainfo,
season=file_meta.begin_season, episode=file_meta.begin_episode)
if image_dict:
for episode, image_url in image_dict.items():
image_path = filepath.with_suffix(Path(image_url).suffix)
if overwrite or not storagechain.get_file_item(storage=fileitem.storage, path=image_path):
# 下载图片
content = __download_image(image_url)
# 保存图片文件到当前目录
if content:
if not parent:
parent = storagechain.get_parent_item(fileitem)
__save_file(_fileitem=parent, _path=image_path, _content=content)
else:
logger.info(f"已存在图片文件:{image_path}")
else:
logger.info("集缩略图刮削已关闭,跳过")
else:
# 当前为目录,处理目录内的文件
files = __list_files(_fileitem=fileitem)
@@ -521,71 +601,95 @@ class MediaChain(ChainBase):
if filepath.name in settings.RENAME_FORMAT_S0_NAMES:
season_meta.begin_season = 0
if season_meta.begin_season is not None:
# 是否已存在
nfo_path = filepath / "season.nfo"
if overwrite or not storagechain.get_file_item(storage=fileitem.storage, path=nfo_path):
# 当前目录有季号生成季nfo
season_nfo = self.metadata_nfo(meta=meta, mediainfo=mediainfo,
season=season_meta.begin_season)
if season_nfo:
# 写入nfo到根目录
__save_file(_fileitem=fileitem, _path=nfo_path, _content=season_nfo)
else:
logger.warn(f"无法生成电视剧季nfo文件{meta.name}")
else:
logger.info(f"已存在nfo文件{nfo_path}")
# TMDB季poster图片
image_dict = self.metadata_img(mediainfo=mediainfo, season=season_meta.begin_season)
if image_dict:
for image_name, image_url in image_dict.items():
image_path = filepath.with_name(image_name)
if overwrite or not storagechain.get_file_item(storage=fileitem.storage,
path=image_path):
# 下载图片
content = __download_image(image_url)
# 保存图片文件到剧集目录
if content:
if not parent:
parent = storagechain.get_parent_item(fileitem)
__save_file(_fileitem=parent, _path=image_path, _content=content)
# 检查季NFO开关
if scraping_switchs.get('season_nfo', True):
# 是否已存在
nfo_path = filepath / "season.nfo"
if overwrite or not storagechain.get_file_item(storage=fileitem.storage, path=nfo_path):
# 当前目录有季号生成季nfo
season_nfo = self.metadata_nfo(meta=meta, mediainfo=mediainfo,
season=season_meta.begin_season)
if season_nfo:
# 写入nfo到根目录
__save_file(_fileitem=fileitem, _path=nfo_path, _content=season_nfo)
else:
logger.info(f"已存在图片文件:{image_path}")
# 额外fanart季图片poster thumb banner
image_dict = self.metadata_img(mediainfo=mediainfo)
if image_dict:
for image_name, image_url in image_dict.items():
if image_name.startswith("season"):
logger.warn(f"无法生成电视剧季nfo文件{meta.name}")
else:
logger.info(f"已存在nfo文件{nfo_path}")
else:
logger.info("季NFO刮削已关闭跳过")
# TMDB季poster图片
if scraping_switchs.get('season_poster', True):
image_dict = self.metadata_img(mediainfo=mediainfo, season=season_meta.begin_season)
if image_dict:
for image_name, image_url in image_dict.items():
image_path = filepath.with_name(image_name)
# 只下载当前刮削季的图片
image_season = "00" if "specials" in image_name else image_name[6:8]
if image_season != str(season_meta.begin_season).rjust(2, '0'):
logger.info(f"当前刮削季为:{season_meta.begin_season},跳过文件:{image_path}")
continue
if overwrite or not storagechain.get_file_item(storage=fileitem.storage,
path=image_path):
# 下载图片
content = __download_image(image_url)
# 保存图片文件到当前目录
# 保存图片文件到剧集目录
if content:
if not parent:
parent = storagechain.get_parent_item(fileitem)
__save_file(_fileitem=parent, _path=image_path, _content=content)
else:
logger.info(f"已存在图片文件:{image_path}")
else:
logger.info("季海报刮削已关闭,跳过")
# 额外fanart季图片poster thumb banner
image_dict = self.metadata_img(mediainfo=mediainfo)
if image_dict:
for image_name, image_url in image_dict.items():
if image_name.startswith("season"):
# 根据季图片类型检查开关
if 'poster' in image_name.lower():
should_scrape = scraping_switchs.get('season_poster', True)
elif 'banner' in image_name.lower():
should_scrape = scraping_switchs.get('season_banner', True)
elif 'thumb' in image_name.lower():
should_scrape = scraping_switchs.get('season_thumb', True)
else:
should_scrape = True # 未知类型默认刮削
if should_scrape:
image_path = filepath.with_name(image_name)
# 只下载当前刮削季的图片
image_season = "00" if "specials" in image_name else image_name[6:8]
if image_season != str(season_meta.begin_season).rjust(2, '0'):
logger.info(f"当前刮削季为:{season_meta.begin_season},跳过文件:{image_path}")
continue
if overwrite or not storagechain.get_file_item(storage=fileitem.storage,
path=image_path):
# 下载图片
content = __download_image(image_url)
# 保存图片文件到当前目录
if content:
if not parent:
parent = storagechain.get_parent_item(fileitem)
__save_file(_fileitem=parent, _path=image_path, _content=content)
else:
logger.info(f"已存在图片文件:{image_path}")
else:
logger.info(f"季图片刮削已关闭,跳过:{image_name}")
# 判断当前目录是不是剧集根目录
if not season_meta.season:
# 是否已存在
nfo_path = filepath / "tvshow.nfo"
if overwrite or not storagechain.get_file_item(storage=fileitem.storage, path=nfo_path):
# 当前目录有名称,生成tvshow nfo 和 tv图片
tv_nfo = self.metadata_nfo(meta=meta, mediainfo=mediainfo)
if tv_nfo:
# 写入tvshow nfo到根目录
__save_file(_fileitem=fileitem, _path=nfo_path, _content=tv_nfo)
# 检查电视剧NFO开关
if scraping_switchs.get('tv_nfo', True):
# 是否已存在
nfo_path = filepath / "tvshow.nfo"
if overwrite or not storagechain.get_file_item(storage=fileitem.storage, path=nfo_path):
# 当前目录有名称生成tvshow nfo 和 tv图片
tv_nfo = self.metadata_nfo(meta=meta, mediainfo=mediainfo)
if tv_nfo:
# 写入tvshow nfo到根目录
__save_file(_fileitem=fileitem, _path=nfo_path, _content=tv_nfo)
else:
logger.warn(f"无法生成电视剧nfo文件{meta.name}")
else:
logger.warn(f"无法生成电视剧nfo文件{meta.name}")
logger.info(f"已存在nfo文件{nfo_path}")
else:
logger.info(f"已存在nfo文件{nfo_path}")
logger.info("电视剧NFO刮削已关闭跳过")
# 生成目录图片
image_dict = self.metadata_img(mediainfo=mediainfo)
if image_dict:
@@ -593,14 +697,31 @@ class MediaChain(ChainBase):
# 不下载季图片
if image_name.startswith("season"):
continue
image_path = filepath / image_name
if overwrite or not storagechain.get_file_item(storage=fileitem.storage,
path=image_path):
# 下载图片
content = __download_image(image_url)
# 保存图片文件到当前目录
if content:
__save_file(_fileitem=fileitem, _path=image_path, _content=content)
# 根据电视剧图片类型检查开关
if 'poster' in image_name.lower():
should_scrape = scraping_switchs.get('tv_poster', True)
elif 'backdrop' in image_name.lower() or 'fanart' in image_name.lower():
should_scrape = scraping_switchs.get('tv_backdrop', True)
elif 'banner' in image_name.lower():
should_scrape = scraping_switchs.get('tv_banner', True)
elif 'logo' in image_name.lower():
should_scrape = scraping_switchs.get('tv_logo', True)
elif 'thumb' in image_name.lower():
should_scrape = scraping_switchs.get('tv_thumb', True)
else:
logger.info(f"已存在图片文件:{image_path}")
should_scrape = True # 未知类型默认刮削
if should_scrape:
image_path = filepath / image_name
if overwrite or not storagechain.get_file_item(storage=fileitem.storage,
path=image_path):
# 下载图片
content = __download_image(image_url)
# 保存图片文件到当前目录
if content:
__save_file(_fileitem=fileitem, _path=image_path, _content=content)
else:
logger.info(f"已存在图片文件:{image_path}")
else:
logger.info(f"电视剧图片刮削已关闭,跳过:{image_name}")
logger.info(f"{filepath.name} 刮削完成")

View File

@@ -114,6 +114,8 @@ class ConfigModel(BaseModel):
TVDB_V4_API_PIN: str = ""
# Fanart开关
FANART_ENABLE: bool = True
# Fanart语言
FANART_LANG: str = "zh,en"
# Fanart API Key
FANART_API_KEY: str = "d2d31f9ecabea050fc7d68aa3146015f"
# 115 AppId
@@ -280,8 +282,6 @@ class ConfigModel(BaseModel):
SECURITY_IMAGE_SUFFIXES: list = Field(default=[".jpg", ".jpeg", ".png", ".webp", ".gif", ".svg", ".avif"])
# 重命名时支持的S0别名
RENAME_FORMAT_S0_NAMES: list = Field(default=["Specials", "SPs"])
# 启用分词搜索
TOKENIZED_SEARCH: bool = False
# 为指定默认字幕添加.default后缀
DEFAULT_SUB: Optional[str] = "zh-cn"
# Docker Client API地址

View File

@@ -191,8 +191,6 @@ class _MessageBase(ServiceBase[TService, NotificationConf]):
:return: 返回消息通知的配置字典
"""
if self._configs is not None:
return self._configs
configs = ServiceConfigHelper.get_notification_configs()
if not self._service_name:
return {}
@@ -260,8 +258,6 @@ class _DownloaderBase(ServiceBase[TService, DownloaderConf]):
:return: 返回下载器配置字典
"""
if self._configs is not None:
return self._configs
configs = ServiceConfigHelper.get_downloader_configs()
if not self._service_name:
return {}
@@ -279,8 +275,6 @@ class _MediaServerBase(ServiceBase[TService, MediaServerConf]):
:return: 返回媒体服务器配置字典
"""
if self._configs is not None:
return self._configs
configs = ServiceConfigHelper.get_mediaserver_configs()
if not self._service_name:
return {}

View File

@@ -399,10 +399,28 @@ class FanartModule(_ModuleBase):
if not mediainfo.get_image(season_image):
mediainfo.set_image(season_image, image_obj.get('url'))
else:
# 其他图片,按欢迎程度倒排
images.sort(key=lambda x: int(x.get('likes', 0)), reverse=True)
# 取第一张图片
image_obj = images[0]
# 其他图片,优先环境变量指定语言再like最多
def pick_best_image(candidates):
    """
    Pick the preferred image from *candidates*.

    Languages listed in FANART_LANG are tried first (in order), then the
    built-in zh/en fallback; within a matching language the image with the
    most likes wins. If no language matches, the most-liked image overall
    is returned.
    """
    def likes(img):
        # 'likes' may be stored as a string; normalise to int for comparison.
        return int(img.get('likes', 0))

    preferred_langs = [
        lang.strip()
        for lang in (settings.FANART_LANG or "").split(",")
        if lang.strip()
    ]
    # Configured languages first, then the original zh/en fallback chain.
    for lang in preferred_langs + ['zh', 'en']:
        matches = [img for img in candidates if img.get('lang') == lang]
        if matches:
            return max(matches, key=likes)
    # No language matched at all: take the most-liked image of any language.
    return max(candidates, key=likes)
image_obj = pick_best_image(images)
# 设置图片,没有图片才设置
if not mediainfo.get_image(image_name):
mediainfo.set_image(image_name, image_obj.get('url'))

View File

@@ -306,7 +306,6 @@ class QbittorrentModule(_ModuleBase, _DownloaderBase[Qbittorrent]):
))
else:
return None
return ret_torrents
def transfer_completed(self, hashs: str, downloader: Optional[str] = None) -> None:
"""
@@ -318,6 +317,7 @@ class QbittorrentModule(_ModuleBase, _DownloaderBase[Qbittorrent]):
if not server:
return None
server.set_torrents_tag(ids=hashs, tags=['已整理'])
return None
def remove_torrents(self, hashs: Union[str, list], delete_file: Optional[bool] = True,
downloader: Optional[str] = None) -> Optional[bool]:

View File

@@ -292,7 +292,6 @@ class TransmissionModule(_ModuleBase, _DownloaderBase[Transmission]):
))
else:
return None
return ret_torrents
def transfer_completed(self, hashs: str, downloader: Optional[str] = None) -> None:
"""
@@ -312,6 +311,7 @@ class TransmissionModule(_ModuleBase, _DownloaderBase[Transmission]):
else:
tags = ['已整理']
server.set_torrent_tag(ids=hashs, tags=tags)
return None
def remove_torrents(self, hashs: Union[str, list], delete_file: Optional[bool] = True,
downloader: Optional[str] = None) -> Optional[bool]:

View File

@@ -1,6 +1,6 @@
from typing import Optional
from pydantic import BaseModel
from pydantic import BaseModel, Field
class Token(BaseModel):
@@ -18,6 +18,8 @@ class Token(BaseModel):
avatar: Optional[str] = None
# 权限级别
level: int = 1
# 详细权限
permissions: Optional[dict] = Field(default_factory=dict)
class TokenPayload(BaseModel):

View File

@@ -157,6 +157,8 @@ class SystemConfigKey(Enum):
NotificationSendTime = "NotificationSendTime"
# 通知消息格式模板
NotificationTemplates = "NotificationTemplates"
# 刮削开关设置
ScrapingSwitchs = "ScrapingSwitchs"
# 处理进度Key字典

View File

@@ -32,6 +32,7 @@ class UserCreate(UserBase):
email: Optional[str] = None
password: Optional[str] = None
settings: Optional[dict] = Field(default_factory=dict)
permissions: Optional[dict] = Field(default_factory=dict)
# Properties to receive via API on update
@@ -41,6 +42,7 @@ class UserUpdate(UserBase):
email: Optional[str] = None
password: Optional[str] = None
settings: Optional[dict] = Field(default_factory=dict)
permissions: Optional[dict] = Field(default_factory=dict)
class UserInDBBase(UserBase):

View File

@@ -0,0 +1,42 @@
"""2.1.6
Revision ID: 3df653756eec
Revises: 486e56a62dcb
Create Date: 2025-06-11 19:52:57.185355
"""
import json
from app.db import SessionFactory
from app.db.models import User
# revision identifiers, used by Alembic.
revision = '3df653756eec'
down_revision = '486e56a62dcb'
branch_labels = None
depends_on = None
def upgrade() -> None:
    """Backfill a default permission set for existing ordinary users."""
    # ### commands auto generated by Alembic - please adjust! ###
    with SessionFactory() as db:
        # Walk every user and seed permissions only where none exist yet.
        for user in User.list(db):
            # Superusers are skipped: they are not permission-gated.
            if user.is_superuser:
                continue
            # Do not overwrite permissions a user already has.
            if user.permissions:
                continue
            user.update(db, {
                "permissions": {
                    "discovery": True,
                    "search": True,
                    "subscribe": True,
                    "manage": False,
                },
            })
    # ### end Alembic commands ###
def downgrade() -> None:
    # Data-only migration: the seeded permission values are left in place
    # on downgrade, so there is intentionally nothing to revert here.
    pass

View File

@@ -61,7 +61,6 @@ Pinyin2Hanzi~=0.1.1
pywebpush~=2.0.0
python-cookietools==0.0.2.1
aiofiles~=24.1.0
jieba~=0.42.1
rsa~=4.9
redis~=5.2.1
async_timeout~=5.0.1; python_full_version < "3.11.3"

View File

@@ -1,2 +1,2 @@
APP_VERSION = 'v2.5.4'
FRONTEND_VERSION = 'v2.5.4'
APP_VERSION = 'v2.5.5'
FRONTEND_VERSION = 'v2.5.5'