mirror of
https://github.com/JefferyHcool/BiliNote.git
synced 2026-05-11 18:10:06 +08:00
### 性能优化 - 后端任务执行从串行锁改为 ThreadPoolExecutor 并发执行(默认3线程) - 添加 GZipMiddleware 响应压缩 + Nginx gzip 配置 - 数据库连接池参数优化(pool_size=10, max_overflow=20) - 视频帧提取并行化(ThreadPoolExecutor) - LLM 重试配置缓存到实例,避免每次请求读 env var - 前端路由级代码拆分(React.lazy + Suspense) - Vite manualChunks 拆分 markdown/markmap/vendor - MarkdownViewer 用 React.memo + useMemo 减少不必要渲染 - NoteHistory Fuse.js 实例 useMemo 缓存 - useTaskPolling 无待处理任务时跳过轮询 - 移除 antd 依赖(NoteForm Alert、modelForm Tag),改用 shadcn/ui ### 前端转写器配置(新功能) - 新增 TranscriberConfigManager(JSON 文件存储,替代环境变量) - 新增 GET/POST /transcriber_config API 端点 - 新增 GET /transcriber_models_status 模型下载状态查询 - 新增 POST /transcriber_download 后台模型下载触发 - 前端转写器设置页面:引擎选择、模型大小选择、模型下载管理 - deploy_status 端点同步从配置文件读取 ### Bug 修复 - 修复任务进行中切换页面后进度丢失:Home.tsx status 派生逻辑补全中间状态 - 修复 MLX Whisper 静默回退 fast-whisper:移除环境变量门控,macOS 下自动尝试导入 - MLX Whisper 不可用时抛出 RuntimeError 而非静默回退 - 前端展示 MLX Whisper 可用性状态,不可用时禁用保存 Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
59 lines
1.9 KiB
Python
59 lines
1.9 KiB
Python
import json
|
||
import os
|
||
from pathlib import Path
|
||
from typing import Optional, Dict, Any
|
||
|
||
|
||
class TranscriberConfigManager:
    """Manage transcriber configuration persisted in a JSON file.

    The configuration can be changed dynamically from the frontend via the
    API; environment variables (``TRANSCRIBER_TYPE``, ``WHISPER_MODEL_SIZE``)
    serve only as fallback defaults when a key is absent from the file.
    """

    def __init__(self, filepath: str = "config/transcriber.json"):
        """Bind the manager to *filepath*, creating parent directories as needed."""
        self.path = Path(filepath)
        self.path.parent.mkdir(parents=True, exist_ok=True)

    def _read(self) -> Dict[str, Any]:
        """Return the persisted config dict, or ``{}`` if missing or unreadable.

        Best-effort by design: a corrupt or unreadable file degrades to the
        env-var defaults instead of crashing the caller.
        """
        if not self.path.exists():
            return {}
        try:
            with self.path.open("r", encoding="utf-8") as f:
                return json.load(f)
        except (OSError, ValueError):
            # OSError: file became unreadable between the exists() check and
            # open(). ValueError: covers json.JSONDecodeError (malformed JSON)
            # and UnicodeDecodeError (bad encoding). Narrower than the former
            # blanket `except Exception`, which could hide unrelated bugs.
            return {}

    def _write(self, data: Dict[str, Any]) -> None:
        """Persist *data* as pretty-printed UTF-8 JSON."""
        with self.path.open("w", encoding="utf-8") as f:
            json.dump(data, f, ensure_ascii=False, indent=2)

    def get_config(self) -> Dict[str, Any]:
        """Return the current transcriber config, with env-var fallbacks.

        Keys: ``transcriber_type`` (default env ``TRANSCRIBER_TYPE`` or
        ``fast-whisper``) and ``whisper_model_size`` (default env
        ``WHISPER_MODEL_SIZE`` or ``medium``).
        """
        data = self._read()
        return {
            "transcriber_type": data.get(
                "transcriber_type",
                os.getenv("TRANSCRIBER_TYPE", "fast-whisper"),
            ),
            "whisper_model_size": data.get(
                "whisper_model_size",
                os.getenv("WHISPER_MODEL_SIZE", "medium"),
            ),
        }

    def update_config(
        self,
        transcriber_type: str,
        whisper_model_size: Optional[str] = None,
    ) -> Dict[str, Any]:
        """Update the transcriber config and persist it.

        ``whisper_model_size`` is only written when explicitly provided, so a
        partial update leaves the previous value (or default) intact.
        Returns the effective configuration after the update.
        """
        data = self._read()
        data["transcriber_type"] = transcriber_type
        if whisper_model_size is not None:
            data["whisper_model_size"] = whisper_model_size
        self._write(data)
        return self.get_config()

    def get_transcriber_type(self) -> str:
        """Convenience accessor for the configured transcriber engine."""
        return self.get_config()["transcriber_type"]

    def get_whisper_model_size(self) -> str:
        """Convenience accessor for the configured Whisper model size."""
        return self.get_config()["whisper_model_size"]