Mirror of https://github.com/JefferyHcool/BiliNote.git (synced 2026-05-12 02:20:18 +08:00)
feat: add deployment monitor page
- Add /deploy_status API endpoint for system status check
- Create Monitor.tsx component with real-time status display
- Support CUDA, FFmpeg, Whisper model status monitoring
- Auto-refresh every 30 seconds with manual refresh option
@@ -42,4 +42,37 @@ async def sys_health():
@router.get("/sys_check")
async def sys_check():
    return R.success()


@router.get("/deploy_status")
async def deploy_status():
    """Return all status information needed by the deployment monitor."""
    import torch
    import os

    # CUDA status
    cuda_available = torch.cuda.is_available()
    cuda_info = {
        "available": cuda_available,
        "version": torch.version.cuda if cuda_available else None,
        "gpu_name": torch.cuda.get_device_name(0) if cuda_available else None,
    }

    # Whisper model status
    model_size = os.getenv("WHISPER_MODEL_SIZE", "base")
    transcriber_type = os.getenv("TRANSCRIBER_TYPE", "fast-whisper")

    # FFmpeg status
    try:
        ensure_ffmpeg_or_raise()
        ffmpeg_ok = True
    except Exception:
        ffmpeg_ok = False

    return R.success(data={
        "backend": {"status": "running", "port": int(os.getenv("BACKEND_PORT", 8483))},
        "cuda": cuda_info,
        "whisper": {"model_size": model_size, "transcriber_type": transcriber_type},
        "ffmpeg": {"available": ffmpeg_ok},
    })
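For reference, below is a minimal client-side polling sketch against the new endpoint, mirroring the 30-second auto-refresh the commit message describes for Monitor.tsx. The base URL http://localhost:8483, the unprefixed /deploy_status path, and an R.success() envelope carrying a "data" key are assumptions inferred from the diff above, not confirmed against the rest of the repository.

# Minimal polling sketch; assumptions: backend reachable at localhost:8483,
# route mounted at /deploy_status with no router prefix, and R.success()
# returning a JSON body with a "data" key (none confirmed beyond this diff).
import json
import time
import urllib.request

BASE_URL = "http://localhost:8483"  # assumed default, matching BACKEND_PORT above


def fetch_deploy_status() -> dict:
    """GET /deploy_status and return the parsed JSON body."""
    with urllib.request.urlopen(f"{BASE_URL}/deploy_status", timeout=10) as resp:
        return json.load(resp)


if __name__ == "__main__":
    while True:
        try:
            data = fetch_deploy_status().get("data", {})
            print("cuda:", data.get("cuda"), "| ffmpeg:", data.get("ffmpeg"))
        except Exception as exc:  # network or server error: report and retry
            print("deploy_status check failed:", exc)
        time.sleep(30)  # matches the Monitor page's 30-second auto-refresh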