feat: add deployment monitor page

- Add /deploy_status API endpoint for system status check
- Create Monitor.tsx component with real-time status display
- Support CUDA, FFmpeg, Whisper model status monitoring
- Auto-refresh every 30 seconds with manual refresh option
This commit is contained in:
sibuchen
2026-02-06 16:15:11 +08:00
parent 7b45db2f59
commit 8cd8c6f7b4
5 changed files with 309 additions and 9 deletions

View File

@@ -42,4 +42,37 @@ async def sys_health():
@router.get("/sys_check")
async def sys_check():
    """Lightweight liveness probe; always reports success.

    Returns: the project's standard success envelope (``R.success()``).
    """
    # Fix: the original had a second, unreachable `return R.success()`
    # immediately after this one — dead code removed.
    return R.success()
@router.get("/deploy_status")
async def deploy_status():
    """Return all status information needed by the deployment monitor page.

    Reports, in one payload:
      - backend: assumed "running" (we are serving this request) plus the
        configured port (``BACKEND_PORT`` env var, default 8483)
      - cuda: availability, CUDA version, and name of GPU 0 when available
      - whisper: configured model size and transcriber type (env vars)
      - ffmpeg: whether the FFmpeg binary is usable

    Returns: ``R.success(data=...)`` wrapping the status dict.
    """
    # Local imports keep torch's heavy import cost off the module import path.
    import os

    import torch

    # CUDA status — only query version/device name when CUDA is present,
    # since those calls are invalid without an available device.
    cuda_available = torch.cuda.is_available()
    cuda_info = {
        "available": cuda_available,
        "version": torch.version.cuda if cuda_available else None,
        "gpu_name": torch.cuda.get_device_name(0) if cuda_available else None,
    }

    # Whisper model status — purely configuration, read from the environment.
    model_size = os.getenv("WHISPER_MODEL_SIZE", "base")
    transcriber_type = os.getenv("TRANSCRIBER_TYPE", "fast-whisper")

    # FFmpeg status — probe via the shared helper; any failure means "unusable".
    # Fix: narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
    # still propagate instead of being reported as "ffmpeg unavailable".
    try:
        ensure_ffmpeg_or_raise()
        ffmpeg_ok = True
    except Exception:
        ffmpeg_ok = False

    return R.success(data={
        "backend": {"status": "running", "port": int(os.getenv("BACKEND_PORT", 8483))},
        "cuda": cuda_info,
        "whisper": {"model_size": model_size, "transcriber_type": transcriber_type},
        "ffmpeg": {"available": ffmpeg_ok},
    })