feat: add AI Settings tab for managing providers and models

This commit is contained in:
ShiYu
2025-10-18 11:35:18 +08:00
parent 02cc31d296
commit bf83187d8c
23 changed files with 3280 additions and 649 deletions

View File

@@ -1,6 +1,6 @@
from fastapi import FastAPI from fastapi import FastAPI
from .routes import adapters, virtual_fs, auth, config, processors, tasks, logs, share, backup, search, vector_db, offline_downloads from .routes import adapters, virtual_fs, auth, config, processors, tasks, logs, share, backup, search, vector_db, offline_downloads, ai_providers
from .routes import webdav from .routes import webdav
from .routes import plugins from .routes import plugins
@@ -18,6 +18,7 @@ def include_routers(app: FastAPI):
app.include_router(share.public_router) app.include_router(share.public_router)
app.include_router(backup.router) app.include_router(backup.router)
app.include_router(vector_db.router) app.include_router(vector_db.router)
app.include_router(ai_providers.router)
app.include_router(plugins.router) app.include_router(plugins.router)
app.include_router(webdav.router) app.include_router(webdav.router)
app.include_router(offline_downloads.router) app.include_router(offline_downloads.router)

177
api/routes/ai_providers.py Normal file
View File

@@ -0,0 +1,177 @@
from typing import Annotated, Dict, Optional
import httpx
from fastapi import APIRouter, Depends, HTTPException, Path
from api.response import success
from schemas.ai import (
AIDefaultsUpdate,
AIModelCreate,
AIModelUpdate,
AIProviderCreate,
AIProviderUpdate,
)
from services.ai_providers import AIProviderService
from services.auth import User, get_current_active_user
from services.vector_db import VectorDBService
router = APIRouter(prefix="/api/ai", tags=["ai"])
service = AIProviderService()
@router.get("/providers")
async def list_providers(
    current_user: Annotated[User, Depends(get_current_active_user)]
):
    """List every configured AI provider (their models are embedded)."""
    return success({"providers": await service.list_providers()})
@router.post("/providers")
async def create_provider(
    payload: AIProviderCreate,
    current_user: Annotated[User, Depends(get_current_active_user)]
):
    """Register a new AI provider from the validated payload."""
    created = await service.create_provider(payload.dict())
    return success(created)
@router.get("/providers/{provider_id}")
async def get_provider(
    provider_id: Annotated[int, Path(..., gt=0)],
    current_user: Annotated[User, Depends(get_current_active_user)],
):
    """Fetch a single provider, including its stored model list."""
    return success(await service.get_provider(provider_id, with_models=True))
@router.put("/providers/{provider_id}")
async def update_provider(
    provider_id: Annotated[int, Path(..., gt=0)],
    payload: AIProviderUpdate,
    current_user: Annotated[User, Depends(get_current_active_user)],
):
    """Partially update a provider; 400 when the payload has no non-null fields."""
    changes = {
        field: value
        for field, value in payload.dict().items()
        if value is not None
    }
    if not changes:
        raise HTTPException(status_code=400, detail="No fields to update")
    return success(await service.update_provider(provider_id, changes))
@router.delete("/providers/{provider_id}")
async def delete_provider(
    provider_id: Annotated[int, Path(..., gt=0)],
    current_user: Annotated[User, Depends(get_current_active_user)],
):
    """Remove a provider and echo the deleted id back to the caller."""
    await service.delete_provider(provider_id)
    return success({"id": provider_id})
@router.post("/providers/{provider_id}/sync-models")
async def sync_models(
    provider_id: Annotated[int, Path(..., gt=0)],
    current_user: Annotated[User, Depends(get_current_active_user)],
):
    """Pull the provider's remote model list and upsert it locally.

    Maps configuration problems to 400 and upstream/network failures to 502.
    """
    try:
        outcome = await service.sync_models(provider_id)
    except ValueError as exc:
        raise HTTPException(status_code=400, detail=str(exc)) from exc
    except (httpx.RequestError, httpx.HTTPStatusError) as exc:
        raise HTTPException(
            status_code=502, detail=f"Failed to synchronize models: {exc}"
        ) from exc
    return success(outcome)
@router.get("/providers/{provider_id}/remote-models")
async def fetch_remote_models(
    provider_id: Annotated[int, Path(..., gt=0)],
    current_user: Annotated[User, Depends(get_current_active_user)],
):
    """Preview the provider's remote model catalog without persisting it."""
    try:
        remote = await service.fetch_remote_models(provider_id)
    except ValueError as exc:
        raise HTTPException(status_code=400, detail=str(exc)) from exc
    except (httpx.RequestError, httpx.HTTPStatusError) as exc:
        raise HTTPException(
            status_code=502, detail=f"Failed to pull models: {exc}"
        ) from exc
    return success({"models": remote})
@router.get("/providers/{provider_id}/models")
async def list_models(
    provider_id: Annotated[int, Path(..., gt=0)],
    current_user: Annotated[User, Depends(get_current_active_user)],
):
    """List the locally stored models for one provider."""
    return success({"models": await service.list_models(provider_id)})
@router.post("/providers/{provider_id}/models")
async def create_model(
    provider_id: Annotated[int, Path(..., gt=0)],
    payload: AIModelCreate,
    current_user: Annotated[User, Depends(get_current_active_user)],
):
    """Manually add a model under the given provider."""
    return success(await service.create_model(provider_id, payload.dict()))
@router.put("/models/{model_id}")
async def update_model(
    model_id: Annotated[int, Path(..., gt=0)],
    payload: AIModelUpdate,
    current_user: Annotated[User, Depends(get_current_active_user)],
):
    """Partially update a model; 400 when the payload has no non-null fields."""
    changes = {
        field: value
        for field, value in payload.dict().items()
        if value is not None
    }
    if not changes:
        raise HTTPException(status_code=400, detail="No fields to update")
    return success(await service.update_model(model_id, changes))
@router.delete("/models/{model_id}")
async def delete_model(
    model_id: Annotated[int, Path(..., gt=0)],
    current_user: Annotated[User, Depends(get_current_active_user)],
):
    """Remove a single model and echo the deleted id back."""
    await service.delete_model(model_id)
    return success({"id": model_id})
def _get_embedding_dimension(entry: Optional[Dict]) -> Optional[int]:
if not entry:
return None
value = entry.get("embedding_dimensions")
return int(value) if value is not None else None
@router.get("/defaults")
async def get_defaults(
    current_user: Annotated[User, Depends(get_current_active_user)],
):
    """Return the current default model for each ability."""
    return success(await service.get_default_models())
@router.put("/defaults")
async def update_defaults(
    payload: AIDefaultsUpdate,
    current_user: Annotated[User, Depends(get_current_active_user)],
):
    """Replace the default-model mapping.

    If the change swaps the default embedding model for one with a different
    vector dimension, the stored vectors become unusable, so the vector DB is
    wiped as part of the update.
    """
    previous = await service.get_default_models()
    try:
        updated = await service.set_default_models(payload.as_mapping())
    except ValueError as exc:
        raise HTTPException(status_code=400, detail=str(exc)) from exc
    old_dim = _get_embedding_dimension(previous.get("embedding"))
    new_dim = _get_embedding_dimension(updated.get("embedding"))
    if old_dim and new_dim and old_dim != new_dim:
        try:
            await VectorDBService().clear_all_data()
        except Exception as exc:  # noqa: BLE001
            raise HTTPException(
                status_code=500, detail=f"Failed to clear vector database: {exc}"
            ) from exc
    return success(updated)

View File

@@ -1,11 +1,10 @@
import httpx import httpx
import time import time
from fastapi import APIRouter, Depends, Form, HTTPException from fastapi import APIRouter, Depends, Form
from typing import Annotated from typing import Annotated
from services.config import ConfigCenter, VERSION from services.config import ConfigCenter, VERSION
from services.auth import get_current_active_user, User, has_users from services.auth import get_current_active_user, User, has_users
from api.response import success from api.response import success
from services.vector_db import VectorDBService
router = APIRouter(prefix="/api/config", tags=["config"]) router = APIRouter(prefix="/api/config", tags=["config"])
@@ -24,27 +23,8 @@ async def set_config(
key: str = Form(...), key: str = Form(...),
value: str = Form(...) value: str = Form(...)
): ):
original_value = await ConfigCenter.get(key) await ConfigCenter.set(key, value)
value_to_save = value return success({"key": key, "value": value})
if key == "AI_EMBED_DIM":
try:
parsed_value = int(value)
except (TypeError, ValueError):
raise HTTPException(status_code=400, detail="AI_EMBED_DIM must be an integer")
if parsed_value <= 0:
raise HTTPException(status_code=400, detail="AI_EMBED_DIM must be greater than zero")
value_to_save = str(parsed_value)
await ConfigCenter.set(key, value_to_save)
if key == "AI_EMBED_DIM" and str(original_value) != value_to_save:
try:
service = VectorDBService()
await service.clear_all_data()
except Exception as exc:
raise HTTPException(status_code=500, detail=f"Failed to clear vector database: {exc}")
return success({"key": key, "value": value_to_save})
@router.get("/all") @router.get("/all")

View File

@@ -36,6 +36,81 @@ class Configuration(Model):
table = "configurations" table = "configurations"
class AIProvider(Model):
    """A configured AI service endpoint (OpenAI- or Gemini-compatible)."""

    id = fields.IntField(pk=True)
    # Human-readable label shown in the settings UI.
    name = fields.CharField(max_length=100)
    # Stable machine identifier; unique across providers.
    identifier = fields.CharField(max_length=100, unique=True)
    provider_type = fields.CharField(max_length=50, null=True)
    # Wire protocol selector ("openai" / "gemini" per the request schemas).
    api_format = fields.CharField(max_length=20)
    base_url = fields.CharField(max_length=512, null=True)
    # NOTE(review): stored in plain text — consider encrypting at rest.
    api_key = fields.CharField(max_length=512, null=True)
    logo_url = fields.CharField(max_length=512, null=True)
    # Free-form provider-specific settings (JSON).
    extra_config = fields.JSONField(null=True)
    created_at = fields.DatetimeField(auto_now_add=True)
    updated_at = fields.DatetimeField(auto_now=True)

    class Meta:
        table = "ai_providers"
class AIModel(Model):
    """A model offered by an AIProvider; rows are unique per (provider, name)."""

    id = fields.IntField(pk=True)
    # Deleting a provider cascades to its models.
    provider: fields.ForeignKeyRelation[AIProvider] = fields.ForeignKeyField(
        "models.AIProvider", related_name="models", on_delete=fields.CASCADE
    )
    # Upstream model identifier (e.g. the id returned by the provider's model API).
    name = fields.CharField(max_length=255)
    display_name = fields.CharField(max_length=255, null=True)
    description = fields.TextField(null=True)
    # Ability tags (e.g. chat / vision / embedding), stored as a JSON list.
    capabilities = fields.JSONField(null=True)
    context_window = fields.IntField(null=True)
    # Arbitrary JSON; also carries the virtual "embedding_dimensions" key
    # exposed through the property below.
    metadata = fields.JSONField(null=True)
    created_at = fields.DatetimeField(auto_now_add=True)
    updated_at = fields.DatetimeField(auto_now=True)

    class Meta:
        table = "ai_models"
        unique_together = ("provider", "name")

    @property
    def embedding_dimensions(self) -> int | None:
        """Read the embedding vector size stored in metadata, or None when absent/invalid."""
        metadata = self.metadata or {}
        if not isinstance(metadata, dict):
            return None
        value = metadata.get("embedding_dimensions")
        if value is None:
            return None
        try:
            return int(value)
        except (TypeError, ValueError):
            return None

    @embedding_dimensions.setter
    def embedding_dimensions(self, value: int | None) -> None:
        """Store (or clear, when None/invalid) the embedding size inside the metadata JSON."""
        base_metadata = self.metadata if isinstance(self.metadata, dict) else {}
        metadata = dict(base_metadata or {})
        if value is None:
            metadata.pop("embedding_dimensions", None)
        else:
            try:
                metadata["embedding_dimensions"] = int(value)
            except (TypeError, ValueError):
                # An un-coercible value degrades to "unset" rather than raising.
                metadata.pop("embedding_dimensions", None)
        # Empty dict collapses back to NULL in the column.
        self.metadata = metadata or None
class AIDefaultModel(Model):
    """Maps one ability to its default AIModel; at most one row per ability."""

    id = fields.IntField(pk=True)
    # Ability key (e.g. "chat", "embedding"); unique enforces one default each.
    ability = fields.CharField(max_length=50, unique=True)
    # Deleting the model removes the default mapping as well.
    model: fields.ForeignKeyRelation[AIModel] = fields.ForeignKeyField(
        "models.AIModel", related_name="default_for", on_delete=fields.CASCADE
    )
    created_at = fields.DatetimeField(auto_now_add=True)
    updated_at = fields.DatetimeField(auto_now=True)

    class Meta:
        table = "ai_default_models"
class AutomationTask(Model): class AutomationTask(Model):
id = fields.IntField(pk=True) id = fields.IntField(pk=True)
name = fields.CharField(max_length=100) name = fields.CharField(max_length=100)

101
schemas/ai.py Normal file
View File

@@ -0,0 +1,101 @@
from typing import List, Optional
from pydantic import BaseModel, Field, field_validator
from services.ai_providers import ABILITIES, normalize_capabilities
class AIProviderBase(BaseModel):
    """Shared request fields for an AI provider."""

    name: str
    # Stable machine id: lowercase letters, digits, '_', '-', '.'.
    identifier: str = Field(..., pattern=r"^[a-z0-9_\-\.]+$")
    provider_type: Optional[str] = None
    api_format: str
    base_url: Optional[str] = None
    api_key: Optional[str] = None
    logo_url: Optional[str] = None
    extra_config: Optional[dict] = None

    @field_validator("api_format")
    @classmethod  # pydantic-v2 validators are class-level; stack @classmethod as documented
    def normalize_format(cls, value: str) -> str:
        """Lower-case api_format and restrict it to the supported wire formats."""
        fmt = value.lower()
        if fmt not in {"openai", "gemini"}:
            raise ValueError("api_format must be 'openai' or 'gemini'")
        return fmt
class AIProviderCreate(AIProviderBase):
    """Payload for POST /api/ai/providers (same fields as the base schema)."""
    pass
class AIProviderUpdate(BaseModel):
    """Partial-update payload for a provider; None means 'leave unchanged'."""

    name: Optional[str] = None
    provider_type: Optional[str] = None
    api_format: Optional[str] = None
    base_url: Optional[str] = None
    api_key: Optional[str] = None
    logo_url: Optional[str] = None
    extra_config: Optional[dict] = None

    @field_validator("api_format")
    @classmethod  # pydantic-v2 validators are class-level; stack @classmethod as documented
    def normalize_format(cls, value: Optional[str]) -> Optional[str]:
        """Lower-case api_format when supplied; None passes through untouched."""
        if value is None:
            return value
        fmt = value.lower()
        if fmt not in {"openai", "gemini"}:
            raise ValueError("api_format must be 'openai' or 'gemini'")
        return fmt
class AIModelBase(BaseModel):
    """Shared request fields for an AI model."""

    name: str
    display_name: Optional[str] = None
    description: Optional[str] = None
    capabilities: Optional[List[str]] = None
    context_window: Optional[int] = None
    # Virtual field: persisted inside the model's metadata JSON, not as a column.
    embedding_dimensions: Optional[int] = None
    metadata: Optional[dict] = None

    @field_validator("capabilities")
    @classmethod  # pydantic-v2 validators are class-level; stack @classmethod as documented
    def validate_capabilities(cls, items: Optional[List[str]]) -> Optional[List[str]]:
        """Normalize capability tags and reject genuinely unknown ones.

        Bug fix: the invalid set is computed on the normalized (stripped,
        lower-cased) form of each item. Previously the raw strings were
        compared against the normalized output, so accepted variants such as
        "Chat" or " chat " were wrongly reported as unsupported.
        """
        if items is None:
            return None
        normalized = normalize_capabilities(items)
        allowed = set(normalized)
        invalid = {raw for raw in items if str(raw).strip().lower() not in allowed}
        if invalid:
            raise ValueError(f"Unsupported capabilities: {', '.join(invalid)}")
        return normalized
class AIModelCreate(AIModelBase):
    """Payload for POST /api/ai/providers/{provider_id}/models."""
    pass
class AIModelUpdate(BaseModel):
    """Partial-update payload for a model; None means 'leave unchanged'."""

    display_name: Optional[str] = None
    description: Optional[str] = None
    capabilities: Optional[List[str]] = None
    context_window: Optional[int] = None
    # Virtual field: persisted inside the model's metadata JSON, not as a column.
    embedding_dimensions: Optional[int] = None
    metadata: Optional[dict] = None

    @field_validator("capabilities")
    @classmethod  # pydantic-v2 validators are class-level; stack @classmethod as documented
    def validate_capabilities(cls, items: Optional[List[str]]) -> Optional[List[str]]:
        """Normalize capability tags and reject genuinely unknown ones.

        Bug fix: compare each raw item's normalized form against the accepted
        set, so case/whitespace variants that normalize_capabilities accepts
        (e.g. "Chat") are no longer reported as unsupported.
        """
        if items is None:
            return None
        normalized = normalize_capabilities(items)
        allowed = set(normalized)
        invalid = {raw for raw in items if str(raw).strip().lower() not in allowed}
        if invalid:
            raise ValueError(f"Unsupported capabilities: {', '.join(invalid)}")
        return normalized
class AIDefaultsUpdate(BaseModel):
    """Payload for PUT /api/ai/defaults: one optional model id per ability."""

    chat: Optional[int] = None
    vision: Optional[int] = None
    embedding: Optional[int] = None
    rerank: Optional[int] = None
    voice: Optional[int] = None
    tools: Optional[int] = None

    def as_mapping(self) -> dict:
        """Return {ability: model_id or None} covering every known ability."""
        return {ability: getattr(self, ability) for ability in ABILITIES}

View File

@@ -1,113 +1,247 @@
from __future__ import annotations
import httpx import httpx
from typing import List from typing import List, Sequence, Tuple
from services.config import ConfigCenter
from models.database import AIModel, AIProvider
from services.ai_providers import AIProviderService
provider_service = AIProviderService()
class MissingModelError(RuntimeError):
pass
async def describe_image_base64(base64_image: str, detail: str = "high") -> str: async def describe_image_base64(base64_image: str, detail: str = "high") -> str:
""" """
传入base64图片和文本提示,返回图片描述文本 传入 base64 图片并返回描述文本。缺省时返回错误提示
""" """
OAI_API_URL = await ConfigCenter.get("AI_VISION_API_URL")
VISION_MODEL = await ConfigCenter.get("AI_VISION_MODEL")
API_KEY = await ConfigCenter.get("AI_VISION_API_KEY")
payload = {
"model": VISION_MODEL,
"messages": [
{"role": "user", "content": [
{
"type": "image_url",
"image_url": {
"url": f"data:image/jpeg;base64,{base64_image}",
"detail": detail
}
},
{
"type": "text",
"text": "描述这个图片"
}
]}
]
}
headers = {
"Authorization": f"Bearer {API_KEY}",
"Content-Type": "application/json"
}
try: try:
async with httpx.AsyncClient(timeout=60.0) as client: model, provider = await _require_model("vision")
resp = await client.post(OAI_API_URL, headers=headers, json=payload) if provider.api_format == "openai":
resp.raise_for_status() return await _describe_with_openai(provider, model, base64_image, detail)
result = resp.json() return await _describe_with_gemini(provider, model, base64_image, detail)
return result["choices"][0]["message"]["content"] except MissingModelError as exc:
return str(exc)
except httpx.ReadTimeout: except httpx.ReadTimeout:
return "请求超时,请稍后重试。" return "请求超时,请稍后重试。"
except Exception as e: except Exception as exc: # noqa: BLE001
return f"请求失败: {str(e)}" return f"请求失败: {exc}"
async def get_text_embedding(text: str) -> List[float]: async def get_text_embedding(text: str) -> List[float]:
""" """
传入文本,返回嵌入向量。 传入文本,返回嵌入向量。若未配置模型则抛出异常。
""" """
OAI_API_URL = await ConfigCenter.get("AI_EMBED_API_URL") model, provider = await _require_model("embedding")
EMBED_MODEL = await ConfigCenter.get("AI_EMBED_MODEL") if provider.api_format == "openai":
API_KEY = await ConfigCenter.get("AI_EMBED_API_KEY") return await _embedding_with_openai(provider, model, text)
payload = { return await _embedding_with_gemini(provider, model, text)
"model": EMBED_MODEL,
"input": text
}
headers = {
"Authorization": f"Bearer {API_KEY}",
"Content-Type": "application/json"
}
async with httpx.AsyncClient() as client:
if OAI_API_URL.endswith("chat/completions"):
url = OAI_API_URL.replace("chat/completions", "embeddings")
else:
url = OAI_API_URL
resp = await client.post(url, headers=headers, json=payload)
resp.raise_for_status()
result = resp.json()
return result["data"][0]["embedding"]
async def rerank_texts(query: str, documents: List[str]) -> List[float]: async def rerank_texts(query: str, documents: Sequence[str]) -> List[float]:
"""调用重排序模型,为一组文档返回得分。未配置时返回空列表。""" """调用重排序模型,为一组文档返回得分。未配置时返回空列表。"""
if not documents: if not documents:
return [] return []
try:
api_url = await ConfigCenter.get("AI_RERANK_API_URL") model, provider = await _require_model("rerank")
model = await ConfigCenter.get("AI_RERANK_MODEL") except MissingModelError:
api_key = await ConfigCenter.get("AI_RERANK_API_KEY")
if not api_url or not model or not api_key:
return [] return []
try:
if provider.api_format == "openai":
return await _rerank_with_openai(provider, model, query, documents)
return await _rerank_with_gemini(provider, model, query, documents)
except Exception: # noqa: BLE001
return []
async def _require_model(ability: str) -> Tuple[AIModel, AIProvider]:
model = await provider_service.get_default_model(ability)
if not model:
raise MissingModelError(f"未配置默认 {ability} 模型,请前往系统设置完成配置。")
provider = getattr(model, "provider", None)
if provider is None:
await model.fetch_related("provider")
provider = model.provider
if provider is None:
raise MissingModelError("模型缺少关联的提供商配置。")
if not provider.base_url:
raise MissingModelError("该提供商未设置 API 地址。")
return model, provider
def _openai_endpoint(provider: AIProvider, path: str) -> str:
base = (provider.base_url or "").rstrip("/")
if not base:
raise MissingModelError("提供商 API 地址未配置。")
return f"{base}/{path.lstrip('/')}"
def _openai_headers(provider: AIProvider) -> dict:
headers = {"Content-Type": "application/json"}
if provider.api_key:
headers["Authorization"] = f"Bearer {provider.api_key}"
return headers
def _gemini_endpoint(provider: AIProvider, path: str) -> str:
base = (provider.base_url or "").rstrip("/")
if not base:
raise MissingModelError("提供商 API 地址未配置。")
url = f"{base}/{path.lstrip('/')}"
if provider.api_key:
connector = "&" if "?" in url else "?"
url = f"{url}{connector}key={provider.api_key}"
return url
async def _describe_with_openai(provider: AIProvider, model: AIModel, base64_image: str, detail: str) -> str:
url = _openai_endpoint(provider, "/chat/completions")
payload = { payload = {
"model": model, "model": model.name,
"query": query, "messages": [
"documents": documents, {
} "role": "user",
headers = { "content": [
"Authorization": f"Bearer {api_key}", {
"Content-Type": "application/json", "type": "image_url",
"image_url": {
"url": f"data:image/jpeg;base64,{base64_image}",
"detail": detail,
},
},
{"type": "text", "text": "描述这个图片"},
],
}
],
} }
async with httpx.AsyncClient(timeout=60.0) as client:
response = await client.post(url, headers=_openai_headers(provider), json=payload)
response.raise_for_status()
body = response.json()
return body["choices"][0]["message"]["content"]
async with httpx.AsyncClient() as client:
async def _describe_with_gemini(provider: AIProvider, model: AIModel, base64_image: str, detail: str) -> str:
detail_text = f"描述这个图片,细节等级:{detail}"
model_name = model.name if model.name.startswith("models/") else f"models/{model.name}"
url = _gemini_endpoint(provider, f"{model_name}:generateContent")
payload = {
"contents": [
{
"role": "user",
"parts": [
{
"inline_data": {
"mime_type": "image/jpeg",
"data": base64_image,
}
},
{"text": detail_text},
],
}
]
}
async with httpx.AsyncClient(timeout=60.0) as client:
response = await client.post(url, json=payload)
response.raise_for_status()
body = response.json()
candidates = body.get("candidates") or []
if not candidates:
return ""
parts = candidates[0].get("content", {}).get("parts", [])
text_parts = [part.get("text") for part in parts if isinstance(part, dict) and part.get("text")]
return "\n".join(text_parts)
async def _embedding_with_openai(provider: AIProvider, model: AIModel, text: str) -> List[float]:
url = _openai_endpoint(provider, "/embeddings")
payload = {
"model": model.name,
"input": text,
}
async with httpx.AsyncClient(timeout=30.0) as client:
response = await client.post(url, headers=_openai_headers(provider), json=payload)
response.raise_for_status()
body = response.json()
return body["data"][0]["embedding"]
async def _embedding_with_gemini(provider: AIProvider, model: AIModel, text: str) -> List[float]:
model_name = model.name if model.name.startswith("models/") else f"models/{model.name}"
url = _gemini_endpoint(provider, f"{model_name}:embedContent")
payload = {
"model": model_name,
"content": {
"parts": [{"text": text}],
},
}
async with httpx.AsyncClient(timeout=30.0) as client:
response = await client.post(url, json=payload)
response.raise_for_status()
body = response.json()
embedding = body.get("embedding") or {}
return embedding.get("values") or []
async def _rerank_with_openai(
provider: AIProvider,
model: AIModel,
query: str,
documents: Sequence[str],
) -> List[float]:
url = _openai_endpoint(provider, "/rerank")
payload = {
"model": model.name,
"query": query,
"documents": [
{"id": str(idx), "text": content}
for idx, content in enumerate(documents)
],
}
async with httpx.AsyncClient(timeout=30.0) as client:
response = await client.post(url, headers=_openai_headers(provider), json=payload)
response.raise_for_status()
body = response.json()
results = body.get("results") or body.get("data") or []
scores: List[float] = []
for item in results:
try:
scores.append(float(item.get("score", 0.0)))
except (TypeError, ValueError):
scores.append(0.0)
return scores
async def _rerank_with_gemini(
provider: AIProvider,
model: AIModel,
query: str,
documents: Sequence[str],
) -> List[float]:
model_name = model.name if model.name.startswith("models/") else f"models/{model.name}"
url = _gemini_endpoint(provider, f"{model_name}:rankContent")
payload = {
"query": {"text": query},
"documents": [
{"id": str(idx), "content": {"parts": [{"text": content}]}}
for idx, content in enumerate(documents)
],
}
async with httpx.AsyncClient(timeout=30.0) as client:
response = await client.post(url, json=payload)
response.raise_for_status()
body = response.json()
scores: List[float] = []
ranked = body.get("rankedDocuments") or body.get("results") or []
for item in ranked:
raw_score = item.get("relevanceScore") or item.get("score") or item.get("confidenceScore")
try: try:
resp = await client.post(api_url, headers=headers, json=payload) scores.append(float(raw_score))
resp.raise_for_status() except (TypeError, ValueError):
except httpx.HTTPStatusError: scores.append(0.0)
return [] return scores
data = resp.json()
if isinstance(data, dict):
results = data.get("results")
if isinstance(results, list):
scores = []
for item in results:
if isinstance(item, dict) and "score" in item:
try:
scores.append(float(item["score"]))
except (TypeError, ValueError):
scores.append(0.0)
return scores
return []

347
services/ai_providers.py Normal file
View File

@@ -0,0 +1,347 @@
from __future__ import annotations
from collections.abc import Iterable
from typing import Any, Dict, List, Optional, Tuple
import httpx
from tortoise.exceptions import DoesNotExist
from tortoise.transactions import in_transaction
from models.database import AIDefaultModel, AIModel, AIProvider
# Abilities a model can be tagged with; also the keys of the default-model mapping.
ABILITIES = ["chat", "vision", "embedding", "rerank", "voice", "tools"]
# Known embedding output sizes for OpenAI's first-party embedding models,
# used to pre-fill embedding_dimensions when syncing the remote catalog.
OPENAI_EMBEDDING_DIMS = {
    "text-embedding-3-large": 3072,
    "text-embedding-3-small": 1536,
    "text-embedding-ada-002": 1536,
}
def _normalize_embedding_dim(value: Any) -> Optional[int]:
if value is None:
return None
try:
casted = int(value)
except (TypeError, ValueError):
return None
return casted if casted > 0 else None
def _apply_embedding_dim_to_metadata(
data: Dict[str, Any],
embedding_dim: Optional[int],
base_metadata: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
source = base_metadata if isinstance(base_metadata, dict) else {}
metadata: Dict[str, Any] = dict(source)
override = data.get("metadata")
if isinstance(override, dict) and override:
metadata.update(override)
if embedding_dim is None:
metadata.pop("embedding_dimensions", None)
else:
metadata["embedding_dimensions"] = embedding_dim
data["metadata"] = metadata or None
return data
def normalize_capabilities(items: Optional[Iterable[str]]) -> List[str]:
    """Strip/lower-case *items*, dedupe, and keep only known abilities, in order."""
    if not items:
        return []
    cleaned = (str(raw).strip().lower() for raw in items)
    # dict.fromkeys dedupes while preserving first-seen order.
    deduped = dict.fromkeys(cleaned)
    return [ability for ability in deduped if ability in ABILITIES]
def infer_openai_capabilities(model_id: str) -> Tuple[List[str], Optional[int]]:
    """Guess a model's abilities (and embedding dim) from its id, by name heuristics."""
    needle = model_id.lower()

    def _mentions(*keywords: str) -> bool:
        # True when any keyword occurs as a substring of the lowered id.
        return any(keyword in needle for keyword in keywords)

    caps: set = set()
    if _mentions("gpt", "chat", "turbo", "o1", "sonnet", "haiku", "thinking"):
        caps.update({"chat", "tools"})
    if _mentions("vision", "gpt-4o", "gpt-4.1", "o1", "vision-preview", "omni"):
        caps.add("vision")
    if _mentions("embed", "embedding"):
        caps.add("embedding")
    if _mentions("rerank", "re-rank"):
        caps.add("rerank")
    if _mentions("tts", "speech", "audio"):
        caps.add("voice")
    # Dimension is only known for OpenAI's documented embedding models.
    return normalize_capabilities(caps), OPENAI_EMBEDDING_DIMS.get(model_id)
def infer_gemini_capabilities(methods: Iterable[str]) -> List[str]:
    """Map Gemini supported-method names onto our ability tags."""
    method_abilities = {
        "generatecontent": {"chat", "tools", "vision"},
        "counttokens": {"chat", "tools", "vision"},
        "embedcontent": {"embedding"},
        "generatespeech": {"voice"},
        "audiogeneration": {"voice"},
        "rerank": {"rerank"},
    }
    caps: set = set()
    for method in methods:
        caps |= method_abilities.get(method.lower(), set())
    return normalize_capabilities(caps)
def serialize_provider(provider: AIProvider) -> Dict[str, Any]:
    """Flatten a provider row into a plain dict (extra_config defaults to {})."""
    plain_fields = (
        "id",
        "name",
        "identifier",
        "provider_type",
        "api_format",
        "base_url",
        "api_key",
        "logo_url",
    )
    data: Dict[str, Any] = {name: getattr(provider, name) for name in plain_fields}
    data["extra_config"] = provider.extra_config or {}
    data["created_at"] = provider.created_at
    data["updated_at"] = provider.updated_at
    return data
def model_to_dict(model: AIModel, provider: Optional[AIProvider] = None) -> Dict[str, Any]:
    """Flatten a model row (plus its provider, when available) into a plain dict."""
    owner = provider if provider is not None else getattr(model, "provider", None)
    return {
        "id": model.id,
        "provider_id": model.provider_id,
        "name": model.name,
        "display_name": model.display_name,
        "description": model.description,
        "capabilities": normalize_capabilities(model.capabilities),
        "context_window": model.context_window,
        "embedding_dimensions": model.embedding_dimensions,
        "metadata": model.metadata or {},
        "created_at": model.created_at,
        "updated_at": model.updated_at,
        "provider": serialize_provider(owner) if owner else None,
    }
def provider_to_dict(provider: AIProvider, models: Optional[List[AIModel]] = None) -> Dict[str, Any]:
    """Serialize a provider, optionally embedding its serialized model list."""
    payload = serialize_provider(provider)
    if models is None:
        return payload
    payload["models"] = [model_to_dict(entry, provider=provider) for entry in models]
    return payload
class AIProviderService:
async def list_providers(self) -> List[Dict[str, Any]]:
providers = await AIProvider.all().order_by("id").prefetch_related("models")
return [provider_to_dict(p, models=list(p.models)) for p in providers]
async def get_provider(self, provider_id: int, with_models: bool = False) -> Dict[str, Any]:
if with_models:
provider = await AIProvider.get(id=provider_id)
models = await provider.models.all()
return provider_to_dict(provider, models=models)
else:
provider = await AIProvider.get(id=provider_id)
return provider_to_dict(provider)
async def create_provider(self, payload: Dict[str, Any]) -> Dict[str, Any]:
data = payload.copy()
data.setdefault("extra_config", {})
provider = await AIProvider.create(**data)
return provider_to_dict(provider)
async def update_provider(self, provider_id: int, payload: Dict[str, Any]) -> Dict[str, Any]:
provider = await AIProvider.get(id=provider_id)
for field, value in payload.items():
setattr(provider, field, value)
await provider.save()
return provider_to_dict(provider)
async def delete_provider(self, provider_id: int) -> None:
await AIProvider.filter(id=provider_id).delete()
async def list_models(self, provider_id: int) -> List[Dict[str, Any]]:
models = await AIModel.filter(provider_id=provider_id).order_by("id").prefetch_related("provider")
return [model_to_dict(m) for m in models]
async def create_model(self, provider_id: int, payload: Dict[str, Any]) -> Dict[str, Any]:
data = payload.copy()
data["provider_id"] = provider_id
data["capabilities"] = normalize_capabilities(data.get("capabilities"))
embedding_dim = _normalize_embedding_dim(data.pop("embedding_dimensions", None))
data = _apply_embedding_dim_to_metadata(data, embedding_dim)
model = await AIModel.create(**data)
await model.fetch_related("provider")
return model_to_dict(model)
    async def update_model(self, model_id: int, payload: Dict[str, Any]) -> Dict[str, Any]:
        """Partially update a model and return its serialized form.

        Raises DoesNotExist for an unknown id. embedding_dimensions is a
        virtual field stored inside the metadata JSON, so it is popped from
        the payload and merged into metadata rather than set as a column.
        """
        model = await AIModel.get(id=model_id)
        data = payload.copy()
        if "capabilities" in data:
            data["capabilities"] = normalize_capabilities(data.get("capabilities"))
        embedding_dim = None
        if "embedding_dimensions" in data:
            # Mutates `data` in place: pops the virtual key and merges it into
            # data["metadata"] on top of the row's current metadata.
            embedding_dim = _normalize_embedding_dim(data.pop("embedding_dimensions", None))
            _apply_embedding_dim_to_metadata(data, embedding_dim, base_metadata=model.metadata)
        for field, value in data.items():
            setattr(model, field, value)
        # Re-assert through the property so an explicit null (or invalid value)
        # clears the metadata key even after the merged metadata was assigned.
        if embedding_dim is not None or ("embedding_dimensions" in payload and embedding_dim is None):
            model.embedding_dimensions = embedding_dim
        await model.save()
        await model.fetch_related("provider")
        return model_to_dict(model)
async def delete_model(self, model_id: int) -> None:
await AIModel.filter(id=model_id).delete()
async def fetch_remote_models(self, provider_id: int) -> List[Dict[str, Any]]:
provider = await AIProvider.get(id=provider_id)
return await self._get_remote_models(provider)
async def _get_remote_models(self, provider: AIProvider) -> List[Dict[str, Any]]:
if not provider.base_url:
raise ValueError("Provider base_url is required for syncing models")
fmt = (provider.api_format or "").lower()
if fmt not in {"openai", "gemini"}:
raise ValueError(f"Unsupported api_format '{provider.api_format}' for syncing models")
if fmt == "openai":
return await self._fetch_openai_models(provider)
return await self._fetch_gemini_models(provider)
    async def sync_models(self, provider_id: int) -> Dict[str, int]:
        """Fetch the provider's remote catalog and upsert it into ai_models.

        Returns {"created": n, "updated": m}. Existing rows (matched on
        provider + name) keep their id; remote fields overwrite local ones.
        Network/HTTP errors from the fetch propagate to the caller.
        """
        provider = await AIProvider.get(id=provider_id)
        remote_models = await self._get_remote_models(provider)
        created = 0
        updated = 0
        for entry in remote_models:
            defaults = entry.copy()
            model_id = defaults.pop("name")
            defaults["capabilities"] = normalize_capabilities(defaults.get("capabilities"))
            # embedding_dimensions is virtual: move it into the metadata JSON.
            embedding_dim = _normalize_embedding_dim(defaults.pop("embedding_dimensions", None))
            defaults = _apply_embedding_dim_to_metadata(defaults, embedding_dim)
            obj, is_created = await AIModel.get_or_create(
                provider_id=provider.id,
                name=model_id,
                defaults=defaults,
            )
            if is_created:
                created += 1
                continue
            # Existing row: copy every remote field, then re-assert the embedding
            # dim via the property so an explicit remote null clears the key.
            for field, value in defaults.items():
                setattr(obj, field, value)
            if embedding_dim is not None or ("embedding_dimensions" in entry and embedding_dim is None):
                obj.embedding_dimensions = embedding_dim
            await obj.save()
            updated += 1
        return {"created": created, "updated": updated}
async def get_default_models(self) -> Dict[str, Optional[Dict[str, Any]]]:
defaults = await AIDefaultModel.all().prefetch_related("model__provider")
result: Dict[str, Optional[Dict[str, Any]]] = {ability: None for ability in ABILITIES}
for item in defaults:
result[item.ability] = model_to_dict(item.model, provider=item.model.provider) # type: ignore[attr-defined]
return result
async def set_default_models(self, mapping: Dict[str, Optional[int]]) -> Dict[str, Optional[Dict[str, Any]]]:
normalized = {ability: mapping.get(ability) for ability in ABILITIES}
async with in_transaction() as connection:
for ability, model_id in normalized.items():
record = await AIDefaultModel.get_or_none(ability=ability)
if model_id:
try:
model = await AIModel.get(id=model_id)
except DoesNotExist:
raise ValueError(f"Model {model_id} not found")
if record:
record.model_id = model_id
await record.save(using_db=connection)
else:
await AIDefaultModel.create(ability=ability, model_id=model_id)
elif record:
await record.delete(using_db=connection)
return await self.get_default_models()
async def get_default_model(self, ability: str) -> Optional[AIModel]:
ability_key = ability.lower()
if ability_key not in ABILITIES:
return None
record = await AIDefaultModel.get_or_none(ability=ability_key)
if not record:
return None
model = await AIModel.get_or_none(id=record.model_id)
if model:
await model.fetch_related("provider")
return model
async def _fetch_openai_models(self, provider: AIProvider) -> List[Dict[str, Any]]:
base_url = provider.base_url.rstrip("/")
url = f"{base_url}/models"
headers = {}
if provider.api_key:
headers["Authorization"] = f"Bearer {provider.api_key}"
async with httpx.AsyncClient(timeout=30.0) as client:
response = await client.get(url, headers=headers)
response.raise_for_status()
payload = response.json()
data = payload.get("data", [])
entries: List[Dict[str, Any]] = []
for item in data:
model_id = item.get("id")
if not model_id:
continue
capabilities, embedding_dim = infer_openai_capabilities(model_id)
entries.append({
"name": model_id,
"display_name": item.get("display_name"),
"description": item.get("description"),
"capabilities": capabilities,
"context_window": item.get("context_window"),
"embedding_dimensions": embedding_dim,
"metadata": item,
})
return entries
async def _fetch_gemini_models(self, provider: AIProvider) -> List[Dict[str, Any]]:
base_url = provider.base_url.rstrip("/")
suffix = "/models"
if provider.api_key:
suffix += f"?key={provider.api_key}"
url = f"{base_url}{suffix}"
async with httpx.AsyncClient(timeout=30.0) as client:
response = await client.get(url)
response.raise_for_status()
payload = response.json()
data = payload.get("models", [])
entries: List[Dict[str, Any]] = []
for item in data:
model_id = item.get("name")
if not model_id:
continue
methods = item.get("supportedGenerationMethods") or []
capabilities = infer_gemini_capabilities(methods)
entries.append({
"name": model_id,
"display_name": item.get("displayName"),
"description": item.get("description"),
"capabilities": capabilities,
"context_window": item.get("inputTokenLimit"),
"embedding_dimensions": item.get("embeddingDimensions"),
"metadata": item,
})
return entries

View File

@@ -5,15 +5,11 @@ import mimetypes
import os import os
from io import BytesIO from io import BytesIO
from services.ai import describe_image_base64, get_text_embedding from services.ai import describe_image_base64, get_text_embedding, provider_service
from services.vector_db import VectorDBService, DEFAULT_VECTOR_DIMENSION from services.vector_db import VectorDBService, DEFAULT_VECTOR_DIMENSION
from services.logging import LogService from services.logging import LogService
from services.config import ConfigCenter from PIL import Image
try: # Pillow is optional but bundled with the project dependencies
from PIL import Image
except ImportError: # pragma: no cover - fallback when pillow missing
Image = None
CHUNK_SIZE = 800 CHUNK_SIZE = 800
@@ -150,13 +146,15 @@ class VectorIndexProcessor:
file_ext = path.split('.')[-1].lower() file_ext = path.split('.')[-1].lower()
details: Dict[str, Any] = {"path": path, "action": "create", "index_type": "vector"} details: Dict[str, Any] = {"path": path, "action": "create", "index_type": "vector"}
raw_dim = await ConfigCenter.get('AI_EMBED_DIM', DEFAULT_VECTOR_DIMENSION) embedding_model = await provider_service.get_default_model("embedding")
try: vector_dim = DEFAULT_VECTOR_DIMENSION
vector_dim = int(raw_dim) if embedding_model and getattr(embedding_model, "embedding_dimensions", None):
except (TypeError, ValueError): try:
vector_dim = DEFAULT_VECTOR_DIMENSION vector_dim = int(embedding_model.embedding_dimensions)
if vector_dim <= 0: except (TypeError, ValueError):
vector_dim = DEFAULT_VECTOR_DIMENSION vector_dim = DEFAULT_VECTOR_DIMENSION
if vector_dim <= 0:
vector_dim = DEFAULT_VECTOR_DIMENSION
await vector_db.ensure_collection(collection_name, vector=True, dim=vector_dim) await vector_db.ensure_collection(collection_name, vector=True, dim=vector_dim)
await vector_db.delete_vector(collection_name, path) await vector_db.delete_vector(collection_name, path)

View File

@@ -0,0 +1 @@
<svg height="1em" style="flex:none;line-height:1" viewBox="0 0 24 24" width="1em" xmlns="http://www.w3.org/2000/svg"><title>Claude</title><path d="M4.709 15.955l4.72-2.647.08-.23-.08-.128H9.2l-.79-.048-2.698-.073-2.339-.097-2.266-.122-.571-.121L0 11.784l.055-.352.48-.321.686.06 1.52.103 2.278.158 1.652.097 2.449.255h.389l.055-.157-.134-.098-.103-.097-2.358-1.596-2.552-1.688-1.336-.972-.724-.491-.364-.462-.158-1.008.656-.722.881.06.225.061.893.686 1.908 1.476 2.491 1.833.365.304.145-.103.019-.073-.164-.274-1.355-2.446-1.446-2.49-.644-1.032-.17-.619a2.97 2.97 0 01-.104-.729L6.283.134 6.696 0l.996.134.42.364.62 1.414 1.002 2.229 1.555 3.03.456.898.243.832.091.255h.158V9.01l.128-1.706.237-2.095.23-2.695.08-.76.376-.91.747-.492.584.28.48.685-.067.444-.286 1.851-.559 2.903-.364 1.942h.212l.243-.242.985-1.306 1.652-2.064.73-.82.85-.904.547-.431h1.033l.76 1.129-.34 1.166-1.064 1.347-.881 1.142-1.264 1.7-.79 1.36.073.11.188-.02 2.856-.606 1.543-.28 1.841-.315.833.388.091.395-.328.807-1.969.486-2.309.462-3.439.813-.042.03.049.061 1.549.146.662.036h1.622l3.02.225.79.522.474.638-.079.485-1.215.62-1.64-.389-3.829-.91-1.312-.329h-.182v.11l1.093 1.068 2.006 1.81 2.509 2.33.127.578-.322.455-.34-.049-2.205-1.657-.851-.747-1.926-1.62h-.128v.17l.444.649 2.345 3.521.122 1.08-.17.353-.608.213-.668-.122-1.374-1.925-1.415-2.167-1.143-1.943-.14.08-.674 7.254-.316.37-.729.28-.607-.461-.322-.747.322-1.476.389-1.924.315-1.53.286-1.9.17-.632-.012-.042-.14.018-1.434 1.967-2.18 2.945-1.726 1.845-.414.164-.717-.37.067-.662.401-.589 2.388-3.036 1.44-1.882.93-1.086-.006-.158h-.055L4.132 18.56l-1.13.146-.487-.456.061-.746.231-.243 1.908-1.312-.006.006z" fill="#D97757" fill-rule="nonzero"></path></svg>

After

Width:  |  Height:  |  Size: 1.7 KiB

View File

@@ -0,0 +1 @@
<svg height="1em" style="flex:none;line-height:1" viewBox="0 0 24 24" width="1em" xmlns="http://www.w3.org/2000/svg"><title>DeepSeek</title><path d="M23.748 4.482c-.254-.124-.364.113-.512.234-.051.039-.094.09-.137.136-.372.397-.806.657-1.373.626-.829-.046-1.537.214-2.163.848-.133-.782-.575-1.248-1.247-1.548-.352-.156-.708-.311-.955-.65-.172-.241-.219-.51-.305-.774-.055-.16-.11-.323-.293-.35-.2-.031-.278.136-.356.276-.313.572-.434 1.202-.422 1.84.027 1.436.633 2.58 1.838 3.393.137.093.172.187.129.323-.082.28-.18.552-.266.833-.055.179-.137.217-.329.14a5.526 5.526 0 01-1.736-1.18c-.857-.828-1.631-1.742-2.597-2.458a11.365 11.365 0 00-.689-.471c-.985-.957.13-1.743.388-1.836.27-.098.093-.432-.779-.428-.872.004-1.67.295-2.687.684a3.055 3.055 0 01-.465.137 9.597 9.597 0 00-2.883-.102c-1.885.21-3.39 1.102-4.497 2.623C.082 8.606-.231 10.684.152 12.85c.403 2.284 1.569 4.175 3.36 5.653 1.858 1.533 3.997 2.284 6.438 2.14 1.482-.085 3.133-.284 4.994-1.86.47.234.962.327 1.78.397.63.059 1.236-.03 1.705-.128.735-.156.684-.837.419-.961-2.155-1.004-1.682-.595-2.113-.926 1.096-1.296 2.746-2.642 3.392-7.003.05-.347.007-.565 0-.845-.004-.17.035-.237.23-.256a4.173 4.173 0 001.545-.475c1.396-.763 1.96-2.015 2.093-3.517.02-.23-.004-.467-.247-.588zM11.581 18c-2.089-1.642-3.102-2.183-3.52-2.16-.392.024-.321.471-.235.763.09.288.207.486.371.739.114.167.192.416-.113.603-.673.416-1.842-.14-1.897-.167-1.361-.802-2.5-1.86-3.301-3.307-.774-1.393-1.224-2.887-1.298-4.482-.02-.386.093-.522.477-.592a4.696 4.696 0 011.529-.039c2.132.312 3.946 1.265 5.468 2.774.868.86 1.525 1.887 2.202 2.891.72 1.066 1.494 2.082 2.48 2.914.348.292.625.514.891.677-.802.09-2.14.11-3.054-.614zm1-6.44a.306.306 0 01.415-.287.302.302 0 01.2.288.306.306 0 01-.31.307.303.303 0 01-.304-.308zm3.11 1.596c-.2.081-.399.151-.59.16a1.245 1.245 0 01-.798-.254c-.274-.23-.47-.358-.552-.758a1.73 1.73 0 01.016-.588c.07-.327-.008-.537-.239-.727-.187-.156-.426-.199-.688-.199a.559.559 0 
01-.254-.078c-.11-.054-.2-.19-.114-.358.028-.054.16-.186.192-.21.356-.202.767-.136 1.146.016.352.144.618.408 1.001.782.391.451.462.576.685.914.176.265.336.537.445.848.067.195-.019.354-.25.452z" fill="#4D6BFE"></path></svg>

After

Width:  |  Height:  |  Size: 2.1 KiB

View File

@@ -0,0 +1 @@
<svg height="1em" style="flex:none;line-height:1" viewBox="0 0 24 24" width="1em" xmlns="http://www.w3.org/2000/svg"><title>Gemini</title><path d="M20.616 10.835a14.147 14.147 0 01-4.45-3.001 14.111 14.111 0 01-3.678-6.452.503.503 0 00-.975 0 14.134 14.134 0 01-3.679 6.452 14.155 14.155 0 01-4.45 3.001c-.65.28-1.318.505-2.002.678a.502.502 0 000 .975c.684.172 1.35.397 2.002.677a14.147 14.147 0 014.45 3.001 14.112 14.112 0 013.679 6.453.502.502 0 00.975 0c.172-.685.397-1.351.677-2.003a14.145 14.145 0 013.001-4.45 14.113 14.113 0 016.453-3.678.503.503 0 000-.975 13.245 13.245 0 01-2.003-.678z" fill="#3186FF"></path><path d="M20.616 10.835a14.147 14.147 0 01-4.45-3.001 14.111 14.111 0 01-3.678-6.452.503.503 0 00-.975 0 14.134 14.134 0 01-3.679 6.452 14.155 14.155 0 01-4.45 3.001c-.65.28-1.318.505-2.002.678a.502.502 0 000 .975c.684.172 1.35.397 2.002.677a14.147 14.147 0 014.45 3.001 14.112 14.112 0 013.679 6.453.502.502 0 00.975 0c.172-.685.397-1.351.677-2.003a14.145 14.145 0 013.001-4.45 14.113 14.113 0 016.453-3.678.503.503 0 000-.975 13.245 13.245 0 01-2.003-.678z" fill="url(#lobe-icons-gemini-fill-0)"></path><path d="M20.616 10.835a14.147 14.147 0 01-4.45-3.001 14.111 14.111 0 01-3.678-6.452.503.503 0 00-.975 0 14.134 14.134 0 01-3.679 6.452 14.155 14.155 0 01-4.45 3.001c-.65.28-1.318.505-2.002.678a.502.502 0 000 .975c.684.172 1.35.397 2.002.677a14.147 14.147 0 014.45 3.001 14.112 14.112 0 013.679 6.453.502.502 0 00.975 0c.172-.685.397-1.351.677-2.003a14.145 14.145 0 013.001-4.45 14.113 14.113 0 016.453-3.678.503.503 0 000-.975 13.245 13.245 0 01-2.003-.678z" fill="url(#lobe-icons-gemini-fill-1)"></path><path d="M20.616 10.835a14.147 14.147 0 01-4.45-3.001 14.111 14.111 0 01-3.678-6.452.503.503 0 00-.975 0 14.134 14.134 0 01-3.679 6.452 14.155 14.155 0 01-4.45 3.001c-.65.28-1.318.505-2.002.678a.502.502 0 000 .975c.684.172 1.35.397 2.002.677a14.147 14.147 0 014.45 3.001 14.112 14.112 0 013.679 6.453.502.502 0 00.975 0c.172-.685.397-1.351.677-2.003a14.145 14.145 0 
013.001-4.45 14.113 14.113 0 016.453-3.678.503.503 0 000-.975 13.245 13.245 0 01-2.003-.678z" fill="url(#lobe-icons-gemini-fill-2)"></path><defs><linearGradient gradientUnits="userSpaceOnUse" id="lobe-icons-gemini-fill-0" x1="7" x2="11" y1="15.5" y2="12"><stop stop-color="#08B962"></stop><stop offset="1" stop-color="#08B962" stop-opacity="0"></stop></linearGradient><linearGradient gradientUnits="userSpaceOnUse" id="lobe-icons-gemini-fill-1" x1="8" x2="11.5" y1="5.5" y2="11"><stop stop-color="#F94543"></stop><stop offset="1" stop-color="#F94543" stop-opacity="0"></stop></linearGradient><linearGradient gradientUnits="userSpaceOnUse" id="lobe-icons-gemini-fill-2" x1="3.5" x2="17.5" y1="13.5" y2="12"><stop stop-color="#FABC12"></stop><stop offset=".46" stop-color="#FABC12" stop-opacity="0"></stop></linearGradient></defs></svg>

After

Width:  |  Height:  |  Size: 2.8 KiB

View File

@@ -0,0 +1 @@
<svg fill="currentColor" fill-rule="evenodd" height="1em" style="flex:none;line-height:1" viewBox="0 0 24 24" width="1em" xmlns="http://www.w3.org/2000/svg"><title>OpenAI</title><path d="M21.55 10.004a5.416 5.416 0 00-.478-4.501c-1.217-2.09-3.662-3.166-6.05-2.66A5.59 5.59 0 0010.831 1C8.39.995 6.224 2.546 5.473 4.838A5.553 5.553 0 001.76 7.496a5.487 5.487 0 00.691 6.5 5.416 5.416 0 00.477 4.502c1.217 2.09 3.662 3.165 6.05 2.66A5.586 5.586 0 0013.168 23c2.443.006 4.61-1.546 5.361-3.84a5.553 5.553 0 003.715-2.66 5.488 5.488 0 00-.693-6.497v.001zm-8.381 11.558a4.199 4.199 0 01-2.675-.954c.034-.018.093-.05.132-.074l4.44-2.53a.71.71 0 00.364-.623v-6.176l1.877 1.069c.02.01.033.029.036.05v5.115c-.003 2.274-1.87 4.118-4.174 4.123zM4.192 17.78a4.059 4.059 0 01-.498-2.763c.032.02.09.055.131.078l4.44 2.53c.225.13.504.13.73 0l5.42-3.088v2.138a.068.068 0 01-.027.057L9.9 19.288c-1.999 1.136-4.552.46-5.707-1.51h-.001zM3.023 8.216A4.15 4.15 0 015.198 6.41l-.002.151v5.06a.711.711 0 00.364.624l5.42 3.087-1.876 1.07a.067.067 0 01-.063.005l-4.489-2.559c-1.995-1.14-2.679-3.658-1.53-5.63h.001zm15.417 3.54l-5.42-3.088L14.896 7.6a.067.067 0 01.063-.006l4.489 2.557c1.998 1.14 2.683 3.662 1.529 5.633a4.163 4.163 0 01-2.174 1.807V12.38a.71.71 0 00-.363-.623zm1.867-2.773a6.04 6.04 0 00-.132-.078l-4.44-2.53a.731.731 0 00-.729 0l-5.42 3.088V7.325a.068.068 0 01.027-.057L14.1 4.713c2-1.137 4.555-.46 5.707 1.513.487.833.664 1.809.499 2.757h.001zm-11.741 3.81l-1.877-1.068a.065.065 0 01-.036-.051V6.559c.001-2.277 1.873-4.122 4.181-4.12.976 0 1.92.338 2.671.954-.034.018-.092.05-.131.073l-4.44 2.53a.71.71 0 00-.365.623l-.003 6.173v.002zm1.02-2.168L12 9.25l2.414 1.375v2.75L12 14.75l-2.415-1.375v-2.75z"></path></svg>

After

Width:  |  Height:  |  Size: 1.7 KiB

View File

@@ -0,0 +1 @@
<svg height="1em" style="flex:none;line-height:1" viewBox="0 0 24 24" width="1em" xmlns="http://www.w3.org/2000/svg"><title>SiliconCloud</title><path clip-rule="evenodd" d="M22.956 6.521H12.522c-.577 0-1.044.468-1.044 1.044v3.13c0 .577-.466 1.044-1.043 1.044H1.044c-.577 0-1.044.467-1.044 1.044v4.174C0 17.533.467 18 1.044 18h10.434c.577 0 1.044-.467 1.044-1.043v-3.13c0-.578.466-1.044 1.043-1.044h9.391c.577 0 1.044-.467 1.044-1.044V7.565c0-.576-.467-1.044-1.044-1.044z" fill="#6E29F6" fill-rule="evenodd"></path></svg>

After

Width:  |  Height:  |  Size: 520 B

View File

@@ -0,0 +1,89 @@
import request from './client';
/** Capability tags a model can advertise (chat, vision, embedding, ...). */
export type AIAbility = 'chat' | 'vision' | 'embedding' | 'rerank' | 'voice' | 'tools';
/** Request payload for creating or updating an AI provider. */
export interface AIProviderPayload {
name: string;
identifier: string;
provider_type?: string | null;
api_format: 'openai' | 'gemini';
base_url?: string | null;
api_key?: string | null;
logo_url?: string | null;
extra_config?: Record<string, unknown> | null;
}
/** Server representation of a provider, including timestamps and models. */
export interface AIProvider extends Omit<AIProviderPayload, 'extra_config'> {
id: number;
extra_config: Record<string, unknown>;
created_at: string;
updated_at: string;
models?: AIModel[];
}
/** Request payload for creating or updating a model under a provider. */
export interface AIModelPayload {
name: string;
display_name?: string | null;
description?: string | null;
capabilities?: AIAbility[];
context_window?: number | null;
embedding_dimensions?: number | null;
metadata?: Record<string, unknown> | null;
}
/** Server representation of a model, optionally with its provider embedded. */
export interface AIModel extends Omit<AIModelPayload, 'metadata'> {
id: number;
provider_id: number;
metadata: Record<string, unknown>;
created_at: string;
updated_at: string;
provider?: AIProvider;
}
/** Ability -> model-id assignment map (null clears the default). */
export type AIDefaultAssignments = Partial<Record<AIAbility, number | null>>;
/** Ability -> resolved default model map as returned by the server. */
export type AIDefaultModels = Partial<Record<AIAbility, AIModel | null>>;
/** Fetch all configured providers (with their models). */
export async function fetchProviders() {
const data = await request<{ providers: AIProvider[] }>('/ai/providers');
return data.providers;
}
/** Create a new provider. */
export async function createProvider(payload: AIProviderPayload) {
return request<AIProvider>('/ai/providers', { method: 'POST', json: payload });
}
/** Partially update an existing provider. */
export async function updateProvider(id: number, payload: Partial<AIProviderPayload>) {
return request<AIProvider>(`/ai/providers/${id}`, { method: 'PUT', json: payload });
}
/** Delete a provider (server also removes its models). */
export async function deleteProvider(id: number) {
await request(`/ai/providers/${id}`, { method: 'DELETE' });
}
/** Trigger a server-side sync of the provider's remote model list. */
export async function syncProviderModels(id: number) {
return request<{ created: number; updated: number }>(`/ai/providers/${id}/sync-models`, { method: 'POST' });
}
/** Fetch the provider's remote model list without persisting it. */
export async function fetchRemoteModels(providerId: number) {
return request<{ models: AIModelPayload[] }>(`/ai/providers/${providerId}/remote-models`);
}
/** Create a model under the given provider. */
export async function createModel(providerId: number, payload: AIModelPayload) {
return request<AIModel>(`/ai/providers/${providerId}/models`, { method: 'POST', json: payload });
}
/** Partially update an existing model. */
export async function updateModel(modelId: number, payload: Partial<AIModelPayload>) {
return request<AIModel>(`/ai/models/${modelId}`, { method: 'PUT', json: payload });
}
/** Delete a model. */
export async function deleteModel(modelId: number) {
await request(`/ai/models/${modelId}`, { method: 'DELETE' });
}
/** Fetch the current default-model assignment for each ability. */
export async function fetchDefaults() {
return request<AIDefaultModels>('/ai/defaults');
}
/** Replace the default-model assignments. */
export async function updateDefaults(payload: AIDefaultAssignments) {
return request<AIDefaultModels>('/ai/defaults', { method: 'PUT', json: payload });
}

View File

@@ -307,6 +307,89 @@ export const en = {
'Vision API Key': 'Vision API Key', 'Vision API Key': 'Vision API Key',
'Embedding API URL': 'Embedding API URL', 'Embedding API URL': 'Embedding API URL',
'Embedding API Key': 'Embedding API Key', 'Embedding API Key': 'Embedding API Key',
'AI Providers & Models': 'AI Providers & Models',
'Manage AI providers, synchronize compatible models, and configure default capabilities across the system.': 'Manage AI providers, synchronize compatible models, and configure default capabilities across the system.',
'Add Provider': 'Add Provider',
'Edit Provider': 'Edit Provider',
'Pull Models': 'Pull Models',
'Manual Add': 'Manual Add',
'Clear Remote List': 'Clear Remote List',
'Select models from the list to add them automatically': 'Select models from the list to add them automatically',
'No remote models': 'No remote models',
'No remote models found': 'No remote models found',
'No remote models match search': 'No remote models match search',
'Search fetched models': 'Search fetched models',
'Already Added': 'Already Added',
'Add Selected Models': 'Add Selected Models',
'Fetch failed': 'Fetch failed',
'Select models to add': 'Select models to add',
'Added {count} models': 'Added {count} models',
'Choose Template': 'Choose Template',
'Configure Provider': 'Configure Provider',
'Back to Templates': 'Back to Templates',
'View Docs': 'View Docs',
'Custom Provider': 'Custom Provider',
'Custom Provider Description': 'Bring your own endpoint compatible with OpenAI or Gemini formats.',
'OpenAI Provider': 'OpenAI',
'OpenAI Provider Description': 'Access GPT-4o, GPT-4.1, GPT-3.5 and more models from OpenAI.',
'Azure OpenAI Provider': 'Azure OpenAI',
'Azure OpenAI Provider Description': 'Use OpenAI models deployed on Microsoft Azure.',
'Google AI Provider': 'Google AI',
'Google AI Provider Description': 'Gemini series models served via the Google AI platform.',
'SiliconFlow Provider': 'SiliconFlow',
'SiliconFlow Provider Description': 'High-performance inference platform with OpenAI-compatible APIs.',
'OpenRouter Provider': 'OpenRouter',
'OpenRouter Provider Description': 'Connect to multiple AI providers through a single OpenAI-style endpoint.',
'Anthropic Provider': 'Anthropic',
'Anthropic Provider Description': 'Claude 3 family models exposed through the Claude API.',
'DeepSeek Provider': 'DeepSeek',
'DeepSeek Provider Description': 'DeepSeek language models via OpenAI-compatible API.',
'Grok Provider': 'Grok (xAI)',
'Grok Provider Description': 'Grok models powered by xAI with OpenAI-style routes.',
'Ollama Provider': 'Ollama',
'Ollama Provider Description': 'Self-host and run models locally with Ollama\'s OpenAI bridge.',
'Voyage Provider': 'Voyage AI',
'Voyage Provider Description': 'High-quality embeddings and rerankers from Voyage AI.',
'Delete provider?': 'Delete provider?',
'Deleting this provider will also remove all associated models. Continue?': 'Deleting this provider will also remove all associated models. Continue?',
'Deleted successfully': 'Deleted successfully',
'Sync Models': 'Sync Models',
'Sync completed: {created} created, {updated} updated': 'Sync completed: {created} created, {updated} updated',
'Sync failed': 'Sync failed',
'Add Model': 'Add Model',
'Edit Model': 'Edit Model',
'Delete model?': 'Delete model?',
'This operation cannot be undone. Continue?': 'This operation cannot be undone. Continue?',
'No models yet': 'No models yet',
'Add your first AI provider to get started': 'Add your first AI provider to get started',
'Default Models Configuration': 'Default Models Configuration',
'Main Chat Model': 'Main Chat Model',
'Primary assistant for conversations, reasoning, and tool calls.': 'Primary assistant for conversations, reasoning, and tool calls.',
'Handles multimodal perception such as image understanding.': 'Handles multimodal perception such as image understanding.',
'Transforms content into dense vectors for search and retrieval.': 'Transforms content into dense vectors for search and retrieval.',
'Optimises ranking quality for search candidates.': 'Optimises ranking quality for search candidates.',
'Covers text-to-speech and speech understanding scenarios.': 'Covers text-to-speech and speech understanding scenarios.',
'Supports function calling, orchestration, and automation.': 'Supports function calling, orchestration, and automation.',
'Select a model': 'Select a model',
'Template': 'Template',
'Select a template': 'Select a template',
'Display Name': 'Display Name',
'Enter name': 'Enter name',
'Identifier': 'Identifier',
'Enter identifier': 'Enter identifier',
'Only lowercase letters, numbers, dash, dot and underscore are allowed': 'Only lowercase letters, numbers, dash, dot and underscore are allowed',
'API Format': 'API Format',
'Base URL': 'Base URL',
'Enter base url': 'Enter base URL',
'Optional, can also be provided per request': 'Optional, can also be provided per request',
'Model Identifier': 'Model Identifier',
'Enter model identifier': 'Enter model identifier',
'Description': 'Description',
'Capabilities': 'Capabilities',
'Context Window': 'Context Window',
'Embedding Dimensions': 'Embedding Dimensions',
'Price /1K input tokens': 'Price /1K input tokens',
'Price /1K output tokens': 'Price /1K output tokens',
// Adapters // Adapters
'Missing required config:': 'Missing required config:', 'Missing required config:': 'Missing required config:',

View File

@@ -259,6 +259,10 @@ export const zh = {
'Save': '保存', 'Save': '保存',
'App Settings': '应用设置', 'App Settings': '应用设置',
'AI Settings': 'AI设置', 'AI Settings': 'AI设置',
'Choose Template': '选择模板',
'Configure Provider': '配置提供商',
'Back to Templates': '返回选择',
'View Docs': '查看文档',
'Vision Model': '视觉模型', 'Vision Model': '视觉模型',
'Embedding Model': '嵌入模型', 'Embedding Model': '嵌入模型',
'Embedding Dimension': '向量维度', 'Embedding Dimension': '向量维度',
@@ -308,6 +312,85 @@ export const zh = {
'Vision API Key': '视觉模型 API Key', 'Vision API Key': '视觉模型 API Key',
'Embedding API URL': '嵌入模型 API 地址', 'Embedding API URL': '嵌入模型 API 地址',
'Embedding API Key': '嵌入模型 API Key', 'Embedding API Key': '嵌入模型 API Key',
'AI Providers & Models': 'AI 提供商与模型',
'Manage AI providers, synchronize compatible models, and configure default capabilities across the system.': '管理所有 AI 提供商,批量同步兼容模型,并配置系统默认能力。',
'Add Provider': '添加提供商',
'Edit Provider': '编辑提供商',
'Pull Models': '拉取模型',
'Manual Add': '手动添加',
'Clear Remote List': '清空列表',
'Select models from the list to add them automatically': '选择模型后可一键添加到系统',
'No remote models': '暂无远程模型',
'No remote models found': '未获取到远程模型',
'No remote models match search': '没有匹配的远程模型',
'Search fetched models': '搜索已拉取模型',
'Already Added': '已添加',
'Add Selected Models': '添加所选模型',
'Fetch failed': '拉取失败',
'Select models to add': '请选择要添加的模型',
'Added {count} models': '已添加 {count} 个模型',
'Custom Provider': '自定义提供商',
'Custom Provider Description': '自定义兼容 OpenAI 或 Gemini 标准的 API 端点。',
'OpenAI Provider': 'OpenAI',
'OpenAI Provider Description': '访问 OpenAI 的 GPT-4o、GPT-4.1、GPT-3.5 等模型。',
'Azure OpenAI Provider': 'Azure OpenAI',
'Azure OpenAI Provider Description': '使用托管在微软 Azure 上的 OpenAI 模型。',
'Google AI Provider': 'Google AI',
'Google AI Provider Description': 'Google AI 平台提供的 Gemini 系列模型。',
'SiliconFlow Provider': '硅基流动',
'SiliconFlow Provider Description': '硅基流动高性能推理平台,兼容 OpenAI 接口。',
'OpenRouter Provider': 'OpenRouter',
'OpenRouter Provider Description': '通过一个 OpenAI 风格入口接入多家 AI 提供商。',
'Anthropic Provider': 'Anthropic',
'Anthropic Provider Description': '通过 Claude API 使用 Claude 3 系列模型。',
'DeepSeek Provider': 'DeepSeek',
'DeepSeek Provider Description': 'DeepSeek 语言模型,支持 OpenAI 兼容接口。',
'Grok Provider': 'Grok (xAI)',
'Grok Provider Description': 'xAI 的 Grok 模型,提供 OpenAI 风格接口。',
'Ollama Provider': 'Ollama',
'Ollama Provider Description': '使用 Ollama 在本地运行并管理大模型。',
'Voyage Provider': 'Voyage AI',
'Voyage Provider Description': 'Voyage AI 提供的高质量嵌入与重排序模型。',
'Delete provider?': '确认删除该提供商?',
'Deleting this provider will also remove all associated models. Continue?': '删除后将同时移除该提供商下的全部模型,是否继续?',
'Deleted successfully': '删除成功',
'Sync Models': '同步模型',
'Sync completed: {created} created, {updated} updated': '同步完成:新增 {created} 个,更新 {updated} 个',
'Sync failed': '同步失败',
'Add Model': '添加模型',
'Edit Model': '编辑模型',
'Delete model?': '确认删除该模型?',
'This operation cannot be undone. Continue?': '此操作不可撤销,是否继续?',
'No models yet': '暂无模型',
'Add your first AI provider to get started': '添加第一个 AI 提供商开始配置',
'Default Models Configuration': '默认模型配置',
'Main Chat Model': '主对话模型',
'Primary assistant for conversations, reasoning, and tool calls.': '用于对话、推理与工具调用的核心模型。',
'Handles multimodal perception such as image understanding.': '负责多模态感知与图像理解。',
'Transforms content into dense vectors for search and retrieval.': '将内容向量化以驱动搜索与检索。',
'Optimises ranking quality for search candidates.': '重新排序候选结果,提升检索相关性。',
'Covers text-to-speech and speech understanding scenarios.': '覆盖文本转语音与语音理解场景。',
'Supports function calling, orchestration, and automation.': '支持函数调用、编排与自动化。',
'Select a model': '选择模型',
'Template': '模板',
'Select a template': '选择模板',
'Display Name': '显示名称',
'Enter name': '请输入名称',
'Identifier': '标识符',
'Enter identifier': '请输入标识符',
'Only lowercase letters, numbers, dash, dot and underscore are allowed': '仅允许小写字母、数字、连字符、点和下划线',
'API Format': 'API 格式',
'Base URL': '基础 URL',
'Enter base url': '请输入基础 URL',
'Optional, can also be provided per request': '可选,也可在请求时提供',
'Model Identifier': '模型标识',
'Enter model identifier': '请输入模型标识',
'Description': '描述',
'Capabilities': '能力标签',
'Context Window': '上下文窗口',
'Embedding Dimensions': '向量维度',
'Price /1K input tokens': '价格 /1K 输入 token',
'Price /1K output tokens': '价格 /1K 输出 token',
// Adapters // Adapters
'Missing required config:': '缺少必填配置:', 'Missing required config:': '缺少必填配置:',

View File

@@ -1,12 +1,27 @@
import { Form, Input, Button, message, Tabs, Space, Card, Select, Modal, Radio, InputNumber, Spin, Empty, Alert } from 'antd'; import { message, Tabs, Space } from 'antd';
import { useEffect, useState, useCallback } from 'react'; import { useEffect, useState } from 'react';
import PageCard from '../../components/PageCard'; import PageCard from '../../components/PageCard';
import { getAllConfig, setConfig } from '../../api/config'; import { getAllConfig, setConfig } from '../../api/config';
import { vectorDBApi, type VectorDBStats, type VectorDBProviderMeta, type VectorDBCurrentConfig } from '../../api/vectorDB';
import { AppstoreOutlined, RobotOutlined, DatabaseOutlined, SkinOutlined } from '@ant-design/icons'; import { AppstoreOutlined, RobotOutlined, DatabaseOutlined, SkinOutlined } from '@ant-design/icons';
import { useTheme } from '../../contexts/ThemeContext'; import { useTheme } from '../../contexts/ThemeContext';
import '../../styles/settings-tabs.css'; import '../../styles/settings-tabs.css';
import { useI18n } from '../../i18n'; import { useI18n } from '../../i18n';
import AppearanceSettingsTab from './components/AppearanceSettingsTab';
import AppSettingsTab from './components/AppSettingsTab';
import AiSettingsTab from './components/AiSettingsTab';
import VectorDbSettingsTab from './components/VectorDbSettingsTab';
type TabKey = 'appearance' | 'app' | 'ai' | 'vector-db';
const TAB_KEYS: TabKey[] = ['appearance', 'app', 'ai', 'vector-db'];
const DEFAULT_TAB: TabKey = 'appearance';
const isValidTab = (key?: string): key is TabKey => !!key && (TAB_KEYS as string[]).includes(key);
interface SystemSettingsPageProps {
tabKey?: string;
onTabNavigate?: (key: TabKey, options?: { replace?: boolean }) => void;
}
const APP_CONFIG_KEYS: { key: string, label: string, default?: string }[] = [ const APP_CONFIG_KEYS: { key: string, label: string, default?: string }[] = [
{ key: 'APP_NAME', label: 'App Name' }, { key: 'APP_NAME', label: 'App Name' },
@@ -15,57 +30,6 @@ const APP_CONFIG_KEYS: { key: string, label: string, default?: string }[] = [
{ key: 'FILE_DOMAIN', label: 'File Domain' }, { key: 'FILE_DOMAIN', label: 'File Domain' },
]; ];
interface AiConfigKeyBase {
key: string;
default?: string | number;
}
interface AiConfigKeyWithLabel extends AiConfigKeyBase {
label: string;
}
const VISION_CONFIG_KEYS: AiConfigKeyWithLabel[] = [
{ key: 'AI_VISION_API_URL', label: 'Vision API URL' },
{ key: 'AI_VISION_MODEL', label: 'Vision Model', default: 'Qwen/Qwen2.5-VL-32B-Instruct' },
{ key: 'AI_VISION_API_KEY', label: 'Vision API Key' },
];
const DEFAULT_EMBED_DIMENSION = 4096;
const EMBED_DIM_KEY = 'AI_EMBED_DIM';
const EMBED_CONFIG_KEYS: AiConfigKeyWithLabel[] = [
{ key: 'AI_EMBED_API_URL', label: 'Embedding API URL' },
{ key: 'AI_EMBED_MODEL', label: 'Embedding Model', default: 'Qwen/Qwen3-Embedding-8B' },
{ key: 'AI_EMBED_API_KEY', label: 'Embedding API Key' },
];
const RERANK_CONFIG_KEYS: AiConfigKeyWithLabel[] = [
{ key: 'AI_RERANK_API_URL', label: 'Rerank API URL' },
{ key: 'AI_RERANK_MODEL', label: 'Rerank Model' },
{ key: 'AI_RERANK_API_KEY', label: 'Rerank API Key' },
];
const ALL_AI_KEYS: AiConfigKeyBase[] = [
...VISION_CONFIG_KEYS,
...EMBED_CONFIG_KEYS,
...RERANK_CONFIG_KEYS,
{ key: EMBED_DIM_KEY, default: DEFAULT_EMBED_DIMENSION },
];
const formatBytes = (bytes?: number | null) => {
if (bytes === null || bytes === undefined) return '-';
if (bytes === 0) return '0 B';
const units = ['B', 'KB', 'MB', 'GB', 'TB'];
let value = bytes;
let unitIndex = 0;
while (value >= 1024 && unitIndex < units.length - 1) {
value /= 1024;
unitIndex += 1;
}
const precision = value >= 10 || unitIndex === 0 ? 0 : 1;
return `${value.toFixed(precision)} ${units[unitIndex]}`;
};
// Theme related config keys // Theme related config keys
const THEME_KEYS = { const THEME_KEYS = {
MODE: 'THEME_MODE', MODE: 'THEME_MODE',
@@ -75,101 +39,30 @@ const THEME_KEYS = {
CSS: 'THEME_CUSTOM_CSS', CSS: 'THEME_CUSTOM_CSS',
}; };
export default function SystemSettingsPage() { export default function SystemSettingsPage({ tabKey, onTabNavigate }: SystemSettingsPageProps) {
const [vectorConfigForm] = Form.useForm();
const [loading, setLoading] = useState(false); const [loading, setLoading] = useState(false);
const [config, setConfigState] = useState<Record<string, string> | null>(null); const [config, setConfigState] = useState<Record<string, string> | null>(null);
const [activeTab, setActiveTab] = useState('appearance'); const [activeTab, setActiveTab] = useState<TabKey>(() =>
const [vectorStats, setVectorStats] = useState<VectorDBStats | null>(null); isValidTab(tabKey) ? tabKey : DEFAULT_TAB
const [vectorStatsLoading, setVectorStatsLoading] = useState(false); );
const [vectorStatsError, setVectorStatsError] = useState<string | null>(null); const { refreshTheme } = useTheme();
const [vectorProviders, setVectorProviders] = useState<VectorDBProviderMeta[]>([]);
const [vectorConfig, setVectorConfig] = useState<VectorDBCurrentConfig | null>(null);
const [vectorConfigLoading, setVectorConfigLoading] = useState(false);
const [vectorConfigSaving, setVectorConfigSaving] = useState(false);
const [vectorMetaError, setVectorMetaError] = useState<string | null>(null);
const [selectedProviderType, setSelectedProviderType] = useState<string | null>(null);
const { refreshTheme, previewTheme } = useTheme();
const { t } = useI18n(); const { t } = useI18n();
useEffect(() => { useEffect(() => {
getAllConfig().then((data) => setConfigState(data as Record<string, string>)); getAllConfig().then((data) => setConfigState(data as Record<string, string>));
}, []); }, []);
const fetchVectorStats = useCallback(async () => { const handleSave = async (values: Record<string, unknown>) => {
setVectorStatsLoading(true);
setVectorStatsError(null);
try {
const data = await vectorDBApi.getStats();
setVectorStats(data);
} catch (e: any) {
const msg = e?.message || t('Load failed');
setVectorStatsError(msg);
message.error(msg);
} finally {
setVectorStatsLoading(false);
}
}, [t]);
const buildProviderConfigValues = useCallback((provider: VectorDBProviderMeta | undefined, existing?: Record<string, string>) => {
if (!provider) return {};
const values: Record<string, string> = {};
const schema = provider.config_schema || [];
schema.forEach((field) => {
const current = existing && existing[field.key] !== undefined && existing[field.key] !== null
? String(existing[field.key])
: undefined;
if (current !== undefined) {
values[field.key] = current;
} else if (field.default !== undefined && field.default !== null) {
values[field.key] = String(field.default);
} else {
values[field.key] = '';
}
});
return values;
}, []);
const fetchVectorMeta = useCallback(async () => {
setVectorConfigLoading(true);
setVectorMetaError(null);
try {
const [providers, current] = await Promise.all([
vectorDBApi.getProviders(),
vectorDBApi.getConfig(),
]);
setVectorProviders(providers);
setVectorConfig(current);
const enabled = providers.filter((item) => item.enabled);
let nextType: string | null = current?.type ?? null;
if (nextType && !providers.some((item) => item.type === nextType)) {
nextType = null;
}
if (!nextType) {
nextType = enabled[0]?.type ?? providers[0]?.type ?? null;
}
setSelectedProviderType(nextType);
const provider = providers.find((item) => item.type === nextType);
const configValues = buildProviderConfigValues(provider, nextType === current?.type ? current?.config : undefined);
vectorConfigForm.setFieldsValue({ type: nextType || undefined, config: configValues });
} catch (e: any) {
const msg = e?.message || t('Load failed');
setVectorMetaError(msg);
message.error(msg);
} finally {
setVectorConfigLoading(false);
}
}, [buildProviderConfigValues, message, t, vectorConfigForm]);
const handleSave = async (values: any) => {
setLoading(true); setLoading(true);
try { try {
for (const [key, value] of Object.entries(values)) { for (const [key, value] of Object.entries(values)) {
await setConfig(key, String(value ?? '')); await setConfig(key, String(value ?? ''));
} }
message.success(t('Saved successfully')); message.success(t('Saved successfully'));
setConfigState({ ...config, ...values }); const stringValues = Object.fromEntries(
Object.entries(values).map(([key, value]) => [key, String(value ?? '')]),
) as Record<string, string>;
setConfigState((prev) => ({ ...(prev ?? {}), ...stringValues }));
// trigger theme refresh if related keys changed // trigger theme refresh if related keys changed
if (Object.keys(values).some(k => Object.values(THEME_KEYS).includes(k))) { if (Object.keys(values).some(k => Object.values(THEME_KEYS).includes(k))) {
await refreshTheme(); await refreshTheme();
@@ -180,69 +73,31 @@ export default function SystemSettingsPage() {
setLoading(false); setLoading(false);
}; };
const handleProviderChange = useCallback((value: string) => { // 离开“外观设置”时,恢复后端持久化配置(取消未保存的预览)
setSelectedProviderType(value); useEffect(() => {
const provider = vectorProviders.find((item) => item.type === value); if (!isValidTab(tabKey)) {
const existing = value === vectorConfig?.type ? vectorConfig?.config : undefined; setActiveTab((prev) => (prev === DEFAULT_TAB ? prev : DEFAULT_TAB));
const configValues = buildProviderConfigValues(provider, existing); if (tabKey !== DEFAULT_TAB) {
vectorConfigForm.setFieldsValue({ type: value, config: configValues }); onTabNavigate?.(DEFAULT_TAB, { replace: true });
}, [vectorProviders, vectorConfig, buildProviderConfigValues, vectorConfigForm]); }
const handleVectorConfigSave = useCallback(async (values: { type: string; config?: Record<string, string> }) => {
if (!values?.type) {
return; return;
} }
setVectorConfigSaving(true); setActiveTab((prev) => (prev === tabKey ? prev : tabKey));
try { }, [tabKey, onTabNavigate]);
const configPayload = Object.fromEntries(
Object.entries(values.config || {}).filter(([, val]) => val !== undefined && val !== null && String(val).trim() !== '')
.map(([key, val]) => [key, String(val)])
);
const response = await vectorDBApi.updateConfig({ type: values.type, config: configPayload });
setVectorConfig(response.config);
setVectorStats(response.stats);
setVectorStatsError(null);
setSelectedProviderType(response.config.type);
const provider = vectorProviders.find((item) => item.type === response.config.type);
const mergedValues = buildProviderConfigValues(provider, response.config.config);
vectorConfigForm.setFieldsValue({ type: response.config.type, config: mergedValues });
message.success(t('Saved successfully'));
} catch (e: any) {
message.error(e?.message || t('Save failed'));
} finally {
setVectorConfigSaving(false);
}
}, [buildProviderConfigValues, message, t, vectorConfigForm, vectorProviders]);
const vectorSectionLoading = vectorStatsLoading || vectorConfigLoading;
// 离开“外观设置”时,恢复后端持久化配置(取消未保存的预览)
useEffect(() => { useEffect(() => {
if (activeTab !== 'appearance') { if (activeTab !== 'appearance') {
refreshTheme(); refreshTheme();
} }
}, [activeTab]); }, [activeTab, refreshTheme]);
useEffect(() => { const handleTabChange = (key: string) => {
if (activeTab === 'vector-db') { const nextKey: TabKey = isValidTab(key) ? key : DEFAULT_TAB;
if (!vectorProviders.length && !vectorConfigLoading) { if (nextKey !== activeTab) {
fetchVectorMeta(); setActiveTab(nextKey);
}
if (!vectorStats && !vectorStatsLoading) {
fetchVectorStats();
}
} }
}, [ onTabNavigate?.(nextKey);
activeTab, };
fetchVectorMeta,
fetchVectorStats,
vectorProviders.length,
vectorConfigLoading,
vectorStats,
vectorStatsLoading,
]);
const selectedProvider = vectorProviders.find((item) => item.type === selectedProviderType || (!selectedProviderType && item.enabled));
if (!config) { if (!config) {
return <PageCard title={t('System Settings')}><div>{t('Loading...')}</div></PageCard>; return <PageCard title={t('System Settings')}><div>{t('Loading...')}</div></PageCard>;
@@ -256,7 +111,7 @@ export default function SystemSettingsPage() {
<Tabs <Tabs
className="fx-settings-tabs" className="fx-settings-tabs"
activeKey={activeTab} activeKey={activeTab}
onChange={setActiveTab} onChange={handleTabChange}
centered centered
tabPosition="left" tabPosition="left"
items={[ items={[
@@ -269,75 +124,12 @@ export default function SystemSettingsPage() {
</span> </span>
), ),
children: ( children: (
<Form <AppearanceSettingsTab
layout="vertical" config={config}
initialValues={{ loading={loading}
[THEME_KEYS.MODE]: config[THEME_KEYS.MODE] ?? 'light', onSave={handleSave}
[THEME_KEYS.PRIMARY]: config[THEME_KEYS.PRIMARY] ?? '#111111', themeKeys={THEME_KEYS}
[THEME_KEYS.RADIUS]: Number(config[THEME_KEYS.RADIUS] ?? '10'), />
[THEME_KEYS.TOKENS]: config[THEME_KEYS.TOKENS] ?? '',
[THEME_KEYS.CSS]: config[THEME_KEYS.CSS] ?? '',
}}
onValuesChange={(_, all) => {
try {
const tokens = all[THEME_KEYS.TOKENS] ? JSON.parse(all[THEME_KEYS.TOKENS]) : undefined;
previewTheme({
mode: all[THEME_KEYS.MODE],
primaryColor: all[THEME_KEYS.PRIMARY],
borderRadius: typeof all[THEME_KEYS.RADIUS] === 'number' ? all[THEME_KEYS.RADIUS] : undefined,
customTokens: tokens,
customCSS: all[THEME_KEYS.CSS],
});
} catch {
// JSON 不合法时忽略 tokens 预览,其他项仍然生效
previewTheme({
mode: all[THEME_KEYS.MODE],
primaryColor: all[THEME_KEYS.PRIMARY],
borderRadius: typeof all[THEME_KEYS.RADIUS] === 'number' ? all[THEME_KEYS.RADIUS] : undefined,
customCSS: all[THEME_KEYS.CSS],
});
}
}}
onFinish={async (vals) => {
// Validate JSON if provided
if (vals[THEME_KEYS.TOKENS]) {
try { JSON.parse(vals[THEME_KEYS.TOKENS]); }
catch { return message.error(t('Advanced tokens must be valid JSON')); }
}
await handleSave(vals);
}}
style={{ marginTop: 24 }}
key={'appearance-' + JSON.stringify(config)}
>
<Card title={t('Theme')}>
<Form.Item name={THEME_KEYS.MODE} label={t('Theme Mode')}>
<Radio.Group buttonStyle="solid">
<Radio.Button value="light">{t('Light')}</Radio.Button>
<Radio.Button value="dark">{t('Dark')}</Radio.Button>
<Radio.Button value="system">{t('Follow System')}</Radio.Button>
</Radio.Group>
</Form.Item>
<Form.Item name={THEME_KEYS.PRIMARY} label={t('Primary Color')}>
<Input type="color" size="large" />
</Form.Item>
<Form.Item name={THEME_KEYS.RADIUS} label={t('Border Radius')}>
<InputNumber min={0} max={24} style={{ width: '100%' }} />
</Form.Item>
</Card>
<Card title={t('Advanced')} style={{ marginTop: 24 }}>
<Form.Item name={THEME_KEYS.TOKENS} label={t('Override AntD Tokens (JSON)')} tooltip={t('e.g. {"colorText": "#222"}')}>
<Input.TextArea autoSize={{ minRows: 4 }} placeholder='{ "colorText": "#222" }' />
</Form.Item>
<Form.Item name={THEME_KEYS.CSS} label={t('Custom CSS')}>
<Input.TextArea autoSize={{ minRows: 6 }} placeholder={":root{ }\n/* CSS */"} />
</Form.Item>
</Card>
<Form.Item style={{ marginTop: 24 }}>
<Button type="primary" htmlType="submit" loading={loading} block>
{t('Save')}
</Button>
</Form.Item>
</Form>
) )
}, },
{ {
@@ -349,26 +141,12 @@ export default function SystemSettingsPage() {
</span> </span>
), ),
children: ( children: (
<Form <AppSettingsTab
layout="vertical" config={config}
initialValues={{ loading={loading}
...Object.fromEntries(APP_CONFIG_KEYS.map(({ key, default: def }) => [key, config[key] ?? def ?? ''])), onSave={handleSave}
}} configKeys={APP_CONFIG_KEYS}
onFinish={handleSave} />
style={{ marginTop: 24 }}
key={JSON.stringify(config)}
>
{APP_CONFIG_KEYS.map(({ key, label }) => (
<Form.Item key={key} name={key} label={t(label)}>
<Input size="large" />
</Form.Item>
))}
<Form.Item>
<Button type="primary" htmlType="submit" loading={loading} block>
{t('Save')}
</Button>
</Form.Item>
</Form>
), ),
}, },
{ {
@@ -380,63 +158,8 @@ export default function SystemSettingsPage() {
</span> </span>
), ),
children: ( children: (
<Form <AiSettingsTab
layout="vertical" />
initialValues={{
...Object.fromEntries(ALL_AI_KEYS.map(({ key, default: def }) => [key, key === EMBED_DIM_KEY
? Number(config[key] ?? def ?? DEFAULT_EMBED_DIMENSION)
: config[key] ?? def ?? ''])),
}}
onFinish={async (vals) => {
const currentDim = Number(config[EMBED_DIM_KEY] ?? DEFAULT_EMBED_DIMENSION);
const nextDim = Number(vals[EMBED_DIM_KEY] ?? DEFAULT_EMBED_DIMENSION);
if (currentDim !== nextDim) {
Modal.confirm({
title: t('Confirm embedding dimension change'),
content: t('Changing the embedding dimension will clear the vector database automatically. You will need to rebuild indexes afterwards. Continue?'),
okText: t('Confirm'),
cancelText: t('Cancel'),
onOk: async () => {
await handleSave(vals);
},
});
return;
}
await handleSave(vals);
}}
style={{ marginTop: 24 }}
key={JSON.stringify(config)}
>
<Card title={t('Vision Model')} style={{ marginBottom: 24 }}>
{VISION_CONFIG_KEYS.map(({ key, label }) => (
<Form.Item key={key} name={key} label={t(label)}>
<Input size="large" />
</Form.Item>
))}
</Card>
<Card title={t('Embedding Model')}>
{EMBED_CONFIG_KEYS.map(({ key, label }) => (
<Form.Item key={key} name={key} label={t(label)}>
<Input size="large" />
</Form.Item>
))}
<Form.Item name={EMBED_DIM_KEY} label={t('Embedding Dimension')}>
<InputNumber min={1} max={32768} style={{ width: '100%' }} />
</Form.Item>
</Card>
<Card title={t('Rerank Model')} style={{ marginTop: 24 }}>
{RERANK_CONFIG_KEYS.map(({ key, label }) => (
<Form.Item key={key} name={key} label={t(label)}>
<Input size="large" />
</Form.Item>
))}
</Card>
<Form.Item style={{ marginTop: 24 }}>
<Button type="primary" htmlType="submit" loading={loading} block>
{t('Save')}
</Button>
</Form.Item>
</Form>
), ),
}, },
{ {
@@ -448,191 +171,7 @@ export default function SystemSettingsPage() {
</span> </span>
), ),
children: ( children: (
<Card title={t('Vector Database Settings')} style={{ marginTop: 24 }}> <VectorDbSettingsTab isActive={activeTab === 'vector-db'} />
<Space direction="vertical" size={24} style={{ width: '100%' }}>
<Space direction="vertical" size={16} style={{ width: '100%' }}>
<div style={{ display: 'flex', justifyContent: 'space-between', alignItems: 'center', flexWrap: 'wrap', gap: 12 }}>
<strong>{t('Current Statistics')}</strong>
<Button onClick={() => { fetchVectorMeta(); fetchVectorStats(); }} loading={vectorStatsLoading || vectorConfigLoading} disabled={(vectorStatsLoading || vectorConfigLoading) && !vectorStats}>
{t('Refresh')}
</Button>
</div>
{vectorSectionLoading ? (
<div style={{ display: 'flex', justifyContent: 'center', padding: '24px 0' }}>
<Spin />
</div>
) : (
<>
{vectorMetaError ? (
<Alert type="error" showIcon message={vectorMetaError} />
) : null}
{vectorStats ? (
<Space direction="vertical" size={16} style={{ width: '100%' }}>
<div style={{ display: 'flex', flexWrap: 'wrap', gap: 24 }}>
<div>
<div style={{ color: '#888' }}>{t('Collections')}</div>
<div style={{ fontSize: 20, fontWeight: 600 }}>{vectorStats.collection_count}</div>
</div>
<div>
<div style={{ color: '#888' }}>{t('Vectors')}</div>
<div style={{ fontSize: 20, fontWeight: 600 }}>{vectorStats.total_vectors}</div>
</div>
<div>
<div style={{ color: '#888' }}>{t('Database Size')}</div>
<div style={{ fontSize: 20, fontWeight: 600 }}>{formatBytes(vectorStats.db_file_size_bytes)}</div>
</div>
<div>
<div style={{ color: '#888' }}>{t('Estimated Memory')}</div>
<div style={{ fontSize: 20, fontWeight: 600 }}>{formatBytes(vectorStats.estimated_total_memory_bytes)}</div>
</div>
</div>
{vectorStats.collections.length ? (
<Space direction="vertical" style={{ width: '100%' }} size={16}>
{vectorStats.collections.map((collection) => (
<div key={collection.name} style={{ border: '1px solid #f0f0f0', borderRadius: 8, padding: 16 }}>
<Space direction="vertical" size={12} style={{ width: '100%' }}>
<div style={{ display: 'flex', justifyContent: 'space-between', alignItems: 'center', flexWrap: 'wrap', gap: 12 }}>
<strong>{collection.name}</strong>
<span style={{ color: '#888' }}>
{collection.is_vector_collection && collection.dimension
? `${t('Dimension')}: ${collection.dimension}`
: t('Non-vector collection')}
</span>
</div>
<div>{t('Vectors')}: {collection.row_count}</div>
{collection.is_vector_collection ? (
<div>{t('Estimated memory')}: {formatBytes(collection.estimated_memory_bytes)}</div>
) : null}
{collection.indexes.length ? (
<Space direction="vertical" size={4} style={{ width: '100%' }}>
<span>{t('Indexes')}:</span>
<ul style={{ paddingLeft: 20, margin: 0 }}>
{collection.indexes.map((index) => (
<li key={`${collection.name}-${index.index_name || 'default'}`}>
<span>{index.index_name || t('Unnamed index')}</span>
<span>{' · '}{index.index_type || '-'}</span>
<span>{' · '}{index.metric_type || '-'}</span>
<span>{' · '}{t('Indexed rows')}: {index.indexed_rows}</span>
<span>{' · '}{t('Pending rows')}: {index.pending_index_rows}</span>
<span>{' · '}{t('Status')}: {index.state || '-'}</span>
</li>
))}
</ul>
</Space>
) : null}
</Space>
</div>
))}
</Space>
) : (
<Empty description={t('No collections')} />
)}
<div style={{ color: '#888' }}>
{t('Estimated memory is calculated as vectors x dimension x 4 bytes (float32).')}
</div>
</Space>
) : vectorStatsError ? (
<div style={{ color: '#ff4d4f' }}>{vectorStatsError}</div>
) : (
<Empty description={t('No collections')} />
)}
<Form
layout="vertical"
form={vectorConfigForm}
onFinish={handleVectorConfigSave}
initialValues={{ type: selectedProviderType || undefined, config: {} }}
>
<Form.Item
name="type"
label={t('Database Provider')}
rules={[{ required: true, message: t('Please select a provider') }]}
>
<Select
size="large"
options={vectorProviders.map((provider) => ({
value: provider.type,
label: provider.enabled ? provider.label : `${provider.label} (${t('Coming soon')})`,
disabled: !provider.enabled,
}))}
onChange={handleProviderChange}
loading={vectorConfigLoading && !vectorProviders.length}
/>
</Form.Item>
{selectedProvider?.description ? (
<Alert
type="info"
showIcon
message={t(selectedProvider.description)}
style={{ marginBottom: 16 }}
/>
) : null}
{selectedProvider?.config_schema?.map((field) => (
<Form.Item
key={field.key}
name={['config', field.key]}
label={t(field.label)}
rules={field.required ? [{ required: true, message: t('Please input {label}', { label: t(field.label) }) }] : []}
>
{field.type === 'password' ? (
<Input.Password size="large" placeholder={field.placeholder ? t(field.placeholder) : undefined} />
) : (
<Input size="large" placeholder={field.placeholder ? t(field.placeholder) : undefined} />
)}
</Form.Item>
))}
{selectedProvider && !selectedProvider.enabled ? (
<Alert
type="warning"
showIcon
message={t('This provider is not available yet')}
style={{ marginBottom: 16 }}
/>
) : null}
<Form.Item>
<Space direction="vertical" style={{ width: '100%' }}>
<Button
type="primary"
htmlType="submit"
loading={vectorConfigSaving}
block
disabled={!selectedProvider?.enabled}
>
{t('Save')}
</Button>
<Button
danger
htmlType="button"
block
onClick={() => {
Modal.confirm({
title: t('Confirm clear vector database?'),
content: t('This will delete all collections irreversibly.'),
okText: t('Confirm Clear'),
okType: 'danger',
cancelText: t('Cancel'),
onOk: async () => {
try {
await vectorDBApi.clearAll();
message.success(t('Vector database cleared'));
await fetchVectorStats();
await fetchVectorMeta();
} catch (e: any) {
message.error(e.message || t('Clear failed'));
}
},
});
}}
>
{t('Clear Vector DB')}
</Button>
</Space>
</Form.Item>
</Form>
</>
)}
</Space>
</Space>
</Card>
), ),
}, },
]} ]}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,47 @@
import { Form, Input, Button } from 'antd';
import { useI18n } from '../../../i18n';
interface AppConfigKey {
key: string;
label: string;
default?: string;
}
interface AppSettingsTabProps {
config: Record<string, string>;
loading: boolean;
onSave: (values: Record<string, unknown>) => Promise<void>;
configKeys: AppConfigKey[];
}
// Generic key/value settings tab: renders one text input per configured key
// and hands all values to the parent-provided onSave callback on submit.
export default function AppSettingsTab({
  config,
  loading,
  onSave,
  configKeys,
}: AppSettingsTabProps) {
  const { t } = useI18n();

  // Seed each field from the saved config, falling back to the key's default,
  // then to an empty string.
  const initialValues: Record<string, string> = {};
  for (const { key, default: def } of configKeys) {
    initialValues[key] = config[key] ?? def ?? '';
  }

  return (
    <Form
      layout="vertical"
      initialValues={initialValues}
      onFinish={onSave}
      style={{ marginTop: 24 }}
      key={JSON.stringify(config)}
    >
      {configKeys.map((item) => (
        <Form.Item key={item.key} name={item.key} label={t(item.label)}>
          <Input size="large" />
        </Form.Item>
      ))}
      <Form.Item>
        <Button type="primary" htmlType="submit" loading={loading} block>
          {t('Save')}
        </Button>
      </Form.Item>
    </Form>
  );
}

View File

@@ -0,0 +1,102 @@
import { Form, Input, Button, InputNumber, Card, Radio, message } from 'antd';
import { useTheme } from '../../../contexts/ThemeContext';
import { useI18n } from '../../../i18n';
interface ThemeKeyMap {
MODE: string;
PRIMARY: string;
RADIUS: string;
TOKENS: string;
CSS: string;
}
interface AppearanceSettingsTabProps {
config: Record<string, string>;
loading: boolean;
onSave: (values: Record<string, unknown>) => Promise<void>;
themeKeys: ThemeKeyMap;
}
// Theme customization tab: live-previews edits through the theme context and
// persists them via the parent-provided onSave callback.
export default function AppearanceSettingsTab({
  config,
  loading,
  onSave,
  themeKeys,
}: AppearanceSettingsTabProps) {
  const { previewTheme } = useTheme();
  const { t } = useI18n();

  // Push current form values into the live preview. If the advanced-tokens
  // field holds invalid JSON, tokens are skipped but everything else still
  // previews.
  const applyPreview = (all: Record<string, any>) => {
    const base = {
      mode: all[themeKeys.MODE],
      primaryColor: all[themeKeys.PRIMARY],
      borderRadius: typeof all[themeKeys.RADIUS] === 'number' ? all[themeKeys.RADIUS] : undefined,
      customCSS: all[themeKeys.CSS],
    };
    try {
      const tokens = all[themeKeys.TOKENS] ? JSON.parse(all[themeKeys.TOKENS]) : undefined;
      previewTheme({ ...base, customTokens: tokens });
    } catch {
      previewTheme(base);
    }
  };

  // Validate the tokens JSON before saving; reject with a toast on bad input.
  const handleFinish = async (vals: Record<string, any>) => {
    if (vals[themeKeys.TOKENS]) {
      try {
        JSON.parse(String(vals[themeKeys.TOKENS]));
      } catch {
        message.error(t('Advanced tokens must be valid JSON'));
        return;
      }
    }
    await onSave(vals);
  };

  return (
    <Form
      layout="vertical"
      initialValues={{
        [themeKeys.MODE]: config[themeKeys.MODE] ?? 'light',
        [themeKeys.PRIMARY]: config[themeKeys.PRIMARY] ?? '#111111',
        [themeKeys.RADIUS]: Number(config[themeKeys.RADIUS] ?? '10'),
        [themeKeys.TOKENS]: config[themeKeys.TOKENS] ?? '',
        [themeKeys.CSS]: config[themeKeys.CSS] ?? '',
      }}
      onValuesChange={(_, all) => applyPreview(all)}
      onFinish={handleFinish}
      style={{ marginTop: 24 }}
      key={'appearance-' + JSON.stringify(config)}
    >
      <Card title={t('Theme')}>
        <Form.Item name={themeKeys.MODE} label={t('Theme Mode')}>
          <Radio.Group buttonStyle="solid">
            <Radio.Button value="light">{t('Light')}</Radio.Button>
            <Radio.Button value="dark">{t('Dark')}</Radio.Button>
            <Radio.Button value="system">{t('Follow System')}</Radio.Button>
          </Radio.Group>
        </Form.Item>
        <Form.Item name={themeKeys.PRIMARY} label={t('Primary Color')}>
          <Input type="color" size="large" />
        </Form.Item>
        <Form.Item name={themeKeys.RADIUS} label={t('Border Radius')}>
          <InputNumber min={0} max={24} style={{ width: '100%' }} />
        </Form.Item>
      </Card>
      <Card title={t('Advanced')} style={{ marginTop: 24 }}>
        <Form.Item name={themeKeys.TOKENS} label={t('Override AntD Tokens (JSON)')} tooltip={t('e.g. {"colorText": "#222"}')}>
          <Input.TextArea autoSize={{ minRows: 4 }} placeholder='{ "colorText": "#222" }' />
        </Form.Item>
        <Form.Item name={themeKeys.CSS} label={t('Custom CSS')}>
          <Input.TextArea autoSize={{ minRows: 6 }} placeholder={":root{ }\n/* CSS */"} />
        </Form.Item>
      </Card>
      <Form.Item style={{ marginTop: 24 }}>
        <Button type="primary" htmlType="submit" loading={loading} block>
          {t('Save')}
        </Button>
      </Form.Item>
    </Form>
  );
}

View File

@@ -0,0 +1,359 @@
import { useCallback, useEffect, useState } from 'react';
import { Form, Button, Card, Space, Spin, Empty, Alert, Select, Input, Modal, message } from 'antd';
import { vectorDBApi, type VectorDBStats, type VectorDBProviderMeta, type VectorDBCurrentConfig } from '../../../api/vectorDB';
import { useI18n } from '../../../i18n';
interface VectorDbSettingsTabProps {
isActive: boolean;
}
// Render a raw byte count as a human-readable size (e.g. "1.5 MB").
// Returns '-' for missing values so callers can show a placeholder.
const formatBytes = (bytes?: number | null) => {
  if (bytes === null || bytes === undefined) return '-';
  if (bytes === 0) return '0 B';
  const units = ['B', 'KB', 'MB', 'GB', 'TB'];
  let scaled = bytes;
  let idx = 0;
  while (scaled >= 1024 && idx < units.length - 1) {
    scaled /= 1024;
    idx += 1;
  }
  // One decimal only for small scaled values above the base unit
  // (e.g. "1.5 KB" vs "500 B" or "12 MB").
  const digits = scaled >= 10 || idx === 0 ? 0 : 1;
  return `${scaled.toFixed(digits)} ${units[idx]}`;
};
// Derive the provider-config form values from a provider's schema:
// prefer an existing saved value, then the field's declared default,
// and fall back to an empty string so every field is controlled.
const buildProviderConfigValues = (
  provider: VectorDBProviderMeta | undefined,
  existing?: Record<string, string>,
) => {
  if (!provider) return {};
  const result: Record<string, string> = {};
  for (const field of provider.config_schema || []) {
    const saved = existing?.[field.key];
    if (saved !== undefined && saved !== null) {
      result[field.key] = String(saved);
    } else if (field.default !== undefined && field.default !== null) {
      result[field.key] = String(field.default);
    } else {
      result[field.key] = '';
    }
  }
  return result;
};
// Vector-database settings tab: shows live collection statistics and lets the
// admin select/configure the storage provider or clear all stored vectors.
// Fetching is deferred until the tab is actually shown (`isActive`), so the
// parent can mount this component eagerly without triggering requests.
export default function VectorDbSettingsTab({ isActive }: VectorDbSettingsTabProps) {
  const [form] = Form.useForm();
  const { t } = useI18n();
  // Statistics panel state.
  const [vectorStats, setVectorStats] = useState<VectorDBStats | null>(null);
  const [vectorStatsLoading, setVectorStatsLoading] = useState(false);
  const [vectorStatsError, setVectorStatsError] = useState<string | null>(null);
  // Provider metadata and saved-configuration state.
  const [vectorProviders, setVectorProviders] = useState<VectorDBProviderMeta[]>([]);
  const [vectorConfig, setVectorConfig] = useState<VectorDBCurrentConfig | null>(null);
  const [vectorConfigLoading, setVectorConfigLoading] = useState(false);
  const [vectorConfigSaving, setVectorConfigSaving] = useState(false);
  const [vectorMetaError, setVectorMetaError] = useState<string | null>(null);
  // Provider currently chosen in the form; may differ from the saved config
  // until the user presses Save.
  const [selectedProviderType, setSelectedProviderType] = useState<string | null>(null);
  // Load collection statistics; errors are surfaced both inline and as a toast.
  const fetchVectorStats = useCallback(async () => {
    setVectorStatsLoading(true);
    setVectorStatsError(null);
    try {
      const data = await vectorDBApi.getStats();
      setVectorStats(data);
    } catch (e: any) {
      const msg = e?.message || t('Load failed');
      setVectorStatsError(msg);
      message.error(msg);
    } finally {
      setVectorStatsLoading(false);
    }
  }, [t]);
  // Load the provider list plus the saved configuration, then decide which
  // provider to show: the saved one if still available, else the first
  // enabled provider, else the first provider at all.
  const fetchVectorMeta = useCallback(async () => {
    setVectorConfigLoading(true);
    setVectorMetaError(null);
    try {
      const [providers, current] = await Promise.all([
        vectorDBApi.getProviders(),
        vectorDBApi.getConfig(),
      ]);
      setVectorProviders(providers);
      setVectorConfig(current);
      const enabled = providers.filter((item) => item.enabled);
      let nextType: string | null = current?.type ?? null;
      // Discard a saved type that no longer exists in the provider list.
      if (nextType && !providers.some((item) => item.type === nextType)) {
        nextType = null;
      }
      if (!nextType) {
        nextType = enabled[0]?.type ?? providers[0]?.type ?? null;
      }
      setSelectedProviderType(nextType);
      const provider = providers.find((item) => item.type === nextType);
      // Reuse saved config values only when they belong to the selected provider.
      const configValues = buildProviderConfigValues(
        provider,
        nextType === current?.type ? current?.config : undefined,
      );
      form.setFieldsValue({ type: nextType || undefined, config: configValues });
    } catch (e: any) {
      const msg = e?.message || t('Load failed');
      setVectorMetaError(msg);
      message.error(msg);
    } finally {
      setVectorConfigLoading(false);
    }
  }, [form, t]);
  // When the user switches provider in the select, repopulate the config
  // fields with that provider's saved values (if it is the active provider)
  // or its schema defaults.
  const handleProviderChange = useCallback((value: string) => {
    setSelectedProviderType(value);
    const provider = vectorProviders.find((item) => item.type === value);
    const existing = value === vectorConfig?.type ? vectorConfig?.config : undefined;
    const configValues = buildProviderConfigValues(provider, existing);
    form.setFieldsValue({ type: value, config: configValues });
  }, [form, vectorConfig, vectorProviders]);
  // Persist the provider configuration. Blank/whitespace-only fields are
  // dropped from the payload; the response refreshes both the saved config
  // and the statistics panel, and re-seeds the form from the server's view.
  const handleVectorConfigSave = useCallback(async (values: { type: string; config?: Record<string, string> }) => {
    if (!values?.type) {
      return;
    }
    setVectorConfigSaving(true);
    try {
      const configPayload = Object.fromEntries(
        Object.entries(values.config || {})
          .filter(([, val]) => val !== undefined && val !== null && String(val).trim() !== '')
          .map(([key, val]) => [key, String(val)]),
      );
      const response = await vectorDBApi.updateConfig({ type: values.type, config: configPayload });
      setVectorConfig(response.config);
      setVectorStats(response.stats);
      setVectorStatsError(null);
      setSelectedProviderType(response.config.type);
      const provider = vectorProviders.find((item) => item.type === response.config.type);
      const mergedValues = buildProviderConfigValues(provider, response.config.config);
      form.setFieldsValue({ type: response.config.type, config: mergedValues });
      message.success(t('Saved successfully'));
    } catch (e: any) {
      message.error(e?.message || t('Save failed'));
    } finally {
      setVectorConfigSaving(false);
    }
  }, [form, t, vectorProviders]);
  // Irreversibly wipe the vector database after an explicit confirmation,
  // then re-fetch both stats and metadata so the UI reflects the empty state.
  const handleClearVectorDb = useCallback(() => {
    Modal.confirm({
      title: t('Confirm clear vector database?'),
      content: t('This will delete all collections irreversibly.'),
      okText: t('Confirm Clear'),
      okType: 'danger',
      cancelText: t('Cancel'),
      onOk: async () => {
        try {
          await vectorDBApi.clearAll();
          message.success(t('Vector database cleared'));
          await fetchVectorStats();
          await fetchVectorMeta();
        } catch (e: any) {
          message.error(e?.message || t('Clear failed'));
        }
      },
    });
  }, [fetchVectorMeta, fetchVectorStats, t]);
  // Lazy-load: only fetch when the tab becomes active, and only if the data
  // is not already present or in flight.
  useEffect(() => {
    if (!isActive) {
      return;
    }
    if (!vectorProviders.length && !vectorConfigLoading) {
      fetchVectorMeta();
    }
    if (!vectorStats && !vectorStatsLoading) {
      fetchVectorStats();
    }
  }, [
    isActive,
    fetchVectorMeta,
    fetchVectorStats,
    vectorProviders.length,
    vectorConfigLoading,
    vectorStats,
    vectorStatsLoading,
  ]);
  const vectorSectionLoading = vectorStatsLoading || vectorConfigLoading;
  // With no explicit selection, fall back to the first enabled provider.
  const selectedProvider = vectorProviders.find(
    (item) => item.type === selectedProviderType || (!selectedProviderType && item.enabled),
  );
  return (
    <Card title={t('Vector Database Settings')} style={{ marginTop: 24 }}>
      <Space direction="vertical" size={24} style={{ width: '100%' }}>
        <Space direction="vertical" size={16} style={{ width: '100%' }}>
          {/* Statistics header with a combined refresh for stats + metadata. */}
          <div style={{ display: 'flex', justifyContent: 'space-between', alignItems: 'center', flexWrap: 'wrap', gap: 12 }}>
            <strong>{t('Current Statistics')}</strong>
            <Button onClick={() => { fetchVectorMeta(); fetchVectorStats(); }} loading={vectorStatsLoading || vectorConfigLoading} disabled={(vectorStatsLoading || vectorConfigLoading) && !vectorStats}>
              {t('Refresh')}
            </Button>
          </div>
          {vectorSectionLoading ? (
            <div style={{ display: 'flex', justifyContent: 'center', padding: '24px 0' }}>
              <Spin />
            </div>
          ) : (
            <>
              {vectorMetaError ? (
                <Alert type="error" showIcon message={vectorMetaError} />
              ) : null}
              {vectorStats ? (
                <Space direction="vertical" size={16} style={{ width: '100%' }}>
                  {/* Summary metrics across all collections. */}
                  <div style={{ display: 'flex', flexWrap: 'wrap', gap: 24 }}>
                    <div>
                      <div style={{ color: '#888' }}>{t('Collections')}</div>
                      <div style={{ fontSize: 20, fontWeight: 600 }}>{vectorStats.collection_count}</div>
                    </div>
                    <div>
                      <div style={{ color: '#888' }}>{t('Vectors')}</div>
                      <div style={{ fontSize: 20, fontWeight: 600 }}>{vectorStats.total_vectors}</div>
                    </div>
                    <div>
                      <div style={{ color: '#888' }}>{t('Database Size')}</div>
                      <div style={{ fontSize: 20, fontWeight: 600 }}>{formatBytes(vectorStats.db_file_size_bytes)}</div>
                    </div>
                    <div>
                      <div style={{ color: '#888' }}>{t('Estimated Memory')}</div>
                      <div style={{ fontSize: 20, fontWeight: 600 }}>{formatBytes(vectorStats.estimated_total_memory_bytes)}</div>
                    </div>
                  </div>
                  {vectorStats.collections.length ? (
                    <Space direction="vertical" style={{ width: '100%' }} size={16}>
                      {/* Per-collection detail cards, including index status. */}
                      {vectorStats.collections.map((collection) => (
                        <div key={collection.name} style={{ border: '1px solid #f0f0f0', borderRadius: 8, padding: 16 }}>
                          <Space direction="vertical" size={12} style={{ width: '100%' }}>
                            <div style={{ display: 'flex', justifyContent: 'space-between', alignItems: 'center', flexWrap: 'wrap', gap: 12 }}>
                              <strong>{collection.name}</strong>
                              <span style={{ color: '#888' }}>
                                {collection.is_vector_collection && collection.dimension
                                  ? `${t('Dimension')}: ${collection.dimension}`
                                  : t('Non-vector collection')}
                              </span>
                            </div>
                            <div>{t('Vectors')}: {collection.row_count}</div>
                            {collection.is_vector_collection ? (
                              <div>{t('Estimated memory')}: {formatBytes(collection.estimated_memory_bytes)}</div>
                            ) : null}
                            {collection.indexes.length ? (
                              <Space direction="vertical" size={4} style={{ width: '100%' }}>
                                <span>{t('Indexes')}:</span>
                                <ul style={{ paddingLeft: 20, margin: 0 }}>
                                  {collection.indexes.map((index) => (
                                    <li key={`${collection.name}-${index.index_name || 'default'}`}>
                                      <span>{index.index_name || t('Unnamed index')}</span>
                                      <span>{' · '}{index.index_type || '-'}</span>
                                      <span>{' · '}{index.metric_type || '-'}</span>
                                      <span>{' · '}{t('Indexed rows')}: {index.indexed_rows}</span>
                                      <span>{' · '}{t('Pending rows')}: {index.pending_index_rows}</span>
                                      <span>{' · '}{t('Status')}: {index.state || '-'}</span>
                                    </li>
                                  ))}
                                </ul>
                              </Space>
                            ) : null}
                          </Space>
                        </div>
                      ))}
                    </Space>
                  ) : (
                    <Empty description={t('No collections')} />
                  )}
                  <div style={{ color: '#888' }}>
                    {t('Estimated memory is calculated as vectors x dimension x 4 bytes (float32).')}
                  </div>
                </Space>
              ) : vectorStatsError ? (
                <div style={{ color: '#ff4d4f' }}>{vectorStatsError}</div>
              ) : (
                <Empty description={t('No collections')} />
              )}
              {/* Provider selection + dynamic, schema-driven config fields. */}
              <Form
                layout="vertical"
                form={form}
                onFinish={handleVectorConfigSave}
                initialValues={{ type: selectedProviderType || undefined, config: {} }}
              >
                <Form.Item
                  name="type"
                  label={t('Database Provider')}
                  rules={[{ required: true, message: t('Please select a provider') }]}
                >
                  <Select
                    size="large"
                    options={vectorProviders.map((provider) => ({
                      value: provider.type,
                      label: provider.enabled ? provider.label : `${provider.label} (${t('Coming soon')})`,
                      disabled: !provider.enabled,
                    }))}
                    onChange={handleProviderChange}
                    loading={vectorConfigLoading && !vectorProviders.length}
                  />
                </Form.Item>
                {selectedProvider?.description ? (
                  <Alert
                    type="info"
                    showIcon
                    message={t(selectedProvider.description)}
                    style={{ marginBottom: 16 }}
                  />
                ) : null}
                {selectedProvider?.config_schema?.map((field) => (
                  <Form.Item
                    key={field.key}
                    name={['config', field.key]}
                    label={t(field.label)}
                    rules={field.required ? [{ required: true, message: t('Please input {label}', { label: t(field.label) }) }] : []}
                  >
                    {field.type === 'password' ? (
                      <Input.Password size="large" placeholder={field.placeholder ? t(field.placeholder) : undefined} />
                    ) : (
                      <Input size="large" placeholder={field.placeholder ? t(field.placeholder) : undefined} />
                    )}
                  </Form.Item>
                ))}
                {selectedProvider && !selectedProvider.enabled ? (
                  <Alert
                    type="warning"
                    showIcon
                    message={t('This provider is not available yet')}
                    style={{ marginBottom: 16 }}
                  />
                ) : null}
                <Form.Item>
                  <Space direction="vertical" style={{ width: '100%' }}>
                    <Button
                      type="primary"
                      htmlType="submit"
                      loading={vectorConfigSaving}
                      block
                      disabled={!selectedProvider?.enabled}
                    >
                      {t('Save')}
                    </Button>
                    <Button
                      danger
                      htmlType="button"
                      block
                      onClick={handleClearVectorDb}
                    >
                      {t('Clear Vector DB')}
                    </Button>
                  </Space>
                </Form.Item>
              </Form>
            </>
          )}
        </Space>
      </Space>
    </Card>
  );
}

View File

@@ -18,17 +18,26 @@ import { AppWindowsProvider, useAppWindows } from '../contexts/AppWindowsContext
import { AppWindowsLayer } from '../apps/AppWindowsLayer'; import { AppWindowsLayer } from '../apps/AppWindowsLayer';
const ShellBody = memo(function ShellBody() { const ShellBody = memo(function ShellBody() {
const { navKey = 'files' } = useParams(); const params = useParams<{ navKey?: string; '*': string }>();
const navKey = params.navKey ?? 'files';
const subPath = params['*'] ?? '';
const navigate = useNavigate(); const navigate = useNavigate();
const [collapsed, setCollapsed] = useState(false); const [collapsed, setCollapsed] = useState(false);
const { windows, closeWindow, toggleMax, bringToFront, updateWindow } = useAppWindows(); const { windows, closeWindow, toggleMax, bringToFront, updateWindow } = useAppWindows();
const settingsTab = navKey === 'settings' ? (subPath.split('/')[0] || undefined) : undefined;
return ( return (
<Layout style={{ minHeight: '100vh', background: 'var(--ant-color-bg-layout)' }}> <Layout style={{ minHeight: '100vh', background: 'var(--ant-color-bg-layout)' }}>
<SideNav <SideNav
collapsed={collapsed} collapsed={collapsed}
onToggle={() => setCollapsed(c => !c)} onToggle={() => setCollapsed(c => !c)}
activeKey={navKey} activeKey={navKey}
onChange={(key) => navigate(`/${key}`)} onChange={(key) => {
if (key === 'settings') {
navigate('/settings/appearance', { replace: true });
} else {
navigate(`/${key}`);
}
}}
/> />
<Layout style={{ background: 'var(--ant-color-bg-layout)' }}> <Layout style={{ background: 'var(--ant-color-bg-layout)' }}>
<TopHeader collapsed={collapsed} onToggle={() => setCollapsed(c => !c)} /> <TopHeader collapsed={collapsed} onToggle={() => setCollapsed(c => !c)} />
@@ -43,7 +52,12 @@ const ShellBody = memo(function ShellBody() {
{navKey === 'processors' && <ProcessorsPage />} {navKey === 'processors' && <ProcessorsPage />}
{navKey === 'offline' && <OfflineDownloadPage />} {navKey === 'offline' && <OfflineDownloadPage />}
{navKey === 'plugins' && <PluginsPage />} {navKey === 'plugins' && <PluginsPage />}
{navKey === 'settings' && <SystemSettingsPage />} {navKey === 'settings' && (
<SystemSettingsPage
tabKey={settingsTab}
onTabNavigate={(key, options) => navigate(`/settings/${key}`, options)}
/>
)}
{navKey === 'logs' && <LogsPage />} {navKey === 'logs' && <LogsPage />}
{navKey === 'backup' && <BackupPage />} {navKey === 'backup' && <BackupPage />}
</Flex> </Flex>

View File

@@ -0,0 +1,361 @@
/* AI Settings — hero bar at the top of the tab.
   Soft indigo/violet gradient with a matching translucent border so it
   reads as a highlighted panel in both light and dark themes. */
.fx-ai-top-bar {
  display: flex;
  justify-content: space-between;
  align-items: center;
  padding: 20px 28px;
  border-radius: 16px;
  background: linear-gradient(120deg, rgba(99, 102, 241, 0.16), rgba(167, 139, 250, 0.12));
  border: 1px solid rgba(99, 102, 241, 0.15);
}
/* Provider card: one card per configured AI provider.
   Uses Ant Design CSS variables (--ant-*) so it follows the active theme. */
.fx-ai-provider-card {
  border-radius: 16px;
  overflow: hidden; /* clip children to the rounded corners */
  box-shadow: var(--ant-box-shadow-secondary);
}
/* Collapsible card header row (logo + name + status). Fixed height keeps
   all provider cards visually aligned in the list. */
.fx-ai-provider-header {
  display: flex;
  justify-content: flex-start;
  align-items: center;
  gap: 16px;
  height: 80px;
  width: 100%;
}
/* Logo + text cluster on the left side of the header. */
.fx-ai-provider-meta {
  display: flex;
  align-items: center;
  gap: 16px;
}
/* Provider logo image; fill-alter background + padding gives a visible
   tile even for logos with transparent backgrounds. */
.fx-ai-provider-logo {
  width: 36px;
  height: 36px;
  border-radius: 12px;
  object-fit: cover;
  background: var(--ant-color-fill-alter);
  padding: 4px;
}
.fx-ai-provider-name {
  font-size: 16px;
  font-weight: 600;
}
/* Secondary line under the provider name (status tag, model count, etc.). */
.fx-ai-provider-sub {
  display: flex;
  align-items: center;
  gap: 12px;
  margin-top: 4px;
  color: var(--ant-color-text-tertiary);
}
/* Model list inside a provider card: vertical stack of model rows. */
.fx-ai-model-list {
  display: flex;
  flex-direction: column;
  gap: 8px;
}
/* One model row: info block on the left, action buttons on the right.
   flex-start alignment keeps actions pinned to the top of tall rows. */
.fx-ai-model-item {
  display: flex;
  justify-content: space-between;
  align-items: flex-start;
  gap: 12px;
  padding: 12px 14px;
  border-radius: 10px;
  background: var(--ant-color-fill-secondary);
  border: 1px solid var(--ant-color-border);
}
/* Left column of a model row; flex: 1 lets it absorb remaining width. */
.fx-ai-model-info {
  display: flex;
  flex-direction: column;
  gap: 6px;
  flex: 1;
}
/* Title line: model name plus capability tags side by side. */
.fx-ai-model-header {
  display: flex;
  align-items: center;
  gap: 8px;
}
.fx-ai-model-title {
  margin: 0;
  font-size: 15px;
}
/* Pill-shaped capability tags (overrides Ant Design's default tag shape). */
.fx-ai-model-tags .ant-tag {
  border-radius: 999px;
  padding: 0 8px;
  line-height: 20px;
}
/* Description + metrics block under the title. */
.fx-ai-model-meta {
  display: flex;
  flex-direction: column;
  gap: 6px;
}
.fx-ai-model-desc {
  line-height: 1.4;
}
/* Usage/metric line; quaternary text de-emphasizes it below the description. */
.fx-ai-model-metrics {
  color: var(--ant-color-text-quaternary);
}
/* Action buttons column, vertically centered against the info block. */
.fx-ai-model-actions {
  align-self: center;
}
/* Keep icon-only buttons from collapsing narrower than a tap target. */
.fx-ai-model-actions .ant-btn {
  min-width: 32px;
}
/* Placeholder card shown when a provider has no models configured. */
.fx-ai-empty-card {
  border-radius: 16px;
  background: var(--ant-color-fill-tertiary);
}
/* Action buttons on the right of a provider header; wraps on narrow widths. */
.fx-ai-provider-actions {
  display: flex;
  align-items: center;
  justify-content: flex-end;
  gap: 8px;
  flex-wrap: wrap;
}
/* "Default models" card: assigns a default model per capability. */
.fx-ai-defaults-card {
  border-radius: 16px;
  box-shadow: var(--ant-box-shadow-secondary);
}
/* One capability row inside the defaults card (icon + label | selector).
   Divider between rows, removed on the last row below. */
.fx-ai-default-row {
  display: flex;
  align-items: center;
  justify-content: space-between;
  gap: 16px;
  padding: 12px 0;
  border-bottom: 1px solid var(--ant-color-border-secondary);
}
.fx-ai-default-row:last-child {
  border-bottom: none;
}
/* Icon + text cluster on the left of a defaults row. */
.fx-ai-default-meta {
  display: flex;
  gap: 16px;
  align-items: center;
}
/* Rounded capability icon tile; background color comes from the
   .fx-ai-chat / .fx-ai-vision / ... gradient classes applied alongside. */
.fx-ai-default-icon {
  width: 46px;
  height: 46px;
  border-radius: 16px;
  display: flex;
  align-items: center;
  justify-content: center;
  font-size: 22px;
  color: var(--ant-color-text-light-solid);
}
.fx-ai-default-desc {
  color: var(--ant-color-text-tertiary);
}
/* Custom option renderers used inside Ant Design <Select> dropdowns. */

/* Provider option: small logo followed by the provider name. */
.fx-ai-provider-option {
  display: flex;
  align-items: center;
  gap: 8px;
}
.fx-ai-provider-option img {
  width: 20px;
  height: 20px;
  border-radius: 6px;
  object-fit: cover;
}
/* Model option: model name on the left, provider tag on the right. */
.fx-ai-model-option {
  display: flex;
  align-items: center;
  gap: 8px;
}
/* Truncate long model names with an ellipsis; min-width: 0 is required for
   text-overflow to work inside a flex item. */
.fx-ai-model-name {
  flex: 1;
  min-width: 0;
  white-space: nowrap;
  overflow: hidden;
  text-overflow: ellipsis;
}
/* Small pill showing which provider a model belongs to; nowrap keeps the
   tag on one line even when the option is narrow. */
.fx-ai-model-provider-tag {
  padding: 0 8px;
  border-radius: 999px;
  background: var(--ant-color-fill-tertiary);
  color: var(--ant-color-text-tertiary);
  font-size: 12px;
  line-height: 20px;
  white-space: nowrap;
}
/* "Add provider" wizard: steps header, template chooser grid, and the
   summary row shown once a template has been picked. */
.fx-ai-add-provider-steps {
  padding: 0 8px;
}
/* Responsive template grid; auto-fill keeps cards at >= 240px wide. */
.fx-ai-template-grid {
  display: grid;
  grid-template-columns: repeat(auto-fill, minmax(240px, 1fr));
  gap: 16px;
}
/* Clickable template card. Transparent border reserves space so the hover
   border color change does not shift layout. */
.fx-ai-template-card {
  display: flex;
  align-items: center;
  justify-content: space-between;
  padding: 16px;
  border-radius: 16px;
  background: var(--ant-color-fill-quaternary);
  border: 1px solid transparent;
  cursor: pointer;
  transition: border-color 0.2s ease, box-shadow 0.2s ease, transform 0.2s ease;
}
/* Hover: primary border, shadow, and a slight lift. */
.fx-ai-template-card:hover {
  border-color: var(--ant-color-primary);
  box-shadow: var(--ant-box-shadow-secondary);
  transform: translateY(-2px);
}
/* Keyboard focus ring for accessibility. */
.fx-ai-template-card:focus-visible {
  outline: 2px solid var(--ant-color-primary);
  outline-offset: 2px;
}
/* Icon + text cluster on the left of a template card. */
.fx-ai-template-card-main {
  display: flex;
  align-items: center;
  gap: 16px;
}
.fx-ai-template-icon {
  display: flex;
  align-items: center;
  justify-content: center;
}
/* Larger icon variant used in the post-selection summary row. */
.fx-ai-template-icon.summary {
  width: 56px;
  height: 56px;
  border-radius: 18px;
  font-size: 26px;
}
/* Provider logo inside the icon tile inherits the tile's rounding. */
.fx-ai-template-icon img {
  width: 100%;
  height: 100%;
  border-radius: inherit;
  object-fit: cover;
}
.fx-ai-template-text {
  display: flex;
  flex-direction: column;
  gap: 6px;
}
.fx-ai-template-name {
  font-size: 15px;
  font-weight: 600;
  color: var(--ant-color-text);
}
.fx-ai-template-desc {
  font-size: 12px;
  color: var(--ant-color-text-tertiary);
}
/* Summary row shown after a template is chosen (step 2 of the wizard). */
.fx-ai-template-summary {
  display: flex;
  align-items: center;
  gap: 16px;
  padding: 16px;
  border-radius: 16px;
  background: var(--ant-color-fill-quaternary);
}
.fx-ai-template-summary-text {
  display: flex;
  flex-direction: column;
  gap: 6px;
}
/* Summary uses slightly larger type than the grid cards. */
.fx-ai-template-summary-text .fx-ai-template-name {
  font-size: 18px;
}
.fx-ai-template-summary-text .fx-ai-template-desc {
  font-size: 13px;
}
/* Chevron indicating the card is clickable. */
.fx-ai-template-arrow {
  color: var(--ant-color-text-quaternary);
  font-size: 16px;
}
/* Remote model picker: scrollable checklist of models fetched from a
   provider's API, capped at 320px so long lists don't dominate the modal. */
.fx-ai-remote-models {
  display: flex;
  flex-direction: column;
  gap: 12px;
  max-height: 320px;
  padding: 12px;
  border-radius: 12px;
  background: var(--ant-color-fill-quaternary);
  overflow-y: auto;
}
.fx-ai-remote-item {
  padding: 8px 0;
}
/* Stretch the checkbox hit area across the whole row. */
.fx-ai-remote-item .ant-checkbox-wrapper {
  width: 100%;
}
/* Name + description stacked inside each checkbox label. */
.fx-ai-remote-item-main {
  display: flex;
  flex-direction: column;
  gap: 6px;
}
.fx-ai-remote-desc {
  color: var(--ant-color-text-tertiary);
}
/* Capability color utilities: gradient backgrounds applied to
   .fx-ai-default-icon (and similar tiles) to color-code each model
   capability. Fixed hex values are intentional — these brand-style
   accents do not change with the theme. */
.fx-ai-chat {
  background: linear-gradient(135deg, #805ad5, #6b46c1); /* purple — chat */
}
.fx-ai-vision {
  background: linear-gradient(135deg, #4c6ef5, #4263eb); /* blue — vision */
}
.fx-ai-embedding {
  background: linear-gradient(135deg, #f7b733, #fc4a1a); /* amber/orange — embedding */
}
.fx-ai-rerank {
  background: linear-gradient(135deg, #0ea5e9, #0284c7); /* sky blue — rerank */
}
.fx-ai-voice {
  background: linear-gradient(135deg, #f97316, #ea580c); /* orange — voice */
}
.fx-ai-tools {
  background: linear-gradient(135deg, #ec4899, #db2777); /* pink — tool use */
}