Compare commits

...

13 Commits

Author SHA1 Message Date
shiyu
1679b03d3a chore: update version to v1.7.1 in service configuration 2026-01-12 10:24:18 +08:00
shiyu
ab6562fc79 feat: add new AI models and improve UI layout in settings 2026-01-11 23:18:54 +08:00
shiyu
87770176b6 feat: expand AI provider support and update descriptions
- Updated AIProviderBase and AIProviderUpdate to support new API formats: 'anthropic' and 'ollama'.
- Added SVG icons for Anthropic, Azure, Ollama, and Z.ai providers.
- Updated AI provider payload interface to include new formats.
- Enhanced English and Chinese localization for new providers and updated descriptions for OpenAI and Anthropic.
- Added new provider templates for Azure OpenAI, Anthropic, Z.ai, and Ollama in the settings tab.
- Updated the API format selection in the settings tab to include new options.
2026-01-11 22:29:22 +08:00
shiyu
e7cf8dbdb8 chore: update version to v1.7.0 in service configuration 2026-01-11 14:09:29 +08:00
shiyu
e7eafdee97 feat: add session locking mechanism in Telegram adapter and improve SPA fallback handling 2026-01-11 14:08:52 +08:00
shiyu
051b49d3f6 feat: improve error handling in propfind function and enhance directory listing logic 2026-01-11 13:32:48 +08:00
shiyu
b059b0eb44 feat: enhance Telegram adapter to support parsing legacy session_string and fetching thumbnails 2026-01-11 11:20:10 +08:00
shiyu
59ad2cb622 feat: update AIProvider structure to include has_api_key and adjust API key handling in settings 2026-01-10 13:22:07 +08:00
shiyu
6b2ada0b42 refactor: imports and reorganize domain structure
- Updated import statements across multiple modules to use relative imports for better encapsulation.
- Consolidated and organized the `__init__.py` files in various domain packages to expose necessary classes and functions.
- Improved code readability and maintainability by grouping related imports and removing unused ones.
- Ensured consistent import patterns across the domain, enhancing the overall structure of the codebase.
2026-01-09 17:28:10 +08:00
时雨
a727e77341 feat: Implement AI Agent with enhanced tool processing capabilities (#89)
* feat: Implement AI Agent with tool processing capabilities

- Added tools for listing and running processors in the agent.
- Created data models for agent chat requests and tool calls.
- Developed API integration for agent chat and streaming responses.
- Built the AI Agent widget with a user interface for interaction.
- Styled the agent components for better user experience.

* feat: 增强 AI 助手工具功能,添加文件操作和搜索功能,更新界面显示

* feat: 更新 AI 助手组件

* feat: 更新 AiAgentWidget 组件样式,调整背景和边距以提升界面一致性
2026-01-09 16:19:20 +08:00
shiyu
4638356a45 chore: update version to v1.6.1 2026-01-08 12:20:26 +08:00
shiyu
e51344b43e feat: enhance plugin frame URL building and improve query handling for plugin styles and entry 2026-01-08 11:34:38 +08:00
shiyu
b7685db0e8 feat: add versioning support for plugin assets and improve loading status handling 2026-01-08 10:13:09 +08:00
88 changed files with 4271 additions and 327 deletions

View File

@@ -11,10 +11,11 @@ from domain.processors import api as processors
from domain.share import api as share
from domain.tasks import api as tasks
from domain.ai import api as ai
from domain.agent import api as agent
from domain.virtual_fs import api as virtual_fs
from domain.virtual_fs.mapping import s3_api, webdav_api
from domain.virtual_fs.search import search_api
from domain.audit import router as audit
from domain.audit import api as audit
def include_routers(app: FastAPI):
@@ -30,9 +31,10 @@ def include_routers(app: FastAPI):
app.include_router(backup.router)
app.include_router(ai.router_vector_db)
app.include_router(ai.router_ai)
app.include_router(agent.router)
app.include_router(plugins.router)
app.include_router(webdav_api.router)
app.include_router(s3_api.router)
app.include_router(offline_downloads.router)
app.include_router(email.router)
app.include_router(audit)
app.include_router(audit.router)

View File

@@ -1,6 +1,6 @@
from tortoise import Tortoise
from domain.adapters.registry import runtime_registry
from domain.adapters import runtime_registry
TORTOISE_ORM = {
"connections": {"default": "sqlite://data/db/db.sqlite3"},

7
domain/__init__.py Normal file
View File

@@ -0,0 +1,7 @@
"""
domain业务域层
约定:跨包只从各子包 `__init__.py` 导入公开 API。
"""
__all__: list[str] = []

View File

@@ -1 +1,24 @@
from .providers import BaseAdapter
from .registry import (
RuntimeRegistry,
discover_adapters,
get_config_schema,
get_config_schemas,
normalize_adapter_type,
runtime_registry,
)
from .service import AdapterService
from .types import AdapterCreate, AdapterOut
__all__ = [
"BaseAdapter",
"RuntimeRegistry",
"discover_adapters",
"get_config_schema",
"get_config_schemas",
"normalize_adapter_type",
"runtime_registry",
"AdapterService",
"AdapterCreate",
"AdapterOut",
]

View File

@@ -4,10 +4,9 @@ from fastapi import APIRouter, Depends, Request
from api.response import success
from domain.audit import AuditAction, audit
from domain.adapters.service import AdapterService
from domain.adapters.types import AdapterCreate
from domain.auth.service import get_current_active_user
from domain.auth.types import User
from domain.auth import User, get_current_active_user
from .service import AdapterService
from .types import AdapterCreate
router = APIRouter(prefix="/api/adapters", tags=["adapters"])

View File

@@ -1,11 +1,26 @@
from typing import List, Dict, Tuple, AsyncIterator
import asyncio
import base64
import io
import os
import struct
from models import StorageAdapter
from telethon import TelegramClient
from telethon.crypto import AuthKey
from telethon.sessions import StringSession
from telethon.tl import types
import socks
_SESSION_LOCKS: Dict[str, asyncio.Lock] = {}
def _get_session_lock(session_string: str) -> asyncio.Lock:
lock = _SESSION_LOCKS.get(session_string)
if lock is None:
lock = asyncio.Lock()
_SESSION_LOCKS[session_string] = lock
return lock
# 适配器类型标识
ADAPTER_TYPE = "telegram"
@@ -54,9 +69,93 @@ class TelegramAdapter:
if not all([self.api_id, self.api_hash, self.session_string, self.chat_id]):
raise ValueError("Telegram 适配器需要 api_id, api_hash, session_string 和 chat_id")
@staticmethod
def _parse_legacy_session_string(value: str) -> StringSession:
    """
    Parse the legacy session_string layout into a Telethon StringSession.

    Layout: version(1B char) + base64(data), where
    data = dc_id(1B) + ip_len(2B) + ip(ASCII, ip_len bytes) + port(2B) + auth_key(256B).

    Raises ValueError when the input is empty or the payload length is wrong.
    """
    text = (value or "").strip()
    if not text:
        raise ValueError("session_string 为空")
    payload = text[1:] if text.startswith("1") else text
    raw = base64.urlsafe_b64decode(payload)
    if len(raw) < 1 + 2 + 2 + 256:
        raise ValueError("legacy session 数据长度不足")
    dc_id = raw[0]
    (ip_len,) = struct.unpack(">H", raw[1:3])
    if len(raw) != 1 + 2 + ip_len + 2 + 256:
        raise ValueError("legacy session 数据长度不匹配")
    offset = 3
    ip = raw[offset:offset + ip_len].decode("utf-8")
    (port,) = struct.unpack(">H", raw[offset + ip_len:offset + ip_len + 2])
    auth_key_bytes = raw[offset + ip_len + 2:offset + ip_len + 2 + 256]
    session = StringSession()
    session.set_dc(dc_id, ip, port)
    session.auth_key = AuthKey(auth_key_bytes)
    return session
@staticmethod
def _pick_photo_thumb(thumbs: list | None):
    """
    Select the best thumbnail candidate from a Telegram size list.

    Preference order: the largest inline (cached/stripped) size by raw byte
    length, otherwise the largest regular size; None when nothing usable.
    """
    if not thumbs:
        return None
    inline_sizes = []
    regular_sizes = []
    for candidate in thumbs:
        if isinstance(candidate, (types.PhotoCachedSize, types.PhotoStrippedSize)):
            inline_sizes.append(candidate)
        elif isinstance(candidate, (types.PhotoSize, types.PhotoSizeProgressive)):
            if not isinstance(candidate, types.PhotoSizeEmpty):
                regular_sizes.append(candidate)
    if inline_sizes:
        # Inline sizes carry their bytes directly; pick the largest payload.
        return sorted(inline_sizes, key=lambda t: len(getattr(t, "bytes", b"") or b""))[-1]
    if regular_sizes:
        def _weight(t):
            # Progressive sizes report a list of byte counts; use the largest.
            if isinstance(t, types.PhotoSizeProgressive):
                return max(t.sizes or [0])
            return int(getattr(t, "size", 0) or 0)
        return sorted(regular_sizes, key=_weight)[-1]
    return None
def _build_session(self) -> StringSession:
    """
    Build a StringSession from self.session_string, tolerating older formats.

    Order of attempts: the string as-is, the string with the "1" version
    prefix restored, then the legacy binary layout. Raises ValueError when
    every attempt fails or the string is blank.
    """
    candidate = (self.session_string or "").strip()
    if not candidate:
        raise ValueError("Telegram 适配器 session_string 为空")
    try:
        return StringSession(candidate)
    except Exception:
        pass
    # Some tools strip the leading "1" version prefix; retry with it restored.
    if not candidate.startswith("1"):
        try:
            return StringSession("1" + candidate)
        except Exception:
            pass
    try:
        return self._parse_legacy_session_string(candidate)
    except Exception as exc:
        raise ValueError("Telegram session_string 无效,请使用 Telethon StringSession 重新生成") from exc
def _get_client(self) -> TelegramClient:
    """Create a new TelegramClient for this adapter's credentials and proxy."""
    # Bug fix: the block contained two return statements (a leftover of the
    # pre-refactor line plus its replacement); the first one made the
    # legacy-session handling in _build_session() unreachable. Keep only the
    # _build_session() path, which also accepts legacy session_string formats.
    return TelegramClient(self._build_session(), self.api_id, self.api_hash, proxy=self.proxy)
def get_effective_root(self, sub_path: str | None) -> str:
return ""
@@ -198,6 +297,41 @@ class TelegramAdapter:
async def mkdir(self, root: str, rel: str):
raise NotImplementedError("Telegram 适配器不支持创建目录。")
async def get_thumbnail(self, root: str, rel: str, size: str = "medium"):
    """
    Best-effort download of a thumbnail for a Telegram-backed file.

    `rel` is expected to start with "<message_id>_"; the numeric prefix
    selects the message. Returns the thumbnail bytes, or None when the path
    is malformed, the message/thumbnail is missing, or any error occurs.
    NOTE(review): `size` is currently unused — presumably reserved for
    future size selection; confirm with callers.
    """
    try:
        message_id_str, _ = rel.split('_', 1)
        message_id = int(message_id_str)
    except (ValueError, IndexError):
        return None
    client = self._get_client()
    try:
        await client.connect()
        message = await client.get_messages(self.chat_id, ids=message_id)
        if not message:
            return None
        # Documents/videos expose .thumbs; plain photos expose .sizes.
        doc = message.document or message.video
        thumbs = None
        if doc and getattr(doc, "thumbs", None):
            thumbs = list(doc.thumbs or [])
        elif message.photo and getattr(message.photo, "sizes", None):
            thumbs = list(message.photo.sizes or [])
        thumb = self._pick_photo_thumb(thumbs)
        if not thumb:
            return None
        result = await client.download_media(message, bytes, thumb=thumb)
        if isinstance(result, (bytes, bytearray)):
            return bytes(result)
        return None
    except Exception:
        # Thumbnails are optional: any failure degrades to "no thumbnail".
        return None
    finally:
        if client.is_connected():
            await client.disconnect()
async def delete(self, root: str, rel: str):
"""删除一个文件 (即一条消息)"""
try:
@@ -236,6 +370,8 @@ class TelegramAdapter:
raise HTTPException(status_code=400, detail=f"无效的文件路径格式: {rel}")
client = self._get_client()
lock = _get_session_lock(self.session_string)
await lock.acquire()
try:
await client.connect()
@@ -273,7 +409,6 @@ class TelegramAdapter:
headers = {
"Accept-Ranges": "bytes",
"Content-Type": mime_type,
"Content-Length": str(file_size),
}
if range_header:
@@ -285,7 +420,6 @@ class TelegramAdapter:
if start >= file_size or end >= file_size or start > end:
raise HTTPException(status_code=416, detail="Requested Range Not Satisfiable")
status = 206
headers["Content-Length"] = str(end - start + 1)
headers["Content-Range"] = f"bytes {start}-{end}/{file_size}"
except ValueError:
raise HTTPException(status_code=400, detail="Invalid Range header")
@@ -304,18 +438,28 @@ class TelegramAdapter:
if downloaded >= limit:
break
finally:
if client.is_connected():
await client.disconnect()
try:
if client.is_connected():
await client.disconnect()
finally:
lock.release()
return StreamingResponse(iterator(), status_code=status, headers=headers)
except HTTPException:
if client.is_connected():
await client.disconnect()
lock.release()
raise
except FileNotFoundError as e:
if client.is_connected():
await client.disconnect()
lock.release()
raise HTTPException(status_code=404, detail=str(e))
except Exception as e:
if client.is_connected():
await client.disconnect()
lock.release()
raise HTTPException(status_code=500, detail=f"Streaming failed: {str(e)}")
async def stat_file(self, root: str, rel: str):

View File

@@ -4,7 +4,7 @@ from importlib import import_module
from typing import Callable, Dict
from models import StorageAdapter
from domain.adapters.providers.base import BaseAdapter
from .providers.base import BaseAdapter
AdapterFactory = Callable[[StorageAdapter], BaseAdapter]
@@ -21,7 +21,7 @@ def normalize_adapter_type(value: str | None) -> str | None:
def discover_adapters():
"""扫描 domain.adapters.providers 包, 自动注册适配器类型、工厂与配置 schema。"""
from domain.adapters import providers as adapters_pkg
from . import providers as adapters_pkg
TYPE_MAP.clear()
CONFIG_SCHEMAS.clear()

View File

@@ -2,13 +2,13 @@ from typing import Optional
from fastapi import HTTPException
from domain.adapters.registry import (
from domain.auth import User
from .registry import (
get_config_schemas,
normalize_adapter_type,
runtime_registry,
)
from domain.adapters.types import AdapterCreate, AdapterOut
from domain.auth.types import User
from .types import AdapterCreate, AdapterOut
from models import StorageAdapter

9
domain/agent/__init__.py Normal file
View File

@@ -0,0 +1,9 @@
from .service import AgentService
from .types import AgentChatContext, AgentChatRequest, PendingToolCall
__all__ = [
"AgentService",
"AgentChatContext",
"AgentChatRequest",
"PendingToolCall",
]

38
domain/agent/api.py Normal file
View File

@@ -0,0 +1,38 @@
from typing import Annotated
from fastapi import APIRouter, Depends, Request
from fastapi.responses import StreamingResponse
from api.response import success
from domain.audit import AuditAction, audit
from domain.auth import User, get_current_active_user
from .service import AgentService
from .types import AgentChatRequest
router = APIRouter(prefix="/api/agent", tags=["agent"])
@router.post("/chat")
@audit(action=AuditAction.CREATE, description="Agent 对话", body_fields=["auto_execute"])
async def chat(
    request: Request,
    payload: AgentChatRequest,
    current_user: Annotated[User, Depends(get_current_active_user)],
):
    """Non-streaming agent chat: run the tool loop to completion and return the result."""
    data = await AgentService.chat(payload, current_user)
    return success(data)
@router.post("/chat/stream")
@audit(action=AuditAction.CREATE, description="Agent 对话SSE", body_fields=["auto_execute"])
async def chat_stream(
    request: Request,
    payload: AgentChatRequest,
    current_user: Annotated[User, Depends(get_current_active_user)],
):
    """Streaming agent chat: relay AgentService events as server-sent events."""
    return StreamingResponse(
        AgentService.chat_stream(payload, current_user),
        media_type="text/event-stream",
        # Disable intermediary caching so SSE frames reach the client live.
        headers={"Cache-Control": "no-cache"},
    )

470
domain/agent/service.py Normal file
View File

@@ -0,0 +1,470 @@
import asyncio
import json
import uuid
from typing import Any, Dict, List, Optional, Tuple
import httpx
from fastapi import HTTPException
from domain.ai import AIProviderService, MissingModelError, chat_completion, chat_completion_stream
from domain.auth import User
from .tools import get_tool, openai_tools, tool_result_to_content
from .types import AgentChatRequest, PendingToolCall
def _normalize_path(p: Optional[str]) -> Optional[str]:
if not p:
return None
s = str(p).strip()
if not s:
return None
s = s.replace("\\", "/")
if not s.startswith("/"):
s = "/" + s
s = s.rstrip("/") or "/"
return s
def _build_system_prompt(current_path: Optional[str]) -> str:
    """
    Build the agent system prompt: tool catalog plus usage rules, with the
    current file-manager directory appended when known. The prompt text is
    user-facing model input and must stay exactly as written.
    """
    lines = [
        "你是 Foxel 的 AI 助手。",
        "你可以通过工具对文件/目录进行查询、读写、移动、复制、删除以及运行处理器processor",
        "",
        "可用工具:",
        "- vfs_list_dir浏览目录列出 entries + pagination",
        "- vfs_stat查看文件/目录信息。",
        "- vfs_read_text读取文本文件内容不支持二进制",
        "- vfs_search搜索文件vector/filename",
        "- vfs_write_text写入文本文件内容覆盖",
        "- vfs_mkdir创建目录。",
        "- vfs_delete删除文件或目录。",
        "- vfs_move移动路径。",
        "- vfs_copy复制路径。",
        "- vfs_rename重命名路径。",
        "- processors_list获取可用处理器列表含 type/name/config_schema/produces_file/supports_directory",
        "- processors_run运行处理器处理文件或目录会返回 task_id 或 task_ids",
        "",
        "规则:",
        "1) 读操作vfs_list_dir/vfs_stat/vfs_read_text/vfs_search可直接调用工具。",
        "2) 写/改/删操作vfs_write_text/vfs_mkdir/vfs_delete/vfs_move/vfs_copy/vfs_rename/processors_run默认需要用户确认只有在开启自动执行时才应直接执行。",
        "3) 用户未给出明确路径时先追问;若提供了“当前文件管理目录”,可以基于它把相对描述补全为绝对路径(以 / 开头)。",
        "4) 修改文件内容先读取vfs_read_text→给出改动点→确认后再写入vfs_write_text",
        "5) processors_run 返回任务 id 后,说明任务已提交,可在任务队列查看进度。",
        "6) 回答保持简洁中文。",
    ]
    # Anchor relative-path interpretation to the user's current directory.
    if current_path:
        lines.append("")
        lines.append(f"当前文件管理目录:{current_path}")
    return "\n".join(lines)
def _ensure_tool_call_ids(message: Dict[str, Any]) -> Dict[str, Any]:
tool_calls = message.get("tool_calls")
if not isinstance(tool_calls, list):
return message
changed = False
for idx, call in enumerate(tool_calls):
if not isinstance(call, dict):
continue
call_id = call.get("id")
if isinstance(call_id, str) and call_id.strip():
continue
call["id"] = f"call_{idx}"
changed = True
if changed:
message["tool_calls"] = tool_calls
return message
def _extract_pending(tool_call: Dict[str, Any], requires_confirmation: bool) -> PendingToolCall:
    """
    Convert a raw assistant tool_call dict into a PendingToolCall.

    The function arguments are JSON-decoded when they form a dict; malformed
    or non-dict arguments degrade to an empty dict rather than raising.
    """
    call_id = str(tool_call.get("id") or "")
    fn = tool_call.get("function") or {}
    name = str((fn.get("name") if isinstance(fn, dict) else None) or "")
    raw_args = fn.get("arguments") if isinstance(fn, dict) else None
    arguments: Dict[str, Any] = {}
    if isinstance(raw_args, str) and raw_args.strip():
        try:
            parsed = json.loads(raw_args)
            if isinstance(parsed, dict):
                arguments = parsed
        except json.JSONDecodeError:
            arguments = {}
    return PendingToolCall(
        id=call_id,
        name=name,
        arguments=arguments,
        requires_confirmation=requires_confirmation,
    )
def _find_last_assistant_tool_calls(messages: List[Dict[str, Any]]) -> Tuple[int, Dict[str, Any]]:
for idx in range(len(messages) - 1, -1, -1):
msg = messages[idx]
if not isinstance(msg, dict):
continue
if msg.get("role") != "assistant":
continue
tool_calls = msg.get("tool_calls")
if isinstance(tool_calls, list) and tool_calls:
return idx, msg
raise HTTPException(status_code=400, detail="没有可确认的待执行操作")
def _existing_tool_result_ids(messages: List[Dict[str, Any]]) -> set[str]:
ids: set[str] = set()
for msg in messages:
if not isinstance(msg, dict):
continue
if msg.get("role") != "tool":
continue
tool_call_id = msg.get("tool_call_id")
if isinstance(tool_call_id, str) and tool_call_id.strip():
ids.add(tool_call_id)
return ids
async def _choose_chat_ability() -> str:
    """Use the 'tools' ability when a default tools model is configured, else 'chat'."""
    tools_model = await AIProviderService.get_default_model("tools")
    return "tools" if tools_model else "chat"
def _sse(event: str, data: Any) -> bytes:
payload = json.dumps(data, ensure_ascii=False, separators=(",", ":"))
return f"event: {event}\ndata: {payload}\n\n".encode("utf-8")
def _format_exc(exc: BaseException) -> str:
text = str(exc)
return text if text else exc.__class__.__name__
class AgentService:
    """
    Agent orchestration: drives the LLM tool loop for both request/response
    (`chat`) and SSE streaming (`chat_stream`) modes. Both methods share the
    same flow: resolve previously approved/rejected tool calls, then loop
    (at most 4 rounds) calling the model and executing the tools it requests,
    pausing with `pending_tool_calls` when a call needs user confirmation.
    """

    @classmethod
    async def chat(cls, req: AgentChatRequest, user: Optional[User]) -> Dict[str, Any]:
        """
        Run one non-streaming agent turn.

        Returns {"messages": [...new messages...]} plus "pending_tool_calls"
        when confirmation-required tools were requested but not auto-executed.
        Raises HTTPException 400/502 on model configuration or upstream errors.
        """
        history: List[Dict[str, Any]] = list(req.messages or [])
        current_path = _normalize_path(req.context.current_path if req.context else None)
        system_prompt = _build_system_prompt(current_path)
        internal_messages: List[Dict[str, Any]] = [{"role": "system", "content": system_prompt}] + history
        new_messages: List[Dict[str, Any]] = []
        pending: List[PendingToolCall] = []
        approved_ids = {i for i in (req.approved_tool_call_ids or []) if isinstance(i, str) and i.strip()}
        rejected_ids = {i for i in (req.rejected_tool_call_ids or []) if isinstance(i, str) and i.strip()}
        # Phase 1: settle previously-pending tool calls the user approved/rejected.
        if approved_ids or rejected_ids:
            _, last_call_msg = _find_last_assistant_tool_calls(internal_messages)
            last_call_msg = _ensure_tool_call_ids(last_call_msg)
            tool_calls = last_call_msg.get("tool_calls") or []
            call_map: Dict[str, Dict[str, Any]] = {
                str(c.get("id")): c
                for c in tool_calls
                if isinstance(c, dict) and isinstance(c.get("id"), str)
            }
            existing_ids = _existing_tool_result_ids(internal_messages)
            for call_id in approved_ids | rejected_ids:
                # Skip calls that already have a tool-result message.
                if call_id in existing_ids:
                    continue
                tool_call = call_map.get(call_id)
                if not tool_call:
                    continue
                fn = tool_call.get("function") or {}
                name = fn.get("name") if isinstance(fn, dict) else None
                args_raw = fn.get("arguments") if isinstance(fn, dict) else None
                args: Dict[str, Any] = {}
                if isinstance(args_raw, str) and args_raw.strip():
                    try:
                        parsed = json.loads(args_raw)
                        if isinstance(parsed, dict):
                            args = parsed
                    except json.JSONDecodeError:
                        args = {}
                spec = get_tool(str(name or ""))
                if call_id in rejected_ids:
                    # Record the rejection so the model sees the call was canceled.
                    content = tool_result_to_content({"canceled": True, "reason": "user_rejected"})
                    tool_msg = {"role": "tool", "tool_call_id": call_id, "content": content}
                    internal_messages.append(tool_msg)
                    new_messages.append(tool_msg)
                    continue
                if not spec:
                    content = tool_result_to_content({"error": f"unknown_tool: {name}"})
                    tool_msg = {"role": "tool", "tool_call_id": call_id, "content": content}
                    internal_messages.append(tool_msg)
                    new_messages.append(tool_msg)
                    continue
                try:
                    result = await spec.handler(args)
                    content = tool_result_to_content(result)
                except Exception as exc:  # noqa: BLE001
                    content = tool_result_to_content({"error": str(exc)})
                tool_msg = {"role": "tool", "tool_call_id": call_id, "content": content}
                internal_messages.append(tool_msg)
                new_messages.append(tool_msg)
        # Phase 2: model/tool loop, bounded to avoid runaway tool chains.
        tools_schema = openai_tools()
        ability = await _choose_chat_ability()
        max_loops = 4
        for _ in range(max_loops):
            try:
                assistant = await chat_completion(
                    internal_messages,
                    ability=ability,
                    tools=tools_schema,
                    tool_choice="auto",
                    timeout=60.0,
                )
            except MissingModelError as exc:
                raise HTTPException(status_code=400, detail=str(exc)) from exc
            except httpx.HTTPStatusError as exc:
                raise HTTPException(status_code=502, detail=f"对话请求失败: {exc}") from exc
            except httpx.RequestError as exc:
                raise HTTPException(status_code=502, detail=f"对话请求异常: {exc}") from exc
            assistant = _ensure_tool_call_ids(assistant)
            internal_messages.append(assistant)
            new_messages.append(assistant)
            tool_calls = assistant.get("tool_calls")
            # No tool calls means the model produced a final answer.
            if not isinstance(tool_calls, list) or not tool_calls:
                break
            pending = []
            for call in tool_calls:
                if not isinstance(call, dict):
                    continue
                call_id = str(call.get("id") or "")
                fn = call.get("function") or {}
                name = fn.get("name") if isinstance(fn, dict) else None
                args_raw = fn.get("arguments") if isinstance(fn, dict) else None
                args: Dict[str, Any] = {}
                if isinstance(args_raw, str) and args_raw.strip():
                    try:
                        parsed = json.loads(args_raw)
                        if isinstance(parsed, dict):
                            args = parsed
                    except json.JSONDecodeError:
                        args = {}
                spec = get_tool(str(name or ""))
                if not spec:
                    content = tool_result_to_content({"error": f"unknown_tool: {name}"})
                    tool_msg = {"role": "tool", "tool_call_id": call_id, "content": content}
                    internal_messages.append(tool_msg)
                    new_messages.append(tool_msg)
                    continue
                # Destructive tools wait for user confirmation unless auto_execute is on.
                if spec.requires_confirmation and not req.auto_execute:
                    pending.append(_extract_pending(call, True))
                    continue
                try:
                    result = await spec.handler(args)
                    content = tool_result_to_content(result)
                except Exception as exc:  # noqa: BLE001
                    content = tool_result_to_content({"error": str(exc)})
                tool_msg = {"role": "tool", "tool_call_id": call_id, "content": content}
                internal_messages.append(tool_msg)
                new_messages.append(tool_msg)
            if pending:
                break
        payload: Dict[str, Any] = {"messages": new_messages}
        if pending:
            payload["pending_tool_calls"] = [p.model_dump() for p in pending]
        return payload

    @classmethod
    async def chat_stream(cls, req: AgentChatRequest, user: Optional[User]):
        """
        Streaming variant of `chat` as an async generator of SSE frames.

        Events: tool_start/tool_end, assistant_start/assistant_delta/
        assistant_end, pending, and a final "done" with the same payload shape
        as `chat`. Errors are reported inside a "done" frame instead of
        propagating, so the SSE stream always terminates cleanly.
        """
        history: List[Dict[str, Any]] = list(req.messages or [])
        current_path = _normalize_path(req.context.current_path if req.context else None)
        system_prompt = _build_system_prompt(current_path)
        internal_messages: List[Dict[str, Any]] = [{"role": "system", "content": system_prompt}] + history
        new_messages: List[Dict[str, Any]] = []
        pending: List[PendingToolCall] = []
        approved_ids = {i for i in (req.approved_tool_call_ids or []) if isinstance(i, str) and i.strip()}
        rejected_ids = {i for i in (req.rejected_tool_call_ids or []) if isinstance(i, str) and i.strip()}
        try:
            # Phase 1: settle previously-pending tool calls (mirrors `chat`,
            # with tool_start/tool_end events emitted along the way).
            if approved_ids or rejected_ids:
                _, last_call_msg = _find_last_assistant_tool_calls(internal_messages)
                last_call_msg = _ensure_tool_call_ids(last_call_msg)
                tool_calls = last_call_msg.get("tool_calls") or []
                call_map: Dict[str, Dict[str, Any]] = {
                    str(c.get("id")): c
                    for c in tool_calls
                    if isinstance(c, dict) and isinstance(c.get("id"), str)
                }
                existing_ids = _existing_tool_result_ids(internal_messages)
                for call_id in approved_ids | rejected_ids:
                    if call_id in existing_ids:
                        continue
                    tool_call = call_map.get(call_id)
                    if not tool_call:
                        continue
                    fn = tool_call.get("function") or {}
                    name = fn.get("name") if isinstance(fn, dict) else None
                    args_raw = fn.get("arguments") if isinstance(fn, dict) else None
                    args: Dict[str, Any] = {}
                    if isinstance(args_raw, str) and args_raw.strip():
                        try:
                            parsed = json.loads(args_raw)
                            if isinstance(parsed, dict):
                                args = parsed
                        except json.JSONDecodeError:
                            args = {}
                    spec = get_tool(str(name or ""))
                    if call_id in rejected_ids:
                        content = tool_result_to_content({"canceled": True, "reason": "user_rejected"})
                        tool_msg = {"role": "tool", "tool_call_id": call_id, "content": content}
                        internal_messages.append(tool_msg)
                        new_messages.append(tool_msg)
                        yield _sse("tool_end", {"tool_call_id": call_id, "name": str(name or ""), "message": tool_msg})
                        continue
                    if not spec:
                        content = tool_result_to_content({"error": f"unknown_tool: {name}"})
                        tool_msg = {"role": "tool", "tool_call_id": call_id, "content": content}
                        internal_messages.append(tool_msg)
                        new_messages.append(tool_msg)
                        yield _sse("tool_end", {"tool_call_id": call_id, "name": str(name or ""), "message": tool_msg})
                        continue
                    yield _sse("tool_start", {"tool_call_id": call_id, "name": spec.name})
                    try:
                        result = await spec.handler(args)
                        content = tool_result_to_content(result)
                    except Exception as exc:  # noqa: BLE001
                        content = tool_result_to_content({"error": str(exc)})
                    tool_msg = {"role": "tool", "tool_call_id": call_id, "content": content}
                    internal_messages.append(tool_msg)
                    new_messages.append(tool_msg)
                    yield _sse("tool_end", {"tool_call_id": call_id, "name": spec.name, "message": tool_msg})
            # Phase 2: streaming model/tool loop.
            tools_schema = openai_tools()
            ability = await _choose_chat_ability()
            max_loops = 4
            for _ in range(max_loops):
                assistant_event_id = uuid.uuid4().hex
                yield _sse("assistant_start", {"id": assistant_event_id})
                assistant_message: Dict[str, Any] | None = None
                try:
                    async for event in chat_completion_stream(
                        internal_messages,
                        ability=ability,
                        tools=tools_schema,
                        tool_choice="auto",
                        timeout=60.0,
                    ):
                        if event.get("type") == "delta":
                            delta = event.get("delta")
                            if isinstance(delta, str) and delta:
                                yield _sse("assistant_delta", {"id": assistant_event_id, "delta": delta})
                        elif event.get("type") == "message":
                            msg = event.get("message")
                            if isinstance(msg, dict):
                                assistant_message = msg
                except MissingModelError as exc:
                    raise HTTPException(status_code=400, detail=_format_exc(exc)) from exc
                except httpx.HTTPStatusError as exc:
                    raise HTTPException(status_code=502, detail=f"对话请求失败: {_format_exc(exc)}") from exc
                except httpx.RequestError as exc:
                    raise HTTPException(status_code=502, detail=f"对话请求异常: {_format_exc(exc)}") from exc
                if not assistant_message:
                    assistant_message = {"role": "assistant", "content": ""}
                assistant_message = _ensure_tool_call_ids(assistant_message)
                internal_messages.append(assistant_message)
                new_messages.append(assistant_message)
                yield _sse("assistant_end", {"id": assistant_event_id, "message": assistant_message})
                tool_calls = assistant_message.get("tool_calls")
                if not isinstance(tool_calls, list) or not tool_calls:
                    break
                pending = []
                for call in tool_calls:
                    if not isinstance(call, dict):
                        continue
                    call_id = str(call.get("id") or "")
                    fn = call.get("function") or {}
                    name = fn.get("name") if isinstance(fn, dict) else None
                    args_raw = fn.get("arguments") if isinstance(fn, dict) else None
                    args: Dict[str, Any] = {}
                    if isinstance(args_raw, str) and args_raw.strip():
                        try:
                            parsed = json.loads(args_raw)
                            if isinstance(parsed, dict):
                                args = parsed
                        except json.JSONDecodeError:
                            args = {}
                    spec = get_tool(str(name or ""))
                    if not spec:
                        content = tool_result_to_content({"error": f"unknown_tool: {name}"})
                        tool_msg = {"role": "tool", "tool_call_id": call_id, "content": content}
                        internal_messages.append(tool_msg)
                        new_messages.append(tool_msg)
                        yield _sse("tool_end", {"tool_call_id": call_id, "name": str(name or ""), "message": tool_msg})
                        continue
                    if spec.requires_confirmation and not req.auto_execute:
                        pending.append(_extract_pending(call, True))
                        continue
                    yield _sse("tool_start", {"tool_call_id": call_id, "name": spec.name})
                    try:
                        result = await spec.handler(args)
                        content = tool_result_to_content(result)
                    except Exception as exc:  # noqa: BLE001
                        content = tool_result_to_content({"error": str(exc)})
                    tool_msg = {"role": "tool", "tool_call_id": call_id, "content": content}
                    internal_messages.append(tool_msg)
                    new_messages.append(tool_msg)
                    yield _sse("tool_end", {"tool_call_id": call_id, "name": spec.name, "message": tool_msg})
                if pending:
                    yield _sse("pending", {"pending_tool_calls": [p.model_dump() for p in pending]})
                    break
            payload: Dict[str, Any] = {"messages": new_messages}
            if pending:
                payload["pending_tool_calls"] = [p.model_dump() for p in pending]
            yield _sse("done", payload)
        except asyncio.CancelledError:
            # Client disconnected; stop emitting silently.
            return
        except HTTPException as exc:
            # Surface HTTP errors as a final assistant message inside "done".
            detail = exc.detail
            content = detail if isinstance(detail, str) else str(detail)
            if not content.strip():
                content = f"请求失败({exc.status_code})"
            new_messages.append({"role": "assistant", "content": content})
            payload: Dict[str, Any] = {"messages": new_messages}
            if pending:
                payload["pending_tool_calls"] = [p.model_dump() for p in pending]
            yield _sse("done", payload)
            return
        except Exception as exc:  # noqa: BLE001
            new_messages.append({"role": "assistant", "content": f"服务端异常: {_format_exc(exc)}"})
            payload: Dict[str, Any] = {"messages": new_messages}
            if pending:
                payload["pending_tool_calls"] = [p.model_dump() for p in pending]
            yield _sse("done", payload)
            return

412
domain/agent/tools.py Normal file
View File

@@ -0,0 +1,412 @@
import json
from dataclasses import dataclass
from typing import Any, Awaitable, Callable, Dict, List, Optional
from domain.processors import ProcessDirectoryRequest, ProcessRequest, ProcessorService
from domain.virtual_fs import VirtualFSService
from domain.virtual_fs.search import VirtualFSSearchService
@dataclass(frozen=True)
class ToolSpec:
    """Immutable description of one agent tool exposed to the LLM."""
    name: str  # function-call name presented to the model
    description: str  # summary shown to the model
    parameters: Dict[str, Any]  # JSON-schema describing the arguments
    requires_confirmation: bool  # True => user must approve before execution
    handler: Callable[[Dict[str, Any]], Awaitable[Any]]  # async implementation
async def _processors_list(_: Dict[str, Any]) -> Dict[str, Any]:
    """Tool handler: list the available processors (the arguments dict is ignored)."""
    return {"processors": ProcessorService.list_processors()}
async def _processors_run(args: Dict[str, Any]) -> Dict[str, Any]:
    """
    Tool handler: run a processor on a file, or on a directory when the path
    is a directory AND a directory-only option (max_depth/suffix) was given.

    Returns the processor result merged with {"mode": "directory"|"file"}.
    Note the asymmetric overwrite defaults: directory runs default to
    overwrite=True, single-file runs default to overwrite=False.
    """
    path = str(args.get("path") or "")
    processor_type = str(args.get("processor_type") or "")
    config = args.get("config")
    if not isinstance(config, dict):
        config = {}
    save_to = args.get("save_to")
    save_to = str(save_to) if isinstance(save_to, str) and save_to.strip() else None
    # max_depth: best-effort int coercion; invalid values fall back to None.
    max_depth = args.get("max_depth")
    max_depth_value: Optional[int] = None
    if max_depth is not None:
        try:
            max_depth_value = int(max_depth)
        except (TypeError, ValueError):
            max_depth_value = None
    suffix = args.get("suffix")
    suffix_value = str(suffix) if isinstance(suffix, str) and suffix.strip() else None
    # overwrite stays None when absent so each mode can apply its own default.
    overwrite_value = args.get("overwrite")
    overwrite = bool(overwrite_value) if overwrite_value is not None else None
    is_dir = await VirtualFSService.path_is_directory(path)
    if is_dir and (max_depth_value is not None or suffix_value is not None):
        req = ProcessDirectoryRequest(
            path=path,
            processor_type=processor_type,
            config=config,
            overwrite=True if overwrite is None else overwrite,
            max_depth=max_depth_value,
            suffix=suffix_value,
        )
        result = await ProcessorService.process_directory(req)
        return {"mode": "directory", **result}
    req = ProcessRequest(
        path=path,
        processor_type=processor_type,
        config=config,
        save_to=save_to,
        overwrite=False if overwrite is None else overwrite,
    )
    result = await ProcessorService.process_file(req)
    return {"mode": "file", **result}
def _normalize_vfs_path(value: Any) -> str:
s = str(value or "").strip().replace("\\", "/")
if not s:
return ""
if not s.startswith("/"):
s = "/" + s
s = s.rstrip("/") or "/"
return s
def _require_vfs_path(value: Any, field: str) -> str:
path = _normalize_vfs_path(value)
if not path:
raise ValueError(f"missing_{field}")
return path
async def _vfs_list_dir(args: Dict[str, Any]) -> Dict[str, Any]:
    """Tool handler: list a directory with pagination and sorting (defaults: page 1, 50 items, by name asc)."""
    path = _normalize_vfs_path(args.get("path") or "/") or "/"
    page = int(args.get("page") or 1)
    page_size = int(args.get("page_size") or 50)
    sort_by = str(args.get("sort_by") or "name")
    sort_order = str(args.get("sort_order") or "asc")
    return await VirtualFSService.list_directory(path, page, page_size, sort_by, sort_order)
async def _vfs_stat(args: Dict[str, Any]) -> Any:
    """Tool handler: stat a file/directory; requires a non-blank path."""
    path = _require_vfs_path(args.get("path"), "path")
    return await VirtualFSService.stat(path)
async def _vfs_read_text(args: Dict[str, Any]) -> Dict[str, Any]:
    """
    Tool handler: read a file as text, truncated to max_chars (default 8000).

    Binary/undecodable content returns {"error": "binary_or_invalid_text", ...}
    instead of raising. "length" is the pre-truncation character count.
    """
    path = _require_vfs_path(args.get("path"), "path")
    encoding = str(args.get("encoding") or "utf-8")
    max_chars = int(args.get("max_chars") or 8000)
    data = await VirtualFSService.read_file(path)
    # read_file may yield bytes, str, or another object; normalize to str.
    if isinstance(data, (bytes, bytearray)):
        try:
            text = bytes(data).decode(encoding)
        except UnicodeDecodeError:
            return {"error": "binary_or_invalid_text", "path": path}
    elif isinstance(data, str):
        text = data
    else:
        text = str(data)
    original_len = len(text)
    truncated = original_len > max_chars
    if truncated:
        text = text[:max_chars]
    return {
        "path": path,
        "encoding": encoding,
        "content": text,
        "truncated": truncated,
        "length": original_len,
    }
async def _vfs_write_text(args: Dict[str, Any]) -> Dict[str, Any]:
    """Encode text and write it to a VFS file, overwriting any existing
    content. Writing to the root path "/" is rejected."""
    target = _require_vfs_path(args.get("path"), "path")
    if target == "/":
        raise ValueError("invalid_path")
    encoding = str(args.get("encoding") or "utf-8")
    payload = str(args.get("content") or "").encode(encoding)
    await VirtualFSService.write_file(target, payload)
    return {"written": True, "path": target, "encoding": encoding, "bytes": len(payload)}
async def _vfs_mkdir(args: Dict[str, Any]) -> Dict[str, Any]:
    """Create a directory at the given VFS path."""
    target = _require_vfs_path(args.get("path"), "path")
    return await VirtualFSService.mkdir(target)
async def _vfs_delete(args: Dict[str, Any]) -> Dict[str, Any]:
    """Delete a VFS file or directory (recursion is up to the adapter)."""
    target = _require_vfs_path(args.get("path"), "path")
    return await VirtualFSService.delete(target)
async def _vfs_move(args: Dict[str, Any]) -> Dict[str, Any]:
    """Move a VFS path; the root "/" may be neither source nor target."""
    source = _require_vfs_path(args.get("src"), "src")
    target = _require_vfs_path(args.get("dst"), "dst")
    if "/" in (source, target):
        raise ValueError("invalid_path")
    allow_overwrite = bool(args.get("overwrite") or False)
    return await VirtualFSService.move(source, target, allow_overwrite)
async def _vfs_copy(args: Dict[str, Any]) -> Dict[str, Any]:
    """Copy a VFS path; the root "/" may be neither source nor target."""
    source = _require_vfs_path(args.get("src"), "src")
    target = _require_vfs_path(args.get("dst"), "dst")
    if "/" in (source, target):
        raise ValueError("invalid_path")
    allow_overwrite = bool(args.get("overwrite") or False)
    return await VirtualFSService.copy(source, target, allow_overwrite)
async def _vfs_rename(args: Dict[str, Any]) -> Dict[str, Any]:
    """Rename a VFS path (a same-directory move); root "/" is rejected."""
    source = _require_vfs_path(args.get("src"), "src")
    target = _require_vfs_path(args.get("dst"), "dst")
    if "/" in (source, target):
        raise ValueError("invalid_path")
    allow_overwrite = bool(args.get("overwrite") or False)
    return await VirtualFSService.rename(source, target, allow_overwrite)
async def _vfs_search(args: Dict[str, Any]) -> Dict[str, Any]:
    """Search the VFS by vector similarity (default) or by filename.

    Raises:
        ValueError: "missing_q" when the query is empty/blank.
    """
    query = str(args.get("q") or "").strip()
    if not query:
        raise ValueError("missing_q")
    return await VirtualFSSearchService.search(
        query,
        int(args.get("top_k") or 10),
        str(args.get("mode") or "vector"),
        int(args.get("page") or 1),
        int(args.get("page_size") or 10),
    )
# Registry of all agent-callable tools, keyed by tool name.
# Each ToolSpec carries a JSON-Schema `parameters` object (exported to the
# LLM via openai_tools()) plus a `requires_confirmation` flag: mutating
# operations (processors_run, write/mkdir/delete/move/copy/rename) need
# explicit user approval, while read-only ones (processors_list, list/stat/
# read/search) run without confirmation.
TOOLS: Dict[str, ToolSpec] = {
    # Read-only: enumerate available processors and their config schemas.
    "processors_list": ToolSpec(
        name="processors_list",
        description="获取可用处理器列表type/name/config_schema 等)。",
        parameters={
            "type": "object",
            "properties": {},
            "additionalProperties": False,
        },
        requires_confirmation=False,
        handler=_processors_list,
    ),
    # Mutating: run a processor on a file or directory (may enqueue a task).
    # NOTE(review): this is the only schema here without
    # "additionalProperties": False — confirm whether extra keys are
    # intentionally tolerated or this should match the other tools.
    "processors_run": ToolSpec(
        name="processors_run",
        description=(
            "运行处理器处理文件或目录。"
            " 对目录可选 max_depth/suffix对文件可选 overwrite/save_to。"
            " 返回任务 id去任务队列查看进度"
        ),
        parameters={
            "type": "object",
            "properties": {
                "path": {"type": "string", "description": "文件或目录路径(绝对路径,如 /foo/bar"},
                "processor_type": {"type": "string", "description": "处理器类型(例如 image_watermark"},
                "config": {"type": "object", "description": "处理器配置,按 processors_list 返回的 config_schema 填写"},
                "overwrite": {"type": "boolean", "description": "是否覆盖原文件/目录内文件"},
                "save_to": {"type": "string", "description": "保存到指定路径(仅文件模式,且 overwrite=false 时使用)"},
                "max_depth": {"type": "integer", "description": "目录遍历深度(仅目录模式)"},
                "suffix": {"type": "string", "description": "目录批处理时的输出后缀(仅 produces_file 且 overwrite=false"},
            },
            "required": ["path", "processor_type"],
        },
        requires_confirmation=True,
        handler=_processors_run,
    ),
    # Read-only: paginated directory listing.
    "vfs_list_dir": ToolSpec(
        name="vfs_list_dir",
        description="浏览目录(列出 entries + pagination",
        parameters={
            "type": "object",
            "properties": {
                "path": {"type": "string", "description": "目录路径(绝对路径,如 /foo/bar"},
                "page": {"type": "integer", "description": "页码(从 1 开始)"},
                "page_size": {"type": "integer", "description": "每页条数"},
                "sort_by": {"type": "string", "description": "排序字段name/size/mtime"},
                "sort_order": {"type": "string", "description": "排序顺序asc/desc"},
            },
            "required": ["path"],
            "additionalProperties": False,
        },
        requires_confirmation=False,
        handler=_vfs_list_dir,
    ),
    # Read-only: single-path metadata lookup.
    "vfs_stat": ToolSpec(
        name="vfs_stat",
        description="查看文件/目录信息size/mtime/is_dir/has_thumbnail/vector_index 等)。",
        parameters={
            "type": "object",
            "properties": {
                "path": {"type": "string", "description": "路径(绝对路径,如 /foo/bar.txt"},
            },
            "required": ["path"],
            "additionalProperties": False,
        },
        requires_confirmation=False,
        handler=_vfs_stat,
    ),
    # Read-only: text file read with truncation; binary content yields an error payload.
    "vfs_read_text": ToolSpec(
        name="vfs_read_text",
        description="读取文本文件内容(解码失败视为二进制,返回 error",
        parameters={
            "type": "object",
            "properties": {
                "path": {"type": "string", "description": "文件路径(绝对路径,如 /foo/bar.md"},
                "encoding": {"type": "string", "description": "文本编码(默认 utf-8"},
                "max_chars": {"type": "integer", "description": "最多返回的字符数(默认 8000"},
            },
            "required": ["path"],
            "additionalProperties": False,
        },
        requires_confirmation=False,
        handler=_vfs_read_text,
    ),
    # Mutating: overwriting text write — hence requires confirmation.
    "vfs_write_text": ToolSpec(
        name="vfs_write_text",
        description="写入文本文件内容(会覆盖目标文件)。",
        parameters={
            "type": "object",
            "properties": {
                "path": {"type": "string", "description": "文件路径(绝对路径,如 /foo/bar.md"},
                "content": {"type": "string", "description": "要写入的文本内容"},
                "encoding": {"type": "string", "description": "文本编码(默认 utf-8"},
            },
            "required": ["path", "content"],
            "additionalProperties": False,
        },
        requires_confirmation=True,
        handler=_vfs_write_text,
    ),
    # Mutating: directory creation.
    "vfs_mkdir": ToolSpec(
        name="vfs_mkdir",
        description="创建目录。",
        parameters={
            "type": "object",
            "properties": {
                "path": {"type": "string", "description": "目录路径(绝对路径,如 /foo/bar"},
            },
            "required": ["path"],
            "additionalProperties": False,
        },
        requires_confirmation=True,
        handler=_vfs_mkdir,
    ),
    # Mutating: deletion (recursion behavior delegated to the adapter).
    "vfs_delete": ToolSpec(
        name="vfs_delete",
        description="删除文件或目录(由底层适配器决定是否递归)。",
        parameters={
            "type": "object",
            "properties": {
                "path": {"type": "string", "description": "路径(绝对路径,如 /foo/bar 或 /foo/bar.txt"},
            },
            "required": ["path"],
            "additionalProperties": False,
        },
        requires_confirmation=True,
        handler=_vfs_delete,
    ),
    # Mutating: move (may be executed via the task queue).
    "vfs_move": ToolSpec(
        name="vfs_move",
        description="移动路径(可能进入任务队列)。",
        parameters={
            "type": "object",
            "properties": {
                "src": {"type": "string", "description": "源路径(绝对路径)"},
                "dst": {"type": "string", "description": "目标路径(绝对路径)"},
                "overwrite": {"type": "boolean", "description": "是否允许覆盖已存在目标(默认 false"},
            },
            "required": ["src", "dst"],
            "additionalProperties": False,
        },
        requires_confirmation=True,
        handler=_vfs_move,
    ),
    # Mutating: copy (may be executed via the task queue).
    "vfs_copy": ToolSpec(
        name="vfs_copy",
        description="复制路径(可能进入任务队列)。",
        parameters={
            "type": "object",
            "properties": {
                "src": {"type": "string", "description": "源路径(绝对路径)"},
                "dst": {"type": "string", "description": "目标路径(绝对路径)"},
                "overwrite": {"type": "boolean", "description": "是否覆盖已存在目标(默认 false"},
            },
            "required": ["src", "dst"],
            "additionalProperties": False,
        },
        requires_confirmation=True,
        handler=_vfs_copy,
    ),
    # Mutating: rename — semantically a same-directory move.
    "vfs_rename": ToolSpec(
        name="vfs_rename",
        description="重命名路径(本质是同目录 move",
        parameters={
            "type": "object",
            "properties": {
                "src": {"type": "string", "description": "源路径(绝对路径)"},
                "dst": {"type": "string", "description": "目标路径(绝对路径)"},
                "overwrite": {"type": "boolean", "description": "是否允许覆盖已存在目标(默认 false"},
            },
            "required": ["src", "dst"],
            "additionalProperties": False,
        },
        requires_confirmation=True,
        handler=_vfs_rename,
    ),
    # Read-only: search; top_k applies to vector mode, page/page_size to filename mode.
    "vfs_search": ToolSpec(
        name="vfs_search",
        description="搜索文件mode=vector 或 filename",
        parameters={
            "type": "object",
            "properties": {
                "q": {"type": "string", "description": "搜索关键词"},
                "mode": {"type": "string", "description": "搜索模式vector/filename默认 vector"},
                "top_k": {"type": "integer", "description": "返回数量vector 模式使用,默认 10"},
                "page": {"type": "integer", "description": "页码filename 模式使用,默认 1"},
                "page_size": {"type": "integer", "description": "分页大小filename 模式使用,默认 10"},
            },
            "required": ["q"],
            "additionalProperties": False,
        },
        requires_confirmation=False,
        handler=_vfs_search,
    ),
}
def get_tool(name: str) -> Optional[ToolSpec]:
    """Look up a registered ToolSpec by name, returning None if unknown."""
    try:
        return TOOLS[name]
    except KeyError:
        return None
def openai_tools() -> List[Dict[str, Any]]:
    """Render every registered tool in the OpenAI function-calling format."""
    return [
        {
            "type": "function",
            "function": {
                "name": spec.name,
                "description": spec.description,
                "parameters": spec.parameters,
            },
        }
        for spec in TOOLS.values()
    ]
def tool_result_to_content(result: Any) -> str:
    """Serialize a tool handler's return value into transcript text.

    None becomes "", strings pass through unchanged, and anything else is
    JSON-encoded. If the value cannot be JSON-encoded — unsupported type
    (TypeError) or a circular reference (ValueError) — fall back to a JSON
    object wrapping its str() representation rather than raising.
    """
    if result is None:
        return ""
    if isinstance(result, str):
        return result
    try:
        return json.dumps(result, ensure_ascii=False)
    except (TypeError, ValueError):
        # json.dumps raises ValueError (not TypeError) on circular
        # references; degrade to a string representation either way.
        return json.dumps({"result": str(result)}, ensure_ascii=False)

23
domain/agent/types.py Normal file
View File

@@ -0,0 +1,23 @@
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Field
class AgentChatContext(BaseModel):
current_path: Optional[str] = None
class AgentChatRequest(BaseModel):
messages: List[Dict[str, Any]] = Field(default_factory=list)
auto_execute: bool = False
approved_tool_call_ids: List[str] = Field(default_factory=list)
rejected_tool_call_ids: List[str] = Field(default_factory=list)
context: Optional[AgentChatContext] = None
class PendingToolCall(BaseModel):
id: str
name: str
arguments: Dict[str, Any] = Field(default_factory=dict)
requires_confirmation: bool = True

View File

@@ -1,28 +1,61 @@
from .api import router_ai, router_vector_db
from .inference import (
MissingModelError,
chat_completion,
chat_completion_stream,
describe_image_base64,
get_text_embedding,
provider_service,
rerank_texts,
)
from .service import (
AIProviderService,
FILE_COLLECTION_NAME,
VECTOR_COLLECTION_NAME,
DEFAULT_VECTOR_DIMENSION,
VectorDBConfigManager,
VectorDBService,
DEFAULT_VECTOR_DIMENSION,
ABILITIES,
normalize_capabilities,
)
from .types import (
ABILITIES,
AIDefaultsUpdate,
AIModelCreate,
AIModelUpdate,
AIProviderCreate,
AIProviderUpdate,
VectorDBConfigPayload,
normalize_capabilities,
)
from .vector_providers import (
BaseVectorProvider,
MilvusLiteProvider,
MilvusServerProvider,
QdrantProvider,
get_provider_class,
get_provider_entry,
list_providers,
)
__all__ = [
"router_ai",
"router_vector_db",
"MissingModelError",
"chat_completion",
"chat_completion_stream",
"describe_image_base64",
"get_text_embedding",
"provider_service",
"rerank_texts",
"AIProviderService",
"VectorDBService",
"VectorDBConfigManager",
"DEFAULT_VECTOR_DIMENSION",
"VECTOR_COLLECTION_NAME",
"FILE_COLLECTION_NAME",
"BaseVectorProvider",
"MilvusLiteProvider",
"MilvusServerProvider",
"QdrantProvider",
"list_providers",
"get_provider_entry",
"get_provider_class",
"ABILITIES",
"normalize_capabilities",
"AIDefaultsUpdate",

View File

@@ -5,8 +5,9 @@ from fastapi import APIRouter, Depends, HTTPException, Path, Request
from api.response import success
from domain.audit import AuditAction, audit
from domain.ai.service import AIProviderService, VectorDBConfigManager, VectorDBService
from domain.ai.types import (
from domain.auth import User, get_current_active_user
from .service import AIProviderService, VectorDBConfigManager, VectorDBService
from .types import (
AIDefaultsUpdate,
AIModelCreate,
AIModelUpdate,
@@ -14,9 +15,7 @@ from domain.ai.types import (
AIProviderUpdate,
VectorDBConfigPayload,
)
from domain.ai.vector_providers import get_provider_class, get_provider_entry, list_providers
from domain.auth.service import get_current_active_user
from domain.auth.types import User
from .vector_providers import get_provider_class, get_provider_entry, list_providers
router_ai = APIRouter(prefix="/api/ai", tags=["ai"])
router_vector_db = APIRouter(prefix="/api/vector-db", tags=["vector-db"])

File diff suppressed because it is too large Load Diff

View File

@@ -7,7 +7,7 @@ import httpx
from tortoise.exceptions import DoesNotExist
from tortoise.transactions import in_transaction
from domain.config.service import ConfigService
from domain.config import ConfigService
from models.database import AIDefaultModel, AIModel, AIProvider
from .types import ABILITIES, normalize_capabilities
@@ -140,7 +140,7 @@ def serialize_provider(provider: AIProvider) -> Dict[str, Any]:
"provider_type": provider.provider_type,
"api_format": provider.api_format,
"base_url": provider.base_url,
"api_key": provider.api_key,
"has_api_key": bool(provider.api_key),
"logo_url": provider.logo_url,
"extra_config": provider.extra_config or {},
"created_at": provider.created_at,

View File

@@ -30,8 +30,8 @@ class AIProviderBase(BaseModel):
@classmethod
def normalize_format(cls, value: str) -> str:
fmt = value.lower()
if fmt not in {"openai", "gemini"}:
raise ValueError("api_format must be 'openai' or 'gemini'")
if fmt not in {"openai", "gemini", "anthropic", "ollama"}:
raise ValueError("api_format must be 'openai', 'gemini', 'anthropic', or 'ollama'")
return fmt
@@ -54,8 +54,8 @@ class AIProviderUpdate(BaseModel):
if value is None:
return value
fmt = value.lower()
if fmt not in {"openai", "gemini"}:
raise ValueError("api_format must be 'openai' or 'gemini'")
if fmt not in {"openai", "gemini", "anthropic", "ollama"}:
raise ValueError("api_format must be 'openai', 'gemini', 'anthropic', or 'ollama'")
return fmt

View File

@@ -1,5 +1,4 @@
from domain.audit.decorator import audit
from domain.audit.types import AuditAction
from domain.audit.api import router
from .decorator import audit
from .types import AuditAction
__all__ = ["audit", "AuditAction", "router"]
__all__ = ["audit", "AuditAction"]

View File

@@ -4,10 +4,9 @@ from typing import Annotated, Optional
from fastapi import APIRouter, Depends, HTTPException, Query
from api import response
from domain.audit.service import AuditService
from domain.audit.types import AuditAction
from domain.auth.service import get_current_active_user
from domain.auth.types import User
from domain.auth import User, get_current_active_user
from .service import AuditService
from .types import AuditAction
CurrentUser = Annotated[User, Depends(get_current_active_user)]

View File

@@ -7,11 +7,11 @@ import jwt
from fastapi import Request
from jwt.exceptions import InvalidTokenError
from domain.audit.service import AuditService
from domain.audit.types import AuditAction
from domain.auth.service import ALGORITHM
from domain.config.service import ConfigService
from domain.auth import ALGORITHM
from domain.config import ConfigService
from models.database import UserAccount
from .service import AuditService
from .types import AuditAction
def _extract_request(bound_args: Mapping[str, Any]) -> Request | None:

View File

@@ -2,7 +2,7 @@ from typing import Any, Dict, Optional
from models.database import AuditLog
from domain.audit.types import AuditAction
from .types import AuditAction
class AuditService:

49
domain/auth/__init__.py Normal file
View File

@@ -0,0 +1,49 @@
from .service import (
ALGORITHM,
AuthService,
authenticate_user_db,
create_access_token,
get_current_active_user,
get_current_user,
get_password_hash,
has_users,
register_user,
request_password_reset,
reset_password_with_token,
verify_password,
verify_password_reset_token,
)
from .types import (
PasswordResetConfirm,
PasswordResetRequest,
RegisterRequest,
Token,
TokenData,
UpdateMeRequest,
User,
UserInDB,
)
__all__ = [
"ALGORITHM",
"AuthService",
"authenticate_user_db",
"create_access_token",
"get_current_active_user",
"get_current_user",
"get_password_hash",
"has_users",
"register_user",
"request_password_reset",
"reset_password_with_token",
"verify_password",
"verify_password_reset_token",
"PasswordResetConfirm",
"PasswordResetRequest",
"RegisterRequest",
"Token",
"TokenData",
"UpdateMeRequest",
"User",
"UserInDB",
]

View File

@@ -5,8 +5,8 @@ from fastapi.security import OAuth2PasswordRequestForm
from api.response import success
from domain.audit import AuditAction, audit
from domain.auth.service import AuthService, get_current_active_user
from domain.auth.types import (
from .service import AuthService, get_current_active_user
from .types import (
PasswordResetConfirm,
PasswordResetRequest,
RegisterRequest,

View File

@@ -11,7 +11,9 @@ from fastapi import Depends, HTTPException, status
from fastapi.security import OAuth2PasswordBearer, OAuth2PasswordRequestForm
from jwt.exceptions import InvalidTokenError
from domain.auth.types import (
from domain.config import ConfigService
from models.database import UserAccount
from .types import (
PasswordResetConfirm,
PasswordResetRequest,
RegisterRequest,
@@ -21,8 +23,6 @@ from domain.auth.types import (
User,
UserInDB,
)
from models.database import UserAccount
from domain.config.service import ConfigService
ALGORITHM = "HS256"
ACCESS_TOKEN_EXPIRE_MINUTES = 60 * 24 * 365
@@ -324,7 +324,7 @@ class AuthService:
@classmethod
async def _send_password_reset_email(cls, user: UserAccount, token: str) -> None:
from domain.email.service import EmailService
from domain.email import EmailService
app_domain = await ConfigService.get("APP_DOMAIN", None)
base_url = (app_domain or "http://localhost:5173").rstrip("/")

View File

@@ -1 +1,7 @@
from .service import BackupService
from .types import BackupData
__all__ = [
"BackupService",
"BackupData",
]

View File

@@ -4,8 +4,8 @@ from fastapi import APIRouter, Depends, File, Request, UploadFile
from fastapi.responses import JSONResponse
from domain.audit import AuditAction, audit
from domain.auth.service import get_current_active_user
from domain.backup.service import BackupService
from domain.auth import get_current_active_user
from .service import BackupService
router = APIRouter(
prefix="/api/backup",

View File

@@ -4,8 +4,8 @@ from datetime import datetime
from fastapi import HTTPException
from tortoise.transactions import in_transaction
from domain.backup.types import BackupData
from domain.config.service import VERSION
from domain.config import VERSION
from .types import BackupData
from models.database import (
AIDefaultModel,
AIModel,

10
domain/config/__init__.py Normal file
View File

@@ -0,0 +1,10 @@
from .service import ConfigService, VERSION
from .types import ConfigItem, LatestVersionInfo, SystemStatus
__all__ = [
"ConfigService",
"VERSION",
"ConfigItem",
"LatestVersionInfo",
"SystemStatus",
]

View File

@@ -4,10 +4,9 @@ from fastapi import APIRouter, Depends, Form, Request
from api.response import success
from domain.audit import AuditAction, audit
from domain.auth.service import get_current_active_user
from domain.auth.types import User
from domain.config.service import ConfigService
from domain.config.types import ConfigItem
from domain.auth import User, get_current_active_user
from .service import ConfigService
from .types import ConfigItem
router = APIRouter(prefix="/api/config", tags=["config"])

View File

@@ -5,12 +5,12 @@ from typing import Any, Dict, Optional
import httpx
from dotenv import load_dotenv
from domain.config.types import LatestVersionInfo, SystemStatus
from .types import LatestVersionInfo, SystemStatus
from models.database import Configuration, UserAccount
load_dotenv(dotenv_path=".env")
VERSION = "v1.6.0"
VERSION = "v1.7.1"
class ConfigService:

20
domain/email/__init__.py Normal file
View File

@@ -0,0 +1,20 @@
from .service import EmailService, EmailTemplateRenderer
from .types import (
EmailConfig,
EmailSecurity,
EmailSendPayload,
EmailTemplatePreviewPayload,
EmailTemplateUpdate,
EmailTestRequest,
)
__all__ = [
"EmailService",
"EmailTemplateRenderer",
"EmailConfig",
"EmailSecurity",
"EmailSendPayload",
"EmailTemplatePreviewPayload",
"EmailTemplateUpdate",
"EmailTestRequest",
]

View File

@@ -2,10 +2,9 @@ from fastapi import APIRouter, Depends, HTTPException, Request
from api.response import success
from domain.audit import AuditAction, audit
from domain.auth.service import get_current_active_user
from domain.auth.types import User
from domain.email.service import EmailService, EmailTemplateRenderer
from domain.email.types import (
from domain.auth import User, get_current_active_user
from .service import EmailService, EmailTemplateRenderer
from .types import (
EmailTemplatePreviewPayload,
EmailTemplateUpdate,
EmailTestRequest,

View File

@@ -7,8 +7,8 @@ from pathlib import Path
from string import Template
from typing import Any, Dict, List, Optional
from domain.config.service import ConfigService
from domain.email.types import EmailConfig, EmailSecurity, EmailSendPayload
from domain.config import ConfigService
from .types import EmailConfig, EmailSecurity, EmailSendPayload
class EmailTemplateRenderer:
@@ -104,7 +104,7 @@ class EmailService:
template: str,
context: Optional[Dict[str, Any]] = None,
):
from domain.tasks.task_queue import TaskProgress, task_queue_service
from domain.tasks import TaskProgress, task_queue_service
payload = EmailSendPayload(
recipients=recipients,
@@ -126,7 +126,7 @@ class EmailService:
@classmethod
async def send_from_task(cls, task_id: str, data: Dict[str, Any]):
from domain.tasks.task_queue import TaskProgress, task_queue_service
from domain.tasks import TaskProgress, task_queue_service
payload = EmailSendPayload(**data)

View File

@@ -0,0 +1,7 @@
from .service import OfflineDownloadService
from .types import OfflineDownloadCreate
__all__ = [
"OfflineDownloadService",
"OfflineDownloadCreate",
]

View File

@@ -4,10 +4,9 @@ from fastapi import APIRouter, Depends, Request
from api.response import success
from domain.audit import AuditAction, audit
from domain.auth.service import get_current_active_user
from domain.auth.types import User
from domain.offline_downloads.service import OfflineDownloadService
from domain.offline_downloads.types import OfflineDownloadCreate
from domain.auth import User, get_current_active_user
from .service import OfflineDownloadService
from .types import OfflineDownloadCreate
CurrentUser = Annotated[User, Depends(get_current_active_user)]

View File

@@ -7,11 +7,10 @@ import aiofiles
import aiohttp
from fastapi import Depends, HTTPException
from domain.auth.service import get_current_active_user
from domain.auth.types import User
from domain.offline_downloads.types import OfflineDownloadCreate
from domain.virtual_fs.service import VirtualFSService
from domain.tasks.task_queue import Task, TaskProgress, task_queue_service
from domain.auth import User, get_current_active_user
from domain.tasks import Task, TaskProgress, task_queue_service
from domain.virtual_fs import VirtualFSService
from .types import OfflineDownloadCreate
class OfflineDownloadService:

View File

@@ -4,9 +4,9 @@ Foxel 插件系统
提供 .foxpkg 插件包的安装、管理和运行时加载功能。
"""
from domain.plugins.loader import PluginLoader, PluginLoadError
from domain.plugins.service import PluginService
from domain.plugins.startup import init_plugins, load_installed_plugins
from .loader import PluginLoadError, PluginLoader
from .service import PluginService
from .startup import init_plugins, load_installed_plugins
__all__ = [
"PluginLoader",

View File

@@ -8,8 +8,8 @@ from fastapi import APIRouter, File, Request, UploadFile
from fastapi.responses import FileResponse
from domain.audit import AuditAction, audit
from domain.plugins.service import PluginService
from domain.plugins.types import (
from .service import PluginService
from .types import (
PluginInstallResult,
PluginOut,
)
@@ -67,10 +67,12 @@ async def delete_plugin(request: Request, key_or_id: str):
async def get_bundle(request: Request, key_or_id: str):
"""获取插件前端 bundle"""
path = await PluginService.get_bundle_path(key_or_id)
v = (request.query_params.get("v") or "").strip()
cache_control = "public, max-age=31536000, immutable" if v else "no-cache"
return FileResponse(
path,
media_type="application/javascript",
headers={"Cache-Control": "no-store"},
headers={"Cache-Control": cache_control},
)

View File

@@ -20,7 +20,7 @@ from typing import Any, Dict, List, Optional, Tuple
from fastapi import APIRouter
from domain.plugins.types import (
from .types import (
ManifestProcessorConfig,
ManifestRouteConfig,
PluginManifest,
@@ -344,7 +344,7 @@ class PluginLoader:
supported_exts = getattr(module, "SUPPORTED_EXTS", [])
# 注册到处理器注册表
from domain.processors.registry import CONFIG_SCHEMAS, TYPE_MAP
from domain.processors import CONFIG_SCHEMAS, TYPE_MAP
processor_type = processor_config.type
TYPE_MAP[processor_type] = factory
@@ -401,7 +401,7 @@ class PluginLoader:
"""
# 卸载处理器
if manifest and manifest.backend and manifest.backend.processors:
from domain.processors.registry import CONFIG_SCHEMAS, TYPE_MAP
from domain.processors import CONFIG_SCHEMAS, TYPE_MAP
for proc_config in manifest.backend.processors:
proc_type = proc_config.type

View File

@@ -12,8 +12,8 @@ from typing import List, Optional, Union
from fastapi import HTTPException
from domain.plugins.loader import PluginLoadError, PluginLoader
from domain.plugins.types import (
from .loader import PluginLoadError, PluginLoader
from .types import (
PluginInstallResult,
PluginManifest,
PluginOut,

View File

@@ -7,8 +7,8 @@
import logging
from typing import TYPE_CHECKING, List, Tuple
from domain.plugins.loader import PluginLoadError, PluginLoader
from domain.plugins.types import PluginManifest
from .loader import PluginLoadError, PluginLoader
from .types import PluginManifest
if TYPE_CHECKING:
from fastapi import FastAPI
@@ -113,4 +113,3 @@ async def init_plugins(app: "FastAPI") -> None:
logger.warning(f" - {error}")
else:
logger.info(f"插件加载完成,共 {loaded_count} 个插件")

View File

@@ -0,0 +1,35 @@
from .base import BaseProcessor
from .registry import (
CONFIG_SCHEMAS,
TYPE_MAP,
get_config_schema,
get_config_schemas,
get_last_discovery_errors,
get_module_path,
reload_processors,
)
from .service import (
ProcessorService,
get_processor,
list_processors,
reload_processor_modules,
)
from .types import ProcessDirectoryRequest, ProcessRequest, UpdateSourceRequest
__all__ = [
"BaseProcessor",
"CONFIG_SCHEMAS",
"TYPE_MAP",
"get_config_schema",
"get_config_schemas",
"get_last_discovery_errors",
"get_module_path",
"reload_processors",
"ProcessorService",
"get_processor",
"list_processors",
"reload_processor_modules",
"ProcessDirectoryRequest",
"ProcessRequest",
"UpdateSourceRequest",
]

View File

@@ -4,10 +4,9 @@ from fastapi import APIRouter, Body, Depends, Request
from api.response import success
from domain.audit import AuditAction, audit
from domain.auth.service import get_current_active_user
from domain.auth.types import User
from domain.processors.service import ProcessorService
from domain.processors.types import (
from domain.auth import User, get_current_active_user
from .service import ProcessorService
from .types import (
ProcessDirectoryRequest,
ProcessRequest,
UpdateSourceRequest,

View File

@@ -8,12 +8,14 @@ from fastapi.responses import Response
from PIL import Image
from ..base import BaseProcessor
from domain.ai.inference import describe_image_base64, get_text_embedding, provider_service
from domain.ai.service import (
VectorDBService,
from domain.ai import (
DEFAULT_VECTOR_DIMENSION,
VECTOR_COLLECTION_NAME,
FILE_COLLECTION_NAME,
VECTOR_COLLECTION_NAME,
VectorDBService,
describe_image_base64,
get_text_embedding,
provider_service,
)

View File

@@ -5,7 +5,7 @@ from pathlib import Path
from types import ModuleType
from typing import Callable, Dict, Optional
from domain.processors.base import BaseProcessor
from .base import BaseProcessor
ProcessorFactory = Callable[[], BaseProcessor]
TYPE_MAP: Dict[str, ProcessorFactory] = {}
@@ -16,7 +16,7 @@ LAST_DISCOVERY_ERRORS: list[str] = []
def discover_processors(force_reload: bool = False) -> list[str]:
"""扫描并缓存可用的处理器模块。"""
from domain.processors import builtin as processors_pkg
from . import builtin as processors_pkg
TYPE_MAP.clear()
CONFIG_SCHEMAS.clear()

View File

@@ -3,20 +3,20 @@ from typing import List, Tuple
from fastapi import HTTPException
from fastapi.concurrency import run_in_threadpool
from domain.processors.registry import (
from domain.tasks import task_queue_service
from domain.virtual_fs import VirtualFSService
from .registry import (
get,
get_config_schema,
get_config_schemas,
get_module_path,
reload_processors,
)
from domain.processors.types import (
from .types import (
ProcessDirectoryRequest,
ProcessRequest,
UpdateSourceRequest,
)
from domain.virtual_fs.service import VirtualFSService
from domain.tasks.task_queue import task_queue_service
class ProcessorService:

View File

@@ -0,0 +1 @@
__all__: list[str] = []

10
domain/share/__init__.py Normal file
View File

@@ -0,0 +1,10 @@
from .service import ShareService
from .types import ShareCreate, ShareInfo, ShareInfoWithPassword, SharePassword
__all__ = [
"ShareService",
"ShareCreate",
"ShareInfo",
"ShareInfoWithPassword",
"SharePassword",
]

View File

@@ -4,10 +4,9 @@ from fastapi import APIRouter, Depends, Request
from api.response import success
from domain.audit import AuditAction, audit
from domain.auth.service import get_current_active_user
from domain.auth.types import User
from domain.share.service import ShareService
from domain.share.types import (
from domain.auth import User, get_current_active_user
from .service import ShareService
from .types import (
ShareCreate,
ShareInfo,
ShareInfoWithPassword,

View File

@@ -7,7 +7,7 @@ import bcrypt
from fastapi import HTTPException, status
from fastapi.responses import Response
from domain.virtual_fs.service import VirtualFSService
from domain.virtual_fs import VirtualFSService
from models.database import ShareLink, UserAccount

24
domain/tasks/__init__.py Normal file
View File

@@ -0,0 +1,24 @@
from .service import TaskService
from .task_queue import Task, TaskProgress, TaskStatus, task_queue_service
from .types import (
AutomationTaskBase,
AutomationTaskCreate,
AutomationTaskRead,
AutomationTaskUpdate,
TaskQueueSettings,
TaskQueueSettingsResponse,
)
__all__ = [
"TaskService",
"Task",
"TaskProgress",
"TaskStatus",
"task_queue_service",
"AutomationTaskBase",
"AutomationTaskCreate",
"AutomationTaskRead",
"AutomationTaskUpdate",
"TaskQueueSettings",
"TaskQueueSettingsResponse",
]

View File

@@ -2,9 +2,9 @@ from fastapi import APIRouter, Depends, Request
from api.response import success
from domain.audit import AuditAction, audit
from domain.auth.service import get_current_active_user
from domain.tasks.service import TaskService
from domain.tasks.types import (
from domain.auth import get_current_active_user
from .service import TaskService
from .types import (
AutomationTaskCreate,
AutomationTaskUpdate,
TaskQueueSettings,

View File

@@ -3,17 +3,16 @@ from typing import Annotated, Any, Dict, Optional
from fastapi import Depends, HTTPException
from domain.auth.service import get_current_active_user
from domain.auth.types import User
from domain.config.service import ConfigService
from domain.tasks.types import (
from domain.auth import User, get_current_active_user
from domain.config import ConfigService
from .task_queue import task_queue_service
from .types import (
AutomationTaskCreate,
AutomationTaskUpdate,
TaskQueueSettings,
TaskQueueSettingsResponse,
)
from models.database import AutomationTask
from domain.tasks.task_queue import task_queue_service
class TaskService:

View File

@@ -74,7 +74,7 @@ class TaskQueueService:
try:
# Local import to avoid circular dependency during module load.
from domain.virtual_fs.service import VirtualFSService
from domain.virtual_fs import VirtualFSService
if task.name == "process_file":
params = task.task_info
@@ -88,7 +88,7 @@ class TaskQueueService:
task.result = result
elif task.name == "automation_task" or self._is_processor_task(task.name):
from models.database import AutomationTask
from domain.processors.service import get_processor
from domain.processors import get_processor
params = task.task_info
auto_task = await AutomationTask.get(id=params["task_id"])
@@ -116,7 +116,7 @@ class TaskQueueService:
await VirtualFSService.write_file(save_to, result)
task.result = "Automation task completed"
elif task.name == "offline_http_download":
from domain.offline_downloads.service import OfflineDownloadService
from domain.offline_downloads import OfflineDownloadService
result_path = await OfflineDownloadService.run_http_download(task)
task.result = {"path": result_path}
@@ -124,7 +124,7 @@ class TaskQueueService:
result = await VirtualFSService.run_cross_mount_transfer_task(task)
task.result = result
elif task.name == "send_email":
from domain.email.service import EmailService
from domain.email import EmailService
await EmailService.send_from_task(task.id, task.task_info)
task.result = "Email sent"
else:
@@ -141,7 +141,7 @@ class TaskQueueService:
def _is_processor_task(self, task_name: str) -> bool:
try:
from domain.processors.service import get_processor
from domain.processors import get_processor
return get_processor(task_name) is not None
except Exception:
@@ -180,7 +180,7 @@ class TaskQueueService:
async def start_worker(self, concurrency: int | None = None):
if concurrency is None:
from domain.config.service import ConfigService
from domain.config import ConfigService
stored_value = await ConfigService.get("TASK_QUEUE_CONCURRENCY", self._concurrency)
try:

View File

@@ -0,0 +1,11 @@
from .service import VirtualFSService
from .types import DirListing, MkdirRequest, MoveRequest, SearchResultItem, VfsEntry
__all__ = [
"VirtualFSService",
"DirListing",
"MkdirRequest",
"MoveRequest",
"SearchResultItem",
"VfsEntry",
]

View File

@@ -4,10 +4,9 @@ from fastapi import APIRouter, Depends, File, Query, Request, UploadFile
from api.response import success
from domain.audit import AuditAction, audit
from domain.auth.service import get_current_active_user
from domain.auth.types import User
from domain.virtual_fs.service import VirtualFSService
from domain.virtual_fs.types import MkdirRequest, MoveRequest
from domain.auth import User, get_current_active_user
from .service import VirtualFSService
from .types import MkdirRequest, MoveRequest
router = APIRouter(prefix="/api/fs", tags=["virtual-fs"])

View File

@@ -4,8 +4,8 @@ from typing import Any, AsyncIterator, Union
from fastapi import HTTPException
from fastapi.responses import Response
from domain.tasks.service import TaskService
from domain.virtual_fs.thumbnail import is_raw_filename, raw_bytes_to_jpeg
from domain.tasks import TaskService
from .thumbnail import is_raw_filename, raw_bytes_to_jpeg
from .listing import VirtualFSListingMixin

View File

@@ -3,9 +3,9 @@ from typing import Any, Dict, List, Tuple
from fastapi import HTTPException
from api.response import page
from domain.adapters.registry import runtime_registry
from domain.ai.service import VectorDBService, VECTOR_COLLECTION_NAME, FILE_COLLECTION_NAME
from domain.virtual_fs.thumbnail import is_image_filename, is_video_filename
from domain.adapters import runtime_registry
from domain.ai import FILE_COLLECTION_NAME, VECTOR_COLLECTION_NAME, VectorDBService
from .thumbnail import is_image_filename, is_video_filename
from models import StorageAdapter
from .resolver import VirtualFSResolverMixin

View File

@@ -0,0 +1 @@
__all__: list[str] = []

View File

@@ -15,8 +15,8 @@ from fastapi import APIRouter, Request, Response
from fastapi import HTTPException
from domain.audit import AuditAction, audit
from domain.config.service import ConfigService
from domain.virtual_fs.service import VirtualFSService
from domain.config import ConfigService
from domain.virtual_fs import VirtualFSService
router = APIRouter(prefix="/s3", tags=["s3"])

View File

@@ -9,10 +9,9 @@ from fastapi import APIRouter, Request, Response, HTTPException, Depends
import xml.etree.ElementTree as ET
from domain.audit import AuditAction, audit
from domain.auth.service import AuthService
from domain.auth.types import User, UserInDB
from domain.virtual_fs.service import VirtualFSService
from domain.config.service import ConfigService
from domain.auth import AuthService, User, UserInDB
from domain.config import ConfigService
from domain.virtual_fs import VirtualFSService
_WEBDAV_ENABLED_KEY = "WEBDAV_MAPPING_ENABLED"
@@ -172,12 +171,32 @@ async def propfind(
ctype = None if is_dir else (mimetypes.guess_type(name)[0] or "application/octet-stream")
responses.append(_build_prop_response(full_path, name, is_dir, size, mtime, ctype))
except FileNotFoundError:
raise HTTPException(404, detail="Not found")
st = None
except HTTPException as e:
if e.status_code != 404:
raise
st = None
if st is None:
is_mount_root = False
try:
_, rel = await VirtualFSService.resolve_adapter_by_path(full_path)
is_mount_root = rel == ""
except HTTPException:
is_mount_root = False
if not is_mount_root and full_path != "/":
listing_probe = await VirtualFSService.list_virtual_dir(full_path, page_num=1, page_size=1)
if not (listing_probe.get("items") or []):
raise HTTPException(404, detail="Not found")
name = "/" if full_path == "/" else (full_path.rstrip("/").rsplit("/", 1)[-1] or "/")
responses.append(_build_prop_response(full_path, name, True, None, 0, None))
if depth in ("1", "infinity"):
try:
listing = await VirtualFSService.list_virtual_dir(full_path, page_num=1, page_size=1000)
for ent in listing["items"]:
for ent in (listing.get("items") or []):
is_dir = bool(ent.get("is_dir"))
name = ent.get("name")
child_path = full_path.rstrip("/") + "/" + name

View File

@@ -16,7 +16,7 @@ class VirtualFSProcessingMixin(VirtualFSTransferMixin):
save_to: str | None = None,
overwrite: bool = False,
) -> Any:
from domain.processors.service import get_processor
from domain.processors import get_processor
processor = get_processor(processor_type)
if not processor:

View File

@@ -3,7 +3,7 @@ from typing import Tuple
from fastapi import HTTPException
from fastapi.responses import Response
from domain.adapters.registry import runtime_registry
from domain.adapters import runtime_registry
from models import StorageAdapter
from .common import VirtualFSCommonMixin

View File

@@ -4,8 +4,8 @@ import re
from fastapi import HTTPException, UploadFile
from fastapi.responses import Response
from domain.config.service import ConfigService
from domain.virtual_fs.thumbnail import (
from domain.config import ConfigService
from .thumbnail import (
get_or_create_thumb,
is_image_filename,
is_raw_filename,

View File

@@ -0,0 +1,3 @@
from .search_service import VirtualFSSearchService
__all__ = ["VirtualFSSearchService"]

View File

@@ -1,9 +1,8 @@
from fastapi import APIRouter, Depends, Query
from api.response import success
from domain.auth.service import get_current_active_user
from domain.auth.types import User
from domain.virtual_fs.search.search_service import VirtualFSSearchService
from domain.auth import User, get_current_active_user
from .search_service import VirtualFSSearchService
router = APIRouter(prefix="/api/fs/search", tags=["search"])

View File

@@ -1,8 +1,7 @@
from typing import Any, Dict, List, Tuple
from domain.virtual_fs.types import SearchResultItem
from domain.ai.inference import get_text_embedding
from domain.ai.service import VectorDBService, VECTOR_COLLECTION_NAME, FILE_COLLECTION_NAME
from domain.ai import FILE_COLLECTION_NAME, VECTOR_COLLECTION_NAME, VectorDBService, get_text_embedding
from ..types import SearchResultItem
def _normalize_result(raw: Dict[str, Any], source: str, fallback_score: float = 0.0) -> SearchResultItem:

View File

@@ -5,7 +5,7 @@ import time
from fastapi import HTTPException
from domain.config.service import ConfigService
from domain.config import ConfigService
from .processing import VirtualFSProcessingMixin

View File

@@ -273,7 +273,7 @@ class VirtualFSTransferMixin(VirtualFSFileOpsMixin):
"overwrite": overwrite,
}
from domain.tasks.task_queue import task_queue_service
from domain.tasks import task_queue_service
task = await task_queue_service.add_task("cross_mount_transfer", payload)
return {
@@ -286,7 +286,7 @@ class VirtualFSTransferMixin(VirtualFSFileOpsMixin):
@classmethod
async def run_cross_mount_transfer_task(cls, task: "Task") -> Dict[str, Any]:
from domain.tasks.task_queue import task_queue_service
from domain.tasks import task_queue_service
params = task.task_info or {}
operation = params.get("operation")

51
main.py
View File

@@ -2,15 +2,16 @@ import os
from pathlib import Path
from contextlib import asynccontextmanager
from domain.config.service import ConfigService, VERSION
from domain.adapters.registry import runtime_registry
from domain.adapters import runtime_registry
from domain.config import ConfigService, VERSION
from db.session import close_db, init_db
from api.routers import include_routers
from fastapi.middleware.cors import CORSMiddleware
from fastapi.staticfiles import StaticFiles
from fastapi.responses import FileResponse
from fastapi import FastAPI, HTTPException, Request
from fastapi import FastAPI, HTTPException
from fastapi.exceptions import RequestValidationError
from starlette.exceptions import HTTPException as StarletteHTTPException
from middleware.exception_handler import (
global_exception_handler,
http_exception_handler,
@@ -19,34 +20,45 @@ from middleware.exception_handler import (
)
import httpx
from dotenv import load_dotenv
from domain.tasks.task_queue import task_queue_service
from domain.tasks import task_queue_service
load_dotenv()
class SPAStaticFiles(StaticFiles):
async def get_response(self, path, scope):
response = await super().get_response(path, scope)
if response.status_code == 404:
return await super().get_response("index.html", scope)
try:
response = await super().get_response(path, scope)
except StarletteHTTPException as exc:
if exc.status_code != 404:
raise
if self._should_spa_fallback(scope):
return FileResponse(INDEX_FILE)
raise
if response.status_code == 404 and self._should_spa_fallback(scope):
return FileResponse(INDEX_FILE)
return response
@staticmethod
def _should_spa_fallback(scope) -> bool:
return (
scope.get("method") == "GET"
and _request_accepts_html(scope)
and not (scope.get("path") or "").startswith(SPA_EXCLUDE_PREFIXES)
and INDEX_FILE.exists()
)
INDEX_FILE = Path("web/dist/index.html")
SPA_EXCLUDE_PREFIXES = ("/api", "/docs", "/openapi.json", "/webdav", "/s3")
async def spa_fallback_middleware(request: Request, call_next):
response = await call_next(request)
if (
response.status_code == 404
and request.method == "GET"
and "text/html" in request.headers.get("accept", "")
and not request.url.path.startswith(SPA_EXCLUDE_PREFIXES)
and INDEX_FILE.exists()
):
return FileResponse(INDEX_FILE)
return response
def _request_accepts_html(scope) -> bool:
for k, v in scope.get("headers") or []:
if k == b"accept":
return "text/html" in v.decode("latin-1")
return False
@asynccontextmanager
@@ -59,7 +71,7 @@ async def lifespan(app: FastAPI):
await task_queue_service.start_worker()
# 加载已安装的插件
from domain.plugins.startup import init_plugins
from domain.plugins import init_plugins
await init_plugins(app)
# 在所有路由加载完成后,挂载静态文件服务(放在最后以避免覆盖 API 路由)
@@ -78,7 +90,6 @@ def create_app() -> FastAPI:
description="A highly extensible private cloud storage solution for individuals and teams",
lifespan=lifespan,
)
app.middleware("http")(spa_fallback_middleware)
include_routers(app)
app.add_exception_handler(HTTPException, http_exception_handler)
app.add_exception_handler(RequestValidationError, validation_exception_handler)

View File

@@ -13,8 +13,8 @@ PROJECT_ROOT = Path(__file__).resolve().parents[1]
if str(PROJECT_ROOT) not in sys.path:
sys.path.insert(0, str(PROJECT_ROOT))
from domain.auth.service import get_password_hash
from domain.config.service import VERSION
from domain.config import VERSION
from domain.auth import get_password_hash
def _project_root() -> Path:

View File

@@ -0,0 +1 @@
<svg fill="currentColor" fill-rule="evenodd" height="1em" style="flex:none;line-height:1" viewBox="0 0 24 24" width="1em" xmlns="http://www.w3.org/2000/svg"><title>Anthropic</title><path d="M13.827 3.52h3.603L24 20h-3.603l-6.57-16.48zm-7.258 0h3.767L16.906 20h-3.674l-1.343-3.461H5.017l-1.344 3.46H0L6.57 3.522zm4.132 9.959L8.453 7.687 6.205 13.48H10.7z"></path></svg>

After

Width:  |  Height:  |  Size: 368 B

View File

@@ -0,0 +1 @@
<svg height="1em" style="flex:none;line-height:1" viewBox="0 0 24 24" width="1em" xmlns="http://www.w3.org/2000/svg"><title>Azure</title><path d="M7.242 1.613A1.11 1.11 0 018.295.857h6.977L8.03 22.316a1.11 1.11 0 01-1.052.755h-5.43a1.11 1.11 0 01-1.053-1.466L7.242 1.613z" fill="url(#lobe-icons-azure-fill-0)"></path><path d="M18.397 15.296H7.4a.51.51 0 00-.347.882l7.066 6.595c.206.192.477.298.758.298h6.226l-2.706-7.775z" fill="#0078D4"></path><path d="M15.272.857H7.497L0 23.071h7.775l1.596-4.73 5.068 4.73h6.665l-2.707-7.775h-7.998L15.272.857z" fill="url(#lobe-icons-azure-fill-1)"></path><path d="M17.193 1.613a1.11 1.11 0 00-1.052-.756h-7.81.035c.477 0 .9.304 1.052.756l6.748 19.992a1.11 1.11 0 01-1.052 1.466h-.12 7.895a1.11 1.11 0 001.052-1.466L17.193 1.613z" fill="url(#lobe-icons-azure-fill-2)"></path><defs><linearGradient gradientUnits="userSpaceOnUse" id="lobe-icons-azure-fill-0" x1="8.247" x2="1.002" y1="1.626" y2="23.03"><stop stop-color="#114A8B"></stop><stop offset="1" stop-color="#0669BC"></stop></linearGradient><linearGradient gradientUnits="userSpaceOnUse" id="lobe-icons-azure-fill-1" x1="14.042" x2="12.324" y1="15.302" y2="15.888"><stop stop-opacity=".3"></stop><stop offset=".071" stop-opacity=".2"></stop><stop offset=".321" stop-opacity=".1"></stop><stop offset=".623" stop-opacity=".05"></stop><stop offset="1" stop-opacity="0"></stop></linearGradient><linearGradient gradientUnits="userSpaceOnUse" id="lobe-icons-azure-fill-2" x1="12.841" x2="20.793" y1="1.626" y2="22.814"><stop stop-color="#3CCBF4"></stop><stop offset="1" stop-color="#2892DF"></stop></linearGradient></defs></svg>

After

Width:  |  Height:  |  Size: 1.6 KiB

View File

@@ -0,0 +1 @@
<svg fill="currentColor" fill-rule="evenodd" height="1em" style="flex:none;line-height:1" viewBox="0 0 24 24" width="1em" xmlns="http://www.w3.org/2000/svg"><title>Ollama</title><path d="M7.905 1.09c.216.085.411.225.588.41.295.306.544.744.734 1.263.191.522.315 1.1.362 1.68a5.054 5.054 0 012.049-.636l.051-.004c.87-.07 1.73.087 2.48.474.101.053.2.11.297.17.05-.569.172-1.134.36-1.644.19-.52.439-.957.733-1.264a1.67 1.67 0 01.589-.41c.257-.1.53-.118.796-.042.401.114.745.368 1.016.737.248.337.434.769.561 1.287.23.934.27 2.163.115 3.645l.053.04.026.019c.757.576 1.284 1.397 1.563 2.35.435 1.487.216 3.155-.534 4.088l-.018.021.002.003c.417.762.67 1.567.724 2.4l.002.03c.064 1.065-.2 2.137-.814 3.19l-.007.01.01.024c.472 1.157.62 2.322.438 3.486l-.006.039a.651.651 0 01-.747.536.648.648 0 01-.54-.742c.167-1.033.01-2.069-.48-3.123a.643.643 0 01.04-.617l.004-.006c.604-.924.854-1.83.8-2.72-.046-.779-.325-1.544-.8-2.273a.644.644 0 01.18-.886l.009-.006c.243-.159.467-.565.58-1.12a4.229 4.229 0 00-.095-1.974c-.205-.7-.58-1.284-1.105-1.683-.595-.454-1.383-.673-2.38-.61a.653.653 0 01-.632-.371c-.314-.665-.772-1.141-1.343-1.436a3.288 3.288 0 00-1.772-.332c-1.245.099-2.343.801-2.67 1.686a.652.652 0 01-.61.425c-1.067.002-1.893.252-2.497.703-.522.39-.878.935-1.066 1.588a4.07 4.07 0 00-.068 1.886c.112.558.331 1.02.582 1.269l.008.007c.212.207.257.53.109.785-.36.622-.629 1.549-.673 2.44-.05 1.018.186 1.902.719 2.536l.016.019a.643.643 0 01.095.69c-.576 1.236-.753 2.252-.562 3.052a.652.652 0 01-1.269.298c-.243-1.018-.078-2.184.473-3.498l.014-.035-.008-.012a4.339 4.339 0 01-.598-1.309l-.005-.019a5.764 5.764 0 01-.177-1.785c.044-.91.278-1.842.622-2.59l.012-.026-.002-.002c-.293-.418-.51-.953-.63-1.545l-.005-.024a5.352 5.352 0 01.093-2.49c.262-.915.777-1.701 1.536-2.269.06-.045.123-.09.186-.132-.159-1.493-.119-2.73.112-3.67.127-.518.314-.95.562-1.287.27-.368.614-.622 1.015-.737.266-.076.54-.059.797.042zm4.116 9.09c.936 0 1.8.313 2.446.855.63.527 1.005 1.235 1.005 1.94 0 .888-.406 1.58-1.133 
2.022-.62.375-1.451.557-2.403.557-1.009 0-1.871-.259-2.493-.734-.617-.47-.963-1.13-.963-1.845 0-.707.398-1.417 1.056-1.946.668-.537 1.55-.849 2.485-.849zm0 .896a3.07 3.07 0 00-1.916.65c-.461.37-.722.835-.722 1.25 0 .428.21.829.61 1.134.455.347 1.124.548 1.943.548.799 0 1.473-.147 1.932-.426.463-.28.7-.686.7-1.257 0-.423-.246-.89-.683-1.256-.484-.405-1.14-.643-1.864-.643zm.662 1.21l.004.004c.12.151.095.37-.056.49l-.292.23v.446a.375.375 0 01-.376.373.375.375 0 01-.376-.373v-.46l-.271-.218a.347.347 0 01-.052-.49.353.353 0 01.494-.051l.215.172.22-.174a.353.353 0 01.49.051zm-5.04-1.919c.478 0 .867.39.867.871a.87.87 0 01-.868.871.87.87 0 01-.867-.87.87.87 0 01.867-.872zm8.706 0c.48 0 .868.39.868.871a.87.87 0 01-.868.871.87.87 0 01-.867-.87.87.87 0 01.867-.872zM7.44 2.3l-.003.002a.659.659 0 00-.285.238l-.005.006c-.138.189-.258.467-.348.832-.17.692-.216 1.631-.124 2.782.43-.128.899-.208 1.404-.237l.01-.001.019-.034c.046-.082.095-.161.148-.239.123-.771.022-1.692-.253-2.444-.134-.364-.297-.65-.453-.813a.628.628 0 00-.107-.09L7.44 2.3zm9.174.04l-.002.001a.628.628 0 00-.107.09c-.156.163-.32.45-.453.814-.29.794-.387 1.776-.23 2.572l.058.097.008.014h.03a5.184 5.184 0 011.466.212c.086-1.124.038-2.043-.128-2.722-.09-.365-.21-.643-.349-.832l-.004-.006a.659.659 0 00-.285-.239h-.004z"></path></svg>

After

Width:  |  Height:  |  Size: 3.2 KiB

1
web/public/icon/zai.svg Normal file
View File

@@ -0,0 +1 @@
<svg fill="currentColor" fill-rule="evenodd" height="1em" style="flex:none;line-height:1" viewBox="0 0 24 24" width="1em" xmlns="http://www.w3.org/2000/svg"><title>Z.ai</title><path d="M12.105 2L9.927 4.953H.653L2.83 2h9.276zM23.254 19.048L21.078 22h-9.242l2.174-2.952h9.244zM24 2L9.264 22H0L14.736 2H24z"></path></svg>

After

Width:  |  Height:  |  Size: 319 B

121
web/src/api/agent.ts Normal file
View File

@@ -0,0 +1,121 @@
import request, { API_BASE_URL } from './client';
export type AgentChatMessage = Record<string, any>;
export interface AgentChatContext {
current_path?: string | null;
}
export interface AgentChatRequest {
messages: AgentChatMessage[];
auto_execute?: boolean;
approved_tool_call_ids?: string[];
rejected_tool_call_ids?: string[];
context?: AgentChatContext;
}
export interface PendingToolCall {
id: string;
name: string;
arguments: Record<string, any>;
requires_confirmation: boolean;
}
export interface AgentChatResponse {
messages: AgentChatMessage[];
pending_tool_calls?: PendingToolCall[];
}
export type AgentSseEvent =
| { event: 'assistant_start'; data: { id: string } }
| { event: 'assistant_delta'; data: { id: string; delta: string } }
| { event: 'assistant_end'; data: { id: string; message: AgentChatMessage } }
| { event: 'tool_start'; data: { tool_call_id: string; name: string } }
| { event: 'tool_end'; data: { tool_call_id: string; name: string; message: AgentChatMessage } }
| { event: 'pending'; data: { pending_tool_calls: PendingToolCall[] } }
| { event: 'done'; data: AgentChatResponse };
export const agentApi = {
chat: (payload: AgentChatRequest) =>
request<AgentChatResponse>('/agent/chat', {
method: 'POST',
json: payload,
}),
chatStream: async (
payload: AgentChatRequest,
onEvent: (evt: AgentSseEvent) => void,
options?: { signal?: AbortSignal }
) => {
const headers: Record<string, string> = {
'Content-Type': 'application/json',
'Accept': 'text/event-stream',
};
const token = localStorage.getItem('token');
if (token) headers['Authorization'] = `Bearer ${token}`;
const resp = await fetch(`${API_BASE_URL}/agent/chat/stream`, {
method: 'POST',
headers,
body: JSON.stringify(payload),
signal: options?.signal,
});
if (!resp.ok) {
let errMsg = resp.statusText;
try {
const data = await resp.json();
if (Array.isArray((data as any)?.detail)) {
errMsg = (data as any).detail.map((e: any) => e.msg || JSON.stringify(e)).join('; ');
} else {
errMsg = (typeof (data as any)?.detail === 'string') ? (data as any).detail : JSON.stringify(data);
}
} catch {
try {
errMsg = await resp.text();
} catch { void 0; }
}
throw new Error(errMsg || `Request failed: ${resp.status}`);
}
const reader = resp.body?.getReader();
if (!reader) throw new Error('Stream not supported');
const decoder = new TextDecoder();
let buffer = '';
const flush = (raw: string) => {
const lines = raw.split(/\r?\n/);
let eventName = 'message';
const dataLines: string[] = [];
for (const line of lines) {
if (line.startsWith('event:')) {
eventName = line.slice(6).trim();
} else if (line.startsWith('data:')) {
dataLines.push(line.slice(5).trimStart());
}
}
const dataStr = dataLines.join('\n').trim();
if (!eventName || !dataStr) return;
try {
const data = JSON.parse(dataStr);
onEvent({ event: eventName as any, data } as any);
} catch {
// ignore parse error
}
};
while (true) {
const { value, done } = await reader.read();
if (done) break;
buffer += decoder.decode(value, { stream: true });
while (true) {
const idx = buffer.indexOf('\n\n');
if (idx === -1) break;
const chunk = buffer.slice(0, idx);
buffer = buffer.slice(idx + 2);
if (chunk.trim()) flush(chunk);
}
}
if (buffer.trim()) flush(buffer);
},
};

View File

@@ -6,15 +6,16 @@ export interface AIProviderPayload {
name: string;
identifier: string;
provider_type?: string | null;
api_format: 'openai' | 'gemini';
api_format: 'openai' | 'gemini' | 'anthropic' | 'ollama';
base_url?: string | null;
api_key?: string | null;
logo_url?: string | null;
extra_config?: Record<string, unknown> | null;
}
export interface AIProvider extends Omit<AIProviderPayload, 'extra_config'> {
export interface AIProvider extends Omit<AIProviderPayload, 'extra_config' | 'api_key'> {
id: number;
has_api_key: boolean;
extra_config: Record<string, unknown>;
created_at: string;
updated_at: string;

View File

@@ -7,10 +7,22 @@ export interface PluginAppHostProps extends AppComponentProps {
}
function buildPluginFrameUrl(params: Record<string, string>): string {
const qs = new URLSearchParams(params);
const qs = new URLSearchParams();
Object.entries(params).forEach(([k, v]) => {
if (typeof v !== 'string') return;
const value = v.trim();
if (!value) return;
qs.set(k, value);
});
return `/plugin-frame.html?${qs.toString()}`;
}
function getPluginStylePaths(plugin: PluginItem): string[] {
const styles = (plugin.manifest as any)?.frontend?.styles as unknown;
if (!Array.isArray(styles)) return [];
return styles.filter((s) => typeof s === 'string' && s.trim().length > 0);
}
/**
* 插件宿主组件 - 文件打开模式
* 使用 iframe 隔离渲染与样式,避免插件污染宿主 DOM/CSS。
@@ -19,6 +31,7 @@ function buildPluginFrameUrl(params: Record<string, string>): string {
export const PluginAppHost: React.FC<PluginAppHostProps> = ({
plugin,
filePath,
entry,
onRequestClose,
}) => {
const iframeRef = useRef<HTMLIFrameElement>(null);
@@ -29,10 +42,13 @@ export const PluginAppHost: React.FC<PluginAppHostProps> = ({
() =>
buildPluginFrameUrl({
pluginKey: plugin.key,
pluginVersion: plugin.version || '',
pluginStyles: JSON.stringify(getPluginStylePaths(plugin)),
mode: 'file',
filePath,
entry: JSON.stringify(entry),
}),
[plugin.key, filePath]
[plugin, filePath, entry]
);
useEffect(() => {
@@ -78,9 +94,11 @@ export const PluginAppOpenHost: React.FC<PluginAppOpenHostProps> = ({ plugin, on
() =>
buildPluginFrameUrl({
pluginKey: plugin.key,
pluginVersion: plugin.version || '',
pluginStyles: JSON.stringify(getPluginStylePaths(plugin)),
mode: 'app',
}),
[plugin.key]
[plugin]
);
useEffect(() => {

View File

@@ -0,0 +1,932 @@
import { memo, useCallback, useEffect, useMemo, useRef, useState } from 'react';
import { Avatar, Button, Divider, Drawer, Flex, Input, List, Space, Switch, Tag, Typography, message, theme } from 'antd';
import { RobotOutlined, SendOutlined, FolderOpenOutlined, DeleteOutlined, ToolOutlined, DownOutlined, UpOutlined, CodeOutlined, CopyOutlined, LoadingOutlined } from '@ant-design/icons';
import ReactMarkdown from 'react-markdown';
import PathSelectorModal from './PathSelectorModal';
import { agentApi, type AgentChatMessage, type PendingToolCall } from '../api/agent';
import { useI18n } from '../i18n';
import '../styles/ai-agent.css';
const { Text, Paragraph } = Typography;
function normalizePath(p?: string | null): string | null {
if (!p) return null;
const s = ('/' + p).replace(/\/+/, '/').replace(/\\/g, '/').replace(/\/+$/, '') || '/';
return s;
}
function extractTextContent(content: any): string {
if (content == null) return '';
if (typeof content === 'string') return content;
if (Array.isArray(content)) {
const parts: string[] = [];
for (const item of content) {
if (typeof item === 'string') {
if (item.trim()) parts.push(item);
continue;
}
const text = typeof item?.text === 'string' ? item.text : '';
if (text.trim()) parts.push(text);
}
return parts.join('\n');
}
try {
return JSON.stringify(content, null, 2);
} catch {
return String(content);
}
}
function tryParseJson<T = any>(raw: string): T | null {
if (typeof raw !== 'string') return null;
const s = raw.trim();
if (!s) return null;
try {
return JSON.parse(s) as T;
} catch {
return null;
}
}
function shortId(id: string, keep: number = 6): string {
const s = String(id || '');
if (s.length <= keep * 2 + 3) return s;
return `${s.slice(0, keep)}${s.slice(-keep)}`;
}
interface AiAgentWidgetProps {
currentPath?: string | null;
open: boolean;
onOpenChange(open: boolean): void;
}
const AiAgentWidget = memo(function AiAgentWidget({ currentPath, open, onOpenChange }: AiAgentWidgetProps) {
const { t } = useI18n();
const { token } = theme.useToken();
const [autoExecute, setAutoExecute] = useState(false);
const [input, setInput] = useState('');
const [loading, setLoading] = useState(false);
const [messages, setMessages] = useState<AgentChatMessage[]>([]);
const [pending, setPending] = useState<PendingToolCall[]>([]);
const [pathModalOpen, setPathModalOpen] = useState(false);
const [expandedTools, setExpandedTools] = useState<Record<string, boolean>>({});
const [expandedRaw, setExpandedRaw] = useState<Record<string, boolean>>({});
const [runningTools, setRunningTools] = useState<Record<string, string>>({});
const scrollRef = useRef<HTMLDivElement | null>(null);
const streamControllerRef = useRef<AbortController | null>(null);
const streamSeqRef = useRef(0);
const baseMessagesRef = useRef<AgentChatMessage[]>([]);
const assistantIndexRef = useRef<Record<string, number>>({});
const toolNameByIdRef = useRef<Record<string, string>>({});
const effectivePath = useMemo(() => normalizePath(currentPath), [currentPath]);
const scrollToBottom = useCallback(() => {
const el = scrollRef.current;
if (!el) return;
el.scrollTop = el.scrollHeight;
}, []);
useEffect(() => {
if (!open) return;
const t = window.setTimeout(scrollToBottom, 0);
return () => window.clearTimeout(t);
}, [messages, open, pending, scrollToBottom]);
useEffect(() => {
return () => {
streamControllerRef.current?.abort();
};
}, []);
const toolCallsById = useMemo(() => {
const map = new Map<string, { name: string; args: Record<string, any> }>();
for (const msg of messages) {
if (!msg || typeof msg !== 'object') continue;
if (msg.role !== 'assistant') continue;
const toolCalls = (msg as any).tool_calls;
if (!Array.isArray(toolCalls)) continue;
for (const call of toolCalls) {
const id = typeof call?.id === 'string' ? call.id : '';
const fn = call?.function;
const name = typeof fn?.name === 'string' ? fn.name : '';
const rawArgs = typeof fn?.arguments === 'string' ? fn.arguments : '';
if (!id || !name) continue;
const parsedArgs = tryParseJson<Record<string, any>>(rawArgs) || {};
map.set(id, { name, args: parsedArgs });
}
}
return map;
}, [messages]);
const runStream = useCallback(async (payload: Partial<Parameters<typeof agentApi.chat>[0]> & { messages: AgentChatMessage[] }) => {
streamControllerRef.current?.abort();
const controller = new AbortController();
streamControllerRef.current = controller;
streamSeqRef.current += 1;
const seq = streamSeqRef.current;
baseMessagesRef.current = payload.messages;
assistantIndexRef.current = {};
setLoading(true);
const approvedIds = payload.approved_tool_call_ids || [];
if (Array.isArray(approvedIds) && approvedIds.length > 0) {
const preRunning: Record<string, string> = {};
approvedIds.forEach((id) => {
if (typeof id === 'string' && id.trim()) preRunning[id] = '';
});
setRunningTools(preRunning);
} else {
setRunningTools({});
}
try {
await agentApi.chatStream(
{
messages: payload.messages,
auto_execute: autoExecute,
context: effectivePath ? { current_path: effectivePath } : undefined,
approved_tool_call_ids: payload.approved_tool_call_ids,
rejected_tool_call_ids: payload.rejected_tool_call_ids,
},
(evt) => {
if (seq !== streamSeqRef.current) return;
switch (evt.event) {
case 'assistant_start': {
const id = String((evt.data as any)?.id || '');
if (!id) return;
setMessages((prev) => {
const idx = prev.length;
assistantIndexRef.current[id] = idx;
return [...prev, { role: 'assistant', content: '' }];
});
return;
}
case 'assistant_delta': {
const id = String((evt.data as any)?.id || '');
const delta = String((evt.data as any)?.delta || '');
if (!id || !delta) return;
setMessages((prev) => {
const idx = assistantIndexRef.current[id];
if (idx === undefined || idx < 0 || idx >= prev.length) return prev;
const cur = prev[idx] as any;
const curContent = typeof cur?.content === 'string' ? cur.content : extractTextContent(cur?.content);
const next = prev.slice();
next[idx] = { ...cur, content: (curContent || '') + delta };
return next;
});
return;
}
case 'assistant_end': {
const id = String((evt.data as any)?.id || '');
const msg = (evt.data as any)?.message;
if (!id || !msg || typeof msg !== 'object') return;
setMessages((prev) => {
const idx = assistantIndexRef.current[id];
if (idx === undefined || idx < 0 || idx >= prev.length) return prev;
const next = prev.slice();
next[idx] = msg;
return next;
});
delete assistantIndexRef.current[id];
return;
}
case 'tool_start': {
const toolCallId = String((evt.data as any)?.tool_call_id || '');
const name = String((evt.data as any)?.name || '');
if (!toolCallId) return;
if (name) toolNameByIdRef.current[toolCallId] = name;
setRunningTools((prev) => ({ ...prev, [toolCallId]: name || prev[toolCallId] || '' }));
return;
}
case 'tool_end': {
const toolCallId = String((evt.data as any)?.tool_call_id || '');
const name = String((evt.data as any)?.name || '');
const msg = (evt.data as any)?.message;
if (toolCallId && name) toolNameByIdRef.current[toolCallId] = name;
if (toolCallId) {
setRunningTools((prev) => {
const next = { ...prev };
delete next[toolCallId];
return next;
});
}
if (msg && typeof msg === 'object') {
setMessages((prev) => [...prev, msg]);
}
return;
}
case 'pending': {
const items = Array.isArray((evt.data as any)?.pending_tool_calls) ? (evt.data as any).pending_tool_calls : [];
setPending(items);
return;
}
case 'done': {
const base = baseMessagesRef.current || [];
const newMessages = Array.isArray((evt.data as any)?.messages) ? (evt.data as any).messages : [];
const nextPending = Array.isArray((evt.data as any)?.pending_tool_calls) ? (evt.data as any).pending_tool_calls : [];
setMessages([...base, ...newMessages]);
setPending(nextPending);
setRunningTools({});
assistantIndexRef.current = {};
return;
}
default:
return;
}
},
{ signal: controller.signal }
);
} catch (err: any) {
if (controller.signal.aborted) return;
message.error(err?.message || t('Operation failed'));
} finally {
if (seq === streamSeqRef.current) {
setLoading(false);
if (controller.signal.aborted) {
setRunningTools({});
assistantIndexRef.current = {};
}
}
}
}, [autoExecute, effectivePath, t]);
const handleSend = useCallback(async () => {
const text = input.trim();
if (!text) return;
if (pending.length > 0) {
message.warning(t('Please confirm pending actions first'));
return;
}
const nextUserMsg: AgentChatMessage = { role: 'user', content: text };
setInput('');
const base = [...messages, nextUserMsg];
setMessages(base);
setPending([]);
await runStream({ messages: base });
}, [input, messages, pending.length, runStream, t]);
const clearChat = useCallback(() => {
streamControllerRef.current?.abort();
setMessages([]);
setPending([]);
setExpandedTools({});
setExpandedRaw({});
setRunningTools({});
}, []);
const approveOne = useCallback(async (id: string) => {
await runStream({ messages, approved_tool_call_ids: [id] });
}, [messages, runStream]);
const rejectOne = useCallback(async (id: string) => {
await runStream({ messages, rejected_tool_call_ids: [id] });
}, [messages, runStream]);
const approveAll = useCallback(async () => {
const ids = pending.map((p) => p.id).filter(Boolean);
if (ids.length === 0) return;
await runStream({ messages, approved_tool_call_ids: ids });
}, [messages, pending, runStream]);
const rejectAll = useCallback(async () => {
const ids = pending.map((p) => p.id).filter(Boolean);
if (ids.length === 0) return;
await runStream({ messages, rejected_tool_call_ids: ids });
}, [messages, pending, runStream]);
const handlePathSelected = useCallback((path: string) => {
const p = normalizePath(path) || '/';
setInput((prev) => (prev.trim() ? `${prev.trim()} ${p}` : p));
setPathModalOpen(false);
}, []);
const messageItems = useMemo(() => {
return messages.filter((m) => {
if (!m || typeof m !== 'object') return false;
const role = typeof (m as any).role === 'string' ? String((m as any).role) : '';
if (!role || role === 'system') return false;
if (role === 'assistant') {
const text = extractTextContent((m as any).content);
return !!text.trim();
}
return true;
});
}, [messages]);
const runningToolEntries = useMemo(() => Object.entries(runningTools).filter(([id]) => !!id), [runningTools]);
const runningToolCount = runningToolEntries.length;
const copyToClipboard = useCallback(async (raw: string) => {
try {
await navigator.clipboard.writeText(raw);
message.success(t('Copied'));
} catch (err: any) {
message.error(err?.message || t('Operation failed'));
}
}, [t]);
const renderToolResultSummary = useCallback((toolName: string, rawContent: string, toolArgs?: Record<string, any> | null) => {
const data = tryParseJson<Record<string, any>>(rawContent);
if (!data) return '';
if (data.canceled) return t('Canceled');
if (data.error) return `${t('Error')}: ${String(data.error)}`;
if (toolName === 'processors_list') {
const processors = Array.isArray(data.processors) ? data.processors : [];
return `${t('Processors')}: ${processors.length}`;
}
if (toolName === 'processors_run') {
const ctx = (() => {
const processorType = typeof toolArgs?.processor_type === 'string' ? toolArgs.processor_type.trim() : '';
const path = typeof toolArgs?.path === 'string' ? toolArgs.path.trim() : '';
const parts = [processorType, path].filter(Boolean);
return parts.length ? parts.join(' · ') : '';
})();
if (typeof data.task_id === 'string') {
return ctx ? `${t('Task submitted')}: ${ctx} · ${shortId(data.task_id)}` : `${t('Task submitted')}: ${shortId(data.task_id)}`;
}
const taskIds = Array.isArray(data.task_ids) ? data.task_ids : [];
const scheduled = typeof data.scheduled === 'number' ? data.scheduled : taskIds.length;
if (scheduled) return ctx ? `${t('Tasks submitted')}: ${ctx} · ${scheduled}` : `${t('Tasks submitted')}: ${scheduled}`;
return t('Task submitted');
}
if (toolName === 'vfs_list_dir') {
const path = typeof data.path === 'string' ? data.path : '';
const entries = Array.isArray(data.entries) ? data.entries : [];
const names = entries
.map((it: any) => String(it?.name || '').trim())
.filter(Boolean)
.slice(0, 3);
const head = `${t('Directory')}: ${path || '/'}`;
const tail = `${entries.length} ${t('items')}`;
const sample = names.length ? ` · ${names.join(', ')}` : '';
return `${head} · ${tail}${sample}`;
}
if (toolName === 'vfs_search') {
const query = typeof data.query === 'string' ? data.query : '';
const items = Array.isArray(data.items) ? data.items : [];
return `${t('Search')}: ${query || '-'} · ${items.length} ${t('results')}`;
}
if (toolName === 'vfs_stat') {
const isDir = Boolean(data.is_dir);
const path = typeof data.path === 'string' ? data.path : '';
return `${t('Info')}: ${path || '-'} · ${isDir ? t('Folder') : t('File')}`;
}
if (toolName === 'vfs_read_text') {
const path = typeof data.path === 'string' ? data.path : '';
const length = typeof data.length === 'number' ? data.length : undefined;
const truncated = Boolean(data.truncated);
const tail = length != null ? ` · ${length} ${t('chars')}${truncated ? `(${t('Truncated')})` : ''}` : '';
return `${t('Read')}: ${path || '-'}${tail}`;
}
if (toolName === 'vfs_write_text') {
const path = typeof data.path === 'string' ? data.path : '';
const bytes = typeof data.bytes === 'number' ? data.bytes : undefined;
return `${t('Write')}: ${path || '-'}${bytes != null ? ` · ${bytes} bytes` : ''}`;
}
if (toolName === 'vfs_mkdir') {
const path = typeof data.path === 'string' ? data.path : '';
return `${t('Created')}: ${path || '-'}`;
}
if (toolName === 'vfs_delete') {
const path = typeof data.path === 'string' ? data.path : '';
return `${t('Deleted')}: ${path || '-'}`;
}
if (toolName === 'vfs_move') {
const src = typeof data.src === 'string' ? data.src : '';
const dst = typeof data.dst === 'string' ? data.dst : '';
return `${t('Moved')}: ${src || '-'}${dst || '-'}`;
}
if (toolName === 'vfs_copy') {
const src = typeof data.src === 'string' ? data.src : '';
const dst = typeof data.dst === 'string' ? data.dst : '';
return `${t('Copied')}: ${src || '-'}${dst || '-'}`;
}
if (toolName === 'vfs_rename') {
const src = typeof data.src === 'string' ? data.src : '';
const dst = typeof data.dst === 'string' ? data.dst : '';
return `${t('Renamed')}: ${src || '-'}${dst || '-'}`;
}
return '';
}, [t]);
const renderToolDetails = useCallback((toolKey: string, toolName: string, rawContent: string) => {
const data = tryParseJson<Record<string, any>>(rawContent);
const showRaw = !!expandedRaw[toolKey];
const toggleRaw = () => setExpandedRaw((prev) => ({ ...prev, [toolKey]: !prev[toolKey] }));
const rawJson = (() => {
if (!rawContent?.trim()) return '';
const parsed = tryParseJson<any>(rawContent);
if (!parsed) return rawContent;
try {
return JSON.stringify(parsed, null, 2);
} catch {
return rawContent;
}
})();
const header = (
<Space size={10} wrap>
<Button
type="text"
size="small"
icon={<CodeOutlined />}
onClick={(e) => { e.stopPropagation(); toggleRaw(); }}
>
{t('Raw JSON')}
</Button>
{showRaw && (
<Button
type="text"
size="small"
icon={<CopyOutlined />}
onClick={(e) => { e.stopPropagation(); void copyToClipboard(rawJson); }}
>
{t('Copy')}
</Button>
)}
</Space>
);
if (toolName === 'processors_list') {
const processors = Array.isArray(data?.processors) ? data!.processors : [];
return (
<div className="fx-agent-tool-details">
{header}
<Divider style={{ margin: '10px 0' }} />
<List
size="small"
dataSource={processors}
locale={{ emptyText: t('No results') }}
renderItem={(item: any) => (
<List.Item>
<Space size={10} wrap>
<Text code style={{ fontVariantNumeric: 'tabular-nums' }}>{String(item?.type || '')}</Text>
<Text>{String(item?.name || '')}</Text>
</Space>
</List.Item>
)}
style={{ background: 'transparent' }}
/>
{showRaw && (
<>
<Divider style={{ margin: '10px 0' }} />
<pre className="fx-agent-pre">{rawJson}</pre>
</>
)}
</div>
);
}
if (toolName === 'vfs_list_dir') {
const path = typeof data?.path === 'string' ? data!.path : '/';
const entries = Array.isArray(data?.entries) ? data!.entries : [];
const pagination = data?.pagination && typeof data.pagination === 'object' ? data.pagination : null;
return (
<div className="fx-agent-tool-details">
{header}
<Divider style={{ margin: '10px 0' }} />
<Space direction="vertical" size={6} style={{ width: '100%' }}>
<Text type="secondary" style={{ fontSize: 12 }}>{t('Directory')}: {path}</Text>
{pagination?.total != null ? (
<Text type="secondary" style={{ fontSize: 12 }}>
{t('Total')}: {String(pagination.total)}
</Text>
) : null}
</Space>
<Divider style={{ margin: '10px 0' }} />
<List
size="small"
dataSource={entries}
locale={{ emptyText: t('No results') }}
renderItem={(item: any) => {
const name = String(item?.name || '');
const type = String(item?.type || (item?.is_dir ? 'dir' : 'file'));
return (
<List.Item>
<Space size={10} wrap style={{ width: '100%', justifyContent: 'space-between' }}>
<Space size={10} wrap>
<Text code style={{ fontVariantNumeric: 'tabular-nums' }}>{type}</Text>
<Text>{name}</Text>
</Space>
{!item?.is_dir && typeof item?.size === 'number' ? (
<Text type="secondary" style={{ fontSize: 12 }}>{item.size} bytes</Text>
) : null}
</Space>
</List.Item>
);
}}
style={{ background: 'transparent' }}
/>
{showRaw && (
<>
<Divider style={{ margin: '10px 0' }} />
<pre className="fx-agent-pre">{rawJson}</pre>
</>
)}
</div>
);
}
if (toolName === 'vfs_search') {
const query = typeof data?.query === 'string' ? data!.query : '';
const mode = typeof data?.mode === 'string' ? data!.mode : '';
const items = Array.isArray(data?.items) ? data!.items : [];
const pagination = data?.pagination && typeof data.pagination === 'object' ? data.pagination : null;
return (
<div className="fx-agent-tool-details">
{header}
<Divider style={{ margin: '10px 0' }} />
<Space direction="vertical" size={6} style={{ width: '100%' }}>
<Text type="secondary" style={{ fontSize: 12 }}>{t('Search')}: {query || '-'}</Text>
<Text type="secondary" style={{ fontSize: 12 }}>{t('Mode')}: {mode || '-'}</Text>
{pagination?.has_more != null ? (
<Text type="secondary" style={{ fontSize: 12 }}>
{t('Page')}: {String(pagination.page)} · {t('Has more')}: {String(Boolean(pagination.has_more))}
</Text>
) : null}
</Space>
<Divider style={{ margin: '10px 0' }} />
<List
size="small"
dataSource={items}
locale={{ emptyText: t('No results') }}
renderItem={(item: any) => {
const type = String(item?.source_type || item?.mime || '');
const path = String(item?.path || '');
const score = item?.score != null ? Number(item.score) : null;
return (
<List.Item>
<Space size={10} wrap style={{ width: '100%', justifyContent: 'space-between' }}>
<Space size={10} wrap>
{type ? <Text code style={{ fontVariantNumeric: 'tabular-nums' }}>{type}</Text> : null}
<Text>{path}</Text>
</Space>
{score != null && !Number.isNaN(score) ? (
<Text type="secondary" style={{ fontSize: 12 }}>{score.toFixed(3)}</Text>
) : null}
</Space>
</List.Item>
);
}}
style={{ background: 'transparent' }}
/>
{showRaw && (
<>
<Divider style={{ margin: '10px 0' }} />
<pre className="fx-agent-pre">{rawJson}</pre>
</>
)}
</div>
);
}
if (toolName === 'vfs_read_text') {
const path = typeof data?.path === 'string' ? data!.path : '';
const content = typeof data?.content === 'string' ? data!.content : '';
return (
<div className="fx-agent-tool-details">
{header}
<Divider style={{ margin: '10px 0' }} />
<Text type="secondary" style={{ fontSize: 12 }}>{t('File')}: {path || '-'}</Text>
<pre className="fx-agent-pre" style={{ marginTop: 10 }}>{content || ''}</pre>
{showRaw && (
<>
<Divider style={{ margin: '10px 0' }} />
<pre className="fx-agent-pre">{rawJson}</pre>
</>
)}
</div>
);
}
return (
<div className="fx-agent-tool-details">
{header}
<Divider style={{ margin: '10px 0' }} />
{showRaw ? (
<pre className="fx-agent-pre">{rawJson}</pre>
) : (
<Paragraph style={{ marginBottom: 0, whiteSpace: 'pre-wrap' }}>
{extractTextContent(data ?? rawContent) || <Text type="secondary">{t('No content')}</Text>}
</Paragraph>
)}
</div>
);
}, [copyToClipboard, expandedRaw, t]);
const renderToolArgsSummary = useCallback((toolName: string, args?: Record<string, any> | null) => {
const a = args || {};
if (toolName === 'processors_run') {
const path = typeof a.path === 'string' ? a.path : '';
return path ? `${t('Path')}: ${path}` : '';
}
if (toolName === 'vfs_read_text' || toolName === 'vfs_list_dir' || toolName === 'vfs_stat' || toolName === 'vfs_delete' || toolName === 'vfs_mkdir') {
const path = typeof a.path === 'string' ? a.path : '';
return path ? `${t('Path')}: ${path}` : '';
}
if (toolName === 'vfs_search') {
const query = typeof a.query === 'string' ? a.query : '';
return query ? `${t('Search')}: ${query}` : '';
}
if (toolName === 'vfs_write_text') {
const path = typeof a.path === 'string' ? a.path : '';
return path ? `${t('Path')}: ${path}` : '';
}
if (toolName === 'vfs_move' || toolName === 'vfs_copy' || toolName === 'vfs_rename') {
const src = typeof a.src === 'string' ? a.src : '';
const dst = typeof a.dst === 'string' ? a.dst : '';
if (src && dst) return `${src}${dst}`;
if (src) return src;
if (dst) return dst;
return '';
}
return '';
}, [t]);
  // Drawer layout: header controls (auto-execute toggle, clear), scrolling
  // transcript (tool results, user/assistant bubbles, running-tool indicator,
  // pending-action cards), then the composer with path picker and send button.
  return (
    <>
      <Drawer
        title={t('AI Agent')}
        open={open}
        // Closing the drawer also aborts any in-flight streaming request.
        onClose={() => { streamControllerRef.current?.abort(); onOpenChange(false); }}
        width={520}
        mask={false}
        destroyOnHidden
        styles={{
          body: {
            padding: 8,
            background: token.colorBgContainer,
          },
        }}
        extra={
          <Space align="center">
            <Text type="secondary">{t('Auto execute')}</Text>
            <Switch size="small" checked={autoExecute} onChange={setAutoExecute} />
            <Button
              type="text"
              size="small"
              icon={<DeleteOutlined />}
              onClick={clearChat}
              disabled={loading || messageItems.length === 0}
            >
              {t('Clear')}
            </Button>
          </Space>
        }
      >
        <Flex vertical gap={0} style={{ height: '100%' }} className="fx-agent-container">
          <div
            ref={scrollRef}
            className="fx-agent-chat-scroll"
          >
            {messageItems.length === 0 ? (
              // Empty-state placeholder before the first message.
              <div className="fx-agent-empty">
                <Avatar size={36} icon={<RobotOutlined />} style={{ background: token.colorPrimary }} />
                <div style={{ marginTop: 8 }}>
                  <Text type="secondary">{t('Start a conversation')}</Text>
                </div>
              </div>
            ) : (
              <div className="fx-agent-messages">
                {messageItems.map((m, idx) => {
                  const role = String((m as any).role);
                  const isUser = role === 'user';
                  const isTool = role === 'tool';
                  const toolCallId = typeof (m as any).tool_call_id === 'string' ? String((m as any).tool_call_id) : '';
                  const toolInfo = toolCallId ? toolCallsById.get(toolCallId) : null;
                  const toolName = toolInfo?.name || (toolCallId ? toolNameByIdRef.current[toolCallId] : '') || '';
                  const msgKey = toolCallId ? `tool:${toolCallId}` : `${role}:${idx}`;
                  if (isTool) {
                    {/* Tool result: collapsible block with summary line and details. */}
                    const rawContent = extractTextContent((m as any).content);
                    const expanded = !!expandedTools[msgKey];
                    const summary = toolName ? renderToolResultSummary(toolName, rawContent, toolInfo?.args || null) : '';
                    return (
                      <div key={msgKey} className="fx-agent-msg fx-agent-msg-tool">
                        <div className="fx-agent-tool-block">
                          <div className="fx-agent-tool-bar">
                            <Space size={6} wrap className="fx-agent-tool-pills">
                              <Tag className="fx-agent-pill" bordered={false} icon={<ToolOutlined />}>
                                {t('MCP Tool')}
                              </Tag>
                              <Tag className="fx-agent-pill fx-agent-pill-strong" bordered={false} icon={<CodeOutlined />}>
                                {toolName || t('Tool')}
                              </Tag>
                            </Space>
                            <Button
                              type="text"
                              size="small"
                              icon={expanded ? <UpOutlined /> : <DownOutlined />}
                              onClick={() => setExpandedTools((prev) => ({ ...prev, [msgKey]: !prev[msgKey] }))}
                            >
                              {expanded ? t('Collapse') : t('Expand')}
                            </Button>
                          </div>
                          {summary ? (
                            <div className="fx-agent-tool-summary-line">
                              <Text type="secondary">{summary}</Text>
                            </div>
                          ) : null}
                          {expanded && (
                            <div className="fx-agent-tool-expanded">
                              {toolInfo?.args && Object.keys(toolInfo.args).length > 0 && (
                                <div style={{ marginBottom: 10 }}>
                                  <Text type="secondary" style={{ fontSize: 12 }}>{t('Arguments')}</Text>
                                  <pre className="fx-agent-pre fx-agent-pre-compact">
                                    {JSON.stringify(toolInfo.args, null, 2)}
                                  </pre>
                                </div>
                              )}
                              {renderToolDetails(msgKey, toolName || t('Tool'), rawContent)}
                            </div>
                          )}
                        </div>
                      </div>
                    );
                  }
                  const text = extractTextContent((m as any).content);
                  if (isUser) {
                    return (
                      <div key={msgKey} className="fx-agent-msg fx-agent-msg-user">
                        <div className="fx-agent-user-block fx-agent-content">
                          {text.trim() ? <div className="fx-agent-text">{text}</div> : <Text type="secondary">{t('No content')}</Text>}
                        </div>
                      </div>
                    );
                  }
                  {/* Assistant message: rendered as markdown. */}
                  return (
                    <div key={msgKey} className="fx-agent-msg fx-agent-msg-assistant">
                      <div className="fx-agent-assistant-block fx-agent-content">
                        {text.trim() ? (
                          <div className="fx-agent-md">
                            <ReactMarkdown>{text}</ReactMarkdown>
                          </div>
                        ) : (
                          <Text type="secondary">{t('No content')}</Text>
                        )}
                      </div>
                    </div>
                  );
                })}
                {/* Live indicator for tool calls currently executing (max 2 shown). */}
                {runningToolCount > 0 && (
                  <div className="fx-agent-running">
                    <LoadingOutlined spin />
                    <Text type="secondary">{t('Calling tools')}</Text>
                    <Space size={6} wrap>
                      {runningToolEntries.slice(0, 2).map(([id, name]) => (
                        <Tag key={id} bordered={false} color="blue">
                          {(name || t('Tool'))} #{shortId(id, 4)}
                        </Tag>
                      ))}
                      {runningToolCount > 2 && (
                        <Text type="secondary">+{runningToolCount - 2}</Text>
                      )}
                    </Space>
                  </div>
                )}
                {/* Pending tool calls awaiting user confirmation. */}
                {pending.length > 0 && (
                  <div className="fx-agent-pending-group">
                    <div className="fx-agent-pending-head">
                      <Space size={8} wrap>
                        <Tag className="fx-agent-pill fx-agent-pill-warn" bordered={false}>
                          {t('Pending actions')}
                        </Tag>
                        <Text type="secondary">{pending.length}</Text>
                      </Space>
                      <Space size={6}>
                        <Button size="small" type="primary" onClick={approveAll} loading={loading}>
                          {t('Execute all')}
                        </Button>
                        <Button size="small" onClick={rejectAll} disabled={loading}>
                          {t('Cancel all')}
                        </Button>
                      </Space>
                    </div>
                    <div className="fx-agent-pending-list">
                      {pending.map((p) => {
                        const args = p.arguments || {};
                        const key = `pending:${p.id}`;
                        const expanded = !!expandedTools[key];
                        const running = Object.prototype.hasOwnProperty.call(runningTools, p.id);
                        const summary = renderToolArgsSummary(p.name, args);
                        return (
                          <div key={p.id} className="fx-agent-tool-block fx-agent-pending-item">
                            <div className="fx-agent-tool-bar">
                              <Space size={6} wrap className="fx-agent-tool-pills">
                                <Tag className="fx-agent-pill" bordered={false} icon={<ToolOutlined />}>
                                  {t('MCP Tool')}
                                </Tag>
                                <Tag className="fx-agent-pill fx-agent-pill-strong" bordered={false} icon={<CodeOutlined />}>
                                  {p.name}
                                </Tag>
                                {running ? <LoadingOutlined spin style={{ color: token.colorPrimary }} /> : null}
                              </Space>
                              <Space size={6}>
                                <Button
                                  size="small"
                                  type="primary"
                                  onClick={() => void approveOne(p.id)}
                                  loading={loading && running}
                                  disabled={loading && !running}
                                >
                                  {t('Execute')}
                                </Button>
                                <Button
                                  size="small"
                                  onClick={() => void rejectOne(p.id)}
                                  disabled={loading && !running}
                                >
                                  {t('Cancel')}
                                </Button>
                                <Button
                                  type="text"
                                  size="small"
                                  icon={expanded ? <UpOutlined /> : <DownOutlined />}
                                  onClick={() => setExpandedTools((prev) => ({ ...prev, [key]: !prev[key] }))}
                                />
                              </Space>
                            </div>
                            {summary ? (
                              <div className="fx-agent-tool-summary-line">
                                <Text type="secondary">{summary}</Text>
                              </div>
                            ) : null}
                            {expanded && (
                              <div className="fx-agent-tool-expanded">
                                <Text type="secondary" style={{ fontSize: 12 }}>{t('Arguments')}</Text>
                                <pre className="fx-agent-pre">
                                  {JSON.stringify(args, null, 2)}
                                </pre>
                              </div>
                            )}
                          </div>
                        );
                      })}
                    </div>
                  </div>
                )}
              </div>
            )}
          </div>
          {/* Composer: path picker shortcut, multiline input, send button. */}
          <div className="fx-agent-composer">
            <Flex vertical gap={8}>
              <Space wrap>
                <Button size="small" icon={<FolderOpenOutlined />} onClick={() => setPathModalOpen(true)} disabled={loading}>
                  {t('Select Path')}
                </Button>
                {effectivePath && (
                  <Tag bordered={false} color="blue">{t('Current')}: {effectivePath}</Tag>
                )}
              </Space>
              <Input.TextArea
                value={input}
                onChange={(e) => setInput(e.target.value)}
                placeholder={t('Type a message')}
                autoSize={{ minRows: 2, maxRows: 6 }}
                disabled={loading || pending.length > 0}
                variant="borderless"
                onPressEnter={(e) => {
                  // Enter sends; Shift+Enter inserts a newline.
                  if (e.shiftKey) return;
                  e.preventDefault();
                  void handleSend();
                }}
              />
              <div style={{ display: 'flex', justifyContent: 'flex-end' }}>
                <Button
                  type="primary"
                  size="small"
                  icon={<SendOutlined />}
                  onClick={handleSend}
                  loading={loading}
                  disabled={loading || pending.length > 0 || !input.trim()}
                >
                  {t('Send')}
                </Button>
              </div>
            </Flex>
          </div>
        </Flex>
      </Drawer>
      <PathSelectorModal
        open={pathModalOpen}
        mode="any"
        initialPath={effectivePath || '/'}
        onOk={handlePathSelected}
        onCancel={() => setPathModalOpen(false)}
      />
    </>
  );
});
export default AiAgentWidget;

View File

@@ -415,7 +415,7 @@
"Custom Provider": "Custom Provider",
"Custom Provider Description": "Bring your own endpoint compatible with OpenAI or Gemini formats.",
"OpenAI Provider": "OpenAI",
"OpenAI Provider Description": "Access GPT-4o, GPT-4.1, GPT-3.5 and more models from OpenAI.",
"OpenAI Provider Description": "Access GPT-4o, GPT-4.1, GPT-5 and more models from OpenAI.",
"Azure OpenAI Provider": "Azure OpenAI",
"Azure OpenAI Provider Description": "Use OpenAI models deployed on Microsoft Azure.",
"Google AI Provider": "Google AI",
@@ -425,13 +425,15 @@
"OpenRouter Provider": "OpenRouter",
"OpenRouter Provider Description": "Connect to multiple AI providers through a single OpenAI-style endpoint.",
"Anthropic Provider": "Anthropic",
"Anthropic Provider Description": "Claude 3 family models exposed through the Claude API.",
"Anthropic Provider Description": "Claude 4 family models exposed through the Claude API.",
"Z.ai Provider": "Z.ai",
"Z.ai Provider Description": "Z.ai models served via BigModel Open Platform (OpenAI-style).",
"DeepSeek Provider": "DeepSeek",
"DeepSeek Provider Description": "DeepSeek language models via OpenAI-compatible API.",
"Grok Provider": "Grok (xAI)",
"Grok Provider Description": "Grok models powered by xAI with OpenAI-style routes.",
"Ollama Provider": "Ollama",
"Ollama Provider Description": "Self-host and run models locally with Ollama's OpenAI bridge.",
"Ollama Provider Description": "Self-host and run models locally with Ollama's native HTTP API.",
"Voyage Provider": "Voyage AI",
"Voyage Provider Description": "High-quality embeddings and rerankers from Voyage AI.",
"Delete provider?": "Delete provider?",
@@ -690,5 +692,40 @@
"App \"{key}\" not found.": "App \"{key}\" not found.",
"Open with {app}": "Open with {app}",
"Set as default for .{ext}": "Set as default for .{ext}",
"Advanced tokens must be valid JSON": "Advanced tokens must be valid JSON"
"AI Agent": "AI Agent",
"Auto execute": "Auto execute",
"Start a conversation": "Start a conversation",
"No content": "No content",
"Pending actions": "Pending actions",
"Execute": "Execute",
"Execute all": "Execute all",
"Cancel all": "Cancel all",
"Type a message": "Type a message",
"Send": "Send",
"Please confirm pending actions first": "Please confirm pending actions first",
"You": "You",
"Tool": "Tool",
"MCP Tool": "MCP Tool",
"Arguments": "Arguments",
"Raw JSON": "Raw JSON",
"Collapse": "Collapse",
"Copied": "Copied",
"Canceled": "Canceled",
"Tasks submitted": "Tasks submitted",
"Calling tools": "Calling tools",
"Advanced tokens must be valid JSON": "Advanced tokens must be valid JSON",
"Search": "Search",
"Total": "Total",
"Mode": "Mode",
"Has more": "Has more",
"Page": "Page",
"results": "results",
"chars": "chars",
"Truncated": "Truncated",
"Write": "Write",
"Read": "Read",
"Created": "Created",
"Moved": "Moved",
"Renamed": "Renamed",
"Info": "Info"
}

View File

@@ -411,15 +411,23 @@
"Added {count} models": "已添加 {count} 个模型",
"Custom Provider": "自定义提供商",
"Custom Provider Description": "自定义兼容 OpenAI 或 Gemini 标准的 API 端点。",
"OpenAI Provider Description": "访问 OpenAI 的 GPT-4o、GPT-4.1、GPT-3.5 等模型。",
"OpenAI Provider": "OpenAI",
"OpenAI Provider Description": "访问 OpenAI 的 GPT-4o、GPT-4.1、GPT-5 等模型。",
"Azure OpenAI Provider": "Azure OpenAI",
"Azure OpenAI Provider Description": "使用托管在微软 Azure 上的 OpenAI 模型。",
"Google AI Provider": "Google AI",
"Google AI Provider Description": "Google AI 平台提供的 Gemini 系列模型。",
"SiliconFlow Provider": "硅基流动",
"SiliconFlow Provider Description": "硅基流动高性能推理平台,兼容 OpenAI 接口。",
"OpenRouter Provider Description": "通过一个 OpenAI 风格入口接入多家 AI 提供商。",
"Anthropic Provider Description": "通过 Claude API 使用 Claude 3 系列模型。",
"Anthropic Provider": "Anthropic",
"Anthropic Provider Description": "通过 Claude API 使用 Claude 4 系列模型。",
"Z.ai Provider": "Z.ai",
"Z.ai Provider Description": "通过智谱开放平台接入(OpenAI 风格接口)。",
"DeepSeek Provider": "DeepSeek",
"DeepSeek Provider Description": "DeepSeek 语言模型,支持 OpenAI 兼容接口。",
"Grok Provider Description": "xAI 的 Grok 模型,提供 OpenAI 风格接口。",
"Ollama Provider": "Ollama",
"Ollama Provider Description": "使用 Ollama 在本地运行并管理大模型。",
"Voyage Provider Description": "Voyage AI 提供的高质量嵌入与重排序模型。",
"Delete provider?": "确认删除该提供商?",
@@ -436,6 +444,9 @@
"Add your first AI provider to get started": "添加第一个 AI 提供商开始配置",
"Default Models Configuration": "默认模型配置",
"Main Chat Model": "主对话模型",
"Rerank Model": "重排序模型",
"Voice Model": "语音模型",
"Tools Model": "工具模型",
"Primary assistant for conversations, reasoning, and tool calls.": "用于对话、推理与工具调用的核心模型。",
"Handles multimodal perception such as image understanding.": "负责多模态感知与图像理解。",
"Transforms content into dense vectors for search and retrieval.": "将内容向量化以驱动搜索与检索。",
@@ -683,5 +694,40 @@
"App \"{key}\" not found.": "应用 \"{key}\" 不存在。",
"Open with {app}": "使用 {app} 打开",
"Set as default for .{ext}": "设为该类型(.{ext})默认应用",
"Advanced tokens must be valid JSON": "高级 Token 需为合法 JSON"
"AI Agent": "AI 助手",
"Auto execute": "自动执行",
"Start a conversation": "开始对话",
"No content": "无内容",
"Pending actions": "待确认操作",
"Execute": "执行",
"Execute all": "全部执行",
"Cancel all": "全部取消",
"Type a message": "输入消息",
"Send": "发送",
"Please confirm pending actions first": "请先确认待执行操作",
"You": "你",
"Tool": "工具",
"MCP Tool": "MCP 工具",
"Arguments": "参数",
"Raw JSON": "原始 JSON",
"Collapse": "收起",
"Copied": "已复制",
"Canceled": "已取消",
"Tasks submitted": "已提交任务",
"Calling tools": "正在调用工具",
"Advanced tokens must be valid JSON": "高级 Token 需为合法 JSON",
"Search": "搜索",
"Total": "总计",
"Mode": "模式",
"Has more": "更多",
"Page": "页",
"results": "条结果",
"chars": "字符",
"Truncated": "已截断",
"Write": "写入",
"Read": "读取",
"Created": "已创建",
"Moved": "已移动",
"Renamed": "已重命名",
"Info": "信息"
}

View File

@@ -1,5 +1,5 @@
import { Layout, Button, Dropdown, theme, Flex, Avatar, Typography } from 'antd';
import { SearchOutlined, MenuUnfoldOutlined, LogoutOutlined, UserOutlined } from '@ant-design/icons';
import { Layout, Button, Dropdown, theme, Flex, Avatar, Typography, Tooltip } from 'antd';
import { SearchOutlined, MenuUnfoldOutlined, LogoutOutlined, UserOutlined, RobotOutlined } from '@ant-design/icons';
import { memo, useState } from 'react';
import SearchDialog from './SearchDialog.tsx';
import { authApi } from '../api/auth.ts';
@@ -14,9 +14,10 @@ const { Header } = Layout;
export interface TopHeaderProps {
collapsed: boolean;
onToggle(): void;
onOpenAiAgent(): void;
}
const TopHeader = memo(function TopHeader({ collapsed, onToggle }: TopHeaderProps) {
const TopHeader = memo(function TopHeader({ collapsed, onToggle, onOpenAiAgent }: TopHeaderProps) {
const { token } = theme.useToken();
const [searchOpen, setSearchOpen] = useState(false);
const navigate = useNavigate();
@@ -50,6 +51,15 @@ const TopHeader = memo(function TopHeader({ collapsed, onToggle }: TopHeaderProp
</Button>
<SearchDialog open={searchOpen} onClose={() => setSearchOpen(false)} />
<Flex style={{ marginLeft: 'auto' }} align="center" gap={12}>
<Tooltip title={t('AI Agent')}>
<Button
type="text"
icon={<RobotOutlined />}
aria-label={t('AI Agent')}
onClick={onOpenAiAgent}
style={{ paddingInline: 8, height: 40 }}
/>
</Tooltip>
<LanguageSwitcher />
<Dropdown
menu={{

View File

@@ -80,7 +80,7 @@ interface ProviderTemplate {
key: string;
nameKey: string;
descriptionKey: string;
api_format: 'openai' | 'gemini';
api_format: AIProviderPayload['api_format'];
identifier: string;
base_url?: string;
logo_url?: string;
@@ -150,6 +150,28 @@ const providerTemplates: ProviderTemplate[] = [
provider_type: 'builtin',
doc_url: 'https://platform.openai.com/docs/api-reference',
},
{
key: 'azure-openai',
nameKey: 'Azure OpenAI Provider',
descriptionKey: 'Azure OpenAI Provider Description',
api_format: 'openai',
identifier: 'azure-openai',
base_url: 'https://{resource-name}.openai.azure.com/openai/deployments/{deployment-name}',
logo_url: '/icon/azure-color.svg',
provider_type: 'builtin',
doc_url: 'https://learn.microsoft.com/en-us/azure/ai-services/openai/reference',
},
{
key: 'anthropic',
nameKey: 'Anthropic Provider',
descriptionKey: 'Anthropic Provider Description',
api_format: 'anthropic',
identifier: 'anthropic',
base_url: 'https://api.anthropic.com/v1',
logo_url: '/icon/anthropic.svg',
provider_type: 'builtin',
doc_url: 'https://docs.anthropic.com/claude/reference/messages_post',
},
{
key: 'google-ai',
nameKey: 'Google AI Provider',
@@ -161,6 +183,17 @@ const providerTemplates: ProviderTemplate[] = [
provider_type: 'builtin',
doc_url: 'https://ai.google.dev/api/rest',
},
{
key: 'zai',
nameKey: 'Z.ai Provider',
descriptionKey: 'Z.ai Provider Description',
api_format: 'openai',
identifier: 'zai',
base_url: 'https://open.bigmodel.cn/api/paas/v4',
logo_url: '/icon/zai.svg',
provider_type: 'builtin',
doc_url: 'https://open.bigmodel.cn/dev/api',
},
{
key: 'siliconflow',
nameKey: 'SiliconFlow Provider',
@@ -183,6 +216,17 @@ const providerTemplates: ProviderTemplate[] = [
provider_type: 'builtin',
doc_url: 'https://platform.deepseek.com/api-docs',
},
{
key: 'ollama',
nameKey: 'Ollama Provider',
descriptionKey: 'Ollama Provider Description',
api_format: 'ollama',
identifier: 'ollama',
base_url: 'http://localhost:11434',
logo_url: '/icon/ollama.svg',
provider_type: 'builtin',
doc_url: 'https://github.com/ollama/ollama/blob/main/docs/api.md',
},
];
const abilityTagColor: Record<AIAbility, string> = {
@@ -295,7 +339,7 @@ export default function AiSettingsTab() {
identifier: existing.identifier,
api_format: existing.api_format,
base_url: existing.base_url ?? undefined,
api_key: existing.api_key ?? undefined,
api_key: '',
logo_url: existing.logo_url ?? undefined,
provider_type: existing.provider_type ?? undefined,
});
@@ -345,10 +389,12 @@ export default function AiSettingsTab() {
identifier: (values.identifier || '').trim(),
api_format: values.api_format,
base_url: trimmedBaseUrl ? trimmedBaseUrl : null,
api_key: trimmedApiKey ? trimmedApiKey : null,
logo_url: trimmedLogoUrl ? trimmedLogoUrl : null,
provider_type: trimmedProviderType ? trimmedProviderType : null,
};
if (trimmedApiKey) {
payload.api_key = trimmedApiKey;
}
try {
if (providerModal.editing) {
await updateProvider(providerModal.editing.id, payload);
@@ -952,14 +998,16 @@ export default function AiSettingsTab() {
<div className="fx-ai-default-desc">{t(info.description)}</div>
</div>
</div>
<Select
allowClear
style={{ minWidth: 280 }}
placeholder={t('Select a model')}
value={defaultSelections[ability] ?? undefined}
options={options}
onChange={(value) => updateSelection(ability, value ?? null)}
/>
<div className="fx-ai-default-control">
<Select
allowClear
className="fx-ai-default-select"
placeholder={t('Select a model')}
value={defaultSelections[ability] ?? undefined}
options={options}
onChange={(value) => updateSelection(ability, value ?? null)}
/>
</div>
</div>
);
})}
@@ -1069,19 +1117,41 @@ export default function AiSettingsTab() {
label={t('API Format')}
rules={[{ required: true }]}
>
<Select
disabled={!allowFormatChange}
options={[
{ value: 'openai', label: 'OpenAI Compatible' },
{ value: 'gemini', label: 'Gemini Compatible' },
]}
/>
</Form.Item>
<Select
disabled={!allowFormatChange}
options={[
{ value: 'openai', label: 'OpenAI Compatible' },
{ value: 'gemini', label: 'Gemini Compatible' },
{ value: 'anthropic', label: 'Anthropic Native' },
{ value: 'ollama', label: 'Ollama Native' },
]}
/>
</Form.Item>
<Form.Item name="base_url" label={t('Base URL')} rules={[{ required: true, message: t('Enter base url') }]}>
<Input placeholder="https://" />
</Form.Item>
<Form.Item name="api_key" label={t('API Key')}>
<Input placeholder={t('Optional, can also be provided per request')} />
<Form.Item
name="api_key"
label={(
<Space size={8}>
{t('API Key')}
{providerModal.editing ? (
<Tag color={providerModal.editing.has_api_key ? 'green' : 'default'}>
{providerModal.editing.has_api_key ? '已设置' : '未设置'}
</Tag>
) : null}
</Space>
)}
>
<Input.Password
placeholder={
providerModal.editing
? '留空不更新,填写将覆盖'
: t('Optional, can also be provided per request')
}
autoComplete="new-password"
visibilityToggle={false}
/>
</Form.Item>
<Form.Item name="logo_url" label={t('Logo URL')}>
<Input placeholder="https://" />

View File

@@ -10,6 +10,15 @@ import { vfsApi, type VfsEntry } from './api/vfs';
type FrameMode = 'file' | 'app';
type FrameQuery = {
pluginKey: string;
mode: FrameMode;
filePath: string;
pluginVersion: string;
pluginStyles: string[] | null;
entry: VfsEntry | null;
};
function renderStatus(text: string, isError: boolean = false) {
const root = document.getElementById('root');
if (!root) return;
@@ -31,12 +40,55 @@ function renderStatus(text: string, isError: boolean = false) {
root.appendChild(el);
}
function getQuery() {
function scheduleStatus(text: string, delayMs: number) {
let canceled = false;
const t = window.setTimeout(() => {
if (canceled) return;
renderStatus(text);
}, delayMs);
return () => {
canceled = true;
window.clearTimeout(t);
};
}
function tryParseJson<T = unknown>(raw: string): T | null {
try {
return JSON.parse(raw) as T;
} catch {
return null;
}
}
/**
 * Parse the frame's query-string parameters into a FrameQuery.
 *
 * `pluginStyles` and `entry` arrive JSON-encoded; both fall back to null when
 * absent or malformed, and `entry` is only accepted when it looks like a
 * VfsEntry (an object with a string `name`).
 *
 * Fix: removed a stale `return { pluginKey, mode, filePath };` left over from
 * the previous implementation, which made every line after the filePath parse
 * unreachable (pluginVersion/pluginStyles/entry were never returned).
 */
function getQuery(): FrameQuery {
  const params = new URLSearchParams(window.location.search);
  const pluginKey = (params.get('pluginKey') || '').trim();
  const mode = (params.get('mode') || 'file') as FrameMode;
  const filePath = (params.get('filePath') || '').trim();
  const pluginVersion = (params.get('pluginVersion') || '').trim();
  const rawStyles = (params.get('pluginStyles') || '').trim();
  const parsedStyles = rawStyles ? tryParseJson<unknown>(rawStyles) : null;
  // Keep only non-empty string paths; any other shape means "no styles passed".
  const pluginStyles = Array.isArray(parsedStyles)
    ? parsedStyles.filter((s) => typeof s === 'string' && s.trim().length > 0)
    : null;
  const rawEntry = (params.get('entry') || '').trim();
  const parsedEntry = rawEntry ? tryParseJson<any>(rawEntry) : null;
  const entry: VfsEntry | null =
    parsedEntry && typeof parsedEntry === 'object' && typeof parsedEntry.name === 'string'
      ? {
          name: String(parsedEntry.name),
          is_dir: Boolean(parsedEntry.is_dir),
          size: Number(parsedEntry.size || 0),
          mtime: Number(parsedEntry.mtime || 0),
          type: typeof parsedEntry.type === 'string' ? parsedEntry.type : undefined,
          has_thumbnail: Boolean(parsedEntry.has_thumbnail),
        }
      : null;
  return { pluginKey, mode, filePath, pluginVersion, pluginStyles, entry };
}
function postToParent(data: any) {
@@ -45,6 +97,51 @@ function postToParent(data: any) {
}
}
// Cached temporary download link for one file path.
type TempLinkCache = {
  // Resolved public URL for the file.
  url: string;
  // Epoch milliseconds (Date.now()) when the link was fetched.
  fetchedAt: number;
  // Link lifetime in seconds, as requested from the server.
  expiresIn: number;
};
// sessionStorage key namespace for temp-link cache entries.
const TEMP_LINK_CACHE_PREFIX = 'foxel:tempLink:';
// Default link lifetime: one hour (seconds).
const TEMP_LINK_DEFAULT_EXPIRES_IN = 3600;
/** Build the sessionStorage key under which a file's temp link is cached. */
function getTempLinkCacheKey(filePath: string) {
  return TEMP_LINK_CACHE_PREFIX + filePath;
}
/**
 * Read a cached temp link for `filePath` from sessionStorage.
 * Returns null when the entry is missing, malformed, or within 10 seconds of
 * expiry — the safety margin keeps a link from being handed out just before
 * the server invalidates it. Any storage/parse error is treated as a miss.
 */
function readTempLinkCache(filePath: string): TempLinkCache | null {
  try {
    const raw = sessionStorage.getItem(getTempLinkCacheKey(filePath));
    if (!raw) return null;
    const parsed = JSON.parse(raw) as TempLinkCache;
    if (!parsed || typeof parsed.url !== 'string') return null;
    if (!parsed.fetchedAt || !parsed.expiresIn) return null;
    // expiresIn is in seconds; treat the link as stale 10s early.
    if (Date.now() - parsed.fetchedAt >= parsed.expiresIn * 1000 - 10_000) return null;
    return parsed;
  } catch {
    return null;
  }
}
/** Persist a temp-link cache entry; best-effort (storage errors are ignored). */
function writeTempLinkCache(filePath: string, item: TempLinkCache) {
  const key = getTempLinkCacheKey(filePath);
  const payload = JSON.stringify(item);
  try {
    sessionStorage.setItem(key, payload);
  } catch {
    // sessionStorage may be unavailable or full — caching is optional.
  }
}
/**
 * Resolve a temporary public URL for `filePath`, consulting the sessionStorage
 * cache first. On a miss, fetches a token from the API, prefers the server's
 * ready-made `url` when present, and caches the result for `expiresIn` seconds.
 */
async function getTempLinkUrl(filePath: string, expiresIn: number = TEMP_LINK_DEFAULT_EXPIRES_IN) {
  const hit = readTempLinkCache(filePath);
  if (hit) {
    return hit.url;
  }
  const tokenData = await vfsApi.getTempLinkToken(filePath, expiresIn);
  const hasDirectUrl = typeof tokenData?.url === 'string' && tokenData.url.trim();
  const url = hasDirectUrl ? tokenData.url : vfsApi.getTempPublicUrl(tokenData.token);
  writeTempLinkCache(filePath, { url, fetchedAt: Date.now(), expiresIn });
  return url;
}
function createHostApi(pluginKey: string): FoxelHostApi {
const showMessage: FoxelHostApi['showMessage'] = (type, content) => {
const antd = window.__FOXEL_EXTERNALS__?.antd;
@@ -76,8 +173,7 @@ function createHostApi(pluginKey: string): FoxelHostApi {
callApi: async <T = unknown>(path: string, options?: RequestInit & { json?: unknown }) =>
request<T>(path, options),
getTempLink: async (filePath: string) => {
const token = await vfsApi.getTempLinkToken(filePath);
return vfsApi.getTempPublicUrl(token.token);
return await getTempLinkUrl(filePath);
},
getStreamUrl: (filePath: string) => vfsApi.streamUrl(filePath),
};
@@ -89,28 +185,28 @@ function getPluginStylePaths(plugin: PluginItem): string[] {
return styles.filter((s) => typeof s === 'string' && s.trim().length > 0);
}
async function loadPluginStyles(pluginKey: string, plugin: PluginItem) {
const stylePaths = getPluginStylePaths(plugin);
if (stylePaths.length === 0) return;
const tasks = stylePaths.map(
(p) =>
new Promise<void>((resolve) => {
const href = `/api/plugins/${pluginKey}/assets/${p.replace(/^\/+/, '')}`;
const link = document.createElement('link');
link.rel = 'stylesheet';
link.href = href;
link.onload = () => resolve();
link.onerror = () => resolve();
document.head.appendChild(link);
})
);
await Promise.all(tasks);
/**
 * Append a `v` cache-busting query parameter to `url` when a non-empty
 * version string is given; otherwise return the URL unchanged.
 * Returns only path + search (origin-relative), as the input is expected.
 */
function withVersion(url: string, version?: string | null): string {
  const trimmed = typeof version === 'string' ? version.trim() : '';
  if (!trimmed) {
    return url;
  }
  const resolved = new URL(url, window.location.origin);
  resolved.searchParams.set('v', trimmed);
  return resolved.pathname + resolved.search;
}
async function loadPluginBundle(pluginKey: string): Promise<RegisteredPlugin> {
const url = `/api/plugins/${pluginKey}/bundle.js`;
/**
 * Inject <link rel="stylesheet"> tags into the document head for each plugin
 * style asset, version-tagged for cache busting. Fire-and-forget: load
 * success is not awaited.
 */
function injectPluginStyles(pluginKey: string, stylePaths: string[], version?: string | null) {
  if (stylePaths.length === 0) return;
  for (const stylePath of stylePaths) {
    const assetPath = stylePath.replace(/^\/+/, '');
    const href = withVersion(`/api/plugins/${pluginKey}/assets/${assetPath}`, version);
    const link = document.createElement('link');
    link.rel = 'stylesheet';
    link.href = href;
    document.head.appendChild(link);
  }
}
async function loadPluginBundle(pluginKey: string, version?: string | null): Promise<RegisteredPlugin> {
const url = withVersion(`/api/plugins/${pluginKey}/bundle.js`, version);
return new Promise<RegisteredPlugin>((resolve, reject) => {
let done = false;
@@ -140,24 +236,43 @@ async function loadPluginBundle(pluginKey: string): Promise<RegisteredPlugin> {
});
}
async function buildFileContext(filePath: string) {
const stat = (await vfsApi.stat(filePath)) as any;
const name =
typeof stat?.name === 'string' && stat.name.trim().length > 0
? stat.name
: filePath.replace(/\\/g, '/').split('/').filter(Boolean).pop() || 'unknown';
/** Heuristic: does the path/filename end in a common image extension? */
function isLikelyImage(pathOrName: string) {
  const imageExtension = /\.(jpg|jpeg|png|gif|bmp|webp|svg)$/i;
  return imageExtension.test(pathOrName);
}
const entry: VfsEntry = {
name,
is_dir: Boolean(stat?.is_dir),
size: Number(stat?.size || 0),
mtime: Number(stat?.mtime || 0),
type: typeof stat?.type === 'string' ? stat.type : undefined,
has_thumbnail: Boolean(stat?.has_thumbnail),
};
/** Warm the browser cache for an image URL; the element is never attached. */
function preloadImage(url: string) {
  const probe = new Image();
  probe.decoding = 'async';
  probe.src = url;
}
const token = await vfsApi.getTempLinkToken(filePath);
const downloadUrl = vfsApi.getTempPublicUrl(token.token);
async function buildFileContext(filePath: string, entryOverride: VfsEntry | null) {
const entryPromise = entryOverride
? Promise.resolve(entryOverride)
: (async () => {
const stat = (await vfsApi.stat(filePath)) as any;
const name =
typeof stat?.name === 'string' && stat.name.trim().length > 0
? stat.name
: filePath.replace(/\\/g, '/').split('/').filter(Boolean).pop() || 'unknown';
const entry: VfsEntry = {
name,
is_dir: Boolean(stat?.is_dir),
size: Number(stat?.size || 0),
mtime: Number(stat?.mtime || 0),
type: typeof stat?.type === 'string' ? stat.type : undefined,
has_thumbnail: Boolean(stat?.has_thumbnail),
};
return entry;
})();
const downloadUrlPromise = getTempLinkUrl(filePath);
if (isLikelyImage(filePath)) {
downloadUrlPromise.then(preloadImage).catch(() => void 0);
}
const [entry, downloadUrl] = await Promise.all([entryPromise, downloadUrlPromise]);
const streamUrl = vfsApi.streamUrl(filePath);
return { entry, urls: { downloadUrl, streamUrl } };
@@ -166,7 +281,7 @@ async function buildFileContext(filePath: string) {
async function main() {
initExternals();
const { pluginKey, mode, filePath } = getQuery();
const { pluginKey, mode, filePath, pluginVersion, pluginStyles, entry } = getQuery();
if (!pluginKey) {
renderStatus('Missing pluginKey in query string', true);
return;
@@ -178,34 +293,37 @@ async function main() {
return;
}
renderStatus('Loading plugin...');
let plugin: PluginItem;
try {
plugin = await pluginsApi.get(pluginKey);
} catch (e) {
const msg = e instanceof Error ? e.message : String(e);
renderStatus(`Failed to load plugin info: ${msg}`, true);
return;
}
try {
await loadPluginStyles(pluginKey, plugin);
} catch {
// ignore
}
let registered: RegisteredPlugin;
try {
registered = await loadPluginBundle(pluginKey);
} catch (e) {
const msg = e instanceof Error ? e.message : String(e);
renderStatus(`Failed to load plugin bundle: ${msg}`, true);
return;
}
const cancelLoading = scheduleStatus('Loading plugin...', 200);
const host = createHostApi(pluginKey);
const pluginPromise = (async () => {
if (pluginVersion && pluginStyles) {
injectPluginStyles(pluginKey, pluginStyles, pluginVersion);
return await loadPluginBundle(pluginKey, pluginVersion);
}
const plugin: PluginItem = await pluginsApi.get(pluginKey);
const resolvedVersion = plugin.version || '';
injectPluginStyles(pluginKey, getPluginStylePaths(plugin), resolvedVersion);
return await loadPluginBundle(pluginKey, resolvedVersion);
})();
const ctxPromise = mode === 'file' ? buildFileContext(filePath, entry) : Promise.resolve(null);
let registered: RegisteredPlugin;
let ctx: Awaited<ReturnType<typeof buildFileContext>> | null;
try {
[registered, ctx] = await Promise.all([pluginPromise, ctxPromise]);
} catch (e) {
const msg = e instanceof Error ? e.message : String(e);
cancelLoading();
renderStatus(`Failed to load plugin: ${msg}`, true);
return;
}
cancelLoading();
let cleanup: (() => void) | null = null;
const mountError = async () => {
try {
@@ -224,8 +342,11 @@ async function main() {
throw new Error('Missing filePath in query string');
}
const { entry, urls } = await buildFileContext(filePath);
const ret = await registered.mount(root, { filePath, entry, urls, host });
if (!ctx) {
throw new Error('Missing file context');
}
const ret = await registered.mount(root, { filePath, entry: ctx.entry, urls: ctx.urls, host });
if (typeof ret === 'function') cleanup = ret;
} catch (e) {
const msg = e instanceof Error ? e.message : String(e);

View File

@@ -16,6 +16,7 @@ import BackupPage from '../pages/SystemSettingsPage/BackupPage.tsx';
import PluginsPage from '../pages/PluginsPage.tsx';
import { AppWindowsProvider, useAppWindows } from '../contexts/AppWindowsContext';
import { AppWindowsLayer } from '../apps/AppWindowsLayer';
import AiAgentWidget from '../components/AiAgentWidget';
const ShellBody = memo(function ShellBody() {
const params = useParams<{ navKey?: string; '*': string }>();
@@ -24,11 +25,13 @@ const ShellBody = memo(function ShellBody() {
const navigate = useNavigate();
const COLLAPSED_KEY = 'layout.siderCollapsed';
const [collapsed, setCollapsed] = useState(() => localStorage.getItem(COLLAPSED_KEY) === '1');
const [agentOpen, setAgentOpen] = useState(false);
useEffect(() => {
localStorage.setItem(COLLAPSED_KEY, collapsed ? '1' : '0');
}, [collapsed]);
const { windows, closeWindow, toggleMax, bringToFront, updateWindow } = useAppWindows();
const settingsTab = navKey === 'settings' ? (subPath.split('/')[0] || undefined) : undefined;
const agentCurrentPath = navKey === 'files' ? ('/' + subPath).replace(/\/+/g, '/').replace(/\/+$/, '') || '/' : null;
return (
<Layout style={{ minHeight: '100vh', background: 'var(--ant-color-bg-layout)' }}>
<SideNav
@@ -44,7 +47,7 @@ const ShellBody = memo(function ShellBody() {
}}
/>
<Layout style={{ background: 'var(--ant-color-bg-layout)' }}>
<TopHeader collapsed={collapsed} onToggle={() => setCollapsed(c => !c)} />
<TopHeader collapsed={collapsed} onToggle={() => setCollapsed(c => !c)} onOpenAiAgent={() => setAgentOpen(true)} />
<Layout.Content style={{ padding: 16, background: 'var(--ant-color-bg-layout)' }}>
<div style={{ minHeight: 'calc(100vh - 56px - 32px)', background: 'var(--ant-color-bg-layout)' }}>
<Flex vertical gap={16}>
@@ -76,6 +79,7 @@ const ShellBody = memo(function ShellBody() {
onBringToFront={bringToFront}
onUpdateWindow={updateWindow}
/>
<AiAgentWidget currentPath={agentCurrentPath} open={agentOpen} onOpenChange={setAgentOpen} />
</Layout>
);
});

244
web/src/styles/ai-agent.css Normal file
View File

@@ -0,0 +1,244 @@
/* Styles for the AI Agent chat widget (fx-agent-*). */

/* ---- Layout: container and scrollable chat area ---- */
.fx-agent-container {
  height: 100%;
}

.fx-agent-chat-scroll {
  flex: 1;
  overflow-y: auto;
  padding: 0;
  border-radius: 0;
  background: transparent;
  border: 0;
  box-shadow: none;
}

/* Centered placeholder shown when there are no messages. */
.fx-agent-empty {
  height: 100%;
  min-height: 240px;
  display: flex;
  flex-direction: column;
  align-items: center;
  justify-content: center;
}

/* ---- Message list ---- */
.fx-agent-messages {
  display: flex;
  flex-direction: column;
  gap: 14px;
}

.fx-agent-msg {
  display: flex;
  flex-direction: column;
}

/* Per-role alignment: user right, assistant left, tool full width. */
.fx-agent-msg-user {
  align-items: flex-end;
}

.fx-agent-msg-assistant {
  align-items: flex-start;
}

.fx-agent-msg-tool {
  align-items: stretch;
}

/* ---- Message bubbles ---- */
.fx-agent-user-block {
  max-width: 85%;
  padding: 10px 12px;
  border-radius: 12px;
  border: 1px solid var(--ant-color-border-secondary);
  background: var(--ant-color-fill-quaternary);
  box-shadow: 0 1px 0 rgba(0, 0, 0, 0.03);
}

.fx-agent-assistant-block {
  max-width: 100%;
  padding: 2px 2px;
}

.fx-agent-tool-block {
  width: 100%;
  padding: 10px 12px;
  border-radius: 12px;
  border: 1px solid var(--ant-color-border-secondary);
  background: var(--ant-color-bg-container);
  box-shadow: 0 1px 0 rgba(0, 0, 0, 0.03);
}

/* Header row of a tool block. */
.fx-agent-tool-bar {
  display: flex;
  align-items: center;
  justify-content: space-between;
  gap: 12px;
}

.fx-agent-content {
  font-size: 13px;
  line-height: 1.75;
  word-break: break-word;
}

/* ---- Pills / tags ---- */
.fx-agent-tool-pills .ant-tag {
  margin-inline-end: 0;
}

.fx-agent-pill {
  border-radius: 999px;
  padding-inline: 10px;
  padding-block: 2px;
  border: 0;
  background: rgba(0, 0, 0, 0.04);
}

.fx-agent-pill-strong {
  background: var(--ant-color-primary-bg);
  color: var(--ant-color-primary);
}

.fx-agent-pill-warn {
  background: var(--ant-color-warning-bg);
  color: var(--ant-color-warning);
}

.fx-agent-tool-summary-line {
  margin-top: 6px;
  font-size: 12px;
  line-height: 1.6;
  color: var(--ant-color-text-tertiary);
}

.fx-agent-tool-expanded {
  margin-top: 10px;
}

/* ---- Message content: plain text vs rendered markdown ---- */
.fx-agent-text {
  white-space: pre-wrap;
}

.fx-agent-md {
  white-space: normal;
}

.fx-agent-md p {
  margin: 0 0 0.5em;
}

.fx-agent-md p:last-child {
  margin-bottom: 0;
}

.fx-agent-md ul,
.fx-agent-md ol {
  margin: 0 0 0.5em;
  padding-left: 1.2em;
}

.fx-agent-md code {
  padding: 1px 6px;
  border-radius: 6px;
  background: rgba(0, 0, 0, 0.04);
  border: 1px solid var(--ant-color-border-secondary);
  font-family: ui-monospace, SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace;
  font-size: 11px;
}

.fx-agent-md pre {
  margin: 0 0 0.5em;
  padding: 8px 10px;
  border-radius: 10px;
  background: var(--ant-color-bg-container);
  border: 1px solid var(--ant-color-border-secondary);
  overflow: auto;
}

.fx-agent-md pre code {
  display: block;
  padding: 0;
  border: 0;
  background: transparent;
  font-size: 11px;
  line-height: 1.55;
}

.fx-agent-md blockquote {
  margin: 0 0 0.65em;
  padding: 0 0 0 10px;
  border-left: 3px solid var(--ant-color-border);
  color: var(--ant-color-text-tertiary);
}

.fx-agent-md a {
  color: var(--ant-color-primary);
}

/* ---- Expanded tool details ---- */
.fx-agent-tool-details {
  padding: 8px;
  border-radius: 10px;
  background: rgba(0, 0, 0, 0.02);
  border: 1px solid var(--ant-color-border-secondary);
}

/* Scrollable preformatted payload view, capped in height. */
.fx-agent-pre {
  margin: 8px 0 0;
  padding: 8px 10px;
  border-radius: 10px;
  background: var(--ant-color-bg-container);
  border: 1px solid var(--ant-color-border-secondary);
  font-size: 11px;
  line-height: 1.5;
  white-space: pre;
  overflow: auto;
  max-height: 260px;
}

.fx-agent-pre.fx-agent-pre-compact {
  max-height: 200px;
}

/* ---- Pending tool group ---- */
.fx-agent-pending-group {
  margin-top: 6px;
  display: flex;
  flex-direction: column;
  gap: 10px;
}

.fx-agent-pending-head {
  display: flex;
  align-items: center;
  justify-content: space-between;
  gap: 10px;
  padding: 8px 10px;
  border-radius: 12px;
  border: 1px solid var(--ant-color-border-secondary);
  background: rgba(0, 0, 0, 0.02);
}

.fx-agent-pending-list {
  display: flex;
  flex-direction: column;
  gap: 10px;
}

/* ---- Composer (input area at the bottom) ---- */
.fx-agent-composer {
  padding: 8px 0 0;
  background: transparent;
  border-top: 1px solid var(--ant-color-border-secondary);
}

.fx-agent-composer .ant-input {
  font-size: 12px;
  line-height: 1.6;
}

/* Inline "running" indicator strip. */
.fx-agent-running {
  margin-top: 4px;
  padding: 6px 8px;
  border-radius: 10px;
  background: rgba(0, 0, 0, 0.03);
  border: 1px dashed var(--ant-color-border-secondary);
  display: flex;
  align-items: center;
  gap: 10px;
}

View File

@@ -1,15 +1,13 @@
.fx-ai-top-bar {
display: flex;
justify-content: space-between;
align-items: center;
padding: 20px 28px;
border-radius: 16px;
background: linear-gradient(120deg, rgba(99, 102, 241, 0.16), rgba(167, 139, 250, 0.12));
border: 1px solid rgba(99, 102, 241, 0.15);
align-items: flex-start;
gap: 16px;
padding: 0 4px;
}
.fx-ai-provider-card {
border-radius: 16px;
border-radius: 12px;
overflow: hidden;
box-shadow: var(--ant-box-shadow-secondary);
}
@@ -115,7 +113,7 @@
}
.fx-ai-empty-card {
border-radius: 16px;
border-radius: 12px;
background: var(--ant-color-fill-tertiary);
}
@@ -128,16 +126,16 @@
}
.fx-ai-defaults-card {
border-radius: 16px;
border-radius: 12px;
box-shadow: var(--ant-box-shadow-secondary);
}
.fx-ai-default-row {
display: flex;
display: grid;
grid-template-columns: 1fr minmax(240px, 360px);
align-items: center;
justify-content: space-between;
gap: 16px;
padding: 12px 0;
padding: 14px 0;
border-bottom: 1px solid var(--ant-color-border-secondary);
}
@@ -151,15 +149,26 @@
align-items: center;
}
.fx-ai-default-control {
display: flex;
justify-content: flex-end;
}
.fx-ai-default-select {
width: 100%;
}
.fx-ai-default-icon {
width: 46px;
height: 46px;
border-radius: 16px;
width: 40px;
height: 40px;
border-radius: 12px;
display: flex;
align-items: center;
justify-content: center;
font-size: 22px;
color: var(--ant-color-text-light-solid);
font-size: 20px;
background: var(--ant-color-fill-quaternary);
border: 1px solid var(--ant-color-border-secondary);
color: var(--ant-color-text-secondary);
}
.fx-ai-default-desc {
@@ -218,7 +227,7 @@
align-items: center;
justify-content: space-between;
padding: 16px;
border-radius: 16px;
border-radius: 12px;
background: var(--ant-color-fill-quaternary);
border: 1px solid transparent;
cursor: pointer;
@@ -251,7 +260,7 @@
.fx-ai-template-icon.summary {
width: 56px;
height: 56px;
border-radius: 18px;
border-radius: 14px;
font-size: 26px;
}
@@ -284,7 +293,7 @@
align-items: center;
gap: 16px;
padding: 16px;
border-radius: 16px;
border-radius: 12px;
background: var(--ant-color-fill-quaternary);
}
@@ -336,26 +345,78 @@
color: var(--ant-color-text-tertiary);
}
.fx-ai-chat {
background: linear-gradient(135deg, #805ad5, #6b46c1);
.fx-ai-default-icon.fx-ai-chat {
background: rgba(128, 90, 213, 0.12);
border-color: rgba(128, 90, 213, 0.25);
color: rgb(128, 90, 213);
}
.fx-ai-vision {
background: linear-gradient(135deg, #4c6ef5, #4263eb);
.fx-ai-default-icon.fx-ai-vision {
background: rgba(76, 110, 245, 0.12);
border-color: rgba(76, 110, 245, 0.25);
color: rgb(76, 110, 245);
}
.fx-ai-embedding {
background: linear-gradient(135deg, #f7b733, #fc4a1a);
.fx-ai-default-icon.fx-ai-embedding {
background: rgba(247, 183, 51, 0.14);
border-color: rgba(247, 183, 51, 0.28);
color: rgb(215, 145, 0);
}
.fx-ai-rerank {
background: linear-gradient(135deg, #0ea5e9, #0284c7);
.fx-ai-default-icon.fx-ai-rerank {
background: rgba(14, 165, 233, 0.12);
border-color: rgba(14, 165, 233, 0.25);
color: rgb(14, 165, 233);
}
.fx-ai-voice {
background: linear-gradient(135deg, #f97316, #ea580c);
.fx-ai-default-icon.fx-ai-voice {
background: rgba(249, 115, 22, 0.12);
border-color: rgba(249, 115, 22, 0.25);
color: rgb(249, 115, 22);
}
.fx-ai-tools {
background: linear-gradient(135deg, #ec4899, #db2777);
.fx-ai-default-icon.fx-ai-tools {
background: rgba(236, 72, 153, 0.12);
border-color: rgba(236, 72, 153, 0.25);
color: rgb(236, 72, 153);
}
html[data-theme='dark'] .fx-ai-default-icon.fx-ai-chat {
background: rgba(128, 90, 213, 0.18);
border-color: rgba(128, 90, 213, 0.35);
}
html[data-theme='dark'] .fx-ai-default-icon.fx-ai-vision {
background: rgba(76, 110, 245, 0.18);
border-color: rgba(76, 110, 245, 0.35);
}
html[data-theme='dark'] .fx-ai-default-icon.fx-ai-embedding {
background: rgba(247, 183, 51, 0.2);
border-color: rgba(247, 183, 51, 0.38);
}
html[data-theme='dark'] .fx-ai-default-icon.fx-ai-rerank {
background: rgba(14, 165, 233, 0.18);
border-color: rgba(14, 165, 233, 0.35);
}
html[data-theme='dark'] .fx-ai-default-icon.fx-ai-voice {
background: rgba(249, 115, 22, 0.18);
border-color: rgba(249, 115, 22, 0.35);
}
html[data-theme='dark'] .fx-ai-default-icon.fx-ai-tools {
background: rgba(236, 72, 153, 0.18);
border-color: rgba(236, 72, 153, 0.35);
}
@media (max-width: 768px) {
.fx-ai-default-row {
grid-template-columns: 1fr;
}
.fx-ai-default-control {
justify-content: flex-start;
}
}