diff --git a/CHANGELOG.md b/CHANGELOG.md
index 7e640ba..54c5ca0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,6 +5,23 @@
格式遵循 [Keep a Changelog](https://keepachangelog.com/zh-CN/1.1.0/),
版本号遵循 [语义化版本](https://semver.org/lang/zh-CN/)。
+## [0.11.6] - 2026-04-07
+
+### 新功能 (Features)
+
+- **Skills 多 Agent 支持** — Skills 页面新增 Agent 选择器,不同 Agent 可独立管理各自的 Skills 目录;后端 Rust/Node.js 双端均支持 agent_id 参数路由
+- **助手工具模式流式输出** — 晴辰助手工具调用模式从非流式改为流式,AI 文字逐 token 打字机显示,tool_calls 分块累积后再执行
+
+### 改进 (Improvements)
+
+- **OpenClaw 4.5 兼容** — 实时聊天页面支持全部 Agent 事件流(lifecycle / item / plan / approval / thinking / command_output),新增 3 分钟终极超时和实时计时器,解决无回复时 UI 永远卡住的问题
+- **热更新替换为稳定版下载** — 关于页和全局更新横幅不再展示热更新/重载,改为引导用户前往官网或 GitHub 下载最新稳定版
+
+### 修复 (Fixes)
+
+- **Gateway 状态抖动** — 仪表盘刷新增加 5 秒节流和并发保护;TCP 端口检测增加重试(1s+2s);Gateway 停止判定从 2 次提高到 3 次连续检测;自动重启前增加 3 秒延迟确认
+- **助手空灰色气泡** — 修复流式响应 0 内容块时静默成功导致空消息持久化的 bug;新增流内错误事件捕获、渲染时过滤空消息、finally 块清理机制
+
## [0.11.5] - 2026-04-07
### 新功能 (Features)
diff --git a/docs/index.html b/docs/index.html
index 92a84d9..08f0068 100644
--- a/docs/index.html
+++ b/docs/index.html
@@ -34,7 +34,7 @@
"description": "OpenClaw AI Agent 可视化管理面板,基于 Tauri v2 的跨平台桌面应用。内置晴辰助手支持工具调用,晴辰云 AI 接口一键接入。支持仪表盘监控、多模型配置、消息渠道管理、内置 QQ 机器人、实时 AI 聊天、记忆管理、Agent 管理、网关配置、内网穿透等功能。支持 11 种语言。",
"url": "https://claw.qt.cool/",
"downloadUrl": "https://github.com/qingchencloud/clawpanel/releases/latest",
- "softwareVersion": "0.11.5",
+ "softwareVersion": "0.11.6",
"author": {
"@type": "Organization",
"name": "晴辰云 QingchenCloud",
@@ -1155,7 +1155,7 @@
@@ -1165,11 +1165,11 @@
macOS
支持 Apple Silicon 和 Intel 芯片
-
+
Apple Silicon (M1/M2/M3/M4)
.dmg
-
+
Intel 芯片
.dmg
@@ -1187,15 +1187,15 @@
Windows
支持 Windows 10 及以上版本
-
+
安装程序
.exe
-
+
完整包(含 WebView2)
.exe
-
+
MSI 安装包
.msi
@@ -1206,11 +1206,11 @@
Linux
支持主流 Linux 发行版
-
+
通用版
.AppImage
-
+
Debian / Ubuntu
.deb
diff --git a/package-lock.json b/package-lock.json
index 1fd0de9..c1d479b 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -1,12 +1,12 @@
{
"name": "clawpanel",
- "version": "0.11.5",
+ "version": "0.11.6",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "clawpanel",
- "version": "0.11.5",
+ "version": "0.11.6",
"license": "AGPL-3.0",
"dependencies": {
"@tauri-apps/api": "^2.5.0",
diff --git a/package.json b/package.json
index 3419508..afd0b6b 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
{
"name": "clawpanel",
- "version": "0.11.5",
+ "version": "0.11.6",
"private": true,
"description": "ClawPanel - OpenClaw 可视化管理面板,基于 Tauri v2 的跨平台桌面应用",
"type": "module",
diff --git a/scripts/dev-api.js b/scripts/dev-api.js
index 78d5e55..c66ea77 100644
--- a/scripts/dev-api.js
+++ b/scripts/dev-api.js
@@ -899,7 +899,19 @@ function parseSkillFrontmatterFile(skillMdPath) {
}
}
-function collectLocalSkillRoots() {
+function resolveAgentSkillsDir(agentId) {
+ const id = (agentId || '').trim()
+ if (!id || id === 'main') return null
+ try {
+ const config = JSON.parse(fs.readFileSync(CONFIG_PATH, 'utf8'))
+ const ws = resolveAgentWorkspace(config, id)
+ return path.join(ws, 'skills')
+ } catch {
+ return path.join(OPENCLAW_DIR, 'agents', id, 'workspace', 'skills')
+ }
+}
+
+function collectLocalSkillRoots(agentSkillsDir) {
const roots = []
const seen = new Set()
const pushRoot = (dir, source, bundled = false) => {
@@ -911,7 +923,11 @@ function collectLocalSkillRoots() {
roots.push({ dir: normalized, source, bundled })
}
- pushRoot(path.join(OPENCLAW_DIR, 'skills'), 'OpenClaw 自定义', false)
+ if (agentSkillsDir) {
+ pushRoot(agentSkillsDir, 'Agent 自定义', false)
+ } else {
+ pushRoot(path.join(OPENCLAW_DIR, 'skills'), 'OpenClaw 自定义', false)
+ }
pushRoot(path.join(homedir(), '.claude', 'skills'), 'Claude 自定义', false)
const cliPath = resolveOpenclawCliPath()
@@ -979,8 +995,8 @@ function scanSingleSkill(root, name) {
return result
}
-function scanLocalSkillsFallback(cliError = null) {
- const roots = collectLocalSkillRoots()
+function scanLocalSkillsFallback(agentSkillsDir = null) {
+ const roots = collectLocalSkillRoots(agentSkillsDir)
const skills = []
const seen = new Set()
const scannedRoots = []
@@ -5350,12 +5366,14 @@ const handlers = {
},
// Skills 管理(纯本地扫描,不依赖 CLI)
- skills_list() {
- return scanLocalSkillsFallback()
+ skills_list({ agent_id } = {}) {
+ const agentDir = resolveAgentSkillsDir(agent_id)
+ return scanLocalSkillsFallback(agentDir)
},
- skills_info({ name }) {
+ skills_info({ name, agent_id } = {}) {
const n = String(name || '').trim()
- const fallback = scanLocalSkillsFallback().skills.find(skill => skill.name === n)
+ const agentDir = resolveAgentSkillsDir(agent_id)
+ const fallback = scanLocalSkillsFallback(agentDir).skills.find(skill => skill.name === n)
if (fallback) return fallback
throw new Error(`Skill「${n}」不存在`)
},
@@ -5384,9 +5402,11 @@ const handlers = {
throw new Error(`安装失败: ${e.message || e}`)
}
},
- skills_uninstall({ name }) {
+ skills_uninstall({ name, agent_id } = {}) {
if (!name || name.includes('..') || name.includes('/') || name.includes('\\')) throw new Error('无效的 Skill 名称')
- const skillDir = path.join(OPENCLAW_DIR, 'skills', name)
+ const agentDir = resolveAgentSkillsDir(agent_id)
+ const baseDir = agentDir || path.join(OPENCLAW_DIR, 'skills')
+ const skillDir = path.join(baseDir, name)
if (!fs.existsSync(skillDir)) throw new Error(`Skill「${name}」不存在`)
fs.rmSync(skillDir, { recursive: true, force: true })
return { success: true, name }
@@ -5398,8 +5418,9 @@ const handlers = {
async skillhub_index() {
return await skillhubSdk.fetchIndex()
},
- async skillhub_install({ slug }) {
- const skillsDir = path.join(OPENCLAW_DIR, 'skills')
+ async skillhub_install({ slug, agent_id } = {}) {
+ const agentDir = resolveAgentSkillsDir(agent_id)
+ const skillsDir = agentDir || path.join(OPENCLAW_DIR, 'skills')
if (!fs.existsSync(skillsDir)) fs.mkdirSync(skillsDir, { recursive: true })
const installedPath = await skillhubSdk.install(slug, skillsDir)
return { success: true, slug, path: installedPath }
diff --git a/src-tauri/Cargo.lock b/src-tauri/Cargo.lock
index 05680b6..79c6924 100644
--- a/src-tauri/Cargo.lock
+++ b/src-tauri/Cargo.lock
@@ -351,7 +351,7 @@ dependencies = [
[[package]]
name = "clawpanel"
-version = "0.11.5"
+version = "0.11.6"
dependencies = [
"base64 0.22.1",
"chrono",
diff --git a/src-tauri/Cargo.toml b/src-tauri/Cargo.toml
index 5db7d46..c30e220 100644
--- a/src-tauri/Cargo.toml
+++ b/src-tauri/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "clawpanel"
-version = "0.11.5"
+version = "0.11.6"
edition = "2021"
description = "ClawPanel - OpenClaw 可视化管理面板"
authors = ["qingchencloud"]
diff --git a/src-tauri/src/commands/agent.rs b/src-tauri/src/commands/agent.rs
index 9aaca33..d6e6a93 100644
--- a/src-tauri/src/commands/agent.rs
+++ b/src-tauri/src/commands/agent.rs
@@ -1006,6 +1006,11 @@ fn resolve_agent_workspace(id: &str, config: &Value) -> String {
})
}
+/// Public wrapper for expand_user_path, used by skills.rs
+pub fn expand_user_path_pub(raw: &str) -> std::path::PathBuf {
+ expand_user_path(raw)
+}
+
fn expand_user_path(raw: &str) -> std::path::PathBuf {
let trimmed = raw.trim();
let path = if let Some(rest) = trimmed
diff --git a/src-tauri/src/commands/service.rs b/src-tauri/src/commands/service.rs
index ec813db..322e867 100644
--- a/src-tauri/src/commands/service.rs
+++ b/src-tauri/src/commands/service.rs
@@ -568,14 +568,17 @@ mod platform {
Ok(a) => a,
Err(_) => return (false, None),
};
- match std::net::TcpStream::connect_timeout(&socket_addr, std::time::Duration::from_secs(1))
- {
- Ok(_) => {
- // 尝试通过 lsof 获取 PID
- let pid = get_pid_by_lsof(port);
- (true, pid)
- }
- Err(_) => (false, None),
+ // 两次尝试:第一次 1 秒,失败后短暂等待再用 2 秒重试,避免瞬态超时误判
+ let connected = std::net::TcpStream::connect_timeout(&socket_addr, std::time::Duration::from_secs(1)).is_ok()
+ || {
+ std::thread::sleep(std::time::Duration::from_millis(300));
+ std::net::TcpStream::connect_timeout(&socket_addr, std::time::Duration::from_secs(2)).is_ok()
+ };
+ if connected {
+ let pid = get_pid_by_lsof(port);
+ (true, pid)
+ } else {
+ (false, None)
}
}
@@ -1120,7 +1123,13 @@ mod platform {
Ok(a) => a,
Err(_) => return (false, None),
};
- if std::net::TcpStream::connect_timeout(&socket_addr, Duration::from_secs(1)).is_err() {
+ // 两次尝试:第一次 1 秒,失败后短暂等待再用 2 秒重试,避免瞬态超时误判
+ let connected = std::net::TcpStream::connect_timeout(&socket_addr, Duration::from_secs(1)).is_ok()
+ || {
+ std::thread::sleep(Duration::from_millis(300));
+ std::net::TcpStream::connect_timeout(&socket_addr, Duration::from_secs(2)).is_ok()
+ };
+ if !connected {
// 端口不通,先清空已知的僵死 PID
let mut known = LAST_KNOWN_GATEWAY_PID.lock().unwrap();
*known = None;
diff --git a/src-tauri/src/commands/skills.rs b/src-tauri/src/commands/skills.rs
index ac5308c..21c4ffb 100644
--- a/src-tauri/src/commands/skills.rs
+++ b/src-tauri/src/commands/skills.rs
@@ -5,15 +5,18 @@ use serde_json::Value;
use std::os::windows::process::CommandExt;
/// 列出所有 Skills 及其状态(纯本地扫描,不依赖 CLI)
+/// agent_id: 可选,指定 Agent ID,不同 Agent 有不同的 workspace/skills 目录
#[tauri::command]
-pub async fn skills_list() -> Result<Value, String> {
-    scan_local_skills(None)
+pub async fn skills_list(agent_id: Option<String>) -> Result<Value, String> {
+    let agent_ws = resolve_agent_skills_dir(agent_id.as_deref());
+    scan_local_skills(None, agent_ws.as_deref())
}
/// 查看单个 Skill 详情(纯本地文件解析,不依赖 CLI)
#[tauri::command]
-pub async fn skills_info(name: String) -> Result<Value, String> {
-    scan_custom_skill_detail(&name).ok_or_else(|| format!("Skill「{name}」不存在"))
+pub async fn skills_info(name: String, agent_id: Option<String>) -> Result<Value, String> {
+    let agent_ws = resolve_agent_skills_dir(agent_id.as_deref());
+    scan_custom_skill_detail(&name, agent_ws.as_deref()).ok_or_else(|| format!("Skill「{name}」不存在"))
}
/// 检查 Skills 依赖状态(纯本地扫描)
@@ -124,8 +127,11 @@ pub async fn skillhub_index() -> Result {
/// 从 SkillHub 安装 Skill(内置 HTTP 下载 + zip 解压)
#[tauri::command]
-pub async fn skillhub_install(slug: String) -> Result<Value, String> {
-    let skills_dir = super::openclaw_dir().join("skills");
+pub async fn skillhub_install(slug: String, agent_id: Option<String>) -> Result<Value, String> {
+ let skills_dir = match resolve_agent_skills_dir(agent_id.as_deref()) {
+ Some(dir) => dir,
+ None => super::openclaw_dir().join("skills"),
+ };
if !skills_dir.exists() {
std::fs::create_dir_all(&skills_dir).map_err(|e| format!("创建 skills 目录失败: {e}"))?;
}
@@ -137,14 +143,15 @@ pub async fn skillhub_install(slug: String) -> Result {
}))
}
-/// 卸载 Skill(删除 ~/.openclaw/skills/<name>/ 目录)
+/// 卸载 Skill(删除 skills/<name>/ 目录)
#[tauri::command]
-pub async fn skills_uninstall(name: String) -> Result<Value, String> {
+pub async fn skills_uninstall(name: String, agent_id: Option<String>) -> Result<Value, String> {
if name.is_empty() || name.contains("..") || name.contains('/') || name.contains('\\') {
return Err("无效的 Skill 名称".to_string());
}
+ let agent_ws = resolve_agent_skills_dir(agent_id.as_deref());
let skills_dir =
- resolve_custom_skill_dir(&name).ok_or_else(|| format!("Skill「{name}」不存在"))?;
+ resolve_custom_skill_dir_with_agent(&name, agent_ws.as_deref()).ok_or_else(|| format!("Skill「{name}」不存在"))?;
if !skills_dir.exists() {
return Err(format!("Skill「{name}」不存在"));
}
@@ -160,7 +167,7 @@ pub async fn skills_validate(name: String) -> Result {
}
let skill_dir =
- resolve_custom_skill_dir(&name).ok_or_else(|| format!("Skill「{name}」不存在"))?;
+ resolve_custom_skill_dir_with_agent(&name, None).ok_or_else(|| format!("Skill「{name}」不存在"))?;
if !skill_dir.exists() {
return Err(format!("Skill「{name}」不存在"));
}
@@ -447,8 +454,48 @@ fn clean_cli_output(text: &str) -> String {
.join("\n")
}
-fn custom_skill_roots() -> Vec<(std::path::PathBuf, &'static str)> {
- let mut roots = vec![(super::openclaw_dir().join("skills"), "OpenClaw 自定义")];
+/// 根据 agentId 解析该 Agent 的 workspace/skills 目录
+/// 如果 agentId 为 None 或 "main",返回 None(使用默认的 ~/.openclaw/skills)
+fn resolve_agent_skills_dir(agent_id: Option<&str>) -> Option<std::path::PathBuf> {
+ let id = agent_id.map(|s| s.trim()).filter(|s| !s.is_empty() && *s != "main")?;
+ // 读取 openclaw.json 获取 agent workspace
+ let config = super::config::load_openclaw_json().ok()?;
+ let workspace = config
+ .get("agents")
+ .and_then(|a| a.get("list"))
+ .and_then(|l| l.as_array())
+ .and_then(|list| {
+ list.iter()
+ .find(|a| a.get("id").and_then(|v| v.as_str()) == Some(id))
+ .and_then(|a| a.get("workspace"))
+ .and_then(|v| v.as_str())
+ .map(|s| s.trim().to_string())
+ .filter(|s| !s.is_empty())
+ })
+ .unwrap_or_else(|| {
+ // 默认:~/.openclaw/agents/{id}/workspace
+ super::openclaw_dir()
+ .join("agents")
+ .join(id)
+ .join("workspace")
+ .to_string_lossy()
+ .to_string()
+ });
+ let expanded = super::agent::expand_user_path_pub(&workspace);
+ Some(expanded.join("skills"))
+}
+
+fn custom_skill_roots_for_agent(agent_skills_dir: Option<&std::path::Path>) -> Vec<(std::path::PathBuf, &'static str)> {
+ let mut roots = Vec::new();
+
+ // 如果指定了 agent 的 skills 目录,优先放在第一位
+ if let Some(agent_dir) = agent_skills_dir {
+ roots.push((agent_dir.to_path_buf(), "Agent 自定义"));
+ } else {
+ // 默认 agent 使用全局 skills 目录
+ roots.push((super::openclaw_dir().join("skills"), "OpenClaw 自定义"));
+ }
+
if let Some(home) = dirs::home_dir() {
let claude_skills = home.join(".claude").join("skills");
if !roots.iter().any(|(dir, _)| dir == &claude_skills) {
@@ -456,13 +503,9 @@ fn custom_skill_roots() -> Vec<(std::path::PathBuf, &'static str)> {
}
}
// 从已解析的 CLI 路径推导 npm 包内的 bundled skills 目录
- // 例如 CLI 在 /usr/lib/node_modules/openclaw/bin/openclaw
- // → 包根 /usr/lib/node_modules/openclaw/
- // → skills 目录 /usr/lib/node_modules/openclaw/skills/
if let Some(cli_path) = crate::utils::resolve_openclaw_cli_path() {
let cli = std::path::PathBuf::from(&cli_path);
let cli = std::fs::canonicalize(&cli).unwrap_or(cli);
- // CLI 可能在 bin/ 子目录或包根目录
for pkg_root in [cli.parent(), cli.parent().and_then(|p| p.parent())]
.into_iter()
.flatten()
@@ -489,15 +532,15 @@ fn custom_skill_roots() -> Vec<(std::path::PathBuf, &'static str)> {
roots
}
-fn resolve_custom_skill_dir(name: &str) -> Option<std::path::PathBuf> {
-    custom_skill_roots()
+fn resolve_custom_skill_dir_with_agent(name: &str, agent_skills_dir: Option<&std::path::Path>) -> Option<std::path::PathBuf> {
+    custom_skill_roots_for_agent(agent_skills_dir)
.into_iter()
.map(|(root, _)| root.join(name))
.find(|path| path.exists())
}
-fn scan_custom_skill_detail(name: &str) -> Option<Value> {
-    for (root, source_label) in custom_skill_roots() {
+fn scan_custom_skill_detail(name: &str, agent_skills_dir: Option<&std::path::Path>) -> Option<Value> {
+    for (root, source_label) in custom_skill_roots_for_agent(agent_skills_dir) {
let skill_path = root.join(name);
if !skill_path.exists() {
continue;
@@ -550,10 +593,10 @@ fn scan_custom_skill_detail(name: &str) -> Option {
None
}
-fn scan_local_skill_entries() -> Result<Vec<Value>, String> {
+fn scan_local_skill_entries_for_agent(agent_skills_dir: Option<&std::path::Path>) -> Result<Vec<Value>, String> {
let mut skills = Vec::new();
- for (skills_dir, source_label) in custom_skill_roots() {
+ for (skills_dir, source_label) in custom_skill_roots_for_agent(agent_skills_dir) {
if !skills_dir.exists() {
continue;
}
@@ -615,14 +658,18 @@ fn scan_local_skill_entries() -> Result, String> {
Ok(skills)
}
-/// CLI 不可用或当前结果不可用时的兜底:扫描本地自定义 Skills 目录(含 ~/.openclaw/skills 与 ~/.claude/skills)
-fn scan_local_skills(cli_diagnostic: Option<Value>) -> Result<Value, String> {
- let roots = custom_skill_roots();
+fn scan_local_skill_entries() -> Result<Vec<Value>, String> {
+ scan_local_skill_entries_for_agent(None)
+}
+
+/// CLI 不可用或当前结果不可用时的兜底:扫描本地自定义 Skills 目录
+fn scan_local_skills(cli_diagnostic: Option<Value>, agent_skills_dir: Option<&std::path::Path>) -> Result<Value, String> {
+ let roots = custom_skill_roots_for_agent(agent_skills_dir);
let scanned_roots: Vec<String> = roots
.iter()
.map(|(dir, label)| format!("{}: {}", label, dir.to_string_lossy()))
.collect();
- let skills = scan_local_skill_entries()?;
+ let skills = scan_local_skill_entries_for_agent(agent_skills_dir)?;
let cli_available = cli_diagnostic
.as_ref()
.and_then(|v| v.get("cliAvailable"))
diff --git a/src-tauri/tauri.conf.json b/src-tauri/tauri.conf.json
index d22ca2c..0e3e441 100644
--- a/src-tauri/tauri.conf.json
+++ b/src-tauri/tauri.conf.json
@@ -1,7 +1,7 @@
{
"$schema": "https://raw.githubusercontent.com/tauri-apps/tauri/dev/crates/tauri-config-schema/schema.json",
"productName": "ClawPanel",
- "version": "0.11.5",
+ "version": "0.11.6",
"identifier": "ai.openclaw.clawpanel",
"build": {
"frontendDist": "../dist",
diff --git a/src/lib/app-state.js b/src/lib/app-state.js
index 1782ad6..afda78d 100644
--- a/src/lib/app-state.js
+++ b/src/lib/app-state.js
@@ -183,8 +183,11 @@ async function _tryAutoRestart() {
return
}
- // 重启前再次确认端口确实空闲,防止端口被其他程序占用时无限拉起
+ // 延迟 3 秒后再次确认端口确实空闲,防止瞬态 TCP 超时误判触发不必要的重启
+ await new Promise(r => setTimeout(r, 3000))
try {
+ const { invalidate } = await import('./tauri-api.js')
+ invalidate('get_services_status')
const services = await api.getServicesStatus()
const gw = services?.find?.(s => s.label === 'ai.openclaw.gateway') || services?.[0]
if (gw?.running) {
@@ -213,7 +216,7 @@ async function _tryAutoRestart() {
}
/** 刷新 Gateway 运行状态(轻量,仅查服务状态)
- * 防抖:running→stopped 需要连续 2 次检测才切换,避免瞬态误判 */
+ * 防抖:running→stopped 需要连续 3 次检测才切换,避免瞬态误判 */
export async function refreshGatewayStatus() {
try {
const services = await api.getServicesStatus()
@@ -239,14 +242,14 @@ export async function refreshGatewayStatus() {
} else {
_gwStopCount++
}
- if (foreignRunning || _gwStopCount >= 2 || !_gatewayRunning) {
+ if (foreignRunning || _gwStopCount >= 3 || !_gatewayRunning) {
_setGatewayRunning(false, foreignRunning)
}
}
}
} catch {
_gwStopCount++
- if (_gwStopCount >= 2) _setGatewayRunning(false)
+ if (_gwStopCount >= 3) _setGatewayRunning(false)
}
return _gatewayRunning
}
diff --git a/src/lib/model-presets.js b/src/lib/model-presets.js
index 95d6de6..1cb216b 100644
--- a/src/lib/model-presets.js
+++ b/src/lib/model-presets.js
@@ -14,7 +14,7 @@ export const API_TYPES = [
// 服务商快捷预设
export const PROVIDER_PRESETS = [
- { key: 'qtcool', label: '晴辰云', badge: '推荐', baseUrl: 'https://gpt.qt.cool/v1', api: 'openai-completions', site: 'https://gpt.qt.cool/', desc: '面板用户免费使用部分模型,付费用户享全系列顶级模型支持,全部模型低至 2-3 折' },
+ { key: 'qtcool', label: '晴辰云', badge: '推荐', baseUrl: 'https://gpt.qt.cool/v1', api: 'openai-completions', site: 'https://gpt.qt.cool/', desc: '每日签到领免费模型测试额度,邀请好友再送额度,付费低至官方价 2-3 折' },
{ key: 'shengsuanyun', label: '胜算云', baseUrl: 'https://router.shengsuanyun.com/api/v1', api: 'openai-completions', site: 'https://www.shengsuanyun.com/?from=CH_4BVI0BM2', desc: '国内知名 AI 模型聚合平台,支持多种主流模型' },
{ key: 'siliconflow', label: '硅基流动', baseUrl: 'https://api.siliconflow.cn/v1', api: 'openai-completions', site: 'https://cloud.siliconflow.cn/i/PFrw2an5', desc: '高性价比推理平台,支持 DeepSeek、Qwen 等开源模型' },
{ key: 'volcengine', label: '火山引擎', baseUrl: 'https://ark.cn-beijing.volces.com/api/v3', api: 'openai-completions', site: 'https://volcengine.com/L/Ph1OP5I3_GY', desc: '字节跳动旗下云平台,支持豆包等模型' },
diff --git a/src/lib/openclaw-kb.js b/src/lib/openclaw-kb.js
index 439e0b8..f4f3a14 100644
--- a/src/lib/openclaw-kb.js
+++ b/src/lib/openclaw-kb.js
@@ -223,11 +223,11 @@ npm install -g openclaw@latest
晴辰云为 ClawPanel 用户提供 AI 模型调用服务,通过每日签到和邀请好友即可获取免费额度。
### 核心信息
-- **GPT-AI 网关地址**:https://gpt.qt.cool/v1
+- **API 网关地址**:https://gpt.qt.cool/v1
- **支持接口**:/v1/chat/completions、/v1/responses(OpenAI 兼容)
-- **支持模型**:OpenAI 全系列(GPT-5、GPT-5.1、GPT-5.2 及其 Codex 变体)
-- **免费额度**:每日签到领取免费额度,邀请好友可获得额外奖励
-- **高级模型**:低至官方价 2-3 折
+- **支持模型**:多种顶级 AI 模型(具体可用模型以平台实时列表为准)
+- **免费额度**:每日签到领取免费模型测试额度,邀请好友可获得额外奖励
+- **付费优惠**:低至官方价 2-3 折,不满意随时可退
- **用户后台**:https://gpt.qt.cool/user(查看用量、管理密钥)
### 官方入口
diff --git a/src/lib/tauri-api.js b/src/lib/tauri-api.js
index cdd7848..cc17d3e 100644
--- a/src/lib/tauri-api.js
+++ b/src/lib/tauri-api.js
@@ -322,15 +322,15 @@ export const api = {
assistantFetchUrl: (url) => invoke('assistant_fetch_url', { url }),
// Skills 管理
- skillsList: () => invoke('skills_list'),
- skillsInfo: (name) => invoke('skills_info', { name }),
+ skillsList: (agentId) => invoke('skills_list', { agent_id: agentId || null }),
+ skillsInfo: (name, agentId) => invoke('skills_info', { name, agent_id: agentId || null }),
skillsCheck: () => invoke('skills_check'),
skillsInstallDep: (kind, spec) => invoke('skills_install_dep', { kind, spec }),
- skillsUninstall: (name) => invoke('skills_uninstall', { name }),
+ skillsUninstall: (name, agentId) => invoke('skills_uninstall', { name, agent_id: agentId || null }),
// SkillHub SDK(内置 HTTP,不依赖 CLI)
skillhubSearch: (query, limit) => invoke('skillhub_search', { query, limit }),
skillhubIndex: () => invoke('skillhub_index'),
- skillhubInstall: (slug) => invoke('skillhub_install', { slug }),
+ skillhubInstall: (slug, agentId) => invoke('skillhub_install', { slug, agent_id: agentId || null }),
// 实例管理
instanceList: () => cachedInvoke('instance_list', {}, 10000),
diff --git a/src/locales/de.json b/src/locales/de.json
index 45282a3..bd4b55f 100644
--- a/src/locales/de.json
+++ b/src/locales/de.json
@@ -317,7 +317,7 @@
"providerHint": "Providers are sources of models (e.g. OpenAI, DeepSeek). Each provider can have multiple models. The \"primary\" model is used first; others serve as fallbacks. Changes are auto-saved.",
"qtcoolName": "QingChen Cloud",
"qtcoolRecommend": "Recommended",
- "qtcoolDesc": "GPT-5 / Codex full series, as low as 20-30% of official price. Refund anytime.",
+ "qtcoolDesc": "Daily check-in for free model credits, invite friends for bonus. Paid plans as low as 20-30% of official price. Refund anytime.",
"qtcoolMore": "Learn more →",
"qtcoolCheckin": "Daily check-in for credits",
"qtcoolKeyPlaceholder": "Paste API Key (get from dashboard after check-in)",
@@ -1972,7 +1972,7 @@
"visitSite": "Visit Website",
"notConfigured": "Not configured",
"qtcoolName": "QingChen Cloud",
- "qtcoolDesc": "GPT-5 / Codex full series, as low as 20-30% of official price",
+ "qtcoolDesc": "Daily check-in for free credits, premium models as low as 20-30% of official price",
"qtcoolRecommend": "Recommended",
"qtcoolCheckin": "Daily check-in for credits",
"qtcoolLearnMore": "Learn more →",
diff --git a/src/locales/en.json b/src/locales/en.json
index 1234295..f7a2dd2 100644
--- a/src/locales/en.json
+++ b/src/locales/en.json
@@ -317,7 +317,7 @@
"providerHint": "Providers are sources of models (e.g. OpenAI, DeepSeek). Each provider can have multiple models. The \"primary\" model is used first; others serve as fallbacks. Changes are auto-saved.",
"qtcoolName": "QingChen Cloud",
"qtcoolRecommend": "Recommended",
- "qtcoolDesc": "GPT-5 / Codex full series, as low as 20-30% of official price. Refund anytime.",
+ "qtcoolDesc": "Daily check-in for free model credits, invite friends for bonus. Paid plans as low as 20-30% of official price. Refund anytime.",
"qtcoolMore": "Learn more →",
"qtcoolCheckin": "Daily check-in for credits",
"qtcoolKeyPlaceholder": "Paste API Key (get from dashboard after check-in)",
@@ -2068,7 +2068,7 @@
"visitSite": "Visit Website",
"notConfigured": "Not configured",
"qtcoolName": "QingChen Cloud",
- "qtcoolDesc": "GPT-5 / Codex full series, as low as 20-30% of official price",
+ "qtcoolDesc": "Daily check-in for free credits, premium models as low as 20-30% of official price",
"qtcoolRecommend": "Recommended",
"qtcoolCheckin": "Daily check-in for credits",
"qtcoolLearnMore": "Learn more →",
diff --git a/src/locales/es.json b/src/locales/es.json
index d825edb..b9b74c5 100644
--- a/src/locales/es.json
+++ b/src/locales/es.json
@@ -317,7 +317,7 @@
"providerHint": "Providers are sources of models (e.g. OpenAI, DeepSeek). Each provider can have multiple models. The \"primary\" model is used first; others serve as fallbacks. Changes are auto-saved.",
"qtcoolName": "QingChen Cloud",
"qtcoolRecommend": "Recommended",
- "qtcoolDesc": "GPT-5 / Codex full series, as low as 20-30% of official price. Refund anytime.",
+ "qtcoolDesc": "Daily check-in for free model credits, invite friends for bonus. Paid plans as low as 20-30% of official price. Refund anytime.",
"qtcoolMore": "Learn more →",
"qtcoolCheckin": "Daily check-in for credits",
"qtcoolKeyPlaceholder": "Paste API Key (get from dashboard after check-in)",
@@ -1972,7 +1972,7 @@
"visitSite": "Visit Website",
"notConfigured": "Not configured",
"qtcoolName": "QingChen Cloud",
- "qtcoolDesc": "GPT-5 / Codex full series, as low as 20-30% of official price",
+ "qtcoolDesc": "Daily check-in for free credits, premium models as low as 20-30% of official price",
"qtcoolRecommend": "Recommended",
"qtcoolCheckin": "Daily check-in for credits",
"qtcoolLearnMore": "Learn more →",
diff --git a/src/locales/fr.json b/src/locales/fr.json
index 9e61313..b5c1bf3 100644
--- a/src/locales/fr.json
+++ b/src/locales/fr.json
@@ -317,7 +317,7 @@
"providerHint": "Providers are sources of models (e.g. OpenAI, DeepSeek). Each provider can have multiple models. The \"primary\" model is used first; others serve as fallbacks. Changes are auto-saved.",
"qtcoolName": "QingChen Cloud",
"qtcoolRecommend": "Recommended",
- "qtcoolDesc": "GPT-5 / Codex full series, as low as 20-30% of official price. Refund anytime.",
+ "qtcoolDesc": "Daily check-in for free model credits, invite friends for bonus. Paid plans as low as 20-30% of official price. Refund anytime.",
"qtcoolMore": "Learn more →",
"qtcoolCheckin": "Daily check-in for credits",
"qtcoolKeyPlaceholder": "Paste API Key (get from dashboard after check-in)",
@@ -1972,7 +1972,7 @@
"visitSite": "Visit Website",
"notConfigured": "Not configured",
"qtcoolName": "QingChen Cloud",
- "qtcoolDesc": "GPT-5 / Codex full series, as low as 20-30% of official price",
+ "qtcoolDesc": "Daily check-in for free credits, premium models as low as 20-30% of official price",
"qtcoolRecommend": "Recommended",
"qtcoolCheckin": "Daily check-in for credits",
"qtcoolLearnMore": "Learn more →",
diff --git a/src/locales/ja.json b/src/locales/ja.json
index 936b8c9..1fed400 100644
--- a/src/locales/ja.json
+++ b/src/locales/ja.json
@@ -317,7 +317,7 @@
"providerHint": "Providers are sources of models (e.g. OpenAI, DeepSeek). Each provider can have multiple models. The \"primary\" model is used first; others serve as fallbacks. Changes are auto-saved.",
"qtcoolName": "QingChen Cloud",
"qtcoolRecommend": "Recommended",
- "qtcoolDesc": "GPT-5 / Codex 全シリーズ、公式価格の2〜3割",
+ "qtcoolDesc": "毎日チェックインで無料クレジット獲得、友達招待でボーナス。有料は公式価格の2〜3割",
"qtcoolMore": "Learn more →",
"qtcoolCheckin": "Daily check-in for credits",
"qtcoolKeyPlaceholder": "Paste API Key (get from dashboard after check-in)",
@@ -1972,7 +1972,7 @@
"visitSite": "サイトを訪問",
"notConfigured": "未設定",
"qtcoolName": "QingChen Cloud",
- "qtcoolDesc": "GPT-5 / Codex full series, as low as 20-30% of official price",
+ "qtcoolDesc": "Daily check-in for free credits, premium models as low as 20-30% of official price",
"qtcoolRecommend": "Recommended",
"qtcoolCheckin": "Daily check-in for credits",
"qtcoolLearnMore": "Learn more →",
diff --git a/src/locales/ko.json b/src/locales/ko.json
index d5dfe23..d51d6c4 100644
--- a/src/locales/ko.json
+++ b/src/locales/ko.json
@@ -317,7 +317,7 @@
"providerHint": "Providers are sources of models (e.g. OpenAI, DeepSeek). Each provider can have multiple models. The \"primary\" model is used first; others serve as fallbacks. Changes are auto-saved.",
"qtcoolName": "QingChen Cloud",
"qtcoolRecommend": "Recommended",
- "qtcoolDesc": "GPT-5 / Codex 전 시리즈, 공식가의 2~3할",
+ "qtcoolDesc": "매일 체크인으로 무료 크레딧, 친구 초대 보너스. 유료는 공식가의 2~3할",
"qtcoolMore": "Learn more →",
"qtcoolCheckin": "Daily check-in for credits",
"qtcoolKeyPlaceholder": "Paste API Key (get from dashboard after check-in)",
@@ -1972,7 +1972,7 @@
"visitSite": "사이트 방문",
"notConfigured": "미설정",
"qtcoolName": "QingChen Cloud",
- "qtcoolDesc": "GPT-5 / Codex full series, as low as 20-30% of official price",
+ "qtcoolDesc": "Daily check-in for free credits, premium models as low as 20-30% of official price",
"qtcoolRecommend": "Recommended",
"qtcoolCheckin": "Daily check-in for credits",
"qtcoolLearnMore": "Learn more →",
diff --git a/src/locales/modules/about.js b/src/locales/modules/about.js
index 620a460..994e11b 100644
--- a/src/locales/modules/about.js
+++ b/src/locales/modules/about.js
@@ -81,6 +81,9 @@ export default {
retry: _('重试', 'Retry', '重試', '再試行', '재시도'),
needFullUpdate: _('需要更新完整安装包', 'Full install package update required', '需要更新完整安裝包'),
goToWebsite: _('前往官网下载', 'Go to Website', '前往官網下載'),
+ downloadFromWebsite: _('官网下载', 'Website Download', '官網下載'),
+ downloadFromGitHub: _('GitHub 下载', 'GitHub Download', 'GitHub 下載'),
+ newVersionAvailable: _('发现新版本 v{version},请前往下载更新', 'New version v{version} available, please download to update', '發現新版本 v{version},請前往下載更新'),
upToDate: _('已是最新', 'Up to date', '', '最新です', '최신 상태', 'Đã cập nhật', 'Actualizado', 'Atualizado', 'Актуально', 'À jour', 'Aktuell'),
checkUpdateFailed: _('暂无法检查更新', 'Unable to check for updates', '暫無法檢查更新', '更新を確認できません', '업데이트 확인 실패', 'Kiểm tra cập nhật thất bại', 'Error al verificar actualizaciones', 'Falha ao verificar atualizações', 'Ошибка проверки обновлений', 'Échec de la vérification des mises à jour', 'Update-Prüfung fehlgeschlagen'),
qqGroup: _('QQ 交流群', 'QQ Group'),
diff --git a/src/locales/modules/assistant.js b/src/locales/modules/assistant.js
index 0f26311..2a01d0b 100644
--- a/src/locales/modules/assistant.js
+++ b/src/locales/modules/assistant.js
@@ -112,6 +112,7 @@ export default {
confirmAiRequest: _('AI 请求执行以下操作', 'AI requests to perform the following operation', 'AI 請求執行以下操作', 'AI が以下の操作を要求しています'),
aiThinking: _('思考中...', 'Thinking...', '', '考え中...', '생각 중...', 'Đang suy nghĩ...', 'Pensando...', 'Pensando...', 'Думает...', 'Réflexion...', 'Denkt nach...'),
aiProcessingRound: _('第 {round} 轮处理中...', 'Processing round {round}...', '第 {round} 轮處理中...', '第 {round} ラウンド処理中...', '{round}라운드 처리 중...'),
+ aiCallingTools: _('调用工具: {tools}', 'Calling tools: {tools}', '調用工具: {tools}', 'ツール呼出: {tools}', '도구 호출: {tools}', 'Gọi công cụ: {tools}', 'Llamando herramientas: {tools}', 'Chamando ferramentas: {tools}', 'Вызов инструментов: {tools}', 'Appel d\'outils: {tools}', 'Werkzeugaufruf: {tools}'),
toolLoopQuestion: _('AI 已执行 {round} 轮工具调用,如何继续?', 'AI has executed {round} rounds of tool calls. How to proceed?', 'AI 已執行 {round} 轮工具呼叫,如何繼續?', 'AI が {round} ラウンドのツール呼び出しを実行しました。続行しますか?', 'AI가 {round}라운드의 도구 호출을 실행했습니다. 계속하시겠습니까?'),
toolLoopContinue: _('继续自动执行 {rounds} 轮', 'Continue auto-executing {rounds} rounds', '繼續自動執行 {rounds} 轮', 'さらに {rounds} ラウンド自動実行', '추가 {rounds}라운드 자동 실행'),
toolLoopNoBreak: _('不再询问,持续执行', 'Don\'t ask again, keep executing', '不再询問,持續執行', '中断せず継続実行'),
@@ -147,13 +148,14 @@ export default {
visitSite: _('访问官网', 'Visit Website', '訪問官網', 'サイトを訪問', '사이트 방문'),
notConfigured: _('未配置', 'Not configured', '未設定', '未設定', '미설정'),
qtcoolName: _('晴辰云', 'QingChen Cloud', '晴辰雲'),
- qtcoolDesc: _('GPT-5 / Codex 全系列,低至官方价 2-3 折', 'GPT-5 / Codex full series, as low as 20-30% of official price', 'GPT-5 / Codex 全系列,低至官方價 2-3 折'),
+ qtcoolDesc: _('每日签到领取免费额度,多种高级模型低至官方价 2-3 折', 'Daily check-in for free credits, premium models as low as 20-30% of official price', '每日簽到領取免費額度,多種高級模型低至官方價 2-3 折'),
qtcoolRecommend: _('推荐', 'Recommended', '推薦'),
- qtcoolCheckin: _('每日签到领额度', 'Daily check-in for credits', '每日簽到领额度'),
+ qtcoolCheckin: _('每日签到领额度', 'Daily check-in for credits', '每日簽到領額度'),
qtcoolLearnMore: _('了解更多 →', 'Learn more →'),
qtcoolKeyPlaceholder: _('粘贴你的 API Key', 'Paste your API Key', '貼上你的 API Key'),
- qtcoolInstructions: _('没有 Key?前往 签到页 免费领取', 'No key? Go to check-in page to get one for free', '沒有 Key?前往 簽到頁 免費领取'),
+ qtcoolInstructions: _('没有 Key?前往 签到页 免费领取', 'No key? Go to check-in page to get one for free', '沒有 Key?前往 簽到頁 免費領取'),
qtcoolEnterKey: _('请输入 API Key', 'Please enter API Key', '請輸入 API Key'),
+ qtcoolFillFirst: _('请先填写 API Key', 'Please fill in API Key first', '請先填寫 API Key'),
- qtcoolFillFirst: _('请先填写 API Key', 'Please fill in API Key first', '請先填写 API Key'),
qtcoolConnecting: _('连接中...', 'Connecting...', '連線中...'),
qtcoolLoadingModels: _('正在获取模型列表...', 'Fetching model list...', '正在取得模型列表...'),
diff --git a/src/locales/modules/models.js b/src/locales/modules/models.js
index d2c224f..07fe05c 100644
--- a/src/locales/modules/models.js
+++ b/src/locales/modules/models.js
@@ -10,7 +10,7 @@ export default {
providerHint: _('服务商是模型的来源(如 OpenAI、DeepSeek 等)。每个服务商下可添加多个模型。标记为「主模型」的将优先使用,其余作为备选自动切换。配置修改后自动保存。', 'Providers are sources of models (e.g. OpenAI, DeepSeek). Each provider can have multiple models. The "primary" model is used first; others serve as fallbacks. Changes are auto-saved.', '服務商是模型的來源(如 OpenAI、DeepSeek 等)。每個服務商下可新增多個模型。標記為「主模型」的將優先使用,其餘作為備選自動切換。設定修改后自動儲存。'),
qtcoolName: _('晴辰云', 'QingChen Cloud', '晴辰雲'),
qtcoolRecommend: _('推荐', 'Recommended', '推薦'),
- qtcoolDesc: _('GPT-5 / Codex 全系列,低至官方价 2-3 折,不满意随时可退。', 'GPT-5 / Codex full series, as low as 20-30% of official price. Refund anytime.', 'GPT-5 / Codex 全系列,低至官方價 2-3 折,不滿意隨時可退。', 'GPT-5 / Codex 全シリーズ、公式価格の2〜3割', 'GPT-5 / Codex 전 시리즈, 공식가의 2~3할'),
+ qtcoolDesc: _('每日签到领取免费模型测试额度,邀请好友再送额度。付费低至官方价 2-3 折,不满意随时可退。', 'Daily check-in for free model credits, invite friends for bonus. Paid plans as low as 20-30% of official price. Refund anytime.', '每日簽到領取免費模型測試額度,邀請好友再送額度。付費低至官方價 2-3 折,不滿意隨時可退。', '毎日チェックインで無料クレジット獲得、友達招待でボーナス。有料は公式価格の2〜3割', '매일 체크인으로 무료 크레딧, 친구 초대 보너스. 유료는 공식가의 2~3할'),
qtcoolMore: _('了解更多 →', 'Learn more →'),
- qtcoolCheckin: _('每日签到领额度', 'Daily check-in for credits', '每日簽到领额度'),
+ qtcoolCheckin: _('每日签到领额度', 'Daily check-in for credits', '每日簽到領額度'),
qtcoolKeyPlaceholder: _('粘贴 API Key(签到后在用户后台获取)', 'Paste API Key (get from dashboard after check-in)', '貼上 API Key(簽到后在使用者後台取得)'),
diff --git a/src/locales/modules/skills.js b/src/locales/modules/skills.js
index 8df42a9..5f93b86 100644
--- a/src/locales/modules/skills.js
+++ b/src/locales/modules/skills.js
@@ -59,4 +59,6 @@ export default {
uninstalled: _('已卸载 {name}', 'Uninstalled {name}', '已卸載 {name}'),
uninstallFailed: _('卸载失败', 'Uninstall failed', '卸載失敗', 'アンインストール失敗', '제거 실패'),
skillInstalled: _('Skill {name} 安装成功', 'Skill {name} installed', 'Skill {name} 安裝成功'),
+ agentLabel: _('Agent:', 'Agent:'),
+ allAgents: _('默认 (main)', 'Default (main)', '預設 (main)'),
}
diff --git a/src/locales/pt.json b/src/locales/pt.json
index df294ab..768641e 100644
--- a/src/locales/pt.json
+++ b/src/locales/pt.json
@@ -317,7 +317,7 @@
"providerHint": "Providers are sources of models (e.g. OpenAI, DeepSeek). Each provider can have multiple models. The \"primary\" model is used first; others serve as fallbacks. Changes are auto-saved.",
"qtcoolName": "QingChen Cloud",
"qtcoolRecommend": "Recommended",
- "qtcoolDesc": "GPT-5 / Codex full series, as low as 20-30% of official price. Refund anytime.",
+ "qtcoolDesc": "Daily check-in for free model credits, invite friends for bonus. Paid plans as low as 20-30% of official price. Refund anytime.",
"qtcoolMore": "Learn more →",
"qtcoolCheckin": "Daily check-in for credits",
"qtcoolKeyPlaceholder": "Paste API Key (get from dashboard after check-in)",
@@ -1972,7 +1972,7 @@
"visitSite": "Visit Website",
"notConfigured": "Not configured",
"qtcoolName": "QingChen Cloud",
- "qtcoolDesc": "GPT-5 / Codex full series, as low as 20-30% of official price",
+ "qtcoolDesc": "Daily check-in for free credits, premium models as low as 20-30% of official price",
"qtcoolRecommend": "Recommended",
"qtcoolCheckin": "Daily check-in for credits",
"qtcoolLearnMore": "Learn more →",
diff --git a/src/locales/ru.json b/src/locales/ru.json
index 2bcad7d..39934aa 100644
--- a/src/locales/ru.json
+++ b/src/locales/ru.json
@@ -317,7 +317,7 @@
"providerHint": "Providers are sources of models (e.g. OpenAI, DeepSeek). Each provider can have multiple models. The \"primary\" model is used first; others serve as fallbacks. Changes are auto-saved.",
"qtcoolName": "QingChen Cloud",
"qtcoolRecommend": "Recommended",
- "qtcoolDesc": "GPT-5 / Codex full series, as low as 20-30% of official price. Refund anytime.",
+ "qtcoolDesc": "Daily check-in for free model credits, invite friends for bonus. Paid plans as low as 20-30% of official price. Refund anytime.",
"qtcoolMore": "Learn more →",
"qtcoolCheckin": "Daily check-in for credits",
"qtcoolKeyPlaceholder": "Paste API Key (get from dashboard after check-in)",
@@ -1972,7 +1972,7 @@
"visitSite": "Visit Website",
"notConfigured": "Not configured",
"qtcoolName": "QingChen Cloud",
- "qtcoolDesc": "GPT-5 / Codex full series, as low as 20-30% of official price",
+ "qtcoolDesc": "Daily check-in for free credits, premium models as low as 20-30% of official price",
"qtcoolRecommend": "Recommended",
"qtcoolCheckin": "Daily check-in for credits",
"qtcoolLearnMore": "Learn more →",
diff --git a/src/locales/vi.json b/src/locales/vi.json
index 7e21eaa..1ee57bd 100644
--- a/src/locales/vi.json
+++ b/src/locales/vi.json
@@ -317,7 +317,7 @@
"providerHint": "Providers are sources of models (e.g. OpenAI, DeepSeek). Each provider can have multiple models. The \"primary\" model is used first; others serve as fallbacks. Changes are auto-saved.",
"qtcoolName": "QingChen Cloud",
"qtcoolRecommend": "Recommended",
- "qtcoolDesc": "GPT-5 / Codex full series, as low as 20-30% of official price. Refund anytime.",
+ "qtcoolDesc": "Daily check-in for free model credits, invite friends for bonus. Paid plans as low as 20-30% of official price. Refund anytime.",
"qtcoolMore": "Learn more →",
"qtcoolCheckin": "Daily check-in for credits",
"qtcoolKeyPlaceholder": "Paste API Key (get from dashboard after check-in)",
@@ -1972,7 +1972,7 @@
"visitSite": "Visit Website",
"notConfigured": "Not configured",
"qtcoolName": "QingChen Cloud",
- "qtcoolDesc": "GPT-5 / Codex full series, as low as 20-30% of official price",
+ "qtcoolDesc": "Daily check-in for free credits, premium models as low as 20-30% of official price",
"qtcoolRecommend": "Recommended",
"qtcoolCheckin": "Daily check-in for credits",
"qtcoolLearnMore": "Learn more →",
diff --git a/src/locales/zh-CN.json b/src/locales/zh-CN.json
index d97ac28..cfe7643 100644
--- a/src/locales/zh-CN.json
+++ b/src/locales/zh-CN.json
@@ -317,7 +317,7 @@
"providerHint": "服务商是模型的来源(如 OpenAI、DeepSeek 等)。每个服务商下可添加多个模型。标记为「主模型」的将优先使用,其余作为备选自动切换。配置修改后自动保存。",
"qtcoolName": "晴辰云",
"qtcoolRecommend": "推荐",
- "qtcoolDesc": "GPT-5 / Codex 全系列,低至官方价 2-3 折,不满意随时可退。",
+ "qtcoolDesc": "每日签到领取免费模型测试额度,邀请好友再送额度。付费低至官方价 2-3 折,不满意随时可退。",
"qtcoolMore": "了解更多 →",
"qtcoolCheckin": "每日签到领额度",
"qtcoolKeyPlaceholder": "粘贴 API Key(签到后在用户后台获取)",
@@ -2093,7 +2093,7 @@
"visitSite": "访问官网",
"notConfigured": "未配置",
"qtcoolName": "晴辰云",
- "qtcoolDesc": "GPT-5 / Codex 全系列,低至官方价 2-3 折",
+ "qtcoolDesc": "每日签到领取免费额度,多种高级模型低至官方价 2-3 折",
"qtcoolRecommend": "推荐",
"qtcoolCheckin": "每日签到领额度",
"qtcoolLearnMore": "了解更多 →",
diff --git a/src/locales/zh-TW.json b/src/locales/zh-TW.json
index c45ae91..b23813b 100644
--- a/src/locales/zh-TW.json
+++ b/src/locales/zh-TW.json
@@ -317,7 +317,7 @@
"providerHint": "服務商是模型的來源(如 OpenAI、DeepSeek 等)。每個服務商下可新增多個模型。標記為「主模型」的將優先使用,其餘作為備選自動切換。設定修改后自動儲存。",
"qtcoolName": "晴辰雲",
"qtcoolRecommend": "推薦",
- "qtcoolDesc": "GPT-5 / Codex 全系列,低至官方價 2-3 折,不滿意隨時可退。",
+ "qtcoolDesc": "每日簽到領取免費模型測試額度,邀請好友再送額度。付費低至官方價 2-3 折,不滿意隨時可退。",
"qtcoolMore": "了解更多 →",
- "qtcoolCheckin": "每日簽到领额度",
+ "qtcoolCheckin": "每日簽到領額度",
"qtcoolKeyPlaceholder": "貼上 API Key(簽到后在使用者後台取得)",
@@ -1973,7 +1973,7 @@
"visitSite": "訪問官網",
"notConfigured": "未設定",
"qtcoolName": "晴辰雲",
- "qtcoolDesc": "GPT-5 / Codex 全系列,低至官方價 2-3 折",
+ "qtcoolDesc": "每日簽到領取免費額度,多種高級模型低至官方價 2-3 折",
"qtcoolRecommend": "推薦",
- "qtcoolCheckin": "每日簽到领额度",
+ "qtcoolCheckin": "每日簽到領額度",
"qtcoolLearnMore": "了解更多 →",
diff --git a/src/main.js b/src/main.js
index 6e2a05e..a5f65bd 100644
--- a/src/main.js
+++ b/src/main.js
@@ -741,12 +741,7 @@ async function checkGlobalUpdate() {
const dismissed = localStorage.getItem('clawpanel_update_dismissed')
if (dismissed === ver) return
- // 热更新已下载并重载过,不再重复提示同一版本
- const hotApplied = localStorage.getItem('clawpanel_hot_update_applied')
- if (hotApplied === ver) return
-
const changelog = info.manifest?.changelog || ''
- const isWeb = !isTauriRuntime()
banner.classList.remove('update-banner-hidden')
banner.innerHTML = `
@@ -756,12 +751,8 @@ async function checkGlobalUpdate() {
${t('about.versionAvailable', { version: ver })}
${changelog ? `· ${changelog}` : ''}
- ${isWeb
- ? `
-
${t('about.releaseNotes')}`
- : `
-
${t('about.fullInstaller')}`
- }
+
${t('about.downloadFromWebsite')}
+
${t('about.downloadFromGitHub')}
`
@@ -771,55 +762,6 @@ async function checkGlobalUpdate() {
localStorage.setItem('clawpanel_update_dismissed', ver)
banner.classList.add('update-banner-hidden')
})
-
- // Web 模式:显示更新命令弹窗
- banner.querySelector('#btn-update-show-cmd')?.addEventListener('click', () => {
- const overlay = document.createElement('div')
- overlay.className = 'modal-overlay'
- overlay.innerHTML = `
-
-
${t('about.updateToVersion', { version: ver })}
-
-
${t('about.runOnServer')}
-
cd /opt/clawpanel
-git pull origin main
-npm install
-npm run build
-sudo systemctl restart clawpanel
-
- ${t('about.updateCommandHint')}
-
-
-
-
-
-
- `
- document.body.appendChild(overlay)
- overlay.addEventListener('click', (e) => { if (e.target === overlay) overlay.remove() })
- overlay.querySelector('[data-action="close"]').onclick = () => overlay.remove()
- overlay.addEventListener('keydown', (e) => { if (e.key === 'Escape') overlay.remove() })
- })
-
- // Tauri 热更新按钮
- banner.querySelector('#btn-update-hot')?.addEventListener('click', async () => {
- const btn = banner.querySelector('#btn-update-hot')
- if (!btn) return
- btn.disabled = true
- btn.textContent = t('about.downloading')
- try {
- await api.downloadFrontendUpdate(info.manifest?.url || '', info.manifest?.hash || '')
- localStorage.setItem('clawpanel_hot_update_applied', ver)
- btn.textContent = t('about.reloadApp')
- btn.disabled = false
- btn.onclick = () => window.location.reload()
- } catch (e) {
- btn.textContent = t('about.downloadFailedShort')
- btn.disabled = false
- const { toast } = await import('./components/toast.js')
- toast(t('about.downloadFailed') + (e.message || e), 'error')
- }
- })
} catch {
// 检查失败静默忽略
}
diff --git a/src/pages/about.js b/src/pages/about.js
index f785242..a371c6a 100644
--- a/src/pages/about.js
+++ b/src/pages/about.js
@@ -416,63 +416,30 @@ async function doInstall(page, title, source, version) {
async function checkHotUpdate(cards, panelVersion) {
const el = () => cards.querySelector('#panel-update-meta')
+ const btnSm = 'padding:2px 8px;font-size:var(--font-size-xs)'
try {
const info = await api.checkFrontendUpdate()
const meta = el()
if (!meta) return
- if (info.updateReady) {
- // 已下载更新,等待重载
- const ver = info.manifest?.version || info.latestVersion || ''
- meta.innerHTML = `
-
v${ver} ${t('about.updateReady')}
-
-
- `
- meta.querySelector('#btn-hot-reload')?.addEventListener('click', () => {
- window.location.reload()
- })
- meta.querySelector('#btn-hot-rollback')?.addEventListener('click', async () => {
- try {
- await api.rollbackFrontendUpdate()
- toast(t('about.rollbackSuccess'), 'success')
- setTimeout(() => window.location.reload(), 800)
- } catch (e) {
- toast(t('about.rollbackFailed') + (e.message || e), 'error')
- }
- })
- } else if (info.hasUpdate) {
- // 有新版本可下载
- const ver = info.latestVersion
- const manifest = info.manifest || {}
- const changelog = manifest.changelog || ''
+ if (info.hasUpdate || info.updateReady) {
+ const ver = info.latestVersion || info.manifest?.version || ''
+ const changelog = info.manifest?.changelog || ''
meta.innerHTML = `
${t('about.newVersion')}: v${ver}
${changelog ? `
${changelog}` : ''}
-
-
${t('about.fullInstaller')}
+
${t('about.downloadFromWebsite')}
+
${t('about.downloadFromGitHub')}
`
- meta.querySelector('#btn-hot-download')?.addEventListener('click', async () => {
- const btn = meta.querySelector('#btn-hot-download')
- if (btn) { btn.disabled = true; btn.textContent = t('about.downloading') }
- try {
- await api.downloadFrontendUpdate(manifest.url, manifest.hash || '')
- toast(t('about.downloadDone'), 'success')
- checkHotUpdate(cards, panelVersion)
- } catch (e) {
- toast(t('about.downloadFailed') + (e.message || e), 'error')
- if (btn) { btn.disabled = false; btn.textContent = t('about.retry') }
- }
- })
} else if (!info.compatible) {
- meta.innerHTML = `
${t('about.needFullUpdate')} ${t('about.goToWebsite')} GitHub`
+ meta.innerHTML = `
${t('about.needFullUpdate')} ${t('about.downloadFromWebsite')} ${t('about.downloadFromGitHub')}`
} else {
meta.innerHTML = `
${t('about.upToDate')}`
}
} catch (err) {
const meta = el()
if (!meta) return
- meta.innerHTML = `
${t('about.checkUpdateFailed')} ${t('about.goToWebsite')}`
+ meta.innerHTML = `
${t('about.checkUpdateFailed')} ${t('about.goToWebsite')}`
}
}
diff --git a/src/pages/assistant.js b/src/pages/assistant.js
index cf0879c..fee0fab 100644
--- a/src/pages/assistant.js
+++ b/src/pages/assistant.js
@@ -1569,9 +1569,14 @@ async function callChatCompletions(base, messages, onChunk) {
let contentChunks = 0
let reasoningChunks = 0
let reasoningBuf = ''
+ let streamError = ''
await readSSEStream(resp, (json) => {
chunkCount++
+ // 捕获流内错误事件(如模型不可用)
+ if (json.error) {
+ streamError = streamError || json.error?.message || json.error || ''
+ }
const d = json.choices?.[0]?.delta
if (!d) return
@@ -1593,6 +1598,12 @@ async function callChatCompletions(base, messages, onChunk) {
onChunk(reasoningBuf)
_lastDebugInfo.fallbackToReasoning = true
}
+ // 流式完成但没有任何内容 → 视为无效响应(防止空气泡)
+ if (contentChunks === 0 && !reasoningBuf) {
+ const errDetail = streamError || t('assistant.errInvalidResponse')
+ console.warn('[assistant] SSE 流完成但 0 内容块:', errDetail)
+ throw new Error(errDetail)
+ }
} else {
// 非流式响应:API 忽略了 stream:true,直接返回完整 JSON
_lastDebugInfo.streaming = false
@@ -1601,7 +1612,13 @@ async function callChatCompletions(base, messages, onChunk) {
console.log('[assistant] 非流式响应:', json)
const msg = json.choices?.[0]?.message
const content = msg?.content || msg?.reasoning_content || ''
- if (content) onChunk(content)
+ if (content) {
+ onChunk(content)
+ } else {
+ // 尝试从 error 字段提取错误信息
+ const errMsg = json.error?.message || json.choices?.[0]?.finish_reason || ''
+ throw new Error(errMsg || t('assistant.errInvalidResponse'))
+ }
}
}
@@ -2038,8 +2055,8 @@ async function executeToolWithSafety(toolName, args, tcForConfirm) {
return { result, approved }
}
-// 带工具调用的 AI 请求(非流式,用于 tool_calls 检测循环)
-async function callAIWithTools(messages, onStatus, onToolProgress) {
+// 带工具调用的 AI 请求(流式,支持 tool_calls 循环 + 打字机效果)
+async function callAIWithTools(messages, onStatus, onToolProgress, onChunk) {
const apiType = normalizeApiType(_config.apiType)
if (!_config.baseUrl || !_config.model || (requiresApiKey(apiType) && !_config.apiKey)) {
throw new Error(t('assistant.errConfigFirst'))
@@ -2191,11 +2208,12 @@ async function callAIWithTools(messages, onStatus, onToolProgress) {
return { content: textParts, toolHistory }
}
- // ── OpenAI 工具调用 ──
+ // ── OpenAI 工具调用(流式) ──
const body = {
model: _config.model,
messages: currentMessages,
temperature: _config.temperature || 0.7,
+ stream: true,
}
if (tools.length > 0) body.tools = tools
@@ -2213,16 +2231,98 @@ async function callAIWithTools(messages, onStatus, onToolProgress) {
throw new Error(errMsg)
}
- const data = await resp.json()
- const choice = data.choices?.[0]
- const assistantMsg = choice?.message
+ // 流式累积状态
+ let contentBuf = ''
+ let reasoningBuf = ''
+ let streamError = ''
+ const pendingToolCalls = [] // [{ id, type, function: { name, arguments } }]
+ let finishReason = ''
- if (!assistantMsg) throw new Error(t('assistant.errInvalidResponse'))
+ const ct = resp.headers.get('content-type') || ''
+ if (ct.includes('text/event-stream') || ct.includes('text/plain')) {
+ // ── SSE 流式解析 ──
+ await readSSEStream(resp, (json) => {
+ if (json.error) {
+ streamError = streamError || json.error?.message || json.error || ''
+ }
+ const choice = json.choices?.[0]
+ if (!choice) return
+ if (choice.finish_reason) finishReason = choice.finish_reason
- if (assistantMsg.tool_calls && assistantMsg.tool_calls.length > 0) {
+ const d = choice.delta
+ if (!d) return
+
+ // 累积 content → 打字机效果
+ if (d.content) {
+ contentBuf += d.content
+ if (onChunk) onChunk(d.content)
+ }
+
+ // 累积 reasoning_content
+ if (d.reasoning_content) {
+ reasoningBuf += d.reasoning_content
+ }
+
+ // 累积 tool_calls 分块
+ if (d.tool_calls) {
+ for (const tc of d.tool_calls) {
+ const idx = tc.index ?? pendingToolCalls.length
+ if (!pendingToolCalls[idx]) {
+ pendingToolCalls[idx] = {
+ id: tc.id || '',
+ type: tc.type || 'function',
+ function: { name: '', arguments: '' },
+ }
+ }
+ const slot = pendingToolCalls[idx]
+ if (tc.id) slot.id = tc.id
+ if (tc.function?.name) slot.function.name += tc.function.name
+ if (tc.function?.arguments) slot.function.arguments += tc.function.arguments
+ }
+ // 实时显示工具调用进度(显示已知名称)
+ const names = pendingToolCalls.filter(t => t.function.name).map(t => t.function.name)
+ if (names.length) {
+ onStatus(t('assistant.aiCallingTools', { tools: names.join(', ') }) || `调用工具: ${names.join(', ')}`)
+ }
+ }
+ }, _abortController?.signal)
+ } else {
+ // ── 非流式回退(API 忽略了 stream:true)──
+ const data = await resp.json()
+ const choice = data.choices?.[0]
+ const msg = choice?.message
+ if (!msg) {
+ const errMsg = data.error?.message || ''
+ throw new Error(errMsg || t('assistant.errInvalidResponse'))
+ }
+ finishReason = choice.finish_reason || ''
+ contentBuf = msg.content || msg.reasoning_content || ''
+ if (contentBuf && onChunk) onChunk(contentBuf)
+ if (msg.tool_calls) {
+ for (const tc of msg.tool_calls) {
+ pendingToolCalls.push(tc)
+ }
+ }
+ }
+
+ // 流式完成但无任何内容且无工具调用 → 无效响应
+ if (!contentBuf && !reasoningBuf && pendingToolCalls.length === 0) {
+ throw new Error(streamError || t('assistant.errInvalidResponse'))
+ }
+
+ // 无 content 但有 reasoning → 作为回复
+ if (!contentBuf && reasoningBuf && pendingToolCalls.length === 0) {
+ contentBuf = reasoningBuf
+ if (onChunk) onChunk(contentBuf)
+ }
+
+ // ── 处理工具调用 ──
+ if (pendingToolCalls.length > 0) {
+ // 构造完整 assistant message 用于上下文
+ const assistantMsg = { role: 'assistant', content: contentBuf || null, tool_calls: pendingToolCalls }
currentMessages.push(assistantMsg)
- for (const tc of assistantMsg.tool_calls) {
+ for (const tc of pendingToolCalls) {
let args
try { args = JSON.parse(tc.function.arguments) } catch { args = {} }
const toolName = tc.function.name
@@ -2245,8 +2345,7 @@ async function callAIWithTools(messages, onStatus, onToolProgress) {
continue
}
- const content = assistantMsg.content || assistantMsg.reasoning_content || ''
- return { content, toolHistory }
+ return { content: contentBuf, toolHistory }
}
}
@@ -2433,6 +2532,9 @@ function renderMessages() {
).join('')}
` : ''
return `
${imagesHtml}${textPart ? escHtml(textPart) : ''}
`
} else if (m.role === 'assistant') {
+ // 跳过空的 AI 消息(历史脏数据),除非正在流式中(最后一条是占位符)
+ const isLastMsg = idx === session.messages.length - 1
+ if (!m.content && !m.toolHistory?.length && !(isLastMsg && _isStreaming)) return ''
const toolHtml = renderToolBlocks(m.toolHistory)
return `
${toolHtml}
${renderMarkdown(m.content)}
`
}
@@ -3609,14 +3711,14 @@ async function sendMessageDirect(text) {
try {
if (toolsEnabled) {
- // ── 工具模式:非流式,支持 tool_calls 循环 ──
+ // ── 工具模式:流式 + tool_calls 循环 ──
const aiMsgContainers = _messagesEl?.querySelectorAll('.ast-msg-ai')
const lastContainer = aiMsgContainers?.[aiMsgContainers.length - 1]
const result = await callAIWithTools(contextMessages,
// onStatus
(status) => {
- if (lastBubble) lastBubble.innerHTML = `
${escHtml(status)}`
+ if (lastBubble && !aiMsg.content) lastBubble.innerHTML = `
${escHtml(status)}`
},
// onToolProgress
(history) => {
@@ -3627,13 +3729,31 @@ async function sendMessageDirect(text) {
const bubble = lastContainer.querySelector('.ast-msg-bubble-ai')
lastContainer.innerHTML = toolHtml + (bubble ? bubble.outerHTML : '')
if (_messagesEl) _messagesEl.scrollTop = _messagesEl.scrollHeight
+ },
+ // onChunk — 流式打字机效果(需从 container 重新查询 bubble,因为 onToolProgress 会替换 innerHTML)
+ (chunk) => {
+ aiMsg.content += chunk
+ throttledSave()
+ const bubble = lastContainer?.querySelector('.ast-msg-bubble-ai') || lastBubble
+ if (bubble) {
+ const now = Date.now()
+ if (now - _lastRenderTime > 50) {
+ bubble.innerHTML = renderMarkdown(aiMsg.content) + '
▊'
+ if (_messagesEl) _messagesEl.scrollTop = _messagesEl.scrollHeight
+ _lastRenderTime = now
+ }
+ }
}
)
- aiMsg.content = result.content
+ // result.content 可能在流式中已经通过 onChunk 累积到 aiMsg.content
+ // 但如果有额外内容(如 reasoning 回退),以 result 为准
+ if (result.content && !aiMsg.content) aiMsg.content = result.content
if (result.toolHistory.length > 0) {
aiMsg.toolHistory = result.toolHistory
}
+ const finalBubble = lastContainer?.querySelector('.ast-msg-bubble-ai') || lastBubble
+ if (finalBubble && aiMsg.content) finalBubble.innerHTML = renderMarkdown(aiMsg.content)
renderMessages()
} else {
// ── 普通流式模式 ──
@@ -3707,6 +3827,11 @@ async function sendMessageDirect(text) {
stopStreamRefresh()
if (_sendBtn) _sendBtn.innerHTML = sendIcon()
if (_textarea) _textarea.focus()
+ // 清理空的 AI 消息(防止持久化空气泡)
+ const _lastMsg = session.messages[session.messages.length - 1]
+ if (_lastMsg?.role === 'assistant' && !_lastMsg.content && !_lastMsg.toolHistory?.length) {
+ session.messages.pop()
+ }
session.updatedAt = Date.now()
flushSave()
if (getSessionStatus(session.id) !== 'error') {
@@ -3749,7 +3874,7 @@ async function retryAIResponse(session) {
const lastContainer = aiMsgContainers?.[aiMsgContainers.length - 1]
const result = await callAIWithTools(contextMessages,
- (status) => { if (lastBubble) lastBubble.innerHTML = `
${escHtml(status)}` },
+ (status) => { if (lastBubble && !aiMsg.content) lastBubble.innerHTML = `
${escHtml(status)}` },
(history) => {
aiMsg.toolHistory = history
throttledSave()
@@ -3758,10 +3883,26 @@ async function retryAIResponse(session) {
const bubble = lastContainer.querySelector('.ast-msg-bubble-ai')
lastContainer.innerHTML = toolHtml + (bubble ? bubble.outerHTML : '')
if (_messagesEl) _messagesEl.scrollTop = _messagesEl.scrollHeight
+ },
+ // onChunk — 流式打字机效果(需从 container 重新查询 bubble)
+ (chunk) => {
+ aiMsg.content += chunk
+ throttledSave()
+ const bubble = lastContainer?.querySelector('.ast-msg-bubble-ai') || lastBubble
+ if (bubble) {
+ const now = Date.now()
+ if (now - _lastRenderTime > 50) {
+ bubble.innerHTML = renderMarkdown(aiMsg.content) + '
▊'
+ if (_messagesEl) _messagesEl.scrollTop = _messagesEl.scrollHeight
+ _lastRenderTime = now
+ }
+ }
}
)
- aiMsg.content = result.content
+ if (result.content && !aiMsg.content) aiMsg.content = result.content
if (result.toolHistory.length > 0) aiMsg.toolHistory = result.toolHistory
+ const retryFinalBubble = lastContainer?.querySelector('.ast-msg-bubble-ai') || lastBubble
+ if (retryFinalBubble && aiMsg.content) retryFinalBubble.innerHTML = renderMarkdown(aiMsg.content)
renderMessages()
} else {
await callAI(contextMessages, (chunk) => {
@@ -3823,6 +3964,11 @@ async function retryAIResponse(session) {
stopStreamRefresh()
if (_sendBtn) _sendBtn.innerHTML = sendIcon()
if (_textarea) _textarea.focus()
+ // 清理空的 AI 消息(防止持久化空气泡)
+ const _retryLastMsg = session.messages[session.messages.length - 1]
+ if (_retryLastMsg?.role === 'assistant' && !_retryLastMsg.content && !_retryLastMsg.toolHistory?.length) {
+ session.messages.pop()
+ }
session.updatedAt = Date.now()
flushSave()
if (getSessionStatus(session.id) !== 'error') {
diff --git a/src/pages/dashboard.js b/src/pages/dashboard.js
index 67fa741..2171a83 100644
--- a/src/pages/dashboard.js
+++ b/src/pages/dashboard.js
@@ -9,6 +9,8 @@ import { navigate } from '../router.js'
import { t } from '../lib/i18n.js'
let _unsubGw = null
+let _loadInFlight = false
+let _lastGwChangeLoad = 0
export async function render() {
const page = document.createElement('div')
@@ -52,9 +54,12 @@ export async function render() {
})
page.__retryLoad = () => loadDashboardData(page).catch(() => {})
- // 监听 Gateway 状态变化,自动刷新仪表盘
+ // 监听 Gateway 状态变化,节流刷新仪表盘(至少间隔 5 秒,防止状态抖动导致 UI 闪烁)
if (_unsubGw) _unsubGw()
_unsubGw = onGatewayChange(() => {
+ const now = Date.now()
+ if (now - _lastGwChangeLoad < 5000) return
+ _lastGwChangeLoad = now
loadDashboardData(page)
})
@@ -106,6 +111,13 @@ function syncDashboardInstanceScope() {
}
async function loadDashboardData(page, fullRefresh = false) {
+ // 并发保护:如果上一次加载仍在进行,跳过本次(fullRefresh 除外)
+ if (_loadInFlight && !fullRefresh) return
+ _loadInFlight = true
+ try { await _loadDashboardDataInner(page, fullRefresh) } finally { _loadInFlight = false }
+}
+
+async function _loadDashboardDataInner(page, fullRefresh) {
syncDashboardInstanceScope()
// 分波加载:关键数据先渲染,次要数据后填充,减少白屏等待
// 轻量调用(读文件)每次都做;重量调用(spawn CLI/网络请求)只在首次或手动刷新时做
diff --git a/src/pages/skills.js b/src/pages/skills.js
index 4f63ade..91a24a1 100644
--- a/src/pages/skills.js
+++ b/src/pages/skills.js
@@ -7,6 +7,7 @@ import { toast } from '../components/toast.js'
import { t } from '../lib/i18n.js'
let _loadSeq = 0
+let _selectedAgentId = null // null = default (main)
function esc(str) {
if (!str) return ''
@@ -16,11 +17,34 @@ function esc(str) {
export async function render() {
const page = document.createElement('div')
page.className = 'page'
+
+ // 加载 Agent 列表
+ let agents = []
+ try {
+ const list = await api.listAgents()
+ if (Array.isArray(list)) agents = list
+ } catch {}
+
+ const agentOptions = agents.length > 1
+ ? `
+
+
+
`
+ : ''
+
page.innerHTML = `
+ ${agentOptions}
${t('skills.tabInstalled')}
${t('skills.tabStore')}
@@ -41,6 +65,19 @@ export async function render() {
`
bindEvents(page)
loadSkills(page)
+
+ // Agent 选择器变化时刷新
+ const agentSelect = page.querySelector('#skills-agent-select')
+ if (agentSelect) {
+ agentSelect.addEventListener('change', () => {
+ const val = agentSelect.value
+ _selectedAgentId = (val === 'main') ? null : val
+ _storeIndex = null // 清除商店缓存
+ _installedNames = new Set()
+ loadSkills(page)
+ })
+ }
+
return page
}
@@ -55,7 +92,7 @@ async function loadSkills(page) {
`
try {
- const data = await api.skillsList()
+ const data = await api.skillsList(_selectedAgentId)
if (seq !== _loadSeq) return
renderSkills(el, data)
} catch (e) {
@@ -206,7 +243,7 @@ async function handleInfo(page, name) {
detail.innerHTML = `
${t('skills.loadingDetail')}
`
detail.scrollIntoView({ behavior: 'smooth', block: 'nearest' })
try {
- const skill = await api.skillsInfo(name)
+ const skill = await api.skillsInfo(name, _selectedAgentId)
const s = skill || {}
const reqs = s.requirements || {}
const miss = s.missing || {}
@@ -272,7 +309,7 @@ async function loadStore(page) {
_storeIndex = await api.skillhubIndex()
// 获取已安装列表用于标记
try {
- const data = await api.skillsList()
+ const data = await api.skillsList(_selectedAgentId)
_installedNames = new Set((data?.skills || []).map(s => s.name))
} catch { _installedNames = new Set() }
renderStoreItems(results, _storeIndex)
@@ -346,7 +383,7 @@ async function handleStoreInstall(page, btn) {
btn.disabled = true
btn.textContent = t('skills.installing')
try {
- await api.skillhubInstall(slug)
+ await api.skillhubInstall(slug, _selectedAgentId)
toast(t('skills.skillInstalled', { name: slug }), 'success')
btn.textContent = t('skills.installed')
btn.classList.remove('btn-primary')
@@ -367,7 +404,7 @@ async function handleSkillUninstall(page, btn) {
btn.disabled = true
btn.textContent = t('skills.uninstalling')
try {
- await api.skillsUninstall(name)
+ await api.skillsUninstall(name, _selectedAgentId)
toast(t('skills.uninstalled', { name }), 'success')
await loadSkills(page)
} catch (e) {