Mirror of https://github.com/jxxghp/MoviePilot.git, synced 2026-05-11 09:59:51 +08:00
feat(agent): merge MiniMax coding presets
@@ -449,14 +449,22 @@ class LLMHelper:
这主要用于单测 stub 环境以及极端的最小运行环境,正常生产路径仍优先
走 `LLMProviderManager.resolve_runtime()`。
"""
normalized_provider_name = str(provider_name or "").strip().lower()
if normalized_provider_name == "minimax-coding":
normalized_provider_name = "minimax"

api_key_value = api_key if api_key is not None else settings.LLM_API_KEY
base_url_value = base_url if base_url is not None else settings.LLM_BASE_URL
if not api_key_value:
raise ValueError("未配置LLM API Key")

runtime_name = provider_name if provider_name in {"google", "deepseek"} else "openai_compatible"
runtime_name = (
normalized_provider_name
if normalized_provider_name in {"google", "deepseek"}
else "openai_compatible"
)
return {
"provider_id": provider_name,
"provider_id": normalized_provider_name,
"runtime": runtime_name,
"model_id": model_name,
"api_key": api_key_value,
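Note: the hunk above maps the legacy `minimax-coding` provider name onto `minimax` before building the fallback runtime dict. A standalone sketch of that normalization (illustrative only, not the repository's helper):

# Illustrative re-implementation of the fallback normalization shown above.
def build_fallback_runtime(provider_name: str, model_name: str, api_key: str) -> dict:
    normalized = str(provider_name or "").strip().lower()
    if normalized == "minimax-coding":  # legacy alias kept for existing configurations
        normalized = "minimax"
    runtime = normalized if normalized in {"google", "deepseek"} else "openai_compatible"
    return {"provider_id": normalized, "runtime": runtime, "model_id": model_name, "api_key": api_key}

assert build_fallback_runtime("MiniMax-Coding", "MiniMax-M1", "sk-x")["provider_id"] == "minimax"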
@@ -510,6 +518,7 @@ class LLMHelper:
thinking_level: str | None = None,
api_key: str | None = settings.LLM_API_KEY,
base_url: str | None = settings.LLM_BASE_URL,
base_url_preset: str | None = settings.LLM_BASE_URL_PRESET,
):
"""
获取LLM实例
@@ -539,6 +548,7 @@ class LLMHelper:
model=model_name,
api_key=api_key,
base_url=base_url,
base_url_preset_id=base_url_preset,
)
except Exception as err:
logger.debug(f"LLM provider 目录不可用,回退到旧运行时逻辑: {err}")
@@ -700,6 +710,7 @@ class LLMHelper:
thinking_level: str | None = None,
api_key: str | None = None,
base_url: str | None = None,
base_url_preset: str | None = None,
) -> dict:
"""
使用当前已保存配置执行一次最小 LLM 调用。
@@ -714,6 +725,7 @@ class LLMHelper:
thinking_level=thinking_level,
api_key=api_key,
base_url=base_url,
base_url_preset=base_url_preset,
)
try:
response = await asyncio.wait_for(llm.ainvoke(prompt), timeout=timeout)
@@ -743,6 +755,7 @@ class LLMHelper:
provider: str,
api_key: str | None = None,
base_url: str | None = None,
base_url_preset: str | None = None,
force_refresh: bool = False,
) -> List[dict[str, Any]]:
"""
@@ -759,6 +772,7 @@ class LLMHelper:
provider_id=provider,
api_key=api_key,
base_url=base_url,
base_url_preset_id=base_url_preset,
force_refresh=force_refresh,
)
except Exception as err:
@@ -776,6 +790,7 @@ class LLMHelper:
LLMProviderManager().resolve_model_list_base_url(
provider_id=provider,
base_url=base_url,
base_url_preset_id=base_url_preset,
)
or base_url
)
@@ -48,6 +48,7 @@ class ProviderAuthMethod:
class ProviderUrlPreset:
"""前端展示用的 Base URL 预设。"""

id: str
label: str
value: str
model_list_base_url: Optional[str] = None
@@ -418,11 +419,13 @@ class LLMProviderManager(metaclass=Singleton):
models_dev_provider_id="siliconflow-cn",
base_url_presets=(
url_preset(
id="siliconflow-cn",
label="中国大陆",
value="https://api.siliconflow.cn/v1",
models_dev_provider_id="siliconflow-cn",
),
url_preset(
id="siliconflow-global",
label="Global",
value="https://api.siliconflow.com/v1",
models_dev_provider_id="siliconflow",
@@ -439,11 +442,13 @@ class LLMProviderManager(metaclass=Singleton):
models_dev_provider_id="moonshotai-cn",
base_url_presets=(
url_preset(
id="moonshot-cn",
label="中国站",
value="https://api.moonshot.cn/v1",
models_dev_provider_id="moonshotai-cn",
),
url_preset(
id="moonshot-global",
label="国际站",
value="https://api.moonshot.ai/v1",
models_dev_provider_id="moonshotai",
@@ -469,11 +474,13 @@ class LLMProviderManager(metaclass=Singleton):
models_dev_provider_id="zhipuai",
base_url_presets=(
url_preset(
id="zhipu-general",
label="Token Plan / 通用 API",
value="https://open.bigmodel.cn/api/paas/v4",
models_dev_provider_id="zhipuai",
),
url_preset(
id="zhipu-coding",
label="Coding Plan",
value="https://open.bigmodel.cn/api/coding/paas/v4",
model_list_base_url="https://open.bigmodel.cn/api/paas/v4",
@@ -490,11 +497,13 @@ class LLMProviderManager(metaclass=Singleton):
sort_order=66,
base_url_presets=(
url_preset(
id="zai-general",
label="Token Plan / 通用 API",
value="https://api.z.ai/api/paas/v4",
models_dev_provider_id="zai",
),
url_preset(
id="zai-coding",
label="Coding Plan",
value="https://api.z.ai/api/coding/paas/v4",
models_dev_provider_id="zai-coding-plan",
@@ -511,22 +520,26 @@ class LLMProviderManager(metaclass=Singleton):
models_dev_provider_id="alibaba-cn",
base_url_presets=(
url_preset(
id="alibaba-cn-general",
label="中国内地 / 通用",
value="https://dashscope.aliyuncs.com/compatible-mode/v1",
models_dev_provider_id="alibaba-cn",
),
url_preset(
id="alibaba-global-general",
label="国际站 / 通用",
value="https://dashscope-intl.aliyuncs.com/compatible-mode/v1",
models_dev_provider_id="alibaba",
),
url_preset(
id="alibaba-cn-coding",
label="中国内地 / Coding Plan",
value="https://coding.dashscope.aliyuncs.com/v1",
model_list_base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
models_dev_provider_id="alibaba-coding-plan-cn",
),
url_preset(
id="alibaba-global-coding",
label="国际站 / Coding Plan",
value="https://coding-intl.dashscope.aliyuncs.com/v1",
model_list_base_url="https://dashscope-intl.aliyuncs.com/compatible-mode/v1",
@@ -543,10 +556,12 @@ class LLMProviderManager(metaclass=Singleton):
default_base_url="https://qianfan.baidubce.com/v2",
base_url_presets=(
url_preset(
id="baidu-qianfan-general",
label="通用 API",
value="https://qianfan.baidubce.com/v2",
),
url_preset(
id="baidu-qianfan-coding",
label="Coding Plan",
value="https://qianfan.baidubce.com/v2/coding",
),
@@ -563,10 +578,12 @@ class LLMProviderManager(metaclass=Singleton):
default_base_url="https://modelservice.jdcloud.com/v1",
base_url_presets=(
url_preset(
id="jdcloud-general",
label="通用 API",
value="https://modelservice.jdcloud.com/v1",
),
url_preset(
id="jdcloud-coding",
label="Coding Plan",
value="https://modelservice.jdcloud.com/coding/openai/v1",
),
@@ -593,11 +610,13 @@ class LLMProviderManager(metaclass=Singleton):
default_base_url="https://tokenhub.tencentmaas.com/v1",
base_url_presets=(
url_preset(
id="tencent-tokenhub",
label="TokenHub",
value="https://tokenhub.tencentmaas.com/v1",
models_dev_provider_id="tencent-tokenhub",
),
url_preset(
id="tencent-coding",
label="Coding Plan",
value="https://api.lkeap.cloud.tencent.com/coding/v3",
models_dev_provider_id="tencent-coding-plan",
@@ -635,11 +654,13 @@ class LLMProviderManager(metaclass=Singleton):
sort_order=115,
base_url_presets=(
url_preset(
id="opencode-zen",
label="Zen",
value="https://opencode.ai/zen/v1",
models_dev_provider_id="opencode",
),
url_preset(
id="opencode-go",
label="Go",
value="https://opencode.ai/zen/go/v1",
models_dev_provider_id="opencode-go",
@@ -656,39 +677,32 @@ class LLMProviderManager(metaclass=Singleton):
models_dev_provider_id="minimax-cn",
base_url_presets=(
url_preset(
id="minimax-cn-general",
label="中国内地 / 通用",
value="https://api.minimaxi.com/anthropic/v1",
models_dev_provider_id="minimax-cn",
),
url_preset(
id="minimax-global-general",
label="国际站 / 通用",
value="https://api.minimax.io/anthropic/v1",
models_dev_provider_id="minimax",
),
),
api_key_hint="填写 MiniMax API Key,可在中国内地与国际站通用端点间切换。",
description="MiniMax Anthropic-compatible 通用端点。",
),
anthropic_provider(
provider_id="minimax-coding",
name="MiniMax Coding Plan",
default_base_url="https://api.minimaxi.com/anthropic/v1",
sort_order=121,
models_dev_provider_id="minimax-cn-coding-plan",
base_url_presets=(
url_preset(
id="minimax-cn-coding",
label="中国内地 / Coding Plan",
value="https://api.minimaxi.com/anthropic/v1",
models_dev_provider_id="minimax-cn-coding-plan",
),
url_preset(
id="minimax-global-coding",
label="国际站 / Coding Plan",
value="https://api.minimax.io/anthropic/v1",
models_dev_provider_id="minimax-coding-plan",
),
),
api_key_hint="填写 MiniMax API Key,可在中国内地与国际站 Coding Plan 目录间切换。",
description="MiniMax Coding Plan Anthropic-compatible 端点。",
api_key_hint="填写 MiniMax API Key,可在中国内地、国际站、通用与 Coding Plan 目录间切换。",
description="MiniMax Anthropic-compatible 端点,支持通用与 Coding Plan 目录预设。",
),
catalog_openai_provider(
provider_id="xiaomi",
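Note: this hunk folds the separate `minimax-coding` provider into the single `minimax` provider; the Coding Plan endpoints become additional `url_preset` entries, and only the models.dev catalog id differs per preset. A small sketch of the resulting preset-to-catalog mapping (ids and URLs copied from the hunk; the dict shape is illustrative):

# Preset id -> (base URL, models.dev catalog id), as merged above; dict shape is illustrative.
minimax_presets = {
    "minimax-cn-general": ("https://api.minimaxi.com/anthropic/v1", "minimax-cn"),
    "minimax-global-general": ("https://api.minimax.io/anthropic/v1", "minimax"),
    "minimax-cn-coding": ("https://api.minimaxi.com/anthropic/v1", "minimax-cn-coding-plan"),
    "minimax-global-coding": ("https://api.minimax.io/anthropic/v1", "minimax-coding-plan"),
}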
@@ -697,21 +711,25 @@ class LLMProviderManager(metaclass=Singleton):
sort_order=130,
base_url_presets=(
url_preset(
id="xiaomi-standard",
label="标准端点",
value="https://api.xiaomimimo.com/v1",
models_dev_provider_id="xiaomi",
),
url_preset(
id="xiaomi-token-plan-cn",
label="Token Plan / 中国",
value="https://token-plan-cn.xiaomimimo.com/v1",
models_dev_provider_id="xiaomi-token-plan-cn",
),
url_preset(
id="xiaomi-token-plan-sgp",
label="Token Plan / 新加坡",
value="https://token-plan-sgp.xiaomimimo.com/v1",
models_dev_provider_id="xiaomi-token-plan-sgp",
),
url_preset(
id="xiaomi-token-plan-ams",
label="Token Plan / 欧洲",
value="https://token-plan-ams.xiaomimimo.com/v1",
models_dev_provider_id="xiaomi-token-plan-ams",
@@ -932,11 +950,12 @@ class LLMProviderManager(metaclass=Singleton):
async def _get_provider_async(
self, provider_id: str, force_refresh: bool = False
) -> ProviderSpec:
normalized_provider_id = self._normalize_provider_id(provider_id)
try:
return self.get_provider(provider_id)
return self.get_provider(normalized_provider_id)
except LLMProviderError:
await self.get_models_dev_data(force_refresh=force_refresh)
return self.get_provider(provider_id)
return self.get_provider(normalized_provider_id)

def _serialize_provider(self, spec: ProviderSpec) -> dict[str, Any]:
return {
@@ -946,6 +965,7 @@ class LLMProviderManager(metaclass=Singleton):
"default_base_url": self._default_base_url_for_provider(spec) or "",
"base_url_presets": [
{
"id": preset.id,
"label": preset.label,
"value": self._sanitize_base_url(preset.value) or "",
}
@@ -989,7 +1009,7 @@ class LLMProviderManager(metaclass=Singleton):

def get_provider(self, provider_id: str) -> ProviderSpec:
"""按 provider id 获取定义。"""
normalized = (provider_id or "").strip().lower()
normalized = self._normalize_provider_id(provider_id)
for spec in self._provider_specs():
if spec.id == normalized:
return spec
@@ -1014,9 +1034,39 @@ class LLMProviderManager(metaclass=Singleton):
return cls._sanitize_base_url(spec.base_url_presets[0].value)

@classmethod
def _resolve_provider_model_list_base_url(
cls, spec: ProviderSpec, base_url: Optional[str]
def _normalize_provider_id(cls, provider_id: str) -> str:
normalized = (provider_id or "").strip().lower()
if normalized == "minimax-coding":
return "minimax"
return normalized

@classmethod
def _normalize_base_url_preset_id(
cls, provider_id: str, base_url_preset_id: Optional[str]
) -> Optional[str]:
normalized_provider_id = cls._normalize_provider_id(provider_id)
normalized_preset_id = str(base_url_preset_id or "").strip().lower() or None
if not normalized_preset_id:
return None
if normalized_provider_id == "minimax" and normalized_preset_id == "minimax-coding":
return "minimax-cn-coding"
return normalized_preset_id

@classmethod
def _resolve_provider_model_list_base_url(
cls,
spec: ProviderSpec,
base_url: Optional[str],
base_url_preset_id: Optional[str] = None,
) -> Optional[str]:
normalized_preset_id = cls._normalize_base_url_preset_id(spec.id, base_url_preset_id)
if normalized_preset_id:
for preset in spec.base_url_presets:
if preset.id != normalized_preset_id:
continue
preset_value = cls._sanitize_base_url(preset.value)
return cls._sanitize_base_url(preset.model_list_base_url) or preset_value

normalized_base_url = cls._sanitize_base_url(base_url)
if normalized_base_url:
for preset in spec.base_url_presets:
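Note: the new `_normalize_provider_id` and `_normalize_base_url_preset_id` class methods are what keep old `minimax-coding` configurations working against the merged provider. A standalone sketch of the same mapping rules (illustrative, not the repository's methods):

# Standalone sketch of the alias rules added above (illustrative only).
def normalize_provider_id(provider_id: str) -> str:
    normalized = str(provider_id or "").strip().lower()
    return "minimax" if normalized == "minimax-coding" else normalized

def normalize_base_url_preset_id(provider_id: str, preset_id: str | None) -> str | None:
    provider = normalize_provider_id(provider_id)
    preset = str(preset_id or "").strip().lower() or None
    if not preset:
        return None
    if provider == "minimax" and preset == "minimax-coding":
        return "minimax-cn-coding"  # legacy preset id now points at the CN Coding Plan preset
    return preset

assert normalize_provider_id("minimax-coding") == "minimax"
assert normalize_base_url_preset_id("minimax-coding", "minimax-coding") == "minimax-cn-coding"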
@@ -1037,8 +1087,18 @@ class LLMProviderManager(metaclass=Singleton):

@classmethod
def _resolve_provider_models_dev_provider_id(
cls, spec: ProviderSpec, base_url: Optional[str]
cls,
spec: ProviderSpec,
base_url: Optional[str],
base_url_preset_id: Optional[str] = None,
) -> Optional[str]:
normalized_preset_id = cls._normalize_base_url_preset_id(spec.id, base_url_preset_id)
if normalized_preset_id:
for preset in spec.base_url_presets:
if preset.id != normalized_preset_id:
continue
return preset.models_dev_provider_id or spec.models_dev_provider_id

normalized_base_url = cls._sanitize_base_url(base_url)
if normalized_base_url:
for preset in spec.base_url_presets:
@@ -1058,10 +1118,17 @@ class LLMProviderManager(metaclass=Singleton):
return spec.models_dev_provider_id

def resolve_model_list_base_url(
self, provider_id: str, base_url: Optional[str]
self,
provider_id: str,
base_url: Optional[str],
base_url_preset_id: Optional[str] = None,
) -> Optional[str]:
spec = self.get_provider(provider_id)
return self._resolve_provider_model_list_base_url(spec, base_url)
return self._resolve_provider_model_list_base_url(
spec,
base_url,
base_url_preset_id=base_url_preset_id,
)

@staticmethod
def _httpx_proxy_key() -> str:
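Note: `resolve_model_list_base_url` now takes an explicit `base_url_preset_id`, so a Coding Plan preset can point model listing at its general-catalog URL even when two presets share the same base URL. A standalone sketch of that preset-first lookup, using the zhipu preset values from this diff (illustrative, not the repository's method):

# Standalone sketch of the preset-first model-list lookup (illustrative only).
from typing import Optional

def model_list_url(presets: list[dict], base_url: Optional[str], preset_id: Optional[str]) -> Optional[str]:
    if preset_id:
        for preset in presets:
            if preset["id"] == preset_id:
                # Prefer an explicit model_list_base_url, else the preset's own value.
                return preset.get("model_list_base_url") or preset["value"]
    for preset in presets:
        if preset["value"] == base_url:
            return preset.get("model_list_base_url") or preset["value"]
    return base_url

zhipu_presets = [
    {"id": "zhipu-general", "value": "https://open.bigmodel.cn/api/paas/v4"},
    {"id": "zhipu-coding", "value": "https://open.bigmodel.cn/api/coding/paas/v4",
     "model_list_base_url": "https://open.bigmodel.cn/api/paas/v4"},
]
assert model_list_url(zhipu_presets, None, "zhipu-coding") == "https://open.bigmodel.cn/api/paas/v4"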
@@ -1212,21 +1279,33 @@ class LLMProviderManager(metaclass=Singleton):
raise LLMProviderError(f"获取 models.dev 数据失败: {err}") from err

async def _models_dev_provider_payload(
self, provider_id: str, base_url: Optional[str] = None
self,
provider_id: str,
base_url: Optional[str] = None,
base_url_preset_id: Optional[str] = None,
) -> dict[str, Any]:
spec = await self._get_provider_async(provider_id)
models_dev_provider_id = self._resolve_provider_models_dev_provider_id(
spec,
base_url,
base_url_preset_id=base_url_preset_id,
)
if not models_dev_provider_id:
return {}
return (await self.get_models_dev_data()).get(models_dev_provider_id, {}) or {}

async def _models_dev_model(
self, provider_id: str, model_id: str, base_url: Optional[str] = None
self,
provider_id: str,
model_id: str,
base_url: Optional[str] = None,
base_url_preset_id: Optional[str] = None,
) -> dict[str, Any] | None:
payload = await self._models_dev_provider_payload(provider_id, base_url=base_url)
payload = await self._models_dev_provider_payload(
provider_id,
base_url=base_url,
base_url_preset_id=base_url_preset_id,
)
models = payload.get("models") if isinstance(payload, dict) else None
if not isinstance(models, dict):
return None
@@ -1394,13 +1473,18 @@ class LLMProviderManager(metaclass=Singleton):
provider_id: str,
transport: str = "openai",
base_url: Optional[str] = None,
base_url_preset_id: Optional[str] = None,
) -> list[dict[str, Any]]:
"""
某些 provider 没有统一稳定的 models.list 行为,
因此优先读取 models.dev 目录;若未来 provider 暴露标准 models 接口,
再平滑补充实时刷新即可。
"""
payload = await self._models_dev_provider_payload(provider_id, base_url=base_url)
payload = await self._models_dev_provider_payload(
provider_id,
base_url=base_url,
base_url_preset_id=base_url_preset_id,
)
models = payload.get("models") if isinstance(payload, dict) else None
if not isinstance(models, dict):
raise LLMProviderError(f"{provider_id} 暂无可用模型目录")
@@ -1551,11 +1635,16 @@ class LLMProviderManager(metaclass=Singleton):
provider_id: str,
api_key: Optional[str] = None,
base_url: Optional[str] = None,
base_url_preset_id: Optional[str] = None,
force_refresh: bool = False,
) -> list[dict[str, Any]]:
"""返回标准化后的模型目录。"""
spec = await self._get_provider_async(provider_id, force_refresh=force_refresh)
if self._resolve_provider_models_dev_provider_id(spec, base_url):
if self._resolve_provider_models_dev_provider_id(
spec,
base_url,
base_url_preset_id=base_url_preset_id,
):
# 对依赖 models.dev 的 provider 主动刷新一次缓存,保证“刷新模型列表”
# 在使用目录型 provider 时也能拿到最新参数。
if force_refresh:
@@ -1565,6 +1654,7 @@ class LLMProviderManager(metaclass=Singleton):
model=None,
api_key=api_key,
base_url=base_url,
base_url_preset_id=base_url_preset_id,
)

if spec.model_list_strategy == "google":
@@ -1582,6 +1672,7 @@ class LLMProviderManager(metaclass=Singleton):
base_url=self._resolve_provider_model_list_base_url(
spec,
runtime["base_url"],
base_url_preset_id=base_url_preset_id,
),
default_headers=runtime.get("default_headers"),
)
@@ -1591,6 +1682,7 @@ class LLMProviderManager(metaclass=Singleton):
provider_id=provider_id,
transport="anthropic",
base_url=base_url,
base_url_preset_id=base_url_preset_id,
)

if spec.model_list_strategy == "models_dev_only":
@@ -1598,6 +1690,7 @@ class LLMProviderManager(metaclass=Singleton):
provider_id=provider_id,
transport="openai",
base_url=base_url,
base_url_preset_id=base_url_preset_id,
)

# openai-compatible / deepseek 默认走官方 models 端点。
@@ -1607,6 +1700,7 @@ class LLMProviderManager(metaclass=Singleton):
base_url=self._resolve_provider_model_list_base_url(
spec,
runtime["base_url"],
base_url_preset_id=base_url_preset_id,
),
default_headers=runtime.get("default_headers"),
)
@@ -1616,6 +1710,7 @@ class LLMProviderManager(metaclass=Singleton):
provider_id: str,
model_id: Optional[str],
base_url: Optional[str] = None,
base_url_preset_id: Optional[str] = None,
) -> dict[str, Any] | None:
if not model_id:
return None
@@ -1623,6 +1718,7 @@ class LLMProviderManager(metaclass=Singleton):
provider_id,
model_id,
base_url=base_url,
base_url_preset_id=base_url_preset_id,
)
if metadata:
return metadata
@@ -2079,13 +2175,19 @@ class LLMProviderManager(metaclass=Singleton):
model: Optional[str],
api_key: Optional[str] = None,
base_url: Optional[str] = None,
base_url_preset_id: Optional[str] = None,
) -> dict[str, Any]:
"""
解析 provider 运行时参数。

返回统一结构,供 `LLMHelper` 创建具体 LangChain 模型实例时使用。
"""
spec = await self._get_provider_async(provider_id)
normalized_provider_id = self._normalize_provider_id(provider_id)
normalized_base_url_preset_id = self._normalize_base_url_preset_id(
normalized_provider_id,
base_url_preset_id,
)
spec = await self._get_provider_async(normalized_provider_id)
normalized_api_key = str(api_key or "").strip() or None
normalized_base_url = self._sanitize_base_url(base_url)
model_record = None
@@ -2095,9 +2197,10 @@ class LLMProviderManager(metaclass=Singleton):
(
item
for item in await self.list_models(
provider_id,
normalized_provider_id,
api_key=api_key,
base_url=base_url,
base_url_preset_id=normalized_base_url_preset_id,
)
if item["id"] == model
),
@@ -2108,21 +2211,22 @@ class LLMProviderManager(metaclass=Singleton):
model_record = None

result: dict[str, Any] = {
"provider_id": provider_id,
"provider_id": normalized_provider_id,
"runtime": spec.runtime,
"model_id": model,
"model_record": model_record,
"model_metadata": await self.resolve_model_metadata(
provider_id,
normalized_provider_id,
model,
base_url=base_url,
base_url_preset_id=normalized_base_url_preset_id,
),
"default_headers": None,
"use_responses_api": None,
"auth_mode": "api_key",
}

if provider_id == "chatgpt":
if normalized_provider_id == "chatgpt":
auth = None
try:
auth = await self._resolve_chatgpt_oauth()
@@ -2160,7 +2264,7 @@ class LLMProviderManager(metaclass=Singleton):

raise LLMProviderAuthError("请提供 API Key 或完成 ChatGPT 授权")

if provider_id == "github-copilot":
if normalized_provider_id == "github-copilot":
auth = self.get_saved_auth("github-copilot")
if auth and auth.get("type") == "oauth":
token = auth.get("refresh_token") or auth.get("access_token")
@@ -12,6 +12,7 @@ from app.agent.llm import (
LLMTestTimeout,
render_auth_result_html,
)
from app.core.config import settings
from app.db.models import User
from app.db.user_oper import (
get_current_active_superuser_async,
@@ -29,6 +30,7 @@ class LlmTestRequest(BaseModel):
thinking_level: Optional[str] = None
api_key: Optional[str] = None
base_url: Optional[str] = None
base_url_preset: Optional[str] = None


class LlmProviderAuthStartRequest(BaseModel):
@@ -64,6 +66,7 @@ async def get_llm_models(
provider: str,
api_key: Optional[str] = None,
base_url: Optional[str] = None,
base_url_preset: Optional[str] = None,
force_refresh: Optional[bool] = False,
_: User = Depends(get_current_active_user_async),
):
@@ -76,6 +79,7 @@ async def get_llm_models(
provider=provider,
api_key=api_key,
base_url=base_url,
base_url_preset=base_url_preset,
force_refresh=bool(force_refresh),
)
return schemas.Response(
@@ -231,6 +235,7 @@ async def llm_test(
thinking_level=settings.LLM_THINKING_LEVEL,
api_key=settings.LLM_API_KEY,
base_url=settings.LLM_BASE_URL,
base_url_preset=settings.LLM_BASE_URL_PRESET,
)

if not payload.provider:
@@ -262,6 +267,7 @@ async def llm_test(
thinking_level=payload.thinking_level,
api_key=payload.api_key,
base_url=payload.base_url,
base_url_preset=payload.base_url_preset,
)
if not result.get("reply_preview"):
return schemas.Response(
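Note: `base_url_preset` now travels from the API request model through to the provider manager. A hedged example of an `LlmTestRequest`-shaped payload (field names are taken from the model above; the concrete values are assumptions):

# Field names follow LlmTestRequest above; the values here are only examples.
llm_test_payload = {
    "provider": "minimax",
    "model": "MiniMax-M1",
    "api_key": "sk-example",
    "base_url": "https://api.minimaxi.com/anthropic/v1",
    "base_url_preset": "minimax-cn-coding",  # selects the Coding Plan catalog for the shared URL
}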
@@ -515,6 +515,8 @@ class ConfigModel(BaseModel):
LLM_API_KEY: Optional[str] = None
# LLM基础URL(用于自定义API端点)
LLM_BASE_URL: Optional[str] = "https://api.deepseek.com"
# LLM Base URL 预设标识,用于区分同一 Base URL 下的不同模型目录
LLM_BASE_URL_PRESET: Optional[str] = None
# LLM最大上下文Token数量(K)
LLM_MAX_CONTEXT_TOKENS: int = 64
# LLM温度参数
@@ -1214,9 +1214,15 @@ def _llm_provider_defaults(
default_base_url = str(provider_meta.get("default_base_url") or "").strip()
if default_base_url:
defaults["base_url"] = default_base_url
base_url_presets = provider_meta.get("base_url_presets") or []
if isinstance(base_url_presets, list) and base_url_presets:
preset_id = str((base_url_presets[0] or {}).get("id") or "").strip()
if preset_id:
defaults["base_url_preset"] = preset_id

defaults.setdefault("model", _env_default("LLM_MODEL", ""))
defaults.setdefault("base_url", _env_default("LLM_BASE_URL", ""))
defaults.setdefault("base_url_preset", _env_default("LLM_BASE_URL_PRESET", ""))
return defaults
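Note: the setup defaults now also seed `base_url_preset` from the provider's first preset. A standalone sketch of just that defaulting rule (illustrative; `provider_meta` follows the serialized provider shape used above):

# Illustrative defaulting rule mirroring the hunk above (not the setup script itself).
def default_base_url_preset(provider_meta: dict) -> str:
    presets = provider_meta.get("base_url_presets") or []
    if isinstance(presets, list) and presets:
        return str((presets[0] or {}).get("id") or "").strip()
    return ""

assert default_base_url_preset({"base_url_presets": [{"id": "minimax-cn-general"}]}) == "minimax-cn-general"
assert default_base_url_preset({}) == ""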
@@ -1245,11 +1251,13 @@ def _load_llm_models_inner(payload: dict[str, Any]) -> list[dict[str, Any]]:
provider_module = _load_llm_provider_module()
api_key = str(payload.get("api_key") or "").strip() or None
base_url = str(payload.get("base_url") or "").strip() or None
base_url_preset = str(payload.get("base_url_preset") or "").strip() or None
models = asyncio.run(
provider_module.LLMProviderManager().list_models(
provider_id=provider,
api_key=api_key,
base_url=base_url,
base_url_preset_id=base_url_preset,
force_refresh=False,
)
)
@@ -1261,12 +1269,14 @@ def _load_llm_models(
provider: str,
api_key: Optional[str],
base_url: Optional[str],
base_url_preset: Optional[str],
runtime_python: Optional[Path] = None,
) -> list[dict[str, Any]]:
payload = {
"provider": str(provider or "").strip().lower(),
"api_key": str(api_key or "").strip(),
"base_url": str(base_url or "").strip(),
"base_url_preset": str(base_url_preset or "").strip(),
}
try:
return _load_llm_models_inner(payload)
@@ -1795,6 +1805,9 @@ def _collect_agent_config(
defaults = _llm_provider_defaults(provider, provider_definitions)
current_model = _env_default("LLM_MODEL", defaults["model"])
current_base_url = _env_default("LLM_BASE_URL", defaults["base_url"])
current_base_url_preset = _env_default(
"LLM_BASE_URL_PRESET", defaults.get("base_url_preset", "")
)
api_key_label = str(provider_meta.get("api_key_label") or "API Key").strip() or "API Key"
api_key_hint = str(provider_meta.get("api_key_hint") or "").strip()
requires_base_url = bool(provider_meta.get("requires_base_url"))
@@ -1838,8 +1851,41 @@ def _collect_agent_config(
"是否启用图片输入支持",
default=_env_bool("LLM_SUPPORT_IMAGE_INPUT", True),
),
"LLM_BASE_URL_PRESET": current_base_url_preset,
}

base_url_presets = provider_meta.get("base_url_presets") or []
if isinstance(base_url_presets, list):
duplicate_value_presets = []
normalized_current_base_url = current_base_url.strip()
for item in base_url_presets:
if not isinstance(item, dict):
continue
preset_value = str(item.get("value") or "").strip()
preset_id = str(item.get("id") or "").strip()
if not preset_id or preset_value != normalized_current_base_url:
continue
duplicate_value_presets.append(item)

if len(duplicate_value_presets) > 1:
choices: dict[str, str] = {}
default_preset = current_base_url_preset
if not default_preset or default_preset not in {
str(item.get("id") or "").strip() for item in duplicate_value_presets
}:
default_preset = str((duplicate_value_presets[0] or {}).get("id") or "").strip()
for item in duplicate_value_presets:
preset_id = str(item.get("id") or "").strip()
preset_label = str(item.get("label") or preset_id).strip()
if preset_id:
choices[preset_id] = preset_label
if choices:
config["LLM_BASE_URL_PRESET"] = _prompt_choice(
"LLM Base URL 预设",
choices=choices,
default=default_preset,
)

config["LLM_BASE_URL"] = _prompt_text(
base_url_label,
default=current_base_url,
@@ -1849,6 +1895,7 @@ def _collect_agent_config(
provider=provider,
api_key=config["LLM_API_KEY"],
base_url=config["LLM_BASE_URL"],
base_url_preset=config["LLM_BASE_URL_PRESET"],
runtime_python=runtime_python,
)
config["LLM_MODEL"] = _prompt_model_choice(models, default=current_model)
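Note: when several presets share the currently selected base URL (as the two MiniMax CN presets now do), the setup flow prompts for a preset id instead of inferring it from the URL. A standalone sketch of the duplicate detection (illustrative, not the setup script):

# Illustrative duplicate-preset detection mirroring the hunk above.
def presets_sharing_url(presets: list[dict], current_base_url: str) -> list[dict]:
    current = current_base_url.strip()
    return [
        item for item in presets
        if isinstance(item, dict)
        and str(item.get("id") or "").strip()
        and str(item.get("value") or "").strip() == current
    ]

minimax_cn_presets = [
    {"id": "minimax-cn-general", "value": "https://api.minimaxi.com/anthropic/v1"},
    {"id": "minimax-cn-coding", "value": "https://api.minimaxi.com/anthropic/v1"},
]
# Two presets share one URL, so the setup flow would prompt the user to choose.
assert len(presets_sharing_url(minimax_cn_presets, "https://api.minimaxi.com/anthropic/v1")) == 2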
@@ -38,6 +38,7 @@ _stub_module(
LLM_MODEL="global-model",
LLM_API_KEY="global-key",
LLM_BASE_URL="https://global.example.com",
LLM_BASE_URL_PRESET=None,
LLM_THINKING_LEVEL=None,
LLM_TEMPERATURE=0.1,
LLM_MAX_CONTEXT_TOKENS=64,
@@ -76,6 +77,7 @@ class LlmHelperTestCallTest(unittest.TestCase):
model="deepseek-chat",
api_key="sk-test",
base_url="https://api.deepseek.com",
base_url_preset="deepseek-default",
)
)

@@ -86,6 +88,7 @@ class LlmHelperTestCallTest(unittest.TestCase):
thinking_level=None,
api_key="sk-test",
base_url="https://api.deepseek.com",
base_url_preset="deepseek-default",
)
self.assertEqual(result["provider"], "deepseek")
self.assertEqual(result["model"], "deepseek-chat")
@@ -246,6 +246,54 @@ class LlmProviderRegistryTest(unittest.TestCase):
self.assertIsNone(provider.models_dev_provider_id)
self.assertFalse(provider.supports_model_refresh)

def test_builtin_minimax_provider_merges_general_and_coding_presets(self):
manager = LLMProviderManager()

provider = manager.get_provider("minimax")
serialized = manager.list_providers()
minimax_payload = next(item for item in serialized if item["id"] == "minimax")

self.assertEqual(provider.name, "MiniMax")
self.assertEqual(provider.runtime, "anthropic_compatible")
self.assertEqual(
tuple((preset.id, preset.label, preset.value) for preset in provider.base_url_presets),
(
("minimax-cn-general", "中国内地 / 通用", "https://api.minimaxi.com/anthropic/v1"),
("minimax-global-general", "国际站 / 通用", "https://api.minimax.io/anthropic/v1"),
("minimax-cn-coding", "中国内地 / Coding Plan", "https://api.minimaxi.com/anthropic/v1"),
("minimax-global-coding", "国际站 / Coding Plan", "https://api.minimax.io/anthropic/v1"),
),
)
self.assertEqual(
tuple(item["id"] for item in minimax_payload["base_url_presets"]),
(
"minimax-cn-general",
"minimax-global-general",
"minimax-cn-coding",
"minimax-global-coding",
),
)

def test_minimax_coding_alias_resolves_to_minimax_provider(self):
manager = LLMProviderManager()

provider = manager.get_provider("minimax-coding")

self.assertEqual(provider.id, "minimax")

def test_resolve_models_dev_provider_id_prefers_minimax_preset_id(self):
manager = LLMProviderManager()
provider = manager.get_provider("minimax")

self.assertEqual(
manager._resolve_provider_models_dev_provider_id(
provider,
base_url="https://api.minimaxi.com/anthropic/v1",
base_url_preset_id="minimax-cn-coding",
),
"minimax-cn-coding-plan",
)


if __name__ == "__main__":
unittest.main()
@@ -68,6 +68,7 @@ class LocalSetupLlmProviderPromptTests(unittest.TestCase):
provider="frogbot",
api_key="sk-frog",
base_url="https://override.example.com/v1",
base_url_preset="",
runtime_python=Path("/tmp/runtime-python"),
)
model_prompt.assert_called_once_with(models, default="")
@@ -205,6 +206,66 @@ class LocalSetupLlmProviderPromptTests(unittest.TestCase):
"https://modelservice.jdcloud.com/v1",
)

def test_collect_agent_config_prompts_for_duplicate_base_url_presets(self):
module = load_local_setup_module()

provider_definitions = [
{
"id": "minimax",
"name": "MiniMax",
"default_base_url": "https://api.minimaxi.com/anthropic/v1",
"api_key_label": "API Key",
"base_url_presets": [
{
"id": "minimax-cn-general",
"label": "中国内地 / 通用",
"value": "https://api.minimaxi.com/anthropic/v1",
},
{
"id": "minimax-cn-coding",
"label": "中国内地 / Coding Plan",
"value": "https://api.minimaxi.com/anthropic/v1",
},
],
}
]

with patch.object(module, "print_step"), patch.object(
module, "_prompt_yes_no", side_effect=[True, False, True]
), patch.object(
module, "_load_llm_provider_definitions", return_value=provider_definitions
), patch.object(
module, "_prompt_provider_choice", return_value="minimax"
), patch.object(
module, "_prompt_text", side_effect=["https://api.minimaxi.com/anthropic/v1"]
), patch.object(
module, "_prompt_secret_text", return_value="sk-minimax"
), patch.object(
module, "_load_llm_models", return_value=[]
) as load_models, patch.object(
module, "_prompt_model_choice", return_value="MiniMax-M1"
), patch.object(
module, "read_env_value", return_value=None
), patch.object(
module, "_env_default", side_effect=lambda key, default="": default
), patch.object(
module, "_env_bool", side_effect=lambda key, default: default
), patch.object(
module, "_env_llm_thinking_level_default", return_value="auto"
), patch.object(
module, "_prompt_choice", side_effect=["auto", "minimax-cn-coding"]
):
config = module._collect_agent_config()

self.assertEqual(config["LLM_BASE_URL_PRESET"], "minimax-cn-coding")
load_models.assert_called_once_with(
provider="minimax",
api_key="sk-minimax",
base_url="https://api.minimaxi.com/anthropic/v1",
base_url_preset="minimax-cn-coding",
runtime_python=None,
)


if __name__ == "__main__":
unittest.main()
@@ -77,7 +77,7 @@ _stub_module("app.utils.crypto", HashUtils=_Dummy)
_stub_module("app.utils.http", RequestUtils=_Dummy, AsyncRequestUtils=_Dummy)
_stub_module("version", APP_VERSION="test")

from app.api.endpoints import system as system_endpoint
from app.api.endpoints import llm as system_endpoint


class LlmTestEndpointTest(unittest.TestCase):
@@ -124,6 +124,8 @@ class LlmTestEndpointTest(unittest.TestCase):
system_endpoint.settings, "LLM_API_KEY", "sk-test"
), patch.object(
system_endpoint.settings, "LLM_BASE_URL", "https://api.deepseek.com"
), patch.object(
system_endpoint.settings, "LLM_BASE_URL_PRESET", "deepseek-default"
), patch.object(
system_endpoint.LLMHelper,
"test_current_settings",
@@ -138,6 +140,7 @@ class LlmTestEndpointTest(unittest.TestCase):
thinking_level="max",
api_key="sk-test",
base_url="https://api.deepseek.com",
base_url_preset="deepseek-default",
)
self.assertTrue(resp.success)
self.assertEqual(resp.data["provider"], "deepseek")
@@ -161,6 +164,7 @@ class LlmTestEndpointTest(unittest.TestCase):
thinking_level="high",
api_key="sk-live",
base_url="https://example.com/v1",
base_url_preset="openai-default",
)

with patch.object(system_endpoint.settings, "AI_AGENT_ENABLE", False), patch.object(
@@ -183,6 +187,7 @@ class LlmTestEndpointTest(unittest.TestCase):
thinking_level="high",
api_key="sk-live",
base_url="https://example.com/v1",
base_url_preset="openai-default",
)
self.assertTrue(resp.success)
self.assertEqual(resp.data["provider"], "openai")
@@ -203,6 +208,7 @@ class LlmTestEndpointTest(unittest.TestCase):
model="deepseek-v4-pro",
api_key="sk-live",
base_url="https://api.deepseek.com",
base_url_preset="deepseek-default",
)

with patch.object(system_endpoint.settings, "AI_AGENT_ENABLE", False), patch.object(
@@ -219,6 +225,7 @@ class LlmTestEndpointTest(unittest.TestCase):
thinking_level=None,
api_key="sk-live",
base_url="https://api.deepseek.com",
base_url_preset="deepseek-default",
)
self.assertTrue(resp.success)