diff --git a/app/domain/openai_models.py b/app/domain/openai_models.py
index ad6d326..8e4f6ae 100644
--- a/app/domain/openai_models.py
+++ b/app/domain/openai_models.py
@@ -1,5 +1,5 @@
 from pydantic import BaseModel
-from typing import List, Optional, Union
+from typing import Any, Dict, List, Optional, Union
 from app.core.constants import DEFAULT_MODEL, DEFAULT_TEMPERATURE, DEFAULT_TOP_K, DEFAULT_TOP_P
@@ -9,11 +9,14 @@ class ChatRequest(BaseModel):
     model: str = DEFAULT_MODEL
     temperature: Optional[float] = DEFAULT_TEMPERATURE
     stream: Optional[bool] = False
-    tools: Optional[List[dict]] = []
     max_tokens: Optional[int] = None
     top_p: Optional[float] = DEFAULT_TOP_P
     top_k: Optional[int] = DEFAULT_TOP_K
-    stop: Optional[List[str]] = []
+    stop: Optional[Union[List[str], str]] = None
+    reasoning_effort: Optional[str] = None
+    tools: Optional[Union[List[Dict[str, Any]], Dict[str, Any]]] = []
+    tool_choice: Optional[str] = None
+    response_format: Optional[dict] = None


 class EmbeddingRequest(BaseModel):
@@ -23,10 +26,10 @@ class EmbeddingRequest(BaseModel):


 class ImageGenerationRequest(BaseModel):
-    model: str = "DALL-E-3"
+    model: str = "imagen-3.0-generate-002"
     prompt: str = ""
     n: int = 1
     size: Optional[str] = "1024x1024"
-    quality: Optional[str] = ""
-    style: Optional[str] = ""
-    response_format: Optional[str] = "url"
+    quality: Optional[str] = None
+    style: Optional[str] = None
+    response_format: Optional[str] = "b64_json"
diff --git a/app/log/logger.py b/app/log/logger.py
index 11c1cc6..896fe27 100644
--- a/app/log/logger.py
+++ b/app/log/logger.py
@@ -213,4 +213,8 @@ def get_message_converter_logger():


 def get_api_client_logger():
-    return Logger.setup_logger("api_client")
\ No newline at end of file
+    return Logger.setup_logger("api_client")
+
+
+def get_openai_compatible_logger():
+    return Logger.setup_logger("openai_compatible")
\ No newline at end of file
diff --git a/app/middleware/middleware.py b/app/middleware/middleware.py
index e187223..495d823 100644
--- a/app/middleware/middleware.py
+++ b/app/middleware/middleware.py
@@ -30,6 +30,8 @@ class AuthMiddleware(BaseHTTPMiddleware):
             and not request.url.path.startswith(f"/{API_VERSION}")
             and not request.url.path.startswith("/health")
             and not request.url.path.startswith("/hf")
+            and not request.url.path.startswith("/openai")
+            and not request.url.path.startswith("/api/version/check")
         ):
             auth_token = request.cookies.get("auth_token")
diff --git a/app/router/openai_compatiable_routes.py b/app/router/openai_compatiable_routes.py
new file mode 100644
index 0000000..bfe7451
--- /dev/null
+++ b/app/router/openai_compatiable_routes.py
@@ -0,0 +1,129 @@
+from fastapi import APIRouter, Depends, HTTPException
+from fastapi.responses import StreamingResponse
+
+from app.config.config import settings
+from app.core.security import SecurityService
+from app.domain.openai_models import (
+    ChatRequest,
+    EmbeddingRequest,
+    ImageGenerationRequest,
+)
+from app.handler.retry_handler import RetryHandler
+from app.log.logger import get_openai_compatible_logger
+from app.service.key.key_manager import KeyManager, get_key_manager_instance
+from app.service.openai_compatiable_service import OpenAICompatiableService
+
+
+router = APIRouter()
+logger = get_openai_compatible_logger()
+
+# Initialize shared services
+security_service = SecurityService()
+
+async def get_key_manager():
+    return await get_key_manager_instance()
+
+
+async def get_next_working_key_wrapper(
+    key_manager: KeyManager = Depends(get_key_manager),
+):
+    return await key_manager.get_next_working_key()
+
+
+async def get_openai_service(key_manager: KeyManager = Depends(get_key_manager)):
+    """Return an OpenAI-compatible chat service instance"""
+    return OpenAICompatiableService(settings.BASE_URL, key_manager)
+
+
+@router.get("/openai/v1/models")
+async def list_models(
+    _=Depends(security_service.verify_authorization),
+    key_manager: KeyManager = Depends(get_key_manager),
+    openai_service: OpenAICompatiableService = Depends(get_openai_service),
+):
+    logger.info("-" * 50 + "list_models" + "-" * 50)
+    logger.info("Handling models list request")
+    api_key = await key_manager.get_first_valid_key()
+    logger.info(f"Using API key: {api_key}")
+    try:
+        return await openai_service.get_models(api_key)
+    except Exception as e:
+        logger.error(f"Error getting models list: {str(e)}")
+        raise HTTPException(
+            status_code=500, detail="Internal server error while fetching models list"
+        ) from e
+
+
+@router.post("/openai/v1/chat/completions")
+@RetryHandler(max_retries=settings.MAX_RETRIES, key_arg="api_key")
+async def chat_completion(
+    request: ChatRequest,
+    _=Depends(security_service.verify_authorization),
+    api_key: str = Depends(get_next_working_key_wrapper),
+    key_manager: KeyManager = Depends(get_key_manager),
+    openai_service: OpenAICompatiableService = Depends(get_openai_service),
+):
+    # If the model is the imagen-3 chat variant, bill against the paid key
+    if request.model == f"{settings.CREATE_IMAGE_MODEL}-chat":
+        api_key = await key_manager.get_paid_key()
+    logger.info("-" * 50 + "chat_completion" + "-" * 50)
+    logger.info(f"Handling chat completion request for model: {request.model}")
+    logger.debug(f"Request: \n{request.model_dump_json(indent=2)}")
+    logger.info(f"Using API key: {api_key}")
+
+    try:
+        # Route imagen-3 chat requests to the image chat completion handler
+        if request.model == f"{settings.CREATE_IMAGE_MODEL}-chat":
+            response = await openai_service.create_image_chat_completion(request, api_key)
+        else:
+            response = await openai_service.create_chat_completion(request, api_key)
+        # Wrap streaming responses as SSE
+        if request.stream:
+            return StreamingResponse(response, media_type="text/event-stream")
+        logger.info("Chat completion request successful")
+        return response
+    except Exception as e:
+        logger.error(f"Chat completion failed after retries: {str(e)}")
+        raise HTTPException(status_code=500, detail="Chat completion failed") from e
+
+
+@router.post("/openai/v1/images/generations")
+async def generate_image(
+    request: ImageGenerationRequest,
+    _=Depends(security_service.verify_authorization),
+    openai_service: OpenAICompatiableService = Depends(get_openai_service),
+):
+    logger.info("-" * 50 + "generate_image" + "-" * 50)
+    logger.info(f"Handling image generation request for prompt: {request.prompt}")
+
+    try:
+        response = await openai_service.generate_images(request)
+        logger.info("Image generation request successful")
+        return response
+    except Exception as e:
+        logger.error(f"Image generation request failed: {str(e)}")
+        raise HTTPException(
+            status_code=500, detail="Image generation request failed"
+        ) from e
+
+
+@router.post("/openai/v1/embeddings")
+async def embedding(
+    request: EmbeddingRequest,
+    _=Depends(security_service.verify_authorization),
+    key_manager: KeyManager = Depends(get_key_manager),
+    openai_service: OpenAICompatiableService = Depends(get_openai_service),
+):
+    logger.info("-" * 50 + "embedding" + "-" * 50)
+    logger.info(f"Handling embedding request for model: {request.model}")
+    api_key = await key_manager.get_next_working_key()
+    logger.info(f"Using API key: {api_key}")
+    try:
+        response = await openai_service.create_embeddings(
+            input_text=request.input, model=request.model,
+            api_key=api_key
+        )
+        logger.info("Embedding request successful")
+        return response
+    except Exception as e:
+        logger.error(f"Embedding request failed: {str(e)}")
+        raise HTTPException(status_code=500, detail="Embedding request failed") from e
diff --git a/app/router/routes.py b/app/router/routes.py
index 5cf23ff..473721b 100644
--- a/app/router/routes.py
+++ b/app/router/routes.py
@@ -8,7 +8,7 @@ from fastapi.templating import Jinja2Templates

 from app.core.security import verify_auth_token
 from app.log.logger import get_routes_logger
-from app.router import error_log_routes, gemini_routes, openai_routes, config_routes, scheduler_routes, stats_routes, version_routes # 新增导入 version_routes
+from app.router import error_log_routes, gemini_routes, openai_routes, config_routes, scheduler_routes, stats_routes, version_routes, openai_compatiable_routes
 from app.service.key.key_manager import get_key_manager_instance
 from app.service.stats_service import StatsService

@@ -31,9 +31,10 @@ def setup_routers(app: FastAPI) -> None:
     app.include_router(gemini_routes.router_v1beta)
     app.include_router(config_routes.router)
     app.include_router(error_log_routes.router)
-    app.include_router(scheduler_routes.router) # 新增包含 scheduler 路由
-    app.include_router(stats_routes.router) # 包含 stats API 路由
-    app.include_router(version_routes.router) # 包含 version API 路由
+    app.include_router(scheduler_routes.router)
+    app.include_router(stats_routes.router)
+    app.include_router(version_routes.router)
+    app.include_router(openai_compatiable_routes.router)

     # 添加页面路由
     setup_page_routes(app)
diff --git a/app/service/client/api_client.py b/app/service/client/api_client.py
index bde5601..d5ac785 100644
--- a/app/service/client/api_client.py
+++ b/app/service/client/api_client.py
@@ -47,9 +47,9 @@ class GeminiApiClient(ApiClient):
         proxy_to_use = None
         if settings.PROXIES:
             proxy_to_use = random.choice(settings.PROXIES)
-            logger.info(f"using proxy: {proxy_to_use}")
+            logger.info(f"Using proxy: {proxy_to_use}")

-        async with httpx.AsyncClient(timeout=timeout, proxy=proxy_to_use) as client: # 修改:直接传递代理字符串
+        async with httpx.AsyncClient(timeout=timeout, proxy=proxy_to_use) as client:
             url = f"{self.base_url}/models/{model}:generateContent?key={api_key}"
             response = await client.post(url, json=payload)
             if response.status_code != 200:
@@ -64,9 +64,9 @@ class GeminiApiClient(ApiClient):
         proxy_to_use = None
         if settings.PROXIES:
             proxy_to_use = random.choice(settings.PROXIES)
-            logger.info(f"using proxy: {proxy_to_use}")
+            logger.info(f"Using proxy: {proxy_to_use}")

-        async with httpx.AsyncClient(timeout=timeout, proxy=proxy_to_use) as client: # 修改:直接传递代理字符串
+        async with httpx.AsyncClient(timeout=timeout, proxy=proxy_to_use) as client:
             url = f"{self.base_url}/models/{model}:streamGenerateContent?alt=sse&key={api_key}"
             async with client.stream(method="POST", url=url, json=payload) as response:
                 if response.status_code != 200:
@@ -75,3 +75,96 @@ class GeminiApiClient(ApiClient):
                     raise Exception(f"API call failed with status code {response.status_code}, {error_msg}")
                 async for line in response.aiter_lines():
                     yield line
+
+
+class OpenaiApiClient(ApiClient):
+    """OpenAI API client"""
+
+    def __init__(self, base_url: str, timeout: int = DEFAULT_TIMEOUT):
+        self.base_url = base_url
+        self.timeout = timeout
+
+    async def get_models(self, api_key: str) -> Dict[str, Any]:
+        timeout = httpx.Timeout(self.timeout, read=self.timeout)
+        async with httpx.AsyncClient(timeout=timeout) as client:
+            url = f"{self.base_url}/openai/models"
+            headers = {"Authorization": f"Bearer {api_key}"}
f"Bearer {api_key}"} + response = await client.get(url, headers=headers) + if response.status_code != 200: + error_content = response.text + raise Exception(f"API call failed with status code {response.status_code}, {error_content}") + return response.json() + + async def generate_content(self, payload: Dict[str, Any], api_key: str) -> Dict[str, Any]: + timeout = httpx.Timeout(self.timeout, read=self.timeout) + + proxy_to_use = None + if settings.PROXIES: + proxy_to_use = random.choice(settings.PROXIES) + logger.info(f"Using proxy: {proxy_to_use}") + + async with httpx.AsyncClient(timeout=timeout, proxy=proxy_to_use) as client: + url = f"{self.base_url}/openai/chat/completions" + headers = {"Authorization": f"Bearer {api_key}"} + response = await client.post(url, json=payload, headers=headers) + if response.status_code != 200: + error_content = response.text + raise Exception(f"API call failed with status code {response.status_code}, {error_content}") + return response.json() + + async def stream_generate_content(self, payload: Dict[str, Any], api_key: str) -> AsyncGenerator[str, None]: + timeout = httpx.Timeout(self.timeout, read=self.timeout) + + proxy_to_use = None + if settings.PROXIES: + proxy_to_use = random.choice(settings.PROXIES) + logger.info(f"Using proxy: {proxy_to_use}") + + async with httpx.AsyncClient(timeout=timeout, proxy=proxy_to_use) as client: + url = f"{self.base_url}/openai/chat/completions" + headers = {"Authorization": f"Bearer {api_key}"} + async with client.stream(method="POST", url=url, json=payload, headers=headers) as response: + if response.status_code != 200: + error_content = await response.aread() + error_msg = error_content.decode("utf-8") + raise Exception(f"API call failed with status code {response.status_code}, {error_msg}") + async for line in response.aiter_lines(): + yield line + + async def create_embeddings(self, input: str, model: str, api_key: str) -> Dict[str, Any]: + timeout = httpx.Timeout(self.timeout, read=self.timeout) + + proxy_to_use = None + if settings.PROXIES: + proxy_to_use = random.choice(settings.PROXIES) + logger.info(f"Using proxy: {proxy_to_use}") + + async with httpx.AsyncClient(timeout=timeout, proxy=proxy_to_use) as client: + url = f"{self.base_url}/openai/embeddings" + headers = {"Authorization": f"Bearer {api_key}"} + payload = { + "input": input, + "model": model, + } + response = await client.post(url, json=payload, headers=headers) + if response.status_code != 200: + error_content = response.text + raise Exception(f"API call failed with status code {response.status_code}, {error_content}") + return response.json() + + async def generate_images(self, payload: Dict[str, Any], api_key: str) -> Dict[str, Any]: + timeout = httpx.Timeout(self.timeout, read=self.timeout) + + proxy_to_use = None + if settings.PROXIES: + proxy_to_use = random.choice(settings.PROXIES) + logger.info(f"Using proxy: {proxy_to_use}") + + async with httpx.AsyncClient(timeout=timeout, proxy=proxy_to_use) as client: + url = f"{self.base_url}/openai/images/generations" + headers = {"Authorization": f"Bearer {api_key}"} + response = await client.post(url, json=payload, headers=headers) + if response.status_code != 200: + error_content = response.text + raise Exception(f"API call failed with status code {response.status_code}, {error_content}") + return response.json() \ No newline at end of file diff --git a/app/service/openai_compatiable_service.py b/app/service/openai_compatiable_service.py new file mode 100644 index 0000000..5ad67e4 --- /dev/null +++ 
@@ -0,0 +1,197 @@
+
+import datetime
+import json
+import re
+import time
+from typing import Any, AsyncGenerator, Dict, Union
+
+from app.config.config import settings
+from app.database.services import (
+    add_error_log,
+    add_request_log,
+)
+from app.domain.openai_models import ChatRequest, ImageGenerationRequest
+from app.service.client.api_client import OpenaiApiClient
+from app.service.key.key_manager import KeyManager
+from app.log.logger import get_openai_compatible_logger
+
+logger = get_openai_compatible_logger()
+
+class OpenAICompatiableService:
+
+    def __init__(self, base_url: str, key_manager: KeyManager = None):
+        self.key_manager = key_manager
+        self.base_url = base_url
+        self.api_client = OpenaiApiClient(base_url, settings.TIME_OUT)
+
+    async def get_models(self, api_key: str) -> Dict[str, Any]:
+        return await self.api_client.get_models(api_key)
+
+    async def create_chat_completion(
+        self,
+        request: ChatRequest,
+        api_key: str,
+    ) -> Union[Dict[str, Any], AsyncGenerator[str, None]]:
+        """Create a chat completion"""
+        request_dict = request.model_dump()
+        # Drop null-valued fields
+        request_dict = {k: v for k, v in request_dict.items() if v is not None}
+        request_dict.pop("top_k", None)  # top_k is not supported downstream yet
+        if request.stream:
+            return self._handle_stream_completion(request.model, request_dict, api_key)
+        return await self._handle_normal_completion(request.model, request_dict, api_key)
+
+    async def generate_images(
+        self,
+        request: ImageGenerationRequest,
+    ) -> Dict[str, Any]:
+        """Generate images"""
+        request_dict = request.model_dump()
+        # Drop null-valued fields
+        request_dict = {k: v for k, v in request_dict.items() if v is not None}
+        api_key = settings.PAID_KEY
+        return await self.api_client.generate_images(request_dict, api_key)
+
+    async def create_embeddings(
+        self,
+        input_text: str,
+        model: str,
+        api_key: str,
+    ) -> Dict[str, Any]:
+        """Create embeddings"""
+        return await self.api_client.create_embeddings(input_text, model, api_key)
+
+    async def _handle_normal_completion(
+        self, model: str, request: dict, api_key: str
+    ) -> Dict[str, Any]:
+        """Handle a non-streaming chat completion"""
+        start_time = time.perf_counter()
+        request_datetime = datetime.datetime.now()
+        is_success = False
+        status_code = None
+        response = None
+        try:
+            response = await self.api_client.generate_content(request, api_key)
+            is_success = True
+            status_code = 200
+            return response
+        except Exception as e:
+            is_success = False
+            error_log_msg = str(e)
+            logger.error(f"Normal API call failed with error: {error_log_msg}")
+            # Try to parse status code from exception
+            match = re.search(r"status code (\d+)", error_log_msg)
+            if match:
+                status_code = int(match.group(1))
+            else:
+                status_code = 500
+
+            await add_error_log(
+                gemini_key=api_key,
+                model_name=model,
+                error_type="openai-compatiable-non-stream",
+                error_log=error_log_msg,
+                error_code=status_code,
+                request_msg=request,
+            )
+            raise
+        finally:
+            end_time = time.perf_counter()
+            latency_ms = int((end_time - start_time) * 1000)
+            await add_request_log(
+                model_name=model,
+                api_key=api_key,
+                is_success=is_success,
+                status_code=status_code,
+                latency_ms=latency_ms,
+                request_time=request_datetime,
+            )
+
+    async def _handle_stream_completion(
+        self, model: str, payload: dict, api_key: str
+    ) -> AsyncGenerator[str, None]:
+        """Handle a streaming chat completion, with retry logic"""
+        retries = 0
+        max_retries = settings.MAX_RETRIES
+        is_success = False
+        status_code = None
+        final_api_key = api_key
+
+        while retries < max_retries:
+            start_time = time.perf_counter()
+            request_datetime = datetime.datetime.now()
+            current_attempt_key = api_key
+            final_api_key = current_attempt_key
+            try:
+                async for line in self.api_client.stream_generate_content(
+                    payload, current_attempt_key
+                ):
+                    if line.startswith("data:"):
+                        yield line + "\n\n"
+                logger.info("Streaming completed successfully")
+                is_success = True
+                status_code = 200
+                break  # exit the retry loop on success
+            except Exception as e:
+                retries += 1
+                is_success = False
+                error_log_msg = str(e)
+                logger.warning(
+                    f"Streaming API call failed with error: {error_log_msg}. Attempt {retries} of {max_retries}"
+                )
+                # Parse error code for logging
+                match = re.search(r"status code (\d+)", error_log_msg)
+                if match:
+                    status_code = int(match.group(1))
+                else:
+                    status_code = 500
+
+                # Log error to error log table
+                await add_error_log(
+                    gemini_key=current_attempt_key,
+                    model_name=model,
+                    error_type="openai-compatiable-stream",
+                    error_log=error_log_msg,
+                    error_code=status_code,
+                    request_msg=payload,
+                )
+
+                # Attempt to switch API Key
+                # Ensure key_manager is available (might need adjustment if not always passed)
+                if self.key_manager:
+                    api_key = await self.key_manager.handle_api_failure(
+                        current_attempt_key, retries
+                    )
+                    if api_key:
+                        logger.info(f"Switched to new API key: {api_key}")
+                    else:
+                        logger.error(
+                            f"No valid API key available after {retries} retries."
+                        )
+                        break
+                else:
+                    logger.error("KeyManager not available for retry logic.")
+                    break
+
+                if retries >= max_retries:
+                    logger.error(f"Max retries ({max_retries}) reached for streaming.")
+                    break
+            finally:
+                # Log the final outcome of the streaming request
+                end_time = time.perf_counter()
+                latency_ms = int((end_time - start_time) * 1000)
+                await add_request_log(
+                    model_name=model,
+                    api_key=final_api_key,
+                    is_success=is_success,
+                    status_code=status_code,
+                    latency_ms=latency_ms,
+                    request_time=request_datetime,
+                )
+        # If the loop finished due to failure, yield error and DONE
+        if not is_success and retries >= max_retries:
+            yield f"data: {json.dumps({'error': 'Streaming failed after retries'})}\n\n"
+            yield "data: [DONE]\n\n"
+
diff --git a/app/static/js/config_editor.js b/app/static/js/config_editor.js
index 0f5b793..0d45dca 100644
--- a/app/static/js/config_editor.js
+++ b/app/static/js/config_editor.js
@@ -71,6 +71,12 @@ document.addEventListener('DOMContentLoaded', function() {
     const cancelAddProxyBtn = document.getElementById('cancelAddProxyBtn');
     const confirmAddProxyBtn = document.getElementById('confirmAddProxyBtn');
     const proxyBulkInput = document.getElementById('proxyBulkInput');
+    const bulkDeleteProxyBtn = document.getElementById('bulkDeleteProxyBtn'); // added
+    const bulkDeleteProxyModal = document.getElementById('bulkDeleteProxyModal'); // added
+    const closeBulkDeleteProxyModalBtn = document.getElementById('closeBulkDeleteProxyModalBtn'); // added
+    const cancelBulkDeleteProxyBtn = document.getElementById('cancelBulkDeleteProxyBtn'); // added
+    const confirmBulkDeleteProxyBtn = document.getElementById('confirmBulkDeleteProxyBtn'); // added
+    const bulkDeleteProxyInput = document.getElementById('bulkDeleteProxyInput'); // added
     // --- 结束:Proxy 模态框相关 ---

     // --- 新增:重置确认模态框相关 ---
@@ -120,9 +126,12 @@ document.addEventListener('DOMContentLoaded', function() {
         if (event.target == bulkDeleteApiKeyModal) { // 新增对批量删除模态框的处理
             bulkDeleteApiKeyModal.classList.remove('show');
         }
-        if (event.target == proxyModal) { // 新增对代理模态框的处理
+        if (event.target == proxyModal) { // 新增对代理模态框的处理
             proxyModal.classList.remove('show');
         }
+        if (event.target == bulkDeleteProxyModal) {
+            bulkDeleteProxyModal.classList.remove('show');
+        }
     });

     // 确认添加 API Key
@@ -205,6 +214,41 @@ document.addEventListener('DOMContentLoaded', function() {
         confirmAddProxyBtn.addEventListener('click', handleBulkAddProxies);
     }
     // --- 结束:Proxy 模态框事件 ---
+
+    // --- Added: bulk-delete proxy events ---
+    // Open the bulk-delete modal
+    if (bulkDeleteProxyBtn) {
+        bulkDeleteProxyBtn.addEventListener('click', () => {
+            if (bulkDeleteProxyModal) {
+                bulkDeleteProxyModal.classList.add('show');
+            }
+            if (bulkDeleteProxyInput) bulkDeleteProxyInput.value = ''; // clear the input
+        });
+    }
+
+    // Close the bulk-delete modal (X button)
+    if (closeBulkDeleteProxyModalBtn) {
+        closeBulkDeleteProxyModalBtn.addEventListener('click', () => {
+            if (bulkDeleteProxyModal) {
+                bulkDeleteProxyModal.classList.remove('show');
+            }
+        });
+    }
+
+    // Close the bulk-delete modal (cancel button)
+    if (cancelBulkDeleteProxyBtn) {
+        cancelBulkDeleteProxyBtn.addEventListener('click', () => {
+            if (bulkDeleteProxyModal) {
+                bulkDeleteProxyModal.classList.remove('show');
+            }
+        });
+    }
+
+    // Confirm bulk deletion of proxies
+    if (confirmBulkDeleteProxyBtn) {
+        confirmBulkDeleteProxyBtn.addEventListener('click', handleBulkDeleteProxies);
+    }
+    // --- End: bulk-delete proxy events ---

     // --- 新增:重置确认模态框事件监听 (移到 DOMContentLoaded 内部) ---
     if (closeResetModalBtn) {
@@ -609,6 +653,56 @@ function handleBulkAddProxies() {
 }
 // --- 结束:处理批量添加 Proxy 的逻辑 ---

+// --- Added: bulk-delete proxy handling ---
+function handleBulkDeleteProxies() {
+    const bulkDeleteTextarea = document.getElementById('bulkDeleteProxyInput');
+    const proxyContainer = document.getElementById('PROXIES_container');
+    const bulkDeleteModal = document.getElementById('bulkDeleteProxyModal');
+
+    if (!bulkDeleteTextarea || !proxyContainer || !bulkDeleteModal) return;
+
+    const bulkText = bulkDeleteTextarea.value;
+    if (!bulkText.trim()) {
+        showNotification('请粘贴需要删除的代理地址', 'warning');
+        return;
+    }
+
+    // Reuse the bulk-add regex to extract the proxies to delete
+    const proxyRegex = /(?:https?|socks5):\/\/(?:[^:@\/]+(?::[^@\/]+)?@)?(?:[^:\/\s]+)(?::\d+)?/g;
+    const proxiesToDelete = new Set(bulkText.match(proxyRegex) || []); // a Set gives fast lookups
+
+    if (proxiesToDelete.size === 0) {
+        showNotification('未在输入内容中提取到有效的代理地址格式', 'warning');
+        return;
+    }
+
+    const proxyItems = proxyContainer.querySelectorAll('.array-item');
+    let deleteCount = 0;
+
+    proxyItems.forEach(item => {
+        const input = item.querySelector('.array-input');
+        // Remove the item if its input exists and its value is marked for deletion
+        if (input && proxiesToDelete.has(input.value)) {
+            item.remove(); // remove the whole array-item element
+            deleteCount++;
+        }
+    });
+
+    // Close the modal
+    bulkDeleteModal.classList.remove('show');
+
+    // Report the outcome
+    if (deleteCount > 0) {
+        showNotification(`成功删除了 ${deleteCount} 个匹配的代理`, 'success');
+    } else {
+        showNotification('列表中未找到您输入的任何代理进行删除', 'info');
+    }
+
+    // Clear the textarea afterwards
+    bulkDeleteTextarea.value = '';
+}
+// --- End: bulk-delete proxy handling ---
+
 // 切换标签
 function switchTab(tabId) {
     // 更新标签按钮状态
@@ -1063,10 +1157,6 @@ function generateRandomToken() {
 }
 // --- 结束:生成随机令牌函数 ---

-// --- 修改:添加思考模型预算映射项 (现在由添加思考模型触发) ---
-// function addBudgetMapItem() {
-//     // 不再需要手动添加
-// }

 // Deprecated: This function is now effectively replaced by createAndAppendBudgetMapItem
 // for the initial population logic. It delegates to the new function if called.
diff --git a/app/templates/config_editor.html b/app/templates/config_editor.html
index e83e21c..1d9f808 100644
--- a/app/templates/config_editor.html
+++ b/app/templates/config_editor.html
@@ -189,11 +189,14 @@
+                    [added markup lost in extraction: bulk-delete proxy button (#bulkDeleteProxyBtn)]
-                    代理服务器列表,支持 http 和 socks5 格式,例如: http://user:pass@host:port 或 socks5://host:port。点击按钮可批量添加。
+                    代理服务器列表,支持 http 和 socks5 格式,例如: http://user:pass@host:port 或 socks5://host:port。点击按钮可批量添加或删除。
@@ -543,6 +546,24 @@
+                    [added markup lost in extraction: bulk-delete proxy modal (#bulkDeleteProxyModal) with close (#closeBulkDeleteProxyModalBtn), cancel (#cancelBulkDeleteProxyBtn), and confirm (#confirmBulkDeleteProxyBtn) buttons plus a #bulkDeleteProxyInput textarea]
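
A minimal smoke test against the new chat route might look like the sketch below. The port, bearer token, and model name are illustrative assumptions, not values taken from this diff; "stop" and "reasoning_effort" exercise fields added to ChatRequest above (tools, tool_choice, and response_format are accepted the same way).

import httpx

BASE_URL = "http://localhost:8000"    # assumed deployment address
AUTH_TOKEN = "sk-your-allowed-token"  # assumed token accepted by SecurityService

payload = {
    "model": "gemini-2.0-flash",      # assumed model name
    "messages": [{"role": "user", "content": "Say hello."}],
    "stream": False,
    "stop": "END",                    # ChatRequest now accepts a bare string here
    "reasoning_effort": "low",        # new optional field
}

resp = httpx.post(
    f"{BASE_URL}/openai/v1/chat/completions",
    json=payload,
    headers={"Authorization": f"Bearer {AUTH_TOKEN}"},
    timeout=60,
)
resp.raise_for_status()
print(resp.json()["choices"][0]["message"]["content"])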
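
On the streaming path, _handle_stream_completion re-emits upstream SSE lines that start with "data:" and, once retries are exhausted, yields an error payload followed by "data: [DONE]". A client-side consumption sketch under the same assumptions:

import json
import httpx

def stream_chat(base_url: str, token: str, payload: dict) -> None:
    payload = {**payload, "stream": True}  # force the streaming branch
    with httpx.stream(
        "POST",
        f"{base_url}/openai/v1/chat/completions",
        json=payload,
        headers={"Authorization": f"Bearer {token}"},
        timeout=None,
    ) as resp:
        resp.raise_for_status()
        for line in resp.iter_lines():
            if not line.startswith("data:"):
                continue
            data = line[len("data:"):].strip()
            if data == "[DONE]":
                break
            chunk = json.loads(data)
            # After exhausted retries the service emits {"error": ...}
            # instead of a chat.completion.chunk, so guard before indexing.
            if "choices" not in chunk:
                raise RuntimeError(chunk.get("error", "stream failed"))
            delta = chunk["choices"][0]["delta"].get("content")
            if delta:
                print(delta, end="", flush=True)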
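
The images route now defaults to imagen-3.0-generate-002 with response_format "b64_json", and generate_images swaps in settings.PAID_KEY server-side, so the caller only authenticates to the proxy. A sketch that assumes the upstream answers in the OpenAI images schema (data[0].b64_json):

import base64
import httpx

resp = httpx.post(
    "http://localhost:8000/openai/v1/images/generations",       # assumed address
    json={"prompt": "a watercolor fox", "n": 1, "size": "1024x1024"},
    headers={"Authorization": "Bearer sk-your-allowed-token"},  # assumed token
    timeout=120,
)
resp.raise_for_status()
b64 = resp.json()["data"][0]["b64_json"]  # assumed response shape
with open("fox.png", "wb") as f:
    f.write(base64.b64decode(b64))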
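
The embeddings route simply forwards input and model to the upstream /openai/embeddings endpoint using the next working key. A sketch under the same assumptions (the model name and response schema are not fixed by this diff):

import httpx

resp = httpx.post(
    "http://localhost:8000/openai/v1/embeddings",                   # assumed address
    json={"input": "hello world", "model": "text-embedding-004"},   # assumed model
    headers={"Authorization": "Bearer sk-your-allowed-token"},
    timeout=30,
)
resp.raise_for_status()
vector = resp.json()["data"][0]["embedding"]  # assumes OpenAI embeddings schema
print(len(vector))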