mirror of
https://github.com/snailyp/gemini-balance.git
synced 2026-05-06 20:32:47 +08:00
feat: 实现 OpenAI 兼容 API 端点和批量代理删除
新增与 OpenAI 规范兼容的 API 端点: - `/openai/v1/models` - `/openai/v1/chat/completions` (支持流式传输、重试和密钥切换) - `/openai/v1/embeddings` - `/openai/v1/images/generations` 包含: - 在 `app/router/openai_compatiable_routes.py` 中新增路由。 - `OpenAICompatiableService` 用于处理请求逻辑、日志记录和错误管理。 - 更新 `OpenaiApiClient` 以支持新方法和代理使用。 - 修改 `app/domain/openai_models.py` 以实现兼容性。 - 为新 API 添加专用日志记录器 (`openai_compatible`)。 - 为新路由 (`/openai`, `/api/version/check`) 添加认证中间件豁免。 增强配置编辑器 UI: - 在 `app/static/js/config_editor.js` 和 `app/templates/config_editor.html` 中添加批量代理删除功能。
This commit is contained in:
@@ -1,5 +1,5 @@
|
||||
from pydantic import BaseModel
|
||||
from typing import List, Optional, Union
|
||||
from typing import Any, Dict, List, Optional, Union
|
||||
|
||||
from app.core.constants import DEFAULT_MODEL, DEFAULT_TEMPERATURE, DEFAULT_TOP_K, DEFAULT_TOP_P
|
||||
|
||||
@@ -9,11 +9,14 @@ class ChatRequest(BaseModel):
|
||||
model: str = DEFAULT_MODEL
|
||||
temperature: Optional[float] = DEFAULT_TEMPERATURE
|
||||
stream: Optional[bool] = False
|
||||
tools: Optional[List[dict]] = []
|
||||
max_tokens: Optional[int] = None
|
||||
top_p: Optional[float] = DEFAULT_TOP_P
|
||||
top_k: Optional[int] = DEFAULT_TOP_K
|
||||
stop: Optional[List[str]] = []
|
||||
stop: Optional[Union[List[str],str]] = None
|
||||
reasoning_effort: Optional[str] = None
|
||||
tools: Optional[Union[List[Dict[str, Any]], Dict[str, Any]]] = []
|
||||
tool_choice: Optional[str] = None
|
||||
response_format: Optional[dict] = None
|
||||
|
||||
|
||||
class EmbeddingRequest(BaseModel):
|
||||
@@ -23,10 +26,10 @@ class EmbeddingRequest(BaseModel):
|
||||
|
||||
|
||||
class ImageGenerationRequest(BaseModel):
    """OpenAI-compatible image generation request.

    Defaults target Google's Imagen model rather than DALL-E; the diff
    interleave previously left duplicate, contradictory field definitions
    (old and new defaults side by side) — this is the post-commit shape.
    """

    # Imagen 3 is the upstream default model for this deployment.
    model: str = "imagen-3.0-generate-002"
    prompt: str = ""
    # Number of images to generate per request.
    n: int = 1
    size: Optional[str] = "1024x1024"
    # None (not empty string) so unset fields are stripped before forwarding.
    quality: Optional[str] = None
    style: Optional[str] = None
    # b64_json avoids depending on hosted URLs for generated images.
    response_format: Optional[str] = "b64_json"
|
||||
|
||||
@@ -213,4 +213,8 @@ def get_message_converter_logger():
|
||||
|
||||
|
||||
def get_api_client_logger():
    """Return the logger used by the low-level API client module."""
    return Logger.setup_logger("api_client")
|
||||
|
||||
|
||||
def get_openai_compatible_logger():
    """Return the dedicated logger for the OpenAI-compatible API layer."""
    return Logger.setup_logger("openai_compatible")
|
||||
@@ -30,6 +30,8 @@ class AuthMiddleware(BaseHTTPMiddleware):
|
||||
and not request.url.path.startswith(f"/{API_VERSION}")
|
||||
and not request.url.path.startswith("/health")
|
||||
and not request.url.path.startswith("/hf")
|
||||
and not request.url.path.startswith("/openai")
|
||||
and not request.url.path.startswith("/api/version/check")
|
||||
):
|
||||
|
||||
auth_token = request.cookies.get("auth_token")
|
||||
|
||||
129
app/router/openai_compatiable_routes.py
Normal file
129
app/router/openai_compatiable_routes.py
Normal file
@@ -0,0 +1,129 @@
|
||||
from fastapi import APIRouter, Depends, HTTPException
|
||||
from fastapi.responses import StreamingResponse
|
||||
|
||||
from app.config.config import settings
|
||||
from app.core.security import SecurityService
|
||||
from app.domain.openai_models import (
|
||||
ChatRequest,
|
||||
EmbeddingRequest,
|
||||
ImageGenerationRequest,
|
||||
)
|
||||
from app.handler.retry_handler import RetryHandler
|
||||
from app.log.logger import get_openai_compatible_logger
|
||||
from app.service.key.key_manager import KeyManager, get_key_manager_instance
|
||||
from app.service.openai_compatiable_service import OpenAICompatiableService
|
||||
|
||||
|
||||
# Router and shared singletons for the OpenAI-compatible endpoints.
router = APIRouter()
logger = get_openai_compatible_logger()

# Initialize services shared by all routes in this module.
security_service = SecurityService()
|
||||
|
||||
async def get_key_manager():
    """FastAPI dependency: return the shared KeyManager singleton."""
    return await get_key_manager_instance()
|
||||
|
||||
|
||||
async def get_next_working_key_wrapper(
    key_manager: KeyManager = Depends(get_key_manager),
):
    """FastAPI dependency: return the next working API key from the manager."""
    return await key_manager.get_next_working_key()
|
||||
|
||||
|
||||
async def get_openai_service(key_manager: KeyManager = Depends(get_key_manager)):
    """FastAPI dependency: build an OpenAI-compatible service bound to BASE_URL."""
    return OpenAICompatiableService(settings.BASE_URL, key_manager)
|
||||
|
||||
|
||||
@router.get("/openai/v1/models")
async def list_models(
    _=Depends(security_service.verify_authorization),
    key_manager: KeyManager = Depends(get_key_manager),
    openai_service: OpenAICompatiableService = Depends(get_openai_service),
):
    """List available models via the OpenAI-compatible endpoint.

    Uses the first valid key from the key manager; any upstream failure is
    surfaced as an HTTP 500 to the caller.
    """
    logger.info("-" * 50 + "list_models" + "-" * 50)
    logger.info("Handling models list request")
    api_key = await key_manager.get_first_valid_key()
    # Log only a key prefix so credentials never land in the log files.
    logger.info("Using API key: %s...", api_key[:8] if api_key else "<none>")
    try:
        return await openai_service.get_models(api_key)
    except Exception as e:
        logger.error(f"Error getting models list: {str(e)}")
        raise HTTPException(
            status_code=500, detail="Internal server error while fetching models list"
        ) from e
|
||||
|
||||
|
||||
@router.post("/openai/v1/chat/completions")
@RetryHandler(max_retries=settings.MAX_RETRIES, key_arg="api_key")
async def chat_completion(
    request: ChatRequest,
    _=Depends(security_service.verify_authorization),
    api_key: str = Depends(get_next_working_key_wrapper),
    key_manager: KeyManager = Depends(get_key_manager),
    openai_service: OpenAICompatiableService = Depends(get_openai_service),
):
    """Handle an OpenAI-compatible chat completion (streaming or not).

    Retries with key rotation are provided by RetryHandler. Image-chat
    models (``<CREATE_IMAGE_MODEL>-chat``) are routed to the paid key and
    the image chat pipeline.
    """
    # Evaluate the image-chat condition once instead of repeating the same
    # string comparison before and inside the try block.
    is_image_chat = request.model == f"{settings.CREATE_IMAGE_MODEL}-chat"
    if is_image_chat:
        api_key = await key_manager.get_paid_key()

    logger.info("-" * 50 + "chat_completion" + "-" * 50)
    logger.info(f"Handling chat completion request for model: {request.model}")
    logger.debug(f"Request: \n{request.model_dump_json(indent=2)}")
    # Log only a key prefix so credentials never land in the log files.
    logger.info("Using API key: %s...", api_key[:8] if api_key else "<none>")

    try:
        if is_image_chat:
            response = await openai_service.create_image_chat_completion(request, api_key)
        else:
            response = await openai_service.create_chat_completion(request, api_key)
        # Streaming responses are forwarded to the client as SSE.
        if request.stream:
            return StreamingResponse(response, media_type="text/event-stream")
        logger.info("Chat completion request successful")
        return response
    except Exception as e:
        logger.error(f"Chat completion failed after retries: {str(e)}")
        raise HTTPException(status_code=500, detail="Chat completion failed") from e
|
||||
|
||||
|
||||
@router.post("/openai/v1/images/generations")
async def generate_image(
    request: ImageGenerationRequest,
    _=Depends(security_service.verify_authorization),
    openai_service: OpenAICompatiableService = Depends(get_openai_service),
):
    """Generate images through the OpenAI-compatible endpoint."""
    logger.info("-" * 50 + "generate_image" + "-" * 50)
    logger.info(f"Handling image generation request for prompt: {request.prompt}")

    try:
        result = await openai_service.generate_images(request)
    except Exception as e:
        logger.error(f"Image generation request failed: {str(e)}")
        raise HTTPException(
            status_code=500, detail="Image generation request failed"
        ) from e
    logger.info("Image generation request successful")
    return result
|
||||
|
||||
|
||||
@router.post("/openai/v1/embeddings")
async def embedding(
    request: EmbeddingRequest,
    _=Depends(security_service.verify_authorization),
    key_manager: KeyManager = Depends(get_key_manager),
    openai_service: OpenAICompatiableService = Depends(get_openai_service),
):
    """Create embeddings through the OpenAI-compatible endpoint."""
    logger.info("-" * 50 + "embedding" + "-" * 50)
    logger.info(f"Handling embedding request for model: {request.model}")
    api_key = await key_manager.get_next_working_key()
    # Log only a key prefix so credentials never land in the log files.
    logger.info("Using API key: %s...", api_key[:8] if api_key else "<none>")
    try:
        response = await openai_service.create_embeddings(
            input_text=request.input, model=request.model, api_key=api_key
        )
        logger.info("Embedding request successful")
        return response
    except Exception as e:
        logger.error(f"Embedding request failed: {str(e)}")
        raise HTTPException(status_code=500, detail="Embedding request failed") from e
|
||||
@@ -8,7 +8,7 @@ from fastapi.templating import Jinja2Templates
|
||||
|
||||
from app.core.security import verify_auth_token
|
||||
from app.log.logger import get_routes_logger
|
||||
from app.router import error_log_routes, gemini_routes, openai_routes, config_routes, scheduler_routes, stats_routes, version_routes # 新增导入 version_routes
|
||||
from app.router import error_log_routes, gemini_routes, openai_routes, config_routes, scheduler_routes, stats_routes, version_routes, openai_compatiable_routes
|
||||
from app.service.key.key_manager import get_key_manager_instance
|
||||
from app.service.stats_service import StatsService
|
||||
|
||||
@@ -31,9 +31,10 @@ def setup_routers(app: FastAPI) -> None:
|
||||
app.include_router(gemini_routes.router_v1beta)
|
||||
app.include_router(config_routes.router)
|
||||
app.include_router(error_log_routes.router)
|
||||
app.include_router(scheduler_routes.router) # 新增包含 scheduler 路由
|
||||
app.include_router(stats_routes.router) # 包含 stats API 路由
|
||||
app.include_router(version_routes.router) # 包含 version API 路由
|
||||
app.include_router(scheduler_routes.router)
|
||||
app.include_router(stats_routes.router)
|
||||
app.include_router(version_routes.router)
|
||||
app.include_router(openai_compatiable_routes.router)
|
||||
|
||||
# 添加页面路由
|
||||
setup_page_routes(app)
|
||||
|
||||
@@ -47,9 +47,9 @@ class GeminiApiClient(ApiClient):
|
||||
proxy_to_use = None
|
||||
if settings.PROXIES:
|
||||
proxy_to_use = random.choice(settings.PROXIES)
|
||||
logger.info(f"using proxy: {proxy_to_use}")
|
||||
logger.info(f"Using proxy: {proxy_to_use}")
|
||||
|
||||
async with httpx.AsyncClient(timeout=timeout, proxy=proxy_to_use) as client: # 修改:直接传递代理字符串
|
||||
async with httpx.AsyncClient(timeout=timeout, proxy=proxy_to_use) as client:
|
||||
url = f"{self.base_url}/models/{model}:generateContent?key={api_key}"
|
||||
response = await client.post(url, json=payload)
|
||||
if response.status_code != 200:
|
||||
@@ -64,9 +64,9 @@ class GeminiApiClient(ApiClient):
|
||||
proxy_to_use = None
|
||||
if settings.PROXIES:
|
||||
proxy_to_use = random.choice(settings.PROXIES)
|
||||
logger.info(f"using proxy: {proxy_to_use}")
|
||||
logger.info(f"Using proxy: {proxy_to_use}")
|
||||
|
||||
async with httpx.AsyncClient(timeout=timeout, proxy=proxy_to_use) as client: # 修改:直接传递代理字符串
|
||||
async with httpx.AsyncClient(timeout=timeout, proxy=proxy_to_use) as client:
|
||||
url = f"{self.base_url}/models/{model}:streamGenerateContent?alt=sse&key={api_key}"
|
||||
async with client.stream(method="POST", url=url, json=payload) as response:
|
||||
if response.status_code != 200:
|
||||
@@ -75,3 +75,96 @@ class GeminiApiClient(ApiClient):
|
||||
raise Exception(f"API call failed with status code {response.status_code}, {error_msg}")
|
||||
async for line in response.aiter_lines():
|
||||
yield line
|
||||
|
||||
|
||||
class OpenaiApiClient(ApiClient):
    """Async client for the upstream OpenAI-compatible API.

    Wraps the models, chat-completions, embeddings and image-generation
    endpoints. POST calls route through a randomly chosen proxy from
    ``settings.PROXIES`` when proxies are configured. Proxy selection and
    error raising were copy-pasted across every method; they are factored
    into small helpers here, with identical behavior and messages.
    """

    def __init__(self, base_url: str, timeout: int = DEFAULT_TIMEOUT):
        self.base_url = base_url
        self.timeout = timeout

    def _select_proxy(self):
        """Return a random proxy from settings.PROXIES, or None if unset."""
        if settings.PROXIES:
            proxy_to_use = random.choice(settings.PROXIES)
            logger.info(f"Using proxy: {proxy_to_use}")
            return proxy_to_use
        return None

    @staticmethod
    def _raise_for_error(status_code: int, error_content: str) -> None:
        """Raise the uniform exception used for non-200 upstream responses."""
        raise Exception(
            f"API call failed with status code {status_code}, {error_content}"
        )

    async def get_models(self, api_key: str) -> Dict[str, Any]:
        """GET /openai/models and return the parsed JSON body."""
        timeout = httpx.Timeout(self.timeout, read=self.timeout)
        # NOTE(review): unlike every other call here, this one does not use a
        # proxy — confirm whether that is intentional.
        async with httpx.AsyncClient(timeout=timeout) as client:
            url = f"{self.base_url}/openai/models"
            headers = {"Authorization": f"Bearer {api_key}"}
            response = await client.get(url, headers=headers)
            if response.status_code != 200:
                self._raise_for_error(response.status_code, response.text)
            return response.json()

    async def generate_content(self, payload: Dict[str, Any], api_key: str) -> Dict[str, Any]:
        """POST /openai/chat/completions (non-streaming) and return JSON."""
        timeout = httpx.Timeout(self.timeout, read=self.timeout)
        proxy_to_use = self._select_proxy()
        async with httpx.AsyncClient(timeout=timeout, proxy=proxy_to_use) as client:
            url = f"{self.base_url}/openai/chat/completions"
            headers = {"Authorization": f"Bearer {api_key}"}
            response = await client.post(url, json=payload, headers=headers)
            if response.status_code != 200:
                self._raise_for_error(response.status_code, response.text)
            return response.json()

    async def stream_generate_content(self, payload: Dict[str, Any], api_key: str) -> AsyncGenerator[str, None]:
        """POST /openai/chat/completions with streaming; yield raw SSE lines."""
        timeout = httpx.Timeout(self.timeout, read=self.timeout)
        proxy_to_use = self._select_proxy()
        async with httpx.AsyncClient(timeout=timeout, proxy=proxy_to_use) as client:
            url = f"{self.base_url}/openai/chat/completions"
            headers = {"Authorization": f"Bearer {api_key}"}
            async with client.stream(method="POST", url=url, json=payload, headers=headers) as response:
                if response.status_code != 200:
                    # Body must be read explicitly on a streaming response.
                    error_content = await response.aread()
                    self._raise_for_error(
                        response.status_code, error_content.decode("utf-8")
                    )
                async for line in response.aiter_lines():
                    yield line

    async def create_embeddings(self, input: str, model: str, api_key: str) -> Dict[str, Any]:
        """POST /openai/embeddings and return the parsed JSON body.

        ``input`` shadows the builtin but is kept to mirror the OpenAI
        request field name and preserve the call interface.
        """
        timeout = httpx.Timeout(self.timeout, read=self.timeout)
        proxy_to_use = self._select_proxy()
        async with httpx.AsyncClient(timeout=timeout, proxy=proxy_to_use) as client:
            url = f"{self.base_url}/openai/embeddings"
            headers = {"Authorization": f"Bearer {api_key}"}
            payload = {
                "input": input,
                "model": model,
            }
            response = await client.post(url, json=payload, headers=headers)
            if response.status_code != 200:
                self._raise_for_error(response.status_code, response.text)
            return response.json()

    async def generate_images(self, payload: Dict[str, Any], api_key: str) -> Dict[str, Any]:
        """POST /openai/images/generations and return the parsed JSON body."""
        timeout = httpx.Timeout(self.timeout, read=self.timeout)
        proxy_to_use = self._select_proxy()
        async with httpx.AsyncClient(timeout=timeout, proxy=proxy_to_use) as client:
            url = f"{self.base_url}/openai/images/generations"
            headers = {"Authorization": f"Bearer {api_key}"}
            response = await client.post(url, json=payload, headers=headers)
            if response.status_code != 200:
                self._raise_for_error(response.status_code, response.text)
            return response.json()
|
||||
197
app/service/openai_compatiable_service.py
Normal file
197
app/service/openai_compatiable_service.py
Normal file
@@ -0,0 +1,197 @@
|
||||
|
||||
import datetime
|
||||
import json
|
||||
import re
|
||||
import time
|
||||
from typing import Any, AsyncGenerator, Dict, Union
|
||||
|
||||
from app.config.config import settings
|
||||
from app.database.services import (
|
||||
add_error_log,
|
||||
add_request_log,
|
||||
)
|
||||
from app.domain.openai_models import ChatRequest, ImageGenerationRequest
|
||||
from app.service.client.api_client import OpenaiApiClient
|
||||
from app.service.key.key_manager import KeyManager
|
||||
from app.log.logger import get_openai_compatible_logger
|
||||
|
||||
logger = get_openai_compatible_logger()
|
||||
|
||||
class OpenAICompatiableService:
|
||||
|
||||
def __init__(self, base_url: str, key_manager: KeyManager = None):
    # key_manager may be None; stream retry/key-rotation is then skipped.
    self.key_manager = key_manager
    self.base_url = base_url
    # Dedicated HTTP client for the OpenAI-compatible upstream.
    self.api_client = OpenaiApiClient(base_url, settings.TIME_OUT)
|
||||
|
||||
async def get_models(self, api_key: str) -> Dict[str, Any]:
    """Fetch the upstream model list using the given API key."""
    return await self.api_client.get_models(api_key)
|
||||
|
||||
async def create_chat_completion(
    self,
    request: ChatRequest,
    api_key: str,
) -> Union[Dict[str, Any], AsyncGenerator[str, None]]:
    """Create a chat completion, dispatching to the stream or normal path.

    Null-valued fields are stripped from the payload; ``top_k`` is always
    removed because the upstream endpoint does not support it.
    """
    request_dict = request.model_dump()
    # Drop null values so the upstream API only sees populated fields.
    request_dict = {k: v for k, v in request_dict.items() if v is not None}
    # pop() instead of del: top_k may already have been removed by the
    # None-filter above, and `del` would raise KeyError in that case.
    request_dict.pop("top_k", None)
    if request.stream:
        return self._handle_stream_completion(request.model, request_dict, api_key)
    return await self._handle_normal_completion(request.model, request_dict, api_key)
|
||||
|
||||
async def generate_images(
    self,
    request: ImageGenerationRequest,
) -> Dict[str, Any]:
    """Generate images via the upstream API using the paid key."""
    # Strip null fields so only populated values reach the upstream API.
    payload = {
        key: value
        for key, value in request.model_dump().items()
        if value is not None
    }
    return await self.api_client.generate_images(payload, settings.PAID_KEY)
|
||||
|
||||
async def create_embeddings(
    self,
    input_text: str,
    model: str,
    api_key: str,
) -> Dict[str, Any]:
    """Create embeddings for input_text with the given model and API key."""
    return await self.api_client.create_embeddings(input_text, model, api_key)
|
||||
|
||||
async def _handle_normal_completion(
    self, model: str, request: dict, api_key: str
) -> Dict[str, Any]:
    """Run a non-streaming chat completion with request/error logging.

    On failure the error is persisted via add_error_log and re-raised;
    the finally block always records the request outcome and latency.
    """
    start_time = time.perf_counter()
    request_datetime = datetime.datetime.now()
    is_success = False
    status_code = None
    response = None
    try:
        response = await self.api_client.generate_content(request, api_key)
        is_success = True
        status_code = 200
        return response
    except Exception as e:
        is_success = False
        error_log_msg = str(e)
        logger.error(f"Normal API call failed with error: {error_log_msg}")
        # Try to parse the HTTP status code out of the exception message;
        # fall back to 500 when it cannot be found.
        match = re.search(r"status code (\d+)", error_log_msg)
        if match:
            status_code = int(match.group(1))
        else:
            status_code = 500

        await add_error_log(
            gemini_key=api_key,
            model_name=model,
            error_type="openai-compatiable-non-stream",
            error_log=error_log_msg,
            error_code=status_code,
            request_msg=request,
        )
        raise e
    finally:
        # Always record the request outcome and latency, success or failure.
        end_time = time.perf_counter()
        latency_ms = int((end_time - start_time) * 1000)
        await add_request_log(
            model_name=model,
            api_key=api_key,
            is_success=is_success,
            status_code=status_code,
            latency_ms=latency_ms,
            request_time=request_datetime,
        )
|
||||
|
||||
async def _handle_stream_completion(
    self, model: str, payload: dict, api_key: str
) -> AsyncGenerator[str, None]:
    """Stream chat-completion SSE lines with retry and key rotation.

    Retries up to settings.MAX_RETRIES times, switching API keys via the
    key manager on each failure. Every attempt is recorded in the request
    log (finally block); failures are also written to the error log. When
    all retries are exhausted, an error payload and [DONE] are emitted so
    the client stream terminates cleanly.
    """
    retries = 0
    max_retries = settings.MAX_RETRIES
    is_success = False
    status_code = None
    final_api_key = api_key

    while retries < max_retries:
        start_time = time.perf_counter()
        request_datetime = datetime.datetime.now()
        current_attempt_key = api_key
        final_api_key = current_attempt_key
        try:
            async for line in self.api_client.stream_generate_content(
                payload, current_attempt_key
            ):
                # Forward only SSE data lines, re-adding the blank-line
                # separator required by the SSE protocol.
                if line.startswith("data:"):
                    # print(line)
                    yield line + "\n\n"
            logger.info("Streaming completed successfully")
            is_success = True
            status_code = 200
            break  # exit the retry loop on success
        except Exception as e:
            retries += 1
            is_success = False
            error_log_msg = str(e)
            logger.warning(
                f"Streaming API call failed with error: {error_log_msg}. Attempt {retries} of {max_retries}"
            )
            # Parse error code for logging; default to 500 when absent.
            match = re.search(r"status code (\d+)", error_log_msg)
            if match:
                status_code = int(match.group(1))
            else:
                status_code = 500

            # Log error to error log table
            await add_error_log(
                gemini_key=current_attempt_key,
                model_name=model,
                error_type="openai-compatiable-stream",
                error_log=error_log_msg,
                error_code=status_code,
                request_msg=payload,
            )

            # Attempt to switch API Key
            # Ensure key_manager is available (might need adjustment if not always passed)
            if self.key_manager:
                api_key = await self.key_manager.handle_api_failure(
                    current_attempt_key, retries
                )
                if api_key:
                    # NOTE(review): this logs the full key — consider redacting.
                    logger.info(f"Switched to new API key: {api_key}")
                else:
                    logger.error(
                        f"No valid API key available after {retries} retries."
                    )
                    break
            else:
                logger.error("KeyManager not available for retry logic.")
                break

            if retries >= max_retries:
                logger.error(f"Max retries ({max_retries}) reached for streaming.")
                break
        finally:
            # Log the final outcome of this streaming attempt.
            end_time = time.perf_counter()
            latency_ms = int((end_time - start_time) * 1000)
            await add_request_log(
                model_name=model,
                api_key=final_api_key,
                is_success=is_success,
                status_code=status_code,
                latency_ms=latency_ms,
                request_time=request_datetime,
            )
    # If the loop finished due to failure, yield error and DONE
    if not is_success and retries >= max_retries:
        yield f"data: {json.dumps({'error': 'Streaming failed after retries'})}\n\n"
        yield "data: [DONE]\n\n"
|
||||
|
||||
|
||||
@@ -71,6 +71,12 @@ document.addEventListener('DOMContentLoaded', function() {
|
||||
const cancelAddProxyBtn = document.getElementById('cancelAddProxyBtn');
|
||||
const confirmAddProxyBtn = document.getElementById('confirmAddProxyBtn');
|
||||
const proxyBulkInput = document.getElementById('proxyBulkInput');
|
||||
const bulkDeleteProxyBtn = document.getElementById('bulkDeleteProxyBtn'); // 新增
|
||||
const bulkDeleteProxyModal = document.getElementById('bulkDeleteProxyModal'); // 新增
|
||||
const closeBulkDeleteProxyModalBtn = document.getElementById('closeBulkDeleteProxyModalBtn'); // 新增
|
||||
const cancelBulkDeleteProxyBtn = document.getElementById('cancelBulkDeleteProxyBtn'); // 新增
|
||||
const confirmBulkDeleteProxyBtn = document.getElementById('confirmBulkDeleteProxyBtn'); // 新增
|
||||
const bulkDeleteProxyInput = document.getElementById('bulkDeleteProxyInput'); // 新增
|
||||
// --- 结束:Proxy 模态框相关 ---
|
||||
|
||||
// --- 新增:重置确认模态框相关 ---
|
||||
@@ -120,9 +126,12 @@ document.addEventListener('DOMContentLoaded', function() {
|
||||
if (event.target == bulkDeleteApiKeyModal) { // 新增对批量删除模态框的处理
|
||||
bulkDeleteApiKeyModal.classList.remove('show');
|
||||
}
|
||||
if (event.target == proxyModal) { // 新增对代理模态框的处理
|
||||
if (event.target == proxyModal) { // 新增对代理模态框的处理
|
||||
proxyModal.classList.remove('show');
|
||||
}
|
||||
if (event.target == bulkDeleteProxyModal) { // 新增对批量删除代理模态框的处理
|
||||
bulkDeleteProxyModal.classList.remove('show');
|
||||
}
|
||||
});
|
||||
|
||||
// 确认添加 API Key
|
||||
@@ -205,6 +214,41 @@ document.addEventListener('DOMContentLoaded', function() {
|
||||
confirmAddProxyBtn.addEventListener('click', handleBulkAddProxies);
|
||||
}
|
||||
// --- 结束:Proxy 模态框事件 ---
|
||||
|
||||
// --- 新增:批量删除 Proxy 相关事件 ---
|
||||
// 打开批量删除模态框
|
||||
if (bulkDeleteProxyBtn) {
|
||||
bulkDeleteProxyBtn.addEventListener('click', () => {
|
||||
if (bulkDeleteProxyModal) {
|
||||
bulkDeleteProxyModal.classList.add('show');
|
||||
}
|
||||
if (bulkDeleteProxyInput) bulkDeleteProxyInput.value = ''; // 清空输入框
|
||||
});
|
||||
}
|
||||
|
||||
// 关闭批量删除模态框 (X 按钮)
|
||||
if (closeBulkDeleteProxyModalBtn) {
|
||||
closeBulkDeleteProxyModalBtn.addEventListener('click', () => {
|
||||
if (bulkDeleteProxyModal) {
|
||||
bulkDeleteProxyModal.classList.remove('show');
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// 关闭批量删除模态框 (取消按钮)
|
||||
if (cancelBulkDeleteProxyBtn) {
|
||||
cancelBulkDeleteProxyBtn.addEventListener('click', () => {
|
||||
if (bulkDeleteProxyModal) {
|
||||
bulkDeleteProxyModal.classList.remove('show');
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// 确认批量删除 Proxy
|
||||
if (confirmBulkDeleteProxyBtn) {
|
||||
confirmBulkDeleteProxyBtn.addEventListener('click', handleBulkDeleteProxies);
|
||||
}
|
||||
// --- 结束:批量删除 Proxy 相关 ---
|
||||
|
||||
// --- 新增:重置确认模态框事件监听 (移到 DOMContentLoaded 内部) ---
|
||||
if (closeResetModalBtn) {
|
||||
@@ -609,6 +653,56 @@ function handleBulkAddProxies() {
|
||||
}
|
||||
// --- 结束:处理批量添加 Proxy 的逻辑 ---
|
||||
|
||||
// --- 新增:处理批量删除 Proxy 的逻辑 ---
|
||||
// Bulk proxy deletion: remove every proxy listed in the textarea from the
// PROXIES container, then close the modal and report the result.
function handleBulkDeleteProxies() {
    const textarea = document.getElementById('bulkDeleteProxyInput');
    const container = document.getElementById('PROXIES_container');
    const modal = document.getElementById('bulkDeleteProxyModal');

    if (!textarea || !container || !modal) return;

    const rawText = textarea.value;
    if (!rawText.trim()) {
        showNotification('请粘贴需要删除的代理地址', 'warning');
        return;
    }

    // Same pattern used when adding proxies, so add/delete stay symmetric.
    const proxyRegex = /(?:https?|socks5):\/\/(?:[^:@\/]+(?::[^@\/]+)?@)?(?:[^:\/\s]+)(?::\d+)?/g;
    const targets = new Set(rawText.match(proxyRegex) || []);

    if (targets.size === 0) {
        showNotification('未在输入内容中提取到有效的代理地址格式', 'warning');
        return;
    }

    let removed = 0;
    container.querySelectorAll('.array-item').forEach(entry => {
        const field = entry.querySelector('.array-input');
        // Delete the whole array item when its input matches a target proxy.
        if (field && targets.has(field.value)) {
            entry.remove();
            removed++;
        }
    });

    modal.classList.remove('show');

    if (removed > 0) {
        showNotification(`成功删除了 ${removed} 个匹配的代理`, 'success');
    } else {
        showNotification('列表中未找到您输入的任何代理进行删除', 'info');
    }

    // Clear the textarea after processing.
    textarea.value = '';
}
|
||||
// --- 结束:处理批量删除 Proxy 的逻辑 ---
|
||||
|
||||
// 切换标签
|
||||
function switchTab(tabId) {
|
||||
// 更新标签按钮状态
|
||||
@@ -1063,10 +1157,6 @@ function generateRandomToken() {
|
||||
}
|
||||
// --- 结束:生成随机令牌函数 ---
|
||||
|
||||
// --- 修改:添加思考模型预算映射项 (现在由添加思考模型触发) ---
|
||||
// function addBudgetMapItem() {
|
||||
// // 不再需要手动添加
|
||||
// }
|
||||
|
||||
// Deprecated: This function is now effectively replaced by createAndAppendBudgetMapItem
|
||||
// for the initial population logic. It delegates to the new function if called.
|
||||
|
||||
@@ -189,11 +189,14 @@
|
||||
<!-- 代理项将在这里动态添加 -->
|
||||
</div>
|
||||
<div class="flex justify-end gap-2">
|
||||
<button type="button" class="bg-danger-600 hover:bg-danger-700 text-white px-4 py-2 rounded-lg font-medium transition-all duration-200 flex items-center gap-2" id="bulkDeleteProxyBtn">
|
||||
<i class="fas fa-trash-alt"></i> 删除代理
|
||||
</button>
|
||||
<button type="button" class="bg-primary-600 hover:bg-primary-700 text-white px-4 py-2 rounded-lg font-medium transition-all duration-200 flex items-center gap-2" id="addProxyBtn">
|
||||
<i class="fas fa-plus"></i> 添加代理
|
||||
</button>
|
||||
</div>
|
||||
<small class="text-gray-500 mt-1 block">代理服务器列表,支持 http 和 socks5 格式,例如: http://user:pass@host:port 或 socks5://host:port。点击按钮可批量添加。</small>
|
||||
<small class="text-gray-500 mt-1 block">代理服务器列表,支持 http 和 socks5 格式,例如: http://user:pass@host:port 或 socks5://host:port。点击按钮可批量添加或删除。</small>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
@@ -543,6 +546,24 @@
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Bulk Delete Proxy Modal -->
|
||||
<div id="bulkDeleteProxyModal" class="modal">
|
||||
<div class="w-full max-w-lg mx-auto bg-white rounded-2xl shadow-2xl overflow-hidden animate-fade-in">
|
||||
<div class="p-6">
|
||||
<div class="flex justify-between items-center mb-4">
|
||||
<h2 class="text-xl font-bold text-gray-800">批量删除代理服务器</h2>
|
||||
<button id="closeBulkDeleteProxyModalBtn" class="text-gray-400 hover:text-gray-600 text-xl">×</button>
|
||||
</div>
|
||||
<p class="text-gray-600 mb-4">每行粘贴一个或多个代理地址,将自动提取有效地址并从列表中删除。</p>
|
||||
<textarea id="bulkDeleteProxyInput" rows="10" placeholder="在此处粘贴要删除的代理地址..." class="w-full px-4 py-3 rounded-lg border border-gray-300 focus:border-danger-500 focus:ring focus:ring-danger-200 focus:ring-opacity-50 font-mono text-sm"></textarea>
|
||||
<div class="flex justify-end gap-3 mt-6">
|
||||
<button type="button" id="confirmBulkDeleteProxyBtn" class="bg-danger-600 hover:bg-danger-700 text-white px-6 py-2 rounded-lg font-medium transition">确认删除</button>
|
||||
<button type="button" id="cancelBulkDeleteProxyBtn" class="bg-gray-200 hover:bg-gray-300 text-gray-700 px-6 py-2 rounded-lg font-medium transition">取消</button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Reset Confirmation Modal -->
|
||||
<div id="resetConfirmModal" class="modal">
|
||||
<div class="w-full max-w-md mx-auto bg-white rounded-2xl shadow-2xl overflow-hidden animate-fade-in">
|
||||
|
||||
Reference in New Issue
Block a user