mirror of
https://github.com/snailyp/gemini-balance.git
synced 2026-05-12 02:19:59 +08:00
本次提交主要包含以下更新:
- **错误日志页面增强**:
- 重构了 [`app/static/js/error_logs.js`](app/static/js/error_logs.js) 中的分页逻辑,将样式控制移至 CSS,简化了 JavaScript 代码。
- 更新了 [`app/templates/error_logs.html`](app/templates/error_logs.html) 中的分页样式,使其与 `keys_status.html` 保持一致,提升了视觉统一性。
- 在错误日志页面新增了“清空全部”按钮,方便用户一键清除所有错误记录。
- 调整了错误日志表格头部的文本颜色为白色,以改善深色主题下的可读性。
- **应用初始化与配置优化**:
- 调整了 [`app/config/config.py`](app/config/config.py) 中日志记录器的获取方式,确保在配置加载早期即可用。
- 在 [`app/core/application.py`](app/core/application.py) 中引入了更明确的数据库连接管理(连接、断开、初始化)逻辑。
- 优化了 [`app/utils/helpers.py`](app/utils/helpers.py) 中项目路径和版本文件路径的定义方式,使其在模块级别初始化。
- **依赖清理**:
- 从 [`requirements.txt`](requirements.txt) 中移除了不必要的注释。
这些更改旨在提升错误日志模块的用户体验和功能性,并优化应用程序的启动和配置管理流程。
193 lines
8.5 KiB
Python
# app/services/chat/api_client.py
|
|
|
|
from typing import Dict, Any, AsyncGenerator, Optional
|
|
import httpx
|
|
import random
|
|
from abc import ABC, abstractmethod
|
|
from app.config.config import settings
|
|
from app.log.logger import get_api_client_logger
|
|
from app.core.constants import DEFAULT_TIMEOUT
|
|
|
|
logger = get_api_client_logger()
|
|
|
|
class ApiClient(ABC):
|
|
"""API客户端基类"""
|
|
|
|
@abstractmethod
|
|
async def generate_content(self, payload: Dict[str, Any], model: str, api_key: str) -> Dict[str, Any]:
|
|
pass
|
|
|
|
@abstractmethod
|
|
async def stream_generate_content(self, payload: Dict[str, Any], model: str, api_key: str) -> AsyncGenerator[str, None]:
|
|
pass
|
|
|
|
|
|
class GeminiApiClient(ApiClient):
    """HTTP client for the native Gemini API.

    Wraps httpx calls for model listing, unary generation and SSE streaming
    generation. Requests are optionally routed through a proxy chosen at
    random from ``settings.PROXIES``.
    """

    # Virtual-model suffixes this project appends to real Gemini model
    # names; they must all be removed before calling the upstream API.
    _VIRTUAL_SUFFIXES = ("-search", "-image", "-non-thinking")

    def __init__(self, base_url: str, timeout: int = DEFAULT_TIMEOUT):
        """
        Args:
            base_url: Gemini API base URL (``/models/...`` paths are appended).
            timeout: Request and read timeout in seconds.
        """
        self.base_url = base_url
        self.timeout = timeout

    def _select_proxy(self, log_prefix: str = "Using proxy") -> Optional[str]:
        """Pick a random proxy from settings, or return None when none configured.

        ``log_prefix`` keeps the original per-call log messages intact.
        """
        if settings.PROXIES:
            proxy_to_use = random.choice(settings.PROXIES)
            logger.info(f"{log_prefix}: {proxy_to_use}")
            return proxy_to_use
        return None

    def _get_real_model(self, model: str) -> str:
        """Strip virtual-model suffixes to recover the real Gemini model name.

        Suffixes may be combined in any order (e.g. ``-search-non-thinking``),
        so strip repeatedly until no known suffix remains. The previous
        single-pass version left ``-search`` behind for the
        ``-search-non-thinking`` combination: its final ``model[:-20]``
        branch checked for both markers only *after* one of them had
        already been stripped, so it never fired.
        """
        stripped = True
        while stripped:
            stripped = False
            for suffix in self._VIRTUAL_SUFFIXES:
                if model.endswith(suffix):
                    model = model[: -len(suffix)]
                    stripped = True
        return model

    async def get_models(self, api_key: str) -> Optional[Dict[str, Any]]:
        """获取可用的 Gemini 模型列表

        Returns the parsed JSON model list, or None on any HTTP or network
        error (errors are logged rather than raised).
        """
        # Model listing should fail fast; don't use the long generation timeout.
        timeout = httpx.Timeout(timeout=5)
        proxy_to_use = self._select_proxy("Using proxy for getting models")

        async with httpx.AsyncClient(timeout=timeout, proxy=proxy_to_use) as client:
            url = f"{self.base_url}/models?key={api_key}"
            try:
                response = await client.get(url)
                response.raise_for_status()
                return response.json()
            except httpx.HTTPStatusError as e:
                logger.error(f"获取模型列表失败: {e.response.status_code}")
                logger.error(e.response.text)
                return None
            except httpx.RequestError as e:
                logger.error(f"请求模型列表失败: {e}")
                return None

    async def generate_content(self, payload: Dict[str, Any], model: str, api_key: str) -> Dict[str, Any]:
        """Unary ``generateContent`` call.

        Raises:
            Exception: on any non-200 upstream response (body included).
        """
        timeout = httpx.Timeout(self.timeout, read=self.timeout)
        model = self._get_real_model(model)
        proxy_to_use = self._select_proxy()

        async with httpx.AsyncClient(timeout=timeout, proxy=proxy_to_use) as client:
            url = f"{self.base_url}/models/{model}:generateContent?key={api_key}"
            response = await client.post(url, json=payload)
            if response.status_code != 200:
                error_content = response.text
                raise Exception(f"API call failed with status code {response.status_code}, {error_content}")
            return response.json()

    async def stream_generate_content(self, payload: Dict[str, Any], model: str, api_key: str) -> AsyncGenerator[str, None]:
        """Streaming ``streamGenerateContent`` call, yielding raw SSE lines.

        Raises:
            Exception: on any non-200 upstream response (body included).
        """
        timeout = httpx.Timeout(self.timeout, read=self.timeout)
        model = self._get_real_model(model)
        proxy_to_use = self._select_proxy()

        async with httpx.AsyncClient(timeout=timeout, proxy=proxy_to_use) as client:
            url = f"{self.base_url}/models/{model}:streamGenerateContent?alt=sse&key={api_key}"
            async with client.stream(method="POST", url=url, json=payload) as response:
                if response.status_code != 200:
                    # A streamed body must be read asynchronously before use.
                    error_content = await response.aread()
                    error_msg = error_content.decode("utf-8")
                    raise Exception(f"API call failed with status code {response.status_code}, {error_msg}")
                async for line in response.aiter_lines():
                    yield line
class OpenaiApiClient(ApiClient):
    """HTTP client for the OpenAI-compatible upstream endpoints.

    Covers model listing, chat completions (unary and SSE streaming),
    embeddings and image generation. All calls except ``get_models`` are
    optionally routed through a random proxy from ``settings.PROXIES``.

    NOTE(review): ``generate_content`` / ``stream_generate_content`` omit
    the ``model`` parameter declared on ``ApiClient`` — here the model
    travels inside ``payload``. The signatures are kept as-is so existing
    callers keep working.
    """

    def __init__(self, base_url: str, timeout: int = DEFAULT_TIMEOUT):
        """
        Args:
            base_url: Upstream base URL; ``/openai/...`` paths are appended.
            timeout: Request and read timeout in seconds.
        """
        self.base_url = base_url
        self.timeout = timeout

    def _select_proxy(self) -> Optional[str]:
        """Pick a random proxy from settings, or return None when none configured."""
        if settings.PROXIES:
            proxy_to_use = random.choice(settings.PROXIES)
            logger.info(f"Using proxy: {proxy_to_use}")
            return proxy_to_use
        return None

    @staticmethod
    def _check_response(response) -> None:
        """Shared error path: raise on any non-200 upstream response."""
        if response.status_code != 200:
            error_content = response.text
            raise Exception(f"API call failed with status code {response.status_code}, {error_content}")

    async def get_models(self, api_key: str) -> Dict[str, Any]:
        """List models via GET ``/openai/models``; raises Exception on non-200."""
        timeout = httpx.Timeout(self.timeout, read=self.timeout)
        # NOTE(review): unlike the other methods, this one never uses a
        # proxy — confirm whether that is intentional.
        async with httpx.AsyncClient(timeout=timeout) as client:
            url = f"{self.base_url}/openai/models"
            headers = {"Authorization": f"Bearer {api_key}"}
            response = await client.get(url, headers=headers)
            self._check_response(response)
            return response.json()

    async def generate_content(self, payload: Dict[str, Any], api_key: str) -> Dict[str, Any]:
        """Unary chat completion; raises Exception on non-200."""
        timeout = httpx.Timeout(self.timeout, read=self.timeout)
        proxy_to_use = self._select_proxy()

        async with httpx.AsyncClient(timeout=timeout, proxy=proxy_to_use) as client:
            url = f"{self.base_url}/openai/chat/completions"
            headers = {"Authorization": f"Bearer {api_key}"}
            response = await client.post(url, json=payload, headers=headers)
            self._check_response(response)
            return response.json()

    async def stream_generate_content(self, payload: Dict[str, Any], api_key: str) -> AsyncGenerator[str, None]:
        """Streaming chat completion, yielding raw SSE lines.

        Raises:
            Exception: on any non-200 upstream response (body included).
        """
        timeout = httpx.Timeout(self.timeout, read=self.timeout)
        proxy_to_use = self._select_proxy()

        async with httpx.AsyncClient(timeout=timeout, proxy=proxy_to_use) as client:
            url = f"{self.base_url}/openai/chat/completions"
            headers = {"Authorization": f"Bearer {api_key}"}
            async with client.stream(method="POST", url=url, json=payload, headers=headers) as response:
                if response.status_code != 200:
                    # A streamed body must be read asynchronously before use.
                    error_content = await response.aread()
                    error_msg = error_content.decode("utf-8")
                    raise Exception(f"API call failed with status code {response.status_code}, {error_msg}")
                async for line in response.aiter_lines():
                    yield line

    async def create_embeddings(self, input: str, model: str, api_key: str) -> Dict[str, Any]:
        """Create embeddings for ``input`` with ``model``.

        ``input`` shadows the builtin; the parameter name is kept because
        callers may pass it as a keyword argument.
        """
        timeout = httpx.Timeout(self.timeout, read=self.timeout)
        proxy_to_use = self._select_proxy()

        async with httpx.AsyncClient(timeout=timeout, proxy=proxy_to_use) as client:
            url = f"{self.base_url}/openai/embeddings"
            headers = {"Authorization": f"Bearer {api_key}"}
            payload = {
                "input": input,
                "model": model,
            }
            response = await client.post(url, json=payload, headers=headers)
            self._check_response(response)
            return response.json()

    async def generate_images(self, payload: Dict[str, Any], api_key: str) -> Dict[str, Any]:
        """Generate images via POST ``/openai/images/generations``; raises on non-200."""
        timeout = httpx.Timeout(self.timeout, read=self.timeout)
        proxy_to_use = self._select_proxy()

        async with httpx.AsyncClient(timeout=timeout, proxy=proxy_to_use) as client:
            url = f"{self.base_url}/openai/images/generations"
            headers = {"Authorization": f"Bearer {api_key}"}
            response = await client.post(url, json=payload, headers=headers)
            self._check_response(response)
            return response.json()