refactor(api): 优化错误处理和日志记录

对多个模块进行了重构,以改进错误处理和日志记录机制。

主要变更包括:
- 在 `gemini_routes` 中,现在会返回更具体的错误信息,包括错误码和错误消息,而不仅仅是异常的字符串表示。
- 在 `api_client` 中,简化了 Gemini API 客户端的错误处理逻辑,移除了冗余的 `try...except` 块,让异常直接向上抛出。
- 在多个服务(如 `openai_chat_service`, `embedding_service`, `tts_service` 等)中,增加了根据配置项 `ERROR_LOG_RECORD_REQUEST_BODY` 来决定是否记录请求体的逻辑,以增强隐私和性能控制。
- 在前端 `keys_status.js` 中,更新了密钥验证结果的处理逻辑,以适应后端返回的新的错误对象结构(包含 `error_code` 和 `error_message`),并移除了冗余的 `executeVerifyAllKeys` 函数。
This commit is contained in:
snaily
2025-09-18 09:59:32 +08:00
parent 68b65814bc
commit 95b5acad66
8 changed files with 103 additions and 195 deletions

View File

@@ -515,7 +515,7 @@ async def verify_key(
f"Verification exception for key: {redact_key_for_logging(api_key)}, incrementing failure count"
)
return JSONResponse({"status": "invalid", "error": str(e)})
return JSONResponse({"status": "invalid", "error": e.args[1]})
@router.post("/verify-selected-keys")
@@ -559,7 +559,7 @@ async def verify_selected_keys(
await key_manager.reset_key_failure_count(api_key)
return api_key, "valid", None
except Exception as e:
error_message = str(e)
error_message = e.args[1]
logger.warning(
f"Key verification failed for {redact_key_for_logging(api_key)}: {error_message}"
)
@@ -574,7 +574,7 @@ async def verify_selected_keys(
logger.warning(
f"Bulk verification exception for key: {redact_key_for_logging(api_key)}, initializing failure count to 1"
)
failed_keys[api_key] = error_message
failed_keys[api_key] = {"error_message": e.args[1], "error_code": e.args[0]}
return api_key, "invalid", error_message
tasks = [_verify_single_key(key) for key in keys_to_verify]
@@ -585,11 +585,6 @@ async def verify_selected_keys(
logger.error(
f"An unexpected error occurred during bulk verification task: {result}"
)
elif result:
if not isinstance(result, Exception) and result:
key, status, error = result
elif isinstance(result, Exception):
logger.error(f"Task execution error during bulk verification: {result}")
valid_count = len(successful_keys)
invalid_count = len(failed_keys)

View File

@@ -659,7 +659,11 @@ class OpenAIChatService:
error_type="openai-image-stream",
error_log=error_log_msg,
error_code=status_code,
request_msg={"image_data_truncated": image_data[:1000]},
request_msg=(
{"image_data_truncated": image_data[:1000]}
if settings.ERROR_LOG_RECORD_REQUEST_BODY
else None
),
request_datetime=request_datetime,
)
raise
@@ -709,7 +713,11 @@ class OpenAIChatService:
error_type="openai-image-non-stream",
error_log=error_log_msg,
error_code=status_code,
request_msg={"image_data_truncated": image_data[:1000]},
request_msg=(
{"image_data_truncated": image_data[:1000]}
if settings.ERROR_LOG_RECORD_REQUEST_BODY
else None
),
request_datetime=request_datetime,
)
raise

View File

@@ -99,34 +99,21 @@ class GeminiApiClient(ApiClient):
async with httpx.AsyncClient(timeout=timeout, proxy=proxy_to_use) as client:
url = f"{self.base_url}/models/{model}:generateContent?key={api_key}"
response = await client.post(url, json=payload, headers=headers)
try:
response = await client.post(url, json=payload, headers=headers)
if response.status_code != 200:
error_content = response.text
logger.error(
f"API call failed - Status: {response.status_code}, Content: {error_content}"
)
raise Exception(response.status_code, error_content)
response_data = response.json()
if response.status_code != 200:
error_content = response.text
logger.error(
f"API call failed - Status: {response.status_code}, Content: {error_content}"
)
raise Exception(response.status_code, error_content)
# 检查响应结构的基本信息
if not response_data.get("candidates"):
logger.warning("No candidates found in API response")
response_data = response.json()
# 检查响应结构的基本信息
if not response_data.get("candidates"):
logger.warning("No candidates found in API response")
return response_data
except httpx.TimeoutException as e:
logger.error(f"Request timeout: {e}")
raise Exception(500, f"Request timeout: {e}")
except httpx.RequestError as e:
logger.error(f"Request error: {e}")
raise Exception(500, f"Request error: {e}")
except Exception as e:
logger.error(f"Unexpected error: {e}")
raise Exception(500, f"Unexpected error: {e}")
return response_data
async def stream_generate_content(
self, payload: Dict[str, Any], model: str, api_key: str
@@ -196,28 +183,14 @@ class GeminiApiClient(ApiClient):
headers = self._prepare_headers()
async with httpx.AsyncClient(timeout=timeout, proxy=proxy_to_use) as client:
url = f"{self.base_url}/models/{model}:embedContent?key={api_key}"
try:
response = await client.post(url, json=payload, headers=headers)
if response.status_code != 200:
error_content = response.text
logger.error(
f"Embedding API call failed - Status: {response.status_code}, Content: {error_content}"
)
raise Exception(response.status_code, error_content)
return response.json()
except httpx.TimeoutException as e:
logger.error(f"Embedding request timeout: {e}")
raise Exception(500, f"Request timeout: {e}")
except httpx.RequestError as e:
logger.error(f"Embedding request error: {e}")
raise Exception(500, f"Request error: {e}")
except Exception as e:
logger.error(f"Unexpected embedding error: {e}")
raise Exception(500, f"Unexpected embedding error: {e}")
response = await client.post(url, json=payload, headers=headers)
if response.status_code != 200:
error_content = response.text
logger.error(
f"Embedding API call failed - Status: {response.status_code}, Content: {error_content}"
)
raise Exception(response.status_code, error_content)
return response.json()
async def batch_embed_contents(
self, payload: Dict[str, Any], model: str, api_key: str
@@ -237,28 +210,14 @@ class GeminiApiClient(ApiClient):
headers = self._prepare_headers()
async with httpx.AsyncClient(timeout=timeout, proxy=proxy_to_use) as client:
url = f"{self.base_url}/models/{model}:batchEmbedContents?key={api_key}"
try:
response = await client.post(url, json=payload, headers=headers)
if response.status_code != 200:
error_content = response.text
logger.error(
f"Batch embedding API call failed - Status: {response.status_code}, Content: {error_content}"
)
raise Exception(response.status_code, error_content)
return response.json()
except httpx.TimeoutException as e:
logger.error(f"Batch embedding request timeout: {e}")
raise Exception(500, f"Request timeout: {e}")
except httpx.RequestError as e:
logger.error(f"Batch embedding request error: {e}")
raise Exception(500, f"Request error: {e}")
except Exception as e:
logger.error(f"Unexpected batch embedding error: {e}")
raise Exception(500, f"Unexpected batch embedding error: {e}")
response = await client.post(url, json=payload, headers=headers)
if response.status_code != 200:
error_content = response.text
logger.error(
f"Batch embedding API call failed - Status: {response.status_code}, Content: {error_content}"
)
raise Exception(response.status_code, error_content)
return response.json()
class OpenaiApiClient(ApiClient):

View File

@@ -69,7 +69,11 @@ class EmbeddingService:
error_type="openai-embedding",
error_log=error_log_msg,
error_code=status_code,
request_msg=request_msg_log,
request_msg=(
request_msg_log
if settings.ERROR_LOG_RECORD_REQUEST_BODY
else None
),
request_datetime=request_datetime,
)
await add_request_log(

View File

@@ -88,7 +88,7 @@ class OpenAICompatiableService:
error_type="openai-compatiable-non-stream",
error_log=error_log_msg,
error_code=status_code,
request_msg=request,
request_msg=request if settings.ERROR_LOG_RECORD_REQUEST_BODY else None,
)
raise e
finally:

View File

@@ -3,14 +3,16 @@
继承自原始聊天服务添加原生Gemini TTS支持单人和多人保持向后兼容
"""
import time
import datetime
import time
from typing import Any, Dict
from app.service.chat.gemini_chat_service import GeminiChatService
from app.service.tts.native.tts_response_handler import TTSResponseHandler
from app.config.config import settings
from app.database.services import add_error_log, add_request_log
from app.domain.gemini_models import GeminiRequest
from app.log.logger import get_gemini_logger
from app.database.services import add_request_log, add_error_log
from app.service.chat.gemini_chat_service import GeminiChatService
from app.service.tts.native.tts_response_handler import TTSResponseHandler
logger = get_gemini_logger()
@@ -28,7 +30,9 @@ class TTSGeminiChatService(GeminiChatService):
super().__init__(base_url, key_manager)
# 使用TTS响应处理器替换原始处理器
self.response_handler = TTSResponseHandler()
logger.info("TTS Gemini Chat Service initialized with multi-speaker TTS support")
logger.info(
"TTS Gemini Chat Service initialized with multi-speaker TTS support"
)
async def generate_content(
self, model: str, request: GeminiRequest, api_key: str
@@ -55,7 +59,9 @@ class TTSGeminiChatService(GeminiChatService):
logger.error(f"TTS API call failed with error: {e}")
raise
async def _handle_tts_request(self, model: str, request: GeminiRequest, api_key: str) -> Dict[str, Any]:
async def _handle_tts_request(
self, model: str, request: GeminiRequest, api_key: str
) -> Dict[str, Any]:
"""
处理TTS特定的请求包含完整的日志记录功能
"""
@@ -89,14 +95,24 @@ class TTSGeminiChatService(GeminiChatService):
if request.generationConfig:
# 添加TTS特定字段
if request.generationConfig.responseModalities:
payload["generationConfig"]["responseModalities"] = request.generationConfig.responseModalities
logger.info(f"Added responseModalities: {request.generationConfig.responseModalities}")
payload["generationConfig"][
"responseModalities"
] = request.generationConfig.responseModalities
logger.info(
f"Added responseModalities: {request.generationConfig.responseModalities}"
)
if request.generationConfig.speechConfig:
payload["generationConfig"]["speechConfig"] = request.generationConfig.speechConfig
logger.info(f"Added speechConfig: {request.generationConfig.speechConfig}")
payload["generationConfig"][
"speechConfig"
] = request.generationConfig.speechConfig
logger.info(
f"Added speechConfig: {request.generationConfig.speechConfig}"
)
else:
logger.warning("No generationConfig found in request, TTS fields may be missing")
logger.warning(
"No generationConfig found in request, TTS fields may be missing"
)
logger.info(f"TTS payload before API call: {payload}")
@@ -117,6 +133,7 @@ class TTSGeminiChatService(GeminiChatService):
# 尝试从错误消息中提取状态码
import re
match = re.search(r"status code (\d+)", error_msg)
if match:
status_code = int(match.group(1))
@@ -130,7 +147,11 @@ class TTSGeminiChatService(GeminiChatService):
error_type="tts-api-error",
error_log=error_msg,
error_code=status_code,
request_msg=request.model_dump(exclude_none=False)
request_msg=(
request.model_dump(exclude_none=False)
if settings.ERROR_LOG_RECORD_REQUEST_BODY
else None
),
)
logger.error(f"TTS API call failed: {error_msg}")
@@ -147,5 +168,5 @@ class TTSGeminiChatService(GeminiChatService):
is_success=is_success,
status_code=status_code,
latency_ms=latency_ms,
request_time=request_datetime
request_time=request_datetime,
)

View File

@@ -40,7 +40,7 @@ class TTSService:
error_log_msg = ""
try:
client = genai.Client(api_key=api_key)
response =await client.aio.models.generate_content(
response = await client.aio.models.generate_content(
model=settings.TTS_MODEL,
contents=f"Speak in a {settings.TTS_SPEED} speed voice: {request.input}",
config={
@@ -48,7 +48,11 @@ class TTSService:
"speech_config": {
"voice_config": {
"prebuilt_voice_config": {
"voice_name": request.voice if request.voice in TTS_VOICE_NAMES else settings.TTS_VOICE_NAME
"voice_name": (
request.voice
if request.voice in TTS_VOICE_NAMES
else settings.TTS_VOICE_NAME
)
}
}
},
@@ -59,7 +63,9 @@ class TTSService:
and response.candidates[0].content.parts
and response.candidates[0].content.parts[0].inline_data
):
raw_audio_data = response.candidates[0].content.parts[0].inline_data.data
raw_audio_data = (
response.candidates[0].content.parts[0].inline_data.data
)
is_success = True
status_code = 200
return _create_wav_file(raw_audio_data)
@@ -83,13 +89,17 @@ class TTSService:
error_type="google-tts",
error_log=error_log_msg,
error_code=status_code,
request_msg=request.input
)
request_msg=(
request.input
if settings.ERROR_LOG_RECORD_REQUEST_BODY
else None
),
)
await add_request_log(
model_name=settings.TTS_MODEL,
api_key=api_key,
is_success=is_success,
status_code=status_code,
latency_ms=latency_ms,
request_time=request_datetime
)
request_time=request_datetime,
)

View File

@@ -541,30 +541,13 @@ function showVerificationResultModal(data) {
const errorGroups = {};
Object.entries(failedKeys).forEach(([key, error]) => {
// 提取错误码或使用完整错误信息作为分组键
let errorCode = error;
// 尝试提取常见的错误码模式
const errorCodePatterns = [
/status code (\d+)/,
];
for (const pattern of errorCodePatterns) {
const match = error.match(pattern);
if (match) {
errorCode = match[1] || match[0];
break;
}
}
// 如果没有匹配到特定模式使用500
if (errorCode === error) {
errorCode = 500;
}
let errorCode = error["error_code"];
let errorMessage = error["error_message"];
if (!errorGroups[errorCode]) {
errorGroups[errorCode] = [];
}
errorGroups[errorCode].push({ key, error });
errorGroups[errorCode].push({ key, errorMessage });
});
// 创建分组展示容器
@@ -609,7 +592,7 @@ function showVerificationResultModal(data) {
const keysList = document.createElement("div");
keysList.className = "group-keys-list space-y-1";
keyErrorPairs.forEach(({ key, error }) => {
keyErrorPairs.forEach(({ key, errorMessage }) => {
const keyItem = document.createElement("div");
keyItem.className = "flex flex-col items-start bg-gray-50 p-2 rounded border";
@@ -624,7 +607,7 @@ function showVerificationResultModal(data) {
const detailsButton = document.createElement("button");
detailsButton.className = "ml-2 px-2 py-0.5 bg-red-200 hover:bg-red-300 text-red-700 text-xs rounded transition-colors";
detailsButton.innerHTML = '<i class="fas fa-info-circle mr-1"></i>详情';
detailsButton.dataset.error = error;
detailsButton.dataset.error = errorMessage;
detailsButton.onclick = (e) => {
e.stopPropagation();
const button = e.currentTarget;
@@ -984,7 +967,6 @@ function initializeGlobalBatchVerificationHandlers() {
document.getElementById("verifyModal").classList.add("hidden");
};
// executeVerifyAll 变为 initializeGlobalBatchVerificationHandlers 的局部函数
async function executeVerifyAll(type) {
closeVerifyModal();
const keysToVerify = getSelectedKeys(type);
@@ -1055,8 +1037,6 @@ function initializeGlobalBatchVerificationHandlers() {
invalid_count: Object.keys(allFailedKeys).length
});
}
// The confirmButton.onclick in showVerifyModal (defined earlier in initializeGlobalBatchVerificationHandlers)
// will correctly reference this local executeVerifyAll due to closure.
}
// --- 进度条模态框函数 ---
@@ -2548,73 +2528,4 @@ function showVerifyModalForAllKeys(allKeys) {
// 显示模态框
modalElement.classList.remove("hidden");
}
// Verify ALL keys in fixed-size batches against the backend verification endpoint,
// accumulating per-batch successes/failures, then show an aggregate result modal.
// NOTE(review): this commit removes this function as redundant with the local
// executeVerifyAll defined inside initializeGlobalBatchVerificationHandlers.
async function executeVerifyAllKeys(allKeys) {
closeVerifyModal();
// Read the user-configured batch size from the modal input; fall back to 10
// when the field is empty or not a number.
const batchSizeInput = document.getElementById("batchSize");
const batchSize = parseInt(batchSizeInput.value, 10) || 10;
// Open the progress modal before starting the batched verification loop.
showProgressModal(`批量验证所有 ${allKeys.length} 个密钥`);
let allSuccessfulKeys = [];
let allFailedKeys = {};
let processedCount = 0;
// Process keys in consecutive slices of batchSize, one POST request per slice.
for (let i = 0; i < allKeys.length; i += batchSize) {
const batch = allKeys.slice(i, i + batchSize);
const progressText = `正在验证批次 ${Math.floor(i / batchSize) + 1} / ${Math.ceil(allKeys.length / batchSize)} (密钥 ${i + 1}-${Math.min(i + batchSize, allKeys.length)})`;
updateProgress(i, allKeys.length, progressText);
addProgressLog(`处理批次: ${batch.length}个密钥...`);
try {
const options = {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify({ keys: batch }),
};
const data = await fetchAPI(`/gemini/v1beta/verify-selected-keys`, options);
if (data) {
if (data.successful_keys && data.successful_keys.length > 0) {
allSuccessfulKeys = allSuccessfulKeys.concat(data.successful_keys);
addProgressLog(`✅ 批次成功: ${data.successful_keys.length}`);
}
if (data.failed_keys && Object.keys(data.failed_keys).length > 0) {
// Merge this batch's failure map (key -> error) into the running total.
Object.assign(allFailedKeys, data.failed_keys);
addProgressLog(`❌ 批次失败: ${Object.keys(data.failed_keys).length}`, true);
}
} else {
addProgressLog(`- 批次返回空数据`, true);
}
} catch (apiError) {
addProgressLog(`❌ 批次请求失败: ${apiError.message}`, true);
// The whole request failed, so mark every key in this batch as failed
// with the transport error message.
batch.forEach(key => {
allFailedKeys[key] = apiError.message;
});
}
processedCount += batch.length;
updateProgress(processedCount, allKeys.length, progressText);
}
updateProgress(
allKeys.length,
allKeys.length,
`所有批次验证完成!`
);
// Close the progress modal (without marking it as cancelled) and show the
// aggregated verification result.
closeProgressModal(false);
showVerificationResultModal({
successful_keys: allSuccessfulKeys,
failed_keys: allFailedKeys,
valid_count: allSuccessfulKeys.length,
invalid_count: Object.keys(allFailedKeys).length
});
}