feat: 在 OpenAI 聊天响应中集成 usage_metadata 以跟踪 token 使用情况

此更改将 `usage_metadata` 参数添加到了 `app/handler/response_handler.py` 和 `app/service/chat/openai_chat_service.py` 中的相关函数。

`usage_metadata`(即上游 Gemini API 响应中的 `usageMetadata`,包含 `promptTokenCount`、`candidatesTokenCount`、`totalTokenCount` 等 token 计数)现在会从 Gemini API 响应中提取,并映射填充到标准化 OpenAI 响应格式的 `usage` 字段(prompt_tokens、completion_tokens、total_tokens)。

这样可以更准确地跟踪 OpenAI 聊天完成接口的 token 消耗。
This commit is contained in:
snaily
2025-05-06 18:32:47 +08:00
parent d4a3ed3a57
commit f13a4fba5f
2 changed files with 7 additions and 5 deletions

View File

@@ -29,7 +29,7 @@ class GeminiResponseHandler(ResponseHandler):
self.thinking_status = False
def handle_response(
self, response: Dict[str, Any], model: str, stream: bool = False
self, response: Dict[str, Any], model: str, stream: bool = False, usage_metadata: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
if stream:
return _handle_gemini_stream_response(response, model, stream)
@@ -59,7 +59,7 @@ def _handle_openai_stream_response(
def _handle_openai_normal_response(
response: Dict[str, Any], model: str, finish_reason: str
response: Dict[str, Any], model: str, finish_reason: str, usage_metadata: Optional[Dict[str, Any]]
) -> Dict[str, Any]:
text, tool_calls = _extract_result(
response, model, stream=False, gemini_format=False
@@ -80,7 +80,7 @@ def _handle_openai_normal_response(
"finish_reason": finish_reason,
}
],
"usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0},
"usage": {"prompt_tokens": usage_metadata.get("promptTokenCount", 0), "completion_tokens": usage_metadata.get("candidatesTokenCount",0), "total_tokens": usage_metadata.get("totalTokenCount", 0)},
}
@@ -98,10 +98,11 @@ class OpenAIResponseHandler(ResponseHandler):
model: str,
stream: bool = False,
finish_reason: str = None,
usage_metadata: Optional[Dict[str, Any]] = None,
) -> Optional[Dict[str, Any]]:
if stream:
return _handle_openai_stream_response(response, model, finish_reason)
return _handle_openai_normal_response(response, model, finish_reason)
return _handle_openai_normal_response(response, model, finish_reason, usage_metadata)
def handle_image_chat_response(
self, image_str: str, model: str, stream=False, finish_reason="stop"