From f13a4fba5f5aef9f5c258cb978aa3fc271a90d88 Mon Sep 17 00:00:00 2001
From: snaily
Date: Tue, 6 May 2025 18:32:47 +0800
Subject: [PATCH] feat: integrate usage_metadata into OpenAI chat responses to
 track token usage
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This change adds a `usage_metadata` parameter to the relevant functions in
`app/handler/response_handler.py` and `app/service/chat/openai_chat_service.py`.

`usage_metadata` is now extracted from the upstream Gemini response
(`usageMetadata`, carrying promptTokenCount, candidatesTokenCount, and
totalTokenCount) and used to populate the `usage` field (prompt_tokens,
completion_tokens, total_tokens) of the OpenAI-compatible response format.

This allows token consumption on the OpenAI chat completions endpoint to be
tracked accurately instead of always reporting zeros.

---
 app/handler/response_handler.py         | 9 +++++----
 app/service/chat/openai_chat_service.py | 3 ++-
 2 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/app/handler/response_handler.py b/app/handler/response_handler.py
index 996b1ee..7c8c0a1 100644
--- a/app/handler/response_handler.py
+++ b/app/handler/response_handler.py
@@ -29,7 +29,7 @@ class GeminiResponseHandler(ResponseHandler):
         self.thinking_status = False
 
     def handle_response(
-        self, response: Dict[str, Any], model: str, stream: bool = False
+        self, response: Dict[str, Any], model: str, stream: bool = False, usage_metadata: Optional[Dict[str, Any]] = None
     ) -> Dict[str, Any]:
         if stream:
             return _handle_gemini_stream_response(response, model, stream)
@@ -59,7 +59,7 @@ def _handle_openai_stream_response(
 
 
 def _handle_openai_normal_response(
-    response: Dict[str, Any], model: str, finish_reason: str
+    response: Dict[str, Any], model: str, finish_reason: str, usage_metadata: Optional[Dict[str, Any]] = None
 ) -> Dict[str, Any]:
     text, tool_calls = _extract_result(
         response, model, stream=False, gemini_format=False
@@ -80,7 +80,7 @@ def _handle_openai_normal_response(
                 "finish_reason": finish_reason,
             }
         ],
-        "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0},
+        "usage": {"prompt_tokens": (usage_metadata or {}).get("promptTokenCount", 0), "completion_tokens": (usage_metadata or {}).get("candidatesTokenCount", 0), "total_tokens": (usage_metadata or {}).get("totalTokenCount", 0)},
     }
 
 
@@ -98,10 +98,11 @@ class OpenAIResponseHandler(ResponseHandler):
         model: str,
         stream: bool = False,
         finish_reason: str = None,
+        usage_metadata: Optional[Dict[str, Any]] = None,
     ) -> Optional[Dict[str, Any]]:
         if stream:
             return _handle_openai_stream_response(response, model, finish_reason)
-        return _handle_openai_normal_response(response, model, finish_reason)
+        return _handle_openai_normal_response(response, model, finish_reason, usage_metadata)
 
     def handle_image_chat_response(
         self, image_str: str, model: str, stream=False, finish_reason="stop"
diff --git a/app/service/chat/openai_chat_service.py b/app/service/chat/openai_chat_service.py
index d551bdf..33c5f8b 100644
--- a/app/service/chat/openai_chat_service.py
+++ b/app/service/chat/openai_chat_service.py
@@ -204,10 +204,11 @@ class OpenAIChatService:
         response = None
         try:
             response = await self.api_client.generate_content(payload, model, api_key)
+            usage_metadata = response.get("usageMetadata", {})
             is_success = True
             status_code = 200
             return self.response_handler.handle_response(
-                response, model, stream=False, finish_reason="stop"
+                response, model, stream=False, finish_reason="stop", usage_metadata=usage_metadata
             )
         except Exception as e:
             is_success = False
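
Note: below is a minimal, self-contained sketch of the usageMetadata-to-usage
mapping this patch wires through the handlers. The helper name `_map_usage`
and the sample token counts are illustrative, not part of the patch; the key
names mirror those used in the diff above. The `or {}` guard matches the
handler change and keeps the mapping safe when the upstream response carries
no usageMetadata at all.

    from typing import Any, Dict, Optional

    def _map_usage(usage_metadata: Optional[Dict[str, Any]]) -> Dict[str, int]:
        # Translate Gemini-style usageMetadata keys into an OpenAI-style
        # usage block; missing keys or absent metadata degrade to zeros.
        meta = usage_metadata or {}
        return {
            "prompt_tokens": meta.get("promptTokenCount", 0),
            "completion_tokens": meta.get("candidatesTokenCount", 0),
            "total_tokens": meta.get("totalTokenCount", 0),
        }

    # Typical upstream payload (values illustrative):
    print(_map_usage({"promptTokenCount": 12, "candidatesTokenCount": 34, "totalTokenCount": 46}))
    # {'prompt_tokens': 12, 'completion_tokens': 34, 'total_tokens': 46}

    # No metadata at all -> zeros rather than an AttributeError:
    print(_map_usage(None))
    # {'prompt_tokens': 0, 'completion_tokens': 0, 'total_tokens': 0}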