diff --git a/app/services/chat/response_handler.py b/app/services/chat/response_handler.py
index 4c919a3..1d6ede4 100644
--- a/app/services/chat/response_handler.py
+++ b/app/services/chat/response_handler.py
@@ -44,67 +44,83 @@ class GeminiResponseHandler(ResponseHandler):
             candidate = response["candidates"][0]
             content = candidate.get("content", {})
             parts = content.get("parts", [])
-            if "thinking" in model:
-                if settings.SHOW_THINKING_PROCESS:
-                    if len(parts) == 1:
-                        if self.thinking_first:
-                            self.thinking_first = False
-                            self.thinking_status = True
-                            text = "> thinking\n\n" + parts[0].get("text")
-                        else:
-                            text = parts[0].get("text")
+            # if "thinking" in model:
+            #     if settings.SHOW_THINKING_PROCESS:
+            #         if len(parts) == 1:
+            #             if self.thinking_first:
+            #                 self.thinking_first = False
+            #                 self.thinking_status = True
+            #                 text = "> thinking\n\n" + parts[0].get("text")
+            #             else:
+            #                 text = parts[0].get("text")
 
-                    if len(parts) == 2:
-                        self.thinking_status = False
-                        if self.thinking_first:
-                            self.thinking_first = False
-                            text = (
-                                "> thinking\n\n"
-                                + parts[0].get("text")
-                                + "\n\n---\n> output\n\n"
-                                + parts[1].get("text")
-                            )
-                        else:
-                            text = (
-                                parts[0].get("text")
-                                + "\n\n---\n> output\n\n"
-                                + parts[1].get("text")
-                            )
-                else:
-                    if len(parts) == 1:
-                        if self.thinking_first:
-                            self.thinking_first = False
-                            self.thinking_status = True
-                            text = ""
-                        elif self.thinking_status:
-                            text = ""
-                        else:
-                            text = parts[0].get("text")
+            #         if len(parts) == 2:
+            #             self.thinking_status = False
+            #             if self.thinking_first:
+            #                 self.thinking_first = False
+            #                 text = (
+            #                     "> thinking\n\n"
+            #                     + parts[0].get("text")
+            #                     + "\n\n---\n> output\n\n"
+            #                     + parts[1].get("text")
+            #                 )
+            #             else:
+            #                 text = (
+            #                     parts[0].get("text")
+            #                     + "\n\n---\n> output\n\n"
+            #                     + parts[1].get("text")
+            #                 )
+            #     else:
+            #         if len(parts) == 1:
+            #             if self.thinking_first:
+            #                 self.thinking_first = False
+            #                 self.thinking_status = True
+            #                 text = ""
+            #             elif self.thinking_status:
+            #                 text = ""
+            #             else:
+            #                 text = parts[0].get("text")
 
-                    if len(parts) == 2:
-                        self.thinking_status = False
-                        if self.thinking_first:
-                            self.thinking_first = False
-                            text = parts[1].get("text")
-                        else:
-                            text = parts[1].get("text")
+            #         if len(parts) == 2:
+            #             self.thinking_status = False
+            #             if self.thinking_first:
+            #                 self.thinking_first = False
+            #                 text = parts[1].get("text")
+            #             else:
+            #                 text = parts[1].get("text")
+            # else:
+            #     if "text" in parts[0]:
+            #         text = parts[0].get("text")
+            #     elif "executableCode" in parts[0]:
+            #         text = _format_code_block(parts[0]["executableCode"])
+            #     elif "codeExecution" in parts[0]:
+            #         text = _format_code_block(parts[0]["codeExecution"])
+            #     elif "executableCodeResult" in parts[0]:
+            #         text = _format_execution_result(
+            #             parts[0]["executableCodeResult"]
+            #         )
+            #     elif "codeExecutionResult" in parts[0]:
+            #         text = _format_execution_result(
+            #             parts[0]["codeExecutionResult"]
+            #         )
+            #     else:
+            #         text = ""
+            if "text" in parts[0]:
+                text = parts[0].get("text")
+            elif "executableCode" in parts[0]:
+                text = _format_code_block(parts[0]["executableCode"])
+            elif "codeExecution" in parts[0]:
+                text = _format_code_block(parts[0]["codeExecution"])
+            elif "executableCodeResult" in parts[0]:
+                text = _format_execution_result(
+                    parts[0]["executableCodeResult"]
+                )
+            elif "codeExecutionResult" in parts[0]:
+                text = _format_execution_result(
+                    parts[0]["codeExecutionResult"]
+                )
             else:
-                if "text" in parts[0]:
-                    text = parts[0].get("text")
-                elif "executableCode" in parts[0]:
-                    text = _format_code_block(parts[0]["executableCode"])
-                elif "codeExecution" in parts[0]:
-                    text = _format_code_block(parts[0]["codeExecution"])
-                elif "executableCodeResult" in parts[0]:
-                    text = _format_execution_result(
-                        parts[0]["executableCodeResult"]
-                    )
-                elif "codeExecutionResult" in parts[0]:
-                    text = _format_execution_result(
-                        parts[0]["codeExecutionResult"]
-                    )
-                else:
-                    text = ""
+                text = ""
             text = _add_search_link_text(model, candidate, text)
         else:
             if response.get("candidates"):
@@ -166,8 +182,7 @@ class OpenAIResponseHandler(ResponseHandler):
                 "finish_reason": finish_reason
             }]
         }
-
-
+
     def _handle_normal_response(self, response: Dict[str, Any], model: str, finish_reason: str) -> Dict[str, Any]:
         text = self._extract_text(response, model, stream=False)
         return {
@@ -197,67 +212,84 @@ class OpenAIResponseHandler(ResponseHandler):
             candidate = response["candidates"][0]
             content = candidate.get("content", {})
             parts = content.get("parts", [])
-            if "thinking" in model:
-                if settings.SHOW_THINKING_PROCESS:
-                    if len(parts) == 1:
-                        if self.thinking_first:
-                            self.thinking_first = False
-                            self.thinking_status = True
-                            text = "> thinking\n\n" + parts[0].get("text")
-                        else:
-                            text = parts[0].get("text")
+            # if "thinking" in model:
+            #     if settings.SHOW_THINKING_PROCESS:
+            #         if len(parts) == 1:
+            #             if self.thinking_first:
+            #                 self.thinking_first = False
+            #                 self.thinking_status = True
+            #                 text = "> thinking\n\n" + parts[0].get("text")
+            #             else:
+            #                 text = parts[0].get("text")
 
-                    if len(parts) == 2:
-                        self.thinking_status = False
-                        if self.thinking_first:
-                            self.thinking_first = False
-                            text = (
-                                "> thinking\n\n"
-                                + parts[0].get("text")
-                                + "\n\n---\n> output\n\n"
-                                + parts[1].get("text")
-                            )
-                        else:
-                            text = (
-                                parts[0].get("text")
-                                + "\n\n---\n> output\n\n"
-                                + parts[1].get("text")
-                            )
-                else:
-                    if len(parts) == 1:
-                        if self.thinking_first:
-                            self.thinking_first = False
-                            self.thinking_status = True
-                            text = ""
-                        elif self.thinking_status:
-                            text = ""
-                        else:
-                            text = parts[0].get("text")
+            #         if len(parts) == 2:
+            #             self.thinking_status = False
+            #             if self.thinking_first:
+            #                 self.thinking_first = False
+            #                 text = (
+            #                     "> thinking\n\n"
+            #                     + parts[0].get("text")
+            #                     + "\n\n---\n> output\n\n"
+            #                     + parts[1].get("text")
+            #                 )
+            #             else:
+            #                 text = (
+            #                     parts[0].get("text")
+            #                     + "\n\n---\n> output\n\n"
+            #                     + parts[1].get("text")
+            #                 )
+            #     else:
+            #         if len(parts) == 1:
+            #             if self.thinking_first:
+            #                 self.thinking_first = False
+            #                 self.thinking_status = True
+            #                 text = ""
+            #             elif self.thinking_status:
+            #                 text = ""
+            #             else:
+            #                 text = parts[0].get("text")
 
-                    if len(parts) == 2:
-                        self.thinking_status = False
-                        if self.thinking_first:
-                            self.thinking_first = False
-                            text = parts[1].get("text")
-                        else:
-                            text = parts[1].get("text")
+            #         if len(parts) == 2:
+            #             self.thinking_status = False
+            #             if self.thinking_first:
+            #                 self.thinking_first = False
+            #                 text = parts[1].get("text")
+            #             else:
+            #                 text = parts[1].get("text")
+            # else:
+            #     if "text" in parts[0]:
+            #         text = parts[0].get("text")
+            #     elif "executableCode" in parts[0]:
+            #         text = _format_code_block(parts[0]["executableCode"])
+            #     elif "codeExecution" in parts[0]:
+            #         text = _format_code_block(parts[0]["codeExecution"])
+            #     elif "executableCodeResult" in parts[0]:
+            #         text = _format_execution_result(
+            #             parts[0]["executableCodeResult"]
+            #         )
+            #     elif "codeExecutionResult" in parts[0]:
+            #         text = _format_execution_result(
+            #             parts[0]["codeExecutionResult"]
+            #         )
+            #     else:
+            #         text = ""
+            # text = _add_search_link_text(model, candidate, text)
+            if "text" in parts[0]:
+                text = parts[0].get("text")
parts[0].get("text") + elif "executableCode" in parts[0]: + text = _format_code_block(parts[0]["executableCode"]) + elif "codeExecution" in parts[0]: + text = _format_code_block(parts[0]["codeExecution"]) + elif "executableCodeResult" in parts[0]: + text = _format_execution_result( + parts[0]["executableCodeResult"] + ) + elif "codeExecutionResult" in parts[0]: + text = _format_execution_result( + parts[0]["codeExecutionResult"] + ) else: - if "text" in parts[0]: - text = parts[0].get("text") - elif "executableCode" in parts[0]: - text = _format_code_block(parts[0]["executableCode"]) - elif "codeExecution" in parts[0]: - text = _format_code_block(parts[0]["codeExecution"]) - elif "executableCodeResult" in parts[0]: - text = _format_execution_result( - parts[0]["executableCodeResult"] - ) - elif "codeExecutionResult" in parts[0]: - text = _format_execution_result( - parts[0]["codeExecutionResult"] - ) - else: - text = "" + text = "" text = _add_search_link_text(model, candidate, text) else: if response.get("candidates"): diff --git a/app/services/gemini_chat_service.py b/app/services/gemini_chat_service.py index e83ba49..0c068db 100644 --- a/app/services/gemini_chat_service.py +++ b/app/services/gemini_chat_service.py @@ -32,6 +32,7 @@ class GeminiChatService: while retries < max_retries: try: async for line in self.api_client.stream_generate_content(payload, model, api_key): + # print(line) if line.startswith("data:"): line = line[6:] line = json.dumps(self.response_handler.handle_response(json.loads(line), model, stream=True)) @@ -80,7 +81,7 @@ class GeminiChatService: def _get_safety_settings(self, model: str) -> List[Dict[str, str]]: """获取安全设置""" - if "2.0" in model and model != "gemini-2.0-flash-thinking-exp": + if "2.0" in model and "gemini-2.0-flash-thinking-exp" not in model: return [ {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "OFF"}, {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "OFF"}, diff --git a/app/services/openai_chat_service.py b/app/services/openai_chat_service.py index 3750e64..7976c83 100644 --- a/app/services/openai_chat_service.py +++ b/app/services/openai_chat_service.py @@ -63,6 +63,7 @@ class OpenAIChatService: while retries < max_retries: try: async for line in self.api_client.stream_generate_content(payload, model, api_key): + # print(line) if line.startswith("data:"): chunk = json.loads(line[6:]) openai_chunk = self.response_handler.handle_response( @@ -127,7 +128,7 @@ class OpenAIChatService: def _get_safety_settings(self, model: str) -> List[Dict[str, str]]: """获取安全设置""" - if "2.0" in model and model != "gemini-2.0-flash-thinking-exp": + if "2.0" in model and "gemini-2.0-flash-thinking-exp" not in model: return [ {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "OFF"}, {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "OFF"},