refactor: unify error handling and streamline route and service structure

Main changes:
- Add `app/handler/error_handler.py`, introducing the `handle_route_errors` async context manager for unified error handling and logging in routes.
- Apply `handle_route_errors` in `gemini_routes`, `openai_routes`, and `openai_compatiable_routes`, removing redundant try-except blocks and simplifying route logic.
- Move `OpenAICompatiableService` into `app/service/openai_compatiable/`.
- Move `StatsService` into `app/service/stats/` and update the affected import paths.
- Fix the field name used when handling Gemini API responses in `response_handler`: `inlineData` (was `inline_data`).
- Fix `openai_routes` and `openai_compatiable_routes` not using the paid API key for image-generation chat models (e.g. imagen3-chat).
- Change `httpx` to `httpx[socks]` in `requirements.txt` to add SOCKS proxy support.
snaily
2025-05-02 01:20:05 +08:00
parent 7c9b721164
commit 2072f54ca1
11 changed files with 216 additions and 204 deletions

View File

@@ -0,0 +1,32 @@
from contextlib import asynccontextmanager
from fastapi import HTTPException
import logging


@asynccontextmanager
async def handle_route_errors(logger: logging.Logger, operation_name: str, success_message: str = None, failure_message: str = None):
    """
    An async context manager that unifies common error handling and logging for FastAPI routes.

    Args:
        logger: Logger instance used for logging.
        operation_name: name of the operation, used in log messages and error details.
        success_message: custom message logged on success (optional).
        failure_message: custom message logged on failure (optional).
    """
    default_success_msg = f"{operation_name} request successful"
    default_failure_msg = f"{operation_name} request failed"
    logger.info("-" * 50 + operation_name + "-" * 50)
    try:
        yield
        logger.info(success_message or default_success_msg)
    except HTTPException as http_exc:
        # Already an HTTPException: re-raise directly, preserving the original status code and detail
        logger.error(f"{failure_message or default_failure_msg}: {http_exc.detail} (Status: {http_exc.status_code})")
        raise http_exc
    except Exception as e:
        # For any other exception, log the error and raise a standard 500
        logger.error(f"{failure_message or default_failure_msg}: {str(e)}")
        raise HTTPException(
            status_code=500, detail=f"Internal server error during {operation_name}"
        ) from e
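
A route then wraps its body in the manager; a minimal usage sketch (the `/ping` route itself is hypothetical, the import path is from this commit):

import logging

from fastapi import APIRouter

from app.handler.error_handler import handle_route_errors

logger = logging.getLogger(__name__)
router = APIRouter()


@router.get("/ping")
async def ping():
    # HTTPExceptions raised inside the block are logged and re-raised unchanged;
    # any other exception is logged and converted into a standard 500.
    async with handle_route_errors(logger, "ping"):
        return {"status": "ok"}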

View File

@@ -172,7 +172,7 @@ def _extract_result(
text = _format_execution_result(parts[0]["executableCodeResult"])
elif "codeExecutionResult" in parts[0]:
text = _format_execution_result(parts[0]["codeExecutionResult"])
elif "inline_data" in parts[0]:
elif "inlineData" in parts[0]:
text = _extract_image_data(parts[0])
else:
text = ""
@@ -203,7 +203,7 @@ def _extract_result(
for part in candidate["content"]["parts"]:
if "text" in part:
text += part["text"]
elif "inline_data" in part:
elif "inlineData" in part:
text += _extract_image_data(part)
text = _add_search_link_text(model, candidate, text)
@@ -233,7 +233,7 @@ def _extract_image_data(part: dict) -> str:
)
current_date = time.strftime("%Y/%m/%d")
filename = f"{current_date}/{uuid.uuid4().hex[:8]}.png"
base64_data = part["inline_data"]["data"]
base64_data = part["inlineData"]["data"]
# decode base64_data into a bytes object
bytes_data = base64.b64decode(base64_data)
upload_response = image_uploader.upload(bytes_data, filename)
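
For reference, the Gemini REST API returns image parts with camelCase keys, which is what the corrected lookup matches; a minimal sketch of the part shape (field values are illustrative):

import base64

part = {
    "inlineData": {
        "mimeType": "image/png",
        "data": "iVBORw0KGgo=",  # truncated base64, for illustration only
    }
}
if "inlineData" in part:
    image_bytes = base64.b64decode(part["inlineData"]["data"])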

View File

@@ -1,15 +1,16 @@
from fastapi import APIRouter, Depends, HTTPException
from fastapi.responses import StreamingResponse, JSONResponse
from copy import deepcopy
import asyncio
from app.config.config import settings
from app.log.logger import get_gemini_logger
from app.core.security import SecurityService
import asyncio # import asyncio
from app.domain.gemini_models import GeminiContent, GeminiRequest, ResetSelectedKeysRequest, VerifySelectedKeysRequest # add imports
from app.service.chat.gemini_chat_service import GeminiChatService
from app.service.key.key_manager import KeyManager, get_key_manager_instance
from app.service.model.model_service import ModelService
from app.handler.retry_handler import RetryHandler
from app.handler.error_handler import handle_route_errors
from app.core.constants import API_VERSION
# Router setup
@@ -43,62 +44,62 @@ async def list_models(
_=Depends(security_service.verify_key_or_goog_api_key),
key_manager: KeyManager = Depends(get_key_manager)
):
"""获取可用的Gemini模型列表"""
logger.info("-" * 50 + "list_gemini_models" + "-" * 50)
"""获取可用的 Gemini 模型列表,并根据配置添加衍生模型(搜索、图像、非思考)。"""
operation_name = "list_gemini_models"
# 注意:此路由的错误处理相对复杂,涉及模型查找和修改,
# 使用通用错误处理可能隐藏部分逻辑错误。暂时保留原有结构,
# 但如果需要更统一的处理,可以将内部逻辑封装并应用 handle_route_errors。
# 这里仅添加日志分隔符。
logger.info("-" * 50 + operation_name + "-" * 50)
logger.info("Handling Gemini models list request")
api_key = await key_manager.get_first_valid_key()
logger.info(f"Using API key: {api_key}")
models_json = model_service.get_gemini_models(api_key)
model_mapping = {x.get("name", "").split("/", maxsplit=1)[1]: x for x in models_json["models"]}
# add search models
if settings.SEARCH_MODELS:
for name in settings.SEARCH_MODELS:
model = model_mapping.get(name)
try:
api_key = await key_manager.get_first_valid_key()
if not api_key:
raise HTTPException(status_code=503, detail="No valid API keys available to fetch models.")
logger.info(f"Using API key: {api_key}")
# assumes get_gemini_models is synchronous; await it if it is not
models_data = model_service.get_gemini_models(api_key)
if not models_data or "models" not in models_data:
raise HTTPException(status_code=500, detail="Failed to fetch base models list.")
models_json = deepcopy(models_data) # work on a copy to avoid mutating the original cache
model_mapping = {x.get("name", "").split("/", maxsplit=1)[-1]: x for x in models_json.get("models", [])}
def add_derived_model(base_name, suffix, display_suffix):
model = model_mapping.get(base_name)
if not model:
continue
logger.warning(f"Base model '{base_name}' not found for derived model '{suffix}'.")
return
item = deepcopy(model)
item["name"] = f"models/{name}-search"
display_name = f'{item.get("displayName")} For Search'
item["name"] = f"models/{base_name}{suffix}"
display_name = f'{item.get("displayName", base_name)}{display_suffix}'
item["displayName"] = display_name
item["description"] = display_name
item["description"] = display_name # 使用 display_name 作为描述
models_json["models"].append(item)
# add image-generation models
if settings.IMAGE_MODELS:
for name in settings.IMAGE_MODELS:
model = model_mapping.get(name)
if not model:
continue
item = deepcopy(model)
item["name"] = f"models/{name}-image"
display_name = f'{item.get("displayName")} For Image'
item["displayName"] = display_name
item["description"] = display_name
models_json["models"].append(item)
# add non-thinking variants of the thinking models
if settings.THINKING_MODELS:
for name in settings.THINKING_MODELS:
model = model_mapping.get(name)
if not model:
continue
item = deepcopy(model)
item["name"] = f"models/{name}-non-thinking"
display_name = f'{item.get("displayName")} Non Thinking'
item["displayName"] = display_name
item["description"] = display_name
models_json["models"].append(item)
return models_json
# add derived models
if settings.SEARCH_MODELS:
for name in settings.SEARCH_MODELS:
add_derived_model(name, "-search", " For Search")
if settings.IMAGE_MODELS:
for name in settings.IMAGE_MODELS:
add_derived_model(name, "-image", " For Image")
if settings.THINKING_MODELS:
for name in settings.THINKING_MODELS:
add_derived_model(name, "-non-thinking", " Non Thinking")
logger.info("Gemini models list request successful")
return models_json
except HTTPException as http_exc:
# re-raise known HTTP exceptions
raise http_exc
except Exception as e:
logger.error(f"Error getting Gemini models list: {str(e)}")
raise HTTPException(
status_code=500, detail="Internal server error while fetching Gemini models list"
) from e
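
To illustrate the refactor above, the new add_derived_model helper clones a base entry into a suffixed variant; a self-contained sketch with a made-up model entry (real entries come from model_service.get_gemini_models()):

from copy import deepcopy

models_json = {"models": [{"name": "models/gemini-2.0-flash", "displayName": "Gemini 2.0 Flash"}]}
model_mapping = {m["name"].split("/", maxsplit=1)[-1]: m for m in models_json["models"]}

def add_derived_model(base_name, suffix, display_suffix):
    model = model_mapping.get(base_name)
    if not model:
        return  # the route logs a warning here instead
    item = deepcopy(model)
    item["name"] = f"models/{base_name}{suffix}"
    item["displayName"] = f'{item.get("displayName", base_name)}{display_suffix}'
    item["description"] = item["displayName"]
    models_json["models"].append(item)

add_derived_model("gemini-2.0-flash", "-search", " For Search")
# models_json["models"] now also holds "models/gemini-2.0-flash-search"
# with displayName "Gemini 2.0 Flash For Search".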
@router.post("/models/{model_name}:generateContent")
@@ -112,25 +113,22 @@ async def generate_content(
key_manager: KeyManager = Depends(get_key_manager),
chat_service: GeminiChatService = Depends(get_chat_service)
):
"""非流式生成内容"""
logger.info("-" * 50 + "gemini_generate_content" + "-" * 50)
logger.info(f"Handling Gemini content generation request for model: {model_name}")
logger.debug(f"Request: \n{request.model_dump_json(indent=2)}")
logger.info(f"Using API key: {api_key}")
if not model_service.check_model_support(model_name):
raise HTTPException(status_code=400, detail=f"Model {model_name} is not supported")
try:
"""处理 Gemini 非流式内容生成请求。"""
operation_name = "gemini_generate_content"
async with handle_route_errors(logger, operation_name, failure_message="Content generation failed"):
logger.info(f"Handling Gemini content generation request for model: {model_name}")
logger.debug(f"Request: \n{request.model_dump_json(indent=2)}")
logger.info(f"Using API key: {api_key}")
if not model_service.check_model_support(model_name):
raise HTTPException(status_code=400, detail=f"Model {model_name} is not supported")
response = await chat_service.generate_content(
model=model_name,
request=request,
api_key=api_key
)
return response
except Exception as e:
logger.error(f"Chat completion failed after retries: {str(e)}")
raise HTTPException(status_code=500, detail="Chat completion failed") from e
@router.post("/models/{model_name}:streamGenerateContent")
@@ -144,25 +142,24 @@ async def stream_generate_content(
key_manager: KeyManager = Depends(get_key_manager),
chat_service: GeminiChatService = Depends(get_chat_service)
):
"""流式生成内容"""
logger.info("-" * 50 + "gemini_stream_generate_content" + "-" * 50)
logger.info(f"Handling Gemini streaming content generation for model: {model_name}")
logger.debug(f"Request: \n{request.model_dump_json(indent=2)}")
logger.info(f"Using API key: {api_key}")
if not model_service.check_model_support(model_name):
raise HTTPException(status_code=400, detail=f"Model {model_name} is not supported")
try:
"""处理 Gemini 流式内容生成请求。"""
operation_name = "gemini_stream_generate_content"
# 流式请求的成功/失败日志在流处理中更复杂,这里仅用上下文管理器处理启动错误
async with handle_route_errors(logger, operation_name, failure_message="Streaming request initiation failed"):
logger.info(f"Handling Gemini streaming content generation for model: {model_name}")
logger.debug(f"Request: \n{request.model_dump_json(indent=2)}")
logger.info(f"Using API key: {api_key}")
if not model_service.check_model_support(model_name):
raise HTTPException(status_code=400, detail=f"Model {model_name} is not supported")
response_stream = chat_service.stream_generate_content(
model=model_name,
request=request,
api_key=api_key
)
# note: errors in the stream itself must be handled in the service layer or during iteration; only the streaming response is returned here
return StreamingResponse(response_stream, media_type="text/event-stream")
except Exception as e:
logger.error(f"Streaming request failed: {str(e)}")
raise HTTPException(status_code=500, detail="Streaming request failed") from e
@router.post("/reset-all-fail-counts")
async def reset_all_key_fail_counts(key_type: str = None, key_manager: KeyManager = Depends(get_key_manager)):

View File

@@ -1,4 +1,4 @@
from fastapi import APIRouter, Depends, HTTPException
from fastapi import APIRouter, Depends
from fastapi.responses import StreamingResponse
from app.config.config import settings
@@ -9,9 +9,10 @@ from app.domain.openai_models import (
ImageGenerationRequest,
)
from app.handler.retry_handler import RetryHandler
from app.handler.error_handler import handle_route_errors
from app.log.logger import get_openai_compatible_logger
from app.service.key.key_manager import KeyManager, get_key_manager_instance
from app.service.openai_compatiable_service import OpenAICompatiableService
from app.service.openai_compatiable.openai_compatiable_service import OpenAICompatiableService
router = APIRouter()
@@ -41,17 +42,13 @@ async def list_models(
key_manager: KeyManager = Depends(get_key_manager),
openai_service: OpenAICompatiableService = Depends(get_openai_service),
):
logger.info("-" * 50 + "list_models" + "-" * 50)
logger.info("Handling models list request")
api_key = await key_manager.get_first_valid_key()
logger.info(f"Using API key: {api_key}")
try:
"""获取可用模型列表。"""
operation_name = "list_models"
async with handle_route_errors(logger, operation_name):
logger.info("Handling models list request")
api_key = await key_manager.get_first_valid_key()
logger.info(f"Using API key: {api_key}")
return await openai_service.get_models(api_key)
except Exception as e:
logger.error(f"Error getting models list: {str(e)}")
raise HTTPException(
status_code=500, detail="Internal server error while fetching models list"
) from e
@router.post("/openai/v1/chat/completions")
@@ -63,28 +60,32 @@ async def chat_completion(
key_manager: KeyManager = Depends(get_key_manager),
openai_service: OpenAICompatiableService = Depends(get_openai_service),
):
# if the model is imagen3, use the paid_key
if request.model == f"{settings.CREATE_IMAGE_MODEL}-chat":
api_key = await key_manager.get_paid_key()
logger.info("-" * 50 + "chat_completion" + "-" * 50)
logger.info(f"Handling chat completion request for model: {request.model}")
logger.debug(f"Request: \n{request.model_dump_json(indent=2)}")
logger.info(f"Using API key: {api_key}")
"""处理聊天补全请求,支持流式响应和特定模型切换。"""
operation_name = "chat_completion"
# 检查是否为图像生成相关的聊天模型,如果是,则使用付费密钥
is_image_chat = request.model == f"{settings.CREATE_IMAGE_MODEL}-chat"
current_api_key = api_key # 保存原始key可能是普通key
if is_image_chat:
current_api_key = await key_manager.get_paid_key() # 获取付费密钥
try:
# 如果model是imagen3,使用paid_key
if request.model == f"{settings.CREATE_IMAGE_MODEL}-chat":
response = await openai_service.create_image_chat_completion(request, api_key)
async with handle_route_errors(logger, operation_name):
logger.info(f"Handling chat completion request for model: {request.model}")
logger.debug(f"Request: \n{request.model_dump_json(indent=2)}")
logger.info(f"Using API key: {current_api_key}") # 使用 current_api_key
if is_image_chat:
# 图像生成聊天,调用特定服务,不处理流式
response = await openai_service.create_image_chat_completion(request, current_api_key)
return response # 直接返回结果
else:
response = await openai_service.create_chat_completion(request, api_key)
# 处理流式响应
if request.stream:
return StreamingResponse(response, media_type="text/event-stream")
logger.info("Chat completion request successful")
return response
except Exception as e:
logger.error(f"Chat completion failed after retries: {str(e)}")
raise HTTPException(status_code=500, detail="Chat completion failed") from e
# regular chat completion
response = await openai_service.create_chat_completion(request, current_api_key)
# handle streaming responses
if request.stream:
# assumes openai_service.create_chat_completion returns an async generator when streaming
return StreamingResponse(response, media_type="text/event-stream")
# non-streaming: return the result directly
return response
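
The key-selection pattern above can be distilled into a small helper; a hypothetical sketch (resolve_api_key is not part of this commit, and key_manager is assumed to expose get_paid_key() as in the routes):

async def resolve_api_key(model: str, default_key: str, key_manager, image_model: str) -> str:
    # Return the paid key for image-generation chat models (e.g. imagen3-chat),
    # otherwise keep the key injected by the route dependency.
    if model == f"{image_model}-chat":
        return await key_manager.get_paid_key()
    return default_key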
@router.post("/openai/v1/images/generations")
@@ -93,19 +94,13 @@ async def generate_image(
_=Depends(security_service.verify_authorization),
openai_service: OpenAICompatiableService = Depends(get_openai_service),
):
logger.info("-" * 50 + "generate_image" + "-" * 50)
logger.info(f"Handling image generation request for prompt: {request.prompt}")
request.model = settings.CREATE_IMAGE_MODEL
try:
response = await openai_service.generate_images(request)
logger.info("Image generation request successful")
return response
except Exception as e:
logger.error(f"Image generation request failed: {str(e)}")
raise HTTPException(
status_code=500, detail="Image generation request failed"
) from e
"""处理图像生成请求。"""
operation_name = "generate_image"
async with handle_route_errors(logger, operation_name):
logger.info(f"Handling image generation request for prompt: {request.prompt}")
# force the configured model so the request carries the correct model info
request.model = settings.CREATE_IMAGE_MODEL
return await openai_service.generate_images(request)
@router.post("/openai/v1/embeddings")
@@ -115,16 +110,12 @@ async def embedding(
key_manager: KeyManager = Depends(get_key_manager),
openai_service: OpenAICompatiableService = Depends(get_openai_service),
):
logger.info("-" * 50 + "embedding" + "-" * 50)
logger.info(f"Handling embedding request for model: {request.model}")
api_key = await key_manager.get_next_working_key()
logger.info(f"Using API key: {api_key}")
try:
response = await openai_service.create_embeddings(
"""处理文本嵌入请求。"""
operation_name = "embedding"
async with handle_route_errors(logger, operation_name):
logger.info(f"Handling embedding request for model: {request.model}")
api_key = await key_manager.get_next_working_key()
logger.info(f"Using API key: {api_key}")
return await openai_service.create_embeddings(
input_text=request.input, model=request.model, api_key=api_key
)
logger.info("Embedding request successful")
return response
except Exception as e:
logger.error(f"Embedding request failed: {str(e)}")
raise HTTPException(status_code=500, detail="Embedding request failed") from e

View File

@@ -9,6 +9,7 @@ from app.domain.openai_models import (
ImageGenerationRequest,
)
from app.handler.retry_handler import RetryHandler
from app.handler.error_handler import handle_route_errors # import the shared error handler
from app.log.logger import get_openai_logger
from app.service.chat.openai_chat_service import OpenAIChatService
from app.service.embedding.embedding_service import EmbeddingService
@@ -47,17 +48,15 @@ async def list_models(
_=Depends(security_service.verify_authorization),
key_manager: KeyManager = Depends(get_key_manager),
):
logger.info("-" * 50 + "list_models" + "-" * 50)
logger.info("Handling models list request")
api_key = await key_manager.get_first_valid_key()
logger.info(f"Using API key: {api_key}")
try:
"""获取可用的 OpenAI 模型列表 (兼容 Gemini 和 OpenAI)。"""
operation_name = "list_models"
async with handle_route_errors(logger, operation_name):
logger.info("Handling models list request")
api_key = await key_manager.get_first_valid_key()
logger.info(f"Using API key: {api_key}")
# note: assumes model_service.get_gemini_openai_models is a synchronous function;
# if it is asynchronous, it needs to be awaited
return model_service.get_gemini_openai_models(api_key)
except Exception as e:
logger.error(f"Error getting models list: {str(e)}")
raise HTTPException(
status_code=500, detail="Internal server error while fetching models list"
) from e
@router.post("/v1/chat/completions")
@@ -70,33 +69,38 @@ async def chat_completion(
key_manager: KeyManager = Depends(get_key_manager), # keep key_manager for fetching the paid_key
chat_service: OpenAIChatService = Depends(get_openai_chat_service),
):
# if the model is imagen3, use the paid_key
if request.model == f"{settings.CREATE_IMAGE_MODEL}-chat":
api_key = await key_manager.get_paid_key()
logger.info("-" * 50 + "chat_completion" + "-" * 50)
logger.info(f"Handling chat completion request for model: {request.model}")
logger.debug(f"Request: \n{request.model_dump_json(indent=2)}")
logger.info(f"Using API key: {api_key}")
"""处理 OpenAI 聊天补全请求,支持流式响应和特定模型切换。"""
operation_name = "chat_completion"
# 检查是否为图像生成相关的聊天模型
is_image_chat = request.model == f"{settings.CREATE_IMAGE_MODEL}-chat"
current_api_key = api_key # 保存原始 key
if is_image_chat:
current_api_key = await key_manager.get_paid_key() # 获取付费密钥
if not model_service.check_model_support(request.model):
raise HTTPException(
status_code=400, detail=f"Model {request.model} is not supported"
)
async with handle_route_errors(logger, operation_name):
logger.info(f"Handling chat completion request for model: {request.model}")
logger.debug(f"Request: \n{request.model_dump_json(indent=2)}")
logger.info(f"Using API key: {current_api_key}")
try:
# if the model is imagen3, use the paid_key
if request.model == f"{settings.CREATE_IMAGE_MODEL}-chat":
response = await chat_service.create_image_chat_completion(request, api_key)
# the model-support check belongs inside the error-handling block so failures are caught and logged
if not model_service.check_model_support(request.model):
# raise HTTPException; it will be caught and logged by handle_route_errors
raise HTTPException(
status_code=400, detail=f"Model {request.model} is not supported"
)
if is_image_chat:
# image-generation chat
response = await chat_service.create_image_chat_completion(request, current_api_key)
return response # return directly; streaming is not handled
else:
response = await chat_service.create_chat_completion(request, api_key)
# handle streaming responses
if request.stream:
return StreamingResponse(response, media_type="text/event-stream")
logger.info("Chat completion request successful")
return response
except Exception as e:
logger.error(f"Chat completion failed after retries: {str(e)}")
raise HTTPException(status_code=500, detail="Chat completion failed") from e
# regular chat completion
response = await chat_service.create_chat_completion(request, current_api_key)
# handle streaming responses
if request.stream:
return StreamingResponse(response, media_type="text/event-stream")
# non-streaming: return the result directly
return response
@router.post("/v1/images/generations")
@@ -105,18 +109,14 @@ async def generate_image(
request: ImageGenerationRequest,
_=Depends(security_service.verify_authorization),
):
logger.info("-" * 50 + "generate_image" + "-" * 50)
logger.info(f"Handling image generation request for prompt: {request.prompt}")
try:
"""处理 OpenAI 图像生成请求。"""
operation_name = "generate_image"
async with handle_route_errors(logger, operation_name):
logger.info(f"Handling image generation request for prompt: {request.prompt}")
# note: assumes image_create_service.generate_images is a synchronous function;
# if it is asynchronous, it needs to be awaited
response = image_create_service.generate_images(request)
logger.info("Image generation request successful")
return response
except Exception as e:
logger.error(f"Image generation request failed: {str(e)}")
raise HTTPException(
status_code=500, detail="Image generation request failed"
) from e
@router.post("/v1/embeddings")
@@ -126,19 +126,16 @@ async def embedding(
_=Depends(security_service.verify_authorization),
key_manager: KeyManager = Depends(get_key_manager),
):
logger.info("-" * 50 + "embedding" + "-" * 50)
logger.info(f"Handling embedding request for model: {request.model}")
api_key = await key_manager.get_next_working_key()
logger.info(f"Using API key: {api_key}")
try:
"""处理 OpenAI 文本嵌入请求。"""
operation_name = "embedding"
async with handle_route_errors(logger, operation_name):
logger.info(f"Handling embedding request for model: {request.model}")
api_key = await key_manager.get_next_working_key()
logger.info(f"Using API key: {api_key}")
response = await embedding_service.create_embedding(
input_text=request.input, model=request.model, api_key=api_key
)
logger.info("Embedding request successful")
return response
except Exception as e:
logger.error(f"Embedding request failed: {str(e)}")
raise HTTPException(status_code=500, detail="Embedding request failed") from e
@router.get("/v1/keys/list")
@@ -147,10 +144,10 @@ async def get_keys_list(
_=Depends(security_service.verify_auth_token),
key_manager: KeyManager = Depends(get_key_manager),
):
"""获取有效和无效的API key列表"""
logger.info("-" * 50 + "get_keys_list" + "-" * 50)
logger.info("Handling keys list request")
try:
"""获取有效和无效的API key列表 (需要管理 Token 认证)。"""
operation_name = "get_keys_list"
async with handle_route_errors(logger, operation_name):
logger.info("Handling keys list request")
keys_status = await key_manager.get_keys_by_status()
return {
"status": "success",
@@ -160,8 +157,3 @@ async def get_keys_list(
},
"total": len(keys_status["valid_keys"]) + len(keys_status["invalid_keys"]),
}
except Exception as e:
logger.error(f"Error getting keys list: {str(e)}")
raise HTTPException(
status_code=500, detail="Internal server error while fetching keys list"
) from e

View File

@@ -10,7 +10,7 @@ from app.core.security import verify_auth_token
from app.log.logger import get_routes_logger
from app.router import error_log_routes, gemini_routes, openai_routes, config_routes, scheduler_routes, stats_routes, version_routes, openai_compatiable_routes
from app.service.key.key_manager import get_key_manager_instance
from app.service.stats_service import StatsService
from app.service.stats.stats_service import StatsService
logger = get_routes_logger()

View File

@@ -1,7 +1,7 @@
from fastapi import APIRouter, Depends, HTTPException, Request
from starlette import status
from app.core.security import verify_auth_token
from app.service.stats_service import StatsService
from app.service.stats.stats_service import StatsService
from app.log.logger import get_stats_logger
logger = get_stats_logger()

View File

@@ -272,6 +272,7 @@ class OpenAIChatService:
async for line in self.api_client.stream_generate_content(
payload, model, current_attempt_key
):
# print(line)
if line.startswith("data:"):
chunk = json.loads(line[6:])
openai_chunk = self.response_handler.handle_response(
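
For reference, each line of the stream arrives in SSE form, data: {json}, and the slice line[6:] drops the six-character "data: " prefix before parsing; a minimal sketch with an illustrative line:

import json

line = 'data: {"candidates": []}'  # illustrative SSE line
if line.startswith("data:"):
    chunk = json.loads(line[6:])  # skip the "data: " prefix (6 chars incl. the space)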

View File

@@ -1,5 +1,5 @@
fastapi
httpx
httpx[socks]
openai
pydantic
pydantic_settings
@@ -16,6 +16,5 @@ sqlalchemy
aiomysql
databases
python-dotenv
apscheduler # add scheduled-task library
apscheduler
packaging
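
With the socks extra installed, httpx can route requests through a SOCKS proxy; a minimal sketch (the proxy URL is illustrative, and proxy= is the current httpx keyword, with older releases using proxies=):

import asyncio

import httpx

async def main() -> None:
    # Requires httpx[socks]; the SOCKS5 endpoint below is a placeholder.
    async with httpx.AsyncClient(proxy="socks5://127.0.0.1:1080") as client:
        resp = await client.get("https://generativelanguage.googleapis.com/")
        print(resp.status_code)

asyncio.run(main())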