🐛 fix(ai): 兼容 DeepSeek reasoning 内容响应

- 增加 reasoning_content 字段解析与前后端类型定义

- 兼容 DeepSeek 流式和非流式响应中的推理内容

- 统一 AI 消息 payload 映射,避免历史消息丢失推理内容

- 补充 OpenAI 兼容 Provider 与前端消息映射测试
This commit is contained in:
Syngnat
2026-04-30 17:26:36 +08:00
parent d2dad75167
commit 5f9adcac37
9 changed files with 283 additions and 60 deletions

View File

@@ -28,6 +28,7 @@ import {
import { buildAIReadonlyPreviewSQL } from '../utils/aiSqlLimit';
import { resolveAITableSchemaToolResult } from '../utils/aiTableSchemaTool';
import { consumeAIChatSendShortcutOnKeyDown } from '../utils/aiChatSendShortcut';
import { toAIRequestMessage } from '../utils/aiMessagePayload';
interface AIChatPanelProps {
width?: number;
@@ -74,7 +75,7 @@ export const getDynamicMaxContextChars = (modelName?: string) => {
// 当超出指定字符上限时触发上下文自建压缩
const compressContextIfNeeded = async (sid: string, messagesPayload: any[], maxLimit: number) => {
try {
const chars = messagesPayload.reduce((sum, m) => sum + (m.content?.length || 0) + JSON.stringify(m.tool_calls || []).length, 0);
const chars = messagesPayload.reduce((sum, m) => sum + (m.content?.length || 0) + (m.reasoning_content?.length || 0) + JSON.stringify(m.tool_calls || []).length, 0);
if (chars < maxLimit) return null;
const Service = (window as any).go?.aiservice?.Service;
@@ -508,7 +509,7 @@ export const AIChatPanel: React.FC<AIChatPanelProps> = ({
let isFirstCompletion = false;
// 新增:利用 requestAnimationFrame 缓冲高频事件,避免 React 重绘阻塞导致感官吞吐变慢
const streamBuffer = { thinking: '', content: '' };
const streamBuffer = { thinking: '', reasoningContent: '', content: '' };
let flushPending = false;
const flushStreamBuffer = () => {
@@ -523,6 +524,10 @@ export const AIChatPanel: React.FC<AIChatPanelProps> = ({
updates.phase = 'thinking';
streamBuffer.thinking = '';
}
if (streamBuffer.reasoningContent) {
updates.reasoning_content = (existing.reasoning_content || '') + streamBuffer.reasoningContent;
streamBuffer.reasoningContent = '';
}
if (streamBuffer.content) {
updates.content = (existing.content || '') + streamBuffer.content;
updates.phase = 'generating';
@@ -535,7 +540,7 @@ export const AIChatPanel: React.FC<AIChatPanelProps> = ({
flushPending = false;
};
const handler = (data: { content?: string; thinking?: string; tool_calls?: AIToolCall[]; done?: boolean; error?: string }) => {
const handler = (data: { content?: string; thinking?: string; reasoning_content?: string; tool_calls?: AIToolCall[]; done?: boolean; error?: string }) => {
// Find connecting message if there's no active assistant string
if (!assistantMsgId) {
const history = useStore.getState().aiChatHistory[sid] || [];
@@ -589,7 +594,8 @@ export const AIChatPanel: React.FC<AIChatPanelProps> = ({
}
// 处理 thinking模型思考过程
if (data.thinking) {
const displayThinking = data.thinking || data.reasoning_content || '';
if (displayThinking || data.reasoning_content) {
if (!assistantMsgId) {
assistantMsgId = genId();
addAIChatMessage(sid, {
@@ -597,7 +603,8 @@ export const AIChatPanel: React.FC<AIChatPanelProps> = ({
role: 'assistant',
phase: 'thinking',
content: '',
thinking: data.thinking,
thinking: displayThinking || undefined,
reasoning_content: data.reasoning_content || undefined,
timestamp: Date.now(),
loading: true,
jvmPlanContext: pendingJVMPlanContextRef.current,
@@ -605,7 +612,10 @@ export const AIChatPanel: React.FC<AIChatPanelProps> = ({
});
if (sending) setSending(false);
} else {
streamBuffer.thinking += data.thinking;
streamBuffer.thinking += displayThinking;
if (data.reasoning_content) {
streamBuffer.reasoningContent += data.reasoning_content;
}
if (sending) setSending(false);
}
}
@@ -632,7 +642,7 @@ export const AIChatPanel: React.FC<AIChatPanelProps> = ({
}
}
if (streamBuffer.thinking || streamBuffer.content) {
if (streamBuffer.thinking || streamBuffer.reasoningContent || streamBuffer.content) {
if (!flushPending) {
flushPending = true;
requestAnimationFrame(flushStreamBuffer);
@@ -641,7 +651,7 @@ export const AIChatPanel: React.FC<AIChatPanelProps> = ({
if (data.done) {
// 如果有残留未 flush 的 buffer立刻推入状态树
if (streamBuffer.thinking || streamBuffer.content) {
if (streamBuffer.thinking || streamBuffer.reasoningContent || streamBuffer.content) {
flushStreamBuffer();
}
const doneAssistantId = assistantMsgId;
@@ -676,12 +686,7 @@ export const AIChatPanel: React.FC<AIChatPanelProps> = ({
(async () => {
try {
const currentHistory = useStore.getState().aiChatHistory[sid] || [];
const messagesPayload = currentHistory.map(m => {
const mapped: any = { role: m.role, content: m.content, images: m.images };
if (m.tool_calls) mapped.tool_calls = m.tool_calls;
if (m.tool_call_id) mapped.tool_call_id = m.tool_call_id;
return mapped;
});
const messagesPayload = currentHistory.map(toAIRequestMessage);
const sysMessages = await buildSystemContextMessages(
existing.jvmPlanContext,
existing.jvmDiagnosticPlanContext,
@@ -804,7 +809,7 @@ export const AIChatPanel: React.FC<AIChatPanelProps> = ({
addAIChatMessage(sid, connectingMsg);
const truncatedHistory = historyLocal.slice(0, lastUserMsgIndex + 1);
const messagesPayload = truncatedHistory.map(m => ({ role: m.role, content: m.content, images: m.images }));
const messagesPayload = truncatedHistory.map(toAIRequestMessage);
try {
const sysMessages = await buildSystemContextMessages(
@@ -823,6 +828,8 @@ export const AIChatPanel: React.FC<AIChatPanelProps> = ({
addAIChatMessage(sid, {
id: genId(), role: 'assistant',
content: result?.success ? result.content : `${errClean}`,
thinking: result?.success ? result.reasoning_content : undefined,
reasoning_content: result?.success ? result.reasoning_content : undefined,
rawError: (!result?.success && errClean !== errRaw) ? errRaw : undefined,
timestamp: Date.now(),
jvmPlanContext: retryJVMPlanContext,
@@ -1268,12 +1275,7 @@ SELECT * FROM users WHERE status = 1;
setSending(true);
const currentHistory = useStore.getState().aiChatHistory[sid] || [];
// 过滤掉 connecting 占位消息,不发给模型
const messagesPayload = currentHistory.filter(m => m.phase !== 'connecting').map(m => {
const mapped: any = { role: m.role, content: m.content, images: m.images };
if (m.tool_calls) mapped.tool_calls = m.tool_calls;
if (m.tool_call_id) mapped.tool_call_id = m.tool_call_id;
return mapped;
});
const messagesPayload = currentHistory.filter(m => m.phase !== 'connecting').map(toAIRequestMessage);
const sysMessages = await buildSystemContextMessages(
inheritedJVMPlanContext,
inheritedJVMDiagnosticPlanContext,
@@ -1313,6 +1315,8 @@ SELECT * FROM users WHERE status = 1;
useStore.getState().addAIChatMessage(sid, {
id: genId(), role: 'assistant',
content: result?.success ? result.content : `${errC}`,
thinking: result?.success ? result.reasoning_content : undefined,
reasoning_content: result?.success ? result.reasoning_content : undefined,
rawError: (!result?.success && errC !== errR) ? errR : undefined,
timestamp: Date.now(),
jvmPlanContext: inheritedJVMPlanContext,
@@ -1380,12 +1384,7 @@ SELECT * FROM users WHERE status = 1;
// 【过渡状态 2】上下文已组装完成即将接入模型
updateAIChatMessage(sid, connectingMsg.id, { content: '模型接入中' });
const chatMessages = [...messages, userMsg].map(m => {
const mapped: any = { role: m.role, content: m.content, images: m.images };
if (m.tool_calls) mapped.tool_calls = m.tool_calls;
if (m.tool_call_id) mapped.tool_call_id = m.tool_call_id;
return mapped;
});
const chatMessages = [...messages, userMsg].map(toAIRequestMessage);
let finalMessagesPayload = chatMessages;
const dynamicMaxLimit = getDynamicMaxContextChars(activeProvider?.model);
@@ -1421,6 +1420,8 @@ SELECT * FROM users WHERE status = 1;
const assistantMsg: AIChatMessage = {
id: genId(), role: 'assistant',
content: result?.success ? result.content : `${errC2}`,
thinking: result?.success ? result.reasoning_content : undefined,
reasoning_content: result?.success ? result.reasoning_content : undefined,
rawError: (!result?.success && errC2 !== errR2) ? errR2 : undefined,
timestamp: Date.now(),
jvmPlanContext: currentJVMPlanContext,
@@ -1588,7 +1589,7 @@ SELECT * FROM users WHERE status = 1;
return connection ? buildRpcConnectionConfig(connection.config) : undefined;
}, [inferredConnectionId, connections]);
const contextUsageChars = useMemo(() =>
messages.reduce((sum, m) => sum + (m.content?.length || 0) + JSON.stringify(m.tool_calls || []).length, 0),
messages.reduce((sum, m) => sum + (m.content?.length || 0) + (m.reasoning_content?.length || 0) + JSON.stringify(m.tool_calls || []).length, 0),
[messages]);
const contextTableNames = useMemo(() => {
const ck = activeContext?.connectionId ? `${activeContext.connectionId}:${activeContext.dbName || ''}` : 'default';

View File

@@ -554,6 +554,7 @@ export interface AIChatMessage {
phase?: ChatPhase;
content: string;
thinking?: string;
reasoning_content?: string;
timestamp: number;
loading?: boolean;
images?: string[]; // base64 encoded images with data URI prefix

View File

@@ -0,0 +1,78 @@
import { describe, expect, it } from 'vitest';
import type { AIChatMessage, AIToolCall } from '../types';
import { toAIRequestMessage } from './aiMessagePayload';

// Shared fixture: a function-type tool call targeting inspect_table_schema.
const schemaToolCall: AIToolCall = {
  id: 'call_schema',
  type: 'function',
  function: {
    name: 'inspect_table_schema',
    arguments: '{"table":"orders"}',
  },
};

// Builds a minimal chat message and merges per-test overrides on top.
function buildMessage(overrides: Partial<AIChatMessage>): AIChatMessage {
  return {
    id: 'msg-1',
    role: 'assistant',
    content: '',
    timestamp: 1,
    ...overrides,
  };
}

describe('toAIRequestMessage', () => {
  it('keeps reasoning_content on assistant tool-call messages', () => {
    const source = buildMessage({
      tool_calls: [schemaToolCall],
      reasoning_content: '需要先检查表结构',
    });
    expect(toAIRequestMessage(source)).toMatchObject({
      role: 'assistant',
      tool_calls: [schemaToolCall],
      reasoning_content: '需要先检查表结构',
    });
  });
  it('keeps reasoning_content on assistant messages without tool calls', () => {
    const source = buildMessage({
      content: '最终分析',
      reasoning_content: '工具调用轮次的最终思考也需要保留',
    });
    expect(toAIRequestMessage(source)).toMatchObject({
      role: 'assistant',
      content: '最终分析',
      reasoning_content: '工具调用轮次的最终思考也需要保留',
    });
  });
  it('omits reasoning_content from tool result messages while keeping tool_call_id', () => {
    const payload = toAIRequestMessage(buildMessage({
      role: 'tool',
      content: '{"ok":true}',
      tool_call_id: 'call_schema',
      reasoning_content: '不应回传',
    }));
    expect(payload).toMatchObject({
      role: 'tool',
      content: '{"ok":true}',
      tool_call_id: 'call_schema',
    });
    expect(payload).not.toHaveProperty('reasoning_content');
  });
  it('keeps user images without adding empty tool fields', () => {
    const payload = toAIRequestMessage(buildMessage({
      role: 'user',
      content: '看图',
      images: ['data:image/png;base64,abc'],
    }));
    expect(payload).toEqual({
      role: 'user',
      content: '看图',
      images: ['data:image/png;base64,abc'],
    });
  });
});

View File

@@ -0,0 +1,32 @@
import type { AIChatMessage, AIToolCall } from '../types';

/** Wire-format chat message sent to the AI backend. */
export interface AIRequestMessage {
  role: AIChatMessage['role'];
  content: string;
  images?: string[];
  tool_calls?: AIToolCall[];
  tool_call_id?: string;
  reasoning_content?: string;
}

/**
 * Maps a UI chat message onto the minimal request payload.
 *
 * Optional fields are only present when meaningful: empty image/tool-call
 * arrays and missing tool_call_id are dropped, and reasoning_content is
 * forwarded for assistant messages only (tool/user messages never echo it).
 */
export const toAIRequestMessage = (message: AIChatMessage): AIRequestMessage => {
  const { role, content, images, tool_calls, tool_call_id, reasoning_content } = message;
  const replayReasoning = role === 'assistant' && !!reasoning_content;
  return {
    role,
    content,
    ...(images && images.length > 0 ? { images } : {}),
    ...(tool_calls && tool_calls.length > 0 ? { tool_calls } : {}),
    ...(tool_call_id ? { tool_call_id } : {}),
    ...(replayReasoning ? { reasoning_content } : {}),
  };
};

View File

@@ -41,6 +41,7 @@ export namespace ai {
images?: string[];
tool_call_id?: string;
tool_calls?: ToolCall[];
reasoning_content?: string;
static createFrom(source: any = {}) {
return new Message(source);
@@ -53,6 +54,7 @@ export namespace ai {
this.images = source["images"];
this.tool_call_id = source["tool_call_id"];
this.tool_calls = this.convertValues(source["tool_calls"], ToolCall);
this.reasoning_content = source["reasoning_content"];
}
convertValues(a: any, classs: any, asMap: boolean = false): any {

View File

@@ -84,21 +84,25 @@ type openAIChatRequest struct {
}
type openAIChatMessage struct {
Role string `json:"role"`
Content interface{} `json:"content,omitempty"`
ToolCalls []ai.ToolCall `json:"tool_calls,omitempty"`
ToolCallID string `json:"tool_call_id,omitempty"`
Role string `json:"role"`
Content interface{} `json:"content,omitempty"`
ToolCalls []ai.ToolCall `json:"tool_calls,omitempty"`
ToolCallID string `json:"tool_call_id,omitempty"`
ReasoningContent string `json:"reasoning_content,omitempty"`
}
func buildOpenAIMessages(reqMessages []ai.Message, modelName string, baseURL string) []openAIChatMessage {
messages := make([]openAIChatMessage, len(reqMessages))
replayReasoningContent := shouldReplayReasoningContent(modelName, baseURL)
for i, m := range reqMessages {
if m.Role == "tool" {
messages[i] = openAIChatMessage{Role: m.Role, Content: m.Content, ToolCallID: m.ToolCallID}
continue
}
if len(m.ToolCalls) > 0 {
messages[i] = openAIChatMessage{Role: m.Role, Content: m.Content, ToolCalls: m.ToolCalls}
msg := openAIChatMessage{Role: m.Role, Content: m.Content, ToolCalls: m.ToolCalls}
attachReasoningContent(&msg, m, replayReasoningContent)
messages[i] = msg
continue
}
@@ -127,20 +131,37 @@ func buildOpenAIMessages(reqMessages []ai.Message, modelName string, baseURL str
},
})
}
messages[i] = openAIChatMessage{Role: m.Role, Content: contentParts}
msg := openAIChatMessage{Role: m.Role, Content: contentParts}
attachReasoningContent(&msg, m, replayReasoningContent)
messages[i] = msg
} else {
messages[i] = openAIChatMessage{Role: m.Role, Content: m.Content}
msg := openAIChatMessage{Role: m.Role, Content: m.Content}
attachReasoningContent(&msg, m, replayReasoningContent)
messages[i] = msg
}
}
return messages
}
// attachReasoningContent copies an assistant message's reasoning_content onto
// the outgoing payload. It is a no-op unless replay is enabled for the target
// endpoint, the source is an assistant message, and the field is non-empty.
func attachReasoningContent(msg *openAIChatMessage, source ai.Message, enabled bool) {
	if !enabled {
		return
	}
	if source.Role != "assistant" || source.ReasoningContent == "" {
		return
	}
	msg.ReasoningContent = source.ReasoningContent
}
// shouldReplayReasoningContent reports whether the target endpoint expects
// historical assistant reasoning_content to be echoed back in requests.
// Currently only DeepSeek requires this, detected case-insensitively from
// either the model name or the base URL.
func shouldReplayReasoningContent(modelName string, baseURL string) bool {
	for _, candidate := range []string{modelName, baseURL} {
		if strings.Contains(strings.ToLower(strings.TrimSpace(candidate)), "deepseek") {
			return true
		}
	}
	return false
}
// openAIChatResponse OpenAI API 响应体
type openAIChatResponse struct {
Choices []struct {
Message struct {
Content string `json:"content"`
ToolCalls []ai.ToolCall `json:"tool_calls,omitempty"`
Content string `json:"content"`
ReasoningContent string `json:"reasoning_content,omitempty"`
ToolCalls []ai.ToolCall `json:"tool_calls,omitempty"`
} `json:"message"`
FinishReason string `json:"finish_reason"`
} `json:"choices"`
@@ -227,7 +248,8 @@ func (p *OpenAIProvider) Chat(ctx context.Context, req ai.ChatRequest) (*ai.Chat
}
return &ai.ChatResponse{
Content: result.Choices[0].Message.Content,
Content: result.Choices[0].Message.Content,
ReasoningContent: result.Choices[0].Message.ReasoningContent,
TokensUsed: ai.TokenUsage{
PromptTokens: result.Usage.PromptTokens,
CompletionTokens: result.Usage.CompletionTokens,
@@ -342,7 +364,10 @@ func (p *OpenAIProvider) ChatStream(ctx context.Context, req ai.ChatRequest, cal
// 支持 DeepSeek/千问等模型的 reasoning_content 字段
if choice.Delta.ReasoningContent != "" {
receivedContent = true
callback(ai.StreamChunk{Thinking: choice.Delta.ReasoningContent})
callback(ai.StreamChunk{
Thinking: choice.Delta.ReasoningContent,
ReasoningContent: choice.Delta.ReasoningContent,
})
}
if choice.FinishReason != nil {

View File

@@ -2,6 +2,8 @@ package provider
import (
"GoNavi-Wails/internal/ai"
"encoding/json"
"strings"
"testing"
)
@@ -165,3 +167,80 @@ func TestOpenAIProvider_DefaultMaxTokens(t *testing.T) {
t.Fatalf("expected default max tokens 4096, got %d", op.config.MaxTokens)
}
}
// Verifies that a DeepSeek endpoint gets the assistant's reasoning_content
// replayed on tool-call messages, that tool results stay clean, and that the
// field actually survives JSON serialization.
func TestBuildOpenAIMessages_ReplaysDeepSeekReasoningContentForToolCalls(t *testing.T) {
	toolCall := testOpenAIToolCall()
	history := []ai.Message{
		{
			Role:             "assistant",
			Content:          "",
			ToolCalls:        []ai.ToolCall{toolCall},
			ReasoningContent: "需要先检查表结构",
		},
		{
			Role:       "tool",
			Content:    `{"ok":true}`,
			ToolCallID: toolCall.ID,
		},
	}
	got := buildOpenAIMessages(history, "deepseek-v4", "https://api.deepseek.com/v1")
	if got[0].ReasoningContent != "需要先检查表结构" {
		t.Fatalf("expected reasoning_content to be replayed for DeepSeek tool call, got %q", got[0].ReasoningContent)
	}
	if got[1].ReasoningContent != "" {
		t.Fatalf("expected tool result message not to carry reasoning_content, got %q", got[1].ReasoningContent)
	}
	body, err := json.Marshal(got[0])
	if err != nil {
		t.Fatalf("marshal message: %v", err)
	}
	if !strings.Contains(string(body), `"reasoning_content":"需要先检查表结构"`) {
		t.Fatalf("expected JSON payload to include reasoning_content, got %s", body)
	}
}
func TestBuildOpenAIMessages_OmitsReasoningContentForNonDeepSeekProviders(t *testing.T) {
got := buildOpenAIMessages([]ai.Message{
{
Role: "assistant",
Content: "",
ToolCalls: []ai.ToolCall{testOpenAIToolCall()},
ReasoningContent: "reasoning should stay local",
},
}, "gpt-4o", "https://api.openai.com/v1")
if got[0].ReasoningContent != "" {
t.Fatalf("expected non-DeepSeek provider to omit reasoning_content, got %q", got[0].ReasoningContent)
}
body, err := json.Marshal(got[0])
if err != nil {
t.Fatalf("marshal message: %v", err)
}
if strings.Contains(string(body), "reasoning_content") {
t.Fatalf("expected JSON payload to omit reasoning_content for non-DeepSeek provider, got %s", body)
}
}
func TestBuildOpenAIMessages_ReplaysDeepSeekAssistantReasoningContentWithoutToolCalls(t *testing.T) {
got := buildOpenAIMessages([]ai.Message{
{
Role: "assistant",
Content: "最终分析",
ReasoningContent: "工具调用轮次的最终思考也需要保留",
},
}, "deepseek-v4", "https://api.deepseek.com/v1")
if got[0].ReasoningContent != "工具调用轮次的最终思考也需要保留" {
t.Fatalf("expected DeepSeek assistant reasoning_content to be replayed, got %q", got[0].ReasoningContent)
}
}
// testOpenAIToolCall returns a shared fixture: a function-type tool call
// targeting inspect_table_schema. Fields are assigned one by one because the
// nested Function struct is an inline (anonymous) type.
func testOpenAIToolCall() ai.ToolCall {
	var tc ai.ToolCall
	tc.ID = "call_schema"
	tc.Type = "function"
	tc.Function.Name = "inspect_table_schema"
	tc.Function.Arguments = `{"table":"orders"}`
	return tc
}

View File

@@ -866,9 +866,10 @@ func (s *Service) AIChatSend(messages []ai.Message, tools []ai.Tool) map[string]
}
return map[string]interface{}{
"success": true,
"content": resp.Content,
"tool_calls": resp.ToolCalls,
"success": true,
"content": resp.Content,
"reasoning_content": resp.ReasoningContent,
"tool_calls": resp.ToolCalls,
"tokensUsed": map[string]int{
"promptTokens": resp.TokensUsed.PromptTokens,
"completionTokens": resp.TokensUsed.CompletionTokens,
@@ -903,11 +904,12 @@ func (s *Service) AIChatStream(sessionID string, messages []ai.Message, tools []
err = p.ChatStream(streamCtx, ai.ChatRequest{Messages: messages, Tools: tools}, func(chunk ai.StreamChunk) {
wailsRuntime.EventsEmit(s.ctx, "ai:stream:"+sessionID, map[string]interface{}{
"content": chunk.Content,
"thinking": chunk.Thinking,
"tool_calls": chunk.ToolCalls,
"done": chunk.Done,
"error": chunk.Error,
"content": chunk.Content,
"thinking": chunk.Thinking,
"reasoning_content": chunk.ReasoningContent,
"tool_calls": chunk.ToolCalls,
"done": chunk.Done,
"error": chunk.Error,
})
})

View File

@@ -25,11 +25,12 @@ type Tool struct {
// Message 表示一条对话消息
type Message struct {
Role string `json:"role"` // "system" | "user" | "assistant" | "tool"
Content string `json:"content"`
Images []string `json:"images,omitempty"` // base64 encoded images with data:image/png;base64,... prefix
ToolCallID string `json:"tool_call_id,omitempty"` // 当 role 为 "tool" 时必须传递
ToolCalls []ToolCall `json:"tool_calls,omitempty"` // 当 role 为 "assistant" 并试图调工具时传递
Role string `json:"role"` // "system" | "user" | "assistant" | "tool"
Content string `json:"content"`
Images []string `json:"images,omitempty"` // base64 encoded images with data:image/png;base64,... prefix
ToolCallID string `json:"tool_call_id,omitempty"` // 当 role 为 "tool" 时必须传递
ToolCalls []ToolCall `json:"tool_calls,omitempty"` // 当 role 为 "assistant" 并试图调工具时传递
ReasoningContent string `json:"reasoning_content,omitempty"` // DeepSeek thinking mode 工具调用链路要求原样回传
}
// ChatRequest AI 对话请求
@@ -42,9 +43,10 @@ type ChatRequest struct {
// ChatResponse AI 对话响应
type ChatResponse struct {
Content string `json:"content"`
TokensUsed TokenUsage `json:"tokensUsed"`
ToolCalls []ToolCall `json:"tool_calls,omitempty"`
Content string `json:"content"`
ReasoningContent string `json:"reasoning_content,omitempty"`
TokensUsed TokenUsage `json:"tokensUsed"`
ToolCalls []ToolCall `json:"tool_calls,omitempty"`
}
// TokenUsage token 用量统计
@@ -56,11 +58,12 @@ type TokenUsage struct {
// StreamChunk 流式响应片段
type StreamChunk struct {
Content string `json:"content"`
Thinking string `json:"thinking,omitempty"`
Done bool `json:"done"`
Error string `json:"error,omitempty"`
ToolCalls []ToolCall `json:"tool_calls,omitempty"`
Content string `json:"content"`
Thinking string `json:"thinking,omitempty"`
ReasoningContent string `json:"reasoning_content,omitempty"`
Done bool `json:"done"`
Error string `json:"error,omitempty"`
ToolCalls []ToolCall `json:"tool_calls,omitempty"`
}
// ProviderConfig AI Provider 配置