Skip to content

Commit ee0e8f3

Browse files
author
sskorolev
committed
llms: preserve reasoning_content in MessageContent for tool calling round-trip
1 parent 8fea3de commit ee0e8f3

File tree

5 files changed

+114
-10
lines changed

5 files changed

+114
-10
lines changed

llms/generatecontent.go

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,13 @@ import (
 type MessageContent struct {
 	Role  ChatMessageType
 	Parts []ContentPart
+
+	// ReasoningContent is used with reasoning models (e.g. deepseek-reasoner)
+	// to preserve the reasoning content in assistant messages for round-trip
+	// conversations. When the API returns reasoning_content in an assistant
+	// message (e.g. alongside tool_calls), this field must be included when
+	// sending the message back as part of conversation history.
+	ReasoningContent string
 }

 // TextPart creates TextContent from a given string.

llms/marshaling.go

Lines changed: 15 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -14,25 +14,29 @@ func (mc MessageContent) MarshalJSON() ([]byte, error) {
 	if hasSingleTextPart {
 		tp, _ := mc.Parts[0].(TextContent)
 		return json.Marshal(struct {
-			Role ChatMessageType `json:"role"`
-			Text string          `json:"text"`
-		}{Role: mc.Role, Text: tp.Text})
+			Role             ChatMessageType `json:"role"`
+			Text             string          `json:"text"`
+			ReasoningContent string          `json:"reasoning_content,omitempty"`
+		}{Role: mc.Role, Text: tp.Text, ReasoningContent: mc.ReasoningContent})
 	}

 	return json.Marshal(struct {
-		Role  ChatMessageType `json:"role"`
-		Parts []ContentPart   `json:"parts"`
+		Role             ChatMessageType `json:"role"`
+		Parts            []ContentPart   `json:"parts"`
+		ReasoningContent string          `json:"reasoning_content,omitempty"`
 	}{
-		Role:  mc.Role,
-		Parts: mc.Parts,
+		Role:             mc.Role,
+		Parts:            mc.Parts,
+		ReasoningContent: mc.ReasoningContent,
 	})
 }

 func (mc *MessageContent) UnmarshalJSON(data []byte) error {
 	var m struct {
-		Role  ChatMessageType `json:"role"`
-		Text  string          `json:"text"`
-		Parts []struct {
+		Role             ChatMessageType `json:"role"`
+		Text             string          `json:"text"`
+		ReasoningContent string          `json:"reasoning_content"`
+		Parts            []struct {
 			Type     string `json:"type"`
 			Text     string `json:"text,omitempty"`
 			ImageURL struct {
@@ -60,6 +64,7 @@ func (mc *MessageContent) UnmarshalJSON(data []byte) error {
 		return err
 	}
 	mc.Role = m.Role
+	mc.ReasoningContent = m.ReasoningContent

 	for _, part := range m.Parts {
 		switch part.Type {

llms/marshaling_test.go

Lines changed: 57 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -259,6 +259,18 @@ func TestUnmarshalJSONMessageContent(t *testing.T) {
 		},
 		wantErr: false,
 	},
+	{
+		name:  "assistant message with reasoning_content",
+		input: `{"role":"assistant","text":"final answer","reasoning_content":"step-by-step reasoning"}`,
+		want: MessageContent{
+			Role: "assistant",
+			Parts: []ContentPart{
+				TextContent{Text: "final answer"},
+			},
+			ReasoningContent: "step-by-step reasoning",
+		},
+		wantErr: false,
+	},
 }

 for _, tt := range tests {
@@ -323,6 +335,29 @@ func TestMarshalJSONMessageContent(t *testing.T) {
 		want:    `{"role":"user","parts":[{}]}`,
 		wantErr: false,
 	},
+	{
+		name: "assistant message with reasoning_content",
+		input: MessageContent{
+			Role: "assistant",
+			Parts: []ContentPart{
+				TextContent{Text: "final answer"},
+			},
+			ReasoningContent: "step-by-step reasoning",
+		},
+		want:    `{"role":"assistant","text":"final answer","reasoning_content":"step-by-step reasoning"}`,
+		wantErr: false,
+	},
+	{
+		name: "message without reasoning_content omits field",
+		input: MessageContent{
+			Role: "user",
+			Parts: []ContentPart{
+				TextContent{Text: "Hello"},
+			},
+		},
+		want:    `{"role":"user","text":"Hello"}`,
+		wantErr: false,
+	},
 }

 for _, tt := range tests {
@@ -485,6 +520,28 @@ role: assistant
 			},
 		},
 	},
+	{
+		name: "assistant message with reasoning_content and tool calls",
+		in: MessageContent{
+			Role: "assistant",
+			Parts: []ContentPart{
+				ToolCall{Type: "function", ID: "tc01", FunctionCall: &FunctionCall{Name: "calculator", Arguments: `{"a":15,"b":28}`}},
+			},
+			ReasoningContent: "I need to use the calculator to add 15 and 28",
+		},
+		assertedJSON: `{"role":"assistant","parts":[{"type":"tool_call","tool_call":{"function":{"name":"calculator","arguments":"{\"a\":15,\"b\":28}"},"id":"tc01","type":"function"}}],"reasoning_content":"I need to use the calculator to add 15 and 28"}`,
+	},
+	{
+		name: "assistant message with reasoning_content single text",
+		in: MessageContent{
+			Role: "assistant",
+			Parts: []ContentPart{
+				TextContent{Text: "The answer is 43"},
+			},
+			ReasoningContent: "I calculated 15 + 28 = 43",
+		},
+		assertedJSON: `{"role":"assistant","text":"The answer is 43","reasoning_content":"I calculated 15 + 28 = 43"}`,
+	},
 }

 // Round-trip both JSON and YAML:

llms/openai/internal/openaiclient/chat_test.go

Lines changed: 34 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -122,3 +122,37 @@ func TestChatMessage_MarshalUnmarshal_WithReasoning(t *testing.T) {
 	require.NoError(t, err)
 	require.Equal(t, msg, msg2)
 }
+
+func TestChatMessage_MarshalUnmarshal_WithReasoningAndToolCalls(t *testing.T) {
+	t.Parallel()
+	msg := ChatMessage{
+		Role:             "assistant",
+		Content:          "",
+		ReasoningContent: "I need to use the calculator to add 15 and 28",
+		ToolCalls: []ToolCall{
+			{
+				ID:   "call_123",
+				Type: ToolTypeFunction,
+				Function: ToolFunction{
+					Name:      "calculator",
+					Arguments: `{"a":15,"b":28}`,
+				},
+			},
+		},
+	}
+	text, err := json.Marshal(msg)
+	require.NoError(t, err)
+
+	// Verify reasoning_content is present in serialized JSON
+	assert.Contains(t, string(text), `"reasoning_content"`)
+	assert.Contains(t, string(text), `"tool_calls"`)
+
+	// Round-trip: unmarshal back
+	var msg2 ChatMessage
+	err = json.Unmarshal(text, &msg2)
+	require.NoError(t, err)
+	require.Equal(t, msg.ReasoningContent, msg2.ReasoningContent)
+	require.Equal(t, len(msg.ToolCalls), len(msg2.ToolCalls))
+	require.Equal(t, msg.ToolCalls[0].ID, msg2.ToolCalls[0].ID)
+	require.Equal(t, msg.ToolCalls[0].Function.Name, msg2.ToolCalls[0].Function.Name)
+}

llms/openai/openaillm.go

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -151,6 +151,7 @@ func (o *LLM) GenerateContent(ctx context.Context, messages []llms.MessageConten
 		msg.Role = RoleSystem
 	case llms.ChatMessageTypeAI:
 		msg.Role = RoleAssistant
+		msg.ReasoningContent = mc.ReasoningContent
 	case llms.ChatMessageTypeHuman:
 		msg.Role = RoleUser
 		// For models without system support, prepend system content to first user message

0 commit comments

Comments
 (0)