
Commit 645e81a

Merge remote-tracking branch 'origin/main' into feature/integrationTestCI

2 parents: 714f67d + 9056a59

34 files changed, +1584 -139 lines

.changeset/khaki-zoos-share.md (+5)
@@ -0,0 +1,5 @@
+---
+"roo-cline": patch
+---
+
+v3.3.5

src/api/providers/openai.ts (+22 -16)
@@ -1,5 +1,6 @@
 import { Anthropic } from "@anthropic-ai/sdk"
 import OpenAI, { AzureOpenAI } from "openai"
+
 import {
 	ApiHandlerOptions,
 	azureOpenAiDefaultApiVersion,
@@ -8,6 +9,7 @@ import {
 } from "../../shared/api"
 import { ApiHandler, SingleCompletionHandler } from "../index"
 import { convertToOpenAiMessages } from "../transform/openai-format"
+import { convertToR1Format } from "../transform/r1-format"
 import { ApiStream } from "../transform/stream"
 
 export class OpenAiHandler implements ApiHandler, SingleCompletionHandler {
@@ -16,7 +18,8 @@ export class OpenAiHandler implements ApiHandler, SingleCompletionHandler {
 
 	constructor(options: ApiHandlerOptions) {
 		this.options = options
-		// Azure API shape slightly differs from the core API shape: https://github.com/openai/openai-node?tab=readme-ov-file#microsoft-azure-openai
+		// Azure API shape slightly differs from the core API shape:
+		// https://github.com/openai/openai-node?tab=readme-ov-file#microsoft-azure-openai
 		const urlHost = new URL(this.options.openAiBaseUrl ?? "").host
 		if (urlHost === "azure.com" || urlHost.endsWith(".azure.com") || options.openAiUseAzure) {
 			this.client = new AzureOpenAI({
@@ -38,15 +41,17 @@ export class OpenAiHandler implements ApiHandler, SingleCompletionHandler {
 
 		const deepseekReasoner = modelId.includes("deepseek-reasoner")
 
-		if (!deepseekReasoner && (this.options.openAiStreamingEnabled ?? true)) {
+		if (this.options.openAiStreamingEnabled ?? true) {
 			const systemMessage: OpenAI.Chat.ChatCompletionSystemMessageParam = {
 				role: "system",
 				content: systemPrompt,
 			}
 			const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
 				model: modelId,
 				temperature: 0,
-				messages: [systemMessage, ...convertToOpenAiMessages(messages)],
+				messages: deepseekReasoner
+					? convertToR1Format([{ role: "user", content: systemPrompt }, ...messages])
+					: [systemMessage, ...convertToOpenAiMessages(messages)],
 				stream: true as const,
 				stream_options: { include_usage: true },
 			}
@@ -64,6 +69,12 @@ export class OpenAiHandler implements ApiHandler, SingleCompletionHandler {
 						text: delta.content,
 					}
 				}
+				if ("reasoning_content" in delta && delta.reasoning_content) {
+					yield {
+						type: "reasoning",
+						text: (delta.reasoning_content as string | undefined) || "",
+					}
+				}
 				if (chunk.usage) {
 					yield {
 						type: "usage",
@@ -73,24 +84,19 @@ export class OpenAiHandler implements ApiHandler, SingleCompletionHandler {
 					}
 				}
 			} else {
-				let systemMessage: OpenAI.Chat.ChatCompletionUserMessageParam | OpenAI.Chat.ChatCompletionSystemMessageParam
-
 				// o1 for instance doesnt support streaming, non-1 temp, or system prompt
-				// deepseek reasoner supports system prompt
-				systemMessage = deepseekReasoner
-					? {
-							role: "system",
-							content: systemPrompt,
-						}
-					: {
-							role: "user",
-							content: systemPrompt,
-						}
+				const systemMessage: OpenAI.Chat.ChatCompletionUserMessageParam = {
+					role: "user",
+					content: systemPrompt,
+				}
 
 				const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming = {
 					model: modelId,
-					messages: [systemMessage, ...convertToOpenAiMessages(messages)],
+					messages: deepseekReasoner
+						? convertToR1Format([{ role: "user", content: systemPrompt }, ...messages])
+						: [systemMessage, ...convertToOpenAiMessages(messages)],
 				}
+
 				const response = await this.client.chat.completions.create(requestOptions)
 
 				yield {
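
The streaming branch now serves deepseek-reasoner too, and R1's chain-of-thought arrives as separate reasoning chunks alongside the usual text and usage chunks. A minimal consumer sketch, assuming an already-constructed OpenAiHandler; the chunk shapes come from the diff above, while the import path and helper name are illustrative:

import { Anthropic } from "@anthropic-ai/sdk"
import { OpenAiHandler } from "./openai" // the handler patched above (path assumed)

// Drain the ApiStream, keeping the model's chain-of-thought separate from the answer.
async function collectR1Response(
	handler: OpenAiHandler,
	systemPrompt: string,
	messages: Anthropic.Messages.MessageParam[],
) {
	let reasoning = "" // from `reasoning` chunks (delta.reasoning_content)
	let answer = "" // from `text` chunks (delta.content)
	for await (const chunk of handler.createMessage(systemPrompt, messages)) {
		if (chunk.type === "reasoning") reasoning += chunk.text
		else if (chunk.type === "text") answer += chunk.text
		// `usage` chunks report token counts; ignored in this sketch
	}
	return { reasoning, answer }
}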

src/api/providers/openrouter.ts (+7 -5)
@@ -19,6 +19,7 @@ interface OpenRouterApiStreamUsageChunk extends ApiStreamUsageChunk {
 }
 
 import { SingleCompletionHandler } from ".."
+import { convertToR1Format } from "../transform/r1-format"
 
 export class OpenRouterHandler implements ApiHandler, SingleCompletionHandler {
 	private options: ApiHandlerOptions
@@ -41,7 +42,7 @@ export class OpenRouterHandler implements ApiHandler, SingleCompletionHandler {
 		messages: Anthropic.Messages.MessageParam[],
 	): AsyncGenerator<ApiStreamChunk> {
 		// Convert Anthropic messages to OpenAI format
-		const openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[] = [
+		let openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[] = [
 			{ role: "system", content: systemPrompt },
 			...convertToOpenAiMessages(messages),
 		]
@@ -113,10 +114,11 @@ export class OpenRouterHandler implements ApiHandler, SingleCompletionHandler {
 		}
 
 		let temperature = 0
-		switch (this.getModel().id) {
-			case "deepseek/deepseek-r1":
-				// Recommended temperature for DeepSeek reasoning models
-				temperature = 0.6
+		if (this.getModel().id === "deepseek/deepseek-r1" || this.getModel().id.startsWith("deepseek/deepseek-r1:")) {
+			// Recommended temperature for DeepSeek reasoning models
+			temperature = 0.6
+			// DeepSeek highly recommends using user instead of system role
+			openAiMessages = convertToR1Format([{ role: "user", content: systemPrompt }, ...messages])
 		}
 
 		// https://openrouter.ai/docs/transforms
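
The exact-match switch becomes an exact-or-prefix test so that OpenRouter variant ids, which append a colon suffix to the base id (deepseek/deepseek-r1:free is one plausible example, not confirmed by this diff), get the same R1 treatment. A behavior sketch of the check, extracted for illustration:

// Same predicate as the condition introduced above.
const isDeepSeekR1 = (id: string): boolean =>
	id === "deepseek/deepseek-r1" || id.startsWith("deepseek/deepseek-r1:")

isDeepSeekR1("deepseek/deepseek-r1") // true — exact base id
isDeepSeekR1("deepseek/deepseek-r1:free") // true — ":variant" suffix (assumed id shape)
isDeepSeekR1("deepseek/deepseek-r1-distill-qwen-32b") // false — a bare substring check would wrongly match this

The exact-or-colon form is stricter than an includes("deepseek-r1") test, which would also catch distill-style ids that share the substring.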
src/api/transform/__tests__/r1-format.test.ts (+180)

@@ -0,0 +1,180 @@
+import { convertToR1Format } from "../r1-format"
+import { Anthropic } from "@anthropic-ai/sdk"
+import OpenAI from "openai"
+
+describe("convertToR1Format", () => {
+	it("should convert basic text messages", () => {
+		const input: Anthropic.Messages.MessageParam[] = [
+			{ role: "user", content: "Hello" },
+			{ role: "assistant", content: "Hi there" },
+		]
+
+		const expected: OpenAI.Chat.ChatCompletionMessageParam[] = [
+			{ role: "user", content: "Hello" },
+			{ role: "assistant", content: "Hi there" },
+		]
+
+		expect(convertToR1Format(input)).toEqual(expected)
+	})
+
+	it("should merge consecutive messages with same role", () => {
+		const input: Anthropic.Messages.MessageParam[] = [
+			{ role: "user", content: "Hello" },
+			{ role: "user", content: "How are you?" },
+			{ role: "assistant", content: "Hi!" },
+			{ role: "assistant", content: "I'm doing well" },
+		]
+
+		const expected: OpenAI.Chat.ChatCompletionMessageParam[] = [
+			{ role: "user", content: "Hello\nHow are you?" },
+			{ role: "assistant", content: "Hi!\nI'm doing well" },
+		]
+
+		expect(convertToR1Format(input)).toEqual(expected)
+	})
+
+	it("should handle image content", () => {
+		const input: Anthropic.Messages.MessageParam[] = [
+			{
+				role: "user",
+				content: [
+					{
+						type: "image",
+						source: {
+							type: "base64",
+							media_type: "image/jpeg",
+							data: "base64data",
+						},
+					},
+				],
+			},
+		]
+
+		const expected: OpenAI.Chat.ChatCompletionMessageParam[] = [
+			{
+				role: "user",
+				content: [
+					{
+						type: "image_url",
+						image_url: {
+							url: "data:image/jpeg;base64,base64data",
+						},
+					},
+				],
+			},
+		]
+
+		expect(convertToR1Format(input)).toEqual(expected)
+	})
+
+	it("should handle mixed text and image content", () => {
+		const input: Anthropic.Messages.MessageParam[] = [
+			{
+				role: "user",
+				content: [
+					{ type: "text", text: "Check this image:" },
+					{
+						type: "image",
+						source: {
+							type: "base64",
+							media_type: "image/jpeg",
+							data: "base64data",
+						},
+					},
+				],
+			},
+		]
+
+		const expected: OpenAI.Chat.ChatCompletionMessageParam[] = [
+			{
+				role: "user",
+				content: [
+					{ type: "text", text: "Check this image:" },
+					{
+						type: "image_url",
+						image_url: {
+							url: "data:image/jpeg;base64,base64data",
+						},
+					},
+				],
+			},
+		]
+
+		expect(convertToR1Format(input)).toEqual(expected)
+	})
+
+	it("should merge mixed content messages with same role", () => {
+		const input: Anthropic.Messages.MessageParam[] = [
+			{
+				role: "user",
+				content: [
+					{ type: "text", text: "First image:" },
+					{
+						type: "image",
+						source: {
+							type: "base64",
+							media_type: "image/jpeg",
+							data: "image1",
+						},
+					},
+				],
+			},
+			{
+				role: "user",
+				content: [
+					{ type: "text", text: "Second image:" },
+					{
+						type: "image",
+						source: {
+							type: "base64",
+							media_type: "image/png",
+							data: "image2",
+						},
+					},
+				],
+			},
+		]
+
+		const expected: OpenAI.Chat.ChatCompletionMessageParam[] = [
+			{
+				role: "user",
+				content: [
+					{ type: "text", text: "First image:" },
+					{
+						type: "image_url",
+						image_url: {
+							url: "data:image/jpeg;base64,image1",
+						},
+					},
+					{ type: "text", text: "Second image:" },
+					{
+						type: "image_url",
+						image_url: {
+							url: "data:image/png;base64,image2",
+						},
+					},
+				],
+			},
+		]
+
+		expect(convertToR1Format(input)).toEqual(expected)
+	})
+
+	it("should handle empty messages array", () => {
+		expect(convertToR1Format([])).toEqual([])
+	})
+
+	it("should handle messages with empty content", () => {
+		const input: Anthropic.Messages.MessageParam[] = [
+			{ role: "user", content: "" },
+			{ role: "assistant", content: "" },
+		]
+
+		const expected: OpenAI.Chat.ChatCompletionMessageParam[] = [
+			{ role: "user", content: "" },
+			{ role: "assistant", content: "" },
+		]
+
+		expect(convertToR1Format(input)).toEqual(expected)
+	})
+})
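
Only the test suite appears in this view; the r1-format module it exercises is not shown. The following is a minimal sketch of a convertToR1Format that would satisfy this suite, assuming the same Anthropic and OpenAI types — not the actual src/api/transform/r1-format.ts:

import { Anthropic } from "@anthropic-ai/sdk"
import OpenAI from "openai"

type Part = OpenAI.Chat.ChatCompletionContentPart

// Sketch: convert each Anthropic message to OpenAI shape, then merge
// consecutive same-role messages, as the tests above pin down.
export function convertToR1Format(
	messages: Anthropic.Messages.MessageParam[],
): OpenAI.Chat.ChatCompletionMessageParam[] {
	// Loose internal shape (either role may carry image parts); narrowed on return.
	type Loose = { role: "user" | "assistant"; content: string | Part[] }
	const merged: Loose[] = []

	for (const message of messages) {
		// Strings pass through; block arrays become content parts. Only text
		// and base64 image blocks are handled, matching the tested cases.
		const content: string | Part[] =
			typeof message.content === "string"
				? message.content
				: message.content.map((block): Part => {
						if (block.type === "image" && block.source.type === "base64") {
							return {
								type: "image_url",
								image_url: { url: `data:${block.source.media_type};base64,${block.source.data}` },
							}
						}
						return { type: "text", text: block.type === "text" ? block.text : "" }
					})

		const previous = merged[merged.length - 1]
		if (previous && previous.role === message.role) {
			// Merge into the previous message: join plain strings with "\n";
			// otherwise concatenate part arrays, promoting strings to text parts.
			if (typeof previous.content === "string" && typeof content === "string") {
				previous.content = `${previous.content}\n${content}`
			} else {
				const toParts = (c: string | Part[]): Part[] =>
					typeof c === "string" ? [{ type: "text", text: c }] : c
				previous.content = [...toParts(previous.content), ...toParts(content)]
			}
		} else {
			merged.push({ role: message.role, content })
		}
	}

	// Cast: the sketch glosses over OpenAI's stricter per-role content typing.
	return merged as unknown as OpenAI.Chat.ChatCompletionMessageParam[]
}

The merge step is the point of the format: R1-style endpoints reject conversations with consecutive same-role messages, which is why the suite pins down both the string-join and the part-concatenation behavior.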
