Skip to content

Add support for OpenAI spec #205

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit on Jun 20, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions .changeset/rotten-lemons-unite.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
---
"workers-ai-provider": minor
---

Adds support for Chat Completions API responses
Original file line number Diff line number Diff line change
Expand Up @@ -66,6 +66,12 @@ export function convertToWorkersAIChatMessages(prompt: LanguageModelV1Prompt): {
text += part.text;
break;
}

case "reasoning": {
text += part.text;
break;
}

case "tool-call": {
text = JSON.stringify({
name: part.toolName,
Expand All @@ -84,7 +90,7 @@ export function convertToWorkersAIChatMessages(prompt: LanguageModelV1Prompt): {
}
default: {
const exhaustiveCheck = part;
throw new Error(`Unsupported part: ${exhaustiveCheck}`);
throw new Error(`Unsupported part type: ${exhaustiveCheck.type}`);
}
}
}
Expand All @@ -104,6 +110,7 @@ export function convertToWorkersAIChatMessages(prompt: LanguageModelV1Prompt): {

break;
}

case "tool": {
for (const toolResponse of content) {
messages.push({
Expand All @@ -114,6 +121,7 @@ export function convertToWorkersAIChatMessages(prompt: LanguageModelV1Prompt): {
}
break;
}

default: {
const exhaustiveCheck = role satisfies never;
throw new Error(`Unsupported role: ${exhaustiveCheck}`);
Expand Down
5 changes: 5 additions & 0 deletions packages/workers-ai-provider/src/streaming.ts
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,11 @@ export function getMappedStream(response: Response) {
textDelta: chunk.response,
type: "text-delta",
});
chunk?.choices?.[0]?.delta?.reasoning_content?.length &&
controller.enqueue({
type: "reasoning",
textDelta: chunk.choices[0].delta.reasoning_content,
});
}

if (partialToolCalls.length > 0) {
Expand Down
10 changes: 10 additions & 0 deletions packages/workers-ai-provider/src/utils.ts
Original file line number Diff line number Diff line change
Expand Up @@ -252,6 +252,16 @@ export function processToolCalls(output: any): LanguageModelV1FunctionToolCall[]
});
}

if (
output?.choices?.[0]?.message?.tool_calls &&
Array.isArray(output.choices[0].message.tool_calls)
) {
return output.choices[0].message.tool_calls.map((toolCall: any) => {
const processedToolCall = processToolCall(toolCall);
return processedToolCall;
});
}

return [];
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -177,6 +177,8 @@ export class WorkersAIChatLanguageModel implements LanguageModelV1 {
? JSON.stringify(output.response) // ai-sdk expects a string here
: output.response,
toolCalls: processToolCalls(output),
// @ts-ignore: Missing types
reasoning: output?.choices?.[0]?.message?.reasoning_content,
usage: mapWorkersAIUsage(output),
warnings,
};
Expand Down Expand Up @@ -218,6 +220,12 @@ export class WorkersAIChatLanguageModel implements LanguageModelV1 {
});
}
}
if (response.reasoning && typeof response.reasoning === "string") {
controller.enqueue({
type: "reasoning",
textDelta: response.reasoning,
});
}
controller.enqueue({
finishReason: mapWorkersAIFinishReason(response),
type: "finish",
Expand Down
199 changes: 199 additions & 0 deletions packages/workers-ai-provider/test/stream-text.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -291,6 +291,95 @@ describe("REST API - Streaming Text Tests", () => {
},
]);
});

it("should handle openai tool call inside response when last message is user message", async () => {
	// Mock the Workers AI REST endpoint with an OpenAI Chat Completions-shaped
	// payload (result.choices[0].message.tool_calls) instead of the legacy
	// `{ response, tool_calls }` shape, to verify the provider parses both.
	server.use(
		http.post(
			`https://api.cloudflare.com/client/v4/accounts/${TEST_ACCOUNT_ID}/ai/run/${TEST_MODEL}`,
			// `doStream` calls `doGenerate` underneath when the last message is from the user
			async () => {
				return HttpResponse.json({
					result: {
						id: "chatcmpl-2d657a54f93d4ecbb966cc50efd42819",
						object: "chat.completion",
						created: 1750346826,
						model: "@cf/qwen/qwen3-30b-a3b-fp8",
						choices: [
							{
								index: 0,
								message: {
									role: "assistant",
									reasoning_content:
										"\nOkay, the user is asking for the weather in London. Let me check the tools available. There's a function called get_weather that takes a location parameter. Since London is the location mentioned, I need to call that function with \"London\" as the argument. I'll make sure the JSON is correctly formatted and enclosed within the tool_call tags.\n",
									content: "\n\n",
									tool_calls: [
										{
											id: "chatcmpl-tool-c267de54771c4833a823f423f0def197",
											type: "function",
											function: {
												name: "get_weather",
												arguments: '{"location": "London"}',
											},
										},
									],
								},
								logprobs: null,
								finish_reason: "tool_calls",
								stop_reason: null,
							},
						],
						usage: {
							prompt_tokens: 169,
							completion_tokens: 94,
							total_tokens: 263,
						},
						prompt_logprobs: null,
					},
				});
			},
		),
	);

	const workersai = createWorkersAI({
		accountId: TEST_ACCOUNT_ID,
		apiKey: TEST_API_KEY,
	});

	const result = await streamText({
		model: workersai(TEST_MODEL),
		prompt: "Get the weather information for London",
		tools: {
			get_weather: {
				description: "Get the weather in a location",
				execute: async ({ location }) => ({
					location,
					weather: location === "London" ? "Raining" : "Sunny",
				}),
				parameters: z.object({
					location: z.string().describe("The location to get the weather for"),
				}),
			},
		},
	});

	// Collect only the tool-call parts of the stream. Typed as unknown[]
	// (rather than `any`, which would disable checking entirely); the
	// matcher assertions below do the structural validation.
	const toolCalls: unknown[] = [];

	for await (const chunk of result.fullStream) {
		if (chunk.type === "tool-call") {
			toolCalls.push(chunk);
		}
	}

	expect(toolCalls).toHaveLength(1);
	expect(toolCalls).toMatchObject([
		{
			args: { location: "London" },
			toolCallId: "chatcmpl-tool-c267de54771c4833a823f423f0def197",
			toolName: "get_weather",
			type: "tool-call",
		},
	]);
});
});

describe("Binding - Streaming Text Tests", () => {
Expand Down Expand Up @@ -500,6 +589,116 @@ describe("Binding - Streaming Text Tests", () => {
type: "tool-call",
});
});

it("should handle new tool call inside response when last message is user message", async () => {
	// Binding variant: the `run` binding returns an OpenAI Chat
	// Completions-shaped result directly. This payload carries TWO tool
	// calls plus `reasoning_content`, so the test verifies both multi-tool
	// parsing and that reasoning is surfaced as "reasoning" stream parts.
	const workersai = createWorkersAI({
		binding: {
			run: async () => {
				return {
					id: "chatcmpl-2d657a54f93d4ecbb966cc50efd42819",
					object: "chat.completion",
					created: 1750346826,
					model: "@cf/qwen/qwen3-30b-a3b-fp8",
					choices: [
						{
							index: 0,
							message: {
								role: "assistant",
								reasoning_content:
									"\nOkay, the user is asking for the weather in London. Let me check the tools available. There's a function called get_weather that takes a location parameter. Since London is the location mentioned, I need to call that function with \"London\" as the argument. I'll make sure the JSON is correctly formatted and enclosed within the tool_call tags.\n",
								content: "\n\n",
								tool_calls: [
									{
										id: "chatcmpl-tool-c267de54771c4833a823f423f0def197",
										type: "function",
										function: {
											name: "get_weather",
											arguments: '{"location": "London"}',
										},
									},
									{
										id: "chatcmpl-tool-a482f0e36b0c4190b9bee3fb61408a9c",
										type: "function",
										function: {
											name: "get_temperature",
											arguments: '{"location": "London"}',
										},
									},
								],
							},
							logprobs: null,
							finish_reason: "tool_calls",
							stop_reason: null,
						},
					],
					usage: {
						prompt_tokens: 169,
						completion_tokens: 94,
						total_tokens: 263,
					},
					prompt_logprobs: null,
				};
			},
		},
	});

	const result = await streamText({
		model: workersai(TEST_MODEL),
		prompt: "Get the weather information for London",
		tools: {
			get_temperature: {
				description: "Get the temperature in a location",
				// NOTE(review): returns the temperature under a `weather` key —
				// looks like a copy-paste slip from get_weather. Harmless here
				// (the tool result is never asserted), but confirm intent.
				execute: async ({ location }) => ({
					location,
					weather: location === "London" ? "80" : "100",
				}),
				parameters: z.object({
					location: z.string().describe("The location to get the temperature for"),
				}),
			},
			get_weather: {
				description: "Get the weather in a location",
				execute: async ({ location }) => ({
					location,
					weather: location === "London" ? "Raining" : "Sunny",
				}),
				parameters: z.object({
					location: z.string().describe("The location to get the weather for"),
				}),
			},
		},
	});

	// Typed accumulators: unknown[] instead of `any` keeps type checking
	// alive; the matcher assertions below validate the structure.
	const toolCalls: unknown[] = [];
	let reasoning = "";

	for await (const chunk of result.fullStream) {
		if (chunk.type === "tool-call") {
			toolCalls.push(chunk);
		}

		if (chunk.type === "reasoning") {
			reasoning += chunk.textDelta;
		}
	}

	expect(reasoning).toEqual(
		"\nOkay, the user is asking for the weather in London. Let me check the tools available. There's a function called get_weather that takes a location parameter. Since London is the location mentioned, I need to call that function with \"London\" as the argument. I'll make sure the JSON is correctly formatted and enclosed within the tool_call tags.\n",
	);
	expect(toolCalls).toHaveLength(2);
	expect(toolCalls[0]).toMatchObject({
		args: { location: "London" },
		toolCallId: "chatcmpl-tool-c267de54771c4833a823f423f0def197",
		toolName: "get_weather",
		type: "tool-call",
	});
	expect(toolCalls[1]).toMatchObject({
		args: { location: "London" },
		toolCallId: "chatcmpl-tool-a482f0e36b0c4190b9bee3fb61408a9c",
		toolName: "get_temperature",
		type: "tool-call",
	});
});
});

/**
Expand Down