Skip to content

Commit 254a86f

Browse files
authored
feat(core): Add missing openai tool calls attributes (#17226)
This PR adds missing tool-call attributes: we add the `gen_ai.response.tool_calls` attribute for the OpenAI Chat Completions and Responses APIs, supporting both streaming and non-streaming requests. Core changes: 1. Request side — capture available tools: extract `tools` + `web_search_options` from the request params and set the `gen_ai.request.available_tools` attribute. 2. Response side — capture actual tool calls: for Chat Completions, extract from `response.choices[].message.tool_calls`; for the Responses API, extract from `response.output[]` (filtered to `type === 'function_call'`); set the `gen_ai.response.tool_calls` attribute for both. 3. Streaming support (in streaming.ts): tool calls are accumulated during streaming, and the `recordOutputs` privacy setting is respected.
1 parent abbcc67 commit 254a86f

File tree

12 files changed

+886
-29
lines changed

12 files changed

+886
-29
lines changed

.size-limit.js

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -233,7 +233,7 @@ module.exports = [
233233
import: createImport('init'),
234234
ignore: [...builtinModules, ...nodePrefixedBuiltinModules],
235235
gzip: true,
236-
limit: '146 KB',
236+
limit: '147 KB',
237237
},
238238
{
239239
name: '@sentry/node - without tracing',
Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,11 @@
1+
import * as Sentry from '@sentry/node';
import { loggingTransport } from '@sentry-internal/node-integration-tests';

// Integration-test scenario setup: OpenAI instrumentation with
// sendDefaultPii enabled. Events are routed through the logging
// transport so the test harness can inspect them.
Sentry.init({
  dsn: 'https://[email protected]/1337',
  release: '1.0',
  tracesSampleRate: 1.0,
  sendDefaultPii: true,
  transport: loggingTransport,
  integrations: [Sentry.openAIIntegration()],
});
Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,11 @@
1+
import * as Sentry from '@sentry/node';
import { loggingTransport } from '@sentry-internal/node-integration-tests';

// Integration-test scenario setup: OpenAI instrumentation with
// sendDefaultPii disabled. Events are routed through the logging
// transport so the test harness can inspect them.
Sentry.init({
  dsn: 'https://[email protected]/1337',
  release: '1.0',
  tracesSampleRate: 1.0,
  sendDefaultPii: false,
  transport: loggingTransport,
  integrations: [Sentry.openAIIntegration()],
});
Lines changed: 297 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,297 @@
1+
import { instrumentOpenAiClient } from '@sentry/core';
2+
import * as Sentry from '@sentry/node';
3+
4+
/**
 * Minimal OpenAI-client double that returns canned tool-call responses for
 * both the Chat Completions and Responses APIs, in streaming and
 * non-streaming flavours. Shapes mirror the real SDK so the Sentry
 * instrumentation can extract gen_ai tool-call attributes from them.
 */
class MockOpenAIToolCalls {
  constructor(config) {
    this.apiKey = config.apiKey;

    this.chat = {
      completions: {
        create: async request => {
          // Pretend the API takes a little time to answer.
          await new Promise(done => setTimeout(done, 10));

          // Streaming requests get an async generator instead of a plain object.
          if (request.stream) {
            return this._createChatCompletionToolCallsStream(request);
          }

          // Canned non-streaming chat completion containing one tool call.
          return {
            id: 'chatcmpl-tools-123',
            object: 'chat.completion',
            created: 1677652300,
            model: request.model,
            system_fingerprint: 'fp_tools_123',
            choices: [
              {
                index: 0,
                message: {
                  role: 'assistant',
                  content: null,
                  tool_calls: [
                    {
                      id: 'call_12345xyz',
                      type: 'function',
                      function: { name: 'get_weather', arguments: '{"latitude":48.8566,"longitude":2.3522}' },
                    },
                  ],
                },
                finish_reason: 'tool_calls',
              },
            ],
            usage: { prompt_tokens: 15, completion_tokens: 25, total_tokens: 40 },
          };
        },
      },
    };

    this.responses = {
      create: async request => {
        await new Promise(done => setTimeout(done, 10));

        if (request.stream) {
          return this._createResponsesApiToolCallsStream(request);
        }

        // Canned non-streaming Responses API result with one function_call item.
        return {
          id: 'resp_tools_789',
          object: 'response',
          created_at: 1677652320,
          model: request.model,
          input_text: Array.isArray(request.input) ? JSON.stringify(request.input) : request.input,
          status: 'completed',
          output: [
            {
              type: 'function_call',
              id: 'fc_12345xyz',
              call_id: 'call_12345xyz',
              name: 'get_weather',
              arguments: '{"latitude":48.8566,"longitude":2.3522}',
            },
          ],
          usage: { input_tokens: 8, output_tokens: 12, total_tokens: 20 },
        };
      },
    };
  }

  // Chat Completions streaming: the first chunk opens the tool call, the
  // second delivers the argument payload plus usage and the finish reason.
  async *_createChatCompletionToolCallsStream(request) {
    yield {
      id: 'chatcmpl-stream-tools-123',
      object: 'chat.completion.chunk',
      created: 1677652305,
      model: request.model,
      choices: [
        {
          index: 0,
          delta: {
            role: 'assistant',
            tool_calls: [
              { index: 0, id: 'call_12345xyz', type: 'function', function: { name: 'get_weather', arguments: '' } },
            ],
          },
          finish_reason: null,
        },
      ],
    };

    yield {
      id: 'chatcmpl-stream-tools-123',
      object: 'chat.completion.chunk',
      created: 1677652305,
      model: request.model,
      choices: [
        {
          index: 0,
          delta: {
            tool_calls: [{ index: 0, function: { arguments: '{"latitude":48.8566,"longitude":2.3522}' } }],
          },
          finish_reason: 'tool_calls',
        },
      ],
      usage: { prompt_tokens: 15, completion_tokens: 25, total_tokens: 40 },
    };
  }

  // Responses API streaming: emits the full event sequence
  // created -> output_item.added -> arguments delta/done -> output_item.done -> completed.
  async *_createResponsesApiToolCallsStream(request) {
    const responseId = 'resp_stream_tools_789';

    yield {
      type: 'response.created',
      response: {
        id: responseId,
        object: 'response',
        created_at: 1677652310,
        model: request.model,
        status: 'in_progress',
        output: [],
        usage: { input_tokens: 0, output_tokens: 0, total_tokens: 0 },
      },
      sequence_number: 1,
    };

    yield {
      type: 'response.output_item.added',
      response_id: responseId,
      output_index: 0,
      item: { type: 'function_call', id: 'fc_12345xyz', call_id: 'call_12345xyz', name: 'get_weather', arguments: '' },
      sequence_number: 2,
    };

    yield {
      type: 'response.function_call_arguments.delta',
      response_id: responseId,
      item_id: 'fc_12345xyz',
      output_index: 0,
      delta: '{"latitude":48.8566,"longitude":2.3522}',
      sequence_number: 3,
    };

    yield {
      type: 'response.function_call_arguments.done',
      response_id: responseId,
      item_id: 'fc_12345xyz',
      output_index: 0,
      arguments: '{"latitude":48.8566,"longitude":2.3522}',
      sequence_number: 4,
    };

    yield {
      type: 'response.output_item.done',
      response_id: responseId,
      output_index: 0,
      item: {
        type: 'function_call',
        id: 'fc_12345xyz',
        call_id: 'call_12345xyz',
        name: 'get_weather',
        arguments: '{"latitude":48.8566,"longitude":2.3522}',
      },
      sequence_number: 5,
    };

    yield {
      type: 'response.completed',
      response: {
        id: responseId,
        object: 'response',
        created_at: 1677652310,
        model: request.model,
        status: 'completed',
        output: [
          {
            type: 'function_call',
            id: 'fc_12345xyz',
            call_id: 'call_12345xyz',
            name: 'get_weather',
            arguments: '{"latitude":48.8566,"longitude":2.3522}',
          },
        ],
        usage: { input_tokens: 8, output_tokens: 12, total_tokens: 20 },
      },
      sequence_number: 6,
    };
  }
}
236+
237+
/**
 * Drives four instrumented requests (Chat Completions and Responses API,
 * each streaming and non-streaming) against the mock client inside a root
 * span, so the test harness can assert tool-call attributes on the
 * resulting transaction.
 */
async function run() {
  await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
    const client = instrumentOpenAiClient(new MockOpenAIToolCalls({ apiKey: 'mock-api-key' }));

    // Tool definition sent with every request.
    const weatherTool = {
      type: 'function',
      function: {
        name: 'get_weather',
        description: 'Get the current weather in a given location',
        parameters: {
          type: 'object',
          properties: {
            latitude: { type: 'number', description: 'The latitude of the location' },
            longitude: { type: 'number', description: 'The longitude of the location' },
          },
          required: ['latitude', 'longitude'],
        },
      },
    };

    const message = { role: 'user', content: 'What is the weather like in Paris today?' };

    // Fully consume a streaming response, discarding the chunks.
    const drain = async stream => {
      for await (const chunk of stream) void chunk;
    };

    // 1: Chat completion with tools (non-streaming)
    await client.chat.completions.create({ model: 'gpt-4', messages: [message], tools: [weatherTool] });

    // 2: Chat completion with tools (streaming)
    await drain(
      await client.chat.completions.create({ model: 'gpt-4', messages: [message], tools: [weatherTool], stream: true }),
    );

    // 3: Responses API with tools (non-streaming)
    await client.responses.create({ model: 'gpt-4', input: [message], tools: [weatherTool] });

    // 4: Responses API with tools (streaming)
    await drain(
      await client.responses.create({ model: 'gpt-4', input: [message], tools: [weatherTool], stream: true }),
    );
  });
}

run();

0 commit comments

Comments
 (0)