Commit d935ad1: prompts
1 parent 281a7b2 commit d935ad1

19 files changed, +324 -9 lines

examples/basic/prompt_template.py

Lines changed: 61 additions & 0 deletions

@@ -0,0 +1,61 @@
+import argparse
+import asyncio
+import random
+
+from agents import Agent, GenerateDynamicPromptData, Runner
+
+
+class DynamicContext:
+    def __init__(self):
+        self.poem_style = random.choice(["limerick", "palindrome", "haiku", "ballad"])
+        print(f"[debug] DynamicContext initialized with poem_style: {self.poem_style}")
+
+
+async def _get_dynamic_prompt(data: GenerateDynamicPromptData):
+    ctx: DynamicContext = data.context.context
+    return {
+        "id": "pmpt_6850729e8ba481939fd439e058c69ee004afaa19c520b78b",
+        "version": "1",
+        "variables": {
+            "poem_style": ctx.poem_style,
+        },
+    }
+
+
+async def dynamic_prompt():
+    context = DynamicContext()
+
+    agent = Agent(
+        name="Assistant",
+        prompt=_get_dynamic_prompt,
+    )
+
+    result = await Runner.run(agent, "Tell me about recursion in programming.", context=context)
+    print(result.final_output)
+
+
+async def static_prompt():
+    agent = Agent(
+        name="Assistant",
+        prompt={
+            "id": "pmpt_6850729e8ba481939fd439e058c69ee004afaa19c520b78b",
+            "version": "1",
+            "variables": {
+                "poem_style": "limerick",
+            },
+        },
+    )
+
+    result = await Runner.run(agent, "Tell me about recursion in programming.")
+    print(result.final_output)
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--dynamic", action="store_true")
+    args = parser.parse_args()
+
+    if args.dynamic:
+        asyncio.run(dynamic_prompt())
+    else:
+        asyncio.run(static_prompt())
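The example is driven by the --dynamic flag: python examples/basic/prompt_template.py exercises the static prompt dict, while adding --dynamic selects the function-based variant; both presumably require a valid OPENAI_API_KEY and access to the stored prompt referenced by the id above.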

pyproject.toml

Lines changed: 1 addition & 1 deletion

@@ -7,7 +7,7 @@ requires-python = ">=3.9"
 license = "MIT"
 authors = [{ name = "OpenAI", email = "[email protected]" }]
 dependencies = [
-    "openai>=1.81.0",
+    "openai>=1.87.0",
     "pydantic>=2.10, <3",
     "griffe>=1.5.6, <2",
     "typing-extensions>=4.12.2, <5",

src/agents/__init__.py

Lines changed: 4 additions & 0 deletions

@@ -45,6 +45,7 @@
 from .models.openai_chatcompletions import OpenAIChatCompletionsModel
 from .models.openai_provider import OpenAIProvider
 from .models.openai_responses import OpenAIResponsesModel
+from .prompts import DynamicPromptFunction, GenerateDynamicPromptData, Prompt
 from .repl import run_demo_loop
 from .result import RunResult, RunResultStreaming
 from .run import RunConfig, Runner
@@ -178,6 +179,9 @@ def enable_verbose_stdout_logging():
     "AgentsException",
     "InputGuardrailTripwireTriggered",
     "OutputGuardrailTripwireTriggered",
+    "DynamicPromptFunction",
+    "GenerateDynamicPromptData",
+    "Prompt",
     "MaxTurnsExceeded",
     "ModelBehaviorError",
     "UserError",

src/agents/agent.py

Lines changed: 14 additions & 0 deletions

@@ -7,6 +7,7 @@
 from dataclasses import dataclass, field
 from typing import TYPE_CHECKING, Any, Callable, Generic, Literal, cast
 
+from openai.types.responses.response_prompt_param import ResponsePromptParam
 from typing_extensions import NotRequired, TypeAlias, TypedDict
 
 from .agent_output import AgentOutputSchemaBase
@@ -17,6 +18,7 @@
 from .mcp import MCPUtil
 from .model_settings import ModelSettings
 from .models.interface import Model
+from .prompts import DynamicPromptFunction, Prompt, PromptUtil
 from .run_context import RunContextWrapper, TContext
 from .tool import FunctionTool, FunctionToolResult, Tool, function_tool
 from .util import _transforms
@@ -95,6 +97,12 @@ class Agent(Generic[TContext]):
     return a string.
     """
 
+    prompt: Prompt | DynamicPromptFunction | None = None
+    """A prompt object (or a function that returns a Prompt). Prompts allow you to dynamically
+    configure the instructions, tools and other config for an agent outside of your code. Only
+    usable with OpenAI models, using the Responses API.
+    """
+
     handoff_description: str | None = None
     """A description of the agent. This is used when the agent is used as a handoff, so that an
     LLM knows what it does and when to invoke it.
@@ -242,6 +250,12 @@ async def get_system_prompt(self, run_context: RunContextWrapper[TContext]) -> s
 
         return None
 
+    async def get_prompt(
+        self, run_context: RunContextWrapper[TContext]
+    ) -> ResponsePromptParam | None:
+        """Get the prompt for the agent."""
+        return await PromptUtil.to_model_input(self.prompt, run_context, self)
+
     async def get_mcp_tools(self) -> list[Tool]:
         """Fetches the available tools from the MCP servers."""
         convert_schemas_to_strict = self.mcp_config.get("convert_schemas_to_strict", False)
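The new get_prompt hook is the piece the run loop can call to turn the agent's prompt field into a Responses API parameter. A rough sketch of invoking it directly, assuming RunContextWrapper can be constructed with just a context (as the type hints above suggest) and using a placeholder prompt id:

import asyncio

from agents import Agent, RunContextWrapper


async def main() -> None:
    agent = Agent(
        name="Assistant",
        prompt={"id": "pmpt_your_prompt_id", "version": "1"},  # placeholder id
    )
    # get_prompt() hands self.prompt, the run context and the agent to
    # PromptUtil.to_model_input and returns a ResponsePromptParam, or None
    # when no prompt is configured.
    run_context = RunContextWrapper(context=None)
    print(await agent.get_prompt(run_context))


asyncio.run(main())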

src/agents/extensions/models/litellm_model.py

Lines changed: 7 additions & 1 deletion

@@ -71,6 +71,7 @@ async def get_response(
         handoffs: list[Handoff],
         tracing: ModelTracing,
         previous_response_id: str | None,
+        prompt: Any | None = None,
     ) -> ModelResponse:
         with generation_span(
             model=str(self.model),
@@ -88,6 +89,7 @@ async def get_response(
                 span_generation,
                 tracing,
                 stream=False,
+                prompt=prompt,
             )
 
             assert isinstance(response.choices[0], litellm.types.utils.Choices)
@@ -153,8 +155,8 @@ async def stream_response(
         output_schema: AgentOutputSchemaBase | None,
         handoffs: list[Handoff],
         tracing: ModelTracing,
-        *,
         previous_response_id: str | None,
+        prompt: Any | None = None,
     ) -> AsyncIterator[TResponseStreamEvent]:
         with generation_span(
             model=str(self.model),
@@ -172,6 +174,7 @@ async def stream_response(
                 span_generation,
                 tracing,
                 stream=True,
+                prompt=prompt,
             )
 
             final_response: Response | None = None
@@ -202,6 +205,7 @@ async def _fetch_response(
         span: Span[GenerationSpanData],
         tracing: ModelTracing,
         stream: Literal[True],
+        prompt: Any | None = None,
     ) -> tuple[Response, AsyncStream[ChatCompletionChunk]]: ...
 
     @overload
@@ -216,6 +220,7 @@ async def _fetch_response(
         span: Span[GenerationSpanData],
         tracing: ModelTracing,
         stream: Literal[False],
+        prompt: Any | None = None,
     ) -> litellm.types.utils.ModelResponse: ...
 
     async def _fetch_response(
@@ -229,6 +234,7 @@ async def _fetch_response(
         span: Span[GenerationSpanData],
         tracing: ModelTracing,
         stream: bool = False,
+        prompt: Any | None = None,
     ) -> litellm.types.utils.ModelResponse | tuple[Response, AsyncStream[ChatCompletionChunk]]:
         converted_messages = Converter.items_to_messages(input)
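Note that this path types the new parameter loosely as Any | None rather than ResponsePromptParam, which keeps the LiteLLM signatures compatible with the updated Model interface below without pulling in the Responses-specific type.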

src/agents/models/interface.py

Lines changed: 6 additions & 0 deletions

@@ -5,6 +5,8 @@
 from collections.abc import AsyncIterator
 from typing import TYPE_CHECKING
 
+from openai.types.responses.response_prompt_param import ResponsePromptParam
+
 from ..agent_output import AgentOutputSchemaBase
 from ..handoffs import Handoff
 from ..items import ModelResponse, TResponseInputItem, TResponseStreamEvent
@@ -46,6 +48,7 @@ async def get_response(
         tracing: ModelTracing,
         *,
         previous_response_id: str | None,
+        prompt: ResponsePromptParam | None,
     ) -> ModelResponse:
         """Get a response from the model.
 
@@ -59,6 +62,7 @@
             tracing: Tracing configuration.
             previous_response_id: the ID of the previous response. Generally not used by the model,
                 except for the OpenAI Responses API.
+            prompt: The prompt config to use for the model.
 
         Returns:
             The full model response.
@@ -77,6 +81,7 @@ def stream_response(
         tracing: ModelTracing,
         *,
         previous_response_id: str | None,
+        prompt: ResponsePromptParam | None,
     ) -> AsyncIterator[TResponseStreamEvent]:
         """Stream a response from the model.
 
@@ -90,6 +95,7 @@
             tracing: Tracing configuration.
             previous_response_id: the ID of the previous response. Generally not used by the model,
                 except for the OpenAI Responses API.
+            prompt: The prompt config to use for the model.
 
         Returns:
             An iterator of response stream events, in OpenAI Responses format.
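Because prompt is now part of the abstract Model interface, any third-party Model implementation has to accept the keyword even if it ignores it. A rough sketch of a delegating wrapper, assuming the run loop passes prompt by keyword and that forwarding the remaining parameters via *args/**kwargs is acceptable (their names are not shown in these hunks):

from collections.abc import AsyncIterator
from typing import Any

from agents.items import ModelResponse, TResponseStreamEvent
from agents.models.interface import Model


class PromptLoggingModel(Model):
    """Toy wrapper that logs the new prompt argument and delegates to another model."""

    def __init__(self, wrapped: Model) -> None:
        self.wrapped = wrapped

    async def get_response(
        self, *args: Any, prompt: Any = None, **kwargs: Any
    ) -> ModelResponse:
        print(f"[debug] prompt config: {prompt}")
        return await self.wrapped.get_response(*args, prompt=prompt, **kwargs)

    def stream_response(
        self, *args: Any, prompt: Any = None, **kwargs: Any
    ) -> AsyncIterator[TResponseStreamEvent]:
        print(f"[debug] prompt config: {prompt}")
        return self.wrapped.stream_response(*args, prompt=prompt, **kwargs)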

src/agents/models/openai_chatcompletions.py

Lines changed: 8 additions & 1 deletion

@@ -9,6 +9,7 @@
 from openai.types import ChatModel
 from openai.types.chat import ChatCompletion, ChatCompletionChunk
 from openai.types.responses import Response
+from openai.types.responses.response_prompt_param import ResponsePromptParam
 from openai.types.responses.response_usage import InputTokensDetails, OutputTokensDetails
 
 from .. import _debug
@@ -53,6 +54,7 @@ async def get_response(
         handoffs: list[Handoff],
         tracing: ModelTracing,
         previous_response_id: str | None,
+        prompt: ResponsePromptParam | None = None,
     ) -> ModelResponse:
         with generation_span(
             model=str(self.model),
@@ -69,6 +71,7 @@ async def get_response(
                 span_generation,
                 tracing,
                 stream=False,
+                prompt=prompt,
             )
 
             first_choice = response.choices[0]
@@ -136,8 +139,8 @@ async def stream_response(
         output_schema: AgentOutputSchemaBase | None,
         handoffs: list[Handoff],
         tracing: ModelTracing,
-        *,
         previous_response_id: str | None,
+        prompt: ResponsePromptParam | None = None,
     ) -> AsyncIterator[TResponseStreamEvent]:
         """
         Yields a partial message as it is generated, as well as the usage information.
@@ -157,6 +160,7 @@ async def stream_response(
                 span_generation,
                 tracing,
                 stream=True,
+                prompt=prompt,
             )
 
             final_response: Response | None = None
@@ -187,6 +191,7 @@ async def _fetch_response(
         span: Span[GenerationSpanData],
         tracing: ModelTracing,
         stream: Literal[True],
+        prompt: ResponsePromptParam | None = None,
     ) -> tuple[Response, AsyncStream[ChatCompletionChunk]]: ...
 
     @overload
@@ -201,6 +206,7 @@ async def _fetch_response(
         span: Span[GenerationSpanData],
         tracing: ModelTracing,
         stream: Literal[False],
+        prompt: ResponsePromptParam | None = None,
     ) -> ChatCompletion: ...
 
     async def _fetch_response(
@@ -214,6 +220,7 @@ async def _fetch_response(
         span: Span[GenerationSpanData],
         tracing: ModelTracing,
         stream: bool = False,
+        prompt: ResponsePromptParam | None = None,
     ) -> ChatCompletion | tuple[Response, AsyncStream[ChatCompletionChunk]]:
         converted_messages = Converter.items_to_messages(input)

src/agents/models/openai_responses.py

Lines changed: 9 additions & 0 deletions

@@ -17,6 +17,7 @@
     WebSearchToolParam,
     response_create_params,
 )
+from openai.types.responses.response_prompt_param import ResponsePromptParam
 
 from .. import _debug
 from ..agent_output import AgentOutputSchemaBase
@@ -74,6 +75,7 @@ async def get_response(
         handoffs: list[Handoff],
         tracing: ModelTracing,
         previous_response_id: str | None,
+        prompt: ResponsePromptParam | None = None,
     ) -> ModelResponse:
         with response_span(disabled=tracing.is_disabled()) as span_response:
             try:
@@ -86,6 +88,7 @@ async def get_response(
                     handoffs,
                     previous_response_id,
                     stream=False,
+                    prompt=prompt,
                 )
 
                 if _debug.DONT_LOG_MODEL_DATA:
@@ -141,6 +144,7 @@ async def stream_response(
         handoffs: list[Handoff],
         tracing: ModelTracing,
         previous_response_id: str | None,
+        prompt: ResponsePromptParam | None = None,
     ) -> AsyncIterator[ResponseStreamEvent]:
         """
         Yields a partial message as it is generated, as well as the usage information.
@@ -156,6 +160,7 @@ async def stream_response(
                 handoffs,
                 previous_response_id,
                 stream=True,
+                prompt=prompt,
             )
 
             final_response: Response | None = None
@@ -192,6 +197,7 @@ async def _fetch_response(
         handoffs: list[Handoff],
         previous_response_id: str | None,
         stream: Literal[True],
+        prompt: ResponsePromptParam | None = None,
     ) -> AsyncStream[ResponseStreamEvent]: ...
 
     @overload
@@ -205,6 +211,7 @@ async def _fetch_response(
         handoffs: list[Handoff],
         previous_response_id: str | None,
         stream: Literal[False],
+        prompt: ResponsePromptParam | None = None,
     ) -> Response: ...
 
     async def _fetch_response(
@@ -217,6 +224,7 @@ async def _fetch_response(
         handoffs: list[Handoff],
         previous_response_id: str | None,
         stream: Literal[True] | Literal[False] = False,
+        prompt: ResponsePromptParam | None = None,
     ) -> Response | AsyncStream[ResponseStreamEvent]:
         list_input = ItemHelpers.input_to_new_input_list(input)
 
@@ -252,6 +260,7 @@ async def _fetch_response(
             input=list_input,
             include=converted_tools.includes,
             tools=converted_tools.tools,
+            prompt=self._non_null_or_not_given(prompt),
             temperature=self._non_null_or_not_given(model_settings.temperature),
             top_p=self._non_null_or_not_given(model_settings.top_p),
             truncation=self._non_null_or_not_given(model_settings.truncation),
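The last hunk is where the prompt config actually reaches the API: it is forwarded to the client's responses.create call alongside the other settings. A rough equivalent as a direct call against the openai SDK, assuming openai>=1.87.0; the model name and prompt id are placeholders:

import asyncio

from openai import AsyncOpenAI


async def main() -> None:
    client = AsyncOpenAI()
    # Stored prompt reference resolved server-side; the variables fill the
    # prompt template the same way the agent-level prompt config does above.
    response = await client.responses.create(
        model="gpt-4.1",
        input="Tell me about recursion in programming.",
        prompt={
            "id": "pmpt_your_prompt_id",
            "version": "1",
            "variables": {"poem_style": "haiku"},
        },
    )
    print(response.output_text)


asyncio.run(main())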
