diff --git a/src/agents/extensions/models/litellm_model.py b/src/agents/extensions/models/litellm_model.py
index c58a52dae..a06c61dc3 100644
--- a/src/agents/extensions/models/litellm_model.py
+++ b/src/agents/extensions/models/litellm_model.py
@@ -98,7 +98,11 @@ async def get_response(
                 logger.debug("Received model response")
             else:
                 logger.debug(
-                    f"LLM resp:\n{json.dumps(response.choices[0].message.model_dump(), indent=2)}\n"
+                    f"""LLM resp:\n{
+                        json.dumps(
+                            response.choices[0].message.model_dump(), indent=2, ensure_ascii=False
+                        )
+                    }\n"""
                 )
 
             if hasattr(response, "usage"):
@@ -269,8 +273,8 @@ async def _fetch_response(
         else:
             logger.debug(
                 f"Calling Litellm model: {self.model}\n"
-                f"{json.dumps(converted_messages, indent=2)}\n"
-                f"Tools:\n{json.dumps(converted_tools, indent=2)}\n"
+                f"{json.dumps(converted_messages, indent=2, ensure_ascii=False)}\n"
+                f"Tools:\n{json.dumps(converted_tools, indent=2, ensure_ascii=False)}\n"
                 f"Stream: {stream}\n"
                 f"Tool choice: {tool_choice}\n"
                 f"Response format: {response_format}\n"
diff --git a/src/agents/models/openai_chatcompletions.py b/src/agents/models/openai_chatcompletions.py
index 120d726db..6de431b4d 100644
--- a/src/agents/models/openai_chatcompletions.py
+++ b/src/agents/models/openai_chatcompletions.py
@@ -87,7 +87,7 @@ async def get_response(
                 if message is not None:
                     logger.debug(
                         "LLM resp:\n%s\n",
-                        json.dumps(message.model_dump(), indent=2),
+                        json.dumps(message.model_dump(), indent=2, ensure_ascii=False),
                     )
                 else:
                     finish_reason = first_choice.finish_reason if first_choice else "-"
@@ -256,8 +256,8 @@ async def _fetch_response(
             logger.debug("Calling LLM")
         else:
             logger.debug(
-                f"{json.dumps(converted_messages, indent=2)}\n"
-                f"Tools:\n{json.dumps(converted_tools, indent=2)}\n"
+                f"{json.dumps(converted_messages, indent=2, ensure_ascii=False)}\n"
+                f"Tools:\n{json.dumps(converted_tools, indent=2, ensure_ascii=False)}\n"
                 f"Stream: {stream}\n"
                 f"Tool choice: {tool_choice}\n"
                 f"Response format: {response_format}\n"
diff --git a/src/agents/models/openai_responses.py b/src/agents/models/openai_responses.py
index 637adaccd..a7ce62983 100644
--- a/src/agents/models/openai_responses.py
+++ b/src/agents/models/openai_responses.py
@@ -96,7 +96,13 @@ async def get_response(
             else:
                 logger.debug(
                     "LLM resp:\n"
-                    f"{json.dumps([x.model_dump() for x in response.output], indent=2)}\n"
+                    f"""{
+                        json.dumps(
+                            [x.model_dump() for x in response.output],
+                            indent=2,
+                            ensure_ascii=False,
+                        )
+                    }\n"""
                 )
 
             usage = (
@@ -249,8 +255,8 @@ async def _fetch_response(
         else:
             logger.debug(
                 f"Calling LLM {self.model} with input:\n"
-                f"{json.dumps(list_input, indent=2)}\n"
-                f"Tools:\n{json.dumps(converted_tools.tools, indent=2)}\n"
+                f"{json.dumps(list_input, indent=2, ensure_ascii=False)}\n"
+                f"Tools:\n{json.dumps(converted_tools.tools, indent=2, ensure_ascii=False)}\n"
                 f"Stream: {stream}\n"
                 f"Tool choice: {tool_choice}\n"
                 f"Response format: {response_format}\n"
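
Note (not part of the diff): every hunk makes the same change, adding ensure_ascii=False to the json.dumps calls used in debug logging, so non-ASCII message content is logged verbatim instead of as \uXXXX escapes. A minimal standalone sketch of the difference, using a hypothetical payload standing in for a model message:

    import json

    # Hypothetical payload containing non-ASCII text.
    message = {"role": "assistant", "content": "こんにちは"}

    # Default behavior: non-ASCII characters are escaped in the output,
    # which makes debug logs hard to read.
    print(json.dumps(message, indent=2))
    # "content": "\u3053\u3093\u306b\u3061\u306f"

    # With ensure_ascii=False, the original characters are preserved.
    print(json.dumps(message, indent=2, ensure_ascii=False))
    # "content": "こんにちは"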