Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -0,0 +1,75 @@
from dataclasses import dataclass

from opentelemetry import trace
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
from opentelemetry.sdk import trace as trace_sdk
from opentelemetry.sdk.trace.export import BatchSpanProcessor
from pydantic_ai import Agent, RunContext

from openinference.instrumentation.pydantic_ai import OpenInferenceSpanProcessor

# OpenTelemetry setup: export spans via OTLP/HTTP to a local collector
# (port 6006 is the Phoenix default — adjust for your backend).
endpoint = "http://localhost:6006/v1/traces"
tracer_provider = trace_sdk.TracerProvider()
exporter = OTLPSpanExporter(endpoint=endpoint)
trace.set_tracer_provider(tracer_provider)
# NOTE(review): processors run in registration order, so the OpenInference
# processor is registered first — presumably so it can enrich span
# attributes before the batch processor exports them. Confirm if reordering.
tracer_provider.add_span_processor(OpenInferenceSpanProcessor())
tracer_provider.add_span_processor(BatchSpanProcessor(exporter))


# Simple dependencies
@dataclass
class Deps:
    """Per-run dependency container handed to tools via ``ctx.deps``.

    Converted to a dataclass for an auto-generated ``__init__``,
    ``__repr__`` and ``__eq__``; construction and attribute access are
    unchanged for existing callers.
    """

    user_id: str  # opaque user identifier (unused by the example tools)


# Create agent that will use multiple tools
# (the tools are attached below via the @agent.tool decorator).
agent = Agent(
    "openai:gpt-4.1-nano",
    deps_type=Deps,
    system_prompt="You are a helpful assistant. Use available tools to gather information.",
    instrument=True,  # emit OpenTelemetry spans for runs and tool calls
)


@agent.tool
def get_weather(ctx: RunContext[Deps], city: str) -> str:
    """Get weather information for a city."""
    # NOTE: the docstring above doubles as the tool description sent to the
    # model, so it is left untouched.
    print(f"[TOOL 1] Getting weather for: {city}")
    known_conditions = {
        "San Francisco": "Sunny, 72°F",
        "New York": "Cloudy, 65°F",
        "Seattle": "Rainy, 58°F",
    }
    if city in known_conditions:
        return known_conditions[city]
    return f"Weather data not available for {city}"


@agent.tool
def get_time(ctx: RunContext[Deps], city: str) -> str:
    """Get current time for a city."""
    # NOTE: the docstring above doubles as the tool description sent to the
    # model, so it is left untouched.
    print(f"[TOOL 2] Getting time for: {city}")
    local_times = {
        "San Francisco": "10:30 AM PST",
        "New York": "1:30 PM EST",
        "Seattle": "10:30 AM PST",
    }
    if city in local_times:
        return local_times[city]
    return f"Time data not available for {city}"


def main():
    """Run the agent once and pretty-print every message in the trace."""
    deps = Deps(user_id="user_123")
    result = agent.run_sync("What's the weather and time in San Francisco?", deps=deps)
    print("AGENT RESPONSE:")
    print(result.output)

    for step, message in enumerate(result.all_messages(), 1):
        print(f"\nStep {step}: {message}")
        if not hasattr(message, "parts"):
            continue
        for part in message.parts:
            kind = part.part_kind
            if kind == "tool-call":
                print(f" → Tool Call: {part.tool_name}({part.args})")
            elif kind == "tool-return":
                print(f" ← Tool Result: {part.content}")
            elif kind == "text":
                print(f" 💬 Text: {part.content[:100]}...")


if __name__ == "__main__":
    main()
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,7 @@
from openinference.instrumentation import safe_json_dumps
from openinference.semconv.trace import (
MessageAttributes,
MessageContentAttributes,
OpenInferenceSpanKindValues,
SpanAttributes,
ToolAttributes,
Expand Down Expand Up @@ -75,6 +76,7 @@ class GenAIMessagePartFields:
TYPE = "type"
CONTENT = "content"
RESULT = "result"
NAME = "name"


class GenAIMessagePartTypes:
Expand Down Expand Up @@ -113,6 +115,8 @@ class PydanticAgentName:

class PydanticTools:
TOOLS = "tools"
TOOL_ARGUMENTS = "tool_arguments"
TOOL_RESPONSE = "tool_response"


class PydanticGenAIAttribute:
Expand All @@ -129,6 +133,7 @@ class PydanticCustomAttributes:

class PydanticModelRequestParameters:
TOOLS = "output_tools"
FUNCTION_TOOLS = "function_tools"
NAME = "name"
DESCRIPTION = "description"
PARAMETERS = "parameters"
Expand All @@ -137,7 +142,7 @@ class PydanticModelRequestParameters:
class PydanticModelRequestParametersTool:
NAME = "name"
DESCRIPTION = "description"
PARAMETERS = "parameters"
PARAMETERS = "properties"


class PydanticMessageRoleUser:
Expand Down Expand Up @@ -334,6 +339,13 @@ def _extract_tool_attributes(gen_ai_attrs: Mapping[str, Any]) -> Iterator[Tuple[

if GEN_AI_TOOL_CALL_ID in gen_ai_attrs:
yield ToolCallAttributes.TOOL_CALL_ID, gen_ai_attrs[GEN_AI_TOOL_CALL_ID]
if PydanticTools.TOOL_ARGUMENTS in gen_ai_attrs:
yield (
SpanAttributes.TOOL_PARAMETERS,
gen_ai_attrs[PydanticTools.TOOL_ARGUMENTS],
)
if PydanticTools.TOOL_RESPONSE in gen_ai_attrs:
yield SpanAttributes.OUTPUT_VALUE, gen_ai_attrs[PydanticTools.TOOL_RESPONSE]

if OTELConventions.EVENTS in gen_ai_attrs:
events = _parse_events(gen_ai_attrs[OTELConventions.EVENTS])
Expand Down Expand Up @@ -370,6 +382,31 @@ def _extract_tool_attributes(gen_ai_attrs: Mapping[str, Any]) -> Iterator[Tuple[
)


def _extract_tools(output_tools: List[Dict[str, Any]]) -> Any:
    """Convert raw tool definitions into OpenInference attribute dicts.

    Non-dict entries are skipped, as are tools that produce no attributes.
    """
    extracted = []
    for raw in output_tools:
        if not isinstance(raw, dict):
            continue  # ignore malformed entries

        attrs: Dict[str, Any] = {}
        # Plain fields are copied through unchanged.
        for source_key, target_key in (
            (PydanticModelRequestParametersTool.NAME, SpanAttributes.TOOL_NAME),
            (PydanticModelRequestParametersTool.DESCRIPTION, SpanAttributes.TOOL_DESCRIPTION),
        ):
            if source_key in raw:
                attrs[target_key] = raw[source_key]
        # The parameter schema is serialized, but only when it is a dict.
        schema = raw.get(PydanticModelRequestParametersTool.PARAMETERS)
        if isinstance(schema, dict):
            attrs[ToolAttributes.TOOL_JSON_SCHEMA] = safe_json_dumps(schema)

        if attrs:
            extracted.append(attrs)
    return extracted


def _extract_tools_attributes(gen_ai_attrs: Mapping[str, Any]) -> Iterator[Tuple[str, Any]]:
"""Extract tool definitions from model request parameters."""
if PydanticCustomAttributes.MODEL_REQUEST_PARAMETERS not in gen_ai_attrs:
Expand All @@ -384,29 +421,11 @@ def _extract_tools_attributes(gen_ai_attrs: Mapping[str, Any]) -> Iterator[Tuple
if PydanticModelRequestParameters.TOOLS in params and isinstance(
params[PydanticModelRequestParameters.TOOLS], list
):
for tool in params[PydanticModelRequestParameters.TOOLS]:
if not isinstance(tool, dict):
continue

tool_info: Dict[str, Any] = {}
if PydanticModelRequestParametersTool.NAME in tool:
tool_info[SpanAttributes.TOOL_NAME] = tool[
PydanticModelRequestParametersTool.NAME
]
if PydanticModelRequestParametersTool.DESCRIPTION in tool:
tool_info[SpanAttributes.TOOL_DESCRIPTION] = tool[
PydanticModelRequestParametersTool.DESCRIPTION
]
if PydanticModelRequestParametersTool.PARAMETERS in tool and isinstance(
tool[PydanticModelRequestParametersTool.PARAMETERS], dict
):
tool_info[ToolAttributes.TOOL_JSON_SCHEMA] = safe_json_dumps(
tool[PydanticModelRequestParametersTool.PARAMETERS]
)

if tool_info:
tools.append(tool_info)

tools.extend(_extract_tools(params[PydanticModelRequestParameters.TOOLS]))
if PydanticModelRequestParameters.FUNCTION_TOOLS in params and isinstance(
params[PydanticModelRequestParameters.FUNCTION_TOOLS], list
):
tools.extend(_extract_tools(params[PydanticModelRequestParameters.FUNCTION_TOOLS]))
for idx, tool in enumerate(tools):
for key, value in tool.items():
yield f"{SpanAttributes.LLM_TOOLS}.{idx}.{key}", value
Expand Down Expand Up @@ -605,7 +624,7 @@ def _find_llm_output_value(output_messages: List[Dict[str, Any]]) -> Optional[st
args = tool_call[ToolCallAttributes.TOOL_CALL_FUNCTION_ARGUMENTS_JSON]
if isinstance(args, str):
return args
return None
return safe_json_dumps(args)
return None


Expand Down Expand Up @@ -686,17 +705,21 @@ def _extract_from_gen_ai_messages(gen_ai_attrs: Mapping[str, Any]) -> Iterator[T
if GenAIMessageFields.PARTS in msg and isinstance(
msg[GenAIMessageFields.PARTS], list
):
for part in msg[GenAIMessageFields.PARTS]:
for part_index, part in enumerate(msg[GenAIMessageFields.PARTS]):
if isinstance(part, dict):
if (
part.get(GenAIMessagePartFields.TYPE)
== GenAIMessagePartTypes.TEXT
and GenAIMessagePartFields.CONTENT in part
):
yield (
f"{SpanAttributes.LLM_INPUT_MESSAGES}.{msg_index}.{MessageAttributes.MESSAGE_CONTENT}",
f"{SpanAttributes.LLM_INPUT_MESSAGES}.{msg_index}.{MessageAttributes.MESSAGE_CONTENTS}.{part_index}.{MessageContentAttributes.MESSAGE_CONTENT_TEXT}",
part[GenAIMessagePartFields.CONTENT],
)
yield (
f"{SpanAttributes.LLM_INPUT_MESSAGES}.{msg_index}.{MessageAttributes.MESSAGE_CONTENTS}.{part_index}.{MessageContentAttributes.MESSAGE_CONTENT_TYPE}",
"text",
)

# Set INPUT_VALUE for the last user message found
if (
Expand All @@ -711,17 +734,22 @@ def _extract_from_gen_ai_messages(gen_ai_attrs: Mapping[str, Any]) -> Iterator[T
# Extract tool call information
if GenAIFunctionFields.NAME in part:
yield (
f"{SpanAttributes.LLM_INPUT_MESSAGES}.{msg_index}.{MessageAttributes.MESSAGE_TOOL_CALLS}.0.{ToolCallAttributes.TOOL_CALL_FUNCTION_NAME}",
f"{SpanAttributes.LLM_INPUT_MESSAGES}.{msg_index}.{MessageAttributes.MESSAGE_TOOL_CALLS}.{part_index}.{ToolCallAttributes.TOOL_CALL_FUNCTION_NAME}",
part[GenAIFunctionFields.NAME],
)
if GenAIFunctionFields.ARGUMENTS in part:
args = part[GenAIFunctionFields.ARGUMENTS]
if not isinstance(args, str):
args_str = safe_json_dumps(args)
else:
args_str = args
yield (
f"{SpanAttributes.LLM_INPUT_MESSAGES}.{msg_index}.{MessageAttributes.MESSAGE_TOOL_CALLS}.0.{ToolCallAttributes.TOOL_CALL_FUNCTION_ARGUMENTS_JSON}",
part[GenAIFunctionFields.ARGUMENTS],
f"{SpanAttributes.LLM_INPUT_MESSAGES}.{msg_index}.{MessageAttributes.MESSAGE_TOOL_CALLS}.{part_index}.{ToolCallAttributes.TOOL_CALL_FUNCTION_ARGUMENTS_JSON}",
args_str,
)
if GenAIToolCallFields.ID in part:
yield (
f"{SpanAttributes.LLM_INPUT_MESSAGES}.{msg_index}.{MessageAttributes.MESSAGE_TOOL_CALLS}.0.{ToolCallAttributes.TOOL_CALL_ID}",
f"{SpanAttributes.LLM_INPUT_MESSAGES}.{msg_index}.{MessageAttributes.MESSAGE_TOOL_CALLS}.{part_index}.{ToolCallAttributes.TOOL_CALL_ID}",
part[GenAIToolCallFields.ID],
)
elif (
Expand All @@ -731,7 +759,7 @@ def _extract_from_gen_ai_messages(gen_ai_attrs: Mapping[str, Any]) -> Iterator[T
message_role = GenAIMessageRoles.TOOL
if GenAIMessagePartFields.RESULT in part:
yield (
f"{SpanAttributes.LLM_INPUT_MESSAGES}.{msg_index}.{MessageAttributes.MESSAGE_CONTENT}",
f"{SpanAttributes.LLM_INPUT_MESSAGES}.{msg_index}.{MessageAttributes.MESSAGE_CONTENTS}.{part_index}.{MessageContentAttributes.MESSAGE_CONTENT_TEXT}",
part[GenAIMessagePartFields.RESULT],
)
if GenAIToolCallFields.ID in part:
Expand Down Expand Up @@ -777,7 +805,7 @@ def _extract_from_gen_ai_messages(gen_ai_attrs: Mapping[str, Any]) -> Iterator[T
if GenAIMessageFields.PARTS in msg and isinstance(
msg[GenAIMessageFields.PARTS], list
):
for part in msg[GenAIMessageFields.PARTS]:
for parts_index, part in enumerate(msg[GenAIMessageFields.PARTS]):
if isinstance(part, dict):
if (
part.get(GenAIMessagePartFields.TYPE)
Expand All @@ -788,30 +816,36 @@ def _extract_from_gen_ai_messages(gen_ai_attrs: Mapping[str, Any]) -> Iterator[T
f"{SpanAttributes.LLM_OUTPUT_MESSAGES}.{index}.{MessageAttributes.MESSAGE_CONTENT}",
part[GenAIMessagePartFields.CONTENT],
)
break
elif (
part.get(GenAIMessagePartFields.TYPE)
== GenAIMessagePartTypes.TOOL_CALL
):
# Extract tool call information
if GenAIFunctionFields.NAME in part:
yield (
f"{SpanAttributes.LLM_OUTPUT_MESSAGES}.{index}.{MessageAttributes.MESSAGE_TOOL_CALLS}.0.{ToolCallAttributes.TOOL_CALL_FUNCTION_NAME}",
f"{SpanAttributes.LLM_OUTPUT_MESSAGES}.{index}.{MessageAttributes.MESSAGE_TOOL_CALLS}.{parts_index}.{ToolCallAttributes.TOOL_CALL_FUNCTION_NAME}",
part[GenAIFunctionFields.NAME],
)
if (
part.get(GenAIFunctionFields.NAME)
== PydanticFinalResult.FINAL_RESULT
):
output_value = part[GenAIFunctionFields.ARGUMENTS]
output_value = safe_json_dumps(
part[GenAIFunctionFields.ARGUMENTS]
)
if GenAIFunctionFields.ARGUMENTS in part:
args = part[GenAIFunctionFields.ARGUMENTS]
if not isinstance(args, str):
args_str = safe_json_dumps(args)
else:
args_str = args
yield (
f"{SpanAttributes.LLM_OUTPUT_MESSAGES}.{index}.{MessageAttributes.MESSAGE_TOOL_CALLS}.0.{ToolCallAttributes.TOOL_CALL_FUNCTION_ARGUMENTS_JSON}",
part[GenAIFunctionFields.ARGUMENTS],
f"{SpanAttributes.LLM_OUTPUT_MESSAGES}.{index}.{MessageAttributes.MESSAGE_TOOL_CALLS}.{parts_index}.{ToolCallAttributes.TOOL_CALL_FUNCTION_ARGUMENTS_JSON}",
args_str,
)
if GenAIToolCallFields.ID in part:
yield (
f"{SpanAttributes.LLM_OUTPUT_MESSAGES}.{index}.{MessageAttributes.MESSAGE_TOOL_CALLS}.0.{ToolCallAttributes.TOOL_CALL_ID}",
f"{SpanAttributes.LLM_OUTPUT_MESSAGES}.{index}.{MessageAttributes.MESSAGE_TOOL_CALLS}.{parts_index}.{ToolCallAttributes.TOOL_CALL_ID}",
part[GenAIToolCallFields.ID],
)
except json.JSONDecodeError:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@

from opentelemetry.context import Context
from opentelemetry.sdk.trace import ReadableSpan, SpanProcessor
from opentelemetry.trace import Span
from opentelemetry.trace import Span, Status, StatusCode

from openinference.instrumentation.pydantic_ai.semantic_conventions import get_attributes
from openinference.instrumentation.pydantic_ai.utils import SpanFilter, should_export_span
Expand Down Expand Up @@ -62,12 +62,15 @@ def on_end(self, span: ReadableSpan) -> None:

# Combine the attributes with the openinference attributes
span._attributes = {**span.attributes, **openinference_attributes}

if not span.status.status_code == StatusCode.ERROR:
span._status = Status(status_code=StatusCode.OK)
# Determine if the span should be exported
if should_export_span(span, self._span_filter):
super().on_end(span)

except Exception as e:
span._status = Status(status_code=StatusCode.ERROR, description=str(e))
logger.exception(e)
logger.warning(f"Error processing span in OpenInferenceSpanProcessor: {e}")

def shutdown(self) -> None:
Expand Down
Loading