This repository was archived by the owner on Aug 5, 2025. It is now read-only.

Commit c494e1f

Merge pull request #166 from Chainlit/willy/openai-agents
feat: add openai-agents tracing processor
2 parents 3eebec5 + a7c28bd commit c494e1f

28 files changed: +500 -107 lines

examples/attachment.py

Lines changed: 1 addition & 1 deletion
@@ -32,7 +32,7 @@ async def main():
             "url": "https://api.github.com/repos/chainlit/chainlit",
             "mime": "application/json",
             "metadata": {"test": "test"},
-        }
+        },
     )
 
     print(attachment.to_dict())

examples/langchain_toolcall.py

Lines changed: 5 additions & 8 deletions
@@ -1,15 +1,12 @@
-from literalai import LiteralClient
-
-from langchain_openai import ChatOpenAI  # type: ignore
+from dotenv import load_dotenv
+from langchain.agents import AgentExecutor, create_tool_calling_agent
+from langchain.agents.agent import BaseSingleActionAgent
 from langchain_community.tools.tavily_search import TavilySearchResults
-
-from langchain.agents import create_tool_calling_agent
-from langchain.agents import AgentExecutor
 from langchain_core.messages import AIMessage, HumanMessage
 from langchain_core.runnables.config import RunnableConfig
-from langchain.agents.agent import BaseSingleActionAgent
+from langchain_openai import ChatOpenAI  # type: ignore
 
-from dotenv import load_dotenv
+from literalai import LiteralClient
 
 # Add OPENAI_API_KEY and TAVILY_API_KEY for this example.
 load_dotenv()
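
The hunk above only covers the example's imports, which this commit reorders into isort-style groups. As a rough, hypothetical sketch of how such imports are typically wired into a tool-calling agent (the prompt, model name, tool configuration, and invoke call below are assumptions, not the file's actual contents):

# Hypothetical wiring of the imports above; not taken from this commit.
from dotenv import load_dotenv
from langchain.agents import AgentExecutor, create_tool_calling_agent
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_core.prompts import ChatPromptTemplate  # assumed import, not shown in the diff
from langchain_openai import ChatOpenAI

load_dotenv()

# Tool-calling agents need an agent_scratchpad placeholder in the prompt.
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a helpful research assistant."),
        ("human", "{input}"),
        ("placeholder", "{agent_scratchpad}"),
    ]
)
tools = [TavilySearchResults(max_results=1)]
agent = create_tool_calling_agent(ChatOpenAI(model="gpt-4o-mini"), tools, prompt)
executor = AgentExecutor(agent=agent, tools=tools)
print(executor.invoke({"input": "What does Tavily search do?"})["output"])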

examples/langchain_variable.py

Lines changed: 2 additions & 3 deletions
@@ -1,8 +1,7 @@
+from dotenv import load_dotenv
 from langchain.chat_models import init_chat_model
-from literalai import LiteralClient
-
 
-from dotenv import load_dotenv
+from literalai import LiteralClient
 
 load_dotenv()
 
examples/llamaindex.py

Lines changed: 4 additions & 3 deletions
@@ -1,6 +1,7 @@
-from literalai import LiteralClient
-from llama_index.core import Document, VectorStoreIndex
 from dotenv import load_dotenv
+from llama_index.core import Document, VectorStoreIndex
+
+from literalai import LiteralClient
 
 load_dotenv()
 
@@ -14,7 +15,7 @@
 questions = [
     "Tell me about LLMs",
     "How do you fine-tune a neural network ?",
-    "What is RAG ?"
+    "What is RAG ?",
 ]
 
 # No context, create a Thread (it will be named after the first user query)

examples/llamaindex_workflow.py

Lines changed: 5 additions & 8 deletions
@@ -1,12 +1,8 @@
 import asyncio
-from llama_index.core.workflow import (
-    Event,
-    StartEvent,
-    StopEvent,
-    Workflow,
-    step,
-)
+
+from llama_index.core.workflow import Event, StartEvent, StopEvent, Workflow, step
 from llama_index.llms.openai import OpenAI
+
 from literalai.client import LiteralClient
 
 lai_client = LiteralClient()
@@ -16,7 +12,8 @@
 class JokeEvent(Event):
     joke: str
 
-class RewriteJoke(Event):
+
+class RewriteJoke(Event):
     joke: str
 
 
examples/main.py

Lines changed: 1 addition & 1 deletion
@@ -24,7 +24,7 @@ def get_completion(welcome_message, text):
             {
                 "role": "system",
                 "content": "Tell an inspiring quote to the user, mentioning their name. Be extremely supportive while "
-                           "keeping it short. Write one sentence per line.",
+                "keeping it short. Write one sentence per line.",
             },
             {
                 "role": "assistant",

examples/multimodal.py

Lines changed: 3 additions & 4 deletions
@@ -1,12 +1,11 @@
 import base64
-import requests  # type: ignore
 import time
 
-from literalai import LiteralClient
-from openai import OpenAI
-
+import requests  # type: ignore
 from dotenv import load_dotenv
+from openai import OpenAI
 
+from literalai import LiteralClient
 
 load_dotenv()
 
examples/openai_agents.py

Lines changed: 27 additions & 0 deletions
@@ -0,0 +1,27 @@
+import asyncio
+
+from agents import Agent, Runner, set_trace_processors, trace
+from dotenv import load_dotenv
+
+from literalai import LiteralClient
+
+load_dotenv()
+
+client = LiteralClient()
+
+
+async def main():
+    agent = Agent(name="Joke generator", instructions="Tell funny jokes.")
+
+    with trace("Joke workflow"):
+        first_result = await Runner.run(agent, "Tell me a joke")
+        second_result = await Runner.run(
+            agent, f"Rate this joke: {first_result.final_output}"
+        )
+        print(f"Joke: {first_result.final_output}")
+        print(f"Rating: {second_result.final_output}")
+
+
+if __name__ == "__main__":
+    set_trace_processors([client.openai_agents_tracing_processor()])
+    asyncio.run(main())
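
Note that the example registers the Literal AI processor with set_trace_processors, which replaces the Agents SDK's default trace processors. To keep the SDK's built-in exporter and send traces to Literal AI as well, the SDK also exposes add_trace_processor; the sketch below treats that helper as an assumption rather than something exercised in this commit.

# Minimal sketch; add_trace_processor is assumed from the Agents SDK and is not used in this commit.
from agents import add_trace_processor

from literalai import LiteralClient

client = LiteralClient()
# Append the Literal AI processor instead of replacing the default processors.
add_trace_processor(client.openai_agents_tracing_processor())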

literalai/api/__init__.py

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-from literalai.api.synchronous import LiteralAPI
 from literalai.api.asynchronous import AsyncLiteralAPI
+from literalai.api.synchronous import LiteralAPI
 
 __all__ = ["LiteralAPI", "AsyncLiteralAPI"]

literalai/api/helpers/attachment_helpers.py

Lines changed: 1 addition & 2 deletions
@@ -1,9 +1,8 @@
 import mimetypes
 from typing import Dict, Optional, TypedDict, Union
 
-from literalai.observability.step import Attachment
-
 from literalai.api.helpers import gql
+from literalai.observability.step import Attachment
 
 
 def create_attachment_helper(
