Skip to content

Commit e8fa644

Browse files
Clean up codebase and tweak styles (#4)
* Clean up codebase and tweak styles * Fix weather tool * Sync pnpm-lock.yaml * Remove unused component * Remove unused component * Update requirements * Convert tool invocations in backend
1 parent 3cb7021 commit e8fa644

32 files changed

+3347
-709
lines changed

api/index.py

+95-77
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
import os
22
import json
33
from typing import List
4+
from openai.types.chat.chat_completion_message_param import ChatCompletionMessageParam
45
from pydantic import BaseModel
56
from dotenv import load_dotenv
67
from fastapi import FastAPI, Query
@@ -27,8 +28,40 @@ class Request(BaseModel):
2728
"get_current_weather": get_current_weather,
2829
}
2930

def do_stream(messages: List[ChatCompletionMessageParam]):
    """Start a streaming gpt-4o chat completion with the weather tool attached.

    Args:
        messages: Conversation history in OpenAI chat-completion format.

    Returns:
        The streaming response iterator from the OpenAI client.
    """
    # The single tool exposed to the model: current weather by coordinates.
    weather_tool = {
        "type": "function",
        "function": {
            "name": "get_current_weather",
            "description": "Get the current weather at a location",
            "parameters": {
                "type": "object",
                "properties": {
                    "latitude": {
                        "type": "number",
                        "description": "The latitude of the location",
                    },
                    "longitude": {
                        "type": "number",
                        "description": "The longitude of the location",
                    },
                },
                "required": ["latitude", "longitude"],
            },
        },
    }

    return client.chat.completions.create(
        messages=messages,
        model="gpt-4o",
        stream=True,
        tools=[weather_tool],
    )
def stream_text(messages: List[ChatCompletionMessageParam], protocol: str = 'data'):
    """Stream a gpt-4o completion as Vercel AI SDK data-stream protocol parts.

    Yields newline-terminated protocol strings: '0:' text deltas, '9:' tool
    call announcements, 'a:' tool results, and a final 'e:' finish part with
    usage. Tool calls requested by the model are executed locally via the
    module-level ``available_tools`` registry.

    Args:
        messages: Conversation history in OpenAI chat-completion format.
        protocol: Kept for backward compatibility with callers; the current
            implementation always emits the 'data' protocol.
    """
    # Tool calls arrive incrementally: an initial delta carries the id/name,
    # subsequent deltas append fragments of the JSON arguments string.
    draft_tool_calls = []
    draft_tool_calls_index = -1

    stream = client.chat.completions.create(
        messages=messages,
        model="gpt-4o",
        stream=True,
        tools=[{
            "type": "function",
            "function": {
                "name": "get_current_weather",
                "description": "Get the current weather at a location",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "latitude": {
                            "type": "number",
                            "description": "The latitude of the location",
                        },
                        "longitude": {
                            "type": "number",
                            "description": "The longitude of the location",
                        },
                    },
                    "required": ["latitude", "longitude"],
                },
            },
        }]
    )

    for chunk in stream:
        for choice in chunk.choices:
            if choice.finish_reason == "stop":
                continue

            elif choice.finish_reason == "tool_calls":
                # First announce every completed tool call ...
                for tool_call in draft_tool_calls:
                    yield '9:{{"toolCallId":"{id}","toolName":"{name}","args":{args}}}\n'.format(
                        id=tool_call["id"],
                        name=tool_call["name"],
                        args=tool_call["arguments"])

                # ... then execute each one and stream its result back.
                for tool_call in draft_tool_calls:
                    tool_result = available_tools[tool_call["name"]](
                        **json.loads(tool_call["arguments"]))

                    yield 'a:{{"toolCallId":"{id}","toolName":"{name}","args":{args},"result":{result}}}\n'.format(
                        id=tool_call["id"],
                        name=tool_call["name"],
                        args=tool_call["arguments"],
                        result=json.dumps(tool_result))

            elif choice.delta.tool_calls:
                for tool_call in choice.delta.tool_calls:
                    # ``call_id`` instead of ``id`` to avoid shadowing the builtin.
                    call_id = tool_call.id
                    name = tool_call.function.name
                    arguments = tool_call.function.arguments

                    if call_id is not None:
                        # A new tool call begins; its arguments stream in later.
                        draft_tool_calls_index += 1
                        draft_tool_calls.append(
                            {"id": call_id, "name": name, "arguments": ""})
                    else:
                        # Continuation delta: append the argument fragment.
                        draft_tool_calls[draft_tool_calls_index]["arguments"] += arguments

            else:
                yield '0:{text}\n'.format(text=json.dumps(choice.delta.content))

        # The final usage-only chunk arrives with an empty choices list.
        if not chunk.choices:
            usage = chunk.usage
            prompt_tokens = usage.prompt_tokens
            completion_tokens = usage.completion_tokens

            yield 'e:{{"finishReason":"{reason}","usage":{{"promptTokens":{prompt},"completionTokens":{completion}}},"isContinued":false}}\n'.format(
                reason="tool-calls" if len(draft_tool_calls) > 0 else "stop",
                prompt=prompt_tokens,
                completion=completion_tokens
            )
126144

127145

128146
@app.post("/api/chat")

api/utils/prompt.py

+44-4
Original file line numberDiff line numberDiff line change
@@ -1,20 +1,36 @@
1+
import json
2+
from enum import Enum
3+
from openai.types.chat.chat_completion_message_param import ChatCompletionMessageParam
14
from pydantic import BaseModel
25
import base64
3-
from typing import List, Optional
6+
from typing import List, Optional, Any
47
from .attachment import ClientAttachment
58

class ToolInvocationState(str, Enum):
    """Lifecycle state of a client-reported tool invocation.

    Mirrors the Vercel AI SDK's tool-invocation states; inheriting from
    ``str`` lets the values compare equal to their wire-format strings.
    """
    CALL = 'call'
    PARTIAL_CALL = 'partial-call'
    RESULT = 'result'
class ToolInvocation(BaseModel):
    """A single tool call (and its outcome) as reported by the client."""
    # Where this invocation is in its lifecycle (call / partial-call / result).
    state: ToolInvocationState
    # Identifier correlating the call with its result in the stream.
    toolCallId: str
    # Name of the tool that was invoked.
    toolName: str
    # Arguments passed to the tool; shape depends on the tool — Any by design.
    args: Any
    # Value the tool returned; shape depends on the tool — Any by design.
    result: Any
621

class ClientMessage(BaseModel):
    """A chat message in the shape the useChat frontend sends to the API."""
    role: str
    content: str
    # Optional attachments (e.g. images) included with the message.
    experimental_attachments: Optional[List[ClientAttachment]] = None
    # Tool calls/results the client has already recorded for this turn.
    toolInvocations: Optional[List[ToolInvocation]] = None

12-
13-
def convert_to_openai_messages(messages: List[ClientMessage]):
28+
def convert_to_openai_messages(messages: List[ClientMessage]) -> List[ChatCompletionMessageParam]:
1429
openai_messages = []
1530

1631
for message in messages:
1732
parts = []
33+
tool_calls = []
1834

1935
parts.append({
2036
'type': 'text',
@@ -37,9 +53,33 @@ def convert_to_openai_messages(messages: List[ClientMessage]):
3753
'text': attachment.url
3854
})
3955

56+
if(message.toolInvocations):
57+
for toolInvocation in message.toolInvocations:
58+
tool_calls.append({
59+
"id": toolInvocation.toolCallId,
60+
"type": "function",
61+
"function": {
62+
"name": toolInvocation.toolName,
63+
"arguments": json.dumps(toolInvocation.args)
64+
}
65+
})
66+
67+
tool_calls_dict = {"tool_calls": tool_calls} if tool_calls else {"tool_calls": None}
68+
4069
openai_messages.append({
4170
"role": message.role,
42-
"content": parts
71+
"content": parts,
72+
**tool_calls_dict,
4373
})
4474

75+
if(message.toolInvocations):
76+
for toolInvocation in message.toolInvocations:
77+
tool_message = {
78+
"role": "tool",
79+
"tool_call_id": toolInvocation.toolCallId,
80+
"content": json.dumps(toolInvocation.result),
81+
}
82+
83+
openai_messages.append(tool_message)
84+
4585
return openai_messages

api/utils/tools.py

+17-11
Original file line numberDiff line numberDiff line change
@@ -1,14 +1,20 @@
1-
import random
1+
import requests
22

def get_current_weather(latitude, longitude):
    """Fetch the current weather for a coordinate from the Open-Meteo API.

    Args:
        latitude: Latitude of the location in decimal degrees.
        longitude: Longitude of the location in decimal degrees.

    Returns:
        The decoded JSON response (dict) on success, or ``None`` when the
        request fails for any reason.
    """
    url = f"https://api.open-meteo.com/v1/forecast?latitude={latitude}&longitude={longitude}&current=temperature_2m&hourly=temperature_2m&daily=sunrise,sunset&timezone=auto"

    try:
        # requests has NO default timeout; without one a stalled upstream
        # would hang the request handler indefinitely.
        response = requests.get(url, timeout=10)

        # Raise an exception for bad status codes
        response.raise_for_status()

        # Return the JSON response
        return response.json()

    except requests.RequestException as e:
        # Best-effort tool: report the failure and return None so the model
        # can cope with a missing result instead of crashing the stream.
        print(f"Error fetching weather data: {e}")
        return None

app/(chat)/page.tsx

+5
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
import { Chat } from "@/components/chat";

// Root chat route: renders the main Chat component.
export default function Page() {
  return <Chat />;
}

app/(examples)/chat-attachments/layout.tsx

-9
This file was deleted.

0 commit comments

Comments
 (0)