1
1
import os
2
2
import json
3
3
from typing import List
4
+ from openai .types .chat .chat_completion_message_param import ChatCompletionMessageParam
4
5
from pydantic import BaseModel
5
6
from dotenv import load_dotenv
6
7
from fastapi import FastAPI , Query
@@ -27,8 +28,40 @@ class Request(BaseModel):
27
28
"get_current_weather" : get_current_weather ,
28
29
}
29
30
31
def do_stream(messages: List[ChatCompletionMessageParam]):
    """Start a streaming chat completion with the weather tool attached.

    Args:
        messages: Conversation history in OpenAI chat-message format.

    Returns:
        The raw OpenAI streaming response object; the caller is
        responsible for iterating the chunks.
    """
    # NOTE(review): this tool schema is duplicated in stream_text —
    # keep the two definitions in sync.
    stream = client.chat.completions.create(
        messages=messages,
        model="gpt-4o",
        stream=True,
        tools=[{
            "type": "function",
            "function": {
                "name": "get_current_weather",
                "description": "Get the current weather at a location",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "latitude": {
                            "type": "number",
                            "description": "The latitude of the location",
                        },
                        "longitude": {
                            "type": "number",
                            "description": "The longitude of the location",
                        },
                    },
                    "required": ["latitude", "longitude"],
                },
            },
        }],
    )

    return stream
61
def stream_text(messages: List[ChatCompletionMessageParam], protocol: str = 'data'):
    """Stream a chat completion as Vercel AI SDK data-stream protocol lines.

    Yields one protocol-prefixed line per event:
      - ``0:`` a text delta (JSON-encoded string),
      - ``9:`` a tool call about to be executed,
      - ``a:`` a tool call result,
      - ``e:`` the finish event with usage accounting.

    Args:
        messages: Conversation history in OpenAI chat-message format.
        protocol: Kept for backward compatibility with callers; only the
            'data' protocol is emitted by this implementation.

    Yields:
        str: Newline-terminated data-stream protocol frames.
    """
    # Tool-call deltas arrive in fragments; accumulate them here until
    # the model signals finish_reason == "tool_calls".
    draft_tool_calls = []
    draft_tool_calls_index = -1

    # NOTE(review): this tool schema is duplicated in do_stream —
    # keep the two definitions in sync.
    stream = client.chat.completions.create(
        messages=messages,
        model="gpt-4o",
        stream=True,
        tools=[{
            "type": "function",
            "function": {
                "name": "get_current_weather",
                "description": "Get the current weather at a location",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "latitude": {
                            "type": "number",
                            "description": "The latitude of the location",
                        },
                        "longitude": {
                            "type": "number",
                            "description": "The longitude of the location",
                        },
                    },
                    "required": ["latitude", "longitude"],
                },
            },
        }],
    )

    for chunk in stream:
        for choice in chunk.choices:
            if choice.finish_reason == "stop":
                # Usage is reported on a later, choice-less chunk; keep
                # consuming the stream instead of breaking out.
                continue

            elif choice.finish_reason == "tool_calls":
                # First announce every fully-assembled tool call ...
                for tool_call in draft_tool_calls:
                    yield '9:{{"toolCallId":"{id}","toolName":"{name}","args":{args}}}\n'.format(
                        id=tool_call["id"],
                        name=tool_call["name"],
                        args=tool_call["arguments"])

                # ... then execute each one and stream its result.
                for tool_call in draft_tool_calls:
                    tool_result = available_tools[tool_call["name"]](
                        **json.loads(tool_call["arguments"]))

                    yield 'a:{{"toolCallId":"{id}","toolName":"{name}","args":{args},"result":{result}}}\n'.format(
                        id=tool_call["id"],
                        name=tool_call["name"],
                        args=tool_call["arguments"],
                        result=json.dumps(tool_result))

            elif choice.delta.tool_calls:
                for tool_call in choice.delta.tool_calls:
                    # An id marks the start of a new tool call; subsequent
                    # fragments carry only argument text to append.
                    tool_call_id = tool_call.id
                    name = tool_call.function.name
                    arguments = tool_call.function.arguments

                    if tool_call_id is not None:
                        draft_tool_calls_index += 1
                        draft_tool_calls.append(
                            {"id": tool_call_id, "name": name, "arguments": ""})
                    else:
                        draft_tool_calls[draft_tool_calls_index]["arguments"] += arguments

            else:
                # Plain text delta.
                yield '0:{text}\n'.format(text=json.dumps(choice.delta.content))

        # The final chunk carries usage and no choices.
        if not chunk.choices:
            usage = chunk.usage
            prompt_tokens = usage.prompt_tokens
            completion_tokens = usage.completion_tokens

            yield 'e:{{"finishReason":"{reason}","usage":{{"promptTokens":{prompt},"completionTokens":{completion}}},"isContinued":false}}\n'.format(
                reason="tool-calls" if len(
                    draft_tool_calls) > 0 else "stop",
                prompt=prompt_tokens,
                completion=completion_tokens
            )
126
144
127
145
128
146
@app .post ("/api/chat" )
0 commit comments