From 676e7c3d0de7a57450df209d435fa604af3dfafe Mon Sep 17 00:00:00 2001 From: Ben Meeker Date: Wed, 24 Jul 2024 10:37:34 -0600 Subject: [PATCH 1/3] Update chat.go -- Added additional parameters to ChatCompletionRequest Added extensive list of parameter additions to ChatCompletionRequest --- chat.go | 80 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 80 insertions(+) diff --git a/chat.go b/chat.go index 52ae329..b648bff 100644 --- a/chat.go +++ b/chat.go @@ -83,6 +83,67 @@ type ChatCompletionRequest struct { // (Optional) // A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse User string `json:"user,omitempty"` + + // Additional fields in fork -- Ben Meeker + + // (Optional - default: false) + // Return log probabilities of output tokens. + LogProbs bool `json:"logprobs,omitempty"` + + // (Optional - default: null) + // An integer between 0 and 20 specifying the most likely tokens to return at each token position. + // LogProbs MUST be set to TRUE to use this parameter. + Top_LogProbs int `json:"top_logprobs,omitempty"` + + // (Optional - default: text) + // Specify the format that ChatGPT returns. Compatible with GPT-4o, GPT-4o mini, GPT-4 Turbo, and all GPT-3.5 Turbo models newer than gpt-3.5-turbo-1106 + // Options + // Type: "json_object" to enable JSON mode. + // Type: "text" to enable plain text mode. + Response_Format ResponseFormat `json:"response_format,omitempty"` + + // (Optional - default: null) + // System will try to sample deterministically based on the seed provided. The same seed and parameters should return the same result. + // Determinism is not guaranteed, refer to system_fingerprint response parameter. 
+ Seed int `json:"seed,omitempty"` + + // (Optional - default: auto) + // Specifies latency tier to use for request + // 'auto' - system will use scale tier credits until exhausted + // 'default' - request processed using default service tier with lower uptime SLA and no latency guarantee. + Service_Tier string `json:"service_tier,omitempty"` + + // (Optional - default: false) + // If set, partial message deltas will be sent. Tokens will be sent as data-only server-sent events as they become available. + // Stream terminated by a data: [DONE] message. + Stream bool `json:"stream,omitempty"` + + // (Optional - default: null) + // Only set this when Stream is True + // Set an additional chunk to stream before data: [DONE] message. + Stream_Options StreamOptions `json:"stream_options,omitempty"` + + // (Optional - default: null) + // A list of tools the model may call + // Provide a list of functions the model may generate JSON inputs for. 128 functions max supported. + Tools []Tool `json:"tools,omitempty"` + + // (Optional - default: none) + // Do NOT use this parameter in conjunction with Tool_Choice + // Options + // None: No tool will be called and a message will be generated + // Auto: Any number of tools can be used and/or message generation will take place + // Required: The model must call one or more tools + Tool_Choice_Type string `json:"tool_choice,omitempty"` + + // (Optional - default: none) + // Do NOT use this parameter in conjunction with Tool_Choice_Type + // Provide a tool object to be called. This forces the model to use that tool. 
+ Tool_Choice Tool `json:"tool_choice,omitempty"` + + // (Optional - default: true) + // Whether to enable parallel function calling during tool use + Parallel_Tool_Calls bool `json:"parallel_tool_calls"` } type ChatMessage struct { @@ -110,6 +171,25 @@ type ChatResponseUsage struct { Total_Tokens int `json:"total_tokens"` } +type ResponseFormat struct { + Type string `json:"type"` +} + +type StreamOptions struct { + Include_Usage bool `json:"include_usage"` +} + +type Tool struct { + Type string `json:"type"` + Function FunctionFormat `json:"function"` +} + +type FunctionFormat struct { + Description string `json:"description"` + Name string `json:"name"` + Parameters interface{} `json:"parameters"` +} + func (c *Client) SimpleSend(ctx context.Context, message string) (*ChatResponse, error) { req := &ChatCompletionRequest{ Model: GPT35Turbo, From 118f14f05448d799ab0e6d2cfb8f91f3c6b9cbc4 Mon Sep 17 00:00:00 2001 From: Ben Meeker Date: Wed, 24 Jul 2024 11:06:29 -0600 Subject: [PATCH 2/3] Update chat.go Fixed Parallel_Tool_Calls triggering unexpectedly by adding `omitempty` to json tag --- chat.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chat.go b/chat.go index b648bff..e72deb4 100644 --- a/chat.go +++ b/chat.go @@ -143,7 +143,7 @@ type ChatCompletionRequest struct { // (Optional - default: true) // Whether to enable parallel function calling during tool use - Parallel_Tool_Calls bool `json:"parallel_tool_calls"` + Parallel_Tool_Calls bool `json:"parallel_tool_calls,omitempty"` } type ChatMessage struct { From 91b90088f93fe5623152c62ba67220caea22598e Mon Sep 17 00:00:00 2001 From: Ben Meeker Date: Wed, 24 Jul 2024 11:29:17 -0600 Subject: [PATCH 3/3] Update chat.go -- Changed struct references to pointers This allows for structs to be omitted if empty. E.g. Stream_Options does not marshal, fixing error where Stream_Options returns an error when neither Stream_Options nor Stream is set. 
--- chat.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/chat.go b/chat.go index e72deb4..1b8c30c 100644 --- a/chat.go +++ b/chat.go @@ -100,7 +100,7 @@ type ChatCompletionRequest struct { // Options // Type: "json_object" to enable JSON mode. // Type: "text" to enable plain text mode. - Response_Format ResponseFormat `json:"response_format,omitempty"` + Response_Format *ResponseFormat `json:"response_format,omitempty"` // (Optional - default: null) // System will try to sample deterministically based on the seed provided. The same seed and parameters should return the same result. @@ -121,12 +121,12 @@ type ChatCompletionRequest struct { // (Optional - default: null) // Only set this when Stream is True // Set an additional chunk to stream before data: [DONE] message. - Stream_Options StreamOptions `json:"stream_options,omitempty"` + Stream_Options *StreamOptions `json:"stream_options,omitempty"` // (Optional - default: null) // A list of tools the model may call // Provide a list of functions the model may generate JSON inputs for. 128 functions max supported. - Tools []Tool `json:"tools,omitempty"` + Tools *[]Tool `json:"tools,omitempty"` // (Optional - default: none) // Do NOT use this parameter in conjunction with Tool_Choice @@ -139,7 +139,7 @@ type ChatCompletionRequest struct { // (Optional - default: none) // Do NOT use this parameter in conjunction with Tool_Choice_Type // Provide a tool object to be called. This forces the model to use that tool. - Tool_Choice Tool `json:"tool_choice,omitempty"` + Tool_Choice *Tool `json:"tool_choice,omitempty"` // (Optional - default: true) // Whether to enable parallel function calling during tool use