Classes
+The following classes are available globally.
+ +-
+
-
+ ++
+ + + OpenAI +
++ +++ + ++ + See more ++++Declaration
+++Swift
+
+ +final public class OpenAI : OpenAIProtocol
+
diff --git a/.gitignore b/.gitignore index e838b643..bf30bfd4 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,6 @@ +# hooks + + # Xcode # # gitignore contributors: remember to update Global/Xcode.gitignore, Objective-C.gitignore & Swift.gitignore diff --git a/Sources/OpenAI/Public/Models/ChatQuery.swift b/Sources/OpenAI/Public/Models/ChatQuery.swift index c7a88649..8f1f521a 100644 --- a/Sources/OpenAI/Public/Models/ChatQuery.swift +++ b/Sources/OpenAI/Public/Models/ChatQuery.swift @@ -9,6 +9,7 @@ import Foundation /// Creates a model response for the given chat conversation /// https://platform.openai.com/docs/guides/text-generation +/// https://platform.openai.com/docs/api-reference/chat/create public struct ChatQuery: Equatable, Codable, Streamable { /// A list of messages comprising the conversation so far diff --git a/Sources/OpenAI/Public/Models/ChatResult.swift b/Sources/OpenAI/Public/Models/ChatResult.swift index c2f7c12d..7cfbbff6 100644 --- a/Sources/OpenAI/Public/Models/ChatResult.swift +++ b/Sources/OpenAI/Public/Models/ChatResult.swift @@ -6,9 +6,43 @@ // import Foundation - +/// https://platform.openai.com/docs/api-reference/chat/object +/// Example Completion object print +/// ``` +/// { +/// "id": "chatcmpl-123456", +/// "object": "chat.completion", +/// "created": 1728933352, +/// "model": "gpt-4o-2024-08-06", +/// "choices": [ +/// { +/// "index": 0, +/// "message": { +/// "role": "assistant", +/// "content": "Hi there! 
How can I assist you today?", +/// "refusal": null +/// }, +/// "logprobs": null, +/// "finish_reason": "stop" +/// } +/// ], +/// "usage": { +/// "prompt_tokens": 19, +/// "completion_tokens": 10, +/// "total_tokens": 29, +/// "prompt_tokens_details": { +/// "cached_tokens": 0 +/// }, +/// "completion_tokens_details": { +/// "reasoning_tokens": 0 +/// } +/// }, +/// "system_fingerprint": "fp_6b68a8204b" +/// } +/// ``` public struct ChatResult: Codable, Equatable { + /// mimic the choices array in the chat completion object public struct Choice: Codable, Equatable { public typealias ChatCompletionMessage = ChatQuery.ChatCompletionMessageParam diff --git a/docs/Classes.html b/docs/Classes.html new file mode 100644 index 00000000..2831f986 --- /dev/null +++ b/docs/Classes.html @@ -0,0 +1,325 @@ + + +
+Docs (100% documented)
+The following classes are available globally.
+ +
+
+
+ OpenAI
+
+ Swift
+final public class OpenAI : OpenAIProtocol
+
+ Docs (100% documented)
+final public class OpenAI : OpenAIProtocol
+
+
+
+
+ Configuration
+
+ Swift
+public struct Configuration
+
+
+
+
+ configuration
+
+ Swift
+public let configuration: Configuration
+
+
+
+
+ init(apiToken:)
+
+ Swift
+public convenience init(apiToken: String)
+
+
+
+
+ init(configuration:)
+
+ Swift
+public convenience init(configuration: Configuration)
+
+
+
+
+ init(configuration:session:)
+
+ Swift
+public convenience init(configuration: Configuration, session: URLSession = URLSession.shared)
+
+
+
+
+ completions(query:completion:)
+
+ Swift
+public func completions(query: CompletionsQuery, completion: @escaping (Result<CompletionsResult, Error>) -> Void)
+
+
+
+
+ completionsStream(query:onResult:completion:)
+
+ Swift
+public func completionsStream(query: CompletionsQuery, onResult: @escaping (Result<CompletionsResult, Error>) -> Void, completion: ((Error?) -> Void)?)
+
+
+
+
+ images(query:completion:)
+
+ Swift
+public func images(query: ImagesQuery, completion: @escaping (Result<ImagesResult, Error>) -> Void)
+
+
+
+
+ imageEdits(query:completion:)
+
+ Swift
+public func imageEdits(query: ImageEditsQuery, completion: @escaping (Result<ImagesResult, Error>) -> Void)
+
+
+
+
+ imageVariations(query:completion:)
+
+ Swift
+public func imageVariations(query: ImageVariationsQuery, completion: @escaping (Result<ImagesResult, Error>) -> Void)
+
+
+
+
+ embeddings(query:completion:)
+
+ Swift
+public func embeddings(query: EmbeddingsQuery, completion: @escaping (Result<EmbeddingsResult, Error>) -> Void)
+
+
+
+
+ chats(query:completion:)
+
+ Swift
+public func chats(query: ChatQuery, completion: @escaping (Result<ChatResult, Error>) -> Void)
+
+
+
+
+ chatsStream(query:onResult:completion:)
+
+ Swift
+public func chatsStream(query: ChatQuery, onResult: @escaping (Result<ChatStreamResult, Error>) -> Void, completion: ((Error?) -> Void)?)
+
+
+
+
+ edits(query:completion:)
+
+ Swift
+public func edits(query: EditsQuery, completion: @escaping (Result<EditsResult, Error>) -> Void)
+
+
+
+
+ model(query:completion:)
+
+ Swift
+public func model(query: ModelQuery, completion: @escaping (Result<ModelResult, Error>) -> Void)
+
+
+
+
+ models(completion:)
+
+ Swift
+public func models(completion: @escaping (Result<ModelsResult, Error>) -> Void)
+
+
+
+
+ moderations(query:completion:)
+
+ Swift
+@available(iOS 13.0, *)
+public func moderations(query: ModerationsQuery, completion: @escaping (Result<ModerationsResult, Error>) -> Void)
+
+
+
+
+ audioTranscriptions(query:completion:)
+
+ Swift
+public func audioTranscriptions(query: AudioTranscriptionQuery, completion: @escaping (Result<AudioTranscriptionResult, Error>) -> Void)
+
+
+
+
+ audioTranslations(query:completion:)
+
+ Swift
+public func audioTranslations(query: AudioTranslationQuery, completion: @escaping (Result<AudioTranslationResult, Error>) -> Void)
+
+
+
+
+ audioCreateSpeech(query:completion:)
+
+ Swift
+public func audioCreateSpeech(query: AudioSpeechQuery, completion: @escaping (Result<AudioSpeechResult, Error>) -> Void)
+
+ Docs (100% documented)
+public struct Configuration
+
+
+
+
+ token
+
+ OpenAI API token. See https://platform.openai.com/docs/api-reference/authentication
+ +Swift
+public let token: String
+
+
+
+
+ organizationIdentifier
+
+ Optional OpenAI organization identifier. See https://platform.openai.com/docs/api-reference/authentication
+ +Swift
+public let organizationIdentifier: String?
+
+
+
+
+ host
+
+ API host. Set this property if you use some kind of proxy or your own server. Default is api.openai.com
+ +Swift
+public let host: String
+
+
+
+
+ port
+
+ Swift
+public let port: Int
+
+
+
+
+ scheme
+
+ Swift
+public let scheme: String
+
+
+
+
+ timeoutInterval
+
+ Default request timeout
+ +Swift
+public let timeoutInterval: TimeInterval
+
+ Swift
+public init(token: String, organizationIdentifier: String? = nil, host: String = "api.openai.com", port: Int = 443, scheme: String = "https", timeoutInterval: TimeInterval = 60.0)
+
+ Docs (100% documented)
+The following enumerations are available globally.
+ +
+
+
+ OpenAIError
+
+ Swift
+public enum OpenAIError : Error
+
+ Docs (100% documented)
+public enum OpenAIError : Error
+
+
+
+
+ emptyData
+
+ Swift
+case emptyData
+
+ Docs (100% documented)
+The following extensions are available globally.
+ +Docs (100% documented)
+public extension Model
+
+
+
+
+ gpt4_o
+
+ gpt-4o
, currently the most advanced, multimodal flagship model that’s cheaper and faster than GPT-4 Turbo.
Swift
+static let gpt4_o: String
+
+
+
+
+ gpt4_o_mini
+
+ gpt-4o-mini
, currently the most affordable and intelligent model for fast and lightweight requests.
Swift
+static let gpt4_o_mini: String
+
+
+
+
+ gpt4_turbo
+
+ gpt-4-turbo
, The latest GPT-4 Turbo model with vision capabilities. Vision requests can now use JSON mode and function calling and more. Context window: 128,000 tokens
Swift
+static let gpt4_turbo: String
+
+
+
+
+ gpt4_turbo_preview
+
+ gpt-4-turbo
, gpt-4 model with improved instruction following, JSON mode, reproducible outputs, parallel function calling and more. Maximum of 4096 output tokens
Swift
+@available(*, deprecated, message: "Please upgrade to the newer model")
+static let gpt4_turbo_preview: String
+
+
+
+
+ gpt4_vision_preview
+
+ gpt-4-vision-preview
, able to understand images, in addition to all other GPT-4 Turbo capabilities.
Swift
+static let gpt4_vision_preview: String
+
+
+
+
+ gpt4_0125_preview
+
+ Snapshot of gpt-4-turbo-preview
from January 25th 2024. This model reduces cases of “laziness” where the model doesn’t complete a task. Also fixes the bug impacting non-English UTF-8 generations. Maximum of 4096 output tokens
Swift
+static let gpt4_0125_preview: String
+
+
+
+
+ gpt4_1106_preview
+
+ Snapshot of gpt-4-turbo-preview
from November 6th 2023. Improved instruction following, JSON mode, reproducible outputs, parallel function calling and more. Maximum of 4096 output tokens
Swift
+@available(*, deprecated, message: "Please upgrade to the newer model")
+static let gpt4_1106_preview: String
+
+
+
+
+ gpt4
+
+ Most capable gpt-4
model, outperforms any GPT-3.5 model, able to do more complex tasks, and optimized for chat.
Swift
+static let gpt4: String
+
+
+
+
+ gpt4_0613
+
+ Snapshot of gpt-4
from June 13th 2023 with function calling data. Unlike gpt-4
, this model will not receive updates, and will be deprecated 3 months after a new version is released.
Swift
+static let gpt4_0613: String
+
+
+
+
+ gpt4_0314
+
+ Snapshot of gpt-4
from March 14th 2023. Unlike gpt-4, this model will not receive updates, and will only be supported for a three month period ending on June 14th 2023.
Swift
+@available(*, deprecated, message: "Please upgrade to the newer model")
+static let gpt4_0314: String
+
+
+
+
+ gpt4_32k
+
+ Same capabilities as the base gpt-4
model but with 4x the context length. Will be updated with our latest model iteration.
Swift
+static let gpt4_32k: String
+
+
+
+
+ gpt4_32k_0613
+
+ Snapshot of gpt-4-32k
from June 13th 2023. Unlike gpt-4-32k
, this model will not receive updates, and will be deprecated 3 months after a new version is released.
Swift
+static let gpt4_32k_0613: String
+
+
+
+
+ gpt4_32k_0314
+
+ Snapshot of gpt-4-32k
from March 14th 2023. Unlike gpt-4-32k
, this model will not receive updates, and will only be supported for a three month period ending on June 14th 2023.
Swift
+@available(*, deprecated, message: "Please upgrade to the newer model")
+static let gpt4_32k_0314: String
+
+
+
+
+ gpt3_5Turbo
+
+ Most capable gpt-3.5-turbo
model and optimized for chat. Will be updated with our latest model iteration.
Swift
+static let gpt3_5Turbo: String
+
+
+
+
+ gpt3_5Turbo_0125
+
+ Snapshot of gpt-3.5-turbo
from January 25th 2024. Decreased prices by 50%. Various improvements including higher accuracy at responding in requested formats and a fix for a bug which caused a text encoding issue for non-English language function calls.
Swift
+static let gpt3_5Turbo_0125: String
+
+
+
+
+ gpt3_5Turbo_1106
+
+ Snapshot of gpt-3.5-turbo
from November 6th 2023. The latest gpt-3.5-turbo
model with improved instruction following, JSON mode, reproducible outputs, parallel function calling and more.
Swift
+@available(*, deprecated, message: "Please upgrade to the newer model")
+static let gpt3_5Turbo_1106: String
+
+
+
+
+ gpt3_5Turbo_0613
+
+ Snapshot of gpt-3.5-turbo
from June 13th 2023 with function calling data. Unlike gpt-3.5-turbo
, this model will not receive updates, and will be deprecated 3 months after a new version is released.
Swift
+@available(*, deprecated, message: "Please upgrade to the newer model")
+static let gpt3_5Turbo_0613: String
+
+
+
+
+ gpt3_5Turbo_0301
+
+ Snapshot of gpt-3.5-turbo
from March 1st 2023. Unlike gpt-3.5-turbo
, this model will not receive updates, and will only be supported for a three month period ending on June 1st 2023.
Swift
+@available(*, deprecated, message: "Please upgrade to the newer model")
+static let gpt3_5Turbo_0301: String
+
+
+
+
+ gpt3_5Turbo_16k
+
+ Same capabilities as the standard gpt-3.5-turbo
model but with 4 times the context.
Swift
+static let gpt3_5Turbo_16k: String
+
+
+
+
+ gpt3_5Turbo_16k_0613
+
+ Snapshot of gpt-3.5-turbo-16k
from June 13th 2023. Unlike gpt-3.5-turbo-16k
, this model will not receive updates, and will be deprecated 3 months after a new version is released.
Swift
+static let gpt3_5Turbo_16k_0613: String
+
+
+
+
+ textDavinci_003
+
+ Can do any language task with better quality, longer output, and consistent instruction-following than the curie, babbage, or ada models. Also supports inserting completions within text.
+ +Swift
+static let textDavinci_003: String
+
+
+
+
+ textDavinci_002
+
+ Similar capabilities to text-davinci-003 but trained with supervised fine-tuning instead of reinforcement learning.
+ +Swift
+static let textDavinci_002: String
+
+
+
+
+ textCurie
+
+ Very capable, faster and lower cost than Davinci.
+ +Swift
+static let textCurie: String
+
+
+
+
+ textBabbage
+
+ Capable of straightforward tasks, very fast, and lower cost.
+ +Swift
+static let textBabbage: String
+
+
+
+
+ textAda
+
+ Capable of very simple tasks, usually the fastest model in the GPT-3 series, and lowest cost.
+ +Swift
+static let textAda: String
+
+
+
+
+ textDavinci_001
+
+ Swift
+static let textDavinci_001: String
+
+
+
+
+ codeDavinciEdit_001
+
+ Swift
+static let codeDavinciEdit_001: String
+
+
+
+
+ tts_1
+
+ The latest text to speech model, optimized for speed.
+ +Swift
+static let tts_1: String
+
+
+
+
+ tts_1_hd
+
+ The latest text to speech model, optimized for quality.
+ +Swift
+static let tts_1_hd: String
+
+
+
+
+ whisper_1
+
+ Swift
+static let whisper_1: String
+
+
+
+
+ dall_e_2
+
+ Swift
+static let dall_e_2: String
+
+
+
+
+ dall_e_3
+
+ Swift
+static let dall_e_3: String
+
+
+
+
+ davinci
+
+ Most capable GPT-3 model. Can do any task the other models can do, often with higher quality.
+ +Swift
+static let davinci: String
+
+
+
+
+ curie
+
+ Very capable, but faster and lower cost than Davinci.
+ +Swift
+static let curie: String
+
+
+
+
+ babbage
+
+ Capable of straightforward tasks, very fast, and lower cost.
+ +Swift
+static let babbage: String
+
+
+
+
+ ada
+
+ Capable of very simple tasks, usually the fastest model in the GPT-3 series, and lowest cost.
+ +Swift
+static let ada: String
+
+
+
+
+ textEmbeddingAda
+
+ Swift
+static let textEmbeddingAda: String
+
+
+
+
+ textSearchAda
+
+ Swift
+static let textSearchAda: String
+
+
+
+
+ textSearchBabbageDoc
+
+ Swift
+static let textSearchBabbageDoc: String
+
+
+
+
+ textSearchBabbageQuery001
+
+ Swift
+static let textSearchBabbageQuery001: String
+
+
+
+
+ textEmbedding3
+
+ Swift
+static let textEmbedding3: String
+
+
+
+
+ textEmbedding3Large
+
+ Swift
+static let textEmbedding3Large: String
+
+
+
+
+ textModerationStable
+
+ Almost as capable as the latest model, but slightly older.
+ +Swift
+static let textModerationStable: String
+
+
+
+
+ textModerationLatest
+
+ Most capable moderation model. Accuracy will be slightly higher than the stable model.
+ +Swift
+static let textModerationLatest: String
+
+
+
+
+ moderation
+
+ Swift
+static let moderation: String
+
+ Docs (100% documented)
+The following protocols are available globally.
+ +
+
+
+ OpenAIProtocol
+
+ Swift
+public protocol OpenAIProtocol
+
+ Docs (100% documented)
+public protocol OpenAIProtocol
+
+
+
+
+ completions(query:completion:)
+
+ This function sends a completions query to the OpenAI API and retrieves generated completions in response. The Completions API enables you to build applications using OpenAI’s language models, like the powerful GPT-3.
+ +Example:
+let query = CompletionsQuery(model: .textDavinci_003, prompt: "What is 42?")
+openAI.completions(query: query) { result in
+ //Handle result here
+}
+
+
+ Swift
+func completions(query: CompletionsQuery, completion: @escaping (Result<CompletionsResult, Error>) -> Void)
+
+
+
+ query
+
+ |
+
+
+
+ A |
+
+
+ completion
+
+ |
+
+
+
+ A closure which receives the result when the API request finishes. The closure’s parameter, |
+
+
+
+ completionsStream(query:onResult:completion:)
+
+ This function sends a completions query to the OpenAI API and retrieves generated completions in response. The Completions API enables you to build applications using OpenAI’s language models, like the powerful GPT-3. The result is returned in chunks.
+ +Example:
+let query = CompletionsQuery(model: .textDavinci_003, prompt: "What is 42?")
+openAI.completions(query: query) { result in
+ //Handle result here
+}
+
+
+ Swift
+func completionsStream(query: CompletionsQuery, onResult: @escaping (Result<CompletionsResult, Error>) -> Void, completion: ((Error?) -> Void)?)
+
+
+
+ query
+
+ |
+
+
+
+ A |
+
+
+ onResult
+
+ |
+
+
+
+ A closure which receives the result when the API request finishes. The closure’s parameter, |
+
+
+ completion
+
+ |
+
+
+
+ A closure that is being called when all chunks are delivered or an unrecoverable error occurred + |
+
+
+
+ images(query:completion:)
+
+ This function sends an images query to the OpenAI API and retrieves generated images in response. The Images Generation API enables you to create various images or graphics using OpenAI’s powerful deep learning models.
+ +Example:
+let query = ImagesQuery(prompt: "White cat with heterochromia sitting on the kitchen table", n: 1, size: ImagesQuery.Size._1024)
+openAI.images(query: query) { result in
+ //Handle result here
+}
+
+
+ Swift
+func images(query: ImagesQuery, completion: @escaping (Result<ImagesResult, Error>) -> Void)
+
+
+
+ query
+
+ |
+
+
+
+ An |
+
+
+ completion
+
+ |
+
+
+
+ A closure which receives the result when the API request finishes. The closure’s parameter, |
+
+
+
+ imageEdits(query:completion:)
+
+ This function sends an image edit query to the OpenAI API and retrieves generated images in response. The Images Edit API enables you to edit images or graphics using OpenAI’s powerful deep learning models.
+ +Example:
+let query = ImagesEditQuery(image: "@whitecat.png", prompt: "White cat with heterochromia sitting on the kitchen table with a bowl of food", n: 1, size: ImagesQuery.Size._1024)
+openAI.imageEdits(query: query) { result in
+ //Handle result here
+}
+
+
+ Swift
+func imageEdits(query: ImageEditsQuery, completion: @escaping (Result<ImagesResult, Error>) -> Void)
+
+
+
+ query
+
+ |
+
+
+
+ An |
+
+
+ completion
+
+ |
+
+
+
+ A closure which receives the result when the API request finishes. The closure’s parameter, |
+
+
+
+ imageVariations(query:completion:)
+
+ This function sends an image variation query to the OpenAI API and retrieves generated images in response. The Images Variations API enables you to create a variation of a given image using OpenAI’s powerful deep learning models.
+ +Example:
+let query = ImagesVariationQuery(image: "@whitecat.png", n: 1, size: ImagesQuery.Size._1024)
+openAI.imageVariations(query: query) { result in
+ //Handle result here
+}
+
+
+ Swift
+func imageVariations(query: ImageVariationsQuery, completion: @escaping (Result<ImagesResult, Error>) -> Void)
+
+
+
+ query
+
+ |
+
+
+
+ An |
+
+
+ completion
+
+ |
+
+
+
+ A closure which receives the result when the API request finishes. The closure’s parameter, |
+
+
+
+ embeddings(query:completion:)
+
+ This function sends an embeddings query to the OpenAI API and retrieves embeddings in response. The Embeddings API enables you to generate high-dimensional vector representations of texts, which can be used for various natural language processing tasks such as semantic similarity, clustering, and classification.
+ +Example:
+let query = EmbeddingsQuery(model: .textSearchBabbageDoc, input: "The food was delicious and the waiter...")
+openAI.embeddings(query: query) { result in
+ //Handle response here
+}
+
+
+ Swift
+func embeddings(query: EmbeddingsQuery, completion: @escaping (Result<EmbeddingsResult, Error>) -> Void)
+
+
+
+ query
+
+ |
+
+
+
+ An |
+
+
+ completion
+
+ |
+
+
+
+ A closure which receives the result when the API request finishes. The closure’s parameter, |
+
+
+
+ chats(query:completion:)
+
+ This function sends a chat query to the OpenAI API and retrieves chat conversation responses. The Chat API enables you to build chatbots or conversational applications using OpenAI’s powerful natural language models, like GPT-3.
+ +Example:
+let query = ChatQuery(model: .gpt3_5Turbo, messages: [.init(role: "user", content: "who are you")])
+openAI.chats(query: query) { result in
+ //Handle response here
+}
+
+
+ Swift
+func chats(query: ChatQuery, completion: @escaping (Result<ChatResult, Error>) -> Void)
+
+
+
+ query
+
+ |
+
+
+
+ A |
+
+
+ completion
+
+ |
+
+
+
+ A closure which receives the result when the API request finishes. The closure’s parameter, |
+
+
+
+ chatsStream(query:onResult:completion:)
+
+ This function sends a chat query to the OpenAI API and retrieves chat stream conversation responses. The Chat API enables you to build chatbots or conversational applications using OpenAI’s powerful natural language models, like GPT-3. The result is returned in chunks.
+ +Example:
+let query = ChatQuery(model: .gpt3_5Turbo, messages: [.init(role: "user", content: "who are you")])
+openAI.chats(query: query) { result in
+ //Handle response here
+}
+
+
+ Swift
+func chatsStream(query: ChatQuery, onResult: @escaping (Result<ChatStreamResult, Error>) -> Void, completion: ((Error?) -> Void)?)
+
+
+
+ query
+
+ |
+
+
+
+ A |
+
+
+ onResult
+
+ |
+
+
+
+ A closure which receives the result when the API request finishes. The closure’s parameter, |
+
+
+ completion
+
+ |
+
+
+
+ A closure that is being called when all chunks are delivered or an unrecoverable error occurred + |
+
+
+
+ edits(query:completion:)
+
+ This function sends an edits query to the OpenAI API and retrieves an edited version of the prompt based on the instruction given.
+ +Example:
+let query = EditsQuery(model: .gpt4, input: "What day of the wek is it?", instruction: "Fix the spelling mistakes")
+openAI.edits(query: query) { result in
+ //Handle response here
+}
+
+
+ Swift
+func edits(query: EditsQuery, completion: @escaping (Result<EditsResult, Error>) -> Void)
+
+
+
+ query
+
+ |
+
+
+
+ An |
+
+
+ completion
+
+ |
+
+
+
+ A closure which receives the result when the API request finishes. The closure’s parameter, |
+
+
+
+ model(query:completion:)
+
+ This function sends a model query to the OpenAI API and retrieves a model instance, providing owner information. The Models API in this usage enables you to gather detailed information on the model in question, like GPT-3.
+ +Example:
+let query = ModelQuery(model: .gpt3_5Turbo)
+openAI.model(query: query) { result in
+ //Handle response here
+}
+
+
+ Swift
+func model(query: ModelQuery, completion: @escaping (Result<ModelResult, Error>) -> Void)
+
+
+
+ query
+
+ |
+
+
+
+ A |
+
+
+ completion
+
+ |
+
+
+
+ A closure which receives the result when the API request finishes. The closure’s parameter, |
+
+
+
+ models(completion:)
+
+ This function sends a models query to the OpenAI API and retrieves a list of models. The Models API in this usage enables you to list all the available models.
+ +Example:
+openAI.models() { result in
+ //Handle response here
+}
+
+
+ Swift
+func models(completion: @escaping (Result<ModelsResult, Error>) -> Void)
+
+
+
+ completion
+
+ |
+
+
+
+ A closure which receives the result when the API request finishes. The closure’s parameter, |
+
+
+
+ moderations(query:completion:)
+
+ This function sends a moderations query to the OpenAI API and retrieves a list of category results to classify how text may violate OpenAI’s Content Policy.
+ +Example:
+let query = ModerationsQuery(input: "I want to kill them.")
+openAI.moderations(query: query) { result in
+ //Handle response here
+}
+
+
+ Swift
+@available(iOS 13.0, *)
+func moderations(query: ModerationsQuery, completion: @escaping (Result<ModerationsResult, Error>) -> Void)
+
+
+
+ query
+
+ |
+
+
+
+ A |
+
+
+ completion
+
+ |
+
+
+
+ A closure which receives the result when the API request finishes. The closure’s parameter, |
+
+
+
+ audioCreateSpeech(query:completion:)
+
+ This function sends an AudioSpeechQuery
to the OpenAI API to create audio speech from text using a specific voice and format.
Example:
+let query = AudioSpeechQuery(model: .tts_1, input: "Hello, world!", voice: .alloy, responseFormat: .mp3, speed: 1.0)
+openAI.audioCreateSpeech(query: query) { result in
+ // Handle response here
+}
+
+
+ Swift
+func audioCreateSpeech(query: AudioSpeechQuery, completion: @escaping (Result<AudioSpeechResult, Error>) -> Void)
+
+
+
+ query
+
+ |
+
+
+
+ An |
+
+
+ completion
+
+ |
+
+
+
+ A closure which receives the result. The closure’s parameter, |
+
+
+
+ audioTranscriptions(query:completion:)
+
+ Transcribes audio data using OpenAI’s audio transcription API and completes the operation asynchronously.
+ +Swift
+func audioTranscriptions(query: AudioTranscriptionQuery, completion: @escaping (Result<AudioTranscriptionResult, Error>) -> Void)
+
+
+
+ query
+
+ |
+
+
+
+ The |
+
+
+ completion
+
+ |
+
+
+
+ The completion handler to be executed upon completion of the transcription request.
+ Returns a |
+
+
+
+ audioTranslations(query:completion:)
+
+ Translates audio data using OpenAI’s audio translation API and completes the operation asynchronously.
+ +Swift
+func audioTranslations(query: AudioTranslationQuery, completion: @escaping (Result<AudioTranslationResult, Error>) -> Void)
+
+
+
+ query
+
+ |
+
+
+
+ The |
+
+
+ completion
+
+ |
+
+
+
+ The completion handler to be executed upon completion of the translation request.
+ Returns a |
+
+
+
+ completions(query:)
+
+
+ Extension method, asynchronous
+
+ Swift
+func completions(
+ query: CompletionsQuery
+) async throws -> CompletionsResult
+
+
+
+
+ completionsStream(query:)
+
+
+ Extension method
+
+ Swift
+func completionsStream(
+ query: CompletionsQuery
+) -> AsyncThrowingStream<CompletionsResult, Error>
+
+
+
+
+ images(query:)
+
+
+ Extension method, asynchronous
+
+ Swift
+func images(
+ query: ImagesQuery
+) async throws -> ImagesResult
+
+
+
+
+ imageEdits(query:)
+
+
+ Extension method, asynchronous
+
+ Swift
+func imageEdits(
+ query: ImageEditsQuery
+) async throws -> ImagesResult
+
+
+
+
+ imageVariations(query:)
+
+
+ Extension method, asynchronous
+
+ Swift
+func imageVariations(
+ query: ImageVariationsQuery
+) async throws -> ImagesResult
+
+
+
+
+ embeddings(query:)
+
+
+ Extension method, asynchronous
+
+ Swift
+func embeddings(
+ query: EmbeddingsQuery
+) async throws -> EmbeddingsResult
+
+
+
+
+ chats(query:)
+
+
+ Extension method, asynchronous
+
+ Swift
+func chats(
+ query: ChatQuery
+) async throws -> ChatResult
+
+
+
+
+ chatsStream(query:)
+
+
+ Extension method
+
+ Swift
+func chatsStream(
+ query: ChatQuery
+) -> AsyncThrowingStream<ChatStreamResult, Error>
+
+
+
+
+ edits(query:)
+
+
+ Extension method, asynchronous
+
+ Swift
+func edits(
+ query: EditsQuery
+) async throws -> EditsResult
+
+
+
+
+ model(query:)
+
+
+ Extension method, asynchronous
+
+ Swift
+func model(
+ query: ModelQuery
+) async throws -> ModelResult
+
+
+
+
+ models()
+
+
+ Extension method, asynchronous
+
+ Swift
+func models() async throws -> ModelsResult
+
+
+
+
+ moderations(query:)
+
+
+ Extension method, asynchronous
+
+ Swift
+func moderations(
+ query: ModerationsQuery
+) async throws -> ModerationsResult
+
+
+
+
+ audioCreateSpeech(query:)
+
+
+ Extension method, asynchronous
+
+ Swift
+func audioCreateSpeech(
+ query: AudioSpeechQuery
+) async throws -> AudioSpeechResult
+
+
+
+
+ audioTranscriptions(query:)
+
+
+ Extension method, asynchronous
+
+ Swift
+func audioTranscriptions(
+ query: AudioTranscriptionQuery
+) async throws -> AudioTranscriptionResult
+
+
+
+
+ audioTranslations(query:)
+
+
+ Extension method, asynchronous
+
+ Swift
+func audioTranslations(
+ query: AudioTranslationQuery
+) async throws -> AudioTranslationResult
+
+
+
+
+ completions(query:)
+
+
+ Extension method
+
+ Swift
+func completions(query: CompletionsQuery) -> AnyPublisher<CompletionsResult, Error>
+
+
+
+
+ completionsStream(query:)
+
+
+ Extension method
+
+ Swift
+func completionsStream(query: CompletionsQuery) -> AnyPublisher<Result<CompletionsResult, Error>, Error>
+
+
+
+
+ images(query:)
+
+
+ Extension method
+
+ Swift
+func images(query: ImagesQuery) -> AnyPublisher<ImagesResult, Error>
+
+
+
+
+ imageEdits(query:)
+
+
+ Extension method
+
+ Swift
+func imageEdits(query: ImageEditsQuery) -> AnyPublisher<ImagesResult, Error>
+
+
+
+
+ imageVariations(query:)
+
+
+ Extension method
+
+ Swift
+func imageVariations(query: ImageVariationsQuery) -> AnyPublisher<ImagesResult, Error>
+
+
+
+
+ embeddings(query:)
+
+
+ Extension method
+
+ Swift
+func embeddings(query: EmbeddingsQuery) -> AnyPublisher<EmbeddingsResult, Error>
+
+
+
+
+ chats(query:)
+
+
+ Extension method
+
+ Swift
+func chats(query: ChatQuery) -> AnyPublisher<ChatResult, Error>
+
+
+
+
+ chatsStream(query:)
+
+
+ Extension method
+
+ Swift
+func chatsStream(query: ChatQuery) -> AnyPublisher<Result<ChatStreamResult, Error>, Error>
+
+
+
+
+ edits(query:)
+
+
+ Extension method
+
+ Swift
+func edits(query: EditsQuery) -> AnyPublisher<EditsResult, Error>
+
+
+
+
+ model(query:)
+
+
+ Extension method
+
+ Swift
+func model(query: ModelQuery) -> AnyPublisher<ModelResult, Error>
+
+
+
+
+ models()
+
+
+ Extension method
+
+ Swift
+func models() -> AnyPublisher<ModelsResult, Error>
+
+
+
+
+ moderations(query:)
+
+
+ Extension method
+
+ Swift
+func moderations(query: ModerationsQuery) -> AnyPublisher<ModerationsResult, Error>
+
+
+
+
+ audioCreateSpeech(query:)
+
+
+ Extension method
+
+ Swift
+func audioCreateSpeech(query: AudioSpeechQuery) -> AnyPublisher<AudioSpeechResult, Error>
+
+
+
+
+ audioTranscriptions(query:)
+
+
+ Extension method
+
+ Swift
+func audioTranscriptions(query: AudioTranscriptionQuery) -> AnyPublisher<AudioTranscriptionResult, Error>
+
+
+
+
+ audioTranslations(query:)
+
+
+ Extension method
+
+ Swift
+func audioTranslations(query: AudioTranslationQuery) -> AnyPublisher<AudioTranslationResult, Error>
+
+ Docs (100% documented)
+The following structures are available globally.
+ +
+
+
+ APIError
+
+ Swift
+public struct APIError : Error, Decodable, Equatable
+extension APIError: LocalizedError
+
+
+
+
+ APIErrorResponse
+
+ Swift
+public struct APIErrorResponse : Error, Decodable, Equatable
+extension APIErrorResponse: LocalizedError
+
+
+
+
+ AudioSpeechQuery
+
+ Generates audio from the input text. +Learn more: OpenAI Speech – Documentation
+ + See more +Swift
+public struct AudioSpeechQuery : Codable
+
+
+
+
+ AudioSpeechResult
+
+ The audio file content. +Learn more: OpenAI Speech – Documentation
+ + See more +Swift
+public struct AudioSpeechResult : Codable, Equatable
+
+
+
+
+ AudioTranscriptionQuery
+
+ Swift
+public struct AudioTranscriptionQuery : Codable
+
+
+
+
+ AudioTranscriptionResult
+
+ Swift
+public struct AudioTranscriptionResult : Codable, Equatable
+
+
+
+
+ AudioTranslationQuery
+
+ Translates audio into English.
+ + See more +Swift
+public struct AudioTranslationQuery : Codable
+
+
+
+
+ AudioTranslationResult
+
+ Swift
+public struct AudioTranslationResult : Codable, Equatable
+
+
+
+
+ ChatQuery
+
+ Creates a model response for the given chat conversation +https://platform.openai.com/docs/guides/text-generation +https://platform.openai.com/docs/api-reference/chat/create
+ + See more +Swift
+public struct ChatQuery : Equatable, Codable, Streamable
+
+
+
+
+ ChatResult
+
+ https://platform.openai.com/docs/api-reference/chat/object +Example Completion object print
+{
+ "id": "chatcmpl-123456",
+ "object": "chat.completion",
+ "created": 1728933352,
+ "model": "gpt-4o-2024-08-06",
+ "choices": [
+ {
+ "index": 0,
+ "message": {
+ "role": "assistant",
+ "content": "Hi there! How can I assist you today?",
+ "refusal": null
+ },
+ "logprobs": null,
+ "finish_reason": "stop"
+ }
+ ],
+ "usage": {
+ "prompt_tokens": 19,
+ "completion_tokens": 10,
+ "total_tokens": 29,
+ "prompt_tokens_details": {
+ "cached_tokens": 0
+ },
+ "completion_tokens_details": {
+ "reasoning_tokens": 0
+ }
+ },
+ "system_fingerprint": "fp_6b68a8204b"
+}
+
+
+ See more
+ Swift
+public struct ChatResult : Codable, Equatable
+
+
+
+
+ ChatStreamResult
+
+ Swift
+public struct ChatStreamResult : Codable, Equatable
+
+
+
+
+ CompletionsQuery
+
+ Swift
+public struct CompletionsQuery : Codable, Streamable
+
+
+
+
+ CompletionsResult
+
+ Swift
+public struct CompletionsResult : Codable, Equatable
+
+
+
+
+ EditsQuery
+
+ Swift
+public struct EditsQuery : Codable
+
+
+
+
+ EditsResult
+
+ Swift
+public struct EditsResult : Codable, Equatable
+
+
+
+
+ EmbeddingsQuery
+
+ Swift
+public struct EmbeddingsQuery : Codable
+
+
+
+
+ EmbeddingsResult
+
+ Swift
+public struct EmbeddingsResult : Codable, Equatable
+
+
+
+
+ ImageEditsQuery
+
+ Swift
+public struct ImageEditsQuery : Codable
+
+
+
+
+ ImageVariationsQuery
+
+ Swift
+public struct ImageVariationsQuery : Codable
+
+
+
+
+ ImagesQuery
+
+ Given a prompt and/or an input image, the model will generate a new image. +https://platform.openai.com/docs/guides/images
+ + See more +Swift
+public struct ImagesQuery : Codable
+
+
+
+
+ ImagesResult
+
+ Returns a list of image objects.
+ + See more +Swift
+public struct ImagesResult : Codable, Equatable
+
+
+
+
+ ModelQuery
+
+ Retrieves a model instance, providing basic information about the model such as the owner and permissioning.
+ + See more +Swift
+public struct ModelQuery : Codable, Equatable
+
+
+
+
+ ModelResult
+
+ The model object matching the specified ID.
+ + See more +Swift
+public struct ModelResult : Codable, Equatable
+
+
+
+
+ ModelsResult
+
+ A list of model objects.
+ + See more +Swift
+public struct ModelsResult : Codable, Equatable
+
+
+
+
+ ModerationsQuery
+
+ Swift
+public struct ModerationsQuery : Codable
+
+
+
+
+ ModerationsResult
+
+ Swift
+public struct ModerationsResult : Codable, Equatable
+extension ModerationsResult: Identifiable
+
+
+
+
+ Vector
+
+ Swift
+public struct Vector
+
+ Docs (100% documented)
+public struct APIError : Error, Decodable, Equatable
+extension APIError: LocalizedError
+
+
+
+
+ message
+
+ Swift
+public let message: String
+
+
+
+
+ type
+
+ Swift
+public let type: String
+
+
+
+
+ param
+
+ Swift
+public let param: String?
+
+
+
+
+ code
+
+ Swift
+public let code: String?
+
+
+
+
+ init(message:type:param:code:)
+
+ Swift
+public init(message: String, type: String, param: String?, code: String?)
+
+
+
+
+ init(from:)
+
+ Swift
+public init(from decoder: Decoder) throws
+
+
+
+
+ errorDescription
+
+ Swift
+public var errorDescription: String? { get }
+
+ Docs (100% documented)
+public struct APIErrorResponse : Error, Decodable, Equatable
+extension APIErrorResponse: LocalizedError
+
+
+
+
+ error
+
+ Swift
+public let error: APIError
+
+
+
+
+ errorDescription
+
+ Swift
+public var errorDescription: String? { get }
+
+ Docs (100% documented)
+public struct AudioSpeechQuery : Codable
+
+ Generates audio from the input text. +Learn more: OpenAI Speech – Documentation
+ +
+
+
+ AudioSpeechVoice
+
+ Encapsulates the voices available for audio generation.
 + +To get acquainted with each of the voices and listen to the samples visit: +OpenAI Text-to-Speech – Voice Options
+ + See more +Swift
+public enum AudioSpeechVoice : String, Codable, CaseIterable
+
+
+
+
+ AudioSpeechResponseFormat
+
+ Encapsulates the response formats available for audio data.
+ +Formats:
+ +Swift
+public enum AudioSpeechResponseFormat : String, Codable, CaseIterable
+
+
+
+
+ input
+
+ The text to generate audio for. The maximum length is 4096 characters.
+ +Swift
+public let input: String
+
+
+
+
+ model
+
+ One of the available TTS models: tts-1 or tts-1-hd
+ +Swift
+public let model: Model
+
+
+
+
+ voice
+
+ The voice to use when generating the audio. Supported voices are alloy, echo, fable, onyx, nova, and shimmer. Previews of the voices are available in the Text to speech guide. +https://platform.openai.com/docs/guides/text-to-speech/voice-options
+ +Swift
+public let voice: AudioSpeechVoice
+
+
+
+
+ responseFormat
+
 + The format to return audio in. Supported formats are mp3, opus, aac, and flac. +Defaults to mp3
+ +Swift
+public let responseFormat: AudioSpeechResponseFormat?
+
+
+
+
+ speed
+
+ The speed of the generated audio. Select a value from 0.25 to 4.0. 1.0 is the default. +Defaults to 1
+ +Swift
+public let speed: String?
+
+
+
+
+ CodingKeys
+
+ Swift
+public enum CodingKeys : String, CodingKey
+
+
+
+
+ init(model:input:voice:responseFormat:speed:)
+
+ Swift
+public init(model: Model, input: String, voice: AudioSpeechVoice, responseFormat: AudioSpeechResponseFormat = .mp3, speed: Double?)
+
+
+
+
+ Speed
+
+ Swift
+enum Speed : Double
+
+
+
+
+ normalizeSpeechSpeed(_:)
+
+ Swift
+static func normalizeSpeechSpeed(_ inputSpeed: Double?) -> String
+
+ Docs (100% documented)
+public enum AudioSpeechResponseFormat : String, Codable, CaseIterable
+
+ Encapsulates the response formats available for audio data.
+ +Formats:
+ +
+
+
+ mp3
+
+ Swift
+case mp3
+
+
+
+
+ opus
+
+ Swift
+case opus
+
+
+
+
+ aac
+
+ Swift
+case aac
+
+
+
+
+ flac
+
+ Swift
+case flac
+
+ Docs (100% documented)
+public enum AudioSpeechVoice : String, Codable, CaseIterable
+
+ Encapsulates the voices available for audio generation.
 + +To get acquainted with each of the voices and listen to the samples visit: +OpenAI Text-to-Speech – Voice Options
+ +
+
+
+ alloy
+
+ Swift
+case alloy
+
+
+
+
+ echo
+
+ Swift
+case echo
+
+
+
+
+ fable
+
+ Swift
+case fable
+
+
+
+
+ onyx
+
+ Swift
+case onyx
+
+
+
+
+ nova
+
+ Swift
+case nova
+
+
+
+
+ shimmer
+
+ Swift
+case shimmer
+
+ Docs (100% documented)
+public enum CodingKeys : String, CodingKey
+
+
+
+
+ model
+
+ Swift
+case model
+
+
+
+
+ input
+
+ Swift
+case input
+
+
+
+
+ voice
+
+ Swift
+case voice
+
+
+
+
+ responseFormat
+
+ Swift
+case responseFormat = "response_format"
+
+
+
+
+ speed
+
+ Swift
+case speed
+
+ Docs (100% documented)
+enum Speed : Double
+
+ Docs (100% documented)
+public struct AudioSpeechResult : Codable, Equatable
+
+ The audio file content. +Learn more: OpenAI Speech – Documentation
+ +
+
+
+ audio
+
 + Audio data for one of the following formats: mp3
, opus
, aac
, flac
Swift
+public let audio: Data
+
+ Docs (100% documented)
+public struct AudioTranscriptionQuery : Codable
+
+
+
+
+ ResponseFormat
+
+ Swift
+public enum ResponseFormat : String, Codable, Equatable, CaseIterable
+
+
+
+
+ file
+
+ The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
+ +Swift
+public let file: Data
+
+
+
+
+ fileType
+
+ Swift
+public let fileType: `Self`.FileType
+
+
+
+
+ model
+
+ ID of the model to use. Only whisper-1 is currently available.
+ +Swift
+public let model: Model
+
+
+
+
+ responseFormat
+
+ The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt. +Defaults to json
+ +Swift
+public let responseFormat: `Self`.ResponseFormat?
+
+
+
+
+ prompt
+
+ An optional text to guide the model’s style or continue a previous audio segment. The prompt should match the audio language.
+ +Swift
+public let prompt: String?
+
+
+
+
+ temperature
+
+ The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit. +Defaults to 0
+ +Swift
+public let temperature: Double?
+
+
+
+
+ language
+
+ The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy and latency. +https://platform.openai.com/docs/guides/speech-to-text/prompting
+ +Swift
+public let language: String?
+
+ Swift
+public init(file: Data, fileType: `Self`.FileType, model: Model, prompt: String? = nil, temperature: Double? = nil, language: String? = nil, responseFormat: `Self`.ResponseFormat? = nil)
+
+
+
+
+ FileType
+
+ Swift
+public enum FileType : String, Codable, Equatable, CaseIterable
+
+ Docs (100% documented)
+public enum FileType : String, Codable, Equatable, CaseIterable
+
+
+
+
+ flac
+
+ Swift
+case flac
+
+
+
+
+ mp3
+
+ Swift
+case mp3
+
+
+
+
+ mpga
+
+ Swift
+case mpga
+
+
+
+
+ mp4
+
+ Swift
+case mp4
+
+
+
+
+ m4a
+
+ Swift
+case m4a
+
+
+
+
+ mpeg
+
+ Swift
+case mpeg
+
+
+
+
+ ogg
+
+ Swift
+case ogg
+
+
+
+
+ wav
+
+ Swift
+case wav
+
+
+
+
+ webm
+
+ Swift
+case webm
+
+ Docs (100% documented)
+public enum ResponseFormat : String, Codable, Equatable, CaseIterable
+
+
+
+
+ json
+
+ Swift
+case json
+
+
+
+
+ text
+
+ Swift
+case text
+
+
+
+
+ verboseJson
+
+ Swift
+case verboseJson = "verbose_json"
+
+
+
+
+ srt
+
+ Swift
+case srt
+
+
+
+
+ vtt
+
+ Swift
+case vtt
+
+ Docs (100% documented)
+public struct AudioTranscriptionResult : Codable, Equatable
+
+
+
+
+ text
+
+ The transcribed text.
+ +Swift
+public let text: String
+
+ Docs (100% documented)
+public struct AudioTranslationQuery : Codable
+
+ Translates audio into English.
+ +
+
+
+ FileType
+
+ Swift
+public typealias FileType = AudioTranscriptionQuery.FileType
+
+
+
+
+ ResponseFormat
+
+ Swift
+public typealias ResponseFormat = AudioTranscriptionQuery.ResponseFormat
+
+
+
+
+ file
+
+ The audio file object (not file name) translate, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
+ +Swift
+public let file: Data
+
+
+
+
+ fileType
+
+ Swift
+public let fileType: `Self`.FileType
+
+
+
+
+ model
+
+ ID of the model to use. Only whisper-1 is currently available.
+ +Swift
+public let model: Model
+
+
+
+
+ responseFormat
+
+ The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt. +Defaults to json
+ +Swift
+public let responseFormat: `Self`.ResponseFormat?
+
+
+
+
+ prompt
+
+ An optional text to guide the model’s style or continue a previous audio segment. The prompt should be in English. +https://platform.openai.com/docs/guides/speech-to-text/prompting
+ +Swift
+public let prompt: String?
+
+
+
+
+ temperature
+
+ The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit. +Defaults to 0
+ +Swift
+public let temperature: Double?
+
+ Swift
+public init(file: Data, fileType: `Self`.FileType, model: Model, prompt: String? = nil, temperature: Double? = nil, responseFormat: `Self`.ResponseFormat? = nil)
+
+ Docs (100% documented)
+public struct AudioTranslationResult : Codable, Equatable
+
+
+
+
+ text
+
+ The translated text.
+ +Swift
+public let text: String
+
+ Docs (100% documented)
+public struct ChatQuery : Equatable, Codable, Streamable
+
+ Creates a model response for the given chat conversation +https://platform.openai.com/docs/guides/text-generation +https://platform.openai.com/docs/api-reference/chat/create
+ +
+
+
+ messages
+
+ A list of messages comprising the conversation so far
+ +Swift
+public let messages: [`Self`.ChatCompletionMessageParam]
+
+
+
+
+ model
+
+ ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API. +https://platform.openai.com/docs/models/model-endpoint-compatibility
+ +Swift
+public let model: Model
+
+
+
+
+ frequencyPenalty
+
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model’s likelihood to repeat the same line verbatim. +Defaults to 0 +https://platform.openai.com/docs/guides/text-generation/parameter-details
+ +Swift
+public let frequencyPenalty: Double?
+
+
+
+
+ logitBias
+
+ Modify the likelihood of specified tokens appearing in the completion. +Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. +Defaults to null
+ +Swift
+public let logitBias: [String : Int]?
+
+
+
+
+ logprobs
+
+ Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the content of message. This option is currently not available on the gpt-4-vision-preview model. +Defaults to false
+ +Swift
+public let logprobs: Bool?
+
+
+
+
+ maxTokens
+
+ The maximum number of tokens to generate in the completion. +The total length of input tokens and generated tokens is limited by the model’s context length. +https://platform.openai.com/tokenizer
+ +Swift
+public let maxTokens: Int?
+
+
+
+
+ n
+
+ How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep n as 1 to minimize costs. +Defaults to 1
+ +Swift
+public let n: Int?
+
+
+
+
+ presencePenalty
+
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model’s likelihood to talk about new topics. +https://platform.openai.com/docs/guides/text-generation/parameter-details
+ +Swift
+public let presencePenalty: Double?
+
+
+
+
+ responseFormat
+
+ An object specifying the format that the model must output. Compatible with gpt-4-1106-preview and gpt-3.5-turbo-1106. +Setting to { “type”: “json_object” } enables JSON mode, which guarantees the message the model generates is valid JSON. +Important: when using JSON mode, you must also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly “stuck” request. Also note that the message content may be partially cut off if finish_reason=“length”, which indicates the generation exceeded max_tokens or the conversation exceeded the max context length.
+ +Swift
+public let responseFormat: `Self`.ResponseFormat?
+
+
+
+
+ seed
+
+ This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.
+ +Swift
+public let seed: Int?
+
+
+
+
+ stop
+
+ Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. +Defaults to null
+ +Swift
+public let stop: Stop?
+
+
+
+
+ temperature
+
+ What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. +We generally recommend altering this or top_p but not both. +Defaults to 1
+ +Swift
+public let temperature: Double?
+
+
+
+
+ toolChoice
+
+ Controls which (if any) function is called by the model. none means the model will not call a function and instead generates a message. auto means the model can pick between generating a message or calling a function. Specifying a particular function via {“type”: “function”, “function”: {“name”: “my_function”}} forces the model to call that function. +none is the default when no functions are present. auto is the default if functions are present
+ +Swift
+public let toolChoice: `Self`.ChatCompletionFunctionCallOptionParam?
+
+
+
+
+ tools
+
+ A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for.
+ +Swift
+public let tools: [`Self`.ChatCompletionToolParam]?
+
+
+
+
+ topLogprobs
+
+ An integer between 0 and 5 specifying the number of most likely tokens to return at each token position, each with an associated log probability. logprobs must be set to true if this parameter is used.
+ +Swift
+public let topLogprobs: Int?
+
+
+
+
+ topP
+
+ An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. +We generally recommend altering this or temperature but not both. +Defaults to 1
+ +Swift
+public let topP: Double?
+
+
+
+
+ user
+
+ A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. +https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids
+ +Swift
+public let user: String?
+
+
+
+
+ stream
+
+ If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. +https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format
+ +Swift
+public var stream: Bool
+
+
+
+
+ init(messages:model:frequencyPenalty:logitBias:logprobs:maxTokens:n:presencePenalty:responseFormat:seed:stop:temperature:toolChoice:tools:topLogprobs:topP:user:stream:)
+
+ Swift
+public init(
+ messages: [Self.ChatCompletionMessageParam],
+ model: Model,
+ frequencyPenalty: Double? = nil,
+ logitBias: [String : Int]? = nil,
+ logprobs: Bool? = nil,
+ maxTokens: Int? = nil,
+ n: Int? = nil,
+ presencePenalty: Double? = nil,
+ responseFormat: Self.ResponseFormat? = nil,
+ seed: Int? = nil,
+ stop: Self.Stop? = nil,
+ temperature: Double? = nil,
+ toolChoice: Self.ChatCompletionFunctionCallOptionParam? = nil,
+ tools: [Self.ChatCompletionToolParam]? = nil,
+ topLogprobs: Int? = nil,
+ topP: Double? = nil,
+ user: String? = nil,
+ stream: Bool = false
+)
+
+
+
+
+ ChatCompletionMessageParam
+
+ Swift
+public enum ChatCompletionMessageParam : Codable, Equatable
+
+
+
+
+ Stop
+
+ Swift
+public enum Stop : Codable, Equatable
+
+
+
+
+ ResponseFormat
+
+ Swift
+public enum ResponseFormat : String, Codable, Equatable
+
+
+
+
+ ChatCompletionFunctionCallOptionParam
+
+ Swift
+public enum ChatCompletionFunctionCallOptionParam : Codable, Equatable
+
+
+
+
+ ChatCompletionToolParam
+
+ Swift
+public struct ChatCompletionToolParam : Codable, Equatable
+
+
+
+
+ CodingKeys
+
+ Swift
+public enum CodingKeys : String, CodingKey
+
+ Docs (100% documented)
+public enum ChatCompletionFunctionCallOptionParam : Codable, Equatable
+
+
+
+
+ none
+
+ Swift
+case none
+
+
+
+
+ auto
+
+ Swift
+case auto
+
+
+
+
+ function(_:)
+
+ Swift
+case function(String)
+
+
+
+
+ encode(to:)
+
+ Swift
+public func encode(to encoder: Encoder) throws
+
+
+
+
+ init(function:)
+
+ Swift
+public init(function: String)
+
+ Docs (100% documented)
+public enum ChatCompletionMessageParam : Codable, Equatable
+
+
+
+
+ system(_:)
+
+ Swift
+case system(`Self`.ChatCompletionSystemMessageParam)
+
+
+
+
+ user(_:)
+
+ Swift
+case user(`Self`.ChatCompletionUserMessageParam)
+
+
+
+
+ assistant(_:)
+
+ Swift
+case assistant(`Self`.ChatCompletionAssistantMessageParam)
+
+
+
+
+ tool(_:)
+
+ Swift
+case tool(`Self`.ChatCompletionToolMessageParam)
+
+
+
+
+ content
+
+ Swift
+public var content: `Self`.ChatCompletionUserMessageParam.Content? { get }
+
+
+
+
+ role
+
+ Swift
+public var role: Role { get }
+
+
+
+
+ name
+
+ Swift
+public var name: String? { get }
+
+
+
+
+ toolCallId
+
+ Swift
+public var toolCallId: String? { get }
+
+
+
+
+ toolCalls
+
+ Swift
+public var toolCalls: [`Self`.ChatCompletionAssistantMessageParam.ChatCompletionMessageToolCallParam]? { get }
+
+
+
+
+ init(role:content:name:toolCalls:toolCallId:)
+
+ Swift
+public init?(
+ role: Role,
+ content: String? = nil,
+ name: String? = nil,
+ toolCalls: [Self.ChatCompletionAssistantMessageParam.ChatCompletionMessageToolCallParam]? = nil,
+ toolCallId: String? = nil
+)
+
+
+
+
+ init(role:content:name:)
+
+ Swift
+public init?(
+ role: Role,
+ content: [ChatCompletionUserMessageParam.Content.VisionContent],
+ name: String? = nil
+)
+
+
+
+
+ encode(to:)
+
+ Swift
+public func encode(to encoder: Encoder) throws
+
+
+
+
+ ChatCompletionSystemMessageParam
+
+ Swift
+public struct ChatCompletionSystemMessageParam : Codable, Equatable
+
+
+
+
+ ChatCompletionUserMessageParam
+
+ Swift
+public struct ChatCompletionUserMessageParam : Codable, Equatable
+
+
+
+
+ ChatCompletionAssistantMessageParam
+
+ Swift
+public struct ChatCompletionAssistantMessageParam : Codable, Equatable
+
+
+
+
+ ChatCompletionToolMessageParam
+
+ Swift
+public struct ChatCompletionToolMessageParam : Codable, Equatable
+
+
+
+
+ Role
+
+ Swift
+public enum Role : String, Codable, Equatable, CaseIterable
+
+
+
+
+ init(from:)
+
+ Swift
+public init(from decoder: Decoder) throws
+
+ Docs (100% documented)
+public struct ChatCompletionAssistantMessageParam : Codable, Equatable
+
+
+
+
+ Role
+
+ Swift
+public typealias Role = ChatQuery.ChatCompletionMessageParam.Role
+
+
+
+
+ role
+
 + The role of the messages author, in this case assistant.
+ +Swift
+public let role: `Self`.Role
+
+
+
+
+ content
+
+ The contents of the assistant message. Required unless tool_calls is specified.
+ +Swift
+public let content: String?
+
+
+
+
+ name
+
+ The name of the author of this message. name
is required if role is function
, and it should be the name of the function whose response is in the content
. May contain a-z, A-Z, 0-9, and underscores, with a maximum length of 64 characters.
Swift
+public let name: String?
+
+
+
+
+ toolCalls
+
+ The tool calls generated by the model, such as function calls.
+ +Swift
+public let toolCalls: [`Self`.ChatCompletionMessageToolCallParam]?
+
+
+
+
+ init(content:name:toolCalls:)
+
+ Swift
+public init(
+ content: String? = nil,
+ name: String? = nil,
+ toolCalls: [Self.ChatCompletionMessageToolCallParam]? = nil
+)
+
+
+
+
+ CodingKeys
+
+ Swift
+public enum CodingKeys : String, CodingKey
+
+
+
+
+ ChatCompletionMessageToolCallParam
+
+ Swift
+public struct ChatCompletionMessageToolCallParam : Codable, Equatable
+
+ Docs (100% documented)
+public struct ChatCompletionMessageToolCallParam : Codable, Equatable
+
+
+
+
+ ToolsType
+
+ Swift
+public typealias ToolsType = ChatQuery.ChatCompletionToolParam.ToolsType
+
+
+
+
+ id
+
+ The ID of the tool call.
+ +Swift
+public let id: String
+
+
+
+
+ function
+
+ The function that the model called.
+ +Swift
+public let function: `Self`.FunctionCall
+
+
+
+
+ type
+
+
+
+
+ init(id:function:)
+
+ Swift
+public init(
+ id: String,
+ function: Self.FunctionCall
+)
+
+
+
+
+ FunctionCall
+
+ Swift
+public struct FunctionCall : Codable, Equatable
+
+ Docs (100% documented)
+public struct FunctionCall : Codable, Equatable
+
+
+
+
+ arguments
+
+ The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.
+ +Swift
+public let arguments: String
+
+
+
+
+ name
+
+ The name of the function to call.
+ +Swift
+public let name: String
+
+ Docs (100% documented)
+public enum CodingKeys : String, CodingKey
+
+
+
+
+ name
+
+ Swift
+case name
+
+
+
+
+ role
+
+ Swift
+case role
+
+
+
+
+ content
+
+ Swift
+case content
+
+
+
+
+ toolCalls
+
+ Swift
+case toolCalls = "tool_calls"
+
+ Docs (100% documented)
+public struct ChatCompletionSystemMessageParam : Codable, Equatable
+
+
+
+
+ Role
+
+ Swift
+public typealias Role = ChatQuery.ChatCompletionMessageParam.Role
+
+
+
+
+ content
+
+ The contents of the system message.
+ +Swift
+public let content: String
+
+
+
+
+ role
+
+ The role of the messages author, in this case system.
+ +Swift
+public let role: `Self`.Role
+
+
+
+
+ name
+
+ An optional name for the participant. Provides the model information to differentiate between participants of the same role.
+ +Swift
+public let name: String?
+
+
+
+
+ init(content:name:)
+
+ Swift
+public init(
+ content: String,
+ name: String? = nil
+)
+
+ Docs (100% documented)
+public struct ChatCompletionToolMessageParam : Codable, Equatable
+
+
+
+
+ Role
+
+ Swift
+public typealias Role = ChatQuery.ChatCompletionMessageParam.Role
+
+
+
+
+ content
+
+ The contents of the tool message.
+ +Swift
+public let content: String
+
+
+
+
+ role
+
+ The role of the messages author, in this case tool.
+ +Swift
+public let role: `Self`.Role
+
+
+
+
+ toolCallId
+
+ Tool call that this message is responding to.
+ +Swift
+public let toolCallId: String
+
+
+
+
+ init(content:toolCallId:)
+
+ Swift
+public init(
+ content: String,
+ toolCallId: String
+)
+
+
+
+
+ CodingKeys
+
+ Swift
+public enum CodingKeys : String, CodingKey
+
+ Docs (100% documented)
+public enum CodingKeys : String, CodingKey
+
+
+
+
+ content
+
+ Swift
+case content
+
+
+
+
+ role
+
+ Swift
+case role
+
+
+
+
+ toolCallId
+
+ Swift
+case toolCallId = "tool_call_id"
+
+ Docs (100% documented)
+public struct ChatCompletionUserMessageParam : Codable, Equatable
+
+
+
+
+ Role
+
+ Swift
+public typealias Role = ChatQuery.ChatCompletionMessageParam.Role
+
+
+
+
+ content
+
+ The contents of the user message.
+ +Swift
+public let content: Content
+
+
+
+
+ role
+
+ The role of the messages author, in this case user.
+ +Swift
+public let role: `Self`.Role
+
+
+
+
+ name
+
+ An optional name for the participant. Provides the model information to differentiate between participants of the same role.
+ +Swift
+public let name: String?
+
+
+
+
+ init(content:name:)
+
+ Swift
+public init(
+ content: Content,
+ name: String? = nil
+)
+
+
+
+
+ Content
+
+ Swift
+public enum Content : Codable, Equatable
+
+ Docs (100% documented)
+public enum Content : Codable, Equatable
+
+
+
+
+ string(_:)
+
+ Swift
+case string(String)
+
+
+
+
+ vision(_:)
+
+ Swift
+case vision([VisionContent])
+
+
+
+
+ string
+
+ Swift
+public var string: String? { get }
+
+
+
+
+ init(string:)
+
+ Swift
+public init(string: String)
+
+
+
+
+ init(vision:)
+
+ Swift
+public init(vision: [VisionContent])
+
+
+
+
+ CodingKeys
+
+ Swift
+public enum CodingKeys : CodingKey
+
+
+
+
+ encode(to:)
+
+ Swift
+public func encode(to encoder: Encoder) throws
+
+
+
+
+ VisionContent
+
+ Swift
+public enum VisionContent : Codable, Equatable
+
+
+
+
+ init(from:)
+
+ Swift
+public init(from decoder: Decoder) throws
+
+ Docs (100% documented)
+public enum CodingKeys : CodingKey
+
+ Docs (100% documented)
+public enum VisionContent : Codable, Equatable
+
+
+
+
+ chatCompletionContentPartTextParam(_:)
+
+ Swift
+case chatCompletionContentPartTextParam(ChatCompletionContentPartTextParam)
+
+
+
+
+ chatCompletionContentPartImageParam(_:)
+
+ Swift
+case chatCompletionContentPartImageParam(ChatCompletionContentPartImageParam)
+
+
+
+
+ text
+
+ Swift
+public var text: String? { get }
+
+
+
+
+ imageUrl
+
+ Swift
+public var imageUrl: `Self`.ChatCompletionContentPartImageParam.ImageURL? { get }
+
+
+
+
+ init(chatCompletionContentPartTextParam:)
+
+ Swift
+public init(chatCompletionContentPartTextParam: ChatCompletionContentPartTextParam)
+
+
+
+
+ init(chatCompletionContentPartImageParam:)
+
+ Swift
+public init(chatCompletionContentPartImageParam: ChatCompletionContentPartImageParam)
+
+
+
+
+ encode(to:)
+
+ Swift
+public func encode(to encoder: Encoder) throws
+
+
+
+
+ ChatCompletionContentPartTextParam
+
+ Swift
+public struct ChatCompletionContentPartTextParam : Codable, Equatable
+
+
+
+
+ ChatCompletionContentPartImageParam
+
+ Swift
+public struct ChatCompletionContentPartImageParam : Codable, Equatable
+
+ Docs (100% documented)
+public struct ChatCompletionContentPartImageParam : Codable, Equatable
+
+
+
+
+ imageUrl
+
+ Swift
+public let imageUrl: ImageURL
+
+
+
+
+ type
+
+ The type of the content part.
+ +Swift
+public let type: String
+
+
+
+
+ init(imageUrl:)
+
+ Swift
+public init(imageUrl: ImageURL)
+
+
+
+
+ ImageURL
+
+ Swift
+public struct ImageURL : Codable, Equatable
+
+
+
+
+ CodingKeys
+
+ Swift
+public enum CodingKeys : String, CodingKey
+
+ Docs (100% documented)
+public enum CodingKeys : String, CodingKey
+
+ Docs (100% documented)
+public struct ImageURL : Codable, Equatable
+
+
+
+
+ url
+
+ Either a URL of the image or the base64 encoded image data.
+ +Swift
+public let url: String
+
+
+
+
+ detail
+
+ Specifies the detail level of the image. Learn more in the +Vision guide https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding
+ +Swift
+public let detail: Detail
+
+
+
+
+ init(url:detail:)
+
+ Swift
+public init(url: String, detail: Detail)
+
+
+
+
+ init(url:detail:)
+
+ Swift
+public init(url: Data, detail: Detail)
+
+
+
+
+ Detail
+
+ Swift
+public enum Detail : String, Codable, Equatable, CaseIterable
+
+ Docs (100% documented)
+public enum Detail : String, Codable, Equatable, CaseIterable
+
+ Docs (100% documented)
+public struct ChatCompletionContentPartTextParam : Codable, Equatable
+
+
+
+
+ text
+
+ The text content.
+ +Swift
+public let text: String
+
+
+
+
+ type
+
+ The type of the content part.
+ +Swift
+public let type: String
+
+
+
+
+ init(text:)
+
+ Swift
+public init(text: String)
+
+ Docs (100% documented)
+public enum Role : String, Codable, Equatable, CaseIterable
+
+
+
+
+ system
+
+ Swift
+case system
+
+
+
+
+ user
+
+ Swift
+case user
+
+
+
+
+ assistant
+
+ Swift
+case assistant
+
+
+
+
+ tool
+
+ Swift
+case tool
+
+ Docs (100% documented)
+public struct ChatCompletionToolParam : Codable, Equatable
+
+
+
+
+ function
+
+ Swift
+public let function: `Self`.FunctionDefinition
+
+
+
+
+ type
+
+ Swift
+public let type: `Self`.ToolsType
+
+
+
+
+ init(function:)
+
+ Swift
+public init(
+ function: Self.FunctionDefinition
+)
+
+
+
+
+ FunctionDefinition
+
+ Swift
+public struct FunctionDefinition : Codable, Equatable
+
+
+
+
+ ToolsType
+
+ Swift
+public enum ToolsType : String, Codable, Equatable
+
+ Docs (100% documented)
+public struct FunctionDefinition : Codable, Equatable
+
+
+
+
+ name
+
+ The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.
+ +Swift
+public let name: String
+
+
+
+
+ description
+
+ The description of what the function does.
+ +Swift
+public let description: String?
+
+
+
+
+ parameters
+
+ The parameters the functions accepts, described as a JSON Schema object. +https://platform.openai.com/docs/guides/text-generation/function-calling +https://json-schema.org/understanding-json-schema/ +**Python library defines only [String: Object] dictionary.
+ +Swift
+public let parameters: `Self`.FunctionParameters?
+
+
+
+
+ init(name:description:parameters:)
+
+ Swift
+public init(
+ name: String,
+ description: String? = nil,
+ parameters: Self.FunctionParameters? = nil
+)
+
+
+
+
+ FunctionParameters
+
+ See the guide for examples, and the JSON Schema reference for documentation about the format.
+ + See more +Swift
+public struct FunctionParameters : Codable, Equatable
+
+ Docs (100% documented)
+public struct FunctionParameters : Codable, Equatable
+
+ See the guide for examples, and the JSON Schema reference for documentation about the format.
+ +
+
+
+ type
+
+ Swift
+public let type: `Self`.JSONType
+
+
+
+
+ properties
+
+ Swift
+public let properties: [String : Property]?
+
+
+
+
+ required
+
+ Swift
+public let required: [String]?
+
+
+
+
+ pattern
+
+ Swift
+public let pattern: String?
+
+
+
+
+ const
+
+ Swift
+public let const: String?
+
+
+
+
+ enum
+
+ Swift
+public let `enum`: [String]?
+
+
+
+
+ multipleOf
+
+ Swift
+public let multipleOf: Int?
+
+
+
+
+ minimum
+
+ Swift
+public let minimum: Int?
+
+
+
+
+ maximum
+
+ Swift
+public let maximum: Int?
+
+
+
+
+ Property
+
+ Swift
+public struct Property : Codable, Equatable
+
+
+
+
+ JSONType
+
+ Docs (100% documented)
+public enum JSONType : String, Codable
+
+
+
+
+ integer
+
+ Swift
+case integer
+
+
+
+
+ string
+
+ Swift
+case string
+
+
+
+
+ boolean
+
+ Swift
+case boolean
+
+
+
+
+ array
+
+ Swift
+case array
+
+
+
+
+ object
+
+ Swift
+case object
+
+
+
+
+ number
+
+ Swift
+case number
+
+
+
+
+ null
+
+ Swift
+case null
+
+ Docs (100% documented)
+public struct Property : Codable, Equatable
+
+
+
+
+ JSONType
+
+ Swift
+public typealias JSONType = ChatQuery.ChatCompletionToolParam.FunctionDefinition.FunctionParameters.JSONType
+
+
+
+
+ type
+
+ Swift
+public let type: `Self`.JSONType
+
+
+
+
+ description
+
+ Swift
+public let description: String?
+
+
+
+
+ format
+
+ Swift
+public let format: String?
+
+
+
+
+ items
+
+ Swift
+public let items: `Self`.Items?
+
+
+
+
+ required
+
+ Swift
+public let required: [String]?
+
+
+
+
+ pattern
+
+ Swift
+public let pattern: String?
+
+
+
+
+ const
+
+ Swift
+public let const: String?
+
+
+
+
+ enum
+
+ Swift
+public let `enum`: [String]?
+
+
+
+
+ multipleOf
+
+ Swift
+public let multipleOf: Int?
+
+
+
+
+ minimum
+
+ Swift
+public let minimum: Double?
+
+
+
+
+ maximum
+
+ Swift
+public let maximum: Double?
+
+
+
+
+ minItems
+
+ Swift
+public let minItems: Int?
+
+
+
+
+ maxItems
+
+ Swift
+public let maxItems: Int?
+
+
+
+
+ uniqueItems
+
+ Swift
+public let uniqueItems: Bool?
+
+
+
+
+ init(type:description:format:items:required:pattern:const:enum:multipleOf:minimum:maximum:minItems:maxItems:uniqueItems:)
+
+ Swift
+public init(
+ type: Self.JSONType,
+ description: String? = nil,
+ format: String? = nil,
+ items: Self.Items? = nil,
+ required: [String]? = nil,
+ pattern: String? = nil,
+ const: String? = nil,
+ enum: [String]? = nil,
+ multipleOf: Int? = nil,
+ minimum: Double? = nil,
+ maximum: Double? = nil,
+ minItems: Int? = nil,
+ maxItems: Int? = nil,
+ uniqueItems: Bool? = nil
+)
+
+
+
+
+ Items
+
+ Swift
+public struct Items : Codable, Equatable
+
+ Docs (100% documented)
+public struct Items : Codable, Equatable
+
+
+
+
+ JSONType
+
+ Swift
+public typealias JSONType = ChatQuery.ChatCompletionToolParam.FunctionDefinition.FunctionParameters.JSONType
+
+
+
+
+ type
+
+ Swift
+public let type: `Self`.JSONType
+
+
+
+
+ properties
+
+ Swift
+public let properties: [String : Property]?
+
+
+
+
+ pattern
+
+ Swift
+public let pattern: String?
+
+
+
+
+ const
+
+ Swift
+public let const: String?
+
+
+
+
+ enum
+
+ Swift
+public let `enum`: [String]?
+
+
+
+
+ multipleOf
+
+ Swift
+public let multipleOf: Int?
+
+
+
+
+ minimum
+
+ Swift
+public let minimum: Double?
+
+
+
+
+ maximum
+
+ Swift
+public let maximum: Double?
+
+
+
+
+ minItems
+
+ Swift
+public let minItems: Int?
+
+
+
+
+ maxItems
+
+ Swift
+public let maxItems: Int?
+
+
+
+
+ uniqueItems
+
+ Swift
+public let uniqueItems: Bool?
+
+
+
+
+ init(type:properties:pattern:const:enum:multipleOf:minimum:maximum:minItems:maxItems:uniqueItems:)
+
+ Swift
+public init(
+ type: Self.JSONType,
+ properties: [String : Property]? = nil,
+ pattern: String? = nil,
+ const: String? = nil,
+ `enum`: [String]? = nil,
+ multipleOf: Int? = nil,
+ minimum: Double? = nil,
+ maximum: Double? = nil,
+ minItems: Int? = nil,
+ maxItems: Int? = nil,
+ uniqueItems: Bool? = nil
+)
+
+ Docs (100% documented)
+public enum ToolsType : String, Codable, Equatable
+
+
+
+
+ function
+
+ Swift
+case function
+
+ Docs (100% documented)
+public enum CodingKeys : String, CodingKey
+
+
+
+
+ messages
+
+ Swift
+case messages
+
+
+
+
+ model
+
+ Swift
+case model
+
+
+
+
+ frequencyPenalty
+
+ Swift
+case frequencyPenalty = "frequency_penalty"
+
+
+
+
+ logitBias
+
+ Swift
+case logitBias = "logit_bias"
+
+
+
+
+ logprobs
+
+ Swift
+case logprobs
+
+
+
+
+ maxTokens
+
+ Swift
+case maxTokens = "max_tokens"
+
+
+
+
+ n
+
+ Swift
+case n
+
+
+
+
+ presencePenalty
+
+ Swift
+case presencePenalty = "presence_penalty"
+
+
+
+
+ responseFormat
+
+ Swift
+case responseFormat = "response_format"
+
+
+
+
+ seed
+
+ Swift
+case seed
+
+
+
+
+ stop
+
+ Swift
+case stop
+
+
+
+
+ temperature
+
+ Swift
+case temperature
+
+
+
+
+ toolChoice
+
+ Swift
+case toolChoice = "tool_choice"
+
+
+
+
+ tools
+
+ Swift
+case tools
+
+
+
+
+ topLogprobs
+
+ Swift
+case topLogprobs = "top_logprobs"
+
+
+
+
+ topP
+
+ Swift
+case topP = "top_p"
+
+
+
+
+ user
+
+ Swift
+case user
+
+
+
+
+ stream
+
+ Swift
+case stream
+
+ Docs (100% documented)
+public enum ResponseFormat : String, Codable, Equatable
+
+
+
+
+ jsonObject
+
+ Swift
+case jsonObject = "json_object"
+
+
+
+
+ text
+
+ Swift
+case text
+
+
+
+
+ encode(to:)
+
+ Swift
+public func encode(to encoder: Encoder) throws
+
+ Docs (100% documented)
+public enum Stop : Codable, Equatable
+
+
+
+
+ string(_:)
+
+ Swift
+case string(String)
+
+
+
+
+ stringList(_:)
+
+ Swift
+case stringList([String])
+
+
+
+
+ encode(to:)
+
+ Swift
+public func encode(to encoder: Encoder) throws
+
+
+
+
+ init(string:)
+
+ Swift
+public init(string: String)
+
+
+
+
+ init(stringList:)
+
+ Swift
+public init(stringList: [String])
+
+ Docs (100% documented)
+public struct ChatResult : Codable, Equatable
+
+ https://platform.openai.com/docs/api-reference/chat/object +Example chat completion object
+{
+ "id": "chatcmpl-123456",
+ "object": "chat.completion",
+ "created": 1728933352,
+ "model": "gpt-4o-2024-08-06",
+ "choices": [
+ {
+ "index": 0,
+ "message": {
+ "role": "assistant",
+ "content": "Hi there! How can I assist you today?",
+ "refusal": null
+ },
+ "logprobs": null,
+ "finish_reason": "stop"
+ }
+ ],
+ "usage": {
+ "prompt_tokens": 19,
+ "completion_tokens": 10,
+ "total_tokens": 29,
+ "prompt_tokens_details": {
+ "cached_tokens": 0
+ },
+ "completion_tokens_details": {
+ "reasoning_tokens": 0
+ }
+ },
+ "system_fingerprint": "fp_6b68a8204b"
+}
+
+
+
+
+
+ Choice
+
+ mimic the choices array in the chat completion object
+ + See more +Swift
+public struct Choice : Codable, Equatable
+
+
+
+
+ CompletionUsage
+
+ Swift
+public struct CompletionUsage : Codable, Equatable
+
+
+
+
+ id
+
+ A unique identifier for the chat completion.
+ +Swift
+public let id: String
+
+
+
+
+ object
+
+ The object type, which is always chat.completion.
+ +Swift
+public let object: String
+
+
+
+
+ created
+
+ The Unix timestamp (in seconds) of when the chat completion was created.
+ +Swift
+public let created: TimeInterval
+
+
+
+
+ model
+
+ The model used for the chat completion.
+ +Swift
+public let model: String
+
+
+
+
+ choices
+
+ A list of chat completion choices. Can be more than one if n is greater than 1.
+ +Swift
+public let choices: [Choice]
+
+
+
+
+ usage
+
+ Usage statistics for the completion request.
+ +Swift
+public let usage: `Self`.CompletionUsage?
+
+
+
+
+ systemFingerprint
+
+ This fingerprint represents the backend configuration that the model runs with. +Can be used in conjunction with the seed request parameter to understand when backend changes have been made that might impact determinism.
+ +Swift
+public let systemFingerprint: String?
+
+
+
+
+ CodingKeys
+
+ Swift
+public enum CodingKeys : String, CodingKey
+
+ Docs (100% documented)
+public struct Choice : Codable, Equatable
+
+ mimic the choices array in the chat completion object
+ +
+
+
+ ChatCompletionMessage
+
+ Swift
+public typealias ChatCompletionMessage = ChatQuery.ChatCompletionMessageParam
+
+
+
+
+ index
+
+ The index of the choice in the list of choices.
+ +Swift
+public let index: Int
+
+
+
+
+ logprobs
+
+ Log probability information for the choice.
+ +Swift
+public let logprobs: `Self`.ChoiceLogprobs?
+
+
+
+
+ message
+
+ A chat completion message generated by the model.
+ +Swift
+public let message: `Self`.ChatCompletionMessage
+
+
+
+
+ finishReason
+
+ The reason the model stopped generating tokens. This will be stop if the model hit a natural stop point or a provided stop sequence, length if the maximum number of tokens specified in the request was reached, content_filter if content was omitted due to a flag from our content filters, tool_calls if the model called a tool, or function_call (deprecated) if the model called a function.
+ +Swift
+public let finishReason: String?
+
+
+
+
+ ChoiceLogprobs
+
+ Swift
+public struct ChoiceLogprobs : Codable, Equatable
+
+
+
+
+ CodingKeys
+
+ Swift
+public enum CodingKeys : String, CodingKey
+
+
+
+
+ FinishReason
+
+ Swift
+public enum FinishReason : String, Codable, Equatable
+
+ Docs (100% documented)
+public struct ChoiceLogprobs : Codable, Equatable
+
+
+
+
+ content
+
+ Swift
+public let content: [`Self`.ChatCompletionTokenLogprob]?
+
+
+
+
+ ChatCompletionTokenLogprob
+
+ Swift
+public struct ChatCompletionTokenLogprob : Codable, Equatable
+
+ Docs (100% documented)
+public struct ChatCompletionTokenLogprob : Codable, Equatable
+
+
+
+
+ token
+
+ The token.
+ +Swift
+public let token: String
+
+
+
+
+ bytes
+
+ A list of integers representing the UTF-8 bytes representation of the token.
+Useful in instances where characters are represented by multiple tokens and
+their byte representations must be combined to generate the correct text
+representation. Can be null
if there is no bytes representation for the token.
Swift
+public let bytes: [Int]?
+
+
+
+
+ logprob
+
+ The log probability of this token.
+ +Swift
+public let logprob: Double
+
+
+
+
+ topLogprobs
+
+ List of the most likely tokens and their log probability, at this token position.
+In rare cases, there may be fewer than the number of requested top_logprobs
returned.
Swift
+public let topLogprobs: [TopLogprob]
+
+
+
+
+ TopLogprob
+
+ Swift
+public struct TopLogprob : Codable, Equatable
+
+
+
+
+ CodingKeys
+
+ Swift
+public enum CodingKeys : String, CodingKey
+
+ Docs (100% documented)
+public enum CodingKeys : String, CodingKey
+
+
+
+
+ token
+
+ Swift
+case token
+
+
+
+
+ bytes
+
+ Swift
+case bytes
+
+
+
+
+ logprob
+
+ Swift
+case logprob
+
+
+
+
+ topLogprobs
+
+ Swift
+case topLogprobs = "top_logprobs"
+
+ Docs (100% documented)
+public struct TopLogprob : Codable, Equatable
+
+
+
+
+ token
+
+ The token.
+ +Swift
+public let token: String
+
+
+
+
+ bytes
+
+ A list of integers representing the UTF-8 bytes representation of the token.
+Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be null
if there is no bytes representation for the token.
Swift
+public let bytes: [Int]?
+
+
+
+
+ logprob
+
+ The log probability of this token.
+ +Swift
+public let logprob: Double
+
+ Docs (100% documented)
+public enum CodingKeys : String, CodingKey
+
+
+
+
+ index
+
+ Swift
+case index
+
+
+
+
+ logprobs
+
+ Swift
+case logprobs
+
+
+
+
+ message
+
+ Swift
+case message
+
+
+
+
+ finishReason
+
+ Swift
+case finishReason = "finish_reason"
+
+ Docs (100% documented)
+public enum FinishReason : String, Codable, Equatable
+
+
+
+
+ stop
+
+ Swift
+case stop
+
+
+
+
+ length
+
+ Swift
+case length
+
+
+
+
+ toolCalls
+
+ Swift
+case toolCalls = "tool_calls"
+
+
+
+
+ contentFilter
+
+ Swift
+case contentFilter = "content_filter"
+
+
+
+
+ functionCall
+
+ Swift
+case functionCall = "function_call"
+
+ Docs (100% documented)
+public enum CodingKeys : String, CodingKey
+
+
+
+
+ id
+
+ Swift
+case id
+
+
+
+
+ object
+
+ Swift
+case object
+
+
+
+
+ created
+
+ Swift
+case created
+
+
+
+
+ model
+
+ Swift
+case model
+
+
+
+
+ choices
+
+ Swift
+case choices
+
+
+
+
+ usage
+
+ Swift
+case usage
+
+
+
+
+ systemFingerprint
+
+ Swift
+case systemFingerprint = "system_fingerprint"
+
+ Docs (100% documented)
+public struct CompletionUsage : Codable, Equatable
+
+
+
+
+ completionTokens
+
+ Number of tokens in the generated completion.
+ +Swift
+public let completionTokens: Int
+
+
+
+
+ promptTokens
+
+ Number of tokens in the prompt.
+ +Swift
+public let promptTokens: Int
+
+
+
+
+ totalTokens
+
+ Total number of tokens used in the request (prompt + completion).
+ +Swift
+public let totalTokens: Int
+
+ Docs (100% documented)
+public struct ChatStreamResult : Codable, Equatable
+
+
+
+
+ Choice
+
+ Swift
+public struct Choice : Codable, Equatable
+
+
+
+
+ id
+
+ A unique identifier for the chat completion. Each chunk has the same ID.
+ +Swift
+public let id: String
+
+
+
+
+ object
+
+ The object type, which is always chat.completion.chunk
.
Swift
+public let object: String
+
+
+
+
+ created
+
+ The Unix timestamp (in seconds) of when the chat completion was created. +Each chunk has the same timestamp.
+ +Swift
+public let created: TimeInterval
+
+
+
+
+ model
+
+ The model to generate the completion.
+ +Swift
+public let model: String
+
+
+
+
+ choices
+
+ A list of chat completion choices.
+Can be more than one if n
is greater than 1.
Swift
+public let choices: [Choice]
+
+
+
+
+ systemFingerprint
+
+ This fingerprint represents the backend configuration that the model runs with. Can be used in conjunction with the seed
request parameter to understand when backend changes have been made that might impact determinism.
Swift
+public let systemFingerprint: String?
+
+
+
+
+ CodingKeys
+
+ Swift
+public enum CodingKeys : String, CodingKey
+
+ Docs (100% documented)
+public struct Choice : Codable, Equatable
+
+
+
+
+ FinishReason
+
+ Swift
+public typealias FinishReason = ChatResult.Choice.FinishReason
+
+
+
+
+ ChoiceDelta
+
+ Swift
+public struct ChoiceDelta : Codable, Equatable
+
+
+
+
+ index
+
+ The index of the choice in the list of choices.
+ +Swift
+public let index: Int
+
+
+
+
+ delta
+
+ A chat completion delta generated by streamed model responses.
+ +Swift
+public let delta: `Self`.ChoiceDelta
+
+
+
+
+ finishReason
+
+ The reason the model stopped generating tokens.
+This will be stop
if the model hit a natural stop point or a provided stop sequence, length
if the maximum number of tokens specified in the request was reached, content_filter
if content was omitted due to a flag from our content filters, tool_calls
if the model called a tool, or function_call
(deprecated) if the model called a function.
Swift
+public let finishReason: FinishReason?
+
+
+
+
+ logprobs
+
+ Log probability information for the choice.
+ +Swift
+public let logprobs: `Self`.ChoiceLogprobs?
+
+
+
+
+ ChoiceLogprobs
+
+ Swift
+public struct ChoiceLogprobs : Codable, Equatable
+
+
+
+
+ CodingKeys
+
+ Swift
+public enum CodingKeys : String, CodingKey
+
+ Docs (100% documented)
+public struct ChoiceDelta : Codable, Equatable
+
+
+
+
+ Role
+
+ Swift
+public typealias Role = ChatQuery.ChatCompletionMessageParam.Role
+
+
+
+
+ content
+
+ The contents of the chunk message.
+ +Swift
+public let content: String?
+
+
+
+
+ role
+
+ The role of the author of this message.
+ +Swift
+public let role: `Self`.Role?
+
+
+
+
+ toolCalls
+
+ Swift
+public let toolCalls: [`Self`.ChoiceDeltaToolCall]?
+
+
+
+
+ ChoiceDeltaToolCall
+
+ Swift
+public struct ChoiceDeltaToolCall : Codable, Equatable
+
+
+
+
+ CodingKeys
+
+ Swift
+public enum CodingKeys : String, CodingKey
+
+ Docs (100% documented)
+public struct ChoiceDeltaToolCall : Codable, Equatable
+
+
+
+
+ index
+
+ Swift
+public let index: Int
+
+
+
+
+ id
+
+ The ID of the tool call.
+ +Swift
+public let id: String?
+
+
+
+
+ function
+
+ The function that the model called.
+ +Swift
+public let function: `Self`.ChoiceDeltaToolCallFunction?
+
+
+
+
+ type
+
+ The type of the tool. Currently, only function is supported.
+ +Swift
+public let type: String?
+
+
+
+
+ init(index:id:function:)
+
+ Swift
+public init(
+ index: Int,
+ id: String? = nil,
+ function: Self.ChoiceDeltaToolCallFunction? = nil
+)
+
+
+
+
+ ChoiceDeltaToolCallFunction
+
+ Swift
+public struct ChoiceDeltaToolCallFunction : Codable, Equatable
+
+ Docs (100% documented)
+public struct ChoiceDeltaToolCallFunction : Codable, Equatable
+
+
+
+
+ arguments
+
+ The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.
+ +Swift
+public let arguments: String?
+
+
+
+
+ name
+
+ The name of the function to call.
+ +Swift
+public let name: String?
+
+
+
+
+ init(arguments:name:)
+
+ Swift
+public init(
+ arguments: String? = nil,
+ name: String? = nil
+)
+
+ Docs (100% documented)
+public enum CodingKeys : String, CodingKey
+
+ Docs (100% documented)
+public struct ChoiceLogprobs : Codable, Equatable
+
+
+
+
+ content
+
+ A list of message content tokens with log probability information.
+ +Swift
+public let content: [`Self`.ChatCompletionTokenLogprob]?
+
+
+
+
+ ChatCompletionTokenLogprob
+
+ Swift
+public struct ChatCompletionTokenLogprob : Codable, Equatable
+
+ Docs (100% documented)
+public struct ChatCompletionTokenLogprob : Codable, Equatable
+
+
+
+
+ token
+
+ The token.
+ +Swift
+public let token: String
+
+
+
+
+ bytes
+
+ A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be null if there is no bytes representation for the token.
+ +Swift
+public let bytes: [Int]?
+
+
+
+
+ logprob
+
+ The log probability of this token.
+ +Swift
+public let logprob: Double
+
+
+
+
+ topLogprobs
+
+ List of the most likely tokens and their log probability, at this token position. In rare cases, there may be fewer than the number of requested top_logprobs returned.
+ +Swift
+public let topLogprobs: [`Self`.TopLogprob]?
+
+
+
+
+ TopLogprob
+
+ Swift
+public struct TopLogprob : Codable, Equatable
+
+
+
+
+ CodingKeys
+
+ Swift
+public enum CodingKeys : String, CodingKey
+
+ Docs (100% documented)
+public enum CodingKeys : String, CodingKey
+
+
+
+
+ token
+
+ Swift
+case token
+
+
+
+
+ bytes
+
+ Swift
+case bytes
+
+
+
+
+ logprob
+
+ Swift
+case logprob
+
+
+
+
+ topLogprobs
+
+ Swift
+case topLogprobs = "top_logprobs"
+
+ Docs (100% documented)
+public struct TopLogprob : Codable, Equatable
+
+
+
+
+ token
+
+ The token.
+ +Swift
+public let token: String
+
+
+
+
+ bytes
+
+ A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be null if there is no bytes representation for the token.
+ +Swift
+public let bytes: [Int]?
+
+
+
+
+ logprob
+
+ The log probability of this token.
+ +Swift
+public let logprob: Double
+
+ Docs (100% documented)
+public enum CodingKeys : String, CodingKey
+
+
+
+
+ index
+
+ Swift
+case index
+
+
+
+
+ delta
+
+ Swift
+case delta
+
+
+
+
+ finishReason
+
+ Swift
+case finishReason = "finish_reason"
+
+
+
+
+ logprobs
+
+ Swift
+case logprobs
+
+ Docs (100% documented)
+public enum CodingKeys : String, CodingKey
+
+
+
+
+ id
+
+ Swift
+case id
+
+
+
+
+ object
+
+ Swift
+case object
+
+
+
+
+ created
+
+ Swift
+case created
+
+
+
+
+ model
+
+ Swift
+case model
+
+
+
+
+ choices
+
+ Swift
+case choices
+
+
+
+
+ systemFingerprint
+
+ Swift
+case systemFingerprint = "system_fingerprint"
+
+ Docs (100% documented)
+public struct CompletionsQuery : Codable, Streamable
+
+
+
+
+ model
+
+ ID of the model to use.
+ +Swift
+public let model: Model
+
+
+
+
+ prompt
+
+ The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.
+ +Swift
+public let prompt: String
+
+
+
+
+ temperature
+
+ What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer.
+ +Swift
+public let temperature: Double?
+
+
+
+
+ maxTokens
+
+ The maximum number of tokens to generate in the completion.
+ +Swift
+public let maxTokens: Int?
+
+
+
+
+ topP
+
+ An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ +Swift
+public let topP: Double?
+
+
+
+
+ frequencyPenalty
+
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model’s likelihood to repeat the same line verbatim.
+ +Swift
+public let frequencyPenalty: Double?
+
+
+
+
+ presencePenalty
+
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model’s likelihood to talk about new topics.
+ +Swift
+public let presencePenalty: Double?
+
+
+
+
+ stop
+
+ Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
+ +Swift
+public let stop: [String]?
+
+
+
+
+ user
+
+ A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
+ +Swift
+public let user: String?
+
+
+
+
+ init(model:prompt:temperature:maxTokens:topP:frequencyPenalty:presencePenalty:stop:user:)
+
+ Swift
+public init(model: Model, prompt: String, temperature: Double? = nil, maxTokens: Int? = nil, topP: Double? = nil, frequencyPenalty: Double? = nil, presencePenalty: Double? = nil, stop: [String]? = nil, user: String? = nil)
+
+ Docs (100% documented)
+public struct CompletionsResult : Codable, Equatable
+
+
+
+
+ Usage
+
+ Swift
+public struct Usage : Codable, Equatable
+
+
+
+
+ Choice
+
+ Swift
+public struct Choice : Codable, Equatable
+
+
+
+
+ id
+
+ Swift
+public let id: String
+
+
+
+
+ object
+
+ Swift
+public let object: String
+
+
+
+
+ created
+
+ Swift
+public let created: TimeInterval
+
+
+
+
+ model
+
+ Swift
+public let model: Model
+
+
+
+
+ choices
+
+ Swift
+public let choices: [Choice]
+
+
+
+
+ usage
+
+ Swift
+public let usage: Usage?
+
+ Docs (100% documented)
+public struct Choice : Codable, Equatable
+
+
+
+
+ text
+
+ Swift
+public let text: String
+
+
+
+
+ index
+
+ Swift
+public let index: Int
+
+
+
+
+ finishReason
+
+ Swift
+public let finishReason: String?
+
+ Docs (100% documented)
+public struct Usage : Codable, Equatable
+
+
+
+
+ promptTokens
+
+ Swift
+public let promptTokens: Int
+
+
+
+
+ completionTokens
+
+ Swift
+public let completionTokens: Int
+
+
+
+
+ totalTokens
+
+ Swift
+public let totalTokens: Int
+
+ Docs (100% documented)
+public struct EditsQuery : Codable
+
+
+
+
+ model
+
+ ID of the model to use.
+ +Swift
+public let model: Model
+
+
+
+
+ input
+
+ Input text to get embeddings for.
+ +Swift
+public let input: String?
+
+
+
+
+ instruction
+
+ The instruction that tells the model how to edit the prompt.
+ +Swift
+public let instruction: String
+
+
+
+
+ n
+
+ The number of images to generate. Must be between 1 and 10.
+ +Swift
+public let n: Int?
+
+
+
+
+ temperature
+
+ What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer.
+ +Swift
+public let temperature: Double?
+
+
+
+
+ topP
+
+ An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ +Swift
+public let topP: Double?
+
+ Swift
+public init(model: Model, input: String?, instruction: String, n: Int? = nil, temperature: Double? = nil, topP: Double? = nil)
+
+ Docs (100% documented)
+public struct EditsResult : Codable, Equatable
+
+
+
+
+ Choice
+
+ Swift
+public struct Choice : Codable, Equatable
+
+
+
+
+ Usage
+
+ Swift
+public struct Usage : Codable, Equatable
+
+
+
+
+ object
+
+ Swift
+public let object: String
+
+
+
+
+ created
+
+ Swift
+public let created: TimeInterval
+
+
+
+
+ choices
+
+ Swift
+public let choices: [Choice]
+
+
+
+
+ usage
+
+ Swift
+public let usage: Usage
+
+ Docs (100% documented)
+public struct Choice : Codable, Equatable
+
+ Docs (100% documented)
+public struct Usage : Codable, Equatable
+
+
+
+
+ promptTokens
+
+ Swift
+public let promptTokens: Int
+
+
+
+
+ completionTokens
+
+ Swift
+public let completionTokens: Int
+
+
+
+
+ totalTokens
+
+ Swift
+public let totalTokens: Int
+
+ Docs (100% documented)
+public struct EmbeddingsQuery : Codable
+
+
+
+
+ input
+
+ Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for text-embedding-ada-002), cannot be an empty string, and any array must be 2048 dimensions or less.
+ +Swift
+public let input: `Self`.Input
+
+
+
+
+ model
+
+ ID of the model to use. You can use the List models API to see all of your available models, or see our Model overview for descriptions of them. +https://platform.openai.com/docs/api-reference/models/list +https://platform.openai.com/docs/models/overview
+ +Swift
+public let model: Model
+
+
+
+
+ encodingFormat
+
+ The format to return the embeddings in. Can be either float or base64. +https://pypi.org/project/pybase64/
+ +Swift
+public let encodingFormat: `Self`.EncodingFormat?
+
+
+
+
+ user
+
+ A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. +https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids
+ +Swift
+public let user: String?
+
+
+
+
+ init(input:model:encodingFormat:user:)
+
+ Swift
+public init(
+ input: Self.Input,
+ model: Model,
+ encodingFormat: Self.EncodingFormat? = nil,
+ user: String? = nil
+)
+
+
+
+
+ Input
+
+ Swift
+public enum Input : Codable, Equatable
+
+
+
+
+ EncodingFormat
+
+ Swift
+public enum EncodingFormat : String, Codable
+
+
+
+
+ CodingKeys
+
+ Swift
+public enum CodingKeys : String, CodingKey
+
+ Docs (100% documented)
+public enum CodingKeys : String, CodingKey
+
+
+
+
+ input
+
+ Swift
+case input
+
+
+
+
+ model
+
+ Swift
+case model
+
+
+
+
+ encodingFormat
+
+ Swift
+case encodingFormat = "encoding_format"
+
+
+
+
+ user
+
+ Swift
+case user
+
+ Docs (100% documented)
+public enum EncodingFormat : String, Codable
+
+ Docs (100% documented)
+public enum Input : Codable, Equatable
+
+
+
+
+ string(_:)
+
+ Swift
+case string(String)
+
+
+
+
+ stringList(_:)
+
+ Swift
+case stringList([String])
+
+
+
+
+ intList(_:)
+
+ Swift
+case intList([Int])
+
+
+
+
+ intMatrix(_:)
+
+ Swift
+case intMatrix([[Int]])
+
+
+
+
+ encode(to:)
+
+ Swift
+public func encode(to encoder: Encoder) throws
+
+
+
+
+ init(string:)
+
+ Swift
+public init(string: String)
+
+
+
+
+ init(stringList:)
+
+ Swift
+public init(stringList: [String])
+
+
+
+
+ init(intList:)
+
+ Swift
+public init(intList: [Int])
+
+
+
+
+ init(intMatrix:)
+
+ Swift
+public init(intMatrix: [[Int]])
+
+ Docs (100% documented)
+public struct EmbeddingsResult : Codable, Equatable
+
+
+
+
+ Embedding
+
+ Swift
+public struct Embedding : Codable, Equatable
+
+
+
+
+ Usage
+
+ Swift
+public struct Usage : Codable, Equatable
+
+
+
+
+ data
+
+ Swift
+public let data: [Embedding]
+
+
+
+
+ model
+
+ Swift
+public let model: String
+
+
+
+
+ usage
+
+ Swift
+public let usage: Usage
+
+
+
+
+ object
+
+ The object type, which is always “list”.
+ +Swift
+public let object: String
+
+ Docs (100% documented)
+public struct Embedding : Codable, Equatable
+
+
+
+
+ object
+
+ The object type, which is always “embedding”.
+ +Swift
+public let object: String
+
+
+
+
+ embedding
+
+ The embedding vector, which is a list of floats. The length of vector depends on the model as listed in the embedding guide. +https://platform.openai.com/docs/guides/embeddings
+ +Swift
+public let embedding: [Double]
+
+
+
+
+ index
+
+ The index of the embedding in the list of embeddings.
+ +Swift
+public let index: Int
+
+ Docs (100% documented)
+public struct Usage : Codable, Equatable
+
+
+
+
+ promptTokens
+
+ Swift
+public let promptTokens: Int
+
+
+
+
+ totalTokens
+
+ Swift
+public let totalTokens: Int
+
+ Docs (100% documented)
+public struct ImageEditsQuery : Codable
+
+
+
+
+ ResponseFormat
+
+ Swift
+public typealias ResponseFormat = ImagesQuery.ResponseFormat
+
+
+
+
+ Size
+
+ Swift
+public typealias Size = ImagesQuery.Size
+
+
+
+
+ image
+
+ The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask.
+ +Swift
+public let image: Data
+
+
+
+
+ mask
+
+ An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where image should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as image.
+ +Swift
+public let mask: Data?
+
+
+
+
+ prompt
+
+ A text description of the desired image(s). The maximum length is 1000 characters.
+ +Swift
+public let prompt: String
+
+
+
+
+ model
+
+ The model to use for image generation. +Defaults to dall-e-2
+ +Swift
+public let model: Model?
+
+
+
+
+ n
+
+ The number of images to generate. Must be between 1 and 10.
+ +Swift
+public let n: Int?
+
+
+
+
+ responseFormat
+
+ The format in which the generated images are returned. Must be one of url or b64_json. +Defaults to url
+ +Swift
+public let responseFormat: `Self`.ResponseFormat?
+
+
+
+
+ size
+
+ The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024.
+ +Swift
+public let size: Size?
+
+
+
+
+ user
+
+ A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. +https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids
+ +Swift
+public let user: String?
+
+ Swift
+public init(
+ image: Data,
+ prompt: String,
+ mask: Data? = nil,
+ model: Model? = nil,
+ n: Int? = nil,
+ responseFormat: Self.ResponseFormat? = nil,
+ size: Self.Size? = nil,
+ user: String? = nil
+)
+
+
+
+
+ CodingKeys
+
+ Swift
+public enum CodingKeys : String, CodingKey
+
+ Docs (100% documented)
+public enum CodingKeys : String, CodingKey
+
+
+
+
+ image
+
+ Swift
+case image
+
+
+
+
+ mask
+
+ Swift
+case mask
+
+
+
+
+ prompt
+
+ Swift
+case prompt
+
+
+
+
+ model
+
+ Swift
+case model
+
+
+
+
+ n
+
+ Swift
+case n
+
+
+
+
+ responseFormat
+
+ Swift
+case responseFormat = "response_format"
+
+
+
+
+ size
+
+ Swift
+case size
+
+
+
+
+ user
+
+ Swift
+case user
+
+ Docs (100% documented)
+public struct ImageVariationsQuery : Codable
+
+
+
+
+ ResponseFormat
+
+ Swift
+public typealias ResponseFormat = ImagesQuery.ResponseFormat
+
+
+
+
+ image
+
+ The image to edit. Must be a valid PNG file, less than 4MB, and square.
+ +Swift
+public let image: Data
+
+
+
+
+ model
+
+ The model to use for image generation. Only dall-e-2 is supported at this time. +Defaults to dall-e-2
+ +Swift
+public let model: Model?
+
+
+
+
+ n
+
+ The number of images to generate. Must be between 1 and 10. +Defaults to 1
+ +Swift
+public let n: Int?
+
+
+
+
+ responseFormat
+
+ The format in which the generated images are returned. Must be one of url or b64_json. +Defaults to url
+ +Swift
+public let responseFormat: `Self`.ResponseFormat?
+
+
+
+
+ size
+
+ The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024. +Defaults to 1024x1024
+ +Swift
+public let size: String?
+
+
+
+
+ user
+
+ A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. +https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids
+ +Swift
+public let user: String?
+
+
+
+
+ init(image:model:n:responseFormat:size:user:)
+
+ Swift
+public init(
+ image: Data,
+ model: Model? = nil,
+ n: Int? = nil,
+ responseFormat: Self.ResponseFormat? = nil,
+ size: String? = nil,
+ user: String? = nil
+)
+
+
+
+
+ CodingKeys
+
+ Swift
+public enum CodingKeys : String, CodingKey
+
+ Docs (100% documented)
+public enum CodingKeys : String, CodingKey
+
+
+
+
+ image
+
+ Swift
+case image
+
+
+
+
+ model
+
+ Swift
+case model
+
+
+
+
+ n
+
+ Swift
+case n
+
+
+
+
+ responseFormat
+
+ Swift
+case responseFormat = "response_format"
+
+
+
+
+ size
+
+ Swift
+case size
+
+
+
+
+ user
+
+ Swift
+case user
+
+ Docs (100% documented)
+public struct ImagesQuery : Codable
+
+ Given a prompt and/or an input image, the model will generate a new image. +https://platform.openai.com/docs/guides/images
+ +
+
+
+ ResponseFormat
+
+ Swift
+public enum ResponseFormat : String, Codable, Equatable
+
+
+
+
+ prompt
+
+ A text description of the desired image(s). The maximum length is 1000 characters for dall-e-2 and 4000 characters for dall-e-3.
+ +Swift
+public let prompt: String
+
+
+
+
+ model
+
+ The model to use for image generation. +Defaults to dall-e-2
+ +Swift
+public let model: Model?
+
+
+
+
+ responseFormat
+
+ The format in which the generated images are returned. Must be one of url or b64_json. +Defaults to url
+ +Swift
+public let responseFormat: `Self`.ResponseFormat?
+
+
+
+
+ n
+
+ The number of images to generate. Must be between 1 and 10. For dall-e-3, only n=1 is supported. +Defaults to 1
+ +Swift
+public let n: Int?
+
+
+
+
+ size
+
+ The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024 for dall-e-2. Must be one of 1024x1024, 1792x1024, or 1024x1792 for dall-e-3 models. +Defaults to 1024x1024
+ +Swift
+public let size: `Self`.Size?
+
+
+
+
+ user
+
+ A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. +https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids
+ +Swift
+public let user: String?
+
+
+
+
+ style
+
+ The style of the generated images. Must be one of vivid or natural. Vivid causes the model to lean towards generating hyper-real and dramatic images. Natural causes the model to produce more natural, less hyper-real looking images. This param is only supported for dall-e-3. +Defaults to vivid
+ +Swift
+public let style: `Self`.Style?
+
+
+
+
+ quality
+
+ The quality of the image that will be generated. hd creates images with finer details and greater consistency across the image. This param is only supported for dall-e-3. +Defaults to standard
+ +Swift
+public let quality: `Self`.Quality?
+
+ Swift
+public init(
+ prompt: String,
+ model: Model? = nil,
+ n: Int? = nil,
+ quality:Self.Quality? = nil,
+ responseFormat: Self.ResponseFormat? = nil,
+ size: Size? = nil,
+ style: Self.Style? = nil,
+ user: String? = nil
+)
+
+
+
+
+ CodingKeys
+
+ Swift
+public enum CodingKeys : String, CodingKey
+
+
+
+
+ Style
+
+ Swift
+public enum Style : String, Codable, CaseIterable
+
+
+
+
+ Quality
+
+ Swift
+public enum Quality : String, Codable, CaseIterable
+
+
+
+
+ Size
+
+ Swift
+public enum Size : String, Codable, CaseIterable
+
+ Docs (100% documented)
+public enum CodingKeys : String, CodingKey
+
+
+
+
+ model
+
+ Swift
+case model
+
+
+
+
+ prompt
+
+ Swift
+case prompt
+
+
+
+
+ n
+
+ Swift
+case n
+
+
+
+
+ size
+
+ Swift
+case size
+
+
+
+
+ user
+
+ Swift
+case user
+
+
+
+
+ style
+
+ Swift
+case style
+
+
+
+
+ responseFormat
+
+ Swift
+case responseFormat = "response_format"
+
+
+
+
+ quality
+
+ Swift
+case quality
+
+ Docs (100% documented)
+public enum Quality : String, Codable, CaseIterable
+
+ Docs (100% documented)
+public enum ResponseFormat : String, Codable, Equatable
+
+ Docs (100% documented)
+public enum Size : String, Codable, CaseIterable
+
+
+
+
+ _256
+
+ Swift
+case _256 = "256x256"
+
+
+
+
+ _512
+
+ Swift
+case _512 = "512x512"
+
+
+
+
+ _1024
+
+ Swift
+case _1024 = "1024x1024"
+
+
+
+
+ _1792_1024
+
+ Swift
+case _1792_1024 = "1792x1024"
+
+
+
+
+ _1024_1792
+
+ Swift
+case _1024_1792 = "1024x1792"
+
+ Docs (100% documented)
+public enum Style : String, Codable, CaseIterable
+
+ Docs (100% documented)
+public struct ImagesResult : Codable, Equatable
+
+ Returns a list of image objects.
+ +
+
+
+ created
+
+ Swift
+public let created: TimeInterval
+
+
+
+
+ data
+
+ Swift
+public let data: [`Self`.Image]
+
+
+
+
+ Image
+
+ Represents the url or the content of an image generated by the OpenAI API.
+ + See more +Swift
+public struct Image : Codable, Equatable
+
+ Docs (100% documented)
+public struct Image : Codable, Equatable
+
+ Represents the url or the content of an image generated by the OpenAI API.
+ +
+
+
+ b64Json
+
+ The base64-encoded JSON of the generated image, if response_format is b64_json
+ +Swift
+public let b64Json: String?
+
+
+
+
+ revisedPrompt
+
+ The prompt that was used to generate the image, if there was any revision to the prompt.
+ +Swift
+public let revisedPrompt: String?
+
+
+
+
+ url
+
+ The URL of the generated image, if response_format is url (default).
+ +Swift
+public let url: String?
+
+
+
+
+ CodingKeys
+
+ Swift
+public enum CodingKeys : String, CodingKey
+
+ Docs (100% documented)
+public enum CodingKeys : String, CodingKey
+
+
+
+
+ b64Json
+
+ Swift
+case b64Json = "b64_json"
+
+
+
+
+ revisedPrompt
+
+ Swift
+case revisedPrompt = "revised_prompt"
+
+
+
+
+ url
+
+ Swift
+case url
+
+ Docs (100% documented)
+public struct ModelQuery : Codable, Equatable
+
+ Retrieves a model instance, providing basic information about the model such as the owner and permissioning.
+ +
+
+
+ model
+
+ The ID of the model to use for this request.
+ +Swift
+public let model: Model
+
+
+
+
+ init(model:)
+
+ Swift
+public init(model: Model)
+
+ Docs (100% documented)
+public struct ModelResult : Codable, Equatable
+
+ The model object matching the specified ID.
+ +
+
+
+ id
+
+ The model identifier, which can be referenced in the API endpoints.
+ +Swift
+public let id: String
+
+
+
+
+ created
+
+ The Unix timestamp (in seconds) when the model was created.
+ +Swift
+public let created: TimeInterval
+
+
+
+
+ object
+
+ The object type, which is always “model”.
+ +Swift
+public let object: String
+
+
+
+
+ ownedBy
+
+ The organization that owns the model.
+ +Swift
+public let ownedBy: String
+
+
+
+
+ CodingKeys
+
+ Swift
+public enum CodingKeys : String, CodingKey
+
+ Docs (100% documented)
+public enum CodingKeys : String, CodingKey
+
+
+
+
+ id
+
+ Swift
+case id
+
+
+
+
+ created
+
+ Swift
+case created
+
+
+
+
+ object
+
+ Swift
+case object
+
+
+
+
+ ownedBy
+
+ Swift
+case ownedBy = "owned_by"
+
+ Docs (100% documented)
+public struct ModelsResult : Codable, Equatable
+
+ A list of model objects.
+ +
+
+
+ data
+
+ A list of model objects.
+ +Swift
+public let data: [ModelResult]
+
+
+
+
+ object
+
+ The object type, which is always list
Swift
+public let object: String
+
+ Docs (100% documented)
+public struct ModerationsQuery : Codable
+
+
+
+
+ input
+
+ The input text to classify.
+ +Swift
+public let input: String
+
+
+
+
+ model
+
+ ID of the model to use.
+ +Swift
+public let model: Model?
+
+
+
+
+ init(input:model:)
+
+ Swift
+public init(input: String, model: Model? = nil)
+
+ Docs (100% documented)
+public struct ModerationsResult : Codable, Equatable
+extension ModerationsResult: Identifiable
+
+
+
+
+ Moderation
+
+ Swift
+public struct Moderation : Codable, Equatable
+
+
+
+
+ id
+
+ Swift
+public let id: String
+
+
+
+
+ model
+
+ Swift
+public let model: Model
+
+
+
+
+ results
+
+ Swift
+public let results: [`Self`.Moderation]
+
+ Docs (100% documented)
+public struct Moderation : Codable, Equatable
+
+
+
+
+ Categories
+
+ Swift
+public struct Categories : Codable, Equatable, Sequence
+
+
+
+
+ CategoryScores
+
+ Swift
+public struct CategoryScores : Codable, Equatable, Sequence
+
+
+
+
+ categories
+
+ Collection of per-category binary usage policies violation flags. For each category, the value is true if the model flags the corresponding category as violated, false otherwise.
+ +Swift
+public let categories: Categories
+
+
+
+
+ categoryScores
+
+ Collection of per-category raw scores output by the model, denoting the model’s confidence that the input violates the OpenAI’s policy for the category. The value is between 0 and 1, where higher values denote higher confidence. The scores should not be interpreted as probabilities.
+ +Swift
+public let categoryScores: CategoryScores
+
+
+
+
+ flagged
+
+ True if the model classifies the content as violating OpenAI’s usage policies, false otherwise.
+ +Swift
+public let flagged: Bool
+
+ Docs (100% documented)
+public struct Categories : Codable, Equatable, Sequence
+
+
+
+
+ harassment
+
+ Content that expresses, incites, or promotes harassing language towards any target.
+ +Swift
+public let harassment: Bool
+
+
+
+
+ harassmentThreatening
+
+ Harassment content that also includes violence or serious harm towards any target.
+ +Swift
+public let harassmentThreatening: Bool
+
+
+
+
+ hate
+
+ Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste.
+ +Swift
+public let hate: Bool
+
+
+
+
+ hateThreatening
+
+ Hateful content that also includes violence or serious harm towards the targeted group.
+ +Swift
+public let hateThreatening: Bool
+
+
+
+
+ selfHarm
+
+ Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders.
+ +Swift
+public let selfHarm: Bool
+
+
+
+
+ selfHarmIntent
+
+ Content where the speaker expresses that they are engaging or intend to engage in acts of self-harm, such as suicide, cutting, and eating disorders.
+ +Swift
+public let selfHarmIntent: Bool
+
+
+
+
+ selfHarmInstructions
+
+ Content that encourages performing acts of self-harm, such as suicide, cutting, and eating disorders, or that gives instructions or advice on how to commit such acts.
+ +Swift
+public let selfHarmInstructions: Bool
+
+
+
+
+ sexual
+
+ Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services (excluding sex education and wellness).
+ +Swift
+public let sexual: Bool
+
+
+
+
+ sexualMinors
+
+ Sexual content that includes an individual who is under 18 years old.
+ +Swift
+public let sexualMinors: Bool
+
+
+
+
+ violence
+
+ Content that promotes or glorifies violence or celebrates the suffering or humiliation of others.
+ +Swift
+public let violence: Bool
+
+
+
+
+ violenceGraphic
+
+ Violent content that depicts death, violence, or serious physical injury in extreme graphic detail.
+ +Swift
+public let violenceGraphic: Bool
+
+
+
+
+ CodingKeys
+
+ Swift
+public enum CodingKeys : String, CodingKey, CaseIterable
+
+
+
+
+ makeIterator()
+
+ Swift
+public func makeIterator() -> IndexingIterator<[(String, Bool)]>
+
+ Docs (100% documented)
+public enum CodingKeys : String, CodingKey, CaseIterable
+
+
+
+
+ harassment
+
+ Swift
+case harassment
+
+
+
+
+ harassmentThreatening
+
+ Swift
+case harassmentThreatening = "harassment/threatening"
+
+
+
+
+ hate
+
+ Swift
+case hate
+
+
+
+
+ hateThreatening
+
+ Swift
+case hateThreatening = "hate/threatening"
+
+
+
+
+ selfHarm
+
+ Swift
+case selfHarm = "self-harm"
+
+
+
+
+ selfHarmIntent
+
+ Swift
+case selfHarmIntent = "self-harm/intent"
+
+
+
+
+ selfHarmInstructions
+
+ Swift
+case selfHarmInstructions = "self-harm/instructions"
+
+
+
+
+ sexual
+
+ Swift
+case sexual
+
+
+
+
+ sexualMinors
+
+ Swift
+case sexualMinors = "sexual/minors"
+
+
+
+
+ violence
+
+ Swift
+case violence
+
+
+
+
+ violenceGraphic
+
+ Swift
+case violenceGraphic = "violence/graphic"
+
+ Docs (100% documented)
+public struct CategoryScores : Codable, Equatable, Sequence
+
+
+
+
+ harassment
+
+ Content that expresses, incites, or promotes harassing language towards any target.
+ +Swift
+public let harassment: Double
+
+
+
+
+ harassmentThreatening
+
+ Harassment content that also includes violence or serious harm towards any target.
+ +Swift
+public let harassmentThreatening: Double
+
+
+
+
+ hate
+
+ Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste.
+ +Swift
+public let hate: Double
+
+
+
+
+ hateThreatening
+
+ Hateful content that also includes violence or serious harm towards the targeted group.
+ +Swift
+public let hateThreatening: Double
+
+
+
+
+ selfHarm
+
+ Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders.
+ +Swift
+public let selfHarm: Double
+
+
+
+
+ selfHarmIntent
+
+ Content where the speaker expresses that they are engaging or intend to engage in acts of self-harm, such as suicide, cutting, and eating disorders.
+ +Swift
+public let selfHarmIntent: Double
+
+
+
+
+ selfHarmInstructions
+
+ Content that encourages performing acts of self-harm, such as suicide, cutting, and eating disorders, or that gives instructions or advice on how to commit such acts.
+ +Swift
+public let selfHarmInstructions: Double
+
+
+
+
+ sexual
+
+ Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services (excluding sex education and wellness).
+ +Swift
+public let sexual: Double
+
+
+
+
+ sexualMinors
+
+ Sexual content that includes an individual who is under 18 years old.
+ +Swift
+public let sexualMinors: Double
+
+
+
+
+ violence
+
+ Content that promotes or glorifies violence or celebrates the suffering or humiliation of others.
+ +Swift
+public let violence: Double
+
+
+
+
+ violenceGraphic
+
+ Violent content that depicts death, violence, or serious physical injury in extreme graphic detail.
+ +Swift
+public let violenceGraphic: Double
+
+
+
+
+ CodingKeys
+
+ Swift
+public enum CodingKeys : String, CodingKey, CaseIterable
+
+
+
+
+ makeIterator()
+
+ Swift
+public func makeIterator() -> IndexingIterator<[(String, Bool)]>
+
+ Docs (100% documented)
+public enum CodingKeys : String, CodingKey, CaseIterable
+
+
+
+
+ harassment
+
+ Swift
+case harassment
+
+
+
+
+ harassmentThreatening
+
+ Swift
+case harassmentThreatening = "harassment/threatening"
+
+
+
+
+ hate
+
+ Swift
+case hate
+
+
+
+
+ hateThreatening
+
+ Swift
+case hateThreatening = "hate/threatening"
+
+
+
+
+ selfHarm
+
+ Swift
+case selfHarm = "self-harm"
+
+
+
+
+ selfHarmIntent
+
+ Swift
+case selfHarmIntent = "self-harm/intent"
+
+
+
+
+ selfHarmInstructions
+
+ Swift
+case selfHarmInstructions = "self-harm/instructions"
+
+
+
+
+ sexual
+
+ Swift
+case sexual
+
+
+
+
+ sexualMinors
+
+ Swift
+case sexualMinors = "sexual/minors"
+
+
+
+
+ violence
+
+ Swift
+case violence
+
+
+
+
+ violenceGraphic
+
+ Swift
+case violenceGraphic = "violence/graphic"
+
+ Docs (100% documented)
+public struct Vector
+
+
+
+
+ cosineSimilarity(a:b:)
+
+ Returns the similarity between two vectors
+ +Swift
+public static func cosineSimilarity(a: [Double], b: [Double]) -> Double
+
+
+
+ a
+
+ |
+
+
+
+ The first vector + |
+
+
+ b
+
+ |
+
+
+
+ The second vector + |
+
+
+
+ cosineDifference(a:b:)
+
+ Returns the difference between two vectors. Cosine distance is defined as 1 - cosineSimilarity(a, b)
Swift
+public func cosineDifference(a: [Double], b: [Double]) -> Double
+
+
+
+ a
+
+ |
+
+
+
+ The first vector + |
+
+
+ b
+
+ |
+
+
+
+ The second vector + |
+
Docs (100% documented)
+The following type aliases are available globally.
+ +
+
+
+ Model
+
+ Defines all available OpenAI models supported by the library.
+ +Swift
+public typealias Model = String
+
+ Docs (100% documented)
+The following classes are available globally.
+ +
+
+
+ OpenAI
+
+ Swift
+final public class OpenAI : OpenAIProtocol
+
+ Docs (100% documented)
+final public class OpenAI : OpenAIProtocol
+
+
+
+
+ Configuration
+
+ Swift
+public struct Configuration
+
+
+
+
+ configuration
+
+ Swift
+public let configuration: Configuration
+
+
+
+
+ init(apiToken:)
+
+ Swift
+public convenience init(apiToken: String)
+
+
+
+
+ init(configuration:)
+
+ Swift
+public convenience init(configuration: Configuration)
+
+
+
+
+ init(configuration:session:)
+
+ Swift
+public convenience init(configuration: Configuration, session: URLSession = URLSession.shared)
+
+
+
+
+ completions(query:completion:)
+
+ Swift
+public func completions(query: CompletionsQuery, completion: @escaping (Result<CompletionsResult, Error>) -> Void)
+
+
+
+
+ completionsStream(query:onResult:completion:)
+
+ Swift
+public func completionsStream(query: CompletionsQuery, onResult: @escaping (Result<CompletionsResult, Error>) -> Void, completion: ((Error?) -> Void)?)
+
+
+
+
+ images(query:completion:)
+
+ Swift
+public func images(query: ImagesQuery, completion: @escaping (Result<ImagesResult, Error>) -> Void)
+
+
+
+
+ imageEdits(query:completion:)
+
+ Swift
+public func imageEdits(query: ImageEditsQuery, completion: @escaping (Result<ImagesResult, Error>) -> Void)
+
+
+
+
+ imageVariations(query:completion:)
+
+ Swift
+public func imageVariations(query: ImageVariationsQuery, completion: @escaping (Result<ImagesResult, Error>) -> Void)
+
+
+
+
+ embeddings(query:completion:)
+
+ Swift
+public func embeddings(query: EmbeddingsQuery, completion: @escaping (Result<EmbeddingsResult, Error>) -> Void)
+
+
+
+
+ chats(query:completion:)
+
+ Swift
+public func chats(query: ChatQuery, completion: @escaping (Result<ChatResult, Error>) -> Void)
+
+
+
+
+ chatsStream(query:onResult:completion:)
+
+ Swift
+public func chatsStream(query: ChatQuery, onResult: @escaping (Result<ChatStreamResult, Error>) -> Void, completion: ((Error?) -> Void)?)
+
+
+
+
+ edits(query:completion:)
+
+ Swift
+public func edits(query: EditsQuery, completion: @escaping (Result<EditsResult, Error>) -> Void)
+
+
+
+
+ model(query:completion:)
+
+ Swift
+public func model(query: ModelQuery, completion: @escaping (Result<ModelResult, Error>) -> Void)
+
+
+
+
+ models(completion:)
+
+ Swift
+public func models(completion: @escaping (Result<ModelsResult, Error>) -> Void)
+
+
+
+
+ moderations(query:completion:)
+
+ Swift
+@available(iOS 13.0, *)
+public func moderations(query: ModerationsQuery, completion: @escaping (Result<ModerationsResult, Error>) -> Void)
+
+
+
+
+ audioTranscriptions(query:completion:)
+
+ Swift
+public func audioTranscriptions(query: AudioTranscriptionQuery, completion: @escaping (Result<AudioTranscriptionResult, Error>) -> Void)
+
+
+
+
+ audioTranslations(query:completion:)
+
+ Swift
+public func audioTranslations(query: AudioTranslationQuery, completion: @escaping (Result<AudioTranslationResult, Error>) -> Void)
+
+
+
+
+ audioCreateSpeech(query:completion:)
+
+ Swift
+public func audioCreateSpeech(query: AudioSpeechQuery, completion: @escaping (Result<AudioSpeechResult, Error>) -> Void)
+
+ Docs (100% documented)
+public struct Configuration
+
+
+
+
+ token
+
+ OpenAI API token. See https://platform.openai.com/docs/api-reference/authentication
+ +Swift
+public let token: String
+
+
+
+
+ organizationIdentifier
+
+ Optional OpenAI organization identifier. See https://platform.openai.com/docs/api-reference/authentication
+ +Swift
+public let organizationIdentifier: String?
+
+
+
+
+ host
+
+ API host. Set this property if you use some kind of proxy or your own server. Default is api.openai.com
+ +Swift
+public let host: String
+
+
+
+
+ port
+
+ Swift
+public let port: Int
+
+
+
+
+ scheme
+
+ Swift
+public let scheme: String
+
+
+
+
+ timeoutInterval
+
+ Default request timeout
+ +Swift
+public let timeoutInterval: TimeInterval
+
+ Swift
+public init(token: String, organizationIdentifier: String? = nil, host: String = "api.openai.com", port: Int = 443, scheme: String = "https", timeoutInterval: TimeInterval = 60.0)
+
+ Docs (100% documented)
+The following enumerations are available globally.
+ +
+
+
+ OpenAIError
+
+ Swift
+public enum OpenAIError : Error
+
+ Docs (100% documented)
+public enum OpenAIError : Error
+
+
+
+
+ emptyData
+
+ Swift
+case emptyData
+
+ Docs (100% documented)
+The following extensions are available globally.
+ +Docs (100% documented)
+public extension Model
+
+
+
+
+ gpt4_o
+
+ gpt-4o
, currently the most advanced, multimodal flagship model that’s cheaper and faster than GPT-4 Turbo.
Swift
+static let gpt4_o: String
+
+
+
+
+ gpt4_o_mini
+
+ gpt-4o-mini
, currently the most affordable and intelligent model for fast and lightweight requests.
Swift
+static let gpt4_o_mini: String
+
+
+
+
+ gpt4_turbo
+
+ gpt-4-turbo
, The latest GPT-4 Turbo model with vision capabilities. Vision requests can now use JSON mode and function calling and more. Context window: 128,000 tokens
Swift
+static let gpt4_turbo: String
+
+
+
+
+ gpt4_turbo_preview
+
+ gpt-4-turbo
, gpt-4 model with improved instruction following, JSON mode, reproducible outputs, parallel function calling and more. Maximum of 4096 output tokens
Swift
+@available(*, deprecated, message: "Please upgrade to the newer model")
+static let gpt4_turbo_preview: String
+
+
+
+
+ gpt4_vision_preview
+
+ gpt-4-vision-preview
, able to understand images, in addition to all other GPT-4 Turbo capabilities.
Swift
+static let gpt4_vision_preview: String
+
+
+
+
+ gpt4_0125_preview
+
+ Snapshot of gpt-4-turbo-preview
from January 25th 2024. This model reduces cases of “laziness” where the model doesn’t complete a task. Also fixes the bug impacting non-English UTF-8 generations. Maximum of 4096 output tokens
Swift
+static let gpt4_0125_preview: String
+
+
+
+
+ gpt4_1106_preview
+
+ Snapshot of gpt-4-turbo-preview
from November 6th 2023. Improved instruction following, JSON mode, reproducible outputs, parallel function calling and more. Maximum of 4096 output tokens
Swift
+@available(*, deprecated, message: "Please upgrade to the newer model")
+static let gpt4_1106_preview: String
+
+
+
+
+ gpt4
+
+ Most capable gpt-4
model, outperforms any GPT-3.5 model, able to do more complex tasks, and optimized for chat.
Swift
+static let gpt4: String
+
+
+
+
+ gpt4_0613
+
+ Snapshot of gpt-4
from June 13th 2023 with function calling data. Unlike gpt-4
, this model will not receive updates, and will be deprecated 3 months after a new version is released.
Swift
+static let gpt4_0613: String
+
+
+
+
+ gpt4_0314
+
+ Snapshot of gpt-4
from March 14th 2023. Unlike gpt-4, this model will not receive updates, and will only be supported for a three month period ending on June 14th 2023.
Swift
+@available(*, deprecated, message: "Please upgrade to the newer model")
+static let gpt4_0314: String
+
+
+
+
+ gpt4_32k
+
+ Same capabilities as the base gpt-4
model but with 4x the context length. Will be updated with our latest model iteration.
Swift
+static let gpt4_32k: String
+
+
+
+
+ gpt4_32k_0613
+
+ Snapshot of gpt-4-32k
from June 13th 2023. Unlike gpt-4-32k
, this model will not receive updates, and will be deprecated 3 months after a new version is released.
Swift
+static let gpt4_32k_0613: String
+
+
+
+
+ gpt4_32k_0314
+
+ Snapshot of gpt-4-32k
from March 14th 2023. Unlike gpt-4-32k
, this model will not receive updates, and will only be supported for a three month period ending on June 14th 2023.
Swift
+@available(*, deprecated, message: "Please upgrade to the newer model")
+static let gpt4_32k_0314: String
+
+
+
+
+ gpt3_5Turbo
+
+ Most capable gpt-3.5-turbo
model and optimized for chat. Will be updated with our latest model iteration.
Swift
+static let gpt3_5Turbo: String
+
+
+
+
+ gpt3_5Turbo_0125
+
+ Snapshot of gpt-3.5-turbo
from January 25th 2024. Decreased prices by 50%. Various improvements including higher accuracy at responding in requested formats and a fix for a bug which caused a text encoding issue for non-English language function calls.
Swift
+static let gpt3_5Turbo_0125: String
+
+
+
+
+ gpt3_5Turbo_1106
+
+ Snapshot of gpt-3.5-turbo
from November 6th 2023. The latest gpt-3.5-turbo
model with improved instruction following, JSON mode, reproducible outputs, parallel function calling and more.
Swift
+@available(*, deprecated, message: "Please upgrade to the newer model")
+static let gpt3_5Turbo_1106: String
+
+
+
+
+ gpt3_5Turbo_0613
+
+ Snapshot of gpt-3.5-turbo
from June 13th 2023 with function calling data. Unlike gpt-3.5-turbo
, this model will not receive updates, and will be deprecated 3 months after a new version is released.
Swift
+@available(*, deprecated, message: "Please upgrade to the newer model")
+static let gpt3_5Turbo_0613: String
+
+
+
+
+ gpt3_5Turbo_0301
+
+ Snapshot of gpt-3.5-turbo
from March 1st 2023. Unlike gpt-3.5-turbo
, this model will not receive updates, and will only be supported for a three month period ending on June 1st 2023.
Swift
+@available(*, deprecated, message: "Please upgrade to the newer model")
+static let gpt3_5Turbo_0301: String
+
+
+
+
+ gpt3_5Turbo_16k
+
+ Same capabilities as the standard gpt-3.5-turbo
model but with 4 times the context.
Swift
+static let gpt3_5Turbo_16k: String
+
+
+
+
+ gpt3_5Turbo_16k_0613
+
+ Snapshot of gpt-3.5-turbo-16k
from June 13th 2023. Unlike gpt-3.5-turbo-16k
, this model will not receive updates, and will be deprecated 3 months after a new version is released.
Swift
+static let gpt3_5Turbo_16k_0613: String
+
+
+
+
+ textDavinci_003
+
+ Can do any language task with better quality, longer output, and consistent instruction-following than the curie, babbage, or ada models. Also supports inserting completions within text.
+ +Swift
+static let textDavinci_003: String
+
+
+
+
+ textDavinci_002
+
+ Similar capabilities to text-davinci-003 but trained with supervised fine-tuning instead of reinforcement learning.
+ +Swift
+static let textDavinci_002: String
+
+
+
+
+ textCurie
+
+ Very capable, faster and lower cost than Davinci.
+ +Swift
+static let textCurie: String
+
+
+
+
+ textBabbage
+
+ Capable of straightforward tasks, very fast, and lower cost.
+ +Swift
+static let textBabbage: String
+
+
+
+
+ textAda
+
+ Capable of very simple tasks, usually the fastest model in the GPT-3 series, and lowest cost.
+ +Swift
+static let textAda: String
+
+
+
+
+ textDavinci_001
+
+ Swift
+static let textDavinci_001: String
+
+
+
+
+ codeDavinciEdit_001
+
+ Swift
+static let codeDavinciEdit_001: String
+
+
+
+
+ tts_1
+
+ The latest text to speech model, optimized for speed.
+ +Swift
+static let tts_1: String
+
+
+
+
+ tts_1_hd
+
+ The latest text to speech model, optimized for quality.
+ +Swift
+static let tts_1_hd: String
+
+
+
+
+ whisper_1
+
+ Swift
+static let whisper_1: String
+
+
+
+
+ dall_e_2
+
+ Swift
+static let dall_e_2: String
+
+
+
+
+ dall_e_3
+
+ Swift
+static let dall_e_3: String
+
+
+
+
+ davinci
+
+ Most capable GPT-3 model. Can do any task the other models can do, often with higher quality.
+ +Swift
+static let davinci: String
+
+
+
+
+ curie
+
+ Very capable, but faster and lower cost than Davinci.
+ +Swift
+static let curie: String
+
+
+
+
+ babbage
+
+ Capable of straightforward tasks, very fast, and lower cost.
+ +Swift
+static let babbage: String
+
+
+
+
+ ada
+
+ Capable of very simple tasks, usually the fastest model in the GPT-3 series, and lowest cost.
+ +Swift
+static let ada: String
+
+
+
+
+ textEmbeddingAda
+
+ Swift
+static let textEmbeddingAda: String
+
+
+
+
+ textSearchAda
+
+ Swift
+static let textSearchAda: String
+
+
+
+
+ textSearchBabbageDoc
+
+ Swift
+static let textSearchBabbageDoc: String
+
+
+
+
+ textSearchBabbageQuery001
+
+ Swift
+static let textSearchBabbageQuery001: String
+
+
+
+
+ textEmbedding3
+
+ Swift
+static let textEmbedding3: String
+
+
+
+
+ textEmbedding3Large
+
+ Swift
+static let textEmbedding3Large: String
+
+
+
+
+ textModerationStable
+
+ Almost as capable as the latest model, but slightly older.
+ +Swift
+static let textModerationStable: String
+
+
+
+
+ textModerationLatest
+
+ Most capable moderation model. Accuracy will be slightly higher than the stable model.
+ +Swift
+static let textModerationLatest: String
+
+
+
+
+ moderation
+
+ Swift
+static let moderation: String
+
+ Docs (100% documented)
+The following protocols are available globally.
+ +
+
+
+ OpenAIProtocol
+
+ Swift
+public protocol OpenAIProtocol
+
+ Docs (100% documented)
+public protocol OpenAIProtocol
+
+
+
+
+ completions(query:completion:)
+
+ This function sends a completions query to the OpenAI API and retrieves generated completions in response. The Completions API enables you to build applications using OpenAI’s language models, like the powerful GPT-3.
+ +Example:
+let query = CompletionsQuery(model: .textDavinci_003, prompt: "What is 42?")
+openAI.completions(query: query) { result in
+ //Handle result here
+}
+
+
+ Swift
+func completions(query: CompletionsQuery, completion: @escaping (Result<CompletionsResult, Error>) -> Void)
+
+
+
+ query
+
+ |
+
+
+
+ A |
+
+
+ completion
+
+ |
+
+
+
+ A closure which receives the result when the API request finishes. The closure’s parameter, |
+
+
+
+ completionsStream(query:onResult:completion:)
+
+ This function sends a completions query to the OpenAI API and retrieves generated completions in response. The Completions API enables you to build applications using OpenAI’s language models, like the powerful GPT-3. The result is returned by chunks.
+ +Example:
+let query = CompletionsQuery(model: .textDavinci_003, prompt: "What is 42?")
+openAI.completions(query: query) { result in
+ //Handle result here
+}
+
+
+ Swift
+func completionsStream(query: CompletionsQuery, onResult: @escaping (Result<CompletionsResult, Error>) -> Void, completion: ((Error?) -> Void)?)
+
+
+
+ query
+
+ |
+
+
+
+ A |
+
+
+ onResult
+
+ |
+
+
+
+ A closure which receives the result when the API request finishes. The closure’s parameter, |
+
+
+ completion
+
+ |
+
+
+
+ A closure that is being called when all chunks are delivered or an unrecoverable error occurred + |
+
+
+
+ images(query:completion:)
+
+ This function sends an images query to the OpenAI API and retrieves generated images in response. The Images Generation API enables you to create various images or graphics using OpenAI’s powerful deep learning models.
+ +Example:
+let query = ImagesQuery(prompt: "White cat with heterochromia sitting on the kitchen table", n: 1, size: ImagesQuery.Size._1024)
+openAI.images(query: query) { result in
+ //Handle result here
+}
+
+
+ Swift
+func images(query: ImagesQuery, completion: @escaping (Result<ImagesResult, Error>) -> Void)
+
+
+
+ query
+
+ |
+
+
+
+ An |
+
+
+ completion
+
+ |
+
+
+
+ A closure which receives the result when the API request finishes. The closure’s parameter, |
+
+
+
+ imageEdits(query:completion:)
+
+ This function sends an image edit query to the OpenAI API and retrieves generated images in response. The Images Edit API enables you to edit images or graphics using OpenAI’s powerful deep learning models.
+ +Example:
+let query = ImageEditsQuery(image: "@whitecat.png", prompt: "White cat with heterochromia sitting on the kitchen table with a bowl of food", n: 1, size: ImagesQuery.Size._1024)
+openAI.imageEdits(query: query) { result in
+ //Handle result here
+}
+
+
+ Swift
+func imageEdits(query: ImageEditsQuery, completion: @escaping (Result<ImagesResult, Error>) -> Void)
+
+
+
+ query
+
+ |
+
+
+
+ An |
+
+
+ completion
+
+ |
+
+
+
+ A closure which receives the result when the API request finishes. The closure’s parameter, |
+
+
+
+ imageVariations(query:completion:)
+
+ This function sends an image variation query to the OpenAI API and retrieves generated images in response. The Images Variations API enables you to create a variation of a given image using OpenAI’s powerful deep learning models.
+ +Example:
+let query = ImageVariationsQuery(image: "@whitecat.png", n: 1, size: ImagesQuery.Size._1024)
+openAI.imageVariations(query: query) { result in
+ //Handle result here
+}
+
+
+ Swift
+func imageVariations(query: ImageVariationsQuery, completion: @escaping (Result<ImagesResult, Error>) -> Void)
+
+
+
+ query
+
+ |
+
+
+
+ An |
+
+
+ completion
+
+ |
+
+
+
+ A closure which receives the result when the API request finishes. The closure’s parameter, |
+
+
+
+ embeddings(query:completion:)
+
+ This function sends an embeddings query to the OpenAI API and retrieves embeddings in response. The Embeddings API enables you to generate high-dimensional vector representations of texts, which can be used for various natural language processing tasks such as semantic similarity, clustering, and classification.
+ +Example:
+let query = EmbeddingsQuery(model: .textSearchBabbageDoc, input: "The food was delicious and the waiter...")
+openAI.embeddings(query: query) { result in
+ //Handle response here
+}
+
+
+ Swift
+func embeddings(query: EmbeddingsQuery, completion: @escaping (Result<EmbeddingsResult, Error>) -> Void)
+
+
+
+ query
+
+ |
+
+
+
+ An |
+
+
+ completion
+
+ |
+
+
+
+ A closure which receives the result when the API request finishes. The closure’s parameter, |
+
+
+
+ chats(query:completion:)
+
+ This function sends a chat query to the OpenAI API and retrieves chat conversation responses. The Chat API enables you to build chatbots or conversational applications using OpenAI’s powerful natural language models, like GPT-3.
+ +Example:
+let query = ChatQuery(model: .gpt3_5Turbo, messages: [.init(role: "user", content: "who are you")])
+openAI.chats(query: query) { result in
+ //Handle response here
+}
+
+
+ Swift
+func chats(query: ChatQuery, completion: @escaping (Result<ChatResult, Error>) -> Void)
+
+
+
+ query
+
+ |
+
+
+
+ A |
+
+
+ completion
+
+ |
+
+
+
+ A closure which receives the result when the API request finishes. The closure’s parameter, |
+
+
+
+ chatsStream(query:onResult:completion:)
+
+ This function sends a chat query to the OpenAI API and retrieves chat stream conversation responses. The Chat API enables you to build chatbots or conversational applications using OpenAI’s powerful natural language models, like GPT-3. The result is returned in chunks.
+ +Example:
+let query = ChatQuery(model: .gpt3_5Turbo, messages: [.init(role: "user", content: "who are you")])
+openAI.chatsStream(query: query) { result in
+ //Handle response here
+}
+
+
+ Swift
+func chatsStream(query: ChatQuery, onResult: @escaping (Result<ChatStreamResult, Error>) -> Void, completion: ((Error?) -> Void)?)
+
+
+
+ query
+
+ |
+
+
+
+ A |
+
+
+ onResult
+
+ |
+
+
+
+ A closure which receives the result when the API request finishes. The closure’s parameter, |
+
+
+ completion
+
+ |
+
+
+
+ A closure that is called when all chunks are delivered or an unrecoverable error occurred
 + |
+
+
+
+ edits(query:completion:)
+
+ This function sends an edits query to the OpenAI API and retrieves an edited version of the prompt based on the instruction given.
+ +Example:
+let query = EditsQuery(model: .gpt4, input: "What day of the wek is it?", instruction: "Fix the spelling mistakes")
+openAI.edits(query: query) { result in
+ //Handle response here
+}
+
+
+ Swift
+func edits(query: EditsQuery, completion: @escaping (Result<EditsResult, Error>) -> Void)
+
+
+
+ query
+
+ |
+
+
+
+ An |
+
+
+ completion
+
+ |
+
+
+
+ A closure which receives the result when the API request finishes. The closure’s parameter, |
+
+
+
+ model(query:completion:)
+
+ This function sends a model query to the OpenAI API and retrieves a model instance, providing owner information. The Models API in this usage enables you to gather detailed information on the model in question, like GPT-3.
+ +Example:
+let query = ModelQuery(model: .gpt3_5Turbo)
+openAI.model(query: query) { result in
+ //Handle response here
+}
+
+
+ Swift
+func model(query: ModelQuery, completion: @escaping (Result<ModelResult, Error>) -> Void)
+
+
+
+ query
+
+ |
+
+
+
+ A |
+
+
+ completion
+
+ |
+
+
+
+ A closure which receives the result when the API request finishes. The closure’s parameter, |
+
+
+
+ models(completion:)
+
+ This function sends a models query to the OpenAI API and retrieves a list of models. The Models API in this usage enables you to list all the available models.
+ +Example:
+openAI.models() { result in
+ //Handle response here
+}
+
+
+ Swift
+func models(completion: @escaping (Result<ModelsResult, Error>) -> Void)
+
+
+
+ completion
+
+ |
+
+
+
+ A closure which receives the result when the API request finishes. The closure’s parameter, |
+
+
+
+ moderations(query:completion:)
+
+ This function sends a moderations query to the OpenAI API and retrieves a list of category results to classify how text may violate OpenAI’s Content Policy.
+ +Example:
+let query = ModerationsQuery(input: "I want to kill them.")
+openAI.moderations(query: query) { result in
+ //Handle response here
+}
+
+
+ Swift
+@available(iOS 13.0, *)
+func moderations(query: ModerationsQuery, completion: @escaping (Result<ModerationsResult, Error>) -> Void)
+
+
+
+ query
+
+ |
+
+
+
+ A |
+
+
+ completion
+
+ |
+
+
+
+ A closure which receives the result when the API request finishes. The closure’s parameter, |
+
+
+
+ audioCreateSpeech(query:completion:)
+
+ This function sends an AudioSpeechQuery
to the OpenAI API to create audio speech from text using a specific voice and format.
Example:
+let query = AudioSpeechQuery(model: .tts_1, input: "Hello, world!", voice: .alloy, responseFormat: .mp3, speed: 1.0)
+openAI.audioCreateSpeech(query: query) { result in
+ // Handle response here
+}
+
+
+ Swift
+func audioCreateSpeech(query: AudioSpeechQuery, completion: @escaping (Result<AudioSpeechResult, Error>) -> Void)
+
+
+
+ query
+
+ |
+
+
+
+ An |
+
+
+ completion
+
+ |
+
+
+
+ A closure which receives the result. The closure’s parameter, |
+
+
+
+ audioTranscriptions(query:completion:)
+
+ Transcribes audio data using OpenAI’s audio transcription API and completes the operation asynchronously.
+ +Swift
+func audioTranscriptions(query: AudioTranscriptionQuery, completion: @escaping (Result<AudioTranscriptionResult, Error>) -> Void)
+
+
+
+ query
+
+ |
+
+
+
+ The |
+
+
+ completion
+
+ |
+
+
+
+ The completion handler to be executed upon completion of the transcription request.
+ Returns a |
+
+
+
+ audioTranslations(query:completion:)
+
+ Translates audio data using OpenAI’s audio translation API and completes the operation asynchronously.
+ +Swift
+func audioTranslations(query: AudioTranslationQuery, completion: @escaping (Result<AudioTranslationResult, Error>) -> Void)
+
+
+
+ query
+
+ |
+
+
+
+ The |
+
+
+ completion
+
+ |
+
+
+
+ The completion handler to be executed upon completion of the translation request.
+ Returns a |
+
+
+
+ completions(query:)
+
+
+ Extension method, asynchronous
+
+ Swift
+func completions(
+ query: CompletionsQuery
+) async throws -> CompletionsResult
+
+
+
+
+ completionsStream(query:)
+
+
+ Extension method
+
+ Swift
+func completionsStream(
+ query: CompletionsQuery
+) -> AsyncThrowingStream<CompletionsResult, Error>
+
+
+
+
+ images(query:)
+
+
+ Extension method, asynchronous
+
+ Swift
+func images(
+ query: ImagesQuery
+) async throws -> ImagesResult
+
+
+
+
+ imageEdits(query:)
+
+
+ Extension method, asynchronous
+
+ Swift
+func imageEdits(
+ query: ImageEditsQuery
+) async throws -> ImagesResult
+
+
+
+
+ imageVariations(query:)
+
+
+ Extension method, asynchronous
+
+ Swift
+func imageVariations(
+ query: ImageVariationsQuery
+) async throws -> ImagesResult
+
+
+
+
+ embeddings(query:)
+
+
+ Extension method, asynchronous
+
+ Swift
+func embeddings(
+ query: EmbeddingsQuery
+) async throws -> EmbeddingsResult
+
+
+
+
+ chats(query:)
+
+
+ Extension method, asynchronous
+
+ Swift
+func chats(
+ query: ChatQuery
+) async throws -> ChatResult
+
+
+
+
+ chatsStream(query:)
+
+
+ Extension method
+
+ Swift
+func chatsStream(
+ query: ChatQuery
+) -> AsyncThrowingStream<ChatStreamResult, Error>
+
+
+
+
+ edits(query:)
+
+
+ Extension method, asynchronous
+
+ Swift
+func edits(
+ query: EditsQuery
+) async throws -> EditsResult
+
+
+
+
+ model(query:)
+
+
+ Extension method, asynchronous
+
+ Swift
+func model(
+ query: ModelQuery
+) async throws -> ModelResult
+
+
+
+
+ models()
+
+
+ Extension method, asynchronous
+
+ Swift
+func models() async throws -> ModelsResult
+
+
+
+
+ moderations(query:)
+
+
+ Extension method, asynchronous
+
+ Swift
+func moderations(
+ query: ModerationsQuery
+) async throws -> ModerationsResult
+
+
+
+
+ audioCreateSpeech(query:)
+
+
+ Extension method, asynchronous
+
+ Swift
+func audioCreateSpeech(
+ query: AudioSpeechQuery
+) async throws -> AudioSpeechResult
+
+
+
+
+ audioTranscriptions(query:)
+
+
+ Extension method, asynchronous
+
+ Swift
+func audioTranscriptions(
+ query: AudioTranscriptionQuery
+) async throws -> AudioTranscriptionResult
+
+
+
+
+ audioTranslations(query:)
+
+
+ Extension method, asynchronous
+
+ Swift
+func audioTranslations(
+ query: AudioTranslationQuery
+) async throws -> AudioTranslationResult
+
+
+
+
+ completions(query:)
+
+
+ Extension method
+
+ Swift
+func completions(query: CompletionsQuery) -> AnyPublisher<CompletionsResult, Error>
+
+
+
+
+ completionsStream(query:)
+
+
+ Extension method
+
+ Swift
+func completionsStream(query: CompletionsQuery) -> AnyPublisher<Result<CompletionsResult, Error>, Error>
+
+
+
+
+ images(query:)
+
+
+ Extension method
+
+ Swift
+func images(query: ImagesQuery) -> AnyPublisher<ImagesResult, Error>
+
+
+
+
+ imageEdits(query:)
+
+
+ Extension method
+
+ Swift
+func imageEdits(query: ImageEditsQuery) -> AnyPublisher<ImagesResult, Error>
+
+
+
+
+ imageVariations(query:)
+
+
+ Extension method
+
+ Swift
+func imageVariations(query: ImageVariationsQuery) -> AnyPublisher<ImagesResult, Error>
+
+
+
+
+ embeddings(query:)
+
+
+ Extension method
+
+ Swift
+func embeddings(query: EmbeddingsQuery) -> AnyPublisher<EmbeddingsResult, Error>
+
+
+
+
+ chats(query:)
+
+
+ Extension method
+
+ Swift
+func chats(query: ChatQuery) -> AnyPublisher<ChatResult, Error>
+
+
+
+
+ chatsStream(query:)
+
+
+ Extension method
+
+ Swift
+func chatsStream(query: ChatQuery) -> AnyPublisher<Result<ChatStreamResult, Error>, Error>
+
+
+
+
+ edits(query:)
+
+
+ Extension method
+
+ Swift
+func edits(query: EditsQuery) -> AnyPublisher<EditsResult, Error>
+
+
+
+
+ model(query:)
+
+
+ Extension method
+
+ Swift
+func model(query: ModelQuery) -> AnyPublisher<ModelResult, Error>
+
+
+
+
+ models()
+
+
+ Extension method
+
+ Swift
+func models() -> AnyPublisher<ModelsResult, Error>
+
+
+
+
+ moderations(query:)
+
+
+ Extension method
+
+ Swift
+func moderations(query: ModerationsQuery) -> AnyPublisher<ModerationsResult, Error>
+
+
+
+
+ audioCreateSpeech(query:)
+
+
+ Extension method
+
+ Swift
+func audioCreateSpeech(query: AudioSpeechQuery) -> AnyPublisher<AudioSpeechResult, Error>
+
+
+
+
+ audioTranscriptions(query:)
+
+
+ Extension method
+
+ Swift
+func audioTranscriptions(query: AudioTranscriptionQuery) -> AnyPublisher<AudioTranscriptionResult, Error>
+
+
+
+
+ audioTranslations(query:)
+
+
+ Extension method
+
+ Swift
+func audioTranslations(query: AudioTranslationQuery) -> AnyPublisher<AudioTranslationResult, Error>
+
+ Docs (100% documented)
+The following structures are available globally.
+ +
+
+
+ APIError
+
+ Swift
+public struct APIError : Error, Decodable, Equatable
+extension APIError: LocalizedError
+
+
+
+
+ APIErrorResponse
+
+ Swift
+public struct APIErrorResponse : Error, Decodable, Equatable
+extension APIErrorResponse: LocalizedError
+
+
+
+
+ AudioSpeechQuery
+
+ Generates audio from the input text. +Learn more: OpenAI Speech – Documentation
+ + See more +Swift
+public struct AudioSpeechQuery : Codable
+
+
+
+
+ AudioSpeechResult
+
+ The audio file content. +Learn more: OpenAI Speech – Documentation
+ + See more +Swift
+public struct AudioSpeechResult : Codable, Equatable
+
+
+
+
+ AudioTranscriptionQuery
+
+ Swift
+public struct AudioTranscriptionQuery : Codable
+
+
+
+
+ AudioTranscriptionResult
+
+ Swift
+public struct AudioTranscriptionResult : Codable, Equatable
+
+
+
+
+ AudioTranslationQuery
+
+ Translates audio into English.
+ + See more +Swift
+public struct AudioTranslationQuery : Codable
+
+
+
+
+ AudioTranslationResult
+
+ Swift
+public struct AudioTranslationResult : Codable, Equatable
+
+
+
+
+ ChatQuery
+
+ Creates a model response for the given chat conversation +https://platform.openai.com/docs/guides/text-generation +https://platform.openai.com/docs/api-reference/chat/create
+ + See more +Swift
+public struct ChatQuery : Equatable, Codable, Streamable
+
+
+
+
+ ChatResult
+
+ https://platform.openai.com/docs/api-reference/chat/object +Example Completion object print
+{
+ "id": "chatcmpl-123456",
+ "object": "chat.completion",
+ "created": 1728933352,
+ "model": "gpt-4o-2024-08-06",
+ "choices": [
+ {
+ "index": 0,
+ "message": {
+ "role": "assistant",
+ "content": "Hi there! How can I assist you today?",
+ "refusal": null
+ },
+ "logprobs": null,
+ "finish_reason": "stop"
+ }
+ ],
+ "usage": {
+ "prompt_tokens": 19,
+ "completion_tokens": 10,
+ "total_tokens": 29,
+ "prompt_tokens_details": {
+ "cached_tokens": 0
+ },
+ "completion_tokens_details": {
+ "reasoning_tokens": 0
+ }
+ },
+ "system_fingerprint": "fp_6b68a8204b"
+}
+
+
+ See more
+ Swift
+public struct ChatResult : Codable, Equatable
+
+
+
+
+ ChatStreamResult
+
+ Swift
+public struct ChatStreamResult : Codable, Equatable
+
+
+
+
+ CompletionsQuery
+
+ Swift
+public struct CompletionsQuery : Codable, Streamable
+
+
+
+
+ CompletionsResult
+
+ Swift
+public struct CompletionsResult : Codable, Equatable
+
+
+
+
+ EditsQuery
+
+ Swift
+public struct EditsQuery : Codable
+
+
+
+
+ EditsResult
+
+ Swift
+public struct EditsResult : Codable, Equatable
+
+
+
+
+ EmbeddingsQuery
+
+ Swift
+public struct EmbeddingsQuery : Codable
+
+
+
+
+ EmbeddingsResult
+
+ Swift
+public struct EmbeddingsResult : Codable, Equatable
+
+
+
+
+ ImageEditsQuery
+
+ Swift
+public struct ImageEditsQuery : Codable
+
+
+
+
+ ImageVariationsQuery
+
+ Swift
+public struct ImageVariationsQuery : Codable
+
+
+
+
+ ImagesQuery
+
+ Given a prompt and/or an input image, the model will generate a new image. +https://platform.openai.com/docs/guides/images
+ + See more +Swift
+public struct ImagesQuery : Codable
+
+
+
+
+ ImagesResult
+
+ Returns a list of image objects.
+ + See more +Swift
+public struct ImagesResult : Codable, Equatable
+
+
+
+
+ ModelQuery
+
+ Retrieves a model instance, providing basic information about the model such as the owner and permissioning.
+ + See more +Swift
+public struct ModelQuery : Codable, Equatable
+
+
+
+
+ ModelResult
+
+ The model object matching the specified ID.
+ + See more +Swift
+public struct ModelResult : Codable, Equatable
+
+
+
+
+ ModelsResult
+
+ A list of model objects.
+ + See more +Swift
+public struct ModelsResult : Codable, Equatable
+
+
+
+
+ ModerationsQuery
+
+ Swift
+public struct ModerationsQuery : Codable
+
+
+
+
+ ModerationsResult
+
+ Swift
+public struct ModerationsResult : Codable, Equatable
+extension ModerationsResult: Identifiable
+
+
+
+
+ Vector
+
+ Swift
+public struct Vector
+
+ Docs (100% documented)
+public struct APIError : Error, Decodable, Equatable
+extension APIError: LocalizedError
+
+
+
+
+ message
+
+ Swift
+public let message: String
+
+
+
+
+ type
+
+ Swift
+public let type: String
+
+
+
+
+ param
+
+ Swift
+public let param: String?
+
+
+
+
+ code
+
+ Swift
+public let code: String?
+
+
+
+
+ init(message:type:param:code:)
+
+ Swift
+public init(message: String, type: String, param: String?, code: String?)
+
+
+
+
+ init(from:)
+
+ Swift
+public init(from decoder: Decoder) throws
+
+
+
+
+ errorDescription
+
+ Swift
+public var errorDescription: String? { get }
+
+ Docs (100% documented)
+public struct APIErrorResponse : Error, Decodable, Equatable
+extension APIErrorResponse: LocalizedError
+
+
+
+
+ error
+
+ Swift
+public let error: APIError
+
+
+
+
+ errorDescription
+
+ Swift
+public var errorDescription: String? { get }
+
+ Docs (100% documented)
+public struct AudioSpeechQuery : Codable
+
+ Generates audio from the input text. +Learn more: OpenAI Speech – Documentation
+ +
+
+
+ AudioSpeechVoice
+
+ Encapsulates the voices available for audio generation.
+To get acquainted with each of the voices and listen to the samples visit: +OpenAI Text-to-Speech – Voice Options
+ + See more +Swift
+public enum AudioSpeechVoice : String, Codable, CaseIterable
+
+
+
+
+ AudioSpeechResponseFormat
+
+ Encapsulates the response formats available for audio data.
+ +Formats:
+ +Swift
+public enum AudioSpeechResponseFormat : String, Codable, CaseIterable
+
+
+
+
+ input
+
+ The text to generate audio for. The maximum length is 4096 characters.
+ +Swift
+public let input: String
+
+
+
+
+ model
+
+ One of the available TTS models: tts-1 or tts-1-hd
+ +Swift
+public let model: Model
+
+
+
+
+ voice
+
+ The voice to use when generating the audio. Supported voices are alloy, echo, fable, onyx, nova, and shimmer. Previews of the voices are available in the Text to speech guide. +https://platform.openai.com/docs/guides/text-to-speech/voice-options
+ +Swift
+public let voice: AudioSpeechVoice
+
+
+
+
+ responseFormat
+
+ The format to generate audio in. Supported formats are mp3, opus, aac, and flac. +Defaults to mp3
+ +Swift
+public let responseFormat: AudioSpeechResponseFormat?
+
+
+
+
+ speed
+
+ The speed of the generated audio. Select a value from 0.25 to 4.0. 1.0 is the default. +Defaults to 1
+ +Swift
+public let speed: String?
+
+
+
+
+ CodingKeys
+
+ Swift
+public enum CodingKeys : String, CodingKey
+
+
+
+
+ init(model:input:voice:responseFormat:speed:)
+
+ Swift
+public init(model: Model, input: String, voice: AudioSpeechVoice, responseFormat: AudioSpeechResponseFormat = .mp3, speed: Double?)
+
+
+
+
+ Speed
+
+ Swift
+enum Speed : Double
+
+
+
+
+ normalizeSpeechSpeed(_:)
+
+ Swift
+static func normalizeSpeechSpeed(_ inputSpeed: Double?) -> String
+
+ Docs (100% documented)
+public enum AudioSpeechResponseFormat : String, Codable, CaseIterable
+
+ Encapsulates the response formats available for audio data.
+ +Formats:
+ +
+
+
+ mp3
+
+ Swift
+case mp3
+
+
+
+
+ opus
+
+ Swift
+case opus
+
+
+
+
+ aac
+
+ Swift
+case aac
+
+
+
+
+ flac
+
+ Swift
+case flac
+
+ Docs (100% documented)
+public enum AudioSpeechVoice : String, Codable, CaseIterable
+
+ Encapsulates the voices available for audio generation.
+ +To get aquinted with each of the voices and listen to the samples visit: +OpenAI Text-to-Speech – Voice Options
+ +
+
+
+ alloy
+
+ Swift
+case alloy
+
+
+
+
+ echo
+
+ Swift
+case echo
+
+
+
+
+ fable
+
+ Swift
+case fable
+
+
+
+
+ onyx
+
+ Swift
+case onyx
+
+
+
+
+ nova
+
+ Swift
+case nova
+
+
+
+
+ shimmer
+
+ Swift
+case shimmer
+
+ Docs (100% documented)
+public enum CodingKeys : String, CodingKey
+
+
+
+
+ model
+
+ Swift
+case model
+
+
+
+
+ input
+
+ Swift
+case input
+
+
+
+
+ voice
+
+ Swift
+case voice
+
+
+
+
+ responseFormat
+
+ Swift
+case responseFormat = "response_format"
+
+
+
+
+ speed
+
+ Swift
+case speed
+
+ Docs (100% documented)
+enum Speed : Double
+
+ Docs (100% documented)
+public struct AudioSpeechResult : Codable, Equatable
+
+ The audio file content. +Learn more: OpenAI Speech – Documentation
+ +
+
+
+ audio
+
+ Audio data for one of the following formats :mp3
, opus
, aac
, flac
Swift
+public let audio: Data
+
+ Docs (100% documented)
+public struct AudioTranscriptionQuery : Codable
+
+
+
+
+ ResponseFormat
+
+ Swift
+public enum ResponseFormat : String, Codable, Equatable, CaseIterable
+
+
+
+
+ file
+
+ The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
+ +Swift
+public let file: Data
+
+
+
+
+ fileType
+
+ Swift
+public let fileType: `Self`.FileType
+
+
+
+
+ model
+
+ ID of the model to use. Only whisper-1 is currently available.
+ +Swift
+public let model: Model
+
+
+
+
+ responseFormat
+
+ The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt. +Defaults to json
+ +Swift
+public let responseFormat: `Self`.ResponseFormat?
+
+
+
+
+ prompt
+
+ An optional text to guide the model’s style or continue a previous audio segment. The prompt should match the audio language.
+ +Swift
+public let prompt: String?
+
+
+
+
+ temperature
+
+ The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit. +Defaults to 0
+ +Swift
+public let temperature: Double?
+
+
+
+
+ language
+
+ The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy and latency. +https://platform.openai.com/docs/guides/speech-to-text/prompting
+ +Swift
+public let language: String?
+
+ Swift
+public init(file: Data, fileType: `Self`.FileType, model: Model, prompt: String? = nil, temperature: Double? = nil, language: String? = nil, responseFormat: `Self`.ResponseFormat? = nil)
+
+
+
+
+ FileType
+
+ Swift
+public enum FileType : String, Codable, Equatable, CaseIterable
+
+ Docs (100% documented)
+public enum FileType : String, Codable, Equatable, CaseIterable
+
+
+
+
+ flac
+
+ Swift
+case flac
+
+
+
+
+ mp3
+
+ Swift
+case mp3
+
+
+
+
+ mpga
+
+ Swift
+case mpga
+
+
+
+
+ mp4
+
+ Swift
+case mp4
+
+
+
+
+ m4a
+
+ Swift
+case m4a
+
+
+
+
+ mpeg
+
+ Swift
+case mpeg
+
+
+
+
+ ogg
+
+ Swift
+case ogg
+
+
+
+
+ wav
+
+ Swift
+case wav
+
+
+
+
+ webm
+
+ Swift
+case webm
+
+ Docs (100% documented)
+public enum ResponseFormat : String, Codable, Equatable, CaseIterable
+
+
+
+
+ json
+
+ Swift
+case json
+
+
+
+
+ text
+
+ Swift
+case text
+
+
+
+
+ verboseJson
+
+ Swift
+case verboseJson = "verbose_json"
+
+
+
+
+ srt
+
+ Swift
+case srt
+
+
+
+
+ vtt
+
+ Swift
+case vtt
+
+ Docs (100% documented)
+public struct AudioTranscriptionResult : Codable, Equatable
+
+
+
+
+ text
+
+ The transcribed text.
+ +Swift
+public let text: String
+
+ Docs (100% documented)
+public struct AudioTranslationQuery : Codable
+
+ Translates audio into English.
+ +
+
+
+ FileType
+
+ Swift
+public typealias FileType = AudioTranscriptionQuery.FileType
+
+
+
+
+ ResponseFormat
+
+ Swift
+public typealias ResponseFormat = AudioTranscriptionQuery.ResponseFormat
+
+
+
+
+ file
+
+ The audio file object (not file name) translate, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
+ +Swift
+public let file: Data
+
+
+
+
+ fileType
+
+ Swift
+public let fileType: `Self`.FileType
+
+
+
+
+ model
+
+ ID of the model to use. Only whisper-1 is currently available.
+ +Swift
+public let model: Model
+
+
+
+
+ responseFormat
+
+ The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt. +Defaults to json
+ +Swift
+public let responseFormat: `Self`.ResponseFormat?
+
+
+
+
+ prompt
+
+ An optional text to guide the model’s style or continue a previous audio segment. The prompt should be in English. +https://platform.openai.com/docs/guides/speech-to-text/prompting
+ +Swift
+public let prompt: String?
+
+
+
+
+ temperature
+
+ The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit. +Defaults to 0
+ +Swift
+public let temperature: Double?
+
+ Swift
+public init(file: Data, fileType: `Self`.FileType, model: Model, prompt: String? = nil, temperature: Double? = nil, responseFormat: `Self`.ResponseFormat? = nil)
+
+ Docs (100% documented)
+public struct AudioTranslationResult : Codable, Equatable
+
+
+
+
+ text
+
+ The translated text.
+ +Swift
+public let text: String
+
+ Docs (100% documented)
+public struct ChatQuery : Equatable, Codable, Streamable
+
+ Creates a model response for the given chat conversation +https://platform.openai.com/docs/guides/text-generation +https://platform.openai.com/docs/api-reference/chat/create
+ +
+
+
+ messages
+
+ A list of messages comprising the conversation so far
+ +Swift
+public let messages: [`Self`.ChatCompletionMessageParam]
+
+
+
+
+ model
+
+ ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API. +https://platform.openai.com/docs/models/model-endpoint-compatibility
+ +Swift
+public let model: Model
+
+
+
+
+ frequencyPenalty
+
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model’s likelihood to repeat the same line verbatim. +Defaults to 0 +https://platform.openai.com/docs/guides/text-generation/parameter-details
+ +Swift
+public let frequencyPenalty: Double?
+
+
+
+
+ logitBias
+
+ Modify the likelihood of specified tokens appearing in the completion. +Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. +Defaults to null
+ +Swift
+public let logitBias: [String : Int]?
+
+
+
+
+ logprobs
+
+ Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the content of message. This option is currently not available on the gpt-4-vision-preview model. +Defaults to false
+ +Swift
+public let logprobs: Bool?
+
+
+
+
+ maxTokens
+
+ The maximum number of tokens to generate in the completion. +The total length of input tokens and generated tokens is limited by the model’s context length. +https://platform.openai.com/tokenizer
+ +Swift
+public let maxTokens: Int?
+
+
+
+
+ n
+
+ How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep n as 1 to minimize costs. +Defaults to 1
+ +Swift
+public let n: Int?
+
+
+
+
+ presencePenalty
+
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model’s likelihood to talk about new topics. +https://platform.openai.com/docs/guides/text-generation/parameter-details
+ +Swift
+public let presencePenalty: Double?
+
+
+
+
+ responseFormat
+
+ An object specifying the format that the model must output. Compatible with gpt-4-1106-preview and gpt-3.5-turbo-1106. +Setting to { “type”: “json_object” } enables JSON mode, which guarantees the message the model generates is valid JSON. +Important: when using JSON mode, you must also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly “stuck” request. Also note that the message content may be partially cut off if finish_reason=“length”, which indicates the generation exceeded max_tokens or the conversation exceeded the max context length.
+ +Swift
+public let responseFormat: `Self`.ResponseFormat?
+
+
+
+
+ seed
+
+ This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.
+ +Swift
+public let seed: Int?
+
+
+
+
+ stop
+
+ Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. +Defaults to null
+ +Swift
+public let stop: Stop?
+
+
+
+
+ temperature
+
+ What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. +We generally recommend altering this or top_p but not both. +Defaults to 1
+ +Swift
+public let temperature: Double?
+
+
+
+
+ toolChoice
+
+ Controls which (if any) function is called by the model. none means the model will not call a function and instead generates a message. auto means the model can pick between generating a message or calling a function. Specifying a particular function via {“type”: “function”, “function”: {“name”: “my_function”}} forces the model to call that function. +none is the default when no functions are present. auto is the default if functions are present
+ +Swift
+public let toolChoice: `Self`.ChatCompletionFunctionCallOptionParam?
+
+
+
+
+ tools
+
+ A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for.
+ +Swift
+public let tools: [`Self`.ChatCompletionToolParam]?
+
+
+
+
+ topLogprobs
+
+ An integer between 0 and 5 specifying the number of most likely tokens to return at each token position, each with an associated log probability. logprobs must be set to true if this parameter is used.
+ +Swift
+public let topLogprobs: Int?
+
+
+
+
+ topP
+
+ An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. +We generally recommend altering this or temperature but not both. +Defaults to 1
+ +Swift
+public let topP: Double?
+
+
+
+
+ user
+
+ A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. +https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids
+ +Swift
+public let user: String?
+
+
+
+
+ stream
+
+ If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. +https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format
+ +Swift
+public var stream: Bool
+
+
+
+
+ init(messages:model:frequencyPenalty:logitBias:logprobs:maxTokens:n:presencePenalty:responseFormat:seed:stop:temperature:toolChoice:tools:topLogprobs:topP:user:stream:)
+
+ Swift
+public init(
+ messages: [Self.ChatCompletionMessageParam],
+ model: Model,
+ frequencyPenalty: Double? = nil,
+ logitBias: [String : Int]? = nil,
+ logprobs: Bool? = nil,
+ maxTokens: Int? = nil,
+ n: Int? = nil,
+ presencePenalty: Double? = nil,
+ responseFormat: Self.ResponseFormat? = nil,
+ seed: Int? = nil,
+ stop: Self.Stop? = nil,
+ temperature: Double? = nil,
+ toolChoice: Self.ChatCompletionFunctionCallOptionParam? = nil,
+ tools: [Self.ChatCompletionToolParam]? = nil,
+ topLogprobs: Int? = nil,
+ topP: Double? = nil,
+ user: String? = nil,
+ stream: Bool = false
+)
+
+
+
+
+ ChatCompletionMessageParam
+
+ Swift
+public enum ChatCompletionMessageParam : Codable, Equatable
+
+
+
+
+ Stop
+
+ Swift
+public enum Stop : Codable, Equatable
+
+
+
+
+ ResponseFormat
+
+ Swift
+public enum ResponseFormat : String, Codable, Equatable
+
+
+
+
+ ChatCompletionFunctionCallOptionParam
+
+ Swift
+public enum ChatCompletionFunctionCallOptionParam : Codable, Equatable
+
+
+
+
+ ChatCompletionToolParam
+
+ Swift
+public struct ChatCompletionToolParam : Codable, Equatable
+
+
+
+
+ CodingKeys
+
+ Swift
+public enum CodingKeys : String, CodingKey
+
+ Docs (100% documented)
+public enum ChatCompletionFunctionCallOptionParam : Codable, Equatable
+
+
+
+
+ none
+
+ Swift
+case none
+
+
+
+
+ auto
+
+ Swift
+case auto
+
+
+
+
+ function(_:)
+
+ Swift
+case function(String)
+
+
+
+
+ encode(to:)
+
+ Swift
+public func encode(to encoder: Encoder) throws
+
+
+
+
+ init(function:)
+
+ Swift
+public init(function: String)
+
+ Docs (100% documented)
+public enum ChatCompletionMessageParam : Codable, Equatable
+
+
+
+
+ system(_:)
+
+ Swift
+case system(`Self`.ChatCompletionSystemMessageParam)
+
+
+
+
+ user(_:)
+
+ Swift
+case user(`Self`.ChatCompletionUserMessageParam)
+
+
+
+
+ assistant(_:)
+
+ Swift
+case assistant(`Self`.ChatCompletionAssistantMessageParam)
+
+
+
+
+ tool(_:)
+
+ Swift
+case tool(`Self`.ChatCompletionToolMessageParam)
+
+
+
+
+ content
+
+ Swift
+public var content: `Self`.ChatCompletionUserMessageParam.Content? { get }
+
+
+
+
+ role
+
+ Swift
+public var role: Role { get }
+
+
+
+
+ name
+
+ Swift
+public var name: String? { get }
+
+
+
+
+ toolCallId
+
+ Swift
+public var toolCallId: String? { get }
+
+
+
+
+ toolCalls
+
+ Swift
+public var toolCalls: [`Self`.ChatCompletionAssistantMessageParam.ChatCompletionMessageToolCallParam]? { get }
+
+
+
+
+ init(role:content:name:toolCalls:toolCallId:)
+
+ Swift
+public init?(
+ role: Role,
+ content: String? = nil,
+ name: String? = nil,
+ toolCalls: [Self.ChatCompletionAssistantMessageParam.ChatCompletionMessageToolCallParam]? = nil,
+ toolCallId: String? = nil
+)
+
+
+
+
+ init(role:content:name:)
+
+ Swift
+public init?(
+ role: Role,
+ content: [ChatCompletionUserMessageParam.Content.VisionContent],
+ name: String? = nil
+)
+
+
+
+
+ encode(to:)
+
+ Swift
+public func encode(to encoder: Encoder) throws
+
+
+
+
+ ChatCompletionSystemMessageParam
+
+ Swift
+public struct ChatCompletionSystemMessageParam : Codable, Equatable
+
+
+
+
+ ChatCompletionUserMessageParam
+
+ Swift
+public struct ChatCompletionUserMessageParam : Codable, Equatable
+
+
+
+
+ ChatCompletionAssistantMessageParam
+
+ Swift
+public struct ChatCompletionAssistantMessageParam : Codable, Equatable
+
+
+
+
+ ChatCompletionToolMessageParam
+
+ Swift
+public struct ChatCompletionToolMessageParam : Codable, Equatable
+
+
+
+
+ Role
+
+ Swift
+public enum Role : String, Codable, Equatable, CaseIterable
+
+
+
+
+ init(from:)
+
+ Swift
+public init(from decoder: Decoder) throws
+
+ Docs (100% documented)
+public struct ChatCompletionAssistantMessageParam : Codable, Equatable
+
+
+
+
+ Role
+
+ Swift
+public typealias Role = ChatQuery.ChatCompletionMessageParam.Role
+
+
+
+
+ role
+
+ The role of the messages author, in this case assistant.
+ +Swift
+public let role: `Self`.Role
+
+
+
+
+ content
+
+ The contents of the assistant message. Required unless tool_calls is specified.
+ +Swift
+public let content: String?
+
+
+
+
+ name
+
+ The name of the author of this message. name
is required if role is function
, and it should be the name of the function whose response is in the content
. May contain a-z, A-Z, 0-9, and underscores, with a maximum length of 64 characters.
Swift
+public let name: String?
+
+
+
+
+ toolCalls
+
+ The tool calls generated by the model, such as function calls.
+ +Swift
+public let toolCalls: [`Self`.ChatCompletionMessageToolCallParam]?
+
+
+
+
+ init(content:name:toolCalls:)
+
+ Swift
+public init(
+ content: String? = nil,
+ name: String? = nil,
+ toolCalls: [Self.ChatCompletionMessageToolCallParam]? = nil
+)
+
+
+
+
+ CodingKeys
+
+ Swift
+public enum CodingKeys : String, CodingKey
+
+
+
+
+ ChatCompletionMessageToolCallParam
+
+ Swift
+public struct ChatCompletionMessageToolCallParam : Codable, Equatable
+
+ Docs (100% documented)
+public struct ChatCompletionMessageToolCallParam : Codable, Equatable
+
+
+
+
+ ToolsType
+
+ Swift
+public typealias ToolsType = ChatQuery.ChatCompletionToolParam.ToolsType
+
+
+
+
+ id
+
+ The ID of the tool call.
+ +Swift
+public let id: String
+
+
+
+
+ function
+
+ The function that the model called.
+ +Swift
+public let function: `Self`.FunctionCall
+
+
+
+
+ type
+
+
+
+
+ init(id:function:)
+
+ Swift
+public init(
+ id: String,
+ function: Self.FunctionCall
+)
+
+
+
+
+ FunctionCall
+
+ Swift
+public struct FunctionCall : Codable, Equatable
+
+ Docs (100% documented)
+public struct FunctionCall : Codable, Equatable
+
+
+
+
+ arguments
+
+ The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.
+ +Swift
+public let arguments: String
+
+
+
+
+ name
+
+ The name of the function to call.
+ +Swift
+public let name: String
+
+ Docs (100% documented)
+public enum CodingKeys : String, CodingKey
+
+
+
+
+ name
+
+ Swift
+case name
+
+
+
+
+ role
+
+ Swift
+case role
+
+
+
+
+ content
+
+ Swift
+case content
+
+
+
+
+ toolCalls
+
+ Swift
+case toolCalls = "tool_calls"
+
+ Docs (100% documented)
+public struct ChatCompletionSystemMessageParam : Codable, Equatable
+
+
+
+
+ Role
+
+ Swift
+public typealias Role = ChatQuery.ChatCompletionMessageParam.Role
+
+
+
+
+ content
+
+ The contents of the system message.
+ +Swift
+public let content: String
+
+
+
+
+ role
+
+ The role of the messages author, in this case system.
+ +Swift
+public let role: `Self`.Role
+
+
+
+
+ name
+
+ An optional name for the participant. Provides the model information to differentiate between participants of the same role.
+ +Swift
+public let name: String?
+
+
+
+
+ init(content:name:)
+
+ Swift
+public init(
+ content: String,
+ name: String? = nil
+)
+
+ Docs (100% documented)
+public struct ChatCompletionToolMessageParam : Codable, Equatable
+
+
+
+
+ Role
+
+ Swift
+public typealias Role = ChatQuery.ChatCompletionMessageParam.Role
+
+
+
+
+ content
+
+ The contents of the tool message.
+ +Swift
+public let content: String
+
+
+
+
+ role
+
+ The role of the messages author, in this case tool.
+ +Swift
+public let role: `Self`.Role
+
+
+
+
+ toolCallId
+
+ Tool call that this message is responding to.
+ +Swift
+public let toolCallId: String
+
+
+
+
+ init(content:toolCallId:)
+
+ Swift
+public init(
+ content: String,
+ toolCallId: String
+)
+
+
+
+
+ CodingKeys
+
+ Swift
+public enum CodingKeys : String, CodingKey
+
+ Docs (100% documented)
+public enum CodingKeys : String, CodingKey
+
+
+
+
+ content
+
+ Swift
+case content
+
+
+
+
+ role
+
+ Swift
+case role
+
+
+
+
+ toolCallId
+
+ Swift
+case toolCallId = "tool_call_id"
+
+ Docs (100% documented)
+public struct ChatCompletionUserMessageParam : Codable, Equatable
+
+
+
+
+ Role
+
+ Swift
+public typealias Role = ChatQuery.ChatCompletionMessageParam.Role
+
+
+
+
+ content
+
+ The contents of the user message.
+ +Swift
+public let content: Content
+
+
+
+
+ role
+
+ The role of the messages author, in this case user.
+ +Swift
+public let role: `Self`.Role
+
+
+
+
+ name
+
+ An optional name for the participant. Provides the model information to differentiate between participants of the same role.
+ +Swift
+public let name: String?
+
+
+
+
+ init(content:name:)
+
+ Swift
+public init(
+ content: Content,
+ name: String? = nil
+)
+
+
+
+
+ Content
+
+ Swift
+public enum Content : Codable, Equatable
+
+ Docs (100% documented)
+public enum Content : Codable, Equatable
+
+
+
+
+ string(_:)
+
+ Swift
+case string(String)
+
+
+
+
+ vision(_:)
+
+ Swift
+case vision([VisionContent])
+
+
+
+
+ string
+
+ Swift
+public var string: String? { get }
+
+
+
+
+ init(string:)
+
+ Swift
+public init(string: String)
+
+
+
+
+ init(vision:)
+
+ Swift
+public init(vision: [VisionContent])
+
+
+
+
+ CodingKeys
+
+ Swift
+public enum CodingKeys : CodingKey
+
+
+
+
+ encode(to:)
+
+ Swift
+public func encode(to encoder: Encoder) throws
+
+
+
+
+ VisionContent
+
+ Swift
+public enum VisionContent : Codable, Equatable
+
+
+
+
+ init(from:)
+
+ Swift
+public init(from decoder: Decoder) throws
+
+ Docs (100% documented)
+public enum CodingKeys : CodingKey
+
+ Docs (100% documented)
+public enum VisionContent : Codable, Equatable
+
+
+
+
+ chatCompletionContentPartTextParam(_:)
+
+ Swift
+case chatCompletionContentPartTextParam(ChatCompletionContentPartTextParam)
+
+
+
+
+ chatCompletionContentPartImageParam(_:)
+
+ Swift
+case chatCompletionContentPartImageParam(ChatCompletionContentPartImageParam)
+
+
+
+
+ text
+
+ Swift
+public var text: String? { get }
+
+
+
+
+ imageUrl
+
+ Swift
+public var imageUrl: `Self`.ChatCompletionContentPartImageParam.ImageURL? { get }
+
+
+
+
+ init(chatCompletionContentPartTextParam:)
+
+ Swift
+public init(chatCompletionContentPartTextParam: ChatCompletionContentPartTextParam)
+
+
+
+
+ init(chatCompletionContentPartImageParam:)
+
+ Swift
+public init(chatCompletionContentPartImageParam: ChatCompletionContentPartImageParam)
+
+
+
+
+ encode(to:)
+
+ Swift
+public func encode(to encoder: Encoder) throws
+
+
+
+
+ ChatCompletionContentPartTextParam
+
+ Swift
+public struct ChatCompletionContentPartTextParam : Codable, Equatable
+
+
+
+
+ ChatCompletionContentPartImageParam
+
+ Swift
+public struct ChatCompletionContentPartImageParam : Codable, Equatable
+
+ Docs (100% documented)
+public struct ChatCompletionContentPartImageParam : Codable, Equatable
+
+
+
+
+ imageUrl
+
+ Swift
+public let imageUrl: ImageURL
+
+
+
+
+ type
+
+ The type of the content part.
+ +Swift
+public let type: String
+
+
+
+
+ init(imageUrl:)
+
+ Swift
+public init(imageUrl: ImageURL)
+
+
+
+
+ ImageURL
+
+ Swift
+public struct ImageURL : Codable, Equatable
+
+
+
+
+ CodingKeys
+
+ Swift
+public enum CodingKeys : String, CodingKey
+
+ Docs (100% documented)
+public enum CodingKeys : String, CodingKey
+
+ Docs (100% documented)
+public struct ImageURL : Codable, Equatable
+
+
+
+
+ url
+
+ Either a URL of the image or the base64 encoded image data.
+ +Swift
+public let url: String
+
+
+
+
+ detail
+
+ Specifies the detail level of the image. Learn more in the +Vision guide https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding
+ +Swift
+public let detail: Detail
+
+
+
+
+ init(url:detail:)
+
+ Swift
+public init(url: String, detail: Detail)
+
+
+
+
+ init(url:detail:)
+
+ Swift
+public init(url: Data, detail: Detail)
+
+
+
+
+ Detail
+
+ Swift
+public enum Detail : String, Codable, Equatable, CaseIterable
+
+ Docs (100% documented)
+public enum Detail : String, Codable, Equatable, CaseIterable
+
+ Docs (100% documented)
+public struct ChatCompletionContentPartTextParam : Codable, Equatable
+
+
+
+
+ text
+
+ The text content.
+ +Swift
+public let text: String
+
+
+
+
+ type
+
+ The type of the content part.
+ +Swift
+public let type: String
+
+
+
+
+ init(text:)
+
+ Swift
+public init(text: String)
+
+ Docs (100% documented)
+public enum Role : String, Codable, Equatable, CaseIterable
+
+
+
+
+ system
+
+ Swift
+case system
+
+
+
+
+ user
+
+ Swift
+case user
+
+
+
+
+ assistant
+
+ Swift
+case assistant
+
+
+
+
+ tool
+
+ Swift
+case tool
+
+ Docs (100% documented)
+public struct ChatCompletionToolParam : Codable, Equatable
+
+
+
+
+ function
+
+ Swift
+public let function: `Self`.FunctionDefinition
+
+
+
+
+ type
+
+ Swift
+public let type: `Self`.ToolsType
+
+
+
+
+ init(function:)
+
+ Swift
+public init(
+ function: Self.FunctionDefinition
+)
+
+
+
+
+ FunctionDefinition
+
+ Swift
+public struct FunctionDefinition : Codable, Equatable
+
+
+
+
+ ToolsType
+
+ Swift
+public enum ToolsType : String, Codable, Equatable
+
+ Docs (100% documented)
+public struct FunctionDefinition : Codable, Equatable
+
+
+
+
+ name
+
+ The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.
+ +Swift
+public let name: String
+
+
+
+
+ description
+
+ The description of what the function does.
+ +Swift
+public let description: String?
+
+
+
+
+ parameters
+
+ The parameters the functions accepts, described as a JSON Schema object. +https://platform.openai.com/docs/guides/text-generation/function-calling +https://json-schema.org/understanding-json-schema/ +Python library defines only [String: Object] dictionary.
+ +Swift
+public let parameters: `Self`.FunctionParameters?
+
+
+
+
+ init(name:description:parameters:)
+
+ Swift
+public init(
+ name: String,
+ description: String? = nil,
+ parameters: Self.FunctionParameters? = nil
+)
+
+
+
+
+ FunctionParameters
+
+ See the guide for examples, and the JSON Schema reference for documentation about the format.
+ + See more +Swift
+public struct FunctionParameters : Codable, Equatable
+
+ Docs (100% documented)
+public struct FunctionParameters : Codable, Equatable
+
+ See the guide for examples, and the JSON Schema reference for documentation about the format.
+ +
+
+
+ type
+
+ Swift
+public let type: `Self`.JSONType
+
+
+
+
+ properties
+
+ Swift
+public let properties: [String : Property]?
+
+
+
+
+ required
+
+ Swift
+public let required: [String]?
+
+
+
+
+ pattern
+
+ Swift
+public let pattern: String?
+
+
+
+
+ const
+
+ Swift
+public let const: String?
+
+
+
+
+ enum
+
+ Swift
+public let `enum`: [String]?
+
+
+
+
+ multipleOf
+
+ Swift
+public let multipleOf: Int?
+
+
+
+
+ minimum
+
+ Swift
+public let minimum: Int?
+
+
+
+
+ maximum
+
+ Swift
+public let maximum: Int?
+
+
+
+
+ Property
+
+ Swift
+public struct Property : Codable, Equatable
+
+
+
+
+ JSONType
+
+ Docs (100% documented)
+public enum JSONType : String, Codable
+
+
+
+
+ integer
+
+ Swift
+case integer
+
+
+
+
+ string
+
+ Swift
+case string
+
+
+
+
+ boolean
+
+ Swift
+case boolean
+
+
+
+
+ array
+
+ Swift
+case array
+
+
+
+
+ object
+
+ Swift
+case object
+
+
+
+
+ number
+
+ Swift
+case number
+
+
+
+
+ null
+
+ Swift
+case null
+
+ Docs (100% documented)
+public struct Property : Codable, Equatable
+
+
+
+
+ JSONType
+
+ Swift
+public typealias JSONType = ChatQuery.ChatCompletionToolParam.FunctionDefinition.FunctionParameters.JSONType
+
+
+
+
+ type
+
+ Swift
+public let type: `Self`.JSONType
+
+
+
+
+ description
+
+ Swift
+public let description: String?
+
+
+
+
+ format
+
+ Swift
+public let format: String?
+
+
+
+
+ items
+
+ Swift
+public let items: `Self`.Items?
+
+
+
+
+ required
+
+ Swift
+public let required: [String]?
+
+
+
+
+ pattern
+
+ Swift
+public let pattern: String?
+
+
+
+
+ const
+
+ Swift
+public let const: String?
+
+
+
+
+ enum
+
+ Swift
+public let `enum`: [String]?
+
+
+
+
+ multipleOf
+
+ Swift
+public let multipleOf: Int?
+
+
+
+
+ minimum
+
+ Swift
+public let minimum: Double?
+
+
+
+
+ maximum
+
+ Swift
+public let maximum: Double?
+
+
+
+
+ minItems
+
+ Swift
+public let minItems: Int?
+
+
+
+
+ maxItems
+
+ Swift
+public let maxItems: Int?
+
+
+
+
+ uniqueItems
+
+ Swift
+public let uniqueItems: Bool?
+
+
+
+
+ init(type:description:format:items:required:pattern:const:enum:multipleOf:minimum:maximum:minItems:maxItems:uniqueItems:)
+
+ Swift
+public init(
+ type: Self.JSONType,
+ description: String? = nil,
+ format: String? = nil,
+ items: Self.Items? = nil,
+ required: [String]? = nil,
+ pattern: String? = nil,
+ const: String? = nil,
+ enum: [String]? = nil,
+ multipleOf: Int? = nil,
+ minimum: Double? = nil,
+ maximum: Double? = nil,
+ minItems: Int? = nil,
+ maxItems: Int? = nil,
+ uniqueItems: Bool? = nil
+)
+
+
+
+
+ Items
+
+ Swift
+public struct Items : Codable, Equatable
+
+ Docs (100% documented)
+public struct Items : Codable, Equatable
+
+
+
+
+ JSONType
+
+ Swift
+public typealias JSONType = ChatQuery.ChatCompletionToolParam.FunctionDefinition.FunctionParameters.JSONType
+
+
+
+
+ type
+
+ Swift
+public let type: `Self`.JSONType
+
+
+
+
+ properties
+
+ Swift
+public let properties: [String : Property]?
+
+
+
+
+ pattern
+
+ Swift
+public let pattern: String?
+
+
+
+
+ const
+
+ Swift
+public let const: String?
+
+
+
+
+ enum
+
+ Swift
+public let `enum`: [String]?
+
+
+
+
+ multipleOf
+
+ Swift
+public let multipleOf: Int?
+
+
+
+
+ minimum
+
+ Swift
+public let minimum: Double?
+
+
+
+
+ maximum
+
+ Swift
+public let maximum: Double?
+
+
+
+
+ minItems
+
+ Swift
+public let minItems: Int?
+
+
+
+
+ maxItems
+
+ Swift
+public let maxItems: Int?
+
+
+
+
+ uniqueItems
+
+ Swift
+public let uniqueItems: Bool?
+
+
+
+
+ init(type:properties:pattern:const:enum:multipleOf:minimum:maximum:minItems:maxItems:uniqueItems:)
+
+ Swift
+public init(
+ type: Self.JSONType,
+ properties: [String : Property]? = nil,
+ pattern: String? = nil,
+ const: String? = nil,
+ `enum`: [String]? = nil,
+ multipleOf: Int? = nil,
+ minimum: Double? = nil,
+ maximum: Double? = nil,
+ minItems: Int? = nil,
+ maxItems: Int? = nil,
+ uniqueItems: Bool? = nil
+)
+
+ Docs (100% documented)
+public enum ToolsType : String, Codable, Equatable
+
+
+
+
+ function
+
+ Swift
+case function
+
+ Docs (100% documented)
+public enum CodingKeys : String, CodingKey
+
+
+
+
+ messages
+
+ Swift
+case messages
+
+
+
+
+ model
+
+ Swift
+case model
+
+
+
+
+ frequencyPenalty
+
+ Swift
+case frequencyPenalty = "frequency_penalty"
+
+
+
+
+ logitBias
+
+ Swift
+case logitBias = "logit_bias"
+
+
+
+
+ logprobs
+
+ Swift
+case logprobs
+
+
+
+
+ maxTokens
+
+ Swift
+case maxTokens = "max_tokens"
+
+
+
+
+ n
+
+ Swift
+case n
+
+
+
+
+ presencePenalty
+
+ Swift
+case presencePenalty = "presence_penalty"
+
+
+
+
+ responseFormat
+
+ Swift
+case responseFormat = "response_format"
+
+
+
+
+ seed
+
+ Swift
+case seed
+
+
+
+
+ stop
+
+ Swift
+case stop
+
+
+
+
+ temperature
+
+ Swift
+case temperature
+
+
+
+
+ toolChoice
+
+ Swift
+case toolChoice = "tool_choice"
+
+
+
+
+ tools
+
+ Swift
+case tools
+
+
+
+
+ topLogprobs
+
+ Swift
+case topLogprobs = "top_logprobs"
+
+
+
+
+ topP
+
+ Swift
+case topP = "top_p"
+
+
+
+
+ user
+
+ Swift
+case user
+
+
+
+
+ stream
+
+ Swift
+case stream
+
+ Docs (100% documented)
+public enum ResponseFormat : String, Codable, Equatable
+
+
+
+
+ jsonObject
+
+ Swift
+case jsonObject = "json_object"
+
+
+
+
+ text
+
+ Swift
+case text
+
+
+
+
+ encode(to:)
+
+ Swift
+public func encode(to encoder: Encoder) throws
+
+ Docs (100% documented)
+public enum Stop : Codable, Equatable
+
+
+
+
+ string(_:)
+
+ Swift
+case string(String)
+
+
+
+
+ stringList(_:)
+
+ Swift
+case stringList([String])
+
+
+
+
+ encode(to:)
+
+ Swift
+public func encode(to encoder: Encoder) throws
+
+
+
+
+ init(string:)
+
+ Swift
+public init(string: String)
+
+
+
+
+ init(stringList:)
+
+ Swift
+public init(stringList: [String])
+
+ Docs (100% documented)
+public struct ChatResult : Codable, Equatable
+
+ https://platform.openai.com/docs/api-reference/chat/object +Example Completion object print
+{
+ "id": "chatcmpl-123456",
+ "object": "chat.completion",
+ "created": 1728933352,
+ "model": "gpt-4o-2024-08-06",
+ "choices": [
+ {
+ "index": 0,
+ "message": {
+ "role": "assistant",
+ "content": "Hi there! How can I assist you today?",
+ "refusal": null
+ },
+ "logprobs": null,
+ "finish_reason": "stop"
+ }
+ ],
+ "usage": {
+ "prompt_tokens": 19,
+ "completion_tokens": 10,
+ "total_tokens": 29,
+ "prompt_tokens_details": {
+ "cached_tokens": 0
+ },
+ "completion_tokens_details": {
+ "reasoning_tokens": 0
+ }
+ },
+ "system_fingerprint": "fp_6b68a8204b"
+}
+
+
+
+
+
+ Choice
+
+ mimic the choices array in the chat completion object
+ + See more +Swift
+public struct Choice : Codable, Equatable
+
+
+
+
+ CompletionUsage
+
+ Swift
+public struct CompletionUsage : Codable, Equatable
+
+
+
+
+ id
+
+ A unique identifier for the chat completion.
+ +Swift
+public let id: String
+
+
+
+
+ object
+
+ The object type, which is always chat.completion.
+ +Swift
+public let object: String
+
+
+
+
+ created
+
+ The Unix timestamp (in seconds) of when the chat completion was created.
+ +Swift
+public let created: TimeInterval
+
+
+
+
+ model
+
+ The model used for the chat completion.
+ +Swift
+public let model: String
+
+
+
+
+ choices
+
+ A list of chat completion choices. Can be more than one if n is greater than 1.
+ +Swift
+public let choices: [Choice]
+
+
+
+
+ usage
+
+ Usage statistics for the completion request.
+ +Swift
+public let usage: `Self`.CompletionUsage?
+
+
+
+
+ systemFingerprint
+
+ This fingerprint represents the backend configuration that the model runs with. +Can be used in conjunction with the seed request parameter to understand when backend changes have been made that might impact determinism.
+ +Swift
+public let systemFingerprint: String?
+
+
+
+
+ CodingKeys
+
+ Swift
+public enum CodingKeys : String, CodingKey
+
+ Docs (100% documented)
+public struct Choice : Codable, Equatable
+
+ mimic the choices array in the chat completion object
+ +
+
+
+ ChatCompletionMessage
+
+ Swift
+public typealias ChatCompletionMessage = ChatQuery.ChatCompletionMessageParam
+
+
+
+
+ index
+
+ The index of the choice in the list of choices.
+ +Swift
+public let index: Int
+
+
+
+
+ logprobs
+
+ Log probability information for the choice.
+ +Swift
+public let logprobs: `Self`.ChoiceLogprobs?
+
+
+
+
+ message
+
+ A chat completion message generated by the model.
+ +Swift
+public let message: `Self`.ChatCompletionMessage
+
+
+
+
+ finishReason
+
+ The reason the model stopped generating tokens. This will be stop if the model hit a natural stop point or a provided stop sequence, length if the maximum number of tokens specified in the request was reached, content_filter if content was omitted due to a flag from our content filters, tool_calls if the model called a tool, or function_call (deprecated) if the model called a function.
+ +Swift
+public let finishReason: String?
+
+
+
+
+ ChoiceLogprobs
+
+ Swift
+public struct ChoiceLogprobs : Codable, Equatable
+
+
+
+
+ CodingKeys
+
+ Swift
+public enum CodingKeys : String, CodingKey
+
+
+
+
+ FinishReason
+
+ Swift
+public enum FinishReason : String, Codable, Equatable
+
+ Docs (100% documented)
+public struct ChoiceLogprobs : Codable, Equatable
+
+
+
+
+ content
+
+ Swift
+public let content: [`Self`.ChatCompletionTokenLogprob]?
+
+
+
+
+ ChatCompletionTokenLogprob
+
+ Swift
+public struct ChatCompletionTokenLogprob : Codable, Equatable
+
+ Docs (100% documented)
+public struct ChatCompletionTokenLogprob : Codable, Equatable
+
+
+
+
+ token
+
+ The token.
+ +Swift
+public let token: String
+
+
+
+
+ bytes
+
+ A list of integers representing the UTF-8 bytes representation of the token.
+Useful in instances where characters are represented by multiple tokens and
+their byte representations must be combined to generate the correct text
+representation. Can be null
if there is no bytes representation for the token.
Swift
+public let bytes: [Int]?
+
+
+
+
+ logprob
+
+ The log probability of this token.
+ +Swift
+public let logprob: Double
+
+
+
+
+ topLogprobs
+
+ List of the most likely tokens and their log probability, at this token position.
+In rare cases, there may be fewer than the number of requested top_logprobs
returned.
Swift
+public let topLogprobs: [TopLogprob]
+
+
+
+
+ TopLogprob
+
+ Swift
+public struct TopLogprob : Codable, Equatable
+
+
+
+
+ CodingKeys
+
+ Swift
+public enum CodingKeys : String, CodingKey
+
+ Docs (100% documented)
+public enum CodingKeys : String, CodingKey
+
+
+
+
+ token
+
+ Swift
+case token
+
+
+
+
+ bytes
+
+ Swift
+case bytes
+
+
+
+
+ logprob
+
+ Swift
+case logprob
+
+
+
+
+ topLogprobs
+
+ Swift
+case topLogprobs = "top_logprobs"
+
+ Docs (100% documented)
+public struct TopLogprob : Codable, Equatable
+
+
+
+
+ token
+
+ The token.
+ +Swift
+public let token: String
+
+
+
+
+ bytes
+
+ A list of integers representing the UTF-8 bytes representation of the token.
+Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be null
if there is no bytes representation for the token.
Swift
+public let bytes: [Int]?
+
+
+
+
+ logprob
+
+ The log probability of this token.
+ +Swift
+public let logprob: Double
+
+ Docs (100% documented)
+public enum CodingKeys : String, CodingKey
+
+
+
+
+ index
+
+ Swift
+case index
+
+
+
+
+ logprobs
+
+ Swift
+case logprobs
+
+
+
+
+ message
+
+ Swift
+case message
+
+
+
+
+ finishReason
+
+ Swift
+case finishReason = "finish_reason"
+
+ Docs (100% documented)
+public enum FinishReason : String, Codable, Equatable
+
+
+
+
+ stop
+
+ Swift
+case stop
+
+
+
+
+ length
+
+ Swift
+case length
+
+
+
+
+ toolCalls
+
+ Swift
+case toolCalls = "tool_calls"
+
+
+
+
+ contentFilter
+
+ Swift
+case contentFilter = "content_filter"
+
+
+
+
+ functionCall
+
+ Swift
+case functionCall = "function_call"
+
+ Docs (100% documented)
+public enum CodingKeys : String, CodingKey
+
+
+
+
+ id
+
+ Swift
+case id
+
+
+
+
+ object
+
+ Swift
+case object
+
+
+
+
+ created
+
+ Swift
+case created
+
+
+
+
+ model
+
+ Swift
+case model
+
+
+
+
+ choices
+
+ Swift
+case choices
+
+
+
+
+ usage
+
+ Swift
+case usage
+
+
+
+
+ systemFingerprint
+
+ Swift
+case systemFingerprint = "system_fingerprint"
+
+ Docs (100% documented)
+public struct CompletionUsage : Codable, Equatable
+
+
+
+
+ completionTokens
+
+ Number of tokens in the generated completion.
+ +Swift
+public let completionTokens: Int
+
+
+
+
+ promptTokens
+
+ Number of tokens in the prompt.
+ +Swift
+public let promptTokens: Int
+
+
+
+
+ totalTokens
+
+ Total number of tokens used in the request (prompt + completion).
+ +Swift
+public let totalTokens: Int
+
+ Docs (100% documented)
+public struct ChatStreamResult : Codable, Equatable
+
+
+
+
+ Choice
+
+ Swift
+public struct Choice : Codable, Equatable
+
+
+
+
+ id
+
+ A unique identifier for the chat completion. Each chunk has the same ID.
+ +Swift
+public let id: String
+
+
+
+
+ object
+
+ The object type, which is always chat.completion.chunk
.
Swift
+public let object: String
+
+
+
+
+ created
+
+ The Unix timestamp (in seconds) of when the chat completion was created. +Each chunk has the same timestamp.
+ +Swift
+public let created: TimeInterval
+
+
+
+
+ model
+
+ The model to generate the completion.
+ +Swift
+public let model: String
+
+
+
+
+ choices
+
+ A list of chat completion choices.
+Can be more than one if n
is greater than 1.
Swift
+public let choices: [Choice]
+
+
+
+
+ systemFingerprint
+
+ This fingerprint represents the backend configuration that the model runs with. Can be used in conjunction with the seed
request parameter to understand when backend changes have been made that might impact determinism.
Swift
+public let systemFingerprint: String?
+
+
+
+
+ CodingKeys
+
+ Swift
+public enum CodingKeys : String, CodingKey
+
+ Docs (100% documented)
+public struct Choice : Codable, Equatable
+
+
+
+
+ FinishReason
+
+ Swift
+public typealias FinishReason = ChatResult.Choice.FinishReason
+
+
+
+
+ ChoiceDelta
+
+ Swift
+public struct ChoiceDelta : Codable, Equatable
+
+
+
+
+ index
+
+ The index of the choice in the list of choices.
+ +Swift
+public let index: Int
+
+
+
+
+ delta
+
+ A chat completion delta generated by streamed model responses.
+ +Swift
+public let delta: `Self`.ChoiceDelta
+
+
+
+
+ finishReason
+
+ The reason the model stopped generating tokens.
+This will be stop
if the model hit a natural stop point or a provided stop sequence, length
if the maximum number of tokens specified in the request was reached, content_filter
if content was omitted due to a flag from our content filters, tool_calls
if the model called a tool, or function_call
(deprecated) if the model called a function.
Swift
+public let finishReason: FinishReason?
+
+
+
+
+ logprobs
+
+ Log probability information for the choice.
+ +Swift
+public let logprobs: `Self`.ChoiceLogprobs?
+
+
+
+
+ ChoiceLogprobs
+
+ Swift
+public struct ChoiceLogprobs : Codable, Equatable
+
+
+
+
+ CodingKeys
+
+ Swift
+public enum CodingKeys : String, CodingKey
+
+ Docs (100% documented)
+public struct ChoiceDelta : Codable, Equatable
+
+
+
+
+ Role
+
+ Swift
+public typealias Role = ChatQuery.ChatCompletionMessageParam.Role
+
+
+
+
+ content
+
+ The contents of the chunk message.
+ +Swift
+public let content: String?
+
+
+
+
+ role
+
+ The role of the author of this message.
+ +Swift
+public let role: `Self`.Role?
+
+
+
+
+ toolCalls
+
+ Swift
+public let toolCalls: [`Self`.ChoiceDeltaToolCall]?
+
+
+
+
+ ChoiceDeltaToolCall
+
+ Swift
+public struct ChoiceDeltaToolCall : Codable, Equatable
+
+
+
+
+ CodingKeys
+
+ Swift
+public enum CodingKeys : String, CodingKey
+
+ Docs (100% documented)
+public struct ChoiceDeltaToolCall : Codable, Equatable
+
+
+
+
+ index
+
+ Swift
+public let index: Int
+
+
+
+
+ id
+
+ The ID of the tool call.
+ +Swift
+public let id: String?
+
+
+
+
+ function
+
+ The function that the model called.
+ +Swift
+public let function: `Self`.ChoiceDeltaToolCallFunction?
+
+
+
+
+ type
+
+ The type of the tool. Currently, only function is supported.
+ +Swift
+public let type: String?
+
+
+
+
+ init(index:id:function:)
+
+ Swift
+public init(
+ index: Int,
+ id: String? = nil,
+ function: Self.ChoiceDeltaToolCallFunction? = nil
+)
+
+
+
+
+ ChoiceDeltaToolCallFunction
+
+ Swift
+public struct ChoiceDeltaToolCallFunction : Codable, Equatable
+
+ Docs (100% documented)
+public struct ChoiceDeltaToolCallFunction : Codable, Equatable
+
+
+
+
+ arguments
+
+ The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.
+ +Swift
+public let arguments: String?
+
+
+
+
+ name
+
+ The name of the function to call.
+ +Swift
+public let name: String?
+
+
+
+
+ init(arguments:name:)
+
+ Swift
+public init(
+ arguments: String? = nil,
+ name: String? = nil
+)
+
+ Docs (100% documented)
+public enum CodingKeys : String, CodingKey
+
+ Docs (100% documented)
+public struct ChoiceLogprobs : Codable, Equatable
+
+
+
+
+ content
+
+ A list of message content tokens with log probability information.
+ +Swift
+public let content: [`Self`.ChatCompletionTokenLogprob]?
+
+
+
+
+ ChatCompletionTokenLogprob
+
+ Swift
+public struct ChatCompletionTokenLogprob : Codable, Equatable
+
+ Docs (100% documented)
+public struct ChatCompletionTokenLogprob : Codable, Equatable
+
+
+
+
+ token
+
+ The token.
+ +Swift
+public let token: String
+
+
+
+
+ bytes
+
+ A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be null if there is no bytes representation for the token.
+ +Swift
+public let bytes: [Int]?
+
+
+
+
+ logprob
+
+ The log probability of this token.
+ +Swift
+public let logprob: Double
+
+
+
+
+ topLogprobs
+
+ List of the most likely tokens and their log probability, at this token position. In rare cases, there may be fewer than the number of requested top_logprobs returned.
+ +Swift
+public let topLogprobs: [`Self`.TopLogprob]?
+
+
+
+
+ TopLogprob
+
+ Swift
+public struct TopLogprob : Codable, Equatable
+
+
+
+
+ CodingKeys
+
+ Swift
+public enum CodingKeys : String, CodingKey
+
+ Docs (100% documented)
+public enum CodingKeys : String, CodingKey
+
+
+
+
+ token
+
+ Swift
+case token
+
+
+
+
+ bytes
+
+ Swift
+case bytes
+
+
+
+
+ logprob
+
+ Swift
+case logprob
+
+
+
+
+ topLogprobs
+
+ Swift
+case topLogprobs = "top_logprobs"
+
+ Docs (100% documented)
+public struct TopLogprob : Codable, Equatable
+
+
+
+
+ token
+
+ The token.
+ +Swift
+public let token: String
+
+
+
+
+ bytes
+
+ A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be null if there is no bytes representation for the token.
+ +Swift
+public let bytes: [Int]?
+
+
+
+
+ logprob
+
+ The log probability of this token.
+ +Swift
+public let logprob: Double
+
+ Docs (100% documented)
+public enum CodingKeys : String, CodingKey
+
+
+
+
+ index
+
+ Swift
+case index
+
+
+
+
+ delta
+
+ Swift
+case delta
+
+
+
+
+ finishReason
+
+ Swift
+case finishReason = "finish_reason"
+
+
+
+
+ logprobs
+
+ Swift
+case logprobs
+
+ Docs (100% documented)
+public enum CodingKeys : String, CodingKey
+
+
+
+
+ id
+
+ Swift
+case id
+
+
+
+
+ object
+
+ Swift
+case object
+
+
+
+
+ created
+
+ Swift
+case created
+
+
+
+
+ model
+
+ Swift
+case model
+
+
+
+
+ choices
+
+ Swift
+case choices
+
+
+
+
+ systemFingerprint
+
+ Swift
+case systemFingerprint = "system_fingerprint"
+
+ Docs (100% documented)
+public struct CompletionsQuery : Codable, Streamable
+
+
+
+
+ model
+
+ ID of the model to use.
+ +Swift
+public let model: Model
+
+
+
+
+ prompt
+
+ The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.
+ +Swift
+public let prompt: String
+
+
+
+
+ temperature
+
+ What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer.
+ +Swift
+public let temperature: Double?
+
+
+
+
+ maxTokens
+
+ The maximum number of tokens to generate in the completion.
+ +Swift
+public let maxTokens: Int?
+
+
+
+
+ topP
+
+ An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ +Swift
+public let topP: Double?
+
+
+
+
+ frequencyPenalty
+
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model’s likelihood to repeat the same line verbatim.
+ +Swift
+public let frequencyPenalty: Double?
+
+
+
+
+ presencePenalty
+
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model’s likelihood to talk about new topics.
+ +Swift
+public let presencePenalty: Double?
+
+
+
+
+ stop
+
+ Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
+ +Swift
+public let stop: [String]?
+
+
+
+
+ user
+
+ A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
+ +Swift
+public let user: String?
+
+
+
+
+ init(model:prompt:temperature:maxTokens:topP:frequencyPenalty:presencePenalty:stop:user:)
+
+ Swift
+public init(model: Model, prompt: String, temperature: Double? = nil, maxTokens: Int? = nil, topP: Double? = nil, frequencyPenalty: Double? = nil, presencePenalty: Double? = nil, stop: [String]? = nil, user: String? = nil)
+
+ Docs (100% documented)
+public struct CompletionsResult : Codable, Equatable
+
+
+
+
+ Usage
+
+ Swift
+public struct Usage : Codable, Equatable
+
+
+
+
+ Choice
+
+ Swift
+public struct Choice : Codable, Equatable
+
+
+
+
+ id
+
+ Swift
+public let id: String
+
+
+
+
+ object
+
+ Swift
+public let object: String
+
+
+
+
+ created
+
+ Swift
+public let created: TimeInterval
+
+
+
+
+ model
+
+ Swift
+public let model: Model
+
+
+
+
+ choices
+
+ Swift
+public let choices: [Choice]
+
+
+
+
+ usage
+
+ Swift
+public let usage: Usage?
+
+ Docs (100% documented)
+public struct Choice : Codable, Equatable
+
+
+
+
+ text
+
+ Swift
+public let text: String
+
+
+
+
+ index
+
+ Swift
+public let index: Int
+
+
+
+
+ finishReason
+
+ Swift
+public let finishReason: String?
+
+ Docs (100% documented)
+public struct Usage : Codable, Equatable
+
+
+
+
+ promptTokens
+
+ Swift
+public let promptTokens: Int
+
+
+
+
+ completionTokens
+
+ Swift
+public let completionTokens: Int
+
+
+
+
+ totalTokens
+
+ Swift
+public let totalTokens: Int
+
+ Docs (100% documented)
+public struct EditsQuery : Codable
+
+
+
+
+ model
+
+ ID of the model to use.
+ +Swift
+public let model: Model
+
+
+
+
+ input
+
+ The input text to use as a starting point for the edit.
+ +Swift
+public let input: String?
+
+
+
+
+ instruction
+
+ The instruction that tells the model how to edit the prompt.
+ +Swift
+public let instruction: String
+
+
+
+
+ n
+
+ How many edits to generate for the input and instruction. Must be between 1 and 10.
+ +Swift
+public let n: Int?
+
+
+
+
+ temperature
+
+ What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer.
+ +Swift
+public let temperature: Double?
+
+
+
+
+ topP
+
+ An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ +Swift
+public let topP: Double?
+
+ Swift
+public init(model: Model, input: String?, instruction: String, n: Int? = nil, temperature: Double? = nil, topP: Double? = nil)
+
+ Docs (100% documented)
+public struct EditsResult : Codable, Equatable
+
+
+
+
+ Choice
+
+ Swift
+public struct Choice : Codable, Equatable
+
+
+
+
+ Usage
+
+ Swift
+public struct Usage : Codable, Equatable
+
+
+
+
+ object
+
+ Swift
+public let object: String
+
+
+
+
+ created
+
+ Swift
+public let created: TimeInterval
+
+
+
+
+ choices
+
+ Swift
+public let choices: [Choice]
+
+
+
+
+ usage
+
+ Swift
+public let usage: Usage
+
+ Docs (100% documented)
+public struct Choice : Codable, Equatable
+
+ Docs (100% documented)
+public struct Usage : Codable, Equatable
+
+
+
+
+ promptTokens
+
+ Swift
+public let promptTokens: Int
+
+
+
+
+ completionTokens
+
+ Swift
+public let completionTokens: Int
+
+
+
+
+ totalTokens
+
+ Swift
+public let totalTokens: Int
+
+ Docs (100% documented)
+public struct EmbeddingsQuery : Codable
+
+
+
+
+ input
+
+ Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for text-embedding-ada-002), cannot be an empty string, and any array must be 2048 dimensions or less.
+ +Swift
+public let input: `Self`.Input
+
+
+
+
+ model
+
+ ID of the model to use. You can use the List models API to see all of your available models, or see our Model overview for descriptions of them. +https://platform.openai.com/docs/api-reference/models/list +https://platform.openai.com/docs/models/overview
+ +Swift
+public let model: Model
+
+
+
+
+ encodingFormat
+
+ The format to return the embeddings in. Can be either float or base64. +https://pypi.org/project/pybase64/
+ +Swift
+public let encodingFormat: `Self`.EncodingFormat?
+
+
+
+
+ user
+
+ A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. +https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids
+ +Swift
+public let user: String?
+
+
+
+
+ init(input:model:encodingFormat:user:)
+
+ Swift
+public init(
+ input: Self.Input,
+ model: Model,
+ encodingFormat: Self.EncodingFormat? = nil,
+ user: String? = nil
+)
+
+
+
+
+ Input
+
+ Swift
+public enum Input : Codable, Equatable
+
+
+
+
+ EncodingFormat
+
+ Swift
+public enum EncodingFormat : String, Codable
+
+
+
+
+ CodingKeys
+
+ Swift
+public enum CodingKeys : String, CodingKey
+
+ Docs (100% documented)
+public enum CodingKeys : String, CodingKey
+
+
+
+
+ input
+
+ Swift
+case input
+
+
+
+
+ model
+
+ Swift
+case model
+
+
+
+
+ encodingFormat
+
+ Swift
+case encodingFormat = "encoding_format"
+
+
+
+
+ user
+
+ Swift
+case user
+
+ Docs (100% documented)
+public enum EncodingFormat : String, Codable
+
+ Docs (100% documented)
+public enum Input : Codable, Equatable
+
+
+
+
+ string(_:)
+
+ Swift
+case string(String)
+
+
+
+
+ stringList(_:)
+
+ Swift
+case stringList([String])
+
+
+
+
+ intList(_:)
+
+ Swift
+case intList([Int])
+
+
+
+
+ intMatrix(_:)
+
+ Swift
+case intMatrix([[Int]])
+
+
+
+
+ encode(to:)
+
+ Swift
+public func encode(to encoder: Encoder) throws
+
+
+
+
+ init(string:)
+
+ Swift
+public init(string: String)
+
+
+
+
+ init(stringList:)
+
+ Swift
+public init(stringList: [String])
+
+
+
+
+ init(intList:)
+
+ Swift
+public init(intList: [Int])
+
+
+
+
+ init(intMatrix:)
+
+ Swift
+public init(intMatrix: [[Int]])
+
+ Docs (100% documented)
+public struct EmbeddingsResult : Codable, Equatable
+
+
+
+
+ Embedding
+
+ Swift
+public struct Embedding : Codable, Equatable
+
+
+
+
+ Usage
+
+ Swift
+public struct Usage : Codable, Equatable
+
+
+
+
+ data
+
+ Swift
+public let data: [Embedding]
+
+
+
+
+ model
+
+ Swift
+public let model: String
+
+
+
+
+ usage
+
+ Swift
+public let usage: Usage
+
+
+
+
+ object
+
+ The object type, which is always “list”.
+ +Swift
+public let object: String
+
+ Docs (100% documented)
+public struct Embedding : Codable, Equatable
+
+
+
+
+ object
+
+ The object type, which is always “embedding”.
+ +Swift
+public let object: String
+
+
+
+
+ embedding
+
+ The embedding vector, which is a list of floats. The length of vector depends on the model as listed in the embedding guide. +https://platform.openai.com/docs/guides/embeddings
+ +Swift
+public let embedding: [Double]
+
+
+
+
+ index
+
+ The index of the embedding in the list of embeddings.
+ +Swift
+public let index: Int
+
+ Docs (100% documented)
+public struct Usage : Codable, Equatable
+
+
+
+
+ promptTokens
+
+ Swift
+public let promptTokens: Int
+
+
+
+
+ totalTokens
+
+ Swift
+public let totalTokens: Int
+
+ Docs (100% documented)
+public struct ImageEditsQuery : Codable
+
+
+
+
+ ResponseFormat
+
+ Swift
+public typealias ResponseFormat = ImagesQuery.ResponseFormat
+
+
+
+
+ Size
+
+ Swift
+public typealias Size = ImagesQuery.Size
+
+
+
+
+ image
+
+ The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask.
+ +Swift
+public let image: Data
+
+
+
+
+ mask
+
+ An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where image should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as image.
+ +Swift
+public let mask: Data?
+
+
+
+
+ prompt
+
+ A text description of the desired image(s). The maximum length is 1000 characters.
+ +Swift
+public let prompt: String
+
+
+
+
+ model
+
+ The model to use for image generation. +Defaults to dall-e-2
+ +Swift
+public let model: Model?
+
+
+
+
+ n
+
+ The number of images to generate. Must be between 1 and 10.
+ +Swift
+public let n: Int?
+
+
+
+
+ responseFormat
+
+ The format in which the generated images are returned. Must be one of url or b64_json. +Defaults to url
+ +Swift
+public let responseFormat: `Self`.ResponseFormat?
+
+
+
+
+ size
+
+ The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024.
+ +Swift
+public let size: Size?
+
+
+
+
+ user
+
+ A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. +https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids
+ +Swift
+public let user: String?
+
+ Swift
+public init(
+ image: Data,
+ prompt: String,
+ mask: Data? = nil,
+ model: Model? = nil,
+ n: Int? = nil,
+ responseFormat: Self.ResponseFormat? = nil,
+ size: Self.Size? = nil,
+ user: String? = nil
+)
+
+
+
+
+ CodingKeys
+
+ Swift
+public enum CodingKeys : String, CodingKey
+
+ Docs (100% documented)
+public enum CodingKeys : String, CodingKey
+
+
+
+
+ image
+
+ Swift
+case image
+
+
+
+
+ mask
+
+ Swift
+case mask
+
+
+
+
+ prompt
+
+ Swift
+case prompt
+
+
+
+
+ model
+
+ Swift
+case model
+
+
+
+
+ n
+
+ Swift
+case n
+
+
+
+
+ responseFormat
+
+ Swift
+case responseFormat = "response_format"
+
+
+
+
+ size
+
+ Swift
+case size
+
+
+
+
+ user
+
+ Swift
+case user
+
+ Docs (100% documented)
+public struct ImageVariationsQuery : Codable
+
+
+
+
+ ResponseFormat
+
+ Swift
+public typealias ResponseFormat = ImagesQuery.ResponseFormat
+
+
+
+
+ image
+
+ The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square.
+ +Swift
+public let image: Data
+
+
+
+
+ model
+
+ The model to use for image generation. Only dall-e-2 is supported at this time. +Defaults to dall-e-2
+ +Swift
+public let model: Model?
+
+
+
+
+ n
+
+ The number of images to generate. Must be between 1 and 10. +Defaults to 1
+ +Swift
+public let n: Int?
+
+
+
+
+ responseFormat
+
+ The format in which the generated images are returned. Must be one of url or b64_json. +Defaults to url
+ +Swift
+public let responseFormat: `Self`.ResponseFormat?
+
+
+
+
+ size
+
+ The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024. +Defaults to 1024x1024
+ +Swift
+public let size: String?
+
+
+
+
+ user
+
+ A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. +https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids
+ +Swift
+public let user: String?
+
+
+
+
+ init(image:model:n:responseFormat:size:user:)
+
+ Swift
+public init(
+ image: Data,
+ model: Model? = nil,
+ n: Int? = nil,
+ responseFormat: Self.ResponseFormat? = nil,
+ size: String? = nil,
+ user: String? = nil
+)
+
+
+
+
+ CodingKeys
+
+ Swift
+public enum CodingKeys : String, CodingKey
+
+ Docs (100% documented)
+public enum CodingKeys : String, CodingKey
+
+
+
+
+ image
+
+ Swift
+case image
+
+
+
+
+ model
+
+ Swift
+case model
+
+
+
+
+ n
+
+ Swift
+case n
+
+
+
+
+ responseFormat
+
+ Swift
+case responseFormat = "response_format"
+
+
+
+
+ size
+
+ Swift
+case size
+
+
+
+
+ user
+
+ Swift
+case user
+
+ Docs (100% documented)
+public struct ImagesQuery : Codable
+
+ Given a prompt and/or an input image, the model will generate a new image. +https://platform.openai.com/docs/guides/images
+ +
+
+
+ ResponseFormat
+
+ Swift
+public enum ResponseFormat : String, Codable, Equatable
+
+
+
+
+ prompt
+
+ A text description of the desired image(s). The maximum length is 1000 characters for dall-e-2 and 4000 characters for dall-e-3.
+ +Swift
+public let prompt: String
+
+
+
+
+ model
+
+ The model to use for image generation. +Defaults to dall-e-2
+ +Swift
+public let model: Model?
+
+
+
+
+ responseFormat
+
+ The format in which the generated images are returned. Must be one of url or b64_json. +Defaults to url
+ +Swift
+public let responseFormat: `Self`.ResponseFormat?
+
+
+
+
+ n
+
+ The number of images to generate. Must be between 1 and 10. For dall-e-3, only n=1 is supported. +Defaults to 1
+ +Swift
+public let n: Int?
+
+
+
+
+ size
+
+ The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024 for dall-e-2. Must be one of 1024x1024, 1792x1024, or 1024x1792 for dall-e-3 models. +Defaults to 1024x1024
+ +Swift
+public let size: `Self`.Size?
+
+
+
+
+ user
+
+ A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. +https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids
+ +Swift
+public let user: String?
+
+
+
+
+ style
+
+ The style of the generated images. Must be one of vivid or natural. Vivid causes the model to lean towards generating hyper-real and dramatic images. Natural causes the model to produce more natural, less hyper-real looking images. This param is only supported for dall-e-3. +Defaults to vivid
+ +Swift
+public let style: `Self`.Style?
+
+
+
+
+ quality
+
+ The quality of the image that will be generated. hd creates images with finer details and greater consistency across the image. This param is only supported for dall-e-3. +Defaults to standard
+ +Swift
+public let quality: `Self`.Quality?
+
+ Swift
+public init(
+ prompt: String,
+ model: Model? = nil,
+ n: Int? = nil,
+ quality:Self.Quality? = nil,
+ responseFormat: Self.ResponseFormat? = nil,
+ size: Size? = nil,
+ style: Self.Style? = nil,
+ user: String? = nil
+)
+
+
+
+
+ CodingKeys
+
+ Swift
+public enum CodingKeys : String, CodingKey
+
+
+
+
+ Style
+
+ Swift
+public enum Style : String, Codable, CaseIterable
+
+
+
+
+ Quality
+
+ Swift
+public enum Quality : String, Codable, CaseIterable
+
+
+
+
+ Size
+
+ Swift
+public enum Size : String, Codable, CaseIterable
+
+ Docs (100% documented)
+public enum CodingKeys : String, CodingKey
+
+
+
+
+ model
+
+ Swift
+case model
+
+
+
+
+ prompt
+
+ Swift
+case prompt
+
+
+
+
+ n
+
+ Swift
+case n
+
+
+
+
+ size
+
+ Swift
+case size
+
+
+
+
+ user
+
+ Swift
+case user
+
+
+
+
+ style
+
+ Swift
+case style
+
+
+
+
+ responseFormat
+
+ Swift
+case responseFormat = "response_format"
+
+
+
+
+ quality
+
+ Swift
+case quality
+
+ Docs (100% documented)
+public enum Quality : String, Codable, CaseIterable
+
+ Docs (100% documented)
+public enum ResponseFormat : String, Codable, Equatable
+
+ Docs (100% documented)
+public enum Size : String, Codable, CaseIterable
+
+
+
+
+ _256
+
+ Swift
+case _256 = "256x256"
+
+
+
+
+ _512
+
+ Swift
+case _512 = "512x512"
+
+
+
+
+ _1024
+
+ Swift
+case _1024 = "1024x1024"
+
+
+
+
+ _1792_1024
+
+ Swift
+case _1792_1024 = "1792x1024"
+
+
+
+
+ _1024_1792
+
+ Swift
+case _1024_1792 = "1024x1792"
+
+ Docs (100% documented)
+public enum Style : String, Codable, CaseIterable
+
+ Docs (100% documented)
+public struct ImagesResult : Codable, Equatable
+
+ Returns a list of image objects.
+ +
+
+
+ created
+
+ Swift
+public let created: TimeInterval
+
+
+
+
+ data
+
+ Swift
+public let data: [`Self`.Image]
+
+
+
+
+ Image
+
+ Represents the url or the content of an image generated by the OpenAI API.
+ + See more +Swift
+public struct Image : Codable, Equatable
+
+ Docs (100% documented)
+public struct Image : Codable, Equatable
+
+ Represents the url or the content of an image generated by the OpenAI API.
+ +
+
+
+ b64Json
+
+ The base64-encoded JSON of the generated image, if response_format is b64_json
+ +Swift
+public let b64Json: String?
+
+
+
+
+ revisedPrompt
+
+ The prompt that was used to generate the image, if there was any revision to the prompt.
+ +Swift
+public let revisedPrompt: String?
+
+
+
+
+ url
+
+ The URL of the generated image, if response_format is url (default).
+ +Swift
+public let url: String?
+
+
+
+
+ CodingKeys
+
+ Swift
+public enum CodingKeys : String, CodingKey
+
+ Docs (100% documented)
+public enum CodingKeys : String, CodingKey
+
+
+
+
+ b64Json
+
+ Swift
+case b64Json = "b64_json"
+
+
+
+
+ revisedPrompt
+
+ Swift
+case revisedPrompt = "revised_prompt"
+
+
+
+
+ url
+
+ Swift
+case url
+
+ Docs (100% documented)
+public struct ModelQuery : Codable, Equatable
+
+ Retrieves a model instance, providing basic information about the model such as the owner and permissioning.
+ +
+
+
+ model
+
+ The ID of the model to use for this request.
+ +Swift
+public let model: Model
+
+
+
+
+ init(model:)
+
+ Swift
+public init(model: Model)
+
+ Docs (100% documented)
+public struct ModelResult : Codable, Equatable
+
+ The model object matching the specified ID.
+ +
+
+
+ id
+
+ The model identifier, which can be referenced in the API endpoints.
+ +Swift
+public let id: String
+
+
+
+
+ created
+
+ The Unix timestamp (in seconds) when the model was created.
+ +Swift
+public let created: TimeInterval
+
+
+
+
+ object
+
+ The object type, which is always “model”.
+ +Swift
+public let object: String
+
+
+
+
+ ownedBy
+
+ The organization that owns the model.
+ +Swift
+public let ownedBy: String
+
+
+
+
+ CodingKeys
+
+ Swift
+public enum CodingKeys : String, CodingKey
+
+ Docs (100% documented)
+public enum CodingKeys : String, CodingKey
+
+
+
+
+ id
+
+ Swift
+case id
+
+
+
+
+ created
+
+ Swift
+case created
+
+
+
+
+ object
+
+ Swift
+case object
+
+
+
+
+ ownedBy
+
+ Swift
+case ownedBy = "owned_by"
+
+ Docs (100% documented)
+public struct ModelsResult : Codable, Equatable
+
+ A list of model objects.
+ +
+
+
+ data
+
+ A list of model objects.
+ +Swift
+public let data: [ModelResult]
+
+
+
+
+ object
+
+ The object type, which is always list
Swift
+public let object: String
+
+ Docs (100% documented)
+public struct ModerationsQuery : Codable
+
+
+
+
+ input
+
+ The input text to classify.
+ +Swift
+public let input: String
+
+
+
+
+ model
+
+ ID of the model to use.
+ +Swift
+public let model: Model?
+
+
+
+
+ init(input:model:)
+
+ Swift
+public init(input: String, model: Model? = nil)
+
+ Docs (100% documented)
+public struct ModerationsResult : Codable, Equatable
+extension ModerationsResult: Identifiable
+
+
+
+
+ Moderation
+
+ Swift
+public struct Moderation : Codable, Equatable
+
+
+
+
+ id
+
+ Swift
+public let id: String
+
+
+
+
+ model
+
+ Swift
+public let model: Model
+
+
+
+
+ results
+
+ Swift
+public let results: [`Self`.Moderation]
+
+ Docs (100% documented)
+public struct Moderation : Codable, Equatable
+
+
+
+
+ Categories
+
+ Swift
+public struct Categories : Codable, Equatable, Sequence
+
+
+
+
+ CategoryScores
+
+ Swift
+public struct CategoryScores : Codable, Equatable, Sequence
+
+
+
+
+ categories
+
+ Collection of per-category binary usage policies violation flags. For each category, the value is true if the model flags the corresponding category as violated, false otherwise.
+ +Swift
+public let categories: Categories
+
+
+
+
+ categoryScores
+
+ Collection of per-category raw scores output by the model, denoting the model’s confidence that the input violates the OpenAI’s policy for the category. The value is between 0 and 1, where higher values denote higher confidence. The scores should not be interpreted as probabilities.
+ +Swift
+public let categoryScores: CategoryScores
+
+
+
+
+ flagged
+
+ True if the model classifies the content as violating OpenAI’s usage policies, false otherwise.
+ +Swift
+public let flagged: Bool
+
+ Docs (100% documented)
+public struct Categories : Codable, Equatable, Sequence
+
+
+
+
+ harassment
+
+ Content that expresses, incites, or promotes harassing language towards any target.
+ +Swift
+public let harassment: Bool
+
+
+
+
+ harassmentThreatening
+
+ Harassment content that also includes violence or serious harm towards any target.
+ +Swift
+public let harassmentThreatening: Bool
+
+
+
+
+ hate
+
+ Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste.
+ +Swift
+public let hate: Bool
+
+
+
+
+ hateThreatening
+
+ Hateful content that also includes violence or serious harm towards the targeted group.
+ +Swift
+public let hateThreatening: Bool
+
+
+
+
+ selfHarm
+
+ Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders.
+ +Swift
+public let selfHarm: Bool
+
+
+
+
+ selfHarmIntent
+
+ Content where the speaker expresses that they are engaging or intend to engage in acts of self-harm, such as suicide, cutting, and eating disorders.
+ +Swift
+public let selfHarmIntent: Bool
+
+
+
+
+ selfHarmInstructions
+
+ Content that encourages performing acts of self-harm, such as suicide, cutting, and eating disorders, or that gives instructions or advice on how to commit such acts.
+ +Swift
+public let selfHarmInstructions: Bool
+
+
+
+
+ sexual
+
+ Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services (excluding sex education and wellness).
+ +Swift
+public let sexual: Bool
+
+
+
+
+ sexualMinors
+
+ Sexual content that includes an individual who is under 18 years old.
+ +Swift
+public let sexualMinors: Bool
+
+
+
+
+ violence
+
+ Content that promotes or glorifies violence or celebrates the suffering or humiliation of others.
+ +Swift
+public let violence: Bool
+
+
+
+
+ violenceGraphic
+
+ Violent content that depicts death, violence, or serious physical injury in extreme graphic detail.
+ +Swift
+public let violenceGraphic: Bool
+
+
+
+
+ CodingKeys
+
+ Swift
+public enum CodingKeys : String, CodingKey, CaseIterable
+
+
+
+
+ makeIterator()
+
+ Swift
+public func makeIterator() -> IndexingIterator<[(String, Bool)]>
+
+ Docs (100% documented)
+public enum CodingKeys : String, CodingKey, CaseIterable
+
+
+
+
+ harassment
+
+ Swift
+case harassment
+
+
+
+
+ harassmentThreatening
+
+ Swift
+case harassmentThreatening = "harassment/threatening"
+
+
+
+
+ hate
+
+ Swift
+case hate
+
+
+
+
+ hateThreatening
+
+ Swift
+case hateThreatening = "hate/threatening"
+
+
+
+
+ selfHarm
+
+ Swift
+case selfHarm = "self-harm"
+
+
+
+
+ selfHarmIntent
+
+ Swift
+case selfHarmIntent = "self-harm/intent"
+
+
+
+
+ selfHarmInstructions
+
+ Swift
+case selfHarmInstructions = "self-harm/instructions"
+
+
+
+
+ sexual
+
+ Swift
+case sexual
+
+
+
+
+ sexualMinors
+
+ Swift
+case sexualMinors = "sexual/minors"
+
+
+
+
+ violence
+
+ Swift
+case violence
+
+
+
+
+ violenceGraphic
+
+ Swift
+case violenceGraphic = "violence/graphic"
+
+ Docs (100% documented)
+public struct CategoryScores : Codable, Equatable, Sequence
+
+
+
+
+ harassment
+
+ Content that expresses, incites, or promotes harassing language towards any target.
+ +Swift
+public let harassment: Double
+
+
+
+
+ harassmentThreatening
+
+ Harassment content that also includes violence or serious harm towards any target.
+ +Swift
+public let harassmentThreatening: Double
+
+
+
+
+ hate
+
+ Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste.
+ +Swift
+public let hate: Double
+
+
+
+
+ hateThreatening
+
+ Hateful content that also includes violence or serious harm towards the targeted group.
+ +Swift
+public let hateThreatening: Double
+
+
+
+
+ selfHarm
+
+ Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders.
+ +Swift
+public let selfHarm: Double
+
+
+
+
+ selfHarmIntent
+
+ Content where the speaker expresses that they are engaging or intend to engage in acts of self-harm, such as suicide, cutting, and eating disorders.
+ +Swift
+public let selfHarmIntent: Double
+
+
+
+
+ selfHarmInstructions
+
+ Content that encourages performing acts of self-harm, such as suicide, cutting, and eating disorders, or that gives instructions or advice on how to commit such acts.
+ +Swift
+public let selfHarmInstructions: Double
+
+
+
+
+ sexual
+
+ Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services (excluding sex education and wellness).
+ +Swift
+public let sexual: Double
+
+
+
+
+ sexualMinors
+
+ Sexual content that includes an individual who is under 18 years old.
+ +Swift
+public let sexualMinors: Double
+
+
+
+
+ violence
+
+ Content that promotes or glorifies violence or celebrates the suffering or humiliation of others.
+ +Swift
+public let violence: Double
+
+
+
+
+ violenceGraphic
+
+ Violent content that depicts death, violence, or serious physical injury in extreme graphic detail.
+ +Swift
+public let violenceGraphic: Double
+
+
+
+
+ CodingKeys
+
+ Swift
+public enum CodingKeys : String, CodingKey, CaseIterable
+
+
+
+
+ makeIterator()
+
+ Swift
+public func makeIterator() -> IndexingIterator<[(String, Bool)]>
+
+ Docs (100% documented)
+public enum CodingKeys : String, CodingKey, CaseIterable
+
+
+
+
+ harassment
+
+ Swift
+case harassment
+
+
+
+
+ harassmentThreatening
+
+ Swift
+case harassmentThreatening = "harassment/threatening"
+
+
+
+
+ hate
+
+ Swift
+case hate
+
+
+
+
+ hateThreatening
+
+ Swift
+case hateThreatening = "hate/threatening"
+
+
+
+
+ selfHarm
+
+ Swift
+case selfHarm = "self-harm"
+
+
+
+
+ selfHarmIntent
+
+ Swift
+case selfHarmIntent = "self-harm/intent"
+
+
+
+
+ selfHarmInstructions
+
+ Swift
+case selfHarmInstructions = "self-harm/instructions"
+
+
+
+
+ sexual
+
+ Swift
+case sexual
+
+
+
+
+ sexualMinors
+
+ Swift
+case sexualMinors = "sexual/minors"
+
+
+
+
+ violence
+
+ Swift
+case violence
+
+
+
+
+ violenceGraphic
+
+ Swift
+case violenceGraphic = "violence/graphic"
+
+ Docs (100% documented)
+public struct Vector
+
+
+
+
+ cosineSimilarity(a:b:)
+
+ Returns the similarity between two vectors
+ +Swift
+public static func cosineSimilarity(a: [Double], b: [Double]) -> Double
+
+
+
+ a
+
+ |
+
+
+
+ The first vector + |
+
+
+ b
+
+ |
+
+
+
+ The second vector + |
+
+
+
+ cosineDifference(a:b:)
+
+ Returns the difference between two vectors. Cosine distance is defined as 1 - cosineSimilarity(a, b)
Swift
+public func cosineDifference(a: [Double], b: [Double]) -> Double
+
+
+
+ a
+
+ |
+
+
+
+ The first vector + |
+
+
+ b
+
+ |
+
+
+
+ The second vector + |
+
Docs (100% documented)
+The following type aliases are available globally.
+ +
+
+
+ Model
+
+ Defines all available OpenAI models supported by the library.
+ +Swift
+public typealias Model = String
+
+ Docs (100% documented)
+This repository contains Swift community-maintained implementation over OpenAI public API.
+ +OpenAI is a non-profit artificial intelligence research organization founded in San Francisco, California in 2015. It was created with the purpose of advancing digital intelligence in ways that benefit humanity as a whole and promote societal progress. The organization strives to develop AI (Artificial Intelligence) programs and systems that can think, act and adapt quickly on their own – autonomously. OpenAI’s mission is to ensure safe and responsible use of AI for civic good, economic growth and other public benefits; this includes cutting-edge research into important topics such as general AI safety, natural language processing, applied reinforcement learning methods, machine vision algorithms etc.
+ +++The OpenAI API can be applied to virtually any task that involves understanding or generating natural language or code. We offer a spectrum of models with different levels of power suitable for different tasks, as well as the ability to fine-tune your own custom models. These models can be used for everything from content generation to semantic search and classification.
+
OpenAI is available with Swift Package Manager. +The Swift Package Manager is a tool for automating the distribution of Swift code and is integrated into the swift compiler. +Once you have your Swift package set up, adding OpenAI as a dependency is as easy as adding it to the dependencies value of your Package.swift.
+dependencies: [
+ .package(url: "https://github.com/MacPaw/OpenAI.git", branch: "main")
+]
+
+To initialize API instance you need to obtain API token from your Open AI organization.
+ +Remember that your API key is a secret! Do not share it with others or expose it in any client-side code (browsers, apps). Production requests must be routed through your own backend server where your API key can be securely loaded from an environment variable or key management service.
+ + + +Once you have a token, you can initialize OpenAI
class, which is an entry point to the API.
++⚠️ OpenAI strongly recommends developers of client-side applications proxy requests through a separate backend service to keep their API key safe. API keys can access and manipulate customer billing, usage, and organizational data, so it’s a significant risk to expose them.
+
let openAI = OpenAI(apiToken: "YOUR_TOKEN_HERE")
+
+
+Optionally you can initialize OpenAI
with token, organization identifier and timeoutInterval.
let configuration = OpenAI.Configuration(token: "YOUR_TOKEN_HERE", organizationIdentifier: "YOUR_ORGANIZATION_ID_HERE", timeoutInterval: 60.0)
+let openAI = OpenAI(configuration: configuration)
+
+
+Once you possess the token, and the instance is initialized, you are ready to make requests.
+Given a prompt, the model will return one or more predicted completions, and can also return the probabilities of alternative tokens at each position.
+ +Request
+struct CompletionsQuery: Codable {
+ /// ID of the model to use.
+ public let model: Model
+ /// The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.
+ public let prompt: String
+ /// What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer.
+ public let temperature: Double?
+ /// The maximum number of tokens to generate in the completion.
+ public let maxTokens: Int?
+ /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ public let topP: Double?
+ /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
+ public let frequencyPenalty: Double?
+ /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
+ public let presencePenalty: Double?
+ /// Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
+ public let stop: [String]?
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
+ public let user: String?
+}
+
+
+Response
+struct CompletionsResult: Codable, Equatable {
+ public struct Choice: Codable, Equatable {
+ public let text: String
+ public let index: Int
+ }
+
+ public let id: String
+ public let object: String
+ public let created: TimeInterval
+ public let model: Model
+ public let choices: [Choice]
+ public let usage: Usage
+}
+
+
+Example
+let query = CompletionsQuery(model: .textDavinci_003, prompt: "What is 42?", temperature: 0, maxTokens: 100, topP: 1, frequencyPenalty: 0, presencePenalty: 0, stop: ["\\n"])
+openAI.completions(query: query) { result in
+ //Handle result here
+}
+//or
+let result = try await openAI.completions(query: query)
+
+(lldb) po result
+▿ CompletionsResult
+ - id : "cmpl-6P9be2p2fQlwB7zTOl0NxCOetGmX3"
+ - object : "text_completion"
+ - created : 1671453146.0
+ - model : OpenAI.Model.textDavinci_003
+ ▿ choices : 1 element
+ ▿ 0 : Choice
+ - text : "\n\n42 is the answer to the ultimate question of life, the universe, and everything, according to the book The Hitchhiker\'s Guide to the Galaxy."
+ - index : 0
+
+Completions streaming is available by using completionsStream
function. Tokens will be sent one-by-one.
Closures
+openAI.completionsStream(query: query) { partialResult in
+ switch partialResult {
+ case .success(let result):
+ print(result.choices)
+ case .failure(let error):
+ //Handle chunk error here
+ }
+} completion: { error in
+ //Handle streaming error here
+}
+
+
+Combine
+openAI
+ .completionsStream(query: query)
+ .sink { completion in
+ //Handle completion result here
+ } receiveValue: { result in
+ //Handle chunk here
+ }.store(in: &cancellables)
+
+
+Structured concurrency
+for try await result in openAI.completionsStream(query: query) {
+ //Handle result here
+}
+
+
+Review Completions Documentation for more info.
+Using the OpenAI Chat API, you can build your own applications with gpt-3.5-turbo
to do things like:
Request
+ struct ChatQuery: Codable {
+ /// ID of the model to use. Currently, only gpt-3.5-turbo and gpt-3.5-turbo-0301 are supported.
+ public let model: Model
+ /// The messages to generate chat completions for
+ public let messages: [Chat]
+ /// A list of functions the model may generate JSON inputs for.
+ public let functions: [ChatFunctionDeclaration]?
+ /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and We generally recommend altering this or top_p but not both.
+ public let temperature: Double?
+ /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ public let topP: Double?
+ /// How many chat completion choices to generate for each input message.
+ public let n: Int?
+ /// Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
+ public let stop: [String]?
+ /// The maximum number of tokens to generate in the completion.
+ public let maxTokens: Int?
+ /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
+ public let presencePenalty: Double?
+ /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
+ public let frequencyPenalty: Double?
+ ///Modify the likelihood of specified tokens appearing in the completion.
+ public let logitBias: [String:Int]?
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
+ public let user: String?
+}
+
+
+Response
+struct ChatResult: Codable, Equatable {
+ public struct Choice: Codable, Equatable {
+ public let index: Int
+ public let message: Chat
+ public let finishReason: String
+ }
+
+ public struct Usage: Codable, Equatable {
+ public let promptTokens: Int
+ public let completionTokens: Int
+ public let totalTokens: Int
+ }
+
+ public let id: String
+ public let object: String
+ public let created: TimeInterval
+ public let model: Model
+ public let choices: [Choice]
+ public let usage: Usage
+}
+
+
+Example
+let query = ChatQuery(model: .gpt3_5Turbo, messages: [.init(role: .user, content: "who are you")])
+let result = try await openAI.chats(query: query)
+
+(lldb) po result
+▿ ChatResult
+ - id : "chatcmpl-6pwjgxGV2iPP4QGdyOLXnTY0LE3F8"
+ - object : "chat.completion"
+ - created : 1677838528.0
+ - model : "gpt-3.5-turbo-0301"
+ ▿ choices : 1 element
+ ▿ 0 : Choice
+ - index : 0
+ ▿ message : Chat
+ - role : "assistant"
+ - content : "\n\nI\'m an AI language model developed by OpenAI, created to provide assistance and support for various tasks such as answering questions, generating text, and providing recommendations. Nice to meet you!"
+ - finish_reason : "stop"
+ ▿ usage : Usage
+ - prompt_tokens : 10
+ - completion_tokens : 39
+ - total_tokens : 49
+
+Chats streaming is available by using chatStream
function. Tokens will be sent one-by-one.
Closures
+openAI.chatsStream(query: query) { partialResult in
+ switch partialResult {
+ case .success(let result):
+ print(result.choices)
+ case .failure(let error):
+ //Handle chunk error here
+ }
+} completion: { error in
+ //Handle streaming error here
+}
+
+
+Combine
+openAI
+ .chatsStream(query: query)
+ .sink { completion in
+ //Handle completion result here
+ } receiveValue: { result in
+ //Handle chunk here
+ }.store(in: &cancellables)
+
+
+Structured concurrency
+for try await result in openAI.chatsStream(query: query) {
+ //Handle result here
+}
+
+
+Function calls
+let openAI = OpenAI(apiToken: "...")
+// Declare functions which GPT-3 might decide to call.
+let functions = [
+ ChatFunctionDeclaration(
+ name: "get_current_weather",
+ description: "Get the current weather in a given location",
+ parameters:
+ JSONSchema(
+ type: .object,
+ properties: [
+ "location": .init(type: .string, description: "The city and state, e.g. San Francisco, CA"),
+ "unit": .init(type: .string, enumValues: ["celsius", "fahrenheit"])
+ ],
+ required: ["location"]
+ )
+ )
+]
+let query = ChatQuery(
+ model: "gpt-3.5-turbo-0613", // 0613 is the earliest version with function calls support.
+ messages: [
+ Chat(role: .user, content: "What's the weather like in Boston?")
+ ],
+ functions: functions
+)
+let result = try await openAI.chats(query: query)
+
+
+Result will be (serialized as JSON here for readability):
+{
+ "id": "chatcmpl-1234",
+ "object": "chat.completion",
+ "created": 1686000000,
+ "model": "gpt-3.5-turbo-0613",
+ "choices": [
+ {
+ "index": 0,
+ "message": {
+ "role": "assistant",
+ "function_call": {
+ "name": "get_current_weather",
+ "arguments": "{\n \"location\": \"Boston, MA\"\n}"
+ }
+ },
+ "finish_reason": "function_call"
+ }
+ ],
+ "usage": { "total_tokens": 100, "completion_tokens": 18, "prompt_tokens": 82 }
+}
+
+
+
+Review Chat Documentation for more info.
+Given a prompt and/or an input image, the model will generate a new image.
+ +As Artificial Intelligence continues to develop, so too does the intriguing concept of Dall-E. Developed by OpenAI, a research lab for artificial intelligence purposes, Dall-E has been classified as an AI system that can generate images based on descriptions provided by humans. With its potential applications spanning from animation and illustration to design and engineering - not to mention the endless possibilities in between - it’s easy to see why there is such excitement over this new technology.
+Request
+struct ImagesQuery: Codable {
+ /// A text description of the desired image(s). The maximum length is 1000 characters.
+ public let prompt: String
+ /// The number of images to generate. Must be between 1 and 10.
+ public let n: Int?
+ /// The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024.
+ public let size: String?
+}
+
+
+Response
+struct ImagesResult: Codable, Equatable {
+ public struct URLResult: Codable, Equatable {
+ public let url: String
+ }
+ public let created: TimeInterval
+ public let data: [URLResult]
+}
+
+
+Example
+let query = ImagesQuery(prompt: "White cat with heterochromia sitting on the kitchen table", n: 1, size: "1024x1024")
+openAI.images(query: query) { result in
+ //Handle result here
+}
+//or
+let result = try await openAI.images(query: query)
+
+(lldb) po result
+▿ ImagesResult
+ - created : 1671453505.0
+ ▿ data : 1 element
+ ▿ 0 : URLResult
+ - url : "https://oaidalleapiprodscus.blob.core.windows.net/private/org-CWjU5cDIzgCcVjq10pp5yX5Q/user-GoBXgChvLBqLHdBiMJBUbPqF/img-WZVUK2dOD4HKbKwW1NeMJHBd.png?st=2022-12-19T11%3A38%3A25Z&se=2022-12-19T13%3A38%3A25Z&sp=r&sv=2021-08-06&sr=b&rscd=inline&rsct=image/png&skoid=6aaadede-4fb3-4698-a8f6-684d7786b067&sktid=a48cca56-e6da-484e-a814-9c849652bcb3&skt=2022-12-19T09%3A35%3A16Z&ske=2022-12-20T09%3A35%3A16Z&sks=b&skv=2021-08-06&sig=mh52rmtbQ8CXArv5bMaU6lhgZHFBZz/ePr4y%2BJwLKOc%3D"
+
+
+Generated image
+ + +Creates an edited or extended image given an original image and a prompt.
+ +Request
+public struct ImageEditsQuery: Codable {
+ /// The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask.
+ public let image: Data
+ public let fileName: String
+ /// An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where image should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as image.
+ public let mask: Data?
+ public let maskFileName: String?
+ /// A text description of the desired image(s). The maximum length is 1000 characters.
+ public let prompt: String
+ /// The number of images to generate. Must be between 1 and 10.
+ public let n: Int?
+ /// The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024.
+ public let size: String?
+}
+
+
+Response
+ +Uses the ImagesResult response similarly to ImagesQuery.
+ +Example
+let data = image.pngData()
+let query = ImageEditQuery(image: data, fileName: "whitecat.png", prompt: "White cat with heterochromia sitting on the kitchen table with a bowl of food", n: 1, size: "1024x1024")
+openAI.imageEdits(query: query) { result in
+ //Handle result here
+}
+//or
+let result = try await openAI.imageEdits(query: query)
+
+Creates a variation of a given image.
+ +Request
+public struct ImageVariationsQuery: Codable {
+    /// The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square.
+ public let image: Data
+ public let fileName: String
+ /// The number of images to generate. Must be between 1 and 10.
+ public let n: Int?
+ /// The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024.
+ public let size: String?
+}
+
+
+Response
+ +Uses the ImagesResult response similarly to ImagesQuery.
+ +Example
+let data = image.pngData()
+let query = ImageVariationQuery(image: data, fileName: "whitecat.png", n: 1, size: "1024x1024")
+openAI.imageVariations(query: query) { result in
+ //Handle result here
+}
+//or
+let result = try await openAI.imageVariations(query: query)
+
+
+Review Images Documentation for more info.
+The speech to text API provides two endpoints, transcriptions and translations, based on our state-of-the-art open source large-v2 Whisper model. They can be used to:
+ +Transcribe audio into whatever language the audio is in. +Translate and transcribe the audio into english. +File uploads are currently limited to 25 MB and the following input file types are supported: mp3, mp4, mpeg, mpga, m4a, wav, and webm.
+This function sends an AudioSpeechQuery
to the OpenAI API to create audio speech from text using a specific voice and format.
Learn more about voices.
+Learn more about models.
Request:
+public struct AudioSpeechQuery: Codable, Equatable {
+ //...
+ public let model: Model // tts-1 or tts-1-hd
+ public let input: String
+ public let voice: AudioSpeechVoice
+ public let responseFormat: AudioSpeechResponseFormat
+ public let speed: String? // Initializes with Double?
+ //...
+}
+
+
+Response:
+/// Audio data for one of the following formats :`mp3`, `opus`, `aac`, `flac`
+public let audioData: Data?
+
+
+Example:
+let query = AudioSpeechQuery(model: .tts_1, input: "Hello, world!", voice: .alloy, responseFormat: .mp3, speed: 1.0)
+
+openAI.audioCreateSpeech(query: query) { result in
+ // Handle response here
+}
+//or
+let result = try await openAI.audioCreateSpeech(query: query)
+
+
+OpenAI Create Speech – Documentation
+Transcribes audio into the input language.
+ +Request
+public struct AudioTranscriptionQuery: Codable, Equatable {
+
+ public let file: Data
+ public let fileName: String
+ public let model: Model
+
+ public let prompt: String?
+ public let temperature: Double?
+ public let language: String?
+}
+
+
+Response
+public struct AudioTranscriptionResult: Codable, Equatable {
+
+ public let text: String
+}
+
+
+Example
+let data = Data(contentsOfURL:...)
+let query = AudioTranscriptionQuery(file: data, fileName: "audio.m4a", model: .whisper_1)
+
+openAI.audioTranscriptions(query: query) { result in
+ //Handle result here
+}
+//or
+let result = try await openAI.audioTranscriptions(query: query)
+
+Translates audio into English.
+ +Request
+public struct AudioTranslationQuery: Codable, Equatable {
+
+ public let file: Data
+ public let fileName: String
+ public let model: Model
+
+ public let prompt: String?
+ public let temperature: Double?
+}
+
+
+Response
+public struct AudioTranslationResult: Codable, Equatable {
+
+ public let text: String
+}
+
+
+Example
+let data = Data(contentsOfURL:...)
+let query = AudioTranslationQuery(file: data, fileName: "audio.m4a", model: .whisper_1)
+
+openAI.audioTranslations(query: query) { result in
+ //Handle result here
+}
+//or
+let result = try await openAI.audioTranslations(query: query)
+
+
+Review Audio Documentation for more info.
+Creates a new edit for the provided input, instruction, and parameters.
+ +Request
+struct EditsQuery: Codable {
+ /// ID of the model to use.
+ public let model: Model
+    /// The input text to use as a starting point for the edit.
+ public let input: String?
+ /// The instruction that tells the model how to edit the prompt.
+ public let instruction: String
+    /// How many edits to generate for the input and instruction.
+ public let n: Int?
+ /// What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer.
+ public let temperature: Double?
+ /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ public let topP: Double?
+}
+
+
+Response
+struct EditsResult: Codable, Equatable {
+
+ public struct Choice: Codable, Equatable {
+ public let text: String
+ public let index: Int
+ }
+
+ public struct Usage: Codable, Equatable {
+ public let promptTokens: Int
+ public let completionTokens: Int
+ public let totalTokens: Int
+
+ enum CodingKeys: String, CodingKey {
+ case promptTokens = "prompt_tokens"
+ case completionTokens = "completion_tokens"
+ case totalTokens = "total_tokens"
+ }
+ }
+
+ public let object: String
+ public let created: TimeInterval
+ public let choices: [Choice]
+ public let usage: Usage
+}
+
+
+Example
+let query = EditsQuery(model: .gpt4, input: "What day of the wek is it?", instruction: "Fix the spelling mistakes")
+openAI.edits(query: query) { result in
+ //Handle response here
+}
+//or
+let result = try await openAI.edits(query: query)
+
+
+Review Edits Documentation for more info.
+Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms.
+ +Request
+struct EmbeddingsQuery: Codable {
+ /// ID of the model to use.
+ public let model: Model
+ /// Input text to get embeddings for
+ public let input: String
+}
+
+
+Response
+struct EmbeddingsResult: Codable, Equatable {
+
+ public struct Embedding: Codable, Equatable {
+
+ public let object: String
+ public let embedding: [Double]
+ public let index: Int
+ }
+ public let data: [Embedding]
+ public let usage: Usage
+}
+
+
+Example
+let query = EmbeddingsQuery(model: .textSearchBabbageDoc, input: "The food was delicious and the waiter...")
+openAI.embeddings(query: query) { result in
+ //Handle response here
+}
+//or
+let result = try await openAI.embeddings(query: query)
+
+(lldb) po result
+▿ EmbeddingsResult
+ ▿ data : 1 element
+ ▿ 0 : Embedding
+ - object : "embedding"
+ ▿ embedding : 2048 elements
+ - 0 : 0.0010535449
+ - 1 : 0.024234328
+ - 2 : -0.0084999
+ - 3 : 0.008647452
+ .......
+ - 2044 : 0.017536353
+ - 2045 : -0.005897616
+ - 2046 : -0.026559394
+ - 2047 : -0.016633155
+ - index : 0
+
+(lldb)
+
+
+Review Embeddings Documentation for more info.
+Models are represented as a typealias typealias Model = String
.
public extension Model {
+ static let gpt4_turbo_preview = "gpt-4-turbo-preview"
+ static let gpt4_vision_preview = "gpt-4-vision-preview"
+ static let gpt4_0125_preview = "gpt-4-0125-preview"
+ static let gpt4_1106_preview = "gpt-4-1106-preview"
+ static let gpt4 = "gpt-4"
+ static let gpt4_0613 = "gpt-4-0613"
+ static let gpt4_0314 = "gpt-4-0314"
+ static let gpt4_32k = "gpt-4-32k"
+ static let gpt4_32k_0613 = "gpt-4-32k-0613"
+ static let gpt4_32k_0314 = "gpt-4-32k-0314"
+
+ static let gpt3_5Turbo = "gpt-3.5-turbo"
+ static let gpt3_5Turbo_0125 = "gpt-3.5-turbo-0125"
+ static let gpt3_5Turbo_1106 = "gpt-3.5-turbo-1106"
+ static let gpt3_5Turbo_0613 = "gpt-3.5-turbo-0613"
+ static let gpt3_5Turbo_0301 = "gpt-3.5-turbo-0301"
+ static let gpt3_5Turbo_16k = "gpt-3.5-turbo-16k"
+ static let gpt3_5Turbo_16k_0613 = "gpt-3.5-turbo-16k-0613"
+
+ static let textDavinci_003 = "text-davinci-003"
+ static let textDavinci_002 = "text-davinci-002"
+ static let textCurie = "text-curie-001"
+ static let textBabbage = "text-babbage-001"
+ static let textAda = "text-ada-001"
+
+ static let textDavinci_001 = "text-davinci-001"
+ static let codeDavinciEdit_001 = "code-davinci-edit-001"
+
+ static let tts_1 = "tts-1"
+ static let tts_1_hd = "tts-1-hd"
+
+ static let whisper_1 = "whisper-1"
+
+ static let dall_e_2 = "dall-e-2"
+ static let dall_e_3 = "dall-e-3"
+
+ static let davinci = "davinci"
+ static let curie = "curie"
+ static let babbage = "babbage"
+ static let ada = "ada"
+
+ static let textEmbeddingAda = "text-embedding-ada-002"
+ static let textSearchAda = "text-search-ada-doc-001"
+ static let textSearchBabbageDoc = "text-search-babbage-doc-001"
+ static let textSearchBabbageQuery001 = "text-search-babbage-query-001"
+ static let textEmbedding3 = "text-embedding-3-small"
+ static let textEmbedding3Large = "text-embedding-3-large"
+
+ static let textModerationStable = "text-moderation-stable"
+ static let textModerationLatest = "text-moderation-latest"
+ static let moderation = "text-moderation-007"
+}
+
+
+GPT-4 models are supported.
+ +As an example: To use the gpt-4-turbo-preview
model, pass .gpt4_turbo_preview
as the parameter to the ChatQuery
init.
let query = ChatQuery(model: .gpt4_turbo_preview, messages: [
+ .init(role: .system, content: "You are Librarian-GPT. You know everything about the books."),
+ .init(role: .user, content: "Who wrote Harry Potter?")
+])
+let result = try await openAI.chats(query: query)
+XCTAssertFalse(result.choices.isEmpty)
+
+
+You can also pass a custom string if you need to use a model that is not represented above.
+Lists the currently available models.
+ +Response
+public struct ModelsResult: Codable, Equatable {
+
+ public let data: [ModelResult]
+ public let object: String
+}
+
+
+
+Example
+openAI.models() { result in
+ //Handle result here
+}
+//or
+let result = try await openAI.models()
+
+Retrieves a model instance, providing ownership information.
+ +Request
+public struct ModelQuery: Codable, Equatable {
+
+ public let model: Model
+}
+
+
+Response
+public struct ModelResult: Codable, Equatable {
+
+ public let id: Model
+ public let object: String
+ public let ownedBy: String
+}
+
+
+Example
+let query = ModelQuery(model: .gpt4)
+openAI.model(query: query) { result in
+ //Handle result here
+}
+//or
+let result = try await openAI.model(query: query)
+
+
+Review Models Documentation for more info.
+Given an input text, outputs whether the model classifies it as violating OpenAI’s content policy.
+ +Request
+public struct ModerationsQuery: Codable {
+
+ public let input: String
+ public let model: Model?
+}
+
+
+Response
+public struct ModerationsResult: Codable, Equatable {
+
+ public let id: String
+ public let model: Model
+ public let results: [CategoryResult]
+}
+
+
+Example
+let query = ModerationsQuery(input: "I want to kill them.")
+openAI.moderations(query: query) { result in
+ //Handle result here
+}
+//or
+let result = try await openAI.moderations(query: query)
+
+
+Review Moderations Documentation for more info.
+The component comes with several handy utility functions to work with the vectors.
+public struct Vector {
+
+ /// Returns the similarity between two vectors
+ ///
+ /// - Parameters:
+ /// - a: The first vector
+ /// - b: The second vector
+ public static func cosineSimilarity(a: [Double], b: [Double]) -> Double {
+ return dot(a, b) / (mag(a) * mag(b))
+ }
+
+ /// Returns the difference between two vectors. Cosine distance is defined as `1 - cosineSimilarity(a, b)`
+ ///
+ /// - Parameters:
+ /// - a: The first vector
+ /// - b: The second vector
+ public func cosineDifference(a: [Double], b: [Double]) -> Double {
+ return 1 - Self.cosineSimilarity(a: a, b: b)
+ }
+}
+
+
+Example
+let vector1 = [0.213123, 0.3214124, 0.421412, 0.3214521251, 0.412412, 0.3214124, 0.1414124, 0.3214521251, 0.213123, 0.3214124, 0.1414124, 0.4214214, 0.213123, 0.3214124, 0.1414124, 0.3214521251, 0.213123, 0.3214124, 0.1414124, 0.3214521251]
+let vector2 = [0.213123, 0.3214124, 0.1414124, 0.3214521251, 0.213123, 0.3214124, 0.1414124, 0.3214521251, 0.213123, 0.511515, 0.1414124, 0.3214521251, 0.213123, 0.3214124, 0.1414124, 0.3214521251, 0.213123, 0.3214124, 0.1414124, 0.3213213]
+let similarity = Vector.cosineSimilarity(a: vector1, b: vector2)
+print(similarity) //0.9510201910206734
+
+
+++ + + +In data analysis, cosine similarity is a measure of similarity between two sequences of numbers.
+
Read more about Cosine Similarity here.
+The library contains built-in Combine extensions.
+func completions(query: CompletionsQuery) -> AnyPublisher<CompletionsResult, Error>
+func images(query: ImagesQuery) -> AnyPublisher<ImagesResult, Error>
+func embeddings(query: EmbeddingsQuery) -> AnyPublisher<EmbeddingsResult, Error>
+func chats(query: ChatQuery) -> AnyPublisher<ChatResult, Error>
+func edits(query: EditsQuery) -> AnyPublisher<EditsResult, Error>
+func model(query: ModelQuery) -> AnyPublisher<ModelResult, Error>
+func models() -> AnyPublisher<ModelsResult, Error>
+func moderations(query: ModerationsQuery) -> AnyPublisher<ModerationsResult, Error>
+func audioTranscriptions(query: AudioTranscriptionQuery) -> AnyPublisher<AudioTranscriptionResult, Error>
+func audioTranslations(query: AudioTranslationQuery) -> AnyPublisher<AudioTranslationResult, Error>
+
+You can find example iOS application in Demo folder.
+ + +Make your Pull Requests clear and obvious to anyone viewing them.
+Set main
as your target branch.
Feat: ...
for new features and new functionality implementations.Bug: ...
for bug fixes.Fix: ...
for minor issues fixing, like typos or inaccuracies in code.Chore: ...
for boring stuff like code polishing, refactoring, deprecation fixing etc.PR naming example: Feat: Add Threads API handling
or Bug: Fix message result duplication
Branch naming example: feat/add-threads-API-handling
or bug/fix-message-result-duplication
…
+ +…
+ +…
+ +…
+We’d appreciate you including tests with your code where needed and possible. ❤️
+MIT License
+
+Copyright (c) 2023 MacPaw Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+
+ Defines all available OpenAI models supported by the library.
"},"Structs/Vector.html#/s:6OpenAI6VectorV16cosineSimilarity1a1bSdSaySdG_AGtFZ":{"name":"cosineSimilarity(a:b:)","abstract":"Returns the similarity between two vectors
","parent_name":"Vector"},"Structs/Vector.html#/s:6OpenAI6VectorV16cosineDifference1a1bSdSaySdG_AGtF":{"name":"cosineDifference(a:b:)","abstract":"Returns the difference between two vectors. Cosine distance is defined as 1 - cosineSimilarity(a, b)
Content that expresses, incites, or promotes harassing language towards any target.
","parent_name":"CategoryScores"},"Structs/ModerationsResult/Moderation/CategoryScores.html#/s:6OpenAI17ModerationsResultV10ModerationV14CategoryScoresV21harassmentThreateningSdvp":{"name":"harassmentThreatening","abstract":"Harassment content that also includes violence or serious harm towards any target.
","parent_name":"CategoryScores"},"Structs/ModerationsResult/Moderation/CategoryScores.html#/s:6OpenAI17ModerationsResultV10ModerationV14CategoryScoresV4hateSdvp":{"name":"hate","abstract":"Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste.
","parent_name":"CategoryScores"},"Structs/ModerationsResult/Moderation/CategoryScores.html#/s:6OpenAI17ModerationsResultV10ModerationV14CategoryScoresV15hateThreateningSdvp":{"name":"hateThreatening","abstract":"Hateful content that also includes violence or serious harm towards the targeted group.
","parent_name":"CategoryScores"},"Structs/ModerationsResult/Moderation/CategoryScores.html#/s:6OpenAI17ModerationsResultV10ModerationV14CategoryScoresV8selfHarmSdvp":{"name":"selfHarm","abstract":"Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders.
","parent_name":"CategoryScores"},"Structs/ModerationsResult/Moderation/CategoryScores.html#/s:6OpenAI17ModerationsResultV10ModerationV14CategoryScoresV14selfHarmIntentSdvp":{"name":"selfHarmIntent","abstract":"Content where the speaker expresses that they are engaging or intend to engage in acts of self-harm, such as suicide, cutting, and eating disorders.
","parent_name":"CategoryScores"},"Structs/ModerationsResult/Moderation/CategoryScores.html#/s:6OpenAI17ModerationsResultV10ModerationV14CategoryScoresV20selfHarmInstructionsSdvp":{"name":"selfHarmInstructions","abstract":"Content that encourages performing acts of self-harm, such as suicide, cutting, and eating disorders, or that gives instructions or advice on how to commit such acts.
","parent_name":"CategoryScores"},"Structs/ModerationsResult/Moderation/CategoryScores.html#/s:6OpenAI17ModerationsResultV10ModerationV14CategoryScoresV6sexualSdvp":{"name":"sexual","abstract":"Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services (excluding sex education and wellness).
","parent_name":"CategoryScores"},"Structs/ModerationsResult/Moderation/CategoryScores.html#/s:6OpenAI17ModerationsResultV10ModerationV14CategoryScoresV12sexualMinorsSdvp":{"name":"sexualMinors","abstract":"Sexual content that includes an individual who is under 18 years old.
","parent_name":"CategoryScores"},"Structs/ModerationsResult/Moderation/CategoryScores.html#/s:6OpenAI17ModerationsResultV10ModerationV14CategoryScoresV8violenceSdvp":{"name":"violence","abstract":"Content that promotes or glorifies violence or celebrates the suffering or humiliation of others.
","parent_name":"CategoryScores"},"Structs/ModerationsResult/Moderation/CategoryScores.html#/s:6OpenAI17ModerationsResultV10ModerationV14CategoryScoresV15violenceGraphicSdvp":{"name":"violenceGraphic","abstract":"Violent content that depicts death, violence, or serious physical injury in extreme graphic detail.
","parent_name":"CategoryScores"},"Structs/ModerationsResult/Moderation/CategoryScores/CodingKeys.html":{"name":"CodingKeys","parent_name":"CategoryScores"},"Structs/ModerationsResult/Moderation/CategoryScores.html#/s:ST12makeIterator0B0QzyF":{"name":"makeIterator()","parent_name":"CategoryScores"},"Structs/ModerationsResult/Moderation/Categories/CodingKeys.html#/s:6OpenAI17ModerationsResultV10ModerationV10CategoriesV10CodingKeysO10harassmentyA2ImF":{"name":"harassment","parent_name":"CodingKeys"},"Structs/ModerationsResult/Moderation/Categories/CodingKeys.html#/s:6OpenAI17ModerationsResultV10ModerationV10CategoriesV10CodingKeysO21harassmentThreateningyA2ImF":{"name":"harassmentThreatening","parent_name":"CodingKeys"},"Structs/ModerationsResult/Moderation/Categories/CodingKeys.html#/s:6OpenAI17ModerationsResultV10ModerationV10CategoriesV10CodingKeysO4hateyA2ImF":{"name":"hate","parent_name":"CodingKeys"},"Structs/ModerationsResult/Moderation/Categories/CodingKeys.html#/s:6OpenAI17ModerationsResultV10ModerationV10CategoriesV10CodingKeysO15hateThreateningyA2ImF":{"name":"hateThreatening","parent_name":"CodingKeys"},"Structs/ModerationsResult/Moderation/Categories/CodingKeys.html#/s:6OpenAI17ModerationsResultV10ModerationV10CategoriesV10CodingKeysO8selfHarmyA2ImF":{"name":"selfHarm","parent_name":"CodingKeys"},"Structs/ModerationsResult/Moderation/Categories/CodingKeys.html#/s:6OpenAI17ModerationsResultV10ModerationV10CategoriesV10CodingKeysO14selfHarmIntentyA2ImF":{"name":"selfHarmIntent","parent_name":"CodingKeys"},"Structs/ModerationsResult/Moderation/Categories/CodingKeys.html#/s:6OpenAI17ModerationsResultV10ModerationV10CategoriesV10CodingKeysO20selfHarmInstructionsyA2ImF":{"name":"selfHarmInstructions","parent_name":"CodingKeys"},"Structs/ModerationsResult/Moderation/Categories/CodingKeys.html#/s:6OpenAI17ModerationsResultV10ModerationV10CategoriesV10CodingKeysO6sexualyA2ImF":{"name":"sexual","parent_name":"CodingKeys"},"Structs/ModerationsResult/Moderation/Categ
ories/CodingKeys.html#/s:6OpenAI17ModerationsResultV10ModerationV10CategoriesV10CodingKeysO12sexualMinorsyA2ImF":{"name":"sexualMinors","parent_name":"CodingKeys"},"Structs/ModerationsResult/Moderation/Categories/CodingKeys.html#/s:6OpenAI17ModerationsResultV10ModerationV10CategoriesV10CodingKeysO8violenceyA2ImF":{"name":"violence","parent_name":"CodingKeys"},"Structs/ModerationsResult/Moderation/Categories/CodingKeys.html#/s:6OpenAI17ModerationsResultV10ModerationV10CategoriesV10CodingKeysO15violenceGraphicyA2ImF":{"name":"violenceGraphic","parent_name":"CodingKeys"},"Structs/ModerationsResult/Moderation/Categories.html#/s:6OpenAI17ModerationsResultV10ModerationV10CategoriesV10harassmentSbvp":{"name":"harassment","abstract":"Content that expresses, incites, or promotes harassing language towards any target.
","parent_name":"Categories"},"Structs/ModerationsResult/Moderation/Categories.html#/s:6OpenAI17ModerationsResultV10ModerationV10CategoriesV21harassmentThreateningSbvp":{"name":"harassmentThreatening","abstract":"Harassment content that also includes violence or serious harm towards any target.
","parent_name":"Categories"},"Structs/ModerationsResult/Moderation/Categories.html#/s:6OpenAI17ModerationsResultV10ModerationV10CategoriesV4hateSbvp":{"name":"hate","abstract":"Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste.
","parent_name":"Categories"},"Structs/ModerationsResult/Moderation/Categories.html#/s:6OpenAI17ModerationsResultV10ModerationV10CategoriesV15hateThreateningSbvp":{"name":"hateThreatening","abstract":"Hateful content that also includes violence or serious harm towards the targeted group.
","parent_name":"Categories"},"Structs/ModerationsResult/Moderation/Categories.html#/s:6OpenAI17ModerationsResultV10ModerationV10CategoriesV8selfHarmSbvp":{"name":"selfHarm","abstract":"Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders.
","parent_name":"Categories"},"Structs/ModerationsResult/Moderation/Categories.html#/s:6OpenAI17ModerationsResultV10ModerationV10CategoriesV14selfHarmIntentSbvp":{"name":"selfHarmIntent","abstract":"Content where the speaker expresses that they are engaging or intend to engage in acts of self-harm, such as suicide, cutting, and eating disorders.
","parent_name":"Categories"},"Structs/ModerationsResult/Moderation/Categories.html#/s:6OpenAI17ModerationsResultV10ModerationV10CategoriesV20selfHarmInstructionsSbvp":{"name":"selfHarmInstructions","abstract":"Content that encourages performing acts of self-harm, such as suicide, cutting, and eating disorders, or that gives instructions or advice on how to commit such acts.
","parent_name":"Categories"},"Structs/ModerationsResult/Moderation/Categories.html#/s:6OpenAI17ModerationsResultV10ModerationV10CategoriesV6sexualSbvp":{"name":"sexual","abstract":"Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services (excluding sex education and wellness).
","parent_name":"Categories"},"Structs/ModerationsResult/Moderation/Categories.html#/s:6OpenAI17ModerationsResultV10ModerationV10CategoriesV12sexualMinorsSbvp":{"name":"sexualMinors","abstract":"Sexual content that includes an individual who is under 18 years old.
","parent_name":"Categories"},"Structs/ModerationsResult/Moderation/Categories.html#/s:6OpenAI17ModerationsResultV10ModerationV10CategoriesV8violenceSbvp":{"name":"violence","abstract":"Content that promotes or glorifies violence or celebrates the suffering or humiliation of others.
","parent_name":"Categories"},"Structs/ModerationsResult/Moderation/Categories.html#/s:6OpenAI17ModerationsResultV10ModerationV10CategoriesV15violenceGraphicSbvp":{"name":"violenceGraphic","abstract":"Violent content that depicts death, violence, or serious physical injury in extreme graphic detail.
","parent_name":"Categories"},"Structs/ModerationsResult/Moderation/Categories/CodingKeys.html":{"name":"CodingKeys","parent_name":"Categories"},"Structs/ModerationsResult/Moderation/Categories.html#/s:ST12makeIterator0B0QzyF":{"name":"makeIterator()","parent_name":"Categories"},"Structs/ModerationsResult/Moderation/Categories.html":{"name":"Categories","parent_name":"Moderation"},"Structs/ModerationsResult/Moderation/CategoryScores.html":{"name":"CategoryScores","parent_name":"Moderation"},"Structs/ModerationsResult/Moderation.html#/s:6OpenAI17ModerationsResultV10ModerationV10categoriesAE10CategoriesVvp":{"name":"categories","abstract":"Collection of per-category binary usage policies violation flags. For each category, the value is true if the model flags the corresponding category as violated, false otherwise.
","parent_name":"Moderation"},"Structs/ModerationsResult/Moderation.html#/s:6OpenAI17ModerationsResultV10ModerationV14categoryScoresAE08CategoryG0Vvp":{"name":"categoryScores","abstract":"Collection of per-category raw scores output by the model, denoting the model’s confidence that the input violates the OpenAI’s policy for the category. The value is between 0 and 1, where higher values denote higher confidence. The scores should not be interpreted as probabilities.
","parent_name":"Moderation"},"Structs/ModerationsResult/Moderation.html#/s:6OpenAI17ModerationsResultV10ModerationV7flaggedSbvp":{"name":"flagged","abstract":"True if the model classifies the content as violating OpenAI’s usage policies, false otherwise.
","parent_name":"Moderation"},"Structs/ModerationsResult/Moderation.html":{"name":"Moderation","parent_name":"ModerationsResult"},"Structs/ModerationsResult.html#/s:s12IdentifiableP2id2IDQzvp":{"name":"id","parent_name":"ModerationsResult"},"Structs/ModerationsResult.html#/s:6OpenAI17ModerationsResultV5modelSSvp":{"name":"model","parent_name":"ModerationsResult"},"Structs/ModerationsResult.html#/s:6OpenAI17ModerationsResultV7resultsSayAC10ModerationVGvp":{"name":"results","parent_name":"ModerationsResult"},"Structs/ModerationsQuery.html#/s:6OpenAI16ModerationsQueryV5inputSSvp":{"name":"input","abstract":"The input text to classify.
","parent_name":"ModerationsQuery"},"Structs/ModerationsQuery.html#/s:6OpenAI16ModerationsQueryV5modelSSSgvp":{"name":"model","abstract":"ID of the model to use.
","parent_name":"ModerationsQuery"},"Structs/ModerationsQuery.html#/s:6OpenAI16ModerationsQueryV5input5modelACSS_SSSgtcfc":{"name":"init(input:model:)","parent_name":"ModerationsQuery"},"Structs/ModelsResult.html#/s:6OpenAI12ModelsResultV4dataSayAA05ModelD0VGvp":{"name":"data","abstract":"A list of model objects.
","parent_name":"ModelsResult"},"Structs/ModelsResult.html#/s:6OpenAI12ModelsResultV6objectSSvp":{"name":"object","abstract":"The object type, which is always list
The model identifier, which can be referenced in the API endpoints.
","parent_name":"ModelResult"},"Structs/ModelResult.html#/s:6OpenAI11ModelResultV7createdSdvp":{"name":"created","abstract":"The Unix timestamp (in seconds) when the model was created.
","parent_name":"ModelResult"},"Structs/ModelResult.html#/s:6OpenAI11ModelResultV6objectSSvp":{"name":"object","abstract":"The object type, which is always “model”.
","parent_name":"ModelResult"},"Structs/ModelResult.html#/s:6OpenAI11ModelResultV7ownedBySSvp":{"name":"ownedBy","abstract":"The organization that owns the model.
","parent_name":"ModelResult"},"Structs/ModelResult/CodingKeys.html":{"name":"CodingKeys","parent_name":"ModelResult"},"Structs/ModelQuery.html#/s:6OpenAI10ModelQueryV5modelSSvp":{"name":"model","abstract":"The ID of the model to use for this request.
","parent_name":"ModelQuery"},"Structs/ModelQuery.html#/s:6OpenAI10ModelQueryV5modelACSS_tcfc":{"name":"init(model:)","parent_name":"ModelQuery"},"Structs/ImagesResult/Image/CodingKeys.html#/s:6OpenAI12ImagesResultV5ImageV10CodingKeysO7b64JsonyA2GmF":{"name":"b64Json","parent_name":"CodingKeys"},"Structs/ImagesResult/Image/CodingKeys.html#/s:6OpenAI12ImagesResultV5ImageV10CodingKeysO13revisedPromptyA2GmF":{"name":"revisedPrompt","parent_name":"CodingKeys"},"Structs/ImagesResult/Image/CodingKeys.html#/s:6OpenAI12ImagesResultV5ImageV10CodingKeysO3urlyA2GmF":{"name":"url","parent_name":"CodingKeys"},"Structs/ImagesResult/Image.html#/s:6OpenAI12ImagesResultV5ImageV7b64JsonSSSgvp":{"name":"b64Json","abstract":"The base64-encoded JSON of the generated image, if response_format is b64_json
","parent_name":"Image"},"Structs/ImagesResult/Image.html#/s:6OpenAI12ImagesResultV5ImageV13revisedPromptSSSgvp":{"name":"revisedPrompt","abstract":"The prompt that was used to generate the image, if there was any revision to the prompt.
","parent_name":"Image"},"Structs/ImagesResult/Image.html#/s:6OpenAI12ImagesResultV5ImageV3urlSSSgvp":{"name":"url","abstract":"The URL of the generated image, if response_format is url (default).
","parent_name":"Image"},"Structs/ImagesResult/Image/CodingKeys.html":{"name":"CodingKeys","parent_name":"Image"},"Structs/ImagesResult.html#/s:6OpenAI12ImagesResultV7createdSdvp":{"name":"created","parent_name":"ImagesResult"},"Structs/ImagesResult.html#/s:6OpenAI12ImagesResultV4dataSayAC5ImageVGvp":{"name":"data","parent_name":"ImagesResult"},"Structs/ImagesResult/Image.html":{"name":"Image","abstract":"Represents the url or the content of an image generated by the OpenAI API.
","parent_name":"ImagesResult"},"Structs/ImagesQuery/Size.html#/s:6OpenAI11ImagesQueryV4SizeO4_256yA2EmF":{"name":"_256","parent_name":"Size"},"Structs/ImagesQuery/Size.html#/s:6OpenAI11ImagesQueryV4SizeO4_512yA2EmF":{"name":"_512","parent_name":"Size"},"Structs/ImagesQuery/Size.html#/s:6OpenAI11ImagesQueryV4SizeO5_1024yA2EmF":{"name":"_1024","parent_name":"Size"},"Structs/ImagesQuery/Size.html#/s:6OpenAI11ImagesQueryV4SizeO10_1792_1024yA2EmF":{"name":"_1792_1024","parent_name":"Size"},"Structs/ImagesQuery/Size.html#/s:6OpenAI11ImagesQueryV4SizeO10_1024_1792yA2EmF":{"name":"_1024_1792","parent_name":"Size"},"Structs/ImagesQuery/Quality.html#/s:6OpenAI11ImagesQueryV7QualityO8standardyA2EmF":{"name":"standard","parent_name":"Quality"},"Structs/ImagesQuery/Quality.html#/s:6OpenAI11ImagesQueryV7QualityO2hdyA2EmF":{"name":"hd","parent_name":"Quality"},"Structs/ImagesQuery/Style.html#/s:6OpenAI11ImagesQueryV5StyleO7naturalyA2EmF":{"name":"natural","parent_name":"Style"},"Structs/ImagesQuery/Style.html#/s:6OpenAI11ImagesQueryV5StyleO5vividyA2EmF":{"name":"vivid","parent_name":"Style"},"Structs/ImagesQuery/CodingKeys.html#/s:6OpenAI11ImagesQueryV10CodingKeysO5modelyA2EmF":{"name":"model","parent_name":"CodingKeys"},"Structs/ImagesQuery/CodingKeys.html#/s:6OpenAI11ImagesQueryV10CodingKeysO6promptyA2EmF":{"name":"prompt","parent_name":"CodingKeys"},"Structs/ImagesQuery/CodingKeys.html#/s:6OpenAI11ImagesQueryV10CodingKeysO1nyA2EmF":{"name":"n","parent_name":"CodingKeys"},"Structs/ImagesQuery/CodingKeys.html#/s:6OpenAI11ImagesQueryV10CodingKeysO4sizeyA2EmF":{"name":"size","parent_name":"CodingKeys"},"Structs/ImagesQuery/CodingKeys.html#/s:6OpenAI11ImagesQueryV10CodingKeysO4useryA2EmF":{"name":"user","parent_name":"CodingKeys"},"Structs/ImagesQuery/CodingKeys.html#/s:6OpenAI11ImagesQueryV10CodingKeysO5styleyA2EmF":{"name":"style","parent_name":"CodingKeys"},"Structs/ImagesQuery/CodingKeys.html#/s:6OpenAI11ImagesQueryV10CodingKeysO14responseFormatyA2EmF":{"name":"responseFormat",
"parent_name":"CodingKeys"},"Structs/ImagesQuery/CodingKeys.html#/s:6OpenAI11ImagesQueryV10CodingKeysO7qualityyA2EmF":{"name":"quality","parent_name":"CodingKeys"},"Structs/ImagesQuery/ResponseFormat.html#/s:6OpenAI11ImagesQueryV14ResponseFormatO3urlyA2EmF":{"name":"url","parent_name":"ResponseFormat"},"Structs/ImagesQuery/ResponseFormat.html#/s:6OpenAI11ImagesQueryV14ResponseFormatO8b64_jsonyA2EmF":{"name":"b64_json","parent_name":"ResponseFormat"},"Structs/ImagesQuery/ResponseFormat.html":{"name":"ResponseFormat","parent_name":"ImagesQuery"},"Structs/ImagesQuery.html#/s:6OpenAI11ImagesQueryV6promptSSvp":{"name":"prompt","abstract":"A text description of the desired image(s). The maximum length is 1000 characters for dall-e-2 and 4000 characters for dall-e-3.
","parent_name":"ImagesQuery"},"Structs/ImagesQuery.html#/s:6OpenAI11ImagesQueryV5modelSSSgvp":{"name":"model","abstract":"The model to use for image generation.","parent_name":"ImagesQuery"},"Structs/ImagesQuery.html#/s:6OpenAI11ImagesQueryV14responseFormatAC08ResponseF0OSgvp":{"name":"responseFormat","abstract":"
The format in which the generated images are returned. Must be one of url or b64_json.","parent_name":"ImagesQuery"},"Structs/ImagesQuery.html#/s:6OpenAI11ImagesQueryV1nSiSgvp":{"name":"n","abstract":"
The number of images to generate. Must be between 1 and 10. For dall-e-3, only n=1 is supported.","parent_name":"ImagesQuery"},"Structs/ImagesQuery.html#/s:6OpenAI11ImagesQueryV4sizeAC4SizeOSgvp":{"name":"size","abstract":"
The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024 for dall-e-2. Must be one of 1024x1024, 1792x1024, or 1024x1792 for dall-e-3 models.","parent_name":"ImagesQuery"},"Structs/ImagesQuery.html#/s:6OpenAI11ImagesQueryV4userSSSgvp":{"name":"user","abstract":"
A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.","parent_name":"ImagesQuery"},"Structs/ImagesQuery.html#/s:6OpenAI11ImagesQueryV5styleAC5StyleOSgvp":{"name":"style","abstract":"
The style of the generated images. Must be one of vivid or natural. Vivid causes the model to lean towards generating hyper-real and dramatic images. Natural causes the model to produce more natural, less hyper-real looking images. This param is only supported for dall-e-3.","parent_name":"ImagesQuery"},"Structs/ImagesQuery.html#/s:6OpenAI11ImagesQueryV7qualityAC7QualityOSgvp":{"name":"quality","abstract":"
The quality of the image that will be generated. hd creates images with finer details and greater consistency across the image. This param is only supported for dall-e-3.","parent_name":"ImagesQuery"},"Structs/ImagesQuery.html#/s:6OpenAI11ImagesQueryV6prompt5model1n7quality14responseFormat4size5style4userACSS_SSSgSiSgAC7QualityOSgAC08ResponseI0OSgAC4SizeOSgAC5StyleOSgALtcfc":{"name":"init(prompt:model:n:quality:responseFormat:size:style:user:)","parent_name":"ImagesQuery"},"Structs/ImagesQuery/CodingKeys.html":{"name":"CodingKeys","parent_name":"ImagesQuery"},"Structs/ImagesQuery/Style.html":{"name":"Style","parent_name":"ImagesQuery"},"Structs/ImagesQuery/Quality.html":{"name":"Quality","parent_name":"ImagesQuery"},"Structs/ImagesQuery/Size.html":{"name":"Size","parent_name":"ImagesQuery"},"Structs/ImageVariationsQuery/CodingKeys.html#/s:6OpenAI20ImageVariationsQueryV10CodingKeysO5imageyA2EmF":{"name":"image","parent_name":"CodingKeys"},"Structs/ImageVariationsQuery/CodingKeys.html#/s:6OpenAI20ImageVariationsQueryV10CodingKeysO5modelyA2EmF":{"name":"model","parent_name":"CodingKeys"},"Structs/ImageVariationsQuery/CodingKeys.html#/s:6OpenAI20ImageVariationsQueryV10CodingKeysO1nyA2EmF":{"name":"n","parent_name":"CodingKeys"},"Structs/ImageVariationsQuery/CodingKeys.html#/s:6OpenAI20ImageVariationsQueryV10CodingKeysO14responseFormatyA2EmF":{"name":"responseFormat","parent_name":"CodingKeys"},"Structs/ImageVariationsQuery/CodingKeys.html#/s:6OpenAI20ImageVariationsQueryV10CodingKeysO4sizeyA2EmF":{"name":"size","parent_name":"CodingKeys"},"Structs/ImageVariationsQuery/CodingKeys.html#/s:6OpenAI20ImageVariationsQueryV10CodingKeysO4useryA2EmF":{"name":"user","parent_name":"CodingKeys"},"Structs/ImageVariationsQuery.html#/s:6OpenAI20ImageVariationsQueryV14ResponseFormata":{"name":"ResponseFormat","parent_name":"ImageVariationsQuery"},"Structs/ImageVariationsQuery.html#/s:6OpenAI20ImageVariationsQueryV5image10Foundation4DataVvp":{"name":"image","abstract":"
The image to edit. Must be a valid PNG file, less than 4MB, and square.
","parent_name":"ImageVariationsQuery"},"Structs/ImageVariationsQuery.html#/s:6OpenAI20ImageVariationsQueryV5modelSSSgvp":{"name":"model","abstract":"The model to use for image generation. Only dall-e-2 is supported at this time.","parent_name":"ImageVariationsQuery"},"Structs/ImageVariationsQuery.html#/s:6OpenAI20ImageVariationsQueryV1nSiSgvp":{"name":"n","abstract":"
The number of images to generate. Must be between 1 and 10.","parent_name":"ImageVariationsQuery"},"Structs/ImageVariationsQuery.html#/s:6OpenAI20ImageVariationsQueryV14responseFormatAA06ImagesE0V08ResponseG0OSgvp":{"name":"responseFormat","abstract":"
The format in which the generated images are returned. Must be one of url or b64_json.","parent_name":"ImageVariationsQuery"},"Structs/ImageVariationsQuery.html#/s:6OpenAI20ImageVariationsQueryV4sizeSSSgvp":{"name":"size","abstract":"
The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024.","parent_name":"ImageVariationsQuery"},"Structs/ImageVariationsQuery.html#/s:6OpenAI20ImageVariationsQueryV4userSSSgvp":{"name":"user","abstract":"
A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.","parent_name":"ImageVariationsQuery"},"Structs/ImageVariationsQuery.html#/s:6OpenAI20ImageVariationsQueryV5image5model1n14responseFormat4size4userAC10Foundation4DataV_SSSgSiSgAA06ImagesE0V08ResponseI0OSgA2Mtcfc":{"name":"init(image:model:n:responseFormat:size:user:)","parent_name":"ImageVariationsQuery"},"Structs/ImageVariationsQuery/CodingKeys.html":{"name":"CodingKeys","parent_name":"ImageVariationsQuery"},"Structs/ImageEditsQuery/CodingKeys.html#/s:6OpenAI15ImageEditsQueryV10CodingKeysO5imageyA2EmF":{"name":"image","parent_name":"CodingKeys"},"Structs/ImageEditsQuery/CodingKeys.html#/s:6OpenAI15ImageEditsQueryV10CodingKeysO4maskyA2EmF":{"name":"mask","parent_name":"CodingKeys"},"Structs/ImageEditsQuery/CodingKeys.html#/s:6OpenAI15ImageEditsQueryV10CodingKeysO6promptyA2EmF":{"name":"prompt","parent_name":"CodingKeys"},"Structs/ImageEditsQuery/CodingKeys.html#/s:6OpenAI15ImageEditsQueryV10CodingKeysO5modelyA2EmF":{"name":"model","parent_name":"CodingKeys"},"Structs/ImageEditsQuery/CodingKeys.html#/s:6OpenAI15ImageEditsQueryV10CodingKeysO1nyA2EmF":{"name":"n","parent_name":"CodingKeys"},"Structs/ImageEditsQuery/CodingKeys.html#/s:6OpenAI15ImageEditsQueryV10CodingKeysO14responseFormatyA2EmF":{"name":"responseFormat","parent_name":"CodingKeys"},"Structs/ImageEditsQuery/CodingKeys.html#/s:6OpenAI15ImageEditsQueryV10CodingKeysO4sizeyA2EmF":{"name":"size","parent_name":"CodingKeys"},"Structs/ImageEditsQuery/CodingKeys.html#/s:6OpenAI15ImageEditsQueryV10CodingKeysO4useryA2EmF":{"name":"user","parent_name":"CodingKeys"},"Structs/ImageEditsQuery.html#/s:6OpenAI15ImageEditsQueryV14ResponseFormata":{"name":"ResponseFormat","parent_name":"ImageEditsQuery"},"Structs/ImageEditsQuery.html#/s:6OpenAI15ImageEditsQueryV4Sizea":{"name":"Size","parent_name":"ImageEditsQuery"},"Structs/ImageEditsQuery.html#/s:6OpenAI15ImageEditsQueryV5image10Foundation4DataVvp":{"name":"image","abstract"
:"
The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask.
","parent_name":"ImageEditsQuery"},"Structs/ImageEditsQuery.html#/s:6OpenAI15ImageEditsQueryV4mask10Foundation4DataVSgvp":{"name":"mask","abstract":"An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where image should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as image.
","parent_name":"ImageEditsQuery"},"Structs/ImageEditsQuery.html#/s:6OpenAI15ImageEditsQueryV6promptSSvp":{"name":"prompt","abstract":"A text description of the desired image(s). The maximum length is 1000 characters.
","parent_name":"ImageEditsQuery"},"Structs/ImageEditsQuery.html#/s:6OpenAI15ImageEditsQueryV5modelSSSgvp":{"name":"model","abstract":"The model to use for image generation.","parent_name":"ImageEditsQuery"},"Structs/ImageEditsQuery.html#/s:6OpenAI15ImageEditsQueryV1nSiSgvp":{"name":"n","abstract":"
The number of images to generate. Must be between 1 and 10.
","parent_name":"ImageEditsQuery"},"Structs/ImageEditsQuery.html#/s:6OpenAI15ImageEditsQueryV14responseFormatAA06ImagesE0V08ResponseG0OSgvp":{"name":"responseFormat","abstract":"The format in which the generated images are returned. Must be one of url or b64_json.","parent_name":"ImageEditsQuery"},"Structs/ImageEditsQuery.html#/s:6OpenAI15ImageEditsQueryV4sizeAA06ImagesE0V4SizeOSgvp":{"name":"size","abstract":"
The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024.
","parent_name":"ImageEditsQuery"},"Structs/ImageEditsQuery.html#/s:6OpenAI15ImageEditsQueryV4userSSSgvp":{"name":"user","abstract":"A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.","parent_name":"ImageEditsQuery"},"Structs/ImageEditsQuery.html#/s:6OpenAI15ImageEditsQueryV5image6prompt4mask5model1n14responseFormat4size4userAC10Foundation4DataV_SSANSgSSSgSiSgAA06ImagesE0V08ResponseK0OSgAS4SizeOSgAPtcfc":{"name":"init(image:prompt:mask:model:n:responseFormat:size:user:)","parent_name":"ImageEditsQuery"},"Structs/ImageEditsQuery/CodingKeys.html":{"name":"CodingKeys","parent_name":"ImageEditsQuery"},"Structs/EmbeddingsResult/Usage.html#/s:6OpenAI16EmbeddingsResultV5UsageV12promptTokensSivp":{"name":"promptTokens","parent_name":"Usage"},"Structs/EmbeddingsResult/Usage.html#/s:6OpenAI16EmbeddingsResultV5UsageV11totalTokensSivp":{"name":"totalTokens","parent_name":"Usage"},"Structs/EmbeddingsResult/Embedding.html#/s:6OpenAI16EmbeddingsResultV9EmbeddingV6objectSSvp":{"name":"object","abstract":"
The object type, which is always “embedding”.
","parent_name":"Embedding"},"Structs/EmbeddingsResult/Embedding.html#/s:6OpenAI16EmbeddingsResultV9EmbeddingV9embeddingSaySdGvp":{"name":"embedding","abstract":"The embedding vector, which is a list of floats. The length of vector depends on the model as listed in the embedding guide.","parent_name":"Embedding"},"Structs/EmbeddingsResult/Embedding.html":{"name":"Embedding","parent_name":"EmbeddingsResult"},"Structs/EmbeddingsResult/Usage.html":{"name":"Usage","parent_name":"EmbeddingsResult"},"Structs/EmbeddingsResult.html#/s:6OpenAI16EmbeddingsResultV4dataSayAC9EmbeddingVGvp":{"name":"data","parent_name":"EmbeddingsResult"},"Structs/EmbeddingsResult.html#/s:6OpenAI16EmbeddingsResultV5modelSSvp":{"name":"model","parent_name":"EmbeddingsResult"},"Structs/EmbeddingsResult.html#/s:6OpenAI16EmbeddingsResultV5usageAC5UsageVvp":{"name":"usage","parent_name":"EmbeddingsResult"},"Structs/EmbeddingsResult.html#/s:6OpenAI16EmbeddingsResultV6objectSSvp":{"name":"object","abstract":"
The object type, which is always “list”.
","parent_name":"EmbeddingsResult"},"Structs/EmbeddingsQuery/CodingKeys.html#/s:6OpenAI15EmbeddingsQueryV10CodingKeysO5inputyA2EmF":{"name":"input","parent_name":"CodingKeys"},"Structs/EmbeddingsQuery/CodingKeys.html#/s:6OpenAI15EmbeddingsQueryV10CodingKeysO5modelyA2EmF":{"name":"model","parent_name":"CodingKeys"},"Structs/EmbeddingsQuery/CodingKeys.html#/s:6OpenAI15EmbeddingsQueryV10CodingKeysO14encodingFormatyA2EmF":{"name":"encodingFormat","parent_name":"CodingKeys"},"Structs/EmbeddingsQuery/CodingKeys.html#/s:6OpenAI15EmbeddingsQueryV10CodingKeysO4useryA2EmF":{"name":"user","parent_name":"CodingKeys"},"Structs/EmbeddingsQuery/EncodingFormat.html#/s:6OpenAI15EmbeddingsQueryV14EncodingFormatO5floatyA2EmF":{"name":"float","parent_name":"EncodingFormat"},"Structs/EmbeddingsQuery/EncodingFormat.html#/s:6OpenAI15EmbeddingsQueryV14EncodingFormatO6base64yA2EmF":{"name":"base64","parent_name":"EncodingFormat"},"Structs/EmbeddingsQuery/Input.html#/s:6OpenAI15EmbeddingsQueryV5InputO6stringyAESScAEmF":{"name":"string(_:)","parent_name":"Input"},"Structs/EmbeddingsQuery/Input.html#/s:6OpenAI15EmbeddingsQueryV5InputO10stringListyAESaySSGcAEmF":{"name":"stringList(_:)","parent_name":"Input"},"Structs/EmbeddingsQuery/Input.html#/s:6OpenAI15EmbeddingsQueryV5InputO7intListyAESaySiGcAEmF":{"name":"intList(_:)","parent_name":"Input"},"Structs/EmbeddingsQuery/Input.html#/s:6OpenAI15EmbeddingsQueryV5InputO9intMatrixyAESaySaySiGGcAEmF":{"name":"intMatrix(_:)","parent_name":"Input"},"Structs/EmbeddingsQuery/Input.html#/s:SE6encode2toys7Encoder_p_tKF":{"name":"encode(to:)","parent_name":"Input"},"Structs/EmbeddingsQuery/Input.html#/s:6OpenAI15EmbeddingsQueryV5InputO6stringAESS_tcfc":{"name":"init(string:)","parent_name":"Input"},"Structs/EmbeddingsQuery/Input.html#/s:6OpenAI15EmbeddingsQueryV5InputO10stringListAESaySSG_tcfc":{"name":"init(stringList:)","parent_name":"Input"},"Structs/EmbeddingsQuery/Input.html#/s:6OpenAI15EmbeddingsQueryV5InputO7intListAESaySiG_tcfc":{"name":"init(intLi
st:)","parent_name":"Input"},"Structs/EmbeddingsQuery/Input.html#/s:6OpenAI15EmbeddingsQueryV5InputO9intMatrixAESaySaySiGG_tcfc":{"name":"init(intMatrix:)","parent_name":"Input"},"Structs/EmbeddingsQuery.html#/s:6OpenAI15EmbeddingsQueryV5inputAC5InputOvp":{"name":"input","abstract":"Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for text-embedding-ada-002), cannot be an empty string, and any array must be 2048 dimensions or less.
","parent_name":"EmbeddingsQuery"},"Structs/EmbeddingsQuery.html#/s:6OpenAI15EmbeddingsQueryV5modelSSvp":{"name":"model","abstract":"ID of the model to use. You can use the List models API to see all of your available models, or see our Model overview for descriptions of them.","parent_name":"EmbeddingsQuery"},"Structs/EmbeddingsQuery.html#/s:6OpenAI15EmbeddingsQueryV14encodingFormatAC08EncodingF0OSgvp":{"name":"encodingFormat","abstract":"
The format to return the embeddings in. Can be either float or base64.","parent_name":"EmbeddingsQuery"},"Structs/EmbeddingsQuery.html#/s:6OpenAI15EmbeddingsQueryV4userSSSgvp":{"name":"user","abstract":"
A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.","parent_name":"EmbeddingsQuery"},"Structs/EmbeddingsQuery.html#/s:6OpenAI15EmbeddingsQueryV5input5model14encodingFormat4userA2C5InputO_SSAC08EncodingH0OSgSSSgtcfc":{"name":"init(input:model:encodingFormat:user:)","parent_name":"EmbeddingsQuery"},"Structs/EmbeddingsQuery/Input.html":{"name":"Input","parent_name":"EmbeddingsQuery"},"Structs/EmbeddingsQuery/EncodingFormat.html":{"name":"EncodingFormat","parent_name":"EmbeddingsQuery"},"Structs/EmbeddingsQuery/CodingKeys.html":{"name":"CodingKeys","parent_name":"EmbeddingsQuery"},"Structs/EditsResult/Usage.html#/s:6OpenAI11EditsResultV5UsageV12promptTokensSivp":{"name":"promptTokens","parent_name":"Usage"},"Structs/EditsResult/Usage.html#/s:6OpenAI11EditsResultV5UsageV16completionTokensSivp":{"name":"completionTokens","parent_name":"Usage"},"Structs/EditsResult/Usage.html#/s:6OpenAI11EditsResultV5UsageV11totalTokensSivp":{"name":"totalTokens","parent_name":"Usage"},"Structs/EditsResult/Choice.html#/s:6OpenAI11EditsResultV6ChoiceV4textSSvp":{"name":"text","parent_name":"Choice"},"Structs/EditsResult/Choice.html":{"name":"Choice","parent_name":"EditsResult"},"Structs/EditsResult/Usage.html":{"name":"Usage","parent_name":"EditsResult"},"Structs/EditsResult.html#/s:6OpenAI11EditsResultV6objectSSvp":{"name":"object","parent_name":"EditsResult"},"Structs/EditsResult.html#/s:6OpenAI11EditsResultV7createdSdvp":{"name":"created","parent_name":"EditsResult"},"Structs/EditsResult.html#/s:6OpenAI11EditsResultV7choicesSayAC6ChoiceVGvp":{"name":"choices","parent_name":"EditsResult"},"Structs/EditsResult.html#/s:6OpenAI11EditsResultV5usageAC5UsageVvp":{"name":"usage","parent_name":"EditsResult"},"Structs/EditsQuery.html#/s:6OpenAI10EditsQueryV5modelSSvp":{"name":"model","abstract":"
ID of the model to use.
","parent_name":"EditsQuery"},"Structs/EditsQuery.html#/s:6OpenAI10EditsQueryV5inputSSSgvp":{"name":"input","abstract":"Input text to get embeddings for.
","parent_name":"EditsQuery"},"Structs/EditsQuery.html#/s:6OpenAI10EditsQueryV11instructionSSvp":{"name":"instruction","abstract":"The instruction that tells the model how to edit the prompt.
","parent_name":"EditsQuery"},"Structs/EditsQuery.html#/s:6OpenAI10EditsQueryV1nSiSgvp":{"name":"n","abstract":"The number of images to generate. Must be between 1 and 10.
","parent_name":"EditsQuery"},"Structs/EditsQuery.html#/s:6OpenAI10EditsQueryV11temperatureSdSgvp":{"name":"temperature","abstract":"What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer.
","parent_name":"EditsQuery"},"Structs/EditsQuery.html#/s:6OpenAI10EditsQueryV4topPSdSgvp":{"name":"topP","abstract":"An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
","parent_name":"EditsQuery"},"Structs/EditsQuery.html#/s:6OpenAI10EditsQueryV5model5input11instruction1n11temperature4topPACSS_SSSgSSSiSgSdSgALtcfc":{"name":"init(model:input:instruction:n:temperature:topP:)","parent_name":"EditsQuery"},"Structs/CompletionsResult/Choice.html#/s:6OpenAI17CompletionsResultV6ChoiceV4textSSvp":{"name":"text","parent_name":"Choice"},"Structs/CompletionsResult/Choice.html#/s:6OpenAI17CompletionsResultV6ChoiceV12finishReasonSSSgvp":{"name":"finishReason","parent_name":"Choice"},"Structs/CompletionsResult/Usage.html#/s:6OpenAI17CompletionsResultV5UsageV12promptTokensSivp":{"name":"promptTokens","parent_name":"Usage"},"Structs/CompletionsResult/Usage.html#/s:6OpenAI17CompletionsResultV5UsageV16completionTokensSivp":{"name":"completionTokens","parent_name":"Usage"},"Structs/CompletionsResult/Usage.html#/s:6OpenAI17CompletionsResultV5UsageV11totalTokensSivp":{"name":"totalTokens","parent_name":"Usage"},"Structs/CompletionsResult/Usage.html":{"name":"Usage","parent_name":"CompletionsResult"},"Structs/CompletionsResult/Choice.html":{"name":"Choice","parent_name":"CompletionsResult"},"Structs/CompletionsResult.html#/s:6OpenAI17CompletionsResultV2idSSvp":{"name":"id","parent_name":"CompletionsResult"},"Structs/CompletionsResult.html#/s:6OpenAI17CompletionsResultV6objectSSvp":{"name":"object","parent_name":"CompletionsResult"},"Structs/CompletionsResult.html#/s:6OpenAI17CompletionsResultV7createdSdvp":{"name":"created","parent_name":"CompletionsResult"},"Structs/CompletionsResult.html#/s:6OpenAI17CompletionsResultV5modelSSvp":{"name":"model","parent_name":"CompletionsResult"},"Structs/CompletionsResult.html#/s:6OpenAI17CompletionsResultV7choicesSayAC6ChoiceVGvp":{"name":"choices","parent_name":"CompletionsResult"},"Structs/CompletionsResult.html#/s:6OpenAI17CompletionsResultV5usageAC5UsageVSgvp":{"name":"usage","parent_name":"CompletionsResult"},"Structs/CompletionsQuery.html#/s:6OpenAI16CompletionsQueryV5modelSSvp":{"name":"model","abstract":"ID 
of the model to use.
","parent_name":"CompletionsQuery"},"Structs/CompletionsQuery.html#/s:6OpenAI16CompletionsQueryV6promptSSvp":{"name":"prompt","abstract":"The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.
","parent_name":"CompletionsQuery"},"Structs/CompletionsQuery.html#/s:6OpenAI16CompletionsQueryV11temperatureSdSgvp":{"name":"temperature","abstract":"What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer.
","parent_name":"CompletionsQuery"},"Structs/CompletionsQuery.html#/s:6OpenAI16CompletionsQueryV9maxTokensSiSgvp":{"name":"maxTokens","abstract":"The maximum number of tokens to generate in the completion.
","parent_name":"CompletionsQuery"},"Structs/CompletionsQuery.html#/s:6OpenAI16CompletionsQueryV4topPSdSgvp":{"name":"topP","abstract":"An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
","parent_name":"CompletionsQuery"},"Structs/CompletionsQuery.html#/s:6OpenAI16CompletionsQueryV16frequencyPenaltySdSgvp":{"name":"frequencyPenalty","abstract":"Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model’s likelihood to repeat the same line verbatim.
","parent_name":"CompletionsQuery"},"Structs/CompletionsQuery.html#/s:6OpenAI16CompletionsQueryV15presencePenaltySdSgvp":{"name":"presencePenalty","abstract":"Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model’s likelihood to talk about new topics.
","parent_name":"CompletionsQuery"},"Structs/CompletionsQuery.html#/s:6OpenAI16CompletionsQueryV4stopSaySSGSgvp":{"name":"stop","abstract":"Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
","parent_name":"CompletionsQuery"},"Structs/CompletionsQuery.html#/s:6OpenAI16CompletionsQueryV4userSSSgvp":{"name":"user","abstract":"A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
","parent_name":"CompletionsQuery"},"Structs/CompletionsQuery.html#/s:6OpenAI16CompletionsQueryV5model6prompt11temperature9maxTokens4topP16frequencyPenalty08presenceL04stop4userACSS_SSSdSgSiSgA3MSaySSGSgSSSgtcfc":{"name":"init(model:prompt:temperature:maxTokens:topP:frequencyPenalty:presencePenalty:stop:user:)","parent_name":"CompletionsQuery"},"Structs/ChatStreamResult/CodingKeys.html#/s:6OpenAI16ChatStreamResultV10CodingKeysO2idyA2EmF":{"name":"id","parent_name":"CodingKeys"},"Structs/ChatStreamResult/CodingKeys.html#/s:6OpenAI16ChatStreamResultV10CodingKeysO6objectyA2EmF":{"name":"object","parent_name":"CodingKeys"},"Structs/ChatStreamResult/CodingKeys.html#/s:6OpenAI16ChatStreamResultV10CodingKeysO7createdyA2EmF":{"name":"created","parent_name":"CodingKeys"},"Structs/ChatStreamResult/CodingKeys.html#/s:6OpenAI16ChatStreamResultV10CodingKeysO5modelyA2EmF":{"name":"model","parent_name":"CodingKeys"},"Structs/ChatStreamResult/CodingKeys.html#/s:6OpenAI16ChatStreamResultV10CodingKeysO7choicesyA2EmF":{"name":"choices","parent_name":"CodingKeys"},"Structs/ChatStreamResult/CodingKeys.html#/s:6OpenAI16ChatStreamResultV10CodingKeysO17systemFingerprintyA2EmF":{"name":"systemFingerprint","parent_name":"CodingKeys"},"Structs/ChatStreamResult/Choice/CodingKeys.html#/s:6OpenAI16ChatStreamResultV6ChoiceV10CodingKeysO5deltayA2GmF":{"name":"delta","parent_name":"CodingKeys"},"Structs/ChatStreamResult/Choice/CodingKeys.html#/s:6OpenAI16ChatStreamResultV6ChoiceV10CodingKeysO12finishReasonyA2GmF":{"name":"finishReason","parent_name":"CodingKeys"},"Structs/ChatStreamResult/Choice/CodingKeys.html#/s:6OpenAI16ChatStreamResultV6ChoiceV10CodingKeysO8logprobsyA2GmF":{"name":"logprobs","parent_name":"CodingKeys"},"Structs/ChatStreamResult/Choice/ChoiceLogprobs/ChatCompletionTokenLogprob/CodingKeys.html#/s:6OpenAI16ChatStreamResultV6ChoiceV0F8LogprobsV0C22CompletionTokenLogprobV10CodingKeysO5tokenyA2KmF":{"name":"token","parent_name":"CodingKeys"},"Structs/ChatStreamResult/Choice/ChoiceLog
probs/ChatCompletionTokenLogprob/CodingKeys.html#/s:6OpenAI16ChatStreamResultV6ChoiceV0F8LogprobsV0C22CompletionTokenLogprobV10CodingKeysO5bytesyA2KmF":{"name":"bytes","parent_name":"CodingKeys"},"Structs/ChatStreamResult/Choice/ChoiceLogprobs/ChatCompletionTokenLogprob/CodingKeys.html#/s:6OpenAI16ChatStreamResultV6ChoiceV0F8LogprobsV0C22CompletionTokenLogprobV10CodingKeysO7logprobyA2KmF":{"name":"logprob","parent_name":"CodingKeys"},"Structs/ChatStreamResult/Choice/ChoiceLogprobs/ChatCompletionTokenLogprob/CodingKeys.html#/s:6OpenAI16ChatStreamResultV6ChoiceV0F8LogprobsV0C22CompletionTokenLogprobV10CodingKeysO03topG0yA2KmF":{"name":"topLogprobs","parent_name":"CodingKeys"},"Structs/ChatStreamResult/Choice/ChoiceLogprobs/ChatCompletionTokenLogprob/TopLogprob.html#/s:6OpenAI16ChatStreamResultV6ChoiceV0F8LogprobsV0C22CompletionTokenLogprobV03TopJ0V5tokenSSvp":{"name":"token","abstract":"The token.
","parent_name":"TopLogprob"},"Structs/ChatStreamResult/Choice/ChoiceLogprobs/ChatCompletionTokenLogprob/TopLogprob.html#/s:6OpenAI16ChatStreamResultV6ChoiceV0F8LogprobsV0C22CompletionTokenLogprobV03TopJ0V5bytesSaySiGSgvp":{"name":"bytes","abstract":"A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be null if there is no bytes representation for the token.
","parent_name":"TopLogprob"},"Structs/ChatStreamResult/Choice/ChoiceLogprobs/ChatCompletionTokenLogprob/TopLogprob.html#/s:6OpenAI16ChatStreamResultV6ChoiceV0F8LogprobsV0C22CompletionTokenLogprobV03TopJ0V7logprobSdvp":{"name":"logprob","abstract":"The log probability of this token.
","parent_name":"TopLogprob"},"Structs/ChatStreamResult/Choice/ChoiceLogprobs/ChatCompletionTokenLogprob.html#/s:6OpenAI16ChatStreamResultV6ChoiceV0F8LogprobsV0C22CompletionTokenLogprobV5tokenSSvp":{"name":"token","abstract":"The token.
","parent_name":"ChatCompletionTokenLogprob"},"Structs/ChatStreamResult/Choice/ChoiceLogprobs/ChatCompletionTokenLogprob.html#/s:6OpenAI16ChatStreamResultV6ChoiceV0F8LogprobsV0C22CompletionTokenLogprobV5bytesSaySiGSgvp":{"name":"bytes","abstract":"A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be null if there is no bytes representation for the token.
","parent_name":"ChatCompletionTokenLogprob"},"Structs/ChatStreamResult/Choice/ChoiceLogprobs/ChatCompletionTokenLogprob.html#/s:6OpenAI16ChatStreamResultV6ChoiceV0F8LogprobsV0C22CompletionTokenLogprobV7logprobSdvp":{"name":"logprob","abstract":"The log probability of this token.
","parent_name":"ChatCompletionTokenLogprob"},"Structs/ChatStreamResult/Choice/ChoiceLogprobs/ChatCompletionTokenLogprob.html#/s:6OpenAI16ChatStreamResultV6ChoiceV0F8LogprobsV0C22CompletionTokenLogprobV03topG0SayAI03TopJ0VGSgvp":{"name":"topLogprobs","abstract":"List of the most likely tokens and their log probability, at this token position. In rare cases, there may be fewer than the number of requested top_logprobs returned.
","parent_name":"ChatCompletionTokenLogprob"},"Structs/ChatStreamResult/Choice/ChoiceLogprobs/ChatCompletionTokenLogprob/TopLogprob.html":{"name":"TopLogprob","parent_name":"ChatCompletionTokenLogprob"},"Structs/ChatStreamResult/Choice/ChoiceLogprobs/ChatCompletionTokenLogprob/CodingKeys.html":{"name":"CodingKeys","parent_name":"ChatCompletionTokenLogprob"},"Structs/ChatStreamResult/Choice/ChoiceLogprobs.html#/s:6OpenAI16ChatStreamResultV6ChoiceV0F8LogprobsV7contentSayAG0C22CompletionTokenLogprobVGSgvp":{"name":"content","abstract":"A list of message content tokens with log probability information.
","parent_name":"ChoiceLogprobs"},"Structs/ChatStreamResult/Choice/ChoiceLogprobs/ChatCompletionTokenLogprob.html":{"name":"ChatCompletionTokenLogprob","parent_name":"ChoiceLogprobs"},"Structs/ChatStreamResult/Choice/ChoiceDelta/CodingKeys.html#/s:6OpenAI16ChatStreamResultV6ChoiceV0F5DeltaV10CodingKeysO7contentyA2ImF":{"name":"content","parent_name":"CodingKeys"},"Structs/ChatStreamResult/Choice/ChoiceDelta/CodingKeys.html#/s:6OpenAI16ChatStreamResultV6ChoiceV0F5DeltaV10CodingKeysO4roleyA2ImF":{"name":"role","parent_name":"CodingKeys"},"Structs/ChatStreamResult/Choice/ChoiceDelta/CodingKeys.html#/s:6OpenAI16ChatStreamResultV6ChoiceV0F5DeltaV10CodingKeysO9toolCallsyA2ImF":{"name":"toolCalls","parent_name":"CodingKeys"},"Structs/ChatStreamResult/Choice/ChoiceDelta/ChoiceDeltaToolCall/ChoiceDeltaToolCallFunction.html#/s:6OpenAI16ChatStreamResultV6ChoiceV0F5DeltaV0fG8ToolCallV0fghI8FunctionV9argumentsSSSgvp":{"name":"arguments","abstract":"The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.
","parent_name":"ChoiceDeltaToolCallFunction"},"Structs/ChatStreamResult/Choice/ChoiceDelta/ChoiceDeltaToolCall/ChoiceDeltaToolCallFunction.html#/s:6OpenAI16ChatStreamResultV6ChoiceV0F5DeltaV0fG8ToolCallV0fghI8FunctionV4nameSSSgvp":{"name":"name","abstract":"The name of the function to call.
","parent_name":"ChoiceDeltaToolCallFunction"},"Structs/ChatStreamResult/Choice/ChoiceDelta/ChoiceDeltaToolCall/ChoiceDeltaToolCallFunction.html#/s:6OpenAI16ChatStreamResultV6ChoiceV0F5DeltaV0fG8ToolCallV0fghI8FunctionV9arguments4nameAKSSSg_ANtcfc":{"name":"init(arguments:name:)","parent_name":"ChoiceDeltaToolCallFunction"},"Structs/ChatStreamResult/Choice/ChoiceDelta/ChoiceDeltaToolCall.html#/s:6OpenAI16ChatStreamResultV6ChoiceV0F5DeltaV0fG8ToolCallV2idSSSgvp":{"name":"id","abstract":"The ID of the tool call.
","parent_name":"ChoiceDeltaToolCall"},"Structs/ChatStreamResult/Choice/ChoiceDelta/ChoiceDeltaToolCall.html#/s:6OpenAI16ChatStreamResultV6ChoiceV0F5DeltaV0fG8ToolCallV8functionAI0fghI8FunctionVSgvp":{"name":"function","abstract":"The function that the model called.
","parent_name":"ChoiceDeltaToolCall"},"Structs/ChatStreamResult/Choice/ChoiceDelta/ChoiceDeltaToolCall.html#/s:6OpenAI16ChatStreamResultV6ChoiceV0F5DeltaV0fG8ToolCallV4typeSSSgvp":{"name":"type","abstract":"The type of the tool. Currently, only function is supported.
","parent_name":"ChoiceDeltaToolCall"},"Structs/ChatStreamResult/Choice/ChoiceDelta/ChoiceDeltaToolCall.html#/s:6OpenAI16ChatStreamResultV6ChoiceV0F5DeltaV0fG8ToolCallV5index2id8functionAISi_SSSgAI0fghI8FunctionVSgtcfc":{"name":"init(index:id:function:)","parent_name":"ChoiceDeltaToolCall"},"Structs/ChatStreamResult/Choice/ChoiceDelta/ChoiceDeltaToolCall/ChoiceDeltaToolCallFunction.html":{"name":"ChoiceDeltaToolCallFunction","parent_name":"ChoiceDeltaToolCall"},"Structs/ChatStreamResult/Choice/ChoiceDelta.html#/s:6OpenAI16ChatStreamResultV6ChoiceV0F5DeltaV4Rolea":{"name":"Role","parent_name":"ChoiceDelta"},"Structs/ChatStreamResult/Choice/ChoiceDelta.html#/s:6OpenAI16ChatStreamResultV6ChoiceV0F5DeltaV7contentSSSgvp":{"name":"content","abstract":"The contents of the chunk message.
","parent_name":"ChoiceDelta"},"Structs/ChatStreamResult/Choice/ChoiceDelta.html#/s:6OpenAI16ChatStreamResultV6ChoiceV0F5DeltaV4roleAA0C5QueryV0C22CompletionMessageParamO4RoleOSgvp":{"name":"role","abstract":"The role of the author of this message.
","parent_name":"ChoiceDelta"},"Structs/ChatStreamResult/Choice/ChoiceDelta.html#/s:6OpenAI16ChatStreamResultV6ChoiceV0F5DeltaV9toolCallsSayAG0fG8ToolCallVGSgvp":{"name":"toolCalls","parent_name":"ChoiceDelta"},"Structs/ChatStreamResult/Choice/ChoiceDelta/ChoiceDeltaToolCall.html":{"name":"ChoiceDeltaToolCall","parent_name":"ChoiceDelta"},"Structs/ChatStreamResult/Choice/ChoiceDelta/CodingKeys.html":{"name":"CodingKeys","parent_name":"ChoiceDelta"},"Structs/ChatStreamResult/Choice.html#/s:6OpenAI16ChatStreamResultV6ChoiceV12FinishReasona":{"name":"FinishReason","parent_name":"Choice"},"Structs/ChatStreamResult/Choice/ChoiceDelta.html":{"name":"ChoiceDelta","parent_name":"Choice"},"Structs/ChatStreamResult/Choice.html#/s:6OpenAI16ChatStreamResultV6ChoiceV5deltaAE0F5DeltaVvp":{"name":"delta","abstract":"A chat completion delta generated by streamed model responses.
","parent_name":"Choice"},"Structs/ChatStreamResult/Choice.html#/s:6OpenAI16ChatStreamResultV6ChoiceV12finishReasonAA0cE0VADV06FinishH0OSgvp":{"name":"finishReason","abstract":"The reason the model stopped generating tokens.","parent_name":"Choice"},"Structs/ChatStreamResult/Choice.html#/s:6OpenAI16ChatStreamResultV6ChoiceV8logprobsAE0F8LogprobsVSgvp":{"name":"logprobs","abstract":"
Log probability information for the choice.
","parent_name":"Choice"},"Structs/ChatStreamResult/Choice/ChoiceLogprobs.html":{"name":"ChoiceLogprobs","parent_name":"Choice"},"Structs/ChatStreamResult/Choice/CodingKeys.html":{"name":"CodingKeys","parent_name":"Choice"},"Structs/ChatStreamResult/Choice.html":{"name":"Choice","parent_name":"ChatStreamResult"},"Structs/ChatStreamResult.html#/s:6OpenAI16ChatStreamResultV2idSSvp":{"name":"id","abstract":"A unique identifier for the chat completion. Each chunk has the same ID.
","parent_name":"ChatStreamResult"},"Structs/ChatStreamResult.html#/s:6OpenAI16ChatStreamResultV6objectSSvp":{"name":"object","abstract":"The object type, which is always chat.completion.chunk
.
The Unix timestamp (in seconds) of when the chat completion was created.","parent_name":"ChatStreamResult"},"Structs/ChatStreamResult.html#/s:6OpenAI16ChatStreamResultV5modelSSvp":{"name":"model","abstract":"
The model to generate the completion.
","parent_name":"ChatStreamResult"},"Structs/ChatStreamResult.html#/s:6OpenAI16ChatStreamResultV7choicesSayAC6ChoiceVGvp":{"name":"choices","abstract":"A list of chat completion choices.","parent_name":"ChatStreamResult"},"Structs/ChatStreamResult.html#/s:6OpenAI16ChatStreamResultV17systemFingerprintSSSgvp":{"name":"systemFingerprint","abstract":"
This fingerprint represents the backend configuration that the model runs with. Can be used in conjunction with the seed
request parameter to understand when backend changes have been made that might impact determinism.
Number of tokens in the generated completion.
","parent_name":"CompletionUsage"},"Structs/ChatResult/CompletionUsage.html#/s:6OpenAI10ChatResultV15CompletionUsageV12promptTokensSivp":{"name":"promptTokens","abstract":"Number of tokens in the prompt.
","parent_name":"CompletionUsage"},"Structs/ChatResult/CompletionUsage.html#/s:6OpenAI10ChatResultV15CompletionUsageV11totalTokensSivp":{"name":"totalTokens","abstract":"Total number of tokens used in the request (prompt + completion).
","parent_name":"CompletionUsage"},"Structs/ChatResult/Choice/FinishReason.html#/s:6OpenAI10ChatResultV6ChoiceV12FinishReasonO4stopyA2GmF":{"name":"stop","parent_name":"FinishReason"},"Structs/ChatResult/Choice/FinishReason.html#/s:6OpenAI10ChatResultV6ChoiceV12FinishReasonO6lengthyA2GmF":{"name":"length","parent_name":"FinishReason"},"Structs/ChatResult/Choice/FinishReason.html#/s:6OpenAI10ChatResultV6ChoiceV12FinishReasonO9toolCallsyA2GmF":{"name":"toolCalls","parent_name":"FinishReason"},"Structs/ChatResult/Choice/FinishReason.html#/s:6OpenAI10ChatResultV6ChoiceV12FinishReasonO13contentFilteryA2GmF":{"name":"contentFilter","parent_name":"FinishReason"},"Structs/ChatResult/Choice/FinishReason.html#/s:6OpenAI10ChatResultV6ChoiceV12FinishReasonO12functionCallyA2GmF":{"name":"functionCall","parent_name":"FinishReason"},"Structs/ChatResult/Choice/CodingKeys.html#/s:6OpenAI10ChatResultV6ChoiceV10CodingKeysO8logprobsyA2GmF":{"name":"logprobs","parent_name":"CodingKeys"},"Structs/ChatResult/Choice/CodingKeys.html#/s:6OpenAI10ChatResultV6ChoiceV10CodingKeysO7messageyA2GmF":{"name":"message","parent_name":"CodingKeys"},"Structs/ChatResult/Choice/CodingKeys.html#/s:6OpenAI10ChatResultV6ChoiceV10CodingKeysO12finishReasonyA2GmF":{"name":"finishReason","parent_name":"CodingKeys"},"Structs/ChatResult/Choice/ChoiceLogprobs/ChatCompletionTokenLogprob/CodingKeys.html#/s:6OpenAI10ChatResultV6ChoiceV0E8LogprobsV0C22CompletionTokenLogprobV10CodingKeysO5tokenyA2KmF":{"name":"token","parent_name":"CodingKeys"},"Structs/ChatResult/Choice/ChoiceLogprobs/ChatCompletionTokenLogprob/CodingKeys.html#/s:6OpenAI10ChatResultV6ChoiceV0E8LogprobsV0C22CompletionTokenLogprobV10CodingKeysO5bytesyA2KmF":{"name":"bytes","parent_name":"CodingKeys"},"Structs/ChatResult/Choice/ChoiceLogprobs/ChatCompletionTokenLogprob/CodingKeys.html#/s:6OpenAI10ChatResultV6ChoiceV0E8LogprobsV0C22CompletionTokenLogprobV10CodingKeysO7logprobyA2KmF":{"name":"logprob","parent_name":"CodingKeys"},"Structs/ChatResult/Choice/C
hoiceLogprobs/ChatCompletionTokenLogprob/CodingKeys.html#/s:6OpenAI10ChatResultV6ChoiceV0E8LogprobsV0C22CompletionTokenLogprobV10CodingKeysO03topF0yA2KmF":{"name":"topLogprobs","parent_name":"CodingKeys"},"Structs/ChatResult/Choice/ChoiceLogprobs/ChatCompletionTokenLogprob/TopLogprob.html#/s:6OpenAI10ChatResultV6ChoiceV0E8LogprobsV0C22CompletionTokenLogprobV03TopI0V5tokenSSvp":{"name":"token","abstract":"The token.
","parent_name":"TopLogprob"},"Structs/ChatResult/Choice/ChoiceLogprobs/ChatCompletionTokenLogprob/TopLogprob.html#/s:6OpenAI10ChatResultV6ChoiceV0E8LogprobsV0C22CompletionTokenLogprobV03TopI0V5bytesSaySiGSgvp":{"name":"bytes","abstract":"A list of integers representing the UTF-8 bytes representation of the token.","parent_name":"TopLogprob"},"Structs/ChatResult/Choice/ChoiceLogprobs/ChatCompletionTokenLogprob/TopLogprob.html#/s:6OpenAI10ChatResultV6ChoiceV0E8LogprobsV0C22CompletionTokenLogprobV03TopI0V7logprobSdvp":{"name":"logprob","abstract":"
The log probability of this token.
","parent_name":"TopLogprob"},"Structs/ChatResult/Choice/ChoiceLogprobs/ChatCompletionTokenLogprob.html#/s:6OpenAI10ChatResultV6ChoiceV0E8LogprobsV0C22CompletionTokenLogprobV5tokenSSvp":{"name":"token","abstract":"The token.
","parent_name":"ChatCompletionTokenLogprob"},"Structs/ChatResult/Choice/ChoiceLogprobs/ChatCompletionTokenLogprob.html#/s:6OpenAI10ChatResultV6ChoiceV0E8LogprobsV0C22CompletionTokenLogprobV5bytesSaySiGSgvp":{"name":"bytes","abstract":"A list of integers representing the UTF-8 bytes representation of the token.","parent_name":"ChatCompletionTokenLogprob"},"Structs/ChatResult/Choice/ChoiceLogprobs/ChatCompletionTokenLogprob.html#/s:6OpenAI10ChatResultV6ChoiceV0E8LogprobsV0C22CompletionTokenLogprobV7logprobSdvp":{"name":"logprob","abstract":"
The log probability of this token.
","parent_name":"ChatCompletionTokenLogprob"},"Structs/ChatResult/Choice/ChoiceLogprobs/ChatCompletionTokenLogprob.html#/s:6OpenAI10ChatResultV6ChoiceV0E8LogprobsV0C22CompletionTokenLogprobV03topF0SayAI03TopI0VGvp":{"name":"topLogprobs","abstract":"List of the most likely tokens and their log probability, at this token position.","parent_name":"ChatCompletionTokenLogprob"},"Structs/ChatResult/Choice/ChoiceLogprobs/ChatCompletionTokenLogprob/TopLogprob.html":{"name":"TopLogprob","parent_name":"ChatCompletionTokenLogprob"},"Structs/ChatResult/Choice/ChoiceLogprobs/ChatCompletionTokenLogprob/CodingKeys.html":{"name":"CodingKeys","parent_name":"ChatCompletionTokenLogprob"},"Structs/ChatResult/Choice/ChoiceLogprobs.html#/s:6OpenAI10ChatResultV6ChoiceV0E8LogprobsV7contentSayAG0C22CompletionTokenLogprobVGSgvp":{"name":"content","parent_name":"ChoiceLogprobs"},"Structs/ChatResult/Choice/ChoiceLogprobs/ChatCompletionTokenLogprob.html":{"name":"ChatCompletionTokenLogprob","parent_name":"ChoiceLogprobs"},"Structs/ChatResult/Choice.html#/s:6OpenAI10ChatResultV6ChoiceV0C17CompletionMessagea":{"name":"ChatCompletionMessage","parent_name":"Choice"},"Structs/ChatResult/Choice.html#/s:6OpenAI10ChatResultV6ChoiceV8logprobsAE0E8LogprobsVSgvp":{"name":"logprobs","abstract":"
Log probability information for the choice.
","parent_name":"Choice"},"Structs/ChatResult/Choice.html#/s:6OpenAI10ChatResultV6ChoiceV7messageAA0C5QueryV0C22CompletionMessageParamOvp":{"name":"message","abstract":"A chat completion message generated by the model.
","parent_name":"Choice"},"Structs/ChatResult/Choice.html#/s:6OpenAI10ChatResultV6ChoiceV12finishReasonSSSgvp":{"name":"finishReason","abstract":"The reason the model stopped generating tokens. This will be stop if the model hit a natural stop point or a provided stop sequence, length if the maximum number of tokens specified in the request was reached, content_filter if content was omitted due to a flag from our content filters, tool_calls if the model called a tool, or function_call (deprecated) if the model called a function.
","parent_name":"Choice"},"Structs/ChatResult/Choice/ChoiceLogprobs.html":{"name":"ChoiceLogprobs","parent_name":"Choice"},"Structs/ChatResult/Choice/CodingKeys.html":{"name":"CodingKeys","parent_name":"Choice"},"Structs/ChatResult/Choice/FinishReason.html":{"name":"FinishReason","parent_name":"Choice"},"Structs/ChatResult/Choice.html":{"name":"Choice","abstract":"mimic the choices array in the chat completion object
","parent_name":"ChatResult"},"Structs/ChatResult/CompletionUsage.html":{"name":"CompletionUsage","parent_name":"ChatResult"},"Structs/ChatResult.html#/s:6OpenAI10ChatResultV2idSSvp":{"name":"id","abstract":"A unique identifier for the chat completion.
","parent_name":"ChatResult"},"Structs/ChatResult.html#/s:6OpenAI10ChatResultV6objectSSvp":{"name":"object","abstract":"The object type, which is always chat.completion.
","parent_name":"ChatResult"},"Structs/ChatResult.html#/s:6OpenAI10ChatResultV7createdSdvp":{"name":"created","abstract":"The Unix timestamp (in seconds) of when the chat completion was created.
","parent_name":"ChatResult"},"Structs/ChatResult.html#/s:6OpenAI10ChatResultV5modelSSvp":{"name":"model","abstract":"The model used for the chat completion.
","parent_name":"ChatResult"},"Structs/ChatResult.html#/s:6OpenAI10ChatResultV7choicesSayAC6ChoiceVGvp":{"name":"choices","abstract":"A list of chat completion choices. Can be more than one if n is greater than 1.
","parent_name":"ChatResult"},"Structs/ChatResult.html#/s:6OpenAI10ChatResultV5usageAC15CompletionUsageVSgvp":{"name":"usage","abstract":"Usage statistics for the completion request.
","parent_name":"ChatResult"},"Structs/ChatResult.html#/s:6OpenAI10ChatResultV17systemFingerprintSSSgvp":{"name":"systemFingerprint","abstract":"This fingerprint represents the backend configuration that the model runs with.","parent_name":"ChatResult"},"Structs/ChatResult/CodingKeys.html":{"name":"CodingKeys","parent_name":"ChatResult"},"Structs/ChatQuery/CodingKeys.html#/s:6OpenAI9ChatQueryV10CodingKeysO8messagesyA2EmF":{"name":"messages","parent_name":"CodingKeys"},"Structs/ChatQuery/CodingKeys.html#/s:6OpenAI9ChatQueryV10CodingKeysO5modelyA2EmF":{"name":"model","parent_name":"CodingKeys"},"Structs/ChatQuery/CodingKeys.html#/s:6OpenAI9ChatQueryV10CodingKeysO16frequencyPenaltyyA2EmF":{"name":"frequencyPenalty","parent_name":"CodingKeys"},"Structs/ChatQuery/CodingKeys.html#/s:6OpenAI9ChatQueryV10CodingKeysO9logitBiasyA2EmF":{"name":"logitBias","parent_name":"CodingKeys"},"Structs/ChatQuery/CodingKeys.html#/s:6OpenAI9ChatQueryV10CodingKeysO8logprobsyA2EmF":{"name":"logprobs","parent_name":"CodingKeys"},"Structs/ChatQuery/CodingKeys.html#/s:6OpenAI9ChatQueryV10CodingKeysO9maxTokensyA2EmF":{"name":"maxTokens","parent_name":"CodingKeys"},"Structs/ChatQuery/CodingKeys.html#/s:6OpenAI9ChatQueryV10CodingKeysO1nyA2EmF":{"name":"n","parent_name":"CodingKeys"},"Structs/ChatQuery/CodingKeys.html#/s:6OpenAI9ChatQueryV10CodingKeysO15presencePenaltyyA2EmF":{"name":"presencePenalty","parent_name":"CodingKeys"},"Structs/ChatQuery/CodingKeys.html#/s:6OpenAI9ChatQueryV10CodingKeysO14responseFormatyA2EmF":{"name":"responseFormat","parent_name":"CodingKeys"},"Structs/ChatQuery/CodingKeys.html#/s:6OpenAI9ChatQueryV10CodingKeysO4seedyA2EmF":{"name":"seed","parent_name":"CodingKeys"},"Structs/ChatQuery/CodingKeys.html#/s:6OpenAI9ChatQueryV10CodingKeysO4stopyA2EmF":{"name":"stop","parent_name":"CodingKeys"},"Structs/ChatQuery/CodingKeys.html#/s:6OpenAI9ChatQueryV10CodingKeysO11temperatureyA2EmF":{"name":"temperature","parent_name":"CodingKeys"},"Structs/ChatQuery/CodingKeys.html#/s:6OpenA
I9ChatQueryV10CodingKeysO10toolChoiceyA2EmF":{"name":"toolChoice","parent_name":"CodingKeys"},"Structs/ChatQuery/CodingKeys.html#/s:6OpenAI9ChatQueryV10CodingKeysO5toolsyA2EmF":{"name":"tools","parent_name":"CodingKeys"},"Structs/ChatQuery/CodingKeys.html#/s:6OpenAI9ChatQueryV10CodingKeysO11topLogprobsyA2EmF":{"name":"topLogprobs","parent_name":"CodingKeys"},"Structs/ChatQuery/CodingKeys.html#/s:6OpenAI9ChatQueryV10CodingKeysO4topPyA2EmF":{"name":"topP","parent_name":"CodingKeys"},"Structs/ChatQuery/CodingKeys.html#/s:6OpenAI9ChatQueryV10CodingKeysO4useryA2EmF":{"name":"user","parent_name":"CodingKeys"},"Structs/ChatQuery/CodingKeys.html#/s:6OpenAI9ChatQueryV10CodingKeysO6streamyA2EmF":{"name":"stream","parent_name":"CodingKeys"},"Structs/ChatQuery/ChatCompletionToolParam/ToolsType.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV9ToolsTypeO8functionyA2GmF":{"name":"function","parent_name":"ToolsType"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/JSONType.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8JSONTypeO7integeryA2KmF":{"name":"integer","parent_name":"JSONType"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/JSONType.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8JSONTypeO6stringyA2KmF":{"name":"string","parent_name":"JSONType"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/JSONType.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8JSONTypeO7booleanyA2KmF":{"name":"boolean","parent_name":"JSONType"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/JSONType.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8JSONTypeO5arrayyA2KmF":{"name":"array","parent_name":"JSONType"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/JSONType.html#/s:6OpenAI
9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8JSONTypeO6objectyA2KmF":{"name":"object","parent_name":"JSONType"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/JSONType.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8JSONTypeO6numberyA2KmF":{"name":"number","parent_name":"JSONType"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/JSONType.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8JSONTypeO4nullyA2KmF":{"name":"null","parent_name":"JSONType"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property/Items.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV5ItemsV8JSONTypea":{"name":"JSONType","parent_name":"Items"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property/Items.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV5ItemsV4typeAI8JSONTypeOvp":{"name":"type","parent_name":"Items"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property/Items.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV5ItemsV10propertiesSDySSAKGSgvp":{"name":"properties","parent_name":"Items"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property/Items.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV5ItemsV7patternSSSgvp":{"name":"pattern","parent_name":"Items"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property/Items.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV5ItemsV5constSSSgvp":{"name":"const","parent_name":"Items"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property
/Items.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV5ItemsV4enumSaySSGSgvp":{"name":"enum","parent_name":"Items"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property/Items.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV5ItemsV10multipleOfSiSgvp":{"name":"multipleOf","parent_name":"Items"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property/Items.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV5ItemsV7minimumSdSgvp":{"name":"minimum","parent_name":"Items"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property/Items.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV5ItemsV7maximumSdSgvp":{"name":"maximum","parent_name":"Items"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property/Items.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV5ItemsV03minL0SiSgvp":{"name":"minItems","parent_name":"Items"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property/Items.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV5ItemsV03maxL0SiSgvp":{"name":"maxItems","parent_name":"Items"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property/Items.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV5ItemsV06uniqueL0SbSgvp":{"name":"uniqueItems","parent_name":"Items"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property/Items.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV5ItemsV4type10properties7pattern5const4enum10multipleOf7minimum7maximum03minL003maxL006uniqueL
0AmI8JSONTypeO_SDySSAKGSgSSSgA1_SaySSGSgSiSgSdSgA5_A4_A4_SbSgtcfc":{"name":"init(type:properties:pattern:const:enum:multipleOf:minimum:maximum:minItems:maxItems:uniqueItems:)","parent_name":"Items"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV8JSONTypea":{"name":"JSONType","parent_name":"Property"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV4typeAI8JSONTypeOvp":{"name":"type","parent_name":"Property"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV11descriptionSSSgvp":{"name":"description","parent_name":"Property"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV6formatSSSgvp":{"name":"format","parent_name":"Property"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV5itemsAK5ItemsVSgvp":{"name":"items","parent_name":"Property"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV8requiredSaySSGSgvp":{"name":"required","parent_name":"Property"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV7patternSSSgvp":{"name":"pattern","parent_name":"Property"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/Fu
nctionParameters/Property.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV5constSSSgvp":{"name":"const","parent_name":"Property"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV4enumSaySSGSgvp":{"name":"enum","parent_name":"Property"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV10multipleOfSiSgvp":{"name":"multipleOf","parent_name":"Property"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV7minimumSdSgvp":{"name":"minimum","parent_name":"Property"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV7maximumSdSgvp":{"name":"maximum","parent_name":"Property"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV8minItemsSiSgvp":{"name":"minItems","parent_name":"Property"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV8maxItemsSiSgvp":{"name":"maxItems","parent_name":"Property"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV11uniqueItemsSbSgvp":{"name":"uniqueItems","parent_name":"Property"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/P
roperty.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV4type11description6format5items8required7pattern5const4enum10multipleOf7minimum7maximum8minItems03maxY006uniqueY0AkI8JSONTypeO_SSSgA0_AK0Y0VSgSaySSGSgA0_A0_A5_SiSgSdSgA7_A6_A6_SbSgtcfc":{"name":"init(type:description:format:items:required:pattern:const:enum:multipleOf:minimum:maximum:minItems:maxItems:uniqueItems:)","parent_name":"Property"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property/Items.html":{"name":"Items","parent_name":"Property"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV4typeAI8JSONTypeOvp":{"name":"type","parent_name":"FunctionParameters"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV10propertiesSDySSAI8PropertyVGSgvp":{"name":"properties","parent_name":"FunctionParameters"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8requiredSaySSGSgvp":{"name":"required","parent_name":"FunctionParameters"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV7patternSSSgvp":{"name":"pattern","parent_name":"FunctionParameters"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV5constSSSgvp":{"name":"const","parent_name":"FunctionParameters"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV4enumSaySSGSgvp":{"name":"enu
m","parent_name":"FunctionParameters"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV10multipleOfSiSgvp":{"name":"multipleOf","parent_name":"FunctionParameters"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV7minimumSiSgvp":{"name":"minimum","parent_name":"FunctionParameters"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV7maximumSiSgvp":{"name":"maximum","parent_name":"FunctionParameters"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV4type10properties8required7pattern5const4enum10multipleOf7minimum7maximumA2I8JSONTypeO_SDySSAI8PropertyVGSgSaySSGSgSSSgA_AZSiSgA0_A0_tcfc":{"name":"init(type:properties:required:pattern:const:enum:multipleOf:minimum:maximum:)","parent_name":"FunctionParameters"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property.html":{"name":"Property","parent_name":"FunctionParameters"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/JSONType.html":{"name":"JSONType","parent_name":"FunctionParameters"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV4nameSSvp":{"name":"name","abstract":"
The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.
","parent_name":"FunctionDefinition"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV11descriptionSSSgvp":{"name":"description","abstract":"The description of what the function does.
","parent_name":"FunctionDefinition"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV10parametersAG0H10ParametersVSgvp":{"name":"parameters","abstract":"The parameters the functions accepts, described as a JSON Schema object.","parent_name":"FunctionDefinition"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV4name11description10parametersAGSS_SSSgAG0H10ParametersVSgtcfc":{"name":"init(name:description:parameters:)","parent_name":"FunctionDefinition"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters.html":{"name":"FunctionParameters","abstract":"
See the guide for examples, and the JSON Schema reference for documentation about the format.
","parent_name":"FunctionDefinition"},"Structs/ChatQuery/ChatCompletionToolParam.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV8functionAE18FunctionDefinitionVvp":{"name":"function","parent_name":"ChatCompletionToolParam"},"Structs/ChatQuery/ChatCompletionToolParam.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV4typeAE9ToolsTypeOvp":{"name":"type","parent_name":"ChatCompletionToolParam"},"Structs/ChatQuery/ChatCompletionToolParam.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV8functionA2E18FunctionDefinitionV_tcfc":{"name":"init(function:)","parent_name":"ChatCompletionToolParam"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition.html":{"name":"FunctionDefinition","parent_name":"ChatCompletionToolParam"},"Structs/ChatQuery/ChatCompletionToolParam/ToolsType.html":{"name":"ToolsType","parent_name":"ChatCompletionToolParam"},"Structs/ChatQuery/ChatCompletionFunctionCallOptionParam.html#/s:6OpenAI9ChatQueryV0C33CompletionFunctionCallOptionParamO4noneyA2EmF":{"name":"none","parent_name":"ChatCompletionFunctionCallOptionParam"},"Structs/ChatQuery/ChatCompletionFunctionCallOptionParam.html#/s:6OpenAI9ChatQueryV0C33CompletionFunctionCallOptionParamO4autoyA2EmF":{"name":"auto","parent_name":"ChatCompletionFunctionCallOptionParam"},"Structs/ChatQuery/ChatCompletionFunctionCallOptionParam.html#/s:6OpenAI9ChatQueryV0C33CompletionFunctionCallOptionParamO8functionyAESScAEmF":{"name":"function(_:)","parent_name":"ChatCompletionFunctionCallOptionParam"},"Structs/ChatQuery/ChatCompletionFunctionCallOptionParam.html#/s:SE6encode2toys7Encoder_p_tKF":{"name":"encode(to:)","parent_name":"ChatCompletionFunctionCallOptionParam"},"Structs/ChatQuery/ChatCompletionFunctionCallOptionParam.html#/s:6OpenAI9ChatQueryV0C33CompletionFunctionCallOptionParamO8functionAESS_tcfc":{"name":"init(function:)","parent_name":"ChatCompletionFunctionCallOptionParam"},"Structs/ChatQuery/ResponseFormat.html#/s:6OpenAI9ChatQueryV14ResponseFormatO10jsonObjectyA2EmF":{"name":"jsonObject","par
ent_name":"ResponseFormat"},"Structs/ChatQuery/ResponseFormat.html#/s:6OpenAI9ChatQueryV14ResponseFormatO4textyA2EmF":{"name":"text","parent_name":"ResponseFormat"},"Structs/ChatQuery/ResponseFormat.html#/s:SE6encode2toys7Encoder_p_tKF":{"name":"encode(to:)","parent_name":"ResponseFormat"},"Structs/ChatQuery/Stop.html#/s:6OpenAI9ChatQueryV4StopO6stringyAESScAEmF":{"name":"string(_:)","parent_name":"Stop"},"Structs/ChatQuery/Stop.html#/s:6OpenAI9ChatQueryV4StopO10stringListyAESaySSGcAEmF":{"name":"stringList(_:)","parent_name":"Stop"},"Structs/ChatQuery/Stop.html#/s:SE6encode2toys7Encoder_p_tKF":{"name":"encode(to:)","parent_name":"Stop"},"Structs/ChatQuery/Stop.html#/s:6OpenAI9ChatQueryV4StopO6stringAESS_tcfc":{"name":"init(string:)","parent_name":"Stop"},"Structs/ChatQuery/Stop.html#/s:6OpenAI9ChatQueryV4StopO10stringListAESaySSG_tcfc":{"name":"init(stringList:)","parent_name":"Stop"},"Structs/ChatQuery/ChatCompletionMessageParam/Role.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO4RoleO6systemyA2GmF":{"name":"system","parent_name":"Role"},"Structs/ChatQuery/ChatCompletionMessageParam/Role.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO4RoleO4useryA2GmF":{"name":"user","parent_name":"Role"},"Structs/ChatQuery/ChatCompletionMessageParam/Role.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO4RoleO9assistantyA2GmF":{"name":"assistant","parent_name":"Role"},"Structs/ChatQuery/ChatCompletionMessageParam/Role.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO4RoleO4toolyA2GmF":{"name":"tool","parent_name":"Role"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionToolMessageParam/CodingKeys.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4ToolfG0V10CodingKeysO7contentyA2ImF":{"name":"content","parent_name":"CodingKeys"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionToolMessageParam/CodingKeys.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4ToolfG0V10CodingKeysO4roleyA2ImF":{"name":"role","parent_name":"CodingKeys
"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionToolMessageParam/CodingKeys.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4ToolfG0V10CodingKeysO10toolCallIdyA2ImF":{"name":"toolCallId","parent_name":"CodingKeys"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionToolMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4ToolfG0V4Rolea":{"name":"Role","parent_name":"ChatCompletionToolMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionToolMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4ToolfG0V7contentSSvp":{"name":"content","abstract":"The contents of the tool message.
","parent_name":"ChatCompletionToolMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionToolMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4ToolfG0V4roleAE4RoleOvp":{"name":"role","abstract":"The role of the messages author, in this case tool.
","parent_name":"ChatCompletionToolMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionToolMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4ToolfG0V10toolCallIdSSvp":{"name":"toolCallId","abstract":"Tool call that this message is responding to.
","parent_name":"ChatCompletionToolMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionToolMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4ToolfG0V7content10toolCallIdAGSS_SStcfc":{"name":"init(content:toolCallId:)","parent_name":"ChatCompletionToolMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionToolMessageParam/CodingKeys.html":{"name":"CodingKeys","parent_name":"ChatCompletionToolMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionAssistantMessageParam/ChatCompletionMessageToolCallParam/FunctionCall.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce9AssistantfG0V0cef8ToolCallG0V08FunctionJ0V9argumentsSSvp":{"name":"arguments","abstract":"The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.
","parent_name":"FunctionCall"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionAssistantMessageParam/ChatCompletionMessageToolCallParam/FunctionCall.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce9AssistantfG0V0cef8ToolCallG0V08FunctionJ0V4nameSSvp":{"name":"name","abstract":"The name of the function to call.
","parent_name":"FunctionCall"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionAssistantMessageParam/ChatCompletionMessageToolCallParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce9AssistantfG0V0cef8ToolCallG0V9ToolsTypea":{"name":"ToolsType","parent_name":"ChatCompletionMessageToolCallParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionAssistantMessageParam/ChatCompletionMessageToolCallParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce9AssistantfG0V0cef8ToolCallG0V2idSSvp":{"name":"id","abstract":"The ID of the tool call.
","parent_name":"ChatCompletionMessageToolCallParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionAssistantMessageParam/ChatCompletionMessageToolCallParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce9AssistantfG0V0cef8ToolCallG0V8functionAI08FunctionJ0Vvp":{"name":"function","abstract":"The function that the model called.
","parent_name":"ChatCompletionMessageToolCallParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionAssistantMessageParam/ChatCompletionMessageToolCallParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce9AssistantfG0V0cef8ToolCallG0V4typeAC0ceiG0V9ToolsTypeOvp":{"name":"type","abstract":"The type of the tool. Currently, only function
is supported.
/ The role of the messages author, in this case assistant.
","parent_name":"ChatCompletionAssistantMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionAssistantMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce9AssistantfG0V7contentSSSgvp":{"name":"content","abstract":"The contents of the assistant message. Required unless tool_calls is specified.
","parent_name":"ChatCompletionAssistantMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionAssistantMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce9AssistantfG0V4nameSSSgvp":{"name":"name","abstract":"The name of the author of this message. name
is required if role is function
, and it should be the name of the function whose response is in the content
. May contain a-z, A-Z, 0-9, and underscores, with a maximum length of 64 characters.
The tool calls generated by the model, such as function calls.
","parent_name":"ChatCompletionAssistantMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionAssistantMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce9AssistantfG0V7content4name9toolCallsAGSSSg_AKSayAG0cef8ToolCallG0VGSgtcfc":{"name":"init(content:name:toolCalls:)","parent_name":"ChatCompletionAssistantMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionAssistantMessageParam/CodingKeys.html":{"name":"CodingKeys","parent_name":"ChatCompletionAssistantMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionAssistantMessageParam/ChatCompletionMessageToolCallParam.html":{"name":"ChatCompletionMessageToolCallParam","parent_name":"ChatCompletionAssistantMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/VisionContent/ChatCompletionContentPartImageParam/CodingKeys.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7ContentO06VisionI0O0cei9PartImageG0V10CodingKeysO8imageUrlyA2OmF":{"name":"imageUrl","parent_name":"CodingKeys"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/VisionContent/ChatCompletionContentPartImageParam/CodingKeys.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7ContentO06VisionI0O0cei9PartImageG0V10CodingKeysO4typeyA2OmF":{"name":"type","parent_name":"CodingKeys"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/VisionContent/ChatCompletionContentPartImageParam/ImageURL/Detail.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7ContentO06VisionI0O0cei9PartImageG0V0L3URLV6DetailO4autoyA2QmF":{"name":"auto","parent_name":"Detail"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/VisionContent/ChatCompletionContentPartImageParam/ImageURL/Detail.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7ContentO06VisionI0O0cei9PartImageG0V0L3URLV6DetailO3lowyA2QmF":{"
name":"low","parent_name":"Detail"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/VisionContent/ChatCompletionContentPartImageParam/ImageURL/Detail.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7ContentO06VisionI0O0cei9PartImageG0V0L3URLV6DetailO4highyA2QmF":{"name":"high","parent_name":"Detail"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/VisionContent/ChatCompletionContentPartImageParam/ImageURL.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7ContentO06VisionI0O0cei9PartImageG0V0L3URLV3urlSSvp":{"name":"url","abstract":"Either a URL of the image or the base64 encoded image data.
","parent_name":"ImageURL"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/VisionContent/ChatCompletionContentPartImageParam/ImageURL.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7ContentO06VisionI0O0cei9PartImageG0V0L3URLV6detailAO6DetailOvp":{"name":"detail","abstract":"Specifies the detail level of the image. Learn more in the","parent_name":"ImageURL"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/VisionContent/ChatCompletionContentPartImageParam/ImageURL.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7ContentO06VisionI0O0cei9PartImageG0V0L3URLV3url6detailAOSS_AO6DetailOtcfc":{"name":"init(url:detail:)","parent_name":"ImageURL"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/VisionContent/ChatCompletionContentPartImageParam/ImageURL.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7ContentO06VisionI0O0cei9PartImageG0V0L3URLV3url6detailAO10Foundation4DataV_AO6DetailOtcfc":{"name":"init(url:detail:)","parent_name":"ImageURL"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/VisionContent/ChatCompletionContentPartImageParam/ImageURL/Detail.html":{"name":"Detail","parent_name":"ImageURL"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/VisionContent/ChatCompletionContentPartImageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7ContentO06VisionI0O0cei9PartImageG0V8imageUrlAM0L3URLVvp":{"name":"imageUrl","parent_name":"ChatCompletionContentPartImageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/VisionContent/ChatCompletionContentPartImageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7ContentO06VisionI0O0cei9PartImageG0V4typeSSvp":{"name":"type","abstract":"
The type of the content part.
","parent_name":"ChatCompletionContentPartImageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/VisionContent/ChatCompletionContentPartImageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7ContentO06VisionI0O0cei9PartImageG0V8imageUrlA2M0L3URLV_tcfc":{"name":"init(imageUrl:)","parent_name":"ChatCompletionContentPartImageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/VisionContent/ChatCompletionContentPartImageParam/ImageURL.html":{"name":"ImageURL","parent_name":"ChatCompletionContentPartImageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/VisionContent/ChatCompletionContentPartImageParam/CodingKeys.html":{"name":"CodingKeys","parent_name":"ChatCompletionContentPartImageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/VisionContent/ChatCompletionContentPartTextParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7ContentO06VisionI0O0cei8PartTextG0V4textSSvp":{"name":"text","abstract":"The text content.
","parent_name":"ChatCompletionContentPartTextParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/VisionContent/ChatCompletionContentPartTextParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7ContentO06VisionI0O0cei8PartTextG0V4typeSSvp":{"name":"type","abstract":"The type of the content part.
","parent_name":"ChatCompletionContentPartTextParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/VisionContent/ChatCompletionContentPartTextParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7ContentO06VisionI0O0cei8PartTextG0V4textAMSS_tcfc":{"name":"init(text:)","parent_name":"ChatCompletionContentPartTextParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/VisionContent.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7ContentO06VisionI0O04chatei8PartTextG0yA2K0ceilmG0VcAKmF":{"name":"chatCompletionContentPartTextParam(_:)","parent_name":"VisionContent"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/VisionContent.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7ContentO06VisionI0O04chatei9PartImageG0yA2K0ceilmG0VcAKmF":{"name":"chatCompletionContentPartImageParam(_:)","parent_name":"VisionContent"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/VisionContent.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7ContentO06VisionI0O4textSSSgvp":{"name":"text","parent_name":"VisionContent"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/VisionContent.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7ContentO06VisionI0O8imageUrlAK0cei9PartImageG0V0N3URLVSgvp":{"name":"imageUrl","parent_name":"VisionContent"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/VisionContent.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7ContentO06VisionI0O04chatei8PartTextG0A2K0ceilmG0V_tcfc":{"name":"init(chatCompletionContentPartTextParam:)","parent_name":"VisionContent"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/VisionContent.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7ContentO06VisionI0O04chat
ei9PartImageG0A2K0ceilmG0V_tcfc":{"name":"init(chatCompletionContentPartImageParam:)","parent_name":"VisionContent"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/VisionContent.html#/s:SE6encode2toys7Encoder_p_tKF":{"name":"encode(to:)","parent_name":"VisionContent"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/VisionContent/ChatCompletionContentPartTextParam.html":{"name":"ChatCompletionContentPartTextParam","parent_name":"VisionContent"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/VisionContent/ChatCompletionContentPartImageParam.html":{"name":"ChatCompletionContentPartImageParam","parent_name":"VisionContent"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/CodingKeys.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7ContentO10CodingKeysO6stringyA2KmF":{"name":"string","parent_name":"CodingKeys"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/CodingKeys.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7ContentO10CodingKeysO6visionyA2KmF":{"name":"vision","parent_name":"CodingKeys"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7ContentO6stringyAISScAImF":{"name":"string(_:)","parent_name":"Content"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7ContentO6visionyAISayAI06VisionI0OGcAImF":{"name":"vision(_:)","parent_name":"Content"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7ContentO6stringSSSgvp":{"name":"string","parent_name":"Content"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content.html#/s:6
OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7ContentO6stringAISS_tcfc":{"name":"init(string:)","parent_name":"Content"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7ContentO6visionAISayAI06VisionI0OG_tcfc":{"name":"init(vision:)","parent_name":"Content"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/CodingKeys.html":{"name":"CodingKeys","parent_name":"Content"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content.html#/s:SE6encode2toys7Encoder_p_tKF":{"name":"encode(to:)","parent_name":"Content"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/VisionContent.html":{"name":"VisionContent","parent_name":"Content"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content.html#/s:Se4fromxs7Decoder_p_tKcfc":{"name":"init(from:)","parent_name":"Content"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V4Rolea":{"name":"Role","parent_name":"ChatCompletionUserMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7contentAG7ContentOvp":{"name":"content","abstract":"The contents of the user message.
","parent_name":"ChatCompletionUserMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V4roleAE4RoleOvp":{"name":"role","abstract":"The role of the messages author, in this case user.
","parent_name":"ChatCompletionUserMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V4nameSSSgvp":{"name":"name","abstract":"An optional name for the participant. Provides the model information to differentiate between participants of the same role.
","parent_name":"ChatCompletionUserMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7content4nameA2G7ContentO_SSSgtcfc":{"name":"init(content:name:)","parent_name":"ChatCompletionUserMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content.html":{"name":"Content","parent_name":"ChatCompletionUserMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionSystemMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce6SystemfG0V4Rolea":{"name":"Role","parent_name":"ChatCompletionSystemMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionSystemMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce6SystemfG0V7contentSSvp":{"name":"content","abstract":"The contents of the system message.
","parent_name":"ChatCompletionSystemMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionSystemMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce6SystemfG0V4roleAE4RoleOvp":{"name":"role","abstract":"The role of the messages author, in this case system.
","parent_name":"ChatCompletionSystemMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionSystemMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce6SystemfG0V4nameSSSgvp":{"name":"name","abstract":"An optional name for the participant. Provides the model information to differentiate between participants of the same role.
","parent_name":"ChatCompletionSystemMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionSystemMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce6SystemfG0V7content4nameAGSS_SSSgtcfc":{"name":"init(content:name:)","parent_name":"ChatCompletionSystemMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO6systemyA2E0ce6SystemfG0VcAEmF":{"name":"system(_:)","parent_name":"ChatCompletionMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO4useryA2E0ce4UserfG0VcAEmF":{"name":"user(_:)","parent_name":"ChatCompletionMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO9assistantyA2E0ce9AssistantfG0VcAEmF":{"name":"assistant(_:)","parent_name":"ChatCompletionMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO4toolyA2E0ce4ToolfG0VcAEmF":{"name":"tool(_:)","parent_name":"ChatCompletionMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO7contentAE0ce4UserfG0V7ContentOSgvp":{"name":"content","parent_name":"ChatCompletionMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO4roleAE4RoleOvp":{"name":"role","parent_name":"ChatCompletionMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO4nameSSSgvp":{"name":"name","parent_name":"ChatCompletionMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO10toolCallIdSSSgvp":{"name":"toolCallId","parent_name":"ChatCompletionMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO9toolCallsSayAE0ce9AssistantfG0V0cef8ToolCallG0VGSgvp":{"name":"toolCalls","parent_nam
e":"ChatCompletionMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO4role7content4name9toolCalls0K6CallIdAESgAE4RoleO_SSSgANSayAE0ce9AssistantfG0V0cef4ToolmG0VGSgANtcfc":{"name":"init(role:content:name:toolCalls:toolCallId:)","parent_name":"ChatCompletionMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO4role7content4nameAESgAE4RoleO_SayAE0ce4UserfG0V7ContentO06VisionM0OGSSSgtcfc":{"name":"init(role:content:name:)","parent_name":"ChatCompletionMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam.html#/s:SE6encode2toys7Encoder_p_tKF":{"name":"encode(to:)","parent_name":"ChatCompletionMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionSystemMessageParam.html":{"name":"ChatCompletionSystemMessageParam","parent_name":"ChatCompletionMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam.html":{"name":"ChatCompletionUserMessageParam","parent_name":"ChatCompletionMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionAssistantMessageParam.html":{"name":"ChatCompletionAssistantMessageParam","parent_name":"ChatCompletionMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionToolMessageParam.html":{"name":"ChatCompletionToolMessageParam","parent_name":"ChatCompletionMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/Role.html":{"name":"Role","parent_name":"ChatCompletionMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO4fromAEs7Decoder_p_tKcfc":{"name":"init(from:)","parent_name":"ChatCompletionMessageParam"},"Structs/ChatQuery.html#/s:6OpenAI9ChatQueryV8messagesSayAC0C22CompletionMessageParamOGvp":{"name":"messages","abstract":"A list of messages comprising the conversation so far
","parent_name":"ChatQuery"},"Structs/ChatQuery.html#/s:6OpenAI9ChatQueryV5modelSSvp":{"name":"model","abstract":"ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.","parent_name":"ChatQuery"},"Structs/ChatQuery.html#/s:6OpenAI9ChatQueryV16frequencyPenaltySdSgvp":{"name":"frequencyPenalty","abstract":"
Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model’s likelihood to repeat the same line verbatim.","parent_name":"ChatQuery"},"Structs/ChatQuery.html#/s:6OpenAI9ChatQueryV9logitBiasSDySSSiGSgvp":{"name":"logitBias","abstract":"
Modify the likelihood of specified tokens appearing in the completion.","parent_name":"ChatQuery"},"Structs/ChatQuery.html#/s:6OpenAI9ChatQueryV8logprobsSbSgvp":{"name":"logprobs","abstract":"
Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the content of message. This option is currently not available on the gpt-4-vision-preview model.","parent_name":"ChatQuery"},"Structs/ChatQuery.html#/s:6OpenAI9ChatQueryV9maxTokensSiSgvp":{"name":"maxTokens","abstract":"
The maximum number of tokens to generate in the completion.","parent_name":"ChatQuery"},"Structs/ChatQuery.html#/s:6OpenAI9ChatQueryV1nSiSgvp":{"name":"n","abstract":"
How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep n as 1 to minimize costs.","parent_name":"ChatQuery"},"Structs/ChatQuery.html#/s:6OpenAI9ChatQueryV15presencePenaltySdSgvp":{"name":"presencePenalty","abstract":"
Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model’s likelihood to talk about new topics.","parent_name":"ChatQuery"},"Structs/ChatQuery.html#/s:6OpenAI9ChatQueryV14responseFormatAC08ResponseF0OSgvp":{"name":"responseFormat","abstract":"
An object specifying the format that the model must output. Compatible with gpt-4-1106-preview and gpt-3.5-turbo-1106.","parent_name":"ChatQuery"},"Structs/ChatQuery.html#/s:6OpenAI9ChatQueryV4seedSiSgvp":{"name":"seed","abstract":"
This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.
","parent_name":"ChatQuery"},"Structs/ChatQuery.html#/s:6OpenAI9ChatQueryV4stopAC4StopOSgvp":{"name":"stop","abstract":"Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.","parent_name":"ChatQuery"},"Structs/ChatQuery.html#/s:6OpenAI9ChatQueryV11temperatureSdSgvp":{"name":"temperature","abstract":"
What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.","parent_name":"ChatQuery"},"Structs/ChatQuery.html#/s:6OpenAI9ChatQueryV10toolChoiceAC0C33CompletionFunctionCallOptionParamOSgvp":{"name":"toolChoice","abstract":"
Controls which (if any) function is called by the model. none means the model will not call a function and instead generates a message. auto means the model can pick between generating a message or calling a function. Specifying a particular function via {“type”: “function”, “function”: {“name”: “my_function”}} forces the model to call that function.","parent_name":"ChatQuery"},"Structs/ChatQuery.html#/s:6OpenAI9ChatQueryV5toolsSayAC0C19CompletionToolParamVGSgvp":{"name":"tools","abstract":"
A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for.
","parent_name":"ChatQuery"},"Structs/ChatQuery.html#/s:6OpenAI9ChatQueryV11topLogprobsSiSgvp":{"name":"topLogprobs","abstract":"An integer between 0 and 5 specifying the number of most likely tokens to return at each token position, each with an associated log probability. logprobs must be set to true if this parameter is used.
","parent_name":"ChatQuery"},"Structs/ChatQuery.html#/s:6OpenAI9ChatQueryV4topPSdSgvp":{"name":"topP","abstract":"An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.","parent_name":"ChatQuery"},"Structs/ChatQuery.html#/s:6OpenAI9ChatQueryV4userSSSgvp":{"name":"user","abstract":"
A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.","parent_name":"ChatQuery"},"Structs/ChatQuery.html#/s:6OpenAI9ChatQueryV6streamSbvp":{"name":"stream","abstract":"
If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message.","parent_name":"ChatQuery"},"Structs/ChatQuery.html#/s:6OpenAI9ChatQueryV8messages5model16frequencyPenalty9logitBias8logprobs9maxTokens1n08presenceH014responseFormat4seed4stop11temperature10toolChoice5tools11topLogprobs0W1P4user6streamACSayAC0C22CompletionMessageParamOG_SSSdSgSDySSSiGSgSbSgSiSgA1_AyC08ResponseP0OSgA1_AC4StopOSgAyC0C33CompletionFunctionCallOptionParamOSgSayAC0C19CompletionToolParamVGSgA1_AYSSSgSbtcfc":{"name":"init(messages:model:frequencyPenalty:logitBias:logprobs:maxTokens:n:presencePenalty:responseFormat:seed:stop:temperature:toolChoice:tools:topLogprobs:topP:user:stream:)","parent_name":"ChatQuery"},"Structs/ChatQuery/ChatCompletionMessageParam.html":{"name":"ChatCompletionMessageParam","parent_name":"ChatQuery"},"Structs/ChatQuery/Stop.html":{"name":"Stop","parent_name":"ChatQuery"},"Structs/ChatQuery/ResponseFormat.html":{"name":"ResponseFormat","parent_name":"ChatQuery"},"Structs/ChatQuery/ChatCompletionFunctionCallOptionParam.html":{"name":"ChatCompletionFunctionCallOptionParam","parent_name":"ChatQuery"},"Structs/ChatQuery/ChatCompletionToolParam.html":{"name":"ChatCompletionToolParam","parent_name":"ChatQuery"},"Structs/ChatQuery/CodingKeys.html":{"name":"CodingKeys","parent_name":"ChatQuery"},"Structs/AudioTranslationResult.html#/s:6OpenAI22AudioTranslationResultV4textSSvp":{"name":"text","abstract":"
The translated text.
","parent_name":"AudioTranslationResult"},"Structs/AudioTranslationQuery.html#/s:6OpenAI21AudioTranslationQueryV8FileTypea":{"name":"FileType","parent_name":"AudioTranslationQuery"},"Structs/AudioTranslationQuery.html#/s:6OpenAI21AudioTranslationQueryV14ResponseFormata":{"name":"ResponseFormat","parent_name":"AudioTranslationQuery"},"Structs/AudioTranslationQuery.html#/s:6OpenAI21AudioTranslationQueryV4file10Foundation4DataVvp":{"name":"file","abstract":"The audio file object (not file name) translate, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
","parent_name":"AudioTranslationQuery"},"Structs/AudioTranslationQuery.html#/s:6OpenAI21AudioTranslationQueryV8fileTypeAA0c13TranscriptionE0V04FileG0Ovp":{"name":"fileType","parent_name":"AudioTranslationQuery"},"Structs/AudioTranslationQuery.html#/s:6OpenAI21AudioTranslationQueryV5modelSSvp":{"name":"model","abstract":"ID of the model to use. Only whisper-1 is currently available.
","parent_name":"AudioTranslationQuery"},"Structs/AudioTranslationQuery.html#/s:6OpenAI21AudioTranslationQueryV14responseFormatAA0c13TranscriptionE0V08ResponseG0OSgvp":{"name":"responseFormat","abstract":"The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.","parent_name":"AudioTranslationQuery"},"Structs/AudioTranslationQuery.html#/s:6OpenAI21AudioTranslationQueryV6promptSSSgvp":{"name":"prompt","abstract":"
An optional text to guide the model’s style or continue a previous audio segment. The prompt should be in English.","parent_name":"AudioTranslationQuery"},"Structs/AudioTranslationQuery.html#/s:6OpenAI21AudioTranslationQueryV11temperatureSdSgvp":{"name":"temperature","abstract":"
The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.","parent_name":"AudioTranslationQuery"},"Structs/AudioTranslationQuery.html#/s:6OpenAI21AudioTranslationQueryV4file0F4Type5model6prompt11temperature14responseFormatAC10Foundation4DataV_AA0c13TranscriptionE0V04FileG0OS2SSgSdSgAN08ResponseL0OSgtcfc":{"name":"init(file:fileType:model:prompt:temperature:responseFormat:)","parent_name":"AudioTranslationQuery"},"Structs/AudioTranscriptionResult.html#/s:6OpenAI24AudioTranscriptionResultV4textSSvp":{"name":"text","abstract":"
The transcribed text.
","parent_name":"AudioTranscriptionResult"},"Structs/AudioTranscriptionQuery/FileType.html#/s:6OpenAI23AudioTranscriptionQueryV8FileTypeO4flacyA2EmF":{"name":"flac","parent_name":"FileType"},"Structs/AudioTranscriptionQuery/FileType.html#/s:6OpenAI23AudioTranscriptionQueryV8FileTypeO3mp3yA2EmF":{"name":"mp3","parent_name":"FileType"},"Structs/AudioTranscriptionQuery/FileType.html#/s:6OpenAI23AudioTranscriptionQueryV8FileTypeO4mpgayA2EmF":{"name":"mpga","parent_name":"FileType"},"Structs/AudioTranscriptionQuery/FileType.html#/s:6OpenAI23AudioTranscriptionQueryV8FileTypeO3mp4yA2EmF":{"name":"mp4","parent_name":"FileType"},"Structs/AudioTranscriptionQuery/FileType.html#/s:6OpenAI23AudioTranscriptionQueryV8FileTypeO3m4ayA2EmF":{"name":"m4a","parent_name":"FileType"},"Structs/AudioTranscriptionQuery/FileType.html#/s:6OpenAI23AudioTranscriptionQueryV8FileTypeO4mpegyA2EmF":{"name":"mpeg","parent_name":"FileType"},"Structs/AudioTranscriptionQuery/FileType.html#/s:6OpenAI23AudioTranscriptionQueryV8FileTypeO3oggyA2EmF":{"name":"ogg","parent_name":"FileType"},"Structs/AudioTranscriptionQuery/FileType.html#/s:6OpenAI23AudioTranscriptionQueryV8FileTypeO3wavyA2EmF":{"name":"wav","parent_name":"FileType"},"Structs/AudioTranscriptionQuery/FileType.html#/s:6OpenAI23AudioTranscriptionQueryV8FileTypeO4webmyA2EmF":{"name":"webm","parent_name":"FileType"},"Structs/AudioTranscriptionQuery/ResponseFormat.html#/s:6OpenAI23AudioTranscriptionQueryV14ResponseFormatO4jsonyA2EmF":{"name":"json","parent_name":"ResponseFormat"},"Structs/AudioTranscriptionQuery/ResponseFormat.html#/s:6OpenAI23AudioTranscriptionQueryV14ResponseFormatO4textyA2EmF":{"name":"text","parent_name":"ResponseFormat"},"Structs/AudioTranscriptionQuery/ResponseFormat.html#/s:6OpenAI23AudioTranscriptionQueryV14ResponseFormatO11verboseJsonyA2EmF":{"name":"verboseJson","parent_name":"ResponseFormat"},"Structs/AudioTranscriptionQuery/ResponseFormat.html#/s:6OpenAI23AudioTranscriptionQueryV14ResponseFormatO3srtyA2EmF":{"name":"srt
","parent_name":"ResponseFormat"},"Structs/AudioTranscriptionQuery/ResponseFormat.html#/s:6OpenAI23AudioTranscriptionQueryV14ResponseFormatO3vttyA2EmF":{"name":"vtt","parent_name":"ResponseFormat"},"Structs/AudioTranscriptionQuery/ResponseFormat.html":{"name":"ResponseFormat","parent_name":"AudioTranscriptionQuery"},"Structs/AudioTranscriptionQuery.html#/s:6OpenAI23AudioTranscriptionQueryV4file10Foundation4DataVvp":{"name":"file","abstract":"The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
","parent_name":"AudioTranscriptionQuery"},"Structs/AudioTranscriptionQuery.html#/s:6OpenAI23AudioTranscriptionQueryV8fileTypeAC04FileG0Ovp":{"name":"fileType","parent_name":"AudioTranscriptionQuery"},"Structs/AudioTranscriptionQuery.html#/s:6OpenAI23AudioTranscriptionQueryV5modelSSvp":{"name":"model","abstract":"ID of the model to use. Only whisper-1 is currently available.
","parent_name":"AudioTranscriptionQuery"},"Structs/AudioTranscriptionQuery.html#/s:6OpenAI23AudioTranscriptionQueryV14responseFormatAC08ResponseG0OSgvp":{"name":"responseFormat","abstract":"The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.","parent_name":"AudioTranscriptionQuery"},"Structs/AudioTranscriptionQuery.html#/s:6OpenAI23AudioTranscriptionQueryV6promptSSSgvp":{"name":"prompt","abstract":"
An optional text to guide the model’s style or continue a previous audio segment. The prompt should match the audio language.
","parent_name":"AudioTranscriptionQuery"},"Structs/AudioTranscriptionQuery.html#/s:6OpenAI23AudioTranscriptionQueryV11temperatureSdSgvp":{"name":"temperature","abstract":"The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.","parent_name":"AudioTranscriptionQuery"},"Structs/AudioTranscriptionQuery.html#/s:6OpenAI23AudioTranscriptionQueryV8languageSSSgvp":{"name":"language","abstract":"
The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy and latency.","parent_name":"AudioTranscriptionQuery"},"Structs/AudioTranscriptionQuery.html#/s:6OpenAI23AudioTranscriptionQueryV4file0F4Type5model6prompt11temperature8language14responseFormatAC10Foundation4DataV_AC04FileG0OS2SSgSdSgApC08ResponseM0OSgtcfc":{"name":"init(file:fileType:model:prompt:temperature:language:responseFormat:)","parent_name":"AudioTranscriptionQuery"},"Structs/AudioTranscriptionQuery/FileType.html":{"name":"FileType","parent_name":"AudioTranscriptionQuery"},"Structs/AudioSpeechResult.html#/s:6OpenAI17AudioSpeechResultV5audio10Foundation4DataVvp":{"name":"audio","abstract":"
Audio data for one of the following formats :mp3
, opus
, aac
, flac
Encapsulates the voices available for audio generation.
","parent_name":"AudioSpeechQuery"},"Structs/AudioSpeechQuery/AudioSpeechResponseFormat.html":{"name":"AudioSpeechResponseFormat","abstract":"Encapsulates the response formats available for audio data.
","parent_name":"AudioSpeechQuery"},"Structs/AudioSpeechQuery.html#/s:6OpenAI16AudioSpeechQueryV5inputSSvp":{"name":"input","abstract":"The text to generate audio for. The maximum length is 4096 characters.
","parent_name":"AudioSpeechQuery"},"Structs/AudioSpeechQuery.html#/s:6OpenAI16AudioSpeechQueryV5modelSSvp":{"name":"model","abstract":"One of the available TTS models: tts-1 or tts-1-hd
","parent_name":"AudioSpeechQuery"},"Structs/AudioSpeechQuery.html#/s:6OpenAI16AudioSpeechQueryV5voiceAC0cD5VoiceOvp":{"name":"voice","abstract":"The voice to use when generating the audio. Supported voices are alloy, echo, fable, onyx, nova, and shimmer. Previews of the voices are available in the Text to speech guide.","parent_name":"AudioSpeechQuery"},"Structs/AudioSpeechQuery.html#/s:6OpenAI16AudioSpeechQueryV14responseFormatAC0cd8ResponseG0OSgvp":{"name":"responseFormat","abstract":"
The format to audio in. Supported formats are mp3, opus, aac, and flac.","parent_name":"AudioSpeechQuery"},"Structs/AudioSpeechQuery.html#/s:6OpenAI16AudioSpeechQueryV5speedSSSgvp":{"name":"speed","abstract":"
The speed of the generated audio. Select a value from 0.25 to 4.0. 1.0 is the default.","parent_name":"AudioSpeechQuery"},"Structs/AudioSpeechQuery/CodingKeys.html":{"name":"CodingKeys","parent_name":"AudioSpeechQuery"},"Structs/AudioSpeechQuery.html#/s:6OpenAI16AudioSpeechQueryV5model5input5voice14responseFormat5speedACSS_SSAC0cD5VoiceOAC0cd8ResponseJ0OSdSgtcfc":{"name":"init(model:input:voice:responseFormat:speed:)","parent_name":"AudioSpeechQuery"},"Structs/AudioSpeechQuery/Speed.html":{"name":"Speed","parent_name":"AudioSpeechQuery"},"Structs/AudioSpeechQuery.html#/s:6OpenAI16AudioSpeechQueryV09normalizeD5SpeedySSSdSgFZ":{"name":"normalizeSpeechSpeed(_:)","parent_name":"AudioSpeechQuery"},"Structs/APIErrorResponse.html#/s:6OpenAI16APIErrorResponseV5errorAA0C0Vvp":{"name":"error","parent_name":"APIErrorResponse"},"Structs/APIErrorResponse.html#/s:10Foundation14LocalizedErrorP16errorDescriptionSSSgvp":{"name":"errorDescription","parent_name":"APIErrorResponse"},"Structs/APIError.html#/s:6OpenAI8APIErrorV7messageSSvp":{"name":"message","parent_name":"APIError"},"Structs/APIError.html#/s:6OpenAI8APIErrorV4typeSSvp":{"name":"type","parent_name":"APIError"},"Structs/APIError.html#/s:6OpenAI8APIErrorV5paramSSSgvp":{"name":"param","parent_name":"APIError"},"Structs/APIError.html#/s:6OpenAI8APIErrorV4codeSSSgvp":{"name":"code","parent_name":"APIError"},"Structs/APIError.html#/s:6OpenAI8APIErrorV7message4type5param4codeACSS_S2SSgAHtcfc":{"name":"init(message:type:param:code:)","parent_name":"APIError"},"Structs/APIError.html#/s:Se4fromxs7Decoder_p_tKcfc":{"name":"init(from:)","parent_name":"APIError"},"Structs/APIError.html#/s:10Foundation14LocalizedErrorP16errorDescriptionSSSgvp":{"name":"errorDescription","parent_name":"APIError"},"Structs/APIError.html":{"name":"APIError"},"Structs/APIErrorResponse.html":{"name":"APIErrorResponse"},"Structs/AudioSpeechQuery.html":{"name":"AudioSpeechQuery","abstract":"
Generates audio from the input text."},"Structs/AudioSpeechResult.html":{"name":"AudioSpeechResult","abstract":"
The audio file content."},"Structs/AudioTranscriptionQuery.html":{"name":"AudioTranscriptionQuery"},"Structs/AudioTranscriptionResult.html":{"name":"AudioTranscriptionResult"},"Structs/AudioTranslationQuery.html":{"name":"AudioTranslationQuery","abstract":"
Translates audio into English.
"},"Structs/AudioTranslationResult.html":{"name":"AudioTranslationResult"},"Structs/ChatQuery.html":{"name":"ChatQuery","abstract":"Creates a model response for the given chat conversation"},"Structs/ChatResult.html":{"name":"ChatResult","abstract":"
https://platform.openai.com/docs/api-reference/chat/object"},"Structs/ChatStreamResult.html":{"name":"ChatStreamResult"},"Structs/CompletionsQuery.html":{"name":"CompletionsQuery"},"Structs/CompletionsResult.html":{"name":"CompletionsResult"},"Structs/EditsQuery.html":{"name":"EditsQuery"},"Structs/EditsResult.html":{"name":"EditsResult"},"Structs/EmbeddingsQuery.html":{"name":"EmbeddingsQuery"},"Structs/EmbeddingsResult.html":{"name":"EmbeddingsResult"},"Structs/ImageEditsQuery.html":{"name":"ImageEditsQuery"},"Structs/ImageVariationsQuery.html":{"name":"ImageVariationsQuery"},"Structs/ImagesQuery.html":{"name":"ImagesQuery","abstract":"
Given a prompt and/or an input image, the model will generate a new image."},"Structs/ImagesResult.html":{"name":"ImagesResult","abstract":"
Returns a list of image objects.
"},"Structs/ModelQuery.html":{"name":"ModelQuery","abstract":"Retrieves a model instance, providing basic information about the model such as the owner and permissioning.
"},"Structs/ModelResult.html":{"name":"ModelResult","abstract":"The model object matching the specified ID.
"},"Structs/ModelsResult.html":{"name":"ModelsResult","abstract":"A list of model objects.
"},"Structs/ModerationsQuery.html":{"name":"ModerationsQuery"},"Structs/ModerationsResult.html":{"name":"ModerationsResult"},"Structs/Vector.html":{"name":"Vector"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolP11completions5query10completionyAA16CompletionsQueryV_ys6ResultOyAA0gI0Vs5Error_pGctF":{"name":"completions(query:completion:)","abstract":"This function sends a completions query to the OpenAI API and retrieves generated completions in response. The Completions API enables you to build applications using OpenAI’s language models, like the powerful GPT-3.
","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolP17completionsStream5query8onResult10completionyAA16CompletionsQueryV_ys0H0OyAA0jH0Vs5Error_pGcysAN_pSgcSgtF":{"name":"completionsStream(query:onResult:completion:)","abstract":"This function sends a completions query to the OpenAI API and retrieves generated completions in response. The Completions API enables you to build applications using OpenAI’s language models, like the powerful GPT-3. The result is returned by chunks.
","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolP6images5query10completionyAA11ImagesQueryV_ys6ResultOyAA0gI0Vs5Error_pGctF":{"name":"images(query:completion:)","abstract":"This function sends an images query to the OpenAI API and retrieves generated images in response. The Images Generation API enables you to create various images or graphics using OpenAI’s powerful deep learning models.
","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolP10imageEdits5query10completionyAA05ImageE5QueryV_ys6ResultOyAA06ImagesJ0Vs5Error_pGctF":{"name":"imageEdits(query:completion:)","abstract":"This function sends an image edit query to the OpenAI API and retrieves generated images in response. The Images Edit API enables you to edit images or graphics using OpenAI’s powerful deep learning models.
","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolP15imageVariations5query10completionyAA05ImageE5QueryV_ys6ResultOyAA06ImagesJ0Vs5Error_pGctF":{"name":"imageVariations(query:completion:)","abstract":"This function sends an image variation query to the OpenAI API and retrieves generated images in response. The Images Variations API enables you to create a variation of a given image using OpenAI’s powerful deep learning models.
","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolP10embeddings5query10completionyAA15EmbeddingsQueryV_ys6ResultOyAA0gI0Vs5Error_pGctF":{"name":"embeddings(query:completion:)","abstract":"This function sends an embeddings query to the OpenAI API and retrieves embeddings in response. The Embeddings API enables you to generate high-dimensional vector representations of texts, which can be used for various natural language processing tasks such as semantic similarity, clustering, and classification.
","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolP5chats5query10completionyAA9ChatQueryV_ys6ResultOyAA0gI0Vs5Error_pGctF":{"name":"chats(query:completion:)","abstract":"This function sends a chat query to the OpenAI API and retrieves chat conversation responses. The Chat API enables you to build chatbots or conversational applications using OpenAI’s powerful natural language models, like GPT-3.
","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolP11chatsStream5query8onResult10completionyAA9ChatQueryV_ys0H0OyAA0jeH0Vs5Error_pGcysAN_pSgcSgtF":{"name":"chatsStream(query:onResult:completion:)","abstract":"This function sends a chat query to the OpenAI API and retrieves chat stream conversation responses. The Chat API enables you to build chatbots or conversational applications using OpenAI’s powerful natural language models, like GPT-3. The result is returned by chunks.
","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolP5edits5query10completionyAA10EditsQueryV_ys6ResultOyAA0gI0Vs5Error_pGctF":{"name":"edits(query:completion:)","abstract":"This function sends an edits query to the OpenAI API and retrieves an edited version of the prompt based on the instruction given.
","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolP5model5query10completionyAA10ModelQueryV_ys6ResultOyAA0gI0Vs5Error_pGctF":{"name":"model(query:completion:)","abstract":"This function sends a model query to the OpenAI API and retrieves a model instance, providing owner information. The Models API in this usage enables you to gather detailed information on the model in question, like GPT-3.
","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolP6models10completionyys6ResultOyAA06ModelsF0Vs5Error_pGc_tF":{"name":"models(completion:)","abstract":"This function sends a models query to the OpenAI API and retrieves a list of models. The Models API in this usage enables you to list all the available models.
","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolP11moderations5query10completionyAA16ModerationsQueryV_ys6ResultOyAA0gI0Vs5Error_pGctF":{"name":"moderations(query:completion:)","abstract":"This function sends a moderations query to the OpenAI API and retrieves a list of category results to classify how text may violate OpenAI’s Content Policy.
","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolP17audioCreateSpeech5query10completionyAA05AudioF5QueryV_ys6ResultOyAA0ifK0Vs5Error_pGctF":{"name":"audioCreateSpeech(query:completion:)","abstract":"This function sends an AudioSpeechQuery
to the OpenAI API to create audio speech from text using a specific voice and format.
Transcribes audio data using OpenAI’s audio transcription API and completes the operation asynchronously.
","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolP17audioTranslations5query10completionyAA21AudioTranslationQueryV_ys6ResultOyAA0hiK0Vs5Error_pGctF":{"name":"audioTranslations(query:completion:)","abstract":"Translates audio data using OpenAI’s audio translation API and completes the operation asynchronously.
","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE11completions5queryAA17CompletionsResultVAA0F5QueryV_tYaKF":{"name":"completions(query:)","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE17completionsStream5queryScsyAA17CompletionsResultVs5Error_pGAA0G5QueryV_tF":{"name":"completionsStream(query:)","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE6images5queryAA12ImagesResultVAA0F5QueryV_tYaKF":{"name":"images(query:)","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE10imageEdits5queryAA12ImagesResultVAA05ImageE5QueryV_tYaKF":{"name":"imageEdits(query:)","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE15imageVariations5queryAA12ImagesResultVAA05ImageE5QueryV_tYaKF":{"name":"imageVariations(query:)","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE10embeddings5queryAA16EmbeddingsResultVAA0F5QueryV_tYaKF":{"name":"embeddings(query:)","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE5chats5queryAA10ChatResultVAA0F5QueryV_tYaKF":{"name":"chats(query:)","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE11chatsStream5queryScsyAA04ChatE6ResultVs5Error_pGAA0G5QueryV_tF":{"name":"chatsStream(query:)","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE5edits5queryAA11EditsResultVAA0F5QueryV_tYaKF":{"name":"edits(query:)","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE5model5queryAA11ModelResultVAA0F5QueryV_tYaKF":{"name":"model(query:)","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE6modelsAA12ModelsResultVyYaKF":{"name":"models()","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocol
PAAE11moderations5queryAA17ModerationsResultVAA0F5QueryV_tYaKF":{"name":"moderations(query:)","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE17audioCreateSpeech5queryAA05AudioF6ResultVAA0hF5QueryV_tYaKF":{"name":"audioCreateSpeech(query:)","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE19audioTranscriptions5queryAA24AudioTranscriptionResultVAA0gH5QueryV_tYaKF":{"name":"audioTranscriptions(query:)","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE17audioTranslations5queryAA22AudioTranslationResultVAA0gH5QueryV_tYaKF":{"name":"audioTranslations(query:)","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE11completions5query7Combine12AnyPublisherVyAA17CompletionsResultVs5Error_pGAA0I5QueryV_tF":{"name":"completions(query:)","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE17completionsStream5query7Combine12AnyPublisherVys6ResultOyAA011CompletionsJ0Vs5Error_pGsAM_pGAA0K5QueryV_tF":{"name":"completionsStream(query:)","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE6images5query7Combine12AnyPublisherVyAA12ImagesResultVs5Error_pGAA0I5QueryV_tF":{"name":"images(query:)","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE10imageEdits5query7Combine12AnyPublisherVyAA12ImagesResultVs5Error_pGAA05ImageE5QueryV_tF":{"name":"imageEdits(query:)","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE15imageVariations5query7Combine12AnyPublisherVyAA12ImagesResultVs5Error_pGAA05ImageE5QueryV_tF":{"name":"imageVariations(query:)","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE10embeddings5query7Combine12AnyPublisherVyAA16EmbeddingsResultVs5Error_pGAA0I5QueryV_tF":{"name":"embeddings(query:)","parent_name":"OpenAIProto
col"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE5chats5query7Combine12AnyPublisherVyAA10ChatResultVs5Error_pGAA0I5QueryV_tF":{"name":"chats(query:)","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE11chatsStream5query7Combine12AnyPublisherVys6ResultOyAA04ChateJ0Vs5Error_pGsAM_pGAA0K5QueryV_tF":{"name":"chatsStream(query:)","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE5edits5query7Combine12AnyPublisherVyAA11EditsResultVs5Error_pGAA0I5QueryV_tF":{"name":"edits(query:)","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE5model5query7Combine12AnyPublisherVyAA11ModelResultVs5Error_pGAA0I5QueryV_tF":{"name":"model(query:)","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE6models7Combine12AnyPublisherVyAA12ModelsResultVs5Error_pGyF":{"name":"models()","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE11moderations5query7Combine12AnyPublisherVyAA17ModerationsResultVs5Error_pGAA0I5QueryV_tF":{"name":"moderations(query:)","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE17audioCreateSpeech5query7Combine12AnyPublisherVyAA05AudioF6ResultVs5Error_pGAA0kF5QueryV_tF":{"name":"audioCreateSpeech(query:)","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE19audioTranscriptions5query7Combine12AnyPublisherVyAA24AudioTranscriptionResultVs5Error_pGAA0jK5QueryV_tF":{"name":"audioTranscriptions(query:)","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE17audioTranslations5query7Combine12AnyPublisherVyAA22AudioTranslationResultVs5Error_pGAA0jK5QueryV_tF":{"name":"audioTranslations(query:)","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html":{"name":"OpenAIProtocol"},"Extensions/Model.html#/s:SS6OpenAIE6gpt4_oSSvpZ":{"name":"gpt4_o","a
bstract":"gpt-4o
, currently the most advanced, multimodal flagship model that’s cheaper and faster than GPT-4 Turbo.
gpt-4o-mini
, currently the most affordable and intelligent model for fast and lightweight requests.
gpt-4-turbo
, The latest GPT-4 Turbo model with vision capabilities. Vision requests can now use JSON mode and function calling and more. Context window: 128,000 tokens
gpt-4-turbo
, gpt-4 model with improved instruction following, JSON mode, reproducible outputs, parallel function calling and more. Maximum of 4096 output tokens
gpt-4-vision-preview
, able to understand images, in addition to all other GPT-4 Turbo capabilities.
Snapshot of gpt-4-turbo-preview
from January 25th 2024. This model reduces cases of “laziness” where the model doesn’t complete a task. Also fixes the bug impacting non-English UTF-8 generations. Maximum of 4096 output tokens
Snapshot of gpt-4-turbo-preview
from November 6th 2023. Improved instruction following, JSON mode, reproducible outputs, parallel function calling and more. Maximum of 4096 output tokens
Most capable gpt-4
model, outperforms any GPT-3.5 model, able to do more complex tasks, and optimized for chat.
Snapshot of gpt-4
from June 13th 2023 with function calling data. Unlike gpt-4
, this model will not receive updates, and will be deprecated 3 months after a new version is released.
Snapshot of gpt-4
from March 14th 2023. Unlike gpt-4, this model will not receive updates, and will only be supported for a three month period ending on June 14th 2023.
Same capabilities as the base gpt-4
model but with 4x the context length. Will be updated with our latest model iteration.
Snapshot of gpt-4-32k
from June 13th 2023. Unlike gpt-4-32k
, this model will not receive updates, and will be deprecated 3 months after a new version is released.
Snapshot of gpt-4-32k
from March 14th 2023. Unlike gpt-4-32k
, this model will not receive updates, and will only be supported for a three month period ending on June 14th 2023.
Most capable gpt-3.5-turbo
model and optimized for chat. Will be updated with our latest model iteration.
Snapshot of gpt-3.5-turbo
from January 25th 2024. Decreased prices by 50%. Various improvements including higher accuracy at responding in requested formats and a fix for a bug which caused a text encoding issue for non-English language function calls.
Snapshot of gpt-3.5-turbo
from November 6th 2023. The latest gpt-3.5-turbo
model with improved instruction following, JSON mode, reproducible outputs, parallel function calling and more.
Snapshot of gpt-3.5-turbo
from June 13th 2023 with function calling data. Unlike gpt-3.5-turbo
, this model will not receive updates, and will be deprecated 3 months after a new version is released.
Snapshot of gpt-3.5-turbo
from March 1st 2023. Unlike gpt-3.5-turbo
, this model will not receive updates, and will only be supported for a three month period ending on June 1st 2023.
Same capabilities as the standard gpt-3.5-turbo
model but with 4 times the context.
Snapshot of gpt-3.5-turbo-16k
from June 13th 2023. Unlike gpt-3.5-turbo-16k
, this model will not receive updates, and will be deprecated 3 months after a new version is released.
Can do any language task with better quality, longer output, and consistent instruction-following than the curie, babbage, or ada models. Also supports inserting completions within text.
","parent_name":"Model"},"Extensions/Model.html#/s:SS6OpenAIE15textDavinci_002SSvpZ":{"name":"textDavinci_002","abstract":"Similar capabilities to text-davinci-003 but trained with supervised fine-tuning instead of reinforcement learning.
","parent_name":"Model"},"Extensions/Model.html#/s:SS6OpenAIE9textCurieSSvpZ":{"name":"textCurie","abstract":"Very capable, faster and lower cost than Davinci.
","parent_name":"Model"},"Extensions/Model.html#/s:SS6OpenAIE11textBabbageSSvpZ":{"name":"textBabbage","abstract":"Capable of straightforward tasks, very fast, and lower cost.
","parent_name":"Model"},"Extensions/Model.html#/s:SS6OpenAIE7textAdaSSvpZ":{"name":"textAda","abstract":"Capable of very simple tasks, usually the fastest model in the GPT-3 series, and lowest cost.
","parent_name":"Model"},"Extensions/Model.html#/s:SS6OpenAIE15textDavinci_001SSvpZ":{"name":"textDavinci_001","parent_name":"Model"},"Extensions/Model.html#/s:SS6OpenAIE19codeDavinciEdit_001SSvpZ":{"name":"codeDavinciEdit_001","parent_name":"Model"},"Extensions/Model.html#/s:SS6OpenAIE5tts_1SSvpZ":{"name":"tts_1","abstract":"The latest text to speech model, optimized for speed.
","parent_name":"Model"},"Extensions/Model.html#/s:SS6OpenAIE8tts_1_hdSSvpZ":{"name":"tts_1_hd","abstract":"The latest text to speech model, optimized for quality.
","parent_name":"Model"},"Extensions/Model.html#/s:SS6OpenAIE9whisper_1SSvpZ":{"name":"whisper_1","parent_name":"Model"},"Extensions/Model.html#/s:SS6OpenAIE8dall_e_2SSvpZ":{"name":"dall_e_2","parent_name":"Model"},"Extensions/Model.html#/s:SS6OpenAIE8dall_e_3SSvpZ":{"name":"dall_e_3","parent_name":"Model"},"Extensions/Model.html#/s:SS6OpenAIE7davinciSSvpZ":{"name":"davinci","abstract":"Most capable GPT-3 model. Can do any task the other models can do, often with higher quality.
","parent_name":"Model"},"Extensions/Model.html#/s:SS6OpenAIE5curieSSvpZ":{"name":"curie","abstract":"Very capable, but faster and lower cost than Davinci.
","parent_name":"Model"},"Extensions/Model.html#/s:SS6OpenAIE7babbageSSvpZ":{"name":"babbage","abstract":"Capable of straightforward tasks, very fast, and lower cost.
","parent_name":"Model"},"Extensions/Model.html#/s:SS6OpenAIE3adaSSvpZ":{"name":"ada","abstract":"Capable of very simple tasks, usually the fastest model in the GPT-3 series, and lowest cost.
","parent_name":"Model"},"Extensions/Model.html#/s:SS6OpenAIE16textEmbeddingAdaSSvpZ":{"name":"textEmbeddingAda","parent_name":"Model"},"Extensions/Model.html#/s:SS6OpenAIE13textSearchAdaSSvpZ":{"name":"textSearchAda","parent_name":"Model"},"Extensions/Model.html#/s:SS6OpenAIE20textSearchBabbageDocSSvpZ":{"name":"textSearchBabbageDoc","parent_name":"Model"},"Extensions/Model.html#/s:SS6OpenAIE25textSearchBabbageQuery001SSvpZ":{"name":"textSearchBabbageQuery001","parent_name":"Model"},"Extensions/Model.html#/s:SS6OpenAIE14textEmbedding3SSvpZ":{"name":"textEmbedding3","parent_name":"Model"},"Extensions/Model.html#/s:SS6OpenAIE19textEmbedding3LargeSSvpZ":{"name":"textEmbedding3Large","parent_name":"Model"},"Extensions/Model.html#/s:SS6OpenAIE20textModerationStableSSvpZ":{"name":"textModerationStable","abstract":"Almost as capable as the latest model, but slightly older.
","parent_name":"Model"},"Extensions/Model.html#/s:SS6OpenAIE20textModerationLatestSSvpZ":{"name":"textModerationLatest","abstract":"Most capable moderation model. Accuracy will be slightly higher than the stable model.
","parent_name":"Model"},"Extensions/Model.html#/s:SS6OpenAIE10moderationSSvpZ":{"name":"moderation","parent_name":"Model"},"Extensions/Model.html":{"name":"Model"},"Enums/OpenAIError.html#/s:6OpenAI0A7AIErrorO9emptyDatayA2CmF":{"name":"emptyData","parent_name":"OpenAIError"},"Enums/OpenAIError.html":{"name":"OpenAIError"},"Classes/OpenAI/Configuration.html#/s:6OpenAIAAC13ConfigurationV5tokenSSvp":{"name":"token","abstract":"OpenAI API token. See https://platform.openai.com/docs/api-reference/authentication
","parent_name":"Configuration"},"Classes/OpenAI/Configuration.html#/s:6OpenAIAAC13ConfigurationV22organizationIdentifierSSSgvp":{"name":"organizationIdentifier","abstract":"Optional OpenAI organization identifier. See https://platform.openai.com/docs/api-reference/authentication
","parent_name":"Configuration"},"Classes/OpenAI/Configuration.html#/s:6OpenAIAAC13ConfigurationV4hostSSvp":{"name":"host","abstract":"API host. Set this property if you use some kind of proxy or your own server. Default is api.openai.com
","parent_name":"Configuration"},"Classes/OpenAI/Configuration.html#/s:6OpenAIAAC13ConfigurationV4portSivp":{"name":"port","parent_name":"Configuration"},"Classes/OpenAI/Configuration.html#/s:6OpenAIAAC13ConfigurationV6schemeSSvp":{"name":"scheme","parent_name":"Configuration"},"Classes/OpenAI/Configuration.html#/s:6OpenAIAAC13ConfigurationV15timeoutIntervalSdvp":{"name":"timeoutInterval","abstract":"Default request timeout
","parent_name":"Configuration"},"Classes/OpenAI/Configuration.html#/s:6OpenAIAAC13ConfigurationV5token22organizationIdentifier4host4port6scheme15timeoutIntervalADSS_SSSgSSSiSSSdtcfc":{"name":"init(token:organizationIdentifier:host:port:scheme:timeoutInterval:)","parent_name":"Configuration"},"Classes/OpenAI/Configuration.html":{"name":"Configuration","parent_name":"OpenAI"},"Classes/OpenAI.html#/s:6OpenAIAAC13configurationAB13ConfigurationVvp":{"name":"configuration","parent_name":"OpenAI"},"Classes/OpenAI.html#/s:6OpenAIAAC8apiTokenABSS_tcfc":{"name":"init(apiToken:)","parent_name":"OpenAI"},"Classes/OpenAI.html#/s:6OpenAIAAC13configurationA2B13ConfigurationV_tcfc":{"name":"init(configuration:)","parent_name":"OpenAI"},"Classes/OpenAI.html#/s:6OpenAIAAC13configuration7sessionA2B13ConfigurationV_So12NSURLSessionCtcfc":{"name":"init(configuration:session:)","parent_name":"OpenAI"},"Classes/OpenAI.html#/s:6OpenAI0A10AIProtocolP11completions5query10completionyAA16CompletionsQueryV_ys6ResultOyAA0gI0Vs5Error_pGctF":{"name":"completions(query:completion:)","parent_name":"OpenAI"},"Classes/OpenAI.html#/s:6OpenAI0A10AIProtocolP17completionsStream5query8onResult10completionyAA16CompletionsQueryV_ys0H0OyAA0jH0Vs5Error_pGcysAN_pSgcSgtF":{"name":"completionsStream(query:onResult:completion:)","parent_name":"OpenAI"},"Classes/OpenAI.html#/s:6OpenAI0A10AIProtocolP6images5query10completionyAA11ImagesQueryV_ys6ResultOyAA0gI0Vs5Error_pGctF":{"name":"images(query:completion:)","parent_name":"OpenAI"},"Classes/OpenAI.html#/s:6OpenAI0A10AIProtocolP10imageEdits5query10completionyAA05ImageE5QueryV_ys6ResultOyAA06ImagesJ0Vs5Error_pGctF":{"name":"imageEdits(query:completion:)","parent_name":"OpenAI"},"Classes/OpenAI.html#/s:6OpenAI0A10AIProtocolP15imageVariations5query10completionyAA05ImageE5QueryV_ys6ResultOyAA06ImagesJ0Vs5Error_pGctF":{"name":"imageVariations(query:completion:)","parent_name":"OpenAI"},"Classes/OpenAI.html#/s:6OpenAI0A10AIProtocolP10embeddings5query10completionyAA15Embe
ddingsQueryV_ys6ResultOyAA0gI0Vs5Error_pGctF":{"name":"embeddings(query:completion:)","parent_name":"OpenAI"},"Classes/OpenAI.html#/s:6OpenAI0A10AIProtocolP5chats5query10completionyAA9ChatQueryV_ys6ResultOyAA0gI0Vs5Error_pGctF":{"name":"chats(query:completion:)","parent_name":"OpenAI"},"Classes/OpenAI.html#/s:6OpenAI0A10AIProtocolP11chatsStream5query8onResult10completionyAA9ChatQueryV_ys0H0OyAA0jeH0Vs5Error_pGcysAN_pSgcSgtF":{"name":"chatsStream(query:onResult:completion:)","parent_name":"OpenAI"},"Classes/OpenAI.html#/s:6OpenAI0A10AIProtocolP5edits5query10completionyAA10EditsQueryV_ys6ResultOyAA0gI0Vs5Error_pGctF":{"name":"edits(query:completion:)","parent_name":"OpenAI"},"Classes/OpenAI.html#/s:6OpenAI0A10AIProtocolP5model5query10completionyAA10ModelQueryV_ys6ResultOyAA0gI0Vs5Error_pGctF":{"name":"model(query:completion:)","parent_name":"OpenAI"},"Classes/OpenAI.html#/s:6OpenAI0A10AIProtocolP6models10completionyys6ResultOyAA06ModelsF0Vs5Error_pGc_tF":{"name":"models(completion:)","parent_name":"OpenAI"},"Classes/OpenAI.html#/s:6OpenAI0A10AIProtocolP11moderations5query10completionyAA16ModerationsQueryV_ys6ResultOyAA0gI0Vs5Error_pGctF":{"name":"moderations(query:completion:)","parent_name":"OpenAI"},"Classes/OpenAI.html#/s:6OpenAI0A10AIProtocolP19audioTranscriptions5query10completionyAA23AudioTranscriptionQueryV_ys6ResultOyAA0hiK0Vs5Error_pGctF":{"name":"audioTranscriptions(query:completion:)","parent_name":"OpenAI"},"Classes/OpenAI.html#/s:6OpenAI0A10AIProtocolP17audioTranslations5query10completionyAA21AudioTranslationQueryV_ys6ResultOyAA0hiK0Vs5Error_pGctF":{"name":"audioTranslations(query:completion:)","parent_name":"OpenAI"},"Classes/OpenAI.html#/s:6OpenAI0A10AIProtocolP17audioCreateSpeech5query10completionyAA05AudioF5QueryV_ys6ResultOyAA0ifK0Vs5Error_pGctF":{"name":"audioCreateSpeech(query:completion:)","parent_name":"OpenAI"},"Classes/OpenAI.html":{"name":"OpenAI"},"Classes.html":{"name":"Classes","abstract":"The following classes are available globally.
"},"Enums.html":{"name":"Enumerations","abstract":"The following enumerations are available globally.
"},"Extensions.html":{"name":"Extensions","abstract":"The following extensions are available globally.
"},"Protocols.html":{"name":"Protocols","abstract":"The following protocols are available globally.
"},"Structs.html":{"name":"Structures","abstract":"The following structures are available globally.
"},"Typealiases.html":{"name":"Type Aliases","abstract":"The following type aliases are available globally.
"}} \ No newline at end of file diff --git a/docs/docsets/.docset/Contents/Resources/Documents/undocumented.json b/docs/docsets/.docset/Contents/Resources/Documents/undocumented.json new file mode 100644 index 00000000..8b7f1785 --- /dev/null +++ b/docs/docsets/.docset/Contents/Resources/Documents/undocumented.json @@ -0,0 +1,6 @@ +{ + "warnings": [ + + ], + "source_directory": "/Users/dingxiancao/OpenAI" +} \ No newline at end of file diff --git a/docs/docsets/.docset/Contents/Resources/docSet.dsidx b/docs/docsets/.docset/Contents/Resources/docSet.dsidx new file mode 100644 index 00000000..ee954744 Binary files /dev/null and b/docs/docsets/.docset/Contents/Resources/docSet.dsidx differ diff --git a/docs/docsets/.tgz b/docs/docsets/.tgz new file mode 100644 index 00000000..64fe7e1b Binary files /dev/null and b/docs/docsets/.tgz differ diff --git a/docs/img/carat.png b/docs/img/carat.png new file mode 100755 index 00000000..29d2f7fd Binary files /dev/null and b/docs/img/carat.png differ diff --git a/docs/img/dash.png b/docs/img/dash.png new file mode 100755 index 00000000..6f694c7a Binary files /dev/null and b/docs/img/dash.png differ diff --git a/docs/img/spinner.gif b/docs/img/spinner.gif new file mode 100644 index 00000000..e3038d0a Binary files /dev/null and b/docs/img/spinner.gif differ diff --git a/docs/index.html b/docs/index.html new file mode 100644 index 00000000..38545af2 --- /dev/null +++ b/docs/index.html @@ -0,0 +1,1280 @@ + + + +Docs (100% documented)
+This repository contains Swift community-maintained implementation over OpenAI public API.
+ +OpenAI is a non-profit artificial intelligence research organization founded in San Francisco, California in 2015. It was created with the purpose of advancing digital intelligence in ways that benefit humanity as a whole and promote societal progress. The organization strives to develop AI (Artificial Intelligence) programs and systems that can think, act and adapt quickly on their own – autonomously. OpenAI’s mission is to ensure safe and responsible use of AI for civic good, economic growth and other public benefits; this includes cutting-edge research into important topics such as general AI safety, natural language processing, applied reinforcement learning methods, machine vision algorithms etc.
+ +++The OpenAI API can be applied to virtually any task that involves understanding or generating natural language or code. We offer a spectrum of models with different levels of power suitable for different tasks, as well as the ability to fine-tune your own custom models. These models can be used for everything from content generation to semantic search and classification.
+
OpenAI is available with Swift Package Manager. +The Swift Package Manager is a tool for automating the distribution of Swift code and is integrated into the swift compiler. +Once you have your Swift package set up, adding OpenAI as a dependency is as easy as adding it to the dependencies value of your Package.swift.
+dependencies: [
+ .package(url: "https://github.com/MacPaw/OpenAI.git", branch: "main")
+]
+
+To initialize the API instance, you need to obtain an API token from your OpenAI organization.
+ +Remember that your API key is a secret! Do not share it with others or expose it in any client-side code (browsers, apps). Production requests must be routed through your own backend server where your API key can be securely loaded from an environment variable or key management service.
+ + + +Once you have a token, you can initialize OpenAI
class, which is an entry point to the API.
++⚠️ OpenAI strongly recommends developers of client-side applications proxy requests through a separate backend service to keep their API key safe. API keys can access and manipulate customer billing, usage, and organizational data, so it’s a significant risk to expose them.
+
let openAI = OpenAI(apiToken: "YOUR_TOKEN_HERE")
+
+
+Optionally you can initialize OpenAI
with token, organization identifier and timeoutInterval.
let configuration = OpenAI.Configuration(token: "YOUR_TOKEN_HERE", organizationIdentifier: "YOUR_ORGANIZATION_ID_HERE", timeoutInterval: 60.0)
+let openAI = OpenAI(configuration: configuration)
+
+
+Once you possess the token, and the instance is initialized, you are ready to make requests.
+Given a prompt, the model will return one or more predicted completions, and can also return the probabilities of alternative tokens at each position.
+ +Request
+struct CompletionsQuery: Codable {
+ /// ID of the model to use.
+ public let model: Model
+ /// The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.
+ public let prompt: String
+ /// What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer.
+ public let temperature: Double?
+ /// The maximum number of tokens to generate in the completion.
+ public let maxTokens: Int?
+ /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ public let topP: Double?
+ /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
+ public let frequencyPenalty: Double?
+ /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
+ public let presencePenalty: Double?
+ /// Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
+ public let stop: [String]?
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
+ public let user: String?
+}
+
+
+Response
+struct CompletionsResult: Codable, Equatable {
+ public struct Choice: Codable, Equatable {
+ public let text: String
+ public let index: Int
+ }
+
+ public let id: String
+ public let object: String
+ public let created: TimeInterval
+ public let model: Model
+ public let choices: [Choice]
+ public let usage: Usage
+}
+
+
+Example
+let query = CompletionsQuery(model: .textDavinci_003, prompt: "What is 42?", temperature: 0, maxTokens: 100, topP: 1, frequencyPenalty: 0, presencePenalty: 0, stop: ["\\n"])
+openAI.completions(query: query) { result in
+ //Handle result here
+}
+//or
+let result = try await openAI.completions(query: query)
+
+(lldb) po result
+▿ CompletionsResult
+ - id : "cmpl-6P9be2p2fQlwB7zTOl0NxCOetGmX3"
+ - object : "text_completion"
+ - created : 1671453146.0
+ - model : OpenAI.Model.textDavinci_003
+ ▿ choices : 1 element
+ ▿ 0 : Choice
+ - text : "\n\n42 is the answer to the ultimate question of life, the universe, and everything, according to the book The Hitchhiker\'s Guide to the Galaxy."
+ - index : 0
+
+Completions streaming is available by using completionsStream
function. Tokens will be sent one-by-one.
Closures
+openAI.completionsStream(query: query) { partialResult in
+ switch partialResult {
+ case .success(let result):
+ print(result.choices)
+ case .failure(let error):
+ //Handle chunk error here
+ }
+} completion: { error in
+ //Handle streaming error here
+}
+
+
+Combine
+openAI
+ .completionsStream(query: query)
+ .sink { completion in
+ //Handle completion result here
+ } receiveValue: { result in
+ //Handle chunk here
+ }.store(in: &cancellables)
+
+
+Structured concurrency
+for try await result in openAI.completionsStream(query: query) {
+ //Handle result here
+}
+
+
+Review Completions Documentation for more info.
+Using the OpenAI Chat API, you can build your own applications with gpt-3.5-turbo
to do things like:
Request
+ struct ChatQuery: Codable {
+ /// ID of the model to use. Currently, only gpt-3.5-turbo and gpt-3.5-turbo-0301 are supported.
+ public let model: Model
+ /// The messages to generate chat completions for
+ public let messages: [Chat]
+ /// A list of functions the model may generate JSON inputs for.
+ public let functions: [ChatFunctionDeclaration]?
+ /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and We generally recommend altering this or top_p but not both.
+ public let temperature: Double?
+ /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ public let topP: Double?
+ /// How many chat completion choices to generate for each input message.
+ public let n: Int?
+ /// Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
+ public let stop: [String]?
+ /// The maximum number of tokens to generate in the completion.
+ public let maxTokens: Int?
+ /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
+ public let presencePenalty: Double?
+ /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
+ public let frequencyPenalty: Double?
+ ///Modify the likelihood of specified tokens appearing in the completion.
+ public let logitBias: [String:Int]?
+ /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
+ public let user: String?
+}
+
+
+Response
+struct ChatResult: Codable, Equatable {
+ public struct Choice: Codable, Equatable {
+ public let index: Int
+ public let message: Chat
+ public let finishReason: String
+ }
+
+ public struct Usage: Codable, Equatable {
+ public let promptTokens: Int
+ public let completionTokens: Int
+ public let totalTokens: Int
+ }
+
+ public let id: String
+ public let object: String
+ public let created: TimeInterval
+ public let model: Model
+ public let choices: [Choice]
+ public let usage: Usage
+}
+
+
+Example
+let query = ChatQuery(model: .gpt3_5Turbo, messages: [.init(role: .user, content: "who are you")])
+let result = try await openAI.chats(query: query)
+
+(lldb) po result
+▿ ChatResult
+ - id : "chatcmpl-6pwjgxGV2iPP4QGdyOLXnTY0LE3F8"
+ - object : "chat.completion"
+ - created : 1677838528.0
+ - model : "gpt-3.5-turbo-0301"
+ ▿ choices : 1 element
+ ▿ 0 : Choice
+ - index : 0
+ ▿ message : Chat
+ - role : "assistant"
+ - content : "\n\nI\'m an AI language model developed by OpenAI, created to provide assistance and support for various tasks such as answering questions, generating text, and providing recommendations. Nice to meet you!"
+ - finish_reason : "stop"
+ ▿ usage : Usage
+ - prompt_tokens : 10
+ - completion_tokens : 39
+ - total_tokens : 49
+
+Chats streaming is available by using chatStream
function. Tokens will be sent one-by-one.
Closures
+openAI.chatsStream(query: query) { partialResult in
+ switch partialResult {
+ case .success(let result):
+ print(result.choices)
+ case .failure(let error):
+ //Handle chunk error here
+ }
+} completion: { error in
+ //Handle streaming error here
+}
+
+
+Combine
+openAI
+ .chatsStream(query: query)
+ .sink { completion in
+ //Handle completion result here
+ } receiveValue: { result in
+ //Handle chunk here
+ }.store(in: &cancellables)
+
+
+Structured concurrency
+for try await result in openAI.chatsStream(query: query) {
+ //Handle result here
+}
+
+
+Function calls
+let openAI = OpenAI(apiToken: "...")
+// Declare functions which GPT-3 might decide to call.
+let functions = [
+ ChatFunctionDeclaration(
+ name: "get_current_weather",
+ description: "Get the current weather in a given location",
+ parameters:
+ JSONSchema(
+ type: .object,
+ properties: [
+ "location": .init(type: .string, description: "The city and state, e.g. San Francisco, CA"),
+ "unit": .init(type: .string, enumValues: ["celsius", "fahrenheit"])
+ ],
+ required: ["location"]
+ )
+ )
+]
+let query = ChatQuery(
+ model: "gpt-3.5-turbo-0613", // 0613 is the earliest version with function calls support.
+ messages: [
+ Chat(role: .user, content: "What's the weather like in Boston?")
+ ],
+ functions: functions
+)
+let result = try await openAI.chats(query: query)
+
+
+Result will be (serialized as JSON here for readability):
+{
+ "id": "chatcmpl-1234",
+ "object": "chat.completion",
+ "created": 1686000000,
+ "model": "gpt-3.5-turbo-0613",
+ "choices": [
+ {
+ "index": 0,
+ "message": {
+ "role": "assistant",
+ "function_call": {
+ "name": "get_current_weather",
+ "arguments": "{\n \"location\": \"Boston, MA\"\n}"
+ }
+ },
+ "finish_reason": "function_call"
+ }
+ ],
+ "usage": { "total_tokens": 100, "completion_tokens": 18, "prompt_tokens": 82 }
+}
+
+
+
+Review Chat Documentation for more info.
+Given a prompt and/or an input image, the model will generate a new image.
+ +As Artificial Intelligence continues to develop, so too does the intriguing concept of Dall-E. Developed by OpenAI, a research lab for artificial intelligence purposes, Dall-E has been classified as an AI system that can generate images based on descriptions provided by humans. With its potential applications spanning from animation and illustration to design and engineering - not to mention the endless possibilities in between - it’s easy to see why there is such excitement over this new technology.
+Request
+struct ImagesQuery: Codable {
+ /// A text description of the desired image(s). The maximum length is 1000 characters.
+ public let prompt: String
+ /// The number of images to generate. Must be between 1 and 10.
+ public let n: Int?
+ /// The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024.
+ public let size: String?
+}
+
+
+Response
+struct ImagesResult: Codable, Equatable {
+ public struct URLResult: Codable, Equatable {
+ public let url: String
+ }
+ public let created: TimeInterval
+ public let data: [URLResult]
+}
+
+
+Example
+let query = ImagesQuery(prompt: "White cat with heterochromia sitting on the kitchen table", n: 1, size: "1024x1024")
+openAI.images(query: query) { result in
+ //Handle result here
+}
+//or
+let result = try await openAI.images(query: query)
+
+(lldb) po result
+▿ ImagesResult
+ - created : 1671453505.0
+ ▿ data : 1 element
+ ▿ 0 : URLResult
+ - url : "https://oaidalleapiprodscus.blob.core.windows.net/private/org-CWjU5cDIzgCcVjq10pp5yX5Q/user-GoBXgChvLBqLHdBiMJBUbPqF/img-WZVUK2dOD4HKbKwW1NeMJHBd.png?st=2022-12-19T11%3A38%3A25Z&se=2022-12-19T13%3A38%3A25Z&sp=r&sv=2021-08-06&sr=b&rscd=inline&rsct=image/png&skoid=6aaadede-4fb3-4698-a8f6-684d7786b067&sktid=a48cca56-e6da-484e-a814-9c849652bcb3&skt=2022-12-19T09%3A35%3A16Z&ske=2022-12-20T09%3A35%3A16Z&sks=b&skv=2021-08-06&sig=mh52rmtbQ8CXArv5bMaU6lhgZHFBZz/ePr4y%2BJwLKOc%3D"
+
+
+Generated image
+ + +Creates an edited or extended image given an original image and a prompt.
+ +Request
+public struct ImageEditsQuery: Codable {
+ /// The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask.
+ public let image: Data
+ public let fileName: String
+ /// An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where image should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as image.
+ public let mask: Data?
+ public let maskFileName: String?
+ /// A text description of the desired image(s). The maximum length is 1000 characters.
+ public let prompt: String
+ /// The number of images to generate. Must be between 1 and 10.
+ public let n: Int?
+ /// The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024.
+ public let size: String?
+}
+
+
+Response
+ +Uses the ImagesResult response similarly to ImagesQuery.
+ +Example
+let data = image.pngData()
+let query = ImageEditQuery(image: data, fileName: "whitecat.png", prompt: "White cat with heterochromia sitting on the kitchen table with a bowl of food", n: 1, size: "1024x1024")
+openAI.imageEdits(query: query) { result in
+ //Handle result here
+}
+//or
+let result = try await openAI.imageEdits(query: query)
+
+Creates a variation of a given image.
+ +Request
+public struct ImageVariationsQuery: Codable {
+ /// The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask.
+ public let image: Data
+ public let fileName: String
+ /// The number of images to generate. Must be between 1 and 10.
+ public let n: Int?
+ /// The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024.
+ public let size: String?
+}
+
+
+Response
+ +Uses the ImagesResult response similarly to ImagesQuery.
+ +Example
+let data = image.pngData()
+let query = ImageVariationQuery(image: data, fileName: "whitecat.png", n: 1, size: "1024x1024")
+openAI.imageVariations(query: query) { result in
+ //Handle result here
+}
+//or
+let result = try await openAI.imageVariations(query: query)
+
+
+Review Images Documentation for more info.
+The speech to text API provides two endpoints, transcriptions and translations, based on our state-of-the-art open source large-v2 Whisper model. They can be used to:
+ +Transcribe audio into whatever language the audio is in. +Translate and transcribe the audio into english. +File uploads are currently limited to 25 MB and the following input file types are supported: mp3, mp4, mpeg, mpga, m4a, wav, and webm.
+This function sends an AudioSpeechQuery
to the OpenAI API to create audio speech from text using a specific voice and format.
Learn more about voices.
+Learn more about models.
Request:
+public struct AudioSpeechQuery: Codable, Equatable {
+ //...
+ public let model: Model // tts-1 or tts-1-hd
+ public let input: String
+ public let voice: AudioSpeechVoice
+ public let responseFormat: AudioSpeechResponseFormat
+ public let speed: String? // Initializes with Double?
+ //...
+}
+
+
+Response:
+/// Audio data for one of the following formats :`mp3`, `opus`, `aac`, `flac`
+public let audioData: Data?
+
+
+Example:
+let query = AudioSpeechQuery(model: .tts_1, input: "Hello, world!", voice: .alloy, responseFormat: .mp3, speed: 1.0)
+
+openAI.audioCreateSpeech(query: query) { result in
+ // Handle response here
+}
+//or
+let result = try await openAI.audioCreateSpeech(query: query)
+
+
+OpenAI Create Speech – Documentation
+Transcribes audio into the input language.
+ +Request
+public struct AudioTranscriptionQuery: Codable, Equatable {
+
+ public let file: Data
+ public let fileName: String
+ public let model: Model
+
+ public let prompt: String?
+ public let temperature: Double?
+ public let language: String?
+}
+
+
+Response
+public struct AudioTranscriptionResult: Codable, Equatable {
+
+ public let text: String
+}
+
+
+Example
+let data = Data(contentsOfURL:...)
+let query = AudioTranscriptionQuery(file: data, fileName: "audio.m4a", model: .whisper_1)
+
+openAI.audioTranscriptions(query: query) { result in
+ //Handle result here
+}
+//or
+let result = try await openAI.audioTranscriptions(query: query)
+
+Translates audio into English.
+ +Request
+public struct AudioTranslationQuery: Codable, Equatable {
+
+ public let file: Data
+ public let fileName: String
+ public let model: Model
+
+ public let prompt: String?
+ public let temperature: Double?
+}
+
+
+Response
+public struct AudioTranslationResult: Codable, Equatable {
+
+ public let text: String
+}
+
+
+Example
+let data = Data(contentsOfURL:...)
+let query = AudioTranslationQuery(file: data, fileName: "audio.m4a", model: .whisper_1)
+
+openAI.audioTranslations(query: query) { result in
+ //Handle result here
+}
+//or
+let result = try await openAI.audioTranslations(query: query)
+
+
+Review Audio Documentation for more info.
+Creates a new edit for the provided input, instruction, and parameters.
+ +Request
+struct EditsQuery: Codable {
+ /// ID of the model to use.
+ public let model: Model
+ /// Input text to get embeddings for.
+ public let input: String?
+ /// The instruction that tells the model how to edit the prompt.
+ public let instruction: String
+ /// The number of images to generate. Must be between 1 and 10.
+ public let n: Int?
+ /// What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer.
+ public let temperature: Double?
+ /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
+ public let topP: Double?
+}
+
+
+Response
+struct EditsResult: Codable, Equatable {
+
+ public struct Choice: Codable, Equatable {
+ public let text: String
+ public let index: Int
+ }
+
+ public struct Usage: Codable, Equatable {
+ public let promptTokens: Int
+ public let completionTokens: Int
+ public let totalTokens: Int
+
+ enum CodingKeys: String, CodingKey {
+ case promptTokens = "prompt_tokens"
+ case completionTokens = "completion_tokens"
+ case totalTokens = "total_tokens"
+ }
+ }
+
+ public let object: String
+ public let created: TimeInterval
+ public let choices: [Choice]
+ public let usage: Usage
+}
+
+
+Example
+let query = EditsQuery(model: .gpt4, input: "What day of the wek is it?", instruction: "Fix the spelling mistakes")
+openAI.edits(query: query) { result in
+ //Handle response here
+}
+//or
+let result = try await openAI.edits(query: query)
+
+
+Review Edits Documentation for more info.
+Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms.
+ +Request
+struct EmbeddingsQuery: Codable {
+ /// ID of the model to use.
+ public let model: Model
+ /// Input text to get embeddings for
+ public let input: String
+}
+
+
+Response
+struct EmbeddingsResult: Codable, Equatable {
+
+ public struct Embedding: Codable, Equatable {
+
+ public let object: String
+ public let embedding: [Double]
+ public let index: Int
+ }
+ public let data: [Embedding]
+ public let usage: Usage
+}
+
+
+Example
+let query = EmbeddingsQuery(model: .textSearchBabbageDoc, input: "The food was delicious and the waiter...")
+openAI.embeddings(query: query) { result in
+ //Handle response here
+}
+//or
+let result = try await openAI.embeddings(query: query)
+
+(lldb) po result
+▿ EmbeddingsResult
+ ▿ data : 1 element
+ ▿ 0 : Embedding
+ - object : "embedding"
+ ▿ embedding : 2048 elements
+ - 0 : 0.0010535449
+ - 1 : 0.024234328
+ - 2 : -0.0084999
+ - 3 : 0.008647452
+ .......
+ - 2044 : 0.017536353
+ - 2045 : -0.005897616
+ - 2046 : -0.026559394
+ - 2047 : -0.016633155
+ - index : 0
+
+(lldb)
+
+
+Review Embeddings Documentation for more info.
+Models are represented as a typealias typealias Model = String
.
public extension Model {
+ static let gpt4_turbo_preview = "gpt-4-turbo-preview"
+ static let gpt4_vision_preview = "gpt-4-vision-preview"
+ static let gpt4_0125_preview = "gpt-4-0125-preview"
+ static let gpt4_1106_preview = "gpt-4-1106-preview"
+ static let gpt4 = "gpt-4"
+ static let gpt4_0613 = "gpt-4-0613"
+ static let gpt4_0314 = "gpt-4-0314"
+ static let gpt4_32k = "gpt-4-32k"
+ static let gpt4_32k_0613 = "gpt-4-32k-0613"
+ static let gpt4_32k_0314 = "gpt-4-32k-0314"
+
+ static let gpt3_5Turbo = "gpt-3.5-turbo"
+ static let gpt3_5Turbo_0125 = "gpt-3.5-turbo-0125"
+ static let gpt3_5Turbo_1106 = "gpt-3.5-turbo-1106"
+ static let gpt3_5Turbo_0613 = "gpt-3.5-turbo-0613"
+ static let gpt3_5Turbo_0301 = "gpt-3.5-turbo-0301"
+ static let gpt3_5Turbo_16k = "gpt-3.5-turbo-16k"
+ static let gpt3_5Turbo_16k_0613 = "gpt-3.5-turbo-16k-0613"
+
+ static let textDavinci_003 = "text-davinci-003"
+ static let textDavinci_002 = "text-davinci-002"
+ static let textCurie = "text-curie-001"
+ static let textBabbage = "text-babbage-001"
+ static let textAda = "text-ada-001"
+
+ static let textDavinci_001 = "text-davinci-001"
+ static let codeDavinciEdit_001 = "code-davinci-edit-001"
+
+ static let tts_1 = "tts-1"
+ static let tts_1_hd = "tts-1-hd"
+
+ static let whisper_1 = "whisper-1"
+
+ static let dall_e_2 = "dall-e-2"
+ static let dall_e_3 = "dall-e-3"
+
+ static let davinci = "davinci"
+ static let curie = "curie"
+ static let babbage = "babbage"
+ static let ada = "ada"
+
+ static let textEmbeddingAda = "text-embedding-ada-002"
+ static let textSearchAda = "text-search-ada-doc-001"
+ static let textSearchBabbageDoc = "text-search-babbage-doc-001"
+ static let textSearchBabbageQuery001 = "text-search-babbage-query-001"
+ static let textEmbedding3 = "text-embedding-3-small"
+ static let textEmbedding3Large = "text-embedding-3-large"
+
+ static let textModerationStable = "text-moderation-stable"
+ static let textModerationLatest = "text-moderation-latest"
+ static let moderation = "text-moderation-007"
+}
+
+
+GPT-4 models are supported.
+ +As an example: To use the gpt-4-turbo-preview
model, pass .gpt4_turbo_preview
as the parameter to the ChatQuery
init.
let query = ChatQuery(model: .gpt4_turbo_preview, messages: [
+ .init(role: .system, content: "You are Librarian-GPT. You know everything about the books."),
+ .init(role: .user, content: "Who wrote Harry Potter?")
+])
+let result = try await openAI.chats(query: query)
+XCTAssertFalse(result.choices.isEmpty)
+
+
+You can also pass a custom string if you need to use some model, that is not represented above.
+Lists the currently available models.
+ +Response
+public struct ModelsResult: Codable, Equatable {
+
+ public let data: [ModelResult]
+ public let object: String
+}
+
+
+
+Example
+openAI.models() { result in
+ //Handle result here
+}
+//or
+let result = try await openAI.models()
+
+Retrieves a model instance, providing ownership information.
+ +Request
+public struct ModelQuery: Codable, Equatable {
+
+ public let model: Model
+}
+
+
+Response
+public struct ModelResult: Codable, Equatable {
+
+ public let id: Model
+ public let object: String
+ public let ownedBy: String
+}
+
+
+Example
+let query = ModelQuery(model: .gpt4)
+openAI.model(query: query) { result in
+ //Handle result here
+}
+//or
+let result = try await openAI.model(query: query)
+
+
+Review Models Documentation for more info.
+Given a input text, outputs if the model classifies it as violating OpenAI’s content policy.
+ +Request
+public struct ModerationsQuery: Codable {
+
+ public let input: String
+ public let model: Model?
+}
+
+
+Response
+public struct ModerationsResult: Codable, Equatable {
+
+ public let id: String
+ public let model: Model
+ public let results: [CategoryResult]
+}
+
+
+Example
+let query = ModerationsQuery(input: "I want to kill them.")
+openAI.moderations(query: query) { result in
+ //Handle result here
+}
+//or
+let result = try await openAI.moderations(query: query)
+
+
+Review Moderations Documentation for more info.
+The component comes with several handy utility functions to work with the vectors.
+public struct Vector {
+
+ /// Returns the similarity between two vectors
+ ///
+ /// - Parameters:
+ /// - a: The first vector
+ /// - b: The second vector
+ public static func cosineSimilarity(a: [Double], b: [Double]) -> Double {
+ return dot(a, b) / (mag(a) * mag(b))
+ }
+
+ /// Returns the difference between two vectors. Cosine distance is defined as `1 - cosineSimilarity(a, b)`
+ ///
+ /// - Parameters:
+ /// - a: The first vector
+ /// - b: The second vector
+ public func cosineDifference(a: [Double], b: [Double]) -> Double {
+ return 1 - Self.cosineSimilarity(a: a, b: b)
+ }
+}
+
+
+Example
+let vector1 = [0.213123, 0.3214124, 0.421412, 0.3214521251, 0.412412, 0.3214124, 0.1414124, 0.3214521251, 0.213123, 0.3214124, 0.1414124, 0.4214214, 0.213123, 0.3214124, 0.1414124, 0.3214521251, 0.213123, 0.3214124, 0.1414124, 0.3214521251]
+let vector2 = [0.213123, 0.3214124, 0.1414124, 0.3214521251, 0.213123, 0.3214124, 0.1414124, 0.3214521251, 0.213123, 0.511515, 0.1414124, 0.3214521251, 0.213123, 0.3214124, 0.1414124, 0.3214521251, 0.213123, 0.3214124, 0.1414124, 0.3213213]
+let similarity = Vector.cosineSimilarity(a: vector1, b: vector2)
+print(similarity) //0.9510201910206734
+
+
+++ + + +In data analysis, cosine similarity is a measure of similarity between two sequences of numbers.
+
Read more about Cosine Similarity here.
+The library contains built-in Combine extensions.
+func completions(query: CompletionsQuery) -> AnyPublisher<CompletionsResult, Error>
+func images(query: ImagesQuery) -> AnyPublisher<ImagesResult, Error>
+func embeddings(query: EmbeddingsQuery) -> AnyPublisher<EmbeddingsResult, Error>
+func chats(query: ChatQuery) -> AnyPublisher<ChatResult, Error>
+func edits(query: EditsQuery) -> AnyPublisher<EditsResult, Error>
+func model(query: ModelQuery) -> AnyPublisher<ModelResult, Error>
+func models() -> AnyPublisher<ModelsResult, Error>
+func moderations(query: ModerationsQuery) -> AnyPublisher<ModerationsResult, Error>
+func audioTranscriptions(query: AudioTranscriptionQuery) -> AnyPublisher<AudioTranscriptionResult, Error>
+func audioTranslations(query: AudioTranslationQuery) -> AnyPublisher<AudioTranslationResult, Error>
+
+You can find example iOS application in Demo folder.
+ + +Make your Pull Requests clear and obvious to anyone viewing them.
+Set main
as your target branch.
Feat: ...
for new features and new functionality implementations.Bug: ...
for bug fixes.Fix: ...
for minor issues fixing, like typos or inaccuracies in code.Chore: ...
for boring stuff like code polishing, refactoring, deprecation fixing etc.PR naming example: Feat: Add Threads API handling
or Bug: Fix message result duplication
Branch naming example: feat/add-threads-API-handling
or bug/fix-message-result-duplication
…
+ +…
+ +…
+ +…
+ +We’ll appreciate you including tests to your code if it is needed and possible. ❤️
+MIT License
+
+Copyright (c) 2023 MacPaw Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+
+ Defines all available OpenAI models supported by the library.
"},"Structs/Vector.html#/s:6OpenAI6VectorV16cosineSimilarity1a1bSdSaySdG_AGtFZ":{"name":"cosineSimilarity(a:b:)","abstract":"Returns the similarity between two vectors
","parent_name":"Vector"},"Structs/Vector.html#/s:6OpenAI6VectorV16cosineDifference1a1bSdSaySdG_AGtF":{"name":"cosineDifference(a:b:)","abstract":"Returns the difference between two vectors. Cosine distance is defined as 1 - cosineSimilarity(a, b)
Content that expresses, incites, or promotes harassing language towards any target.
","parent_name":"CategoryScores"},"Structs/ModerationsResult/Moderation/CategoryScores.html#/s:6OpenAI17ModerationsResultV10ModerationV14CategoryScoresV21harassmentThreateningSdvp":{"name":"harassmentThreatening","abstract":"Harassment content that also includes violence or serious harm towards any target.
","parent_name":"CategoryScores"},"Structs/ModerationsResult/Moderation/CategoryScores.html#/s:6OpenAI17ModerationsResultV10ModerationV14CategoryScoresV4hateSdvp":{"name":"hate","abstract":"Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste.
","parent_name":"CategoryScores"},"Structs/ModerationsResult/Moderation/CategoryScores.html#/s:6OpenAI17ModerationsResultV10ModerationV14CategoryScoresV15hateThreateningSdvp":{"name":"hateThreatening","abstract":"Hateful content that also includes violence or serious harm towards the targeted group.
","parent_name":"CategoryScores"},"Structs/ModerationsResult/Moderation/CategoryScores.html#/s:6OpenAI17ModerationsResultV10ModerationV14CategoryScoresV8selfHarmSdvp":{"name":"selfHarm","abstract":"Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders.
","parent_name":"CategoryScores"},"Structs/ModerationsResult/Moderation/CategoryScores.html#/s:6OpenAI17ModerationsResultV10ModerationV14CategoryScoresV14selfHarmIntentSdvp":{"name":"selfHarmIntent","abstract":"Content where the speaker expresses that they are engaging or intend to engage in acts of self-harm, such as suicide, cutting, and eating disorders.
","parent_name":"CategoryScores"},"Structs/ModerationsResult/Moderation/CategoryScores.html#/s:6OpenAI17ModerationsResultV10ModerationV14CategoryScoresV20selfHarmInstructionsSdvp":{"name":"selfHarmInstructions","abstract":"Content that encourages performing acts of self-harm, such as suicide, cutting, and eating disorders, or that gives instructions or advice on how to commit such acts.
","parent_name":"CategoryScores"},"Structs/ModerationsResult/Moderation/CategoryScores.html#/s:6OpenAI17ModerationsResultV10ModerationV14CategoryScoresV6sexualSdvp":{"name":"sexual","abstract":"Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services (excluding sex education and wellness).
","parent_name":"CategoryScores"},"Structs/ModerationsResult/Moderation/CategoryScores.html#/s:6OpenAI17ModerationsResultV10ModerationV14CategoryScoresV12sexualMinorsSdvp":{"name":"sexualMinors","abstract":"Sexual content that includes an individual who is under 18 years old.
","parent_name":"CategoryScores"},"Structs/ModerationsResult/Moderation/CategoryScores.html#/s:6OpenAI17ModerationsResultV10ModerationV14CategoryScoresV8violenceSdvp":{"name":"violence","abstract":"Content that promotes or glorifies violence or celebrates the suffering or humiliation of others.
","parent_name":"CategoryScores"},"Structs/ModerationsResult/Moderation/CategoryScores.html#/s:6OpenAI17ModerationsResultV10ModerationV14CategoryScoresV15violenceGraphicSdvp":{"name":"violenceGraphic","abstract":"Violent content that depicts death, violence, or serious physical injury in extreme graphic detail.
","parent_name":"CategoryScores"},"Structs/ModerationsResult/Moderation/CategoryScores/CodingKeys.html":{"name":"CodingKeys","parent_name":"CategoryScores"},"Structs/ModerationsResult/Moderation/CategoryScores.html#/s:ST12makeIterator0B0QzyF":{"name":"makeIterator()","parent_name":"CategoryScores"},"Structs/ModerationsResult/Moderation/Categories/CodingKeys.html#/s:6OpenAI17ModerationsResultV10ModerationV10CategoriesV10CodingKeysO10harassmentyA2ImF":{"name":"harassment","parent_name":"CodingKeys"},"Structs/ModerationsResult/Moderation/Categories/CodingKeys.html#/s:6OpenAI17ModerationsResultV10ModerationV10CategoriesV10CodingKeysO21harassmentThreateningyA2ImF":{"name":"harassmentThreatening","parent_name":"CodingKeys"},"Structs/ModerationsResult/Moderation/Categories/CodingKeys.html#/s:6OpenAI17ModerationsResultV10ModerationV10CategoriesV10CodingKeysO4hateyA2ImF":{"name":"hate","parent_name":"CodingKeys"},"Structs/ModerationsResult/Moderation/Categories/CodingKeys.html#/s:6OpenAI17ModerationsResultV10ModerationV10CategoriesV10CodingKeysO15hateThreateningyA2ImF":{"name":"hateThreatening","parent_name":"CodingKeys"},"Structs/ModerationsResult/Moderation/Categories/CodingKeys.html#/s:6OpenAI17ModerationsResultV10ModerationV10CategoriesV10CodingKeysO8selfHarmyA2ImF":{"name":"selfHarm","parent_name":"CodingKeys"},"Structs/ModerationsResult/Moderation/Categories/CodingKeys.html#/s:6OpenAI17ModerationsResultV10ModerationV10CategoriesV10CodingKeysO14selfHarmIntentyA2ImF":{"name":"selfHarmIntent","parent_name":"CodingKeys"},"Structs/ModerationsResult/Moderation/Categories/CodingKeys.html#/s:6OpenAI17ModerationsResultV10ModerationV10CategoriesV10CodingKeysO20selfHarmInstructionsyA2ImF":{"name":"selfHarmInstructions","parent_name":"CodingKeys"},"Structs/ModerationsResult/Moderation/Categories/CodingKeys.html#/s:6OpenAI17ModerationsResultV10ModerationV10CategoriesV10CodingKeysO6sexualyA2ImF":{"name":"sexual","parent_name":"CodingKeys"},"Structs/ModerationsResult/Moderation/Categ
ories/CodingKeys.html#/s:6OpenAI17ModerationsResultV10ModerationV10CategoriesV10CodingKeysO12sexualMinorsyA2ImF":{"name":"sexualMinors","parent_name":"CodingKeys"},"Structs/ModerationsResult/Moderation/Categories/CodingKeys.html#/s:6OpenAI17ModerationsResultV10ModerationV10CategoriesV10CodingKeysO8violenceyA2ImF":{"name":"violence","parent_name":"CodingKeys"},"Structs/ModerationsResult/Moderation/Categories/CodingKeys.html#/s:6OpenAI17ModerationsResultV10ModerationV10CategoriesV10CodingKeysO15violenceGraphicyA2ImF":{"name":"violenceGraphic","parent_name":"CodingKeys"},"Structs/ModerationsResult/Moderation/Categories.html#/s:6OpenAI17ModerationsResultV10ModerationV10CategoriesV10harassmentSbvp":{"name":"harassment","abstract":"Content that expresses, incites, or promotes harassing language towards any target.
","parent_name":"Categories"},"Structs/ModerationsResult/Moderation/Categories.html#/s:6OpenAI17ModerationsResultV10ModerationV10CategoriesV21harassmentThreateningSbvp":{"name":"harassmentThreatening","abstract":"Harassment content that also includes violence or serious harm towards any target.
","parent_name":"Categories"},"Structs/ModerationsResult/Moderation/Categories.html#/s:6OpenAI17ModerationsResultV10ModerationV10CategoriesV4hateSbvp":{"name":"hate","abstract":"Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste.
","parent_name":"Categories"},"Structs/ModerationsResult/Moderation/Categories.html#/s:6OpenAI17ModerationsResultV10ModerationV10CategoriesV15hateThreateningSbvp":{"name":"hateThreatening","abstract":"Hateful content that also includes violence or serious harm towards the targeted group.
","parent_name":"Categories"},"Structs/ModerationsResult/Moderation/Categories.html#/s:6OpenAI17ModerationsResultV10ModerationV10CategoriesV8selfHarmSbvp":{"name":"selfHarm","abstract":"Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders.
","parent_name":"Categories"},"Structs/ModerationsResult/Moderation/Categories.html#/s:6OpenAI17ModerationsResultV10ModerationV10CategoriesV14selfHarmIntentSbvp":{"name":"selfHarmIntent","abstract":"Content where the speaker expresses that they are engaging or intend to engage in acts of self-harm, such as suicide, cutting, and eating disorders.
","parent_name":"Categories"},"Structs/ModerationsResult/Moderation/Categories.html#/s:6OpenAI17ModerationsResultV10ModerationV10CategoriesV20selfHarmInstructionsSbvp":{"name":"selfHarmInstructions","abstract":"Content that encourages performing acts of self-harm, such as suicide, cutting, and eating disorders, or that gives instructions or advice on how to commit such acts.
","parent_name":"Categories"},"Structs/ModerationsResult/Moderation/Categories.html#/s:6OpenAI17ModerationsResultV10ModerationV10CategoriesV6sexualSbvp":{"name":"sexual","abstract":"Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services (excluding sex education and wellness).
","parent_name":"Categories"},"Structs/ModerationsResult/Moderation/Categories.html#/s:6OpenAI17ModerationsResultV10ModerationV10CategoriesV12sexualMinorsSbvp":{"name":"sexualMinors","abstract":"Sexual content that includes an individual who is under 18 years old.
","parent_name":"Categories"},"Structs/ModerationsResult/Moderation/Categories.html#/s:6OpenAI17ModerationsResultV10ModerationV10CategoriesV8violenceSbvp":{"name":"violence","abstract":"Content that promotes or glorifies violence or celebrates the suffering or humiliation of others.
","parent_name":"Categories"},"Structs/ModerationsResult/Moderation/Categories.html#/s:6OpenAI17ModerationsResultV10ModerationV10CategoriesV15violenceGraphicSbvp":{"name":"violenceGraphic","abstract":"Violent content that depicts death, violence, or serious physical injury in extreme graphic detail.
","parent_name":"Categories"},"Structs/ModerationsResult/Moderation/Categories/CodingKeys.html":{"name":"CodingKeys","parent_name":"Categories"},"Structs/ModerationsResult/Moderation/Categories.html#/s:ST12makeIterator0B0QzyF":{"name":"makeIterator()","parent_name":"Categories"},"Structs/ModerationsResult/Moderation/Categories.html":{"name":"Categories","parent_name":"Moderation"},"Structs/ModerationsResult/Moderation/CategoryScores.html":{"name":"CategoryScores","parent_name":"Moderation"},"Structs/ModerationsResult/Moderation.html#/s:6OpenAI17ModerationsResultV10ModerationV10categoriesAE10CategoriesVvp":{"name":"categories","abstract":"Collection of per-category binary usage policies violation flags. For each category, the value is true if the model flags the corresponding category as violated, false otherwise.
","parent_name":"Moderation"},"Structs/ModerationsResult/Moderation.html#/s:6OpenAI17ModerationsResultV10ModerationV14categoryScoresAE08CategoryG0Vvp":{"name":"categoryScores","abstract":"Collection of per-category raw scores output by the model, denoting the model’s confidence that the input violates the OpenAI’s policy for the category. The value is between 0 and 1, where higher values denote higher confidence. The scores should not be interpreted as probabilities.
","parent_name":"Moderation"},"Structs/ModerationsResult/Moderation.html#/s:6OpenAI17ModerationsResultV10ModerationV7flaggedSbvp":{"name":"flagged","abstract":"True if the model classifies the content as violating OpenAI’s usage policies, false otherwise.
","parent_name":"Moderation"},"Structs/ModerationsResult/Moderation.html":{"name":"Moderation","parent_name":"ModerationsResult"},"Structs/ModerationsResult.html#/s:s12IdentifiableP2id2IDQzvp":{"name":"id","parent_name":"ModerationsResult"},"Structs/ModerationsResult.html#/s:6OpenAI17ModerationsResultV5modelSSvp":{"name":"model","parent_name":"ModerationsResult"},"Structs/ModerationsResult.html#/s:6OpenAI17ModerationsResultV7resultsSayAC10ModerationVGvp":{"name":"results","parent_name":"ModerationsResult"},"Structs/ModerationsQuery.html#/s:6OpenAI16ModerationsQueryV5inputSSvp":{"name":"input","abstract":"The input text to classify.
","parent_name":"ModerationsQuery"},"Structs/ModerationsQuery.html#/s:6OpenAI16ModerationsQueryV5modelSSSgvp":{"name":"model","abstract":"ID of the model to use.
","parent_name":"ModerationsQuery"},"Structs/ModerationsQuery.html#/s:6OpenAI16ModerationsQueryV5input5modelACSS_SSSgtcfc":{"name":"init(input:model:)","parent_name":"ModerationsQuery"},"Structs/ModelsResult.html#/s:6OpenAI12ModelsResultV4dataSayAA05ModelD0VGvp":{"name":"data","abstract":"A list of model objects.
","parent_name":"ModelsResult"},"Structs/ModelsResult.html#/s:6OpenAI12ModelsResultV6objectSSvp":{"name":"object","abstract":"The object type, which is always list
The model identifier, which can be referenced in the API endpoints.
","parent_name":"ModelResult"},"Structs/ModelResult.html#/s:6OpenAI11ModelResultV7createdSdvp":{"name":"created","abstract":"The Unix timestamp (in seconds) when the model was created.
","parent_name":"ModelResult"},"Structs/ModelResult.html#/s:6OpenAI11ModelResultV6objectSSvp":{"name":"object","abstract":"The object type, which is always “model”.
","parent_name":"ModelResult"},"Structs/ModelResult.html#/s:6OpenAI11ModelResultV7ownedBySSvp":{"name":"ownedBy","abstract":"The organization that owns the model.
","parent_name":"ModelResult"},"Structs/ModelResult/CodingKeys.html":{"name":"CodingKeys","parent_name":"ModelResult"},"Structs/ModelQuery.html#/s:6OpenAI10ModelQueryV5modelSSvp":{"name":"model","abstract":"The ID of the model to use for this request.
","parent_name":"ModelQuery"},"Structs/ModelQuery.html#/s:6OpenAI10ModelQueryV5modelACSS_tcfc":{"name":"init(model:)","parent_name":"ModelQuery"},"Structs/ImagesResult/Image/CodingKeys.html#/s:6OpenAI12ImagesResultV5ImageV10CodingKeysO7b64JsonyA2GmF":{"name":"b64Json","parent_name":"CodingKeys"},"Structs/ImagesResult/Image/CodingKeys.html#/s:6OpenAI12ImagesResultV5ImageV10CodingKeysO13revisedPromptyA2GmF":{"name":"revisedPrompt","parent_name":"CodingKeys"},"Structs/ImagesResult/Image/CodingKeys.html#/s:6OpenAI12ImagesResultV5ImageV10CodingKeysO3urlyA2GmF":{"name":"url","parent_name":"CodingKeys"},"Structs/ImagesResult/Image.html#/s:6OpenAI12ImagesResultV5ImageV7b64JsonSSSgvp":{"name":"b64Json","abstract":"The base64-encoded JSON of the generated image, if response_format is b64_json
","parent_name":"Image"},"Structs/ImagesResult/Image.html#/s:6OpenAI12ImagesResultV5ImageV13revisedPromptSSSgvp":{"name":"revisedPrompt","abstract":"The prompt that was used to generate the image, if there was any revision to the prompt.
","parent_name":"Image"},"Structs/ImagesResult/Image.html#/s:6OpenAI12ImagesResultV5ImageV3urlSSSgvp":{"name":"url","abstract":"The URL of the generated image, if response_format is url (default).
","parent_name":"Image"},"Structs/ImagesResult/Image/CodingKeys.html":{"name":"CodingKeys","parent_name":"Image"},"Structs/ImagesResult.html#/s:6OpenAI12ImagesResultV7createdSdvp":{"name":"created","parent_name":"ImagesResult"},"Structs/ImagesResult.html#/s:6OpenAI12ImagesResultV4dataSayAC5ImageVGvp":{"name":"data","parent_name":"ImagesResult"},"Structs/ImagesResult/Image.html":{"name":"Image","abstract":"Represents the url or the content of an image generated by the OpenAI API.
","parent_name":"ImagesResult"},"Structs/ImagesQuery/Size.html#/s:6OpenAI11ImagesQueryV4SizeO4_256yA2EmF":{"name":"_256","parent_name":"Size"},"Structs/ImagesQuery/Size.html#/s:6OpenAI11ImagesQueryV4SizeO4_512yA2EmF":{"name":"_512","parent_name":"Size"},"Structs/ImagesQuery/Size.html#/s:6OpenAI11ImagesQueryV4SizeO5_1024yA2EmF":{"name":"_1024","parent_name":"Size"},"Structs/ImagesQuery/Size.html#/s:6OpenAI11ImagesQueryV4SizeO10_1792_1024yA2EmF":{"name":"_1792_1024","parent_name":"Size"},"Structs/ImagesQuery/Size.html#/s:6OpenAI11ImagesQueryV4SizeO10_1024_1792yA2EmF":{"name":"_1024_1792","parent_name":"Size"},"Structs/ImagesQuery/Quality.html#/s:6OpenAI11ImagesQueryV7QualityO8standardyA2EmF":{"name":"standard","parent_name":"Quality"},"Structs/ImagesQuery/Quality.html#/s:6OpenAI11ImagesQueryV7QualityO2hdyA2EmF":{"name":"hd","parent_name":"Quality"},"Structs/ImagesQuery/Style.html#/s:6OpenAI11ImagesQueryV5StyleO7naturalyA2EmF":{"name":"natural","parent_name":"Style"},"Structs/ImagesQuery/Style.html#/s:6OpenAI11ImagesQueryV5StyleO5vividyA2EmF":{"name":"vivid","parent_name":"Style"},"Structs/ImagesQuery/CodingKeys.html#/s:6OpenAI11ImagesQueryV10CodingKeysO5modelyA2EmF":{"name":"model","parent_name":"CodingKeys"},"Structs/ImagesQuery/CodingKeys.html#/s:6OpenAI11ImagesQueryV10CodingKeysO6promptyA2EmF":{"name":"prompt","parent_name":"CodingKeys"},"Structs/ImagesQuery/CodingKeys.html#/s:6OpenAI11ImagesQueryV10CodingKeysO1nyA2EmF":{"name":"n","parent_name":"CodingKeys"},"Structs/ImagesQuery/CodingKeys.html#/s:6OpenAI11ImagesQueryV10CodingKeysO4sizeyA2EmF":{"name":"size","parent_name":"CodingKeys"},"Structs/ImagesQuery/CodingKeys.html#/s:6OpenAI11ImagesQueryV10CodingKeysO4useryA2EmF":{"name":"user","parent_name":"CodingKeys"},"Structs/ImagesQuery/CodingKeys.html#/s:6OpenAI11ImagesQueryV10CodingKeysO5styleyA2EmF":{"name":"style","parent_name":"CodingKeys"},"Structs/ImagesQuery/CodingKeys.html#/s:6OpenAI11ImagesQueryV10CodingKeysO14responseFormatyA2EmF":{"name":"responseFormat",
"parent_name":"CodingKeys"},"Structs/ImagesQuery/CodingKeys.html#/s:6OpenAI11ImagesQueryV10CodingKeysO7qualityyA2EmF":{"name":"quality","parent_name":"CodingKeys"},"Structs/ImagesQuery/ResponseFormat.html#/s:6OpenAI11ImagesQueryV14ResponseFormatO3urlyA2EmF":{"name":"url","parent_name":"ResponseFormat"},"Structs/ImagesQuery/ResponseFormat.html#/s:6OpenAI11ImagesQueryV14ResponseFormatO8b64_jsonyA2EmF":{"name":"b64_json","parent_name":"ResponseFormat"},"Structs/ImagesQuery/ResponseFormat.html":{"name":"ResponseFormat","parent_name":"ImagesQuery"},"Structs/ImagesQuery.html#/s:6OpenAI11ImagesQueryV6promptSSvp":{"name":"prompt","abstract":"A text description of the desired image(s). The maximum length is 1000 characters for dall-e-2 and 4000 characters for dall-e-3.
","parent_name":"ImagesQuery"},"Structs/ImagesQuery.html#/s:6OpenAI11ImagesQueryV5modelSSSgvp":{"name":"model","abstract":"The model to use for image generation.","parent_name":"ImagesQuery"},"Structs/ImagesQuery.html#/s:6OpenAI11ImagesQueryV14responseFormatAC08ResponseF0OSgvp":{"name":"responseFormat","abstract":"
The format in which the generated images are returned. Must be one of url or b64_json.","parent_name":"ImagesQuery"},"Structs/ImagesQuery.html#/s:6OpenAI11ImagesQueryV1nSiSgvp":{"name":"n","abstract":"
The number of images to generate. Must be between 1 and 10. For dall-e-3, only n=1 is supported.","parent_name":"ImagesQuery"},"Structs/ImagesQuery.html#/s:6OpenAI11ImagesQueryV4sizeAC4SizeOSgvp":{"name":"size","abstract":"
The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024 for dall-e-2. Must be one of 1024x1024, 1792x1024, or 1024x1792 for dall-e-3 models.","parent_name":"ImagesQuery"},"Structs/ImagesQuery.html#/s:6OpenAI11ImagesQueryV4userSSSgvp":{"name":"user","abstract":"
A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.","parent_name":"ImagesQuery"},"Structs/ImagesQuery.html#/s:6OpenAI11ImagesQueryV5styleAC5StyleOSgvp":{"name":"style","abstract":"
The style of the generated images. Must be one of vivid or natural. Vivid causes the model to lean towards generating hyper-real and dramatic images. Natural causes the model to produce more natural, less hyper-real looking images. This param is only supported for dall-e-3.","parent_name":"ImagesQuery"},"Structs/ImagesQuery.html#/s:6OpenAI11ImagesQueryV7qualityAC7QualityOSgvp":{"name":"quality","abstract":"
The quality of the image that will be generated. hd creates images with finer details and greater consistency across the image. This param is only supported for dall-e-3.","parent_name":"ImagesQuery"},"Structs/ImagesQuery.html#/s:6OpenAI11ImagesQueryV6prompt5model1n7quality14responseFormat4size5style4userACSS_SSSgSiSgAC7QualityOSgAC08ResponseI0OSgAC4SizeOSgAC5StyleOSgALtcfc":{"name":"init(prompt:model:n:quality:responseFormat:size:style:user:)","parent_name":"ImagesQuery"},"Structs/ImagesQuery/CodingKeys.html":{"name":"CodingKeys","parent_name":"ImagesQuery"},"Structs/ImagesQuery/Style.html":{"name":"Style","parent_name":"ImagesQuery"},"Structs/ImagesQuery/Quality.html":{"name":"Quality","parent_name":"ImagesQuery"},"Structs/ImagesQuery/Size.html":{"name":"Size","parent_name":"ImagesQuery"},"Structs/ImageVariationsQuery/CodingKeys.html#/s:6OpenAI20ImageVariationsQueryV10CodingKeysO5imageyA2EmF":{"name":"image","parent_name":"CodingKeys"},"Structs/ImageVariationsQuery/CodingKeys.html#/s:6OpenAI20ImageVariationsQueryV10CodingKeysO5modelyA2EmF":{"name":"model","parent_name":"CodingKeys"},"Structs/ImageVariationsQuery/CodingKeys.html#/s:6OpenAI20ImageVariationsQueryV10CodingKeysO1nyA2EmF":{"name":"n","parent_name":"CodingKeys"},"Structs/ImageVariationsQuery/CodingKeys.html#/s:6OpenAI20ImageVariationsQueryV10CodingKeysO14responseFormatyA2EmF":{"name":"responseFormat","parent_name":"CodingKeys"},"Structs/ImageVariationsQuery/CodingKeys.html#/s:6OpenAI20ImageVariationsQueryV10CodingKeysO4sizeyA2EmF":{"name":"size","parent_name":"CodingKeys"},"Structs/ImageVariationsQuery/CodingKeys.html#/s:6OpenAI20ImageVariationsQueryV10CodingKeysO4useryA2EmF":{"name":"user","parent_name":"CodingKeys"},"Structs/ImageVariationsQuery.html#/s:6OpenAI20ImageVariationsQueryV14ResponseFormata":{"name":"ResponseFormat","parent_name":"ImageVariationsQuery"},"Structs/ImageVariationsQuery.html#/s:6OpenAI20ImageVariationsQueryV5image10Foundation4DataVvp":{"name":"image","abstract":"
The image to edit. Must be a valid PNG file, less than 4MB, and square.
","parent_name":"ImageVariationsQuery"},"Structs/ImageVariationsQuery.html#/s:6OpenAI20ImageVariationsQueryV5modelSSSgvp":{"name":"model","abstract":"The model to use for image generation. Only dall-e-2 is supported at this time.","parent_name":"ImageVariationsQuery"},"Structs/ImageVariationsQuery.html#/s:6OpenAI20ImageVariationsQueryV1nSiSgvp":{"name":"n","abstract":"
The number of images to generate. Must be between 1 and 10.","parent_name":"ImageVariationsQuery"},"Structs/ImageVariationsQuery.html#/s:6OpenAI20ImageVariationsQueryV14responseFormatAA06ImagesE0V08ResponseG0OSgvp":{"name":"responseFormat","abstract":"
The format in which the generated images are returned. Must be one of url or b64_json.","parent_name":"ImageVariationsQuery"},"Structs/ImageVariationsQuery.html#/s:6OpenAI20ImageVariationsQueryV4sizeSSSgvp":{"name":"size","abstract":"
The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024.","parent_name":"ImageVariationsQuery"},"Structs/ImageVariationsQuery.html#/s:6OpenAI20ImageVariationsQueryV4userSSSgvp":{"name":"user","abstract":"
A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.","parent_name":"ImageVariationsQuery"},"Structs/ImageVariationsQuery.html#/s:6OpenAI20ImageVariationsQueryV5image5model1n14responseFormat4size4userAC10Foundation4DataV_SSSgSiSgAA06ImagesE0V08ResponseI0OSgA2Mtcfc":{"name":"init(image:model:n:responseFormat:size:user:)","parent_name":"ImageVariationsQuery"},"Structs/ImageVariationsQuery/CodingKeys.html":{"name":"CodingKeys","parent_name":"ImageVariationsQuery"},"Structs/ImageEditsQuery/CodingKeys.html#/s:6OpenAI15ImageEditsQueryV10CodingKeysO5imageyA2EmF":{"name":"image","parent_name":"CodingKeys"},"Structs/ImageEditsQuery/CodingKeys.html#/s:6OpenAI15ImageEditsQueryV10CodingKeysO4maskyA2EmF":{"name":"mask","parent_name":"CodingKeys"},"Structs/ImageEditsQuery/CodingKeys.html#/s:6OpenAI15ImageEditsQueryV10CodingKeysO6promptyA2EmF":{"name":"prompt","parent_name":"CodingKeys"},"Structs/ImageEditsQuery/CodingKeys.html#/s:6OpenAI15ImageEditsQueryV10CodingKeysO5modelyA2EmF":{"name":"model","parent_name":"CodingKeys"},"Structs/ImageEditsQuery/CodingKeys.html#/s:6OpenAI15ImageEditsQueryV10CodingKeysO1nyA2EmF":{"name":"n","parent_name":"CodingKeys"},"Structs/ImageEditsQuery/CodingKeys.html#/s:6OpenAI15ImageEditsQueryV10CodingKeysO14responseFormatyA2EmF":{"name":"responseFormat","parent_name":"CodingKeys"},"Structs/ImageEditsQuery/CodingKeys.html#/s:6OpenAI15ImageEditsQueryV10CodingKeysO4sizeyA2EmF":{"name":"size","parent_name":"CodingKeys"},"Structs/ImageEditsQuery/CodingKeys.html#/s:6OpenAI15ImageEditsQueryV10CodingKeysO4useryA2EmF":{"name":"user","parent_name":"CodingKeys"},"Structs/ImageEditsQuery.html#/s:6OpenAI15ImageEditsQueryV14ResponseFormata":{"name":"ResponseFormat","parent_name":"ImageEditsQuery"},"Structs/ImageEditsQuery.html#/s:6OpenAI15ImageEditsQueryV4Sizea":{"name":"Size","parent_name":"ImageEditsQuery"},"Structs/ImageEditsQuery.html#/s:6OpenAI15ImageEditsQueryV5image10Foundation4DataVvp":{"name":"image","abstract"
:"
The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask.
","parent_name":"ImageEditsQuery"},"Structs/ImageEditsQuery.html#/s:6OpenAI15ImageEditsQueryV4mask10Foundation4DataVSgvp":{"name":"mask","abstract":"An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where image should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as image.
","parent_name":"ImageEditsQuery"},"Structs/ImageEditsQuery.html#/s:6OpenAI15ImageEditsQueryV6promptSSvp":{"name":"prompt","abstract":"A text description of the desired image(s). The maximum length is 1000 characters.
","parent_name":"ImageEditsQuery"},"Structs/ImageEditsQuery.html#/s:6OpenAI15ImageEditsQueryV5modelSSSgvp":{"name":"model","abstract":"The model to use for image generation.","parent_name":"ImageEditsQuery"},"Structs/ImageEditsQuery.html#/s:6OpenAI15ImageEditsQueryV1nSiSgvp":{"name":"n","abstract":"
The number of images to generate. Must be between 1 and 10.
","parent_name":"ImageEditsQuery"},"Structs/ImageEditsQuery.html#/s:6OpenAI15ImageEditsQueryV14responseFormatAA06ImagesE0V08ResponseG0OSgvp":{"name":"responseFormat","abstract":"The format in which the generated images are returned. Must be one of url or b64_json.","parent_name":"ImageEditsQuery"},"Structs/ImageEditsQuery.html#/s:6OpenAI15ImageEditsQueryV4sizeAA06ImagesE0V4SizeOSgvp":{"name":"size","abstract":"
The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024.
","parent_name":"ImageEditsQuery"},"Structs/ImageEditsQuery.html#/s:6OpenAI15ImageEditsQueryV4userSSSgvp":{"name":"user","abstract":"A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.","parent_name":"ImageEditsQuery"},"Structs/ImageEditsQuery.html#/s:6OpenAI15ImageEditsQueryV5image6prompt4mask5model1n14responseFormat4size4userAC10Foundation4DataV_SSANSgSSSgSiSgAA06ImagesE0V08ResponseK0OSgAS4SizeOSgAPtcfc":{"name":"init(image:prompt:mask:model:n:responseFormat:size:user:)","parent_name":"ImageEditsQuery"},"Structs/ImageEditsQuery/CodingKeys.html":{"name":"CodingKeys","parent_name":"ImageEditsQuery"},"Structs/EmbeddingsResult/Usage.html#/s:6OpenAI16EmbeddingsResultV5UsageV12promptTokensSivp":{"name":"promptTokens","parent_name":"Usage"},"Structs/EmbeddingsResult/Usage.html#/s:6OpenAI16EmbeddingsResultV5UsageV11totalTokensSivp":{"name":"totalTokens","parent_name":"Usage"},"Structs/EmbeddingsResult/Embedding.html#/s:6OpenAI16EmbeddingsResultV9EmbeddingV6objectSSvp":{"name":"object","abstract":"
The object type, which is always “embedding”.
","parent_name":"Embedding"},"Structs/EmbeddingsResult/Embedding.html#/s:6OpenAI16EmbeddingsResultV9EmbeddingV9embeddingSaySdGvp":{"name":"embedding","abstract":"The embedding vector, which is a list of floats. The length of vector depends on the model as listed in the embedding guide.","parent_name":"Embedding"},"Structs/EmbeddingsResult/Embedding.html":{"name":"Embedding","parent_name":"EmbeddingsResult"},"Structs/EmbeddingsResult/Usage.html":{"name":"Usage","parent_name":"EmbeddingsResult"},"Structs/EmbeddingsResult.html#/s:6OpenAI16EmbeddingsResultV4dataSayAC9EmbeddingVGvp":{"name":"data","parent_name":"EmbeddingsResult"},"Structs/EmbeddingsResult.html#/s:6OpenAI16EmbeddingsResultV5modelSSvp":{"name":"model","parent_name":"EmbeddingsResult"},"Structs/EmbeddingsResult.html#/s:6OpenAI16EmbeddingsResultV5usageAC5UsageVvp":{"name":"usage","parent_name":"EmbeddingsResult"},"Structs/EmbeddingsResult.html#/s:6OpenAI16EmbeddingsResultV6objectSSvp":{"name":"object","abstract":"
The object type, which is always “list”.
","parent_name":"EmbeddingsResult"},"Structs/EmbeddingsQuery/CodingKeys.html#/s:6OpenAI15EmbeddingsQueryV10CodingKeysO5inputyA2EmF":{"name":"input","parent_name":"CodingKeys"},"Structs/EmbeddingsQuery/CodingKeys.html#/s:6OpenAI15EmbeddingsQueryV10CodingKeysO5modelyA2EmF":{"name":"model","parent_name":"CodingKeys"},"Structs/EmbeddingsQuery/CodingKeys.html#/s:6OpenAI15EmbeddingsQueryV10CodingKeysO14encodingFormatyA2EmF":{"name":"encodingFormat","parent_name":"CodingKeys"},"Structs/EmbeddingsQuery/CodingKeys.html#/s:6OpenAI15EmbeddingsQueryV10CodingKeysO4useryA2EmF":{"name":"user","parent_name":"CodingKeys"},"Structs/EmbeddingsQuery/EncodingFormat.html#/s:6OpenAI15EmbeddingsQueryV14EncodingFormatO5floatyA2EmF":{"name":"float","parent_name":"EncodingFormat"},"Structs/EmbeddingsQuery/EncodingFormat.html#/s:6OpenAI15EmbeddingsQueryV14EncodingFormatO6base64yA2EmF":{"name":"base64","parent_name":"EncodingFormat"},"Structs/EmbeddingsQuery/Input.html#/s:6OpenAI15EmbeddingsQueryV5InputO6stringyAESScAEmF":{"name":"string(_:)","parent_name":"Input"},"Structs/EmbeddingsQuery/Input.html#/s:6OpenAI15EmbeddingsQueryV5InputO10stringListyAESaySSGcAEmF":{"name":"stringList(_:)","parent_name":"Input"},"Structs/EmbeddingsQuery/Input.html#/s:6OpenAI15EmbeddingsQueryV5InputO7intListyAESaySiGcAEmF":{"name":"intList(_:)","parent_name":"Input"},"Structs/EmbeddingsQuery/Input.html#/s:6OpenAI15EmbeddingsQueryV5InputO9intMatrixyAESaySaySiGGcAEmF":{"name":"intMatrix(_:)","parent_name":"Input"},"Structs/EmbeddingsQuery/Input.html#/s:SE6encode2toys7Encoder_p_tKF":{"name":"encode(to:)","parent_name":"Input"},"Structs/EmbeddingsQuery/Input.html#/s:6OpenAI15EmbeddingsQueryV5InputO6stringAESS_tcfc":{"name":"init(string:)","parent_name":"Input"},"Structs/EmbeddingsQuery/Input.html#/s:6OpenAI15EmbeddingsQueryV5InputO10stringListAESaySSG_tcfc":{"name":"init(stringList:)","parent_name":"Input"},"Structs/EmbeddingsQuery/Input.html#/s:6OpenAI15EmbeddingsQueryV5InputO7intListAESaySiG_tcfc":{"name":"init(intLi
st:)","parent_name":"Input"},"Structs/EmbeddingsQuery/Input.html#/s:6OpenAI15EmbeddingsQueryV5InputO9intMatrixAESaySaySiGG_tcfc":{"name":"init(intMatrix:)","parent_name":"Input"},"Structs/EmbeddingsQuery.html#/s:6OpenAI15EmbeddingsQueryV5inputAC5InputOvp":{"name":"input","abstract":"Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for text-embedding-ada-002), cannot be an empty string, and any array must be 2048 dimensions or less.
","parent_name":"EmbeddingsQuery"},"Structs/EmbeddingsQuery.html#/s:6OpenAI15EmbeddingsQueryV5modelSSvp":{"name":"model","abstract":"ID of the model to use. You can use the List models API to see all of your available models, or see our Model overview for descriptions of them.","parent_name":"EmbeddingsQuery"},"Structs/EmbeddingsQuery.html#/s:6OpenAI15EmbeddingsQueryV14encodingFormatAC08EncodingF0OSgvp":{"name":"encodingFormat","abstract":"
The format to return the embeddings in. Can be either float or base64.","parent_name":"EmbeddingsQuery"},"Structs/EmbeddingsQuery.html#/s:6OpenAI15EmbeddingsQueryV4userSSSgvp":{"name":"user","abstract":"
A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.","parent_name":"EmbeddingsQuery"},"Structs/EmbeddingsQuery.html#/s:6OpenAI15EmbeddingsQueryV5input5model14encodingFormat4userA2C5InputO_SSAC08EncodingH0OSgSSSgtcfc":{"name":"init(input:model:encodingFormat:user:)","parent_name":"EmbeddingsQuery"},"Structs/EmbeddingsQuery/Input.html":{"name":"Input","parent_name":"EmbeddingsQuery"},"Structs/EmbeddingsQuery/EncodingFormat.html":{"name":"EncodingFormat","parent_name":"EmbeddingsQuery"},"Structs/EmbeddingsQuery/CodingKeys.html":{"name":"CodingKeys","parent_name":"EmbeddingsQuery"},"Structs/EditsResult/Usage.html#/s:6OpenAI11EditsResultV5UsageV12promptTokensSivp":{"name":"promptTokens","parent_name":"Usage"},"Structs/EditsResult/Usage.html#/s:6OpenAI11EditsResultV5UsageV16completionTokensSivp":{"name":"completionTokens","parent_name":"Usage"},"Structs/EditsResult/Usage.html#/s:6OpenAI11EditsResultV5UsageV11totalTokensSivp":{"name":"totalTokens","parent_name":"Usage"},"Structs/EditsResult/Choice.html#/s:6OpenAI11EditsResultV6ChoiceV4textSSvp":{"name":"text","parent_name":"Choice"},"Structs/EditsResult/Choice.html":{"name":"Choice","parent_name":"EditsResult"},"Structs/EditsResult/Usage.html":{"name":"Usage","parent_name":"EditsResult"},"Structs/EditsResult.html#/s:6OpenAI11EditsResultV6objectSSvp":{"name":"object","parent_name":"EditsResult"},"Structs/EditsResult.html#/s:6OpenAI11EditsResultV7createdSdvp":{"name":"created","parent_name":"EditsResult"},"Structs/EditsResult.html#/s:6OpenAI11EditsResultV7choicesSayAC6ChoiceVGvp":{"name":"choices","parent_name":"EditsResult"},"Structs/EditsResult.html#/s:6OpenAI11EditsResultV5usageAC5UsageVvp":{"name":"usage","parent_name":"EditsResult"},"Structs/EditsQuery.html#/s:6OpenAI10EditsQueryV5modelSSvp":{"name":"model","abstract":"
ID of the model to use.
","parent_name":"EditsQuery"},"Structs/EditsQuery.html#/s:6OpenAI10EditsQueryV5inputSSSgvp":{"name":"input","abstract":"Input text to get embeddings for.
","parent_name":"EditsQuery"},"Structs/EditsQuery.html#/s:6OpenAI10EditsQueryV11instructionSSvp":{"name":"instruction","abstract":"The instruction that tells the model how to edit the prompt.
","parent_name":"EditsQuery"},"Structs/EditsQuery.html#/s:6OpenAI10EditsQueryV1nSiSgvp":{"name":"n","abstract":"The number of images to generate. Must be between 1 and 10.
","parent_name":"EditsQuery"},"Structs/EditsQuery.html#/s:6OpenAI10EditsQueryV11temperatureSdSgvp":{"name":"temperature","abstract":"What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer.
","parent_name":"EditsQuery"},"Structs/EditsQuery.html#/s:6OpenAI10EditsQueryV4topPSdSgvp":{"name":"topP","abstract":"An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
","parent_name":"EditsQuery"},"Structs/EditsQuery.html#/s:6OpenAI10EditsQueryV5model5input11instruction1n11temperature4topPACSS_SSSgSSSiSgSdSgALtcfc":{"name":"init(model:input:instruction:n:temperature:topP:)","parent_name":"EditsQuery"},"Structs/CompletionsResult/Choice.html#/s:6OpenAI17CompletionsResultV6ChoiceV4textSSvp":{"name":"text","parent_name":"Choice"},"Structs/CompletionsResult/Choice.html#/s:6OpenAI17CompletionsResultV6ChoiceV12finishReasonSSSgvp":{"name":"finishReason","parent_name":"Choice"},"Structs/CompletionsResult/Usage.html#/s:6OpenAI17CompletionsResultV5UsageV12promptTokensSivp":{"name":"promptTokens","parent_name":"Usage"},"Structs/CompletionsResult/Usage.html#/s:6OpenAI17CompletionsResultV5UsageV16completionTokensSivp":{"name":"completionTokens","parent_name":"Usage"},"Structs/CompletionsResult/Usage.html#/s:6OpenAI17CompletionsResultV5UsageV11totalTokensSivp":{"name":"totalTokens","parent_name":"Usage"},"Structs/CompletionsResult/Usage.html":{"name":"Usage","parent_name":"CompletionsResult"},"Structs/CompletionsResult/Choice.html":{"name":"Choice","parent_name":"CompletionsResult"},"Structs/CompletionsResult.html#/s:6OpenAI17CompletionsResultV2idSSvp":{"name":"id","parent_name":"CompletionsResult"},"Structs/CompletionsResult.html#/s:6OpenAI17CompletionsResultV6objectSSvp":{"name":"object","parent_name":"CompletionsResult"},"Structs/CompletionsResult.html#/s:6OpenAI17CompletionsResultV7createdSdvp":{"name":"created","parent_name":"CompletionsResult"},"Structs/CompletionsResult.html#/s:6OpenAI17CompletionsResultV5modelSSvp":{"name":"model","parent_name":"CompletionsResult"},"Structs/CompletionsResult.html#/s:6OpenAI17CompletionsResultV7choicesSayAC6ChoiceVGvp":{"name":"choices","parent_name":"CompletionsResult"},"Structs/CompletionsResult.html#/s:6OpenAI17CompletionsResultV5usageAC5UsageVSgvp":{"name":"usage","parent_name":"CompletionsResult"},"Structs/CompletionsQuery.html#/s:6OpenAI16CompletionsQueryV5modelSSvp":{"name":"model","abstract":"ID 
of the model to use.
","parent_name":"CompletionsQuery"},"Structs/CompletionsQuery.html#/s:6OpenAI16CompletionsQueryV6promptSSvp":{"name":"prompt","abstract":"The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.
","parent_name":"CompletionsQuery"},"Structs/CompletionsQuery.html#/s:6OpenAI16CompletionsQueryV11temperatureSdSgvp":{"name":"temperature","abstract":"What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer.
","parent_name":"CompletionsQuery"},"Structs/CompletionsQuery.html#/s:6OpenAI16CompletionsQueryV9maxTokensSiSgvp":{"name":"maxTokens","abstract":"The maximum number of tokens to generate in the completion.
","parent_name":"CompletionsQuery"},"Structs/CompletionsQuery.html#/s:6OpenAI16CompletionsQueryV4topPSdSgvp":{"name":"topP","abstract":"An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
","parent_name":"CompletionsQuery"},"Structs/CompletionsQuery.html#/s:6OpenAI16CompletionsQueryV16frequencyPenaltySdSgvp":{"name":"frequencyPenalty","abstract":"Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model’s likelihood to repeat the same line verbatim.
","parent_name":"CompletionsQuery"},"Structs/CompletionsQuery.html#/s:6OpenAI16CompletionsQueryV15presencePenaltySdSgvp":{"name":"presencePenalty","abstract":"Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model’s likelihood to talk about new topics.
","parent_name":"CompletionsQuery"},"Structs/CompletionsQuery.html#/s:6OpenAI16CompletionsQueryV4stopSaySSGSgvp":{"name":"stop","abstract":"Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
","parent_name":"CompletionsQuery"},"Structs/CompletionsQuery.html#/s:6OpenAI16CompletionsQueryV4userSSSgvp":{"name":"user","abstract":"A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
","parent_name":"CompletionsQuery"},"Structs/CompletionsQuery.html#/s:6OpenAI16CompletionsQueryV5model6prompt11temperature9maxTokens4topP16frequencyPenalty08presenceL04stop4userACSS_SSSdSgSiSgA3MSaySSGSgSSSgtcfc":{"name":"init(model:prompt:temperature:maxTokens:topP:frequencyPenalty:presencePenalty:stop:user:)","parent_name":"CompletionsQuery"},"Structs/ChatStreamResult/CodingKeys.html#/s:6OpenAI16ChatStreamResultV10CodingKeysO2idyA2EmF":{"name":"id","parent_name":"CodingKeys"},"Structs/ChatStreamResult/CodingKeys.html#/s:6OpenAI16ChatStreamResultV10CodingKeysO6objectyA2EmF":{"name":"object","parent_name":"CodingKeys"},"Structs/ChatStreamResult/CodingKeys.html#/s:6OpenAI16ChatStreamResultV10CodingKeysO7createdyA2EmF":{"name":"created","parent_name":"CodingKeys"},"Structs/ChatStreamResult/CodingKeys.html#/s:6OpenAI16ChatStreamResultV10CodingKeysO5modelyA2EmF":{"name":"model","parent_name":"CodingKeys"},"Structs/ChatStreamResult/CodingKeys.html#/s:6OpenAI16ChatStreamResultV10CodingKeysO7choicesyA2EmF":{"name":"choices","parent_name":"CodingKeys"},"Structs/ChatStreamResult/CodingKeys.html#/s:6OpenAI16ChatStreamResultV10CodingKeysO17systemFingerprintyA2EmF":{"name":"systemFingerprint","parent_name":"CodingKeys"},"Structs/ChatStreamResult/Choice/CodingKeys.html#/s:6OpenAI16ChatStreamResultV6ChoiceV10CodingKeysO5deltayA2GmF":{"name":"delta","parent_name":"CodingKeys"},"Structs/ChatStreamResult/Choice/CodingKeys.html#/s:6OpenAI16ChatStreamResultV6ChoiceV10CodingKeysO12finishReasonyA2GmF":{"name":"finishReason","parent_name":"CodingKeys"},"Structs/ChatStreamResult/Choice/CodingKeys.html#/s:6OpenAI16ChatStreamResultV6ChoiceV10CodingKeysO8logprobsyA2GmF":{"name":"logprobs","parent_name":"CodingKeys"},"Structs/ChatStreamResult/Choice/ChoiceLogprobs/ChatCompletionTokenLogprob/CodingKeys.html#/s:6OpenAI16ChatStreamResultV6ChoiceV0F8LogprobsV0C22CompletionTokenLogprobV10CodingKeysO5tokenyA2KmF":{"name":"token","parent_name":"CodingKeys"},"Structs/ChatStreamResult/Choice/ChoiceLog
probs/ChatCompletionTokenLogprob/CodingKeys.html#/s:6OpenAI16ChatStreamResultV6ChoiceV0F8LogprobsV0C22CompletionTokenLogprobV10CodingKeysO5bytesyA2KmF":{"name":"bytes","parent_name":"CodingKeys"},"Structs/ChatStreamResult/Choice/ChoiceLogprobs/ChatCompletionTokenLogprob/CodingKeys.html#/s:6OpenAI16ChatStreamResultV6ChoiceV0F8LogprobsV0C22CompletionTokenLogprobV10CodingKeysO7logprobyA2KmF":{"name":"logprob","parent_name":"CodingKeys"},"Structs/ChatStreamResult/Choice/ChoiceLogprobs/ChatCompletionTokenLogprob/CodingKeys.html#/s:6OpenAI16ChatStreamResultV6ChoiceV0F8LogprobsV0C22CompletionTokenLogprobV10CodingKeysO03topG0yA2KmF":{"name":"topLogprobs","parent_name":"CodingKeys"},"Structs/ChatStreamResult/Choice/ChoiceLogprobs/ChatCompletionTokenLogprob/TopLogprob.html#/s:6OpenAI16ChatStreamResultV6ChoiceV0F8LogprobsV0C22CompletionTokenLogprobV03TopJ0V5tokenSSvp":{"name":"token","abstract":"The token.
","parent_name":"TopLogprob"},"Structs/ChatStreamResult/Choice/ChoiceLogprobs/ChatCompletionTokenLogprob/TopLogprob.html#/s:6OpenAI16ChatStreamResultV6ChoiceV0F8LogprobsV0C22CompletionTokenLogprobV03TopJ0V5bytesSaySiGSgvp":{"name":"bytes","abstract":"A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be null if there is no bytes representation for the token.
","parent_name":"TopLogprob"},"Structs/ChatStreamResult/Choice/ChoiceLogprobs/ChatCompletionTokenLogprob/TopLogprob.html#/s:6OpenAI16ChatStreamResultV6ChoiceV0F8LogprobsV0C22CompletionTokenLogprobV03TopJ0V7logprobSdvp":{"name":"logprob","abstract":"The log probability of this token.
","parent_name":"TopLogprob"},"Structs/ChatStreamResult/Choice/ChoiceLogprobs/ChatCompletionTokenLogprob.html#/s:6OpenAI16ChatStreamResultV6ChoiceV0F8LogprobsV0C22CompletionTokenLogprobV5tokenSSvp":{"name":"token","abstract":"The token.
","parent_name":"ChatCompletionTokenLogprob"},"Structs/ChatStreamResult/Choice/ChoiceLogprobs/ChatCompletionTokenLogprob.html#/s:6OpenAI16ChatStreamResultV6ChoiceV0F8LogprobsV0C22CompletionTokenLogprobV5bytesSaySiGSgvp":{"name":"bytes","abstract":"A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be null if there is no bytes representation for the token.
","parent_name":"ChatCompletionTokenLogprob"},"Structs/ChatStreamResult/Choice/ChoiceLogprobs/ChatCompletionTokenLogprob.html#/s:6OpenAI16ChatStreamResultV6ChoiceV0F8LogprobsV0C22CompletionTokenLogprobV7logprobSdvp":{"name":"logprob","abstract":"The log probability of this token.
","parent_name":"ChatCompletionTokenLogprob"},"Structs/ChatStreamResult/Choice/ChoiceLogprobs/ChatCompletionTokenLogprob.html#/s:6OpenAI16ChatStreamResultV6ChoiceV0F8LogprobsV0C22CompletionTokenLogprobV03topG0SayAI03TopJ0VGSgvp":{"name":"topLogprobs","abstract":"List of the most likely tokens and their log probability, at this token position. In rare cases, there may be fewer than the number of requested top_logprobs returned.
","parent_name":"ChatCompletionTokenLogprob"},"Structs/ChatStreamResult/Choice/ChoiceLogprobs/ChatCompletionTokenLogprob/TopLogprob.html":{"name":"TopLogprob","parent_name":"ChatCompletionTokenLogprob"},"Structs/ChatStreamResult/Choice/ChoiceLogprobs/ChatCompletionTokenLogprob/CodingKeys.html":{"name":"CodingKeys","parent_name":"ChatCompletionTokenLogprob"},"Structs/ChatStreamResult/Choice/ChoiceLogprobs.html#/s:6OpenAI16ChatStreamResultV6ChoiceV0F8LogprobsV7contentSayAG0C22CompletionTokenLogprobVGSgvp":{"name":"content","abstract":"A list of message content tokens with log probability information.
","parent_name":"ChoiceLogprobs"},"Structs/ChatStreamResult/Choice/ChoiceLogprobs/ChatCompletionTokenLogprob.html":{"name":"ChatCompletionTokenLogprob","parent_name":"ChoiceLogprobs"},"Structs/ChatStreamResult/Choice/ChoiceDelta/CodingKeys.html#/s:6OpenAI16ChatStreamResultV6ChoiceV0F5DeltaV10CodingKeysO7contentyA2ImF":{"name":"content","parent_name":"CodingKeys"},"Structs/ChatStreamResult/Choice/ChoiceDelta/CodingKeys.html#/s:6OpenAI16ChatStreamResultV6ChoiceV0F5DeltaV10CodingKeysO4roleyA2ImF":{"name":"role","parent_name":"CodingKeys"},"Structs/ChatStreamResult/Choice/ChoiceDelta/CodingKeys.html#/s:6OpenAI16ChatStreamResultV6ChoiceV0F5DeltaV10CodingKeysO9toolCallsyA2ImF":{"name":"toolCalls","parent_name":"CodingKeys"},"Structs/ChatStreamResult/Choice/ChoiceDelta/ChoiceDeltaToolCall/ChoiceDeltaToolCallFunction.html#/s:6OpenAI16ChatStreamResultV6ChoiceV0F5DeltaV0fG8ToolCallV0fghI8FunctionV9argumentsSSSgvp":{"name":"arguments","abstract":"The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.
","parent_name":"ChoiceDeltaToolCallFunction"},"Structs/ChatStreamResult/Choice/ChoiceDelta/ChoiceDeltaToolCall/ChoiceDeltaToolCallFunction.html#/s:6OpenAI16ChatStreamResultV6ChoiceV0F5DeltaV0fG8ToolCallV0fghI8FunctionV4nameSSSgvp":{"name":"name","abstract":"The name of the function to call.
","parent_name":"ChoiceDeltaToolCallFunction"},"Structs/ChatStreamResult/Choice/ChoiceDelta/ChoiceDeltaToolCall/ChoiceDeltaToolCallFunction.html#/s:6OpenAI16ChatStreamResultV6ChoiceV0F5DeltaV0fG8ToolCallV0fghI8FunctionV9arguments4nameAKSSSg_ANtcfc":{"name":"init(arguments:name:)","parent_name":"ChoiceDeltaToolCallFunction"},"Structs/ChatStreamResult/Choice/ChoiceDelta/ChoiceDeltaToolCall.html#/s:6OpenAI16ChatStreamResultV6ChoiceV0F5DeltaV0fG8ToolCallV2idSSSgvp":{"name":"id","abstract":"The ID of the tool call.
","parent_name":"ChoiceDeltaToolCall"},"Structs/ChatStreamResult/Choice/ChoiceDelta/ChoiceDeltaToolCall.html#/s:6OpenAI16ChatStreamResultV6ChoiceV0F5DeltaV0fG8ToolCallV8functionAI0fghI8FunctionVSgvp":{"name":"function","abstract":"The function that the model called.
","parent_name":"ChoiceDeltaToolCall"},"Structs/ChatStreamResult/Choice/ChoiceDelta/ChoiceDeltaToolCall.html#/s:6OpenAI16ChatStreamResultV6ChoiceV0F5DeltaV0fG8ToolCallV4typeSSSgvp":{"name":"type","abstract":"The type of the tool. Currently, only function is supported.
","parent_name":"ChoiceDeltaToolCall"},"Structs/ChatStreamResult/Choice/ChoiceDelta/ChoiceDeltaToolCall.html#/s:6OpenAI16ChatStreamResultV6ChoiceV0F5DeltaV0fG8ToolCallV5index2id8functionAISi_SSSgAI0fghI8FunctionVSgtcfc":{"name":"init(index:id:function:)","parent_name":"ChoiceDeltaToolCall"},"Structs/ChatStreamResult/Choice/ChoiceDelta/ChoiceDeltaToolCall/ChoiceDeltaToolCallFunction.html":{"name":"ChoiceDeltaToolCallFunction","parent_name":"ChoiceDeltaToolCall"},"Structs/ChatStreamResult/Choice/ChoiceDelta.html#/s:6OpenAI16ChatStreamResultV6ChoiceV0F5DeltaV4Rolea":{"name":"Role","parent_name":"ChoiceDelta"},"Structs/ChatStreamResult/Choice/ChoiceDelta.html#/s:6OpenAI16ChatStreamResultV6ChoiceV0F5DeltaV7contentSSSgvp":{"name":"content","abstract":"The contents of the chunk message.
","parent_name":"ChoiceDelta"},"Structs/ChatStreamResult/Choice/ChoiceDelta.html#/s:6OpenAI16ChatStreamResultV6ChoiceV0F5DeltaV4roleAA0C5QueryV0C22CompletionMessageParamO4RoleOSgvp":{"name":"role","abstract":"The role of the author of this message.
","parent_name":"ChoiceDelta"},"Structs/ChatStreamResult/Choice/ChoiceDelta.html#/s:6OpenAI16ChatStreamResultV6ChoiceV0F5DeltaV9toolCallsSayAG0fG8ToolCallVGSgvp":{"name":"toolCalls","parent_name":"ChoiceDelta"},"Structs/ChatStreamResult/Choice/ChoiceDelta/ChoiceDeltaToolCall.html":{"name":"ChoiceDeltaToolCall","parent_name":"ChoiceDelta"},"Structs/ChatStreamResult/Choice/ChoiceDelta/CodingKeys.html":{"name":"CodingKeys","parent_name":"ChoiceDelta"},"Structs/ChatStreamResult/Choice.html#/s:6OpenAI16ChatStreamResultV6ChoiceV12FinishReasona":{"name":"FinishReason","parent_name":"Choice"},"Structs/ChatStreamResult/Choice/ChoiceDelta.html":{"name":"ChoiceDelta","parent_name":"Choice"},"Structs/ChatStreamResult/Choice.html#/s:6OpenAI16ChatStreamResultV6ChoiceV5deltaAE0F5DeltaVvp":{"name":"delta","abstract":"A chat completion delta generated by streamed model responses.
","parent_name":"Choice"},"Structs/ChatStreamResult/Choice.html#/s:6OpenAI16ChatStreamResultV6ChoiceV12finishReasonAA0cE0VADV06FinishH0OSgvp":{"name":"finishReason","abstract":"The reason the model stopped generating tokens.","parent_name":"Choice"},"Structs/ChatStreamResult/Choice.html#/s:6OpenAI16ChatStreamResultV6ChoiceV8logprobsAE0F8LogprobsVSgvp":{"name":"logprobs","abstract":"
Log probability information for the choice.
","parent_name":"Choice"},"Structs/ChatStreamResult/Choice/ChoiceLogprobs.html":{"name":"ChoiceLogprobs","parent_name":"Choice"},"Structs/ChatStreamResult/Choice/CodingKeys.html":{"name":"CodingKeys","parent_name":"Choice"},"Structs/ChatStreamResult/Choice.html":{"name":"Choice","parent_name":"ChatStreamResult"},"Structs/ChatStreamResult.html#/s:6OpenAI16ChatStreamResultV2idSSvp":{"name":"id","abstract":"A unique identifier for the chat completion. Each chunk has the same ID.
","parent_name":"ChatStreamResult"},"Structs/ChatStreamResult.html#/s:6OpenAI16ChatStreamResultV6objectSSvp":{"name":"object","abstract":"The object type, which is always chat.completion.chunk
.
The Unix timestamp (in seconds) of when the chat completion was created.","parent_name":"ChatStreamResult"},"Structs/ChatStreamResult.html#/s:6OpenAI16ChatStreamResultV5modelSSvp":{"name":"model","abstract":"
The model to generate the completion.
","parent_name":"ChatStreamResult"},"Structs/ChatStreamResult.html#/s:6OpenAI16ChatStreamResultV7choicesSayAC6ChoiceVGvp":{"name":"choices","abstract":"A list of chat completion choices.","parent_name":"ChatStreamResult"},"Structs/ChatStreamResult.html#/s:6OpenAI16ChatStreamResultV17systemFingerprintSSSgvp":{"name":"systemFingerprint","abstract":"
This fingerprint represents the backend configuration that the model runs with. Can be used in conjunction with the seed
request parameter to understand when backend changes have been made that might impact determinism.
Number of tokens in the generated completion.
","parent_name":"CompletionUsage"},"Structs/ChatResult/CompletionUsage.html#/s:6OpenAI10ChatResultV15CompletionUsageV12promptTokensSivp":{"name":"promptTokens","abstract":"Number of tokens in the prompt.
","parent_name":"CompletionUsage"},"Structs/ChatResult/CompletionUsage.html#/s:6OpenAI10ChatResultV15CompletionUsageV11totalTokensSivp":{"name":"totalTokens","abstract":"Total number of tokens used in the request (prompt + completion).
","parent_name":"CompletionUsage"},"Structs/ChatResult/Choice/FinishReason.html#/s:6OpenAI10ChatResultV6ChoiceV12FinishReasonO4stopyA2GmF":{"name":"stop","parent_name":"FinishReason"},"Structs/ChatResult/Choice/FinishReason.html#/s:6OpenAI10ChatResultV6ChoiceV12FinishReasonO6lengthyA2GmF":{"name":"length","parent_name":"FinishReason"},"Structs/ChatResult/Choice/FinishReason.html#/s:6OpenAI10ChatResultV6ChoiceV12FinishReasonO9toolCallsyA2GmF":{"name":"toolCalls","parent_name":"FinishReason"},"Structs/ChatResult/Choice/FinishReason.html#/s:6OpenAI10ChatResultV6ChoiceV12FinishReasonO13contentFilteryA2GmF":{"name":"contentFilter","parent_name":"FinishReason"},"Structs/ChatResult/Choice/FinishReason.html#/s:6OpenAI10ChatResultV6ChoiceV12FinishReasonO12functionCallyA2GmF":{"name":"functionCall","parent_name":"FinishReason"},"Structs/ChatResult/Choice/CodingKeys.html#/s:6OpenAI10ChatResultV6ChoiceV10CodingKeysO8logprobsyA2GmF":{"name":"logprobs","parent_name":"CodingKeys"},"Structs/ChatResult/Choice/CodingKeys.html#/s:6OpenAI10ChatResultV6ChoiceV10CodingKeysO7messageyA2GmF":{"name":"message","parent_name":"CodingKeys"},"Structs/ChatResult/Choice/CodingKeys.html#/s:6OpenAI10ChatResultV6ChoiceV10CodingKeysO12finishReasonyA2GmF":{"name":"finishReason","parent_name":"CodingKeys"},"Structs/ChatResult/Choice/ChoiceLogprobs/ChatCompletionTokenLogprob/CodingKeys.html#/s:6OpenAI10ChatResultV6ChoiceV0E8LogprobsV0C22CompletionTokenLogprobV10CodingKeysO5tokenyA2KmF":{"name":"token","parent_name":"CodingKeys"},"Structs/ChatResult/Choice/ChoiceLogprobs/ChatCompletionTokenLogprob/CodingKeys.html#/s:6OpenAI10ChatResultV6ChoiceV0E8LogprobsV0C22CompletionTokenLogprobV10CodingKeysO5bytesyA2KmF":{"name":"bytes","parent_name":"CodingKeys"},"Structs/ChatResult/Choice/ChoiceLogprobs/ChatCompletionTokenLogprob/CodingKeys.html#/s:6OpenAI10ChatResultV6ChoiceV0E8LogprobsV0C22CompletionTokenLogprobV10CodingKeysO7logprobyA2KmF":{"name":"logprob","parent_name":"CodingKeys"},"Structs/ChatResult/Choice/C
hoiceLogprobs/ChatCompletionTokenLogprob/CodingKeys.html#/s:6OpenAI10ChatResultV6ChoiceV0E8LogprobsV0C22CompletionTokenLogprobV10CodingKeysO03topF0yA2KmF":{"name":"topLogprobs","parent_name":"CodingKeys"},"Structs/ChatResult/Choice/ChoiceLogprobs/ChatCompletionTokenLogprob/TopLogprob.html#/s:6OpenAI10ChatResultV6ChoiceV0E8LogprobsV0C22CompletionTokenLogprobV03TopI0V5tokenSSvp":{"name":"token","abstract":"The token.
","parent_name":"TopLogprob"},"Structs/ChatResult/Choice/ChoiceLogprobs/ChatCompletionTokenLogprob/TopLogprob.html#/s:6OpenAI10ChatResultV6ChoiceV0E8LogprobsV0C22CompletionTokenLogprobV03TopI0V5bytesSaySiGSgvp":{"name":"bytes","abstract":"A list of integers representing the UTF-8 bytes representation of the token.","parent_name":"TopLogprob"},"Structs/ChatResult/Choice/ChoiceLogprobs/ChatCompletionTokenLogprob/TopLogprob.html#/s:6OpenAI10ChatResultV6ChoiceV0E8LogprobsV0C22CompletionTokenLogprobV03TopI0V7logprobSdvp":{"name":"logprob","abstract":"
The log probability of this token.
","parent_name":"TopLogprob"},"Structs/ChatResult/Choice/ChoiceLogprobs/ChatCompletionTokenLogprob.html#/s:6OpenAI10ChatResultV6ChoiceV0E8LogprobsV0C22CompletionTokenLogprobV5tokenSSvp":{"name":"token","abstract":"The token.
","parent_name":"ChatCompletionTokenLogprob"},"Structs/ChatResult/Choice/ChoiceLogprobs/ChatCompletionTokenLogprob.html#/s:6OpenAI10ChatResultV6ChoiceV0E8LogprobsV0C22CompletionTokenLogprobV5bytesSaySiGSgvp":{"name":"bytes","abstract":"A list of integers representing the UTF-8 bytes representation of the token.","parent_name":"ChatCompletionTokenLogprob"},"Structs/ChatResult/Choice/ChoiceLogprobs/ChatCompletionTokenLogprob.html#/s:6OpenAI10ChatResultV6ChoiceV0E8LogprobsV0C22CompletionTokenLogprobV7logprobSdvp":{"name":"logprob","abstract":"
The log probability of this token.
","parent_name":"ChatCompletionTokenLogprob"},"Structs/ChatResult/Choice/ChoiceLogprobs/ChatCompletionTokenLogprob.html#/s:6OpenAI10ChatResultV6ChoiceV0E8LogprobsV0C22CompletionTokenLogprobV03topF0SayAI03TopI0VGvp":{"name":"topLogprobs","abstract":"List of the most likely tokens and their log probability, at this token position.","parent_name":"ChatCompletionTokenLogprob"},"Structs/ChatResult/Choice/ChoiceLogprobs/ChatCompletionTokenLogprob/TopLogprob.html":{"name":"TopLogprob","parent_name":"ChatCompletionTokenLogprob"},"Structs/ChatResult/Choice/ChoiceLogprobs/ChatCompletionTokenLogprob/CodingKeys.html":{"name":"CodingKeys","parent_name":"ChatCompletionTokenLogprob"},"Structs/ChatResult/Choice/ChoiceLogprobs.html#/s:6OpenAI10ChatResultV6ChoiceV0E8LogprobsV7contentSayAG0C22CompletionTokenLogprobVGSgvp":{"name":"content","parent_name":"ChoiceLogprobs"},"Structs/ChatResult/Choice/ChoiceLogprobs/ChatCompletionTokenLogprob.html":{"name":"ChatCompletionTokenLogprob","parent_name":"ChoiceLogprobs"},"Structs/ChatResult/Choice.html#/s:6OpenAI10ChatResultV6ChoiceV0C17CompletionMessagea":{"name":"ChatCompletionMessage","parent_name":"Choice"},"Structs/ChatResult/Choice.html#/s:6OpenAI10ChatResultV6ChoiceV8logprobsAE0E8LogprobsVSgvp":{"name":"logprobs","abstract":"
Log probability information for the choice.
","parent_name":"Choice"},"Structs/ChatResult/Choice.html#/s:6OpenAI10ChatResultV6ChoiceV7messageAA0C5QueryV0C22CompletionMessageParamOvp":{"name":"message","abstract":"A chat completion message generated by the model.
","parent_name":"Choice"},"Structs/ChatResult/Choice.html#/s:6OpenAI10ChatResultV6ChoiceV12finishReasonSSSgvp":{"name":"finishReason","abstract":"The reason the model stopped generating tokens. This will be stop if the model hit a natural stop point or a provided stop sequence, length if the maximum number of tokens specified in the request was reached, content_filter if content was omitted due to a flag from our content filters, tool_calls if the model called a tool, or function_call (deprecated) if the model called a function.
","parent_name":"Choice"},"Structs/ChatResult/Choice/ChoiceLogprobs.html":{"name":"ChoiceLogprobs","parent_name":"Choice"},"Structs/ChatResult/Choice/CodingKeys.html":{"name":"CodingKeys","parent_name":"Choice"},"Structs/ChatResult/Choice/FinishReason.html":{"name":"FinishReason","parent_name":"Choice"},"Structs/ChatResult/Choice.html":{"name":"Choice","abstract":"mimic the choices array in the chat completion object
","parent_name":"ChatResult"},"Structs/ChatResult/CompletionUsage.html":{"name":"CompletionUsage","parent_name":"ChatResult"},"Structs/ChatResult.html#/s:6OpenAI10ChatResultV2idSSvp":{"name":"id","abstract":"A unique identifier for the chat completion.
","parent_name":"ChatResult"},"Structs/ChatResult.html#/s:6OpenAI10ChatResultV6objectSSvp":{"name":"object","abstract":"The object type, which is always chat.completion.
","parent_name":"ChatResult"},"Structs/ChatResult.html#/s:6OpenAI10ChatResultV7createdSdvp":{"name":"created","abstract":"The Unix timestamp (in seconds) of when the chat completion was created.
","parent_name":"ChatResult"},"Structs/ChatResult.html#/s:6OpenAI10ChatResultV5modelSSvp":{"name":"model","abstract":"The model used for the chat completion.
","parent_name":"ChatResult"},"Structs/ChatResult.html#/s:6OpenAI10ChatResultV7choicesSayAC6ChoiceVGvp":{"name":"choices","abstract":"A list of chat completion choices. Can be more than one if n is greater than 1.
","parent_name":"ChatResult"},"Structs/ChatResult.html#/s:6OpenAI10ChatResultV5usageAC15CompletionUsageVSgvp":{"name":"usage","abstract":"Usage statistics for the completion request.
","parent_name":"ChatResult"},"Structs/ChatResult.html#/s:6OpenAI10ChatResultV17systemFingerprintSSSgvp":{"name":"systemFingerprint","abstract":"This fingerprint represents the backend configuration that the model runs with.","parent_name":"ChatResult"},"Structs/ChatResult/CodingKeys.html":{"name":"CodingKeys","parent_name":"ChatResult"},"Structs/ChatQuery/CodingKeys.html#/s:6OpenAI9ChatQueryV10CodingKeysO8messagesyA2EmF":{"name":"messages","parent_name":"CodingKeys"},"Structs/ChatQuery/CodingKeys.html#/s:6OpenAI9ChatQueryV10CodingKeysO5modelyA2EmF":{"name":"model","parent_name":"CodingKeys"},"Structs/ChatQuery/CodingKeys.html#/s:6OpenAI9ChatQueryV10CodingKeysO16frequencyPenaltyyA2EmF":{"name":"frequencyPenalty","parent_name":"CodingKeys"},"Structs/ChatQuery/CodingKeys.html#/s:6OpenAI9ChatQueryV10CodingKeysO9logitBiasyA2EmF":{"name":"logitBias","parent_name":"CodingKeys"},"Structs/ChatQuery/CodingKeys.html#/s:6OpenAI9ChatQueryV10CodingKeysO8logprobsyA2EmF":{"name":"logprobs","parent_name":"CodingKeys"},"Structs/ChatQuery/CodingKeys.html#/s:6OpenAI9ChatQueryV10CodingKeysO9maxTokensyA2EmF":{"name":"maxTokens","parent_name":"CodingKeys"},"Structs/ChatQuery/CodingKeys.html#/s:6OpenAI9ChatQueryV10CodingKeysO1nyA2EmF":{"name":"n","parent_name":"CodingKeys"},"Structs/ChatQuery/CodingKeys.html#/s:6OpenAI9ChatQueryV10CodingKeysO15presencePenaltyyA2EmF":{"name":"presencePenalty","parent_name":"CodingKeys"},"Structs/ChatQuery/CodingKeys.html#/s:6OpenAI9ChatQueryV10CodingKeysO14responseFormatyA2EmF":{"name":"responseFormat","parent_name":"CodingKeys"},"Structs/ChatQuery/CodingKeys.html#/s:6OpenAI9ChatQueryV10CodingKeysO4seedyA2EmF":{"name":"seed","parent_name":"CodingKeys"},"Structs/ChatQuery/CodingKeys.html#/s:6OpenAI9ChatQueryV10CodingKeysO4stopyA2EmF":{"name":"stop","parent_name":"CodingKeys"},"Structs/ChatQuery/CodingKeys.html#/s:6OpenAI9ChatQueryV10CodingKeysO11temperatureyA2EmF":{"name":"temperature","parent_name":"CodingKeys"},"Structs/ChatQuery/CodingKeys.html#/s:6OpenA
I9ChatQueryV10CodingKeysO10toolChoiceyA2EmF":{"name":"toolChoice","parent_name":"CodingKeys"},"Structs/ChatQuery/CodingKeys.html#/s:6OpenAI9ChatQueryV10CodingKeysO5toolsyA2EmF":{"name":"tools","parent_name":"CodingKeys"},"Structs/ChatQuery/CodingKeys.html#/s:6OpenAI9ChatQueryV10CodingKeysO11topLogprobsyA2EmF":{"name":"topLogprobs","parent_name":"CodingKeys"},"Structs/ChatQuery/CodingKeys.html#/s:6OpenAI9ChatQueryV10CodingKeysO4topPyA2EmF":{"name":"topP","parent_name":"CodingKeys"},"Structs/ChatQuery/CodingKeys.html#/s:6OpenAI9ChatQueryV10CodingKeysO4useryA2EmF":{"name":"user","parent_name":"CodingKeys"},"Structs/ChatQuery/CodingKeys.html#/s:6OpenAI9ChatQueryV10CodingKeysO6streamyA2EmF":{"name":"stream","parent_name":"CodingKeys"},"Structs/ChatQuery/ChatCompletionToolParam/ToolsType.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV9ToolsTypeO8functionyA2GmF":{"name":"function","parent_name":"ToolsType"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/JSONType.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8JSONTypeO7integeryA2KmF":{"name":"integer","parent_name":"JSONType"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/JSONType.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8JSONTypeO6stringyA2KmF":{"name":"string","parent_name":"JSONType"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/JSONType.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8JSONTypeO7booleanyA2KmF":{"name":"boolean","parent_name":"JSONType"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/JSONType.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8JSONTypeO5arrayyA2KmF":{"name":"array","parent_name":"JSONType"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/JSONType.html#/s:6OpenAI
9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8JSONTypeO6objectyA2KmF":{"name":"object","parent_name":"JSONType"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/JSONType.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8JSONTypeO6numberyA2KmF":{"name":"number","parent_name":"JSONType"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/JSONType.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8JSONTypeO4nullyA2KmF":{"name":"null","parent_name":"JSONType"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property/Items.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV5ItemsV8JSONTypea":{"name":"JSONType","parent_name":"Items"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property/Items.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV5ItemsV4typeAI8JSONTypeOvp":{"name":"type","parent_name":"Items"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property/Items.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV5ItemsV10propertiesSDySSAKGSgvp":{"name":"properties","parent_name":"Items"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property/Items.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV5ItemsV7patternSSSgvp":{"name":"pattern","parent_name":"Items"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property/Items.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV5ItemsV5constSSSgvp":{"name":"const","parent_name":"Items"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property
/Items.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV5ItemsV4enumSaySSGSgvp":{"name":"enum","parent_name":"Items"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property/Items.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV5ItemsV10multipleOfSiSgvp":{"name":"multipleOf","parent_name":"Items"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property/Items.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV5ItemsV7minimumSdSgvp":{"name":"minimum","parent_name":"Items"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property/Items.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV5ItemsV7maximumSdSgvp":{"name":"maximum","parent_name":"Items"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property/Items.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV5ItemsV03minL0SiSgvp":{"name":"minItems","parent_name":"Items"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property/Items.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV5ItemsV03maxL0SiSgvp":{"name":"maxItems","parent_name":"Items"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property/Items.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV5ItemsV06uniqueL0SbSgvp":{"name":"uniqueItems","parent_name":"Items"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property/Items.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV5ItemsV4type10properties7pattern5const4enum10multipleOf7minimum7maximum03minL003maxL006uniqueL
0AmI8JSONTypeO_SDySSAKGSgSSSgA1_SaySSGSgSiSgSdSgA5_A4_A4_SbSgtcfc":{"name":"init(type:properties:pattern:const:enum:multipleOf:minimum:maximum:minItems:maxItems:uniqueItems:)","parent_name":"Items"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV8JSONTypea":{"name":"JSONType","parent_name":"Property"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV4typeAI8JSONTypeOvp":{"name":"type","parent_name":"Property"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV11descriptionSSSgvp":{"name":"description","parent_name":"Property"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV6formatSSSgvp":{"name":"format","parent_name":"Property"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV5itemsAK5ItemsVSgvp":{"name":"items","parent_name":"Property"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV8requiredSaySSGSgvp":{"name":"required","parent_name":"Property"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV7patternSSSgvp":{"name":"pattern","parent_name":"Property"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/Fu
nctionParameters/Property.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV5constSSSgvp":{"name":"const","parent_name":"Property"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV4enumSaySSGSgvp":{"name":"enum","parent_name":"Property"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV10multipleOfSiSgvp":{"name":"multipleOf","parent_name":"Property"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV7minimumSdSgvp":{"name":"minimum","parent_name":"Property"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV7maximumSdSgvp":{"name":"maximum","parent_name":"Property"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV8minItemsSiSgvp":{"name":"minItems","parent_name":"Property"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV8maxItemsSiSgvp":{"name":"maxItems","parent_name":"Property"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV11uniqueItemsSbSgvp":{"name":"uniqueItems","parent_name":"Property"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/P
roperty.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8PropertyV4type11description6format5items8required7pattern5const4enum10multipleOf7minimum7maximum8minItems03maxY006uniqueY0AkI8JSONTypeO_SSSgA0_AK0Y0VSgSaySSGSgA0_A0_A5_SiSgSdSgA7_A6_A6_SbSgtcfc":{"name":"init(type:description:format:items:required:pattern:const:enum:multipleOf:minimum:maximum:minItems:maxItems:uniqueItems:)","parent_name":"Property"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property/Items.html":{"name":"Items","parent_name":"Property"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV4typeAI8JSONTypeOvp":{"name":"type","parent_name":"FunctionParameters"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV10propertiesSDySSAI8PropertyVGSgvp":{"name":"properties","parent_name":"FunctionParameters"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV8requiredSaySSGSgvp":{"name":"required","parent_name":"FunctionParameters"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV7patternSSSgvp":{"name":"pattern","parent_name":"FunctionParameters"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV5constSSSgvp":{"name":"const","parent_name":"FunctionParameters"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV4enumSaySSGSgvp":{"name":"enu
m","parent_name":"FunctionParameters"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV10multipleOfSiSgvp":{"name":"multipleOf","parent_name":"FunctionParameters"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV7minimumSiSgvp":{"name":"minimum","parent_name":"FunctionParameters"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV7maximumSiSgvp":{"name":"maximum","parent_name":"FunctionParameters"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV0H10ParametersV4type10properties8required7pattern5const4enum10multipleOf7minimum7maximumA2I8JSONTypeO_SDySSAI8PropertyVGSgSaySSGSgSSSgA_AZSiSgA0_A0_tcfc":{"name":"init(type:properties:required:pattern:const:enum:multipleOf:minimum:maximum:)","parent_name":"FunctionParameters"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/Property.html":{"name":"Property","parent_name":"FunctionParameters"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters/JSONType.html":{"name":"JSONType","parent_name":"FunctionParameters"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV4nameSSvp":{"name":"name","abstract":"
The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.
","parent_name":"FunctionDefinition"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV11descriptionSSSgvp":{"name":"description","abstract":"The description of what the function does.
","parent_name":"FunctionDefinition"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV10parametersAG0H10ParametersVSgvp":{"name":"parameters","abstract":"The parameters the functions accepts, described as a JSON Schema object.","parent_name":"FunctionDefinition"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV18FunctionDefinitionV4name11description10parametersAGSS_SSSgAG0H10ParametersVSgtcfc":{"name":"init(name:description:parameters:)","parent_name":"FunctionDefinition"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition/FunctionParameters.html":{"name":"FunctionParameters","abstract":"
See the guide for examples, and the JSON Schema reference for documentation about the format.
","parent_name":"FunctionDefinition"},"Structs/ChatQuery/ChatCompletionToolParam.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV8functionAE18FunctionDefinitionVvp":{"name":"function","parent_name":"ChatCompletionToolParam"},"Structs/ChatQuery/ChatCompletionToolParam.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV4typeAE9ToolsTypeOvp":{"name":"type","parent_name":"ChatCompletionToolParam"},"Structs/ChatQuery/ChatCompletionToolParam.html#/s:6OpenAI9ChatQueryV0C19CompletionToolParamV8functionA2E18FunctionDefinitionV_tcfc":{"name":"init(function:)","parent_name":"ChatCompletionToolParam"},"Structs/ChatQuery/ChatCompletionToolParam/FunctionDefinition.html":{"name":"FunctionDefinition","parent_name":"ChatCompletionToolParam"},"Structs/ChatQuery/ChatCompletionToolParam/ToolsType.html":{"name":"ToolsType","parent_name":"ChatCompletionToolParam"},"Structs/ChatQuery/ChatCompletionFunctionCallOptionParam.html#/s:6OpenAI9ChatQueryV0C33CompletionFunctionCallOptionParamO4noneyA2EmF":{"name":"none","parent_name":"ChatCompletionFunctionCallOptionParam"},"Structs/ChatQuery/ChatCompletionFunctionCallOptionParam.html#/s:6OpenAI9ChatQueryV0C33CompletionFunctionCallOptionParamO4autoyA2EmF":{"name":"auto","parent_name":"ChatCompletionFunctionCallOptionParam"},"Structs/ChatQuery/ChatCompletionFunctionCallOptionParam.html#/s:6OpenAI9ChatQueryV0C33CompletionFunctionCallOptionParamO8functionyAESScAEmF":{"name":"function(_:)","parent_name":"ChatCompletionFunctionCallOptionParam"},"Structs/ChatQuery/ChatCompletionFunctionCallOptionParam.html#/s:SE6encode2toys7Encoder_p_tKF":{"name":"encode(to:)","parent_name":"ChatCompletionFunctionCallOptionParam"},"Structs/ChatQuery/ChatCompletionFunctionCallOptionParam.html#/s:6OpenAI9ChatQueryV0C33CompletionFunctionCallOptionParamO8functionAESS_tcfc":{"name":"init(function:)","parent_name":"ChatCompletionFunctionCallOptionParam"},"Structs/ChatQuery/ResponseFormat.html#/s:6OpenAI9ChatQueryV14ResponseFormatO10jsonObjectyA2EmF":{"name":"jsonObject","par
ent_name":"ResponseFormat"},"Structs/ChatQuery/ResponseFormat.html#/s:6OpenAI9ChatQueryV14ResponseFormatO4textyA2EmF":{"name":"text","parent_name":"ResponseFormat"},"Structs/ChatQuery/ResponseFormat.html#/s:SE6encode2toys7Encoder_p_tKF":{"name":"encode(to:)","parent_name":"ResponseFormat"},"Structs/ChatQuery/Stop.html#/s:6OpenAI9ChatQueryV4StopO6stringyAESScAEmF":{"name":"string(_:)","parent_name":"Stop"},"Structs/ChatQuery/Stop.html#/s:6OpenAI9ChatQueryV4StopO10stringListyAESaySSGcAEmF":{"name":"stringList(_:)","parent_name":"Stop"},"Structs/ChatQuery/Stop.html#/s:SE6encode2toys7Encoder_p_tKF":{"name":"encode(to:)","parent_name":"Stop"},"Structs/ChatQuery/Stop.html#/s:6OpenAI9ChatQueryV4StopO6stringAESS_tcfc":{"name":"init(string:)","parent_name":"Stop"},"Structs/ChatQuery/Stop.html#/s:6OpenAI9ChatQueryV4StopO10stringListAESaySSG_tcfc":{"name":"init(stringList:)","parent_name":"Stop"},"Structs/ChatQuery/ChatCompletionMessageParam/Role.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO4RoleO6systemyA2GmF":{"name":"system","parent_name":"Role"},"Structs/ChatQuery/ChatCompletionMessageParam/Role.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO4RoleO4useryA2GmF":{"name":"user","parent_name":"Role"},"Structs/ChatQuery/ChatCompletionMessageParam/Role.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO4RoleO9assistantyA2GmF":{"name":"assistant","parent_name":"Role"},"Structs/ChatQuery/ChatCompletionMessageParam/Role.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO4RoleO4toolyA2GmF":{"name":"tool","parent_name":"Role"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionToolMessageParam/CodingKeys.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4ToolfG0V10CodingKeysO7contentyA2ImF":{"name":"content","parent_name":"CodingKeys"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionToolMessageParam/CodingKeys.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4ToolfG0V10CodingKeysO4roleyA2ImF":{"name":"role","parent_name":"CodingKeys
"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionToolMessageParam/CodingKeys.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4ToolfG0V10CodingKeysO10toolCallIdyA2ImF":{"name":"toolCallId","parent_name":"CodingKeys"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionToolMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4ToolfG0V4Rolea":{"name":"Role","parent_name":"ChatCompletionToolMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionToolMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4ToolfG0V7contentSSvp":{"name":"content","abstract":"The contents of the tool message.
","parent_name":"ChatCompletionToolMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionToolMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4ToolfG0V4roleAE4RoleOvp":{"name":"role","abstract":"The role of the messages author, in this case tool.
","parent_name":"ChatCompletionToolMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionToolMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4ToolfG0V10toolCallIdSSvp":{"name":"toolCallId","abstract":"Tool call that this message is responding to.
","parent_name":"ChatCompletionToolMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionToolMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4ToolfG0V7content10toolCallIdAGSS_SStcfc":{"name":"init(content:toolCallId:)","parent_name":"ChatCompletionToolMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionToolMessageParam/CodingKeys.html":{"name":"CodingKeys","parent_name":"ChatCompletionToolMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionAssistantMessageParam/ChatCompletionMessageToolCallParam/FunctionCall.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce9AssistantfG0V0cef8ToolCallG0V08FunctionJ0V9argumentsSSvp":{"name":"arguments","abstract":"The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.
","parent_name":"FunctionCall"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionAssistantMessageParam/ChatCompletionMessageToolCallParam/FunctionCall.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce9AssistantfG0V0cef8ToolCallG0V08FunctionJ0V4nameSSvp":{"name":"name","abstract":"The name of the function to call.
","parent_name":"FunctionCall"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionAssistantMessageParam/ChatCompletionMessageToolCallParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce9AssistantfG0V0cef8ToolCallG0V9ToolsTypea":{"name":"ToolsType","parent_name":"ChatCompletionMessageToolCallParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionAssistantMessageParam/ChatCompletionMessageToolCallParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce9AssistantfG0V0cef8ToolCallG0V2idSSvp":{"name":"id","abstract":"The ID of the tool call.
","parent_name":"ChatCompletionMessageToolCallParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionAssistantMessageParam/ChatCompletionMessageToolCallParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce9AssistantfG0V0cef8ToolCallG0V8functionAI08FunctionJ0Vvp":{"name":"function","abstract":"The function that the model called.
","parent_name":"ChatCompletionMessageToolCallParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionAssistantMessageParam/ChatCompletionMessageToolCallParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce9AssistantfG0V0cef8ToolCallG0V4typeAC0ceiG0V9ToolsTypeOvp":{"name":"type","abstract":"The type of the tool. Currently, only function
is supported.
/ The role of the messages author, in this case assistant.
","parent_name":"ChatCompletionAssistantMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionAssistantMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce9AssistantfG0V7contentSSSgvp":{"name":"content","abstract":"The contents of the assistant message. Required unless tool_calls is specified.
","parent_name":"ChatCompletionAssistantMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionAssistantMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce9AssistantfG0V4nameSSSgvp":{"name":"name","abstract":"The name of the author of this message. name
is required if role is function
, and it should be the name of the function whose response is in the content
. May contain a-z, A-Z, 0-9, and underscores, with a maximum length of 64 characters.
The tool calls generated by the model, such as function calls.
","parent_name":"ChatCompletionAssistantMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionAssistantMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce9AssistantfG0V7content4name9toolCallsAGSSSg_AKSayAG0cef8ToolCallG0VGSgtcfc":{"name":"init(content:name:toolCalls:)","parent_name":"ChatCompletionAssistantMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionAssistantMessageParam/CodingKeys.html":{"name":"CodingKeys","parent_name":"ChatCompletionAssistantMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionAssistantMessageParam/ChatCompletionMessageToolCallParam.html":{"name":"ChatCompletionMessageToolCallParam","parent_name":"ChatCompletionAssistantMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/VisionContent/ChatCompletionContentPartImageParam/CodingKeys.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7ContentO06VisionI0O0cei9PartImageG0V10CodingKeysO8imageUrlyA2OmF":{"name":"imageUrl","parent_name":"CodingKeys"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/VisionContent/ChatCompletionContentPartImageParam/CodingKeys.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7ContentO06VisionI0O0cei9PartImageG0V10CodingKeysO4typeyA2OmF":{"name":"type","parent_name":"CodingKeys"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/VisionContent/ChatCompletionContentPartImageParam/ImageURL/Detail.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7ContentO06VisionI0O0cei9PartImageG0V0L3URLV6DetailO4autoyA2QmF":{"name":"auto","parent_name":"Detail"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/VisionContent/ChatCompletionContentPartImageParam/ImageURL/Detail.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7ContentO06VisionI0O0cei9PartImageG0V0L3URLV6DetailO3lowyA2QmF":{"
name":"low","parent_name":"Detail"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/VisionContent/ChatCompletionContentPartImageParam/ImageURL/Detail.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7ContentO06VisionI0O0cei9PartImageG0V0L3URLV6DetailO4highyA2QmF":{"name":"high","parent_name":"Detail"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/VisionContent/ChatCompletionContentPartImageParam/ImageURL.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7ContentO06VisionI0O0cei9PartImageG0V0L3URLV3urlSSvp":{"name":"url","abstract":"Either a URL of the image or the base64 encoded image data.
","parent_name":"ImageURL"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/VisionContent/ChatCompletionContentPartImageParam/ImageURL.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7ContentO06VisionI0O0cei9PartImageG0V0L3URLV6detailAO6DetailOvp":{"name":"detail","abstract":"Specifies the detail level of the image. Learn more in the","parent_name":"ImageURL"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/VisionContent/ChatCompletionContentPartImageParam/ImageURL.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7ContentO06VisionI0O0cei9PartImageG0V0L3URLV3url6detailAOSS_AO6DetailOtcfc":{"name":"init(url:detail:)","parent_name":"ImageURL"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/VisionContent/ChatCompletionContentPartImageParam/ImageURL.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7ContentO06VisionI0O0cei9PartImageG0V0L3URLV3url6detailAO10Foundation4DataV_AO6DetailOtcfc":{"name":"init(url:detail:)","parent_name":"ImageURL"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/VisionContent/ChatCompletionContentPartImageParam/ImageURL/Detail.html":{"name":"Detail","parent_name":"ImageURL"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/VisionContent/ChatCompletionContentPartImageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7ContentO06VisionI0O0cei9PartImageG0V8imageUrlAM0L3URLVvp":{"name":"imageUrl","parent_name":"ChatCompletionContentPartImageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/VisionContent/ChatCompletionContentPartImageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7ContentO06VisionI0O0cei9PartImageG0V4typeSSvp":{"name":"type","abstract":"
The type of the content part.
","parent_name":"ChatCompletionContentPartImageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/VisionContent/ChatCompletionContentPartImageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7ContentO06VisionI0O0cei9PartImageG0V8imageUrlA2M0L3URLV_tcfc":{"name":"init(imageUrl:)","parent_name":"ChatCompletionContentPartImageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/VisionContent/ChatCompletionContentPartImageParam/ImageURL.html":{"name":"ImageURL","parent_name":"ChatCompletionContentPartImageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/VisionContent/ChatCompletionContentPartImageParam/CodingKeys.html":{"name":"CodingKeys","parent_name":"ChatCompletionContentPartImageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/VisionContent/ChatCompletionContentPartTextParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7ContentO06VisionI0O0cei8PartTextG0V4textSSvp":{"name":"text","abstract":"The text content.
","parent_name":"ChatCompletionContentPartTextParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/VisionContent/ChatCompletionContentPartTextParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7ContentO06VisionI0O0cei8PartTextG0V4typeSSvp":{"name":"type","abstract":"The type of the content part.
","parent_name":"ChatCompletionContentPartTextParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/VisionContent/ChatCompletionContentPartTextParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7ContentO06VisionI0O0cei8PartTextG0V4textAMSS_tcfc":{"name":"init(text:)","parent_name":"ChatCompletionContentPartTextParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/VisionContent.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7ContentO06VisionI0O04chatei8PartTextG0yA2K0ceilmG0VcAKmF":{"name":"chatCompletionContentPartTextParam(_:)","parent_name":"VisionContent"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/VisionContent.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7ContentO06VisionI0O04chatei9PartImageG0yA2K0ceilmG0VcAKmF":{"name":"chatCompletionContentPartImageParam(_:)","parent_name":"VisionContent"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/VisionContent.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7ContentO06VisionI0O4textSSSgvp":{"name":"text","parent_name":"VisionContent"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/VisionContent.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7ContentO06VisionI0O8imageUrlAK0cei9PartImageG0V0N3URLVSgvp":{"name":"imageUrl","parent_name":"VisionContent"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/VisionContent.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7ContentO06VisionI0O04chatei8PartTextG0A2K0ceilmG0V_tcfc":{"name":"init(chatCompletionContentPartTextParam:)","parent_name":"VisionContent"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/VisionContent.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7ContentO06VisionI0O04chat
ei9PartImageG0A2K0ceilmG0V_tcfc":{"name":"init(chatCompletionContentPartImageParam:)","parent_name":"VisionContent"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/VisionContent.html#/s:SE6encode2toys7Encoder_p_tKF":{"name":"encode(to:)","parent_name":"VisionContent"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/VisionContent/ChatCompletionContentPartTextParam.html":{"name":"ChatCompletionContentPartTextParam","parent_name":"VisionContent"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/VisionContent/ChatCompletionContentPartImageParam.html":{"name":"ChatCompletionContentPartImageParam","parent_name":"VisionContent"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/CodingKeys.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7ContentO10CodingKeysO6stringyA2KmF":{"name":"string","parent_name":"CodingKeys"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/CodingKeys.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7ContentO10CodingKeysO6visionyA2KmF":{"name":"vision","parent_name":"CodingKeys"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7ContentO6stringyAISScAImF":{"name":"string(_:)","parent_name":"Content"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7ContentO6visionyAISayAI06VisionI0OGcAImF":{"name":"vision(_:)","parent_name":"Content"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7ContentO6stringSSSgvp":{"name":"string","parent_name":"Content"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content.html#/s:6
OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7ContentO6stringAISS_tcfc":{"name":"init(string:)","parent_name":"Content"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7ContentO6visionAISayAI06VisionI0OG_tcfc":{"name":"init(vision:)","parent_name":"Content"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/CodingKeys.html":{"name":"CodingKeys","parent_name":"Content"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content.html#/s:SE6encode2toys7Encoder_p_tKF":{"name":"encode(to:)","parent_name":"Content"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content/VisionContent.html":{"name":"VisionContent","parent_name":"Content"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content.html#/s:Se4fromxs7Decoder_p_tKcfc":{"name":"init(from:)","parent_name":"Content"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V4Rolea":{"name":"Role","parent_name":"ChatCompletionUserMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7contentAG7ContentOvp":{"name":"content","abstract":"The contents of the user message.
","parent_name":"ChatCompletionUserMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V4roleAE4RoleOvp":{"name":"role","abstract":"The role of the messages author, in this case user.
","parent_name":"ChatCompletionUserMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V4nameSSSgvp":{"name":"name","abstract":"An optional name for the participant. Provides the model information to differentiate between participants of the same role.
","parent_name":"ChatCompletionUserMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce4UserfG0V7content4nameA2G7ContentO_SSSgtcfc":{"name":"init(content:name:)","parent_name":"ChatCompletionUserMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam/Content.html":{"name":"Content","parent_name":"ChatCompletionUserMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionSystemMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce6SystemfG0V4Rolea":{"name":"Role","parent_name":"ChatCompletionSystemMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionSystemMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce6SystemfG0V7contentSSvp":{"name":"content","abstract":"The contents of the system message.
","parent_name":"ChatCompletionSystemMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionSystemMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce6SystemfG0V4roleAE4RoleOvp":{"name":"role","abstract":"The role of the messages author, in this case system.
","parent_name":"ChatCompletionSystemMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionSystemMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce6SystemfG0V4nameSSSgvp":{"name":"name","abstract":"An optional name for the participant. Provides the model information to differentiate between participants of the same role.
","parent_name":"ChatCompletionSystemMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionSystemMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO0ce6SystemfG0V7content4nameAGSS_SSSgtcfc":{"name":"init(content:name:)","parent_name":"ChatCompletionSystemMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO6systemyA2E0ce6SystemfG0VcAEmF":{"name":"system(_:)","parent_name":"ChatCompletionMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO4useryA2E0ce4UserfG0VcAEmF":{"name":"user(_:)","parent_name":"ChatCompletionMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO9assistantyA2E0ce9AssistantfG0VcAEmF":{"name":"assistant(_:)","parent_name":"ChatCompletionMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO4toolyA2E0ce4ToolfG0VcAEmF":{"name":"tool(_:)","parent_name":"ChatCompletionMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO7contentAE0ce4UserfG0V7ContentOSgvp":{"name":"content","parent_name":"ChatCompletionMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO4roleAE4RoleOvp":{"name":"role","parent_name":"ChatCompletionMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO4nameSSSgvp":{"name":"name","parent_name":"ChatCompletionMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO10toolCallIdSSSgvp":{"name":"toolCallId","parent_name":"ChatCompletionMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO9toolCallsSayAE0ce9AssistantfG0V0cef8ToolCallG0VGSgvp":{"name":"toolCalls","parent_nam
e":"ChatCompletionMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO4role7content4name9toolCalls0K6CallIdAESgAE4RoleO_SSSgANSayAE0ce9AssistantfG0V0cef4ToolmG0VGSgANtcfc":{"name":"init(role:content:name:toolCalls:toolCallId:)","parent_name":"ChatCompletionMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO4role7content4nameAESgAE4RoleO_SayAE0ce4UserfG0V7ContentO06VisionM0OGSSSgtcfc":{"name":"init(role:content:name:)","parent_name":"ChatCompletionMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam.html#/s:SE6encode2toys7Encoder_p_tKF":{"name":"encode(to:)","parent_name":"ChatCompletionMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionSystemMessageParam.html":{"name":"ChatCompletionSystemMessageParam","parent_name":"ChatCompletionMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionUserMessageParam.html":{"name":"ChatCompletionUserMessageParam","parent_name":"ChatCompletionMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionAssistantMessageParam.html":{"name":"ChatCompletionAssistantMessageParam","parent_name":"ChatCompletionMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/ChatCompletionToolMessageParam.html":{"name":"ChatCompletionToolMessageParam","parent_name":"ChatCompletionMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam/Role.html":{"name":"Role","parent_name":"ChatCompletionMessageParam"},"Structs/ChatQuery/ChatCompletionMessageParam.html#/s:6OpenAI9ChatQueryV0C22CompletionMessageParamO4fromAEs7Decoder_p_tKcfc":{"name":"init(from:)","parent_name":"ChatCompletionMessageParam"},"Structs/ChatQuery.html#/s:6OpenAI9ChatQueryV8messagesSayAC0C22CompletionMessageParamOGvp":{"name":"messages","abstract":"A list of messages comprising the conversation so far
","parent_name":"ChatQuery"},"Structs/ChatQuery.html#/s:6OpenAI9ChatQueryV5modelSSvp":{"name":"model","abstract":"ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.","parent_name":"ChatQuery"},"Structs/ChatQuery.html#/s:6OpenAI9ChatQueryV16frequencyPenaltySdSgvp":{"name":"frequencyPenalty","abstract":"
Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model’s likelihood to repeat the same line verbatim.","parent_name":"ChatQuery"},"Structs/ChatQuery.html#/s:6OpenAI9ChatQueryV9logitBiasSDySSSiGSgvp":{"name":"logitBias","abstract":"
Modify the likelihood of specified tokens appearing in the completion.","parent_name":"ChatQuery"},"Structs/ChatQuery.html#/s:6OpenAI9ChatQueryV8logprobsSbSgvp":{"name":"logprobs","abstract":"
Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the content of message. This option is currently not available on the gpt-4-vision-preview model.","parent_name":"ChatQuery"},"Structs/ChatQuery.html#/s:6OpenAI9ChatQueryV9maxTokensSiSgvp":{"name":"maxTokens","abstract":"
The maximum number of tokens to generate in the completion.","parent_name":"ChatQuery"},"Structs/ChatQuery.html#/s:6OpenAI9ChatQueryV1nSiSgvp":{"name":"n","abstract":"
How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep n as 1 to minimize costs.","parent_name":"ChatQuery"},"Structs/ChatQuery.html#/s:6OpenAI9ChatQueryV15presencePenaltySdSgvp":{"name":"presencePenalty","abstract":"
Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model’s likelihood to talk about new topics.","parent_name":"ChatQuery"},"Structs/ChatQuery.html#/s:6OpenAI9ChatQueryV14responseFormatAC08ResponseF0OSgvp":{"name":"responseFormat","abstract":"
An object specifying the format that the model must output. Compatible with gpt-4-1106-preview and gpt-3.5-turbo-1106.","parent_name":"ChatQuery"},"Structs/ChatQuery.html#/s:6OpenAI9ChatQueryV4seedSiSgvp":{"name":"seed","abstract":"
This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.
","parent_name":"ChatQuery"},"Structs/ChatQuery.html#/s:6OpenAI9ChatQueryV4stopAC4StopOSgvp":{"name":"stop","abstract":"Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.","parent_name":"ChatQuery"},"Structs/ChatQuery.html#/s:6OpenAI9ChatQueryV11temperatureSdSgvp":{"name":"temperature","abstract":"
What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.","parent_name":"ChatQuery"},"Structs/ChatQuery.html#/s:6OpenAI9ChatQueryV10toolChoiceAC0C33CompletionFunctionCallOptionParamOSgvp":{"name":"toolChoice","abstract":"
Controls which (if any) function is called by the model. none means the model will not call a function and instead generates a message. auto means the model can pick between generating a message or calling a function. Specifying a particular function via {“type”: “function”, “function”: {“name”: “my_function”}} forces the model to call that function.","parent_name":"ChatQuery"},"Structs/ChatQuery.html#/s:6OpenAI9ChatQueryV5toolsSayAC0C19CompletionToolParamVGSgvp":{"name":"tools","abstract":"
A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for.
","parent_name":"ChatQuery"},"Structs/ChatQuery.html#/s:6OpenAI9ChatQueryV11topLogprobsSiSgvp":{"name":"topLogprobs","abstract":"An integer between 0 and 5 specifying the number of most likely tokens to return at each token position, each with an associated log probability. logprobs must be set to true if this parameter is used.
","parent_name":"ChatQuery"},"Structs/ChatQuery.html#/s:6OpenAI9ChatQueryV4topPSdSgvp":{"name":"topP","abstract":"An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.","parent_name":"ChatQuery"},"Structs/ChatQuery.html#/s:6OpenAI9ChatQueryV4userSSSgvp":{"name":"user","abstract":"
A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.","parent_name":"ChatQuery"},"Structs/ChatQuery.html#/s:6OpenAI9ChatQueryV6streamSbvp":{"name":"stream","abstract":"
If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message.","parent_name":"ChatQuery"},"Structs/ChatQuery.html#/s:6OpenAI9ChatQueryV8messages5model16frequencyPenalty9logitBias8logprobs9maxTokens1n08presenceH014responseFormat4seed4stop11temperature10toolChoice5tools11topLogprobs0W1P4user6streamACSayAC0C22CompletionMessageParamOG_SSSdSgSDySSSiGSgSbSgSiSgA1_AyC08ResponseP0OSgA1_AC4StopOSgAyC0C33CompletionFunctionCallOptionParamOSgSayAC0C19CompletionToolParamVGSgA1_AYSSSgSbtcfc":{"name":"init(messages:model:frequencyPenalty:logitBias:logprobs:maxTokens:n:presencePenalty:responseFormat:seed:stop:temperature:toolChoice:tools:topLogprobs:topP:user:stream:)","parent_name":"ChatQuery"},"Structs/ChatQuery/ChatCompletionMessageParam.html":{"name":"ChatCompletionMessageParam","parent_name":"ChatQuery"},"Structs/ChatQuery/Stop.html":{"name":"Stop","parent_name":"ChatQuery"},"Structs/ChatQuery/ResponseFormat.html":{"name":"ResponseFormat","parent_name":"ChatQuery"},"Structs/ChatQuery/ChatCompletionFunctionCallOptionParam.html":{"name":"ChatCompletionFunctionCallOptionParam","parent_name":"ChatQuery"},"Structs/ChatQuery/ChatCompletionToolParam.html":{"name":"ChatCompletionToolParam","parent_name":"ChatQuery"},"Structs/ChatQuery/CodingKeys.html":{"name":"CodingKeys","parent_name":"ChatQuery"},"Structs/AudioTranslationResult.html#/s:6OpenAI22AudioTranslationResultV4textSSvp":{"name":"text","abstract":"
The translated text.
","parent_name":"AudioTranslationResult"},"Structs/AudioTranslationQuery.html#/s:6OpenAI21AudioTranslationQueryV8FileTypea":{"name":"FileType","parent_name":"AudioTranslationQuery"},"Structs/AudioTranslationQuery.html#/s:6OpenAI21AudioTranslationQueryV14ResponseFormata":{"name":"ResponseFormat","parent_name":"AudioTranslationQuery"},"Structs/AudioTranslationQuery.html#/s:6OpenAI21AudioTranslationQueryV4file10Foundation4DataVvp":{"name":"file","abstract":"The audio file object (not file name) translate, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
","parent_name":"AudioTranslationQuery"},"Structs/AudioTranslationQuery.html#/s:6OpenAI21AudioTranslationQueryV8fileTypeAA0c13TranscriptionE0V04FileG0Ovp":{"name":"fileType","parent_name":"AudioTranslationQuery"},"Structs/AudioTranslationQuery.html#/s:6OpenAI21AudioTranslationQueryV5modelSSvp":{"name":"model","abstract":"ID of the model to use. Only whisper-1 is currently available.
","parent_name":"AudioTranslationQuery"},"Structs/AudioTranslationQuery.html#/s:6OpenAI21AudioTranslationQueryV14responseFormatAA0c13TranscriptionE0V08ResponseG0OSgvp":{"name":"responseFormat","abstract":"The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.","parent_name":"AudioTranslationQuery"},"Structs/AudioTranslationQuery.html#/s:6OpenAI21AudioTranslationQueryV6promptSSSgvp":{"name":"prompt","abstract":"
An optional text to guide the model’s style or continue a previous audio segment. The prompt should be in English.","parent_name":"AudioTranslationQuery"},"Structs/AudioTranslationQuery.html#/s:6OpenAI21AudioTranslationQueryV11temperatureSdSgvp":{"name":"temperature","abstract":"
The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.","parent_name":"AudioTranslationQuery"},"Structs/AudioTranslationQuery.html#/s:6OpenAI21AudioTranslationQueryV4file0F4Type5model6prompt11temperature14responseFormatAC10Foundation4DataV_AA0c13TranscriptionE0V04FileG0OS2SSgSdSgAN08ResponseL0OSgtcfc":{"name":"init(file:fileType:model:prompt:temperature:responseFormat:)","parent_name":"AudioTranslationQuery"},"Structs/AudioTranscriptionResult.html#/s:6OpenAI24AudioTranscriptionResultV4textSSvp":{"name":"text","abstract":"
The transcribed text.
","parent_name":"AudioTranscriptionResult"},"Structs/AudioTranscriptionQuery/FileType.html#/s:6OpenAI23AudioTranscriptionQueryV8FileTypeO4flacyA2EmF":{"name":"flac","parent_name":"FileType"},"Structs/AudioTranscriptionQuery/FileType.html#/s:6OpenAI23AudioTranscriptionQueryV8FileTypeO3mp3yA2EmF":{"name":"mp3","parent_name":"FileType"},"Structs/AudioTranscriptionQuery/FileType.html#/s:6OpenAI23AudioTranscriptionQueryV8FileTypeO4mpgayA2EmF":{"name":"mpga","parent_name":"FileType"},"Structs/AudioTranscriptionQuery/FileType.html#/s:6OpenAI23AudioTranscriptionQueryV8FileTypeO3mp4yA2EmF":{"name":"mp4","parent_name":"FileType"},"Structs/AudioTranscriptionQuery/FileType.html#/s:6OpenAI23AudioTranscriptionQueryV8FileTypeO3m4ayA2EmF":{"name":"m4a","parent_name":"FileType"},"Structs/AudioTranscriptionQuery/FileType.html#/s:6OpenAI23AudioTranscriptionQueryV8FileTypeO4mpegyA2EmF":{"name":"mpeg","parent_name":"FileType"},"Structs/AudioTranscriptionQuery/FileType.html#/s:6OpenAI23AudioTranscriptionQueryV8FileTypeO3oggyA2EmF":{"name":"ogg","parent_name":"FileType"},"Structs/AudioTranscriptionQuery/FileType.html#/s:6OpenAI23AudioTranscriptionQueryV8FileTypeO3wavyA2EmF":{"name":"wav","parent_name":"FileType"},"Structs/AudioTranscriptionQuery/FileType.html#/s:6OpenAI23AudioTranscriptionQueryV8FileTypeO4webmyA2EmF":{"name":"webm","parent_name":"FileType"},"Structs/AudioTranscriptionQuery/ResponseFormat.html#/s:6OpenAI23AudioTranscriptionQueryV14ResponseFormatO4jsonyA2EmF":{"name":"json","parent_name":"ResponseFormat"},"Structs/AudioTranscriptionQuery/ResponseFormat.html#/s:6OpenAI23AudioTranscriptionQueryV14ResponseFormatO4textyA2EmF":{"name":"text","parent_name":"ResponseFormat"},"Structs/AudioTranscriptionQuery/ResponseFormat.html#/s:6OpenAI23AudioTranscriptionQueryV14ResponseFormatO11verboseJsonyA2EmF":{"name":"verboseJson","parent_name":"ResponseFormat"},"Structs/AudioTranscriptionQuery/ResponseFormat.html#/s:6OpenAI23AudioTranscriptionQueryV14ResponseFormatO3srtyA2EmF":{"name":"srt
","parent_name":"ResponseFormat"},"Structs/AudioTranscriptionQuery/ResponseFormat.html#/s:6OpenAI23AudioTranscriptionQueryV14ResponseFormatO3vttyA2EmF":{"name":"vtt","parent_name":"ResponseFormat"},"Structs/AudioTranscriptionQuery/ResponseFormat.html":{"name":"ResponseFormat","parent_name":"AudioTranscriptionQuery"},"Structs/AudioTranscriptionQuery.html#/s:6OpenAI23AudioTranscriptionQueryV4file10Foundation4DataVvp":{"name":"file","abstract":"The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
","parent_name":"AudioTranscriptionQuery"},"Structs/AudioTranscriptionQuery.html#/s:6OpenAI23AudioTranscriptionQueryV8fileTypeAC04FileG0Ovp":{"name":"fileType","parent_name":"AudioTranscriptionQuery"},"Structs/AudioTranscriptionQuery.html#/s:6OpenAI23AudioTranscriptionQueryV5modelSSvp":{"name":"model","abstract":"ID of the model to use. Only whisper-1 is currently available.
","parent_name":"AudioTranscriptionQuery"},"Structs/AudioTranscriptionQuery.html#/s:6OpenAI23AudioTranscriptionQueryV14responseFormatAC08ResponseG0OSgvp":{"name":"responseFormat","abstract":"The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.","parent_name":"AudioTranscriptionQuery"},"Structs/AudioTranscriptionQuery.html#/s:6OpenAI23AudioTranscriptionQueryV6promptSSSgvp":{"name":"prompt","abstract":"
An optional text to guide the model’s style or continue a previous audio segment. The prompt should match the audio language.
","parent_name":"AudioTranscriptionQuery"},"Structs/AudioTranscriptionQuery.html#/s:6OpenAI23AudioTranscriptionQueryV11temperatureSdSgvp":{"name":"temperature","abstract":"The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.","parent_name":"AudioTranscriptionQuery"},"Structs/AudioTranscriptionQuery.html#/s:6OpenAI23AudioTranscriptionQueryV8languageSSSgvp":{"name":"language","abstract":"
The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy and latency.","parent_name":"AudioTranscriptionQuery"},"Structs/AudioTranscriptionQuery.html#/s:6OpenAI23AudioTranscriptionQueryV4file0F4Type5model6prompt11temperature8language14responseFormatAC10Foundation4DataV_AC04FileG0OS2SSgSdSgApC08ResponseM0OSgtcfc":{"name":"init(file:fileType:model:prompt:temperature:language:responseFormat:)","parent_name":"AudioTranscriptionQuery"},"Structs/AudioTranscriptionQuery/FileType.html":{"name":"FileType","parent_name":"AudioTranscriptionQuery"},"Structs/AudioSpeechResult.html#/s:6OpenAI17AudioSpeechResultV5audio10Foundation4DataVvp":{"name":"audio","abstract":"
Audio data for one of the following formats :mp3
, opus
, aac
, flac
Encapsulates the voices available for audio generation.
","parent_name":"AudioSpeechQuery"},"Structs/AudioSpeechQuery/AudioSpeechResponseFormat.html":{"name":"AudioSpeechResponseFormat","abstract":"Encapsulates the response formats available for audio data.
","parent_name":"AudioSpeechQuery"},"Structs/AudioSpeechQuery.html#/s:6OpenAI16AudioSpeechQueryV5inputSSvp":{"name":"input","abstract":"The text to generate audio for. The maximum length is 4096 characters.
","parent_name":"AudioSpeechQuery"},"Structs/AudioSpeechQuery.html#/s:6OpenAI16AudioSpeechQueryV5modelSSvp":{"name":"model","abstract":"One of the available TTS models: tts-1 or tts-1-hd
","parent_name":"AudioSpeechQuery"},"Structs/AudioSpeechQuery.html#/s:6OpenAI16AudioSpeechQueryV5voiceAC0cD5VoiceOvp":{"name":"voice","abstract":"The voice to use when generating the audio. Supported voices are alloy, echo, fable, onyx, nova, and shimmer. Previews of the voices are available in the Text to speech guide.","parent_name":"AudioSpeechQuery"},"Structs/AudioSpeechQuery.html#/s:6OpenAI16AudioSpeechQueryV14responseFormatAC0cd8ResponseG0OSgvp":{"name":"responseFormat","abstract":"
The format to audio in. Supported formats are mp3, opus, aac, and flac.","parent_name":"AudioSpeechQuery"},"Structs/AudioSpeechQuery.html#/s:6OpenAI16AudioSpeechQueryV5speedSSSgvp":{"name":"speed","abstract":"
The speed of the generated audio. Select a value from 0.25 to 4.0. 1.0 is the default.","parent_name":"AudioSpeechQuery"},"Structs/AudioSpeechQuery/CodingKeys.html":{"name":"CodingKeys","parent_name":"AudioSpeechQuery"},"Structs/AudioSpeechQuery.html#/s:6OpenAI16AudioSpeechQueryV5model5input5voice14responseFormat5speedACSS_SSAC0cD5VoiceOAC0cd8ResponseJ0OSdSgtcfc":{"name":"init(model:input:voice:responseFormat:speed:)","parent_name":"AudioSpeechQuery"},"Structs/AudioSpeechQuery/Speed.html":{"name":"Speed","parent_name":"AudioSpeechQuery"},"Structs/AudioSpeechQuery.html#/s:6OpenAI16AudioSpeechQueryV09normalizeD5SpeedySSSdSgFZ":{"name":"normalizeSpeechSpeed(_:)","parent_name":"AudioSpeechQuery"},"Structs/APIErrorResponse.html#/s:6OpenAI16APIErrorResponseV5errorAA0C0Vvp":{"name":"error","parent_name":"APIErrorResponse"},"Structs/APIErrorResponse.html#/s:10Foundation14LocalizedErrorP16errorDescriptionSSSgvp":{"name":"errorDescription","parent_name":"APIErrorResponse"},"Structs/APIError.html#/s:6OpenAI8APIErrorV7messageSSvp":{"name":"message","parent_name":"APIError"},"Structs/APIError.html#/s:6OpenAI8APIErrorV4typeSSvp":{"name":"type","parent_name":"APIError"},"Structs/APIError.html#/s:6OpenAI8APIErrorV5paramSSSgvp":{"name":"param","parent_name":"APIError"},"Structs/APIError.html#/s:6OpenAI8APIErrorV4codeSSSgvp":{"name":"code","parent_name":"APIError"},"Structs/APIError.html#/s:6OpenAI8APIErrorV7message4type5param4codeACSS_S2SSgAHtcfc":{"name":"init(message:type:param:code:)","parent_name":"APIError"},"Structs/APIError.html#/s:Se4fromxs7Decoder_p_tKcfc":{"name":"init(from:)","parent_name":"APIError"},"Structs/APIError.html#/s:10Foundation14LocalizedErrorP16errorDescriptionSSSgvp":{"name":"errorDescription","parent_name":"APIError"},"Structs/APIError.html":{"name":"APIError"},"Structs/APIErrorResponse.html":{"name":"APIErrorResponse"},"Structs/AudioSpeechQuery.html":{"name":"AudioSpeechQuery","abstract":"
Generates audio from the input text."},"Structs/AudioSpeechResult.html":{"name":"AudioSpeechResult","abstract":"
The audio file content."},"Structs/AudioTranscriptionQuery.html":{"name":"AudioTranscriptionQuery"},"Structs/AudioTranscriptionResult.html":{"name":"AudioTranscriptionResult"},"Structs/AudioTranslationQuery.html":{"name":"AudioTranslationQuery","abstract":"
Translates audio into English.
"},"Structs/AudioTranslationResult.html":{"name":"AudioTranslationResult"},"Structs/ChatQuery.html":{"name":"ChatQuery","abstract":"Creates a model response for the given chat conversation"},"Structs/ChatResult.html":{"name":"ChatResult","abstract":"
https://platform.openai.com/docs/api-reference/chat/object"},"Structs/ChatStreamResult.html":{"name":"ChatStreamResult"},"Structs/CompletionsQuery.html":{"name":"CompletionsQuery"},"Structs/CompletionsResult.html":{"name":"CompletionsResult"},"Structs/EditsQuery.html":{"name":"EditsQuery"},"Structs/EditsResult.html":{"name":"EditsResult"},"Structs/EmbeddingsQuery.html":{"name":"EmbeddingsQuery"},"Structs/EmbeddingsResult.html":{"name":"EmbeddingsResult"},"Structs/ImageEditsQuery.html":{"name":"ImageEditsQuery"},"Structs/ImageVariationsQuery.html":{"name":"ImageVariationsQuery"},"Structs/ImagesQuery.html":{"name":"ImagesQuery","abstract":"
Given a prompt and/or an input image, the model will generate a new image."},"Structs/ImagesResult.html":{"name":"ImagesResult","abstract":"
Returns a list of image objects.
"},"Structs/ModelQuery.html":{"name":"ModelQuery","abstract":"Retrieves a model instance, providing basic information about the model such as the owner and permissioning.
"},"Structs/ModelResult.html":{"name":"ModelResult","abstract":"The model object matching the specified ID.
"},"Structs/ModelsResult.html":{"name":"ModelsResult","abstract":"A list of model objects.
"},"Structs/ModerationsQuery.html":{"name":"ModerationsQuery"},"Structs/ModerationsResult.html":{"name":"ModerationsResult"},"Structs/Vector.html":{"name":"Vector"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolP11completions5query10completionyAA16CompletionsQueryV_ys6ResultOyAA0gI0Vs5Error_pGctF":{"name":"completions(query:completion:)","abstract":"This function sends a completions query to the OpenAI API and retrieves generated completions in response. The Completions API enables you to build applications using OpenAI’s language models, like the powerful GPT-3.
","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolP17completionsStream5query8onResult10completionyAA16CompletionsQueryV_ys0H0OyAA0jH0Vs5Error_pGcysAN_pSgcSgtF":{"name":"completionsStream(query:onResult:completion:)","abstract":"This function sends a completions query to the OpenAI API and retrieves generated completions in response. The Completions API enables you to build applications using OpenAI’s language models, like the powerful GPT-3. The result is returned by chunks.
","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolP6images5query10completionyAA11ImagesQueryV_ys6ResultOyAA0gI0Vs5Error_pGctF":{"name":"images(query:completion:)","abstract":"This function sends an images query to the OpenAI API and retrieves generated images in response. The Images Generation API enables you to create various images or graphics using OpenAI’s powerful deep learning models.
","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolP10imageEdits5query10completionyAA05ImageE5QueryV_ys6ResultOyAA06ImagesJ0Vs5Error_pGctF":{"name":"imageEdits(query:completion:)","abstract":"This function sends an image edit query to the OpenAI API and retrieves generated images in response. The Images Edit API enables you to edit images or graphics using OpenAI’s powerful deep learning models.
","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolP15imageVariations5query10completionyAA05ImageE5QueryV_ys6ResultOyAA06ImagesJ0Vs5Error_pGctF":{"name":"imageVariations(query:completion:)","abstract":"This function sends an image variation query to the OpenAI API and retrieves generated images in response. The Images Variations API enables you to create a variation of a given image using OpenAI’s powerful deep learning models.
","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolP10embeddings5query10completionyAA15EmbeddingsQueryV_ys6ResultOyAA0gI0Vs5Error_pGctF":{"name":"embeddings(query:completion:)","abstract":"This function sends an embeddings query to the OpenAI API and retrieves embeddings in response. The Embeddings API enables you to generate high-dimensional vector representations of texts, which can be used for various natural language processing tasks such as semantic similarity, clustering, and classification.
","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolP5chats5query10completionyAA9ChatQueryV_ys6ResultOyAA0gI0Vs5Error_pGctF":{"name":"chats(query:completion:)","abstract":"This function sends a chat query to the OpenAI API and retrieves chat conversation responses. The Chat API enables you to build chatbots or conversational applications using OpenAI’s powerful natural language models, like GPT-3.
","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolP11chatsStream5query8onResult10completionyAA9ChatQueryV_ys0H0OyAA0jeH0Vs5Error_pGcysAN_pSgcSgtF":{"name":"chatsStream(query:onResult:completion:)","abstract":"This function sends a chat query to the OpenAI API and retrieves chat stream conversation responses. The Chat API enables you to build chatbots or conversational applications using OpenAI’s powerful natural language models, like GPT-3. The result is returned by chunks.
","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolP5edits5query10completionyAA10EditsQueryV_ys6ResultOyAA0gI0Vs5Error_pGctF":{"name":"edits(query:completion:)","abstract":"This function sends an edits query to the OpenAI API and retrieves an edited version of the prompt based on the instruction given.
","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolP5model5query10completionyAA10ModelQueryV_ys6ResultOyAA0gI0Vs5Error_pGctF":{"name":"model(query:completion:)","abstract":"This function sends a model query to the OpenAI API and retrieves a model instance, providing owner information. The Models API in this usage enables you to gather detailed information on the model in question, like GPT-3.
","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolP6models10completionyys6ResultOyAA06ModelsF0Vs5Error_pGc_tF":{"name":"models(completion:)","abstract":"This function sends a models query to the OpenAI API and retrieves a list of models. The Models API in this usage enables you to list all the available models.
","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolP11moderations5query10completionyAA16ModerationsQueryV_ys6ResultOyAA0gI0Vs5Error_pGctF":{"name":"moderations(query:completion:)","abstract":"This function sends a moderations query to the OpenAI API and retrieves a list of category results to classify how text may violate OpenAI’s Content Policy.
","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolP17audioCreateSpeech5query10completionyAA05AudioF5QueryV_ys6ResultOyAA0ifK0Vs5Error_pGctF":{"name":"audioCreateSpeech(query:completion:)","abstract":"This function sends an AudioSpeechQuery
to the OpenAI API to create audio speech from text using a specific voice and format.
Transcribes audio data using OpenAI’s audio transcription API and completes the operation asynchronously.
","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolP17audioTranslations5query10completionyAA21AudioTranslationQueryV_ys6ResultOyAA0hiK0Vs5Error_pGctF":{"name":"audioTranslations(query:completion:)","abstract":"Translates audio data using OpenAI’s audio translation API and completes the operation asynchronously.
","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE11completions5queryAA17CompletionsResultVAA0F5QueryV_tYaKF":{"name":"completions(query:)","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE17completionsStream5queryScsyAA17CompletionsResultVs5Error_pGAA0G5QueryV_tF":{"name":"completionsStream(query:)","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE6images5queryAA12ImagesResultVAA0F5QueryV_tYaKF":{"name":"images(query:)","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE10imageEdits5queryAA12ImagesResultVAA05ImageE5QueryV_tYaKF":{"name":"imageEdits(query:)","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE15imageVariations5queryAA12ImagesResultVAA05ImageE5QueryV_tYaKF":{"name":"imageVariations(query:)","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE10embeddings5queryAA16EmbeddingsResultVAA0F5QueryV_tYaKF":{"name":"embeddings(query:)","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE5chats5queryAA10ChatResultVAA0F5QueryV_tYaKF":{"name":"chats(query:)","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE11chatsStream5queryScsyAA04ChatE6ResultVs5Error_pGAA0G5QueryV_tF":{"name":"chatsStream(query:)","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE5edits5queryAA11EditsResultVAA0F5QueryV_tYaKF":{"name":"edits(query:)","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE5model5queryAA11ModelResultVAA0F5QueryV_tYaKF":{"name":"model(query:)","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE6modelsAA12ModelsResultVyYaKF":{"name":"models()","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocol
PAAE11moderations5queryAA17ModerationsResultVAA0F5QueryV_tYaKF":{"name":"moderations(query:)","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE17audioCreateSpeech5queryAA05AudioF6ResultVAA0hF5QueryV_tYaKF":{"name":"audioCreateSpeech(query:)","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE19audioTranscriptions5queryAA24AudioTranscriptionResultVAA0gH5QueryV_tYaKF":{"name":"audioTranscriptions(query:)","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE17audioTranslations5queryAA22AudioTranslationResultVAA0gH5QueryV_tYaKF":{"name":"audioTranslations(query:)","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE11completions5query7Combine12AnyPublisherVyAA17CompletionsResultVs5Error_pGAA0I5QueryV_tF":{"name":"completions(query:)","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE17completionsStream5query7Combine12AnyPublisherVys6ResultOyAA011CompletionsJ0Vs5Error_pGsAM_pGAA0K5QueryV_tF":{"name":"completionsStream(query:)","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE6images5query7Combine12AnyPublisherVyAA12ImagesResultVs5Error_pGAA0I5QueryV_tF":{"name":"images(query:)","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE10imageEdits5query7Combine12AnyPublisherVyAA12ImagesResultVs5Error_pGAA05ImageE5QueryV_tF":{"name":"imageEdits(query:)","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE15imageVariations5query7Combine12AnyPublisherVyAA12ImagesResultVs5Error_pGAA05ImageE5QueryV_tF":{"name":"imageVariations(query:)","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE10embeddings5query7Combine12AnyPublisherVyAA16EmbeddingsResultVs5Error_pGAA0I5QueryV_tF":{"name":"embeddings(query:)","parent_name":"OpenAIProto
col"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE5chats5query7Combine12AnyPublisherVyAA10ChatResultVs5Error_pGAA0I5QueryV_tF":{"name":"chats(query:)","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE11chatsStream5query7Combine12AnyPublisherVys6ResultOyAA04ChateJ0Vs5Error_pGsAM_pGAA0K5QueryV_tF":{"name":"chatsStream(query:)","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE5edits5query7Combine12AnyPublisherVyAA11EditsResultVs5Error_pGAA0I5QueryV_tF":{"name":"edits(query:)","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE5model5query7Combine12AnyPublisherVyAA11ModelResultVs5Error_pGAA0I5QueryV_tF":{"name":"model(query:)","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE6models7Combine12AnyPublisherVyAA12ModelsResultVs5Error_pGyF":{"name":"models()","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE11moderations5query7Combine12AnyPublisherVyAA17ModerationsResultVs5Error_pGAA0I5QueryV_tF":{"name":"moderations(query:)","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE17audioCreateSpeech5query7Combine12AnyPublisherVyAA05AudioF6ResultVs5Error_pGAA0kF5QueryV_tF":{"name":"audioCreateSpeech(query:)","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE19audioTranscriptions5query7Combine12AnyPublisherVyAA24AudioTranscriptionResultVs5Error_pGAA0jK5QueryV_tF":{"name":"audioTranscriptions(query:)","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html#/s:6OpenAI0A10AIProtocolPAAE17audioTranslations5query7Combine12AnyPublisherVyAA22AudioTranslationResultVs5Error_pGAA0jK5QueryV_tF":{"name":"audioTranslations(query:)","parent_name":"OpenAIProtocol"},"Protocols/OpenAIProtocol.html":{"name":"OpenAIProtocol"},"Extensions/Model.html#/s:SS6OpenAIE6gpt4_oSSvpZ":{"name":"gpt4_o","a
bstract":"gpt-4o
, currently the most advanced, multimodal flagship model that’s cheaper and faster than GPT-4 Turbo.
gpt-4o-mini
, currently the most affordable and intelligent model for fast and lightweight requests.
gpt-4-turbo
, The latest GPT-4 Turbo model with vision capabilities. Vision requests can now use JSON mode and function calling and more. Context window: 128,000 tokens
gpt-4-turbo
, gpt-4 model with improved instruction following, JSON mode, reproducible outputs, parallel function calling and more. Maximum of 4096 output tokens
gpt-4-vision-preview
, able to understand images, in addition to all other GPT-4 Turbo capabilities.
Snapshot of gpt-4-turbo-preview
from January 25th 2024. This model reduces cases of “laziness” where the model doesn’t complete a task. Also fixes the bug impacting non-English UTF-8 generations. Maximum of 4096 output tokens
Snapshot of gpt-4-turbo-preview
from November 6th 2023. Improved instruction following, JSON mode, reproducible outputs, parallel function calling and more. Maximum of 4096 output tokens
Most capable gpt-4
model, outperforms any GPT-3.5 model, able to do more complex tasks, and optimized for chat.
Snapshot of gpt-4
from June 13th 2023 with function calling data. Unlike gpt-4
, this model will not receive updates, and will be deprecated 3 months after a new version is released.
Snapshot of gpt-4
from March 14th 2023. Unlike gpt-4, this model will not receive updates, and will only be supported for a three month period ending on June 14th 2023.
Same capabilities as the base gpt-4
model but with 4x the context length. Will be updated with our latest model iteration.
Snapshot of gpt-4-32k
from June 13th 2023. Unlike gpt-4-32k
, this model will not receive updates, and will be deprecated 3 months after a new version is released.
Snapshot of gpt-4-32k
from March 14th 2023. Unlike gpt-4-32k
, this model will not receive updates, and will only be supported for a three month period ending on June 14th 2023.
Most capable gpt-3.5-turbo
model and optimized for chat. Will be updated with our latest model iteration.
Snapshot of gpt-3.5-turbo
from January 25th 2024. Decreased prices by 50%. Various improvements including higher accuracy at responding in requested formats and a fix for a bug which caused a text encoding issue for non-English language function calls.
Snapshot of gpt-3.5-turbo
from November 6th 2023. The latest gpt-3.5-turbo
model with improved instruction following, JSON mode, reproducible outputs, parallel function calling and more.
Snapshot of gpt-3.5-turbo
from June 13th 2023 with function calling data. Unlike gpt-3.5-turbo
, this model will not receive updates, and will be deprecated 3 months after a new version is released.
Snapshot of gpt-3.5-turbo
from March 1st 2023. Unlike gpt-3.5-turbo
, this model will not receive updates, and will only be supported for a three month period ending on June 1st 2023.
Same capabilities as the standard gpt-3.5-turbo
model but with 4 times the context.
Snapshot of gpt-3.5-turbo-16k
from June 13th 2023. Unlike gpt-3.5-turbo-16k
, this model will not receive updates, and will be deprecated 3 months after a new version is released.
Can do any language task with better quality, longer output, and consistent instruction-following than the curie, babbage, or ada models. Also supports inserting completions within text.
","parent_name":"Model"},"Extensions/Model.html#/s:SS6OpenAIE15textDavinci_002SSvpZ":{"name":"textDavinci_002","abstract":"Similar capabilities to text-davinci-003 but trained with supervised fine-tuning instead of reinforcement learning.
","parent_name":"Model"},"Extensions/Model.html#/s:SS6OpenAIE9textCurieSSvpZ":{"name":"textCurie","abstract":"Very capable, faster and lower cost than Davinci.
","parent_name":"Model"},"Extensions/Model.html#/s:SS6OpenAIE11textBabbageSSvpZ":{"name":"textBabbage","abstract":"Capable of straightforward tasks, very fast, and lower cost.
","parent_name":"Model"},"Extensions/Model.html#/s:SS6OpenAIE7textAdaSSvpZ":{"name":"textAda","abstract":"Capable of very simple tasks, usually the fastest model in the GPT-3 series, and lowest cost.
","parent_name":"Model"},"Extensions/Model.html#/s:SS6OpenAIE15textDavinci_001SSvpZ":{"name":"textDavinci_001","parent_name":"Model"},"Extensions/Model.html#/s:SS6OpenAIE19codeDavinciEdit_001SSvpZ":{"name":"codeDavinciEdit_001","parent_name":"Model"},"Extensions/Model.html#/s:SS6OpenAIE5tts_1SSvpZ":{"name":"tts_1","abstract":"The latest text to speech model, optimized for speed.
","parent_name":"Model"},"Extensions/Model.html#/s:SS6OpenAIE8tts_1_hdSSvpZ":{"name":"tts_1_hd","abstract":"The latest text to speech model, optimized for quality.
","parent_name":"Model"},"Extensions/Model.html#/s:SS6OpenAIE9whisper_1SSvpZ":{"name":"whisper_1","parent_name":"Model"},"Extensions/Model.html#/s:SS6OpenAIE8dall_e_2SSvpZ":{"name":"dall_e_2","parent_name":"Model"},"Extensions/Model.html#/s:SS6OpenAIE8dall_e_3SSvpZ":{"name":"dall_e_3","parent_name":"Model"},"Extensions/Model.html#/s:SS6OpenAIE7davinciSSvpZ":{"name":"davinci","abstract":"Most capable GPT-3 model. Can do any task the other models can do, often with higher quality.
","parent_name":"Model"},"Extensions/Model.html#/s:SS6OpenAIE5curieSSvpZ":{"name":"curie","abstract":"Very capable, but faster and lower cost than Davinci.
","parent_name":"Model"},"Extensions/Model.html#/s:SS6OpenAIE7babbageSSvpZ":{"name":"babbage","abstract":"Capable of straightforward tasks, very fast, and lower cost.
","parent_name":"Model"},"Extensions/Model.html#/s:SS6OpenAIE3adaSSvpZ":{"name":"ada","abstract":"Capable of very simple tasks, usually the fastest model in the GPT-3 series, and lowest cost.
","parent_name":"Model"},"Extensions/Model.html#/s:SS6OpenAIE16textEmbeddingAdaSSvpZ":{"name":"textEmbeddingAda","parent_name":"Model"},"Extensions/Model.html#/s:SS6OpenAIE13textSearchAdaSSvpZ":{"name":"textSearchAda","parent_name":"Model"},"Extensions/Model.html#/s:SS6OpenAIE20textSearchBabbageDocSSvpZ":{"name":"textSearchBabbageDoc","parent_name":"Model"},"Extensions/Model.html#/s:SS6OpenAIE25textSearchBabbageQuery001SSvpZ":{"name":"textSearchBabbageQuery001","parent_name":"Model"},"Extensions/Model.html#/s:SS6OpenAIE14textEmbedding3SSvpZ":{"name":"textEmbedding3","parent_name":"Model"},"Extensions/Model.html#/s:SS6OpenAIE19textEmbedding3LargeSSvpZ":{"name":"textEmbedding3Large","parent_name":"Model"},"Extensions/Model.html#/s:SS6OpenAIE20textModerationStableSSvpZ":{"name":"textModerationStable","abstract":"Almost as capable as the latest model, but slightly older.
","parent_name":"Model"},"Extensions/Model.html#/s:SS6OpenAIE20textModerationLatestSSvpZ":{"name":"textModerationLatest","abstract":"Most capable moderation model. Accuracy will be slightly higher than the stable model.
","parent_name":"Model"},"Extensions/Model.html#/s:SS6OpenAIE10moderationSSvpZ":{"name":"moderation","parent_name":"Model"},"Extensions/Model.html":{"name":"Model"},"Enums/OpenAIError.html#/s:6OpenAI0A7AIErrorO9emptyDatayA2CmF":{"name":"emptyData","parent_name":"OpenAIError"},"Enums/OpenAIError.html":{"name":"OpenAIError"},"Classes/OpenAI/Configuration.html#/s:6OpenAIAAC13ConfigurationV5tokenSSvp":{"name":"token","abstract":"OpenAI API token. See https://platform.openai.com/docs/api-reference/authentication
","parent_name":"Configuration"},"Classes/OpenAI/Configuration.html#/s:6OpenAIAAC13ConfigurationV22organizationIdentifierSSSgvp":{"name":"organizationIdentifier","abstract":"Optional OpenAI organization identifier. See https://platform.openai.com/docs/api-reference/authentication
","parent_name":"Configuration"},"Classes/OpenAI/Configuration.html#/s:6OpenAIAAC13ConfigurationV4hostSSvp":{"name":"host","abstract":"API host. Set this property if you use some kind of proxy or your own server. Default is api.openai.com
","parent_name":"Configuration"},"Classes/OpenAI/Configuration.html#/s:6OpenAIAAC13ConfigurationV4portSivp":{"name":"port","parent_name":"Configuration"},"Classes/OpenAI/Configuration.html#/s:6OpenAIAAC13ConfigurationV6schemeSSvp":{"name":"scheme","parent_name":"Configuration"},"Classes/OpenAI/Configuration.html#/s:6OpenAIAAC13ConfigurationV15timeoutIntervalSdvp":{"name":"timeoutInterval","abstract":"Default request timeout
","parent_name":"Configuration"},"Classes/OpenAI/Configuration.html#/s:6OpenAIAAC13ConfigurationV5token22organizationIdentifier4host4port6scheme15timeoutIntervalADSS_SSSgSSSiSSSdtcfc":{"name":"init(token:organizationIdentifier:host:port:scheme:timeoutInterval:)","parent_name":"Configuration"},"Classes/OpenAI/Configuration.html":{"name":"Configuration","parent_name":"OpenAI"},"Classes/OpenAI.html#/s:6OpenAIAAC13configurationAB13ConfigurationVvp":{"name":"configuration","parent_name":"OpenAI"},"Classes/OpenAI.html#/s:6OpenAIAAC8apiTokenABSS_tcfc":{"name":"init(apiToken:)","parent_name":"OpenAI"},"Classes/OpenAI.html#/s:6OpenAIAAC13configurationA2B13ConfigurationV_tcfc":{"name":"init(configuration:)","parent_name":"OpenAI"},"Classes/OpenAI.html#/s:6OpenAIAAC13configuration7sessionA2B13ConfigurationV_So12NSURLSessionCtcfc":{"name":"init(configuration:session:)","parent_name":"OpenAI"},"Classes/OpenAI.html#/s:6OpenAI0A10AIProtocolP11completions5query10completionyAA16CompletionsQueryV_ys6ResultOyAA0gI0Vs5Error_pGctF":{"name":"completions(query:completion:)","parent_name":"OpenAI"},"Classes/OpenAI.html#/s:6OpenAI0A10AIProtocolP17completionsStream5query8onResult10completionyAA16CompletionsQueryV_ys0H0OyAA0jH0Vs5Error_pGcysAN_pSgcSgtF":{"name":"completionsStream(query:onResult:completion:)","parent_name":"OpenAI"},"Classes/OpenAI.html#/s:6OpenAI0A10AIProtocolP6images5query10completionyAA11ImagesQueryV_ys6ResultOyAA0gI0Vs5Error_pGctF":{"name":"images(query:completion:)","parent_name":"OpenAI"},"Classes/OpenAI.html#/s:6OpenAI0A10AIProtocolP10imageEdits5query10completionyAA05ImageE5QueryV_ys6ResultOyAA06ImagesJ0Vs5Error_pGctF":{"name":"imageEdits(query:completion:)","parent_name":"OpenAI"},"Classes/OpenAI.html#/s:6OpenAI0A10AIProtocolP15imageVariations5query10completionyAA05ImageE5QueryV_ys6ResultOyAA06ImagesJ0Vs5Error_pGctF":{"name":"imageVariations(query:completion:)","parent_name":"OpenAI"},"Classes/OpenAI.html#/s:6OpenAI0A10AIProtocolP10embeddings5query10completionyAA15Embe
ddingsQueryV_ys6ResultOyAA0gI0Vs5Error_pGctF":{"name":"embeddings(query:completion:)","parent_name":"OpenAI"},"Classes/OpenAI.html#/s:6OpenAI0A10AIProtocolP5chats5query10completionyAA9ChatQueryV_ys6ResultOyAA0gI0Vs5Error_pGctF":{"name":"chats(query:completion:)","parent_name":"OpenAI"},"Classes/OpenAI.html#/s:6OpenAI0A10AIProtocolP11chatsStream5query8onResult10completionyAA9ChatQueryV_ys0H0OyAA0jeH0Vs5Error_pGcysAN_pSgcSgtF":{"name":"chatsStream(query:onResult:completion:)","parent_name":"OpenAI"},"Classes/OpenAI.html#/s:6OpenAI0A10AIProtocolP5edits5query10completionyAA10EditsQueryV_ys6ResultOyAA0gI0Vs5Error_pGctF":{"name":"edits(query:completion:)","parent_name":"OpenAI"},"Classes/OpenAI.html#/s:6OpenAI0A10AIProtocolP5model5query10completionyAA10ModelQueryV_ys6ResultOyAA0gI0Vs5Error_pGctF":{"name":"model(query:completion:)","parent_name":"OpenAI"},"Classes/OpenAI.html#/s:6OpenAI0A10AIProtocolP6models10completionyys6ResultOyAA06ModelsF0Vs5Error_pGc_tF":{"name":"models(completion:)","parent_name":"OpenAI"},"Classes/OpenAI.html#/s:6OpenAI0A10AIProtocolP11moderations5query10completionyAA16ModerationsQueryV_ys6ResultOyAA0gI0Vs5Error_pGctF":{"name":"moderations(query:completion:)","parent_name":"OpenAI"},"Classes/OpenAI.html#/s:6OpenAI0A10AIProtocolP19audioTranscriptions5query10completionyAA23AudioTranscriptionQueryV_ys6ResultOyAA0hiK0Vs5Error_pGctF":{"name":"audioTranscriptions(query:completion:)","parent_name":"OpenAI"},"Classes/OpenAI.html#/s:6OpenAI0A10AIProtocolP17audioTranslations5query10completionyAA21AudioTranslationQueryV_ys6ResultOyAA0hiK0Vs5Error_pGctF":{"name":"audioTranslations(query:completion:)","parent_name":"OpenAI"},"Classes/OpenAI.html#/s:6OpenAI0A10AIProtocolP17audioCreateSpeech5query10completionyAA05AudioF5QueryV_ys6ResultOyAA0ifK0Vs5Error_pGctF":{"name":"audioCreateSpeech(query:completion:)","parent_name":"OpenAI"},"Classes/OpenAI.html":{"name":"OpenAI"},"Classes.html":{"name":"Classes","abstract":"The following classes are available globally.
"},"Enums.html":{"name":"Enumerations","abstract":"The following enumerations are available globally.
"},"Extensions.html":{"name":"Extensions","abstract":"The following extensions are available globally.
"},"Protocols.html":{"name":"Protocols","abstract":"The following protocols are available globally.
"},"Structs.html":{"name":"Structures","abstract":"The following structures are available globally.
"},"Typealiases.html":{"name":"Type Aliases","abstract":"The following type aliases are available globally.
"}} \ No newline at end of file diff --git a/docs/undocumented.json b/docs/undocumented.json new file mode 100644 index 00000000..8b7f1785 --- /dev/null +++ b/docs/undocumented.json @@ -0,0 +1,6 @@ +{ + "warnings": [ + + ], + "source_directory": "/Users/dingxiancao/OpenAI" +} \ No newline at end of file