
Commit 0cfbd73 (parent f4d9d9d)

Update to 1.3 and fix minor documentation errors

8 files changed: +49 −39 lines

OpenAI_API/Completions/CompletionEndpoint.cs (+9 −9)
@@ -37,7 +37,7 @@ internal CompletionEndpoint(OpenAIAPI api)
 /// Ask the API to complete the prompt(s) using the specified request. This is non-streaming, so it will wait until the API returns the full result.
 /// </summary>
 /// <param name="request">The request to send to the API. This does not fall back to default values specified in <see cref="DefaultCompletionRequestArgs"/>.</param>
-/// <returns>Asynchronously returns the completion result. Look in its <see cref="CompletionResult.Choices"/> property for the completions.</returns>
+/// <returns>Asynchronously returns the completion result. Look in its <see cref="CompletionResult.Completions"/> property for the completions.</returns>
 public async Task<CompletionResult> CreateCompletionAsync(CompletionRequest request)
 {
 if (Api.Auth?.ApiKey is null)
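The corrected cref now points at CompletionResult.Completions rather than a nonexistent Choices property. A minimal usage sketch, not part of this commit; it assumes the endpoint is exposed as api.Completions, that OpenAIAPI can be constructed from an API key string, and that each Choice exposes its generated text as Text:

// Sketch only: api.Completions, the OpenAIAPI(string) ctor, and Choice.Text
// are assumptions not confirmed by this diff.
using System;
using System.Threading.Tasks;
using OpenAI_API;

public static class CompletionSketch
{
    public static async Task RunAsync()
    {
        var api = new OpenAIAPI("sk-...");  // placeholder key
        CompletionResult result = await api.Completions.CreateCompletionAsync(
            new CompletionRequest("One Two Three One Two"));
        foreach (var choice in result.Completions)  // the property the docs now reference
            Console.WriteLine(choice.Text);         // Choice.Text is assumed
    }
}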
@@ -83,7 +83,7 @@ public async Task<CompletionResult> CreateCompletionAsync(CompletionRequest requ
 /// </summary>
 /// <param name="request">The request to send to the API. This does not fall back to default values specified in <see cref="DefaultCompletionRequestArgs"/>.</param>
 /// <param name="numOutputs">Overrides <see cref="CompletionRequest.NumChoicesPerPrompt"/> as a convenience.</param>
-/// <returns>Asynchronously returns the completion result. Look in its <see cref="CompletionResult.Choices"/> property for the completions, which should have a length equal to <paramref name="numOutputs"/>.</returns>
+/// <returns>Asynchronously returns the completion result. Look in its <see cref="CompletionResult.Completions"/> property for the completions, which should have a length equal to <paramref name="numOutputs"/>.</returns>
 public Task<CompletionResult> CreateCompletionsAsync(CompletionRequest request, int numOutputs = 5)
 {
 request.NumChoicesPerPrompt = numOutputs;
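CreateCompletionsAsync is a convenience wrapper that overwrites NumChoicesPerPrompt before sending. Continuing the sketch above, under the same assumptions:

// Request three completions for a single prompt in one call.
var result = await api.Completions.CreateCompletionsAsync(
    new CompletionRequest("Once upon a time"), numOutputs: 3);
Console.WriteLine(result.Completions.Count);  // expected: 3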
@@ -94,17 +94,17 @@ public Task<CompletionResult> CreateCompletionsAsync(CompletionRequest request,
 /// Ask the API to complete the prompt(s) using the specified parameters. This is non-streaming, so it will wait until the API returns the full result. Any non-specified parameters will fall back to default values specified in <see cref="DefaultCompletionRequestArgs"/> if present.
 /// </summary>
 /// <param name="prompt">The prompt to generate from</param>
-/// <param name="model">The model to use. You can use <see cref="ModelsEndpoint.GetModelsAsync"/> to see all of your available models, or use a standard model like <see cref="Model.DavinciText"/>.</param>
+/// <param name="model">The model to use. You can use <see cref="ModelsEndpoint.GetModelsAsync()"/> to see all of your available models, or use a standard model like <see cref="Model.DavinciText"/>.</param>
 /// <param name="max_tokens">How many tokens to complete to. Can return fewer if a stop sequence is hit.</param>
 /// <param name="temperature">What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. It is generally recommend to use this or <paramref name="top_p"/> but not both.</param>
 /// <param name="top_p">An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. It is generally recommend to use this or <paramref name="temperature"/> but not both.</param>
 /// <param name="numOutputs">How many different choices to request for each prompt.</param>
 /// <param name="presencePenalty">The scale of the penalty applied if a token is already present at all. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse.</param>
 /// <param name="frequencyPenalty">The scale of the penalty for how often a token is used. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse.</param>
-/// <param name="logProbs">Include the log probabilities on the logprobs most likely tokens, which can be found in <see cref="CompletionResult.Choices"/> -> <see cref="Choice.Logprobs"/>. So for example, if logprobs is 10, the API will return a list of the 10 most likely tokens. If logprobs is supplied, the API will always return the logprob of the sampled token, so there may be up to logprobs+1 elements in the response.</param>
+/// <param name="logProbs">Include the log probabilities on the logprobs most likely tokens, which can be found in <see cref="CompletionResult.Completions"/> -> <see cref="Choice.Logprobs"/>. So for example, if logprobs is 10, the API will return a list of the 10 most likely tokens. If logprobs is supplied, the API will always return the logprob of the sampled token, so there may be up to logprobs+1 elements in the response.</param>
 /// <param name="echo">Echo back the prompt in addition to the completion.</param>
 /// <param name="stopSequences">One or more sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.</param>
-/// <returns>Asynchronously returns the completion result. Look in its <see cref="CompletionResult.Choices"/> property for the completions.</returns>
+/// <returns>Asynchronously returns the completion result. Look in its <see cref="CompletionResult.Completions"/> property for the completions.</returns>
 public Task<CompletionResult> CreateCompletionAsync(string prompt,
 Model model = null,
 int? max_tokens = null,
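This overload builds the CompletionRequest for you; named arguments (names taken from the doc comments above) sidestep the long positional list. Continuing the earlier sketch:

// Use temperature or top_p, but not both, per the doc comments.
var result = await api.Completions.CreateCompletionAsync(
    prompt: "Q: What is the capital of France?\nA:",
    model: Model.DavinciText,
    max_tokens: 16,
    temperature: 0.0,  // argmax sampling for a well-defined answer
    numOutputs: 1);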
@@ -232,11 +232,11 @@ public async Task StreamCompletionAsync(CompletionRequest request, Action<Comple
 }

 /// <summary>
-/// Ask the API to complete the prompt(s) using the specified request, and stream the results to the <paramref name="resultHandler"/> as they come in.
+/// Ask the API to complete the prompt(s) using the specified request, and stream the results as they come in.
 /// If you are not using C# 8 supporting async enumerables or if you are using the .NET Framework, you may need to use <see cref="StreamCompletionAsync(CompletionRequest, Action{CompletionResult})"/> instead.
 /// </summary>
 /// <param name="request">The request to send to the API. This does not fall back to default values specified in <see cref="DefaultCompletionRequestArgs"/>.</param>
-/// <returns>An async enumerable with each of the results as they come in. See <seealso cref="https://docs.microsoft.com/en-us/dotnet/csharp/whats-new/csharp-8#asynchronous-streams"/> for more details on how to consume an async enumerable.</returns>
+/// <returns>An async enumerable with each of the results as they come in. See <see href="https://docs.microsoft.com/en-us/dotnet/csharp/whats-new/csharp-8#asynchronous-streams"/> for more details on how to consume an async enumerable.</returns>
 public async IAsyncEnumerable<CompletionResult> StreamCompletionEnumerableAsync(CompletionRequest request)
 {
 if (Api.Auth?.ApiKey is null)
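Consuming the returned IAsyncEnumerable requires C# 8's await foreach, as the linked Microsoft docs describe. Continuing the earlier sketch:

// Stream partial results as they arrive (C# 8+ async streams).
await foreach (var partial in api.Completions.StreamCompletionEnumerableAsync(
    new CompletionRequest("My favorite food is")))
{
    Console.Write(partial.Completions[0].Text);  // Choice.Text is assumed
}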
@@ -293,14 +293,14 @@ public async IAsyncEnumerable<CompletionResult> StreamCompletionEnumerableAsync(
 /// If you are not using C# 8 supporting async enumerables or if you are using the .NET Framework, you may need to use <see cref="StreamCompletionAsync(CompletionRequest, Action{CompletionResult})"/> instead.
 /// </summary>
 /// <param name="prompt">The prompt to generate from</param>
-/// <param name="model">The model to use. You can use <see cref="ModelsEndpoint.GetModelsAsync"/> to see all of your available models, or use a standard model like <see cref="Model.DavinciText"/>.</param>
+/// <param name="model">The model to use. You can use <see cref="ModelsEndpoint.GetModelsAsync()"/> to see all of your available models, or use a standard model like <see cref="Model.DavinciText"/>.</param>
 /// <param name="max_tokens">How many tokens to complete to. Can return fewer if a stop sequence is hit.</param>
 /// <param name="temperature">What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. It is generally recommend to use this or <paramref name="top_p"/> but not both.</param>
 /// <param name="top_p">An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. It is generally recommend to use this or <paramref name="temperature"/> but not both.</param>
 /// <param name="numOutputs">How many different choices to request for each prompt.</param>
 /// <param name="presencePenalty">The scale of the penalty applied if a token is already present at all. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse.</param>
 /// <param name="frequencyPenalty">The scale of the penalty for how often a token is used. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse.</param>
-/// <param name="logProbs">Include the log probabilities on the logprobs most likely tokens, which can be found in <see cref="CompletionResult.Choices"/> -> <see cref="Choice.Logprobs"/>. So for example, if logprobs is 10, the API will return a list of the 10 most likely tokens. If logprobs is supplied, the API will always return the logprob of the sampled token, so there may be up to logprobs+1 elements in the response.</param>
+/// <param name="logProbs">Include the log probabilities on the logprobs most likely tokens, which can be found in <see cref="CompletionResult.Completions"/> -> <see cref="Choice.Logprobs"/>. So for example, if logprobs is 10, the API will return a list of the 10 most likely tokens. If logprobs is supplied, the API will always return the logprob of the sampled token, so there may be up to logprobs+1 elements in the response.</param>
 /// <param name="echo">Echo back the prompt in addition to the completion.</param>
 /// <param name="stopSequences">One or more sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.</param>
 /// <returns>An async enumerable with each of the results as they come in. See <see href="https://docs.microsoft.com/en-us/dotnet/csharp/whats-new/csharp-8#asynchronous-streams">the C# docs</see> for more details on how to consume an async enumerable.</returns>
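For callers without C# 8 async streams, these docs point to the Action-based overload instead; a sketch under the same assumptions as above:

// Callback-based streaming: the handler fires once per partial result.
await api.Completions.StreamCompletionAsync(
    new CompletionRequest("My favorite food is"),
    partial => Console.Write(partial.Completions[0].Text));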

OpenAI_API/Completions/CompletionRequest.cs (+4 −4)
@@ -15,7 +15,7 @@ namespace OpenAI_API
 public class CompletionRequest
 {
 /// <summary>
-/// ID of the model to use. You can use <see cref="ModelsEndpoint.GetModelsAsync"/> to see all of your available models, or use a standard model like <see cref="Model.DavinciText"/>.
+/// ID of the model to use. You can use <see cref="ModelsEndpoint.GetModelsAsync()"/> to see all of your available models, or use a standard model like <see cref="Model.DavinciText"/>.
 /// </summary>
 [JsonProperty("model")]
 public string Model { get; set; }
@@ -103,7 +103,7 @@ public string Prompt
 public bool Stream { get; internal set; } = false;

 /// <summary>
-/// Include the log probabilities on the logprobs most likely tokens, which can be found in <see cref="CompletionResult.Choices"/> -> <see cref="Choice.Logprobs"/>. So for example, if logprobs is 5, the API will return a list of the 5 most likely tokens. If logprobs is supplied, the API will always return the logprob of the sampled token, so there may be up to logprobs+1 elements in the response. The maximum value for logprobs is 5.
+/// Include the log probabilities on the logprobs most likely tokens, which can be found in <see cref="CompletionResult.Completions"/> -> <see cref="Choice.Logprobs"/>. So for example, if logprobs is 5, the API will return a list of the 5 most likely tokens. If logprobs is supplied, the API will always return the logprob of the sampled token, so there may be up to logprobs+1 elements in the response. The maximum value for logprobs is 5.
 /// </summary>
 [JsonProperty("logprobs")]
 public int? Logprobs { get; set; }
@@ -209,15 +209,15 @@ public CompletionRequest(params string[] prompts)
 /// Creates a new <see cref="CompletionRequest"/> with the specified parameters
 /// </summary>
 /// <param name="prompt">The prompt to generate from</param>
-/// <param name="model">The model to use. You can use <see cref="ModelsEndpoint.GetModelsAsync"/> to see all of your available models, or use a standard model like <see cref="Model.DavinciText"/>.</param>
+/// <param name="model">The model to use. You can use <see cref="ModelsEndpoint.GetModelsAsync()"/> to see all of your available models, or use a standard model like <see cref="Model.DavinciText"/>.</param>
 /// <param name="max_tokens">How many tokens to complete to. Can return fewer if a stop sequence is hit.</param>
 /// <param name="temperature">What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. It is generally recommend to use this or <paramref name="top_p"/> but not both.</param>
 /// <param name="suffix">The suffix that comes after a completion of inserted text</param>
 /// <param name="top_p">An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. It is generally recommend to use this or <paramref name="temperature"/> but not both.</param>
 /// <param name="numOutputs">How many different choices to request for each prompt.</param>
 /// <param name="presencePenalty">The scale of the penalty applied if a token is already present at all. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse.</param>
 /// <param name="frequencyPenalty">The scale of the penalty for how often a token is used. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse.</param>
-/// <param name="logProbs">Include the log probabilities on the logprobs most likely tokens, which can be found in <see cref="CompletionResult.Choices"/> -> <see cref="Choice.Logprobs"/>. So for example, if logprobs is 10, the API will return a list of the 10 most likely tokens. If logprobs is supplied, the API will always return the logprob of the sampled token, so there may be up to logprobs+1 elements in the response.</param>
+/// <param name="logProbs">Include the log probabilities on the logprobs most likely tokens, which can be found in <see cref="CompletionResult.Completions"/> -> <see cref="Choice.Logprobs"/>. So for example, if logprobs is 10, the API will return a list of the 10 most likely tokens. If logprobs is supplied, the API will always return the logprob of the sampled token, so there may be up to logprobs+1 elements in the response.</param>
 /// <param name="echo">Echo back the prompt in addition to the completion.</param>
 /// <param name="stopSequences">One or more sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.</param>
 public CompletionRequest(
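This constructor mirrors the endpoint overload's parameters. A sketch exercising the logprobs behavior documented above, continuing the earlier sketch (the shape of Choice.Logprobs is not shown in this diff, so it is left opaque):

// Ask for the 5 most likely tokens at each position (5 is the documented maximum).
var request = new CompletionRequest(
    prompt: "The quick brown",
    model: Model.DavinciText,
    max_tokens: 8,
    logProbs: 5);
var result = await api.Completions.CreateCompletionAsync(request);
var logprobs = result.Completions[0].Logprobs;  // per the cref above; type not shown here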

OpenAI_API/Model/Model.cs (−2)
@@ -46,8 +46,6 @@ public static implicit operator Model(string name)
 /// Represents an Model with the given id/<see cref="ModelID"/>
 /// </summary>
 /// <param name="name">The id/<see cref="ModelID"/> to use.
-/// If the <paramref name="name"/> contains a colon (as is the case in the API's <see cref="CompletionResult.Model"/> response),
-/// the part before the colon is treated as the id/<see cref="ModelID"/> and the following portion is considered the <see cref="ModelRevision"/>
 /// </param>
 public Model(string name)
 {
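With the colon-splitting note removed, the constructor evidently treats the entire string as the id. A tiny sketch using the implicit conversion named in the hunk header (the model id is a placeholder):

Model byCtor = new Model("text-davinci-003");  // whole string becomes the ModelID
Model byCast = "text-davinci-003";             // via implicit operator Model(string name)
Console.WriteLine(byCtor.ModelID);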

OpenAI_API/Model/ModelsEndpoint.cs (+8 −8)
@@ -36,7 +36,7 @@ public Task<List<Model>> GetModelsAsync()
 }

 /// <summary>
-/// Get details about a particular Model from the API, specifically properties such as <see cref="Model.Owner"/> and <see cref="Model.Ready"/>
+/// Get details about a particular Model from the API, specifically properties such as <see cref="Model.OwnedBy"/> and permissions.
 /// </summary>
 /// <param name="id">The id/name of the model to get more details about</param>
 /// <returns>Asynchronously returns the <see cref="Model"/> with all available properties</returns>
@@ -75,13 +75,13 @@ public static async Task<List<Model>> GetModelsAsync(APIAuthentication auth = nu
 }
 }

-/// <summary>
-/// Get details about a particular Model from the API, specifically properties such as <see cref="Model.Owner"/> and <see cref="Model.Ready"/>
-/// </summary>
-/// <param name="id">The id/name of the model to get more details about</param>
-/// <param name="auth">API authentication in order to call the API endpoint. If not specified, attempts to use a default.</param>
-/// <returns>Asynchronously returns the <see cref="Model"/> with all available properties</returns>
-public static async Task<Model> RetrieveModelDetailsAsync(string id, APIAuthentication auth = null)
+/// <summary>
+/// Get details about a particular Model from the API, specifically properties such as <see cref="Model.OwnedBy"/> and permissions.
+/// </summary>
+/// <param name="id">The id/name of the model to get more details about</param>
+/// <param name="auth">API authentication in order to call the API endpoint. If not specified, attempts to use a default.</param>
+/// <returns>Asynchronously returns the <see cref="Model"/> with all available properties</returns>
+public static async Task<Model> RetrieveModelDetailsAsync(string id, APIAuthentication auth = null)
 {
 if (auth.ThisOrDefault()?.ApiKey is null)
 {
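The two doc fixes in this file both describe model metadata lookups. A sketch, assuming the endpoint is exposed as api.Models (as with api.Completions above) and that APIAuthentication accepts a key string; the model id is a placeholder:

// Instance call: list every model available to this account.
var models = await api.Models.GetModelsAsync();
foreach (var m in models)
    Console.WriteLine(m.ModelID);

// Static call: fetch one model's details with explicit (or default) auth.
var details = await ModelsEndpoint.RetrieveModelDetailsAsync(
    "text-davinci-003", new APIAuthentication("sk-..."));
Console.WriteLine(details.OwnedBy);  // one of the properties the updated docs mention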
