2 changes: 2 additions & 0 deletions .github/workflows/build.yml
@@ -69,6 +69,8 @@ jobs:
          test: "*.TestThreads"
        - name: Vector Stores
          test: "*.TestVectorStores"
        - name: Responses
          test: "*.TestResponses"
        - name: Misc.
          test: "*.misc.*"
    steps:
OpenAI.kt
@@ -11,7 +11,7 @@ import kotlin.time.Duration.Companion.seconds
 * OpenAI API.
 */
public interface OpenAI : Completions, Files, Edits, Embeddings, Models, Moderations, FineTunes, Images, Chat, Audio,
    FineTuning, Assistants, Threads, Runs, Messages, VectorStores, AutoCloseable
    FineTuning, Assistants, Threads, Runs, Messages, VectorStores, Responses, AutoCloseable

/**
 * Creates an instance of [OpenAI].
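With Responses added to the supertype list, the new endpoints become available on any client built through the existing factory. A minimal sketch (the token source is illustrative):

val openAI = OpenAI(token = System.getenv("OPENAI_API_KEY"))
// The client now exposes createResponse, getResponse, deleteResponse,
// and listResponseItems alongside chat, files, images, etc.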
Responses.kt (new file)
@@ -0,0 +1,66 @@
package com.aallam.openai.client

import com.aallam.openai.api.core.RequestOptions
import com.aallam.openai.api.responses.Response
import com.aallam.openai.api.responses.ResponseIncludable
import com.aallam.openai.api.responses.ResponseItem
import com.aallam.openai.api.responses.ResponseRequest

/** Interface for OpenAI's Responses API. */
public interface Responses {

    /**
     * Creates a new response.
     *
     * @param request The request for creating a response
     * @param requestOptions Optional request configuration
     * @return The created response
     */
    public suspend fun createResponse(
        request: ResponseRequest,
        requestOptions: RequestOptions? = null,
    ): Response

    /**
     * Retrieves a model response with the given ID.
     *
     * @param responseId The ID of the response to retrieve
     * @param include Additional fields to include in the response
     * @param requestOptions Optional request configuration
     * @return The retrieved response
     */
    public suspend fun getResponse(
        responseId: String,
        include: List<ResponseIncludable>? = null,
        requestOptions: RequestOptions? = null,
    ): Response

    /**
     * Deletes a model response with the given ID.
     *
     * @param responseId The ID of the response to delete
     * @param requestOptions Optional request configuration
     * @return `true` if the response was deleted, `false` if it was not found
     */
    public suspend fun deleteResponse(
        responseId: String,
        requestOptions: RequestOptions? = null,
    ): Boolean

    /**
     * Returns a list of input items for a given response.
     *
     * @param responseId The ID of the response
     * @param after An item ID to list items after, used in pagination
     * @param before An item ID to list items before, used in pagination
     * @param include Additional fields to include in the response
     * @param limit A limit on the number of objects to be returned, between 1 and 100 (default 20)
     * @param order The order to return the input items in, either "asc" or "desc" (default "desc")
     * @param requestOptions Optional request configuration
     * @return The input items for the response
     */
    public suspend fun listResponseItems(
        responseId: String,
        after: String? = null,
        before: String? = null,
        include: List<ResponseIncludable>? = null,
        limit: Int? = null,
        order: String? = null,
        requestOptions: RequestOptions? = null,
    ): List<ResponseItem>

    // TODO: streaming support
}
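A usage sketch of the interface above, using the responseRequest builder and ResponseInput.from that appear in the tests later in this diff; the id property on Response is an assumption, since the Response model itself is not part of this diff:

import com.aallam.openai.api.model.ModelId
import com.aallam.openai.api.responses.ResponseInput
import com.aallam.openai.api.responses.responseRequest
import com.aallam.openai.client.OpenAI

suspend fun demo(openAI: OpenAI) {
    // Create a response and read it back.
    val created = openAI.createResponse(
        request = responseRequest {
            model = ModelId("gpt-4o")
            input = ResponseInput.from("What is the capital of France?")
        }
    )
    val fetched = openAI.getResponse(responseId = created.id) // assumes Response.id
    println(fetched.output)

    // Delete it; `false` means the ID was not found.
    println(openAI.deleteResponse(responseId = created.id))
}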
OpenAIApi.kt
@@ -29,4 +29,5 @@ internal class OpenAIApi(
    Messages by MessagesApi(requester),
    VectorStores by VectorStoresApi(requester),
    Batch by BatchApi(requester),
    Responses by ResponsesApi(requester),
    AutoCloseable by requester
ApiPath.kt
@@ -23,4 +23,5 @@ internal object ApiPath {
    const val Threads = "threads"
    const val VectorStores = "vector_stores"
    const val Batches = "batches"
    const val Responses = "responses"
}
ResponsesApi.kt (new file)
@@ -0,0 +1,85 @@
package com.aallam.openai.client.internal.api

import com.aallam.openai.api.core.DeleteResponse
import com.aallam.openai.api.core.ListResponse
import com.aallam.openai.api.core.RequestOptions
import com.aallam.openai.api.responses.Response
import com.aallam.openai.api.responses.ResponseIncludable
import com.aallam.openai.api.responses.ResponseItem
import com.aallam.openai.api.responses.ResponseRequest
import com.aallam.openai.client.Responses
import com.aallam.openai.client.internal.extension.requestOptions
import com.aallam.openai.client.internal.http.HttpRequester
import com.aallam.openai.client.internal.http.perform

import io.ktor.client.*
import io.ktor.client.call.*
import io.ktor.client.request.*
import io.ktor.client.statement.*
import io.ktor.http.*

internal class ResponsesApi(private val requester: HttpRequester) : Responses {

    override suspend fun createResponse(request: ResponseRequest, requestOptions: RequestOptions?): Response {
        return requester.perform { client: HttpClient ->
            client.post {
                url(path = ApiPath.Responses)
                setBody(request.copy(stream = false)) // non-streaming variant; streaming is handled separately
                contentType(ContentType.Application.Json)
                requestOptions(requestOptions)
            }.body()
        }
    }

    override suspend fun getResponse(
        responseId: String,
        include: List<ResponseIncludable>?,
        requestOptions: RequestOptions?
    ): Response {
        return requester.perform { client: HttpClient ->
            client.get {
                url(path = "${ApiPath.Responses}/$responseId")
                // Send each includable as its own `include` query parameter; passing the list
                // directly would serialize it via toString(). Assumes ResponseIncludable exposes
                // its wire value as `value`, like the other value classes in this PR.
                include?.forEach { parameter("include", it.value) }
                requestOptions(requestOptions)
            }.body()
        }
    }

    override suspend fun deleteResponse(responseId: String, requestOptions: RequestOptions?): Boolean {
        val response = requester.perform<HttpResponse> {
            it.delete {
                url(path = "${ApiPath.Responses}/$responseId")
                requestOptions(requestOptions)
            }
        }
        return when (response.status) {
            // Deleting an unknown ID is reported as `false` rather than thrown.
            HttpStatusCode.NotFound -> false
            else -> response.body<DeleteResponse>().deleted
        }
    }

    override suspend fun listResponseItems(
        responseId: String,
        after: String?,
        before: String?,
        include: List<ResponseIncludable>?,
        limit: Int?,
        order: String?,
        requestOptions: RequestOptions?
    ): List<ResponseItem> {
        return requester.perform<ListResponse<ResponseItem>> {
            it.get {
                // The documented endpoint for input items is `input_items`.
                url(path = "${ApiPath.Responses}/$responseId/input_items")
                parameter("after", after)
                parameter("before", before)
                include?.forEach { parameter("include", it.value) }
                parameter("limit", limit)
                parameter("order", order)
                requestOptions(requestOptions)
            }
        }.data
    }

    // TODO: streaming support
}
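Because listResponseItems returns a plain list with no has_more flag, a caller has to infer the end of the collection. One way to page through all input items with the after cursor; a sketch only, since ResponseItem's shape is outside this diff (the id property used as the cursor is assumed):

import com.aallam.openai.api.responses.ResponseItem
import com.aallam.openai.client.Responses

// Hypothetical helper: follows the `after` cursor until a short page signals the end.
suspend fun Responses.allResponseItems(responseId: String): List<ResponseItem> {
    val pageSize = 100
    val items = mutableListOf<ResponseItem>()
    var cursor: String? = null
    do {
        val page = listResponseItems(responseId = responseId, after = cursor, limit = pageSize)
        items += page
        cursor = page.lastOrNull()?.id // assumes ResponseItem.id
    } while (page.size == pageSize)
    return items
}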
TestResponses.kt (new file)
@@ -0,0 +1,81 @@
package com.aallam.openai.client

import com.aallam.openai.api.core.Parameters.Companion.buildJsonObject
import com.aallam.openai.api.model.ModelId
import com.aallam.openai.api.responses.*
import kotlinx.serialization.json.add
import kotlinx.serialization.json.put
import kotlinx.serialization.json.putJsonArray
import kotlinx.serialization.json.putJsonObject
import kotlin.test.Test
import kotlin.test.assertNotNull

class TestResponses : TestOpenAI() {

    @Test
    fun basicResponse() = test {
        val response = openAI.createResponse(
            request = responseRequest {
                model = ModelId("gpt-4o")
                input = ResponseInput.from("What is the capital of France?")
            }
        )

        assertNotNull(response)
        assertNotNull(response.output)
    }

    @Test
    fun responseWithTools() = test {
        val response = openAI.createResponse(
            request = responseRequest {
                model = ModelId("gpt-4o")
                input = ResponseInput.from("What's the weather like in Paris?")
                tools {
                    add(
                        ResponseTool.ResponseFunctionTool(
                            name = "get_weather",
                            description = "Get the current weather",
                            parameters = buildJsonObject {
                                put("type", "object")
                                putJsonObject("properties") {
                                    putJsonObject("location") {
                                        put("type", "string")
                                        put("description", "The city and state, e.g. San Francisco, CA")
                                    }
                                    putJsonObject("unit") {
                                        put("type", "string")
                                        putJsonArray("enum") {
                                            add("celsius")
                                            add("fahrenheit")
                                        }
                                    }
                                }
                                putJsonArray("required") {
                                    add("location")
                                }
                            },
                        )
                    )
                }
            }
        )

        assertNotNull(response)
        assertNotNull(response.output)
    }

    @Test
    fun responseWithInstructions() = test {
        val response = openAI.createResponse(
            request = responseRequest {
                model = ModelId("gpt-4o")
                input = ResponseInput.from("Tell me about artificial intelligence")
                instructions = "Provide a concise answer focusing on recent developments"
                maxOutputTokens = 200
            }
        )

        assertNotNull(response)
        assertNotNull(response.output)
    }
}
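A possible follow-up test covering the read and delete paths, which the suite above leaves unexercised; it would sit inside TestResponses, additionally needs kotlin.test.assertTrue, and assumes Response exposes an id property (not shown in this diff):

    @Test
    fun responseLifecycle() = test {
        val created = openAI.createResponse(
            request = responseRequest {
                model = ModelId("gpt-4o")
                input = ResponseInput.from("Say hello")
            }
        )
        val fetched = openAI.getResponse(created.id) // assumes Response.id
        assertNotNull(fetched.output)
        assertTrue(openAI.deleteResponse(created.id)) // requires kotlin.test.assertTrue
    }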
ReasoningConfig.kt (new file)
@@ -0,0 +1,52 @@
package com.aallam.openai.api.responses

import kotlinx.serialization.SerialName
import kotlinx.serialization.Serializable
import kotlin.jvm.JvmInline

/**
 * Configuration options for reasoning models.
 */
@Serializable
public data class ReasoningConfig(
    /**
     * Constrains effort on reasoning for reasoning models.
     * Currently supported values are `low`, `medium`, and `high`.
     * Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
     */
    @SerialName("effort")
    val effort: ReasoningEffort? = null,

    /**
     * A summary of the reasoning performed by the model.
     * This can be useful for debugging and understanding the model's reasoning process.
     * One of `concise` or `detailed`.
     */
    @SerialName("generate_summary")
    val generateSummary: String? = null,
)

/**
 * Reasoning effort levels for models with reasoning capabilities.
 */
@JvmInline
@Serializable
public value class ReasoningEffort(public val value: String) {
    public companion object {
        /**
         * Low reasoning effort.
         */
        public val Low: ReasoningEffort = ReasoningEffort("low")

        /**
         * Medium reasoning effort (default).
         */
        public val Medium: ReasoningEffort = ReasoningEffort("medium")

        /**
         * High reasoning effort.
         */
        public val High: ReasoningEffort = ReasoningEffort("high")
    }
}
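For illustration, constructing the config and attaching it to a request; the reasoning field on the responseRequest builder is an assumption, as the builder's fields are not shown in this diff:

// Hypothetical: assumes the responseRequest builder exposes a `reasoning` field.
val request = responseRequest {
    model = ModelId("o3-mini") // illustrative reasoning model
    input = ResponseInput.from("Prove that the square root of 2 is irrational.")
    reasoning = ReasoningConfig(
        effort = ReasoningEffort.High,
        generateSummary = "concise",
    )
}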