Skip to content

Commit dba18d7

Browse files
authored
Merge pull request #113 from underthestars-zhy/main
Fix Problem on Linux
2 parents 169845a + d850bf0 commit dba18d7

File tree

3 files changed

+32
-2
lines changed

3 files changed

+32
-2
lines changed

Sources/OpenAISwift/Models/ChatMessage.swift

Lines changed: 24 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -34,6 +34,28 @@ public struct ChatMessage: Codable, Identifiable {
3434
self.role = role
3535
self.content = content
3636
}
37+
38+
/// Serialization keys for `ChatMessage`.
///
/// `id` is declared but deliberately never written by `encode(to:)` and never
/// read by `init(from:)` — the identifier is regenerated locally with `UUID()`
/// on decode, so it is purely a client-side identity.
enum CodingKeys: CodingKey {
    case id, role, content
}
43+
44+
/// Creates a message from its serialized form.
///
/// Both `role` and `content` are decoded with `decodeIfPresent`, so a payload
/// may omit either key (streamed chat deltas frequently carry only one of
/// them) without the decode failing. A fresh local `UUID` is assigned as the
/// identifier — the wire format carries no `id`.
/// - Parameter decoder: The decoder to read the keyed payload from.
/// - Throws: `DecodingError` if a present key holds a value of the wrong type.
public init(from decoder: Decoder) throws {
    let values = try decoder.container(keyedBy: CodingKeys.self)

    id = UUID()
    role = try values.decodeIfPresent(ChatRole.self, forKey: .role)
    content = try values.decodeIfPresent(String.self, forKey: .content)
}
52+
53+
/// Serializes the message for the API.
///
/// Only `role` and `content` are written, and only when non-nil
/// (`encodeIfPresent`); the locally generated `id` is intentionally excluded
/// from the payload.
/// - Parameter encoder: The encoder to write the keyed payload into.
/// - Throws: `EncodingError` if a value cannot be encoded.
public func encode(to encoder: Encoder) throws {
    var values = encoder.container(keyedBy: CodingKeys.self)

    try values.encodeIfPresent(role, forKey: .role)
    try values.encodeIfPresent(content, forKey: .content)
}
3759
}
3860

3961
/// A structure that represents a chat conversation.
@@ -70,7 +92,7 @@ public struct ChatConversation: Encodable {
7092

7193
/// Modify the likelihood of specified tokens appearing in the completion. Maps tokens (specified by their token ID in the OpenAI Tokenizer—not English words) to an associated bias value from -100 to 100. Values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
7294
let logitBias: [Int: Double]?
73-
95+
7496
/// If you're generating long completions, waiting for the response can take many seconds. To get responses sooner, you can 'stream' the completion as it's being generated. This allows you to start printing or processing the beginning of the completion before the full completion is finished.
7597
/// https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb
7698
let stream: Bool?
@@ -96,6 +118,6 @@ public struct ChatError: Codable {
96118
public let message, type: String
97119
public let param, code: String?
98120
}
99-
121+
100122
public let error: Payload
101123
}

Sources/OpenAISwift/OpenAIEndpoint.swift

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,10 @@
33
//
44

55
import Foundation
6+
#if canImport(FoundationNetworking) && canImport(FoundationXML)
7+
import FoundationNetworking
8+
import FoundationXML
9+
#endif
610

711
public struct OpenAIEndpointProvider {
812
public enum API {

Sources/OpenAISwift/ServerSentEventsHandler.swift

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,10 @@
66
//
77

88
import Foundation
9+
#if canImport(FoundationNetworking) && canImport(FoundationXML)
10+
import FoundationNetworking
11+
import FoundationXML
12+
#endif
913

1014
class ServerSentEventsHandler: NSObject {
1115

0 commit comments

Comments
 (0)