Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions src/models/_model_map.js
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,10 @@ const apiMap = await (async () => {
})();

export function selectAPI(profile) {
// RC27: Guard against undefined/null profile (e.g., missing model key in profile JSON)
if (!profile) {
throw new Error('No model specified in profile configuration.');
}
if (typeof profile === 'string' || profile instanceof String) {
profile = {model: profile};
}
Expand Down
4 changes: 2 additions & 2 deletions src/models/cerebras.js
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ export class Cerebras {
this.client = new CerebrasSDK({ apiKey: getKey('CEREBRAS_API_KEY') });
}

async sendRequest(turns, systemMessage, stop_seq = '***') {
async sendRequest(turns, systemMessage, _stop_seq = '***') {
// Format messages array
const messages = strictFormat(turns);
messages.unshift({ role: 'system', content: systemMessage });
Expand Down Expand Up @@ -55,7 +55,7 @@ export class Cerebras {
return this.sendRequest(imageMessages, systemMessage);
}

async embed(text) {
async embed(_text) {
throw new Error('Embeddings are not supported by Cerebras.');
}
}
2 changes: 1 addition & 1 deletion src/models/claude.js
Original file line number Diff line number Diff line change
Expand Up @@ -81,7 +81,7 @@ export class Claude {
return this.sendRequest(imageMessages, systemMessage);
}

async embed(text) {
async embed(_text) {
throw new Error('Embeddings are not supported by Claude.');
}
}
4 changes: 2 additions & 2 deletions src/models/deepseek.js
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
import OpenAIApi from 'openai';
import { getKey, hasKey } from '../utils/keys.js';
import { getKey } from '../utils/keys.js';
import { strictFormat } from '../utils/text.js';

export class DeepSeek {
Expand Down Expand Up @@ -50,7 +50,7 @@ export class DeepSeek {
return res;
}

async embed(text) {
async embed(_text) {
throw new Error('Embeddings are not supported by Deepseek.');
}
}
Expand Down
23 changes: 17 additions & 6 deletions src/models/gemini.js
Original file line number Diff line number Diff line change
Expand Up @@ -56,6 +56,11 @@ export class Gemini {
}
});
const response = await result.text;
this._lastUsage = result.usageMetadata ? {
prompt_tokens: result.usageMetadata.promptTokenCount || 0,
completion_tokens: result.usageMetadata.candidatesTokenCount || 0,
total_tokens: result.usageMetadata.totalTokenCount || 0,
} : null;

console.log('Received.');

Expand Down Expand Up @@ -90,12 +95,17 @@ export class Gemini {
model: this.model_name,
contents: contents,
safetySettings: this.safetySettings,
generationConfig: {
config: {
systemInstruction: systemMessage,
...(this.params || {})
},
systemInstruction: systemMessage
}
});
res = await result.text;
this._lastUsage = result.usageMetadata ? {
prompt_tokens: result.usageMetadata.promptTokenCount || 0,
completion_tokens: result.usageMetadata.candidatesTokenCount || 0,
total_tokens: result.usageMetadata.totalTokenCount || 0,
} : null;
console.log('Received.');
} catch (err) {
console.log(err);
Expand All @@ -112,13 +122,14 @@ export class Gemini {
const result = await this.genAI.models.embedContent({
model: this.model_name || "gemini-embedding-001",
contents: text,
})
});

return result.embeddings;
// @google/genai v1.x returns result.embedding.values (not result.embeddings)
return result?.embedding?.values ?? result?.embeddings;
}
}

const sendAudioRequest = async (text, model, voice, url) => {
const sendAudioRequest = async (text, model, voice, _url) => {
const ai = new GoogleGenAI({apiKey: getKey('GEMINI_API_KEY')});

const response = await ai.models.generateContent({
Expand Down
142 changes: 71 additions & 71 deletions src/models/glhf.js
Original file line number Diff line number Diff line change
@@ -1,71 +1,71 @@
import OpenAIApi from 'openai';
import { getKey } from '../utils/keys.js';
// Thin client for the glhf.chat OpenAI-compatible chat-completion API.
export class GLHF {
    // Prefix used by the model map to route profiles to this provider.
    static prefix = 'glhf';
    /**
     * @param {string} model_name - glhf model id (e.g. "hf:meta-llama/...").
     * @param {string} [url] - Optional API base URL override.
     * @throws {Error} If GHLF_API_KEY is missing from keys.json.
     */
    constructor(model_name, url) {
        this.model_name = model_name;
        // NOTE(review): key name is spelled "GHLF" (not "GLHF") — it matches the
        // error message below and presumably keys.json; confirm before renaming.
        const apiKey = getKey('GHLF_API_KEY');
        if (!apiKey) {
            throw new Error('API key not found. Please check keys.json and ensure GHLF_API_KEY is defined.');
        }
        this.openai = new OpenAIApi({
            apiKey,
            baseURL: url || "https://glhf.chat/api/openai/v1"
        });
    }
    /**
     * Send a chat completion request, retrying up to 5 times when the model
     * emits a truncated <think> block, and dropping the oldest turn when the
     * context window is exceeded.
     * @param {Array<{role: string, content: string}>} turns - Conversation turns.
     * @param {string} systemMessage - System prompt prepended to the turns.
     * @param {string} [stop_seq='***'] - Stop sequence passed to the API.
     * @returns {Promise<string>} Model reply, or a fallback message on failure.
     */
    async sendRequest(turns, systemMessage, stop_seq = '***') {
        // Construct the message array for the API request.
        let messages = [{ role: 'system', content: systemMessage }].concat(turns);
        const pack = {
            model: this.model_name || "hf:meta-llama/Llama-3.1-405B-Instruct",
            messages,
            stop: [stop_seq]
        };
        const maxAttempts = 5;
        let attempt = 0;
        let finalRes = null;
        while (attempt < maxAttempts) {
            attempt++;
            console.log(`Awaiting glhf.chat API response... (attempt: ${attempt})`);
            try {
                let completion = await this.openai.chat.completions.create(pack);
                // A 'length' finish reason means the context window overflowed.
                if (completion.choices[0].finish_reason === 'length') {
                    throw new Error('Context length exceeded');
                }
                let res = completion.choices[0].message.content;
                // If there's an open <think> tag without a corresponding </think>, retry.
                if (res.includes("<think>") && !res.includes("</think>")) {
                    console.warn("Partial <think> block detected. Re-generating...");
                    continue;
                }
                // If there's a closing </think> tag but no opening <think>, prepend one.
                if (res.includes("</think>") && !res.includes("<think>")) {
                    res = "<think>" + res;
                }
                finalRes = res.replace(/<\|separator\|>/g, '*no response*');
                break; // Valid response obtained.
            } catch (err) {
                // On context overflow, recurse with the oldest turn dropped.
                if ((err.message === 'Context length exceeded' || err.code === 'context_length_exceeded') && turns.length > 1) {
                    console.log('Context length exceeded, trying again with shorter context.');
                    return await this.sendRequest(turns.slice(1), systemMessage, stop_seq);
                } else {
                    console.error(err);
                    finalRes = 'My brain disconnected, try again.';
                    break;
                }
            }
        }
        // Reached only when every attempt produced a partial <think> block.
        if (finalRes === null) {
            finalRes = "I thought too hard, sorry, try again";
        }
        return finalRes;
    }
    // glhf.chat does not provide an embeddings endpoint.
    async embed(text) {
        throw new Error('Embeddings are not supported by glhf.');
    }
}
import OpenAIApi from 'openai';
import { getKey } from '../utils/keys.js';

/**
 * Chat-completion client for glhf.chat, which exposes an OpenAI-compatible API.
 */
export class GLHF {
    // Profile prefix used by the model map to route to this provider.
    static prefix = 'glhf';

    /**
     * @param {string} model_name - Model identifier (e.g. "hf:meta-llama/...").
     * @param {string} [url] - Optional base URL override for the API endpoint.
     * @throws {Error} When GHLF_API_KEY is not present in keys.json.
     */
    constructor(model_name, url) {
        this.model_name = model_name;
        // NOTE(review): key is spelled "GHLF" — matches the error text below; confirm before renaming.
        const apiKey = getKey('GHLF_API_KEY');
        if (!apiKey) {
            throw new Error('API key not found. Please check keys.json and ensure GHLF_API_KEY is defined.');
        }
        this.openai = new OpenAIApi({
            apiKey,
            baseURL: url || "https://glhf.chat/api/openai/v1"
        });
    }

    /**
     * Request a chat completion. Retries up to 5 times when the model emits a
     * truncated <think> block, and drops the oldest turn on context overflow.
     * @param {Array<{role: string, content: string}>} turns - Conversation turns.
     * @param {string} systemMessage - System prompt placed before the turns.
     * @param {string} [stop_seq='***'] - Stop sequence forwarded to the API.
     * @returns {Promise<string>} The model reply, or a fallback message on failure.
     */
    async sendRequest(turns, systemMessage, stop_seq = '***') {
        const pack = {
            model: this.model_name || "hf:meta-llama/Llama-3.1-405B-Instruct",
            messages: [{ role: 'system', content: systemMessage }].concat(turns),
            stop: [stop_seq]
        };

        for (let attempt = 1; attempt <= 5; attempt++) {
            console.log(`Awaiting glhf.chat API response... (attempt: ${attempt})`);
            try {
                const completion = await this.openai.chat.completions.create(pack);
                // 'length' finish reason signals the context window overflowed.
                if (completion.choices[0].finish_reason === 'length') {
                    throw new Error('Context length exceeded');
                }
                let reply = completion.choices[0].message.content;
                const hasOpen = reply.includes("<think>");
                const hasClose = reply.includes("</think>");
                // A dangling <think> means the reasoning block was cut off: regenerate.
                if (hasOpen && !hasClose) {
                    console.warn("Partial <think> block detected. Re-generating...");
                    continue;
                }
                // A stray </think> without its opener gets one prepended.
                if (hasClose && !hasOpen) {
                    reply = "<think>" + reply;
                }
                return reply.replace(/<\|separator\|>/g, '*no response*');
            } catch (err) {
                const contextOverflow = err.message === 'Context length exceeded' || err.code === 'context_length_exceeded';
                if (contextOverflow && turns.length > 1) {
                    console.log('Context length exceeded, trying again with shorter context.');
                    return await this.sendRequest(turns.slice(1), systemMessage, stop_seq);
                }
                console.error(err);
                return 'My brain disconnected, try again.';
            }
        }
        // Reached only when every attempt produced a partial <think> block.
        return "I thought too hard, sorry, try again";
    }

    /** glhf.chat does not provide an embeddings endpoint. */
    async embed(_text) {
        throw new Error('Embeddings are not supported by glhf.');
    }
}
4 changes: 2 additions & 2 deletions src/models/gpt.js
Original file line number Diff line number Diff line change
Expand Up @@ -22,8 +22,8 @@ export class GPT {
}

async sendRequest(turns, systemMessage, stop_seq='***') {
let messages = strictFormat(turns);
messages = messages.map(message => {
let _messages = strictFormat(turns);
_messages = _messages.map(message => {
message.content += stop_seq;
return message;
});
Expand Down
11 changes: 8 additions & 3 deletions src/models/grok.js
Original file line number Diff line number Diff line change
Expand Up @@ -35,9 +35,14 @@ export class Grok {
///console.log('Messages:', messages);
let completion = await this.openai.chat.completions.create(pack);
if (completion.choices[0].finish_reason == 'length')
throw new Error('Context length exceeded');
throw new Error('Context length exceeded');
console.log('Received.')
res = completion.choices[0].message.content;
this._lastUsage = completion.usage ? {
prompt_tokens: completion.usage.prompt_tokens || 0,
completion_tokens: completion.usage.completion_tokens || 0,
total_tokens: completion.usage.total_tokens || 0,
} : null;
}
catch (err) {
if ((err.message == 'Context length exceeded' || err.code == 'context_length_exceeded') && turns.length > 1) {
Expand All @@ -52,7 +57,7 @@ export class Grok {
}
}
// sometimes outputs special token <|separator|>, just replace it
return res.replace(/<\|separator\|>/g, '*no response*');
return (res ?? '').replace(/<\|separator\|>/g, '*no response*');
}

async sendVisionRequest(messages, systemMessage, imageBuffer) {
Expand All @@ -73,7 +78,7 @@ export class Grok {
return this.sendRequest(imageMessages, systemMessage);
}

async embed(text) {
async embed(_text) {
throw new Error('Embeddings are not supported by Grok.');
}
}
Expand Down
2 changes: 1 addition & 1 deletion src/models/huggingface.js
Original file line number Diff line number Diff line change
Expand Up @@ -80,7 +80,7 @@ export class HuggingFace {
return finalRes;
}

async embed(text) {
async embed(_text) {
throw new Error('Embeddings are not supported by HuggingFace.');
}
}
Loading
Loading