chore: update models 2024-05-07 (#50)
* chore: update perplexity models

* chore: update mistral models

* chore: update default models
mdjastrzebski authored May 7, 2024
1 parent 2c5e4bf · commit 8bdf2b2
Showing 6 changed files with 19 additions and 23 deletions.
2 changes: 1 addition & 1 deletion src/commands/chat/providers.tsx
@@ -1,4 +1,4 @@
-import openAi from '../../engine/providers/openAi.js';
+import openAi from '../../engine/providers/open-ai.js';
 import anthropic from '../../engine/providers/anthropic.js';
 import perplexity from '../../engine/providers/perplexity.js';
 import mistral from '../../engine/providers/mistral.js';

4 changes: 1 addition & 3 deletions src/commands/chat/state/init.ts
@@ -26,9 +26,7 @@ export function initChatState(
     ? provider.modelAliases[modelOrAlias] ?? modelOrAlias
     : provider.defaultModel;
 
-  const systemPrompt = !provider.skipSystemPrompt?.includes(model)
-    ? providerFileConfig.systemPrompt ?? DEFAULT_SYSTEM_PROMPT
-    : undefined;
+  const systemPrompt = providerFileConfig.systemPrompt ?? DEFAULT_SYSTEM_PROMPT;
 
   const providerConfig = {
     apiKey: providerFileConfig.apiKey,
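
For reference, the behavior after this hunk can be sketched as standalone TypeScript (a hedged reconstruction, not the repo's exact code; the provider data and the DEFAULT_SYSTEM_PROMPT value below are hypothetical):

// Alias resolution as in the context lines above: a known alias maps to its
// concrete model, unknown names pass through, no argument means the default.
const DEFAULT_SYSTEM_PROMPT = 'You are a helpful assistant.'; // assumed value

const provider = {
  defaultModel: 'open-mistral-7b',
  modelAliases: { large: 'mistral-large-latest' } as Record<string, string>,
};

function resolveModel(modelOrAlias?: string): string {
  return modelOrAlias
    ? provider.modelAliases[modelOrAlias] ?? modelOrAlias
    : provider.defaultModel;
}

resolveModel('large'); // 'mistral-large-latest' (alias hit)
resolveModel('open-mixtral-8x22b'); // unknown name passes through unchanged
resolveModel(); // 'open-mistral-7b' (provider default)

// After this commit the system prompt no longer depends on the model:
// a configured prompt wins, otherwise the default applies to every model.
const configuredPrompt: string | undefined = undefined;
const systemPrompt = configuredPrompt ?? DEFAULT_SYSTEM_PROMPT;
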
7 changes: 4 additions & 3 deletions src/engine/providers/mistral.ts
@@ -26,14 +26,15 @@ const Mistral: Provider = {
   modelPricing: {
     'open-mistral-7b': { inputTokensCost: 0.25, outputTokensCost: 0.25 },
     'open-mixtral-8x7b': { inputTokensCost: 0.7, outputTokensCost: 0.7 },
-    'mistral-small-latest': { inputTokensCost: 2, outputTokensCost: 6 },
+    'open-mixtral-8x22b': { inputTokensCost: 2, outputTokensCost: 6 },
+    'mistral-small-latest': { inputTokensCost: 1, outputTokensCost: 3 },
     'mistral-medium-latest': { inputTokensCost: 2.7, outputTokensCost: 8.1 },
-    'mistral-large-latest': { inputTokensCost: 8, outputTokensCost: 24 },
+    'mistral-large-latest': { inputTokensCost: 4, outputTokensCost: 12 },
   },
 
   modelAliases: {
     mistral: 'open-mistral-7b',
-    mixtral: 'open-mixtral-8x7b',
+    mixtral: 'open-mixtral-8x22b',
     small: 'mistral-small-latest',
     medium: 'mistral-medium-latest',
     large: 'mistral-large-latest',
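
The visible effect of the alias change, as a hedged sketch (the Mistral object below is trimmed to the fields this hunk touches; the values are copied from the diff):

// The `mixtral` alias now resolves to the 8x22b model, and therefore to its
// pricing entry; 8x7b stays available under its full name.
const Mistral = {
  modelPricing: {
    'open-mixtral-8x7b': { inputTokensCost: 0.7, outputTokensCost: 0.7 },
    'open-mixtral-8x22b': { inputTokensCost: 2, outputTokensCost: 6 },
  } as Record<string, { inputTokensCost: number; outputTokensCost: number }>,
  modelAliases: { mixtral: 'open-mixtral-8x22b' } as Record<string, string>,
};

const model = Mistral.modelAliases['mixtral']; // 'open-mixtral-8x22b'
const pricing = Mistral.modelPricing[model]; // { inputTokensCost: 2, outputTokensCost: 6 }
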
src/engine/providers/openAi.ts → src/engine/providers/open-ai.ts (file renamed without changes).

25 changes: 12 additions & 13 deletions src/engine/providers/perplexity.ts
@@ -10,38 +10,37 @@ const Perplexity: Provider = {
   apiKeyUrl: 'https://perplexity.ai/settings/api',
 
   // Perplexity models: https://docs.perplexity.ai/docs/model-cards
-  defaultModel: 'sonar-medium-chat',
+  defaultModel: 'llama-3-sonar-large-32k-chat',
 
   // Price per 1M tokens [input, output], per 1k requests.
-  // Source: https://docs.perplexity.ai/docs/model-cards
+  // Source: https://docs.perplexity.ai/docs/pricing
   modelPricing: {
-    'sonar-small-chat': { inputTokensCost: 0.2, outputTokensCost: 0.2 },
-    'sonar-medium-chat': { inputTokensCost: 0.6, outputTokensCost: 0.6 },
-    'sonar-small-online': {
+    'llama-3-sonar-small-32k-chat': { inputTokensCost: 0.2, outputTokensCost: 0.2 },
+    'llama-3-sonar-small-32k-online': {
       inputTokensCost: 0.2,
       outputTokensCost: 0.2,
       requestsCost: 5,
     },
-    'sonar-medium-online': {
+    'llama-3-sonar-large-32k-chat': { inputTokensCost: 0.6, outputTokensCost: 0.6 },
+    'llama-3-sonar-large-32k-online': {
       inputTokensCost: 0.6,
       outputTokensCost: 0.6,
       requestsCost: 5,
     },
-    'codellama-70b-instruct': { inputTokensCost: 1, outputTokensCost: 1 },
-    'mistral-7b-instruct': { inputTokensCost: 0.2, outputTokensCost: 0.2 },
+    'llama-3-8b-instruct': { inputTokensCost: 0.2, outputTokensCost: 0.2 },
+    'llama-3-70b-instruct': { inputTokensCost: 1, outputTokensCost: 1 },
     'mixtral-8x7b-instruct': { inputTokensCost: 0.6, outputTokensCost: 0.6 },
   },
 
   modelAliases: {
-    online: 'sonar-medium-online',
-    codellama: 'codellama-70b-instruct',
-    mistral: 'mistral-7b-instruct',
-    mixtral: 'mixtral-8x7b-instruct',
+    'small': 'llama-3-sonar-small-32k-chat',
+    'large': 'llama-3-sonar-large-32k-chat',
+    'online': 'llama-3-sonar-large-32k-online',
+    'llama-3': 'llama-3-70b-instruct',
+    'mixtral': 'mixtral-8x7b-instruct',
   },
 
-  skipSystemPrompt: ['sonar-small-online', 'sonar-medium-online'],
-
   getChatCompletion: async (config: ProviderConfig, messages: Message[]) => {
     const api = new OpenAI({
       apiKey: config.apiKey,
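
The pricing comment above ("per 1M tokens ... per 1k requests") implies a cost formula along the following lines. This is a hedged sketch: estimateCostUsd is a hypothetical helper, not a function from this repo.

interface ModelPricing {
  inputTokensCost: number; // USD per 1M input tokens
  outputTokensCost: number; // USD per 1M output tokens
  requestsCost?: number; // USD per 1k requests (online models only)
}

function estimateCostUsd(
  pricing: ModelPricing,
  inputTokens: number,
  outputTokens: number,
  requests = 1,
): number {
  return (
    (inputTokens / 1_000_000) * pricing.inputTokensCost +
    (outputTokens / 1_000_000) * pricing.outputTokensCost +
    (requests / 1000) * (pricing.requestsCost ?? 0)
  );
}

// One request to 'llama-3-sonar-large-32k-online' with 10k input + 2k output tokens:
// 0.006 + 0.0012 + 0.005 ≈ $0.0122
estimateCostUsd({ inputTokensCost: 0.6, outputTokensCost: 0.6, requestsCost: 5 }, 10_000, 2_000);
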
4 changes: 1 addition & 3 deletions src/engine/providers/provider.ts
@@ -1,6 +1,6 @@
 import type { Message, ModelResponse, ModelResponseUpdate } from '../inference.js';
 import type { ProviderConfig } from './config.js';
-import openAi from './openAi.js';
+import openAi from './open-ai.js';
 import perplexity from './perplexity.js';
 import anthropic from './anthropic.js';
 import mistral from './mistral.js';
@@ -17,8 +17,6 @@ export interface Provider {
   modelPricing: Record<string, ModelPricing>;
   modelAliases: Record<string, string>;
 
-  skipSystemPrompt?: string[];
-
   getChatCompletion: (config: ProviderConfig, messages: Message[]) => Promise<ModelResponse>;
   getChatCompletionStream?: (
     config: ProviderConfig,
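
Taken together, the interface after this commit looks roughly like the sketch below (a hedged reconstruction limited to the fields visible in the diff; the imported types are replaced with placeholders, not the repo's real definitions):

type Message = { role: string; content: string }; // placeholder shape
type ModelResponse = unknown; // placeholder
type ProviderConfig = { apiKey: string }; // trimmed to the field shown above
type ModelPricing = {
  inputTokensCost: number;
  outputTokensCost: number;
  requestsCost?: number;
};

interface Provider {
  modelPricing: Record<string, ModelPricing>;
  modelAliases: Record<string, string>;
  // skipSystemPrompt?: string[]  <- removed by this commit
  getChatCompletion: (config: ProviderConfig, messages: Message[]) => Promise<ModelResponse>;
  // getChatCompletionStream?: (...) continues past the visible hunk
}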
