diff --git a/app/multiplayer/[multiplayerGameId]/page.tsx b/app/multiplayer/[multiplayerGameId]/page.tsx index 4e877a1..a0cd4f3 100644 --- a/app/multiplayer/[multiplayerGameId]/page.tsx +++ b/app/multiplayer/[multiplayerGameId]/page.tsx @@ -34,6 +34,9 @@ export default function MultiplayerPage({ return ( Multiplayer +
+ Cost: ${multiplayerGame.cost?.toFixed(2)} +
{ - await ctx.db.patch(args.multiplayerGameId, { + const patch: { + boardState: string[][]; + completedTurns: number; + cost?: number; + } = { boardState: args.boardState, completedTurns: args.completedTurns, - }); + }; + + if (args.cost !== undefined) { + patch.cost = args.cost; + } + + await ctx.db.patch(args.multiplayerGameId, patch); }, }); @@ -113,21 +124,16 @@ export const runMultiplayerGameTurn = internalAction({ const map = new ZombieSurvival(multiplayerGame.boardState); if (turn === "Z") { + map.stepZombies(); + const numPlayers = multiplayerGame.playerMap.length; - let zombiesToSpawn = 1; - if (numPlayers === 1) { - zombiesToSpawn = 1; - } else if (numPlayers === 2) { - zombiesToSpawn = 2; - } else if (numPlayers === 3) { - zombiesToSpawn = 2; - } else if (numPlayers === 4) { - zombiesToSpawn = 3; - } + const zombiesToSpawn = Math.min( + Math.floor(Math.random() * numPlayers) + 1, + numPlayers, + ); for (let i = 0; i < zombiesToSpawn; i++) { map.spawnRandomZombie(); } - map.stepZombies(); await ctx.runMutation( internal.multiplayerGames.updateMultiplayerGameBoardState, @@ -177,6 +183,8 @@ export const runMultiplayerGameTurn = internalAction({ turn, ); + console.log("cost", results.cost); + if (results.moveDirection && results.moveDirection !== "STAY") { const moveDirection = fromDirectionString(results.moveDirection); const p = map.getPlayer(turn); @@ -205,6 +213,7 @@ export const runMultiplayerGameTurn = internalAction({ multiplayerGameId, boardState: map.getState(), completedTurns: multiplayerGame.completedTurns, + cost: (multiplayerGame.cost ?? 0) + (results.cost ?? 
0), }, ); } diff --git a/convex/schema.ts b/convex/schema.ts index 06a142b..9e13f8d 100644 --- a/convex/schema.ts +++ b/convex/schema.ts @@ -84,6 +84,7 @@ export default defineSchema({ multiplayerGames: defineTable({ boardState: v.array(v.array(v.string())), completedTurns: v.number(), + cost: v.optional(v.number()), playerMap: v.array( v.object({ modelSlug: v.string(), diff --git a/models/claude-3-5-sonnet.ts b/models/claude-3-5-sonnet.ts index be2c33c..3b91b80 100644 --- a/models/claude-3-5-sonnet.ts +++ b/models/claude-3-5-sonnet.ts @@ -1,6 +1,7 @@ import { type ModelHandler } from "."; import { Anthropic } from "@anthropic-ai/sdk"; import { z } from "zod"; +import { calculateTotalCost } from "./pricing"; const responseSchema = z.object({ playerCoordinates: z.array(z.number()), @@ -53,23 +54,6 @@ export const claude35sonnet: ModelHandler = async ( const totalTokensUsed = completion.usage.input_tokens + completion.usage.output_tokens; - // https://docs.anthropic.com/en/docs/about-claude/models - const getPriceForInputToken = (tokenCount?: number) => { - if (!tokenCount) { - return 0; - } - - return (3.0 / 1_000_000) * tokenCount; - }; - - const getPriceForOutputToken = (tokenCount?: number) => { - if (!tokenCount) { - return 0; - } - - return (15.0 / 1_000_000) * tokenCount; - }; - return { boxCoordinates: response.data.boxCoordinates, playerCoordinates: response.data.playerCoordinates, @@ -77,8 +61,10 @@ export const claude35sonnet: ModelHandler = async ( promptTokens: promptTokens, outputTokens: outputTokens, totalTokensUsed: totalTokensUsed, - totalRunCost: - getPriceForInputToken(promptTokens) + - getPriceForOutputToken(outputTokens), + totalRunCost: calculateTotalCost( + "claude-3.5-sonnet", + promptTokens, + outputTokens, + ), }; }; diff --git a/models/gemini-1.5-pro.ts b/models/gemini-1.5-pro.ts index a8549b0..45507c5 100644 --- a/models/gemini-1.5-pro.ts +++ b/models/gemini-1.5-pro.ts @@ -1,5 +1,6 @@ import { type ModelHandler } from "."; import { 
GoogleGenerativeAI, SchemaType } from "@google/generative-ai"; +import { calculateTotalCost } from "./pricing"; interface GeminiResponse { boxCoordinates: number[][]; @@ -73,27 +74,6 @@ export const gemini15pro: ModelHandler = async ( const outputTokens = result.response.usageMetadata?.candidatesTokenCount; const totalTokensUsed = result.response.usageMetadata?.totalTokenCount; - // https://ai.google.dev/pricing#1_5pro - const getPriceForInputToken = (tokenCount?: number) => { - if (!tokenCount) { - return 0; - } - if (tokenCount > 128_000) { - return (2.5 / 1_000_000) * tokenCount; - } - return (1.25 / 1_000_000) * tokenCount; - }; - - const getPriceForOutputToken = (tokenCount?: number) => { - if (!tokenCount) { - return 0; - } - if (tokenCount > 128_000) { - return (10.0 / 1_000_000) * tokenCount; - } - return (5.0 / 1_000_000) * tokenCount; - }; - return { boxCoordinates: parsedResponse.boxCoordinates, playerCoordinates: parsedResponse.playerCoordinates, @@ -101,8 +81,10 @@ export const gemini15pro: ModelHandler = async ( promptTokens: promptTokens, outputTokens: outputTokens, totalTokensUsed: totalTokensUsed, - totalRunCost: - getPriceForInputToken(promptTokens) + - getPriceForOutputToken(outputTokens), + totalRunCost: calculateTotalCost( + "gemini-1.5-pro", + promptTokens, + outputTokens, + ), }; }; diff --git a/models/gpt-4o.ts b/models/gpt-4o.ts index b2ef2f4..1a410f6 100644 --- a/models/gpt-4o.ts +++ b/models/gpt-4o.ts @@ -2,6 +2,11 @@ import { type ModelHandler } from "."; import OpenAI from "openai"; import { zodResponseFormat } from "openai/helpers/zod"; import { z } from "zod"; +import { + calculateTotalCost, + getPriceForInputToken, + getPriceForOutputToken, +} from "./pricing"; const responseSchema = z.object({ reasoning: z.string(), @@ -42,23 +47,6 @@ export const gpt4o: ModelHandler = async (systemPrompt, userPrompt, config) => { const outputTokens = completion.usage?.completion_tokens; const totalTokensUsed = completion.usage?.total_tokens; - // 
https://openai.com/api/pricing/ - const getPriceForInputToken = (tokenCount?: number) => { - if (!tokenCount) { - return 0; - } - - return (2.5 / 1_000_000) * tokenCount; - }; - - const getPriceForOutputToken = (tokenCount?: number) => { - if (!tokenCount) { - return 0; - } - - return (10.0 / 1_000_000) * tokenCount; - }; - return { boxCoordinates: response.parsed.boxCoordinates, playerCoordinates: response.parsed.playerCoordinates, @@ -66,8 +54,6 @@ export const gpt4o: ModelHandler = async (systemPrompt, userPrompt, config) => { promptTokens: promptTokens, outputTokens: outputTokens, totalTokensUsed: totalTokensUsed, - totalRunCost: - getPriceForInputToken(promptTokens) + - getPriceForOutputToken(outputTokens), + totalRunCost: calculateTotalCost("gpt-4o", promptTokens, outputTokens), }; }; diff --git a/models/mistral-large-2.ts b/models/mistral-large-2.ts index 9598f8d..d3a28ba 100644 --- a/models/mistral-large-2.ts +++ b/models/mistral-large-2.ts @@ -2,6 +2,7 @@ import { type ModelHandler } from "."; import { isJSON } from "../lib/utils"; import { Mistral } from "@mistralai/mistralai"; import { z } from "zod"; +import { calculateTotalCost } from "./pricing"; const responseSchema = z.object({ reasoning: z.string(), @@ -49,31 +50,16 @@ export const mistralLarge2: ModelHandler = async ( const outputTokens = completion.usage.completionTokens; const totalTokensUsed = completion.usage.totalTokens; - // https://mistral.ai/technology/ - const getPriceForInputToken = (tokenCount?: number) => { - if (!tokenCount) { - return 0; - } - - return (2.0 / 1_000_000) * tokenCount; - }; - - const getPriceForOutputToken = (tokenCount?: number) => { - if (!tokenCount) { - return 0; - } - - return (6.0 / 1_000_000) * tokenCount; - }; - const response = await responseSchema.safeParseAsync({ ...JSON.parse(content), promptTokens: completion.usage.promptTokens, outputTokens: completion.usage.completionTokens, totalTokensUsed: completion.usage.totalTokens, - totalRunCost: - 
getPriceForInputToken(promptTokens) + - getPriceForOutputToken(outputTokens), + totalRunCost: calculateTotalCost( + "mistral-large-2", + promptTokens, + outputTokens, + ), }); if (!response.success) { diff --git a/models/multiplayer/gpt-4o.ts b/models/multiplayer/gpt-4o.ts index a6e24e3..b97533f 100644 --- a/models/multiplayer/gpt-4o.ts +++ b/models/multiplayer/gpt-4o.ts @@ -1,4 +1,5 @@ import { type MultiplayerModelHandler } from "."; +import { calculateTotalCost } from "../pricing"; import OpenAI from "openai"; import { zodResponseFormat } from "openai/helpers/zod"; import { z } from "zod"; @@ -35,6 +36,9 @@ export const gpt4o: MultiplayerModelHandler = async ( const response = completion.choices[0].message; + const promptTokens = completion.usage?.prompt_tokens; + const outputTokens = completion.usage?.completion_tokens; + if (response.refusal) { throw new Error(`Refusal: ${response.refusal}`); } else if (!response.parsed) { @@ -44,5 +48,6 @@ export const gpt4o: MultiplayerModelHandler = async ( return { moveDirection: response.parsed.moveDirection, zombieToShoot: response.parsed.zombieToShoot, + cost: calculateTotalCost("gpt-4o", promptTokens, outputTokens), }; }; diff --git a/models/multiplayer/index.ts b/models/multiplayer/index.ts index 1e408ae..ca30c50 100644 --- a/models/multiplayer/index.ts +++ b/models/multiplayer/index.ts @@ -28,6 +28,7 @@ The 2d Grid is made up of characters, where each character has a meaning. - Zombies can't move through rocks. - Zombies can't move through each other. - Zombies always try to move towards the playing using BFS algorithm. +- Zombies will spawn near the edges of the map # Player Rules - Players can move horizontally or vertically. @@ -35,9 +36,9 @@ The 2d Grid is made up of characters, where each character has a meaning. - Players can throw one popsickle at a zombie each turn. - Players should move away from zombies. 
- Players should probably shoot at the closest zombie +- Stay away from the edges of the map because zombies spawn there. # Output Format - - Respond only with valid JSON. Do not write an introduction or summary. - Assume a position on the 2d grid is always represented as [ROW, COL]. - Your output should be a JSON object with the following format: @@ -61,6 +62,7 @@ export type MultiplayerModelHandler = ( ) => Promise<{ moveDirection: string; zombieToShoot: number[]; + cost: number; }>; const MAX_RETRIES = 1; @@ -76,6 +78,7 @@ export type RunModelResult = { moveDirection?: string; zombieToShoot?: number[]; reasoning?: string; + cost?: number; }; export async function runMultiplayerModel( @@ -114,6 +117,7 @@ export async function runMultiplayerModel( return { moveDirection: result.moveDirection, zombieToShoot: result.zombieToShoot, + cost: result.cost, }; } catch (error) { if (retry === MAX_RETRIES || reasoning === null) { diff --git a/models/perplexity-llama-3.1.ts b/models/perplexity-llama-3.1.ts index 17ea955..b40fb86 100644 --- a/models/perplexity-llama-3.1.ts +++ b/models/perplexity-llama-3.1.ts @@ -1,6 +1,7 @@ import { isJSON } from "../lib/utils"; import { z } from "zod"; import { ModelHandler } from "./index"; +import { calculateTotalCost } from "./pricing"; const completionSchema = z.object({ id: z.string(), @@ -94,29 +95,11 @@ export const perplexityLlama31: ModelHandler = async ( throw new Error("JSON returned by perplexity is malformed"); } - // https://docs.perplexity.ai/guides/pricing#perplexity-sonar-models - const getPriceForInputToken = (tokenCount?: number) => { - if (!tokenCount) { - return 0; - } - - return (1.0 / 1_000_000) * tokenCount; - }; - - const getPriceForOutputToken = (tokenCount?: number) => { - if (!tokenCount) { - return 0; - } - - return (1.0 / 1_000_000) * tokenCount; - }; - - const priceForRequest = 5 / 1_000; - - const totalRunCost = - getPriceForInputToken(promptTokens) + - getPriceForOutputToken(outputTokens) + - 
priceForRequest; + const totalRunCost = calculateTotalCost( + "perplexity-llama-3.1", + promptTokens, + outputTokens, + ); const parsedContent = JSON.parse(jsonContent); const response = await responseSchema.safeParseAsync({ diff --git a/models/pricing.ts b/models/pricing.ts new file mode 100644 index 0000000..45d4fa3 --- /dev/null +++ b/models/pricing.ts @@ -0,0 +1,90 @@ +import { ModelSlug } from "@/convex/constants"; + +export interface ModelPricing { + inputTokenPrice: number; // Price per 1M tokens + outputTokenPrice: number; // Price per 1M tokens + requestPrice?: number; // Fixed price per request if applicable +} + +export const MODEL_PRICING: Record<ModelSlug, ModelPricing> = { + "gpt-4o": { + inputTokenPrice: 2.5, + outputTokenPrice: 10.0, + }, + "claude-3.5-sonnet": { + inputTokenPrice: 3.0, + outputTokenPrice: 15.0, + }, + "perplexity-llama-3.1": { + inputTokenPrice: 1.0, + outputTokenPrice: 1.0, + requestPrice: 0.005, + }, + "mistral-large-2": { + inputTokenPrice: 2.0, + outputTokenPrice: 6.0, + }, + "gemini-1.5-pro": { + inputTokenPrice: 1.25, + outputTokenPrice: 5.0, + }, +}; + +export const getPriceForInputToken = ( + modelId: ModelSlug, + tokenCount?: number, +) => { + if (!tokenCount) { + return 0; + } + + const pricing = MODEL_PRICING[modelId]; + if (!pricing) { + throw new Error(`No pricing found for model: ${modelId}`); + } + + // Special case for Gemini 1.5 Pro + if (modelId === "gemini-1.5-pro" && tokenCount > 128_000) { + return (2.5 / 1_000_000) * tokenCount; + } + + return (pricing.inputTokenPrice / 1_000_000) * tokenCount; +}; + +export const getPriceForOutputToken = ( + modelId: ModelSlug, + tokenCount?: number, +) => { + if (!tokenCount) { + return 0; + } + + const pricing = MODEL_PRICING[modelId]; + if (!pricing) { + throw new Error(`No pricing found for model: ${modelId}`); + } + + // Special case for Gemini 1.5 Pro + if (modelId === "gemini-1.5-pro" && tokenCount > 128_000) { + return (10.0 / 1_000_000) * tokenCount; + } + + return (pricing.outputTokenPrice 
/ 1_000_000) * tokenCount; +}; + +export const getRequestPrice = (modelId: ModelSlug) => { + const pricing = MODEL_PRICING[modelId]; + return pricing?.requestPrice ?? 0; +}; + +export const calculateTotalCost = ( + modelId: ModelSlug, + promptTokens?: number, + outputTokens?: number, +) => { + return ( + getPriceForInputToken(modelId, promptTokens) + + getPriceForOutputToken(modelId, outputTokens) + + getRequestPrice(modelId) + ); +};