From b8510098112a8020aa27c06313049476ba6e5c0e Mon Sep 17 00:00:00 2001 From: CodeWithKrish12 Date: Thu, 17 Oct 2024 12:30:06 +0530 Subject: [PATCH 1/4] Added claude 3.5 sonnet and perplexity with llama model to curated ai models list --- README.md | 2 + convex/constants.ts | 8 ++++ models/claude-3-5-sonnet.ts | 47 +++++++++++++++++++ models/index.ts | 10 ++++ models/perplexity-llama.ts | 93 +++++++++++++++++++++++++++++++++++++ package.json | 2 + 6 files changed, 162 insertions(+) create mode 100644 models/claude-3-5-sonnet.ts create mode 100644 models/perplexity-llama.ts diff --git a/README.md b/README.md index 85e8af7..79f8ca6 100644 --- a/README.md +++ b/README.md @@ -29,6 +29,8 @@ Add optional environment variable/s for simulating real AI models without mockup - `npx convex env set GEMINI_API_KEY YOUR_API_KEY` - `npx convex env set OPENAI_API_KEY YOUR_API_KEY` +- `npx convex env set ANTHROPIC_API_KEY YOUR_API_KEY` +- `npx convex env set PERPLEXITY_API_KEY YOUR_API_KEY` also, you may need to run, but I think the initial setup does that. 
diff --git a/convex/constants.ts b/convex/constants.ts index 849abb4..0d6ba7a 100644 --- a/convex/constants.ts +++ b/convex/constants.ts @@ -7,6 +7,14 @@ export const AI_MODELS = [ model: "gpt-4o", name: "OpenAI - 4o Mini", }, + { + model: "claude-3.5-sonnet", + name: "Claude 3.5 Sonnet" + }, + { + model: "perplexity-llama-3.1", + name: "Perplexity AI" + } ]; export const AI_MODEL_IDS = AI_MODELS.map((model) => model.model); diff --git a/models/claude-3-5-sonnet.ts b/models/claude-3-5-sonnet.ts new file mode 100644 index 0000000..3ccd970 --- /dev/null +++ b/models/claude-3-5-sonnet.ts @@ -0,0 +1,47 @@ +import { Anthropic } from '@anthropic-ai/sdk'; +import { type ModelHandler } from "."; + +const anthropic = new Anthropic({ + apiKey: process.env.ANTHROPIC_API_KEY, +}); + +export const claude35sonnet: ModelHandler = async (prompt, map) => { + try { + const response = await anthropic.messages.create({ + model: "claude-3-sonnet-20240307", + max_tokens: 1024, + temperature: 0, + system: prompt, + messages: [ + { + role: "user", + content: JSON.stringify(map), + }, + ], + }); + + const content = response.content[0]; + + if (content.type !== 'text') { + throw new Error('Unexpected response type from Claude'); + } + + const parsedResponse = JSON.parse(content.text); + + // Validate the response structure + if (!Array.isArray(parsedResponse.boxCoordinates) || + !Array.isArray(parsedResponse.playerCoordinates) || + typeof parsedResponse.reasoning !== 'string') { + throw new Error('Invalid response structure'); + } + + return { + boxCoordinates: parsedResponse.boxCoordinates, + playerCoordinates: parsedResponse.playerCoordinates, + reasoning: parsedResponse.reasoning, + }; + } catch (error) { + console.error('Error in Claude 3.5 Sonnet handler:', error); + throw new Error('Failed to process Claude 3.5 Sonnet response'); + } +}; \ No newline at end of file diff --git a/models/index.ts b/models/index.ts index 49c36c3..8615503 100644 --- a/models/index.ts +++ 
b/models/index.ts @@ -1,5 +1,7 @@ import { gemini15pro } from "./gemini-1.5-pro"; import { gpt4o } from "./gpt-4o"; +import { claude35sonnet } from "./claude-3-5-sonnet"; +import { perplexityModel } from "./perplexity-llama"; export type ModelHandler = ( prompt: string, @@ -88,6 +90,14 @@ export async function runModel( result = await gpt4o(prompt, map); break; } + case "claude-3.5-sonnet": { + result = await claude35sonnet(prompt, map); + break; + } + case "perplexity-llama-3.1": { + result = await perplexityModel(prompt, map); + break; + } default: { throw new Error(`Tried running unknown model '${modelId}'`); } diff --git a/models/perplexity-llama.ts b/models/perplexity-llama.ts new file mode 100644 index 0000000..336017f --- /dev/null +++ b/models/perplexity-llama.ts @@ -0,0 +1,93 @@ +import { z } from 'zod'; +import { ModelHandler } from './index'; + +const PerplexityResponseSchema = z.object({ + id: z.string(), + model: z.string(), + object: z.string(), + created: z.number(), + choices: z.array( + z.object({ + index: z.number(), + finish_reason: z.string(), + message: z.object({ + role: z.string(), + content: z.string(), + }), + delta: z.object({ + role: z.string(), + content: z.string(), + }), + }) + ), + usage: z.object({ + prompt_tokens: z.number(), + completion_tokens: z.number(), + total_tokens: z.number(), + }), +}); + +const GameResponseSchema = z.object({ + reasoning: z.string(), + playerCoordinates: z.array(z.number()), + boxCoordinates: z.array(z.array(z.number())), +}); + +export const perplexityModel: ModelHandler = async (prompt: string, map: string[][]) => { + const apiKey = process.env.PERPLEXITY_API_KEY; + if (!apiKey) { + throw new Error('PERPLEXITY_API_KEY is not set in the environment variables'); + } + + const messages = [ + { role: 'system', content: 'Be precise and concise.' 
}, + { role: 'user', content: prompt }, + { role: 'user', content: JSON.stringify(map) }, + ]; + + const options: RequestInit = { + method: 'POST', + headers: { + 'Authorization': `Bearer ${apiKey}`, + 'Content-Type': 'application/json', + }, + body: JSON.stringify({ + model: 'llama-3.1-sonar-large-128k-online', + messages, + temperature: 0.2, + top_p: 0.9, + return_citations: true, + search_domain_filter: ['perplexity.ai'], + return_images: false, + return_related_questions: false, + search_recency_filter: 'month', + top_k: 0, + stream: false, + presence_penalty: 0, + frequency_penalty: 1, + }), + }; + + try { + const response = await fetch('https://api.perplexity.ai/chat/completions', options); + if (!response.ok) { + throw new Error(`HTTP error! status: ${response.status}`); + } + const data = await response.json(); + + const validatedResponse = PerplexityResponseSchema.parse(data); + const content = validatedResponse.choices[0].message.content; + + const parsedContent = JSON.parse(content); + const gameResponse = GameResponseSchema.parse(parsedContent); + + return { + boxCoordinates: gameResponse.boxCoordinates, + playerCoordinates: gameResponse.playerCoordinates, + reasoning: gameResponse.reasoning, + }; + } catch (error) { + console.error('Error:', error); + throw new Error('Failed to run Perplexity model'); + } +}; \ No newline at end of file diff --git a/package.json b/package.json index 4322f2f..c49ddf5 100644 --- a/package.json +++ b/package.json @@ -14,6 +14,7 @@ "lint": "next lint" }, "dependencies": { + "@anthropic-ai/sdk": "^0.29.1", "@auth/core": "^0.34.2", "@convex-dev/auth": "^0.0.71", "@google/generative-ai": "^0.21.0", @@ -25,6 +26,7 @@ "@radix-ui/react-toast": "^1.2.1", "@radix-ui/react-toggle": "^1.1.0", "@radix-ui/react-toggle-group": "^1.1.0", + "axios": "^1.7.7", "class-variance-authority": "^0.7.0", "clsx": "^2.1.1", "convex": "^1.16.0", From 2317f8527a550b600677e50e071bf522ccd7816e Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Jakob=20R=C3=B6ssner?= Date: Thu, 17 Oct 2024 19:40:55 +1100 Subject: [PATCH 2/4] Improved loading states --- app/play/[level]/page.tsx | 26 +++++++++++++++++++++++++- app/play/page.tsx | 13 ++++++++++++- components/ui/skeleton.tsx | 15 +++++++++++++++ 3 files changed, 52 insertions(+), 2 deletions(-) create mode 100644 components/ui/skeleton.tsx diff --git a/app/play/[level]/page.tsx b/app/play/[level]/page.tsx index 4e6860c..3d6b2a2 100644 --- a/app/play/[level]/page.tsx +++ b/app/play/[level]/page.tsx @@ -41,7 +41,31 @@ export default function PlayLevelPage({ const [showOriginalMap, setShowOriginalMap] = useState(true); if (!map) { - return
Loading...
; + return ( +
+
+ + {flags?.showTestPage && ( + setMode(value as "play" | "test")} + > + + Play + Test AI + + + )} +
+

Night #{level}

+ +

Loading...

+
+ ); } function handleRetryClicked() { diff --git a/app/play/page.tsx b/app/play/page.tsx index 2b2e21e..dcc04fa 100644 --- a/app/play/page.tsx +++ b/app/play/page.tsx @@ -12,12 +12,23 @@ import { CardHeader, CardTitle, } from "@/components/ui/card"; +import { Skeleton } from "@/components/ui/skeleton"; export default function PlayPage() { const maps = useQuery(api.maps.getMaps); if (!maps) { - return
Loading...
; + return ( +
+

Choose a Night

+ +
+ {Array.from({ length: 6 }).map((_, index) => ( + + ))} +
+
+ ); } return ( diff --git a/components/ui/skeleton.tsx b/components/ui/skeleton.tsx new file mode 100644 index 0000000..d7e45f7 --- /dev/null +++ b/components/ui/skeleton.tsx @@ -0,0 +1,15 @@ +import { cn } from "@/lib/utils" + +function Skeleton({ + className, + ...props +}: React.HTMLAttributes) { + return ( +
+ ) +} + +export { Skeleton } From 0a43029dc6b3d91ade0cf2057434e6627d7b482b Mon Sep 17 00:00:00 2001 From: Krish Kalaria <96818172+krishkalaria12@users.noreply.github.com> Date: Thu, 17 Oct 2024 18:07:16 +0530 Subject: [PATCH 3/4] Update claude-3-5-sonnet.ts --- models/claude-3-5-sonnet.ts | 73 ++++++++++++++++++------------------- 1 file changed, 35 insertions(+), 38 deletions(-) diff --git a/models/claude-3-5-sonnet.ts b/models/claude-3-5-sonnet.ts index 3ccd970..c47ce9b 100644 --- a/models/claude-3-5-sonnet.ts +++ b/models/claude-3-5-sonnet.ts @@ -1,47 +1,44 @@ -import { Anthropic } from '@anthropic-ai/sdk'; +import { Anthropic } from "@anthropic-ai/sdk"; import { type ModelHandler } from "."; -const anthropic = new Anthropic({ - apiKey: process.env.ANTHROPIC_API_KEY, -}); - export const claude35sonnet: ModelHandler = async (prompt, map) => { - try { - const response = await anthropic.messages.create({ - model: "claude-3-sonnet-20240307", - max_tokens: 1024, - temperature: 0, - system: prompt, - messages: [ - { - role: "user", - content: JSON.stringify(map), - }, - ], - }); + const anthropic = new Anthropic({ + apiKey: process.env.ANTHROPIC_API_KEY, + }); - const content = response.content[0]; + const response = await anthropic.messages.create({ + model: "claude-3-sonnet-20240307", + max_tokens: 1024, + temperature: 0, + system: prompt, + messages: [ + { + role: "user", + content: JSON.stringify(map), + }, + ], + }); - if (content.type !== 'text') { - throw new Error('Unexpected response type from Claude'); - } + const content = response.content[0]; - const parsedResponse = JSON.parse(content.text); + if (content.type !== "text") { + throw new Error("Unexpected response type from Claude"); + } - // Validate the response structure - if (!Array.isArray(parsedResponse.boxCoordinates) || - !Array.isArray(parsedResponse.playerCoordinates) || - typeof parsedResponse.reasoning !== 'string') { - throw new Error('Invalid response structure'); - } + const parsedResponse = 
JSON.parse(content.text); - return { - boxCoordinates: parsedResponse.boxCoordinates, - playerCoordinates: parsedResponse.playerCoordinates, - reasoning: parsedResponse.reasoning, - }; - } catch (error) { - console.error('Error in Claude 3.5 Sonnet handler:', error); - throw new Error('Failed to process Claude 3.5 Sonnet response'); + // Validate the response structure + if ( + !Array.isArray(parsedResponse.boxCoordinates) || + !Array.isArray(parsedResponse.playerCoordinates) || + typeof parsedResponse.reasoning !== "string" + ) { + throw new Error("Invalid response structure"); } -}; \ No newline at end of file + + return { + boxCoordinates: parsedResponse.boxCoordinates, + playerCoordinates: parsedResponse.playerCoordinates, + reasoning: parsedResponse.reasoning, + }; +}; From d69216483bd0175072e8c5e986b5c94d5baaa450 Mon Sep 17 00:00:00 2001 From: Krish Kalaria <96818172+krishkalaria12@users.noreply.github.com> Date: Thu, 17 Oct 2024 19:06:40 +0530 Subject: [PATCH 4/4] Update perplexity-llama.ts with axios --- models/perplexity-llama.ts | 56 +++++++++++++++++--------------------- 1 file changed, 25 insertions(+), 31 deletions(-) diff --git a/models/perplexity-llama.ts b/models/perplexity-llama.ts index 336017f..d3c59c2 100644 --- a/models/perplexity-llama.ts +++ b/models/perplexity-llama.ts @@ -1,3 +1,4 @@ +import axios from 'axios'; import { z } from 'zod'; import { ModelHandler } from './index'; @@ -45,39 +46,32 @@ export const perplexityModel: ModelHandler = async (prompt: string, map: string[ { role: 'user', content: JSON.stringify(map) }, ]; - const options: RequestInit = { - method: 'POST', - headers: { - 'Authorization': `Bearer ${apiKey}`, - 'Content-Type': 'application/json', - }, - body: JSON.stringify({ - model: 'llama-3.1-sonar-large-128k-online', - messages, - temperature: 0.2, - top_p: 0.9, - return_citations: true, - search_domain_filter: ['perplexity.ai'], - return_images: false, - return_related_questions: false, - search_recency_filter: 'month', 
- top_k: 0, - stream: false, - presence_penalty: 0, - frequency_penalty: 1, - }), + const data = { + model: 'llama-3.1-sonar-large-128k-online', + messages, + temperature: 0.2, + top_p: 0.9, + return_citations: true, + search_domain_filter: ['perplexity.ai'], + return_images: false, + return_related_questions: false, + search_recency_filter: 'month', + top_k: 0, + stream: false, + presence_penalty: 0, + frequency_penalty: 1, }; try { - const response = await fetch('https://api.perplexity.ai/chat/completions', options); - if (!response.ok) { - throw new Error(`HTTP error! status: ${response.status}`); - } - const data = await response.json(); - - const validatedResponse = PerplexityResponseSchema.parse(data); + const response = await axios.post('https://api.perplexity.ai/chat/completions', data, { + headers: { + 'Authorization': `Bearer ${apiKey}`, + 'Content-Type': 'application/json', + }, + }); + + const validatedResponse = PerplexityResponseSchema.parse(response.data); const content = validatedResponse.choices[0].message.content; - const parsedContent = JSON.parse(content); const gameResponse = GameResponseSchema.parse(parsedContent); @@ -87,7 +81,7 @@ export const perplexityModel: ModelHandler = async (prompt: string, map: string[ reasoning: gameResponse.reasoning, }; } catch (error) { - console.error('Error:', error); + console.error('Failed to run Perplexity model Error:', error); throw new Error('Failed to run Perplexity model'); } -}; \ No newline at end of file +};