diff --git a/extensions/vscode/config_schema.json b/extensions/vscode/config_schema.json
index 2a908c045c..60116774a9 100644
--- a/extensions/vscode/config_schema.json
+++ b/extensions/vscode/config_schema.json
@@ -956,7 +956,8 @@
             "gemini-2.0-flash",
             "gemini-2.0-flash-thinking-exp-01-21",
             "gemini-2.0-pro-exp-02-05",
-            "gemini-2.0-flash-lite-preview-02-05"
+            "gemini-2.0-flash-lite-preview-02-05",
+            "gemini-2.5-pro-latest"
           ]
         }
       }
@@ -1562,6 +1563,7 @@
             "gemini-pro",
             "gemini-1.5-pro-latest",
             "gemini-1.5-pro",
+            "gemini-2.5-pro",
             "gemini-1.5-flash-latest",
             "gemini-1.5-flash",
             "mistral-tiny",
diff --git a/gui/src/pages/AddNewModel/configs/models.ts b/gui/src/pages/AddNewModel/configs/models.ts
index fafdc53b7e..338fea7457 100644
--- a/gui/src/pages/AddNewModel/configs/models.ts
+++ b/gui/src/pages/AddNewModel/configs/models.ts
@@ -887,6 +887,20 @@ export const models: { [key: string]: ModelPackage } = {
     providerOptions: ["gemini"],
     isOpenSource: false,
   },
+  gemini25Pro: {
+    title: "Gemini 2.5 Pro",
+    description:
+      "Google's thinking-by-default Pro model with up to 64k output tokens. Best for complex tasks involving reasoning.",
+    params: {
+      title: "Gemini 2.5 Pro",
+      model: "gemini-2.5-pro",
+      contextLength: 1_000_000,
+      apiKey: "",
+    },
+    icon: "gemini.png",
+    providerOptions: ["gemini"],
+    isOpenSource: false,
+  },
   commandR: {
     title: "Command R",
     description:
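
For anyone trying the change locally, here is a minimal sketch of how the new model could be referenced from a user's config once this patch lands. It assumes Continue's standard `models` array format in config.json; the placeholder API key is illustrative and not part of this patch.

```json
{
  "models": [
    {
      "title": "Gemini 2.5 Pro",
      "provider": "gemini",
      "model": "gemini-2.5-pro",
      "apiKey": "<YOUR_GEMINI_API_KEY>"
    }
  ]
}
```

The `model` value here matches the identifier added to the enum in config_schema.json, and the `gemini25Pro` package in models.ts exposes the same model through the Add New Model page.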