Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
59 changes: 59 additions & 0 deletions google-ai-gemini-examples/pom.xml
Original file line number Diff line number Diff line change
@@ -0,0 +1,59 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Maven build for the LangChain4j Google AI Gemini examples. -->
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>dev.langchain4j</groupId>
    <artifactId>google-ai-gemini-examples</artifactId>
    <version>1.9.1</version>

    <properties>
        <!-- Examples use Java 17 language features (e.g. var in lambdas, text blocks available). -->
        <maven.compiler.source>17</maven.compiler.source>
        <maven.compiler.target>17</maven.compiler.target>
        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
    </properties>

    <!-- Import the LangChain4j BOM so individual langchain4j dependencies below
         do not need explicit versions and always stay mutually compatible. -->
    <dependencyManagement>
        <dependencies>
            <dependency>
                <groupId>dev.langchain4j</groupId>
                <artifactId>langchain4j-bom</artifactId>
                <version>1.9.1</version>
                <type>pom</type>
                <scope>import</scope>
            </dependency>
        </dependencies>
    </dependencyManagement>

    <dependencies>

        <!-- Google AI Gemini chat/embedding/token-counting models (version from BOM). -->
        <dependency>
            <groupId>dev.langchain4j</groupId>
            <artifactId>langchain4j-google-ai-gemini</artifactId>
        </dependency>

        <!-- Core LangChain4j APIs (ChatModel, messages, responses). -->
        <dependency>
            <groupId>dev.langchain4j</groupId>
            <artifactId>langchain4j</artifactId>
        </dependency>

        <!-- Local embedding model used by embedding-related examples. -->
        <dependency>
            <groupId>dev.langchain4j</groupId>
            <artifactId>langchain4j-embeddings-all-minilm-l6-v2</artifactId>
        </dependency>

        <!-- tinylog backend + SLF4J binding so library logging is visible in the console. -->
        <dependency>
            <groupId>org.tinylog</groupId>
            <artifactId>tinylog-impl</artifactId>
            <version>2.6.2</version>
        </dependency>
        <dependency>
            <groupId>org.tinylog</groupId>
            <artifactId>slf4j-tinylog</artifactId>
            <version>2.6.2</version>
        </dependency>

    </dependencies>

</project>
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
package dev.langchain4j.example.gemini;

import dev.langchain4j.model.chat.ChatModel;
import dev.langchain4j.model.googleai.GoogleAiGeminiChatModel;

/**
 * A simple example demonstrating basic chat functionality with Google AI Gemini.
 *
 * <p>This example shows how to:
 * <ul>
 *   <li>Create a {@link GoogleAiGeminiChatModel} using the builder pattern</li>
 *   <li>Send a simple text message and receive a response</li>
 * </ul>
 *
 * <p>Requires the {@code GOOGLE_AI_GEMINI_API_KEY} environment variable to be set or by other means to get the API key.
 */
public class Example01_SimpleChat {

    public static void main(String[] args) {
        String apiKey = System.getenv("GOOGLE_AI_GEMINI_API_KEY");
        // Fail fast with a clear message instead of an obscure authentication error from the API call.
        if (apiKey == null || apiKey.isBlank()) {
            throw new IllegalStateException("Please set the GOOGLE_AI_GEMINI_API_KEY environment variable");
        }

        ChatModel model = GoogleAiGeminiChatModel.builder()
                .apiKey(apiKey)
                .modelName("gemini-2.5-flash-lite")
                .build();

        String response = model.chat("What is the capital of France?");

        System.out.println(response);
    }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,66 @@
package dev.langchain4j.example.gemini;

import dev.langchain4j.model.chat.response.ChatResponse;
import dev.langchain4j.model.chat.response.StreamingChatResponseHandler;
import dev.langchain4j.model.googleai.GoogleAiGeminiStreamingChatModel;

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeoutException;

import static java.util.concurrent.TimeUnit.SECONDS;

/**
 * Demonstrates streaming chat responses with Google AI Gemini.
 *
 * <p>This example shows how to:
 * <ul>
 *   <li>Create a {@link GoogleAiGeminiStreamingChatModel}</li>
 *   <li>Receive tokens as they are generated in real-time</li>
 *   <li>Handle completion and error callbacks</li>
 * </ul>
 *
 * <p>Requires the {@code GOOGLE_AI_GEMINI_API_KEY} environment variable to be set.
 */
public class Example02_StreamingChat {

    /** Prints tokens as they arrive and exposes a future that completes when streaming ends. */
    static class WaitingChatResponseHandler implements StreamingChatResponseHandler {

        // Future used to wait for a response; completed normally on success, exceptionally on error.
        private final CompletableFuture<ChatResponse> futureResponse = new CompletableFuture<>();

        @Override
        public void onPartialResponse(String partialResponse) {
            System.out.print(partialResponse);
        }

        @Override
        public void onCompleteResponse(ChatResponse completeResponse) {
            System.out.println("\n\n--- Complete ---");
            System.out.println("Finish reason: " + completeResponse.finishReason());
            System.out.println("Tokens used: " + completeResponse.tokenUsage().totalTokenCount());
            System.out.println("Completed message text: " + completeResponse.aiMessage().text());
            futureResponse.complete(completeResponse);
        }

        @Override
        public void onError(Throwable error) {
            System.err.println("Error: " + error.getMessage());
            // BUG FIX: without this, main() would block for the full 60-second timeout on failure
            // and then throw a misleading TimeoutException instead of surfacing the real cause.
            futureResponse.completeExceptionally(error);
        }
    }

    public static void main(String[] args) throws InterruptedException, ExecutionException, TimeoutException {
        var model = GoogleAiGeminiStreamingChatModel.builder()
                .apiKey(System.getenv("GOOGLE_AI_GEMINI_API_KEY"))
                .modelName("gemini-2.5-flash-lite")
                .build();

        // Set up the handler - what to do when messages are received.
        var handler = new WaitingChatResponseHandler();

        // Send the chat request. This will start calling onPartialResponse as tokens are returned from Gemini. And
        // later calls onCompleteResponse - with the entire message - once Gemini is finished.
        model.chat("Explain quantum computing in simple terms", handler);

        // Wait for the future to complete or 60 seconds, whatever comes first.
        handler.futureResponse.get(60, SECONDS);
    }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
package dev.langchain4j.example.gemini;

import dev.langchain4j.data.embedding.Embedding;
import dev.langchain4j.model.googleai.GoogleAiEmbeddingModel;
import dev.langchain4j.model.output.Response;

/**
 * Demonstrates generating embeddings with Google AI Gemini.
 *
 * <p>This example shows how to:
 * <ul>
 *   <li>Create a {@link GoogleAiEmbeddingModel}</li>
 *   <li>Generate an embedding vector from text</li>
 *   <li>Access embedding dimensions and values</li>
 * </ul>
 *
 * <p>Requires the {@code GOOGLE_AI_GEMINI_API_KEY} environment variable to be set.
 *
 * <p>Learn <a href="https://ai.google.dev/gemini-api/docs/embeddings">more</a></p>
 */
public class Example03_SimpleEmbedding {

    public static void main(String[] args) {
        GoogleAiEmbeddingModel model = GoogleAiEmbeddingModel.builder()
                .apiKey(System.getenv("GOOGLE_AI_GEMINI_API_KEY"))
                .modelName("gemini-embedding-001")
                .outputDimensionality(1536)
                .build();

        Response<Embedding> response = model.embed("The quick brown fox jumps over the lazy dog.");

        Embedding embedding = response.content();

        System.out.println("Embedding dimension: " + embedding.dimension());
        System.out.println("First 10 values: ");
        // Guard against vectors shorter than 10 so the preview loop can never go out of bounds.
        for (int i = 0; i < Math.min(10, embedding.dimension()); i++) {
            System.out.printf("  [%d]: %.6f%n", i, embedding.vector()[i]);
        }
    }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,47 @@
package dev.langchain4j.example.gemini;

import dev.langchain4j.data.message.SystemMessage;
import dev.langchain4j.data.message.UserMessage;
import dev.langchain4j.model.googleai.GoogleAiGeminiTokenCountEstimator;

import java.util.List;

/**
 * Demonstrates token counting with Google AI Gemini.
 *
 * <p>This example shows how to:
 * <ul>
 *   <li>Create a {@link GoogleAiGeminiTokenCountEstimator}</li>
 *   <li>Estimate tokens in plain text</li>
 *   <li>Estimate tokens in single and multiple messages</li>
 * </ul>
 *
 * <p>Requires the {@code GOOGLE_AI_GEMINI_API_KEY} environment variable to be set.
 *
 * <p>Learn <a href="https://ai.google.dev/gemini-api/docs/tokens">more</a></p>
 */
public class Example04_TokenCounting {

    public static void main(String[] args) {
        var estimator = GoogleAiGeminiTokenCountEstimator.builder()
                .apiKey(System.getenv("GOOGLE_AI_GEMINI_API_KEY"))
                .modelName("gemini-2.5-flash-lite")
                .build();

        // 1. Plain text
        var sentence = "The quick brown fox jumps over the lazy dog.";
        System.out.println("Plain Text token count: " + estimator.estimateTokenCountInText(sentence));

        // 2. A single chat message
        var question = UserMessage.from("What is the capital of France?");
        System.out.println("\nSingle message token count: " + estimator.estimateTokenCountInMessage(question));

        // 3. A whole conversation (system prompt + user question)
        var conversation = List.of(
                SystemMessage.from("You are a helpful assistant."),
                UserMessage.from("Explain quantum computing in simple terms."));
        System.out.println("\nConversation token count: " + estimator.estimateTokenCountInMessages(conversation));
    }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,65 @@
package dev.langchain4j.example.gemini;

import dev.langchain4j.model.chat.ChatModel;
import dev.langchain4j.model.chat.request.ChatRequest;
import dev.langchain4j.model.chat.request.ResponseFormat;
import dev.langchain4j.model.chat.request.json.JsonObjectSchema;
import dev.langchain4j.model.chat.request.json.JsonSchema;
import dev.langchain4j.model.chat.request.json.JsonStringSchema;
import dev.langchain4j.model.chat.request.json.JsonIntegerSchema;
import dev.langchain4j.model.chat.request.json.JsonArraySchema;
import dev.langchain4j.model.chat.response.ChatResponse;
import dev.langchain4j.model.googleai.GoogleAiGeminiChatModel;
import dev.langchain4j.data.message.UserMessage;

import java.util.List;

/**
 * Demonstrates structured JSON responses with Google AI Gemini.
 *
 * <p>This example shows how to:
 * <ul>
 *   <li>Define a JSON schema for the expected response structure</li>
 *   <li>Configure the model to return responses matching the schema</li>
 *   <li>Parse structured data from model responses</li>
 * </ul>
 *
 * <p>Requires the {@code GOOGLE_AI_GEMINI_API_KEY} environment variable to be set.
 *
 * <p>Learn <a href="https://docs.langchain4j.dev/tutorials/structured-outputs">more</a></p>
 */
public class Example05_ChatWithJsonResponse {

    public static void main(String[] args) {
        ChatModel model = GoogleAiGeminiChatModel.builder()
                .apiKey(System.getenv("GOOGLE_AI_GEMINI_API_KEY"))
                .modelName("gemini-2.5-flash-lite")
                .build();

        // Schema describing the object we want back: a person with four required fields.
        var hobbiesSchema = JsonArraySchema.builder()
                .items(new JsonStringSchema())
                .build();
        var person = JsonObjectSchema.builder()
                .addStringProperty("name")
                .addIntegerProperty("age")
                .addStringProperty("occupation")
                .addProperty("hobbies", hobbiesSchema)
                .required("name", "age", "occupation", "hobbies")
                .build();

        // Ask for JSON output constrained to the schema above.
        var format = ResponseFormat.builder()
                .type(ResponseFormat.JSON.type())
                .jsonSchema(JsonSchema.builder().name("person").rootElement(person).build())
                .build();

        var prompt = UserMessage.from("Generate a fictional person with a name, age, occupation, and 3 hobbies.");
        ChatRequest request = ChatRequest.builder()
                .messages(List.of(prompt))
                .responseFormat(format)
                .build();

        ChatResponse response = model.chat(request);

        System.out.println("Structured JSON response:");
        System.out.println(response.aiMessage().text());
    }
}
Loading
Loading