diff --git a/google-ai-gemini-examples/pom.xml b/google-ai-gemini-examples/pom.xml new file mode 100644 index 00000000..e66b1ec5 --- /dev/null +++ b/google-ai-gemini-examples/pom.xml @@ -0,0 +1,59 @@ + + + 4.0.0 + + dev.langchain4j + google-ai-gemini-examples + 1.9.1 + + + 17 + 17 + UTF-8 + + + + + + dev.langchain4j + langchain4j-bom + 1.9.1 + pom + import + + + + + + + + dev.langchain4j + langchain4j-google-ai-gemini + + + + dev.langchain4j + langchain4j + + + + dev.langchain4j + langchain4j-embeddings-all-minilm-l6-v2 + + + + org.tinylog + tinylog-impl + 2.6.2 + + + org.tinylog + slf4j-tinylog + 2.6.2 + + + + + \ No newline at end of file diff --git a/google-ai-gemini-examples/src/main/java/dev/langchain4j/example/gemini/Example01_SimpleChat.java b/google-ai-gemini-examples/src/main/java/dev/langchain4j/example/gemini/Example01_SimpleChat.java new file mode 100644 index 00000000..378d792f --- /dev/null +++ b/google-ai-gemini-examples/src/main/java/dev/langchain4j/example/gemini/Example01_SimpleChat.java @@ -0,0 +1,29 @@ +package dev.langchain4j.example.gemini; + +import dev.langchain4j.model.chat.ChatModel; +import dev.langchain4j.model.googleai.GoogleAiGeminiChatModel; + +/** + * A simple example demonstrating basic chat functionality with Google AI Gemini. + * + *

This example shows how to: + *

+ * + *

Requires the {@code GOOGLE_AI_GEMINI_API_KEY} environment variable to be set or by other means to get the API key. + */ +public class Example01_SimpleChat { + public static void main(String[] args) { + String apiKey = System.getenv("GOOGLE_AI_GEMINI_API_KEY"); + ChatModel model = GoogleAiGeminiChatModel.builder() + .apiKey(apiKey) + .modelName("gemini-2.5-flash-lite") + .build(); + + String response = model.chat("What is the capital of France?"); + + System.out.println(response); + } +} \ No newline at end of file diff --git a/google-ai-gemini-examples/src/main/java/dev/langchain4j/example/gemini/Example02_StreamingChat.java b/google-ai-gemini-examples/src/main/java/dev/langchain4j/example/gemini/Example02_StreamingChat.java new file mode 100644 index 00000000..f3cd0c6b --- /dev/null +++ b/google-ai-gemini-examples/src/main/java/dev/langchain4j/example/gemini/Example02_StreamingChat.java @@ -0,0 +1,66 @@ +package dev.langchain4j.example.gemini; + +import dev.langchain4j.model.chat.response.ChatResponse; +import dev.langchain4j.model.chat.response.StreamingChatResponseHandler; +import dev.langchain4j.model.googleai.GoogleAiGeminiStreamingChatModel; + +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeoutException; + +import static java.util.concurrent.TimeUnit.SECONDS; + +/** + * Demonstrates streaming chat responses with Google AI Gemini. + * + *

This example shows how to: + *

+ * + *

Requires the {@code GOOGLE_AI_GEMINI_API_KEY} environment variable to be set. + */ +public class Example02_StreamingChat { + static class WaitingChatResponseHandler implements StreamingChatResponseHandler { + // Future used to wait for a response + private final CompletableFuture futureResponse = new CompletableFuture<>(); + + @Override + public void onPartialResponse(String partialResponse) { + System.out.print(partialResponse); + } + + @Override + public void onCompleteResponse(ChatResponse completeResponse) { + System.out.println("\n\n--- Complete ---"); + System.out.println("Finish reason: " + completeResponse.finishReason()); + System.out.println("Tokens used: " + completeResponse.tokenUsage().totalTokenCount()); + System.out.println("Completed message text: " + completeResponse.aiMessage().text()); + futureResponse.complete(completeResponse); + } + + @Override + public void onError(Throwable error) { + System.err.println("Error: " + error.getMessage()); + } + }; + + public static void main(String[] args) throws InterruptedException, ExecutionException, TimeoutException { + var model = GoogleAiGeminiStreamingChatModel.builder() + .apiKey(System.getenv("GOOGLE_AI_GEMINI_API_KEY")) + .modelName("gemini-2.5-flash-lite") + .build(); + + // Set up the handler - what to do when messages are received. + var handler = new WaitingChatResponseHandler(); + + // Send the chat request. This will start calling onPartialResponse as tokens are returned from Gemini. And + // later calls onCompleteResponse - with the entire message - once Gemini is finished. + model.chat("Explain quantum computing in simple terms", handler); + + // Wait for the future to complete or 60 seconds, whatever comes first. 
+ handler.futureResponse.get(60, SECONDS); + } +} \ No newline at end of file diff --git a/google-ai-gemini-examples/src/main/java/dev/langchain4j/example/gemini/Example03_SimpleEmbedding.java b/google-ai-gemini-examples/src/main/java/dev/langchain4j/example/gemini/Example03_SimpleEmbedding.java new file mode 100644 index 00000000..685ed04e --- /dev/null +++ b/google-ai-gemini-examples/src/main/java/dev/langchain4j/example/gemini/Example03_SimpleEmbedding.java @@ -0,0 +1,40 @@ +package dev.langchain4j.example.gemini; + +import dev.langchain4j.data.embedding.Embedding; +import dev.langchain4j.model.googleai.GoogleAiEmbeddingModel; +import dev.langchain4j.model.output.Response; + +/** + * Demonstrates generating embeddings with Google AI Gemini. + * + *

This example shows how to: + *

+ * + *

Requires the {@code GOOGLE_AI_GEMINI_API_KEY} environment variable to be set. + * + *

Learn response = model.embed("The quick brown fox jumps over the lazy dog."); + + Embedding embedding = response.content(); + + System.out.println("Embedding dimension: " + embedding.dimension()); + System.out.println("First 10 values: "); + for (int i = 0; i < 10; i++) { + System.out.printf(" [%d]: %.6f%n", i, embedding.vector()[i]); + } + } +} \ No newline at end of file diff --git a/google-ai-gemini-examples/src/main/java/dev/langchain4j/example/gemini/Example04_TokenCounting.java b/google-ai-gemini-examples/src/main/java/dev/langchain4j/example/gemini/Example04_TokenCounting.java new file mode 100644 index 00000000..4e36b33f --- /dev/null +++ b/google-ai-gemini-examples/src/main/java/dev/langchain4j/example/gemini/Example04_TokenCounting.java @@ -0,0 +1,47 @@ +package dev.langchain4j.example.gemini; + +import dev.langchain4j.data.message.SystemMessage; +import dev.langchain4j.data.message.UserMessage; +import dev.langchain4j.model.googleai.GoogleAiGeminiTokenCountEstimator; + +import java.util.List; + +/** + * Demonstrates token counting with Google AI Gemini. + * + *

This example shows how to: + *

+ * + *

Requires the {@code GOOGLE_AI_GEMINI_API_KEY} environment variable to be set. + * + *

Learn more

+ */ +public class Example04_TokenCounting { + + public static void main(String[] args) { + GoogleAiGeminiTokenCountEstimator tokenEstimator = GoogleAiGeminiTokenCountEstimator.builder() + .apiKey(System.getenv("GOOGLE_AI_GEMINI_API_KEY")) + .modelName("gemini-2.5-flash-lite") + .build(); + + // Count tokens in plain text + String text = "The quick brown fox jumps over the lazy dog."; + int textTokens = tokenEstimator.estimateTokenCountInText(text); + System.out.println("Plain Text token count: " + textTokens); + + // Count tokens in a single message + UserMessage userMessage = UserMessage.from("What is the capital of France?"); + int messageTokens = tokenEstimator.estimateTokenCountInMessage(userMessage); + System.out.println("\nSingle message token count: " + messageTokens); + + // Count tokens in multiple messages + SystemMessage systemMessage = SystemMessage.from("You are a helpful assistant."); + UserMessage question = UserMessage.from("Explain quantum computing in simple terms."); + int conversationTokens = tokenEstimator.estimateTokenCountInMessages(List.of(systemMessage, question)); + System.out.println("\nConversation token count: " + conversationTokens); + } +} \ No newline at end of file diff --git a/google-ai-gemini-examples/src/main/java/dev/langchain4j/example/gemini/Example05_ChatWithJsonResponse.java b/google-ai-gemini-examples/src/main/java/dev/langchain4j/example/gemini/Example05_ChatWithJsonResponse.java new file mode 100644 index 00000000..221dd3e9 --- /dev/null +++ b/google-ai-gemini-examples/src/main/java/dev/langchain4j/example/gemini/Example05_ChatWithJsonResponse.java @@ -0,0 +1,65 @@ +package dev.langchain4j.example.gemini; + +import dev.langchain4j.model.chat.ChatModel; +import dev.langchain4j.model.chat.request.ChatRequest; +import dev.langchain4j.model.chat.request.ResponseFormat; +import dev.langchain4j.model.chat.request.json.JsonObjectSchema; +import dev.langchain4j.model.chat.request.json.JsonSchema; +import 
dev.langchain4j.model.chat.request.json.JsonStringSchema; +import dev.langchain4j.model.chat.request.json.JsonIntegerSchema; +import dev.langchain4j.model.chat.request.json.JsonArraySchema; +import dev.langchain4j.model.chat.response.ChatResponse; +import dev.langchain4j.model.googleai.GoogleAiGeminiChatModel; +import dev.langchain4j.data.message.UserMessage; + +import java.util.List; + +/** + * Demonstrates structured JSON responses with Google AI Gemini. + * + *

This example shows how to: + *

+ * + *

Requires the {@code GOOGLE_AI_GEMINI_API_KEY} environment variable to be set. + * + *

Learn more

+ */ +public class Example05_ChatWithJsonResponse { + + public static void main(String[] args) { + ChatModel model = GoogleAiGeminiChatModel.builder() + .apiKey(System.getenv("GOOGLE_AI_GEMINI_API_KEY")) + .modelName("gemini-2.5-flash-lite") + .build(); + + JsonObjectSchema personSchema = JsonObjectSchema.builder() + .addStringProperty("name") + .addIntegerProperty("age") + .addStringProperty("occupation") + .addProperty("hobbies", JsonArraySchema.builder() + .items(new JsonStringSchema()) + .build()) + .required("name", "age", "occupation", "hobbies") + .build(); + + ResponseFormat responseFormat = ResponseFormat.builder() + .type(ResponseFormat.JSON.type()) + .jsonSchema(JsonSchema.builder().name("person").rootElement(personSchema).build()) + .build(); + + ChatRequest request = ChatRequest.builder() + .messages(List.of(UserMessage.from( + "Generate a fictional person with a name, age, occupation, and 3 hobbies."))) + .responseFormat(responseFormat) + .build(); + + ChatResponse response = model.chat(request); + + System.out.println("Structured JSON response:"); + System.out.println(response.aiMessage().text()); + } +} \ No newline at end of file diff --git a/google-ai-gemini-examples/src/main/java/dev/langchain4j/example/gemini/Example06_ChatWithTools.java b/google-ai-gemini-examples/src/main/java/dev/langchain4j/example/gemini/Example06_ChatWithTools.java new file mode 100644 index 00000000..a3515586 --- /dev/null +++ b/google-ai-gemini-examples/src/main/java/dev/langchain4j/example/gemini/Example06_ChatWithTools.java @@ -0,0 +1,121 @@ +package dev.langchain4j.example.gemini; + +import dev.langchain4j.agent.tool.Tool; +import dev.langchain4j.agent.tool.ToolExecutionRequest; +import dev.langchain4j.agent.tool.ToolSpecifications; +import dev.langchain4j.data.message.AiMessage; +import dev.langchain4j.data.message.ChatMessage; +import dev.langchain4j.data.message.ToolExecutionResultMessage; +import dev.langchain4j.data.message.UserMessage; +import 
dev.langchain4j.model.chat.ChatModel; +import dev.langchain4j.model.chat.request.ChatRequest; +import dev.langchain4j.model.chat.response.ChatResponse; +import dev.langchain4j.model.googleai.GoogleAiGeminiChatModel; + +import java.util.ArrayList; +import java.util.List; + +/** + * Demonstrates tool/function calling with Google AI Gemini. + * + *

This example shows how to: + *

+ * + *

Requires the {@code GOOGLE_AI_GEMINI_API_KEY} environment variable to be set. + * + *

Learn more

+ */ +public class Example06_ChatWithTools { + + public static void main(String[] args) { + ChatModel model = GoogleAiGeminiChatModel.builder() + .apiKey(System.getenv("GOOGLE_AI_GEMINI_API_KEY")) + .modelName("gemini-2.5-flash-lite") + .build(); + + Calculator calculator = new Calculator(); + + List messages = new ArrayList<>(); + messages.add(UserMessage.from("What is 25 multiplied by 17, and then add 123 to the result?")); + + ChatRequest request = ChatRequest.builder() + .messages(messages) + .toolSpecifications(ToolSpecifications.toolSpecificationsFrom(calculator)) + .build(); + + ChatResponse response = model.chat(request); + AiMessage aiMessage = response.aiMessage(); + + // Process tool calls if any + while (aiMessage.hasToolExecutionRequests()) { + System.out.println("Model requested tool execution:"); + + messages.add(aiMessage); + + for (ToolExecutionRequest toolRequest : aiMessage.toolExecutionRequests()) { + System.out.println(" Tool: " + toolRequest.name()); + System.out.println(" Arguments: " + toolRequest.arguments()); + + String result = executeCalculatorTool(calculator, toolRequest); + System.out.println(" Result: " + result); + + messages.add(ToolExecutionResultMessage.from(toolRequest, result)); + } + + // Continue the conversation with tool results + request = ChatRequest.builder() + .messages(messages) + .toolSpecifications(ToolSpecifications.toolSpecificationsFrom(calculator)) + .build(); + + response = model.chat(request); + aiMessage = response.aiMessage(); + } + + System.out.println("\nFinal response: " + aiMessage.text()); + } + + private static String executeCalculatorTool(Calculator calculator, ToolExecutionRequest request) { + String args = request.arguments(); + + // Simple JSON parsing for demo purposes + double a = extractNumber(args, "a"); + double b = extractNumber(args, "b"); + + return switch (request.name()) { + case "add" -> String.valueOf(calculator.add(a, b)); + case "multiply" -> String.valueOf(calculator.multiply(a, b)); + 
default -> "Unknown tool"; + }; + } + + private static double extractNumber(String json, String field) { + String pattern = "\"" + field + "\""; + int idx = json.indexOf(pattern); + if (idx == -1) return 0; + + int colonIdx = json.indexOf(":", idx); + int endIdx = json.indexOf(",", colonIdx); + if (endIdx == -1) endIdx = json.indexOf("}", colonIdx); + + return Double.parseDouble(json.substring(colonIdx + 1, endIdx).trim()); + } + + static class Calculator { + + @Tool("Adds two numbers") + public double add(double a, double b) { + return a + b; + } + + @Tool("Multiplies two numbers") + public double multiply(double a, double b) { + return a * b; + } + } +} \ No newline at end of file diff --git a/google-ai-gemini-examples/src/main/java/dev/langchain4j/example/gemini/Example07_EmbeddingWithTaskTypes.java b/google-ai-gemini-examples/src/main/java/dev/langchain4j/example/gemini/Example07_EmbeddingWithTaskTypes.java new file mode 100644 index 00000000..b2e58caa --- /dev/null +++ b/google-ai-gemini-examples/src/main/java/dev/langchain4j/example/gemini/Example07_EmbeddingWithTaskTypes.java @@ -0,0 +1,94 @@ +package dev.langchain4j.example.gemini; + +import dev.langchain4j.data.embedding.Embedding; +import dev.langchain4j.model.googleai.GoogleAiEmbeddingModel; +import dev.langchain4j.model.googleai.GoogleAiEmbeddingModel.TaskType; +import dev.langchain4j.model.output.Response; + +/** + * Demonstrates embedding generation with different task types using Google AI Gemini. + * + *

This example shows how to: + *

+ * + *

Task types include: + *

+ * + *

Requires the {@code GOOGLE_AI_GEMINI_API_KEY} environment variable to be set. + * + *

Learn more

+ */ +public class Example07_EmbeddingWithTaskTypes { + + public static void main(String[] args) { + String apiKey = System.getenv("GOOGLE_AI_GEMINI_API_KEY"); + + // Embedding for a search query + GoogleAiEmbeddingModel queryModel = GoogleAiEmbeddingModel.builder() + .apiKey(apiKey) + .modelName("gemini-embedding-001") + .taskType(TaskType.RETRIEVAL_QUERY) + .build(); + + String query = "What is machine learning?"; + Response queryEmbedding = queryModel.embed(query); + System.out.println("Query embedding (RETRIEVAL_QUERY):"); + System.out.println(" Text: \"" + query + "\""); + System.out.println(" Dimension: " + queryEmbedding.content().dimension()); + + // Embedding for a document to be searched + GoogleAiEmbeddingModel documentModel = GoogleAiEmbeddingModel.builder() + .apiKey(apiKey) + .modelName("gemini-embedding-001") + .taskType(TaskType.RETRIEVAL_DOCUMENT) + .build(); + + String document = "Machine learning is a subset of artificial intelligence that enables " + + "systems to learn and improve from experience without being explicitly programmed."; + Response documentEmbedding = documentModel.embed(document); + System.out.println("\nDocument embedding (RETRIEVAL_DOCUMENT):"); + System.out.println(" Text: \"" + document.substring(0, 50) + "...\""); + System.out.println(" Dimension: " + documentEmbedding.content().dimension()); + + // Embedding for semantic similarity comparison + GoogleAiEmbeddingModel similarityModel = GoogleAiEmbeddingModel.builder() + .apiKey(apiKey) + .modelName("gemini-embedding-001") + .taskType(TaskType.SEMANTIC_SIMILARITY) + .build(); + + String text1 = "The cat sat on the mat."; + String text2 = "A feline rested on the rug."; + Response embedding1 = similarityModel.embed(text1); + Response embedding2 = similarityModel.embed(text2); + + double similarity = cosineSimilarity(embedding1.content().vector(), embedding2.content().vector()); + + System.out.println("\nSemantic similarity (SEMANTIC_SIMILARITY):"); + System.out.println(" Text 
1: \"" + text1 + "\""); + System.out.println(" Text 2: \"" + text2 + "\""); + System.out.printf(" Cosine similarity: %.4f%n", similarity); + } + + private static double cosineSimilarity(float[] a, float[] b) { + double dotProduct = 0.0; + double normA = 0.0; + double normB = 0.0; + for (int i = 0; i < a.length; i++) { + dotProduct += a[i] * b[i]; + normA += a[i] * a[i]; + normB += b[i] * b[i]; + } + return dotProduct / (Math.sqrt(normA) * Math.sqrt(normB)); + } +} \ No newline at end of file diff --git a/google-ai-gemini-examples/src/main/java/dev/langchain4j/example/gemini/Example08_FileUpload.java b/google-ai-gemini-examples/src/main/java/dev/langchain4j/example/gemini/Example08_FileUpload.java new file mode 100644 index 00000000..14d1bb67 --- /dev/null +++ b/google-ai-gemini-examples/src/main/java/dev/langchain4j/example/gemini/Example08_FileUpload.java @@ -0,0 +1,88 @@ +package dev.langchain4j.example.gemini; + +import dev.langchain4j.data.message.PdfFileContent; +import dev.langchain4j.data.message.TextContent; +import dev.langchain4j.data.message.UserMessage; +import dev.langchain4j.model.chat.ChatModel; +import dev.langchain4j.model.chat.response.ChatResponse; +import dev.langchain4j.model.googleai.GeminiFiles; +import dev.langchain4j.model.googleai.GeminiFiles.GeminiFile; +import dev.langchain4j.model.googleai.GoogleAiGeminiChatModel; + +import java.nio.file.Path; + +/** + * Demonstrates uploading a file and using it in a chat request with Google AI Gemini. + * + *

This example shows how to: + *

+ * <ul>
+ *   <li>Upload a file using the {@link GeminiFiles} API</li>
+ *   <li>Reference the uploaded file in a chat request via its URI</li>
+ *   <li>Combine file content with text prompts for multimodal interaction</li>
+ *   <li>Clean up by deleting the file after use</li>
+ * </ul>
+ * + *

This pattern is useful for processing large files or reusing the same file + * across multiple chat requests without re-uploading. + * + *

Requires the {@code GOOGLE_AI_GEMINI_API_KEY} environment variable to be set. + */ +public class Example08_FileUpload { + + public static void main(String[] args) throws Exception { + String apiKey = System.getenv("GOOGLE_AI_GEMINI_API_KEY"); + + GeminiFiles geminiFiles = GeminiFiles.builder() + .apiKey(apiKey) + .build(); + + ChatModel model = GoogleAiGeminiChatModel.builder() + .apiKey(apiKey) + .modelName("gemini-2.5-flash-lite") + .logRequestsAndResponses(true) + .build(); + + // Create a sample text file to upload + var file = Path.of(Example08_FileUpload.class.getClassLoader().getResource("q4-planning.pdf").toURI()); + + GeminiFile uploadedFile = null; + try { + // Upload the file + System.out.println("Uploading file..."); + uploadedFile = geminiFiles.uploadFile(file, "meeting-notes.txt"); + + // Wait for processing + while (uploadedFile.isProcessing()) { + Thread.sleep(500); + uploadedFile = geminiFiles.getMetadata(uploadedFile.name()); + } + + if (uploadedFile.isFailed()) { + System.err.println("File processing failed!"); + return; + } + + System.out.println("File uploaded successfully: " + uploadedFile.uri()); + + // Use the uploaded file in a chat request + UserMessage userMessage = UserMessage.from( + PdfFileContent.from(uploadedFile.uri()), + TextContent.from("Summarize this document and list the action items with their owners.") + ); + + System.out.println("\nAsking Gemini to analyze the uploaded file...\n"); + + ChatResponse response = model.chat(userMessage); + + System.out.println("Response:"); + System.out.println(response.aiMessage().text()); + + } finally { + // Clean up: delete uploaded file and local temp file + if (uploadedFile != null) { + geminiFiles.deleteFile(uploadedFile.name()); + System.out.println("\nUploaded file deleted."); + } + } + } +} \ No newline at end of file diff --git a/google-ai-gemini-examples/src/main/java/dev/langchain4j/example/gemini/Example09_MultimodalChat.java 
b/google-ai-gemini-examples/src/main/java/dev/langchain4j/example/gemini/Example09_MultimodalChat.java new file mode 100644 index 00000000..d353b168 --- /dev/null +++ b/google-ai-gemini-examples/src/main/java/dev/langchain4j/example/gemini/Example09_MultimodalChat.java @@ -0,0 +1,47 @@ +package dev.langchain4j.example.gemini; + +import dev.langchain4j.data.message.ImageContent; +import dev.langchain4j.data.message.TextContent; +import dev.langchain4j.data.message.UserMessage; +import dev.langchain4j.model.chat.ChatModel; +import dev.langchain4j.model.chat.response.ChatResponse; +import dev.langchain4j.model.googleai.GoogleAiGeminiChatModel; + +/** + * Demonstrates multimodal chat capabilities with Google AI Gemini. + * + *

This example shows how to: + *

+ * <ul>
+ *   <li>Send images along with text prompts</li>
+ *   <li>Use image URLs in chat requests</li>
+ *   <li>Combine multiple content types in a single message</li>
+ * </ul>
+ * + *

Gemini supports various multimodal inputs including images, audio, video, and PDFs. + * + *

Requires the {@code GOOGLE_AI_GEMINI_API_KEY} environment variable to be set. + */ +public class Example09_MultimodalChat { + + public static void main(String[] args) { + ChatModel model = GoogleAiGeminiChatModel.builder() + .apiKey(System.getenv("GOOGLE_AI_GEMINI_API_KEY")) + .modelName("gemini-2.5-flash-lite") + .build(); + + // Example using a public image URL + String imageUrl = "https://upload.wikimedia.org/wikipedia/commons/thumb/3/3a/Cat03.jpg/1200px-Cat03.jpg"; + + UserMessage userMessage = UserMessage.from( + ImageContent.from(imageUrl), + TextContent.from("What do you see in this image? Describe it in detail.") + ); + + System.out.println("Sending image with question to Gemini...\n"); + + ChatResponse response = model.chat(userMessage); + + System.out.println("Response:"); + System.out.println(response.aiMessage().text()); + } +} \ No newline at end of file diff --git a/google-ai-gemini-examples/src/main/java/dev/langchain4j/example/gemini/Example10_ChatWithThinking.java b/google-ai-gemini-examples/src/main/java/dev/langchain4j/example/gemini/Example10_ChatWithThinking.java new file mode 100644 index 00000000..01f2d59b --- /dev/null +++ b/google-ai-gemini-examples/src/main/java/dev/langchain4j/example/gemini/Example10_ChatWithThinking.java @@ -0,0 +1,57 @@ +package dev.langchain4j.example.gemini; + +import dev.langchain4j.data.message.UserMessage; +import dev.langchain4j.model.chat.ChatModel; +import dev.langchain4j.model.chat.response.ChatResponse; +import dev.langchain4j.model.googleai.GeminiThinkingConfig; +import dev.langchain4j.model.googleai.GoogleAiGeminiChatModel; + +/** + * Demonstrates the thinking/reasoning capability of Google AI Gemini. + * + *

This example shows how to: + *

+ * <ul>
+ *   <li>Enable thinking mode using {@link GeminiThinkingConfig}</li>
+ *   <li>Configure the thinking budget (token limit for reasoning)</li>
+ *   <li>Access the model's reasoning process alongside the final response</li>
+ * </ul>
+ * + *

Thinking mode allows the model to perform step-by-step reasoning before + * providing a final answer, which is especially useful for complex problems. + * + *

Requires the {@code GOOGLE_AI_GEMINI_API_KEY} environment variable to be set. + */ +public class Example10_ChatWithThinking { + + public static void main(String[] args) { + ChatModel model = GoogleAiGeminiChatModel.builder() + .apiKey(System.getenv("GOOGLE_AI_GEMINI_API_KEY")) + .modelName("gemini-2.5-flash-lite") + .thinkingConfig(GeminiThinkingConfig.builder() + .thinkingBudget(2048) + .build()) + .returnThinking(true) + .build(); + + String problem = """ + You are in a room with three light switches, each controlling one of three light bulbs in another room + you cannot see from where you are. You can flip the switches as many times as you want, but you can + only enter the room with the bulbs once. How do you determine which switch controls which bulb? + """; + + System.out.println("Problem: " + problem); + System.out.println("Asking Gemini with thinking enabled...\n"); + + ChatResponse response = model.chat(UserMessage.from(problem)); + + // Display thinking process if available + if (response.aiMessage().thinking() != null) { + System.out.println("=== Thinking Process ==="); + System.out.println(response.aiMessage().thinking()); + System.out.println(); + } + + System.out.println("=== Final Answer ==="); + System.out.println(response.aiMessage().text()); + } +} \ No newline at end of file diff --git a/google-ai-gemini-examples/src/main/java/dev/langchain4j/example/gemini/Example11_ChatWithSafetySettings.java b/google-ai-gemini-examples/src/main/java/dev/langchain4j/example/gemini/Example11_ChatWithSafetySettings.java new file mode 100644 index 00000000..c90b67fe --- /dev/null +++ b/google-ai-gemini-examples/src/main/java/dev/langchain4j/example/gemini/Example11_ChatWithSafetySettings.java @@ -0,0 +1,79 @@ +package dev.langchain4j.example.gemini; + +import dev.langchain4j.data.message.UserMessage; +import dev.langchain4j.model.chat.ChatModel; +import dev.langchain4j.model.chat.response.ChatResponse; +import 
dev.langchain4j.model.googleai.GoogleAiGeminiChatModel; +import dev.langchain4j.model.googleai.GeminiHarmBlockThreshold; +import dev.langchain4j.model.googleai.GeminiHarmCategory; +import dev.langchain4j.model.googleai.GeminiSafetySetting; + +import java.util.List; + +/** + * Demonstrates configuring safety settings with Google AI Gemini. + * + *

This example shows how to: + *

+ * <ul>
+ *   <li>Configure safety thresholds for different harm categories</li>
+ *   <li>Customize content filtering behavior</li>
+ *   <li>Balance safety with response flexibility</li>
+ * </ul>
+ * + *

Available harm categories: + *

+ * <ul>
+ *   <li>{@code HARM_CATEGORY_HARASSMENT}</li>
+ *   <li>{@code HARM_CATEGORY_HATE_SPEECH}</li>
+ *   <li>{@code HARM_CATEGORY_SEXUALLY_EXPLICIT}</li>
+ *   <li>{@code HARM_CATEGORY_DANGEROUS_CONTENT}</li>
+ * </ul>
+ * + *

Available thresholds (from most to least restrictive): + *

+ * <ul>
+ *   <li>{@code BLOCK_LOW_AND_ABOVE}</li>
+ *   <li>{@code BLOCK_MEDIUM_AND_ABOVE}</li>
+ *   <li>{@code BLOCK_ONLY_HIGH}</li>
+ *   <li>{@code BLOCK_NONE}</li>
+ * </ul>
+ * + *

Requires the {@code GOOGLE_AI_GEMINI_API_KEY} environment variable to be set. + * + *

Learn more

+ */ +public class Example11_ChatWithSafetySettings { + public static void main(String[] args) { + List safetySettings = List.of( + new GeminiSafetySetting( + GeminiHarmCategory.HARM_CATEGORY_HARASSMENT, + GeminiHarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE), + new GeminiSafetySetting( + GeminiHarmCategory.HARM_CATEGORY_HATE_SPEECH, + GeminiHarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE), + new GeminiSafetySetting( + GeminiHarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT, + GeminiHarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE), + new GeminiSafetySetting( + GeminiHarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT, + GeminiHarmBlockThreshold.BLOCK_ONLY_HIGH) + ); + + ChatModel model = GoogleAiGeminiChatModel.builder() + .apiKey(System.getenv("GOOGLE_AI_GEMINI_API_KEY")) + .modelName("gemini-2.5-flash-lite") + .safetySettings(safetySettings) + .build(); + + System.out.println("Safety settings configured:"); + for (GeminiSafetySetting setting : safetySettings) { + System.out.println(" " + setting.getCategory() + ": " + setting.getThreshold()); + } + + String prompt = "Explain common safety practices when handling kitchen knives."; + + System.out.println("\nPrompt: " + prompt); + System.out.println("\nResponse:"); + + ChatResponse response = model.chat(UserMessage.from(prompt)); + System.out.println(response.aiMessage().text()); + } +} \ No newline at end of file diff --git a/google-ai-gemini-examples/src/main/java/dev/langchain4j/example/gemini/Example12_BatchChatInline.java b/google-ai-gemini-examples/src/main/java/dev/langchain4j/example/gemini/Example12_BatchChatInline.java new file mode 100644 index 00000000..fb9e99d5 --- /dev/null +++ b/google-ai-gemini-examples/src/main/java/dev/langchain4j/example/gemini/Example12_BatchChatInline.java @@ -0,0 +1,123 @@ +package dev.langchain4j.example.gemini; + +import dev.langchain4j.data.message.UserMessage; +import dev.langchain4j.model.chat.request.ChatRequest; +import dev.langchain4j.model.googleai.BatchRequestResponse; +import 
dev.langchain4j.model.googleai.BatchRequestResponse.BatchIncomplete; +import dev.langchain4j.model.googleai.BatchRequestResponse.BatchName; +import dev.langchain4j.model.googleai.BatchRequestResponse.BatchResponse; +import dev.langchain4j.model.googleai.BatchRequestResponse.BatchSuccess; +import dev.langchain4j.model.googleai.GoogleAiGeminiBatchChatModel; + +import java.lang.reflect.Method; +import java.util.List; + +/** + * Demonstrates inline batch chat processing with Google AI Gemini. + * + *

Batch processing is ideal for large-scale, non-urgent tasks, offering: + *

+ * <ul>
+ *   <li>50% cost reduction compared to interactive requests</li>
+ *   <li>24-hour turnaround SLO</li>
+ *   <li>Up to 20MB of inline requests per batch</li>
+ * </ul>
+ * + *

This example shows how to: + *

+ * <ul>
+ *   <li>Create a batch chat model</li>
+ *   <li>Submit multiple chat requests as an inline batch</li>
+ *   <li>Poll for batch completion</li>
+ *   <li>Retrieve and process batch results</li>
+ * </ul>
+ * + *

Requires the {@code GOOGLE_AI_GEMINI_API_KEY} environment variable to be set. + */ +public class Example12_BatchChatInline { + + public static void main(String[] args) throws Exception { + var batchModel = GoogleAiGeminiBatchChatModel.builder() + .apiKey(System.getenv("GOOGLE_AI_GEMINI_API_KEY")) + .modelName("gemini-2.5-flash-lite") + .logRequestsAndResponses(true) + .build(); + + var requests = List.of( + ChatRequest.builder() + .modelName("gemini-2.5-flash-lite") + .messages(UserMessage.from("What is the capital of France?")) + .build(), + ChatRequest.builder() + .modelName("gemini-2.5-flash-lite") + .messages(UserMessage.from("What is the capital of Japan?")) + .build(), + ChatRequest.builder() + .modelName("gemini-2.5-flash-lite") + .messages(UserMessage.from("What is the capital of Brazil?")) + .build() + ); + + System.out.println("Submitting batch with " + requests.size() + " requests..."); + + BatchResponse response = batchModel.createBatchInline("capitals-batch", 0L, requests); + BatchName batchName = getBatchName(response); + + System.out.println("Batch created: " + batchName.value()); + System.out.println("Polling for completion..."); + + // Poll until complete + do { + Thread.sleep(5000); + response = batchModel.retrieveBatchResults(batchName); + System.out.println(" Status: " + response.getClass().getSimpleName()); + } while (response instanceof BatchIncomplete); + + // Process results + if (response instanceof BatchSuccess success) { + System.out.println("\nBatch completed successfully!"); + System.out.println("Results:"); + + var results = success.responses(); + for (int i = 0; i < results.size(); i++) { + var chatResponse = (dev.langchain4j.model.chat.response.ChatResponse) results.get(i); + System.out.println(" " + (i + 1) + ". 
" + chatResponse.aiMessage().text()); + } + } else { + System.err.println("Batch failed: " + response); + } + + // Clean up + batchModel.deleteBatchJob(batchName); + System.out.println("\nBatch job deleted."); + } + + private static BatchName getBatchName(BatchResponse response) { + if (response instanceof BatchSuccess success) { + return success.batchName(); + } else if (response instanceof BatchIncomplete incomplete) { + return incomplete.batchName(); + } else { + throw new IllegalStateException("Unexpected response type: " + response); + } + } + + private static GoogleAiGeminiBatchChatModel createBatchModel() throws Exception { + // Use reflection to access the non-public builder() method + Method builderMethod = GoogleAiGeminiBatchChatModel.class.getDeclaredMethod("builder"); + builderMethod.setAccessible(true); + Object builder = builderMethod.invoke(null); + + Class builderClass = builder.getClass(); + + Method apiKeyMethod = builderClass.getMethod("apiKey", String.class); + builder = apiKeyMethod.invoke(builder, System.getenv("GOOGLE_AI_GEMINI_API_KEY")); + + Method modelNameMethod = builderClass.getMethod("modelName", String.class); + builder = modelNameMethod.invoke(builder, "gemini-2.5-flash-lite"); + + Method logMethodName = builderClass.getMethod("logRequestsAndResponses", Boolean.class); + builder = logMethodName.invoke(builder, true); + + Method buildMethod = builderClass.getMethod("build"); + return (GoogleAiGeminiBatchChatModel) buildMethod.invoke(builder); + } +} \ No newline at end of file diff --git a/google-ai-gemini-examples/src/main/java/dev/langchain4j/example/gemini/Example13_BatchEmbeddingInline.java b/google-ai-gemini-examples/src/main/java/dev/langchain4j/example/gemini/Example13_BatchEmbeddingInline.java new file mode 100644 index 00000000..4dbf6d0c --- /dev/null +++ b/google-ai-gemini-examples/src/main/java/dev/langchain4j/example/gemini/Example13_BatchEmbeddingInline.java @@ -0,0 +1,98 @@ +package dev.langchain4j.example.gemini; + 
+import dev.langchain4j.data.embedding.Embedding; +import dev.langchain4j.data.segment.TextSegment; +import dev.langchain4j.model.googleai.BatchRequestResponse.BatchIncomplete; +import dev.langchain4j.model.googleai.BatchRequestResponse.BatchName; +import dev.langchain4j.model.googleai.BatchRequestResponse.BatchResponse; +import dev.langchain4j.model.googleai.BatchRequestResponse.BatchSuccess; +import dev.langchain4j.model.googleai.GoogleAiGeminiBatchEmbeddingModel; + +import java.lang.reflect.Method; +import java.util.List; + +/** + * Demonstrates inline batch embedding processing with Google AI Gemini. + * + *

Batch processing is ideal for large-scale, non-urgent embedding tasks, offering:
+ * <ul>
+ *   <li>50% cost reduction compared to interactive requests</li>
+ *   <li>24-hour turnaround SLO</li>
+ *   <li>Up to 20MB of inline requests per batch</li>
+ *   <li>Up to 100 segments per batch request</li>
+ * </ul>
+ *

This example shows how to:
+ * <ul>
+ *   <li>Create a batch embedding model</li>
+ *   <li>Submit multiple text segments as an inline batch</li>
+ *   <li>Poll for batch completion</li>
+ *   <li>Retrieve and process embedding results</li>
+ * </ul>
+ *

Requires the {@code GOOGLE_AI_GEMINI_API_KEY} environment variable to be set. + */ +public class Example13_BatchEmbeddingInline { + + public static void main(String[] args) throws Exception { + GoogleAiGeminiBatchEmbeddingModel batchModel = + GoogleAiGeminiBatchEmbeddingModel.builder().apiKey(System.getenv("GOOGLE_AI_GEMINI_API_KEY")) + .modelName("gemini-embedding-001") + .logRequestsAndResponses(true) + .build(); + + List segments = List.of( + TextSegment.from("Artificial intelligence is transforming industries worldwide."), + TextSegment.from("Machine learning models require large datasets for training."), + TextSegment.from("Natural language processing enables computers to understand text.") + ); + + System.out.println("Submitting batch with " + segments.size() + " text segments..."); + + BatchResponse response = batchModel.createBatchInline("embeddings-batch", 1L, segments); + BatchName batchName = getBatchName(response); + + System.out.println("Batch created: " + batchName.value()); + System.out.println("Polling for completion..."); + + // Poll until complete + do { + Thread.sleep(5000); + response = batchModel.retrieveBatchResults(batchName); + System.out.println(" Status: " + response.getClass().getSimpleName()); + } while (response instanceof BatchIncomplete); + + // Process results + if (response instanceof BatchSuccess success) { + System.out.println("\nBatch completed successfully!"); + System.out.println("Embeddings:"); + + var results = success.responses(); + for (int i = 0; i < results.size(); i++) { + var embedding = (Embedding) results.get(i); + System.out.printf(" %d. Dimension: %d, First 10 values: [", i + 1, embedding.dimension()); + float[] vector = embedding.vector(); + for (int j = 0; j < Math.min(10, vector.length); j++) { + System.out.printf("%.4f%s", vector[j], j < 9 ? 
", " : ""); + } + System.out.println("...]"); + } + } else { + System.err.println("Batch failed: " + response); + } + + // Clean up + batchModel.deleteBatchJob(batchName); + System.out.println("\nBatch job deleted."); + } + + private static BatchName getBatchName(BatchResponse response) { + if (response instanceof BatchSuccess success) { + return success.batchName(); + } else if (response instanceof BatchIncomplete incomplete) { + return incomplete.batchName(); + } else { + throw new IllegalStateException("Unexpected response type: " + response); + } + } +} \ No newline at end of file diff --git a/google-ai-gemini-examples/src/main/java/dev/langchain4j/example/gemini/Example14_BatchChatFromFile.java b/google-ai-gemini-examples/src/main/java/dev/langchain4j/example/gemini/Example14_BatchChatFromFile.java new file mode 100644 index 00000000..d7176395 --- /dev/null +++ b/google-ai-gemini-examples/src/main/java/dev/langchain4j/example/gemini/Example14_BatchChatFromFile.java @@ -0,0 +1,148 @@ +package dev.langchain4j.example.gemini; + +import dev.langchain4j.data.message.UserMessage; +import dev.langchain4j.model.chat.request.ChatRequest; +import dev.langchain4j.model.chat.response.ChatResponse; +import dev.langchain4j.model.googleai.BatchRequestResponse.*; +import dev.langchain4j.model.googleai.GeminiFiles; +import dev.langchain4j.model.googleai.GoogleAiGeminiBatchChatModel; +import dev.langchain4j.model.googleai.jsonl.JsonLinesWriters; + +import java.lang.reflect.Method; +import java.nio.file.Files; +import java.util.List; + +/** + * Demonstrates file-based batch chat processing with Google AI Gemini. + * + *

File-based batching is ideal for very large batches that exceed the 20MB inline limit.
+ * The workflow is:
+ * <ol>
+ *   <li>Write batch requests to a local JSONL file</li>
+ *   <li>Upload the file using the Gemini Files API</li>
+ *   <li>Create a batch job referencing the uploaded file</li>
+ *   <li>Poll for completion and retrieve results</li>
+ *   <li>Process results</li>
+ * </ol>
+ *
+ * <p>Benefits:
+ * <ul>
+ *   <li>50% cost reduction compared to interactive requests</li>
+ *   <li>24-hour turnaround SLO</li>
+ *   <li>Support for batches larger than 20MB</li>
+ * </ul>
+ *

Requires the {@code GOOGLE_AI_GEMINI_API_KEY} environment variable to be set. + */ +public class Example14_BatchChatFromFile { + + public static void main(String[] args) throws Exception { + var apiKey = System.getenv("GOOGLE_AI_GEMINI_API_KEY"); + + var batchModel = GoogleAiGeminiBatchChatModel.builder() + .apiKey(System.getenv("GOOGLE_AI_GEMINI_API_KEY")) + .modelName("gemini-2.5-flash-lite") + .logRequestsAndResponses(true) + .build(); + var geminiFiles = GeminiFiles.builder().apiKey(apiKey).build(); + + var requests = List.of( + new BatchFileRequest<>("solar-system", + ChatRequest.builder() + .messages(UserMessage.from("What is the largest planet in our solar system?")) + .build()), + new BatchFileRequest<>("speed-of-light", + ChatRequest.builder() + .messages(UserMessage.from("What is the speed of light in km/s?")) + .build()), + new BatchFileRequest<>("romeo-juliet", + ChatRequest.builder() + .messages(UserMessage.from("Who wrote Romeo and Juliet?")) + .build()), + new BatchFileRequest<>("water", + ChatRequest.builder() + .messages(UserMessage.from("What is the chemical formula for water?")) + .build()) + ); + + // Step 1: Write requests to a local JSONL file + var tempFile = Files.createTempFile("batch-requests-", ".jsonl"); + var writer = JsonLinesWriters.streaming(tempFile); + System.out.println("Writing batch requests to: " + tempFile); + batchModel.writeBatchToFile(writer, requests); + + // Verify JSONL content + System.out.println("JSONL content:"); + Files.readAllLines(tempFile).forEach(line -> System.out.println(" " + line)); + + // Step 2: Upload the file using Gemini Files API + System.out.println("\nUploading file to Gemini Files API..."); + var uploadedFile = geminiFiles.uploadFile(tempFile, "batch-requests.jsonl"); + System.out.println("Uploaded file URI: " + uploadedFile.uri()); + System.out.println("File state: " + uploadedFile.state()); + + // Wait for file to become ACTIVE + while (uploadedFile.isProcessing()) { + System.out.println(" Waiting 
for file to become active..."); + Thread.sleep(2000); + uploadedFile = geminiFiles.getMetadata(uploadedFile.name()); + } + + if (!uploadedFile.isActive()) { + System.err.println("File upload failed: " + uploadedFile.state()); + return; + } + + System.out.println("File is now ACTIVE"); + + // Step 3: Create batch job from the uploaded file + System.out.println("\nCreating batch job from uploaded file..."); + BatchResponse response = batchModel.createBatchFromFile("file-based-batch", uploadedFile); + BatchName batchName = getBatchName(response); + + System.out.println("Batch created: " + batchName.value()); + System.out.println("Polling for completion..."); + + // Step 4: Poll until complete + do { + Thread.sleep(5000); + response = batchModel.retrieveBatchResults(batchName); + System.out.println(" Status: " + response.getClass().getSimpleName()); + } while (response instanceof BatchIncomplete); + + // Step 5: Process results + if (response instanceof BatchSuccess success) { + System.out.println("\nBatch completed successfully!"); + System.out.println("Results:"); + + var results = success.responses(); + for (int i = 0; i < results.size(); i++) { + var chatResponse = (ChatResponse) results.get(i); + System.out.println(" " + (i + 1) + ". 
" + chatResponse.aiMessage().text()); + } + } else { + System.err.println("Batch failed: " + response); + } + + // Clean up + System.out.println("\nCleaning up..."); + batchModel.deleteBatchJob(batchName); + System.out.println("Batch job deleted."); + + geminiFiles.deleteFile(uploadedFile.name()); + System.out.println("Uploaded file deleted."); + + Files.deleteIfExists(tempFile); + System.out.println("Local temp file deleted."); + } + + private static BatchName getBatchName(BatchResponse response) { + if (response instanceof BatchSuccess success) { + return success.batchName(); + } else if (response instanceof BatchIncomplete incomplete) { + return incomplete.batchName(); + } else { + throw new IllegalStateException("Unexpected response type: " + response); + } + } +} \ No newline at end of file diff --git a/google-ai-gemini-examples/src/main/java/dev/langchain4j/example/gemini/Example15_BatchEmbedFromFile.java b/google-ai-gemini-examples/src/main/java/dev/langchain4j/example/gemini/Example15_BatchEmbedFromFile.java new file mode 100644 index 00000000..e6d82763 --- /dev/null +++ b/google-ai-gemini-examples/src/main/java/dev/langchain4j/example/gemini/Example15_BatchEmbedFromFile.java @@ -0,0 +1,141 @@ +package dev.langchain4j.example.gemini; + +import dev.langchain4j.data.embedding.Embedding; +import dev.langchain4j.data.segment.TextSegment; +import dev.langchain4j.model.googleai.BatchRequestResponse.*; +import dev.langchain4j.model.googleai.GeminiFiles; +import dev.langchain4j.model.googleai.GoogleAiGeminiBatchEmbeddingModel; +import dev.langchain4j.model.googleai.jsonl.JsonLinesWriters; + +import java.nio.file.Files; +import java.util.List; + +/** + * Demonstrates file-based batch embedding processing with Google AI Gemini. + * + *

File-based batching is ideal for very large batches that exceed the 20MB inline limit.
+ * The workflow is:
+ * <ol>
+ *   <li>Write batch requests to a local JSONL file</li>
+ *   <li>Upload the file using the Gemini Files API</li>
+ *   <li>Create a batch job referencing the uploaded file</li>
+ *   <li>Poll for completion and retrieve results</li>
+ *   <li>Process results</li>
+ * </ol>
+ *
+ * <p>Benefits:
+ * <ul>
+ *   <li>50% cost reduction compared to interactive requests</li>
+ *   <li>24-hour turnaround SLO</li>
+ *   <li>Support for batches larger than 20MB</li>
+ * </ul>
+ *

Requires the {@code GOOGLE_AI_GEMINI_API_KEY} environment variable to be set. + */ +public class Example15_BatchEmbedFromFile { + + public static void main(String[] args) throws Exception { + var apiKey = System.getenv("GOOGLE_AI_GEMINI_API_KEY"); + + GoogleAiGeminiBatchEmbeddingModel batchModel = GoogleAiGeminiBatchEmbeddingModel.builder() + .apiKey(apiKey) + .modelName("gemini-embedding-001") + .logRequestsAndResponses(true) + .build(); + + var geminiFiles = GeminiFiles.builder().apiKey(apiKey).build(); + + var segments = List.of( + new BatchFileRequest<>("ai-text", TextSegment.from("Artificial intelligence is transforming industries worldwide.")), + new BatchFileRequest<>("ml-text", TextSegment.from("Machine learning enables computers to learn from data.")), + new BatchFileRequest<>("nlp-text", TextSegment.from("Natural language processing helps machines understand human language.")), + new BatchFileRequest<>("dl-text", TextSegment.from("Deep learning uses neural networks with multiple layers.")) + ); + + // Step 1: Write requests to a local JSONL file + var tempFile = Files.createTempFile("batch-embed-requests-", ".jsonl"); + var writer = JsonLinesWriters.streaming(tempFile); + System.out.println("Writing batch embedding requests to: " + tempFile); + batchModel.writeBatchToFile(writer, segments); + + // Verify JSONL content + System.out.println("JSONL content:"); + Files.readAllLines(tempFile).forEach(line -> System.out.println(" " + line)); + + // Step 2: Upload the file using Gemini Files API + System.out.println("\nUploading file to Gemini Files API..."); + var uploadedFile = geminiFiles.uploadFile(tempFile, "batch-embed-requests.jsonl"); + System.out.println("Uploaded file URI: " + uploadedFile.uri()); + System.out.println("File state: " + uploadedFile.state()); + + // Wait for file to become ACTIVE + while (uploadedFile.isProcessing()) { + System.out.println(" Waiting for file to become active..."); + Thread.sleep(2000); + uploadedFile = 
geminiFiles.getMetadata(uploadedFile.name()); + } + + if (!uploadedFile.isActive()) { + System.err.println("File upload failed: " + uploadedFile.state()); + return; + } + + System.out.println("File is now ACTIVE"); + + // Step 3: Create batch job from the uploaded file + System.out.println("\nCreating batch embedding job from uploaded file..."); + BatchResponse response = batchModel.createBatchFromFile("file-based-embedding-batch", uploadedFile); + BatchName batchName = getBatchName(response); + + System.out.println("Batch created: " + batchName.value()); + System.out.println("Polling for completion..."); + + // Step 4: Poll until complete + do { + Thread.sleep(5000); + response = batchModel.retrieveBatchResults(batchName); + System.out.println(" Status: " + response.getClass().getSimpleName()); + } while (response instanceof BatchIncomplete); + + // Step 5: Process results + if (response instanceof BatchSuccess success) { + System.out.println("\nBatch completed successfully!"); + System.out.println("Results:"); + + var results = success.responses(); + for (int i = 0; i < results.size(); i++) { + var embedding = (Embedding) results.get(i); + System.out.println("\n " + (i + 1) + ". " + segments.get(i).key()); + System.out.println(" Dimension: " + embedding.dimension()); + System.out.print(" Vector (first 5 values): ["); + for (int j = 0; j < 5 && j < embedding.vector().length; j++) { + System.out.printf("%.6f%s", embedding.vector()[j], j < 4 ? 
", " : ""); + } + System.out.println("]"); + } + } else { + System.err.println("Batch failed: " + response); + } + + // Clean up + System.out.println("\nCleaning up..."); + batchModel.deleteBatchJob(batchName); + System.out.println("Batch job deleted."); + + geminiFiles.deleteFile(uploadedFile.name()); + System.out.println("Uploaded file deleted."); + + Files.deleteIfExists(tempFile); + System.out.println("Local temp file deleted."); + } + + private static BatchName getBatchName(BatchResponse response) { + if (response instanceof BatchSuccess success) { + return success.batchName(); + } else if (response instanceof BatchIncomplete incomplete) { + return incomplete.batchName(); + } else { + throw new IllegalStateException("Unexpected response type: " + response); + } + } +} \ No newline at end of file diff --git a/google-ai-gemini-examples/src/main/resources/q4-planning.pdf b/google-ai-gemini-examples/src/main/resources/q4-planning.pdf new file mode 100644 index 00000000..29539cd1 Binary files /dev/null and b/google-ai-gemini-examples/src/main/resources/q4-planning.pdf differ diff --git a/pom.xml b/pom.xml index 3cf26612..76268b5a 100644 --- a/pom.xml +++ b/pom.xml @@ -18,11 +18,15 @@ chroma-example couchbase-example customer-support-agent-example - tutorials elasticsearch-example github-models-examples + google-ai-gemini-examples + google-alloydb-example + gpullama3.java-example + helidon-examples infinispan-example jakartaee-microprofile-example + javafx-example jlama-examples mcp-example mcp-github-example @@ -33,27 +37,24 @@ ollama-examples open-ai-examples opensearch-example - ovh-ai-examples + oracle-example other-examples - pinecone-example + ovh-ai-examples + payara-micro-example pgvector-example + pinecone-example qdrant-example + quarkus-example rag-examples redis-example spring-boot-example + tutorials vertex-ai-gemini-examples vespa-example - weaviate-example - javafx-example - quarkus-example - oracle-example voyage-ai-examples - google-alloydb-example 
- wildfly-example - helidon-examples - payara-micro-example - gpullama3.java-example watsonx-ai-examples + weaviate-example + wildfly-example jvector-example yugabytedb-example